diff --git a/.circleci/config.yml b/.circleci/config.yml
new file mode 100644
index 0000000000..09150d9cd3
--- /dev/null
+++ b/.circleci/config.yml
@@ -0,0 +1,1122 @@
+version: 2
+
+
+# Common configuration blocks as YAML anchors
+# See: https://circleci.com/blog/circleci-hacks-reuse-yaml-in-your-circleci-config-with-yaml/
+httpbin_local: &httpbin_local
+  image: kennethreitz/httpbin@sha256:2c7abc4803080c22928265744410173b6fea3b898872c01c5fd0f0f9df4a59fb
+  name: httpbin.org
+test_runner: &test_runner
+  image: datadog/docker-library:ddtrace_py
+restore_cache_step: &restore_cache_step
+  restore_cache:
+    keys:
+      # In the cache key:
+      #   - .Environment.CIRCLE_JOB: We do separate tox environments by job name, so caching and restoring is
+      #     much faster.
+      - tox-cache-{{ .Environment.CIRCLE_JOB }}-{{ checksum "tox.ini" }}
+resource_class: &resource_class small
+save_cache_step: &save_cache_step
+  save_cache:
+    key: tox-cache-{{ .Environment.CIRCLE_JOB }}-{{ checksum "tox.ini" }}
+    paths:
+      - .tox
+deploy_docs_filters: &deploy_docs_filters
+  filters:
+    tags:
+      only: /(^docs$)|(^v[0-9]+(\.[0-9]+)*$)/
+    branches:
+      ignore: /.*/
+persist_to_workspace_step: &persist_to_workspace_step
+  persist_to_workspace:
+    root: /tmp
+    paths:
+      - "*.results"
+
+
+jobs:
+  black:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e 'black' --result-json /tmp/black.results
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  flake8:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e 'flake8' --result-json /tmp/flake8.results
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  # Test that we can build the package properly and package long description will render
+  test_build:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      # Create and activate a Python3.7 virtualenv
+      - run: virtualenv --python python3.7 .venv/build
+
+      # Install required dependencies
+      # DEV: `pyopenssl` needed until the following PR is released
+      #      https://github.com/pypa/twine/pull/447
+      # DEV: `wheel` is needed to run `bdist_wheel`
+      - run: .venv/build/bin/pip install twine readme_renderer[md] pyopenssl wheel
+      # Ensure we didn't cache from previous runs
+      - run: rm -rf build/ dist/
+      # Manually build any extensions to ensure they succeed
+      # DEV: `DDTRACE_BUILD_RAISE=TRUE` will ensure we don't swallow any build errors
+      - run: DDTRACE_BUILD_RAISE=TRUE .venv/build/bin/python setup.py build_ext --force
+      # Ensure source package will build
+      - run: .venv/build/bin/python setup.py sdist
+      # Ensure wheel will build
+      # DEV: `DDTRACE_BUILD_RAISE=TRUE` will ensure we don't swallow any build errors
+      - run: DDTRACE_BUILD_RAISE=TRUE .venv/build/bin/python setup.py bdist_wheel
+      # Ensure package long description is valid and will render
+      # https://github.com/pypa/twine/tree/6c4d5ecf2596c72b89b969ccc37b82c160645df8#twine-check
+      - run: .venv/build/bin/twine check dist/*
+
+  tracer:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^py..-tracer$'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  internal:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^py..-internal'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  opentracer:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^py..-opentracer'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  integration:
+    docker:
+      - <<: *test_runner
+        env:
+          TEST_DATADOG_INTEGRATION: 1
+      - image: datadog/docker-dd-agent
+        env:
+          - DD_APM_ENABLED=true
+          - DD_BIND_HOST=0.0.0.0
+          - DD_API_KEY=invalid_key_but_this_is_fine
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^py..-integration$'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  futures:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^futures_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  boto:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^boto\(core\)\?_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  ddtracerun:
+    docker:
+      - *test_runner
+      - image: redis:4.0-alpine
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - run: scripts/run-tox-scenario '^py..-ddtracerun$'
+      - *persist_to_workspace_step
+
+  test_utils:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^py..-test_utils$'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  test_logging:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^py..-test_logging$'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  asyncio:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^asyncio_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  pylons:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^pylons_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  aiohttp:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^aiohttp_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  tornado:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^tornado_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  bottle:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^bottle_contrib\(_autopatch\)\?-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  cassandra:
+    docker:
+      - <<: *test_runner
+        env:
+          CASS_DRIVER_NO_EXTENSIONS: 1
+      - image: spotify/cassandra:latest
+        env:
+          - MAX_HEAP_SIZE=512M
+          - HEAP_NEWSIZE=256M
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e wait cassandra
+      - run: scripts/run-tox-scenario '^cassandra_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  celery:
+    docker:
+      - *test_runner
+      - image: redis:4.0-alpine
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^celery_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  consul:
+    docker:
+      - *test_runner
+      - image: consul:1.6.0
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^consul_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  dogpile_cache:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^dogpile_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  elasticsearch:
+    docker:
+      - *test_runner
+      - image: elasticsearch:2.3
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^elasticsearch_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  falcon:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e 'falcon_contrib{,_autopatch}-{py27,py34,py35,py36}-falcon{10,11,12,13,14}' --result-json /tmp/falcon.results
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  django:
+    docker:
+      - *test_runner
+      - image: redis:4.0-alpine
+      - image: memcached:1.5-alpine
+      - image: datadog/docker-dd-agent
+        env:
+          - DD_APM_ENABLED=true
+          - DD_BIND_HOST=0.0.0.0
+          - DD_API_KEY=invalid_key_but_this_is_fine
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^django_'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  flask:
+    docker:
+      - *test_runner
+      - image: redis:4.0-alpine
+      - image: memcached:1.5-alpine
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^flask_\(cache_\)\?contrib\(_autopatch\)\?-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  gevent:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^gevent_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  httplib:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^httplib_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  grpc:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^grpc_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  molten:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^molten_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  mysqlconnector:
+    docker:
+      - *test_runner
+      - image: mysql:5.7
+        env:
+          - MYSQL_ROOT_PASSWORD=admin
+          - MYSQL_PASSWORD=test
+          - MYSQL_USER=test
+          - MYSQL_DATABASE=test
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e 'wait' mysql
+      - run: scripts/run-tox-scenario '^mysql_contrib-.*-mysqlconnector'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  mysqlpython:
+    docker:
+      - *test_runner
+      - image: mysql:5.7
+        env:
+          - MYSQL_ROOT_PASSWORD=admin
+          - MYSQL_PASSWORD=test
+          - MYSQL_USER=test
+          - MYSQL_DATABASE=test
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e 'wait' mysql
+      - run: scripts/run-tox-scenario '^mysqldb_contrib-.*-mysqlclient'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  mysqldb:
+    docker:
+      - *test_runner
+      - image: mysql:5.7
+        env:
+          - MYSQL_ROOT_PASSWORD=admin
+          - MYSQL_PASSWORD=test
+          - MYSQL_USER=test
+          - MYSQL_DATABASE=test
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e 'wait' mysql
+      - run: scripts/run-tox-scenario '^mysqldb_contrib-.*-mysqldb'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  pymysql:
+    docker:
+      - *test_runner
+      - image: mysql:5.7
+        env:
+          - MYSQL_ROOT_PASSWORD=admin
+          - MYSQL_PASSWORD=test
+          - MYSQL_USER=test
+          - MYSQL_DATABASE=test
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e 'wait' mysql
+      - run: scripts/run-tox-scenario '^pymysql_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  pylibmc:
+    docker:
+      - *test_runner
+      - image: memcached:1.5-alpine
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^pylibmc_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  pymemcache:
+    docker:
+      - *test_runner
+      - image: memcached:1.5-alpine
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^pymemcache_contrib\(_autopatch\)\?-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  mongoengine:
+    docker:
+      - *test_runner
+      - image: mongo:3.6
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^mongoengine_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  pymongo:
+    docker:
+      - *test_runner
+      - image: mongo:3.6
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^pymongo_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  pyramid:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^pyramid_contrib\(_autopatch\)\?-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  requests:
+    docker:
+      - *test_runner
+      - *httpbin_local
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^requests_contrib\(_autopatch\)\?-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  requestsgevent:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^requests_gevent_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  sqlalchemy:
+    docker:
+      - *test_runner
+      - image: postgres:10.5-alpine
+        env:
+          - POSTGRES_PASSWORD=postgres
+          - POSTGRES_USER=postgres
+          - POSTGRES_DB=postgres
+      - image: mysql:5.7
+        env:
+          - MYSQL_ROOT_PASSWORD=admin
+          - MYSQL_PASSWORD=test
+          - MYSQL_USER=test
+          - MYSQL_DATABASE=test
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e 'wait' postgres mysql
+      - run: scripts/run-tox-scenario '^sqlalchemy_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  dbapi:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^dbapi_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  psycopg:
+    docker:
+      - *test_runner
+      - image: postgres:10.5-alpine
+        env:
+          - POSTGRES_PASSWORD=postgres
+          - POSTGRES_USER=postgres
+          - POSTGRES_DB=postgres
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e 'wait' postgres
+      - run: scripts/run-tox-scenario '^psycopg_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  aiobotocore:
+    docker:
+      - *test_runner
+      - image: palazzem/moto:1.0.1
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^aiobotocore_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  aiopg:
+    docker:
+      - *test_runner
+      - image: postgres:10.5-alpine
+        env:
+          - POSTGRES_PASSWORD=postgres
+          - POSTGRES_USER=postgres
+          - POSTGRES_DB=postgres
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e 'wait' postgres
+      - run: scripts/run-tox-scenario '^aiopg_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  redis:
+    docker:
+      - *test_runner
+      - image: redis:4.0-alpine
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^redis_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  rediscluster:
+    docker:
+      - *test_runner
+      - image: grokzen/redis-cluster:4.0.9
+        env:
+          - IP=0.0.0.0
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e wait rediscluster
+      - run: scripts/run-tox-scenario '^rediscluster_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  vertica:
+    docker:
+      - *test_runner
+      - image: sumitchawla/vertica
+        env:
+          - VP_TEST_USER=dbadmin
+          - VP_TEST_PASSWORD=abc123
+          - VP_TEST_DATABASE=docker
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e wait vertica
+      - run: scripts/run-tox-scenario '^vertica_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  kombu:
+    docker:
+      - *test_runner
+      - image: rabbitmq:3.7-alpine
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: tox -e wait rabbitmq
+      - run: scripts/run-tox-scenario '^kombu_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  sqlite3:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^sqlite3_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  unit_tests:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^unit_tests-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  benchmarks:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run:
+          command: |
+            mkdir -p /tmp/test-reports
+            tox -e 'benchmarks-{py27,py34,py35,py36,py37}' --result-json /tmp/benchmarks.results -- --benchmark-storage=file:///tmp/test-reports/ --benchmark-autosave
+      - store_test_results:
+          path: /tmp/test-reports
+      - store_artifacts:
+          path: /tmp/test-reports
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  deploy_dev:
+    # build the master branch releasing development docs and wheels
+    docker:
+      - image: circleci/python:3.6
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - run: sudo apt-get -y install rake
+      - run: sudo pip install mkwheelhouse sphinx awscli
+      - run: S3_DIR=trace-dev rake release:docs
+      - run: S3_DIR=trace-dev rake release:wheel
+
+  jinja2:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^jinja2_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  mako:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^mako_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  algoliasearch:
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - *restore_cache_step
+      - run: scripts/run-tox-scenario '^algoliasearch_contrib-'
+      - *persist_to_workspace_step
+      - *save_cache_step
+
+  build_docs:
+    # deploy official documentation
+    docker:
+      - image: circleci/python:3.6
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - run: sudo apt-get -y install rake
+      # Sphinx 1.7.5 is required otherwise docs are not properly built
+      - run: sudo pip install mkwheelhouse sphinx==1.7.5 wrapt
+      - run: rake docs
+      - run:
+          command: |
+            mkdir -p /tmp/docs
+            cp -r docs/_build/html/* /tmp/docs
+      - store_artifacts:
+          path: /tmp/docs
+
+  deploy_to_s3:
+    # deploy official documentation
+    docker:
+      - image: circleci/python:3.6
+    resource_class: *resource_class
+    steps:
+      - checkout
+      - run: sudo apt-get -y install rake
+      # Sphinx 1.7.5 is required otherwise docs are not properly built
+      - run: sudo pip install mkwheelhouse sphinx==1.7.5 awscli wrapt
+      - run: S3_DIR=trace rake release:docs
+
+  wait_all_tests:
+    # this step ensures all `tox` environments are properly executed
+    docker:
+      - *test_runner
+    resource_class: *resource_class
+    steps:
+      - attach_workspace:
+          at: /tmp/workspace
+      - checkout
+      - run: ls /tmp/workspace/*
+      # debug: shows how many times each test was executed
+      - run: jq -s ".[]|.testenvs|keys|.[]" /tmp/workspace/* | grep -v GLOB | sed 's/"//g' | sort | uniq -c | sort -rn
+      # list all executed tests
+      - run: jq -s ".[]|.testenvs|keys|.[]" /tmp/workspace/* | grep -v GLOB | sed 's/"//g' | sort | uniq | tee all_executed_tests
+      # list all tests in tox.ini
+      - run: tox -l | grep -v "^wait$" | sort > all_tests
+      # checks that all tests were executed
+      - run: diff all_tests all_executed_tests
+
+
+workflows:
+  version: 2
+
+  deploy_docs:
+    jobs:
+      - build_docs:
+          <<: *deploy_docs_filters
+      - approve_docs_deployment:
+          <<: *deploy_docs_filters
+          type: approval
+          requires:
+            - build_docs
+      - deploy_to_s3:
+          <<: *deploy_docs_filters
+          requires:
+            - approve_docs_deployment
+  test:
+    jobs:
+      - build_docs
+      - black
+      - flake8
+      - test_build
+      - aiobotocore:
+          requires:
+            - flake8
+            - black
+      - aiohttp:
+          requires:
+            - flake8
+            - black
+      - aiopg:
+          requires:
+            - flake8
+            - black
+      - asyncio:
+          requires:
+            - flake8
+            - black
+      - algoliasearch:
+          requires:
+            - flake8
+            - black
+      - benchmarks:
+          requires:
+            - flake8
+            - black
+      - boto:
+          requires:
+            - flake8
+            - black
+      - bottle:
+          requires:
+            - flake8
+            - black
+      - cassandra:
+          requires:
+            - flake8
+            - black
+      - celery:
+          requires:
+            - flake8
+            - black
+      - consul:
+          requires:
+            - flake8
+            - black
+      - dbapi:
+          requires:
+            - flake8
+            - black
+      - ddtracerun:
+          requires:
+            - flake8
+            - black
+      - django:
+          requires:
+            - flake8
+            - black
+      - dogpile_cache:
+          requires:
+            - flake8
+            - black
+      - elasticsearch:
+          requires:
+            - flake8
+            - black
+      - falcon:
+          requires:
+            - flake8
+            - black
+      - flask:
+          requires:
+            - flake8
+            - black
+      - futures:
+          requires:
+            - flake8
+            - black
+      - gevent:
+          requires:
+            - flake8
+            - black
+      - grpc:
+          requires:
+            - flake8
+            - black
+      - httplib:
+          requires:
+            - flake8
+            - black
+      - integration:
+          requires:
+            - flake8
+            - black
+      - internal:
+          requires:
+            - flake8
+            - black
+      - jinja2:
+          requires:
+            - flake8
+            - black
+      - kombu:
+          requires:
+            - flake8
+            - black
+      - mako:
+          requires:
+            - flake8
+            - black
+      - molten:
+          requires:
+            - flake8
+            - black
+      - mongoengine:
+          requires:
+            - flake8
+            - black
+      - mysqlconnector:
+          requires:
+            - flake8
+            - black
+      - mysqldb:
+          requires:
+            - flake8
+            - black
+      - mysqlpython:
+          requires:
+            - flake8
+            - black
+      - opentracer:
+          requires:
+            - flake8
+            - black
+      - psycopg:
+          requires:
+            - flake8
+            - black
+      - pylibmc:
+          requires:
+            - flake8
+            - black
+      - pylons:
+          requires:
+            - flake8
+            - black
+      - pymemcache:
+          requires:
+            - flake8
+            - black
+      - pymongo:
+          requires:
+            - flake8
+            - black
+      - pymysql:
+          requires:
+            - flake8
+            - black
+      - pyramid:
+          requires:
+            - flake8
+            - black
+      - redis:
+          requires:
+            - flake8
+            - black
+      - rediscluster:
+          requires:
+            - flake8
+            - black
+      - requests:
+          requires:
+            - flake8
+            - black
+      - requestsgevent:
+          requires:
+            - flake8
+            - black
+      - sqlalchemy:
+          requires:
+            - flake8
+            - black
+      - sqlite3:
+          requires:
+            - flake8
+            - black
+      - test_utils:
+          requires:
+            - flake8
+            - black
+      - test_logging:
+          requires:
+            - flake8
+            - black
+      - tornado:
+          requires:
+            - flake8
+            - black
+      - tracer:
+          requires:
+            - flake8
+            - black
+      - unit_tests:
+          requires:
+            - flake8
+            - black
+      - vertica:
+          requires:
+            - flake8
+            - black
+      - wait_all_tests:
+          requires:
+            # Initial jobs
+            - build_docs
+            - black
+            - flake8
+            - test_build
+
+            # flake8 dependent jobs
+            - aiobotocore
+            - aiohttp
+            - aiopg
+            - asyncio
+            - algoliasearch
+            - benchmarks
+            - boto
+            - bottle
+            - cassandra
+            - celery
+            - consul
+            - dbapi
+            - ddtracerun
+            - dogpile_cache
+            - django
+            - elasticsearch
+            - falcon
+            - flask
+            - futures
+            - gevent
+            - grpc
+            - httplib
+            - integration
+            - internal
+            - jinja2
+            - kombu
+            - mako
+            - molten
+            - mongoengine
+            - mysqlconnector
+            - mysqldb
+            - mysqlpython
+            - opentracer
+            - psycopg
+            - pylibmc
+            - pylons
+            - pymemcache
+            - pymongo
+            - pymysql
+            - pyramid
+            - redis
+            - rediscluster
+            - requests
+            - requestsgevent
+            - sqlalchemy
+            - sqlite3
+            - test_utils
+            - test_logging
+            - tornado
+            - tracer
+            - unit_tests
+            - vertica
+      - deploy_dev:
+          requires:
+            - wait_all_tests
+          filters:
+            branches:
+              only: master
diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS
new file mode 100644
index 0000000000..1b5e95ea44
--- /dev/null
+++ b/.github/CODEOWNERS
@@ -0,0 +1 @@
+* @DataDog/apm-python
diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
new file mode 100644
index 0000000000..088ff1e00b
--- /dev/null
+++ b/.github/ISSUE_TEMPLATE.md
@@ -0,0 +1,19 @@
+Thanks for taking the time to report an issue!
+
+Before reporting an issue on dd-trace-py, please be sure to provide all
+necessary information.
+
+If you're hitting a bug, make sure that you're using the latest version of this
+library.
+
+### Which version of dd-trace-py are you using?
+
+### Which version of the libraries are you using?
+
+You can copy/paste the output of `pip freeze` here.
+
+### How can we reproduce your problem?
+
+### What is the result that you get?
+
+### What is the result that you expected?
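The `wait_all_tests` job in the config above cross-checks every tox result file that the other jobs persist to the workspace (via `--result-json` and the `*.results` glob) against `tox -l`. For illustration only, here is a rough Python equivalent of that jq/grep/diff pipeline; the `/tmp/workspace` layout and the `GLOB` pseudo-entry in tox's result JSON are assumptions taken from the shell commands in the config, not part of this diff:

    # Sketch of the wait_all_tests check: collect every executed tox env
    # name from the persisted --result-json files and compare against the
    # environments declared in tox.ini.
    import glob
    import json
    import subprocess

    executed = set()
    for results_file in glob.glob('/tmp/workspace/*.results'):
        with open(results_file) as f:
            report = json.load(f)
        # Each result file has a top-level "testenvs" mapping; "GLOB" is a
        # bookkeeping entry, filtered out just like `grep -v GLOB` does.
        executed.update(env for env in report.get('testenvs', {}) if env != 'GLOB')

    # `tox -l` lists every declared environment; `wait` is a helper env, not a test.
    declared = set(subprocess.check_output(['tox', '-l']).decode().split())
    declared.discard('wait')

    missing = declared - executed
    if missing:
        raise SystemExit('environments never executed: %s' % ', '.join(sorted(missing)))
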
diff --git a/.gitignore b/.gitignore
index 473ef20a4a..b055ac56b8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1,5 +1,7 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
 *.py[cod]
-*.sw[op]
+*$py.class
 
 # C extensions
 *.so
@@ -18,7 +20,6 @@ develop-eggs
 .installed.cfg
 lib
 lib64
-__pycache__
 venv*/
 
 # Installer logs
@@ -55,3 +56,94 @@ _build/
 
 # mypy
 .mypy_cache/
 target
+# Distribution / packaging
+.Python
+env/
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib64/
+parts/
+sdist/
+var/
+*.egg-info/
+.installed.cfg
+*.egg
+*.whl
+
+# PyInstaller
+#  Usually these files are written by a python script from a template
+#  before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.ddtox/
+.coverage
+.coverage.*
+.cache
+coverage.xml
+*,cover
+.hypothesis/
+.pytest_cache/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+target/
+
+# IPython Notebook
+.ipynb_checkpoints
+
+# pyenv
+.python-version
+
+# celery beat schedule file
+celerybeat-schedule
+
+# docker-compose env file
+# it must be versioned to keep track of backing services defaults
+!.env
+
+# virtualenv
+venv/
+ENV/
+
+# Spyder project settings
+.spyderproject
+
+# Rope project settings
+.ropeproject
+
+# Vim
+*.swp
+
+# IDEA
+.idea/
+
+# VS Code
+.vscode/
diff --git a/LICENSE.Apache b/LICENSE.Apache
new file mode 100644
index 0000000000..bff56b5431
--- /dev/null
+++ b/LICENSE.Apache
@@ -0,0 +1,200 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "{}"
+      replaced with your own identifying information. (Don't include
+      the brackets!) The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright 2016 Datadog, Inc.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/LICENSE.BSD3 b/LICENSE.BSD3
new file mode 100644
index 0000000000..e8f3a81c1c
--- /dev/null
+++ b/LICENSE.BSD3
@@ -0,0 +1,24 @@
+Copyright (c) 2016, Datadog
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+    * Redistributions of source code must retain the above copyright
+      notice, this list of conditions and the following disclaimer.
+    * Redistributions in binary form must reproduce the above copyright
+      notice, this list of conditions and the following disclaimer in the
+      documentation and/or other materials provided with the distribution.
+    * Neither the name of Datadog nor the
+      names of its contributors may be used to endorse or promote products
+      derived from this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL DATADOG BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/NOTICE b/NOTICE
new file mode 100644
index 0000000000..732c748d43
--- /dev/null
+++ b/NOTICE
@@ -0,0 +1,4 @@
+Datadog dd-trace-py
+Copyright 2016-Present Datadog, Inc.
+
+This product includes software developed at Datadog, Inc. (https://www.datadoghq.com/).
diff --git a/Rakefile b/Rakefile
new file mode 100644
index 0000000000..ea90004a40
--- /dev/null
+++ b/Rakefile
@@ -0,0 +1,77 @@
+desc "build the docs"
+task :docs do
+  sh "pip install sphinx"
+  Dir.chdir 'docs' do
+    sh "make html"
+  end
+end
+
+# Deploy tasks
+S3_DIR = ENV['S3_DIR']
+S3_BUCKET = "pypi.datadoghq.com"
+
+desc "release a new wheel"
+task :'release:wheel' do
+  fail "Missing environment variable S3_DIR" if !S3_DIR or S3_DIR.empty?
+
+  # Use custom `mkwheelhouse` to upload wheels and source distribution from dist/ to S3 bucket
+  sh "scripts/mkwheelhouse"
+end
+
+desc "release the docs website"
+task :'release:docs' => :docs do
+  fail "Missing environment variable S3_DIR" if !S3_DIR or S3_DIR.empty?
+  sh "aws s3 cp --recursive docs/_build/html/ s3://#{S3_BUCKET}/#{S3_DIR}/docs/"
+end
+
+namespace :pypi do
+  RELEASE_DIR = './dist/'
+
+  def get_version()
+    return `python setup.py --version`.strip
+  end
+
+  def get_branch()
+    return `git name-rev --name-only HEAD`.strip
+  end
+
+  task :confirm do
+    ddtrace_version = get_version
+
+    # NOTE: interpolation requires double-quoted strings in Ruby
+    if get_branch.downcase != "tags/v#{ddtrace_version}"
+      print "WARNING: Expected current commit to be tagged as 'tags/v#{ddtrace_version}', instead we are on '#{get_branch}', proceed anyways [y|N]? "
+      $stdout.flush
+
+      abort if $stdin.gets.to_s.strip.downcase != 'y'
+    end
+
+    puts "WARNING: This task will build and release new wheels to https://pypi.org/project/ddtrace/, this action cannot be undone"
+    print "         To proceed please type the version '#{ddtrace_version}': "
+    $stdout.flush
+
+    abort if $stdin.gets.to_s.strip.downcase != ddtrace_version
+  end
+
+  task :clean do
+    FileUtils.rm_rf(RELEASE_DIR)
+  end
+
+  task :install do
+    sh 'pip install twine'
+  end
+
+  task :build => :clean do
+    puts "building release in #{RELEASE_DIR}"
+    sh "scripts/build-dist"
+  end
+
+  task :release => [:confirm, :install, :build] do
+    builds = Dir.entries(RELEASE_DIR).reject {|f| f == '.' || f == '..'}
+    if builds.length == 0
+      fail "no build found in #{RELEASE_DIR}"
+    end
+
+    puts "uploading #{RELEASE_DIR}/*"
+    sh "twine upload #{RELEASE_DIR}/*"
+  end
+end
diff --git a/conftest.py b/conftest.py
new file mode 100644
index 0000000000..9e3fef991e
--- /dev/null
+++ b/conftest.py
@@ -0,0 +1,55 @@
+"""
+This file configures a local pytest plugin, which allows us to configure plugin hooks to control the
+execution of our tests, e.g. by loading fixtures, configuring directories to ignore, etc.
+
+Local plugins: https://docs.pytest.org/en/3.10.1/writing_plugins.html#local-conftest-plugins
+Hook reference: https://docs.pytest.org/en/3.10.1/reference.html#hook-reference
+"""
+import os
+import re
+import sys
+
+import pytest
+
+PY_DIR_PATTERN = re.compile(r"^py[23][0-9]$")
+
+
+# Determine if the folder should be ignored
+# https://docs.pytest.org/en/3.10.1/reference.html#_pytest.hookspec.pytest_ignore_collect
+# DEV: We can only ignore folders/modules, we cannot ignore individual files
+# DEV: We must wrap with `@pytest.mark.hookwrapper` to inherit from default (e.g. honor `--ignore`)
+#      https://github.com/pytest-dev/pytest/issues/846#issuecomment-122129189
+@pytest.mark.hookwrapper
+def pytest_ignore_collect(path, config):
+    """
+    Skip directories defining a required minimum Python version
+
+    Example::
+
+        File: tests/contrib/vertica/py35/test.py
+        Python 2.7: Skip
+        Python 3.4: Skip
+        Python 3.5: Collect
+        Python 3.6: Collect
+    """
+    # Execute original behavior first
+    # DEV: We need to set `outcome.force_result(True)` if we need to override
+    #      these results and skip this directory
+    outcome = yield
+
+    # Was not ignored by default behavior
+    if not outcome.get_result():
+        # DEV: `path` is a `LocalPath`
+        path = str(path)
+        if not os.path.isdir(path):
+            path = os.path.dirname(path)
+        dirname = os.path.basename(path)
+
+        # Directory name matches `py[23][0-9]`
+        if PY_DIR_PATTERN.match(dirname):
+            # Split out version numbers into a tuple: `py35` -> `(3, 5)`
+            min_required = tuple((int(v) for v in dirname.strip("py")))
+
+            # If the current Python version does not meet the minimum required, skip this directory
+            if sys.version_info[0:2] < min_required:
+                outcome.force_result(True)
diff --git a/ddtrace/__init__.py b/ddtrace/__init__.py
new file mode 100644
index 0000000000..d3c4d60a07
--- /dev/null
+++ b/ddtrace/__init__.py
@@ -0,0 +1,51 @@
+import sys
+
+import pkg_resources
+
+from .monkey import patch, patch_all
+from .pin import Pin
+from .span import Span
+from .tracer import Tracer
+from .settings import config
+
+
+try:
+    __version__ = pkg_resources.get_distribution(__name__).version
+except pkg_resources.DistributionNotFound:
+    # package is not installed
+    __version__ = None
+
+
+# a global tracer instance with integration settings
+tracer = Tracer()
+
+__all__ = [
+    'patch',
+    'patch_all',
+    'Pin',
+    'Span',
+    'tracer',
+    'Tracer',
+    'config',
+]
+
+
+_ORIGINAL_EXCEPTHOOK = sys.excepthook
+
+
+def _excepthook(tp, value, traceback):
+    tracer.global_excepthook(tp, value, traceback)
+    if _ORIGINAL_EXCEPTHOOK:
+        return _ORIGINAL_EXCEPTHOOK(tp, value, traceback)
+
+
+def install_excepthook():
+    """Install a hook that intercepts unhandled exceptions and sends metrics about them."""
+    global _ORIGINAL_EXCEPTHOOK
+    _ORIGINAL_EXCEPTHOOK = sys.excepthook
+    sys.excepthook = _excepthook
+
+
+def uninstall_excepthook():
+    """Uninstall the global tracer except hook."""
+    sys.excepthook = _ORIGINAL_EXCEPTHOOK
diff --git a/ddtrace/_worker.py b/ddtrace/_worker.py
new file mode 100644
index 0000000000..ed25db4183
--- /dev/null
+++ b/ddtrace/_worker.py
@@ -0,0 +1,82 @@
+import atexit
+import threading
+import os
+
+from .internal.logger import get_logger
+
+_LOG = get_logger(__name__)
+
+
+class PeriodicWorkerThread(object):
+    """Periodic worker thread.
+
+    This class can be used to instantiate a worker thread that will run its `run_periodic` function every `interval`
+    seconds.
+
+    The method `on_shutdown` will be called on worker shutdown. The worker will be shut down when the program exits
+    and can be waited for with the `exit_timeout` parameter.
+
+    """
+
+    _DEFAULT_INTERVAL = 1.0
+
+    def __init__(self, interval=_DEFAULT_INTERVAL, exit_timeout=None, name=None, daemon=True):
+        """Create a new worker thread that runs a function periodically.
+
+        :param interval: The interval in seconds to wait between calls to `run_periodic`.
+        :param exit_timeout: The timeout to use when exiting the program and waiting for the thread to finish.
+        :param name: Name of the worker.
+        :param daemon: Whether the worker should be a daemon.
+        """
+
+        self._thread = threading.Thread(target=self._target, name=name)
+        self._thread.daemon = daemon
+        self._stop = threading.Event()
+        self.interval = interval
+        self.exit_timeout = exit_timeout
+        atexit.register(self._atexit)
+
+    def _atexit(self):
+        self.stop()
+        if self.exit_timeout is not None:
+            key = 'ctrl-break' if os.name == 'nt' else 'ctrl-c'
+            _LOG.debug(
+                'Waiting %d seconds for %s to finish. Hit %s to quit.',
+                self.exit_timeout, self._thread.name, key,
+            )
+            self.join(self.exit_timeout)
+
+    def start(self):
+        """Start the periodic worker."""
+        _LOG.debug('Starting %s thread', self._thread.name)
+        self._thread.start()
+
+    def stop(self):
+        """Stop the worker."""
+        _LOG.debug('Stopping %s thread', self._thread.name)
+        self._stop.set()
+
+    def is_alive(self):
+        return self._thread.is_alive()
+
+    def join(self, timeout=None):
+        return self._thread.join(timeout)
+
+    def _target(self):
+        while not self._stop.wait(self.interval):
+            self.run_periodic()
+        self._on_shutdown()
+
+    @staticmethod
+    def run_periodic():
+        """Method executed every interval."""
+        pass
+
+    def _on_shutdown(self):
+        _LOG.debug('Shutting down %s thread', self._thread.name)
+        self.on_shutdown()
+
+    @staticmethod
+    def on_shutdown():
+        """Method run on worker shutdown."""
+        pass
diff --git a/ddtrace/api.py b/ddtrace/api.py
new file mode 100644
index 0000000000..5654d098b7
--- /dev/null
+++ b/ddtrace/api.py
@@ -0,0 +1,279 @@
+# stdlib
+import ddtrace
+from json import loads
+import socket
+
+# project
+from .encoding import get_encoder, JSONEncoder
+from .compat import httplib, PYTHON_VERSION, PYTHON_INTERPRETER, get_connection_response
+from .internal.logger import get_logger
+from .internal.runtime import container
+from .payload import Payload, PayloadFull
+from .utils.deprecation import deprecated
+from .utils import time
+
+
+log = get_logger(__name__)
+
+
+_VERSIONS = {'v0.4': {'traces': '/v0.4/traces',
+                      'services': '/v0.4/services',
+                      'compatibility_mode': False,
+                      'fallback': 'v0.3'},
+             'v0.3': {'traces': '/v0.3/traces',
+                      'services': '/v0.3/services',
+                      'compatibility_mode': False,
+                      'fallback': 'v0.2'},
+             'v0.2': {'traces': '/v0.2/traces',
+                      'services': '/v0.2/services',
+                      'compatibility_mode': True,
+                      'fallback': None}}
+
+
+class Response(object):
+    """
+    Custom API Response object to represent a response from calling the API.
+
+    We do this to ensure we know expected properties will exist, and so we
+    can call `resp.read()` and load the body once into an instance before we
+    close the HTTPConnection used for the request.
+    """
+    __slots__ = ['status', 'body', 'reason', 'msg']
+
+    def __init__(self, status=None, body=None, reason=None, msg=None):
+        self.status = status
+        self.body = body
+        self.reason = reason
+        self.msg = msg
+
+    @classmethod
+    def from_http_response(cls, resp):
+        """
+        Build a ``Response`` from the provided ``HTTPResponse`` object.
+
+        This function will call `.read()` to consume the body of the ``HTTPResponse`` object.
+
+        :param resp: ``HTTPResponse`` object to build the ``Response`` from
+        :type resp: ``HTTPResponse``
+        :rtype: ``Response``
+        :returns: A new ``Response``
+        """
+        return cls(
+            status=resp.status,
+            body=resp.read(),
+            reason=getattr(resp, 'reason', None),
+            msg=getattr(resp, 'msg', None),
+        )
+
+    def get_json(self):
+        """Helper to parse the body of this response as JSON"""
+        try:
+            body = self.body
+            if not body:
+                log.debug('Empty reply from Datadog Agent, %r', self)
+                return
+
+            if not isinstance(body, str) and hasattr(body, 'decode'):
+                body = body.decode('utf-8')
+
+            if hasattr(body, 'startswith') and body.startswith('OK'):
+                # This typically happens when using a priority-sampling enabled
+                # library with an outdated agent. It still works, but priority sampling
+                # will probably send too many traces, so the next step is to upgrade agent.
+                log.debug('Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date')
+                return
+
+            return loads(body)
+        except (ValueError, TypeError):
+            log.debug('Unable to parse Datadog Agent JSON response: %r', body, exc_info=True)
+
+    def __repr__(self):
+        return '{0}(status={1!r}, body={2!r}, reason={3!r}, msg={4!r})'.format(
+            self.__class__.__name__,
+            self.status,
+            self.body,
+            self.reason,
+            self.msg,
+        )
+
+
+class UDSHTTPConnection(httplib.HTTPConnection):
+    """An HTTP connection established over a Unix Domain Socket."""
+
+    # It's "important" to keep the hostname and port arguments here; while they are not used by the connection
+    # mechanism, they are actually used as HTTP headers such as `Host`.
+    def __init__(self, path, https, *args, **kwargs):
+        if https:
+            httplib.HTTPSConnection.__init__(self, *args, **kwargs)
+        else:
+            httplib.HTTPConnection.__init__(self, *args, **kwargs)
+        self.path = path
+
+    def connect(self):
+        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+        sock.connect(self.path)
+        self.sock = sock
+
+
+class API(object):
+    """
+    Send data to the trace agent using the HTTP protocol and JSON format
+    """
+
+    TRACE_COUNT_HEADER = 'X-Datadog-Trace-Count'
+
+    # Default timeout when establishing HTTP connection and sending/receiving from socket.
+    # This ought to be enough as the agent is local
+    TIMEOUT = 2
+
+    def __init__(self, hostname, port, uds_path=None, https=False, headers=None, encoder=None, priority_sampling=False):
+        """Create a new connection to the Tracer API.
+
+        :param hostname: The hostname.
+        :param port: The TCP port to use.
+        :param uds_path: The path to use if the connection is to be established with a Unix Domain Socket.
+        :param headers: The headers to pass along the request.
+        :param encoder: The encoder to use to serialize data.
+        :param priority_sampling: Whether to use priority sampling.
+        """
+        self.hostname = hostname
+        self.port = int(port)
+        self.uds_path = uds_path
+        self.https = https
+
+        self._headers = headers or {}
+        self._version = None
+
+        if priority_sampling:
+            self._set_version('v0.4', encoder=encoder)
+        else:
+            self._set_version('v0.3', encoder=encoder)
+
+        self._headers.update({
+            'Datadog-Meta-Lang': 'python',
+            'Datadog-Meta-Lang-Version': PYTHON_VERSION,
+            'Datadog-Meta-Lang-Interpreter': PYTHON_INTERPRETER,
+            'Datadog-Meta-Tracer-Version': ddtrace.__version__,
+        })
+
+        # Add container information if we have it
+        self._container_info = container.get_container_info()
+        if self._container_info and self._container_info.container_id:
+            self._headers.update({
+                'Datadog-Container-Id': self._container_info.container_id,
+            })
+
+    def __str__(self):
+        if self.uds_path:
+            return 'unix://' + self.uds_path
+        if self.https:
+            scheme = 'https://'
+        else:
+            scheme = 'http://'
+        return '%s%s:%s' % (scheme, self.hostname, self.port)
+
+    def _set_version(self, version, encoder=None):
+        if version not in _VERSIONS:
+            version = 'v0.2'
+        if version == self._version:
+            return
+        self._version = version
+        self._traces = _VERSIONS[version]['traces']
+        self._services = _VERSIONS[version]['services']
+        self._fallback = _VERSIONS[version]['fallback']
+        self._compatibility_mode = _VERSIONS[version]['compatibility_mode']
+        if self._compatibility_mode:
+            self._encoder = JSONEncoder()
+        else:
+            self._encoder = encoder or get_encoder()
+        # overwrite the Content-type with the one chosen in the Encoder
+        self._headers.update({'Content-Type': self._encoder.content_type})
+
+    def _downgrade(self):
+        """
+        Downgrade the used encoder and API level. This method must fall back to a safe
+        encoder and API, so that it will succeed despite users' configurations. This action
+        ensures that the compatibility mode is activated so that the downgrade will be
+        executed only once.
+        """
+        self._set_version(self._fallback)
+
+    def send_traces(self, traces):
+        """Send traces to the API.
+
+        :param traces: A list of traces.
+        :return: The list of API HTTP responses.
+        """
+        if not traces:
+            return []
+
+        with time.StopWatch() as sw:
+            responses = []
+            payload = Payload(encoder=self._encoder)
+            for trace in traces:
+                try:
+                    payload.add_trace(trace)
+                except PayloadFull:
+                    # Is payload full or is the trace too big?
+                    # If payload is not empty, then using a new Payload might allow us to fit the trace.
+                    # Let's flush the Payload and try to put the trace in a new empty Payload.
+                    if not payload.empty:
+                        responses.append(self._flush(payload))
+                    # Create a new payload
+                    payload = Payload(encoder=self._encoder)
+                    try:
+                        # Add the trace that we were unable to add in that iteration
+                        payload.add_trace(trace)
+                    except PayloadFull:
+                        # If the trace does not fit in a payload on its own, that's bad. Drop it.
+                        log.warning('Trace %r is too big to fit in a payload, dropping it', trace)
+
+            # Check that the Payload is not empty:
+            # it could be empty if the last trace was too big to fit.
+            if not payload.empty:
+                responses.append(self._flush(payload))
+
+        log.debug('reported %d traces in %.5fs', len(traces), sw.elapsed())
+
+        return responses
+
+    def _flush(self, payload):
+        try:
+            response = self._put(self._traces, payload.get_payload(), payload.length)
+        except (httplib.HTTPException, OSError, IOError) as e:
+            return e
+
+        # the API endpoint is not available so we should downgrade the connection and re-try the call
+        if response.status in [404, 415] and self._fallback:
+            log.debug("calling endpoint '%s' but received %s; downgrading API", self._traces, response.status)
+            self._downgrade()
+            return self._flush(payload)
+
+        return response
+
+    @deprecated(message='Sending services to the API is no longer necessary', version='1.0.0')
+    def send_services(self, *args, **kwargs):
+        return
+
+    def _put(self, endpoint, data, count):
+        headers = self._headers.copy()
+        headers[self.TRACE_COUNT_HEADER] = str(count)
+
+        if self.uds_path is None:
+            if self.https:
+                conn = httplib.HTTPSConnection(self.hostname, self.port, timeout=self.TIMEOUT)
+            else:
+                conn = httplib.HTTPConnection(self.hostname, self.port, timeout=self.TIMEOUT)
+        else:
+            conn = UDSHTTPConnection(self.uds_path, self.https, self.hostname, self.port, timeout=self.TIMEOUT)
+
+        try:
+            conn.request('PUT', endpoint, data, headers)
+
+            # Parse the HTTPResponse into an API.Response
+            # DEV: This will call `resp.read()` which must happen before the `conn.close()` below,
+            #      if we call `.close()` then all future `.read()` calls will return `b''`
+            resp = get_connection_response(conn)
+            return Response.from_http_response(resp)
+        finally:
+            conn.close()
diff --git a/ddtrace/bootstrap/__init__.py b/ddtrace/bootstrap/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/ddtrace/bootstrap/sitecustomize.py b/ddtrace/bootstrap/sitecustomize.py
new file mode 100644
index 0000000000..cd3fecc368
--- /dev/null
+++ b/ddtrace/bootstrap/sitecustomize.py
@@ -0,0 +1,150 @@
+"""
+Bootstrapping code that is run when using the `ddtrace-run` Python entrypoint
+Add all monkey-patching that needs to run by default here
+"""
+
+import os
+import imp
+import sys
+import logging
+
+from ddtrace.utils.formats import asbool, get_env
+from ddtrace.internal.logger import get_logger
+from ddtrace import constants
+
+logs_injection = asbool(get_env("logs", "injection"))
+DD_LOG_FORMAT = "%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] {}- %(message)s".format(
+    "[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] " if logs_injection else ""
+)
+
+if logs_injection:
+    # immediately patch logging if trace id injected
+    from ddtrace import patch
+
+    patch(logging=True)
+
+debug = os.environ.get("DATADOG_TRACE_DEBUG")
+
+# Set here a default logging format for basicConfig
+
+# DEV: Once basicConfig is called here, future calls to it cannot be used to
+# change the formatter since it applies the formatter to the root handler only
+# upon initializing it the first time.
+# See https://github.com/python/cpython/blob/112e4afd582515fcdcc0cde5012a4866e5cfda12/Lib/logging/__init__.py#L1550
+if debug and debug.lower() == "true":
+    logging.basicConfig(level=logging.DEBUG, format=DD_LOG_FORMAT)
+else:
+    logging.basicConfig(format=DD_LOG_FORMAT)
+
+log = get_logger(__name__)
+
+EXTRA_PATCHED_MODULES = {
+    "bottle": True,
+    "django": True,
+    "falcon": True,
+    "flask": True,
+    "pylons": True,
+    "pyramid": True,
+}
+
+
+def update_patched_modules():
+    modules_to_patch = os.environ.get("DATADOG_PATCH_MODULES")
+    if not modules_to_patch:
+        return
+    for patch in modules_to_patch.split(","):
+        if len(patch.split(":")) != 2:
+            log.debug("skipping malformed patch instruction")
+            continue
+
+        module, should_patch = patch.split(":")
+        if should_patch.lower() not in ["true", "false"]:
+            log.debug("skipping malformed patch instruction for %s", module)
+            continue
+
+        EXTRA_PATCHED_MODULES.update({module: should_patch.lower() == "true"})
+
+
+def add_global_tags(tracer):
+    tags = {}
+    for tag in os.environ.get("DD_TRACE_GLOBAL_TAGS", "").split(","):
+        tag_name, _, tag_value = tag.partition(":")
+        if not tag_name or not tag_value:
+            log.debug("skipping malformed tracer tag")
+            continue
+
+        tags[tag_name] = tag_value
+    tracer.set_tags(tags)
+
+
+try:
+    from ddtrace import tracer
+
+    patch = True
+
+    # Respect DATADOG_* environment variables in global tracer configuration
+    # TODO: these variables are deprecated; use utils method and update our documentation
+    # correct prefix should be DD_*
+    enabled = os.environ.get("DATADOG_TRACE_ENABLED")
+    hostname = os.environ.get("DD_AGENT_HOST", os.environ.get("DATADOG_TRACE_AGENT_HOSTNAME"))
+    port = os.environ.get("DATADOG_TRACE_AGENT_PORT")
+    priority_sampling = os.environ.get("DATADOG_PRIORITY_SAMPLING")
+
+    opts = {}
+
+    if enabled and enabled.lower() == "false":
+        opts["enabled"] = False
+        patch = False
+    if hostname:
+        opts["hostname"] = hostname
+    if port:
+        opts["port"] = int(port)
+    if priority_sampling:
+        opts["priority_sampling"] = asbool(priority_sampling)
+
+    opts["collect_metrics"] = asbool(get_env("runtime_metrics", "enabled"))
+
+    if opts:
+        tracer.configure(**opts)
+
+    if logs_injection:
+        EXTRA_PATCHED_MODULES.update({"logging": True})
+
+    if patch:
+        update_patched_modules()
+        from ddtrace import patch_all
+
+        patch_all(**EXTRA_PATCHED_MODULES)
+
+    if "DATADOG_ENV" in os.environ:
+        tracer.set_tags({constants.ENV_KEY: os.environ["DATADOG_ENV"]})
+
+    if "DD_TRACE_GLOBAL_TAGS" in os.environ:
+        add_global_tags(tracer)
+
+    # Ensure sitecustomize.py is properly called if available in application directories:
+    # * exclude `bootstrap_dir` from the search
+    # * find a user `sitecustomize.py` module
+    # * import that module via `imp`
+    bootstrap_dir = os.path.dirname(__file__)
+    path = list(sys.path)
+
+    if bootstrap_dir in path:
+        path.remove(bootstrap_dir)
+
+    try:
+        (f, path, description) = imp.find_module("sitecustomize", path)
+    except ImportError:
+        pass
+    else:
+        # `sitecustomize.py` found, load it
+        log.debug("sitecustomize from user found in: %s", path)
+        imp.load_module("sitecustomize", f, path, description)
+
+    # Loading status used in tests to detect if the `sitecustomize` has been
+    # properly loaded without exceptions. This must be the last action in the module
+    # when execution completes successfully.
+    loaded = True
+except Exception:
+    loaded = False
+    log.warning("error configuring Datadog tracing", exc_info=True)
diff --git a/ddtrace/commands/__init__.py b/ddtrace/commands/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/ddtrace/commands/ddtrace_run.py b/ddtrace/commands/ddtrace_run.py
new file mode 100755
index 0000000000..a13bdec3c3
--- /dev/null
+++ b/ddtrace/commands/ddtrace_run.py
@@ -0,0 +1,82 @@
+#!/usr/bin/env python
+from distutils import spawn
+import os
+import sys
+import logging
+
+debug = os.environ.get('DATADOG_TRACE_DEBUG')
+if debug and debug.lower() == 'true':
+    logging.basicConfig(level=logging.DEBUG)
+
+# Do not use `ddtrace.internal.logger.get_logger` here
+# DEV: It isn't really necessary to use `DDLogger` here so we want to
+#      defer importing `ddtrace` until we actually need it.
+#      As well, no actual rate limiting would apply here since we only
+#      have a few logged lines
+log = logging.getLogger(__name__)
+
+USAGE = """
+Execute the given Python program after configuring it to emit Datadog traces.
+Append command line arguments to your program as usual.
+
+Usage: [ENV_VARS] ddtrace-run <my_program>
+
+Available environment variables:
+
+    DATADOG_ENV : override an application's environment (no default)
+    DATADOG_TRACE_ENABLED=true|false : override the value of tracer.enabled (default: true)
+    DATADOG_TRACE_DEBUG=true|false : enable debug logging (default: false)
+    DATADOG_PATCH_MODULES=module:patch,module:patch... e.g. boto:true,redis:false : override the modules patched for this execution of the program (default: none)
+    DATADOG_TRACE_AGENT_HOSTNAME=localhost : override the address of the trace agent host that the default tracer will attempt to submit to (default: localhost)
+    DATADOG_TRACE_AGENT_PORT=8126 : override the port that the default tracer will submit to (default: 8126)
+    DATADOG_SERVICE_NAME : override the service name to be used for this program (no default)
+                           This value is passed through when setting up middleware for web framework integrations.
+                           (e.g. pylons, flask, django)
+                           For tracing without a web integration, prefer setting the service name in code.
+    DATADOG_PRIORITY_SAMPLING=true|false : enable priority sampling (default: false)
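+
+Example (my_program.py stands in for your own program; any of the variables
+above can be combined):
+
+    DATADOG_ENV=staging DATADOG_SERVICE_NAME=my-api ddtrace-run python my_program.py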
+""" # noqa: E501 + + +def _ddtrace_root(): + from ddtrace import __file__ + return os.path.dirname(__file__) + + +def _add_bootstrap_to_pythonpath(bootstrap_dir): + """ + Add our bootstrap directory to the head of $PYTHONPATH to ensure + it is loaded before program code + """ + python_path = os.environ.get('PYTHONPATH', '') + + if python_path: + new_path = '%s%s%s' % (bootstrap_dir, os.path.pathsep, os.environ['PYTHONPATH']) + os.environ['PYTHONPATH'] = new_path + else: + os.environ['PYTHONPATH'] = bootstrap_dir + + +def main(): + if len(sys.argv) < 2 or sys.argv[1] == '-h': + print(USAGE) + return + + log.debug('sys.argv: %s', sys.argv) + + root_dir = _ddtrace_root() + log.debug('ddtrace root: %s', root_dir) + + bootstrap_dir = os.path.join(root_dir, 'bootstrap') + log.debug('ddtrace bootstrap: %s', bootstrap_dir) + + _add_bootstrap_to_pythonpath(bootstrap_dir) + log.debug('PYTHONPATH: %s', os.environ['PYTHONPATH']) + log.debug('sys.path: %s', sys.path) + + executable = sys.argv[1] + + # Find the executable path + executable = spawn.find_executable(executable) + log.debug('program executable: %s', executable) + + os.execl(executable, executable, *sys.argv[2:]) diff --git a/ddtrace/compat.py b/ddtrace/compat.py new file mode 100644 index 0000000000..654f6b68aa --- /dev/null +++ b/ddtrace/compat.py @@ -0,0 +1,151 @@ +import platform +import re +import sys +import textwrap + +from ddtrace.vendor import six + +__all__ = [ + 'httplib', + 'iteritems', + 'PY2', + 'Queue', + 'stringify', + 'StringIO', + 'urlencode', + 'parse', + 'reraise', +] + +PYTHON_VERSION_INFO = sys.version_info +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +# Infos about python passed to the trace agent through the header +PYTHON_VERSION = platform.python_version() +PYTHON_INTERPRETER = platform.python_implementation() + +try: + StringIO = six.moves.cStringIO +except ImportError: + StringIO = six.StringIO + +httplib = six.moves.http_client +urlencode = six.moves.urllib.parse.urlencode +parse = six.moves.urllib.parse +Queue = six.moves.queue.Queue +iteritems = six.iteritems +reraise = six.reraise +reload_module = six.moves.reload_module + +stringify = six.text_type +string_type = six.string_types[0] +msgpack_type = six.binary_type +# DEV: `six` doesn't have `float` in `integer_types` +numeric_types = six.integer_types + (float, ) + +# Pattern class generated by `re.compile` +if PYTHON_VERSION_INFO >= (3, 7): + pattern_type = re.Pattern +else: + pattern_type = re._pattern_type + + +def is_integer(obj): + """Helper to determine if the provided ``obj`` is an integer type or not""" + # DEV: We have to make sure it is an integer and not a boolean + # >>> type(True) + # + # >>> isinstance(True, int) + # True + return isinstance(obj, six.integer_types) and not isinstance(obj, bool) + + +try: + from time import time_ns +except ImportError: + from time import time as _time + + def time_ns(): + return int(_time() * 10e5) * 1000 + + +if PYTHON_VERSION_INFO[0:2] >= (3, 4): + from asyncio import iscoroutinefunction + + # Execute from a string to get around syntax errors from `yield from` + # DEV: The idea to do this was stolen from `six` + # https://github.com/benjaminp/six/blob/15e31431af97e5e64b80af0a3f598d382bcdd49a/six.py#L719-L737 + six.exec_(textwrap.dedent(""" + import functools + import asyncio + + + def make_async_decorator(tracer, coro, *params, **kw_params): + \"\"\" + Decorator factory that creates an asynchronous wrapper that yields + a coroutine result. 
This factory is required to handle Python 2 + compatibilities. + + :param object tracer: the tracer instance that is used + :param function f: the coroutine that must be executed + :param tuple params: arguments given to the Tracer.trace() + :param dict kw_params: keyword arguments given to the Tracer.trace() + \"\"\" + @functools.wraps(coro) + @asyncio.coroutine + def func_wrapper(*args, **kwargs): + with tracer.trace(*params, **kw_params): + result = yield from coro(*args, **kwargs) # noqa: E999 + return result + + return func_wrapper + """)) + +else: + # asyncio is missing so we can't have coroutines; these + # functions are used only to ensure code executions in case + # of an unexpected behavior + def iscoroutinefunction(fn): + return False + + def make_async_decorator(tracer, fn, *params, **kw_params): + return fn + + +# DEV: There is `six.u()` which does something similar, but doesn't have the guard around `hasattr(s, 'decode')` +def to_unicode(s): + """ Return a unicode string for the given bytes or string instance. """ + # No reason to decode if we already have the unicode compatible object we expect + # DEV: `six.text_type` will be a `str` for python 3 and `unicode` for python 2 + # DEV: Double decoding a `unicode` can cause a `UnicodeEncodeError` + # e.g. `'\xc3\xbf'.decode('utf-8').decode('utf-8')` + if isinstance(s, six.text_type): + return s + + # If the object has a `decode` method, then decode into `utf-8` + # e.g. Python 2 `str`, Python 2/3 `bytearray`, etc + if hasattr(s, 'decode'): + return s.decode('utf-8') + + # Always try to coerce the object into the `six.text_type` object we expect + # e.g. `to_unicode(1)`, `to_unicode(dict(key='value'))` + return six.text_type(s) + + +def get_connection_response(conn): + """Returns the response for a connection. + + If using Python 2 enable buffering. + + Python 2 does not enable buffering by default resulting in many recv + syscalls. + + See: + https://bugs.python.org/issue4879 + https://github.com/python/cpython/commit/3c43fcba8b67ea0cec4a443c755ce5f25990a6cf + """ + if PY2: + return conn.getresponse(buffering=True) + else: + return conn.getresponse() diff --git a/ddtrace/constants.py b/ddtrace/constants.py new file mode 100644 index 0000000000..803e98a53f --- /dev/null +++ b/ddtrace/constants.py @@ -0,0 +1,15 @@ +FILTERS_KEY = 'FILTERS' +SAMPLE_RATE_METRIC_KEY = '_sample_rate' +SAMPLING_PRIORITY_KEY = '_sampling_priority_v1' +ANALYTICS_SAMPLE_RATE_KEY = '_dd1.sr.eausr' +SAMPLING_AGENT_DECISION = '_dd.agent_psr' +SAMPLING_RULE_DECISION = '_dd.rule_psr' +SAMPLING_LIMIT_DECISION = '_dd.limit_psr' +ORIGIN_KEY = '_dd.origin' +HOSTNAME_KEY = '_dd.hostname' +ENV_KEY = 'env' + +NUMERIC_TAGS = (ANALYTICS_SAMPLE_RATE_KEY, ) + +MANUAL_DROP_KEY = 'manual.drop' +MANUAL_KEEP_KEY = 'manual.keep' diff --git a/ddtrace/context.py b/ddtrace/context.py new file mode 100644 index 0000000000..7feff0c79f --- /dev/null +++ b/ddtrace/context.py @@ -0,0 +1,216 @@ +import logging +import threading + +from .constants import HOSTNAME_KEY, SAMPLING_PRIORITY_KEY, ORIGIN_KEY +from .internal.logger import get_logger +from .internal import hostname +from .settings import config +from .utils.formats import asbool, get_env + +log = get_logger(__name__) + + +class Context(object): + """ + Context is used to keep track of a hierarchy of spans for the current + execution flow. During each logical execution, the same ``Context`` is + used to represent a single logical trace, even if the trace is built + asynchronously. 
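+
+    A minimal sketch of the intended flow (``tracer`` is assumed to be a
+    configured ``ddtrace.Tracer``)::
+
+        context = tracer.get_call_context()     # Context bound to this execution flow
+        with tracer.trace('jobs.process') as span:
+            assert span.context is context      # the span is registered on that Context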
+ + A single code execution may use multiple ``Context`` if part of the execution + must not be related to the current tracing. As example, a delayed job may + compose a standalone trace instead of being related to the same trace that + generates the job itself. On the other hand, if it's part of the same + ``Context``, it will be related to the original trace. + + This data structure is thread-safe. + """ + _partial_flush_enabled = asbool(get_env('tracer', 'partial_flush_enabled', 'false')) + _partial_flush_min_spans = int(get_env('tracer', 'partial_flush_min_spans', 500)) + + def __init__(self, trace_id=None, span_id=None, sampling_priority=None, _dd_origin=None): + """ + Initialize a new thread-safe ``Context``. + + :param int trace_id: trace_id of parent span + :param int span_id: span_id of parent span + """ + self._trace = [] + self._finished_spans = 0 + self._current_span = None + self._lock = threading.Lock() + + self._parent_trace_id = trace_id + self._parent_span_id = span_id + self._sampling_priority = sampling_priority + self._dd_origin = _dd_origin + + @property + def trace_id(self): + """Return current context trace_id.""" + with self._lock: + return self._parent_trace_id + + @property + def span_id(self): + """Return current context span_id.""" + with self._lock: + return self._parent_span_id + + @property + def sampling_priority(self): + """Return current context sampling priority.""" + with self._lock: + return self._sampling_priority + + @sampling_priority.setter + def sampling_priority(self, value): + """Set sampling priority.""" + with self._lock: + self._sampling_priority = value + + def clone(self): + """ + Partially clones the current context. + It copies everything EXCEPT the registered and finished spans. + """ + with self._lock: + new_ctx = Context( + trace_id=self._parent_trace_id, + span_id=self._parent_span_id, + sampling_priority=self._sampling_priority, + ) + new_ctx._current_span = self._current_span + return new_ctx + + def get_current_root_span(self): + """ + Return the root span of the context or None if it does not exist. + """ + return self._trace[0] if len(self._trace) > 0 else None + + def get_current_span(self): + """ + Return the last active span that corresponds to the last inserted + item in the trace list. This cannot be considered as the current active + span in asynchronous environments, because some spans can be closed + earlier while child spans still need to finish their traced execution. + """ + with self._lock: + return self._current_span + + def _set_current_span(self, span): + """ + Set current span internally. + + Non-safe if not used with a lock. For internal Context usage only. + """ + self._current_span = span + if span: + self._parent_trace_id = span.trace_id + self._parent_span_id = span.span_id + else: + self._parent_span_id = None + + def add_span(self, span): + """ + Add a span to the context trace list, keeping it as the last active span. + """ + with self._lock: + self._set_current_span(span) + + self._trace.append(span) + span._context = self + + def close_span(self, span): + """ + Mark a span as a finished, increasing the internal counter to prevent + cycles inside _trace list. + """ + with self._lock: + self._finished_spans += 1 + self._set_current_span(span._parent) + + # notify if the trace is not closed properly; this check is executed only + # if the debug logging is enabled and when the root span is closed + # for an unfinished trace. 
This logging is meant to be used for debugging + # reasons, and it doesn't mean that the trace is wrongly generated. + # In asynchronous environments, it's legit to close the root span before + # some children. On the other hand, asynchronous web frameworks still expect + # to close the root span after all the children. + if span.tracer and span.tracer.log.isEnabledFor(logging.DEBUG) and span._parent is None: + unfinished_spans = [x for x in self._trace if not x.finished] + if unfinished_spans: + log.debug('Root span "%s" closed, but the trace has %d unfinished spans:', + span.name, len(unfinished_spans)) + for wrong_span in unfinished_spans: + log.debug('\n%s', wrong_span.pprint()) + + def _is_sampled(self): + return any(span.sampled for span in self._trace) + + def get(self): + """ + Returns a tuple containing the trace list generated in the current context and + if the context is sampled or not. It returns (None, None) if the ``Context`` is + not finished. If a trace is returned, the ``Context`` will be reset so that it + can be re-used immediately. + + This operation is thread-safe. + """ + with self._lock: + # All spans are finished? + if self._finished_spans == len(self._trace): + # get the trace + trace = self._trace + sampled = self._is_sampled() + sampling_priority = self._sampling_priority + # attach the sampling priority to the context root span + if sampled and sampling_priority is not None and trace: + trace[0].set_metric(SAMPLING_PRIORITY_KEY, sampling_priority) + origin = self._dd_origin + # attach the origin to the root span tag + if sampled and origin is not None and trace: + trace[0].set_tag(ORIGIN_KEY, origin) + + # Set hostname tag if they requested it + if config.report_hostname: + # DEV: `get_hostname()` value is cached + trace[0].set_tag(HOSTNAME_KEY, hostname.get_hostname()) + + # clean the current state + self._trace = [] + self._finished_spans = 0 + self._parent_trace_id = None + self._parent_span_id = None + self._sampling_priority = None + return trace, sampled + + elif self._partial_flush_enabled: + finished_spans = [t for t in self._trace if t.finished] + if len(finished_spans) >= self._partial_flush_min_spans: + # partial flush when enabled and we have more than the minimal required spans + trace = self._trace + sampled = self._is_sampled() + sampling_priority = self._sampling_priority + # attach the sampling priority to the context root span + if sampled and sampling_priority is not None and trace: + trace[0].set_metric(SAMPLING_PRIORITY_KEY, sampling_priority) + origin = self._dd_origin + # attach the origin to the root span tag + if sampled and origin is not None and trace: + trace[0].set_tag(ORIGIN_KEY, origin) + + # Set hostname tag if they requested it + if config.report_hostname: + # DEV: `get_hostname()` value is cached + trace[0].set_tag(HOSTNAME_KEY, hostname.get_hostname()) + + self._finished_spans = 0 + + # Any open spans will remain as `self._trace` + # Any finished spans will get returned to be flushed + self._trace = [t for t in self._trace if not t.finished] + + return finished_spans, sampled + return None, None diff --git a/ddtrace/contrib/__init__.py b/ddtrace/contrib/__init__.py new file mode 100644 index 0000000000..6a35d31c95 --- /dev/null +++ b/ddtrace/contrib/__init__.py @@ -0,0 +1 @@ +from ..utils.importlib import func_name, module_name, require_modules # noqa diff --git a/ddtrace/contrib/aiobotocore/__init__.py b/ddtrace/contrib/aiobotocore/__init__.py new file mode 100644 index 0000000000..0f5a5a1995 --- /dev/null +++ 
b/ddtrace/contrib/aiobotocore/__init__.py
@@ -0,0 +1,30 @@
+"""
+The aiobotocore integration will trace all AWS calls made with the ``aiobotocore``
+library. This integration isn't enabled when applying the default patching.
+To enable it, you must run ``patch_all(aiobotocore=True)``
+
+::
+
+    import aiobotocore.session
+    from ddtrace import patch
+
+    # If not patched yet, you can patch aiobotocore specifically
+    patch(aiobotocore=True)
+
+    # This will report spans with the default instrumentation
+    session = aiobotocore.session.get_session()
+    lambda_client = session.create_client('lambda', region_name='us-east-1')
+
+    # This query generates a trace
+    lambda_client.list_functions()
+"""
+from ...utils.importlib import require_modules
+
+
+required_modules = ['aiobotocore.client']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch
+
+        __all__ = ['patch']
diff --git a/ddtrace/contrib/aiobotocore/patch.py b/ddtrace/contrib/aiobotocore/patch.py
new file mode 100644
index 0000000000..ca903f170d
--- /dev/null
+++ b/ddtrace/contrib/aiobotocore/patch.py
@@ -0,0 +1,129 @@
+import asyncio
+from ddtrace.vendor import wrapt
+from ddtrace import config
+import aiobotocore.client
+
+from aiobotocore.endpoint import ClientResponseContentProxy
+
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...pin import Pin
+from ...ext import SpanTypes, http, aws
+from ...compat import PYTHON_VERSION_INFO
+from ...utils.formats import deep_getattr
+from ...utils.wrappers import unwrap
+
+
+ARGS_NAME = ('action', 'params', 'path', 'verb')
+TRACED_ARGS = ['params', 'path', 'verb']
+
+
+def patch():
+    if getattr(aiobotocore.client, '_datadog_patch', False):
+        return
+    setattr(aiobotocore.client, '_datadog_patch', True)
+
+    wrapt.wrap_function_wrapper('aiobotocore.client', 'AioBaseClient._make_api_call', _wrapped_api_call)
+    Pin(service='aws', app='aws').onto(aiobotocore.client.AioBaseClient)
+
+
+def unpatch():
+    if getattr(aiobotocore.client, '_datadog_patch', False):
+        setattr(aiobotocore.client, '_datadog_patch', False)
+        unwrap(aiobotocore.client.AioBaseClient, '_make_api_call')
+
+
+class WrappedClientResponseContentProxy(wrapt.ObjectProxy):
+    def __init__(self, body, pin, parent_span):
+        super(WrappedClientResponseContentProxy, self).__init__(body)
+        self._self_pin = pin
+        self._self_parent_span = parent_span
+
+    @asyncio.coroutine
+    def read(self, *args, **kwargs):
+        # async read that must be child of the parent span operation
+        operation_name = '{}.read'.format(self._self_parent_span.name)
+
+        with self._self_pin.tracer.start_span(operation_name, child_of=self._self_parent_span) as span:
+            # inherit parent attributes
+            span.resource = self._self_parent_span.resource
+            span.span_type = self._self_parent_span.span_type
+            span.meta = dict(self._self_parent_span.meta)
+            span.metrics = dict(self._self_parent_span.metrics)
+
+            result = yield from self.__wrapped__.read(*args, **kwargs)
+            span.set_tag('Length', len(result))
+
+        return result
+
+    # wrapt doesn't proxy `async with` context managers
+    if PYTHON_VERSION_INFO >= (3, 5, 0):
+        @asyncio.coroutine
+        def __aenter__(self):
+            # call the wrapped method but return the object proxy
+            yield from self.__wrapped__.__aenter__()
+            return self
+
+        @asyncio.coroutine
+        def __aexit__(self, *args, **kwargs):
+            response = yield from self.__wrapped__.__aexit__(*args, **kwargs)
+            return response
+
+
+@asyncio.coroutine
+def _wrapped_api_call(original_func, instance, args, kwargs):
+    pin = Pin.get_from(instance)
+    if not
pin or not pin.enabled(): + result = yield from original_func(*args, **kwargs) + return result + + endpoint_name = deep_getattr(instance, '_endpoint._endpoint_prefix') + + with pin.tracer.trace('{}.command'.format(endpoint_name), + service='{}.{}'.format(pin.service, endpoint_name), + span_type=SpanTypes.HTTP) as span: + + if len(args) > 0: + operation = args[0] + span.resource = '{}.{}'.format(endpoint_name, operation.lower()) + else: + operation = None + span.resource = endpoint_name + + aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS) + + region_name = deep_getattr(instance, 'meta.region_name') + + meta = { + 'aws.agent': 'aiobotocore', + 'aws.operation': operation, + 'aws.region': region_name, + } + span.set_tags(meta) + + result = yield from original_func(*args, **kwargs) + + body = result.get('Body') + if isinstance(body, ClientResponseContentProxy): + result['Body'] = WrappedClientResponseContentProxy(body, pin, span) + + response_meta = result['ResponseMetadata'] + response_headers = response_meta['HTTPHeaders'] + + span.set_tag(http.STATUS_CODE, response_meta['HTTPStatusCode']) + span.set_tag('retry_attempts', response_meta['RetryAttempts']) + + request_id = response_meta.get('RequestId') + if request_id: + span.set_tag('aws.requestid', request_id) + + request_id2 = response_headers.get('x-amz-id-2') + if request_id2: + span.set_tag('aws.requestid2', request_id2) + + # set analytics sample rate + span.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.aiobotocore.get_analytics_sample_rate() + ) + + return result diff --git a/ddtrace/contrib/aiohttp/__init__.py b/ddtrace/contrib/aiohttp/__init__.py new file mode 100644 index 0000000000..881634c568 --- /dev/null +++ b/ddtrace/contrib/aiohttp/__init__.py @@ -0,0 +1,61 @@ +""" +The ``aiohttp`` integration traces all requests defined in the application handlers. +Auto instrumentation is available using the ``trace_app`` function:: + + from aiohttp import web + from ddtrace import tracer, patch + from ddtrace.contrib.aiohttp import trace_app + + # patch third-party modules like aiohttp_jinja2 + patch(aiohttp=True) + + # create your application + app = web.Application() + app.router.add_get('/', home_handler) + + # trace your application handlers + trace_app(app, tracer, service='async-api') + web.run_app(app, port=8000) + +Integration settings are attached to your application under the ``datadog_trace`` +namespace. You can read or update them as follows:: + + # disables distributed tracing for all received requests + app['datadog_trace']['distributed_tracing_enabled'] = False + +Available settings are: + +* ``tracer`` (default: ``ddtrace.tracer``): set the default tracer instance that is used to + trace `aiohttp` internals. By default the `ddtrace` tracer is used. +* ``service`` (default: ``aiohttp-web``): set the service name used by the tracer. Usually + this configuration must be updated with a meaningful name. +* ``distributed_tracing_enabled`` (default: ``True``): enable distributed tracing during + the middleware execution, so that a new span is created with the given ``trace_id`` and + ``parent_id`` injected via request headers. +* ``analytics_enabled`` (default: ``None``): enables APM events in Trace Search & Analytics. 
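+
+For example, to enable APM events for this application only (a sketch using the
+``app`` configured above; both keys are read by the request middleware)::
+
+    app['datadog_trace']['analytics_enabled'] = True
+    app['datadog_trace']['analytics_sample_rate'] = 0.5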
+
+Third-party modules that are currently supported by the ``patch()`` method are:
+
+* ``aiohttp_jinja2``
+
+When a request span is created, a new ``Context`` for this logical execution is attached
+to the ``request`` object, so that it can be used in the application code::
+
+    async def home_handler(request):
+        ctx = request['datadog_context']
+        # do something with the tracing Context
+"""
+from ...utils.importlib import require_modules
+
+required_modules = ['aiohttp']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch, unpatch
+        from .middlewares import trace_app
+
+        __all__ = [
+            'patch',
+            'unpatch',
+            'trace_app',
+        ]
diff --git a/ddtrace/contrib/aiohttp/middlewares.py b/ddtrace/contrib/aiohttp/middlewares.py
new file mode 100644
index 0000000000..52269b9193
--- /dev/null
+++ b/ddtrace/contrib/aiohttp/middlewares.py
@@ -0,0 +1,146 @@
+import asyncio
+
+from ..asyncio import context_provider
+from ...compat import stringify
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...ext import SpanTypes, http
+from ...propagation.http import HTTPPropagator
+from ...settings import config
+
+
+CONFIG_KEY = 'datadog_trace'
+REQUEST_CONTEXT_KEY = 'datadog_context'
+REQUEST_CONFIG_KEY = '__datadog_trace_config'
+REQUEST_SPAN_KEY = '__datadog_request_span'
+
+
+@asyncio.coroutine
+def trace_middleware(app, handler):
+    """
+    ``aiohttp`` middleware that traces the handler execution.
+    Because handlers are run in different tasks for each request, we attach the Context
+    instance both to the Task and to the Request objects. In this way:
+
+    * the Task is used by the internal automatic instrumentation
+    * the ``Context`` attached to the request can be freely used in the application code
+    """
+    @asyncio.coroutine
+    def attach_context(request):
+        # application configs
+        tracer = app[CONFIG_KEY]['tracer']
+        service = app[CONFIG_KEY]['service']
+        distributed_tracing = app[CONFIG_KEY]['distributed_tracing_enabled']
+
+        # Create a new context based on the propagated information.
+        if distributed_tracing:
+            propagator = HTTPPropagator()
+            context = propagator.extract(request.headers)
+            # Only need to activate the new context if something was propagated
+            if context.trace_id:
+                tracer.context_provider.activate(context)
+
+        # trace the handler
+        request_span = tracer.trace(
+            'aiohttp.request',
+            service=service,
+            span_type=SpanTypes.WEB,
+        )
+
+        # Configure trace search sample rate
+        # DEV: aiohttp is special case maintains separate configuration from config api
+        analytics_enabled = app[CONFIG_KEY]['analytics_enabled']
+        if (config.analytics_enabled and analytics_enabled is not False) or analytics_enabled is True:
+            request_span.set_tag(
+                ANALYTICS_SAMPLE_RATE_KEY,
+                app[CONFIG_KEY].get('analytics_sample_rate', True)
+            )
+
+        # attach the context and the root span to the request; the Context
+        # may be freely used by the application code
+        request[REQUEST_CONTEXT_KEY] = request_span.context
+        request[REQUEST_SPAN_KEY] = request_span
+        request[REQUEST_CONFIG_KEY] = app[CONFIG_KEY]
+        try:
+            response = yield from handler(request)
+            return response
+        except Exception:
+            request_span.set_traceback()
+            raise
+    return attach_context
+
+
+@asyncio.coroutine
+def on_prepare(request, response):
+    """
+    The on_prepare signal is used to close the request span that is created during
+    the trace middleware execution.
+ """ + # safe-guard: discard if we don't have a request span + request_span = request.get(REQUEST_SPAN_KEY, None) + if not request_span: + return + + # default resource name + resource = stringify(response.status) + + if request.match_info.route.resource: + # collect the resource name based on http resource type + res_info = request.match_info.route.resource.get_info() + + if res_info.get('path'): + resource = res_info.get('path') + elif res_info.get('formatter'): + resource = res_info.get('formatter') + elif res_info.get('prefix'): + resource = res_info.get('prefix') + + # prefix the resource name by the http method + resource = '{} {}'.format(request.method, resource) + + if 500 <= response.status < 600: + request_span.error = 1 + + request_span.resource = resource + request_span.set_tag('http.method', request.method) + request_span.set_tag('http.status_code', response.status) + request_span.set_tag(http.URL, request.url.with_query(None)) + # DEV: aiohttp is special case maintains separate configuration from config api + trace_query_string = request[REQUEST_CONFIG_KEY].get('trace_query_string') + if trace_query_string is None: + trace_query_string = config._http.trace_query_string + if trace_query_string: + request_span.set_tag(http.QUERY_STRING, request.query_string) + request_span.finish() + + +def trace_app(app, tracer, service='aiohttp-web'): + """ + Tracing function that patches the ``aiohttp`` application so that it will be + traced using the given ``tracer``. + + :param app: aiohttp application to trace + :param tracer: tracer instance to use + :param service: service name of tracer + """ + + # safe-guard: don't trace an application twice + if getattr(app, '__datadog_trace', False): + return + setattr(app, '__datadog_trace', True) + + # configure datadog settings + app[CONFIG_KEY] = { + 'tracer': tracer, + 'service': service, + 'distributed_tracing_enabled': True, + 'analytics_enabled': None, + 'analytics_sample_rate': 1.0, + } + + # the tracer must work with asynchronous Context propagation + tracer.configure(context_provider=context_provider) + + # add the async tracer middleware as a first middleware + # and be sure that the on_prepare signal is the last one + app.middlewares.insert(0, trace_middleware) + app.on_response_prepare.append(on_prepare) diff --git a/ddtrace/contrib/aiohttp/patch.py b/ddtrace/contrib/aiohttp/patch.py new file mode 100644 index 0000000000..81b6deae11 --- /dev/null +++ b/ddtrace/contrib/aiohttp/patch.py @@ -0,0 +1,39 @@ +from ddtrace.vendor import wrapt + +from ...pin import Pin +from ...utils.wrappers import unwrap + + +try: + # instrument external packages only if they're available + import aiohttp_jinja2 + from .template import _trace_render_template + + template_module = True +except ImportError: + template_module = False + + +def patch(): + """ + Patch aiohttp third party modules: + * aiohttp_jinja2 + """ + if template_module: + if getattr(aiohttp_jinja2, '__datadog_patch', False): + return + setattr(aiohttp_jinja2, '__datadog_patch', True) + + _w = wrapt.wrap_function_wrapper + _w('aiohttp_jinja2', 'render_template', _trace_render_template) + Pin(app='aiohttp', service=None).onto(aiohttp_jinja2) + + +def unpatch(): + """ + Remove tracing from patched modules. 
+ """ + if template_module: + if getattr(aiohttp_jinja2, '__datadog_patch', False): + setattr(aiohttp_jinja2, '__datadog_patch', False) + unwrap(aiohttp_jinja2, 'render_template') diff --git a/ddtrace/contrib/aiohttp/template.py b/ddtrace/contrib/aiohttp/template.py new file mode 100644 index 0000000000..2b0c91479b --- /dev/null +++ b/ddtrace/contrib/aiohttp/template.py @@ -0,0 +1,29 @@ +import aiohttp_jinja2 + +from ddtrace import Pin + +from ...ext import SpanTypes + + +def _trace_render_template(func, module, args, kwargs): + """ + Trace the template rendering + """ + # get the module pin + pin = Pin.get_from(aiohttp_jinja2) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + # original signature: + # render_template(template_name, request, context, *, app_key=APP_KEY, encoding='utf-8') + template_name = args[0] + request = args[1] + env = aiohttp_jinja2.get_env(request.app) + + # the prefix is available only on PackageLoader + template_prefix = getattr(env.loader, 'package_path', '') + template_meta = '{}/{}'.format(template_prefix, template_name) + + with pin.tracer.trace('aiohttp.template', span_type=SpanTypes.TEMPLATE) as span: + span.set_meta('aiohttp.template', template_meta) + return func(*args, **kwargs) diff --git a/ddtrace/contrib/aiopg/__init__.py b/ddtrace/contrib/aiopg/__init__.py new file mode 100644 index 0000000000..df1580e107 --- /dev/null +++ b/ddtrace/contrib/aiopg/__init__.py @@ -0,0 +1,27 @@ +""" +Instrument aiopg to report a span for each executed Postgres queries:: + + from ddtrace import Pin, patch + import aiopg + + # If not patched yet, you can patch aiopg specifically + patch(aiopg=True) + + # This will report a span with the default settings + async with aiopg.connect(DSN) as db: + with (await db.cursor()) as cursor: + await cursor.execute("SELECT * FROM users WHERE id = 1") + + # Use a pin to specify metadata related to this connection + Pin.override(db, service='postgres-users') +""" +from ...utils.importlib import require_modules + + +required_modules = ['aiopg'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .patch import patch + + __all__ = ['patch'] diff --git a/ddtrace/contrib/aiopg/connection.py b/ddtrace/contrib/aiopg/connection.py new file mode 100644 index 0000000000..f5dc3afb47 --- /dev/null +++ b/ddtrace/contrib/aiopg/connection.py @@ -0,0 +1,97 @@ +import asyncio +from ddtrace.vendor import wrapt + +from aiopg.utils import _ContextManager + +from .. import dbapi +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...ext import SpanTypes, sql +from ...pin import Pin +from ...settings import config + + +class AIOTracedCursor(wrapt.ObjectProxy): + """ TracedCursor wraps a psql cursor and traces its queries. 
""" + + def __init__(self, cursor, pin): + super(AIOTracedCursor, self).__init__(cursor) + pin.onto(self) + name = pin.app or 'sql' + self._datadog_name = '%s.query' % name + + @asyncio.coroutine + def _trace_method(self, method, resource, extra_tags, *args, **kwargs): + pin = Pin.get_from(self) + if not pin or not pin.enabled(): + result = yield from method(*args, **kwargs) + return result + service = pin.service + + with pin.tracer.trace(self._datadog_name, service=service, + resource=resource, span_type=SpanTypes.SQL) as s: + s.set_tag(sql.QUERY, resource) + s.set_tags(pin.tags) + s.set_tags(extra_tags) + + # set analytics sample rate + s.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.aiopg.get_analytics_sample_rate() + ) + + try: + result = yield from method(*args, **kwargs) + return result + finally: + s.set_metric('db.rowcount', self.rowcount) + + @asyncio.coroutine + def executemany(self, query, *args, **kwargs): + # FIXME[matt] properly handle kwargs here. arg names can be different + # with different libs. + result = yield from self._trace_method( + self.__wrapped__.executemany, query, {'sql.executemany': 'true'}, + query, *args, **kwargs) + return result + + @asyncio.coroutine + def execute(self, query, *args, **kwargs): + result = yield from self._trace_method( + self.__wrapped__.execute, query, {}, query, *args, **kwargs) + return result + + @asyncio.coroutine + def callproc(self, proc, args): + result = yield from self._trace_method( + self.__wrapped__.callproc, proc, {}, proc, args) + return result + + def __aiter__(self): + return self.__wrapped__.__aiter__() + + +class AIOTracedConnection(wrapt.ObjectProxy): + """ TracedConnection wraps a Connection with tracing code. """ + + def __init__(self, conn, pin=None, cursor_cls=AIOTracedCursor): + super(AIOTracedConnection, self).__init__(conn) + name = dbapi._get_vendor(conn) + db_pin = pin or Pin(service=name, app=name) + db_pin.onto(self) + # wrapt requires prefix of `_self` for attributes that are only in the + # proxy (since some of our source objects will use `__slots__`) + self._self_cursor_cls = cursor_cls + + def cursor(self, *args, **kwargs): + # unfortunately we also need to patch this method as otherwise "self" + # ends up being the aiopg connection object + coro = self._cursor(*args, **kwargs) + return _ContextManager(coro) + + @asyncio.coroutine + def _cursor(self, *args, **kwargs): + cursor = yield from self.__wrapped__._cursor(*args, **kwargs) + pin = Pin.get_from(self) + if not pin: + return cursor + return self._self_cursor_cls(cursor, pin) diff --git a/ddtrace/contrib/aiopg/patch.py b/ddtrace/contrib/aiopg/patch.py new file mode 100644 index 0000000000..62160cf91b --- /dev/null +++ b/ddtrace/contrib/aiopg/patch.py @@ -0,0 +1,57 @@ +# 3p +import asyncio + +import aiopg.connection +import psycopg2.extensions +from ddtrace.vendor import wrapt + +from .connection import AIOTracedConnection +from ..psycopg.patch import _patch_extensions, \ + _unpatch_extensions, patch_conn as psycopg_patch_conn +from ...utils.wrappers import unwrap as _u + + +def patch(): + """ Patch monkey patches psycopg's connection function + so that the connection's functions are traced. 
+ """ + if getattr(aiopg, '_datadog_patch', False): + return + setattr(aiopg, '_datadog_patch', True) + + wrapt.wrap_function_wrapper(aiopg.connection, '_connect', patched_connect) + _patch_extensions(_aiopg_extensions) # do this early just in case + + +def unpatch(): + if getattr(aiopg, '_datadog_patch', False): + setattr(aiopg, '_datadog_patch', False) + _u(aiopg.connection, '_connect') + _unpatch_extensions(_aiopg_extensions) + + +@asyncio.coroutine +def patched_connect(connect_func, _, args, kwargs): + conn = yield from connect_func(*args, **kwargs) + return psycopg_patch_conn(conn, traced_conn_cls=AIOTracedConnection) + + +def _extensions_register_type(func, _, args, kwargs): + def _unroll_args(obj, scope=None): + return obj, scope + obj, scope = _unroll_args(*args, **kwargs) + + # register_type performs a c-level check of the object + # type so we must be sure to pass in the actual db connection + if scope and isinstance(scope, wrapt.ObjectProxy): + scope = scope.__wrapped__._conn + + return func(obj, scope) if scope else func(obj) + + +# extension hooks +_aiopg_extensions = [ + (psycopg2.extensions.register_type, + psycopg2.extensions, 'register_type', + _extensions_register_type), +] diff --git a/ddtrace/contrib/algoliasearch/__init__.py b/ddtrace/contrib/algoliasearch/__init__.py new file mode 100644 index 0000000000..ff5cc604f7 --- /dev/null +++ b/ddtrace/contrib/algoliasearch/__init__.py @@ -0,0 +1,32 @@ +""" +The Algoliasearch__ integration will add tracing to your Algolia searches. + +:: + + from ddtrace import patch_all + patch_all() + + from algoliasearch import algoliasearch + client = alogliasearch.Client(, ) + index = client.init_index() + index.search("your query", args={"attributesToRetrieve": "attribute1,attribute1"}) + +Configuration +~~~~~~~~~~~~~ + +.. py:data:: ddtrace.config.algoliasearch['collect_query_text'] + + Whether to pass the text of your query onto Datadog. Since this may contain sensitive data it's off by default + + Default: ``False`` + +.. 
__: https://www.algolia.com +""" + +from ...utils.importlib import require_modules + +with require_modules(['algoliasearch', 'algoliasearch.version']) as missing_modules: + if not missing_modules: + from .patch import patch, unpatch + + __all__ = ['patch', 'unpatch'] diff --git a/ddtrace/contrib/algoliasearch/patch.py b/ddtrace/contrib/algoliasearch/patch.py new file mode 100644 index 0000000000..859b6eb0f7 --- /dev/null +++ b/ddtrace/contrib/algoliasearch/patch.py @@ -0,0 +1,143 @@ +from ddtrace.pin import Pin +from ddtrace.settings import config +from ddtrace.utils.wrappers import unwrap as _u +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +DD_PATCH_ATTR = '_datadog_patch' + +SERVICE_NAME = 'algoliasearch' +APP_NAME = 'algoliasearch' + +try: + import algoliasearch + from algoliasearch.version import VERSION + algoliasearch_version = tuple([int(i) for i in VERSION.split('.')]) + + # Default configuration + config._add('algoliasearch', dict( + service_name=SERVICE_NAME, + collect_query_text=False + )) +except ImportError: + algoliasearch_version = (0, 0) + + +def patch(): + if algoliasearch_version == (0, 0): + return + + if getattr(algoliasearch, DD_PATCH_ATTR, False): + return + + setattr(algoliasearch, '_datadog_patch', True) + + pin = Pin( + service=config.algoliasearch.service_name, app=APP_NAME + ) + + if algoliasearch_version < (2, 0) and algoliasearch_version >= (1, 0): + _w(algoliasearch.index, 'Index.search', _patched_search) + pin.onto(algoliasearch.index.Index) + elif algoliasearch_version >= (2, 0) and algoliasearch_version < (3, 0): + from algoliasearch import search_index + _w(algoliasearch, 'search_index.SearchIndex.search', _patched_search) + pin.onto(search_index.SearchIndex) + else: + return + + +def unpatch(): + if algoliasearch_version == (0, 0): + return + + if getattr(algoliasearch, DD_PATCH_ATTR, False): + setattr(algoliasearch, DD_PATCH_ATTR, False) + + if algoliasearch_version < (2, 0) and algoliasearch_version >= (1, 0): + _u(algoliasearch.index.Index, 'search') + elif algoliasearch_version >= (2, 0) and algoliasearch_version < (3, 0): + from algoliasearch import search_index + _u(search_index.SearchIndex, 'search') + else: + return + + +# DEV: this maps serves the dual purpose of enumerating the algoliasearch.search() query_args that +# will be sent along as tags, as well as converting arguments names into tag names compliant with +# tag naming recommendations set out here: https://docs.datadoghq.com/tagging/ +QUERY_ARGS_DD_TAG_MAP = { + 'page': 'page', + 'hitsPerPage': 'hits_per_page', + 'attributesToRetrieve': 'attributes_to_retrieve', + 'attributesToHighlight': 'attributes_to_highlight', + 'attributesToSnippet': 'attributes_to_snippet', + 'minWordSizefor1Typo': 'min_word_size_for_1_typo', + 'minWordSizefor2Typos': 'min_word_size_for_2_typos', + 'getRankingInfo': 'get_ranking_info', + 'aroundLatLng': 'around_lat_lng', + 'numericFilters': 'numeric_filters', + 'tagFilters': 'tag_filters', + 'queryType': 'query_type', + 'optionalWords': 'optional_words', + 'distinct': 'distinct' +} + + +def _patched_search(func, instance, wrapt_args, wrapt_kwargs): + """ + wrapt_args is called the way it is to distinguish it from the 'args' + argument to the algoliasearch.index.Index.search() method. 
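+
+    For example (v1.x, a sketch), ``index.search('q', args={'hitsPerPage': 10})``
+    reaches this wrapper as::
+
+        wrapt_args = ('q',)
+        wrapt_kwargs = {'args': {'hitsPerPage': 10}}
+
+    so the query options are read from the version-appropriate key below.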
+ """ + + if algoliasearch_version < (2, 0) and algoliasearch_version >= (1, 0): + function_query_arg_name = 'args' + elif algoliasearch_version >= (2, 0) and algoliasearch_version < (3, 0): + function_query_arg_name = 'request_options' + else: + return func(*wrapt_args, **wrapt_kwargs) + + pin = Pin.get_from(instance) + if not pin or not pin.enabled(): + return func(*wrapt_args, **wrapt_kwargs) + + with pin.tracer.trace('algoliasearch.search', service=pin.service) as span: + if not span.sampled: + return func(*wrapt_args, **wrapt_kwargs) + + if config.algoliasearch.collect_query_text: + span.set_tag('query.text', wrapt_kwargs.get('query', wrapt_args[0])) + + query_args = wrapt_kwargs.get(function_query_arg_name, wrapt_args[1] if len(wrapt_args) > 1 else None) + + if query_args and isinstance(query_args, dict): + for query_arg, tag_name in QUERY_ARGS_DD_TAG_MAP.items(): + value = query_args.get(query_arg) + if value is not None: + span.set_tag('query.args.{}'.format(tag_name), value) + + # Result would look like this + # { + # 'hits': [ + # { + # .... your search results ... + # } + # ], + # 'processingTimeMS': 1, + # 'nbHits': 1, + # 'hitsPerPage': 20, + # 'exhaustiveNbHits': true, + # 'params': 'query=xxx', + # 'nbPages': 1, + # 'query': 'xxx', + # 'page': 0 + # } + result = func(*wrapt_args, **wrapt_kwargs) + + if isinstance(result, dict): + if result.get('processingTimeMS', None) is not None: + span.set_metric('processing_time_ms', int(result['processingTimeMS'])) + + if result.get('nbHits', None) is not None: + span.set_metric('number_of_hits', int(result['nbHits'])) + + return result diff --git a/ddtrace/contrib/asyncio/__init__.py b/ddtrace/contrib/asyncio/__init__.py new file mode 100644 index 0000000000..57f7e99e71 --- /dev/null +++ b/ddtrace/contrib/asyncio/__init__.py @@ -0,0 +1,72 @@ +""" +This integration provides the ``AsyncioContextProvider`` that follows the execution +flow of a ``Task``, making possible to trace asynchronous code built on top +of ``asyncio``. To trace asynchronous execution, you must:: + + import asyncio + from ddtrace import tracer + from ddtrace.contrib.asyncio import context_provider + + # enable asyncio support + tracer.configure(context_provider=context_provider) + + async def some_work(): + with tracer.trace('asyncio.some_work'): + # do something + + # launch your coroutines as usual + loop = asyncio.get_event_loop() + loop.run_until_complete(some_work()) + loop.close() + +If ``contextvars`` is available, we use the +:class:`ddtrace.provider.DefaultContextProvider`, otherwise we use the legacy +:class:`ddtrace.contrib.asyncio.provider.AsyncioContextProvider`. 
+ +In addition, helpers are provided to simplify how the tracing ``Context`` is +handled between scheduled coroutines and ``Future`` invoked in separated +threads: + + * ``set_call_context(task, ctx)``: attach the context to the given ``Task`` + so that it will be available from the ``tracer.get_call_context()`` + * ``ensure_future(coro_or_future, *, loop=None)``: wrapper for the + ``asyncio.ensure_future`` that attaches the current context to a new + ``Task`` instance + * ``run_in_executor(loop, executor, func, *args)``: wrapper for the + ``loop.run_in_executor`` that attaches the current context to the + new thread so that the trace can be resumed regardless when + it's executed + * ``create_task(coro)``: creates a new asyncio ``Task`` that inherits + the current active ``Context`` so that generated traces in the new task + are attached to the main trace + +A ``patch(asyncio=True)`` is available if you want to automatically use above +wrappers without changing your code. In that case, the patch method **must be +called before** importing stdlib functions. +""" +from ...utils.importlib import require_modules + + +required_modules = ['asyncio'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .provider import AsyncioContextProvider + from ...internal.context_manager import CONTEXTVARS_IS_AVAILABLE + from ...provider import DefaultContextProvider + + if CONTEXTVARS_IS_AVAILABLE: + context_provider = DefaultContextProvider() + else: + context_provider = AsyncioContextProvider() + + from .helpers import set_call_context, ensure_future, run_in_executor + from .patch import patch + + __all__ = [ + 'context_provider', + 'set_call_context', + 'ensure_future', + 'run_in_executor', + 'patch' + ] diff --git a/ddtrace/contrib/asyncio/compat.py b/ddtrace/contrib/asyncio/compat.py new file mode 100644 index 0000000000..b204218e08 --- /dev/null +++ b/ddtrace/contrib/asyncio/compat.py @@ -0,0 +1,9 @@ +import sys + +# asyncio.Task.current_task method is deprecated and will be removed in Python +# 3.9. Instead use asyncio.current_task +if sys.version_info >= (3, 7, 0): + from asyncio import current_task as asyncio_current_task +else: + import asyncio + asyncio_current_task = asyncio.Task.current_task diff --git a/ddtrace/contrib/asyncio/helpers.py b/ddtrace/contrib/asyncio/helpers.py new file mode 100644 index 0000000000..fadd9a58e5 --- /dev/null +++ b/ddtrace/contrib/asyncio/helpers.py @@ -0,0 +1,83 @@ +""" +This module includes a list of convenience methods that +can be used to simplify some operations while handling +Context and Spans in instrumented ``asyncio`` code. +""" +import asyncio +import ddtrace + +from .provider import CONTEXT_ATTR +from .wrappers import wrapped_create_task +from ...context import Context + + +def set_call_context(task, ctx): + """ + Updates the ``Context`` for the given Task. Useful when you need to + pass the context among different tasks. + + This method is available for backward-compatibility. Use the + ``AsyncioContextProvider`` API to set the current active ``Context``. + """ + setattr(task, CONTEXT_ATTR, ctx) + + +def ensure_future(coro_or_future, *, loop=None, tracer=None): + """Wrapper that sets a context to the newly created Task. + + If the current task already has a Context, it will be attached to the new Task so the Trace list will be preserved. 
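+
+    Example (``fetch()`` is a placeholder coroutine)::
+
+        task = ensure_future(fetch(), loop=loop)
+        result = yield from task   # the new Task carries the current Context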
+ """ + tracer = tracer or ddtrace.tracer + current_ctx = tracer.get_call_context() + task = asyncio.ensure_future(coro_or_future, loop=loop) + set_call_context(task, current_ctx) + return task + + +def run_in_executor(loop, executor, func, *args, tracer=None): + """Wrapper function that sets a context to the newly created Thread. + + If the current task has a Context, it will be attached as an empty Context with the current_span activated to + inherit the ``trace_id`` and the ``parent_id``. + + Because the Executor can run the Thread immediately or after the + coroutine is executed, we may have two different scenarios: + * the Context is copied in the new Thread and the trace is sent twice + * the coroutine flushes the Context and when the Thread copies the + Context it is already empty (so it will be a root Span) + + To support both situations, we create a new Context that knows only what was + the latest active Span when the new thread was created. In this new thread, + we fallback to the thread-local ``Context`` storage. + + """ + tracer = tracer or ddtrace.tracer + ctx = Context() + current_ctx = tracer.get_call_context() + ctx._current_span = current_ctx._current_span + + # prepare the future using an executor wrapper + future = loop.run_in_executor(executor, _wrap_executor, func, args, tracer, ctx) + return future + + +def _wrap_executor(fn, args, tracer, ctx): + """ + This function is executed in the newly created Thread so the right + ``Context`` can be set in the thread-local storage. This operation + is safe because the ``Context`` class is thread-safe and can be + updated concurrently. + """ + # the AsyncioContextProvider knows that this is a new thread + # so it is legit to pass the Context in the thread-local storage; + # fn() will be executed outside the asyncio loop as a synchronous code + tracer.context_provider.activate(ctx) + return fn(*args) + + +def create_task(*args, **kwargs): + """This function spawns a task with a Context that inherits the + `trace_id` and the `parent_id` from the current active one if available. + """ + loop = asyncio.get_event_loop() + return wrapped_create_task(loop.create_task, None, args, kwargs) diff --git a/ddtrace/contrib/asyncio/patch.py b/ddtrace/contrib/asyncio/patch.py new file mode 100644 index 0000000000..4d38f0fac2 --- /dev/null +++ b/ddtrace/contrib/asyncio/patch.py @@ -0,0 +1,32 @@ +import asyncio + +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +from ...internal.context_manager import CONTEXTVARS_IS_AVAILABLE +from .wrappers import wrapped_create_task, wrapped_create_task_contextvars +from ...utils.wrappers import unwrap as _u + + +def patch(): + """Patches current loop `create_task()` method to enable spawned tasks to + parent to the base task context. 
+ """ + if getattr(asyncio, '_datadog_patch', False): + return + setattr(asyncio, '_datadog_patch', True) + + loop = asyncio.get_event_loop() + if CONTEXTVARS_IS_AVAILABLE: + _w(loop, 'create_task', wrapped_create_task_contextvars) + else: + _w(loop, 'create_task', wrapped_create_task) + + +def unpatch(): + """Remove tracing from patched modules.""" + + if getattr(asyncio, '_datadog_patch', False): + setattr(asyncio, '_datadog_patch', False) + + loop = asyncio.get_event_loop() + _u(loop, 'create_task') diff --git a/ddtrace/contrib/asyncio/provider.py b/ddtrace/contrib/asyncio/provider.py new file mode 100644 index 0000000000..6748e2edd6 --- /dev/null +++ b/ddtrace/contrib/asyncio/provider.py @@ -0,0 +1,86 @@ +import asyncio + +from ...context import Context +from ...provider import DefaultContextProvider + +# Task attribute used to set/get the Context instance +CONTEXT_ATTR = '__datadog_context' + + +class AsyncioContextProvider(DefaultContextProvider): + """ + Context provider that retrieves all contexts for the current asyncio + execution. It must be used in asynchronous programming that relies + in the built-in ``asyncio`` library. Framework instrumentation that + is built on top of the ``asyncio`` library, can use this provider. + + This Context Provider inherits from ``DefaultContextProvider`` because + it uses a thread-local storage when the ``Context`` is propagated to + a different thread, than the one that is running the async loop. + """ + def activate(self, context, loop=None): + """Sets the scoped ``Context`` for the current running ``Task``. + """ + loop = self._get_loop(loop) + if not loop: + self._local.set(context) + return context + + # the current unit of work (if tasks are used) + task = asyncio.Task.current_task(loop=loop) + setattr(task, CONTEXT_ATTR, context) + return context + + def _get_loop(self, loop=None): + """Helper to try and resolve the current loop""" + try: + return loop or asyncio.get_event_loop() + except RuntimeError: + # Detects if a loop is available in the current thread; + # DEV: This happens when a new thread is created from the out that is running the async loop + # DEV: It's possible that a different Executor is handling a different Thread that + # works with blocking code. In that case, we fallback to a thread-local Context. + pass + return None + + def _has_active_context(self, loop=None): + """Helper to determine if we have a currently active context""" + loop = self._get_loop(loop=loop) + if loop is None: + return self._local._has_active_context() + + # the current unit of work (if tasks are used) + task = asyncio.Task.current_task(loop=loop) + if task is None: + return False + + ctx = getattr(task, CONTEXT_ATTR, None) + return ctx is not None + + def active(self, loop=None): + """ + Returns the scoped Context for this execution flow. The ``Context`` uses + the current task as a carrier so if a single task is used for the entire application, + the context must be handled separately. + """ + loop = self._get_loop(loop=loop) + if not loop: + return self._local.get() + + # the current unit of work (if tasks are used) + task = asyncio.Task.current_task(loop=loop) + if task is None: + # providing a detached Context from the current Task, may lead to + # wrong traces. 
This defensive behavior grants that a trace can + # still be built without raising exceptions + return Context() + + ctx = getattr(task, CONTEXT_ATTR, None) + if ctx is not None: + # return the active Context for this task (if any) + return ctx + + # create a new Context using the Task as a Context carrier + ctx = Context() + setattr(task, CONTEXT_ATTR, ctx) + return ctx diff --git a/ddtrace/contrib/asyncio/wrappers.py b/ddtrace/contrib/asyncio/wrappers.py new file mode 100644 index 0000000000..00d7d8db8f --- /dev/null +++ b/ddtrace/contrib/asyncio/wrappers.py @@ -0,0 +1,58 @@ +import ddtrace + +from .compat import asyncio_current_task +from .provider import CONTEXT_ATTR +from ...context import Context + + +def wrapped_create_task(wrapped, instance, args, kwargs): + """Wrapper for ``create_task(coro)`` that propagates the current active + ``Context`` to the new ``Task``. This function is useful to connect traces + of detached executions. + + Note: we can't just link the task contexts due to the following scenario: + * begin task A + * task A starts task B1..B10 + * finish task B1-B9 (B10 still on trace stack) + * task A starts task C + * now task C gets parented to task B10 since it's still on the stack, + however was not actually triggered by B10 + """ + new_task = wrapped(*args, **kwargs) + current_task = asyncio_current_task() + + ctx = getattr(current_task, CONTEXT_ATTR, None) + if ctx: + # current task has a context, so parent a new context to the base context + new_ctx = Context( + trace_id=ctx.trace_id, + span_id=ctx.span_id, + sampling_priority=ctx.sampling_priority, + ) + setattr(new_task, CONTEXT_ATTR, new_ctx) + + return new_task + + +def wrapped_create_task_contextvars(wrapped, instance, args, kwargs): + """Wrapper for ``create_task(coro)`` that propagates the current active + ``Context`` to the new ``Task``. This function is useful to connect traces + of detached executions. Uses contextvars for task-local storage. + """ + current_task_ctx = ddtrace.tracer.get_call_context() + + if not current_task_ctx: + # no current context exists so nothing special to be done in handling + # context for new task + return wrapped(*args, **kwargs) + + # clone and activate current task's context for new task to support + # detached executions + new_task_ctx = current_task_ctx.clone() + ddtrace.tracer.context_provider.activate(new_task_ctx) + try: + # activated context will now be copied to new task + return wrapped(*args, **kwargs) + finally: + # reactivate current task context + ddtrace.tracer.context_provider.activate(current_task_ctx) diff --git a/ddtrace/contrib/boto/__init__.py b/ddtrace/contrib/boto/__init__.py new file mode 100644 index 0000000000..f5b2b1fdf5 --- /dev/null +++ b/ddtrace/contrib/boto/__init__.py @@ -0,0 +1,24 @@ +""" +Boto integration will trace all AWS calls made via boto2. 
+This integration is automatically patched when using ``patch_all()``:: + + import boto.ec2 + from ddtrace import patch + + # If not patched yet, you can patch boto specifically + patch(boto=True) + + # This will report spans with the default instrumentation + ec2 = boto.ec2.connect_to_region("us-west-2") + # Example of instrumented query + ec2.get_all_instances() +""" + +from ...utils.importlib import require_modules + +required_modules = ['boto.connection'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .patch import patch + __all__ = ['patch'] diff --git a/ddtrace/contrib/boto/patch.py b/ddtrace/contrib/boto/patch.py new file mode 100644 index 0000000000..c1de9e9cb5 --- /dev/null +++ b/ddtrace/contrib/boto/patch.py @@ -0,0 +1,183 @@ +import boto.connection +from ddtrace.vendor import wrapt +import inspect + +from ddtrace import config +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...pin import Pin +from ...ext import SpanTypes, http, aws +from ...utils.wrappers import unwrap + + +# Original boto client class +_Boto_client = boto.connection.AWSQueryConnection + +AWS_QUERY_ARGS_NAME = ('operation_name', 'params', 'path', 'verb') +AWS_AUTH_ARGS_NAME = ( + 'method', + 'path', + 'headers', + 'data', + 'host', + 'auth_path', + 'sender', +) +AWS_QUERY_TRACED_ARGS = ['operation_name', 'params', 'path'] +AWS_AUTH_TRACED_ARGS = ['path', 'data', 'host'] + + +def patch(): + if getattr(boto.connection, '_datadog_patch', False): + return + setattr(boto.connection, '_datadog_patch', True) + + # AWSQueryConnection and AWSAuthConnection are two different classes called by + # different services for connection. + # For exemple EC2 uses AWSQueryConnection and S3 uses AWSAuthConnection + wrapt.wrap_function_wrapper( + 'boto.connection', 'AWSQueryConnection.make_request', patched_query_request + ) + wrapt.wrap_function_wrapper( + 'boto.connection', 'AWSAuthConnection.make_request', patched_auth_request + ) + Pin(service='aws', app='aws').onto( + boto.connection.AWSQueryConnection + ) + Pin(service='aws', app='aws').onto( + boto.connection.AWSAuthConnection + ) + + +def unpatch(): + if getattr(boto.connection, '_datadog_patch', False): + setattr(boto.connection, '_datadog_patch', False) + unwrap(boto.connection.AWSQueryConnection, 'make_request') + unwrap(boto.connection.AWSAuthConnection, 'make_request') + + +# ec2, sqs, kinesis +def patched_query_request(original_func, instance, args, kwargs): + + pin = Pin.get_from(instance) + if not pin or not pin.enabled(): + return original_func(*args, **kwargs) + + endpoint_name = getattr(instance, 'host').split('.')[0] + + with pin.tracer.trace( + '{}.command'.format(endpoint_name), + service='{}.{}'.format(pin.service, endpoint_name), + span_type=SpanTypes.HTTP, + ) as span: + + operation_name = None + if args: + operation_name = args[0] + span.resource = '%s.%s' % (endpoint_name, operation_name.lower()) + else: + span.resource = endpoint_name + + aws.add_span_arg_tags(span, endpoint_name, args, AWS_QUERY_ARGS_NAME, AWS_QUERY_TRACED_ARGS) + + # Obtaining region name + region_name = _get_instance_region_name(instance) + + meta = { + aws.AGENT: 'boto', + aws.OPERATION: operation_name, + } + if region_name: + meta[aws.REGION] = region_name + + span.set_tags(meta) + + # Original func returns a boto.connection.HTTPResponse object + result = original_func(*args, **kwargs) + span.set_tag(http.STATUS_CODE, getattr(result, 'status')) + span.set_tag(http.METHOD, getattr(result, '_method')) + + # set analytics sample 
+        span.set_tag(
+            ANALYTICS_SAMPLE_RATE_KEY,
+            config.boto.get_analytics_sample_rate()
+        )
+
+        return result
+
+
+# s3, lambda
+def patched_auth_request(original_func, instance, args, kwargs):
+
+    # Catch the name of the operation that called make_request()
+    # by going up the stack until we get the first non-ddtrace module.
+    # DEV: For `lambda.list_functions()` this should be:
+    #        - ddtrace.contrib.boto.patch
+    #        - ddtrace.vendor.wrapt.wrappers
+    #        - boto.awslambda.layer1 (make_request)
+    #        - boto.awslambda.layer1 (list_functions)
+    # but this can vary across Python versions, which is why we use a heuristic.
+    frame = inspect.currentframe().f_back
+    operation_name = None
+    while frame:
+        if frame.f_code.co_name == 'make_request':
+            operation_name = frame.f_back.f_code.co_name
+            break
+        frame = frame.f_back
+
+    pin = Pin.get_from(instance)
+    if not pin or not pin.enabled():
+        return original_func(*args, **kwargs)
+
+    endpoint_name = getattr(instance, 'host').split('.')[0]
+
+    with pin.tracer.trace(
+        '{}.command'.format(endpoint_name),
+        service='{}.{}'.format(pin.service, endpoint_name),
+        span_type=SpanTypes.HTTP,
+    ) as span:
+
+        if args:
+            http_method = args[0]
+            span.resource = '%s.%s' % (endpoint_name, http_method.lower())
+        else:
+            span.resource = endpoint_name
+
+        aws.add_span_arg_tags(span, endpoint_name, args, AWS_AUTH_ARGS_NAME, AWS_AUTH_TRACED_ARGS)
+
+        # Obtaining region name
+        region_name = _get_instance_region_name(instance)
+
+        meta = {
+            aws.AGENT: 'boto',
+            aws.OPERATION: operation_name,
+        }
+        if region_name:
+            meta[aws.REGION] = region_name
+
+        span.set_tags(meta)
+
+        # Original func returns a boto.connection.HTTPResponse object
+        result = original_func(*args, **kwargs)
+        span.set_tag(http.STATUS_CODE, getattr(result, 'status'))
+        span.set_tag(http.METHOD, getattr(result, '_method'))
+
+        # set analytics sample rate
+        span.set_tag(
+            ANALYTICS_SAMPLE_RATE_KEY,
+            config.boto.get_analytics_sample_rate()
+        )
+
+        return result
+
+
+def _get_instance_region_name(instance):
+    region = getattr(instance, 'region', None)
+
+    if not region:
+        return None
+    if isinstance(region, str):
+        return region.split(':')[1]
+    else:
+        return region.name
diff --git a/ddtrace/contrib/botocore/__init__.py b/ddtrace/contrib/botocore/__init__.py
new file mode 100644
index 0000000000..adba2c01ba
--- /dev/null
+++ b/ddtrace/contrib/botocore/__init__.py
@@ -0,0 +1,28 @@
+"""
+The Botocore integration will trace all AWS calls made with the botocore
+library. Libraries like Boto3 that use Botocore will also be patched.
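+
+For example, a minimal sketch of tracing a Boto3 client, which routes its
+calls through botocore (hedged: assumes boto3 is installed and a trace agent
+is reachable; the region is illustrative)::
+
+    import boto3
+    from ddtrace import patch
+
+    patch(botocore=True)
+
+    # boto3 clients are built on botocore.client.BaseClient, so this
+    # client is traced by the patch above
+    s3 = boto3.client('s3', region_name='us-east-1')
+    s3.list_buckets()  # reported as an 's3.command' span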
+
+This integration is automatically patched when using ``patch_all()``::
+
+    import botocore.session
+    from ddtrace import patch
+
+    # If not patched yet, you can patch botocore specifically
+    patch(botocore=True)
+
+    # This will report spans with the default instrumentation
+    session = botocore.session.get_session()
+    lambda_client = session.create_client('lambda', region_name='us-east-1')
+    # Example of instrumented query
+    lambda_client.list_functions()
+"""
+
+
+from ...utils.importlib import require_modules
+
+required_modules = ['botocore.client']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch
+        __all__ = ['patch']
diff --git a/ddtrace/contrib/botocore/patch.py b/ddtrace/contrib/botocore/patch.py
new file mode 100644
index 0000000000..87ed79ee96
--- /dev/null
+++ b/ddtrace/contrib/botocore/patch.py
@@ -0,0 +1,81 @@
+"""
+Trace queries to the AWS API made via the botocore client.
+"""
+# 3p
+from ddtrace.vendor import wrapt
+from ddtrace import config
+import botocore.client
+
+# project
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...pin import Pin
+from ...ext import SpanTypes, http, aws
+from ...utils.formats import deep_getattr
+from ...utils.wrappers import unwrap
+
+
+# Original botocore client class
+_Botocore_client = botocore.client.BaseClient
+
+ARGS_NAME = ('action', 'params', 'path', 'verb')
+TRACED_ARGS = ['params', 'path', 'verb']
+
+
+def patch():
+    if getattr(botocore.client, '_datadog_patch', False):
+        return
+    setattr(botocore.client, '_datadog_patch', True)
+
+    wrapt.wrap_function_wrapper('botocore.client', 'BaseClient._make_api_call', patched_api_call)
+    Pin(service='aws', app='aws').onto(botocore.client.BaseClient)
+
+
+def unpatch():
+    if getattr(botocore.client, '_datadog_patch', False):
+        setattr(botocore.client, '_datadog_patch', False)
+        unwrap(botocore.client.BaseClient, '_make_api_call')
+
+
+def patched_api_call(original_func, instance, args, kwargs):
+
+    pin = Pin.get_from(instance)
+    if not pin or not pin.enabled():
+        return original_func(*args, **kwargs)
+
+    endpoint_name = deep_getattr(instance, '_endpoint._endpoint_prefix')
+
+    with pin.tracer.trace('{}.command'.format(endpoint_name),
+                          service='{}.{}'.format(pin.service, endpoint_name),
+                          span_type=SpanTypes.HTTP) as span:
+
+        operation = None
+        if args:
+            operation = args[0]
+            span.resource = '%s.%s' % (endpoint_name, operation.lower())
+
+        else:
+            span.resource = endpoint_name
+
+        aws.add_span_arg_tags(span, endpoint_name, args, ARGS_NAME, TRACED_ARGS)
+
+        region_name = deep_getattr(instance, 'meta.region_name')
+
+        meta = {
+            'aws.agent': 'botocore',
+            'aws.operation': operation,
+            'aws.region': region_name,
+        }
+        span.set_tags(meta)
+
+        result = original_func(*args, **kwargs)
+
+        span.set_tag(http.STATUS_CODE, result['ResponseMetadata']['HTTPStatusCode'])
+        span.set_tag('retry_attempts', result['ResponseMetadata']['RetryAttempts'])
+
+        # set analytics sample rate
+        span.set_tag(
+            ANALYTICS_SAMPLE_RATE_KEY,
+            config.botocore.get_analytics_sample_rate()
+        )
+
+        return result
diff --git a/ddtrace/contrib/bottle/__init__.py b/ddtrace/contrib/bottle/__init__.py
new file mode 100644
index 0000000000..bcf3a5715a
--- /dev/null
+++ b/ddtrace/contrib/bottle/__init__.py
@@ -0,0 +1,23 @@
+"""
+The bottle integration traces the Bottle web framework.
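+
+When the integration is patched (for example via ``patch(bottle=True)`` or the
+``ddtrace-run`` script), the plugin is installed automatically on application
+creation; a minimal sketch, assuming the service name is taken from the
+``DATADOG_SERVICE_NAME`` environment variable (defaulting to ``bottle``)::
+
+    from ddtrace import patch
+    patch(bottle=True)
+
+    import bottle
+    app = bottle.Bottle()  # TracePlugin is installed by the patched __init__
+
+Alternatively, the plugin can be installed by hand.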
Add the following
+plugin to your app::
+
+    import bottle
+    from ddtrace import tracer
+    from ddtrace.contrib.bottle import TracePlugin
+
+    app = bottle.Bottle()
+    plugin = TracePlugin(service="my-web-app")
+    app.install(plugin)
+"""
+
+from ...utils.importlib import require_modules
+
+required_modules = ['bottle']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .trace import TracePlugin
+        from .patch import patch
+
+        __all__ = ['TracePlugin', 'patch']
diff --git a/ddtrace/contrib/bottle/patch.py b/ddtrace/contrib/bottle/patch.py
new file mode 100644
index 0000000000..802b4e0704
--- /dev/null
+++ b/ddtrace/contrib/bottle/patch.py
@@ -0,0 +1,26 @@
+import os
+
+from .trace import TracePlugin
+
+import bottle
+
+from ddtrace.vendor import wrapt
+
+
+def patch():
+    """Patch the bottle.Bottle class
+    """
+    if getattr(bottle, '_datadog_patch', False):
+        return
+
+    setattr(bottle, '_datadog_patch', True)
+    wrapt.wrap_function_wrapper('bottle', 'Bottle.__init__', traced_init)
+
+
+def traced_init(wrapped, instance, args, kwargs):
+    wrapped(*args, **kwargs)
+
+    service = os.environ.get('DATADOG_SERVICE_NAME') or 'bottle'
+
+    plugin = TracePlugin(service=service)
+    instance.install(plugin)
diff --git a/ddtrace/contrib/bottle/trace.py b/ddtrace/contrib/bottle/trace.py
new file mode 100644
index 0000000000..12c196b32e
--- /dev/null
+++ b/ddtrace/contrib/bottle/trace.py
@@ -0,0 +1,83 @@
+# 3p
+from bottle import response, request, HTTPError, HTTPResponse
+
+# project
+import ddtrace
+
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...ext import SpanTypes, http
+from ...propagation.http import HTTPPropagator
+from ...settings import config
+
+
+class TracePlugin(object):
+    name = 'trace'
+    api = 2
+
+    def __init__(self, service='bottle', tracer=None, distributed_tracing=True):
+        self.service = service
+        self.tracer = tracer or ddtrace.tracer
+        self.distributed_tracing = distributed_tracing
+
+    def apply(self, callback, route):
+
+        def wrapped(*args, **kwargs):
+            if not self.tracer or not self.tracer.enabled:
+                return callback(*args, **kwargs)
+
+            resource = '{} {}'.format(request.method, route.rule)
+
+            # Propagate headers such as x-datadog-trace-id.
+            if self.distributed_tracing:
+                propagator = HTTPPropagator()
+                context = propagator.extract(request.headers)
+                if context.trace_id:
+                    self.tracer.context_provider.activate(context)
+
+            with self.tracer.trace(
+                'bottle.request', service=self.service, resource=resource, span_type=SpanTypes.WEB
+            ) as s:
+                # set analytics sample rate with global config enabled
+                s.set_tag(
+                    ANALYTICS_SAMPLE_RATE_KEY,
+                    config.bottle.get_analytics_sample_rate(use_global_config=True)
+                )
+
+                code = None
+                result = None
+                try:
+                    result = callback(*args, **kwargs)
+                    return result
+                except (HTTPError, HTTPResponse) as e:
+                    # you can interrupt flows using abort(status_code, 'message')...
+                    # we need to respect the defined status_code.
+                    # we also need to handle when the response is raised, as is the
+                    # case with a 4xx status
+                    code = e.status_code
+                    raise
+                except Exception:
+                    # bottle doesn't always translate unhandled exceptions,
+                    # so we mark it here.
+                    code = 500
+                    raise
+                finally:
+                    if isinstance(result, HTTPResponse):
+                        response_code = result.status_code
+                    elif code:
+                        response_code = code
+                    else:
+                        # bottle local response has not yet been updated so this
+                        # will be the default
+                        response_code = response.status_code
+
+                    if 500 <= response_code < 600:
+                        s.error = 1
+
+                    s.set_tag(http.STATUS_CODE, response_code)
+                    s.set_tag(http.URL, request.urlparts._replace(query='').geturl())
+                    s.set_tag(http.METHOD, request.method)
+                    if config.bottle.trace_query_string:
+                        s.set_tag(http.QUERY_STRING, request.query_string)
+
+        return wrapped
diff --git a/ddtrace/contrib/cassandra/__init__.py b/ddtrace/contrib/cassandra/__init__.py
new file mode 100644
index 0000000000..bc2bf63814
--- /dev/null
+++ b/ddtrace/contrib/cassandra/__init__.py
@@ -0,0 +1,35 @@
+"""Instrument Cassandra to report Cassandra queries.
+
+``patch_all`` will automatically patch your Cluster instance to make it work.
+::
+
+    from ddtrace import Pin, patch
+    from cassandra.cluster import Cluster
+
+    # If not patched yet, you can patch cassandra specifically
+    patch(cassandra=True)
+
+    # This will report spans with the default instrumentation
+    cluster = Cluster(contact_points=["127.0.0.1"], port=9042)
+    session = cluster.connect("my_keyspace")
+    # Example of instrumented query
+    session.execute("select id from my_table limit 10;")
+
+    # Use a pin to specify metadata related to this cluster
+    cluster = Cluster(contact_points=['10.1.1.3', '10.1.1.4', '10.1.1.5'], port=9042)
+    Pin.override(cluster, service='cassandra-backend')
+    session = cluster.connect("my_keyspace")
+    session.execute("select id from my_table limit 10;")
+"""
+from ...utils.importlib import require_modules
+
+
+required_modules = ['cassandra.cluster']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .session import get_traced_cassandra, patch
+        __all__ = [
+            'get_traced_cassandra',
+            'patch',
+        ]
diff --git a/ddtrace/contrib/cassandra/patch.py b/ddtrace/contrib/cassandra/patch.py
new file mode 100644
index 0000000000..52f5d90017
--- /dev/null
+++ b/ddtrace/contrib/cassandra/patch.py
@@ -0,0 +1,3 @@
+from .session import patch, unpatch
+
+__all__ = ['patch', 'unpatch']
diff --git a/ddtrace/contrib/cassandra/session.py b/ddtrace/contrib/cassandra/session.py
new file mode 100644
index 0000000000..512aba7758
--- /dev/null
+++ b/ddtrace/contrib/cassandra/session.py
@@ -0,0 +1,297 @@
+"""
+Trace queries along a session to a Cassandra cluster.
+"""
+import sys
+
+# 3p
+import cassandra.cluster
+
+# project
+from ...compat import stringify
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...ext import SpanTypes, net, cassandra as cassx, errors
+from ...internal.logger import get_logger
+from ...pin import Pin
+from ...settings import config
+from ...utils.deprecation import deprecated
+from ...utils.formats import deep_getattr
+from ...vendor import wrapt
+
+log = get_logger(__name__)
+
+RESOURCE_MAX_LENGTH = 5000
+SERVICE = 'cassandra'
+CURRENT_SPAN = '_ddtrace_current_span'
+PAGE_NUMBER = '_ddtrace_page_number'
+
+# Original connect function
+_connect = cassandra.cluster.Cluster.connect
+
+
+def patch():
+    """ patch will add tracing to the cassandra library.
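+
+    A minimal sketch of the patch/unpatch round-trip (hedged; ``unpatch``
+    simply restores the original ``Cluster.connect``)::
+
+        from ddtrace.contrib.cassandra.session import patch, unpatch
+
+        patch()    # Cluster.connect now returns sessions whose execute_async is traced
+        unpatch()  # restores the original Cluster.connect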
""" + setattr(cassandra.cluster.Cluster, 'connect', + wrapt.FunctionWrapper(_connect, traced_connect)) + Pin(service=SERVICE, app=SERVICE).onto(cassandra.cluster.Cluster) + + +def unpatch(): + cassandra.cluster.Cluster.connect = _connect + + +def traced_connect(func, instance, args, kwargs): + session = func(*args, **kwargs) + if not isinstance(session.execute, wrapt.FunctionWrapper): + # FIXME[matt] this should probably be private. + setattr(session, 'execute_async', wrapt.FunctionWrapper(session.execute_async, traced_execute_async)) + return session + + +def _close_span_on_success(result, future): + span = getattr(future, CURRENT_SPAN, None) + if not span: + log.debug('traced_set_final_result was not able to get the current span from the ResponseFuture') + return + try: + span.set_tags(_extract_result_metas(cassandra.cluster.ResultSet(future, result))) + except Exception: + log.debug('an exception occured while setting tags', exc_info=True) + finally: + span.finish() + delattr(future, CURRENT_SPAN) + + +def traced_set_final_result(func, instance, args, kwargs): + result = args[0] + _close_span_on_success(result, instance) + return func(*args, **kwargs) + + +def _close_span_on_error(exc, future): + span = getattr(future, CURRENT_SPAN, None) + if not span: + log.debug('traced_set_final_exception was not able to get the current span from the ResponseFuture') + return + try: + # handling the exception manually because we + # don't have an ongoing exception here + span.error = 1 + span.set_tag(errors.ERROR_MSG, exc.args[0]) + span.set_tag(errors.ERROR_TYPE, exc.__class__.__name__) + except Exception: + log.debug('traced_set_final_exception was not able to set the error, failed with error', exc_info=True) + finally: + span.finish() + delattr(future, CURRENT_SPAN) + + +def traced_set_final_exception(func, instance, args, kwargs): + exc = args[0] + _close_span_on_error(exc, instance) + return func(*args, **kwargs) + + +def traced_start_fetching_next_page(func, instance, args, kwargs): + has_more_pages = getattr(instance, 'has_more_pages', True) + if not has_more_pages: + return func(*args, **kwargs) + session = getattr(instance, 'session', None) + cluster = getattr(session, 'cluster', None) + pin = Pin.get_from(cluster) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + # In case the current span is not finished we make sure to finish it + old_span = getattr(instance, CURRENT_SPAN, None) + if old_span: + log.debug('previous span was not finished before fetching next page') + old_span.finish() + + query = getattr(instance, 'query', None) + + span = _start_span_and_set_tags(pin, query, session, cluster) + + page_number = getattr(instance, PAGE_NUMBER, 1) + 1 + setattr(instance, PAGE_NUMBER, page_number) + setattr(instance, CURRENT_SPAN, span) + try: + return func(*args, **kwargs) + except Exception: + with span: + span.set_exc_info(*sys.exc_info()) + raise + + +def traced_execute_async(func, instance, args, kwargs): + cluster = getattr(instance, 'cluster', None) + pin = Pin.get_from(cluster) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + query = kwargs.get('query') or args[0] + + span = _start_span_and_set_tags(pin, query, instance, cluster) + + try: + result = func(*args, **kwargs) + setattr(result, CURRENT_SPAN, span) + setattr(result, PAGE_NUMBER, 1) + setattr( + result, + '_set_final_result', + wrapt.FunctionWrapper( + result._set_final_result, + traced_set_final_result + ) + ) + setattr( + result, + '_set_final_exception', + wrapt.FunctionWrapper( + 
result._set_final_exception, + traced_set_final_exception + ) + ) + setattr( + result, + 'start_fetching_next_page', + wrapt.FunctionWrapper( + result.start_fetching_next_page, + traced_start_fetching_next_page + ) + ) + # Since we cannot be sure that the previous methods were overwritten + # before the call ended, we add callbacks that will be run + # synchronously if the call already returned and we remove them right + # after. + result.add_callbacks( + _close_span_on_success, + _close_span_on_error, + callback_args=(result,), + errback_args=(result,) + ) + result.clear_callbacks() + return result + except Exception: + with span: + span.set_exc_info(*sys.exc_info()) + raise + + +def _start_span_and_set_tags(pin, query, session, cluster): + service = pin.service + tracer = pin.tracer + span = tracer.trace('cassandra.query', service=service, span_type=SpanTypes.CASSANDRA) + _sanitize_query(span, query) + span.set_tags(_extract_session_metas(session)) # FIXME[matt] do once? + span.set_tags(_extract_cluster_metas(cluster)) + # set analytics sample rate if enabled + span.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.cassandra.get_analytics_sample_rate() + ) + return span + + +def _extract_session_metas(session): + metas = {} + + if getattr(session, 'keyspace', None): + # FIXME the keyspace can be overridden explicitly in the query itself + # e.g. 'select * from trace.hash_to_resource' + metas[cassx.KEYSPACE] = session.keyspace.lower() + + return metas + + +def _extract_cluster_metas(cluster): + metas = {} + if deep_getattr(cluster, 'metadata.cluster_name'): + metas[cassx.CLUSTER] = cluster.metadata.cluster_name + if getattr(cluster, 'port', None): + metas[net.TARGET_PORT] = cluster.port + + return metas + + +def _extract_result_metas(result): + metas = {} + if result is None: + return metas + + future = getattr(result, 'response_future', None) + + if future: + # get the host + host = getattr(future, 'coordinator_host', None) + if host: + metas[net.TARGET_HOST] = host + elif hasattr(future, '_current_host'): + address = deep_getattr(future, '_current_host.address') + if address: + metas[net.TARGET_HOST] = address + + query = getattr(future, 'query', None) + if getattr(query, 'consistency_level', None): + metas[cassx.CONSISTENCY_LEVEL] = query.consistency_level + if getattr(query, 'keyspace', None): + metas[cassx.KEYSPACE] = query.keyspace.lower() + + page_number = getattr(future, PAGE_NUMBER, 1) + has_more_pages = getattr(future, 'has_more_pages') + is_paginated = has_more_pages or page_number > 1 + metas[cassx.PAGINATED] = is_paginated + if is_paginated: + metas[cassx.PAGE_NUMBER] = page_number + + if hasattr(result, 'current_rows'): + result_rows = result.current_rows or [] + metas[cassx.ROW_COUNT] = len(result_rows) + + return metas + + +def _sanitize_query(span, query): + # TODO (aaditya): fix this hacky type check. 
we need it to avoid circular imports
+    t = type(query).__name__
+
+    resource = None
+    if t in ('SimpleStatement', 'PreparedStatement'):
+        # reset query if a string is available
+        resource = getattr(query, 'query_string', query)
+    elif t == 'BatchStatement':
+        resource = 'BatchStatement'
+        # Each element in `_statements_and_parameters` is:
+        #   (is_prepared, statement, parameters)
+        # ref: https://github.com/datastax/python-driver/blob/13d6d72be74f40fcef5ec0f2b3e98538b3b87459/cassandra/query.py#L844
+        #
+        # For prepared statements, the `statement` value is just the query_id,
+        # which is not a statement; joining it with other strings raises an
+        # error in Python 3 (joining bytes to unicode), so we filter prepared
+        # statements out of this tag value.
+        q = '; '.join(q[1] for q in query._statements_and_parameters[:2] if not q[0])
+        span.set_tag('cassandra.query', q)
+        span.set_metric('cassandra.batch_size', len(query._statements_and_parameters))
+    elif t == 'BoundStatement':
+        ps = getattr(query, 'prepared_statement', None)
+        if ps:
+            resource = getattr(ps, 'query_string', None)
+    elif t == 'str':
+        resource = query
+    else:
+        resource = 'unknown-query-type'  # FIXME[matt] what else to do here?
+
+    span.resource = stringify(resource)[:RESOURCE_MAX_LENGTH]
+
+
+#
+# DEPRECATED
+#
+
+@deprecated(message='Use patching instead (see the docs).', version='1.0.0')
+def get_traced_cassandra(*args, **kwargs):
+    return _get_traced_cluster(*args, **kwargs)
+
+
+def _get_traced_cluster(*args, **kwargs):
+    return cassandra.cluster.Cluster
diff --git a/ddtrace/contrib/celery/__init__.py b/ddtrace/contrib/celery/__init__.py
new file mode 100644
index 0000000000..1acd7fb72c
--- /dev/null
+++ b/ddtrace/contrib/celery/__init__.py
@@ -0,0 +1,54 @@
+"""
+The Celery integration will trace all tasks that are executed in the
+background. Functions and class-based tasks are traced only if the Celery API
+is used, so calling the function directly or via the ``run()`` method will not
+generate traces. However, calling ``apply()``, ``apply_async()`` and ``delay()``
+will produce tracing data.
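+
+For example, given a patched application and a hypothetical task ``my_task``,
+only the calls that go through the Celery API are traced (a sketch)::
+
+    my_task()              # direct call: not traced
+    my_task.run()          # not traced
+    my_task.delay()        # traced
+    my_task.apply_async()  # traced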
To trace your Celery application, call the patch method:: + + import celery + from ddtrace import patch + + patch(celery=True) + app = celery.Celery() + + @app.task + def my_task(): + pass + + class MyTask(app.Task): + def run(self): + pass + + +To change Celery service name, you can use the ``Config`` API as follows:: + + from ddtrace import config + + # change service names for producers and workers + config.celery['producer_service_name'] = 'task-queue' + config.celery['worker_service_name'] = 'worker-notify' + +By default, reported service names are: + * ``celery-producer`` when tasks are enqueued for processing + * ``celery-worker`` when tasks are processed by a Celery process + +""" +from ...utils.importlib import require_modules + + +required_modules = ['celery'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .app import patch_app, unpatch_app + from .patch import patch, unpatch + from .task import patch_task, unpatch_task + + __all__ = [ + 'patch', + 'patch_app', + 'patch_task', + 'unpatch', + 'unpatch_app', + 'unpatch_task', + ] diff --git a/ddtrace/contrib/celery/app.py b/ddtrace/contrib/celery/app.py new file mode 100644 index 0000000000..0c3a835c7f --- /dev/null +++ b/ddtrace/contrib/celery/app.py @@ -0,0 +1,60 @@ +from celery import signals + +from ddtrace import Pin, config +from ddtrace.pin import _DD_PIN_NAME + +from .constants import APP +from .signals import ( + trace_prerun, + trace_postrun, + trace_before_publish, + trace_after_publish, + trace_failure, + trace_retry, +) + + +def patch_app(app, pin=None): + """Attach the Pin class to the application and connect + our handlers to Celery signals. + """ + if getattr(app, '__datadog_patch', False): + return + setattr(app, '__datadog_patch', True) + + # attach the PIN object + pin = pin or Pin( + service=config.celery['worker_service_name'], + app=APP, + _config=config.celery, + ) + pin.onto(app) + # connect to the Signal framework + + signals.task_prerun.connect(trace_prerun, weak=False) + signals.task_postrun.connect(trace_postrun, weak=False) + signals.before_task_publish.connect(trace_before_publish, weak=False) + signals.after_task_publish.connect(trace_after_publish, weak=False) + signals.task_failure.connect(trace_failure, weak=False) + signals.task_retry.connect(trace_retry, weak=False) + return app + + +def unpatch_app(app): + """Remove the Pin instance from the application and disconnect + our handlers from Celery signal framework. 
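+    A sketch of the round-trip (``patch_app`` and ``unpatch_app`` are both
+    exported by ``ddtrace.contrib.celery``)::
+
+        import celery
+        from ddtrace.contrib.celery import patch_app, unpatch_app
+
+        app = patch_app(celery.Celery())
+        unpatch_app(app)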
+ """ + if not getattr(app, '__datadog_patch', False): + return + setattr(app, '__datadog_patch', False) + + pin = Pin.get_from(app) + if pin is not None: + delattr(app, _DD_PIN_NAME) + + signals.task_prerun.disconnect(trace_prerun) + signals.task_postrun.disconnect(trace_postrun) + signals.before_task_publish.disconnect(trace_before_publish) + signals.after_task_publish.disconnect(trace_after_publish) + signals.task_failure.disconnect(trace_failure) + signals.task_retry.disconnect(trace_retry) diff --git a/ddtrace/contrib/celery/constants.py b/ddtrace/contrib/celery/constants.py new file mode 100644 index 0000000000..407c2125c8 --- /dev/null +++ b/ddtrace/contrib/celery/constants.py @@ -0,0 +1,22 @@ +from os import getenv + +# Celery Context key +CTX_KEY = '__dd_task_span' + +# Span names +PRODUCER_ROOT_SPAN = 'celery.apply' +WORKER_ROOT_SPAN = 'celery.run' + +# Task operations +TASK_TAG_KEY = 'celery.action' +TASK_APPLY = 'apply' +TASK_APPLY_ASYNC = 'apply_async' +TASK_RUN = 'run' +TASK_RETRY_REASON_KEY = 'celery.retry.reason' + +# Service info +APP = 'celery' +# `getenv()` call must be kept for backward compatibility; we may remove it +# later when we do a full migration to the `Config` class +PRODUCER_SERVICE = getenv('DATADOG_SERVICE_NAME') or 'celery-producer' +WORKER_SERVICE = getenv('DATADOG_SERVICE_NAME') or 'celery-worker' diff --git a/ddtrace/contrib/celery/patch.py b/ddtrace/contrib/celery/patch.py new file mode 100644 index 0000000000..b6e9793840 --- /dev/null +++ b/ddtrace/contrib/celery/patch.py @@ -0,0 +1,28 @@ +import celery + +from ddtrace import config + +from .app import patch_app, unpatch_app +from .constants import PRODUCER_SERVICE, WORKER_SERVICE +from ...utils.formats import get_env + + +# Celery default settings +config._add('celery', { + 'producer_service_name': get_env('celery', 'producer_service_name', PRODUCER_SERVICE), + 'worker_service_name': get_env('celery', 'worker_service_name', WORKER_SERVICE), +}) + + +def patch(): + """Instrument Celery base application and the `TaskRegistry` so + that any new registered task is automatically instrumented. In the + case of Django-Celery integration, also the `@shared_task` decorator + must be instrumented because Django doesn't use the Celery registry. + """ + patch_app(celery.Celery) + + +def unpatch(): + """Disconnect all signals and remove Tracing capabilities""" + unpatch_app(celery.Celery) diff --git a/ddtrace/contrib/celery/signals.py b/ddtrace/contrib/celery/signals.py new file mode 100644 index 0000000000..2afcce556a --- /dev/null +++ b/ddtrace/contrib/celery/signals.py @@ -0,0 +1,154 @@ +from ddtrace import Pin, config + +from celery import registry + +from ...ext import SpanTypes +from ...internal.logger import get_logger +from . import constants as c +from .utils import tags_from_context, retrieve_task_id, attach_span, detach_span, retrieve_span + +log = get_logger(__name__) + + +def trace_prerun(*args, **kwargs): + # safe-guard to avoid crashes in case the signals API + # changes in Celery + task = kwargs.get('sender') + task_id = kwargs.get('task_id') + log.debug('prerun signal start task_id=%s', task_id) + if task is None or task_id is None: + log.debug('unable to extract the Task and the task_id. 
This version of Celery may not be supported.') + return + + # retrieve the task Pin or fallback to the global one + pin = Pin.get_from(task) or Pin.get_from(task.app) + if pin is None: + log.debug('no pin found on task or task.app task_id=%s', task_id) + return + + # propagate the `Span` in the current task Context + service = config.celery['worker_service_name'] + span = pin.tracer.trace(c.WORKER_ROOT_SPAN, service=service, resource=task.name, span_type=SpanTypes.WORKER) + attach_span(task, task_id, span) + + +def trace_postrun(*args, **kwargs): + # safe-guard to avoid crashes in case the signals API + # changes in Celery + task = kwargs.get('sender') + task_id = kwargs.get('task_id') + log.debug('postrun signal task_id=%s', task_id) + if task is None or task_id is None: + log.debug('unable to extract the Task and the task_id. This version of Celery may not be supported.') + return + + # retrieve and finish the Span + span = retrieve_span(task, task_id) + if span is None: + log.warning('no existing span found for task_id=%s', task_id) + return + else: + # request context tags + span.set_tag(c.TASK_TAG_KEY, c.TASK_RUN) + span.set_tags(tags_from_context(kwargs)) + span.set_tags(tags_from_context(task.request)) + span.finish() + detach_span(task, task_id) + + +def trace_before_publish(*args, **kwargs): + # `before_task_publish` signal doesn't propagate the task instance so + # we need to retrieve it from the Celery Registry to access the `Pin`. The + # `Task` instance **does not** include any information about the current + # execution, so it **must not** be used to retrieve `request` data. + task_name = kwargs.get('sender') + task = registry.tasks.get(task_name) + task_id = retrieve_task_id(kwargs) + # safe-guard to avoid crashes in case the signals API + # changes in Celery + if task is None or task_id is None: + log.debug('unable to extract the Task and the task_id. This version of Celery may not be supported.') + return + + # propagate the `Span` in the current task Context + pin = Pin.get_from(task) or Pin.get_from(task.app) + if pin is None: + return + + # apply some tags here because most of the data is not available + # in the task_after_publish signal + service = config.celery['producer_service_name'] + span = pin.tracer.trace(c.PRODUCER_ROOT_SPAN, service=service, resource=task_name) + span.set_tag(c.TASK_TAG_KEY, c.TASK_APPLY_ASYNC) + span.set_tag('celery.id', task_id) + span.set_tags(tags_from_context(kwargs)) + # Note: adding tags from `traceback` or `state` calls will make an + # API call to the backend for the properties so we should rely + # only on the given `Context` + attach_span(task, task_id, span, is_publish=True) + + +def trace_after_publish(*args, **kwargs): + task_name = kwargs.get('sender') + task = registry.tasks.get(task_name) + task_id = retrieve_task_id(kwargs) + # safe-guard to avoid crashes in case the signals API + # changes in Celery + if task is None or task_id is None: + log.debug('unable to extract the Task and the task_id. This version of Celery may not be supported.') + return + + # retrieve and finish the Span + span = retrieve_span(task, task_id, is_publish=True) + if span is None: + return + else: + span.finish() + detach_span(task, task_id, is_publish=True) + + +def trace_failure(*args, **kwargs): + # safe-guard to avoid crashes in case the signals API + # changes in Celery + task = kwargs.get('sender') + task_id = kwargs.get('task_id') + if task is None or task_id is None: + log.debug('unable to extract the Task and the task_id. 
This version of Celery may not be supported.') + return + + # retrieve and finish the Span + span = retrieve_span(task, task_id) + if span is None: + return + else: + # add Exception tags; post signals are still called + # so we don't need to attach other tags here + ex = kwargs.get('einfo') + if ex is None: + return + if hasattr(task, 'throws') and isinstance(ex.exception, task.throws): + return + span.set_exc_info(ex.type, ex.exception, ex.tb) + + +def trace_retry(*args, **kwargs): + # safe-guard to avoid crashes in case the signals API + # changes in Celery + task = kwargs.get('sender') + context = kwargs.get('request') + if task is None or context is None: + log.debug('unable to extract the Task or the Context. This version of Celery may not be supported.') + return + + reason = kwargs.get('reason') + if not reason: + log.debug('unable to extract the retry reason. This version of Celery may not be supported.') + return + + span = retrieve_span(task, context.id) + if span is None: + return + + # Add retry reason metadata to span + # DEV: Use `str(reason)` instead of `reason.message` in case we get something that isn't an `Exception` + span.set_tag(c.TASK_RETRY_REASON_KEY, str(reason)) diff --git a/ddtrace/contrib/celery/task.py b/ddtrace/contrib/celery/task.py new file mode 100644 index 0000000000..be6c1dd187 --- /dev/null +++ b/ddtrace/contrib/celery/task.py @@ -0,0 +1,32 @@ +from .app import patch_app + +from ...utils.deprecation import deprecation + + +def patch_task(task, pin=None): + """Deprecated API. The new API uses signals that can be activated via + patch(celery=True) or through `ddtrace-run` script. Using this API + enables instrumentation on all tasks. + """ + deprecation( + name='ddtrace.contrib.celery.patch_task', + message='Use `patch(celery=True)` or `ddtrace-run` script instead', + version='1.0.0', + ) + + # Enable instrumentation everywhere + patch_app(task.app) + return task + + +def unpatch_task(task): + """Deprecated API. The new API uses signals that can be deactivated + via unpatch() API. This API is now a no-op implementation so it doesn't + affect instrumented tasks. 
+ """ + deprecation( + name='ddtrace.contrib.celery.patch_task', + message='Use `unpatch()` instead', + version='1.0.0', + ) + return task diff --git a/ddtrace/contrib/celery/utils.py b/ddtrace/contrib/celery/utils.py new file mode 100644 index 0000000000..71fda21d3e --- /dev/null +++ b/ddtrace/contrib/celery/utils.py @@ -0,0 +1,106 @@ +from weakref import WeakValueDictionary + +from .constants import CTX_KEY + + +def tags_from_context(context): + """Helper to extract meta values from a Celery Context""" + tag_keys = ( + 'compression', 'correlation_id', 'countdown', 'delivery_info', 'eta', + 'exchange', 'expires', 'hostname', 'id', 'priority', 'queue', 'reply_to', + 'retries', 'routing_key', 'serializer', 'timelimit', 'origin', 'state', + ) + + tags = {} + for key in tag_keys: + value = context.get(key) + + # Skip this key if it is not set + if value is None or value == '': + continue + + # Skip `timelimit` if it is not set (it's default/unset value is a + # tuple or a list of `None` values + if key == 'timelimit' and value in [(None, None), [None, None]]: + continue + + # Skip `retries` if it's value is `0` + if key == 'retries' and value == 0: + continue + + # Celery 4.0 uses `origin` instead of `hostname`; this change preserves + # the same name for the tag despite Celery version + if key == 'origin': + key = 'hostname' + + # prefix the tag as 'celery' + tag_name = 'celery.{}'.format(key) + tags[tag_name] = value + return tags + + +def attach_span(task, task_id, span, is_publish=False): + """Helper to propagate a `Span` for the given `Task` instance. This + function uses a `WeakValueDictionary` that stores a Datadog Span using + the `(task_id, is_publish)` as a key. This is useful when information must be + propagated from one Celery signal to another. + + DEV: We use (task_id, is_publish) for the key to ensure that publishing a + task from within another task does not cause any conflicts. + + This mostly happens when either a task fails and a retry policy is in place, + or when a task is manually retries (e.g. `task.retry()`), we end up trying + to publish a task with the same id as the task currently running. + + Previously publishing the new task would overwrite the existing `celery.run` span + in the `weak_dict` causing that span to be forgotten and never finished. + + NOTE: We cannot test for this well yet, because we do not run a celery worker, + and cannot run `task.apply_async()` + """ + weak_dict = getattr(task, CTX_KEY, None) + if weak_dict is None: + weak_dict = WeakValueDictionary() + setattr(task, CTX_KEY, weak_dict) + + weak_dict[(task_id, is_publish)] = span + + +def detach_span(task, task_id, is_publish=False): + """Helper to remove a `Span` in a Celery task when it's propagated. + This function handles tasks where the `Span` is not attached. + """ + weak_dict = getattr(task, CTX_KEY, None) + if weak_dict is None: + return + + # DEV: See note in `attach_span` for key info + weak_dict.pop((task_id, is_publish), None) + + +def retrieve_span(task, task_id, is_publish=False): + """Helper to retrieve an active `Span` stored in a `Task` + instance + """ + weak_dict = getattr(task, CTX_KEY, None) + if weak_dict is None: + return + else: + # DEV: See note in `attach_span` for key info + return weak_dict.get((task_id, is_publish)) + + +def retrieve_task_id(context): + """Helper to retrieve the `Task` identifier from the message `body`. + This helper supports Protocol Version 1 and 2. 
+    The Protocol is well detailed in the official documentation:
+    http://docs.celeryproject.org/en/latest/internals/protocol.html
+    """
+    headers = context.get('headers')
+    body = context.get('body')
+    if headers:
+        # Protocol Version 2 (default from Celery 4.0)
+        return headers.get('id')
+    else:
+        # Protocol Version 1
+        return body.get('id')
diff --git a/ddtrace/contrib/consul/__init__.py b/ddtrace/contrib/consul/__init__.py
new file mode 100644
index 0000000000..a90eaf3879
--- /dev/null
+++ b/ddtrace/contrib/consul/__init__.py
@@ -0,0 +1,29 @@
+"""Instrument Consul to trace KV queries.
+
+Only supports tracing for the synchronous client.
+
+``patch_all`` will automatically patch your Consul client to make it work.
+::
+
+    from ddtrace import Pin, patch
+    import consul
+
+    # If not patched yet, you can patch consul specifically
+    patch(consul=True)
+
+    # This will report a span with the default settings
+    client = consul.Consul(host="127.0.0.1", port=8500)
+    client.get("my-key")
+
+    # Use a pin to specify metadata related to this client
+    Pin.override(client, service='consul-kv')
+"""
+
+from ...utils.importlib import require_modules
+
+required_modules = ['consul']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch, unpatch
+        __all__ = ['patch', 'unpatch']
diff --git a/ddtrace/contrib/consul/patch.py b/ddtrace/contrib/consul/patch.py
new file mode 100644
index 0000000000..aec3390aeb
--- /dev/null
+++ b/ddtrace/contrib/consul/patch.py
@@ -0,0 +1,57 @@
+import consul
+
+from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
+
+from ddtrace import config
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...ext import consul as consulx
+from ...pin import Pin
+from ...utils.wrappers import unwrap as _u
+
+
+_KV_FUNCS = ['put', 'get', 'delete']
+
+
+def patch():
+    if getattr(consul, '__datadog_patch', False):
+        return
+    setattr(consul, '__datadog_patch', True)
+
+    pin = Pin(service=consulx.SERVICE, app=consulx.APP)
+    pin.onto(consul.Consul.KV)
+
+    for f_name in _KV_FUNCS:
+        _w('consul', 'Consul.KV.%s' % f_name, wrap_function(f_name))
+
+
+def unpatch():
+    if not getattr(consul, '__datadog_patch', False):
+        return
+    setattr(consul, '__datadog_patch', False)
+
+    for f_name in _KV_FUNCS:
+        _u(consul.Consul.KV, f_name)
+
+
+def wrap_function(name):
+    def trace_func(wrapped, instance, args, kwargs):
+        pin = Pin.get_from(instance)
+        if not pin or not pin.enabled():
+            return wrapped(*args, **kwargs)
+
+        # Only patch the synchronous implementation
+        if not isinstance(instance.agent.http, consul.std.HTTPClient):
+            return wrapped(*args, **kwargs)
+
+        path = kwargs.get('key') or args[0]
+        resource = name.upper()
+
+        with pin.tracer.trace(consulx.CMD, service=pin.service, resource=resource) as span:
+            rate = config.consul.get_analytics_sample_rate()
+            if rate is not None:
+                span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, rate)
+            span.set_tag(consulx.KEY, path)
+            span.set_tag(consulx.CMD, resource)
+            return wrapped(*args, **kwargs)
+
+    return trace_func
diff --git a/ddtrace/contrib/dbapi/__init__.py b/ddtrace/contrib/dbapi/__init__.py
new file mode 100644
index 0000000000..01b34dab08
--- /dev/null
+++ b/ddtrace/contrib/dbapi/__init__.py
@@ -0,0 +1,204 @@
+"""
+Generic dbapi tracing code.
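+
+A minimal usage sketch (hedged; assumes a sqlite3 connection and the default
+tracer, and the ``my-sqlite`` service name is illustrative)::
+
+    import sqlite3
+
+    from ddtrace import Pin
+    from ddtrace.contrib.dbapi import TracedConnection
+
+    conn = TracedConnection(sqlite3.connect(':memory:'))
+    Pin.override(conn, service='my-sqlite')
+
+    cursor = conn.cursor()       # returns a TracedCursor
+    cursor.execute('SELECT 1')   # reported as a sql-type span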
+""" + +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...ext import SpanTypes, sql +from ...internal.logger import get_logger +from ...pin import Pin +from ...settings import config +from ...utils.formats import asbool, get_env +from ...vendor import wrapt + + +log = get_logger(__name__) + +config._add('dbapi2', dict( + trace_fetch_methods=asbool(get_env('dbapi2', 'trace_fetch_methods', 'false')), +)) + + +class TracedCursor(wrapt.ObjectProxy): + """ TracedCursor wraps a psql cursor and traces it's queries. """ + + def __init__(self, cursor, pin): + super(TracedCursor, self).__init__(cursor) + pin.onto(self) + name = pin.app or 'sql' + self._self_datadog_name = '{}.query'.format(name) + self._self_last_execute_operation = None + + def _trace_method(self, method, name, resource, extra_tags, *args, **kwargs): + """ + Internal function to trace the call to the underlying cursor method + :param method: The callable to be wrapped + :param name: The name of the resulting span. + :param resource: The sql query. Sql queries are obfuscated on the agent side. + :param extra_tags: A dict of tags to store into the span's meta + :param args: The args that will be passed as positional args to the wrapped method + :param kwargs: The args that will be passed as kwargs to the wrapped method + :return: The result of the wrapped method invocation + """ + pin = Pin.get_from(self) + if not pin or not pin.enabled(): + return method(*args, **kwargs) + service = pin.service + with pin.tracer.trace(name, service=service, resource=resource, span_type=SpanTypes.SQL) as s: + # No reason to tag the query since it is set as the resource by the agent. See: + # https://github.com/DataDog/datadog-trace-agent/blob/bda1ebbf170dd8c5879be993bdd4dbae70d10fda/obfuscate/sql.go#L232 + s.set_tags(pin.tags) + s.set_tags(extra_tags) + + # set analytics sample rate if enabled but only for non-FetchTracedCursor + if not isinstance(self, FetchTracedCursor): + s.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.dbapi2.get_analytics_sample_rate() + ) + + try: + return method(*args, **kwargs) + finally: + row_count = self.__wrapped__.rowcount + s.set_metric('db.rowcount', row_count) + # Necessary for django integration backward compatibility. Django integration used to provide its own + # implementation of the TracedCursor, which used to store the row count into a tag instead of + # as a metric. Such custom implementation has been replaced by this generic dbapi implementation and + # this tag has been added since. + if row_count and row_count >= 0: + s.set_tag(sql.ROWS, row_count) + + def executemany(self, query, *args, **kwargs): + """ Wraps the cursor.executemany method""" + self._self_last_execute_operation = query + # Always return the result as-is + # DEV: Some libraries return `None`, others `int`, and others the cursor objects + # These differences should be overriden at the integration specific layer (e.g. in `sqlite3/patch.py`) + # FIXME[matt] properly handle kwargs here. arg names can be different + # with different libs. + return self._trace_method( + self.__wrapped__.executemany, self._self_datadog_name, query, {'sql.executemany': 'true'}, + query, *args, **kwargs) + + def execute(self, query, *args, **kwargs): + """ Wraps the cursor.execute method""" + self._self_last_execute_operation = query + + # Always return the result as-is + # DEV: Some libraries return `None`, others `int`, and others the cursor objects + # These differences should be overriden at the integration specific layer (e.g. 
+        return self._trace_method(self.__wrapped__.execute, self._self_datadog_name, query, {}, query, *args, **kwargs)
+
+    def callproc(self, proc, args):
+        """ Wraps the cursor.callproc method"""
+        self._self_last_execute_operation = proc
+        return self._trace_method(self.__wrapped__.callproc, self._self_datadog_name, proc, {}, proc, args)
+
+    def __enter__(self):
+        # previous versions of the dbapi didn't support context managers. let's
+        # reference the func that would be called to ensure that error
+        # messages will be the same.
+        self.__wrapped__.__enter__
+
+        # and finally, yield the traced cursor.
+        return self
+
+
+class FetchTracedCursor(TracedCursor):
+    """
+    Sub-class of :class:`TracedCursor` that also instruments `fetchone`, `fetchall`, and `fetchmany` methods.
+
+    We do not trace these functions by default since they can get very noisy (e.g. `fetchone` with 100k rows).
+    """
+    def fetchone(self, *args, **kwargs):
+        """ Wraps the cursor.fetchone method"""
+        span_name = '{}.{}'.format(self._self_datadog_name, 'fetchone')
+        return self._trace_method(self.__wrapped__.fetchone, span_name, self._self_last_execute_operation, {},
+                                  *args, **kwargs)
+
+    def fetchall(self, *args, **kwargs):
+        """ Wraps the cursor.fetchall method"""
+        span_name = '{}.{}'.format(self._self_datadog_name, 'fetchall')
+        return self._trace_method(self.__wrapped__.fetchall, span_name, self._self_last_execute_operation, {},
+                                  *args, **kwargs)
+
+    def fetchmany(self, *args, **kwargs):
+        """ Wraps the cursor.fetchmany method"""
+        span_name = '{}.{}'.format(self._self_datadog_name, 'fetchmany')
+        # We want to trace the information about how many rows were requested. Note that this number may be
+        # larger than the number of rows actually returned if fewer than requested are available from the query.
+        size_tag_key = 'db.fetch.size'
+        if 'size' in kwargs:
+            extra_tags = {size_tag_key: kwargs.get('size')}
+        elif len(args) == 1 and isinstance(args[0], int):
+            extra_tags = {size_tag_key: args[0]}
+        else:
+            default_array_size = getattr(self.__wrapped__, 'arraysize', None)
+            extra_tags = {size_tag_key: default_array_size} if default_array_size else {}
+
+        return self._trace_method(self.__wrapped__.fetchmany, span_name, self._self_last_execute_operation, extra_tags,
+                                  *args, **kwargs)
+
+
+class TracedConnection(wrapt.ObjectProxy):
+    """ TracedConnection wraps a Connection with tracing code.
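+
+    A sketch of opting a single connection into fetch tracing (hedged; ``conn``
+    stands for any DB API connection object)::
+
+        from ddtrace.contrib.dbapi import FetchTracedCursor, TracedConnection
+
+        traced = TracedConnection(conn, cursor_cls=FetchTracedCursor)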
""" + + def __init__(self, conn, pin=None, cursor_cls=None): + # Set default cursor class if one was not provided + if not cursor_cls: + # Do not trace `fetch*` methods by default + cursor_cls = TracedCursor + if config.dbapi2.trace_fetch_methods: + cursor_cls = FetchTracedCursor + + super(TracedConnection, self).__init__(conn) + name = _get_vendor(conn) + self._self_datadog_name = '{}.connection'.format(name) + db_pin = pin or Pin(service=name, app=name) + db_pin.onto(self) + # wrapt requires prefix of `_self` for attributes that are only in the + # proxy (since some of our source objects will use `__slots__`) + self._self_cursor_cls = cursor_cls + + def _trace_method(self, method, name, extra_tags, *args, **kwargs): + pin = Pin.get_from(self) + if not pin or not pin.enabled(): + return method(*args, **kwargs) + service = pin.service + + with pin.tracer.trace(name, service=service) as s: + s.set_tags(pin.tags) + s.set_tags(extra_tags) + + return method(*args, **kwargs) + + def cursor(self, *args, **kwargs): + cursor = self.__wrapped__.cursor(*args, **kwargs) + pin = Pin.get_from(self) + if not pin: + return cursor + return self._self_cursor_cls(cursor, pin) + + def commit(self, *args, **kwargs): + span_name = '{}.{}'.format(self._self_datadog_name, 'commit') + return self._trace_method(self.__wrapped__.commit, span_name, {}, *args, **kwargs) + + def rollback(self, *args, **kwargs): + span_name = '{}.{}'.format(self._self_datadog_name, 'rollback') + return self._trace_method(self.__wrapped__.rollback, span_name, {}, *args, **kwargs) + + +def _get_vendor(conn): + """ Return the vendor (e.g postgres, mysql) of the given + database. + """ + try: + name = _get_module_name(conn) + except Exception: + log.debug('couldnt parse module name', exc_info=True) + name = 'sql' + return sql.normalize_vendor(name) + + +def _get_module_name(conn): + return conn.__class__.__module__.split('.')[0] diff --git a/ddtrace/contrib/django/__init__.py b/ddtrace/contrib/django/__init__.py new file mode 100644 index 0000000000..f3405dd4f2 --- /dev/null +++ b/ddtrace/contrib/django/__init__.py @@ -0,0 +1,101 @@ +""" +The Django integration will trace users requests, template renderers, database and cache +calls. + +**Note:** by default the tracer is **disabled** (will not send spans) when +the Django setting ``DEBUG`` is ``True``. This can be overridden by explicitly enabling +the tracer with ``DATADOG_TRACE['ENABLED'] = True``, as described below. + +To enable the Django integration, add the application to your installed +apps, as follows:: + + INSTALLED_APPS = [ + # your Django apps... + + # the order is not important + 'ddtrace.contrib.django', + ] + +The configuration for this integration is namespaced under the ``DATADOG_TRACE`` +Django setting. For example, your ``settings.py`` may contain:: + + DATADOG_TRACE = { + 'DEFAULT_SERVICE': 'my-django-app', + 'TAGS': {'env': 'production'}, + } + +If you need to access to Datadog settings, you can:: + + from ddtrace.contrib.django.conf import settings + + tracer = settings.TRACER + tracer.trace("something") + # your code ... + +To have Django capture the tracer logs, ensure the ``LOGGING`` variable in +``settings.py`` looks similar to:: + + LOGGING = { + 'loggers': { + 'ddtrace': { + 'handlers': ['console'], + 'level': 'WARNING', + }, + }, + } + + +The available settings are: + +* ``DEFAULT_SERVICE`` (default: ``'django'``): set the service name used by the + tracer. Usually this configuration must be updated with a meaningful name. 
+* ``DEFAULT_DATABASE_PREFIX`` (default: ``''``): set a prefix value to database services,
+  so that your database services are listed with names such as ``prefix-defaultdb``.
+* ``DEFAULT_CACHE_SERVICE`` (default: ``''``): set the django cache service name used
+  by the tracer. Change this name if you want to see django cache spans as a cache application.
+* ``TAGS`` (default: ``{}``): set global tags that should be applied to all
+  spans.
+* ``TRACER`` (default: ``ddtrace.tracer``): set the default tracer
+  instance that is used to trace Django internals. By default the ``ddtrace``
+  tracer is used.
+* ``ENABLED`` (default: ``not django_settings.DEBUG``): defines if the tracer is
+  enabled or not. If set to false, the code is still instrumented but no spans
+  are sent to the trace agent. This setting cannot be changed at runtime
+  and a restart is required. By default the tracer is disabled when in ``DEBUG``
+  mode, enabled otherwise.
+* ``DISTRIBUTED_TRACING`` (default: ``True``): defines if the tracer should
+  use incoming X-DATADOG-* HTTP headers to extend a trace created remotely. It is
+  required for distributed tracing if this application is called remotely from another
+  instrumented application.
+  We suggest enabling it only for internal services where headers are under your control.
+* ``ANALYTICS_ENABLED`` (default: ``None``): enables APM events in Trace Search & Analytics.
+* ``AGENT_HOSTNAME`` (default: ``localhost``): define the hostname of the trace agent.
+* ``AGENT_PORT`` (default: ``8126``): define the port of the trace agent.
+* ``AUTO_INSTRUMENT`` (default: ``True``): if set to false the code will not be
+  instrumented (even if ``INSTRUMENT_DATABASE``, ``INSTRUMENT_CACHE`` or
+  ``INSTRUMENT_TEMPLATE`` are set to ``True``), while the tracer may be active
+  for your internal usage. This could be useful if you want to use the Django
+  integration, but you want to trace only particular functions or views. If set
+  to False, the request middleware will be disabled even if present.
+* ``INSTRUMENT_DATABASE`` (default: ``True``): if set to ``False`` the database will not
+  be instrumented. Only configurable when ``AUTO_INSTRUMENT`` is set to ``True``.
+* ``INSTRUMENT_CACHE`` (default: ``True``): if set to ``False`` the cache will not
+  be instrumented. Only configurable when ``AUTO_INSTRUMENT`` is set to ``True``.
+* ``INSTRUMENT_TEMPLATE`` (default: ``True``): if set to ``False`` template
+  rendering will not be instrumented. Only configurable when ``AUTO_INSTRUMENT``
+  is set to ``True``.
+"""
+from ...utils.importlib import require_modules
+
+
+required_modules = ['django']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .middleware import TraceMiddleware, TraceExceptionMiddleware
+        from .patch import patch
+        __all__ = ['TraceMiddleware', 'TraceExceptionMiddleware', 'patch']
+
+
+# define the Django app configuration
+default_app_config = 'ddtrace.contrib.django.apps.TracerConfig'
diff --git a/ddtrace/contrib/django/apps.py b/ddtrace/contrib/django/apps.py
new file mode 100644
index 0000000000..7cec2eeded
--- /dev/null
+++ b/ddtrace/contrib/django/apps.py
@@ -0,0 +1,19 @@
+# 3rd party
+from django.apps import AppConfig, apps
+
+# project
+from .patch import apply_django_patches
+
+
+class TracerConfig(AppConfig):
+    name = 'ddtrace.contrib.django'
+    label = 'datadog_django'
+
+    def ready(self):
+        """
+        Ready is called as soon as the registry is fully populated.
+ Tracing capabilities must be enabled in this function so that + all Django internals are properly configured. + """ + rest_framework_is_installed = apps.is_installed('rest_framework') + apply_django_patches(patch_rest_framework=rest_framework_is_installed) diff --git a/ddtrace/contrib/django/cache.py b/ddtrace/contrib/django/cache.py new file mode 100644 index 0000000000..3113a58b31 --- /dev/null +++ b/ddtrace/contrib/django/cache.py @@ -0,0 +1,111 @@ +from functools import wraps + +from django.conf import settings as django_settings + +from ...ext import SpanTypes +from ...internal.logger import get_logger +from .conf import settings, import_from_string +from .utils import quantize_key_values, _resource_from_cache_prefix + + +log = get_logger(__name__) + +# code instrumentation +DATADOG_NAMESPACE = '__datadog_original_{method}' +TRACED_METHODS = [ + 'get', + 'set', + 'add', + 'delete', + 'incr', + 'decr', + 'get_many', + 'set_many', + 'delete_many', +] + +# standard tags +CACHE_BACKEND = 'django.cache.backend' +CACHE_COMMAND_KEY = 'django.cache.key' + + +def patch_cache(tracer): + """ + Function that patches the inner cache system. Because the cache backend + can have different implementations and connectors, this function must + handle all possible interactions with the Django cache. What follows + is currently traced: + + * in-memory cache + * the cache client wrapper that could use any of the common + Django supported cache servers (Redis, Memcached, Database, Custom) + """ + # discover used cache backends + cache_backends = set([cache['BACKEND'] for cache in django_settings.CACHES.values()]) + + def _trace_operation(fn, method_name): + """ + Return a wrapped function that traces a cache operation + """ + cache_service_name = settings.DEFAULT_CACHE_SERVICE \ + if settings.DEFAULT_CACHE_SERVICE else settings.DEFAULT_SERVICE + + @wraps(fn) + def wrapped(self, *args, **kwargs): + # get the original function method + method = getattr(self, DATADOG_NAMESPACE.format(method=method_name)) + with tracer.trace('django.cache', span_type=SpanTypes.CACHE, service=cache_service_name) as span: + # update the resource name and tag the cache backend + span.resource = _resource_from_cache_prefix(method_name, self) + cache_backend = '{}.{}'.format(self.__module__, self.__class__.__name__) + span.set_tag(CACHE_BACKEND, cache_backend) + + if args: + keys = quantize_key_values(args[0]) + span.set_tag(CACHE_COMMAND_KEY, keys) + + return method(*args, **kwargs) + return wrapped + + def _wrap_method(cls, method_name): + """ + For the given class, wraps the method name with a traced operation + so that the original method is executed, while the span is properly + created + """ + # check if the backend owns the given bounded method + if not hasattr(cls, method_name): + return + + # prevent patching each backend's method more than once + if hasattr(cls, DATADOG_NAMESPACE.format(method=method_name)): + log.debug('%s already traced', method_name) + else: + method = getattr(cls, method_name) + setattr(cls, DATADOG_NAMESPACE.format(method=method_name), method) + setattr(cls, method_name, _trace_operation(method, method_name)) + + # trace all backends + for cache_module in cache_backends: + cache = import_from_string(cache_module, cache_module) + + for method in TRACED_METHODS: + _wrap_method(cache, method) + + +def unpatch_method(cls, method_name): + method = getattr(cls, DATADOG_NAMESPACE.format(method=method_name), None) + if method is None: + log.debug('nothing to do, the class is not patched') + return + 
setattr(cls, method_name, method) + delattr(cls, DATADOG_NAMESPACE.format(method=method_name)) + + +def unpatch_cache(): + cache_backends = set([cache['BACKEND'] for cache in django_settings.CACHES.values()]) + for cache_module in cache_backends: + cache = import_from_string(cache_module, cache_module) + + for method in TRACED_METHODS: + unpatch_method(cache, method) diff --git a/ddtrace/contrib/django/compat.py b/ddtrace/contrib/django/compat.py new file mode 100644 index 0000000000..f686b7f117 --- /dev/null +++ b/ddtrace/contrib/django/compat.py @@ -0,0 +1,27 @@ +import django + + +if django.VERSION >= (1, 10, 1): + from django.urls import get_resolver + + def user_is_authenticated(user): + # Explicit comparison due to the following bug + # https://code.djangoproject.com/ticket/26988 + return user.is_authenticated == True # noqa E712 +else: + from django.conf import settings + from django.core import urlresolvers + + def user_is_authenticated(user): + return user.is_authenticated() + + if django.VERSION >= (1, 9, 0): + def get_resolver(urlconf=None): + urlconf = urlconf or settings.ROOT_URLCONF + urlresolvers.set_urlconf(urlconf) + return urlresolvers.get_resolver(urlconf) + else: + def get_resolver(urlconf=None): + urlconf = urlconf or settings.ROOT_URLCONF + urlresolvers.set_urlconf(urlconf) + return urlresolvers.RegexURLResolver(r'^/', urlconf) diff --git a/ddtrace/contrib/django/conf.py b/ddtrace/contrib/django/conf.py new file mode 100644 index 0000000000..31dda24534 --- /dev/null +++ b/ddtrace/contrib/django/conf.py @@ -0,0 +1,163 @@ +""" +Settings for Datadog tracer are all namespaced in the DATADOG_TRACE setting. +For example your project's `settings.py` file might look like this:: + + DATADOG_TRACE = { + 'TRACER': 'myapp.tracer', + } + +This module provides the `setting` object, that is used to access +Datadog settings, checking for user settings first, then falling +back to the defaults. +""" +from __future__ import unicode_literals + +import os +import importlib + +from django.conf import settings as django_settings + +from ...internal.logger import get_logger + + +log = get_logger(__name__) + +# List of available settings with their defaults +DEFAULTS = { + 'AGENT_HOSTNAME': 'localhost', + 'AGENT_PORT': 8126, + 'AUTO_INSTRUMENT': True, + 'INSTRUMENT_CACHE': True, + 'INSTRUMENT_DATABASE': True, + 'INSTRUMENT_TEMPLATE': True, + 'DEFAULT_DATABASE_PREFIX': '', + 'DEFAULT_SERVICE': 'django', + 'DEFAULT_CACHE_SERVICE': '', + 'ENABLED': True, + 'DISTRIBUTED_TRACING': True, + 'ANALYTICS_ENABLED': None, + 'ANALYTICS_SAMPLE_RATE': True, + 'TRACE_QUERY_STRING': None, + 'TAGS': {}, + 'TRACER': 'ddtrace.tracer', +} + +# List of settings that may be in string import notation. +IMPORT_STRINGS = ( + 'TRACER', +) + +# List of settings that have been removed +REMOVED_SETTINGS = () + + +def import_from_string(val, setting_name): + """ + Attempt to import a class from a string representation. + """ + try: + # Nod to tastypie's use of importlib. + parts = val.split('.') + module_path, class_name = '.'.join(parts[:-1]), parts[-1] + module = importlib.import_module(module_path) + return getattr(module, class_name) + except (ImportError, AttributeError) as e: + msg = 'Could not import "{}" for setting "{}". {}: {}.'.format( + val, + setting_name, + e.__class__.__name__, + e, + ) + + raise ImportError(msg) + + +class DatadogSettings(object): + """ + A settings object, that allows Datadog settings to be accessed as properties. 
+ For example: + + from ddtrace.contrib.django.conf import settings + + tracer = settings.TRACER + + Any setting with string import paths will be automatically resolved + and return the class, rather than the string literal. + """ + def __init__(self, user_settings=None, defaults=None, import_strings=None): + if user_settings: + self._user_settings = self.__check_user_settings(user_settings) + + self.defaults = defaults or DEFAULTS + if os.environ.get('DATADOG_ENV'): + self.defaults['TAGS'].update({'env': os.environ.get('DATADOG_ENV')}) + if os.environ.get('DATADOG_SERVICE_NAME'): + self.defaults['DEFAULT_SERVICE'] = os.environ.get('DATADOG_SERVICE_NAME') + + host = os.environ.get('DD_AGENT_HOST', os.environ.get('DATADOG_TRACE_AGENT_HOSTNAME')) + if host: + self.defaults['AGENT_HOSTNAME'] = host + + port = os.environ.get('DD_TRACE_AGENT_PORT', os.environ.get('DATADOG_TRACE_AGENT_PORT')) + if port: + # if the agent port is a string, the underlying library that creates the socket + # stops working + try: + port = int(port) + except ValueError: + log.warning('DD_TRACE_AGENT_PORT is not an integer value; default to 8126') + else: + self.defaults['AGENT_PORT'] = port + + self.import_strings = import_strings or IMPORT_STRINGS + + @property + def user_settings(self): + if not hasattr(self, '_user_settings'): + self._user_settings = getattr(django_settings, 'DATADOG_TRACE', {}) + + # TODO[manu]: prevents docs import errors; provide a better implementation + if 'ENABLED' not in self._user_settings: + self._user_settings['ENABLED'] = not django_settings.DEBUG + return self._user_settings + + def __getattr__(self, attr): + if attr not in self.defaults: + raise AttributeError('Invalid setting: "{}"'.format(attr)) + + try: + # Check if present in user settings + val = self.user_settings[attr] + except KeyError: + # Otherwise, fall back to defaults + val = self.defaults[attr] + + # Coerce import strings into classes + if attr in self.import_strings: + val = import_from_string(val, attr) + + # Cache the result + setattr(self, attr, val) + return val + + def __check_user_settings(self, user_settings): + SETTINGS_DOC = 'http://pypi.datadoghq.com/trace/docs/#module-ddtrace.contrib.django' + for setting in REMOVED_SETTINGS: + if setting in user_settings: + raise RuntimeError( + 'The "{}" setting has been removed, check "{}".'.format(setting, SETTINGS_DOC) + ) + return user_settings + + +settings = DatadogSettings(None, DEFAULTS, IMPORT_STRINGS) + + +def reload_settings(*args, **kwargs): + """ + Triggers a reload when Django emits the reloading signal + """ + global settings + setting, value = kwargs['setting'], kwargs['value'] + if setting == 'DATADOG_TRACE': + settings = DatadogSettings(value, DEFAULTS, IMPORT_STRINGS) diff --git a/ddtrace/contrib/django/db.py b/ddtrace/contrib/django/db.py new file mode 100644 index 0000000000..f5b6c804e1 --- /dev/null +++ b/ddtrace/contrib/django/db.py @@ -0,0 +1,76 @@ +from django.db import connections + +# project +from ...ext import sql as sqlx +from ...internal.logger import get_logger +from ...pin import Pin + +from .conf import settings +from ..dbapi import TracedCursor as DbApiTracedCursor + +log = get_logger(__name__) + +CURSOR_ATTR = '_datadog_original_cursor' +ALL_CONNS_ATTR = '_datadog_original_connections_all' + + +def patch_db(tracer): + if hasattr(connections, ALL_CONNS_ATTR): + log.debug('db already patched') + return + setattr(connections, ALL_CONNS_ATTR, connections.all) + + def all_connections(self): + conns = getattr(self, ALL_CONNS_ATTR)() + for conn 
in conns: + patch_conn(tracer, conn) + return conns + + connections.all = all_connections.__get__(connections, type(connections)) + + +def unpatch_db(): + for c in connections.all(): + unpatch_conn(c) + + all_connections = getattr(connections, ALL_CONNS_ATTR, None) + if all_connections is None: + log.debug('nothing to do, the db is not patched') + return + connections.all = all_connections + delattr(connections, ALL_CONNS_ATTR) + + +def patch_conn(tracer, conn): + if hasattr(conn, CURSOR_ATTR): + return + + setattr(conn, CURSOR_ATTR, conn.cursor) + + def cursor(): + database_prefix = ( + '{}-'.format(settings.DEFAULT_DATABASE_PREFIX) + if settings.DEFAULT_DATABASE_PREFIX else '' + ) + alias = getattr(conn, 'alias', 'default') + service = '{}{}{}'.format(database_prefix, alias, 'db') + vendor = getattr(conn, 'vendor', 'db') + prefix = sqlx.normalize_vendor(vendor) + tags = { + 'django.db.vendor': vendor, + 'django.db.alias': alias, + } + + pin = Pin(service, tags=tags, tracer=tracer, app=prefix) + return DbApiTracedCursor(conn._datadog_original_cursor(), pin) + + conn.cursor = cursor + + +def unpatch_conn(conn): + cursor = getattr(conn, CURSOR_ATTR, None) + if cursor is None: + log.debug('nothing to do, the connection is not patched') + return + conn.cursor = cursor + delattr(conn, CURSOR_ATTR) diff --git a/ddtrace/contrib/django/middleware.py b/ddtrace/contrib/django/middleware.py new file mode 100644 index 0000000000..8a0539a75b --- /dev/null +++ b/ddtrace/contrib/django/middleware.py @@ -0,0 +1,230 @@ +# project +from .conf import settings +from .compat import user_is_authenticated, get_resolver +from .utils import get_request_uri + +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...contrib import func_name +from ...ext import SpanTypes, http +from ...internal.logger import get_logger +from ...propagation.http import HTTPPropagator +from ...settings import config + +# 3p +from django.core.exceptions import MiddlewareNotUsed +from django.conf import settings as django_settings +import django + +try: + from django.utils.deprecation import MiddlewareMixin + + MiddlewareClass = MiddlewareMixin +except ImportError: + MiddlewareClass = object + +log = get_logger(__name__) + +EXCEPTION_MIDDLEWARE = "ddtrace.contrib.django.TraceExceptionMiddleware" +TRACE_MIDDLEWARE = "ddtrace.contrib.django.TraceMiddleware" +MIDDLEWARE = "MIDDLEWARE" +MIDDLEWARE_CLASSES = "MIDDLEWARE_CLASSES" + +# Default views list available from: +# https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/views/defaults.py +# DEV: Django doesn't call `process_view` when falling back to one of these internal error handling views +# DEV: We only use these names when `span.resource == 'unknown'` and we have one of these status codes +_django_default_views = { + 400: "django.views.defaults.bad_request", + 403: "django.views.defaults.permission_denied", + 404: "django.views.defaults.page_not_found", + 500: "django.views.defaults.server_error", +} + + +def _analytics_enabled(): + return ( + (config.analytics_enabled and settings.ANALYTICS_ENABLED is not False) or settings.ANALYTICS_ENABLED is True + ) and settings.ANALYTICS_SAMPLE_RATE is not None + + +def get_middleware_insertion_point(): + """Returns the attribute name and collection object for the Django middleware. + + If middleware cannot be found, returns None for the middleware collection. + """ + middleware = getattr(django_settings, MIDDLEWARE, None) + # Prioritise MIDDLEWARE over ..._CLASSES, but only in 1.10 and later. 
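+    # DEV: Django 1.10 introduced the new-style MIDDLEWARE setting; on older
+    # versions only MIDDLEWARE_CLASSES takes effect, so fall back to it below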
+ if middleware is not None and django.VERSION >= (1, 10): + return MIDDLEWARE, middleware + return MIDDLEWARE_CLASSES, getattr(django_settings, MIDDLEWARE_CLASSES, None) + + +def insert_trace_middleware(): + middleware_attribute, middleware = get_middleware_insertion_point() + if middleware is not None and TRACE_MIDDLEWARE not in set(middleware): + setattr(django_settings, middleware_attribute, type(middleware)((TRACE_MIDDLEWARE,)) + middleware) + + +def remove_trace_middleware(): + _, middleware = get_middleware_insertion_point() + if middleware and TRACE_MIDDLEWARE in set(middleware): + middleware.remove(TRACE_MIDDLEWARE) + + +def insert_exception_middleware(): + middleware_attribute, middleware = get_middleware_insertion_point() + if middleware is not None and EXCEPTION_MIDDLEWARE not in set(middleware): + setattr(django_settings, middleware_attribute, middleware + type(middleware)((EXCEPTION_MIDDLEWARE,))) + + +def remove_exception_middleware(): + _, middleware = get_middleware_insertion_point() + if middleware and EXCEPTION_MIDDLEWARE in set(middleware): + middleware.remove(EXCEPTION_MIDDLEWARE) + + +class InstrumentationMixin(MiddlewareClass): + """ + Useful mixin base class for tracing middlewares + """ + + def __init__(self, get_response=None): + # disable the middleware if the tracer is not enabled + # or if the auto instrumentation is disabled + self.get_response = get_response + if not settings.AUTO_INSTRUMENT: + raise MiddlewareNotUsed + + +class TraceExceptionMiddleware(InstrumentationMixin): + """ + Middleware that traces exceptions raised + """ + + def process_exception(self, request, exception): + try: + span = _get_req_span(request) + if span: + span.set_tag(http.STATUS_CODE, "500") + span.set_traceback() # will set the exception info + except Exception: + log.debug("error processing exception", exc_info=True) + + +class TraceMiddleware(InstrumentationMixin): + """ + Middleware that traces Django requests + """ + + def process_request(self, request): + tracer = settings.TRACER + if settings.DISTRIBUTED_TRACING: + propagator = HTTPPropagator() + context = propagator.extract(request.META) + # Only need to active the new context if something was propagated + if context.trace_id: + tracer.context_provider.activate(context) + try: + span = tracer.trace( + "django.request", + service=settings.DEFAULT_SERVICE, + resource="unknown", # will be filled by process view + span_type=SpanTypes.WEB, + ) + + # set analytics sample rate + # DEV: django is special case maintains separate configuration from config api + if _analytics_enabled() and settings.ANALYTICS_SAMPLE_RATE is not None: + span.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, settings.ANALYTICS_SAMPLE_RATE, + ) + + # Set HTTP Request tags + span.set_tag(http.METHOD, request.method) + span.set_tag(http.URL, get_request_uri(request)) + trace_query_string = settings.TRACE_QUERY_STRING + if trace_query_string is None: + trace_query_string = config.django.trace_query_string + if trace_query_string: + span.set_tag(http.QUERY_STRING, request.META["QUERY_STRING"]) + _set_req_span(request, span) + except Exception: + log.debug("error tracing request", exc_info=True) + + def process_view(self, request, view_func, *args, **kwargs): + span = _get_req_span(request) + if span: + span.resource = func_name(view_func) + + def process_response(self, request, response): + try: + span = _get_req_span(request) + if span: + if response.status_code < 500 and span.error: + # remove any existing stack trace since it must have been + # handled appropriately 
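+                    # (_remove_exc_info clears the error flag and the error.* meta
+                    # that set_traceback recorded on the span)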
+ span._remove_exc_info() + + # If `process_view` was not called, try to determine the correct `span.resource` to set + # DEV: `process_view` won't get called if a middle `process_request` returns an HttpResponse + # DEV: `process_view` won't get called when internal error handlers are used (e.g. for 404 responses) + if span.resource == "unknown": + try: + # Attempt to lookup the view function from the url resolver + # https://github.com/django/django/blob/38e2fdadfd9952e751deed662edf4c496d238f28/django/core/handlers/base.py#L104-L113 # noqa + urlconf = None + if hasattr(request, "urlconf"): + urlconf = request.urlconf + resolver = get_resolver(urlconf) + + # Try to resolve the Django view for handling this request + if getattr(request, "request_match", None): + request_match = request.request_match + else: + # This may raise a `django.urls.exceptions.Resolver404` exception + request_match = resolver.resolve(request.path_info) + span.resource = func_name(request_match.func) + except Exception: + log.debug("error determining request view function", exc_info=True) + + # If the view could not be found, try to set from a static list of + # known internal error handler views + span.resource = _django_default_views.get(response.status_code, "unknown") + + span.set_tag(http.STATUS_CODE, response.status_code) + span = _set_auth_tags(span, request) + span.finish() + except Exception: + log.debug("error tracing request", exc_info=True) + finally: + return response + + +def _get_req_span(request): + """ Return the datadog span from the given request. """ + return getattr(request, "_datadog_request_span", None) + + +def _set_req_span(request, span): + """ Set the datadog span on the given request. """ + return setattr(request, "_datadog_request_span", span) + + +def _set_auth_tags(span, request): + """ Patch any available auth tags from the request onto the span. 
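+    Sets the ``django.user.is_authenticated``, ``django.user.id`` and
+    ``django.user.name`` tags when the request has a user attached.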
""" + user = getattr(request, "user", None) + if not user: + return span + + if hasattr(user, "is_authenticated"): + span.set_tag("django.user.is_authenticated", user_is_authenticated(user)) + + uid = getattr(user, "pk", None) + if uid: + span.set_tag("django.user.id", uid) + + uname = getattr(user, "username", None) + if uname: + span.set_tag("django.user.name", uname) + + return span diff --git a/ddtrace/contrib/django/patch.py b/ddtrace/contrib/django/patch.py new file mode 100644 index 0000000000..9dd6a54d32 --- /dev/null +++ b/ddtrace/contrib/django/patch.py @@ -0,0 +1,94 @@ +# 3rd party +from ddtrace.vendor import wrapt +import django +from django.db import connections + +# project +from .db import patch_db +from .conf import settings +from .cache import patch_cache +from .templates import patch_template +from .middleware import insert_exception_middleware, insert_trace_middleware + +from ...internal.logger import get_logger + +log = get_logger(__name__) + + +def patch(): + """Patch the instrumented methods + """ + if getattr(django, '_datadog_patch', False): + return + setattr(django, '_datadog_patch', True) + + _w = wrapt.wrap_function_wrapper + _w('django', 'setup', traced_setup) + + +def traced_setup(wrapped, instance, args, kwargs): + from django.conf import settings + + if 'ddtrace.contrib.django' not in settings.INSTALLED_APPS: + if isinstance(settings.INSTALLED_APPS, tuple): + # INSTALLED_APPS is a tuple < 1.9 + settings.INSTALLED_APPS = settings.INSTALLED_APPS + ('ddtrace.contrib.django', ) + else: + settings.INSTALLED_APPS.append('ddtrace.contrib.django') + + wrapped(*args, **kwargs) + + +def apply_django_patches(patch_rest_framework): + """ + Ready is called as soon as the registry is fully populated. + In order for all Django internals are properly configured, this + must be called after the app is finished starting + """ + tracer = settings.TRACER + + if settings.TAGS: + tracer.set_tags(settings.TAGS) + + # configure the tracer instance + # TODO[manu]: we may use configure() but because it creates a new + # AgentWriter, it breaks all tests. The configure() behavior must + # be changed to use it in this integration + tracer.enabled = settings.ENABLED + tracer.writer.api.hostname = settings.AGENT_HOSTNAME + tracer.writer.api.port = settings.AGENT_PORT + + if settings.AUTO_INSTRUMENT: + # trace Django internals + insert_trace_middleware() + insert_exception_middleware() + + if settings.INSTRUMENT_TEMPLATE: + try: + patch_template(tracer) + except Exception: + log.exception('error patching Django template rendering') + + if settings.INSTRUMENT_DATABASE: + try: + patch_db(tracer) + # This is the trigger to patch individual connections. + # By patching these here, all processes including + # management commands are also traced. + connections.all() + except Exception: + log.exception('error patching Django database connections') + + if settings.INSTRUMENT_CACHE: + try: + patch_cache(tracer) + except Exception: + log.exception('error patching Django cache') + + # Instrument rest_framework app to trace custom exception handling. 
+        if patch_rest_framework:
+            try:
+                from .restframework import patch_restframework
+                patch_restframework(tracer)
+            except Exception:
+                log.exception('error patching rest_framework app')
diff --git a/ddtrace/contrib/django/restframework.py b/ddtrace/contrib/django/restframework.py
new file mode 100644
index 0000000000..1970111e0d
--- /dev/null
+++ b/ddtrace/contrib/django/restframework.py
@@ -0,0 +1,42 @@
+from ddtrace.vendor.wrapt import wrap_function_wrapper as wrap
+
+from rest_framework.views import APIView
+
+from ...utils.wrappers import unwrap
+
+
+def patch_restframework(tracer):
+    """ Patches the rest_framework app.
+
+    To trace exceptions occurring during view processing we currently use a TraceExceptionMiddleware.
+    However, rest_framework handles exceptions before they reach our middleware.
+    So we need to manually patch the rest_framework exception handler
+    to set the exception stack trace in the current span.
+
+    """
+
+    def _traced_handle_exception(wrapped, instance, args, kwargs):
+        """ Sets the error message, error type and exception stack trace on the current span
+        before calling the original exception handler.
+        """
+        span = tracer.current_span()
+        if span is not None:
+            span.set_traceback()
+
+        return wrapped(*args, **kwargs)
+
+    # do not patch if already patched
+    if getattr(APIView, '_datadog_patch', False):
+        return
+    else:
+        setattr(APIView, '_datadog_patch', True)
+
+    # trace the handle_exception method
+    wrap('rest_framework.views', 'APIView.handle_exception', _traced_handle_exception)
+
+
+def unpatch_restframework():
+    """ Unpatches the rest_framework app."""
+    if getattr(APIView, '_datadog_patch', False):
+        setattr(APIView, '_datadog_patch', False)
+        unwrap(APIView, 'handle_exception')
diff --git a/ddtrace/contrib/django/templates.py b/ddtrace/contrib/django/templates.py
new file mode 100644
index 0000000000..27752b362f
--- /dev/null
+++ b/ddtrace/contrib/django/templates.py
@@ -0,0 +1,48 @@
+"""
+Code to measure Django template rendering.
+"""
+# project
+from ...ext import SpanTypes
+from ...internal.logger import get_logger
+
+# 3p
+from django.template import Template
+
+log = get_logger(__name__)
+
+RENDER_ATTR = '_datadog_original_render'
+
+
+def patch_template(tracer):
+    """ will patch django's template rendering function to include timing
+    and trace information.
+    """
+
+    # FIXME[matt] we're patching the template class here. ideally we'd only
+    # patch instances so we could use multiple tracers at once, but i suspect
+    # this is fine in practice.
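+    # the saved render attribute doubles as the "already patched" marker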
+    if getattr(Template, RENDER_ATTR, None):
+        log.debug('already patched')
+        return
+
+    setattr(Template, RENDER_ATTR, Template.render)
+
+    def traced_render(self, context):
+        with tracer.trace('django.template', span_type=SpanTypes.TEMPLATE) as span:
+            try:
+                return Template._datadog_original_render(self, context)
+            finally:
+                template_name = self.name or getattr(context, 'template_name', None) or 'unknown'
+                span.resource = template_name
+                span.set_tag('django.template_name', template_name)
+
+    Template.render = traced_render
+
+
+def unpatch_template():
+    render = getattr(Template, RENDER_ATTR, None)
+    if render is None:
+        log.debug('nothing to do, the Template is not patched')
+        return
+    Template.render = render
+    delattr(Template, RENDER_ATTR)
diff --git a/ddtrace/contrib/django/utils.py b/ddtrace/contrib/django/utils.py
new file mode 100644
index 0000000000..3b4a1acee1
--- /dev/null
+++ b/ddtrace/contrib/django/utils.py
@@ -0,0 +1,75 @@
+from ...compat import parse
+from ...internal.logger import get_logger
+
+log = get_logger(__name__)
+
+
+def _resource_from_cache_prefix(resource, cache):
+    """
+    Combine the resource name with the cache prefix (if any)
+    """
+    if getattr(cache, 'key_prefix', None):
+        name = '{} {}'.format(resource, cache.key_prefix)
+    else:
+        name = resource
+
+    # enforce lowercase to make the output nicer to read
+    return name.lower()
+
+
+def quantize_key_values(key):
+    """
+    Used in the Django trace operation method, it ensures that if a dict
+    with values is used, we remove the values from the span meta
+    attributes. For example::
+
+        >>> quantize_key_values({'key': 'value'})
+        # returns ['key']
+    """
+    if isinstance(key, dict):
+        return key.keys()
+
+    return key
+
+
+def get_request_uri(request):
+    """
+    Helper to rebuild the original request url
+
+    Query strings and fragments are not included.
+    """
+    # DEV: We do this instead of `request.build_absolute_uri()` since
+    # an exception can get raised and we want to always build a url
+    # regardless of any exceptions raised from `request.get_host()`
+    host = None
+    try:
+        host = request.get_host()  # this will include host:port
+    except Exception:
+        log.debug('Failed to get Django request host', exc_info=True)
+
+    if not host:
+        try:
+            # Try to build host how Django would have
+            # https://github.com/django/django/blob/e8d0d2a5efc8012dcc8bf1809dec065ebde64c81/django/http/request.py#L85-L102
+            if 'HTTP_HOST' in request.META:
+                host = request.META['HTTP_HOST']
+            else:
+                host = request.META['SERVER_NAME']
+                port = str(request.META['SERVER_PORT'])
+                if port != ('443' if request.is_secure() else '80'):
+                    host = '{0}:{1}'.format(host, port)
+        except Exception:
+            # This really shouldn't ever happen, but let's guard here just in case
+            log.debug('Failed to build Django request host', exc_info=True)
+            host = 'unknown'
+
+    # Build request url from the information available
+    # DEV: We are explicitly omitting query strings since they may contain sensitive information
+    return parse.urlunparse(parse.ParseResult(
+        scheme=request.scheme,
+        netloc=host,
+        path=request.path,
+        params='',
+        query='',
+        fragment='',
+    ))
diff --git a/ddtrace/contrib/dogpile_cache/__init__.py b/ddtrace/contrib/dogpile_cache/__init__.py
new file mode 100644
index 0000000000..39eac76c67
--- /dev/null
+++ b/ddtrace/contrib/dogpile_cache/__init__.py
@@ -0,0 +1,48 @@
+"""
+Instrument dogpile.cache__ to report all cached lookups.
+
+This will add spans around the calls to your cache backend (eg. redis, memory,
+etc). The spans will also include the following tags:
+
+- key/keys: The key(s) dogpile passed to your backend. Note that this will be
+  the output of the region's ``function_key_generator``, but before any key
+  mangling is applied (ie. the region's ``key_mangler``).
+- region: Name of the region.
+- backend: Name of the backend class.
+- hit: If the key was found in the cache.
+- expired: If the key is expired. This is only relevant if the key was found.
+
+While cache tracing will generally already have keys in tags, some caching
+setups will not have useful tag values; for example, when you're using
+consistent hashing with memcached, the key(s) will appear as a mangled hash.
+::
+
+    # Patch before importing dogpile.cache
+    from ddtrace import patch
+    patch(dogpile_cache=True)
+
+    from dogpile.cache import make_region
+
+    region = make_region().configure(
+        "dogpile.cache.pylibmc",
+        expiration_time=3600,
+        arguments={"url": ["127.0.0.1"]},
+    )
+
+    @region.cache_on_arguments()
+    def hello(name):
+        # Some complicated, slow calculation
+        return "Hello, {}".format(name)
+
+.. __: https://dogpilecache.sqlalchemy.org/
+"""
+from ...utils.importlib import require_modules
+
+
+required_modules = ['dogpile.cache']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch, unpatch
+
+        __all__ = ['patch', 'unpatch']
diff --git a/ddtrace/contrib/dogpile_cache/lock.py b/ddtrace/contrib/dogpile_cache/lock.py
new file mode 100644
index 0000000000..e73124655b
--- /dev/null
+++ b/ddtrace/contrib/dogpile_cache/lock.py
@@ -0,0 +1,37 @@
+import dogpile
+
+from ...pin import Pin
+from ...utils.formats import asbool
+
+
+def _wrap_lock_ctor(func, instance, args, kwargs):
+    """
+    This seems rather odd. But to track hits, we need to patch the wrapped function that
+    dogpile passes to the region and locks. Unfortunately it's a closure defined inside
+    the get_or_create* methods themselves, so we can't easily patch those.
+    """
+    func(*args, **kwargs)
+    ori_backend_fetcher = instance.value_and_created_fn
+
+    def wrapped_backend_fetcher():
+        pin = Pin.get_from(dogpile.cache)
+        if not pin or not pin.enabled():
+            return ori_backend_fetcher()
+
+        hit = False
+        expired = True
+        try:
+            value, createdtime = ori_backend_fetcher()
+            hit = value is not dogpile.cache.api.NO_VALUE
+            # dogpile sometimes returns None, but only checks for truthiness. Coalesce
+            # to minimize APM users' confusion.
+            expired = instance._is_expired(createdtime) or False
+            return value, createdtime
+        finally:
+            # Keys are checked in random order so the 'final' answer for partial hits
+            # should really be false (ie. if any are 'negative', then the tag value
+            # should be). This means ANDing all hit values and ORing all expired values.
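+            # e.g. get_or_create_multi(['a', 'b']) where 'a' is a fresh hit and
+            # 'b' is a miss ends with hit == (True and False) == False and
+            # expired == (False or True) == True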
+ span = pin.tracer.current_span() + span.set_tag('hit', asbool(span.get_tag('hit') or 'True') and hit) + span.set_tag('expired', asbool(span.get_tag('expired') or 'False') or expired) + instance.value_and_created_fn = wrapped_backend_fetcher diff --git a/ddtrace/contrib/dogpile_cache/patch.py b/ddtrace/contrib/dogpile_cache/patch.py new file mode 100644 index 0000000000..6525de587b --- /dev/null +++ b/ddtrace/contrib/dogpile_cache/patch.py @@ -0,0 +1,37 @@ +import dogpile + +from ddtrace.pin import Pin, _DD_PIN_NAME, _DD_PIN_PROXY_NAME +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +from .lock import _wrap_lock_ctor +from .region import _wrap_get_create, _wrap_get_create_multi + +_get_or_create = dogpile.cache.region.CacheRegion.get_or_create +_get_or_create_multi = dogpile.cache.region.CacheRegion.get_or_create_multi +_lock_ctor = dogpile.lock.Lock.__init__ + + +def patch(): + if getattr(dogpile.cache, '_datadog_patch', False): + return + setattr(dogpile.cache, '_datadog_patch', True) + + _w('dogpile.cache.region', 'CacheRegion.get_or_create', _wrap_get_create) + _w('dogpile.cache.region', 'CacheRegion.get_or_create_multi', _wrap_get_create_multi) + _w('dogpile.lock', 'Lock.__init__', _wrap_lock_ctor) + + Pin(app='dogpile.cache', service='dogpile.cache').onto(dogpile.cache) + + +def unpatch(): + if not getattr(dogpile.cache, '_datadog_patch', False): + return + setattr(dogpile.cache, '_datadog_patch', False) + # This looks silly but the unwrap util doesn't support class instance methods, even + # though wrapt does. This was causing the patches to stack on top of each other + # during testing. + dogpile.cache.region.CacheRegion.get_or_create = _get_or_create + dogpile.cache.region.CacheRegion.get_or_create_multi = _get_or_create_multi + dogpile.lock.Lock.__init__ = _lock_ctor + setattr(dogpile.cache, _DD_PIN_NAME, None) + setattr(dogpile.cache, _DD_PIN_PROXY_NAME, None) diff --git a/ddtrace/contrib/dogpile_cache/region.py b/ddtrace/contrib/dogpile_cache/region.py new file mode 100644 index 0000000000..61d1cdb618 --- /dev/null +++ b/ddtrace/contrib/dogpile_cache/region.py @@ -0,0 +1,29 @@ +import dogpile + +from ...pin import Pin + + +def _wrap_get_create(func, instance, args, kwargs): + pin = Pin.get_from(dogpile.cache) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + key = args[0] + with pin.tracer.trace('dogpile.cache', resource='get_or_create', span_type='cache') as span: + span.set_tag('key', key) + span.set_tag('region', instance.name) + span.set_tag('backend', instance.actual_backend.__class__.__name__) + return func(*args, **kwargs) + + +def _wrap_get_create_multi(func, instance, args, kwargs): + pin = Pin.get_from(dogpile.cache) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + keys = args[0] + with pin.tracer.trace('dogpile.cache', resource='get_or_create_multi', span_type='cache') as span: + span.set_tag('keys', keys) + span.set_tag('region', instance.name) + span.set_tag('backend', instance.actual_backend.__class__.__name__) + return func(*args, **kwargs) diff --git a/ddtrace/contrib/elasticsearch/__init__.py b/ddtrace/contrib/elasticsearch/__init__.py new file mode 100644 index 0000000000..6b66cb3632 --- /dev/null +++ b/ddtrace/contrib/elasticsearch/__init__.py @@ -0,0 +1,33 @@ +"""Instrument Elasticsearch to report Elasticsearch queries. + +``patch_all`` will automatically patch your Elasticsearch instance to make it work. 
+:: + + from ddtrace import Pin, patch + from elasticsearch import Elasticsearch + + # If not patched yet, you can patch elasticsearch specifically + patch(elasticsearch=True) + + # This will report spans with the default instrumentation + es = Elasticsearch(port=ELASTICSEARCH_CONFIG['port']) + # Example of instrumented query + es.indices.create(index='books', ignore=400) + + # Use a pin to specify metadata related to this client + es = Elasticsearch(port=ELASTICSEARCH_CONFIG['port']) + Pin.override(es.transport, service='elasticsearch-videos') + es.indices.create(index='videos', ignore=400) +""" +from ...utils.importlib import require_modules + +# DEV: We only require one of these modules to be available +required_modules = ['elasticsearch', 'elasticsearch1', 'elasticsearch2', 'elasticsearch5', 'elasticsearch6'] + +with require_modules(required_modules) as missing_modules: + # We were able to find at least one of the required modules + if set(missing_modules) != set(required_modules): + from .transport import get_traced_transport + from .patch import patch + + __all__ = ['get_traced_transport', 'patch'] diff --git a/ddtrace/contrib/elasticsearch/elasticsearch.py b/ddtrace/contrib/elasticsearch/elasticsearch.py new file mode 100644 index 0000000000..efd8e8bb5f --- /dev/null +++ b/ddtrace/contrib/elasticsearch/elasticsearch.py @@ -0,0 +1,14 @@ +from importlib import import_module + +module_names = ('elasticsearch', 'elasticsearch1', 'elasticsearch2', 'elasticsearch5', 'elasticsearch6') +for module_name in module_names: + try: + elasticsearch = import_module(module_name) + break + except ImportError: + pass +else: + raise ImportError('could not import any of {0!r}'.format(module_names)) + + +__all__ = ['elasticsearch'] diff --git a/ddtrace/contrib/elasticsearch/patch.py b/ddtrace/contrib/elasticsearch/patch.py new file mode 100644 index 0000000000..16b9d6e0e0 --- /dev/null +++ b/ddtrace/contrib/elasticsearch/patch.py @@ -0,0 +1,118 @@ +from importlib import import_module + +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +from .quantize import quantize + +from ...compat import urlencode +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...ext import SpanTypes, elasticsearch as metadata, http +from ...pin import Pin +from ...utils.wrappers import unwrap as _u +from ...settings import config + + +def _es_modules(): + module_names = ('elasticsearch', 'elasticsearch1', 'elasticsearch2', 'elasticsearch5', 'elasticsearch6') + for module_name in module_names: + try: + yield import_module(module_name) + except ImportError: + pass + + +# NB: We are patching the default elasticsearch.transport module +def patch(): + for elasticsearch in _es_modules(): + _patch(elasticsearch) + + +def _patch(elasticsearch): + if getattr(elasticsearch, '_datadog_patch', False): + return + setattr(elasticsearch, '_datadog_patch', True) + _w(elasticsearch.transport, 'Transport.perform_request', _get_perform_request(elasticsearch)) + Pin(service=metadata.SERVICE, app=metadata.APP).onto(elasticsearch.transport.Transport) + + +def unpatch(): + for elasticsearch in _es_modules(): + _unpatch(elasticsearch) + + +def _unpatch(elasticsearch): + if getattr(elasticsearch, '_datadog_patch', False): + setattr(elasticsearch, '_datadog_patch', False) + _u(elasticsearch.transport.Transport, 'perform_request') + + +def _get_perform_request(elasticsearch): + def _perform_request(func, instance, args, kwargs): + pin = Pin.get_from(instance) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + with 
pin.tracer.trace('elasticsearch.query', span_type=SpanTypes.ELASTICSEARCH) as span:
+            # Don't instrument if the trace is not sampled
+            if not span.sampled:
+                return func(*args, **kwargs)
+
+            method, url = args
+            params = kwargs.get('params')
+            body = kwargs.get('body')
+
+            span.service = pin.service
+            span.set_tag(metadata.METHOD, method)
+            span.set_tag(metadata.URL, url)
+            span.set_tag(metadata.PARAMS, urlencode(params))
+            if config.elasticsearch.trace_query_string:
+                span.set_tag(http.QUERY_STRING, urlencode(params))
+            if method == 'GET':
+                span.set_tag(metadata.BODY, instance.serializer.dumps(body))
+            status = None
+
+            # set analytics sample rate
+            span.set_tag(
+                ANALYTICS_SAMPLE_RATE_KEY,
+                config.elasticsearch.get_analytics_sample_rate()
+            )
+
+            span = quantize(span)
+
+            try:
+                result = func(*args, **kwargs)
+            except elasticsearch.exceptions.TransportError as e:
+                span.set_tag(http.STATUS_CODE, getattr(e, 'status_code', 500))
+                raise
+
+            try:
+                # Optional metadata extraction with soft fail.
+                if isinstance(result, tuple) and len(result) == 2:
+                    # elasticsearch<2.4; it returns both the status and the body
+                    status, data = result
+                else:
+                    # elasticsearch>=2.4; internal change for ``Transport.perform_request``
+                    # that just returns the body
+                    data = result
+
+                took = data.get('took')
+                if took:
+                    span.set_metric(metadata.TOOK, int(took))
+            except Exception:
+                pass
+
+            if status:
+                span.set_tag(http.STATUS_CODE, status)
+
+            return result
+    return _perform_request
+
+
+# Backwards compatibility for anyone who decided to import `ddtrace.contrib.elasticsearch.patch._perform_request`
+# DEV: `_perform_request` is a `wrapt.FunctionWrapper`
+try:
+    # DEV: Import as `es` to not shadow loop variables above
+    import elasticsearch as es
+    _perform_request = _get_perform_request(es)
+except ImportError:
+    pass
diff --git a/ddtrace/contrib/elasticsearch/quantize.py b/ddtrace/contrib/elasticsearch/quantize.py
new file mode 100644
index 0000000000..64b688ce39
--- /dev/null
+++ b/ddtrace/contrib/elasticsearch/quantize.py
@@ -0,0 +1,37 @@
+import re
+
+from ...ext import elasticsearch as metadata
+
+# Replace any ID
+ID_REGEXP = re.compile(r'/([0-9]+)([/\?]|$)')
+ID_PLACEHOLDER = r'/?\2'
+
+# Remove digits from potential timestamped indexes (should be an option).
+# For now, let's say 2+ digits
+INDEX_REGEXP = re.compile(r'[0-9]{2,}')
+INDEX_PLACEHOLDER = r'?'
+
+
+def quantize(span):
+    """Quantize an elasticsearch span
+
+    We want to extract a meaningful `resource` from the request.
+    We do it based on the method + url, with some cleanup applied to the URL.
+
+    The URL might contain an ID, and it is also common to have timestamped indexes.
+    While the first is easy to catch, the second should probably be configurable.
+
+    All of this should probably be done in the Agent. Later.
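+
+    For example, applying the two regexes above::
+
+        GET /user/123               -> GET /user/?
+        GET /logs-2016.01.01/_count -> GET /logs-?.?.?/_count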
+ """ + url = span.get_tag(metadata.URL) + method = span.get_tag(metadata.METHOD) + + quantized_url = ID_REGEXP.sub(ID_PLACEHOLDER, url) + quantized_url = INDEX_REGEXP.sub(INDEX_PLACEHOLDER, quantized_url) + + span.resource = '{method} {url}'.format( + method=method, + url=quantized_url + ) + + return span diff --git a/ddtrace/contrib/elasticsearch/transport.py b/ddtrace/contrib/elasticsearch/transport.py new file mode 100644 index 0000000000..347f710bd6 --- /dev/null +++ b/ddtrace/contrib/elasticsearch/transport.py @@ -0,0 +1,66 @@ +# DEV: This will import the first available module from: +# `elasticsearch`, `elasticsearch1`, `elasticsearch2`, `elasticsearch5`, 'elasticsearch6' +from .elasticsearch import elasticsearch + +from .quantize import quantize + +from ...utils.deprecation import deprecated +from ...compat import urlencode +from ...ext import SpanTypes, http, elasticsearch as metadata +from ...settings import config + +DEFAULT_SERVICE = 'elasticsearch' + + +@deprecated(message='Use patching instead (see the docs).', version='1.0.0') +def get_traced_transport(datadog_tracer, datadog_service=DEFAULT_SERVICE): + + class TracedTransport(elasticsearch.Transport): + """ Extend elasticseach transport layer to allow Datadog + tracer to catch any performed request. + """ + + _datadog_tracer = datadog_tracer + _datadog_service = datadog_service + + def perform_request(self, method, url, params=None, body=None): + with self._datadog_tracer.trace('elasticsearch.query', span_type=SpanTypes.ELASTICSEARCH) as s: + # Don't instrument if the trace is not sampled + if not s.sampled: + return super(TracedTransport, self).perform_request( + method, url, params=params, body=body) + + s.service = self._datadog_service + s.set_tag(metadata.METHOD, method) + s.set_tag(metadata.URL, url) + s.set_tag(metadata.PARAMS, urlencode(params)) + if config.elasticsearch.trace_query_string: + s.set_tag(http.QUERY_STRING, urlencode(params)) + if method == 'GET': + s.set_tag(metadata.BODY, self.serializer.dumps(body)) + s = quantize(s) + + try: + result = super(TracedTransport, self).perform_request(method, url, params=params, body=body) + except elasticsearch.exceptions.TransportError as e: + s.set_tag(http.STATUS_CODE, e.status_code) + raise + + status = None + if isinstance(result, tuple) and len(result) == 2: + # elasticsearch<2.4; it returns both the status and the body + status, data = result + else: + # elasticsearch>=2.4; internal change for ``Transport.perform_request`` + # that just returns the body + data = result + + if status: + s.set_tag(http.STATUS_CODE, status) + + took = data.get('took') + if took: + s.set_metric(metadata.TOOK, int(took)) + + return result + return TracedTransport diff --git a/ddtrace/contrib/falcon/__init__.py b/ddtrace/contrib/falcon/__init__.py new file mode 100644 index 0000000000..ca69d3f0cb --- /dev/null +++ b/ddtrace/contrib/falcon/__init__.py @@ -0,0 +1,59 @@ +""" +To trace the falcon web framework, install the trace middleware:: + + import falcon + from ddtrace import tracer + from ddtrace.contrib.falcon import TraceMiddleware + + mw = TraceMiddleware(tracer, 'my-falcon-app') + falcon.API(middleware=[mw]) + +You can also use the autopatching functionality:: + + import falcon + from ddtrace import tracer, patch + + patch(falcon=True) + + app = falcon.API() + +To disable distributed tracing when using autopatching, set the +``DATADOG_FALCON_DISTRIBUTED_TRACING`` environment variable to ``False``. 
+
+To enable generating APM events for Trace Search & Analytics, set the
+``DD_FALCON_ANALYTICS_ENABLED`` environment variable to ``True``.
+
+**Supported span hooks**
+
+The following is a list of available tracer hooks that can be used to intercept
+and modify spans created by this integration.
+
+- ``request``
+    - Called before the response has been finished
+    - ``def on_falcon_request(span, request, response)``
+
+
+Example::
+
+    import falcon
+    from ddtrace import config, patch_all
+    patch_all()
+
+    app = falcon.API()
+
+    @config.falcon.hooks.on('request')
+    def on_falcon_request(span, request, response):
+        span.set_tag('my.custom', 'tag')
+
+:ref:`Headers tracing ` is supported for this integration.
+"""
+from ...utils.importlib import require_modules
+
+required_modules = ['falcon']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .middleware import TraceMiddleware
+        from .patch import patch
+
+        __all__ = ['TraceMiddleware', 'patch']
diff --git a/ddtrace/contrib/falcon/middleware.py b/ddtrace/contrib/falcon/middleware.py
new file mode 100644
index 0000000000..e30b070494
--- /dev/null
+++ b/ddtrace/contrib/falcon/middleware.py
@@ -0,0 +1,116 @@
+import sys
+
+from ddtrace.ext import SpanTypes, http as httpx
+from ddtrace.http import store_request_headers, store_response_headers
+from ddtrace.propagation.http import HTTPPropagator
+
+from ...compat import iteritems
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...settings import config
+
+
+class TraceMiddleware(object):
+
+    def __init__(self, tracer, service='falcon', distributed_tracing=True):
+        # store tracing references
+        self.tracer = tracer
+        self.service = service
+        self._distributed_tracing = distributed_tracing
+
+    def process_request(self, req, resp):
+        if self._distributed_tracing:
+            # Falcon uppercases all header names.
+            headers = dict((k.lower(), v) for k, v in iteritems(req.headers))
+            propagator = HTTPPropagator()
+            context = propagator.extract(headers)
+            # Only activate the new context if there was a trace id extracted
+            if context.trace_id:
+                self.tracer.context_provider.activate(context)
+
+        span = self.tracer.trace(
+            'falcon.request',
+            service=self.service,
+            span_type=SpanTypes.WEB,
+        )
+
+        # set analytics sample rate with global config enabled
+        span.set_tag(
+            ANALYTICS_SAMPLE_RATE_KEY,
+            config.falcon.get_analytics_sample_rate(use_global_config=True)
+        )
+
+        span.set_tag(httpx.METHOD, req.method)
+        span.set_tag(httpx.URL, req.url)
+        if config.falcon.trace_query_string:
+            span.set_tag(httpx.QUERY_STRING, req.query_string)
+
+        # Note: any request header set after this line will not be stored in the span
+        store_request_headers(req.headers, span, config.falcon)
+
+    def process_resource(self, req, resp, resource, params):
+        span = self.tracer.current_span()
+        if not span:
+            return  # unexpected
+        span.resource = '%s %s' % (req.method, _name(resource))
+
+    def process_response(self, req, resp, resource, req_succeeded=None):
+        # req_succeeded is not a kwarg in the API, but we need it to support
+        # Falcon 1.0, which doesn't provide this argument
+        span = self.tracer.current_span()
+        if not span:
+            return  # unexpected
+
+        status = httpx.normalize_status_code(resp.status)
+
+        # Note: any response header set after this line will not be stored in the span
+        store_response_headers(resp._headers, span, config.falcon)
+
+        # FIXME[matt] falcon does not map errors or unmatched routes
+        # to proper status codes, so we have to try to infer them
+        # here. See https://github.com/falconry/falcon/issues/606
+        if resource is None:
+            status = '404'
+            span.resource = '%s 404' % req.method
+            span.set_tag(httpx.STATUS_CODE, status)
+            span.finish()
+            return
+
+        err_type = sys.exc_info()[0]
+        if err_type is not None:
+            if req_succeeded is None:
+                # backward-compatibility with Falcon 1.0; any version
+                # greater than 1.0 has req_succeeded in [True, False]
+                # TODO[manu]: drop the support at some point
+                status = _detect_and_set_status_error(err_type, span)
+            elif req_succeeded is False:
+                # Falcon 1.1+ provides the argument, which is set to False
+                # if we get an Exception (404 is still an exception)
+                status = _detect_and_set_status_error(err_type, span)
+
+        span.set_tag(httpx.STATUS_CODE, status)
+
+        # Emit span hook for this response
+        # DEV: Emit before closing so they can overwrite `span.resource` if they want
+        config.falcon.hooks._emit('request', span, req, resp)
+
+        # Close the span
+        span.finish()
+
+
+def _is_404(err_type):
+    return 'HTTPNotFound' in err_type.__name__
+
+
+def _detect_and_set_status_error(err_type, span):
+    """Detect the HTTP status code from the current stacktrace and
+    set the traceback on the given Span
+    """
+    if not _is_404(err_type):
+        span.set_traceback()
+        return '500'
+    elif _is_404(err_type):
+        return '404'
+
+
+def _name(r):
+    return '%s.%s' % (r.__module__, r.__class__.__name__)
diff --git a/ddtrace/contrib/falcon/patch.py b/ddtrace/contrib/falcon/patch.py
new file mode 100644
index 0000000000..5eef31f6d6
--- /dev/null
+++ b/ddtrace/contrib/falcon/patch.py
@@ -0,0 +1,31 @@
+import os
+from ddtrace.vendor import wrapt
+import falcon
+
+from ddtrace import tracer
+
+from .middleware import TraceMiddleware
+from ...utils.formats import asbool, get_env
+
+
+def patch():
+    """
+    Patch falcon.API to include contrib.falcon.TraceMiddleware
+    by default
+    """
+    if getattr(falcon, '_datadog_patch', False):
+        return
+
+    setattr(falcon, '_datadog_patch', True)
+    wrapt.wrap_function_wrapper('falcon', 'API.__init__', traced_init)
+
+
+def traced_init(wrapped, instance, args, kwargs):
+    mw = kwargs.pop('middleware', [])
+    service = os.environ.get('DATADOG_SERVICE_NAME') or 'falcon'
+    distributed_tracing = asbool(get_env('falcon', 'distributed_tracing', True))
+
+    mw.insert(0, TraceMiddleware(tracer, service, distributed_tracing))
+    kwargs['middleware'] = mw
+
+    wrapped(*args, **kwargs)
diff --git a/ddtrace/contrib/flask/__init__.py b/ddtrace/contrib/flask/__init__.py
new file mode 100644
index 0000000000..369dcf83d5
--- /dev/null
+++ b/ddtrace/contrib/flask/__init__.py
@@ -0,0 +1,113 @@
+"""
+The Flask__ integration will add tracing to all requests to your Flask application.
+
+This integration will track the entire Flask lifecycle including user-defined endpoints, hooks,
+signals, and template rendering.
+
+To configure tracing manually::
+
+    from ddtrace import patch_all
+    patch_all()
+
+    from flask import Flask
+
+    app = Flask(__name__)
+
+
+    @app.route('/')
+    def index():
+        return 'hello world'
+
+
+    if __name__ == '__main__':
+        app.run()
+
+
+You may also enable Flask tracing automatically via ddtrace-run::
+
+    ddtrace-run python app.py
+
+
+Configuration
+~~~~~~~~~~~~~
+
+.. py:data:: ddtrace.config.flask['distributed_tracing_enabled']
+
+    Whether to parse distributed tracing headers from requests received by your Flask app.
+
+    Default: ``True``
+
+.. py:data:: ddtrace.config.flask['analytics_enabled']
+
+    Whether to generate APM events for Flask in Trace Search & Analytics.
+ + Can also be enabled with the ``DD_FLASK_ANALYTICS_ENABLED`` environment variable. + + Default: ``None`` + +.. py:data:: ddtrace.config.flask['service_name'] + + The service name reported for your Flask app. + + Can also be configured via the ``DATADOG_SERVICE_NAME`` environment variable. + + Default: ``'flask'`` + +.. py:data:: ddtrace.config.flask['collect_view_args'] + + Whether to add request tags for view function argument values. + + Default: ``True`` + +.. py:data:: ddtrace.config.flask['template_default_name'] + + The default template name to use when one does not exist. + + Default: ```` + +.. py:data:: ddtrace.config.flask['trace_signals'] + + Whether to trace Flask signals (``before_request``, ``after_request``, etc). + + Default: ``True`` + +.. py:data:: ddtrace.config.flask['extra_error_codes'] + + A list of response codes that should get marked as errors. + + *5xx codes are always considered an error.* + + Default: ``[]`` + + +Example:: + + from ddtrace import config + + # Enable distributed tracing + config.flask['distributed_tracing_enabled'] = True + + # Override service name + config.flask['service_name'] = 'custom-service-name' + + # Report 401, and 403 responses as errors + config.flask['extra_error_codes'] = [401, 403] + +.. __: http://flask.pocoo.org/ +""" + +from ...utils.importlib import require_modules + + +required_modules = ['flask'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + # DEV: We do this so we can `@mock.patch('ddtrace.contrib.flask._patch.')` in tests + from . import patch as _patch + from .middleware import TraceMiddleware + + patch = _patch.patch + unpatch = _patch.unpatch + + __all__ = ['TraceMiddleware', 'patch', 'unpatch'] diff --git a/ddtrace/contrib/flask/helpers.py b/ddtrace/contrib/flask/helpers.py new file mode 100644 index 0000000000..38dcd25037 --- /dev/null +++ b/ddtrace/contrib/flask/helpers.py @@ -0,0 +1,44 @@ +from ddtrace import Pin +import flask + + +def get_current_app(): + """Helper to get the flask.app.Flask from the current app context""" + appctx = flask._app_ctx_stack.top + if appctx: + return appctx.app + return None + + +def with_instance_pin(func): + """Helper to wrap a function wrapper and ensure an enabled pin is available for the `instance`""" + def wrapper(wrapped, instance, args, kwargs): + pin = Pin._find(wrapped, instance, get_current_app()) + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + + return func(pin, wrapped, instance, args, kwargs) + return wrapper + + +def simple_tracer(name, span_type=None): + """Generate a simple tracer that wraps the function call with `with tracer.trace()`""" + @with_instance_pin + def wrapper(pin, wrapped, instance, args, kwargs): + with pin.tracer.trace(name, service=pin.service, span_type=span_type): + return wrapped(*args, **kwargs) + return wrapper + + +def get_current_span(pin, root=False): + """Helper to get the current span from the provided pins current call context""" + if not pin or not pin.enabled(): + return None + + ctx = pin.tracer.get_call_context() + if not ctx: + return None + + if root: + return ctx.get_current_root_span() + return ctx.get_current_span() diff --git a/ddtrace/contrib/flask/middleware.py b/ddtrace/contrib/flask/middleware.py new file mode 100644 index 0000000000..fb9a45f88c --- /dev/null +++ b/ddtrace/contrib/flask/middleware.py @@ -0,0 +1,208 @@ +from ... 
import compat +from ...ext import SpanTypes, http, errors +from ...internal.logger import get_logger +from ...propagation.http import HTTPPropagator +from ...utils.deprecation import deprecated + +import flask.templating +from flask import g, request, signals + + +log = get_logger(__name__) + + +SPAN_NAME = 'flask.request' + + +class TraceMiddleware(object): + + @deprecated(message='Use patching instead (see the docs).', version='1.0.0') + def __init__(self, app, tracer, service='flask', use_signals=True, distributed_tracing=False): + self.app = app + log.debug('flask: initializing trace middleware') + + # Attach settings to the inner application middleware. This is required if double + # instrumentation happens (i.e. `ddtrace-run` with `TraceMiddleware`). In that + # case, `ddtrace-run` instruments the application, but then users code is unable + # to update settings such as `distributed_tracing` flag. This step can be removed + # when the `Config` object is used + self.app._tracer = tracer + self.app._service = service + self.app._use_distributed_tracing = distributed_tracing + self.use_signals = use_signals + + # safe-guard to avoid double instrumentation + if getattr(app, '__dd_instrumentation', False): + return + setattr(app, '__dd_instrumentation', True) + + # Install hooks which time requests. + self.app.before_request(self._before_request) + self.app.after_request(self._after_request) + self.app.teardown_request(self._teardown_request) + + # Add exception handling signals. This will annotate exceptions that + # are caught and handled in custom user code. + # See https://github.com/DataDog/dd-trace-py/issues/390 + if use_signals and not signals.signals_available: + log.debug(_blinker_not_installed_msg) + self.use_signals = use_signals and signals.signals_available + timing_signals = { + 'got_request_exception': self._request_exception, + } + self._receivers = [] + if self.use_signals and _signals_exist(timing_signals): + self._connect(timing_signals) + + _patch_render(tracer) + + def _connect(self, signal_to_handler): + connected = True + for name, handler in signal_to_handler.items(): + s = getattr(signals, name, None) + if not s: + connected = False + log.warning('trying to instrument missing signal %s', name) + continue + # we should connect to the signal without using weak references + # otherwise they will be garbage collected and our handlers + # will be disconnected after the first call; for more details check: + # https://github.com/jek/blinker/blob/207446f2d97/blinker/base.py#L106-L108 + s.connect(handler, sender=self.app, weak=False) + self._receivers.append(handler) + return connected + + def _before_request(self): + """ Starts tracing the current request and stores it in the global + request object. + """ + self._start_span() + + def _after_request(self, response): + """ Runs after the server can process a response. """ + try: + self._process_response(response) + except Exception: + log.debug('flask: error tracing response', exc_info=True) + return response + + def _teardown_request(self, exception): + """ Runs at the end of a request. If there's an unhandled exception, it + will be passed in. + """ + # when we teardown the span, ensure we have a clean slate. 
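+        # (fetch and clear it in one step so a second teardown, or a failure
+        # below, never sees a stale span on ``g``)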
+ span = getattr(g, 'flask_datadog_span', None) + setattr(g, 'flask_datadog_span', None) + if not span: + return + + try: + self._finish_span(span, exception=exception) + except Exception: + log.debug('flask: error finishing span', exc_info=True) + + def _start_span(self): + if self.app._use_distributed_tracing: + propagator = HTTPPropagator() + context = propagator.extract(request.headers) + # Only need to active the new context if something was propagated + if context.trace_id: + self.app._tracer.context_provider.activate(context) + try: + g.flask_datadog_span = self.app._tracer.trace( + SPAN_NAME, + service=self.app._service, + span_type=SpanTypes.WEB, + ) + except Exception: + log.debug('flask: error tracing request', exc_info=True) + + def _process_response(self, response): + span = getattr(g, 'flask_datadog_span', None) + if not (span and span.sampled): + return + + code = response.status_code if response else '' + span.set_tag(http.STATUS_CODE, code) + + def _request_exception(self, *args, **kwargs): + exception = kwargs.get('exception', None) + span = getattr(g, 'flask_datadog_span', None) + if span and exception: + _set_error_on_span(span, exception) + + def _finish_span(self, span, exception=None): + if not span or not span.sampled: + return + + code = span.get_tag(http.STATUS_CODE) or 0 + try: + code = int(code) + except Exception: + code = 0 + + if exception: + # if the request has already had a code set, don't override it. + code = code or 500 + _set_error_on_span(span, exception) + + # the endpoint that matched the request is None if an exception + # happened so we fallback to a common resource + span.error = 0 if code < 500 else 1 + + # the request isn't guaranteed to exist here, so only use it carefully. + method = '' + endpoint = '' + url = '' + if request: + method = request.method + endpoint = request.endpoint or code + url = request.base_url or '' + + # Let users specify their own resource in middleware if they so desire. + # See case https://github.com/DataDog/dd-trace-py/issues/353 + if span.resource == SPAN_NAME: + resource = endpoint or code + span.resource = compat.to_unicode(resource).lower() + + span.set_tag(http.URL, compat.to_unicode(url)) + span.set_tag(http.STATUS_CODE, code) + span.set_tag(http.METHOD, method) + span.finish() + + +def _set_error_on_span(span, exception): + # The 3 next lines might not be strictly required, since `set_traceback` + # also get the exception from the sys.exc_info (and fill the error meta). + # Since we aren't sure it always work/for insuring no BC break, keep + # these lines which get overridden anyway. + span.set_tag(errors.ERROR_TYPE, type(exception)) + span.set_tag(errors.ERROR_MSG, exception) + # The provided `exception` object doesn't have a stack trace attached, + # so attach the stack trace with `set_traceback`. + span.set_traceback() + + +def _patch_render(tracer): + """ patch flask's render template methods with the given tracer. """ + # fall back to patching global method + _render = flask.templating._render + + def _traced_render(template, context, app): + with tracer.trace('flask.template', span_type=SpanTypes.TEMPLATE) as span: + span.set_tag('flask.template', template.name or 'string') + return _render(template, context, app) + + flask.templating._render = _traced_render + + +def _signals_exist(names): + """ Return true if all of the given signals exist in this version of flask. 
+ """ + return all(getattr(signals, n, False) for n in names) + + +_blinker_not_installed_msg = ( + 'please install blinker to use flask signals. ' + 'http://flask.pocoo.org/docs/0.11/signals/' +) diff --git a/ddtrace/contrib/flask/patch.py b/ddtrace/contrib/flask/patch.py new file mode 100644 index 0000000000..243a8cc56b --- /dev/null +++ b/ddtrace/contrib/flask/patch.py @@ -0,0 +1,497 @@ +import os + +import flask +import werkzeug +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +from ddtrace import compat +from ddtrace import config, Pin + +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...ext import SpanTypes, http +from ...internal.logger import get_logger +from ...propagation.http import HTTPPropagator +from ...utils.wrappers import unwrap as _u +from .helpers import get_current_app, get_current_span, simple_tracer, with_instance_pin +from .wrappers import wrap_function, wrap_signal + +log = get_logger(__name__) + +FLASK_ENDPOINT = 'flask.endpoint' +FLASK_VIEW_ARGS = 'flask.view_args' +FLASK_URL_RULE = 'flask.url_rule' +FLASK_VERSION = 'flask.version' + +# Configure default configuration +config._add('flask', dict( + # Flask service configuration + # DEV: Environment variable 'DATADOG_SERVICE_NAME' used for backwards compatibility + service_name=os.environ.get('DATADOG_SERVICE_NAME') or 'flask', + app='flask', + + collect_view_args=True, + distributed_tracing_enabled=True, + template_default_name='', + trace_signals=True, + + # We mark 5xx responses as errors, these codes are additional status codes to mark as errors + # DEV: This is so that if a user wants to see `401` or `403` as an error, they can configure that + extra_error_codes=set(), +)) + + +# Extract flask version into a tuple e.g. (0, 12, 1) or (1, 0, 2) +# DEV: This makes it so we can do `if flask_version >= (0, 12, 0):` +# DEV: Example tests: +# (0, 10, 0) > (0, 10) +# (0, 10, 0) >= (0, 10, 0) +# (0, 10, 1) >= (0, 10) +# (0, 11, 1) >= (0, 10) +# (0, 11, 1) >= (0, 10, 2) +# (1, 0, 0) >= (0, 10) +# (0, 9) == (0, 9) +# (0, 9, 0) != (0, 9) +# (0, 8, 5) <= (0, 9) +flask_version_str = getattr(flask, '__version__', '0.0.0') +flask_version = tuple([int(i) for i in flask_version_str.split('.')]) + + +def patch(): + """ + Patch `flask` module for tracing + """ + # Check to see if we have patched Flask yet or not + if getattr(flask, '_datadog_patch', False): + return + setattr(flask, '_datadog_patch', True) + + # Attach service pin to `flask.app.Flask` + Pin( + service=config.flask['service_name'], + app=config.flask['app'] + ).onto(flask.Flask) + + # flask.app.Flask methods that have custom tracing (add metadata, wrap functions, etc) + _w('flask', 'Flask.wsgi_app', traced_wsgi_app) + _w('flask', 'Flask.dispatch_request', request_tracer('dispatch_request')) + _w('flask', 'Flask.preprocess_request', request_tracer('preprocess_request')) + _w('flask', 'Flask.add_url_rule', traced_add_url_rule) + _w('flask', 'Flask.endpoint', traced_endpoint) + _w('flask', 'Flask._register_error_handler', traced_register_error_handler) + + # flask.blueprints.Blueprint methods that have custom tracing (add metadata, wrap functions, etc) + _w('flask', 'Blueprint.register', traced_blueprint_register) + _w('flask', 'Blueprint.add_url_rule', traced_blueprint_add_url_rule) + + # flask.app.Flask traced hook decorators + flask_hooks = [ + 'before_request', + 'before_first_request', + 'after_request', + 'teardown_request', + 'teardown_appcontext', + ] + for hook in flask_hooks: + _w('flask', 'Flask.{}'.format(hook), traced_flask_hook) 
+ _w('flask', 'after_this_request', traced_flask_hook) + + # flask.app.Flask traced methods + flask_app_traces = [ + 'process_response', + 'handle_exception', + 'handle_http_exception', + 'handle_user_exception', + 'try_trigger_before_first_request_functions', + 'do_teardown_request', + 'do_teardown_appcontext', + 'send_static_file', + ] + for name in flask_app_traces: + _w('flask', 'Flask.{}'.format(name), simple_tracer('flask.{}'.format(name))) + + # flask static file helpers + _w('flask', 'send_file', simple_tracer('flask.send_file')) + + # flask.json.jsonify + _w('flask', 'jsonify', traced_jsonify) + + # flask.templating traced functions + _w('flask.templating', '_render', traced_render) + _w('flask', 'render_template', traced_render_template) + _w('flask', 'render_template_string', traced_render_template_string) + + # flask.blueprints.Blueprint traced hook decorators + bp_hooks = [ + 'after_app_request', + 'after_request', + 'before_app_first_request', + 'before_app_request', + 'before_request', + 'teardown_request', + 'teardown_app_request', + ] + for hook in bp_hooks: + _w('flask', 'Blueprint.{}'.format(hook), traced_flask_hook) + + # flask.signals signals + if config.flask['trace_signals']: + signals = [ + 'template_rendered', + 'request_started', + 'request_finished', + 'request_tearing_down', + 'got_request_exception', + 'appcontext_tearing_down', + ] + # These were added in 0.11.0 + if flask_version >= (0, 11): + signals.append('before_render_template') + + # These were added in 0.10.0 + if flask_version >= (0, 10): + signals.append('appcontext_pushed') + signals.append('appcontext_popped') + signals.append('message_flashed') + + for signal in signals: + module = 'flask' + + # v0.9 missed importing `appcontext_tearing_down` in `flask/__init__.py` + # https://github.com/pallets/flask/blob/0.9/flask/__init__.py#L35-L37 + # https://github.com/pallets/flask/blob/0.9/flask/signals.py#L52 + # DEV: Version 0.9 doesn't have a patch version + if flask_version <= (0, 9) and signal == 'appcontext_tearing_down': + module = 'flask.signals' + + # DEV: Patch `receivers_for` instead of `connect` to ensure we don't mess with `disconnect` + _w(module, '{}.receivers_for'.format(signal), traced_signal_receivers_for(signal)) + + +def unpatch(): + if not getattr(flask, '_datadog_patch', False): + return + setattr(flask, '_datadog_patch', False) + + props = [ + # Flask + 'Flask.wsgi_app', + 'Flask.dispatch_request', + 'Flask.add_url_rule', + 'Flask.endpoint', + 'Flask._register_error_handler', + + 'Flask.preprocess_request', + 'Flask.process_response', + 'Flask.handle_exception', + 'Flask.handle_http_exception', + 'Flask.handle_user_exception', + 'Flask.try_trigger_before_first_request_functions', + 'Flask.do_teardown_request', + 'Flask.do_teardown_appcontext', + 'Flask.send_static_file', + + # Flask Hooks + 'Flask.before_request', + 'Flask.before_first_request', + 'Flask.after_request', + 'Flask.teardown_request', + 'Flask.teardown_appcontext', + + # Blueprint + 'Blueprint.register', + 'Blueprint.add_url_rule', + + # Blueprint Hooks + 'Blueprint.after_app_request', + 'Blueprint.after_request', + 'Blueprint.before_app_first_request', + 'Blueprint.before_app_request', + 'Blueprint.before_request', + 'Blueprint.teardown_request', + 'Blueprint.teardown_app_request', + + # Signals + 'template_rendered.receivers_for', + 'request_started.receivers_for', + 'request_finished.receivers_for', + 'request_tearing_down.receivers_for', + 'got_request_exception.receivers_for', + 
'appcontext_tearing_down.receivers_for', + + # Top level props + 'after_this_request', + 'send_file', + 'jsonify', + 'render_template', + 'render_template_string', + 'templating._render', + ] + + # These were added in 0.11.0 + if flask_version >= (0, 11): + props.append('before_render_template.receivers_for') + + # These were added in 0.10.0 + if flask_version >= (0, 10): + props.append('appcontext_pushed.receivers_for') + props.append('appcontext_popped.receivers_for') + props.append('message_flashed.receivers_for') + + for prop in props: + # Handle 'flask.request_started.receivers_for' + obj = flask + + # v0.9.0 missed importing `appcontext_tearing_down` in `flask/__init__.py` + # https://github.com/pallets/flask/blob/0.9/flask/__init__.py#L35-L37 + # https://github.com/pallets/flask/blob/0.9/flask/signals.py#L52 + # DEV: Version 0.9 doesn't have a patch version + if flask_version <= (0, 9) and prop == 'appcontext_tearing_down.receivers_for': + obj = flask.signals + + if '.' in prop: + attr, _, prop = prop.partition('.') + obj = getattr(obj, attr, object()) + _u(obj, prop) + + +@with_instance_pin +def traced_wsgi_app(pin, wrapped, instance, args, kwargs): + """ + Wrapper for flask.app.Flask.wsgi_app + + This wrapper is the starting point for all requests. + """ + # DEV: This is safe before this is the args for a WSGI handler + # https://www.python.org/dev/peps/pep-3333/ + environ, start_response = args + + # Create a werkzeug request from the `environ` to make interacting with it easier + # DEV: This executes before a request context is created + request = werkzeug.Request(environ) + + # Configure distributed tracing + if config.flask.get('distributed_tracing_enabled', False): + propagator = HTTPPropagator() + context = propagator.extract(request.headers) + # Only need to activate the new context if something was propagated + if context.trace_id: + pin.tracer.context_provider.activate(context) + + # Default resource is method and path: + # GET / + # POST /save + # We will override this below in `traced_dispatch_request` when we have a `RequestContext` and possibly a url rule + resource = u'{} {}'.format(request.method, request.path) + with pin.tracer.trace('flask.request', service=pin.service, resource=resource, span_type=SpanTypes.WEB) as s: + # set analytics sample rate with global config enabled + sample_rate = config.flask.get_analytics_sample_rate(use_global_config=True) + if sample_rate is not None: + s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate) + + s.set_tag(FLASK_VERSION, flask_version_str) + + # Wrap the `start_response` handler to extract response code + # DEV: We tried using `Flask.finalize_request`, which seemed to work, but gave us hell during tests + # DEV: The downside to using `start_response` is we do not have a `Flask.Response` object here, + # only `status_code`, and `headers` to work with + # On the bright side, this works in all versions of Flask (or any WSGI app actually) + def _wrap_start_response(func): + def traced_start_response(status_code, headers): + code, _, _ = status_code.partition(' ') + try: + code = int(code) + except ValueError: + pass + + # Override root span resource name to be ` 404` for 404 requests + # DEV: We do this because we want to make it easier to see all unknown requests together + # Also, we do this to reduce the cardinality on unknown urls + # DEV: If we have an endpoint or url rule tag, then we don't need to do this, + # we still want `GET /product/` grouped together, + # even if it is a 404 + if not s.get_tag(FLASK_ENDPOINT) and 
not s.get_tag(FLASK_URL_RULE): + s.resource = u'{} {}'.format(request.method, code) + + s.set_tag(http.STATUS_CODE, code) + if 500 <= code < 600: + s.error = 1 + elif code in config.flask.get('extra_error_codes', set()): + s.error = 1 + return func(status_code, headers) + return traced_start_response + start_response = _wrap_start_response(start_response) + + # DEV: We set response status code in `_wrap_start_response` + # DEV: Use `request.base_url` and not `request.url` to keep from leaking any query string parameters + s.set_tag(http.URL, request.base_url) + s.set_tag(http.METHOD, request.method) + if config.flask.trace_query_string: + s.set_tag(http.QUERY_STRING, compat.to_unicode(request.query_string)) + + return wrapped(environ, start_response) + + +def traced_blueprint_register(wrapped, instance, args, kwargs): + """ + Wrapper for flask.blueprints.Blueprint.register + + This wrapper just ensures the blueprint has a pin, either set manually on + itself from the user or inherited from the application + """ + app = kwargs.get('app', args[0]) + # Check if this Blueprint has a pin, otherwise clone the one from the app onto it + pin = Pin.get_from(instance) + if not pin: + pin = Pin.get_from(app) + if pin: + pin.clone().onto(instance) + return wrapped(*args, **kwargs) + + +def traced_blueprint_add_url_rule(wrapped, instance, args, kwargs): + pin = Pin._find(wrapped, instance) + if not pin: + return wrapped(*args, **kwargs) + + def _wrap(rule, endpoint=None, view_func=None, **kwargs): + if view_func: + pin.clone().onto(view_func) + return wrapped(rule, endpoint=endpoint, view_func=view_func, **kwargs) + + return _wrap(*args, **kwargs) + + +def traced_add_url_rule(wrapped, instance, args, kwargs): + """Wrapper for flask.app.Flask.add_url_rule to wrap all views attached to this app""" + def _wrap(rule, endpoint=None, view_func=None, **kwargs): + if view_func: + # TODO: `if hasattr(view_func, 'view_class')` then this was generated from a `flask.views.View` + # should we do something special with these views? Change the name/resource? Add tags? 
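For context on the TODO above: a view built from ``flask.views.View`` is a plain function that carries the class on its ``view_class`` attribute, which is how such views could be detected. A small sketch (the ``UsersView`` class is hypothetical)::

    import flask
    import flask.views

    class UsersView(flask.views.View):
        def dispatch_request(self):
            return 'users'

    app = flask.Flask(__name__)

    # as_view() returns a function with `view_class` set to UsersView
    view_func = UsersView.as_view('users')
    app.add_url_rule('/users', view_func=view_func)
    assert view_func.view_class is UsersView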
+ view_func = wrap_function(instance, view_func, name=endpoint, resource=rule) + + return wrapped(rule, endpoint=endpoint, view_func=view_func, **kwargs) + + return _wrap(*args, **kwargs) + + +def traced_endpoint(wrapped, instance, args, kwargs): + """Wrapper for flask.app.Flask.endpoint to ensure all endpoints are wrapped""" + endpoint = kwargs.get('endpoint', args[0]) + + def _wrapper(func): + # DEV: `wrap_function` will call `func_name(func)` for us + return wrapped(endpoint)(wrap_function(instance, func, resource=endpoint)) + return _wrapper + + +def traced_flask_hook(wrapped, instance, args, kwargs): + """Wrapper for hook functions (before_request, after_request, etc) are properly traced""" + func = kwargs.get('f', args[0]) + return wrapped(wrap_function(instance, func)) + + +def traced_render_template(wrapped, instance, args, kwargs): + """Wrapper for flask.templating.render_template""" + pin = Pin._find(wrapped, instance, get_current_app()) + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + + with pin.tracer.trace('flask.render_template', span_type=SpanTypes.TEMPLATE): + return wrapped(*args, **kwargs) + + +def traced_render_template_string(wrapped, instance, args, kwargs): + """Wrapper for flask.templating.render_template_string""" + pin = Pin._find(wrapped, instance, get_current_app()) + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + + with pin.tracer.trace('flask.render_template_string', span_type=SpanTypes.TEMPLATE): + return wrapped(*args, **kwargs) + + +def traced_render(wrapped, instance, args, kwargs): + """ + Wrapper for flask.templating._render + + This wrapper is used for setting template tags on the span. + + This method is called for render_template or render_template_string + """ + pin = Pin._find(wrapped, instance, get_current_app()) + # DEV: `get_current_span` will verify `pin` is valid and enabled first + span = get_current_span(pin) + if not span: + return wrapped(*args, **kwargs) + + def _wrap(template, context, app): + name = getattr(template, 'name', None) or config.flask.get('template_default_name') + span.resource = name + span.set_tag('flask.template_name', name) + return wrapped(*args, **kwargs) + return _wrap(*args, **kwargs) + + +def traced_register_error_handler(wrapped, instance, args, kwargs): + """Wrapper to trace all functions registered with flask.app.register_error_handler""" + def _wrap(key, code_or_exception, f): + return wrapped(key, code_or_exception, wrap_function(instance, f)) + return _wrap(*args, **kwargs) + + +def request_tracer(name): + @with_instance_pin + def _traced_request(pin, wrapped, instance, args, kwargs): + """ + Wrapper to trace a Flask function while trying to extract endpoint information + (endpoint, url_rule, view_args, etc) + + This wrapper will add identifier tags to the current span from `flask.app.Flask.wsgi_app`. + """ + span = get_current_span(pin) + if not span: + return wrapped(*args, **kwargs) + + try: + request = flask._request_ctx_stack.top.request + + # DEV: This name will include the blueprint name as well (e.g. 
`bp.index`) + if not span.get_tag(FLASK_ENDPOINT) and request.endpoint: + span.resource = u'{} {}'.format(request.method, request.endpoint) + span.set_tag(FLASK_ENDPOINT, request.endpoint) + + if not span.get_tag(FLASK_URL_RULE) and request.url_rule and request.url_rule.rule: + span.resource = u'{} {}'.format(request.method, request.url_rule.rule) + span.set_tag(FLASK_URL_RULE, request.url_rule.rule) + + if not span.get_tag(FLASK_VIEW_ARGS) and request.view_args and config.flask.get('collect_view_args'): + for k, v in request.view_args.items(): + span.set_tag(u'{}.{}'.format(FLASK_VIEW_ARGS, k), v) + except Exception: + log.debug('failed to set tags for "flask.request" span', exc_info=True) + + with pin.tracer.trace('flask.{}'.format(name), service=pin.service): + return wrapped(*args, **kwargs) + return _traced_request + + +def traced_signal_receivers_for(signal): + """Wrapper for flask.signals.{signal}.receivers_for to ensure all signal receivers are traced""" + def outer(wrapped, instance, args, kwargs): + sender = kwargs.get('sender', args[0]) + # See if they gave us the flask.app.Flask as the sender + app = None + if isinstance(sender, flask.Flask): + app = sender + for receiver in wrapped(*args, **kwargs): + yield wrap_signal(app, signal, receiver) + return outer + + +def traced_jsonify(wrapped, instance, args, kwargs): + pin = Pin._find(wrapped, instance, get_current_app()) + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + + with pin.tracer.trace('flask.jsonify'): + return wrapped(*args, **kwargs) diff --git a/ddtrace/contrib/flask/wrappers.py b/ddtrace/contrib/flask/wrappers.py new file mode 100644 index 0000000000..e49426225c --- /dev/null +++ b/ddtrace/contrib/flask/wrappers.py @@ -0,0 +1,46 @@ +from ddtrace.vendor.wrapt import function_wrapper + +from ...pin import Pin +from ...utils.importlib import func_name +from .helpers import get_current_app + + +def wrap_function(instance, func, name=None, resource=None): + """ + Helper function to wrap common flask.app.Flask methods. + + This helper will first ensure that a Pin is available and enabled before tracing + """ + if not name: + name = func_name(func) + + @function_wrapper + def trace_func(wrapped, _instance, args, kwargs): + pin = Pin._find(wrapped, _instance, instance, get_current_app()) + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + with pin.tracer.trace(name, service=pin.service, resource=resource): + return wrapped(*args, **kwargs) + + return trace_func(func) + + +def wrap_signal(app, signal, func): + """ + Helper used to wrap signal handlers + + We will attempt to find the pin attached to the flask.app.Flask app + """ + name = func_name(func) + + @function_wrapper + def trace_func(wrapped, instance, args, kwargs): + pin = Pin._find(wrapped, instance, app, get_current_app()) + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + + with pin.tracer.trace(name, service=pin.service) as span: + span.set_tag('flask.signal', signal) + return wrapped(*args, **kwargs) + + return trace_func(func) diff --git a/ddtrace/contrib/flask_cache/__init__.py b/ddtrace/contrib/flask_cache/__init__.py new file mode 100644 index 0000000000..d8cfe3036f --- /dev/null +++ b/ddtrace/contrib/flask_cache/__init__.py @@ -0,0 +1,44 @@ +""" +The flask cache tracer will track any access to a cache backend. +You can use this tracer together with the Flask tracer middleware. 
+
+To install the tracer, ``from ddtrace import tracer`` needs to be added::
+
+    from ddtrace import tracer
+    from ddtrace.contrib.flask_cache import get_traced_cache
+
+and the tracer needs to be initialized::
+
+    Cache = get_traced_cache(tracer, service='my-flask-cache-app')
+
+Here is the end result, in a sample app::
+
+    from flask import Flask
+
+    from ddtrace import tracer
+    from ddtrace.contrib.flask_cache import get_traced_cache
+
+    app = Flask(__name__)
+
+    # get the traced Cache class
+    Cache = get_traced_cache(tracer, service='my-flask-cache-app')
+
+    # use the Cache as usual with your preferred CACHE_TYPE
+    cache = Cache(app, config={'CACHE_TYPE': 'simple'})
+
+    def counter():
+        # this access is traced
+        conn_counter = cache.get("conn_counter")
+
+"""
+
+from ...utils.importlib import require_modules
+
+
+required_modules = ['flask_cache']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .tracers import get_traced_cache
+
+        __all__ = ['get_traced_cache']
diff --git a/ddtrace/contrib/flask_cache/tracers.py b/ddtrace/contrib/flask_cache/tracers.py
new file mode 100644
index 0000000000..31c7ea9b22
--- /dev/null
+++ b/ddtrace/contrib/flask_cache/tracers.py
@@ -0,0 +1,146 @@
+"""
+Datadog trace code for flask_cache
+"""
+
+# stdlib
+import logging
+
+# project
+from .utils import _extract_conn_tags, _resource_from_cache_prefix
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...ext import SpanTypes
+from ...settings import config
+
+# 3rd party
+from flask.ext.cache import Cache
+
+
+log = logging.getLogger(__name__)
+
+DEFAULT_SERVICE = 'flask-cache'
+
+# standard tags
+COMMAND_KEY = 'flask_cache.key'
+CACHE_BACKEND = 'flask_cache.backend'
+CONTACT_POINTS = 'flask_cache.contact_points'
+
+
+def get_traced_cache(ddtracer, service=DEFAULT_SERVICE, meta=None):
+    """
+    Return a traced Cache object that behaves exactly like the ``flask.ext.cache.Cache`` class
+    """
+
+    class TracedCache(Cache):
+        """
+        Traced cache backend that monitors any operations done by flask_cache.
Observed actions are: + * get, set, add, delete, clear + * all ``many_`` operations + """ + _datadog_tracer = ddtracer + _datadog_service = service + _datadog_meta = meta + + def __trace(self, cmd): + """ + Start a tracing with default attributes and tags + """ + # create a new span + s = self._datadog_tracer.trace( + cmd, + span_type=SpanTypes.CACHE, + service=self._datadog_service + ) + # set span tags + s.set_tag(CACHE_BACKEND, self.config.get('CACHE_TYPE')) + s.set_tags(self._datadog_meta) + # set analytics sample rate + s.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.flask_cache.get_analytics_sample_rate() + ) + # add connection meta if there is one + if getattr(self.cache, '_client', None): + try: + s.set_tags(_extract_conn_tags(self.cache._client)) + except Exception: + log.debug('error parsing connection tags', exc_info=True) + + return s + + def get(self, *args, **kwargs): + """ + Track ``get`` operation + """ + with self.__trace('flask_cache.cmd') as span: + span.resource = _resource_from_cache_prefix('GET', self.config) + if len(args) > 0: + span.set_tag(COMMAND_KEY, args[0]) + return super(TracedCache, self).get(*args, **kwargs) + + def set(self, *args, **kwargs): + """ + Track ``set`` operation + """ + with self.__trace('flask_cache.cmd') as span: + span.resource = _resource_from_cache_prefix('SET', self.config) + if len(args) > 0: + span.set_tag(COMMAND_KEY, args[0]) + return super(TracedCache, self).set(*args, **kwargs) + + def add(self, *args, **kwargs): + """ + Track ``add`` operation + """ + with self.__trace('flask_cache.cmd') as span: + span.resource = _resource_from_cache_prefix('ADD', self.config) + if len(args) > 0: + span.set_tag(COMMAND_KEY, args[0]) + return super(TracedCache, self).add(*args, **kwargs) + + def delete(self, *args, **kwargs): + """ + Track ``delete`` operation + """ + with self.__trace('flask_cache.cmd') as span: + span.resource = _resource_from_cache_prefix('DELETE', self.config) + if len(args) > 0: + span.set_tag(COMMAND_KEY, args[0]) + return super(TracedCache, self).delete(*args, **kwargs) + + def delete_many(self, *args, **kwargs): + """ + Track ``delete_many`` operation + """ + with self.__trace('flask_cache.cmd') as span: + span.resource = _resource_from_cache_prefix('DELETE_MANY', self.config) + span.set_tag(COMMAND_KEY, list(args)) + return super(TracedCache, self).delete_many(*args, **kwargs) + + def clear(self, *args, **kwargs): + """ + Track ``clear`` operation + """ + with self.__trace('flask_cache.cmd') as span: + span.resource = _resource_from_cache_prefix('CLEAR', self.config) + return super(TracedCache, self).clear(*args, **kwargs) + + def get_many(self, *args, **kwargs): + """ + Track ``get_many`` operation + """ + with self.__trace('flask_cache.cmd') as span: + span.resource = _resource_from_cache_prefix('GET_MANY', self.config) + span.set_tag(COMMAND_KEY, list(args)) + return super(TracedCache, self).get_many(*args, **kwargs) + + def set_many(self, *args, **kwargs): + """ + Track ``set_many`` operation + """ + with self.__trace('flask_cache.cmd') as span: + span.resource = _resource_from_cache_prefix('SET_MANY', self.config) + if len(args) > 0: + span.set_tag(COMMAND_KEY, list(args[0].keys())) + return super(TracedCache, self).set_many(*args, **kwargs) + + return TracedCache diff --git a/ddtrace/contrib/flask_cache/utils.py b/ddtrace/contrib/flask_cache/utils.py new file mode 100644 index 0000000000..a2a285d7c1 --- /dev/null +++ b/ddtrace/contrib/flask_cache/utils.py @@ -0,0 +1,46 @@ +# project +from ...ext import net +from 
..redis.util import _extract_conn_tags as extract_redis_tags
+from ..pylibmc.addrs import parse_addresses
+
+
+def _resource_from_cache_prefix(resource, cache):
+    """
+    Combine the resource name with the cache prefix (if any)
+    """
+    if getattr(cache, 'key_prefix', None):
+        name = '{} {}'.format(resource, cache.key_prefix)
+    else:
+        name = resource
+
+    # enforce lowercase to make the output nicer to read
+    return name.lower()
+
+
+def _extract_conn_tags(client):
+    """
+    Extract connection tags from the given client
+    """
+    tags = {}
+
+    if hasattr(client, 'servers'):
+        # Memcached backend supports an address pool
+        if isinstance(client.servers, list) and len(client.servers) > 0:
+            # use the first address of the pool as a host because
+            # the code doesn't expose more information
+            contact_point = client.servers[0].address
+            tags[net.TARGET_HOST] = contact_point[0]
+            tags[net.TARGET_PORT] = contact_point[1]
+    elif hasattr(client, 'connection_pool'):
+        # Redis main connection
+        redis_tags = extract_redis_tags(client.connection_pool.connection_kwargs)
+        tags.update(**redis_tags)
+    elif hasattr(client, 'addresses'):
+        # pylibmc
+        # FIXME[matt] should we memoize this?
+        addrs = parse_addresses(client.addresses)
+        if addrs:
+            _, host, port, _ = addrs[0]
+            tags[net.TARGET_PORT] = port
+            tags[net.TARGET_HOST] = host
+    return tags
diff --git a/ddtrace/contrib/futures/__init__.py b/ddtrace/contrib/futures/__init__.py
new file mode 100644
index 0000000000..126c9e4674
--- /dev/null
+++ b/ddtrace/contrib/futures/__init__.py
@@ -0,0 +1,29 @@
+"""
+The ``futures`` integration propagates the current active Tracing Context
+between threads. The integration ensures that when operations are executed
+in a new thread, that thread can continue the previously generated trace.
+
+The integration doesn't automatically trace thread execution, so manual
+instrumentation or another integration must be activated.
Thread propagation
+is not enabled by default with the `patch_all()` method and must be activated
+as follows::
+
+    from ddtrace import patch, patch_all
+
+    patch(futures=True)
+    # or, when instrumenting all libraries
+    patch_all(futures=True)
+"""
+from ...utils.importlib import require_modules
+
+
+required_modules = ['concurrent.futures']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch, unpatch
+
+        __all__ = [
+            'patch',
+            'unpatch',
+        ]
diff --git a/ddtrace/contrib/futures/patch.py b/ddtrace/contrib/futures/patch.py
new file mode 100644
index 0000000000..dd9e5d8b2c
--- /dev/null
+++ b/ddtrace/contrib/futures/patch.py
@@ -0,0 +1,24 @@
+from concurrent import futures
+
+from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
+
+from .threading import _wrap_submit
+from ...utils.wrappers import unwrap as _u
+
+
+def patch():
+    """Enables Context Propagation between threads"""
+    if getattr(futures, '__datadog_patch', False):
+        return
+    setattr(futures, '__datadog_patch', True)
+
+    _w('concurrent.futures', 'ThreadPoolExecutor.submit', _wrap_submit)
+
+
+def unpatch():
+    """Disables Context Propagation between threads"""
+    if not getattr(futures, '__datadog_patch', False):
+        return
+    setattr(futures, '__datadog_patch', False)
+
+    _u(futures.ThreadPoolExecutor, 'submit')
diff --git a/ddtrace/contrib/futures/threading.py b/ddtrace/contrib/futures/threading.py
new file mode 100644
index 0000000000..dcaef6ca8a
--- /dev/null
+++ b/ddtrace/contrib/futures/threading.py
@@ -0,0 +1,46 @@
+import ddtrace
+
+
+def _wrap_submit(func, instance, args, kwargs):
+    """
+    Wrap the `Executor` method used to submit work to be executed in another
+    thread. This wrapper ensures that a new `Context` is created and
+    properly propagated using an intermediate function.
+    """
+    # If there isn't a currently active context, then do not create one
+    # DEV: Calling `.active()` when there isn't an active context will create a new context
+    # DEV: We need to do this in case they are either:
+    #        - Starting nested futures
+    #        - Starting futures from outside of an existing context
+    #
+    #      In either of these cases we essentially will propagate the wrong context between futures
+    #
+    #      The resolution is to not create/propagate a new context if one does not exist, but let the
+    #      future's thread create the context instead.
+    current_ctx = None
+    if ddtrace.tracer.context_provider._has_active_context():
+        current_ctx = ddtrace.tracer.context_provider.active()
+
+        # If we have a context then make sure we clone it
+        # DEV: We don't know if the future will finish executing before the parent span finishes
+        #      so we clone to ensure we properly collect/report the future's spans
+        current_ctx = current_ctx.clone()
+
+    # extract the target function that must be executed in
+    # a new thread and the `target` arguments
+    fn = args[0]
+    fn_args = args[1:]
+    return func(_wrap_execution, current_ctx, fn, fn_args, kwargs)
+
+
+def _wrap_execution(ctx, fn, args, kwargs):
+    """
+    Intermediate target function that is executed in a new thread;
+    it receives the original function with arguments and keyword
+    arguments, including our tracing `Context`. The current context
+    provider sets the active context in thread-local storage because
+    this code runs outside the asynchronous loop.
+    """
+    if ctx is not None:
+        ddtrace.tracer.context_provider.activate(ctx)
+    return fn(*args, **kwargs)
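A runnable usage sketch of the propagation implemented above, assuming a configured ``ddtrace``: the integration only propagates the context, so the worker span comes from manual instrumentation::

    from concurrent.futures import ThreadPoolExecutor

    from ddtrace import patch, tracer

    patch(futures=True)

    def worker(item):
        # runs in a pool thread; the submitted call carries a clone of the
        # parent context, so this span is reported as a child of 'main.request'
        with tracer.trace('worker.process'):
            return item * 2

    with tracer.trace('main.request'):
        with ThreadPoolExecutor(max_workers=2) as executor:
            results = list(executor.map(worker, range(4)))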
+ """ + if ctx is not None: + ddtrace.tracer.context_provider.activate(ctx) + return fn(*args, **kwargs) diff --git a/ddtrace/contrib/gevent/__init__.py b/ddtrace/contrib/gevent/__init__.py new file mode 100644 index 0000000000..5628e737a3 --- /dev/null +++ b/ddtrace/contrib/gevent/__init__.py @@ -0,0 +1,48 @@ +""" +To trace a request in a ``gevent`` environment, configure the tracer to use the greenlet +context provider, rather than the default one that relies on a thread-local storaging. + +This allows the tracer to pick up a transaction exactly where it left off as greenlets +yield the context to another one. + +The simplest way to trace a ``gevent`` application is to configure the tracer and +patch ``gevent`` **before importing** the library:: + + # patch before importing gevent + from ddtrace import patch, tracer + patch(gevent=True) + + # use gevent as usual with or without the monkey module + from gevent import monkey; monkey.patch_thread() + + def my_parent_function(): + with tracer.trace("web.request") as span: + span.service = "web" + gevent.spawn(worker_function) + + def worker_function(): + # then trace its child + with tracer.trace("greenlet.call") as span: + span.service = "greenlet" + ... + + with tracer.trace("greenlet.child_call") as child: + ... +""" +from ...utils.importlib import require_modules + + +required_modules = ['gevent'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .provider import GeventContextProvider + from .patch import patch, unpatch + + context_provider = GeventContextProvider() + + __all__ = [ + 'patch', + 'unpatch', + 'context_provider', + ] diff --git a/ddtrace/contrib/gevent/greenlet.py b/ddtrace/contrib/gevent/greenlet.py new file mode 100644 index 0000000000..3c48e49084 --- /dev/null +++ b/ddtrace/contrib/gevent/greenlet.py @@ -0,0 +1,58 @@ +import gevent +import gevent.pool as gpool + +from .provider import CONTEXT_ATTR + +GEVENT_VERSION = gevent.version_info[0:3] + + +class TracingMixin(object): + def __init__(self, *args, **kwargs): + # get the current Context if available + current_g = gevent.getcurrent() + ctx = getattr(current_g, CONTEXT_ATTR, None) + + # create the Greenlet as usual + super(TracingMixin, self).__init__(*args, **kwargs) + + # the context is always available made exception of the main greenlet + if ctx: + # create a new context that inherits the current active span + new_ctx = ctx.clone() + setattr(self, CONTEXT_ATTR, new_ctx) + + +class TracedGreenlet(TracingMixin, gevent.Greenlet): + """ + ``Greenlet`` class that is used to replace the original ``gevent`` + class. This class is supposed to do ``Context`` replacing operation, so + that any greenlet inherits the context from the parent Greenlet. + When a new greenlet is spawned from the main greenlet, a new instance + of ``Context`` is created. The main greenlet is not affected by this behavior. + + There is no need to inherit this class to create or optimize greenlets + instances, because this class replaces ``gevent.greenlet.Greenlet`` + through the ``patch()`` method. After the patch, extending the gevent + ``Greenlet`` class means extending automatically ``TracedGreenlet``. 
+ """ + def __init__(self, *args, **kwargs): + super(TracedGreenlet, self).__init__(*args, **kwargs) + + +class TracedIMapUnordered(TracingMixin, gpool.IMapUnordered): + def __init__(self, *args, **kwargs): + super(TracedIMapUnordered, self).__init__(*args, **kwargs) + + +if GEVENT_VERSION >= (1, 3) or GEVENT_VERSION < (1, 1): + # For gevent <1.1 and >=1.3, IMap is its own class, so we derive + # from TracingMixin + class TracedIMap(TracingMixin, gpool.IMap): + def __init__(self, *args, **kwargs): + super(TracedIMap, self).__init__(*args, **kwargs) +else: + # For gevent >=1.1 and <1.3, IMap derives from IMapUnordered, so we derive + # from TracedIMapUnordered and get tracing that way + class TracedIMap(gpool.IMap, TracedIMapUnordered): + def __init__(self, *args, **kwargs): + super(TracedIMap, self).__init__(*args, **kwargs) diff --git a/ddtrace/contrib/gevent/patch.py b/ddtrace/contrib/gevent/patch.py new file mode 100644 index 0000000000..d4dc985c22 --- /dev/null +++ b/ddtrace/contrib/gevent/patch.py @@ -0,0 +1,63 @@ +import gevent +import gevent.pool +import ddtrace + +from .greenlet import TracedGreenlet, TracedIMap, TracedIMapUnordered, GEVENT_VERSION +from .provider import GeventContextProvider +from ...provider import DefaultContextProvider + + +__Greenlet = gevent.Greenlet +__IMap = gevent.pool.IMap +__IMapUnordered = gevent.pool.IMapUnordered + + +def patch(): + """ + Patch the gevent module so that all references to the + internal ``Greenlet`` class points to the ``DatadogGreenlet`` + class. + + This action ensures that if a user extends the ``Greenlet`` + class, the ``TracedGreenlet`` is used as a parent class. + """ + _replace(TracedGreenlet, TracedIMap, TracedIMapUnordered) + ddtrace.tracer.configure(context_provider=GeventContextProvider()) + + +def unpatch(): + """ + Restore the original ``Greenlet``. This function must be invoked + before executing application code, otherwise the ``DatadogGreenlet`` + class may be used during initialization. + """ + _replace(__Greenlet, __IMap, __IMapUnordered) + ddtrace.tracer.configure(context_provider=DefaultContextProvider()) + + +def _replace(g_class, imap_class, imap_unordered_class): + """ + Utility function that replace the gevent Greenlet class with the given one. 
+ """ + # replace the original Greenlet classes with the new one + gevent.greenlet.Greenlet = g_class + + if GEVENT_VERSION >= (1, 3): + # For gevent >= 1.3.0, IMap and IMapUnordered were pulled out of + # gevent.pool and into gevent._imap + gevent._imap.IMap = imap_class + gevent._imap.IMapUnordered = imap_unordered_class + gevent.pool.IMap = gevent._imap.IMap + gevent.pool.IMapUnordered = gevent._imap.IMapUnordered + gevent.pool.Greenlet = gevent.greenlet.Greenlet + else: + # For gevent < 1.3, only patching of gevent.pool classes necessary + gevent.pool.IMap = imap_class + gevent.pool.IMapUnordered = imap_unordered_class + + gevent.pool.Group.greenlet_class = g_class + + # replace gevent shortcuts + gevent.Greenlet = gevent.greenlet.Greenlet + gevent.spawn = gevent.greenlet.Greenlet.spawn + gevent.spawn_later = gevent.greenlet.Greenlet.spawn_later diff --git a/ddtrace/contrib/gevent/provider.py b/ddtrace/contrib/gevent/provider.py new file mode 100644 index 0000000000..ebae325e30 --- /dev/null +++ b/ddtrace/contrib/gevent/provider.py @@ -0,0 +1,55 @@ +import gevent + +from ...context import Context +from ...provider import BaseContextProvider + + +# Greenlet attribute used to set/get the Context instance +CONTEXT_ATTR = '__datadog_context' + + +class GeventContextProvider(BaseContextProvider): + """ + Context provider that retrieves all contexts for the current asynchronous + execution. It must be used in asynchronous programming that relies + in the ``gevent`` library. Framework instrumentation that uses the + gevent WSGI server (or gevent in general), can use this provider. + """ + def _get_current_context(self): + """Helper to get the current context from the current greenlet""" + current_g = gevent.getcurrent() + if current_g is not None: + return getattr(current_g, CONTEXT_ATTR, None) + return None + + def _has_active_context(self): + """Helper to determine if we have a currently active context""" + return self._get_current_context() is not None + + def activate(self, context): + """Sets the scoped ``Context`` for the current running ``Greenlet``. + """ + current_g = gevent.getcurrent() + if current_g is not None: + setattr(current_g, CONTEXT_ATTR, context) + return context + + def active(self): + """ + Returns the scoped ``Context`` for this execution flow. The ``Context`` + uses the ``Greenlet`` class as a carrier, and everytime a greenlet + is created it receives the "parent" context. + """ + ctx = self._get_current_context() + if ctx is not None: + # return the active Context for this greenlet (if any) + return ctx + + # the Greenlet doesn't have a Context so it's created and attached + # even to the main greenlet. This is required in Distributed Tracing + # when a new arbitrary Context is provided. + current_g = gevent.getcurrent() + if current_g: + ctx = Context() + setattr(current_g, CONTEXT_ATTR, ctx) + return ctx diff --git a/ddtrace/contrib/grpc/__init__.py b/ddtrace/contrib/grpc/__init__.py new file mode 100644 index 0000000000..3d6876e198 --- /dev/null +++ b/ddtrace/contrib/grpc/__init__.py @@ -0,0 +1,57 @@ +""" +The gRPC integration traces the client and server using interceptor pattern. + +gRPC will be automatically instrumented with ``patch_all``, or when using +the ``ddtrace-run`` command. +gRPC is instrumented on import. 
To instrument gRPC manually use the +``patch`` function.:: + + import grpc + from ddtrace import patch + patch(grpc=True) + + # use grpc like usual + +To configure the gRPC integration on an per-channel basis use the +``Pin`` API:: + + import grpc + from ddtrace import Pin, patch, Tracer + + patch(grpc=True) + custom_tracer = Tracer() + + # override the pin on the client + Pin.override(grpc.Channel, service='mygrpc', tracer=custom_tracer) + with grpc.insecure_channel('localhost:50051') as channel: + # create stubs and send requests + pass + +To configure the gRPC integration on the server use the ``Pin`` API:: + + import grpc + from grpc.framework.foundation import logging_pool + + from ddtrace import Pin, patch, Tracer + + patch(grpc=True) + custom_tracer = Tracer() + + # override the pin on the server + Pin.override(grpc.Server, service='mygrpc', tracer=custom_tracer) + server = grpc.server(logging_pool.pool(2)) + server.add_insecure_port('localhost:50051') + add_MyServicer_to_server(MyServicer(), server) + server.start() +""" + + +from ...utils.importlib import require_modules + +required_modules = ['grpc'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .patch import patch, unpatch + + __all__ = ['patch', 'unpatch'] diff --git a/ddtrace/contrib/grpc/client_interceptor.py b/ddtrace/contrib/grpc/client_interceptor.py new file mode 100644 index 0000000000..be1e66efa9 --- /dev/null +++ b/ddtrace/contrib/grpc/client_interceptor.py @@ -0,0 +1,239 @@ +import collections +import grpc +from ddtrace.vendor import wrapt + +from ddtrace import config +from ddtrace.compat import to_unicode +from ddtrace.ext import SpanTypes, errors +from ...internal.logger import get_logger +from ...propagation.http import HTTPPropagator +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from . 
import constants +from .utils import parse_method_path + + +log = get_logger(__name__) + +# DEV: Follows Python interceptors RFC laid out in +# https://github.com/grpc/proposal/blob/master/L13-python-interceptors.md + +# DEV: __version__ added in v1.21.4 +# https://github.com/grpc/grpc/commit/dd4830eae80143f5b0a9a3a1a024af4cf60e7d02 + + +def create_client_interceptor(pin, host, port): + return _ClientInterceptor(pin, host, port) + + +def intercept_channel(wrapped, instance, args, kwargs): + channel = args[0] + interceptors = args[1:] + if isinstance(getattr(channel, "_interceptor", None), _ClientInterceptor): + dd_interceptor = channel._interceptor + base_channel = getattr(channel, "_channel", None) + if base_channel: + new_channel = wrapped(channel._channel, *interceptors) + return grpc.intercept_channel(new_channel, dd_interceptor) + + return wrapped(*args, **kwargs) + + +class _ClientCallDetails( + collections.namedtuple("_ClientCallDetails", ("method", "timeout", "metadata", "credentials")), + grpc.ClientCallDetails, +): + pass + + +def _future_done_callback(span): + def func(response): + try: + # pull out response code from gRPC response to use both for `grpc.status.code` + # tag and the error type tag if the response is an exception + response_code = response.code() + # cast code to unicode for tags + status_code = to_unicode(response_code) + span.set_tag(constants.GRPC_STATUS_CODE_KEY, status_code) + + if response_code != grpc.StatusCode.OK: + _handle_error(span, response, status_code) + finally: + span.finish() + + return func + + +def _handle_response(span, response): + if isinstance(response, grpc.Future): + response.add_done_callback(_future_done_callback(span)) + + +def _handle_error(span, response_error, status_code): + # response_error should be a grpc.Future and so we expect to have cancelled(), + # exception() and traceback() methods if a computation has resulted in an + # exception being raised + if ( + not callable(getattr(response_error, "cancelled", None)) + and not callable(getattr(response_error, "exception", None)) + and not callable(getattr(response_error, "traceback", None)) + ): + return + + if response_error.cancelled(): + # handle cancelled futures separately to avoid raising grpc.FutureCancelledError + span.error = 1 + exc_val = to_unicode(response_error.details()) + span.set_tag(errors.ERROR_MSG, exc_val) + span.set_tag(errors.ERROR_TYPE, status_code) + return + + exception = response_error.exception() + traceback = response_error.traceback() + + if exception is not None and traceback is not None: + span.error = 1 + if isinstance(exception, grpc.RpcError): + # handle internal gRPC exceptions separately to get status code and + # details as tags properly + exc_val = to_unicode(response_error.details()) + span.set_tag(errors.ERROR_MSG, exc_val) + span.set_tag(errors.ERROR_TYPE, status_code) + span.set_tag(errors.ERROR_STACK, traceback) + else: + exc_type = type(exception) + span.set_exc_info(exc_type, exception, traceback) + status_code = to_unicode(response_error.code()) + + +class _WrappedResponseCallFuture(wrapt.ObjectProxy): + def __init__(self, wrapped, span): + super(_WrappedResponseCallFuture, self).__init__(wrapped) + self._span = span + + def __iter__(self): + return self + + def __next__(self): + try: + return next(self.__wrapped__) + except StopIteration: + # at end of iteration handle response status from wrapped future + _handle_response(self._span, self.__wrapped__) + raise + except grpc.RpcError as rpc_error: + # DEV: grpcio<1.18.0 grpc.RpcError 
is raised rather than returned as response + # https://github.com/grpc/grpc/commit/8199aff7a66460fbc4e9a82ade2e95ef076fd8f9 + # handle as a response + _handle_response(self._span, rpc_error) + raise + except Exception: + # DEV: added for safety though should not be reached since wrapped response + log.debug("unexpected non-grpc exception raised, closing open span", exc_info=True) + self._span.set_traceback() + self._span.finish() + raise + + def next(self): + return self.__next__() + + +class _ClientInterceptor( + grpc.UnaryUnaryClientInterceptor, + grpc.UnaryStreamClientInterceptor, + grpc.StreamUnaryClientInterceptor, + grpc.StreamStreamClientInterceptor, +): + def __init__(self, pin, host, port): + self._pin = pin + self._host = host + self._port = port + + def _intercept_client_call(self, method_kind, client_call_details): + tracer = self._pin.tracer + + span = tracer.trace( + "grpc", span_type=SpanTypes.GRPC, service=self._pin.service, resource=client_call_details.method, + ) + + # tags for method details + method_path = client_call_details.method + method_package, method_service, method_name = parse_method_path(method_path) + span.set_tag(constants.GRPC_METHOD_PATH_KEY, method_path) + span.set_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package) + span.set_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service) + span.set_tag(constants.GRPC_METHOD_NAME_KEY, method_name) + span.set_tag(constants.GRPC_METHOD_KIND_KEY, method_kind) + span.set_tag(constants.GRPC_HOST_KEY, self._host) + span.set_tag(constants.GRPC_PORT_KEY, self._port) + span.set_tag(constants.GRPC_SPAN_KIND_KEY, constants.GRPC_SPAN_KIND_VALUE_CLIENT) + + sample_rate = config.grpc.get_analytics_sample_rate() + if sample_rate is not None: + span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate) + + # inject tags from pin + if self._pin.tags: + span.set_tags(self._pin.tags) + + # propagate distributed tracing headers if available + headers = {} + if config.grpc.distributed_tracing_enabled: + propagator = HTTPPropagator() + propagator.inject(span.context, headers) + + metadata = [] + if client_call_details.metadata is not None: + metadata = list(client_call_details.metadata) + metadata.extend(headers.items()) + + client_call_details = _ClientCallDetails( + client_call_details.method, client_call_details.timeout, metadata, client_call_details.credentials, + ) + + return span, client_call_details + + def intercept_unary_unary(self, continuation, client_call_details, request): + span, client_call_details = self._intercept_client_call(constants.GRPC_METHOD_KIND_UNARY, client_call_details,) + try: + response = continuation(client_call_details, request) + _handle_response(span, response) + except grpc.RpcError as rpc_error: + # DEV: grpcio<1.18.0 grpc.RpcError is raised rather than returned as response + # https://github.com/grpc/grpc/commit/8199aff7a66460fbc4e9a82ade2e95ef076fd8f9 + # handle as a response + _handle_response(span, rpc_error) + raise + + return response + + def intercept_unary_stream(self, continuation, client_call_details, request): + span, client_call_details = self._intercept_client_call( + constants.GRPC_METHOD_KIND_SERVER_STREAMING, client_call_details, + ) + response_iterator = continuation(client_call_details, request) + response_iterator = _WrappedResponseCallFuture(response_iterator, span) + return response_iterator + + def intercept_stream_unary(self, continuation, client_call_details, request_iterator): + span, client_call_details = self._intercept_client_call( + 
constants.GRPC_METHOD_KIND_CLIENT_STREAMING, client_call_details, + ) + try: + response = continuation(client_call_details, request_iterator) + _handle_response(span, response) + except grpc.RpcError as rpc_error: + # DEV: grpcio<1.18.0 grpc.RpcError is raised rather than returned as response + # https://github.com/grpc/grpc/commit/8199aff7a66460fbc4e9a82ade2e95ef076fd8f9 + # handle as a response + _handle_response(span, rpc_error) + raise + + return response + + def intercept_stream_stream(self, continuation, client_call_details, request_iterator): + span, client_call_details = self._intercept_client_call( + constants.GRPC_METHOD_KIND_BIDI_STREAMING, client_call_details, + ) + response_iterator = continuation(client_call_details, request_iterator) + response_iterator = _WrappedResponseCallFuture(response_iterator, span) + return response_iterator diff --git a/ddtrace/contrib/grpc/constants.py b/ddtrace/contrib/grpc/constants.py new file mode 100644 index 0000000000..9a11cf81e0 --- /dev/null +++ b/ddtrace/contrib/grpc/constants.py @@ -0,0 +1,24 @@ +import grpc + + +GRPC_PIN_MODULE_SERVER = grpc.Server +GRPC_PIN_MODULE_CLIENT = grpc.Channel +GRPC_METHOD_PATH_KEY = 'grpc.method.path' +GRPC_METHOD_PACKAGE_KEY = 'grpc.method.package' +GRPC_METHOD_SERVICE_KEY = 'grpc.method.service' +GRPC_METHOD_NAME_KEY = 'grpc.method.name' +GRPC_METHOD_KIND_KEY = 'grpc.method.kind' +GRPC_STATUS_CODE_KEY = 'grpc.status.code' +GRPC_REQUEST_METADATA_PREFIX_KEY = 'grpc.request.metadata.' +GRPC_RESPONSE_METADATA_PREFIX_KEY = 'grpc.response.metadata.' +GRPC_HOST_KEY = 'grpc.host' +GRPC_PORT_KEY = 'grpc.port' +GRPC_SPAN_KIND_KEY = 'span.kind' +GRPC_SPAN_KIND_VALUE_CLIENT = 'client' +GRPC_SPAN_KIND_VALUE_SERVER = 'server' +GRPC_METHOD_KIND_UNARY = 'unary' +GRPC_METHOD_KIND_CLIENT_STREAMING = 'client_streaming' +GRPC_METHOD_KIND_SERVER_STREAMING = 'server_streaming' +GRPC_METHOD_KIND_BIDI_STREAMING = 'bidi_streaming' +GRPC_SERVICE_SERVER = 'grpc-server' +GRPC_SERVICE_CLIENT = 'grpc-client' diff --git a/ddtrace/contrib/grpc/patch.py b/ddtrace/contrib/grpc/patch.py new file mode 100644 index 0000000000..0c00c77fe1 --- /dev/null +++ b/ddtrace/contrib/grpc/patch.py @@ -0,0 +1,126 @@ +import grpc +import os + +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w +from ddtrace import config, Pin + +from ...utils.wrappers import unwrap as _u + +from . 
import constants +from .client_interceptor import create_client_interceptor, intercept_channel +from .server_interceptor import create_server_interceptor + + +config._add('grpc_server', dict( + service_name=os.environ.get('DATADOG_SERVICE_NAME', constants.GRPC_SERVICE_SERVER), + distributed_tracing_enabled=True, +)) + +# TODO[tbutt]: keeping name for client config unchanged to maintain backwards +# compatibility but should change in future +config._add('grpc', dict( + service_name='{}-{}'.format( + os.environ.get('DATADOG_SERVICE_NAME'), constants.GRPC_SERVICE_CLIENT + ) if os.environ.get('DATADOG_SERVICE_NAME') else constants.GRPC_SERVICE_CLIENT, + distributed_tracing_enabled=True, +)) + + +def patch(): + _patch_client() + _patch_server() + + +def unpatch(): + _unpatch_client() + _unpatch_server() + + +def _patch_client(): + if getattr(constants.GRPC_PIN_MODULE_CLIENT, '__datadog_patch', False): + return + setattr(constants.GRPC_PIN_MODULE_CLIENT, '__datadog_patch', True) + + Pin(service=config.grpc.service_name).onto(constants.GRPC_PIN_MODULE_CLIENT) + + _w('grpc', 'insecure_channel', _client_channel_interceptor) + _w('grpc', 'secure_channel', _client_channel_interceptor) + _w('grpc', 'intercept_channel', intercept_channel) + + +def _unpatch_client(): + if not getattr(constants.GRPC_PIN_MODULE_CLIENT, '__datadog_patch', False): + return + setattr(constants.GRPC_PIN_MODULE_CLIENT, '__datadog_patch', False) + + pin = Pin.get_from(constants.GRPC_PIN_MODULE_CLIENT) + if pin: + pin.remove_from(constants.GRPC_PIN_MODULE_CLIENT) + + _u(grpc, 'secure_channel') + _u(grpc, 'insecure_channel') + + +def _patch_server(): + if getattr(constants.GRPC_PIN_MODULE_SERVER, '__datadog_patch', False): + return + setattr(constants.GRPC_PIN_MODULE_SERVER, '__datadog_patch', True) + + Pin(service=config.grpc_server.service_name).onto(constants.GRPC_PIN_MODULE_SERVER) + + _w('grpc', 'server', _server_constructor_interceptor) + + +def _unpatch_server(): + if not getattr(constants.GRPC_PIN_MODULE_SERVER, '__datadog_patch', False): + return + setattr(constants.GRPC_PIN_MODULE_SERVER, '__datadog_patch', False) + + pin = Pin.get_from(constants.GRPC_PIN_MODULE_SERVER) + if pin: + pin.remove_from(constants.GRPC_PIN_MODULE_SERVER) + + _u(grpc, 'server') + + +def _client_channel_interceptor(wrapped, instance, args, kwargs): + channel = wrapped(*args, **kwargs) + + pin = Pin.get_from(constants.GRPC_PIN_MODULE_CLIENT) + if not pin or not pin.enabled(): + return channel + + (host, port) = _parse_target_from_arguments(args, kwargs) + + interceptor_function = create_client_interceptor(pin, host, port) + return grpc.intercept_channel(channel, interceptor_function) + + +def _server_constructor_interceptor(wrapped, instance, args, kwargs): + # DEV: we clone the pin on the grpc module and configure it for the server + # interceptor + + pin = Pin.get_from(constants.GRPC_PIN_MODULE_SERVER) + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + + interceptor = create_server_interceptor(pin) + + # DEV: Inject our tracing interceptor first in the list of interceptors + if 'interceptors' in kwargs: + kwargs['interceptors'] = (interceptor,) + tuple(kwargs['interceptors']) + else: + kwargs['interceptors'] = (interceptor,) + + return wrapped(*args, **kwargs) + + +def _parse_target_from_arguments(args, kwargs): + if 'target' in kwargs: + target = kwargs['target'] + else: + target = args[0] + + split = target.rsplit(':', 2) + + return (split[0], split[1] if len(split) > 1 else None) diff --git 
a/ddtrace/contrib/grpc/server_interceptor.py b/ddtrace/contrib/grpc/server_interceptor.py
new file mode 100644
index 0000000000..dbce643543
--- /dev/null
+++ b/ddtrace/contrib/grpc/server_interceptor.py
@@ -0,0 +1,146 @@
+import grpc
+from ddtrace.vendor import wrapt
+
+from ddtrace import config
+from ddtrace.ext import errors
+from ddtrace.compat import to_unicode
+
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...ext import SpanTypes
+from ...propagation.http import HTTPPropagator
+from . import constants
+from .utils import parse_method_path
+
+
+def create_server_interceptor(pin):
+    def interceptor_function(continuation, handler_call_details):
+        if not pin.enabled():
+            return continuation(handler_call_details)
+
+        rpc_method_handler = continuation(handler_call_details)
+        return _TracedRpcMethodHandler(pin, handler_call_details, rpc_method_handler)
+
+    return _ServerInterceptor(interceptor_function)
+
+
+def _handle_server_exception(server_context, span):
+    if server_context is not None and \
+            hasattr(server_context, '_state') and \
+            server_context._state is not None:
+        code = to_unicode(server_context._state.code)
+        details = to_unicode(server_context._state.details)
+        span.error = 1
+        span.set_tag(errors.ERROR_MSG, details)
+        span.set_tag(errors.ERROR_TYPE, code)
+
+
+def _wrap_response_iterator(response_iterator, server_context, span):
+    try:
+        for response in response_iterator:
+            yield response
+    except Exception:
+        span.set_traceback()
+        _handle_server_exception(server_context, span)
+        raise
+    finally:
+        span.finish()
+
+
+class _TracedRpcMethodHandler(wrapt.ObjectProxy):
+    def __init__(self, pin, handler_call_details, wrapped):
+        super(_TracedRpcMethodHandler, self).__init__(wrapped)
+        self._pin = pin
+        self._handler_call_details = handler_call_details
+
+    def _fn(self, method_kind, behavior, args, kwargs):
+        if config.grpc_server.distributed_tracing_enabled:
+            headers = dict(self._handler_call_details.invocation_metadata)
+            propagator = HTTPPropagator()
+            context = propagator.extract(headers)
+
+            if context.trace_id:
+                self._pin.tracer.context_provider.activate(context)
+
+        tracer = self._pin.tracer
+
+        span = tracer.trace(
+            'grpc',
+            span_type=SpanTypes.GRPC,
+            service=self._pin.service,
+            resource=self._handler_call_details.method,
+        )
+
+        method_path = self._handler_call_details.method
+        method_package, method_service, method_name = parse_method_path(method_path)
+        span.set_tag(constants.GRPC_METHOD_PATH_KEY, method_path)
+        span.set_tag(constants.GRPC_METHOD_PACKAGE_KEY, method_package)
+        span.set_tag(constants.GRPC_METHOD_SERVICE_KEY, method_service)
+        span.set_tag(constants.GRPC_METHOD_NAME_KEY, method_name)
+        span.set_tag(constants.GRPC_METHOD_KIND_KEY, method_kind)
+        span.set_tag(constants.GRPC_SPAN_KIND_KEY, constants.GRPC_SPAN_KIND_VALUE_SERVER)
+
+        sample_rate = config.grpc_server.get_analytics_sample_rate()
+        if sample_rate is not None:
+            span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)
+
+        # access server context by taking second argument as server context
+        # if not found, skip using context to tag span with server state information
+        server_context = args[1] if isinstance(args[1], grpc.ServicerContext) else None
+
+        if self._pin.tags:
+            span.set_tags(self._pin.tags)
+
+        try:
+            response_or_iterator = behavior(*args, **kwargs)
+
+            if self.__wrapped__.response_streaming:
+                response_or_iterator = _wrap_response_iterator(response_or_iterator, server_context, span)
+        except Exception:
+            span.set_traceback()
+            _handle_server_exception(server_context,
span) + raise + finally: + if not self.__wrapped__.response_streaming: + span.finish() + + return response_or_iterator + + def unary_unary(self, *args, **kwargs): + return self._fn( + constants.GRPC_METHOD_KIND_UNARY, + self.__wrapped__.unary_unary, + args, + kwargs + ) + + def unary_stream(self, *args, **kwargs): + return self._fn( + constants.GRPC_METHOD_KIND_SERVER_STREAMING, + self.__wrapped__.unary_stream, + args, + kwargs + ) + + def stream_unary(self, *args, **kwargs): + return self._fn( + constants.GRPC_METHOD_KIND_CLIENT_STREAMING, + self.__wrapped__.stream_unary, + args, + kwargs + ) + + def stream_stream(self, *args, **kwargs): + return self._fn( + constants.GRPC_METHOD_KIND_BIDI_STREAMING, + self.__wrapped__.stream_stream, + args, + kwargs + ) + + +class _ServerInterceptor(grpc.ServerInterceptor): + def __init__(self, interceptor_function): + self._fn = interceptor_function + + def intercept_service(self, continuation, handler_call_details): + return self._fn(continuation, handler_call_details) diff --git a/ddtrace/contrib/grpc/utils.py b/ddtrace/contrib/grpc/utils.py new file mode 100644 index 0000000000..568d118c25 --- /dev/null +++ b/ddtrace/contrib/grpc/utils.py @@ -0,0 +1,12 @@ +def parse_method_path(method_path): + """ Returns (package, service, method) tuple from parsing method path """ + # unpack method path based on "/{package}.{service}/{method}" + # first remove leading "/" as unnecessary + package_service, method_name = method_path.lstrip('/').rsplit('/', 1) + + # {package} is optional + package_service = package_service.rsplit('.', 1) + if len(package_service) == 2: + return package_service[0], package_service[1], method_name + + return None, package_service[0], method_name diff --git a/ddtrace/contrib/httplib/__init__.py b/ddtrace/contrib/httplib/__init__.py new file mode 100644 index 0000000000..0d7883f639 --- /dev/null +++ b/ddtrace/contrib/httplib/__init__.py @@ -0,0 +1,33 @@ +""" +Patch the built-in ``httplib``/``http.client`` libraries to trace all HTTP calls. + + +Usage:: + + # Patch all supported modules/functions + from ddtrace import patch + patch(httplib=True) + + # Python 2 + import httplib + import urllib + + resp = urllib.urlopen('http://www.datadog.com/') + + # Python 3 + import http.client + import urllib.request + + resp = urllib.request.urlopen('http://www.datadog.com/') + +``httplib`` spans do not include a default service name. Before HTTP calls are +made, ensure a parent span has been started with a service name to be used for +spans generated from those calls:: + + with tracer.trace('main', service='my-httplib-operation'): + resp = urllib.request.urlopen('http://www.datadog.com/') + +:ref:`Headers tracing ` is supported for this integration. 
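The headers tracing mentioned above is driven by the whitelist that ``store_request_headers``/``store_response_headers`` consult on the integration config. A hedged sketch of enabling it, assuming the ``ddtrace.config`` whitelist API (``config.httplib.http.trace_headers`` is the assumed entry point, not shown in this patch)::

    from ddtrace import config, patch

    patch(httplib=True)

    # record these request/response headers as tags on httplib/http.client spans
    config.httplib.http.trace_headers(['user-agent', 'content-type'])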
+""" +from .patch import patch, unpatch +__all__ = ['patch', 'unpatch'] diff --git a/ddtrace/contrib/httplib/patch.py b/ddtrace/contrib/httplib/patch.py new file mode 100644 index 0000000000..61297cbd74 --- /dev/null +++ b/ddtrace/contrib/httplib/patch.py @@ -0,0 +1,138 @@ +# Third party +from ddtrace.vendor import wrapt + +# Project +from ...compat import PY2, httplib, parse +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...ext import SpanTypes, http as ext_http +from ...http import store_request_headers, store_response_headers +from ...internal.logger import get_logger +from ...pin import Pin +from ...settings import config +from ...utils.wrappers import unwrap as _u + +span_name = 'httplib.request' if PY2 else 'http.client.request' + +log = get_logger(__name__) + + +def _wrap_init(func, instance, args, kwargs): + Pin(app='httplib', service=None).onto(instance) + return func(*args, **kwargs) + + +def _wrap_getresponse(func, instance, args, kwargs): + # Use any attached tracer if available, otherwise use the global tracer + pin = Pin.get_from(instance) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + resp = None + try: + resp = func(*args, **kwargs) + return resp + finally: + try: + # Get the span attached to this instance, if available + span = getattr(instance, '_datadog_span', None) + if span: + if resp: + span.set_tag(ext_http.STATUS_CODE, resp.status) + span.error = int(500 <= resp.status) + store_response_headers(dict(resp.getheaders()), span, config.httplib) + + span.finish() + delattr(instance, '_datadog_span') + except Exception: + log.debug('error applying request tags', exc_info=True) + + +def _wrap_putrequest(func, instance, args, kwargs): + # Use any attached tracer if available, otherwise use the global tracer + pin = Pin.get_from(instance) + if should_skip_request(pin, instance): + return func(*args, **kwargs) + + try: + # Create a new span and attach to this instance (so we can retrieve/update/close later on the response) + span = pin.tracer.trace(span_name, span_type=SpanTypes.HTTP) + setattr(instance, '_datadog_span', span) + + method, path = args[:2] + scheme = 'https' if isinstance(instance, httplib.HTTPSConnection) else 'http' + port = ':{port}'.format(port=instance.port) + + if (scheme == 'http' and instance.port == 80) or (scheme == 'https' and instance.port == 443): + port = '' + url = '{scheme}://{host}{port}{path}'.format(scheme=scheme, host=instance.host, port=port, path=path) + + # sanitize url + parsed = parse.urlparse(url) + sanitized_url = parse.urlunparse(( + parsed.scheme, + parsed.netloc, + parsed.path, + parsed.params, + None, # drop query + parsed.fragment + )) + + span.set_tag(ext_http.URL, sanitized_url) + span.set_tag(ext_http.METHOD, method) + if config.httplib.trace_query_string: + span.set_tag(ext_http.QUERY_STRING, parsed.query) + + # set analytics sample rate + span.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.httplib.get_analytics_sample_rate() + ) + except Exception: + log.debug('error applying request tags', exc_info=True) + return func(*args, **kwargs) + + +def _wrap_putheader(func, instance, args, kwargs): + span = getattr(instance, '_datadog_span', None) + if span: + store_request_headers({args[0]: args[1]}, span, config.httplib) + + return func(*args, **kwargs) + + +def should_skip_request(pin, request): + """Helper to determine if the provided request should be traced""" + if not pin or not pin.enabled(): + return True + + api = pin.tracer.writer.api + return request.host == api.hostname and request.port 
+
+
+def patch():
+    """ patch the built-in urllib/httplib/http.client methods for tracing"""
+    if getattr(httplib, '__datadog_patch', False):
+        return
+    setattr(httplib, '__datadog_patch', True)
+
+    # Patch the desired methods
+    setattr(httplib.HTTPConnection, '__init__',
+            wrapt.FunctionWrapper(httplib.HTTPConnection.__init__, _wrap_init))
+    setattr(httplib.HTTPConnection, 'getresponse',
+            wrapt.FunctionWrapper(httplib.HTTPConnection.getresponse, _wrap_getresponse))
+    setattr(httplib.HTTPConnection, 'putrequest',
+            wrapt.FunctionWrapper(httplib.HTTPConnection.putrequest, _wrap_putrequest))
+    setattr(httplib.HTTPConnection, 'putheader',
+            wrapt.FunctionWrapper(httplib.HTTPConnection.putheader, _wrap_putheader))
+
+
+def unpatch():
+    """ unpatch any previously patched modules """
+    if not getattr(httplib, '__datadog_patch', False):
+        return
+    setattr(httplib, '__datadog_patch', False)
+
+    _u(httplib.HTTPConnection, '__init__')
+    _u(httplib.HTTPConnection, 'getresponse')
+    _u(httplib.HTTPConnection, 'putrequest')
+    _u(httplib.HTTPConnection, 'putheader')
diff --git a/ddtrace/contrib/jinja2/__init__.py b/ddtrace/contrib/jinja2/__init__.py
new file mode 100644
index 0000000000..5a39d248d7
--- /dev/null
+++ b/ddtrace/contrib/jinja2/__init__.py
@@ -0,0 +1,42 @@
+"""
+The ``jinja2`` integration traces template loading, compilation and rendering.
+Auto instrumentation is available using the ``patch`` function. The following is an example::
+
+    from ddtrace import patch
+    from jinja2 import Environment, FileSystemLoader
+
+    patch(jinja2=True)
+
+    env = Environment(
+        loader=FileSystemLoader("templates")
+    )
+    template = env.get_template('mytemplate.html')
+
+
+The library can be configured globally and per instance, using the Configuration API::
+
+    from ddtrace import config
+
+    # Change service name globally
+    config.jinja2['service_name'] = 'jinja-templates'
+
+    # change the service name only for this environment
+    cfg = config.get_from(env)
+    cfg['service_name'] = 'jinja-templates'
+
+By default, the service name is set to None, so it is inherited from the parent span.
+If there is no parent span and the service name is not overridden, the agent will drop the traces.
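+
+For example, rendering while a parent span is active makes the jinja2 spans
+report with the parent's service (a minimal sketch; ``template`` is the object
+created in the example above)::
+
+    from ddtrace import tracer
+
+    with tracer.trace('web.request', service='my-web-app'):
+        # the 'jinja2.render' span inherits the 'my-web-app' service
+        template.render(name='world')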
+""" +from ...utils.importlib import require_modules + + +required_modules = ['jinja2'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .patch import patch, unpatch + + __all__ = [ + 'patch', + 'unpatch', + ] diff --git a/ddtrace/contrib/jinja2/constants.py b/ddtrace/contrib/jinja2/constants.py new file mode 100644 index 0000000000..101c4d144d --- /dev/null +++ b/ddtrace/contrib/jinja2/constants.py @@ -0,0 +1 @@ +DEFAULT_TEMPLATE_NAME = '' diff --git a/ddtrace/contrib/jinja2/patch.py b/ddtrace/contrib/jinja2/patch.py new file mode 100644 index 0000000000..988c33db9e --- /dev/null +++ b/ddtrace/contrib/jinja2/patch.py @@ -0,0 +1,93 @@ +import jinja2 +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +from ddtrace import config + +from ...ext import SpanTypes +from ...utils.formats import get_env +from ...pin import Pin +from ...utils.wrappers import unwrap as _u +from .constants import DEFAULT_TEMPLATE_NAME + + +# default settings +config._add('jinja2', { + 'service_name': get_env('jinja2', 'service_name', None), +}) + + +def patch(): + if getattr(jinja2, '__datadog_patch', False): + # already patched + return + setattr(jinja2, '__datadog_patch', True) + Pin( + service=config.jinja2['service_name'], + _config=config.jinja2, + ).onto(jinja2.environment.Environment) + _w(jinja2, 'environment.Template.render', _wrap_render) + _w(jinja2, 'environment.Template.generate', _wrap_render) + _w(jinja2, 'environment.Environment.compile', _wrap_compile) + _w(jinja2, 'environment.Environment._load_template', _wrap_load_template) + + +def unpatch(): + if not getattr(jinja2, '__datadog_patch', False): + return + setattr(jinja2, '__datadog_patch', False) + _u(jinja2.Template, 'render') + _u(jinja2.Template, 'generate') + _u(jinja2.Environment, 'compile') + _u(jinja2.Environment, '_load_template') + + +def _wrap_render(wrapped, instance, args, kwargs): + """Wrap `Template.render()` or `Template.generate()` + """ + pin = Pin.get_from(instance.environment) + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + + template_name = instance.name or DEFAULT_TEMPLATE_NAME + with pin.tracer.trace('jinja2.render', pin.service, span_type=SpanTypes.TEMPLATE) as span: + try: + return wrapped(*args, **kwargs) + finally: + span.resource = template_name + span.set_tag('jinja2.template_name', template_name) + + +def _wrap_compile(wrapped, instance, args, kwargs): + pin = Pin.get_from(instance) + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + + if len(args) > 1: + template_name = args[1] + else: + template_name = kwargs.get('name', DEFAULT_TEMPLATE_NAME) + + with pin.tracer.trace('jinja2.compile', pin.service, span_type=SpanTypes.TEMPLATE) as span: + try: + return wrapped(*args, **kwargs) + finally: + span.resource = template_name + span.set_tag('jinja2.template_name', template_name) + + +def _wrap_load_template(wrapped, instance, args, kwargs): + pin = Pin.get_from(instance) + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + + template_name = kwargs.get('name', args[0]) + with pin.tracer.trace('jinja2.load', pin.service, span_type=SpanTypes.TEMPLATE) as span: + template = None + try: + template = wrapped(*args, **kwargs) + return template + finally: + span.resource = template_name + span.set_tag('jinja2.template_name', template_name) + if template: + span.set_tag('jinja2.template_path', template.filename) diff --git a/ddtrace/contrib/kombu/__init__.py b/ddtrace/contrib/kombu/__init__.py new file mode 100644 
index 0000000000..a29639b9b8
--- /dev/null
+++ b/ddtrace/contrib/kombu/__init__.py
@@ -0,0 +1,43 @@
+"""Instrument kombu to report AMQP messaging.
+
+``patch_all`` will not automatically patch your Kombu client to make it work, as this would conflict with the
+Celery integration. You must specifically request kombu be patched, as in the example below.
+
+Note: To permit distributed tracing for the kombu integration you must enable the tracer with priority
+sampling. Refer to the documentation here:
+http://pypi.datadoghq.com/trace/docs/advanced_usage.html#priority-sampling
+
+Without distributed tracing enabled, spans generated by the kombu integration might be dropped
+even though the rest of the trace is kept.
+::
+
+    from ddtrace import Pin, patch
+    import kombu
+
+    # If not patched yet, you can patch kombu specifically
+    patch(kombu=True)
+
+    # This will report a span with the default settings
+    conn = kombu.Connection("amqp://guest:guest@127.0.0.1:5672//")
+    conn.connect()
+    task_queue = kombu.Queue('tasks', kombu.Exchange('tasks'), routing_key='tasks')
+    to_publish = {'hello': 'world'}
+    producer = conn.Producer()
+    producer.publish(to_publish,
+                     exchange=task_queue.exchange,
+                     routing_key=task_queue.routing_key,
+                     declare=[task_queue])
+
+    # Use a pin to specify metadata related to this producer
+    Pin.override(producer, service='kombu-producer')
+"""
+
+from ...utils.importlib import require_modules
+
+required_modules = ['kombu', 'kombu.messaging']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch
+
+        __all__ = ['patch']
diff --git a/ddtrace/contrib/kombu/constants.py b/ddtrace/contrib/kombu/constants.py
new file mode 100644
index 0000000000..10a67c5829
--- /dev/null
+++ b/ddtrace/contrib/kombu/constants.py
@@ -0,0 +1 @@
+DEFAULT_SERVICE = 'kombu'
diff --git a/ddtrace/contrib/kombu/patch.py b/ddtrace/contrib/kombu/patch.py
new file mode 100644
index 0000000000..a82079f3e0
--- /dev/null
+++ b/ddtrace/contrib/kombu/patch.py
@@ -0,0 +1,118 @@
+# 3p
+import kombu
+from ddtrace.vendor import wrapt
+
+# project
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...ext import SpanTypes, kombu as kombux
+from ...pin import Pin
+from ...propagation.http import HTTPPropagator
+from ...settings import config
+from ...utils.formats import get_env
+from ...utils.wrappers import unwrap
+
+from .constants import DEFAULT_SERVICE
+from .utils import (
+    get_exchange_from_args,
+    get_body_length_from_args,
+    get_routing_key_from_args,
+    extract_conn_tags,
+    HEADER_POS
+)
+
+# kombu default settings
+config._add('kombu', {
+    'service_name': get_env('kombu', 'service_name', DEFAULT_SERVICE)
+})
+
+propagator = HTTPPropagator()
+
+
+def patch():
+    """Patch the instrumented methods
+
+    This duplication doesn't look nice. The nicer alternative is to use an ObjectProxy on top
+    of Kombu. However, it means that any ``from kombu import Connection`` won't be instrumented.
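+
+    For comparison, a rough sketch of that rejected alternative (hypothetical code,
+    not part of the integration)::
+
+        from ddtrace.vendor import wrapt
+
+        class _ProxiedConnection(wrapt.ObjectProxy):
+            # every factory method would need re-wrapping by hand, and any
+            # 'from kombu import Connection' bound before patching would
+            # still hand out the unpatched class
+            def Producer(self, *args, **kwargs):
+                return self.__wrapped__.Producer(*args, **kwargs)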
+ """ + if getattr(kombu, '_datadog_patch', False): + return + setattr(kombu, '_datadog_patch', True) + + _w = wrapt.wrap_function_wrapper + # We wrap the _publish method because the publish method: + # * defines defaults in its kwargs + # * potentially overrides kwargs with values from self + # * extracts/normalizes things like exchange + _w('kombu', 'Producer._publish', traced_publish) + _w('kombu', 'Consumer.receive', traced_receive) + Pin( + service=config.kombu['service_name'], + app='kombu' + ).onto(kombu.messaging.Producer) + + Pin( + service=config.kombu['service_name'], + app='kombu' + ).onto(kombu.messaging.Consumer) + + +def unpatch(): + if getattr(kombu, '_datadog_patch', False): + setattr(kombu, '_datadog_patch', False) + unwrap(kombu.Producer, '_publish') + unwrap(kombu.Consumer, 'receive') + +# +# tracing functions +# + + +def traced_receive(func, instance, args, kwargs): + pin = Pin.get_from(instance) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + # Signature only takes 2 args: (body, message) + message = args[1] + context = propagator.extract(message.headers) + # only need to active the new context if something was propagated + if context.trace_id: + pin.tracer.context_provider.activate(context) + with pin.tracer.trace(kombux.RECEIVE_NAME, service=pin.service, span_type=SpanTypes.WORKER) as s: + # run the command + exchange = message.delivery_info['exchange'] + s.resource = exchange + s.set_tag(kombux.EXCHANGE, exchange) + + s.set_tags(extract_conn_tags(message.channel.connection)) + s.set_tag(kombux.ROUTING_KEY, message.delivery_info['routing_key']) + # set analytics sample rate + s.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.kombu.get_analytics_sample_rate() + ) + return func(*args, **kwargs) + + +def traced_publish(func, instance, args, kwargs): + pin = Pin.get_from(instance) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + with pin.tracer.trace(kombux.PUBLISH_NAME, service=pin.service, span_type=SpanTypes.WORKER) as s: + exchange_name = get_exchange_from_args(args) + s.resource = exchange_name + s.set_tag(kombux.EXCHANGE, exchange_name) + if pin.tags: + s.set_tags(pin.tags) + s.set_tag(kombux.ROUTING_KEY, get_routing_key_from_args(args)) + s.set_tags(extract_conn_tags(instance.channel.connection)) + s.set_metric(kombux.BODY_LEN, get_body_length_from_args(args)) + # set analytics sample rate + s.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.kombu.get_analytics_sample_rate() + ) + # run the command + propagator.inject(s.context, args[HEADER_POS]) + return func(*args, **kwargs) diff --git a/ddtrace/contrib/kombu/utils.py b/ddtrace/contrib/kombu/utils.py new file mode 100644 index 0000000000..af81e0c299 --- /dev/null +++ b/ddtrace/contrib/kombu/utils.py @@ -0,0 +1,47 @@ +""" +Some utils used by the dogtrace kombu integration +""" +from ...ext import kombu as kombux, net + +PUBLISH_BODY_IDX = 0 +PUBLISH_ROUTING_KEY = 6 +PUBLISH_EXCHANGE_IDX = 9 + +HEADER_POS = 4 + + +def extract_conn_tags(connection): + """ Transform kombu conn info into dogtrace metas """ + try: + host, port = connection.host.split(':') + return { + net.TARGET_HOST: host, + net.TARGET_PORT: port, + kombux.VHOST: connection.virtual_host, + } + except AttributeError: + # Unlikely that we don't have .host or .virtual_host but let's not die over it + return {} + + +def get_exchange_from_args(args): + """Extract the exchange + + The publish method extracts the name and hands that off to _publish (what we patch) + """ + + return args[PUBLISH_EXCHANGE_IDX] + + +def 
get_routing_key_from_args(args): + """Extract the routing key""" + + name = args[PUBLISH_ROUTING_KEY] + return name + + +def get_body_length_from_args(args): + """Extract the length of the body""" + + length = len(args[PUBLISH_BODY_IDX]) + return length diff --git a/ddtrace/contrib/logging/__init__.py b/ddtrace/contrib/logging/__init__.py new file mode 100644 index 0000000000..1882463d30 --- /dev/null +++ b/ddtrace/contrib/logging/__init__.py @@ -0,0 +1,66 @@ +""" +Datadog APM traces can be integrated with Logs by first having the tracing +library patch the standard library ``logging`` module and updating the log +formatter used by an application. This feature enables you to inject the current +trace information into a log entry. + +Before the trace information can be injected into logs, the formatter has to be +updated to include ``dd.trace_id`` and ``dd.span_id`` attributes from the log +record. The integration with Logs occurs as long as the log entry includes +``dd.trace_id=%(dd.trace_id)s`` and ``dd.span_id=%(dd.span_id)s``. + +ddtrace-run +----------- + +When using ``ddtrace-run``, enable patching by setting the environment variable +``DD_LOGS_INJECTION=true``. The logger by default will have a format that +includes trace information:: + + import logging + from ddtrace import tracer + + log = logging.getLogger() + log.level = logging.INFO + + + @tracer.wrap() + def hello(): + log.info('Hello, World!') + + hello() + +Manual Instrumentation +---------------------- + +If you prefer to instrument manually, patch the logging library then update the +log formatter as in the following example:: + + from ddtrace import patch_all; patch_all(logging=True) + import logging + from ddtrace import tracer + + FORMAT = ('%(asctime)s %(levelname)s [%(name)s] [%(filename)s:%(lineno)d] ' + '[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s] ' + '- %(message)s') + logging.basicConfig(format=FORMAT) + log = logging.getLogger() + log.level = logging.INFO + + + @tracer.wrap() + def hello(): + log.info('Hello, World!') + + hello() +""" + +from ...utils.importlib import require_modules + + +required_modules = ['logging'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .patch import patch, unpatch + + __all__ = ['patch', 'unpatch'] diff --git a/ddtrace/contrib/logging/patch.py b/ddtrace/contrib/logging/patch.py new file mode 100644 index 0000000000..84b6fdc370 --- /dev/null +++ b/ddtrace/contrib/logging/patch.py @@ -0,0 +1,49 @@ +import logging + +from ddtrace import config + +from ...helpers import get_correlation_ids +from ...utils.wrappers import unwrap as _u +from ...vendor.wrapt import wrap_function_wrapper as _w + +RECORD_ATTR_TRACE_ID = 'dd.trace_id' +RECORD_ATTR_SPAN_ID = 'dd.span_id' +RECORD_ATTR_VALUE_NULL = 0 + +config._add('logging', dict( + tracer=None, # by default, override here for custom tracer +)) + + +def _w_makeRecord(func, instance, args, kwargs): + record = func(*args, **kwargs) + + # add correlation identifiers to LogRecord + trace_id, span_id = get_correlation_ids(tracer=config.logging.tracer) + if trace_id and span_id: + setattr(record, RECORD_ATTR_TRACE_ID, trace_id) + setattr(record, RECORD_ATTR_SPAN_ID, span_id) + else: + setattr(record, RECORD_ATTR_TRACE_ID, RECORD_ATTR_VALUE_NULL) + setattr(record, RECORD_ATTR_SPAN_ID, RECORD_ATTR_VALUE_NULL) + + return record + + +def patch(): + """ + Patch ``logging`` module in the Python Standard Library for injection of + tracer information by wrapping the base factory method 
``Logger.makeRecord``
+    """
+    if getattr(logging, '_datadog_patch', False):
+        return
+    setattr(logging, '_datadog_patch', True)
+
+    _w(logging.Logger, 'makeRecord', _w_makeRecord)
+
+
+def unpatch():
+    if getattr(logging, '_datadog_patch', False):
+        setattr(logging, '_datadog_patch', False)
+
+        _u(logging.Logger, 'makeRecord')
diff --git a/ddtrace/contrib/mako/__init__.py b/ddtrace/contrib/mako/__init__.py
new file mode 100644
index 0000000000..d26d44ba49
--- /dev/null
+++ b/ddtrace/contrib/mako/__init__.py
@@ -0,0 +1,24 @@
+"""
+The ``mako`` integration traces template rendering.
+Auto instrumentation is available using the ``patch`` function. The following is an example::
+
+    from ddtrace import patch
+    from mako.template import Template
+
+    patch(mako=True)
+
+    t = Template(filename="index.html")
+
+"""
+from ...utils.importlib import require_modules
+
+required_modules = ['mako']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch, unpatch
+
+        __all__ = [
+            'patch',
+            'unpatch',
+        ]
diff --git a/ddtrace/contrib/mako/constants.py b/ddtrace/contrib/mako/constants.py
new file mode 100644
index 0000000000..101c4d144d
--- /dev/null
+++ b/ddtrace/contrib/mako/constants.py
@@ -0,0 +1 @@
+DEFAULT_TEMPLATE_NAME = ''
diff --git a/ddtrace/contrib/mako/patch.py b/ddtrace/contrib/mako/patch.py
new file mode 100644
index 0000000000..5f6da9c2c4
--- /dev/null
+++ b/ddtrace/contrib/mako/patch.py
@@ -0,0 +1,47 @@
+import mako
+from mako.template import Template
+
+from ...ext import SpanTypes
+from ...pin import Pin
+from ...utils.importlib import func_name
+from ...utils.wrappers import unwrap as _u
+from ...vendor.wrapt import wrap_function_wrapper as _w
+from .constants import DEFAULT_TEMPLATE_NAME
+
+
+def patch():
+    if getattr(mako, '__datadog_patch', False):
+        # already patched
+        return
+    setattr(mako, '__datadog_patch', True)
+
+    Pin(service='mako', app='mako').onto(Template)
+
+    _w(mako, 'template.Template.render', _wrap_render)
+    _w(mako, 'template.Template.render_unicode', _wrap_render)
+    _w(mako, 'template.Template.render_context', _wrap_render)
+
+
+def unpatch():
+    if not getattr(mako, '__datadog_patch', False):
+        return
+    setattr(mako, '__datadog_patch', False)
+
+    _u(mako.template.Template, 'render')
+    _u(mako.template.Template, 'render_unicode')
+    _u(mako.template.Template, 'render_context')
+
+
+def _wrap_render(wrapped, instance, args, kwargs):
+    pin = Pin.get_from(instance)
+    if not pin or not pin.enabled():
+        return wrapped(*args, **kwargs)
+
+    template_name = instance.filename or DEFAULT_TEMPLATE_NAME
+    with pin.tracer.trace(func_name(wrapped), pin.service, span_type=SpanTypes.TEMPLATE) as span:
+        try:
+            template = wrapped(*args, **kwargs)
+            return template
+        finally:
+            span.resource = template_name
+            span.set_tag('mako.template_name', template_name)
diff --git a/ddtrace/contrib/molten/__init__.py b/ddtrace/contrib/molten/__init__.py
new file mode 100644
index 0000000000..e8cd134be1
--- /dev/null
+++ b/ddtrace/contrib/molten/__init__.py
@@ -0,0 +1,53 @@
+"""
+The molten web framework is automatically traced by ``ddtrace`` when calling ``patch``::
+
+    from molten import App, Route
+    from ddtrace import patch_all; patch_all(molten=True)
+
+    def hello(name: str, age: int) -> str:
+        return f'Hello {age} year old named {name}!'
+    app = App(routes=[Route('/hello/{name}/{age}', hello)])
+
+
+You may also enable molten tracing automatically via ``ddtrace-run``::
+
+    ddtrace-run python app.py
+
+
+Configuration
+~~~~~~~~~~~~~
+
+.. 
py:data:: ddtrace.config.molten['distributed_tracing'] + + Whether to parse distributed tracing headers from requests received by your Molten app. + + Default: ``True`` + +.. py:data:: ddtrace.config.molten['analytics_enabled'] + + Whether to generate APM events in Trace Search & Analytics. + + Can also be enabled with the ``DD_MOLTEN_ANALYTICS_ENABLED`` environment variable. + + Default: ``None`` + +.. py:data:: ddtrace.config.molten['service_name'] + + The service name reported for your Molten app. + + Can also be configured via the ``DD_MOLTEN_SERVICE_NAME`` environment variable. + + Default: ``'molten'`` +""" +from ...utils.importlib import require_modules + +required_modules = ['molten'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from . import patch as _patch + + patch = _patch.patch + unpatch = _patch.unpatch + + __all__ = ['patch', 'unpatch'] diff --git a/ddtrace/contrib/molten/patch.py b/ddtrace/contrib/molten/patch.py new file mode 100644 index 0000000000..967d06899f --- /dev/null +++ b/ddtrace/contrib/molten/patch.py @@ -0,0 +1,169 @@ +from ddtrace.vendor import wrapt +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +import molten + +from ... import Pin, config +from ...compat import urlencode +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...ext import SpanTypes, http +from ...propagation.http import HTTPPropagator +from ...utils.formats import asbool, get_env +from ...utils.importlib import func_name +from ...utils.wrappers import unwrap as _u +from .wrappers import WrapperComponent, WrapperRenderer, WrapperMiddleware, WrapperRouter, MOLTEN_ROUTE + +MOLTEN_VERSION = tuple(map(int, molten.__version__.split()[0].split('.'))) + +# Configure default configuration +config._add('molten', dict( + service_name=get_env('molten', 'service_name', 'molten'), + app='molten', + distributed_tracing=asbool(get_env('molten', 'distributed_tracing', True)), +)) + + +def patch(): + """Patch the instrumented methods + """ + if getattr(molten, '_datadog_patch', False): + return + setattr(molten, '_datadog_patch', True) + + pin = Pin( + service=config.molten['service_name'], + app=config.molten['app'] + ) + + # add pin to module since many classes use __slots__ + pin.onto(molten) + + _w(molten.BaseApp, '__init__', patch_app_init) + _w(molten.App, '__call__', patch_app_call) + + +def unpatch(): + """Remove instrumentation + """ + if getattr(molten, '_datadog_patch', False): + setattr(molten, '_datadog_patch', False) + + # remove pin + pin = Pin.get_from(molten) + if pin: + pin.remove_from(molten) + + _u(molten.BaseApp, '__init__') + _u(molten.App, '__call__') + _u(molten.Router, 'add_route') + + +def patch_app_call(wrapped, instance, args, kwargs): + """Patch wsgi interface for app + """ + pin = Pin.get_from(molten) + + if not pin or not pin.enabled(): + return wrapped(*args, **kwargs) + + # DEV: This is safe because this is the args for a WSGI handler + # https://www.python.org/dev/peps/pep-3333/ + environ, start_response = args + + request = molten.http.Request.from_environ(environ) + resource = func_name(wrapped) + + # Configure distributed tracing + if config.molten.get('distributed_tracing', True): + propagator = HTTPPropagator() + # request.headers is type Iterable[Tuple[str, str]] + context = propagator.extract(dict(request.headers)) + # Only need to activate the new context if something was propagated + if context.trace_id: + pin.tracer.context_provider.activate(context) + + with pin.tracer.trace('molten.request', 
service=pin.service, resource=resource, span_type=SpanTypes.WEB) as span:
+        # set analytics sample rate with global config enabled
+        span.set_tag(
+            ANALYTICS_SAMPLE_RATE_KEY,
+            config.molten.get_analytics_sample_rate(use_global_config=True)
+        )
+
+        @wrapt.function_wrapper
+        def _w_start_response(wrapped, instance, args, kwargs):
+            """ Patch response handling to set metadata """
+
+            pin = Pin.get_from(molten)
+            if not pin or not pin.enabled():
+                return wrapped(*args, **kwargs)
+
+            status, headers, exc_info = args
+            code, _, _ = status.partition(' ')
+
+            try:
+                code = int(code)
+            except ValueError:
+                pass
+
+            if not span.get_tag(MOLTEN_ROUTE):
+                # if the route was never resolved, update the root resource
+                span.resource = u'{} {}'.format(request.method, code)
+
+            span.set_tag(http.STATUS_CODE, code)
+
+            # mark 5xx spans as error
+            if 500 <= code < 600:
+                span.error = 1
+
+            return wrapped(*args, **kwargs)
+
+        # patching for extracting the response code
+        start_response = _w_start_response(start_response)
+
+        span.set_tag(http.METHOD, request.method)
+        span.set_tag(http.URL, '%s://%s:%s%s' % (
+            request.scheme, request.host, request.port, request.path,
+        ))
+        if config.molten.trace_query_string:
+            span.set_tag(http.QUERY_STRING, urlencode(dict(request.params)))
+        span.set_tag('molten.version', molten.__version__)
+        return wrapped(environ, start_response, **kwargs)
+
+
+def patch_app_init(wrapped, instance, args, kwargs):
+    """Patch app initialization of middleware, components and renderers
+    """
+    # allow instance to be initialized before wrapping them
+    wrapped(*args, **kwargs)
+
+    # add Pin to instance
+    pin = Pin.get_from(molten)
+
+    if not pin or not pin.enabled():
+        return
+
+    # Wrappers here allow us to trace objects without altering class or instance
+    # attributes, which presents a problem when classes in molten use
+    # ``__slots__``
+
+    instance.router = WrapperRouter(instance.router)
+
+    # wrap middleware functions/callables
+    instance.middleware = [
+        WrapperMiddleware(mw)
+        for mw in instance.middleware
+    ]
+
+    # wrap component objects within the injector
+    # NOTE: the app instance also contains a list of components, but it does not
+    # appear to be used for anything besides being passed along to the dependency injector
+    instance.injector.components = [
+        WrapperComponent(c)
+        for c in instance.injector.components
+    ]
+
+    # wrap renderer objects
+    instance.renderers = [
+        WrapperRenderer(r)
+        for r in instance.renderers
+    ]
diff --git a/ddtrace/contrib/molten/wrappers.py b/ddtrace/contrib/molten/wrappers.py
new file mode 100644
index 0000000000..db13556530
--- /dev/null
+++ b/ddtrace/contrib/molten/wrappers.py
@@ -0,0 +1,95 @@
+from ddtrace.vendor import wrapt
+import molten
+
+from ... import Pin
+from ...utils.importlib import func_name
+
+MOLTEN_ROUTE = 'molten.route'
+
+
+def trace_wrapped(resource, wrapped, *args, **kwargs):
+    pin = Pin.get_from(molten)
+    if not pin or not pin.enabled():
+        return wrapped(*args, **kwargs)
+
+    with pin.tracer.trace(func_name(wrapped), service=pin.service, resource=resource):
+        return wrapped(*args, **kwargs)
+
+
+def trace_func(resource):
+    """Trace calls to a function using the provided resource name
+    """
+    @wrapt.function_wrapper
+    def _trace_func(wrapped, instance, args, kwargs):
+        pin = Pin.get_from(molten)
+
+        if not pin or not pin.enabled():
+            return wrapped(*args, **kwargs)
+
+        with pin.tracer.trace(func_name(wrapped), service=pin.service, resource=resource):
+            return wrapped(*args, **kwargs)
+
+    return _trace_func
+
+
+class WrapperComponent(wrapt.ObjectProxy):
+    """ Tracing of components """
+    def can_handle_parameter(self, *args, **kwargs):
+        func = self.__wrapped__.can_handle_parameter
+        cname = func_name(self.__wrapped__)
+        resource = '{}.{}'.format(cname, func.__name__)
+        return trace_wrapped(resource, func, *args, **kwargs)
+
+    # TODO[tahir]: the signature of a wrapped resolve method causes DIError to
+    # be thrown since parameter types cannot be determined
+
+
+class WrapperRenderer(wrapt.ObjectProxy):
+    """ Tracing of renderers """
+    def render(self, *args, **kwargs):
+        func = self.__wrapped__.render
+        cname = func_name(self.__wrapped__)
+        resource = '{}.{}'.format(cname, func.__name__)
+        return trace_wrapped(resource, func, *args, **kwargs)
+
+
+class WrapperMiddleware(wrapt.ObjectProxy):
+    """ Tracing of callable functional-middleware """
+    def __call__(self, *args, **kwargs):
+        func = self.__wrapped__.__call__
+        resource = func_name(self.__wrapped__)
+        return trace_wrapped(resource, func, *args, **kwargs)
+
+
+class WrapperRouter(wrapt.ObjectProxy):
+    """ Tracing of the router on the way back from a matched route """
+    def match(self, *args, **kwargs):
+        # catch the matched route, wrap a tracer around its handler and set the root span resource
+        func = self.__wrapped__.match
+        route_and_params = func(*args, **kwargs)
+
+        pin = Pin.get_from(molten)
+        if not pin or not pin.enabled():
+            return route_and_params
+
+        if route_and_params is not None:
+            route, params = route_and_params
+
+            route.handler = trace_func(func_name(route.handler))(route.handler)
+
+            # update the root span resource while we know the matched route
+            resource = '{} {}'.format(
+                route.method,
+                route.template,
+            )
+            root_span = pin.tracer.current_root_span()
+            if root_span:
+                root_span.resource = resource
+
+                # if no root route is set, make sure we record it based on this
+                # resolved route
+                if not root_span.get_tag(MOLTEN_ROUTE):
+                    root_span.set_tag(MOLTEN_ROUTE, route.name)
+
+            return route, params
+
+        return route_and_params
diff --git a/ddtrace/contrib/mongoengine/__init__.py b/ddtrace/contrib/mongoengine/__init__.py
new file mode 100644
index 0000000000..384c04ab77
--- /dev/null
+++ b/ddtrace/contrib/mongoengine/__init__.py
@@ -0,0 +1,29 @@
+"""Instrument mongoengine to report MongoDB queries.
+
+``patch_all`` will automatically patch your mongoengine connect method to make it work.
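+
+Because mongoengine drives pymongo under the hood, queries issued through your
+``Document`` subclasses are reported through the traced pymongo client once the
+connection is wrapped; a minimal sketch (the ``User`` document is illustrative)::
+
+    import mongoengine
+
+    class User(mongoengine.Document):
+        name = mongoengine.StringField()
+
+    # both the save and the query are traced through pymongo
+    User(name='alice').save()
+    users = User.objects(name='alice')
+
+The standard setup: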
+::
+
+    from ddtrace import Pin, patch
+    import mongoengine
+
+    # If not patched yet, you can patch mongoengine specifically
+    patch(mongoengine=True)
+
+    # At that point, mongoengine is instrumented with the default settings
+    mongoengine.connect('db', alias='default')
+
+    # Use a pin to specify metadata related to this client
+    client = mongoengine.connect('db', alias='master')
+    Pin.override(client, service="mongo-master")
+"""
+
+from ...utils.importlib import require_modules
+
+
+required_modules = ['mongoengine']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch, trace_mongoengine
+
+        __all__ = ['patch', 'trace_mongoengine']
diff --git a/ddtrace/contrib/mongoengine/patch.py b/ddtrace/contrib/mongoengine/patch.py
new file mode 100644
index 0000000000..7faa72d4cd
--- /dev/null
+++ b/ddtrace/contrib/mongoengine/patch.py
@@ -0,0 +1,20 @@
+import mongoengine
+
+from .trace import WrappedConnect
+from ...utils.deprecation import deprecated
+
+# Original connect function
+_connect = mongoengine.connect
+
+
+def patch():
+    setattr(mongoengine, 'connect', WrappedConnect(_connect))
+
+
+def unpatch():
+    setattr(mongoengine, 'connect', _connect)
+
+
+@deprecated(message='Use patching instead (see the docs).', version='1.0.0')
+def trace_mongoengine(*args, **kwargs):
+    return _connect
diff --git a/ddtrace/contrib/mongoengine/trace.py b/ddtrace/contrib/mongoengine/trace.py
new file mode 100644
index 0000000000..e219bf9930
--- /dev/null
+++ b/ddtrace/contrib/mongoengine/trace.py
@@ -0,0 +1,32 @@
+
+# 3p
+from ddtrace.vendor import wrapt
+
+# project
+import ddtrace
+from ddtrace.ext import mongo as mongox
+from ddtrace.contrib.pymongo.client import TracedMongoClient
+
+
+# TODO(Benjamin): we should instrument register_connection instead, because it is more generic
+# We should also extract the "alias" attribute and set it as a meta
+class WrappedConnect(wrapt.ObjectProxy):
+    """ WrappedConnect wraps mongoengine's 'connect' function to ensure
+    that all returned connections are wrapped for tracing.
+    """
+
+    def __init__(self, connect):
+        super(WrappedConnect, self).__init__(connect)
+        ddtrace.Pin(service=mongox.SERVICE, tracer=ddtrace.tracer).onto(self)
+
+    def __call__(self, *args, **kwargs):
+        client = self.__wrapped__(*args, **kwargs)
+        pin = ddtrace.Pin.get_from(self)
+        if pin:
+            # mongoengine uses pymongo internally, so we can just piggyback on the
+            # existing pymongo integration and make sure that the connections it
+            # uses internally are traced.
+            client = TracedMongoClient(client)
+            ddtrace.Pin(service=pin.service, tracer=pin.tracer).onto(client)
+
+        return client
diff --git a/ddtrace/contrib/mysql/__init__.py b/ddtrace/contrib/mysql/__init__.py
new file mode 100644
index 0000000000..422d7ad943
--- /dev/null
+++ b/ddtrace/contrib/mysql/__init__.py
@@ -0,0 +1,39 @@
+"""Instrument mysql to report MySQL queries.
+
+``patch_all`` will automatically patch your mysql connection to make it work.
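+
+Traced queries nest under whatever span is currently active, so application
+spans and their SQL line up in a single trace; a minimal sketch (assuming a
+patched connection ``conn`` as created below)::
+
+    from ddtrace import tracer
+
+    with tracer.trace('users.lookup', service='user-service'):
+        cursor = conn.cursor()
+        # reported as a child mysql span of 'users.lookup'
+        cursor.execute("SELECT * FROM users WHERE id = %s", (1,))
+
+The standard setup: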
+ +:: + + # Make sure to import mysql.connector and not the 'connect' function, + # otherwise you won't have access to the patched version + from ddtrace import Pin, patch + import mysql.connector + + # If not patched yet, you can patch mysql specifically + patch(mysql=True) + + # This will report a span with the default settings + conn = mysql.connector.connect(user="alice", password="b0b", host="localhost", port=3306, database="test") + cursor = conn.cursor() + cursor.execute("SELECT 6*7 AS the_answer;") + + # Use a pin to specify metadata related to this connection + Pin.override(conn, service='mysql-users') + +Only the default full-Python integration works. The binary C connector, +provided by _mysql_connector, is not supported yet. + +Help on mysql.connector can be found on: +https://dev.mysql.com/doc/connector-python/en/ +""" +from ...utils.importlib import require_modules + +# check `mysql-connector` availability +required_modules = ['mysql.connector'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .patch import patch + from .tracers import get_traced_mysql_connection + + __all__ = ['get_traced_mysql_connection', 'patch'] diff --git a/ddtrace/contrib/mysql/patch.py b/ddtrace/contrib/mysql/patch.py new file mode 100644 index 0000000000..15a0b1aa51 --- /dev/null +++ b/ddtrace/contrib/mysql/patch.py @@ -0,0 +1,46 @@ +# 3p +from ddtrace.vendor import wrapt +import mysql.connector + +# project +from ddtrace import Pin +from ddtrace.contrib.dbapi import TracedConnection +from ...ext import net, db + + +CONN_ATTR_BY_TAG = { + net.TARGET_HOST: 'server_host', + net.TARGET_PORT: 'server_port', + db.USER: 'user', + db.NAME: 'database', +} + + +def patch(): + wrapt.wrap_function_wrapper('mysql.connector', 'connect', _connect) + # `Connect` is an alias for `connect`, patch it too + if hasattr(mysql.connector, 'Connect'): + mysql.connector.Connect = mysql.connector.connect + + +def unpatch(): + if isinstance(mysql.connector.connect, wrapt.ObjectProxy): + mysql.connector.connect = mysql.connector.connect.__wrapped__ + if hasattr(mysql.connector, 'Connect'): + mysql.connector.Connect = mysql.connector.connect + + +def _connect(func, instance, args, kwargs): + conn = func(*args, **kwargs) + return patch_conn(conn) + + +def patch_conn(conn): + + tags = {t: getattr(conn, a) for t, a in CONN_ATTR_BY_TAG.items() if getattr(conn, a, '') != ''} + pin = Pin(service='mysql', app='mysql', tags=tags) + + # grab the metadata from the conn + wrapped = TracedConnection(conn, pin=pin) + pin.onto(wrapped) + return wrapped diff --git a/ddtrace/contrib/mysql/tracers.py b/ddtrace/contrib/mysql/tracers.py new file mode 100644 index 0000000000..14640210bf --- /dev/null +++ b/ddtrace/contrib/mysql/tracers.py @@ -0,0 +1,8 @@ +import mysql.connector + +from ...utils.deprecation import deprecated + + +@deprecated(message='Use patching instead (see the docs).', version='1.0.0') +def get_traced_mysql_connection(*args, **kwargs): + return mysql.connector.MySQLConnection diff --git a/ddtrace/contrib/mysqldb/__init__.py b/ddtrace/contrib/mysqldb/__init__.py new file mode 100644 index 0000000000..3219a189c3 --- /dev/null +++ b/ddtrace/contrib/mysqldb/__init__.py @@ -0,0 +1,38 @@ +"""Instrument mysqlclient / MySQL-python to report MySQL queries. + +``patch_all`` will automatically patch your mysql connection to make it work. 
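+
+Connection tags (host, user, database) are read from the arguments you pass to
+``MySQLdb.connect``, whether positional or keyword (see the ``KWPOS_BY_TAG``
+mapping in ``patch.py``); a minimal sketch::
+
+    import MySQLdb
+
+    # both forms yield the same host/user/db span tags
+    conn_a = MySQLdb.connect('localhost', 'alice', 'b0b', 'test')
+    conn_b = MySQLdb.connect(host='localhost', user='alice', passwd='b0b', db='test')
+
+The standard setup: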
+ +:: + + # Make sure to import MySQLdb and not the 'connect' function, + # otherwise you won't have access to the patched version + from ddtrace import Pin, patch + import MySQLdb + + # If not patched yet, you can patch mysqldb specifically + patch(mysqldb=True) + + # This will report a span with the default settings + conn = MySQLdb.connect(user="alice", passwd="b0b", host="localhost", port=3306, db="test") + cursor = conn.cursor() + cursor.execute("SELECT 6*7 AS the_answer;") + + # Use a pin to specify metadata related to this connection + Pin.override(conn, service='mysql-users') + +This package works for mysqlclient or MySQL-python. Only the default +full-Python integration works. The binary C connector provided by +_mysql is not yet supported. + +Help on mysqlclient can be found on: +https://mysqlclient.readthedocs.io/ +""" +from ...utils.importlib import require_modules + +required_modules = ['MySQLdb'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .patch import patch + + __all__ = ['patch'] diff --git a/ddtrace/contrib/mysqldb/patch.py b/ddtrace/contrib/mysqldb/patch.py new file mode 100644 index 0000000000..ac6e0e1d4e --- /dev/null +++ b/ddtrace/contrib/mysqldb/patch.py @@ -0,0 +1,63 @@ +# 3p +import MySQLdb + +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +# project +from ddtrace import Pin +from ddtrace.contrib.dbapi import TracedConnection + +from ...ext import net, db +from ...utils.wrappers import unwrap as _u + +KWPOS_BY_TAG = { + net.TARGET_HOST: ('host', 0), + db.USER: ('user', 1), + db.NAME: ('db', 3), +} + + +def patch(): + # patch only once + if getattr(MySQLdb, '__datadog_patch', False): + return + setattr(MySQLdb, '__datadog_patch', True) + + # `Connection` and `connect` are aliases for + # `Connect`; patch them too + _w('MySQLdb', 'Connect', _connect) + if hasattr(MySQLdb, 'Connection'): + _w('MySQLdb', 'Connection', _connect) + if hasattr(MySQLdb, 'connect'): + _w('MySQLdb', 'connect', _connect) + + +def unpatch(): + if not getattr(MySQLdb, '__datadog_patch', False): + return + setattr(MySQLdb, '__datadog_patch', False) + + # unpatch MySQLdb + _u(MySQLdb, 'Connect') + if hasattr(MySQLdb, 'Connection'): + _u(MySQLdb, 'Connection') + if hasattr(MySQLdb, 'connect'): + _u(MySQLdb, 'connect') + + +def _connect(func, instance, args, kwargs): + conn = func(*args, **kwargs) + return patch_conn(conn, *args, **kwargs) + + +def patch_conn(conn, *args, **kwargs): + tags = {t: kwargs[k] if k in kwargs else args[p] + for t, (k, p) in KWPOS_BY_TAG.items() + if k in kwargs or len(args) > p} + tags[net.TARGET_PORT] = conn.port + pin = Pin(service='mysql', app='mysql', tags=tags) + + # grab the metadata from the conn + wrapped = TracedConnection(conn, pin=pin) + pin.onto(wrapped) + return wrapped diff --git a/ddtrace/contrib/psycopg/__init__.py b/ddtrace/contrib/psycopg/__init__.py new file mode 100644 index 0000000000..7ff699636d --- /dev/null +++ b/ddtrace/contrib/psycopg/__init__.py @@ -0,0 +1,30 @@ +"""Instrument psycopg2 to report Postgres queries. + +``patch_all`` will automatically patch your psycopg2 connection to make it work. 
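+
+The traced cursor also accepts ``psycopg2.sql`` ``Composable`` objects
+(psycopg2 >= 2.7), rendering them to a plain string for the span resource; a
+minimal sketch (assuming a traced ``cursor`` as created below)::
+
+    from psycopg2 import sql
+
+    query = sql.SQL("select * from {} where id = %s").format(sql.Identifier('users'))
+    # the span resource is the rendered SQL string
+    cursor.execute(query, (1,))
+
+The standard setup: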
+::
+
+    from ddtrace import Pin, patch
+    import psycopg2
+
+    # If not patched yet, you can patch psycopg2 specifically
+    patch(psycopg=True)
+
+    # This will report a span with the default settings
+    db = psycopg2.connect(dbname="test")
+    cursor = db.cursor()
+    cursor.execute("select * from users where id = 1")
+
+    # Use a pin to specify metadata related to this connection
+    Pin.override(db, service='postgres-users')
+"""
+from ...utils.importlib import require_modules
+
+
+required_modules = ['psycopg2']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .connection import connection_factory
+        from .patch import patch, patch_conn
+
+        __all__ = ['connection_factory', 'patch', 'patch_conn']
diff --git a/ddtrace/contrib/psycopg/connection.py b/ddtrace/contrib/psycopg/connection.py
new file mode 100644
index 0000000000..d3e4eb6e95
--- /dev/null
+++ b/ddtrace/contrib/psycopg/connection.py
@@ -0,0 +1,91 @@
+"""
+Tracing utilities for the psycopg postgres client library.
+"""
+
+# stdlib
+import functools
+
+from ...ext import SpanTypes, db, net, sql
+from ...utils.deprecation import deprecated
+
+# 3p
+from psycopg2.extensions import connection, cursor
+
+
+@deprecated(message='Use patching instead (see the docs).', version='1.0.0')
+def connection_factory(tracer, service='postgres'):
+    """ Return a connection factory class that can be used to trace
+    postgres queries.
+
+    >>> factory = connection_factory(my_tracer, service='my_db_service')
+    >>> conn = psycopg2.connect(..., connection_factory=factory)
+    """
+
+    return functools.partial(
+        TracedConnection,
+        datadog_tracer=tracer,
+        datadog_service=service,
+    )
+
+
+class TracedCursor(cursor):
+    """Wrapper around cursor creating one span per query"""
+
+    def __init__(self, *args, **kwargs):
+        self._datadog_tracer = kwargs.pop('datadog_tracer', None)
+        self._datadog_service = kwargs.pop('datadog_service', None)
+        self._datadog_tags = kwargs.pop('datadog_tags', None)
+        super(TracedCursor, self).__init__(*args, **kwargs)
+
+    def execute(self, query, vars=None):  # noqa: A002
+        """ just wrap the cursor execution in a span """
+        if not self._datadog_tracer:
+            return cursor.execute(self, query, vars)
+
+        with self._datadog_tracer.trace('postgres.query', service=self._datadog_service, span_type=SpanTypes.SQL) as s:
+            if not s.sampled:
+                return super(TracedCursor, self).execute(query, vars)
+
+            s.resource = query
+            s.set_tags(self._datadog_tags)
+            try:
+                return super(TracedCursor, self).execute(query, vars)
+            finally:
+                s.set_metric('db.rowcount', self.rowcount)
+
+    def callproc(self, procname, vars=None):  # noqa: A002
+        """ just wrap the execution in a span """
+        return cursor.callproc(self, procname, vars)
+
+
+class TracedConnection(connection):
+    """Wrapper around psycopg2 for tracing"""
+
+    def __init__(self, *args, **kwargs):
+
+        self._datadog_tracer = kwargs.pop('datadog_tracer', None)
+        self._datadog_service = kwargs.pop('datadog_service', None)
+
+        super(TracedConnection, self).__init__(*args, **kwargs)
+
+        # add metadata (from the connection, string, etc)
+        dsn = sql.parse_pg_dsn(self.dsn)
+        self._datadog_tags = {
+            net.TARGET_HOST: dsn.get('host'),
+            net.TARGET_PORT: dsn.get('port'),
+            db.NAME: dsn.get('dbname'),
+            db.USER: dsn.get('user'),
+            'db.application': dsn.get('application_name'),
+        }
+
+        self._datadog_cursor_class = functools.partial(
+            TracedCursor,
+            datadog_tracer=self._datadog_tracer,
+            datadog_service=self._datadog_service,
+            datadog_tags=self._datadog_tags,
+        )
+
+    def cursor(self, *args, **kwargs):
+        """ register our custom cursor factory """
+        kwargs.setdefault('cursor_factory', self._datadog_cursor_class)
+        return super(TracedConnection, self).cursor(*args, **kwargs)
diff --git a/ddtrace/contrib/psycopg/patch.py b/ddtrace/contrib/psycopg/patch.py
new file mode 100644
index 0000000000..3aeb68b713
--- /dev/null
+++ b/ddtrace/contrib/psycopg/patch.py
@@ -0,0 +1,196 @@
+# 3p
+import psycopg2
+from ddtrace.vendor import wrapt
+
+# project
+from ddtrace import Pin, config
+from ddtrace.contrib import dbapi
+from ddtrace.ext import sql, net, db
+
+# Original connect method
+_connect = psycopg2.connect
+
+# psycopg2 versions can end in `-betaN` where `N` is a number
+# in such cases we simply skip version specific patching
+PSYCOPG2_VERSION = (0, 0, 0)
+
+try:
+    PSYCOPG2_VERSION = tuple(map(int, psycopg2.__version__.split()[0].split('.')))
+except Exception:
+    pass
+
+if PSYCOPG2_VERSION >= (2, 7):
+    from psycopg2.sql import Composable
+
+
+def patch():
+    """ Monkey patch psycopg2's connect function so that the
+    connection's functions are traced.
+    """
+    if getattr(psycopg2, '_datadog_patch', False):
+        return
+    setattr(psycopg2, '_datadog_patch', True)
+
+    wrapt.wrap_function_wrapper(psycopg2, 'connect', patched_connect)
+    _patch_extensions(_psycopg2_extensions)  # do this early just in case
+
+
+def unpatch():
+    if getattr(psycopg2, '_datadog_patch', False):
+        setattr(psycopg2, '_datadog_patch', False)
+        psycopg2.connect = _connect
+
+
+class Psycopg2TracedCursor(dbapi.TracedCursor):
+    """ TracedCursor for psycopg2 """
+    def _trace_method(self, method, name, resource, extra_tags, *args, **kwargs):
+        # treat psycopg2.sql.Composable resource objects as strings
+        if PSYCOPG2_VERSION >= (2, 7) and isinstance(resource, Composable):
+            resource = resource.as_string(self.__wrapped__)
+
+        return super(Psycopg2TracedCursor, self)._trace_method(method, name, resource, extra_tags, *args, **kwargs)
+
+
+class Psycopg2FetchTracedCursor(Psycopg2TracedCursor, dbapi.FetchTracedCursor):
+    """ FetchTracedCursor for psycopg2 """
+
+
+class Psycopg2TracedConnection(dbapi.TracedConnection):
+    """ TracedConnection wraps a Connection with tracing code. """
+
+    def __init__(self, conn, pin=None, cursor_cls=None):
+        if not cursor_cls:
+            # Do not trace `fetch*` methods by default
+            cursor_cls = Psycopg2TracedCursor
+            if config.dbapi2.trace_fetch_methods:
+                cursor_cls = Psycopg2FetchTracedCursor
+
+        super(Psycopg2TracedConnection, self).__init__(conn, pin, cursor_cls=cursor_cls)
+
+
+def patch_conn(conn, traced_conn_cls=Psycopg2TracedConnection):
+    """ Patch a connection instance so that its queries are traced."""
+    # ensure we've patched extensions (this is idempotent) in
+    # case we're only tracing some connections.
+    _patch_extensions(_psycopg2_extensions)
+
+    c = traced_conn_cls(conn)
+
+    # fetch tags from the dsn
+    dsn = sql.parse_pg_dsn(conn.dsn)
+    tags = {
+        net.TARGET_HOST: dsn.get('host'),
+        net.TARGET_PORT: dsn.get('port'),
+        db.NAME: dsn.get('dbname'),
+        db.USER: dsn.get('user'),
+        'db.application': dsn.get('application_name'),
+    }
+
+    Pin(
+        service='postgres',
+        app='postgres',
+        tags=tags).onto(c)
+
+    return c
+
+
+def _patch_extensions(_extensions):
+    # we must patch extensions all the time (it's pretty harmless) so split
+    # from global patching of connections. must be idempotent.
+    for _, module, func, wrapper in _extensions:
+        if not hasattr(module, func) or isinstance(getattr(module, func), wrapt.ObjectProxy):
+            continue
+        wrapt.wrap_function_wrapper(module, func, wrapper)
+
+
+def _unpatch_extensions(_extensions):
+    # reverse of _patch_extensions; restores the originals. must be idempotent.
+    for original, module, func, _ in _extensions:
+        setattr(module, func, original)
+
+
+#
+# monkeypatch targets
+#
+
+def patched_connect(connect_func, _, args, kwargs):
+    conn = connect_func(*args, **kwargs)
+    return patch_conn(conn)
+
+
+def _extensions_register_type(func, _, args, kwargs):
+    def _unroll_args(obj, scope=None):
+        return obj, scope
+    obj, scope = _unroll_args(*args, **kwargs)
+
+    # register_type performs a c-level check of the object
+    # type so we must be sure to pass in the actual db connection
+    if scope and isinstance(scope, wrapt.ObjectProxy):
+        scope = scope.__wrapped__
+
+    return func(obj, scope) if scope else func(obj)
+
+
+def _extensions_quote_ident(func, _, args, kwargs):
+    def _unroll_args(obj, scope=None):
+        return obj, scope
+    obj, scope = _unroll_args(*args, **kwargs)
+
+    # quote_ident performs a c-level check of the object
+    # type so we must be sure to pass in the actual db connection
+    if scope and isinstance(scope, wrapt.ObjectProxy):
+        scope = scope.__wrapped__
+
+    return func(obj, scope) if scope else func(obj)
+
+
+def _extensions_adapt(func, _, args, kwargs):
+    adapt = func(*args, **kwargs)
+    if hasattr(adapt, 'prepare'):
+        return AdapterWrapper(adapt)
+    return adapt
+
+
+class AdapterWrapper(wrapt.ObjectProxy):
+    def prepare(self, *args, **kwargs):
+        func = self.__wrapped__.prepare
+        if not args:
+            return func(*args, **kwargs)
+        conn = args[0]
+
+        # prepare performs a c-level check of the object type so
+        # we must be sure to pass in the actual db connection
+        if isinstance(conn, wrapt.ObjectProxy):
+            conn = conn.__wrapped__
+
+        return func(conn, *args[1:], **kwargs)
+
+
+# extension hooks
+_psycopg2_extensions = [
+    (psycopg2.extensions.register_type,
+     psycopg2.extensions, 'register_type',
+     _extensions_register_type),
+    (psycopg2._psycopg.register_type,
+     psycopg2._psycopg, 'register_type',
+     _extensions_register_type),
+    (psycopg2.extensions.adapt,
+     psycopg2.extensions, 'adapt',
+     _extensions_adapt),
+]
+
+# `_json` attribute is only available for psycopg >= 2.5
+if getattr(psycopg2, '_json', None):
+    _psycopg2_extensions += [
+        (psycopg2._json.register_type,
+         psycopg2._json, 'register_type',
+         _extensions_register_type),
+    ]
+
+# `quote_ident` attribute is only available for psycopg >= 2.7
+if getattr(psycopg2, 'extensions', None) and getattr(psycopg2.extensions,
+                                                     'quote_ident', None):
+    _psycopg2_extensions += [
+        (psycopg2.extensions.quote_ident, psycopg2.extensions, 'quote_ident', _extensions_quote_ident),
+    ]
diff --git a/ddtrace/contrib/pylibmc/__init__.py b/ddtrace/contrib/pylibmc/__init__.py
new file mode 100644
index 0000000000..798faffbc5
--- /dev/null
+++ b/ddtrace/contrib/pylibmc/__init__.py
@@ -0,0 +1,31 @@
+"""Instrument pylibmc to report Memcached queries.
+
+``patch_all`` will automatically patch your pylibmc client to make it work.
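+
+Multi-key commands are traced as well, and a ``key_prefix`` passed to them is
+recorded in the span's query tag; a minimal sketch (assuming a patched
+``client`` as created below)::
+
+    client.set_multi({'key1': 'value1', 'key2': 'value2'}, key_prefix='session-')
+    client.get_multi(['key1', 'key2'], key_prefix='session-')
+
+The standard setup: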
+::
+
+    # Be sure to import pylibmc and not pylibmc.Client directly,
+    # otherwise you won't have access to the patched version
+    from ddtrace import Pin, patch
+    import pylibmc
+
+    # If not patched yet, you can patch pylibmc specifically
+    patch(pylibmc=True)
+
+    # One client instrumented with default configuration
+    client = pylibmc.Client(["localhost:11211"])
+    client.set("key1", "value1")
+
+    # Use a pin to specify metadata related to this client
+    Pin.override(client, service="memcached-sessions")
+"""
+
+from ...utils.importlib import require_modules
+
+required_modules = ['pylibmc']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .client import TracedClient
+        from .patch import patch
+
+        __all__ = ['TracedClient', 'patch']
diff --git a/ddtrace/contrib/pylibmc/addrs.py b/ddtrace/contrib/pylibmc/addrs.py
new file mode 100644
index 0000000000..0f11d2ac44
--- /dev/null
+++ b/ddtrace/contrib/pylibmc/addrs.py
@@ -0,0 +1,14 @@
+translate_server_specs = None
+
+try:
+    # NOTE: we rely on an undocumented method to parse addresses,
+    # so be a bit defensive and don't assume it exists.
+    from pylibmc.client import translate_server_specs
+except ImportError:
+    pass
+
+
+def parse_addresses(addrs):
+    if not translate_server_specs:
+        return []
+    return translate_server_specs(addrs)
diff --git a/ddtrace/contrib/pylibmc/client.py b/ddtrace/contrib/pylibmc/client.py
new file mode 100644
index 0000000000..415a0ef9f4
--- /dev/null
+++ b/ddtrace/contrib/pylibmc/client.py
@@ -0,0 +1,158 @@
+from contextlib import contextmanager
+import random
+
+# 3p
+from ddtrace.vendor.wrapt import ObjectProxy
+import pylibmc
+
+# project
+import ddtrace
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...ext import SpanTypes, memcached, net
+from ...internal.logger import get_logger
+from ...settings import config
+from .addrs import parse_addresses
+
+
+# Original Client class
+_Client = pylibmc.Client
+
+
+log = get_logger(__name__)
+
+
+class TracedClient(ObjectProxy):
+    """ TracedClient is a proxy for a pylibmc.Client that times its network operations. """
+
+    def __init__(self, client=None, service=memcached.SERVICE, tracer=None, *args, **kwargs):
+        """ Create a traced client that wraps the given memcached client.
+
+        """
+        # The client instance/service/tracer attributes are kept for compatibility
+        # with the old interface: TracedClient(client=pylibmc.Client(['localhost:11211']))
+        # TODO(Benjamin): Remove these in favor of patching.
+        if not isinstance(client, _Client):
+            # We are in the patched situation, just pass down all arguments to the pylibmc.Client
+            # Note that, in that case, client isn't a real client (just the first argument)
+            client = _Client(client, *args, **kwargs)
+        else:
+            log.warning('TracedClient instantiation is deprecated and will be removed '
+                        'in future versions (0.6.0). Use patching instead (see the docs).')
+
+        super(TracedClient, self).__init__(client)
+
+        pin = ddtrace.Pin(service=service, tracer=tracer)
+        pin.onto(self)
+
+        # attempt to collect the pool of urls this client talks to
+        try:
+            self._addresses = parse_addresses(client.addresses)
+        except Exception:
+            log.debug('error setting addresses', exc_info=True)
+
+    def clone(self, *args, **kwargs):
+        # rewrap new connections.
+        cloned = self.__wrapped__.clone(*args, **kwargs)
+        traced_client = TracedClient(cloned)
+        pin = ddtrace.Pin.get_from(self)
+        if pin:
+            pin.clone().onto(traced_client)
+        return traced_client
+
+    def get(self, *args, **kwargs):
+        return self._trace_cmd('get', *args, **kwargs)
+
+    def set(self, *args, **kwargs):
+        return self._trace_cmd('set', *args, **kwargs)
+
+    def delete(self, *args, **kwargs):
+        return self._trace_cmd('delete', *args, **kwargs)
+
+    def gets(self, *args, **kwargs):
+        return self._trace_cmd('gets', *args, **kwargs)
+
+    def touch(self, *args, **kwargs):
+        return self._trace_cmd('touch', *args, **kwargs)
+
+    def cas(self, *args, **kwargs):
+        return self._trace_cmd('cas', *args, **kwargs)
+
+    def incr(self, *args, **kwargs):
+        return self._trace_cmd('incr', *args, **kwargs)
+
+    def decr(self, *args, **kwargs):
+        return self._trace_cmd('decr', *args, **kwargs)
+
+    def append(self, *args, **kwargs):
+        return self._trace_cmd('append', *args, **kwargs)
+
+    def prepend(self, *args, **kwargs):
+        return self._trace_cmd('prepend', *args, **kwargs)
+
+    def get_multi(self, *args, **kwargs):
+        return self._trace_multi_cmd('get_multi', *args, **kwargs)
+
+    def set_multi(self, *args, **kwargs):
+        return self._trace_multi_cmd('set_multi', *args, **kwargs)
+
+    def delete_multi(self, *args, **kwargs):
+        return self._trace_multi_cmd('delete_multi', *args, **kwargs)
+
+    def _trace_cmd(self, method_name, *args, **kwargs):
+        """ trace the execution of the method with the given name and tag
+        the span with the first arg.
+        """
+        method = getattr(self.__wrapped__, method_name)
+        with self._span(method_name) as span:
+
+            if span and args:
+                span.set_tag(memcached.QUERY, '%s %s' % (method_name, args[0]))
+
+            return method(*args, **kwargs)
+
+    def _trace_multi_cmd(self, method_name, *args, **kwargs):
+        """ trace the execution of the multi command with the given name. """
+        method = getattr(self.__wrapped__, method_name)
+        with self._span(method_name) as span:
+
+            pre = kwargs.get('key_prefix')
+            if span and pre:
+                span.set_tag(memcached.QUERY, '%s %s' % (method_name, pre))
+
+            return method(*args, **kwargs)
+
+    @contextmanager
+    def _no_span(self):
+        yield None
+
+    def _span(self, cmd_name):
+        """ Return a span timing the given command. """
+        pin = ddtrace.Pin.get_from(self)
+        if not pin or not pin.enabled():
+            return self._no_span()
+
+        span = pin.tracer.trace(
+            'memcached.cmd',
+            service=pin.service,
+            resource=cmd_name,
+            span_type=SpanTypes.CACHE)
+
+        try:
+            self._tag_span(span)
+        except Exception:
+            log.debug('error tagging span', exc_info=True)
+        return span
+
+    def _tag_span(self, span):
+        # FIXME[matt] the host selection is buried in c code. we can't tell what it's actually
+        # using, so fallback to randomly choosing one. can we do better?
+ if self._addresses: + _, host, port, _ = random.choice(self._addresses) + span.set_meta(net.TARGET_HOST, host) + span.set_meta(net.TARGET_PORT, port) + + # set analytics sample rate + span.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.pylibmc.get_analytics_sample_rate() + ) diff --git a/ddtrace/contrib/pylibmc/patch.py b/ddtrace/contrib/pylibmc/patch.py new file mode 100644 index 0000000000..bf1606a627 --- /dev/null +++ b/ddtrace/contrib/pylibmc/patch.py @@ -0,0 +1,14 @@ +import pylibmc + +from .client import TracedClient + +# Original Client class +_Client = pylibmc.Client + + +def patch(): + setattr(pylibmc, 'Client', TracedClient) + + +def unpatch(): + setattr(pylibmc, 'Client', _Client) diff --git a/ddtrace/contrib/pylons/__init__.py b/ddtrace/contrib/pylons/__init__.py new file mode 100644 index 0000000000..88339224d6 --- /dev/null +++ b/ddtrace/contrib/pylons/__init__.py @@ -0,0 +1,32 @@ +""" +The pylons trace middleware will track request timings. To +install the middleware, prepare your WSGI application and do +the following:: + + from pylons.wsgiapp import PylonsApp + + from ddtrace import tracer + from ddtrace.contrib.pylons import PylonsTraceMiddleware + + app = PylonsApp(...) + + traced_app = PylonsTraceMiddleware(app, tracer, service='my-pylons-app') + +Then you can define your routes and views as usual. +""" + +from ...utils.importlib import require_modules + + +required_modules = ['pylons.wsgiapp'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .middleware import PylonsTraceMiddleware + from .patch import patch, unpatch + + __all__ = [ + 'patch', + 'unpatch', + 'PylonsTraceMiddleware', + ] diff --git a/ddtrace/contrib/pylons/compat.py b/ddtrace/contrib/pylons/compat.py new file mode 100644 index 0000000000..f49480d055 --- /dev/null +++ b/ddtrace/contrib/pylons/compat.py @@ -0,0 +1,8 @@ +try: + from pylons.templating import render_mako # noqa + + # Pylons > 0.9.7 + legacy_pylons = False +except ImportError: + # Pylons <= 0.9.7 + legacy_pylons = True diff --git a/ddtrace/contrib/pylons/constants.py b/ddtrace/contrib/pylons/constants.py new file mode 100644 index 0000000000..ae0fb42497 --- /dev/null +++ b/ddtrace/contrib/pylons/constants.py @@ -0,0 +1 @@ +CONFIG_MIDDLEWARE = '__datadog_middleware' diff --git a/ddtrace/contrib/pylons/middleware.py b/ddtrace/contrib/pylons/middleware.py new file mode 100644 index 0000000000..2d2d5df359 --- /dev/null +++ b/ddtrace/contrib/pylons/middleware.py @@ -0,0 +1,110 @@ +import sys + +from webob import Request +from pylons import config + +from .renderer import trace_rendering +from .constants import CONFIG_MIDDLEWARE + +from ...compat import reraise +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...ext import SpanTypes, http +from ...internal.logger import get_logger +from ...propagation.http import HTTPPropagator +from ...settings import config as ddconfig + + +log = get_logger(__name__) + + +class PylonsTraceMiddleware(object): + + def __init__(self, app, tracer, service='pylons', distributed_tracing=True): + self.app = app + self._service = service + self._distributed_tracing = distributed_tracing + self._tracer = tracer + + # register middleware reference + config[CONFIG_MIDDLEWARE] = self + + # add template tracing + trace_rendering() + + def __call__(self, environ, start_response): + if self._distributed_tracing: + # retrieve distributed tracing headers + request = Request(environ) + propagator = HTTPPropagator() + context = propagator.extract(request.headers) + # only 
need to activate the new context if something was propagated
+            if context.trace_id:
+                self._tracer.context_provider.activate(context)
+
+        with self._tracer.trace('pylons.request', service=self._service, span_type=SpanTypes.WEB) as span:
+            # Set the service in tracer.trace() because priority sampling requires it to be
+            # set as early as possible when different services share a single agent.
+
+            # set the analytics sample rate, with the global config enabled
+            span.set_tag(
+                ANALYTICS_SAMPLE_RATE_KEY,
+                ddconfig.pylons.get_analytics_sample_rate(use_global_config=True)
+            )
+
+            if not span.sampled:
+                return self.app(environ, start_response)
+
+            # tentatively set the status code here; errors are handled by the except blocks below
+            def _start_response(status, *args, **kwargs):
+                """ A patched response callback which extracts some response metadata. """
+                http_code = int(status.split()[0])
+                span.set_tag(http.STATUS_CODE, http_code)
+                if http_code >= 500:
+                    span.error = 1
+                return start_response(status, *args, **kwargs)
+
+            try:
+                return self.app(environ, _start_response)
+            except Exception as e:
+                # store the current exception info so we can re-raise it later
+                (typ, val, tb) = sys.exc_info()
+
+                # e.code can either be a string or an int
+                code = getattr(e, 'code', 500)
+                try:
+                    code = int(code)
+                    if not 100 <= code < 600:
+                        code = 500
+                except Exception:
+                    code = 500
+                span.set_tag(http.STATUS_CODE, code)
+                span.error = 1
+
+                # re-raise the original exception with its original traceback
+                reraise(typ, val, tb=tb)
+            except SystemExit:
+                span.set_tag(http.STATUS_CODE, 500)
+                span.error = 1
+                raise
+            finally:
+                controller = environ.get('pylons.routes_dict', {}).get('controller')
+                action = environ.get('pylons.routes_dict', {}).get('action')
+
+                # There are cases where users re-route requests and manually
+                # set resources. If so, don't do anything; otherwise set the
+                # resource to the controller / action that handled the request.
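+                # (e.g. a request handled by controller 'users' and action
+                # 'index' gets the resource 'users.index')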
+                if span.resource == span.name:
+                    span.resource = '%s.%s' % (controller, action)
+
+                span.set_tags({
+                    http.METHOD: environ.get('REQUEST_METHOD'),
+                    http.URL: '%s://%s:%s%s' % (environ.get('wsgi.url_scheme'),
+                                                environ.get('SERVER_NAME'),
+                                                environ.get('SERVER_PORT'),
+                                                environ.get('PATH_INFO')),
+                    'pylons.user': environ.get('REMOTE_USER', ''),
+                    'pylons.route.controller': controller,
+                    'pylons.route.action': action,
+                })
+                if ddconfig.pylons.trace_query_string:
+                    span.set_tag(http.QUERY_STRING, environ.get('QUERY_STRING'))
diff --git a/ddtrace/contrib/pylons/patch.py b/ddtrace/contrib/pylons/patch.py
new file mode 100644
index 0000000000..ad437d8c20
--- /dev/null
+++ b/ddtrace/contrib/pylons/patch.py
@@ -0,0 +1,41 @@
+import os
+from ddtrace.vendor import wrapt
+import pylons.wsgiapp
+
+from ddtrace import tracer, Pin
+
+from .middleware import PylonsTraceMiddleware
+from ...utils.formats import asbool, get_env
+from ...utils.wrappers import unwrap as _u
+
+
+def patch():
+    """Instrument Pylons applications"""
+    if getattr(pylons.wsgiapp, '_datadog_patch', False):
+        return
+
+    setattr(pylons.wsgiapp, '_datadog_patch', True)
+    wrapt.wrap_function_wrapper('pylons.wsgiapp', 'PylonsApp.__init__', traced_init)
+
+
+def unpatch():
+    """Disable Pylons tracing"""
+    if not getattr(pylons.wsgiapp, '_datadog_patch', False):
+        return
+    setattr(pylons.wsgiapp, '_datadog_patch', False)
+
+    _u(pylons.wsgiapp.PylonsApp, '__init__')
+
+
+def traced_init(wrapped, instance, args, kwargs):
+    wrapped(*args, **kwargs)
+
+    # set tracing options and create the TraceMiddleware
+    service = os.environ.get('DATADOG_SERVICE_NAME', 'pylons')
+    distributed_tracing = asbool(get_env('pylons', 'distributed_tracing', True))
+    Pin(service=service, tracer=tracer).onto(instance)
+    traced_app = PylonsTraceMiddleware(instance, tracer, service=service, distributed_tracing=distributed_tracing)
+
+    # re-order the middleware stack so that our middleware comes first
+    traced_app.app = instance.app
+    instance.app = traced_app
diff --git a/ddtrace/contrib/pylons/renderer.py b/ddtrace/contrib/pylons/renderer.py
new file mode 100644
index 0000000000..45ae49c805
--- /dev/null
+++ b/ddtrace/contrib/pylons/renderer.py
@@ -0,0 +1,36 @@
+import pylons
+
+from pylons import config
+
+from ddtrace.vendor.wrapt import wrap_function_wrapper as _w
+
+from .compat import legacy_pylons
+from .constants import CONFIG_MIDDLEWARE
+
+
+def trace_rendering():
+    """Patch all Pylons renderers. It supports multiple versions
+    of Pylons and multiple renderers.
+ """ + # patch only once + if getattr(pylons.templating, '__datadog_patch', False): + return + setattr(pylons.templating, '__datadog_patch', True) + + if legacy_pylons: + # Pylons <= 0.9.7 + _w('pylons.templating', 'render', _traced_renderer) + else: + # Pylons > 0.9.7 + _w('pylons.templating', 'render_mako', _traced_renderer) + _w('pylons.templating', 'render_mako_def', _traced_renderer) + _w('pylons.templating', 'render_genshi', _traced_renderer) + _w('pylons.templating', 'render_jinja2', _traced_renderer) + + +def _traced_renderer(wrapped, instance, args, kwargs): + """Traced renderer""" + tracer = config[CONFIG_MIDDLEWARE]._tracer + with tracer.trace('pylons.render') as span: + span.set_tag('template.name', args[0]) + return wrapped(*args, **kwargs) diff --git a/ddtrace/contrib/pymemcache/__init__.py b/ddtrace/contrib/pymemcache/__init__.py new file mode 100644 index 0000000000..ad8607a732 --- /dev/null +++ b/ddtrace/contrib/pymemcache/__init__.py @@ -0,0 +1,38 @@ +"""Instrument pymemcache to report memcached queries. + +``patch_all`` will automatically patch the pymemcache ``Client``:: + + from ddtrace import Pin, patch + + # If not patched yet, patch pymemcache specifically + patch(pymemcache=True) + + # Import reference to Client AFTER patching + import pymemcache + from pymemcache.client.base import Client + + # Use a pin to specify metadata related all clients + Pin.override(pymemcache, service='my-memcached-service') + + # This will report a span with the default settings + client = Client(('localhost', 11211)) + client.set("my-key", "my-val") + + # Use a pin to specify metadata related to this particular client + Pin.override(client, service='my-memcached-service') + +Pymemcache ``HashClient`` will also be indirectly patched as it uses ``Client`` +under the hood. +""" +from ...utils.importlib import require_modules + + +required_modules = ['pymemcache'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .patch import patch, unpatch + __all__ = [ + patch, + unpatch, + ] diff --git a/ddtrace/contrib/pymemcache/client.py b/ddtrace/contrib/pymemcache/client.py new file mode 100644 index 0000000000..891425814b --- /dev/null +++ b/ddtrace/contrib/pymemcache/client.py @@ -0,0 +1,219 @@ +import sys + +# 3p +from ddtrace.vendor import wrapt +import pymemcache +from pymemcache.client.base import Client +from pymemcache.exceptions import ( + MemcacheClientError, + MemcacheServerError, + MemcacheUnknownCommandError, + MemcacheUnknownError, + MemcacheIllegalInputError, +) + +# project +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...compat import reraise +from ...ext import SpanTypes, net, memcached as memcachedx +from ...internal.logger import get_logger +from ...pin import Pin +from ...settings import config + +log = get_logger(__name__) + + +# keep a reference to the original unpatched clients +_Client = Client + + +class WrappedClient(wrapt.ObjectProxy): + """Wrapper providing patched methods of a pymemcache Client. + + Relevant connection information is obtained during initialization and + attached to each span. + + Keys are tagged in spans for methods that act upon a key. 
+ """ + + def __init__(self, *args, **kwargs): + c = _Client(*args, **kwargs) + super(WrappedClient, self).__init__(c) + + # tags to apply to each span generated by this client + tags = _get_address_tags(*args, **kwargs) + + parent_pin = Pin.get_from(pymemcache) + + if parent_pin: + pin = parent_pin.clone(tags=tags) + else: + pin = Pin(tags=tags) + + # attach the pin onto this instance + pin.onto(self) + + def set(self, *args, **kwargs): + return self._traced_cmd('set', *args, **kwargs) + + def set_many(self, *args, **kwargs): + return self._traced_cmd('set_many', *args, **kwargs) + + def add(self, *args, **kwargs): + return self._traced_cmd('add', *args, **kwargs) + + def replace(self, *args, **kwargs): + return self._traced_cmd('replace', *args, **kwargs) + + def append(self, *args, **kwargs): + return self._traced_cmd('append', *args, **kwargs) + + def prepend(self, *args, **kwargs): + return self._traced_cmd('prepend', *args, **kwargs) + + def cas(self, *args, **kwargs): + return self._traced_cmd('cas', *args, **kwargs) + + def get(self, *args, **kwargs): + return self._traced_cmd('get', *args, **kwargs) + + def get_many(self, *args, **kwargs): + return self._traced_cmd('get_many', *args, **kwargs) + + def gets(self, *args, **kwargs): + return self._traced_cmd('gets', *args, **kwargs) + + def gets_many(self, *args, **kwargs): + return self._traced_cmd('gets_many', *args, **kwargs) + + def delete(self, *args, **kwargs): + return self._traced_cmd('delete', *args, **kwargs) + + def delete_many(self, *args, **kwargs): + return self._traced_cmd('delete_many', *args, **kwargs) + + def incr(self, *args, **kwargs): + return self._traced_cmd('incr', *args, **kwargs) + + def decr(self, *args, **kwargs): + return self._traced_cmd('decr', *args, **kwargs) + + def touch(self, *args, **kwargs): + return self._traced_cmd('touch', *args, **kwargs) + + def stats(self, *args, **kwargs): + return self._traced_cmd('stats', *args, **kwargs) + + def version(self, *args, **kwargs): + return self._traced_cmd('version', *args, **kwargs) + + def flush_all(self, *args, **kwargs): + return self._traced_cmd('flush_all', *args, **kwargs) + + def quit(self, *args, **kwargs): + return self._traced_cmd('quit', *args, **kwargs) + + def set_multi(self, *args, **kwargs): + """set_multi is an alias for set_many""" + return self._traced_cmd('set_many', *args, **kwargs) + + def get_multi(self, *args, **kwargs): + """set_multi is an alias for set_many""" + return self._traced_cmd('get_many', *args, **kwargs) + + def _traced_cmd(self, method_name, *args, **kwargs): + """Run and trace the given command. + + Any pymemcache exception is caught and span error information is + set. The exception is then reraised for the application to handle + appropriately. + + Relevant tags are set in the span. 
+ """ + method = getattr(self.__wrapped__, method_name) + p = Pin.get_from(self) + + # if the pin does not exist or is not enabled, shortcut + if not p or not p.enabled(): + return method(*args, **kwargs) + + with p.tracer.trace( + memcachedx.CMD, + service=p.service, + resource=method_name, + span_type=SpanTypes.CACHE, + ) as span: + # set analytics sample rate + span.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.pymemcache.get_analytics_sample_rate() + ) + + # try to set relevant tags, catch any exceptions so we don't mess + # with the application + try: + span.set_tags(p.tags) + vals = _get_query_string(args) + query = '{}{}{}'.format(method_name, ' ' if vals else '', vals) + span.set_tag(memcachedx.QUERY, query) + except Exception: + log.debug('Error setting relevant pymemcache tags') + + try: + return method(*args, **kwargs) + except ( + MemcacheClientError, + MemcacheServerError, + MemcacheUnknownCommandError, + MemcacheUnknownError, + MemcacheIllegalInputError, + ): + (typ, val, tb) = sys.exc_info() + span.set_exc_info(typ, val, tb) + reraise(typ, val, tb) + + +def _get_address_tags(*args, **kwargs): + """Attempt to get host and port from args passed to Client initializer.""" + tags = {} + try: + if len(args): + host, port = args[0] + tags[net.TARGET_HOST] = host + tags[net.TARGET_PORT] = port + except Exception: + log.debug('Error collecting client address tags') + + return tags + + +def _get_query_string(args): + """Return the query values given the arguments to a pymemcache command. + + If there are multiple query values, they are joined together + space-separated. + """ + keys = '' + + # shortcut if no args + if not args: + return keys + + # pull out the first arg which will contain any key + arg = args[0] + + # if we get a dict, convert to list of keys + if type(arg) is dict: + arg = list(arg) + + if type(arg) is str: + keys = arg + elif type(arg) is bytes: + keys = arg.decode() + elif type(arg) is list and len(arg): + if type(arg[0]) is str: + keys = ' '.join(arg) + elif type(arg[0]) is bytes: + keys = b' '.join(arg).decode() + + return keys diff --git a/ddtrace/contrib/pymemcache/patch.py b/ddtrace/contrib/pymemcache/patch.py new file mode 100644 index 0000000000..6ab0cabc07 --- /dev/null +++ b/ddtrace/contrib/pymemcache/patch.py @@ -0,0 +1,32 @@ +import pymemcache + +from ddtrace.ext import memcached as memcachedx +from ddtrace.pin import Pin, _DD_PIN_NAME, _DD_PIN_PROXY_NAME +from .client import WrappedClient + +_Client = pymemcache.client.base.Client + + +def patch(): + if getattr(pymemcache.client, '_datadog_patch', False): + return + + setattr(pymemcache.client, '_datadog_patch', True) + setattr(pymemcache.client.base, 'Client', WrappedClient) + + # Create a global pin with default configuration for our pymemcache clients + Pin( + app=memcachedx.SERVICE, service=memcachedx.SERVICE + ).onto(pymemcache) + + +def unpatch(): + """Remove pymemcache tracing""" + if not getattr(pymemcache.client, '_datadog_patch', False): + return + setattr(pymemcache.client, '_datadog_patch', False) + setattr(pymemcache.client.base, 'Client', _Client) + + # Remove any pins that may exist on the pymemcache reference + setattr(pymemcache, _DD_PIN_NAME, None) + setattr(pymemcache, _DD_PIN_PROXY_NAME, None) diff --git a/ddtrace/contrib/pymongo/__init__.py b/ddtrace/contrib/pymongo/__init__.py new file mode 100644 index 0000000000..fd49869d3b --- /dev/null +++ b/ddtrace/contrib/pymongo/__init__.py @@ -0,0 +1,36 @@ +"""Instrument pymongo to report MongoDB queries. 
+
+The pymongo integration works by wrapping pymongo's MongoClient to trace
+network calls. Pymongo 3.0 and greater are the currently supported versions.
+``patch_all`` will automatically patch your MongoClient instance to make it work.
+
+::
+
+    # Be sure to import pymongo and not pymongo.MongoClient directly,
+    # otherwise you won't have access to the patched version
+    from ddtrace import Pin, patch
+    import pymongo
+
+    # If not patched yet, you can patch pymongo specifically
+    patch(pymongo=True)
+
+    # At that point, pymongo is instrumented with the default settings
+    client = pymongo.MongoClient()
+    # Example of instrumented query
+    db = client["test-db"]
+    db.teams.find({"name": "Toronto Maple Leafs"})
+
+    # Use a pin to specify metadata related to this client
+    client = pymongo.MongoClient()
+    pin = Pin.override(client, service="mongo-master")
+"""
+from ...utils.importlib import require_modules
+
+
+required_modules = ['pymongo']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .client import trace_mongo_client
+        from .patch import patch
+        __all__ = ['trace_mongo_client', 'patch']
diff --git a/ddtrace/contrib/pymongo/client.py b/ddtrace/contrib/pymongo/client.py
new file mode 100644
index 0000000000..3aa5d22320
--- /dev/null
+++ b/ddtrace/contrib/pymongo/client.py
@@ -0,0 +1,293 @@
+# stdlib
+import contextlib
+import json
+
+# 3p
+import pymongo
+from ddtrace.vendor.wrapt import ObjectProxy
+
+# project
+import ddtrace
+from ...compat import iteritems
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...ext import SpanTypes, mongo as mongox, net as netx
+from ...internal.logger import get_logger
+from ...settings import config
+from ...utils.deprecation import deprecated
+from .parse import parse_spec, parse_query, parse_msg
+
+# Original Client class
+_MongoClient = pymongo.MongoClient
+
+log = get_logger(__name__)
+
+
+@deprecated(message='Use patching instead (see the docs).', version='1.0.0')
+def trace_mongo_client(client, tracer, service=mongox.SERVICE):
+    traced_client = TracedMongoClient(client)
+    ddtrace.Pin(service=service, tracer=tracer).onto(traced_client)
+    return traced_client
+
+
+class TracedMongoClient(ObjectProxy):
+
+    def __init__(self, client=None, *args, **kwargs):
+        # To support the former trace_mongo_client interface, we have to keep this old interface
+        # TODO(Benjamin): drop it in a later version
+        if not isinstance(client, _MongoClient):
+            # Patched interface: instantiate the client ourselves.
+            #
+            # `client` is just the first positional argument, so it may hold
+            # the `host` value. If it is None, __init__ was either:
+            #   1) invoked with host=None
+            #   2) not given a first argument (client defaults to None)
+            # We cannot tell which case it is, but it should not matter since
+            # the default value for host is None; in either case we can simply
+            # not provide it as an argument
+            if client is None:
+                client = _MongoClient(*args, **kwargs)
+            # else client is a value for host, so just pass it along
+            else:
+                client = _MongoClient(client, *args, **kwargs)
+
+        super(TracedMongoClient, self).__init__(client)
+        # NOTE[matt] the TracedMongoClient attempts to trace all of the network
+        # calls in the trace library. This is good because it measures the
+        # actual network time. It's bad because it uses a private API which
+        # could change. We'll see how this goes.
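+        # Wrapping chain (for reference): this client wraps the private
+        # _topology; TracedTopology.select_server() hands out TracedServer
+        # instances, and TracedServer.get_socket() hands out TracedSocket
+        # instances, where commands are ultimately traced.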
+        client._topology = TracedTopology(client._topology)
+
+        # Default Pin
+        ddtrace.Pin(service=mongox.SERVICE, app=mongox.SERVICE).onto(self)
+
+    def __setddpin__(self, pin):
+        pin.onto(self._topology)
+
+    def __getddpin__(self):
+        return ddtrace.Pin.get_from(self._topology)
+
+
+class TracedTopology(ObjectProxy):
+
+    def __init__(self, topology):
+        super(TracedTopology, self).__init__(topology)
+
+    def select_server(self, *args, **kwargs):
+        s = self.__wrapped__.select_server(*args, **kwargs)
+        if not isinstance(s, TracedServer):
+            s = TracedServer(s)
+        # Reattach the pin every time in case it changed since the initial patching
+        ddtrace.Pin.get_from(self).onto(s)
+        return s
+
+
+class TracedServer(ObjectProxy):
+
+    def __init__(self, server):
+        super(TracedServer, self).__init__(server)
+
+    def _datadog_trace_operation(self, operation):
+        cmd = None
+        # Only try to parse something we think is a query.
+        if self._is_query(operation):
+            try:
+                cmd = parse_query(operation)
+            except Exception:
+                log.exception('error parsing query')
+
+        pin = ddtrace.Pin.get_from(self)
+        # if we couldn't parse or shouldn't trace the message, just go.
+        if not cmd or not pin or not pin.enabled():
+            return None
+
+        span = pin.tracer.trace('pymongo.cmd', span_type=SpanTypes.MONGODB, service=pin.service)
+        span.set_tag(mongox.DB, cmd.db)
+        span.set_tag(mongox.COLLECTION, cmd.coll)
+        span.set_tags(cmd.tags)
+
+        # set `mongodb.query` tag and resource for span
+        _set_query_metadata(span, cmd)
+
+        # set analytics sample rate
+        sample_rate = config.pymongo.get_analytics_sample_rate()
+        if sample_rate is not None:
+            span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate)
+        return span
+
+    # Pymongo >= 3.9
+    def run_operation_with_response(self, sock_info, operation, *args, **kwargs):
+        span = self._datadog_trace_operation(operation)
+        if not span:
+            return self.__wrapped__.run_operation_with_response(
+                sock_info,
+                operation,
+                *args,
+                **kwargs
+            )
+
+        try:
+            result = self.__wrapped__.run_operation_with_response(
+                sock_info,
+                operation,
+                *args,
+                **kwargs
+            )
+
+            if result and result.address:
+                _set_address_tags(span, result.address)
+            return result
+        finally:
+            span.finish()
+
+    # Pymongo < 3.9
+    def send_message_with_response(self, operation, *args, **kwargs):
+        span = self._datadog_trace_operation(operation)
+        if not span:
+            return self.__wrapped__.send_message_with_response(
+                operation,
+                *args,
+                **kwargs
+            )
+
+        try:
+            result = self.__wrapped__.send_message_with_response(
+                operation,
+                *args,
+                **kwargs
+            )
+
+            if result and result.address:
+                _set_address_tags(span, result.address)
+            return result
+        finally:
+            span.finish()
+
+    @contextlib.contextmanager
+    def get_socket(self, *args, **kwargs):
+        with self.__wrapped__.get_socket(*args, **kwargs) as s:
+            if not isinstance(s, TracedSocket):
+                s = TracedSocket(s)
+            ddtrace.Pin.get_from(self).onto(s)
+            yield s
+
+    @staticmethod
+    def _is_query(op):
+        # NOTE: _Query should always have a spec field
+        return hasattr(op, 'spec')
+
+
+class TracedSocket(ObjectProxy):
+
+    def __init__(self, socket):
+        super(TracedSocket, self).__init__(socket)
+
+    def command(self, dbname, spec, *args, **kwargs):
+        cmd = None
+        try:
+            cmd = parse_spec(spec, dbname)
+        except Exception:
+            log.exception('error parsing spec.
skipping trace') + + pin = ddtrace.Pin.get_from(self) + # skip tracing if we don't have a piece of data we need + if not dbname or not cmd or not pin or not pin.enabled(): + return self.__wrapped__.command(dbname, spec, *args, **kwargs) + + cmd.db = dbname + with self.__trace(cmd): + return self.__wrapped__.command(dbname, spec, *args, **kwargs) + + def write_command(self, request_id, msg): + cmd = None + try: + cmd = parse_msg(msg) + except Exception: + log.exception('error parsing msg') + + pin = ddtrace.Pin.get_from(self) + # if we couldn't parse it, don't try to trace it. + if not cmd or not pin or not pin.enabled(): + return self.__wrapped__.write_command(request_id, msg) + + with self.__trace(cmd) as s: + result = self.__wrapped__.write_command(request_id, msg) + if result: + s.set_metric(mongox.ROWS, result.get('n', -1)) + return result + + def __trace(self, cmd): + pin = ddtrace.Pin.get_from(self) + s = pin.tracer.trace( + 'pymongo.cmd', + span_type=SpanTypes.MONGODB, + service=pin.service) + + if cmd.db: + s.set_tag(mongox.DB, cmd.db) + if cmd: + s.set_tag(mongox.COLLECTION, cmd.coll) + s.set_tags(cmd.tags) + s.set_metrics(cmd.metrics) + + # set `mongodb.query` tag and resource for span + _set_query_metadata(s, cmd) + + # set analytics sample rate + s.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.pymongo.get_analytics_sample_rate() + ) + + if self.address: + _set_address_tags(s, self.address) + return s + + +def normalize_filter(f=None): + if f is None: + return {} + elif isinstance(f, list): + # normalize lists of filters + # e.g. {$or: [ { age: { $lt: 30 } }, { type: 1 } ]} + return [normalize_filter(s) for s in f] + elif isinstance(f, dict): + # normalize dicts of filters + # {$or: [ { age: { $lt: 30 } }, { type: 1 } ]}) + out = {} + for k, v in iteritems(f): + if k == '$in' or k == '$nin': + # special case $in queries so we don't loop over lists. + out[k] = '?' + elif isinstance(v, list) or isinstance(v, dict): + # RECURSION ALERT: needs to move to the agent + out[k] = normalize_filter(v) + else: + # NOTE: this shouldn't happen, but let's have a safeguard. + out[k] = '?' + return out + else: + # FIXME[matt] unexpected type. not sure this should ever happen, but at + # least it won't crash. + return {} + + +def _set_address_tags(span, address): + # the address is only set after the cursor is done. 
+    if address:
+        span.set_tag(netx.TARGET_HOST, address[0])
+        span.set_tag(netx.TARGET_PORT, address[1])
+
+
+def _set_query_metadata(span, cmd):
+    """ Set the span's `mongodb.query` tag and resource for the given command query. """
+    if cmd.query:
+        nq = normalize_filter(cmd.query)
+        span.set_tag('mongodb.query', nq)
+        # needed to dump json so we don't get unicode
+        # dict keys like {u'foo':'bar'}
+        q = json.dumps(nq)
+        span.resource = '{} {} {}'.format(cmd.name, cmd.coll, q)
+    else:
+        span.resource = '{} {}'.format(cmd.name, cmd.coll)
diff --git a/ddtrace/contrib/pymongo/parse.py b/ddtrace/contrib/pymongo/parse.py
new file mode 100644
index 0000000000..a6ed214255
--- /dev/null
+++ b/ddtrace/contrib/pymongo/parse.py
@@ -0,0 +1,209 @@
+import ctypes
+import struct
+
+# 3p
+import bson
+from bson.codec_options import CodecOptions
+from bson.son import SON
+
+# project
+from ...compat import to_unicode
+from ...ext import net as netx
+from ...internal.logger import get_logger
+
+
+log = get_logger(__name__)
+
+
+# MongoDB wire protocol commands
+# http://docs.mongodb.com/manual/reference/mongodb-wire-protocol
+OP_CODES = {
+    1: 'reply',
+    1000: 'msg',  # DEV: 1000 was deprecated at some point, use 2013 instead
+    2001: 'update',
+    2002: 'insert',
+    2003: 'reserved',
+    2004: 'query',
+    2005: 'get_more',
+    2006: 'delete',
+    2007: 'kill_cursors',
+    2010: 'command',
+    2011: 'command_reply',
+    2013: 'msg',
+}
+
+# The maximum message length we'll try to parse
+MAX_MSG_PARSE_LEN = 1024 * 1024
+
+# the standard MongoDB wire header: four little-endian int32s
+header_struct = struct.Struct('<iiii')
+
+
+class Command(object):
+    """ Command stores information about a pymongo network command. """
+
+    __slots__ = ['name', 'coll', 'db', 'tags', 'metrics', 'query']
+
+    def __init__(self, name, db, coll):
+        self.name = name
+        self.coll = coll
+        self.db = db
+        self.tags = {}
+        self.metrics = {}
+        self.query = None
+
+
+def parse_query(query):
+    """ Return a command parsed from the given mongo db query. """
+    db, coll = None, None
+    ns = getattr(query, 'ns', None)
+    if ns:
+        # pymongo < 3.1 stores the full namespace
+        db, coll = _split_namespace(ns)
+    else:
+        # pymongo >= 3.1 stores the db and coll separately
+        coll = getattr(query, 'coll', None)
+        db = getattr(query, 'db', None)
+
+    # pymongo < 3.1 _Query does not have a name field, so default to 'query'
+    cmd = Command(getattr(query, 'name', 'query'), db, coll)
+    cmd.query = query.spec
+    return cmd
+
+
+def parse_spec(spec, db=None):
+    """ Return a Command that has parsed the relevant detail for the given
+    pymongo SON spec.
+    """
+
+    # the first element is the command and collection
+    items = list(spec.items())
+    if not items:
+        return None
+    name, coll = items[0]
+    cmd = Command(name, db or spec.get('$db'), coll)
+
+    if 'ordered' in spec:  # in insert and update
+        cmd.tags['mongodb.ordered'] = spec['ordered']
+
+    if cmd.name == 'insert':
+        if 'documents' in spec:
+            cmd.metrics['mongodb.documents'] = len(spec['documents'])
+
+    elif cmd.name == 'update':
+        updates = spec.get('updates')
+        if updates:
+            # FIXME[matt] is there ever more than one here?
+            cmd.query = updates[0].get('q')
+
+    elif cmd.name == 'delete':
+        dels = spec.get('deletes')
+        if dels:
+            # FIXME[matt] is there ever more than one here?
+            cmd.query = dels[0].get('q')
+
+    return cmd
+
+
+def _cstring(raw):
+    """ Return the first null-terminated cstring from the buffer. """
+    return ctypes.create_string_buffer(raw).value
+
+
+def _split_namespace(ns):
+    """ Return a (db, collection) pair from the 'db.coll' string.
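+    E.g. (an illustrative case) ``_split_namespace('db.users')`` gives
+    ``['db', 'users']``.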
""" + if ns: + # NOTE[matt] ns is unicode or bytes depending on the client version + # so force cast to unicode + split = to_unicode(ns).split('.', 1) + if len(split) == 1: + raise Exception("namespace doesn't contain period: %s" % ns) + return split + return (None, None) diff --git a/ddtrace/contrib/pymongo/patch.py b/ddtrace/contrib/pymongo/patch.py new file mode 100644 index 0000000000..2175c8d56a --- /dev/null +++ b/ddtrace/contrib/pymongo/patch.py @@ -0,0 +1,14 @@ +import pymongo + +from .client import TracedMongoClient + +# Original Client class +_MongoClient = pymongo.MongoClient + + +def patch(): + setattr(pymongo, 'MongoClient', TracedMongoClient) + + +def unpatch(): + setattr(pymongo, 'MongoClient', _MongoClient) diff --git a/ddtrace/contrib/pymysql/__init__.py b/ddtrace/contrib/pymysql/__init__.py new file mode 100644 index 0000000000..a471ea1882 --- /dev/null +++ b/ddtrace/contrib/pymysql/__init__.py @@ -0,0 +1,31 @@ +"""Instrument pymysql to report MySQL queries. + +``patch_all`` will automatically patch your pymysql connection to make it work. +:: + + from ddtrace import Pin, patch + from pymysql import connect + + # If not patched yet, you can patch pymysql specifically + patch(pymysql=True) + + # This will report a span with the default settings + conn = connect(user="alice", password="b0b", host="localhost", port=3306, database="test") + cursor = conn.cursor() + cursor.execute("SELECT 6*7 AS the_answer;") + + # Use a pin to specify metadata related to this connection + Pin.override(conn, service='pymysql-users') +""" + +from ...utils.importlib import require_modules + + +required_modules = ['pymysql'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .patch import patch + from .tracers import get_traced_pymysql_connection + + __all__ = ['get_traced_pymysql_connection', 'patch'] diff --git a/ddtrace/contrib/pymysql/patch.py b/ddtrace/contrib/pymysql/patch.py new file mode 100644 index 0000000000..ccb56781f8 --- /dev/null +++ b/ddtrace/contrib/pymysql/patch.py @@ -0,0 +1,39 @@ +# 3p +from ddtrace.vendor import wrapt +import pymysql + +# project +from ddtrace import Pin +from ddtrace.contrib.dbapi import TracedConnection +from ...ext import net, db + +CONN_ATTR_BY_TAG = { + net.TARGET_HOST: 'host', + net.TARGET_PORT: 'port', + db.USER: 'user', + db.NAME: 'db', +} + + +def patch(): + wrapt.wrap_function_wrapper('pymysql', 'connect', _connect) + + +def unpatch(): + if isinstance(pymysql.connect, wrapt.ObjectProxy): + pymysql.connect = pymysql.connect.__wrapped__ + + +def _connect(func, instance, args, kwargs): + conn = func(*args, **kwargs) + return patch_conn(conn) + + +def patch_conn(conn): + tags = {t: getattr(conn, a, '') for t, a in CONN_ATTR_BY_TAG.items()} + pin = Pin(service='pymysql', app='pymysql', tags=tags) + + # grab the metadata from the conn + wrapped = TracedConnection(conn, pin=pin) + pin.onto(wrapped) + return wrapped diff --git a/ddtrace/contrib/pymysql/tracers.py b/ddtrace/contrib/pymysql/tracers.py new file mode 100644 index 0000000000..d4d95bec55 --- /dev/null +++ b/ddtrace/contrib/pymysql/tracers.py @@ -0,0 +1,8 @@ +import pymysql.connections + +from ...utils.deprecation import deprecated + + +@deprecated(message='Use patching instead (see the docs).', version='1.0.0') +def get_traced_pymysql_connection(*args, **kwargs): + return pymysql.connections.Connection diff --git a/ddtrace/contrib/pyramid/__init__.py b/ddtrace/contrib/pyramid/__init__.py new file mode 100644 index 0000000000..d7a012f42b --- /dev/null 
+++ b/ddtrace/contrib/pyramid/__init__.py
@@ -0,0 +1,57 @@
+r"""To trace requests from a Pyramid application, trace your application
+config::
+
+
+    from pyramid.config import Configurator
+    from ddtrace.contrib.pyramid import trace_pyramid
+
+    settings = {
+        'datadog_trace_service': 'my-web-app-name',
+    }
+
+    config = Configurator(settings=settings)
+    trace_pyramid(config)
+
+    # use your config as normal.
+    config.add_route('index', '/')
+
+Available settings are:
+
+* ``datadog_trace_service``: change the `pyramid` service name
+* ``datadog_trace_enabled``: sets if the Tracer is enabled or not
+* ``datadog_distributed_tracing``: set it to ``False`` to disable Distributed Tracing
+* ``datadog_analytics_enabled``: set it to ``True`` to enable generating APM events for Trace Search & Analytics
+
+If you use the ``pyramid.tweens`` settings value to set the tweens for your
+application, you need to add ``ddtrace.contrib.pyramid:trace_tween_factory``
+explicitly to the list. For example::
+
+    settings = {
+        'datadog_trace_service': 'my-web-app-name',
+        'pyramid.tweens': 'your_tween_no_1\nyour_tween_no_2\nddtrace.contrib.pyramid:trace_tween_factory',
+    }
+
+    config = Configurator(settings=settings)
+    trace_pyramid(config)
+
+    # use your config as normal.
+    config.add_route('index', '/')
+
+"""
+
+from ...utils.importlib import require_modules
+
+
+required_modules = ['pyramid']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .trace import trace_pyramid, trace_tween_factory, includeme
+        from .patch import patch
+
+        __all__ = [
+            'patch',
+            'trace_pyramid',
+            'trace_tween_factory',
+            'includeme',
+        ]
diff --git a/ddtrace/contrib/pyramid/constants.py b/ddtrace/contrib/pyramid/constants.py
new file mode 100644
index 0000000000..176699d781
--- /dev/null
+++ b/ddtrace/contrib/pyramid/constants.py
@@ -0,0 +1,6 @@
+SETTINGS_SERVICE = 'datadog_trace_service'
+SETTINGS_TRACER = 'datadog_tracer'
+SETTINGS_TRACE_ENABLED = 'datadog_trace_enabled'
+SETTINGS_DISTRIBUTED_TRACING = 'datadog_distributed_tracing'
+SETTINGS_ANALYTICS_ENABLED = 'datadog_analytics_enabled'
+SETTINGS_ANALYTICS_SAMPLE_RATE = 'datadog_analytics_sample_rate'
diff --git a/ddtrace/contrib/pyramid/patch.py b/ddtrace/contrib/pyramid/patch.py
new file mode 100644
index 0000000000..8224d183de
--- /dev/null
+++ b/ddtrace/contrib/pyramid/patch.py
@@ -0,0 +1,84 @@
+import os
+
+from .trace import trace_pyramid, DD_TWEEN_NAME
+from .constants import (
+    SETTINGS_SERVICE, SETTINGS_DISTRIBUTED_TRACING,
+    SETTINGS_ANALYTICS_ENABLED, SETTINGS_ANALYTICS_SAMPLE_RATE,
+)
+from ...utils.formats import asbool, get_env
+
+import pyramid.config
+from pyramid.path import caller_package
+
+from ddtrace.vendor import wrapt
+
+DD_PATCH = '_datadog_patch'
+
+
+def patch():
+    """
+    Patch pyramid.config.Configurator
+    """
+    if getattr(pyramid.config, DD_PATCH, False):
+        return
+
+    setattr(pyramid.config, DD_PATCH, True)
+    _w = wrapt.wrap_function_wrapper
+    _w('pyramid.config', 'Configurator.__init__', traced_init)
+
+
+def traced_init(wrapped, instance, args, kwargs):
+    settings = kwargs.pop('settings', {})
+    service = os.environ.get('DATADOG_SERVICE_NAME') or 'pyramid'
+    distributed_tracing = asbool(get_env('pyramid', 'distributed_tracing', True))
+    # DEV: the integration-specific analytics flag can be unset but still be
+    #      enabled globally for web frameworks
+    analytics_enabled = get_env('pyramid', 'analytics_enabled')
+    if analytics_enabled is not None:
+        analytics_enabled = asbool(analytics_enabled)
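+    # (illustrative note: get_env() conventionally reads an environment
+    # variable derived from the integration and setting names, e.g.
+    # DD_PYRAMID_ANALYTICS_ENABLED; see utils.formats for the exact rules)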
+    analytics_sample_rate = get_env('pyramid', 'analytics_sample_rate', True)
+    trace_settings = {
+        SETTINGS_SERVICE: service,
+        SETTINGS_DISTRIBUTED_TRACING: distributed_tracing,
+        SETTINGS_ANALYTICS_ENABLED: analytics_enabled,
+        SETTINGS_ANALYTICS_SAMPLE_RATE: analytics_sample_rate,
+    }
+    # Update over top of the defaults
+    # DEV: If we did `settings.update(trace_settings)` then we would only ever
+    #      have the default values.
+    trace_settings.update(settings)
+    # If the tweens are explicitly set with 'pyramid.tweens', we need to
+    # explicitly set our tween too since `add_tween` will be ignored.
+    insert_tween_if_needed(trace_settings)
+    kwargs['settings'] = trace_settings
+
+    # `caller_package` works by walking a fixed number of frames up the stack
+    # to find the calling package. So if we let the original `__init__`
+    # function call it, our wrapper will mess things up.
+    if not kwargs.get('package', None):
+        # Get the package for the third frame up from this one:
+        # - ddtrace.contrib.pyramid.patch
+        # - ddtrace.vendor.wrapt
+        # - (this is the frame we want)
+        # DEV: the default is `level=2`, which would give us the package from `wrapt`
+        kwargs['package'] = caller_package(level=3)
+
+    wrapped(*args, **kwargs)
+    trace_pyramid(instance)
+
+
+def insert_tween_if_needed(settings):
+    tweens = settings.get('pyramid.tweens')
+    # If the list is empty, pyramid does not consider the tweens to have been
+    # set explicitly. And if our tween is already there, there is nothing to do.
+    if not tweens or not tweens.strip() or DD_TWEEN_NAME in tweens:
+        return
+    # pyramid.tweens.EXCVIEW is the name of the built-in exception view provided
+    # by pyramid. We need our tween to come before it, otherwise unhandled
+    # exceptions will be caught before they reach our tween.
+    idx = tweens.find(pyramid.tweens.EXCVIEW)
+    if idx == -1:
+        settings['pyramid.tweens'] = tweens + '\n' + DD_TWEEN_NAME
+    else:
+        settings['pyramid.tweens'] = tweens[:idx] + DD_TWEEN_NAME + '\n' + tweens[idx:]
diff --git a/ddtrace/contrib/pyramid/trace.py b/ddtrace/contrib/pyramid/trace.py
new file mode 100644
index 0000000000..029352759b
--- /dev/null
+++ b/ddtrace/contrib/pyramid/trace.py
@@ -0,0 +1,119 @@
+import pyramid.renderers
+from pyramid.settings import asbool
+from pyramid.httpexceptions import HTTPException
+from ddtrace.vendor import wrapt
+
+# project
+import ddtrace
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...ext import SpanTypes, http
+from ...internal.logger import get_logger
+from ...propagation.http import HTTPPropagator
+from ...settings import config
+from .constants import (
+    SETTINGS_TRACER,
+    SETTINGS_SERVICE,
+    SETTINGS_TRACE_ENABLED,
+    SETTINGS_DISTRIBUTED_TRACING,
+    SETTINGS_ANALYTICS_ENABLED,
+    SETTINGS_ANALYTICS_SAMPLE_RATE,
+)
+
+
+log = get_logger(__name__)
+
+DD_TWEEN_NAME = 'ddtrace.contrib.pyramid:trace_tween_factory'
+DD_SPAN = '_datadog_span'
+
+
+def trace_pyramid(config):
+    config.include('ddtrace.contrib.pyramid')
+
+
+def includeme(config):
+    # Add our tween just before the default exception handler
+    config.add_tween(DD_TWEEN_NAME, over=pyramid.tweens.EXCVIEW)
+    # ensure we only patch the renderer once
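+    # (wrapt's wrap_function_wrapper replaces the method with an ObjectProxy,
+    # so the isinstance check below doubles as an "already patched?" test)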
+    if not isinstance(pyramid.renderers.RendererHelper.render, wrapt.ObjectProxy):
+        wrapt.wrap_function_wrapper('pyramid.renderers', 'RendererHelper.render', trace_render)
+
+
+def trace_render(func, instance, args, kwargs):
+    # If the request is not traced, we do not trace
+    request = kwargs.get('request', {})
+    if not request:
+        log.debug('No request passed to render, will not be traced')
+        return func(*args, **kwargs)
+    span = getattr(request, DD_SPAN, None)
+    if not span:
+        log.debug('No span found in request, will not be traced')
+        return func(*args, **kwargs)
+
+    with span.tracer.trace('pyramid.render', span_type=SpanTypes.TEMPLATE) as span:
+        return func(*args, **kwargs)
+
+
+def trace_tween_factory(handler, registry):
+    # configuration
+    settings = registry.settings
+    service = settings.get(SETTINGS_SERVICE) or 'pyramid'
+    tracer = settings.get(SETTINGS_TRACER) or ddtrace.tracer
+    enabled = asbool(settings.get(SETTINGS_TRACE_ENABLED, tracer.enabled))
+    distributed_tracing = asbool(settings.get(SETTINGS_DISTRIBUTED_TRACING, True))
+
+    if enabled:
+        # make a request tracing function
+        def trace_tween(request):
+            if distributed_tracing:
+                propagator = HTTPPropagator()
+                context = propagator.extract(request.headers)
+                # only need to activate the new context if something was propagated
+                if context.trace_id:
+                    tracer.context_provider.activate(context)
+            with tracer.trace('pyramid.request', service=service, resource='404', span_type=SpanTypes.WEB) as span:
+                # Configure the trace search sample rate
+                # DEV: pyramid is a special case that maintains separate
+                #      configuration from the config api
+                analytics_enabled = settings.get(SETTINGS_ANALYTICS_ENABLED)
+
+                if (
+                    config.analytics_enabled and analytics_enabled is not False
+                ) or analytics_enabled is True:
+                    span.set_tag(
+                        ANALYTICS_SAMPLE_RATE_KEY,
+                        settings.get(SETTINGS_ANALYTICS_SAMPLE_RATE, True)
+                    )
+
+                setattr(request, DD_SPAN, span)  # used to find the tracer in templates
+                response = None
+                try:
+                    response = handler(request)
+                except HTTPException as e:
+                    # If the exception is a pyramid HTTPException,
+                    # that's still valuable information that isn't necessarily
+                    # a 500. For instance, HTTPFound is a 302.
+                    # As described in the docs, pyramid exceptions are all valid
+                    # response types
+                    response = e
+                    raise
+                except BaseException:
+                    span.set_tag(http.STATUS_CODE, 500)
+                    raise
+                finally:
+                    # set request tags
+                    span.set_tag(http.URL, request.path_url)
+                    span.set_tag(http.METHOD, request.method)
+                    if config.pyramid.trace_query_string:
+                        span.set_tag(http.QUERY_STRING, request.query_string)
+                    if request.matched_route:
+                        span.resource = '{} {}'.format(request.method, request.matched_route.name)
+                        span.set_tag('pyramid.route.name', request.matched_route.name)
+                    # set response tags
+                    if response:
+                        span.set_tag(http.STATUS_CODE, response.status_code)
+                        if 500 <= response.status_code < 600:
+                            span.error = 1
+                return response
+        return trace_tween
+
+    # if timing support is not enabled, return the original handler
+    return handler
diff --git a/ddtrace/contrib/redis/__init__.py b/ddtrace/contrib/redis/__init__.py
new file mode 100644
index 0000000000..50622016fb
--- /dev/null
+++ b/ddtrace/contrib/redis/__init__.py
@@ -0,0 +1,29 @@
+"""Instrument redis to report Redis queries.
+
+``patch_all`` will automatically patch your Redis client to make it work.
+::
+
+    from ddtrace import Pin, patch
+    import redis
+
+    # If not patched yet, you can patch redis specifically
+    patch(redis=True)
+
+    # This will report a span with the default settings
+    client = redis.StrictRedis(host="localhost", port=6379)
+    client.get("my-key")
+
+    # Use a pin to specify metadata related to this client
+    Pin.override(client, service='redis-queue')
+"""
+
+from ...utils.importlib import require_modules
+
+required_modules = ['redis', 'redis.client']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch
+        from .tracers import get_traced_redis, get_traced_redis_from
+
+        __all__ = ['get_traced_redis', 'get_traced_redis_from', 'patch']
diff --git a/ddtrace/contrib/redis/patch.py b/ddtrace/contrib/redis/patch.py
new file mode 100644
index 0000000000..e1dddd2c9a
--- /dev/null
+++ b/ddtrace/contrib/redis/patch.py
@@ -0,0 +1,114 @@
+# 3p
+import redis
+from ddtrace.vendor import wrapt
+
+# project
+from ddtrace import config
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...pin import Pin
+from ...ext import SpanTypes, redis as redisx
+from ...utils.wrappers import unwrap
+from .util import format_command_args, _extract_conn_tags
+
+
+def patch():
+    """Patch the instrumented methods
+
+    This duplication doesn't look nice. The nicer alternative would be an
+    ObjectProxy on top of Redis and StrictRedis, but that would mean any
+    client class imported directly (e.g. `from redis import Redis`) before
+    patching would not be instrumented.
+    """
+    if getattr(redis, '_datadog_patch', False):
+        return
+    setattr(redis, '_datadog_patch', True)
+
+    _w = wrapt.wrap_function_wrapper
+
+    if redis.VERSION < (3, 0, 0):
+        _w('redis', 'StrictRedis.execute_command', traced_execute_command)
+        _w('redis', 'StrictRedis.pipeline', traced_pipeline)
+        _w('redis', 'Redis.pipeline', traced_pipeline)
+        _w('redis.client', 'BasePipeline.execute', traced_execute_pipeline)
+        _w('redis.client', 'BasePipeline.immediate_execute_command', traced_execute_command)
+    else:
+        _w('redis', 'Redis.execute_command', traced_execute_command)
+        _w('redis', 'Redis.pipeline', traced_pipeline)
+        _w('redis.client', 'Pipeline.execute', traced_execute_pipeline)
+        _w('redis.client', 'Pipeline.immediate_execute_command', traced_execute_command)
+    Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(redis.StrictRedis)
+
+
+def unpatch():
+    if getattr(redis, '_datadog_patch', False):
+        setattr(redis, '_datadog_patch', False)
+
+        if redis.VERSION < (3, 0, 0):
+            unwrap(redis.StrictRedis, 'execute_command')
+            unwrap(redis.StrictRedis, 'pipeline')
+            unwrap(redis.Redis, 'pipeline')
+            unwrap(redis.client.BasePipeline, 'execute')
+            unwrap(redis.client.BasePipeline, 'immediate_execute_command')
+        else:
+            unwrap(redis.Redis, 'execute_command')
+            unwrap(redis.Redis, 'pipeline')
+            unwrap(redis.client.Pipeline, 'execute')
+            unwrap(redis.client.Pipeline, 'immediate_execute_command')
+
+
+#
+# tracing functions
+#
+def traced_execute_command(func, instance, args, kwargs):
+    pin = Pin.get_from(instance)
+    if not pin or not pin.enabled():
+        return func(*args, **kwargs)
+
+    with pin.tracer.trace(redisx.CMD, service=pin.service, span_type=SpanTypes.REDIS) as s:
+        query = format_command_args(args)
+        s.resource = query
+        s.set_tag(redisx.RAWCMD, query)
+        if pin.tags:
+            s.set_tags(pin.tags)
+        s.set_tags(_get_tags(instance))
+        s.set_metric(redisx.ARGS_LEN, len(args))
+        # set the analytics sample rate if enabled
+        s.set_tag(
+            ANALYTICS_SAMPLE_RATE_KEY,
+            config.redis.get_analytics_sample_rate()
+        )
+        # run the command
+        return func(*args,
**kwargs)
+
+
+def traced_pipeline(func, instance, args, kwargs):
+    pipeline = func(*args, **kwargs)
+    pin = Pin.get_from(instance)
+    if pin:
+        pin.onto(pipeline)
+    return pipeline
+
+
+def traced_execute_pipeline(func, instance, args, kwargs):
+    pin = Pin.get_from(instance)
+    if not pin or not pin.enabled():
+        return func(*args, **kwargs)
+
+    # FIXME[matt] done in the agent. worth it?
+    cmds = [format_command_args(c) for c, _ in instance.command_stack]
+    resource = '\n'.join(cmds)
+    tracer = pin.tracer
+    with tracer.trace(redisx.CMD, resource=resource, service=pin.service, span_type=SpanTypes.REDIS) as s:
+        s.set_tag(redisx.RAWCMD, resource)
+        s.set_tags(_get_tags(instance))
+        s.set_metric(redisx.PIPELINE_LEN, len(instance.command_stack))
+
+        # set the analytics sample rate if enabled
+        s.set_tag(
+            ANALYTICS_SAMPLE_RATE_KEY,
+            config.redis.get_analytics_sample_rate()
+        )
+
+        return func(*args, **kwargs)
+
+
+def _get_tags(conn):
+    return _extract_conn_tags(conn.connection_pool.connection_kwargs)
diff --git a/ddtrace/contrib/redis/tracers.py b/ddtrace/contrib/redis/tracers.py
new file mode 100644
index 0000000000..62912ce06c
--- /dev/null
+++ b/ddtrace/contrib/redis/tracers.py
@@ -0,0 +1,20 @@
+from redis import StrictRedis
+
+from ...utils.deprecation import deprecated
+
+
+DEFAULT_SERVICE = 'redis'
+
+
+@deprecated(message='Use patching instead (see the docs).', version='1.0.0')
+def get_traced_redis(ddtracer, service=DEFAULT_SERVICE, meta=None):
+    return _get_traced_redis(ddtracer, StrictRedis, service, meta)
+
+
+@deprecated(message='Use patching instead (see the docs).', version='1.0.0')
+def get_traced_redis_from(ddtracer, baseclass, service=DEFAULT_SERVICE, meta=None):
+    return _get_traced_redis(ddtracer, baseclass, service, meta)
+
+
+def _get_traced_redis(ddtracer, baseclass, service, meta):
+    return baseclass
diff --git a/ddtrace/contrib/redis/util.py b/ddtrace/contrib/redis/util.py
new file mode 100644
index 0000000000..b2e73797b0
--- /dev/null
+++ b/ddtrace/contrib/redis/util.py
@@ -0,0 +1,52 @@
+"""
+Some utils used by the ddtrace redis integration
+"""
+from ...compat import stringify
+from ...ext import redis as redisx, net
+
+VALUE_PLACEHOLDER = '?'
+VALUE_MAX_LEN = 100
+VALUE_TOO_LONG_MARK = '...'
+CMD_MAX_LEN = 1000
+
+
+def _extract_conn_tags(conn_kwargs):
+    """ Transform redis conn info into ddtrace metas """
+    try:
+        return {
+            net.TARGET_HOST: conn_kwargs['host'],
+            net.TARGET_PORT: conn_kwargs['port'],
+            redisx.DB: conn_kwargs['db'] or 0,
+        }
+    except Exception:
+        return {}
+
+
+def format_command_args(args):
+    """Format a command by removing unwanted values
+
+    Restrict what we keep from the values sent (with a SET, HGET, LPUSH, ...):
+    - Skip binary content
+    - Truncate
+    """
+    length = 0
+    out = []
+    for arg in args:
+        try:
+            cmd = stringify(arg)
+
+            if len(cmd) > VALUE_MAX_LEN:
+                cmd = cmd[:VALUE_MAX_LEN] + VALUE_TOO_LONG_MARK
+
+            if length + len(cmd) > CMD_MAX_LEN:
+                prefix = cmd[:CMD_MAX_LEN - length]
+                out.append('%s%s' % (prefix, VALUE_TOO_LONG_MARK))
+                break
+
+            out.append(cmd)
+            length += len(cmd)
+        except Exception:
+            out.append(VALUE_PLACEHOLDER)
+            break
+
+    return ' '.join(out)
diff --git a/ddtrace/contrib/rediscluster/__init__.py b/ddtrace/contrib/rediscluster/__init__.py
new file mode 100644
index 0000000000..86ad02f475
--- /dev/null
+++ b/ddtrace/contrib/rediscluster/__init__.py
@@ -0,0 +1,28 @@
+"""Instrument rediscluster to report Redis Cluster queries.
+
+``patch_all`` will automatically patch your Redis Cluster client to make it work.
+::
+
+    from ddtrace import Pin, patch
+    import rediscluster
+
+    # If not patched yet, you can patch rediscluster specifically
+    patch(rediscluster=True)
+
+    # This will report a span with the default settings
+    client = rediscluster.StrictRedisCluster(startup_nodes=[{'host': 'localhost', 'port': '7000'}])
+    client.get('my-key')
+
+    # Use a pin to specify metadata related to this client
+    Pin.override(client, service='redis-queue')
+"""
+
+from ...utils.importlib import require_modules
+
+required_modules = ['rediscluster', 'rediscluster.client']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch
+
+        __all__ = ['patch']
diff --git a/ddtrace/contrib/rediscluster/patch.py b/ddtrace/contrib/rediscluster/patch.py
new file mode 100644
index 0000000000..b224174dcd
--- /dev/null
+++ b/ddtrace/contrib/rediscluster/patch.py
@@ -0,0 +1,59 @@
+# 3p
+import rediscluster
+from ddtrace.vendor import wrapt
+
+# project
+from ddtrace import config
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...pin import Pin
+from ...ext import SpanTypes, redis as redisx
+from ...utils.wrappers import unwrap
+from ..redis.patch import traced_execute_command, traced_pipeline
+from ..redis.util import format_command_args
+
+
+def patch():
+    """Patch the instrumented methods
+    """
+    if getattr(rediscluster, '_datadog_patch', False):
+        return
+    setattr(rediscluster, '_datadog_patch', True)
+
+    _w = wrapt.wrap_function_wrapper
+    _w('rediscluster', 'StrictRedisCluster.execute_command', traced_execute_command)
+    _w('rediscluster', 'StrictRedisCluster.pipeline', traced_pipeline)
+    _w('rediscluster', 'StrictClusterPipeline.execute', traced_execute_pipeline)
+    Pin(service=redisx.DEFAULT_SERVICE, app=redisx.APP).onto(rediscluster.StrictRedisCluster)
+
+
+def unpatch():
+    if getattr(rediscluster, '_datadog_patch', False):
+        setattr(rediscluster, '_datadog_patch', False)
+        unwrap(rediscluster.StrictRedisCluster, 'execute_command')
+        unwrap(rediscluster.StrictRedisCluster, 'pipeline')
+        unwrap(rediscluster.StrictClusterPipeline, 'execute')
+
+
+#
+# tracing functions
+#
+
+def traced_execute_pipeline(func, instance, args, kwargs):
+    pin = Pin.get_from(instance)
+    if not pin or not pin.enabled():
+        return func(*args, **kwargs)
+
+    cmds = [format_command_args(c.args) for c in instance.command_stack]
+    resource = '\n'.join(cmds)
+    tracer = pin.tracer
+    with tracer.trace(redisx.CMD, resource=resource, service=pin.service, span_type=SpanTypes.REDIS) as s:
+        s.set_tag(redisx.RAWCMD, resource)
+        s.set_metric(redisx.PIPELINE_LEN, len(instance.command_stack))
+
+        # set the analytics sample rate if enabled
+        s.set_tag(
+            ANALYTICS_SAMPLE_RATE_KEY,
+            config.rediscluster.get_analytics_sample_rate()
+        )
+
+        return func(*args, **kwargs)
diff --git a/ddtrace/contrib/requests/__init__.py b/ddtrace/contrib/requests/__init__.py
new file mode 100644
index 0000000000..c0dc661620
--- /dev/null
+++ b/ddtrace/contrib/requests/__init__.py
@@ -0,0 +1,52 @@
+"""
+The ``requests`` integration traces all HTTP calls to internal or external services.
+Auto instrumentation is available using the ``patch`` function that **must be called
+before** importing the ``requests`` library.
The following is an example::
+
+    from ddtrace import patch
+    patch(requests=True)
+
+    import requests
+    requests.get("https://www.datadoghq.com")
+
+If you would prefer finer grained control, use a ``TracedSession`` object as you would a
+``requests.Session``::
+
+    from ddtrace.contrib.requests import TracedSession
+
+    session = TracedSession()
+    session.get("https://www.datadoghq.com")
+
+The library can be configured globally and per instance, using the Configuration API::
+
+    from ddtrace import config
+
+    # disable distributed tracing globally
+    config.requests['distributed_tracing'] = False
+
+    # enable trace analytics globally
+    config.requests['analytics_enabled'] = True
+
+    # change the service name/distributed tracing only for this session
+    session = TracedSession()
+    cfg = config.get_from(session)
+    cfg['service_name'] = 'auth-api'
+    cfg['analytics_enabled'] = True
+
+:ref:`Headers tracing <http-headers-tracing>` is supported for this integration.
+"""
+from ...utils.importlib import require_modules
+
+
+required_modules = ['requests']
+
+with require_modules(required_modules) as missing_modules:
+    if not missing_modules:
+        from .patch import patch, unpatch
+        from .session import TracedSession
+
+        __all__ = [
+            'patch',
+            'unpatch',
+            'TracedSession',
+        ]
diff --git a/ddtrace/contrib/requests/connection.py b/ddtrace/contrib/requests/connection.py
new file mode 100644
index 0000000000..503d4a56c2
--- /dev/null
+++ b/ddtrace/contrib/requests/connection.py
@@ -0,0 +1,114 @@
+import ddtrace
+from ddtrace import config
+from ddtrace.http import store_request_headers, store_response_headers
+
+from ...compat import parse
+from ...constants import ANALYTICS_SAMPLE_RATE_KEY
+from ...ext import SpanTypes, http
+from ...internal.logger import get_logger
+from ...propagation.http import HTTPPropagator
+from .constants import DEFAULT_SERVICE
+
+log = get_logger(__name__)
+
+
+def _extract_service_name(session, span, hostname=None):
+    """Extracts the right service name based on the following logic:
+    - `requests` is the default service name
+    - users can change it via `session.service_name = 'clients'`
+    - if the span doesn't have a parent, use the set service name or fall back to the default
+    - if the span has a parent, use the set service name, or the
+      parent's service value if the set service name is the default
+    - if `split_by_domain` is used, always override the user's settings
+      and use the network location as the service name
+
+    The priority can be represented as:
+    updated service name > parent service name > default (`requests`)
+    """
+    cfg = config.get_from(session)
+    if cfg["split_by_domain"] and hostname:
+        return hostname
+
+    service_name = cfg["service_name"]
+    if service_name == DEFAULT_SERVICE and span._parent is not None and span._parent.service is not None:
+        service_name = span._parent.service
+    return service_name
+
+
+def _wrap_send(func, instance, args, kwargs):
+    """Trace the `Session.send` instance method"""
+    # TODO[manu]: we already offer a way to provide the global tracer
+    # (ddtrace.tracer); it's used only inside our tests and could easily
+    # be replaced by a TracingTestCase that sets up common tracing
+    # functionality.
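+    # (lookup order: a `datadog_tracer` attribute on the session if one is
+    # present, otherwise the global `ddtrace.tracer`)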
+ tracer = getattr(instance, "datadog_tracer", ddtrace.tracer) + + # skip if tracing is not enabled + if not tracer.enabled: + return func(*args, **kwargs) + + request = kwargs.get("request") or args[0] + if not request: + return func(*args, **kwargs) + + # sanitize url of query + parsed_uri = parse.urlparse(request.url) + hostname = parsed_uri.hostname + if parsed_uri.port: + hostname = "{}:{}".format(hostname, parsed_uri.port) + sanitized_url = parse.urlunparse( + ( + parsed_uri.scheme, + parsed_uri.netloc, + parsed_uri.path, + parsed_uri.params, + None, # drop parsed_uri.query + parsed_uri.fragment, + ) + ) + + with tracer.trace("requests.request", span_type=SpanTypes.HTTP) as span: + # update the span service name before doing any action + span.service = _extract_service_name(instance, span, hostname=hostname) + + # Configure trace search sample rate + # DEV: analytics enabled on per-session basis + cfg = config.get_from(instance) + analytics_enabled = cfg.get("analytics_enabled") + if analytics_enabled: + span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, cfg.get("analytics_sample_rate", True)) + + # propagate distributed tracing headers + if cfg.get("distributed_tracing"): + propagator = HTTPPropagator() + propagator.inject(span.context, request.headers) + + # Storing request headers in the span + store_request_headers(request.headers, span, config.requests) + + response = None + try: + response = func(*args, **kwargs) + + # Storing response headers in the span. Note that response.headers is not a dict, but an iterable + # requests custom structure, that we convert to a dict + if hasattr(response, "headers"): + store_response_headers(dict(response.headers), span, config.requests) + return response + finally: + try: + span.set_tag(http.METHOD, request.method.upper()) + span.set_tag(http.URL, sanitized_url) + if config.requests.trace_query_string: + span.set_tag(http.QUERY_STRING, parsed_uri.query) + if response is not None: + span.set_tag(http.STATUS_CODE, response.status_code) + # `span.error` must be an integer + span.error = int(500 <= response.status_code) + # Storing response headers in the span. + # Note that response.headers is not a dict, but an iterable + # requests custom structure, that we convert to a dict + response_headers = dict(getattr(response, "headers", {})) + store_response_headers(response_headers, span, config.requests) + except Exception: + log.debug("requests: error adding tags", exc_info=True) diff --git a/ddtrace/contrib/requests/constants.py b/ddtrace/contrib/requests/constants.py new file mode 100644 index 0000000000..6ad02b6fe5 --- /dev/null +++ b/ddtrace/contrib/requests/constants.py @@ -0,0 +1 @@ +DEFAULT_SERVICE = 'requests' diff --git a/ddtrace/contrib/requests/legacy.py b/ddtrace/contrib/requests/legacy.py new file mode 100644 index 0000000000..a71ebbbc8c --- /dev/null +++ b/ddtrace/contrib/requests/legacy.py @@ -0,0 +1,31 @@ +# [Deprecation]: this module contains deprecated functions +# that will be removed in newer versions of the Tracer. +from ddtrace import config + +from ...utils.deprecation import deprecation + + +def _distributed_tracing(self): + """Deprecated: this method has been deprecated in favor of + the configuration system. It will be removed in newer versions + of the Tracer. 
+ """ + deprecation( + name='client.distributed_tracing', + message='Use the configuration object instead `config.get_from(client)[\'distributed_tracing\'`', + version='1.0.0', + ) + return config.get_from(self)['distributed_tracing'] + + +def _distributed_tracing_setter(self, value): + """Deprecated: this method has been deprecated in favor of + the configuration system. It will be removed in newer versions + of the Tracer. + """ + deprecation( + name='client.distributed_tracing', + message='Use the configuration object instead `config.get_from(client)[\'distributed_tracing\'] = value`', + version='1.0.0', + ) + config.get_from(self)['distributed_tracing'] = value diff --git a/ddtrace/contrib/requests/patch.py b/ddtrace/contrib/requests/patch.py new file mode 100644 index 0000000000..0072481514 --- /dev/null +++ b/ddtrace/contrib/requests/patch.py @@ -0,0 +1,49 @@ +import requests + +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +from ddtrace import config + +from ...pin import Pin +from ...utils.formats import asbool, get_env +from ...utils.wrappers import unwrap as _u +from .legacy import _distributed_tracing, _distributed_tracing_setter +from .constants import DEFAULT_SERVICE +from .connection import _wrap_send + +# requests default settings +config._add('requests', { + 'service_name': get_env('requests', 'service_name', DEFAULT_SERVICE), + 'distributed_tracing': asbool(get_env('requests', 'distributed_tracing', True)), + 'split_by_domain': asbool(get_env('requests', 'split_by_domain', False)), +}) + + +def patch(): + """Activate http calls tracing""" + if getattr(requests, '__datadog_patch', False): + return + setattr(requests, '__datadog_patch', True) + + _w('requests', 'Session.send', _wrap_send) + Pin( + service=config.requests['service_name'], + app='requests', + _config=config.requests, + ).onto(requests.Session) + + # [Backward compatibility]: `session.distributed_tracing` should point and + # update the `Pin` configuration instead. This block adds a property so that + # old implementations work as expected + fn = property(_distributed_tracing) + fn = fn.setter(_distributed_tracing_setter) + requests.Session.distributed_tracing = fn + + +def unpatch(): + """Disable traced sessions""" + if not getattr(requests, '__datadog_patch', False): + return + setattr(requests, '__datadog_patch', False) + + _u(requests.Session, 'send') diff --git a/ddtrace/contrib/requests/session.py b/ddtrace/contrib/requests/session.py new file mode 100644 index 0000000000..85f21a8082 --- /dev/null +++ b/ddtrace/contrib/requests/session.py @@ -0,0 +1,17 @@ +import requests + +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +from .connection import _wrap_send + + +class TracedSession(requests.Session): + """TracedSession is a requests' Session that is already traced. + You can use it if you want a finer grained control for your + HTTP clients. 
+ """ + pass + + +# always patch our `TracedSession` when imported +_w(TracedSession, 'send', _wrap_send) diff --git a/ddtrace/contrib/sqlalchemy/__init__.py b/ddtrace/contrib/sqlalchemy/__init__.py new file mode 100644 index 0000000000..b47586040c --- /dev/null +++ b/ddtrace/contrib/sqlalchemy/__init__.py @@ -0,0 +1,28 @@ +""" +To trace sqlalchemy queries, add instrumentation to the engine class +using the patch method that **must be called before** importing sqlalchemy:: + + # patch before importing `create_engine` + from ddtrace import Pin, patch + patch(sqlalchemy=True) + + # use SQLAlchemy as usual + from sqlalchemy import create_engine + + engine = create_engine('sqlite:///:memory:') + engine.connect().execute("SELECT COUNT(*) FROM users") + + # Use a PIN to specify metadata related to this engine + Pin.override(engine, service='replica-db') +""" +from ...utils.importlib import require_modules + + +required_modules = ['sqlalchemy', 'sqlalchemy.event'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .patch import patch, unpatch + from .engine import trace_engine + + __all__ = ['trace_engine', 'patch', 'unpatch'] diff --git a/ddtrace/contrib/sqlalchemy/engine.py b/ddtrace/contrib/sqlalchemy/engine.py new file mode 100644 index 0000000000..06308e8110 --- /dev/null +++ b/ddtrace/contrib/sqlalchemy/engine.py @@ -0,0 +1,146 @@ +""" +To trace sqlalchemy queries, add instrumentation to the engine class or +instance you are using:: + + from ddtrace import tracer + from ddtrace.contrib.sqlalchemy import trace_engine + from sqlalchemy import create_engine + + engine = create_engine('sqlite:///:memory:') + trace_engine(engine, tracer, 'my-database') + + engine.connect().execute('select count(*) from users') +""" +# 3p +from sqlalchemy.event import listen + +# project +import ddtrace + +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...ext import SpanTypes, sql as sqlx, net as netx +from ...pin import Pin +from ...settings import config + + +def trace_engine(engine, tracer=None, service=None): + """ + Add tracing instrumentation to the given sqlalchemy engine or instance. + + :param sqlalchemy.Engine engine: a SQLAlchemy engine class or instance + :param ddtrace.Tracer tracer: a tracer instance. will default to the global + :param str service: the name of the service to trace. + """ + tracer = tracer or ddtrace.tracer # by default use global + EngineTracer(tracer, service, engine) + + +def _wrap_create_engine(func, module, args, kwargs): + """Trace the SQLAlchemy engine, creating an `EngineTracer` + object that will listen to SQLAlchemy events. A PIN object + is attached to the engine instance so that it can be + used later. 
+ """ + # the service name is set to `None` so that the engine + # name is used by default; users can update this setting + # using the PIN object + engine = func(*args, **kwargs) + EngineTracer(ddtrace.tracer, None, engine) + return engine + + +class EngineTracer(object): + + def __init__(self, tracer, service, engine): + self.tracer = tracer + self.engine = engine + self.vendor = sqlx.normalize_vendor(engine.name) + self.service = service or self.vendor + self.name = '%s.query' % self.vendor + + # attach the PIN + Pin( + app=self.vendor, + tracer=tracer, + service=self.service + ).onto(engine) + + listen(engine, 'before_cursor_execute', self._before_cur_exec) + listen(engine, 'after_cursor_execute', self._after_cur_exec) + listen(engine, 'dbapi_error', self._dbapi_error) + + def _before_cur_exec(self, conn, cursor, statement, *args): + pin = Pin.get_from(self.engine) + if not pin or not pin.enabled(): + # don't trace the execution + return + + span = pin.tracer.trace( + self.name, + service=pin.service, + span_type=SpanTypes.SQL, + resource=statement, + ) + + if not _set_tags_from_url(span, conn.engine.url): + _set_tags_from_cursor(span, self.vendor, cursor) + + # set analytics sample rate + sample_rate = config.sqlalchemy.get_analytics_sample_rate() + if sample_rate is not None: + span.set_tag(ANALYTICS_SAMPLE_RATE_KEY, sample_rate) + + def _after_cur_exec(self, conn, cursor, statement, *args): + pin = Pin.get_from(self.engine) + if not pin or not pin.enabled(): + # don't trace the execution + return + + span = pin.tracer.current_span() + if not span: + return + + try: + if cursor and cursor.rowcount >= 0: + span.set_tag(sqlx.ROWS, cursor.rowcount) + finally: + span.finish() + + def _dbapi_error(self, conn, cursor, statement, *args): + pin = Pin.get_from(self.engine) + if not pin or not pin.enabled(): + # don't trace the execution + return + + span = pin.tracer.current_span() + if not span: + return + + try: + span.set_traceback() + finally: + span.finish() + + +def _set_tags_from_url(span, url): + """ set connection tags from the url. return true if successful. """ + if url.host: + span.set_tag(netx.TARGET_HOST, url.host) + if url.port: + span.set_tag(netx.TARGET_PORT, url.port) + if url.database: + span.set_tag(sqlx.DB, url.database) + + return bool(span.get_tag(netx.TARGET_HOST)) + + +def _set_tags_from_cursor(span, vendor, cursor): + """ attempt to set db connection tags by introspecting the cursor. 
""" + if 'postgres' == vendor: + if hasattr(cursor, 'connection') and hasattr(cursor.connection, 'dsn'): + dsn = getattr(cursor.connection, 'dsn', None) + if dsn: + d = sqlx.parse_pg_dsn(dsn) + span.set_tag(sqlx.DB, d.get('dbname')) + span.set_tag(netx.TARGET_HOST, d.get('host')) + span.set_tag(netx.TARGET_PORT, d.get('port')) diff --git a/ddtrace/contrib/sqlalchemy/patch.py b/ddtrace/contrib/sqlalchemy/patch.py new file mode 100644 index 0000000000..ca3a3552b4 --- /dev/null +++ b/ddtrace/contrib/sqlalchemy/patch.py @@ -0,0 +1,24 @@ +import sqlalchemy + +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +from .engine import _wrap_create_engine +from ...utils.wrappers import unwrap + + +def patch(): + if getattr(sqlalchemy.engine, '__datadog_patch', False): + return + setattr(sqlalchemy.engine, '__datadog_patch', True) + + # patch the engine creation function + _w('sqlalchemy', 'create_engine', _wrap_create_engine) + _w('sqlalchemy.engine', 'create_engine', _wrap_create_engine) + + +def unpatch(): + # unpatch sqlalchemy + if getattr(sqlalchemy.engine, '__datadog_patch', False): + setattr(sqlalchemy.engine, '__datadog_patch', False) + unwrap(sqlalchemy, 'create_engine') + unwrap(sqlalchemy.engine, 'create_engine') diff --git a/ddtrace/contrib/sqlite3/__init__.py b/ddtrace/contrib/sqlite3/__init__.py new file mode 100644 index 0000000000..ec45ebe706 --- /dev/null +++ b/ddtrace/contrib/sqlite3/__init__.py @@ -0,0 +1,23 @@ +"""Instrument sqlite3 to report SQLite queries. + +``patch_all`` will automatically patch your sqlite3 connection to make it work. +:: + + from ddtrace import Pin, patch + import sqlite3 + + # If not patched yet, you can patch sqlite3 specifically + patch(sqlite3=True) + + # This will report a span with the default settings + db = sqlite3.connect(":memory:") + cursor = db.cursor() + cursor.execute("select * from users where id = 1") + + # Use a pin to specify metadata related to this connection + Pin.override(db, service='sqlite-users') +""" +from .connection import connection_factory +from .patch import patch + +__all__ = ['connection_factory', 'patch'] diff --git a/ddtrace/contrib/sqlite3/connection.py b/ddtrace/contrib/sqlite3/connection.py new file mode 100644 index 0000000000..8088ab2c3d --- /dev/null +++ b/ddtrace/contrib/sqlite3/connection.py @@ -0,0 +1,8 @@ +from sqlite3 import Connection + +from ...utils.deprecation import deprecated + + +@deprecated(message='Use patching instead (see the docs).', version='1.0.0') +def connection_factory(*args, **kwargs): + return Connection diff --git a/ddtrace/contrib/sqlite3/patch.py b/ddtrace/contrib/sqlite3/patch.py new file mode 100644 index 0000000000..3cae546b2f --- /dev/null +++ b/ddtrace/contrib/sqlite3/patch.py @@ -0,0 +1,66 @@ +# 3p +import sqlite3 +import sqlite3.dbapi2 +from ddtrace.vendor import wrapt + +# project +from ...contrib.dbapi import TracedConnection, TracedCursor, FetchTracedCursor +from ...pin import Pin +from ...settings import config + +# Original connect method +_connect = sqlite3.connect + + +def patch(): + wrapped = wrapt.FunctionWrapper(_connect, traced_connect) + + setattr(sqlite3, 'connect', wrapped) + setattr(sqlite3.dbapi2, 'connect', wrapped) + + +def unpatch(): + sqlite3.connect = _connect + sqlite3.dbapi2.connect = _connect + + +def traced_connect(func, _, args, kwargs): + conn = func(*args, **kwargs) + return patch_conn(conn) + + +def patch_conn(conn): + wrapped = TracedSQLite(conn) + Pin(service='sqlite', app='sqlite').onto(wrapped) + return wrapped + + +class 
TracedSQLiteCursor(TracedCursor):
+    def executemany(self, *args, **kwargs):
+        # DEV: SQLite3 Cursor.executemany always returns back the cursor instance
+        super(TracedSQLiteCursor, self).executemany(*args, **kwargs)
+        return self
+
+    def execute(self, *args, **kwargs):
+        # DEV: SQLite3 Cursor.execute always returns back the cursor instance
+        super(TracedSQLiteCursor, self).execute(*args, **kwargs)
+        return self
+
+
+class TracedSQLiteFetchCursor(TracedSQLiteCursor, FetchTracedCursor):
+    pass
+
+
+class TracedSQLite(TracedConnection):
+    def __init__(self, conn, pin=None, cursor_cls=None):
+        if not cursor_cls:
+            # Do not trace `fetch*` methods by default
+            cursor_cls = TracedSQLiteCursor
+            if config.dbapi2.trace_fetch_methods:
+                cursor_cls = TracedSQLiteFetchCursor
+
+        super(TracedSQLite, self).__init__(conn, pin=pin, cursor_cls=cursor_cls)
+
+    def execute(self, *args, **kwargs):
+        # sqlite has a few extra sugar functions
+        return self.cursor().execute(*args, **kwargs)
diff --git a/ddtrace/contrib/tornado/__init__.py b/ddtrace/contrib/tornado/__init__.py
new file mode 100644
index 0000000000..d81bc7d4cf
--- /dev/null
+++ b/ddtrace/contrib/tornado/__init__.py
@@ -0,0 +1,126 @@
+r"""
+The Tornado integration traces every ``RequestHandler`` defined in a Tornado web application.
+Auto instrumentation is available using the ``patch`` function that **must be called before**
+importing the tornado library.
+
+**Note:** Tornado 5 and 6 are supported only on Python 3.7.
+
+The following is an example::
+
+    # patch before importing tornado and concurrent.futures
+    from ddtrace import tracer, patch
+    patch(tornado=True)
+
+    import tornado.web
+    import tornado.gen
+    import tornado.ioloop
+
+    # create your handlers
+    class MainHandler(tornado.web.RequestHandler):
+        @tornado.gen.coroutine
+        def get(self):
+            self.write("Hello, world")
+
+    # create your application
+    app = tornado.web.Application([
+        (r'/', MainHandler),
+    ])
+
+    # and run it as usual
+    app.listen(8888)
+    tornado.ioloop.IOLoop.current().start()
+
+When any type of ``RequestHandler`` is hit, a request root span is automatically created. 
If +you want to trace more parts of your application, you can use the ``wrap()`` decorator and +the ``trace()`` method as usual:: + + class MainHandler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + yield self.notify() + yield self.blocking_method() + with tracer.trace('tornado.before_write') as span: + # trace more work in the handler + + @tracer.wrap('tornado.executor_handler') + @tornado.concurrent.run_on_executor + def blocking_method(self): + # do something expensive + + @tracer.wrap('tornado.notify', service='tornado-notification') + @tornado.gen.coroutine + def notify(self): + # do something + +If you are overriding the ``on_finish`` or ``log_exception`` methods on a +``RequestHandler``, you will need to call the super method to ensure the +tracer's patched methods are called:: + + class MainHandler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + self.write("Hello, world") + + def on_finish(self): + super(MainHandler, self).on_finish() + # do other clean-up + + def log_exception(self, typ, value, tb): + super(MainHandler, self).log_exception(typ, value, tb) + # do other logging + +Tornado settings can be used to change some tracing configuration, like:: + + settings = { + 'datadog_trace': { + 'default_service': 'my-tornado-app', + 'tags': {'env': 'production'}, + 'distributed_tracing': False, + 'analytics_enabled': False, + 'settings': { + 'FILTERS': [ + FilterRequestsOnUrl(r'http://test\\.example\\.com'), + ], + }, + }, + } + + app = tornado.web.Application([ + (r'/', MainHandler), + ], **settings) + +The available settings are: + +* ``default_service`` (default: `tornado-web`): set the service name used by the tracer. Usually + this configuration must be updated with a meaningful name. +* ``tags`` (default: `{}`): set global tags that should be applied to all spans. +* ``enabled`` (default: `True`): define if the tracer is enabled or not. If set to `false`, the + code is still instrumented but no spans are sent to the APM agent. +* ``distributed_tracing`` (default: `True`): enable distributed tracing if this is called + remotely from an instrumented application. + We suggest to enable it only for internal services where headers are under your control. +* ``analytics_enabled`` (default: `None`): enable generating APM events for Trace Search & Analytics. +* ``agent_hostname`` (default: `localhost`): define the hostname of the APM agent. +* ``agent_port`` (default: `8126`): define the port of the APM agent. +* ``settings`` (default: ``{}``): Tracer extra settings used to change, for instance, the filtering behavior. +""" +from ...utils.importlib import require_modules + + +required_modules = ['tornado'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .stack_context import run_with_trace_context, TracerStackContext + + context_provider = TracerStackContext() + + from .patch import patch, unpatch + + __all__ = [ + 'patch', + 'unpatch', + 'context_provider', + 'run_with_trace_context', + 'TracerStackContext', + ] diff --git a/ddtrace/contrib/tornado/application.py b/ddtrace/contrib/tornado/application.py new file mode 100644 index 0000000000..a413197d39 --- /dev/null +++ b/ddtrace/contrib/tornado/application.py @@ -0,0 +1,56 @@ +import ddtrace + +from tornado import template + +from . 
import decorators, context_provider +from .constants import CONFIG_KEY + + +def tracer_config(__init__, app, args, kwargs): + """ + Wrap Tornado web application so that we can configure services info and + tracing settings after the initialization. + """ + # call the Application constructor + __init__(*args, **kwargs) + + # default settings + settings = { + 'tracer': ddtrace.tracer, + 'default_service': 'tornado-web', + 'distributed_tracing': True, + 'analytics_enabled': None + } + + # update defaults with users settings + user_settings = app.settings.get(CONFIG_KEY) + if user_settings: + settings.update(user_settings) + + app.settings[CONFIG_KEY] = settings + tracer = settings['tracer'] + service = settings['default_service'] + + # extract extra settings + extra_settings = settings.get('settings', {}) + + # the tracer must use the right Context propagation and wrap executor; + # this action is done twice because the patch() method uses the + # global tracer while here we can have a different instance (even if + # this is not usual). + tracer.configure( + context_provider=context_provider, + wrap_executor=decorators.wrap_executor, + enabled=settings.get('enabled', None), + hostname=settings.get('agent_hostname', None), + port=settings.get('agent_port', None), + settings=extra_settings, + ) + + # set global tags if any + tags = settings.get('tags', None) + if tags: + tracer.set_tags(tags) + + # configure the PIN object for template rendering + ddtrace.Pin(app='tornado', service=service, tracer=tracer).onto(template) diff --git a/ddtrace/contrib/tornado/compat.py b/ddtrace/contrib/tornado/compat.py new file mode 100644 index 0000000000..4f78d77cc5 --- /dev/null +++ b/ddtrace/contrib/tornado/compat.py @@ -0,0 +1,12 @@ +try: + # detect if concurrent.futures is available as a Python + # stdlib or Python 2.7 backport + from ..futures import patch as wrap_futures, unpatch as unwrap_futures + futures_available = True +except ImportError: + def wrap_futures(): + pass + + def unwrap_futures(): + pass + futures_available = False diff --git a/ddtrace/contrib/tornado/constants.py b/ddtrace/contrib/tornado/constants.py new file mode 100644 index 0000000000..7052ee3dfa --- /dev/null +++ b/ddtrace/contrib/tornado/constants.py @@ -0,0 +1,9 @@ +""" +This module defines Tornado settings that are shared between +integration modules. +""" +CONFIG_KEY = 'datadog_trace' +REQUEST_CONTEXT_KEY = 'datadog_context' +REQUEST_SPAN_KEY = '__datadog_request_span' +FUTURE_SPAN_KEY = '__datadog_future_span' +PARENT_SPAN_KEY = '__datadog_parent_span' diff --git a/ddtrace/contrib/tornado/decorators.py b/ddtrace/contrib/tornado/decorators.py new file mode 100644 index 0000000000..eecb465b63 --- /dev/null +++ b/ddtrace/contrib/tornado/decorators.py @@ -0,0 +1,151 @@ +import ddtrace +import sys + +from functools import wraps + +from .constants import FUTURE_SPAN_KEY, PARENT_SPAN_KEY +from .stack_context import TracerStackContext + + +def _finish_span(future): + """ + Finish the span if it's attached to the given ``Future`` object. + This method is a Tornado callback used to close a decorated function + executed as a coroutine or as a synchronous function in another thread. 
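+
+    A rough sketch of the expected flow, mirroring ``wrap_executor`` below
+    (names illustrative)::
+
+        span = tracer.trace('tornado.executor_handler')
+        setattr(future, FUTURE_SPAN_KEY, span)
+        future.add_done_callback(_finish_span)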
+ """ + span = getattr(future, FUTURE_SPAN_KEY, None) + + if span: + # `tornado.concurrent.Future` in PY3 tornado>=4.0,<5 has `exc_info` + if callable(getattr(future, 'exc_info', None)): + # retrieve the exception from the coroutine object + exc_info = future.exc_info() + if exc_info: + span.set_exc_info(*exc_info) + elif callable(getattr(future, 'exception', None)): + # in tornado>=4.0,<5 with PY2 `concurrent.futures._base.Future` + # `exception_info()` returns `(exception, traceback)` but + # `exception()` only returns the first element in the tuple + if callable(getattr(future, 'exception_info', None)): + exc, exc_tb = future.exception_info() + if exc and exc_tb: + exc_type = type(exc) + span.set_exc_info(exc_type, exc, exc_tb) + # in tornado>=5 with PY3, `tornado.concurrent.Future` is alias to + # `asyncio.Future` in PY3 `exc_info` not available, instead use + # exception method + else: + exc = future.exception() + if exc: + # we expect exception object to have a traceback attached + if hasattr(exc, '__traceback__'): + exc_type = type(exc) + exc_tb = getattr(exc, '__traceback__', None) + span.set_exc_info(exc_type, exc, exc_tb) + # if all else fails use currently handled exception for + # current thread + else: + span.set_exc_info(*sys.exc_info()) + + span.finish() + + +def _run_on_executor(run_on_executor, _, params, kw_params): + """ + Wrap the `run_on_executor` function so that when a function is executed + in a different thread, we pass the current parent Span to the intermediate + function that will execute the original call. The original function + is then executed within a `TracerStackContext` so that `tracer.trace()` + can be used as usual, both with empty or existing `Context`. + """ + def pass_context_decorator(fn): + """ + Decorator that is used to wrap the original `run_on_executor_decorator` + so that we can pass the current active context before the `executor.submit` + is called. In this case we get the `parent_span` reference and we pass + that reference to `fn` reference. Because in the outer wrapper we replace + the original call with our `traced_wrapper`, we're sure that the `parent_span` + is passed to our intermediate function and not to the user function. 
+ """ + @wraps(fn) + def wrapper(*args, **kwargs): + # from the current context, retrive the active span + current_ctx = ddtrace.tracer.get_call_context() + parent_span = getattr(current_ctx, '_current_span', None) + + # pass the current parent span in the Future call so that + # it can be retrieved later + kwargs.update({PARENT_SPAN_KEY: parent_span}) + return fn(*args, **kwargs) + return wrapper + + # we expect exceptions here if the `run_on_executor` is called with + # wrong arguments; in that case we should not do anything because + # the exception must not be handled here + decorator = run_on_executor(*params, **kw_params) + + # `run_on_executor` can be called with arguments; in this case we + # return an inner decorator that holds the real function that should be + # called + if decorator.__module__ == 'tornado.concurrent': + def run_on_executor_decorator(deco_fn): + def inner_traced_wrapper(*args, **kwargs): + # retrieve the parent span from the function kwargs + parent_span = kwargs.pop(PARENT_SPAN_KEY, None) + return run_executor_stack_context(deco_fn, args, kwargs, parent_span) + return pass_context_decorator(decorator(inner_traced_wrapper)) + + return run_on_executor_decorator + + # return our wrapper function that executes an intermediate function to + # trace the real execution in a different thread + def traced_wrapper(*args, **kwargs): + # retrieve the parent span from the function kwargs + parent_span = kwargs.pop(PARENT_SPAN_KEY, None) + return run_executor_stack_context(params[0], args, kwargs, parent_span) + + return pass_context_decorator(run_on_executor(traced_wrapper)) + + +def run_executor_stack_context(fn, args, kwargs, parent_span): + """ + This intermediate function is always executed in a newly created thread. Here + using a `TracerStackContext` is legit because this function doesn't interfere + with the main thread loop. `StackContext` states are thread-local and retrieving + the context here will always bring to an empty `Context`. + """ + with TracerStackContext(): + ctx = ddtrace.tracer.get_call_context() + ctx._current_span = parent_span + return fn(*args, **kwargs) + + +def wrap_executor(tracer, fn, args, kwargs, span_name, service=None, resource=None, span_type=None): + """ + Wrap executor function used to change the default behavior of + ``Tracer.wrap()`` method. A decorated Tornado function can be + a regular function or a coroutine; if a coroutine is decorated, a + span is attached to the returned ``Future`` and a callback is set + so that it will close the span when the ``Future`` is done. 
+ """ + span = tracer.trace(span_name, service=service, resource=resource, span_type=span_type) + + # catch standard exceptions raised in synchronous executions + try: + future = fn(*args, **kwargs) + + # duck-typing: if it has `add_done_callback` it's a Future + # object whatever is the underlying implementation + if callable(getattr(future, 'add_done_callback', None)): + setattr(future, FUTURE_SPAN_KEY, span) + future.add_done_callback(_finish_span) + else: + # we don't have a future so the `future` variable + # holds the result of the function + span.finish() + except Exception: + span.set_traceback() + span.finish() + raise + + return future diff --git a/ddtrace/contrib/tornado/handlers.py b/ddtrace/contrib/tornado/handlers.py new file mode 100644 index 0000000000..2699c2e15c --- /dev/null +++ b/ddtrace/contrib/tornado/handlers.py @@ -0,0 +1,105 @@ +from tornado.web import HTTPError + +from .constants import CONFIG_KEY, REQUEST_CONTEXT_KEY, REQUEST_SPAN_KEY +from .stack_context import TracerStackContext +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...ext import SpanTypes, http +from ...propagation.http import HTTPPropagator +from ...settings import config + + +def execute(func, handler, args, kwargs): + """ + Wrap the handler execute method so that the entire request is within the same + ``TracerStackContext``. This simplifies users code when the automatic ``Context`` + retrieval is used via ``Tracer.trace()`` method. + """ + # retrieve tracing settings + settings = handler.settings[CONFIG_KEY] + tracer = settings['tracer'] + service = settings['default_service'] + distributed_tracing = settings['distributed_tracing'] + + with TracerStackContext(): + # attach the context to the request + setattr(handler.request, REQUEST_CONTEXT_KEY, tracer.get_call_context()) + + # Read and use propagated context from HTTP headers + if distributed_tracing: + propagator = HTTPPropagator() + context = propagator.extract(handler.request.headers) + if context.trace_id: + tracer.context_provider.activate(context) + + # store the request span in the request so that it can be used later + request_span = tracer.trace( + 'tornado.request', + service=service, + span_type=SpanTypes.WEB + ) + # set analytics sample rate + # DEV: tornado is special case maintains separate configuration from config api + analytics_enabled = settings['analytics_enabled'] + if (config.analytics_enabled and analytics_enabled is not False) or analytics_enabled is True: + request_span.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + settings.get('analytics_sample_rate', True) + ) + + setattr(handler.request, REQUEST_SPAN_KEY, request_span) + + return func(*args, **kwargs) + + +def on_finish(func, handler, args, kwargs): + """ + Wrap the ``RequestHandler.on_finish`` method. This is the last executed method + after the response has been sent, and it's used to retrieve and close the + current request span (if available). 
+ """ + request = handler.request + request_span = getattr(request, REQUEST_SPAN_KEY, None) + if request_span: + # use the class name as a resource; if an handler is not available, the + # default handler class will be used so we don't pollute the resource + # space here + klass = handler.__class__ + request_span.resource = '{}.{}'.format(klass.__module__, klass.__name__) + request_span.set_tag('http.method', request.method) + request_span.set_tag('http.status_code', handler.get_status()) + request_span.set_tag(http.URL, request.full_url().rsplit('?', 1)[0]) + if config.tornado.trace_query_string: + request_span.set_tag(http.QUERY_STRING, request.query) + request_span.finish() + + return func(*args, **kwargs) + + +def log_exception(func, handler, args, kwargs): + """ + Wrap the ``RequestHandler.log_exception``. This method is called when an + Exception is not handled in the user code. In this case, we save the exception + in the current active span. If the Tornado ``Finish`` exception is raised, this wrapper + will not be called because ``Finish`` is not an exception. + """ + # safe-guard: expected arguments -> log_exception(self, typ, value, tb) + value = args[1] if len(args) == 3 else None + if not value: + return func(*args, **kwargs) + + # retrieve the current span + tracer = handler.settings[CONFIG_KEY]['tracer'] + current_span = tracer.current_span() + + if isinstance(value, HTTPError): + # Tornado uses HTTPError exceptions to stop and return a status code that + # is not a 2xx. In this case we want to check the status code to be sure that + # only 5xx are traced as errors, while any other HTTPError exception is handled as + # usual. + if 500 <= value.status_code <= 599: + current_span.set_exc_info(*args) + else: + # any other uncaught exception should be reported as error + current_span.set_exc_info(*args) + + return func(*args, **kwargs) diff --git a/ddtrace/contrib/tornado/patch.py b/ddtrace/contrib/tornado/patch.py new file mode 100644 index 0000000000..81a671ba77 --- /dev/null +++ b/ddtrace/contrib/tornado/patch.py @@ -0,0 +1,58 @@ +import ddtrace +import tornado + +from ddtrace.vendor.wrapt import wrap_function_wrapper as _w + +from . import handlers, application, decorators, template, compat, context_provider +from ...utils.wrappers import unwrap as _u + + +def patch(): + """ + Tracing function that patches the Tornado web application so that it will be + traced using the given ``tracer``. + """ + # patch only once + if getattr(tornado, '__datadog_patch', False): + return + setattr(tornado, '__datadog_patch', True) + + # patch Application to initialize properly our settings and tracer + _w('tornado.web', 'Application.__init__', application.tracer_config) + + # patch RequestHandler to trace all Tornado handlers + _w('tornado.web', 'RequestHandler._execute', handlers.execute) + _w('tornado.web', 'RequestHandler.on_finish', handlers.on_finish) + _w('tornado.web', 'RequestHandler.log_exception', handlers.log_exception) + + # patch Template system + _w('tornado.template', 'Template.generate', template.generate) + + # patch Python Futures if available when an Executor pool is used + compat.wrap_futures() + + # configure the global tracer + ddtrace.tracer.configure( + context_provider=context_provider, + wrap_executor=decorators.wrap_executor, + ) + + +def unpatch(): + """ + Remove all tracing functions in a Tornado web application. 
+ """ + if not getattr(tornado, '__datadog_patch', False): + return + setattr(tornado, '__datadog_patch', False) + + # unpatch Tornado + _u(tornado.web.RequestHandler, '_execute') + _u(tornado.web.RequestHandler, 'on_finish') + _u(tornado.web.RequestHandler, 'log_exception') + _u(tornado.web.Application, '__init__') + _u(tornado.concurrent, 'run_on_executor') + _u(tornado.template.Template, 'generate') + + # unpatch `futures` + compat.unwrap_futures() diff --git a/ddtrace/contrib/tornado/stack_context.py b/ddtrace/contrib/tornado/stack_context.py new file mode 100644 index 0000000000..367b97e1f1 --- /dev/null +++ b/ddtrace/contrib/tornado/stack_context.py @@ -0,0 +1,142 @@ +import tornado +from tornado.ioloop import IOLoop +import sys + +from ...context import Context +from ...provider import DefaultContextProvider + +# tornado.stack_context deprecated in Tornado 5 removed in Tornado 6 +# instead use DefaultContextProvider with ContextVarContextManager for asyncio +_USE_STACK_CONTEXT = not ( + sys.version_info >= (3, 7) and tornado.version_info >= (5, 0) +) + +if _USE_STACK_CONTEXT: + from tornado.stack_context import StackContextInconsistentError, _state + + class TracerStackContext(DefaultContextProvider): + """ + A context manager that manages ``Context`` instances in a thread-local state. + It must be used everytime a Tornado's handler or coroutine is used within a + tracing Context. It is meant to work like a traditional ``StackContext``, + preserving the state across asynchronous calls. + + Everytime a new manager is initialized, a new ``Context()`` is created for + this execution flow. A context created in a ``TracerStackContext`` is not + shared between different threads. + + This implementation follows some suggestions provided here: + https://github.com/tornadoweb/tornado/issues/1063 + """ + def __init__(self): + # DEV: skip resetting context manager since TracerStackContext is used + # as a with-statement context where we do not want to be clearing the + # current context for a thread or task + super(TracerStackContext, self).__init__(reset_context_manager=False) + self._active = True + self._context = Context() + + def enter(self): + """ + Required to preserve the ``StackContext`` protocol. + """ + pass + + def exit(self, type, value, traceback): # noqa: A002 + """ + Required to preserve the ``StackContext`` protocol. 
+ """ + pass + + def __enter__(self): + self.old_contexts = _state.contexts + self.new_contexts = (self.old_contexts[0] + (self,), self) + _state.contexts = self.new_contexts + return self + + def __exit__(self, type, value, traceback): # noqa: A002 + final_contexts = _state.contexts + _state.contexts = self.old_contexts + + if final_contexts is not self.new_contexts: + raise StackContextInconsistentError( + 'stack_context inconsistency (may be caused by yield ' + 'within a "with TracerStackContext" block)') + + # break the reference to allow faster GC on CPython + self.new_contexts = None + + def deactivate(self): + self._active = False + + def _has_io_loop(self): + """Helper to determine if we are currently in an IO loop""" + return getattr(IOLoop._current, 'instance', None) is not None + + def _has_active_context(self): + """Helper to determine if we have an active context or not""" + if not self._has_io_loop(): + return self._local._has_active_context() + else: + # we're inside a Tornado loop so the TracerStackContext is used + return self._get_state_active_context() is not None + + def _get_state_active_context(self): + """Helper to get the currently active context from the TracerStackContext""" + # we're inside a Tornado loop so the TracerStackContext is used + for stack in reversed(_state.contexts[0]): + if isinstance(stack, self.__class__) and stack._active: + return stack._context + return None + + def active(self): + """ + Return the ``Context`` from the current execution flow. This method can be + used inside a Tornado coroutine to retrieve and use the current tracing context. + If used in a separated Thread, the `_state` thread-local storage is used to + propagate the current Active context from the `MainThread`. + """ + if not self._has_io_loop(): + # if a Tornado loop is not available, it means that this method + # has been called from a synchronous code, so we can rely in a + # thread-local storage + return self._local.get() + else: + # we're inside a Tornado loop so the TracerStackContext is used + return self._get_state_active_context() + + def activate(self, ctx): + """ + Set the active ``Context`` for this async execution. If a ``TracerStackContext`` + is not found, the context is discarded. + If used in a separated Thread, the `_state` thread-local storage is used to + propagate the current Active context from the `MainThread`. + """ + if not self._has_io_loop(): + # because we're outside of an asynchronous execution, we store + # the current context in a thread-local storage + self._local.set(ctx) + else: + # we're inside a Tornado loop so the TracerStackContext is used + for stack_ctx in reversed(_state.contexts[0]): + if isinstance(stack_ctx, self.__class__) and stack_ctx._active: + stack_ctx._context = ctx + return ctx +else: + # no-op when not using stack_context + class TracerStackContext(DefaultContextProvider): + def __enter__(self): + pass + + def __exit__(self, *exc): + pass + + +def run_with_trace_context(func, *args, **kwargs): + """ + Run the given function within a traced StackContext. This function is used to + trace Tornado web handlers, but can be used in your code to trace coroutines + execution. 
+ """ + with TracerStackContext(): + return func(*args, **kwargs) diff --git a/ddtrace/contrib/tornado/template.py b/ddtrace/contrib/tornado/template.py new file mode 100644 index 0000000000..d41bc601aa --- /dev/null +++ b/ddtrace/contrib/tornado/template.py @@ -0,0 +1,31 @@ +from tornado import template + +from ddtrace import Pin + +from ...ext import SpanTypes + + +def generate(func, renderer, args, kwargs): + """ + Wrap the ``generate`` method used in templates rendering. Because the method + may be called everywhere, the execution is traced in a tracer StackContext that + inherits the current one if it's already available. + """ + # get the module pin + pin = Pin.get_from(template) + if not pin or not pin.enabled(): + return func(*args, **kwargs) + + # change the resource and the template name + # if it's created from a string instead of a file + if '' in renderer.name: + resource = template_name = 'render_string' + else: + resource = template_name = renderer.name + + # trace the original call + with pin.tracer.trace( + 'tornado.template', service=pin.service, resource=resource, span_type=SpanTypes.TEMPLATE + ) as span: + span.set_meta('tornado.template_name', template_name) + return func(*args, **kwargs) diff --git a/ddtrace/contrib/util.py b/ddtrace/contrib/util.py new file mode 100644 index 0000000000..ae96cc4c12 --- /dev/null +++ b/ddtrace/contrib/util.py @@ -0,0 +1,16 @@ +# [Backward compatibility]: keep importing modules functions +from ..utils.deprecation import deprecation +from ..utils.importlib import require_modules, func_name, module_name + + +deprecation( + name='ddtrace.contrib.util', + message='Use `ddtrace.utils.importlib` module instead', + version='1.0.0', +) + +__all__ = [ + 'require_modules', + 'func_name', + 'module_name', +] diff --git a/ddtrace/contrib/vertica/__init__.py b/ddtrace/contrib/vertica/__init__.py new file mode 100644 index 0000000000..763c668575 --- /dev/null +++ b/ddtrace/contrib/vertica/__init__.py @@ -0,0 +1,51 @@ +""" +The Vertica integration will trace queries made using the vertica-python +library. + +Vertica will be automatically instrumented with ``patch_all``, or when using +the ``ddtrace-run`` command. + +Vertica is instrumented on import. To instrument Vertica manually use the +``patch`` function. 
Note the ordering of the following statements:: + + from ddtrace import patch + patch(vertica=True) + + import vertica_python + + # use vertica_python like usual + + +To configure the Vertica integration globally you can use the ``Config`` API:: + + from ddtrace import config, patch + patch(vertica=True) + + config.vertica['service_name'] = 'my-vertica-database' + + +To configure the Vertica integration on an instance-per-instance basis use the +``Pin`` API:: + + from ddtrace import Pin, patch, Tracer + patch(vertica=True) + + import vertica_python + + custom_tracer = Tracer() + conn = vertica_python.connect(**YOUR_VERTICA_CONFIG) + + # override the service and tracer to be used + Pin.override(conn, service='myverticaservice', tracer=custom_tracer) +""" + +from ...utils.importlib import require_modules + + +required_modules = ['vertica_python'] + +with require_modules(required_modules) as missing_modules: + if not missing_modules: + from .patch import patch, unpatch + + __all__ = [patch, unpatch] diff --git a/ddtrace/contrib/vertica/constants.py b/ddtrace/contrib/vertica/constants.py new file mode 100644 index 0000000000..95c3d763a8 --- /dev/null +++ b/ddtrace/contrib/vertica/constants.py @@ -0,0 +1,2 @@ +# Service info +APP = 'vertica' diff --git a/ddtrace/contrib/vertica/patch.py b/ddtrace/contrib/vertica/patch.py new file mode 100644 index 0000000000..f893b94b6f --- /dev/null +++ b/ddtrace/contrib/vertica/patch.py @@ -0,0 +1,233 @@ +import importlib + +from ddtrace.vendor import wrapt + +import ddtrace +from ...constants import ANALYTICS_SAMPLE_RATE_KEY +from ...ext import SpanTypes, db as dbx +from ...ext import net +from ...internal.logger import get_logger +from ...pin import Pin +from ...settings import config +from ...utils.wrappers import unwrap +from .constants import APP + + +log = get_logger(__name__) + +_PATCHED = False + + +def copy_span_start(instance, span, conf, *args, **kwargs): + span.resource = args[0] + + +def execute_span_start(instance, span, conf, *args, **kwargs): + span.resource = args[0] + + +def execute_span_end(instance, result, span, conf, *args, **kwargs): + span.set_metric(dbx.ROWCOUNT, instance.rowcount) + + +def fetch_span_end(instance, result, span, conf, *args, **kwargs): + span.set_metric(dbx.ROWCOUNT, instance.rowcount) + + +def cursor_span_end(instance, cursor, _, conf, *args, **kwargs): + tags = {} + tags[net.TARGET_HOST] = instance.options['host'] + tags[net.TARGET_PORT] = instance.options['port'] + if 'user' in instance.options: + tags[dbx.USER] = instance.options['user'] + if 'database' in instance.options: + tags[dbx.NAME] = instance.options['database'] + + pin = Pin( + service=config.vertica['service_name'], + app=APP, + tags=tags, + _config=config.vertica['patch']['vertica_python.vertica.cursor.Cursor'], + ) + pin.onto(cursor) + + +# tracing configuration +config._add( + 'vertica', + { + 'service_name': 'vertica', + 'app': 'vertica', + 'patch': { + 'vertica_python.vertica.connection.Connection': { + 'routines': { + 'cursor': { + 'trace_enabled': False, + 'span_end': cursor_span_end, + }, + }, + }, + 'vertica_python.vertica.cursor.Cursor': { + 'routines': { + 'execute': { + 'operation_name': 'vertica.query', + 'span_type': SpanTypes.SQL, + 'span_start': execute_span_start, + 'span_end': execute_span_end, + }, + 'copy': { + 'operation_name': 'vertica.copy', + 'span_type': SpanTypes.SQL, + 'span_start': copy_span_start, + }, + 'fetchone': { + 'operation_name': 'vertica.fetchone', + 'span_type': SpanTypes.SQL, + 'span_end': fetch_span_end, + }, + 
'fetchall': { + 'operation_name': 'vertica.fetchall', + 'span_type': SpanTypes.SQL, + 'span_end': fetch_span_end, + }, + 'nextset': { + 'operation_name': 'vertica.nextset', + 'span_type': SpanTypes.SQL, + 'span_end': fetch_span_end, + }, + }, + }, + }, + }, +) + + +def patch(): + global _PATCHED + if _PATCHED: + return + + _install(config.vertica) + _PATCHED = True + + +def unpatch(): + global _PATCHED + if _PATCHED: + _uninstall(config.vertica) + _PATCHED = False + + +def _uninstall(config): + for patch_class_path in config['patch']: + patch_mod, _, patch_class = patch_class_path.rpartition('.') + mod = importlib.import_module(patch_mod) + cls = getattr(mod, patch_class, None) + + if not cls: + log.debug( + """ + Unable to find corresponding class for tracing configuration. + This version may not be supported. + """ + ) + continue + + for patch_routine in config['patch'][patch_class_path]['routines']: + unwrap(cls, patch_routine) + + +def _find_routine_config(config, instance, routine_name): + """Attempts to find the config for a routine based on the bases of the + class of the instance. + """ + bases = instance.__class__.__mro__ + for base in bases: + full_name = '{}.{}'.format(base.__module__, base.__name__) + if full_name not in config['patch']: + continue + + config_routines = config['patch'][full_name]['routines'] + + if routine_name in config_routines: + return config_routines[routine_name] + return {} + + +def _install_init(patch_item, patch_class, patch_mod, config): + patch_class_routine = '{}.{}'.format(patch_class, '__init__') + + # patch the __init__ of the class with a Pin instance containing the defaults + @wrapt.patch_function_wrapper(patch_mod, patch_class_routine) + def init_wrapper(wrapped, instance, args, kwargs): + r = wrapped(*args, **kwargs) + + # create and attach a pin with the defaults + Pin( + service=config['service_name'], + app=config['app'], + tags=config.get('tags', {}), + tracer=config.get('tracer', ddtrace.tracer), + _config=config['patch'][patch_item], + ).onto(instance) + return r + + +def _install_routine(patch_routine, patch_class, patch_mod, config): + patch_class_routine = '{}.{}'.format(patch_class, patch_routine) + + @wrapt.patch_function_wrapper(patch_mod, patch_class_routine) + def wrapper(wrapped, instance, args, kwargs): + # TODO?: remove Pin dependence + pin = Pin.get_from(instance) + + if patch_routine in pin._config['routines']: + conf = pin._config['routines'][patch_routine] + else: + conf = _find_routine_config(config, instance, patch_routine) + + enabled = conf.get('trace_enabled', True) + + span = None + + try: + # shortcut if not enabled + if not enabled: + result = wrapped(*args, **kwargs) + return result + + operation_name = conf['operation_name'] + tracer = pin.tracer + with tracer.trace(operation_name, service=pin.service, span_type=conf.get('span_type')) as span: + span.set_tags(pin.tags) + + if 'span_start' in conf: + conf['span_start'](instance, span, conf, *args, **kwargs) + + # set analytics sample rate + span.set_tag( + ANALYTICS_SAMPLE_RATE_KEY, + config.get_analytics_sample_rate() + ) + + result = wrapped(*args, **kwargs) + return result + except Exception as err: + if 'on_error' in conf: + conf['on_error'](instance, err, span, conf, *args, **kwargs) + raise + finally: + # if an exception is raised result will not exist + if 'result' not in locals(): + result = None + if 'span_end' in conf: + conf['span_end'](instance, result, span, conf, *args, **kwargs) + + +def _install(config): + for patch_class_path in config['patch']: 
+        patch_mod, _, patch_class = patch_class_path.rpartition('.')
+        _install_init(patch_class_path, patch_class, patch_mod, config)
+
+        for patch_routine in config['patch'][patch_class_path]['routines']:
+            _install_routine(patch_routine, patch_class, patch_mod, config)
diff --git a/ddtrace/encoding.py b/ddtrace/encoding.py
new file mode 100644
index 0000000000..f650966b55
--- /dev/null
+++ b/ddtrace/encoding.py
@@ -0,0 +1,128 @@
+import json
+import struct
+
+from .internal.logger import get_logger
+
+
+# Try to import msgpack, fallback to just JSON if something went wrong
+# DEV: We are ok with the pure Python fallback for msgpack if the C-extension failed to install
+try:
+    from ddtrace.vendor import msgpack
+    # DEV: `use_bin_type` only exists since `0.4.0`, but we vendor a more recent version
+    MSGPACK_PARAMS = {'use_bin_type': True}
+    MSGPACK_ENCODING = True
+except ImportError:
+    # fallback to JSON
+    MSGPACK_PARAMS = {}
+    MSGPACK_ENCODING = False
+
+log = get_logger(__name__)
+
+
+class Encoder(object):
+    """
+    Encoder interface that provides the logic to encode traces and services.
+    """
+    def __init__(self):
+        """
+        When extending the ``Encoder`` class, ``content_type`` must be set because
+        it is returned by the encoding methods, so that the API transport doesn't
+        need to guess the right header to suggest the decoding format to the
+        agent.
+        """
+        self.content_type = ''
+
+    def encode_traces(self, traces):
+        """
+        Encodes a list of traces, expecting a list of items where each item
+        is a list of spans. Before dumping to a serialized format, all traces
+        are normalized by calling the ``to_dict()`` method. The trace
+        nesting is not changed.
+
+        :param traces: A list of traces that should be serialized
+        """
+        normalized_traces = [[span.to_dict() for span in trace] for trace in traces]
+        return self.encode(normalized_traces)
+
+    def encode_trace(self, trace):
+        """
+        Encodes a trace, expecting a list of spans. Before dumping to a
+        serialized format, all spans are normalized by calling the ``to_dict()``
+        method. The span nesting is not changed.
+
+        :param trace: A list of spans that should be serialized
+        """
+        return self.encode([span.to_dict() for span in trace])
+
+    def encode(self, obj):
+        """
+        Defines the underlying format used during traces or services encoding.
+        This method must be implemented and should only be used by the internal functions.
+        """
+        raise NotImplementedError
+
+    def decode(self, data):
+        """
+        Defines the underlying format used during traces or services decoding.
+        This method must be implemented and should only be used by the internal functions. 
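+
+        A round-trip sketch using the concrete encoders defined below::
+
+            encoder = get_encoder()
+            payload = encoder.encode_trace(trace)
+            data = encoder.decode(payload)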
+ """ + raise NotImplementedError + + def join_encoded(self, objs): + """Helper used to join a list of encoded objects into an encoded list of objects""" + raise NotImplementedError + + +class JSONEncoder(Encoder): + def __init__(self): + # TODO[manu]: add instructions about how users can switch to Msgpack + log.debug('using JSON encoder; application performance may be degraded') + self.content_type = 'application/json' + + def encode(self, obj): + return json.dumps(obj) + + def decode(self, data): + return json.loads(data) + + def join_encoded(self, objs): + """Join a list of encoded objects together as a json array""" + return '[' + ','.join(objs) + ']' + + +class MsgpackEncoder(Encoder): + def __init__(self): + log.debug('using Msgpack encoder') + self.content_type = 'application/msgpack' + + def encode(self, obj): + return msgpack.packb(obj) + + def decode(self, data): + return msgpack.unpackb(data) + + def join_encoded(self, objs): + """Join a list of encoded objects together as a msgpack array""" + buf = b''.join(objs) + + # Prepend array header to buffer + # https://github.com/msgpack/msgpack-python/blob/f46523b1af7ff2d408da8500ea36a4f9f2abe915/msgpack/fallback.py#L948-L955 + count = len(objs) + if count <= 0xf: + return struct.pack('B', 0x90 + count) + buf + elif count <= 0xffff: + return struct.pack('>BH', 0xdc, count) + buf + else: + return struct.pack('>BI', 0xdd, count) + buf + + +def get_encoder(): + """ + Switching logic that choose the best encoder for the API transport. + The default behavior is to use Msgpack if we have a CPP implementation + installed, falling back to the Python built-in JSON encoder. + """ + if MSGPACK_ENCODING: + return MsgpackEncoder() + else: + return JSONEncoder() diff --git a/ddtrace/ext/__init__.py b/ddtrace/ext/__init__.py new file mode 100644 index 0000000000..d05d30392a --- /dev/null +++ b/ddtrace/ext/__init__.py @@ -0,0 +1,37 @@ +from enum import Enum + +from ..vendor.debtcollector import removals +from ..utils import removed_classproperty + + +class SpanTypes(Enum): + CACHE = "cache" + CASSANDRA = "cassandra" + ELASTICSEARCH = "elasticsearch" + GRPC = "grpc" + HTTP = "http" + MONGODB = "mongodb" + REDIS = "redis" + SQL = "sql" + TEMPLATE = "template" + WEB = "web" + WORKER = "worker" + + +@removals.removed_class("AppTypes") +class AppTypes(object): + @removed_classproperty + def web(cls): + return SpanTypes.WEB + + @removed_classproperty + def db(cls): + return "db" + + @removed_classproperty + def cache(cls): + return SpanTypes.CACHE + + @removed_classproperty + def worker(cls): + return SpanTypes.WORKER diff --git a/ddtrace/ext/aws.py b/ddtrace/ext/aws.py new file mode 100644 index 0000000000..931b92bd00 --- /dev/null +++ b/ddtrace/ext/aws.py @@ -0,0 +1,39 @@ +from ..utils.formats import flatten_dict + + +BLACKLIST_ENDPOINT = ['kms', 'sts'] +BLACKLIST_ENDPOINT_TAGS = { + 's3': ['params.Body'], +} + + +def truncate_arg_value(value, max_len=1024): + """Truncate values which are bytes and greater than `max_len`. + Useful for parameters like 'Body' in `put_object` operations. + """ + if isinstance(value, bytes) and len(value) > max_len: + return b'...' 
+ + return value + + +def add_span_arg_tags(span, endpoint_name, args, args_names, args_traced): + if endpoint_name not in BLACKLIST_ENDPOINT: + blacklisted = BLACKLIST_ENDPOINT_TAGS.get(endpoint_name, []) + tags = dict( + (name, value) + for (name, value) in zip(args_names, args) + if name in args_traced + ) + tags = flatten_dict(tags) + tags = { + k: truncate_arg_value(v) + for k, v in tags.items() + if k not in blacklisted + } + span.set_tags(tags) + + +REGION = 'aws.region' +AGENT = 'aws.agent' +OPERATION = 'aws.operation' diff --git a/ddtrace/ext/cassandra.py b/ddtrace/ext/cassandra.py new file mode 100644 index 0000000000..6b2629a317 --- /dev/null +++ b/ddtrace/ext/cassandra.py @@ -0,0 +1,13 @@ +from . import SpanTypes + +# [TODO] Deprecated, remove when we remove AppTypes +# the type of the spans +TYPE = SpanTypes.CASSANDRA + +# tags +CLUSTER = 'cassandra.cluster' +KEYSPACE = 'cassandra.keyspace' +CONSISTENCY_LEVEL = 'cassandra.consistency_level' +PAGINATED = 'cassandra.paginated' +ROW_COUNT = 'cassandra.row_count' +PAGE_NUMBER = 'cassandra.page_number' diff --git a/ddtrace/ext/consul.py b/ddtrace/ext/consul.py new file mode 100644 index 0000000000..d1e6f1afaa --- /dev/null +++ b/ddtrace/ext/consul.py @@ -0,0 +1,10 @@ +from . import SpanTypes + +APP = 'consul' +# [TODO] Deprecated, remove when we remove AppTypes +APP_TYPE = SpanTypes.CACHE +SERVICE = 'consul' + +CMD = 'consul.command' + +KEY = 'consul.key' diff --git a/ddtrace/ext/db.py b/ddtrace/ext/db.py new file mode 100644 index 0000000000..34b42c69e3 --- /dev/null +++ b/ddtrace/ext/db.py @@ -0,0 +1,4 @@ +# tags +NAME = 'db.name' # the database name (eg: dbname for pgsql) +USER = 'db.user' # the user connecting to the db +ROWCOUNT = 'db.rowcount' # the rowcount of a query diff --git a/ddtrace/ext/elasticsearch.py b/ddtrace/ext/elasticsearch.py new file mode 100644 index 0000000000..44d1089176 --- /dev/null +++ b/ddtrace/ext/elasticsearch.py @@ -0,0 +1,13 @@ +from . import SpanTypes + +# [TODO] Deprecated, remove when we remove AppTypes +TYPE = SpanTypes.ELASTICSEARCH +SERVICE = 'elasticsearch' +APP = 'elasticsearch' + +# standard tags +URL = 'elasticsearch.url' +METHOD = 'elasticsearch.method' +TOOK = 'elasticsearch.took' +PARAMS = 'elasticsearch.params' +BODY = 'elasticsearch.body' diff --git a/ddtrace/ext/errors.py b/ddtrace/ext/errors.py new file mode 100644 index 0000000000..70fee86076 --- /dev/null +++ b/ddtrace/ext/errors.py @@ -0,0 +1,23 @@ +""" +tags for common error attributes +""" + +import traceback + + +ERROR_MSG = 'error.msg' # a string representing the error message +ERROR_TYPE = 'error.type' # a string representing the type of the error +ERROR_STACK = 'error.stack' # a human readable version of the stack. beta. + +# shorthand for -----^ +MSG = ERROR_MSG +TYPE = ERROR_TYPE +STACK = ERROR_STACK + + +def get_traceback(tb=None, error=None): + t = None + if error: + t = type(error) + lines = traceback.format_exception(t, error, tb, limit=20) + return '\n'.join(lines) diff --git a/ddtrace/ext/http.py b/ddtrace/ext/http.py new file mode 100644 index 0000000000..3df762247d --- /dev/null +++ b/ddtrace/ext/http.py @@ -0,0 +1,26 @@ +""" +Standard http tags. + +For example: + +span.set_tag(URL, '/user/home') +span.set_tag(STATUS_CODE, 404) +""" +from . 
import SpanTypes

# [TODO] Deprecated, remove when we remove AppTypes
# type of the spans
+TYPE = SpanTypes.HTTP
+
+# tags
+URL = 'http.url'
+METHOD = 'http.method'
+STATUS_CODE = 'http.status_code'
+QUERY_STRING = 'http.query.string'
+
+# template render span type
+TEMPLATE = 'template'
+
+
+def normalize_status_code(code):
+    return code.split(' ')[0]
diff --git a/ddtrace/ext/kombu.py b/ddtrace/ext/kombu.py
new file mode 100644
index 0000000000..9eaafeb2e5
--- /dev/null
+++ b/ddtrace/ext/kombu.py
@@ -0,0 +1,18 @@
+from . import SpanTypes
+
+# [TODO] Deprecated, remove when we remove AppTypes
+# type of the spans
+TYPE = SpanTypes.WORKER
+
+SERVICE = 'kombu'
+
+# net extension
+VHOST = 'out.vhost'
+
+# standard tags
+EXCHANGE = 'kombu.exchange'
+BODY_LEN = 'kombu.body_length'
+ROUTING_KEY = 'kombu.routing_key'
+
+PUBLISH_NAME = 'kombu.publish'
+RECEIVE_NAME = 'kombu.receive'
diff --git a/ddtrace/ext/memcached.py b/ddtrace/ext/memcached.py
new file mode 100644
index 0000000000..7e71e98614
--- /dev/null
+++ b/ddtrace/ext/memcached.py
@@ -0,0 +1,8 @@
+from . import SpanTypes
+
+# [TODO] Deprecated, remove when we remove AppTypes
+TYPE = SpanTypes.CACHE
+
+CMD = 'memcached.command'
+SERVICE = 'memcached'
+QUERY = 'memcached.query'
diff --git a/ddtrace/ext/mongo.py b/ddtrace/ext/mongo.py
new file mode 100644
index 0000000000..884764e454
--- /dev/null
+++ b/ddtrace/ext/mongo.py
@@ -0,0 +1,10 @@
+from . import SpanTypes
+
+# [TODO] Deprecated, remove when we remove AppTypes
+TYPE = SpanTypes.MONGODB
+
+SERVICE = 'mongodb'
+COLLECTION = 'mongodb.collection'
+DB = 'mongodb.db'
+ROWS = 'mongodb.rows'
+QUERY = 'mongodb.query'
diff --git a/ddtrace/ext/net.py b/ddtrace/ext/net.py
new file mode 100644
index 0000000000..7133a00e9a
--- /dev/null
+++ b/ddtrace/ext/net.py
@@ -0,0 +1,9 @@
+"""
+Standard network tags.
+"""
+
+# request targets
+TARGET_HOST = 'out.host'
+TARGET_PORT = 'out.port'
+
+BYTES_OUT = 'net.out.bytes'
diff --git a/ddtrace/ext/priority.py b/ddtrace/ext/priority.py
new file mode 100644
index 0000000000..d7cd27b928
--- /dev/null
+++ b/ddtrace/ext/priority.py
@@ -0,0 +1,24 @@
+"""
+Priority is a hint given to the backend so that it knows which traces to reject or keep.
+In a distributed context, it should be set before any context propagation (fork, RPC calls) to be effective.
+
+For example:
+
+from ddtrace.ext.priority import USER_REJECT, USER_KEEP
+
+context = tracer.context_provider.active()
+# Indicate to not keep the trace
+context.sampling_priority = USER_REJECT
+
+# Indicate to keep the trace
+span.context.sampling_priority = USER_KEEP
+"""
+
+# Use this to explicitly inform the backend that a trace should be rejected and not stored.
+USER_REJECT = -1
+# Used by the builtin sampler to inform the backend that a trace should be rejected and not stored.
+AUTO_REJECT = 0
+# Used by the builtin sampler to inform the backend that a trace should be kept and stored.
+AUTO_KEEP = 1
+# Use this to explicitly inform the backend that a trace should be kept and stored.
+USER_KEEP = 2
diff --git a/ddtrace/ext/redis.py b/ddtrace/ext/redis.py
new file mode 100644
index 0000000000..542175ea66
--- /dev/null
+++ b/ddtrace/ext/redis.py
@@ -0,0 +1,19 @@
+from . import SpanTypes
+
+# defaults
+APP = 'redis'
+DEFAULT_SERVICE = 'redis'
+
+# [TODO] Deprecated, remove when we remove AppTypes
+# type of the spans
+TYPE = SpanTypes.REDIS
+
+# net extension
+DB = 'out.redis_db'
+
+# standard tags
+RAWCMD = 'redis.raw_command'
+CMD = 'redis.command'
+ARGS_LEN = 'redis.args_length'
+PIPELINE_LEN = 'redis.pipeline_length'
+PIPELINE_AGE = 'redis.pipeline_age'
diff --git a/ddtrace/ext/sql.py b/ddtrace/ext/sql.py
new file mode 100644
index 0000000000..1d8c1a3a90
--- /dev/null
+++ b/ddtrace/ext/sql.py
@@ -0,0 +1,34 @@
+from . import SpanTypes
+
+# [TODO] Deprecated, remove when we remove AppTypes
+TYPE = SpanTypes.SQL
+APP_TYPE = SpanTypes.SQL
+
+# tags
+QUERY = 'sql.query'  # the query text
+ROWS = 'sql.rows'  # number of rows returned by a query
+DB = 'sql.db'  # the name of the database
+
+
+def normalize_vendor(vendor):
+    """Return a canonical name for a type of database."""
+    if not vendor:
+        return 'db'  # should this ever happen?
+    elif 'sqlite' in vendor:
+        return 'sqlite'
+    elif 'postgres' in vendor or vendor == 'psycopg2':
+        return 'postgres'
+    else:
+        return vendor
+
+
+def parse_pg_dsn(dsn):
+    """
+    Return a dictionary of the components of a postgres DSN.
+
+    >>> parse_pg_dsn('user=dog port=1543 dbname=dogdata')
+    {'user': 'dog', 'port': '1543', 'dbname': 'dogdata'}
+    """
+    # FIXME: replace by psycopg2.extensions.parse_dsn when available
+    # https://github.com/psycopg/psycopg2/pull/321
+    return {c.split('=')[0]: c.split('=')[1] for c in dsn.split() if '=' in c}
diff --git a/ddtrace/ext/system.py b/ddtrace/ext/system.py
new file mode 100644
index 0000000000..098976d095
--- /dev/null
+++ b/ddtrace/ext/system.py
@@ -0,0 +1,5 @@
+"""
+Standard system tags
+"""
+
+PID = 'system.pid'
diff --git a/ddtrace/filters.py b/ddtrace/filters.py
new file mode 100644
index 0000000000..37f3ea02cb
--- /dev/null
+++ b/ddtrace/filters.py
@@ -0,0 +1,49 @@
+import re
+
+from .ext import http
+
+
+class FilterRequestsOnUrl(object):
+    r"""Filter out traces from incoming http requests based on the request's url.
+
+    This class takes as argument a list of regular expression patterns
+    representing the urls to be excluded from tracing. A trace will be excluded
+    if its root span contains a ``http.url`` tag and if this tag matches any of
+    the provided regular expressions using the standard python regexp match
+    semantics (https://docs.python.org/2/library/re.html#re.match).
+
+    :param list regexps: a list of regular expressions (or a single string) defining
+                         the urls that should be filtered out.
+
+    Examples:
+    To filter out http calls to domain api.example.com::
+
+        FilterRequestsOnUrl(r'http://api\\.example\\.com')
+
+    To filter out http calls to all first level subdomains from example.com::
+
+        FilterRequestsOnUrl(r'http://.*\\.example\\.com')
+
+    To filter out calls to both http://test.example.com and http://example.com/healthcheck::
+
+        FilterRequestsOnUrl([r'http://test\\.example\\.com', r'http://example\\.com/healthcheck'])
+    """
+    def __init__(self, regexps):
+        if isinstance(regexps, str):
+            regexps = [regexps]
+        self._regexps = [re.compile(regexp) for regexp in regexps]
+
+    def process_trace(self, trace):
+        """
+        When the filter is registered in the tracer, ``process_trace`` is called
+        on each trace before it is sent to the agent; the returned value will
+        be fed to the next filter in the list. If ``process_trace`` returns None,
+        the whole trace is discarded. 
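+
+        A registration sketch (the ``FILTERS`` settings key mirrors the one
+        shown in the tornado integration docs; the pattern is illustrative)::
+
+            from ddtrace import tracer
+
+            tracer.configure(settings={
+                'FILTERS': [FilterRequestsOnUrl(r'http://example\\.com/healthcheck')],
+            })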
+ """ + for span in trace: + if span.parent_id is None and span.get_tag(http.URL) is not None: + url = span.get_tag(http.URL) + for regexp in self._regexps: + if regexp.match(url): + return None + return trace diff --git a/ddtrace/helpers.py b/ddtrace/helpers.py new file mode 100644 index 0000000000..3374b415f5 --- /dev/null +++ b/ddtrace/helpers.py @@ -0,0 +1,37 @@ +import ddtrace + + +def get_correlation_ids(tracer=None): + """Retrieves the Correlation Identifiers for the current active ``Trace``. + This helper method can be achieved manually and should be considered + only a shortcut. The main reason is to abstract the current ``Tracer`` + implementation so that these identifiers can be extracted either the + tracer is an OpenTracing tracer or a Datadog tracer. + + OpenTracing users can still extract these values using the ``ScopeManager`` + API, though this shortcut is a simple one-liner. The usage is: + + from ddtrace import helpers + + trace_id, span_id = helpers.get_correlation_ids() + + :returns: a tuple containing the trace_id and span_id + """ + # Consideration: currently we don't have another way to "define" a + # GlobalTracer. In the case of OpenTracing, ``opentracing.tracer`` is exposed + # and we're doing the same here for ``ddtrace.tracer``. Because this helper + # must work also with OpenTracing, we should take the right used ``Tracer``. + # At the time of writing, it's enough to support our Datadog Tracer. + + # If no tracer passed in, use global tracer + if not tracer: + tracer = ddtrace.tracer + + # If tracer is disabled, skip + if not tracer.enabled: + return None, None + + span = tracer.current_span() + if not span: + return None, None + return span.trace_id, span.span_id diff --git a/ddtrace/http/__init__.py b/ddtrace/http/__init__.py new file mode 100644 index 0000000000..0ec8d6ff08 --- /dev/null +++ b/ddtrace/http/__init__.py @@ -0,0 +1,6 @@ +from .headers import store_request_headers, store_response_headers + +__all__ = [ + 'store_request_headers', + 'store_response_headers', +] diff --git a/ddtrace/http/headers.py b/ddtrace/http/headers.py new file mode 100644 index 0000000000..b680a5c45a --- /dev/null +++ b/ddtrace/http/headers.py @@ -0,0 +1,90 @@ +import re + +from ..internal.logger import get_logger +from ..utils.http import normalize_header_name + +log = get_logger(__name__) + +REQUEST = 'request' +RESPONSE = 'response' + +# Tag normalization based on: https://docs.datadoghq.com/tagging/#defining-tags +# With the exception of '.' in header names which are replaced with '_' to avoid +# starting a "new object" on the UI. +NORMALIZE_PATTERN = re.compile(r'([^a-z0-9_\-:/]){1}') + + +def store_request_headers(headers, span, integration_config): + """ + Store request headers as a span's tags + :param headers: All the request's http headers, will be filtered through the whitelist + :type headers: dict or list + :param span: The Span instance where tags will be stored + :type span: ddtrace.Span + :param integration_config: An integration specific config object. 
+ :type integration_config: ddtrace.settings.IntegrationConfig + """ + _store_headers(headers, span, integration_config, REQUEST) + + +def store_response_headers(headers, span, integration_config): + """ + Store response headers as a span's tags + :param headers: All the response's http headers, will be filtered through the whitelist + :type headers: dict or list + :param span: The Span instance where tags will be stored + :type span: ddtrace.Span + :param integration_config: An integration specific config object. + :type integration_config: ddtrace.settings.IntegrationConfig + """ + _store_headers(headers, span, integration_config, RESPONSE) + + +def _store_headers(headers, span, integration_config, request_or_response): + """ + :param headers: A dict of http headers to be stored in the span + :type headers: dict or list + :param span: The Span instance where tags will be stored + :type span: ddtrace.span.Span + :param integration_config: An integration specific config object. + :type integration_config: ddtrace.settings.IntegrationConfig + :param request_or_response: The context of the headers, either REQUEST or RESPONSE + :type request_or_response: str + """ + if not isinstance(headers, dict): + try: + headers = dict(headers) + except Exception: + return + + if integration_config is None: + log.debug('Skipping headers tracing as no integration config was provided') + return + + for header_name, header_value in headers.items(): + if not integration_config.header_is_traced(header_name): + continue + tag_name = _normalize_tag_name(request_or_response, header_name) + span.set_tag(tag_name, header_value) + + +def _normalize_tag_name(request_or_response, header_name): + """ + Given a header name, e.g. 'Content-Type', returns the corresponding normalized tag name, i.e. + 'http.request.headers.content-type'. Rules applied to the actual header name are: + - any letter is converted to lowercase + - any digit is left unchanged + - any character outside [a-z0-9_\-:/] is converted to an underscore '_' + :param request_or_response: The context of the headers: request|response + :param header_name: The header's name + :type header_name: str + :rtype: str + """ + # Looking at: + # - http://www.iana.org/assignments/message-headers/message-headers.xhtml + # - https://tools.ietf.org/html/rfc6648 + # and for consistency with other language integrations, it seems safe to assume the following algorithm for header + # name normalization: + # - any letter is converted to lowercase + # - any digit is left unchanged + # - any character outside [a-z0-9_\-:/] is converted to an underscore '_' + normalized_name = NORMALIZE_PATTERN.sub('_', normalize_header_name(header_name)) + return 'http.{}.headers.{}'.format(request_or_response, normalized_name) diff --git a/ddtrace/internal/README.md b/ddtrace/internal/README.md new file mode 100644 index 0000000000..5cb38087f8 --- /dev/null +++ b/ddtrace/internal/README.md @@ -0,0 +1,7 @@ +# Internal +This internal module is used to define and document an internal-only API for `ddtrace`. + +These modules are not intended to be used outside of `ddtrace`. + +The APIs found within `ddtrace.internal` are subject to breaking changes at any time +and do not follow the semver versioning scheme of the `ddtrace` package.
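As a quick illustration of the header-to-tag normalization implemented above, here is a minimal, self-contained sketch that only restates the patch's own `NORMALIZE_PATTERN` and lowercasing logic; the helper name `tag_name_for` is hypothetical:

    import re

    # Mirrors ddtrace/http/headers.py: each character outside [a-z0-9_\-:/]
    # is replaced with an underscore.
    NORMALIZE_PATTERN = re.compile(r'([^a-z0-9_\-:/]){1}')

    def tag_name_for(request_or_response, header_name):
        # Lowercase and strip whitespace (what normalize_header_name does),
        # then substitute each disallowed character.
        normalized = NORMALIZE_PATTERN.sub('_', header_name.strip().lower())
        return 'http.{}.headers.{}'.format(request_or_response, normalized)

    assert tag_name_for('request', 'Content-Type') == 'http.request.headers.content-type'
    assert tag_name_for('response', 'X Custom Header') == 'http.response.headers.x_custom_header'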
diff --git a/ddtrace/internal/__init__.py b/ddtrace/internal/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ddtrace/internal/context_manager.py b/ddtrace/internal/context_manager.py new file mode 100644 index 0000000000..b884ed567c --- /dev/null +++ b/ddtrace/internal/context_manager.py @@ -0,0 +1,107 @@ +import abc +import threading +from ddtrace.vendor import six + +from .logger import get_logger +from ..context import Context + +log = get_logger(__name__) + +try: + from contextvars import ContextVar + + _DD_CONTEXTVAR = ContextVar("datadog_contextvar", default=None) + CONTEXTVARS_IS_AVAILABLE = True +except ImportError: + CONTEXTVARS_IS_AVAILABLE = False + + +class BaseContextManager(six.with_metaclass(abc.ABCMeta)): + def __init__(self, reset=True): + if reset: + self.reset() + + @abc.abstractmethod + def _has_active_context(self): + pass + + @abc.abstractmethod + def set(self, ctx): + pass + + @abc.abstractmethod + def get(self): + pass + + def reset(self): + pass + + +class ThreadLocalContext(BaseContextManager): + """ + ThreadLocalContext can be used as a tracer global reference to create + a different ``Context`` for each thread. With a synchronous tracer, this + is required to prevent multiple threads from sharing the same ``Context`` + across different executions. + """ + + def __init__(self, reset=True): + # always initialize a new thread-local context holder + super(ThreadLocalContext, self).__init__(reset=True) + + def _has_active_context(self): + """ + Determine whether we have a currently active context for this thread + + :returns: Whether an active context exists + :rtype: bool + """ + ctx = getattr(self._locals, "context", None) + return ctx is not None + + def set(self, ctx): + setattr(self._locals, "context", ctx) + + def get(self): + ctx = getattr(self._locals, "context", None) + if not ctx: + # create a new Context if it's not available + ctx = Context() + self._locals.context = ctx + + return ctx + + def reset(self): + self._locals = threading.local() + + +class ContextVarContextManager(BaseContextManager): + """ + ContextVarContextManager can be used in place of ThreadLocalContext for + Python 3.7 and above to manage a different ``Context`` object for each + thread and async task. + """ + + def _has_active_context(self): + ctx = _DD_CONTEXTVAR.get() + return ctx is not None + + def set(self, ctx): + _DD_CONTEXTVAR.set(ctx) + + def get(self): + ctx = _DD_CONTEXTVAR.get() + if not ctx: + ctx = Context() + self.set(ctx) + + return ctx + + def reset(self): + _DD_CONTEXTVAR.set(None) + + +if CONTEXTVARS_IS_AVAILABLE: + DefaultContextManager = ContextVarContextManager +else: + DefaultContextManager = ThreadLocalContext diff --git a/ddtrace/internal/hostname.py b/ddtrace/internal/hostname.py new file mode 100644 index 0000000000..5e3c941f0b --- /dev/null +++ b/ddtrace/internal/hostname.py @@ -0,0 +1,21 @@ +import functools +import socket + +_hostname = None + + +def _cached(func): + @functools.wraps(func) + def wrapper(): + global _hostname + if not _hostname: + _hostname = func() + + return _hostname + + return wrapper + + +@_cached +def get_hostname(): + return socket.gethostname() diff --git a/ddtrace/internal/logger.py b/ddtrace/internal/logger.py new file mode 100644 index 0000000000..04f8ef83f1 --- /dev/null +++ b/ddtrace/internal/logger.py @@ -0,0 +1,125 @@ +import collections +import logging + +from ..utils.formats import get_env + + +def get_logger(name): + """ + Retrieve or create a ``DDLogger`` instance.
+ + This function mirrors the behavior of `logging.getLogger`. + + If no logger with the provided name has been fetched before, then + a new one is created; otherwise the existing logger is returned. + + DEV: We do not want to mess with `logging.setLoggerClass()`. + That would affect the user's loggers; we want just our own, + selective loggers to be DDLoggers + + :param name: The name of the logger to fetch or create + :type name: str + :return: The logger instance + :rtype: ``DDLogger`` + """ + # DEV: `logging.Logger.manager` refers to the single root `logging.Manager` instance + # https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1824-L1826 # noqa + manager = logging.Logger.manager + + # If the logger does not exist yet, create it + # DEV: `Manager.loggerDict` is a dict mapping logger name to logger + # DEV: This is a simplified version of `logging.Manager.getLogger` + # https://github.com/python/cpython/blob/48769a28ad6ef4183508951fa6a378531ace26a4/Lib/logging/__init__.py#L1221-L1253 # noqa + if name not in manager.loggerDict: + manager.loggerDict[name] = DDLogger(name=name) + + # Get our logger + logger = manager.loggerDict[name] + + # If this log manager has a `_fixupParents` method then call it on our logger + # DEV: This helper is used to ensure our logger has an appropriate `Logger.parent` set, + # without this then we cannot take advantage of the root loggers handlers + # https://github.com/python/cpython/blob/7c7839329c2c66d051960ab1df096aed1cc9343e/Lib/logging/__init__.py#L1272-L1294 # noqa + # DEV: `_fixupParents` has been around for a while, but add the `hasattr` guard... just in case. + if hasattr(manager, "_fixupParents"): + manager._fixupParents(logger) + + # Return our logger + return logger + + +class DDLogger(logging.Logger): + """ + Custom rate limited logger used by ``ddtrace`` + + This logger class is used to rate limit the output of + log messages from within the ``ddtrace`` package. + """ + + __slots__ = ("buckets", "rate_limit") + + # Named tuple used for keeping track of a log line's current time bucket and the number of log lines skipped + LoggingBucket = collections.namedtuple("LoggingBucket", ("bucket", "skipped")) + + def __init__(self, *args, **kwargs): + """Constructor for ``DDLogger``""" + super(DDLogger, self).__init__(*args, **kwargs) + + # Dict to keep track of the current time bucket per name/level/pathname/lineno + self.buckets = collections.defaultdict(lambda: DDLogger.LoggingBucket(0, 0)) + + # Allow 1 log record per name/level/pathname/lineno every 60 seconds by default + # Allow configuring via `DD_LOGGING_RATE_LIMIT` + # DEV: `DD_LOGGING_RATE_LIMIT=0` means to disable all rate limiting + self.rate_limit = int(get_env("logging", "rate_limit", default=60)) + + def handle(self, record): + """ + Function used to call the handlers for a log line. + + This implementation will first determine if this log line should + be logged or rate limited, and then call the base ``logging.Logger.handle`` + function if it should be logged. + + DEV: This method has all of its code inlined to reduce function calls + + :param record: The log record being logged + :type record: ``logging.LogRecord`` + """ + # If rate limiting has been disabled (`DD_LOGGING_RATE_LIMIT=0`) then apply no rate limit + if not self.rate_limit: + super(DDLogger, self).handle(record) + return + + # Allow 1 log record by name/level/pathname/lineno every X seconds + # DEV: current unix time / rate (e.g.
300 seconds) = time bucket + # int(1546615098.8404942 / 300) = 515538 + # DEV: LogRecord `created` is a unix timestamp/float + # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10` + current_bucket = int(record.created / self.rate_limit) + + # Limit based on logger name, record level, filename, and line number + # ('ddtrace.writer', 'DEBUG', '../site-packages/ddtrace/writer.py', 137) + # This way each unique log message can get logged at least once per time period + # DEV: LogRecord has `levelname` and `levelno`, we want `levelno` e.g. `logging.DEBUG = 10` + key = (record.name, record.levelno, record.pathname, record.lineno) + + # Only log this message if the time bucket has changed from the previous time we ran + logging_bucket = self.buckets[key] + if logging_bucket.bucket != current_bucket: + # Append count of skipped messages if we have skipped some since our last logging + if logging_bucket.skipped: + record.msg = "{}, %s additional messages skipped".format(record.msg) + record.args = record.args + (logging_bucket.skipped,) + + # Reset our bucket + self.buckets[key] = DDLogger.LoggingBucket(current_bucket, 0) + + # Call the base handle to actually log this record + super(DDLogger, self).handle(record) + else: + # Increment the count of records we have skipped + # DEV: `self.buckets[key]` is a tuple which is immutable so recreate instead + self.buckets[key] = DDLogger.LoggingBucket(logging_bucket.bucket, logging_bucket.skipped + 1) diff --git a/ddtrace/internal/rate_limiter.py b/ddtrace/internal/rate_limiter.py new file mode 100644 index 0000000000..8c8ac68eab --- /dev/null +++ b/ddtrace/internal/rate_limiter.py @@ -0,0 +1,142 @@ +from __future__ import division +import threading + +from ..vendor import monotonic + + +class RateLimiter(object): + """ + A token bucket rate limiter implementation + """ + + __slots__ = ( + "_lock", + "current_window", + "last_update", + "max_tokens", + "prev_window_rate", + "rate_limit", + "tokens", + "tokens_allowed", + "tokens_total", + ) + + def __init__(self, rate_limit): + """ + Constructor for RateLimiter + + :param rate_limit: The rate limit to apply for number of requests per second. 
+ rate limit > 0: max number of requests to allow per second, + rate limit == 0: disallow all requests, + rate limit < 0: allow all requests + :type rate_limit: :obj:`int` + """ + self.rate_limit = rate_limit + self.tokens = rate_limit + self.max_tokens = rate_limit + + self.last_update = monotonic.monotonic() + + self.current_window = 0 + self.tokens_allowed = 0 + self.tokens_total = 0 + self.prev_window_rate = None + + self._lock = threading.Lock() + + def is_allowed(self): + """ + Check whether the current request is allowed or not + + This method will also reduce the number of available tokens by 1 + + :returns: Whether the current request is allowed or not + :rtype: :obj:`bool` + """ + # Determine if it is allowed + allowed = self._is_allowed() + # Update counts used to determine effective rate + self._update_rate_counts(allowed) + return allowed + + def _update_rate_counts(self, allowed): + now = monotonic.monotonic() + + # No tokens have been seen yet, start a new window + if not self.current_window: + self.current_window = now + + # If more than 1 second has passed since the last window, reset + elif now - self.current_window >= 1.0: + # Store previous window's rate to average with current for `.effective_rate` + self.prev_window_rate = self._current_window_rate() + self.tokens_allowed = 0 + self.tokens_total = 0 + self.current_window = now + + # Keep track of total tokens seen vs allowed + if allowed: + self.tokens_allowed += 1 + self.tokens_total += 1 + + def _is_allowed(self): + # Rate limit of 0 blocks everything + if self.rate_limit == 0: + return False + + # Negative rate limit disables rate limiting + elif self.rate_limit < 0: + return True + + # Lock: we need this to be thread safe, since the token bucket is shared by all threads + with self._lock: + self._replenish() + + if self.tokens >= 1: + self.tokens -= 1 + return True + + return False + + def _replenish(self): + # If we are at the max, we do not need to add any more + if self.tokens == self.max_tokens: + return + + # Add more available tokens based on how much time has passed + now = monotonic.monotonic() + elapsed = now - self.last_update + self.last_update = now + + # Update the number of available tokens, but ensure we do not exceed the max + self.tokens = min(self.max_tokens, self.tokens + (elapsed * self.rate_limit),) + + def _current_window_rate(self): + # No tokens have been seen, effectively 100% sample rate + # DEV: This is to avoid division by zero error + if not self.tokens_total: + return 1.0 + + # Get rate of tokens allowed + return self.tokens_allowed / self.tokens_total + + @property + def effective_rate(self): + """ + Return the effective sample rate of this rate limiter + + :returns: Effective sample rate value 0.0 <= rate <= 1.0 + :rtype: :obj:`float` + """ + # If we have not had a previous window yet, return current rate + if self.prev_window_rate is None: + return self._current_window_rate() + + return (self._current_window_rate() + self.prev_window_rate) / 2.0 + + def __repr__(self): + return "{}(rate_limit={!r}, tokens={!r}, last_update={!r}, effective_rate={!r})".format( + self.__class__.__name__, self.rate_limit, self.tokens, self.last_update, self.effective_rate, + ) + + __str__ = __repr__ diff --git a/ddtrace/internal/runtime/__init__.py b/ddtrace/internal/runtime/__init__.py new file mode 100644 index 0000000000..30ba177f21 --- /dev/null +++ b/ddtrace/internal/runtime/__init__.py @@ -0,0 +1,12 @@ +from .runtime_metrics import ( + RuntimeTags, + RuntimeMetrics, + RuntimeWorker, +) + + +__all__ = [
"RuntimeTags", + "RuntimeMetrics", + "RuntimeWorker", +] diff --git a/ddtrace/internal/runtime/collector.py b/ddtrace/internal/runtime/collector.py new file mode 100644 index 0000000000..c8b8ac0bf8 --- /dev/null +++ b/ddtrace/internal/runtime/collector.py @@ -0,0 +1,79 @@ +import importlib + +from ..logger import get_logger + +log = get_logger(__name__) + + +class ValueCollector(object): + """A basic state machine useful for collecting, caching and updating data + obtained from different Python modules. + + The two primary use-cases are + 1) data loaded once (like tagging information) + 2) periodically updating data sources (like thread count) + + Functionality is provided for requiring and importing modules which may or + may not be installed. + """ + + enabled = True + periodic = False + required_modules = [] + value = None + value_loaded = False + + def __init__(self, enabled=None, periodic=None, required_modules=None): + self.enabled = self.enabled if enabled is None else enabled + self.periodic = self.periodic if periodic is None else periodic + self.required_modules = self.required_modules if required_modules is None else required_modules + + self._modules_successfully_loaded = False + self.modules = self._load_modules() + if self._modules_successfully_loaded: + self._on_modules_load() + + def _on_modules_load(self): + """Hook triggered after all required_modules have been successfully loaded. + """ + + def _load_modules(self): + modules = {} + try: + for module in self.required_modules: + modules[module] = importlib.import_module(module) + self._modules_successfully_loaded = True + except ImportError: + # DEV: disable collector if we cannot load any of the required modules + self.enabled = False + log.warning('Could not import module "%s" for %s. Disabling collector.', module, self) + return None + return modules + + def collect(self, keys=None): + """Returns metrics as collected by `collect_fn`. + + :param keys: The keys of the metrics to collect. 
+ """ + if not self.enabled: + return self.value + + keys = keys or set() + + if not self.periodic and self.value_loaded: + return self.value + + # call underlying collect function and filter out keys not requested + self.value = self.collect_fn(keys) + + # filter values for keys + if len(keys) > 0 and isinstance(self.value, list): + self.value = [(k, v) for (k, v) in self.value if k in keys] + + self.value_loaded = True + return self.value + + def __repr__(self): + return "<{}(enabled={},periodic={},required_modules={})>".format( + self.__class__.__name__, self.enabled, self.periodic, self.required_modules, + ) diff --git a/ddtrace/internal/runtime/constants.py b/ddtrace/internal/runtime/constants.py new file mode 100644 index 0000000000..d1a627a9c2 --- /dev/null +++ b/ddtrace/internal/runtime/constants.py @@ -0,0 +1,32 @@ +GC_COUNT_GEN0 = "runtime.python.gc.count.gen0" +GC_COUNT_GEN1 = "runtime.python.gc.count.gen1" +GC_COUNT_GEN2 = "runtime.python.gc.count.gen2" + +THREAD_COUNT = "runtime.python.thread_count" +MEM_RSS = "runtime.python.mem.rss" +CPU_TIME_SYS = "runtime.python.cpu.time.sys" +CPU_TIME_USER = "runtime.python.cpu.time.user" +CPU_PERCENT = "runtime.python.cpu.percent" +CTX_SWITCH_VOLUNTARY = "runtime.python.cpu.ctx_switch.voluntary" +CTX_SWITCH_INVOLUNTARY = "runtime.python.cpu.ctx_switch.involuntary" + +GC_RUNTIME_METRICS = set([GC_COUNT_GEN0, GC_COUNT_GEN1, GC_COUNT_GEN2]) + +PSUTIL_RUNTIME_METRICS = set( + [THREAD_COUNT, MEM_RSS, CTX_SWITCH_VOLUNTARY, CTX_SWITCH_INVOLUNTARY, CPU_TIME_SYS, CPU_TIME_USER, CPU_PERCENT] +) + +DEFAULT_RUNTIME_METRICS = GC_RUNTIME_METRICS | PSUTIL_RUNTIME_METRICS + +SERVICE = "service" +ENV = "env" +LANG_INTERPRETER = "lang_interpreter" +LANG_VERSION = "lang_version" +LANG = "lang" +TRACER_VERSION = "tracer_version" + +TRACER_TAGS = set([SERVICE, ENV]) + +PLATFORM_TAGS = set([LANG_INTERPRETER, LANG_VERSION, LANG, TRACER_VERSION]) + +DEFAULT_RUNTIME_TAGS = TRACER_TAGS | PLATFORM_TAGS diff --git a/ddtrace/internal/runtime/container.py b/ddtrace/internal/runtime/container.py new file mode 100644 index 0000000000..e13c07eeee --- /dev/null +++ b/ddtrace/internal/runtime/container.py @@ -0,0 +1,105 @@ +import re + +from ..logger import get_logger + +log = get_logger(__name__) + + +class CGroupInfo(object): + """ + CGroup class for container information parsed from a group cgroup file + """ + + __slots__ = ("id", "groups", "path", "container_id", "controllers", "pod_id") + + UUID_SOURCE_PATTERN = r"[0-9a-f]{8}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{4}[-_][0-9a-f]{12}" + CONTAINER_SOURCE_PATTERN = r"[0-9a-f]{64}" + + LINE_RE = re.compile(r"^(\d+):([^:]*):(.+)$") + POD_RE = re.compile(r"pod({0})(?:\.slice)?$".format(UUID_SOURCE_PATTERN)) + CONTAINER_RE = re.compile(r"({0}|{1})(?:\.scope)?$".format(UUID_SOURCE_PATTERN, CONTAINER_SOURCE_PATTERN)) + + def __init__(self, **kwargs): + # Initialize all attributes in __slots__ to `None` + # DEV: Otherwise we'll get `AttributeError` when trying to access if they are unset + for attr in self.__slots__: + setattr(self, attr, kwargs.get(attr)) + + @classmethod + def from_line(cls, line): + """ + Parse a new :class:`CGroupInfo` from the provided line + + :param line: A line from a cgroup file (e.g. 
/proc/self/cgroup) to parse information from + :type line: str + :returns: A :class:`CGroupInfo` object with all parsed data, if the line is valid, otherwise `None` + :rtype: :class:`CGroupInfo` | None + + """ + # Clean up the line + line = line.strip() + + # Ensure the line is valid + match = cls.LINE_RE.match(line) + if not match: + return None + + # Create our new `CGroupInfo` and set attributes from the line + info = cls() + info.id, info.groups, info.path = match.groups() + + # Parse the controllers from the groups + info.controllers = [c.strip() for c in info.groups.split(",") if c.strip()] + + # Break up the path to grab container_id and pod_id if available + # e.g. /docker/ + # e.g. /kubepods/test/pod/ + parts = info.path.split("/") + + # Grab the container id from the path if a valid id is present + if parts: + match = cls.CONTAINER_RE.match(parts.pop()) + if match: + info.container_id = match.group(1) + + # Grab the pod id from the path if a valid id is present + if parts: + match = cls.POD_RE.match(parts.pop()) + if match: + info.pod_id = match.group(1) + + return info + + def __str__(self): + return self.__repr__() + + def __repr__(self): + return "{}(id={!r}, groups={!r}, path={!r}, container_id={!r}, controllers={!r}, pod_id={!r})".format( + self.__class__.__name__, self.id, self.groups, self.path, self.container_id, self.controllers, self.pod_id, + ) + + +def get_container_info(pid="self"): + """ + Helper to fetch the current container id, if we are running in a container + + We will parse `/proc/{pid}/cgroup` to determine our container id. + + :param pid: The pid of the cgroup file to parse (default: 'self') + :type pid: str | int + :returns: The cgroup file info if found, or else None + :rtype: :class:`CGroupInfo` | None + """ + try: + cgroup_file = "/proc/{0}/cgroup".format(pid) + with open(cgroup_file, mode="r") as fp: + for line in fp: + info = CGroupInfo.from_line(line) + if info and info.container_id: + return info + except Exception: + log.debug("Failed to parse cgroup file for pid %r", pid, exc_info=True) + + return None diff --git a/ddtrace/internal/runtime/metric_collectors.py b/ddtrace/internal/runtime/metric_collectors.py new file mode 100644 index 0000000000..eb140294b6 --- /dev/null +++ b/ddtrace/internal/runtime/metric_collectors.py @@ -0,0 +1,91 @@ +import os + +from .collector import ValueCollector +from .constants import ( + GC_COUNT_GEN0, + GC_COUNT_GEN1, + GC_COUNT_GEN2, + THREAD_COUNT, + MEM_RSS, + CTX_SWITCH_VOLUNTARY, + CTX_SWITCH_INVOLUNTARY, + CPU_TIME_SYS, + CPU_TIME_USER, + CPU_PERCENT, +) + + +class RuntimeMetricCollector(ValueCollector): + value = [] + periodic = True + + +class GCRuntimeMetricCollector(RuntimeMetricCollector): + """ Collector for garbage collection generational counts + + More information at https://docs.python.org/3/library/gc.html + """ + + required_modules = ["gc"] + + def collect_fn(self, keys): + gc = self.modules.get("gc") + + counts = gc.get_count() + metrics = [ + (GC_COUNT_GEN0, counts[0]), + (GC_COUNT_GEN1, counts[1]), + (GC_COUNT_GEN2, counts[2]), + ] + + return metrics + + +class PSUtilRuntimeMetricCollector(RuntimeMetricCollector): + """Collector for psutil metrics. + + Performs batched operations via proc.oneshot() to optimize the calls. + See https://psutil.readthedocs.io/en/latest/#psutil.Process.oneshot + for more information.
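+
+    Note: CPU time and context switch values are reported as deltas since the
+    previous collection (see `stored_value` below), not as absolute totals.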
+ """ + + required_modules = ["psutil"] + stored_value = dict( + CPU_TIME_SYS_TOTAL=0, CPU_TIME_USER_TOTAL=0, CTX_SWITCH_VOLUNTARY_TOTAL=0, CTX_SWITCH_INVOLUNTARY_TOTAL=0, + ) + + def _on_modules_load(self): + self.proc = self.modules["psutil"].Process(os.getpid()) + + def collect_fn(self, keys): + with self.proc.oneshot(): + # only return time deltas + # TODO[tahir]: better abstraction for metrics based on last value + cpu_time_sys_total = self.proc.cpu_times().system + cpu_time_user_total = self.proc.cpu_times().user + cpu_time_sys = cpu_time_sys_total - self.stored_value["CPU_TIME_SYS_TOTAL"] + cpu_time_user = cpu_time_user_total - self.stored_value["CPU_TIME_USER_TOTAL"] + + ctx_switch_voluntary_total = self.proc.num_ctx_switches().voluntary + ctx_switch_involuntary_total = self.proc.num_ctx_switches().involuntary + ctx_switch_voluntary = ctx_switch_voluntary_total - self.stored_value["CTX_SWITCH_VOLUNTARY_TOTAL"] + ctx_switch_involuntary = ctx_switch_involuntary_total - self.stored_value["CTX_SWITCH_INVOLUNTARY_TOTAL"] + + self.stored_value = dict( + CPU_TIME_SYS_TOTAL=cpu_time_sys_total, + CPU_TIME_USER_TOTAL=cpu_time_user_total, + CTX_SWITCH_VOLUNTARY_TOTAL=ctx_switch_voluntary_total, + CTX_SWITCH_INVOLUNTARY_TOTAL=ctx_switch_involuntary_total, + ) + + metrics = [ + (THREAD_COUNT, self.proc.num_threads()), + (MEM_RSS, self.proc.memory_info().rss), + (CTX_SWITCH_VOLUNTARY, ctx_switch_voluntary), + (CTX_SWITCH_INVOLUNTARY, ctx_switch_involuntary), + (CPU_TIME_SYS, cpu_time_sys), + (CPU_TIME_USER, cpu_time_user), + (CPU_PERCENT, self.proc.cpu_percent()), + ] + + return metrics diff --git a/ddtrace/internal/runtime/runtime_metrics.py b/ddtrace/internal/runtime/runtime_metrics.py new file mode 100644 index 0000000000..cbfa8328b8 --- /dev/null +++ b/ddtrace/internal/runtime/runtime_metrics.py @@ -0,0 +1,74 @@ +import itertools + + +from ... import _worker +from ..logger import get_logger +from .constants import ( + DEFAULT_RUNTIME_METRICS, + DEFAULT_RUNTIME_TAGS, +) +from .metric_collectors import ( + GCRuntimeMetricCollector, + PSUtilRuntimeMetricCollector, +) +from .tag_collectors import ( + PlatformTagCollector, + TracerTagCollector, +) + +log = get_logger(__name__) + + +class RuntimeCollectorsIterable(object): + def __init__(self, enabled=None): + self._enabled = enabled or self.ENABLED + # Initialize the collectors. + self._collectors = [c() for c in self.COLLECTORS] + + def __iter__(self): + collected = (collector.collect(self._enabled) for collector in self._collectors) + return itertools.chain.from_iterable(collected) + + def __repr__(self): + return "{}(enabled={})".format(self.__class__.__name__, self._enabled,) + + +class RuntimeTags(RuntimeCollectorsIterable): + ENABLED = DEFAULT_RUNTIME_TAGS + COLLECTORS = [ + PlatformTagCollector, + TracerTagCollector, + ] + + +class RuntimeMetrics(RuntimeCollectorsIterable): + ENABLED = DEFAULT_RUNTIME_METRICS + COLLECTORS = [ + GCRuntimeMetricCollector, + PSUtilRuntimeMetricCollector, + ] + + +class RuntimeWorker(_worker.PeriodicWorkerThread): + """ Worker thread for collecting and writing runtime metrics to a DogStatsd + client. 
+ """ + + FLUSH_INTERVAL = 10 + + def __init__(self, statsd_client, flush_interval=FLUSH_INTERVAL): + super(RuntimeWorker, self).__init__(interval=flush_interval, name=self.__class__.__name__) + self._statsd_client = statsd_client + self._runtime_metrics = RuntimeMetrics() + + def flush(self): + with self._statsd_client: + for key, value in self._runtime_metrics: + log.debug("Writing metric %s:%s", key, value) + self._statsd_client.gauge(key, value) + + run_periodic = flush + on_shutdown = flush + + def __repr__(self): + return "{}(runtime_metrics={})".format(self.__class__.__name__, self._runtime_metrics,) diff --git a/ddtrace/internal/runtime/tag_collectors.py b/ddtrace/internal/runtime/tag_collectors.py new file mode 100644 index 0000000000..41bad267ad --- /dev/null +++ b/ddtrace/internal/runtime/tag_collectors.py @@ -0,0 +1,58 @@ +from .collector import ValueCollector +from .constants import ( + SERVICE, + LANG_INTERPRETER, + LANG_VERSION, + LANG, + TRACER_VERSION, +) +from ...constants import ENV_KEY + + +class RuntimeTagCollector(ValueCollector): + periodic = False + value = [] + + +class TracerTagCollector(RuntimeTagCollector): + """ Tag collector for the ddtrace Tracer + """ + + required_modules = ["ddtrace"] + + def collect_fn(self, keys): + ddtrace = self.modules.get("ddtrace") + tags = [(SERVICE, service) for service in ddtrace.tracer._services] + if ENV_KEY in ddtrace.tracer.tags: + tags.append((ENV_KEY, ddtrace.tracer.tags[ENV_KEY])) + return tags + + +class PlatformTagCollector(RuntimeTagCollector): + """ Tag collector for the Python interpreter implementation. + + Tags collected: + - ``lang_interpreter``: + + * For CPython this is 'CPython'. + * For Pypy this is ``PyPy`` + * For Jython this is ``Jython`` + + - `lang_version``, eg ``2.7.10`` + - ``lang`` e.g. ``Python`` + - ``tracer_version`` e.g. ``0.29.0`` + + """ + + required_modules = ("platform", "ddtrace") + + def collect_fn(self, keys): + platform = self.modules.get("platform") + ddtrace = self.modules.get("ddtrace") + tags = [ + (LANG, "python"), + (LANG_INTERPRETER, platform.python_implementation()), + (LANG_VERSION, platform.python_version()), + (TRACER_VERSION, ddtrace.__version__), + ] + return tags diff --git a/ddtrace/internal/writer.py b/ddtrace/internal/writer.py new file mode 100644 index 0000000000..13250b8370 --- /dev/null +++ b/ddtrace/internal/writer.py @@ -0,0 +1,284 @@ +# stdlib +import itertools +import random +import time + +from .. import api +from .. 
import _worker +from ..internal.logger import get_logger +from ..sampler import BasePrioritySampler +from ..settings import config +from ..vendor import monotonic +from ddtrace.vendor.six.moves.queue import Queue, Full, Empty + +log = get_logger(__name__) + + +MAX_TRACES = 1000 + +DEFAULT_TIMEOUT = 5 +LOG_ERR_INTERVAL = 60 + + +class AgentWriter(_worker.PeriodicWorkerThread): + + QUEUE_PROCESSING_INTERVAL = 1 + + def __init__( + self, + hostname="localhost", + port=8126, + uds_path=None, + https=False, + shutdown_timeout=DEFAULT_TIMEOUT, + filters=None, + sampler=None, + priority_sampler=None, + dogstatsd=None, + ): + super(AgentWriter, self).__init__( + interval=self.QUEUE_PROCESSING_INTERVAL, exit_timeout=shutdown_timeout, name=self.__class__.__name__ + ) + self._trace_queue = Q(maxsize=MAX_TRACES) + self._filters = filters + self._sampler = sampler + self._priority_sampler = priority_sampler + self._last_error_ts = 0 + self.dogstatsd = dogstatsd + self.api = api.API( + hostname, port, uds_path=uds_path, https=https, priority_sampling=priority_sampler is not None + ) + if hasattr(time, "thread_time"): + self._last_thread_time = time.thread_time() + self.start() + + def recreate(self): + """ Create a new instance of :class:`AgentWriter` using the same settings from this instance + + :rtype: :class:`AgentWriter` + :returns: A new :class:`AgentWriter` instance + """ + writer = self.__class__( + hostname=self.api.hostname, + port=self.api.port, + uds_path=self.api.uds_path, + https=self.api.https, + shutdown_timeout=self.exit_timeout, + filters=self._filters, + priority_sampler=self._priority_sampler, + dogstatsd=self.dogstatsd, + ) + return writer + + @property + def _send_stats(self): + """Determine if we're sending stats or not.""" + return bool(config.health_metrics_enabled and self.dogstatsd) + + def write(self, spans=None, services=None): + if spans: + self._trace_queue.put(spans) + + def flush_queue(self): + try: + traces = self._trace_queue.get(block=False) + except Empty: + return + + if self._send_stats: + traces_queue_length = len(traces) + traces_queue_spans = sum(map(len, traces)) + + # Before sending the traces, make them go through the + # filters + try: + traces = self._apply_filters(traces) + except Exception: + log.error("error while filtering traces", exc_info=True) + return + + if self._send_stats: + # Number of traces removed by the filters + traces_filtered = traces_queue_length - len(traces) + + # If we have data, let's try to send it.
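+        # DEV: `send_traces` returns one response (or Exception) per request made.
+        #      Error responses are logged below; successful responses may carry
+        #      updated `rate_by_service` rates for the priority sampler.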
+ traces_responses = self.api.send_traces(traces) + for response in traces_responses: + if isinstance(response, Exception) or response.status >= 400: + self._log_error_status(response) + elif self._priority_sampler or isinstance(self._sampler, BasePrioritySampler): + result_traces_json = response.get_json() + if result_traces_json and "rate_by_service" in result_traces_json: + if self._priority_sampler: + self._priority_sampler.update_rate_by_service_sample_rates( + result_traces_json["rate_by_service"], + ) + if isinstance(self._sampler, BasePrioritySampler): + self._sampler.update_rate_by_service_sample_rates(result_traces_json["rate_by_service"],) + + # Dump statistics + # NOTE: Do not use the buffering of dogstatsd as it's not thread-safe + # https://github.com/DataDog/datadogpy/issues/439 + if self._send_stats: + # Statistics about the queue length, size and number of spans + self.dogstatsd.increment("datadog.tracer.flushes") + self._histogram_with_total("datadog.tracer.flush.traces", traces_queue_length) + self._histogram_with_total("datadog.tracer.flush.spans", traces_queue_spans) + + # Statistics about the filtering + self._histogram_with_total("datadog.tracer.flush.traces_filtered", traces_filtered) + + # Statistics about API + self._histogram_with_total("datadog.tracer.api.requests", len(traces_responses)) + + self._histogram_with_total( + "datadog.tracer.api.errors", len(list(t for t in traces_responses if isinstance(t, Exception))) + ) + for status, grouped_responses in itertools.groupby( + sorted((t for t in traces_responses if not isinstance(t, Exception)), key=lambda r: r.status), + key=lambda r: r.status, + ): + self._histogram_with_total( + "datadog.tracer.api.responses", len(list(grouped_responses)), tags=["status:%d" % status] + ) + + # Statistics about the writer thread + if hasattr(time, "thread_time"): + new_thread_time = time.thread_time() + diff = new_thread_time - self._last_thread_time + self._last_thread_time = new_thread_time + self.dogstatsd.histogram("datadog.tracer.writer.cpu_time", diff) + + def _histogram_with_total(self, name, value, tags=None): + """Helper to add metric as a histogram and with a `.total` counter""" + self.dogstatsd.histogram(name, value, tags=tags) + self.dogstatsd.increment("%s.total" % (name,), value, tags=tags) + + def run_periodic(self): + if self._send_stats: + self.dogstatsd.gauge("datadog.tracer.heartbeat", 1) + + try: + self.flush_queue() + finally: + if not self._send_stats: + return + + # Statistics about the rate at which spans are inserted in the queue + dropped, enqueued, enqueued_lengths = self._trace_queue.reset_stats() + self.dogstatsd.gauge("datadog.tracer.queue.max_length", self._trace_queue.maxsize) + self.dogstatsd.increment("datadog.tracer.queue.dropped.traces", dropped) + self.dogstatsd.increment("datadog.tracer.queue.enqueued.traces", enqueued) + self.dogstatsd.increment("datadog.tracer.queue.enqueued.spans", enqueued_lengths) + + def on_shutdown(self): + try: + self.run_periodic() + finally: + if not self._send_stats: + return + + self.dogstatsd.increment("datadog.tracer.shutdown") + + def _log_error_status(self, response): + log_level = log.debug + now = monotonic.monotonic() + if now > self._last_error_ts + LOG_ERR_INTERVAL: + log_level = log.error + self._last_error_ts = now + prefix = "Failed to send traces to Datadog Agent at %s: " + if isinstance(response, api.Response): + log_level( + prefix + "HTTP error status %s, reason %s, message %s", + self.api, + response.status, + response.reason, + response.msg, + ) 
+ else: + log_level( + prefix + "%s", self.api, response, + ) + + def _apply_filters(self, traces): + """ + Here we make each trace go through the filters configured in the + tracer. There is no need for a lock since the traces are owned by the + AgentWriter at that point. + """ + if self._filters is not None: + filtered_traces = [] + for trace in traces: + for filtr in self._filters: + trace = filtr.process_trace(trace) + if trace is None: + break + if trace is not None: + filtered_traces.append(trace) + return filtered_traces + return traces + + +class Q(Queue): + """ + Q is a threadsafe queue that lets you pop everything at once and + will randomly overwrite elements when it's over the max size. + + This queue also exposes some statistics about its length, the number of items dropped, etc. + """ + + def __init__(self, maxsize=0): + # Cannot use super() here because Queue in Python2 is old style class + Queue.__init__(self, maxsize) + # Number of items dropped (queue full) + self.dropped = 0 + # Number of items accepted + self.accepted = 0 + # Cumulative length of accepted items + self.accepted_lengths = 0 + + def put(self, item): + try: + # Cannot use super() here because Queue in Python2 is old style class + Queue.put(self, item, block=False) + except Full: + # If the queue is full, replace a random item. We need to make sure + # the queue was not emptied in the meantime, so we check the qsize + # value while holding the lock. + with self.mutex: + qsize = self._qsize() + if qsize != 0: + idx = random.randrange(0, qsize) + self.queue[idx] = item + log.warning("Writer queue is full (more than %d traces), some traces will be lost", self.maxsize) + self.dropped += 1 + self._update_stats(item) + return + # The queue has been emptied, simply retry putting item + return self.put(item) + else: + with self.mutex: + self._update_stats(item) + + def _update_stats(self, item): + # self.mutex needs to be locked to make sure we don't lose data when resetting + self.accepted += 1 + if hasattr(item, "__len__"): + item_length = len(item) + else: + item_length = 1 + self.accepted_lengths += item_length + + def reset_stats(self): + """Reset the stats to 0. + + :return: The current value of dropped, accepted and accepted_lengths. + """ + with self.mutex: + dropped, accepted, accepted_lengths = (self.dropped, self.accepted, self.accepted_lengths) + self.dropped, self.accepted, self.accepted_lengths = 0, 0, 0 + return dropped, accepted, accepted_lengths + + def _get(self): + things = self.queue + self._init(self.maxsize) + return things diff --git a/ddtrace/monkey.py b/ddtrace/monkey.py new file mode 100644 index 0000000000..88b9fe984f --- /dev/null +++ b/ddtrace/monkey.py @@ -0,0 +1,190 @@ +"""Patch libraries to be automatically instrumented. + +It can monkey patch supported standard libraries and third party modules. +A patched module will automatically report spans with its default configuration. + +A library's instrumentation can be configured (for instance, to report as another service) +using Pin. For that, check its documentation.
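+
+For example (illustrative)::
+
+    from ddtrace import monkey
+    monkey.patch_all(redis=False)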
+""" +import importlib +import sys +import threading + +from ddtrace.vendor.wrapt.importer import when_imported + +from .internal.logger import get_logger + + +log = get_logger(__name__) + +# Default set of modules to automatically patch or not +PATCH_MODULES = { + 'asyncio': False, + 'boto': True, + 'botocore': True, + 'bottle': False, + 'cassandra': True, + 'celery': True, + 'consul': True, + 'elasticsearch': True, + 'algoliasearch': True, + 'futures': False, # experimental propagation + 'grpc': True, + 'mongoengine': True, + 'mysql': True, + 'mysqldb': True, + 'pymysql': True, + 'psycopg': True, + 'pylibmc': True, + 'pymemcache': True, + 'pymongo': True, + 'redis': True, + 'rediscluster': True, + 'requests': True, + 'sqlalchemy': False, # Prefer DB client instrumentation + 'sqlite3': True, + 'aiohttp': True, # requires asyncio (Python 3.4+) + 'aiopg': True, + 'aiobotocore': False, + 'httplib': False, + 'vertica': True, + 'molten': True, + 'jinja2': True, + 'mako': True, + 'flask': True, + 'kombu': False, + + # Ignore some web framework integrations that might be configured explicitly in code + 'django': False, + 'falcon': False, + 'pylons': False, + 'pyramid': False, + + # Standard library modules off by default + 'logging': False, +} + +_LOCK = threading.Lock() +_PATCHED_MODULES = set() + +# Modules which are patched on first use +# DEV: These modules are patched when the user first imports them, rather than +# explicitly importing and patching them on application startup `ddtrace.patch_all(module=True)` +# DEV: This ensures we do not patch a module until it is needed +# DEV: => +_PATCH_ON_IMPORT = { + 'celery': ('celery', ), + 'flask': ('flask, '), + 'gevent': ('gevent', ), + 'requests': ('requests', ), +} + + +class PatchException(Exception): + """Wraps regular `Exception` class when patching modules""" + pass + + +def _on_import_factory(module, raise_errors=True): + """Factory to create an import hook for the provided module name""" + def on_import(hook): + # Import and patch module + path = 'ddtrace.contrib.%s' % module + imported_module = importlib.import_module(path) + imported_module.patch() + + return on_import + + +def patch_all(**patch_modules): + """Automatically patches all available modules. + + :param dict patch_modules: Override whether particular modules are patched or not. + + >>> patch_all(redis=False, cassandra=False) + """ + modules = PATCH_MODULES.copy() + modules.update(patch_modules) + + patch(raise_errors=False, **modules) + + +def patch(raise_errors=True, **patch_modules): + """Patch only a set of given modules. + + :param bool raise_errors: Raise error if one patch fail. + :param dict patch_modules: List of modules to patch. 
+ + >>> patch(psycopg=True, elasticsearch=True) + """ + modules = [m for (m, should_patch) in patch_modules.items() if should_patch] + for module in modules: + if module in _PATCH_ON_IMPORT: + # If the module has already been imported then patch immediately + if module in sys.modules: + patch_module(module, raise_errors=raise_errors) + + # Otherwise, add a hook to patch when it is imported for the first time + else: + # Use factory to create handler to close over `module` and `raise_errors` values from this loop + when_imported(module)(_on_import_factory(module, raise_errors)) + + # manually add module to patched modules + _PATCHED_MODULES.add(module) + else: + patch_module(module, raise_errors=raise_errors) + + patched_modules = get_patched_modules() + log.info( + 'patched %s/%s modules (%s)', + len(patched_modules), + len(modules), + ','.join(patched_modules), + ) + + +def patch_module(module, raise_errors=True): + """Patch a single module. + + Returns whether the module was successfully patched. + """ + try: + return _patch_module(module) + except Exception: + if raise_errors: + raise + log.debug('failed to patch %s', module, exc_info=True) + return False + + +def get_patched_modules(): + """Get the list of patched modules""" + with _LOCK: + return sorted(_PATCHED_MODULES) + + +def _patch_module(module): + """_patch_module will attempt to monkey patch the module. + + Returns whether the module got patched; + can also raise errors if patching fails. + """ + path = 'ddtrace.contrib.%s' % module + with _LOCK: + if module in _PATCHED_MODULES and module not in _PATCH_ON_IMPORT: + log.debug('already patched: %s', path) + return False + + try: + imported_module = importlib.import_module(path) + imported_module.patch() + except ImportError: + # if the import fails, the integration is not available + raise PatchException('integration not available') + except AttributeError: + # if patch() is not available in the module, it means + # that the library is not installed in the environment + raise PatchException('module not installed') + + _PATCHED_MODULES.add(module) + return True diff --git a/ddtrace/opentracer/__init__.py b/ddtrace/opentracer/__init__.py new file mode 100644 index 0000000000..cf5e041217 --- /dev/null +++ b/ddtrace/opentracer/__init__.py @@ -0,0 +1,7 @@ +from .tracer import Tracer +from .helpers import set_global_tracer + +__all__ = [ + 'Tracer', + 'set_global_tracer', +] diff --git a/ddtrace/opentracer/helpers.py b/ddtrace/opentracer/helpers.py new file mode 100644 index 0000000000..f088c3f791 --- /dev/null +++ b/ddtrace/opentracer/helpers.py @@ -0,0 +1,16 @@ +import opentracing +import ddtrace + +""" +Helper routines for Datadog OpenTracing.
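+
+For example (illustrative)::
+
+    from ddtrace.opentracer import Tracer, set_global_tracer
+    set_global_tracer(Tracer('my_service'))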
+""" + + +def set_global_tracer(tracer): + """Sets the global tracers to the given tracer.""" + + # overwrite the opentracer reference + opentracing.tracer = tracer + + # overwrite the Datadog tracer reference + ddtrace.tracer = tracer._dd_tracer diff --git a/ddtrace/opentracer/propagation/__init__.py b/ddtrace/opentracer/propagation/__init__.py new file mode 100644 index 0000000000..28f5ad626c --- /dev/null +++ b/ddtrace/opentracer/propagation/__init__.py @@ -0,0 +1,6 @@ +from .http import HTTPPropagator + + +__all__ = [ + 'HTTPPropagator', +] diff --git a/ddtrace/opentracer/propagation/binary.py b/ddtrace/opentracer/propagation/binary.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ddtrace/opentracer/propagation/http.py b/ddtrace/opentracer/propagation/http.py new file mode 100644 index 0000000000..9652c8eaf9 --- /dev/null +++ b/ddtrace/opentracer/propagation/http.py @@ -0,0 +1,79 @@ +from opentracing import InvalidCarrierException, SpanContextCorruptedException +from ddtrace.propagation.http import HTTPPropagator as DDHTTPPropagator + +from ...internal.logger import get_logger +from ..span_context import SpanContext +from .propagator import Propagator + + +log = get_logger(__name__) + +HTTP_BAGGAGE_PREFIX = 'ot-baggage-' +HTTP_BAGGAGE_PREFIX_LEN = len(HTTP_BAGGAGE_PREFIX) + + +class HTTPPropagator(Propagator): + """OpenTracing compatible HTTP_HEADER and TEXT_MAP format propagator. + + `HTTPPropagator` provides compatibility by using existing OpenTracing + compatible methods from the ddtracer along with new logic supporting the + outstanding OpenTracing-defined functionality. + """ + + __slots__ = ['_dd_propagator'] + + def __init__(self): + self._dd_propagator = DDHTTPPropagator() + + def inject(self, span_context, carrier): + """Inject a span context into a carrier. + + *span_context* is injected into the carrier by first using an + :class:`ddtrace.propagation.http.HTTPPropagator` to inject the ddtracer + specific fields. + + Then the baggage is injected into *carrier*. + + :param span_context: span context to inject. + + :param carrier: carrier to inject into. + """ + if not isinstance(carrier, dict): + raise InvalidCarrierException('propagator expects carrier to be a dict') + + self._dd_propagator.inject(span_context._dd_context, carrier) + + # Add the baggage + if span_context.baggage is not None: + for key in span_context.baggage: + carrier[HTTP_BAGGAGE_PREFIX + key] = span_context.baggage[key] + + def extract(self, carrier): + """Extract a span context from a carrier. + + :class:`ddtrace.propagation.http.HTTPPropagator` is used to extract + ddtracer supported fields into a `ddtrace.Context` context which is + combined with new logic to extract the baggage which is returned in an + OpenTracing compatible span context. + + :param carrier: carrier to extract from. + + :return: extracted span context. + """ + if not isinstance(carrier, dict): + raise InvalidCarrierException('propagator expects carrier to be a dict') + + ddspan_ctx = self._dd_propagator.extract(carrier) + + # if the dd propagator fails then it will return a new empty span + # context (with trace_id=None), we however want to raise an exception + # if this occurs. 
+ if not ddspan_ctx.trace_id: + raise SpanContextCorruptedException('failed to extract span context') + + baggage = {} + for key in carrier: + if key.startswith(HTTP_BAGGAGE_PREFIX): + baggage[key[HTTP_BAGGAGE_PREFIX_LEN:]] = carrier[key] + + return SpanContext(ddcontext=ddspan_ctx, baggage=baggage) diff --git a/ddtrace/opentracer/propagation/propagator.py b/ddtrace/opentracer/propagation/propagator.py new file mode 100644 index 0000000000..b7f7cda899 --- /dev/null +++ b/ddtrace/opentracer/propagation/propagator.py @@ -0,0 +1,15 @@ +from abc import ABCMeta, abstractmethod + +# ref: https://stackoverflow.com/a/38668373 +ABC = ABCMeta('ABC', (object,), {'__slots__': ()}) + + +class Propagator(ABC): + + @abstractmethod + def inject(self, span_context, carrier): + pass + + @abstractmethod + def extract(self, carrier): + pass diff --git a/ddtrace/opentracer/propagation/text.py b/ddtrace/opentracer/propagation/text.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ddtrace/opentracer/settings.py b/ddtrace/opentracer/settings.py new file mode 100644 index 0000000000..f4a5bee3f3 --- /dev/null +++ b/ddtrace/opentracer/settings.py @@ -0,0 +1,34 @@ +from collections import namedtuple + + +CONFIG_KEY_NAMES = [ + 'AGENT_HOSTNAME', + 'AGENT_HTTPS', + 'AGENT_PORT', + 'DEBUG', + 'ENABLED', + 'GLOBAL_TAGS', + 'SAMPLER', + 'PRIORITY_SAMPLING', + 'SETTINGS', +] + +# Keys used for the configuration dict +ConfigKeyNames = namedtuple('ConfigKeyNames', CONFIG_KEY_NAMES) + +ConfigKeys = ConfigKeyNames( + AGENT_HOSTNAME='agent_hostname', + AGENT_HTTPS='agent_https', + AGENT_PORT='agent_port', + DEBUG='debug', + ENABLED='enabled', + GLOBAL_TAGS='global_tags', + SAMPLER='sampler', + PRIORITY_SAMPLING='priority_sampling', + SETTINGS='settings', +) + + +def config_invalid_keys(config): + """Returns a list of keys that exist in *config* and not in ConfigKeys.""" + return [key for key in config.keys() if key not in ConfigKeys] diff --git a/ddtrace/opentracer/span.py b/ddtrace/opentracer/span.py new file mode 100644 index 0000000000..4cd6d16373 --- /dev/null +++ b/ddtrace/opentracer/span.py @@ -0,0 +1,167 @@ +import threading + +from opentracing import Span as OpenTracingSpan +from opentracing.ext import tags as OTTags +from ddtrace.span import Span as DatadogSpan +from ddtrace.ext import errors +from .tags import Tags + +from .span_context import SpanContext + + +class Span(OpenTracingSpan): + """Datadog implementation of :class:`opentracing.Span`""" + + def __init__(self, tracer, context, operation_name): + if context is not None: + context = SpanContext(ddcontext=context._dd_context, + baggage=context.baggage) + else: + context = SpanContext() + + super(Span, self).__init__(tracer, context) + + self.finished = False + self._lock = threading.Lock() + # use a datadog span + self._dd_span = DatadogSpan(tracer._dd_tracer, operation_name, + context=context._dd_context) + + def finish(self, finish_time=None): + """Finish the span. + + This calls finish on the ddspan. + + :param finish_time: specify a custom finish time with a unix timestamp + per time.time() + :type finish_time: float + """ + if self.finished: + return + + # finish the datadog span + self._dd_span.finish(finish_time) + self.finished = True + + def set_baggage_item(self, key, value): + """Sets a baggage item in the span context of this span. + + Baggage is used to propagate state between spans.
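+
+        For example (illustrative)::
+
+            span.set_baggage_item('request_id', 'abc123')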
+ + :param key: baggage item key + :type key: str + + :param value: baggage item value + :type value: a type that can be compat.stringify()'d + + :rtype: Span + :return: itself for chaining calls + """ + new_ctx = self.context.with_baggage_item(key, value) + with self._lock: + self._context = new_ctx + return self + + def get_baggage_item(self, key): + """Gets a baggage item from the span context of this span. + + :param key: baggage item key + :type key: str + + :rtype: str + :return: the baggage value for the given key or ``None``. + """ + return self.context.get_baggage_item(key) + + def set_operation_name(self, operation_name): + """Set the operation name.""" + self._dd_span.name = operation_name + + def log_kv(self, key_values, timestamp=None): + """Add a log record to this span. + + Passes on relevant opentracing key values onto the datadog span. + + :param key_values: a dict of string keys and values of any type + :type key_values: dict + + :param timestamp: a unix timestamp per time.time() + :type timestamp: float + + :return: the span itself, for call chaining + :rtype: Span + """ + + # match opentracing defined keys to datadog functionality + # opentracing/specification/blob/1be630515dafd4d2a468d083300900f89f28e24d/semantic_conventions.md#log-fields-table + for key, val in key_values.items(): + if key == 'event' and val == 'error': + # TODO: not sure if it's actually necessary to set the error manually + self._dd_span.error = 1 + self.set_tag('error', 1) + elif key == 'error' or key == 'error.object': + self.set_tag(errors.ERROR_TYPE, val) + elif key == 'message': + self.set_tag(errors.ERROR_MSG, val) + elif key == 'stack': + self.set_tag(errors.ERROR_STACK, val) + else: + pass + + return self + + def set_tag(self, key, value): + """Set a tag on the span. + + This sets the tag on the underlying datadog span. + """ + if key == Tags.SPAN_TYPE: + self._dd_span.span_type = value + elif key == Tags.SERVICE_NAME: + self._dd_span.service = value + elif key == Tags.RESOURCE_NAME or key == OTTags.DATABASE_STATEMENT: + self._dd_span.resource = value + elif key == OTTags.PEER_HOSTNAME: + self._dd_span.set_tag(Tags.TARGET_HOST, value) + elif key == OTTags.PEER_PORT: + self._dd_span.set_tag(Tags.TARGET_PORT, value) + elif key == Tags.SAMPLING_PRIORITY: + self._dd_span.context.sampling_priority = value + else: + self._dd_span.set_tag(key, value) + + def _get_tag(self, key): + """Gets a tag from the span. + + This method retrieves the tag from the underlying datadog span. + """ + return self._dd_span.get_tag(key) + + def _get_metric(self, key): + """Gets a metric from the span. + + This method retrieves the metric from the underlying datadog span. 
+ """ + return self._dd_span.get_metric(key) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + if exc_type: + self._dd_span.set_exc_info(exc_type, exc_val, exc_tb) + + # note: self.finish() AND _dd_span.__exit__ will call _span.finish() but + # it is idempotent + self._dd_span.__exit__(exc_type, exc_val, exc_tb) + self.finish() + + def _associate_dd_span(self, ddspan): + """Associates a DD span with this span.""" + # get the datadog span context + self._dd_span = ddspan + self.context._dd_context = ddspan.context + + @property + def _dd_context(self): + return self._dd_span.context diff --git a/ddtrace/opentracer/span_context.py b/ddtrace/opentracer/span_context.py new file mode 100644 index 0000000000..3579422831 --- /dev/null +++ b/ddtrace/opentracer/span_context.py @@ -0,0 +1,50 @@ +from opentracing import SpanContext as OpenTracingSpanContext + +from ddtrace.context import Context as DatadogContext + + +class SpanContext(OpenTracingSpanContext): + """Implementation of the OpenTracing span context.""" + + def __init__(self, trace_id=None, span_id=None, + sampling_priority=None, baggage=None, ddcontext=None): + # create a new dict for the baggage if it is not provided + # NOTE: it would be preferable to use opentracing.SpanContext.EMPTY_BAGGAGE + # but it is mutable. + # see: opentracing-python/blob/8775c7bfc57fd66e1c8bcf9a54d3e434d37544f9/opentracing/span.py#L30 + baggage = baggage or {} + + if ddcontext is not None: + self._dd_context = ddcontext + else: + self._dd_context = DatadogContext( + trace_id=trace_id, + span_id=span_id, + sampling_priority=sampling_priority, + ) + + self._baggage = dict(baggage) + + @property + def baggage(self): + return self._baggage + + def set_baggage_item(self, key, value): + """Sets a baggage item in this span context. + + Note that this operation mutates the baggage of this span context + """ + self.baggage[key] = value + + def with_baggage_item(self, key, value): + """Returns a copy of this span with a new baggage item. + + Useful for instantiating new child span contexts. 
+ """ + baggage = dict(self._baggage) + baggage[key] = value + return SpanContext(ddcontext=self._dd_context, baggage=baggage) + + def get_baggage_item(self, key): + """Gets a baggage item in this span context.""" + return self.baggage.get(key, None) diff --git a/ddtrace/opentracer/tags.py b/ddtrace/opentracer/tags.py new file mode 100644 index 0000000000..9b413277ba --- /dev/null +++ b/ddtrace/opentracer/tags.py @@ -0,0 +1,21 @@ +from collections import namedtuple + +TAG_NAMES = [ + 'RESOURCE_NAME', + 'SAMPLING_PRIORITY', + 'SERVICE_NAME', + 'SPAN_TYPE', + 'TARGET_HOST', + 'TARGET_PORT', +] + +TagNames = namedtuple('TagNames', TAG_NAMES) + +Tags = TagNames( + RESOURCE_NAME='resource.name', + SAMPLING_PRIORITY='sampling.priority', + SERVICE_NAME='service.name', + TARGET_HOST='out.host', + TARGET_PORT='out.port', + SPAN_TYPE='span.type', +) diff --git a/ddtrace/opentracer/tracer.py b/ddtrace/opentracer/tracer.py new file mode 100644 index 0000000000..b80402f2c4 --- /dev/null +++ b/ddtrace/opentracer/tracer.py @@ -0,0 +1,301 @@ +import opentracing +from opentracing import Format +from opentracing.scope_managers import ThreadLocalScopeManager + +import ddtrace +from ddtrace import Tracer as DatadogTracer +from ddtrace.constants import FILTERS_KEY +from ddtrace.settings import ConfigException +from ddtrace.utils import merge_dicts +from ddtrace.utils.config import get_application_name + +from ..internal.logger import get_logger +from .propagation import HTTPPropagator +from .span import Span +from .span_context import SpanContext +from .settings import ConfigKeys as keys, config_invalid_keys +from .utils import get_context_provider_for_scope_manager + +log = get_logger(__name__) + +DEFAULT_CONFIG = { + keys.AGENT_HOSTNAME: 'localhost', + keys.AGENT_HTTPS: False, + keys.AGENT_PORT: 8126, + keys.DEBUG: False, + keys.ENABLED: True, + keys.GLOBAL_TAGS: {}, + keys.SAMPLER: None, + keys.PRIORITY_SAMPLING: None, + keys.SETTINGS: { + FILTERS_KEY: [], + }, +} + + +class Tracer(opentracing.Tracer): + """A wrapper providing an OpenTracing API for the Datadog tracer.""" + + def __init__(self, service_name=None, config=None, scope_manager=None, dd_tracer=None): + """Initialize a new Datadog opentracer. + + :param service_name: (optional) the name of the service that this + tracer will be used with. Note if not provided, a service name will + try to be determined based off of ``sys.argv``. If this fails a + :class:`ddtrace.settings.ConfigException` will be raised. + :param config: (optional) a configuration object to specify additional + options. See the documentation for further information. + :param scope_manager: (optional) the scope manager for this tracer to + use. The available managers are listed in the Python OpenTracing repo + here: https://github.com/opentracing/opentracing-python#scope-managers. + If ``None`` is provided, defaults to + :class:`opentracing.scope_managers.ThreadLocalScopeManager`. + :param dd_tracer: (optional) the Datadog tracer for this tracer to use. This + should only be passed if a custom Datadog tracer is being used. Defaults + to the global ``ddtrace.tracer`` tracer. 
+ """ + # Merge the given config with the default into a new dict + config = config or {} + self._config = merge_dicts(DEFAULT_CONFIG, config) + + # Pull out commonly used properties for performance + self._service_name = service_name or get_application_name() + self._enabled = self._config.get(keys.ENABLED) + self._debug = self._config.get(keys.DEBUG) + + if self._debug: + # Ensure there are no typos in any of the keys + invalid_keys = config_invalid_keys(self._config) + if invalid_keys: + str_invalid_keys = ','.join(invalid_keys) + raise ConfigException('invalid key(s) given (%s)'.format(str_invalid_keys)) + + if not self._service_name: + raise ConfigException(""" Cannot detect the \'service_name\'. + Please set the \'service_name=\' + keyword argument. + """) + + self._scope_manager = scope_manager or ThreadLocalScopeManager() + + dd_context_provider = get_context_provider_for_scope_manager(self._scope_manager) + + self._dd_tracer = dd_tracer or ddtrace.tracer or DatadogTracer() + self._dd_tracer.set_tags(self._config.get(keys.GLOBAL_TAGS)) + self._dd_tracer.configure(enabled=self._enabled, + hostname=self._config.get(keys.AGENT_HOSTNAME), + https=self._config.get(keys.AGENT_HTTPS), + port=self._config.get(keys.AGENT_PORT), + sampler=self._config.get(keys.SAMPLER), + settings=self._config.get(keys.SETTINGS), + priority_sampling=self._config.get(keys.PRIORITY_SAMPLING), + context_provider=dd_context_provider, + ) + self._propagators = { + Format.HTTP_HEADERS: HTTPPropagator(), + Format.TEXT_MAP: HTTPPropagator(), + } + + @property + def scope_manager(self): + """Returns the scope manager being used by this tracer.""" + return self._scope_manager + + def start_active_span(self, operation_name, child_of=None, references=None, + tags=None, start_time=None, ignore_active_span=False, + finish_on_close=True): + """Returns a newly started and activated `Scope`. + The returned `Scope` supports with-statement contexts. For example:: + + with tracer.start_active_span('...') as scope: + scope.span.set_tag('http.method', 'GET') + do_some_work() + # Span.finish() is called as part of Scope deactivation through + # the with statement. + + It's also possible to not finish the `Span` when the `Scope` context + expires:: + + with tracer.start_active_span('...', + finish_on_close=False) as scope: + scope.span.set_tag('http.method', 'GET') + do_some_work() + # Span.finish() is not called as part of Scope deactivation as + # `finish_on_close` is `False`. + + :param operation_name: name of the operation represented by the new + span from the perspective of the current service. + :param child_of: (optional) a Span or SpanContext instance representing + the parent in a REFERENCE_CHILD_OF Reference. If specified, the + `references` parameter must be omitted. + :param references: (optional) a list of Reference objects that identify + one or more parent SpanContexts. (See the Reference documentation + for detail). + :param tags: an optional dictionary of Span Tags. The caller gives up + ownership of that dictionary, because the Tracer may use it as-is + to avoid extra data copying. + :param start_time: an explicit Span start time as a unix timestamp per + time.time(). + :param ignore_active_span: (optional) an explicit flag that ignores + the current active `Scope` and creates a root `Span`. + :param finish_on_close: whether span should automatically be finished + when `Scope.close()` is called. + :return: a `Scope`, already registered via the `ScopeManager`. 
+ """ + otspan = self.start_span( + operation_name=operation_name, + child_of=child_of, + references=references, + tags=tags, + start_time=start_time, + ignore_active_span=ignore_active_span, + ) + + # activate this new span + scope = self._scope_manager.activate(otspan, finish_on_close) + + return scope + + def start_span(self, operation_name=None, child_of=None, references=None, + tags=None, start_time=None, ignore_active_span=False): + """Starts and returns a new Span representing a unit of work. + + Starting a root Span (a Span with no causal references):: + + tracer.start_span('...') + + Starting a child Span (see also start_child_span()):: + + tracer.start_span( + '...', + child_of=parent_span) + + Starting a child Span in a more verbose way:: + + tracer.start_span( + '...', + references=[opentracing.child_of(parent_span)]) + + Note: the precedence when defining a relationship is the following, from highest to lowest: + 1. *child_of* + 2. *references* + 3. `scope_manager.active` (unless *ignore_active_span* is True) + 4. None + + Currently Datadog only supports `child_of` references. + + :param operation_name: name of the operation represented by the new + span from the perspective of the current service. + :param child_of: (optional) a Span or SpanContext instance representing + the parent in a REFERENCE_CHILD_OF Reference. If specified, the + `references` parameter must be omitted. + :param references: (optional) a list of Reference objects that identify + one or more parent SpanContexts. (See the Reference documentation + for detail) + :param tags: an optional dictionary of Span Tags. The caller gives up + ownership of that dictionary, because the Tracer may use it as-is + to avoid extra data copying. + :param start_time: an explicit Span start time as a unix timestamp per + time.time() + :param ignore_active_span: an explicit flag that ignores the current + active `Scope` and creates a root `Span`. + :return: an already-started Span instance. 
+ """ + ot_parent = None # 'ot_parent' is more readable than 'child_of' + ot_parent_context = None # the parent span's context + dd_parent = None # the child_of to pass to the ddtracer + + if child_of is not None: + ot_parent = child_of # 'ot_parent' is more readable than 'child_of' + elif references and isinstance(references, list): + # we currently only support child_of relations to one span + ot_parent = references[0].referenced_context + + # - whenever child_of is not None ddspans with parent-child + # relationships will share a ddcontext which maintains a hierarchy of + # ddspans for the execution flow + # - when child_of is a ddspan then the ddtracer uses this ddspan to + # create the child ddspan + # - when child_of is a ddcontext then the ddtracer uses the ddcontext to + # get_current_span() for the parent + if ot_parent is None and not ignore_active_span: + # attempt to get the parent span from the scope manager + scope = self._scope_manager.active + parent_span = getattr(scope, 'span', None) + ot_parent_context = getattr(parent_span, 'context', None) + # we want the ddcontext of the active span in order to maintain the + # ddspan hierarchy + dd_parent = getattr(ot_parent_context, '_dd_context', None) + + # if we cannot get the context then try getting it from the DD tracer + # this emulates the behaviour of tracer.trace() + if dd_parent is None: + dd_parent = self._dd_tracer.get_call_context() + elif ot_parent is not None and isinstance(ot_parent, Span): + # a span is given to use as a parent + ot_parent_context = ot_parent.context + dd_parent = ot_parent._dd_span + elif ot_parent is not None and isinstance(ot_parent, SpanContext): + # a span context is given to use to find the parent ddspan + dd_parent = ot_parent._dd_context + elif ot_parent is None: + # user wants to create a new parent span we don't have to do + # anything + pass + else: + raise TypeError('invalid span configuration given') + + # create a new otspan and ddspan using the ddtracer and associate it + # with the new otspan + ddspan = self._dd_tracer.start_span( + name=operation_name, + child_of=dd_parent, + service=self._service_name, + ) + + # set the start time if one is specified + ddspan.start = start_time or ddspan.start + + otspan = Span(self, ot_parent_context, operation_name) + # sync up the OT span with the DD span + otspan._associate_dd_span(ddspan) + + if tags is not None: + for k in tags: + # Make sure we set the tags on the otspan to ensure that the special compatibility tags + # are handled correctly (resource name, span type, sampling priority, etc). + otspan.set_tag(k, tags[k]) + + return otspan + + def inject(self, span_context, format, carrier): # noqa: A002 + """Injects a span context into a carrier. + + :param span_context: span context to inject. + :param format: format to encode the span context with. + :param carrier: the carrier of the encoded span context. + """ + propagator = self._propagators.get(format, None) + + if propagator is None: + raise opentracing.UnsupportedFormatException + + propagator.inject(span_context, carrier) + + def extract(self, format, carrier): # noqa: A002 + """Extracts a span context from a carrier. + + :param format: format that the carrier is encoded with. + :param carrier: the carrier to extract from. 
+ """ + propagator = self._propagators.get(format, None) + + if propagator is None: + raise opentracing.UnsupportedFormatException + + # we have to manually activate the returned context from a distributed + # trace + ot_span_ctx = propagator.extract(carrier) + dd_span_ctx = ot_span_ctx._dd_context + self._dd_tracer.context_provider.activate(dd_span_ctx) + return ot_span_ctx diff --git a/ddtrace/opentracer/utils.py b/ddtrace/opentracer/utils.py new file mode 100644 index 0000000000..06953ba0cd --- /dev/null +++ b/ddtrace/opentracer/utils.py @@ -0,0 +1,22 @@ +# DEV: If `asyncio` or `gevent` are unavailable we do not throw an error, +# `context_provider` will just not be set and we'll get an `AttributeError` instead + + +def get_context_provider_for_scope_manager(scope_manager): + """Returns the context_provider to use with a given scope_manager.""" + + scope_manager_type = type(scope_manager).__name__ + + # avoid having to import scope managers which may not be compatible + # with the version of python being used + if scope_manager_type == 'AsyncioScopeManager': + import ddtrace.contrib.asyncio + dd_context_provider = ddtrace.contrib.asyncio.context_provider + elif scope_manager_type == 'GeventScopeManager': + import ddtrace.contrib.gevent + dd_context_provider = ddtrace.contrib.gevent.context_provider + else: + from ddtrace.provider import DefaultContextProvider + dd_context_provider = DefaultContextProvider() + + return dd_context_provider diff --git a/ddtrace/payload.py b/ddtrace/payload.py new file mode 100644 index 0000000000..acbede4fbc --- /dev/null +++ b/ddtrace/payload.py @@ -0,0 +1,90 @@ +from .encoding import get_encoder + + +class PayloadFull(Exception): + """The payload is full.""" + pass + + +class Payload(object): + """ + Trace agent API payload buffer class + + This class is used to encoded and store traces to build the payload we send to + the trace agent. + + DEV: We encoded and buffer traces so that we can reliable determine the size of + the payload easily so we can flush based on the payload size. 
+ """ + __slots__ = ('traces', 'size', 'encoder', 'max_payload_size') + + # Trace agent limit payload size of 10 MB + # 5 MB should be a good average efficient size + DEFAULT_MAX_PAYLOAD_SIZE = 5 * 1000000 + + def __init__(self, encoder=None, max_payload_size=DEFAULT_MAX_PAYLOAD_SIZE): + """ + Constructor for Payload + + :param encoder: The encoded to use, default is the default encoder + :type encoder: ``ddtrace.encoding.Encoder`` + :param max_payload_size: The max number of bytes a payload should be before + being considered full (default: 5mb) + """ + self.max_payload_size = max_payload_size + self.encoder = encoder or get_encoder() + self.traces = [] + self.size = 0 + + def add_trace(self, trace): + """ + Encode and append a trace to this payload + + :param trace: A trace to append + :type trace: A list of :class:`ddtrace.span.Span` + """ + # No trace or empty trace was given, ignore + if not trace: + return + + # Encode the trace, append, and add it's length to the size + encoded = self.encoder.encode_trace(trace) + if len(encoded) + self.size > self.max_payload_size: + raise PayloadFull() + self.traces.append(encoded) + self.size += len(encoded) + + @property + def length(self): + """ + Get the number of traces in this payload + + :returns: The number of traces in the payload + :rtype: int + """ + return len(self.traces) + + @property + def empty(self): + """ + Whether this payload is empty or not + + :returns: Whether this payload is empty or not + :rtype: bool + """ + return self.length == 0 + + def get_payload(self): + """ + Get the fully encoded payload + + :returns: The fully encoded payload + :rtype: str | bytes + """ + # DEV: `self.traces` is an array of encoded traces, `join_encoded` joins them together + return self.encoder.join_encoded(self.traces) + + def __repr__(self): + """Get the string representation of this payload""" + return '{0}(length={1}, size={2} B, max_payload_size={3} B)'.format( + self.__class__.__name__, self.length, self.size, self.max_payload_size) diff --git a/ddtrace/pin.py b/ddtrace/pin.py new file mode 100644 index 0000000000..c64755b491 --- /dev/null +++ b/ddtrace/pin.py @@ -0,0 +1,184 @@ +import ddtrace + +from ddtrace.vendor import debtcollector + +from .internal.logger import get_logger +from .vendor import wrapt + + +log = get_logger(__name__) + + +# To set attributes on wrapt proxy objects use this prefix: +# http://wrapt.readthedocs.io/en/latest/wrappers.html +_DD_PIN_NAME = '_datadog_pin' +_DD_PIN_PROXY_NAME = '_self_' + _DD_PIN_NAME + + +class Pin(object): + """Pin (a.k.a Patch INfo) is a small class which is used to + set tracing metadata on a particular traced connection. + This is useful if you wanted to, say, trace two different + database clusters. 
+
+    >>> conn = sqlite.connect('/tmp/user.db')
+    >>> # Override a pin for a specific connection
+    >>> pin = Pin.override(conn, service='user-db')
+    >>> conn = sqlite.connect('/tmp/image.db')
+    """
+    __slots__ = ['app', 'tags', 'tracer', '_target', '_config', '_initialized']
+
+    @debtcollector.removals.removed_kwarg("app_type")
+    def __init__(self, service, app=None, app_type=None, tags=None, tracer=None, _config=None):
+        tracer = tracer or ddtrace.tracer
+        self.app = app
+        self.tags = tags
+        self.tracer = tracer
+        self._target = None
+        # keep the configuration attribute internal because the
+        # public API to access it is not the Pin class
+        self._config = _config or {}
+        # [Backward compatibility]: service argument updates the `Pin` config
+        self._config['service_name'] = service
+        self._initialized = True
+
+    @property
+    def service(self):
+        """Backward compatibility: accessing `pin.service` returns the underlying
+        configuration value.
+        """
+        return self._config['service_name']
+
+    def __setattr__(self, name, value):
+        if getattr(self, '_initialized', False) and name != '_target':
+            raise AttributeError("can't mutate a pin, use override() or clone() instead")
+        super(Pin, self).__setattr__(name, value)
+
+    def __repr__(self):
+        return 'Pin(service=%s, app=%s, tags=%s, tracer=%s)' % (
+            self.service, self.app, self.tags, self.tracer)
+
+    @staticmethod
+    def _find(*objs):
+        """
+        Return the first :class:`ddtrace.pin.Pin` found on any of the provided objects or `None` if none were found
+
+        >>> pin = Pin._find(wrapper, instance, conn, app)
+
+        :param objs: The objects to search for a :class:`ddtrace.pin.Pin` on
+        :type objs: List of objects
+        :rtype: :class:`ddtrace.pin.Pin`, None
+        :returns: The first found :class:`ddtrace.pin.Pin` or `None` if none was found
+        """
+        for obj in objs:
+            pin = Pin.get_from(obj)
+            if pin:
+                return pin
+        return None
+
+    @staticmethod
+    def get_from(obj):
+        """Return the pin associated with the given object. If a pin is attached to
+        `obj` but the instance is not the owner of the pin, a new pin is cloned and
+        attached. This ensures that a pin inherited from a class is a copy for the new
+        instance, preventing a specific instance from overriding other pins' values.
+
+        >>> pin = Pin.get_from(conn)
+
+        :param obj: The object to look for a :class:`ddtrace.pin.Pin` on
+        :type obj: object
+        :rtype: :class:`ddtrace.pin.Pin`, None
+        :returns: :class:`ddtrace.pin.Pin` associated with the object, or None if none was found
+        """
+        if hasattr(obj, '__getddpin__'):
+            return obj.__getddpin__()
+
+        pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME
+        pin = getattr(obj, pin_name, None)
+        # detect if the PIN has been inherited from a class
+        if pin is not None and pin._target != id(obj):
+            pin = pin.clone()
+            pin.onto(obj)
+        return pin
+
+    @classmethod
+    @debtcollector.removals.removed_kwarg("app_type")
+    def override(cls, obj, service=None, app=None, app_type=None, tags=None, tracer=None):
+        """Override an object with the given attributes.
+
+        This is the recommended way to customize an already instrumented client, without
+        losing existing attributes.
+
+        >>> conn = sqlite.connect('/tmp/user.db')
+        >>> # Override a pin for a specific connection
+        >>> Pin.override(conn, service='user-db')
+        """
+        if not obj:
+            return
+
+        pin = cls.get_from(obj)
+        if not pin:
+            pin = Pin(service)
+
+        pin.clone(
+            service=service,
+            app=app,
+            tags=tags,
+            tracer=tracer,
+        ).onto(obj)
+
+    def enabled(self):
+        """Return true if this pin's tracer is enabled.
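+
+        >>> pin = Pin.get_from(conn)  # illustrative; assumes a pinned `conn`
+        >>> if pin and pin.enabled():
+        ...     pass  # safe to start a span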
""" + return bool(self.tracer) and self.tracer.enabled + + def onto(self, obj, send=True): + """Patch this pin onto the given object. If send is true, it will also + queue the metadata to be sent to the server. + """ + # Actually patch it on the object. + try: + if hasattr(obj, '__setddpin__'): + return obj.__setddpin__(self) + + pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME + + # set the target reference; any get_from, clones and retarget the new PIN + self._target = id(obj) + return setattr(obj, pin_name, self) + except AttributeError: + log.debug("can't pin onto object. skipping", exc_info=True) + + def remove_from(self, obj): + # Remove pin from the object. + try: + pin_name = _DD_PIN_PROXY_NAME if isinstance(obj, wrapt.ObjectProxy) else _DD_PIN_NAME + + pin = Pin.get_from(obj) + if pin is not None: + delattr(obj, pin_name) + except AttributeError: + log.debug("can't remove pin from object. skipping", exc_info=True) + + @debtcollector.removals.removed_kwarg("app_type") + def clone(self, service=None, app=None, app_type=None, tags=None, tracer=None): + """Return a clone of the pin with the given attributes replaced.""" + # do a shallow copy of Pin dicts + if not tags and self.tags: + tags = self.tags.copy() + + # we use a copy instead of a deepcopy because we expect configurations + # to have only a root level dictionary without nested objects. Using + # deepcopy introduces a big overhead: + # + # copy: 0.00654911994934082 + # deepcopy: 0.2787208557128906 + config = self._config.copy() + + return Pin( + service=service or self.service, + app=app or self.app, + tags=tags, + tracer=tracer or self.tracer, # do not clone the Tracer + _config=config, + ) diff --git a/ddtrace/propagation/__init__.py b/ddtrace/propagation/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/ddtrace/propagation/http.py b/ddtrace/propagation/http.py new file mode 100644 index 0000000000..f766915567 --- /dev/null +++ b/ddtrace/propagation/http.py @@ -0,0 +1,147 @@ +from ..context import Context +from ..internal.logger import get_logger + +from .utils import get_wsgi_header + +log = get_logger(__name__) + +# HTTP headers one should set for distributed tracing. +# These are cross-language (eg: Python, Go and other implementations should honor these) +HTTP_HEADER_TRACE_ID = 'x-datadog-trace-id' +HTTP_HEADER_PARENT_ID = 'x-datadog-parent-id' +HTTP_HEADER_SAMPLING_PRIORITY = 'x-datadog-sampling-priority' +HTTP_HEADER_ORIGIN = 'x-datadog-origin' + + +# Note that due to WSGI spec we have to also check for uppercased and prefixed +# versions of these headers +POSSIBLE_HTTP_HEADER_TRACE_IDS = frozenset( + [HTTP_HEADER_TRACE_ID, get_wsgi_header(HTTP_HEADER_TRACE_ID)] +) +POSSIBLE_HTTP_HEADER_PARENT_IDS = frozenset( + [HTTP_HEADER_PARENT_ID, get_wsgi_header(HTTP_HEADER_PARENT_ID)] +) +POSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES = frozenset( + [HTTP_HEADER_SAMPLING_PRIORITY, get_wsgi_header(HTTP_HEADER_SAMPLING_PRIORITY)] +) +POSSIBLE_HTTP_HEADER_ORIGIN = frozenset( + [HTTP_HEADER_ORIGIN, get_wsgi_header(HTTP_HEADER_ORIGIN)] +) + + +class HTTPPropagator(object): + """A HTTP Propagator using HTTP headers as carrier.""" + + def inject(self, span_context, headers): + """Inject Context attributes that have to be propagated as HTTP headers. 
+ + Here is an example using `requests`:: + + import requests + from ddtrace.propagation.http import HTTPPropagator + + def parent_call(): + with tracer.trace('parent_span') as span: + headers = {} + propagator = HTTPPropagator() + propagator.inject(span.context, headers) + url = '' + r = requests.get(url, headers=headers) + + :param Context span_context: Span context to propagate. + :param dict headers: HTTP headers to extend with tracing attributes. + """ + headers[HTTP_HEADER_TRACE_ID] = str(span_context.trace_id) + headers[HTTP_HEADER_PARENT_ID] = str(span_context.span_id) + sampling_priority = span_context.sampling_priority + # Propagate priority only if defined + if sampling_priority is not None: + headers[HTTP_HEADER_SAMPLING_PRIORITY] = str(span_context.sampling_priority) + # Propagate origin only if defined + if span_context._dd_origin is not None: + headers[HTTP_HEADER_ORIGIN] = str(span_context._dd_origin) + + @staticmethod + def extract_header_value(possible_header_names, headers, default=None): + for header, value in headers.items(): + for header_name in possible_header_names: + if header.lower() == header_name.lower(): + return value + + return default + + @staticmethod + def extract_trace_id(headers): + return int( + HTTPPropagator.extract_header_value( + POSSIBLE_HTTP_HEADER_TRACE_IDS, headers, default=0, + ) + ) + + @staticmethod + def extract_parent_span_id(headers): + return int( + HTTPPropagator.extract_header_value( + POSSIBLE_HTTP_HEADER_PARENT_IDS, headers, default=0, + ) + ) + + @staticmethod + def extract_sampling_priority(headers): + return HTTPPropagator.extract_header_value( + POSSIBLE_HTTP_HEADER_SAMPLING_PRIORITIES, headers, + ) + + @staticmethod + def extract_origin(headers): + return HTTPPropagator.extract_header_value( + POSSIBLE_HTTP_HEADER_ORIGIN, headers, + ) + + def extract(self, headers): + """Extract a Context from HTTP headers into a new Context. + + Here is an example from a web endpoint:: + + from ddtrace.propagation.http import HTTPPropagator + + def my_controller(url, headers): + propagator = HTTPPropagator() + context = propagator.extract(headers) + tracer.context_provider.activate(context) + + with tracer.trace('my_controller') as span: + span.set_meta('http.url', url) + + :param dict headers: HTTP headers to extract tracing attributes. + :return: New `Context` with propagated attributes. + """ + if not headers: + return Context() + + try: + trace_id = HTTPPropagator.extract_trace_id(headers) + parent_span_id = HTTPPropagator.extract_parent_span_id(headers) + sampling_priority = HTTPPropagator.extract_sampling_priority(headers) + origin = HTTPPropagator.extract_origin(headers) + + if sampling_priority is not None: + sampling_priority = int(sampling_priority) + + return Context( + trace_id=trace_id, + span_id=parent_span_id, + sampling_priority=sampling_priority, + _dd_origin=origin, + ) + # If headers are invalid and cannot be parsed, return a new context and log the issue. 
+        except Exception:
+            log.debug(
+                'invalid x-datadog-* headers, trace-id: %s, parent-id: %s, priority: %s, origin: %s',
+                headers.get(HTTP_HEADER_TRACE_ID, 0),
+                headers.get(HTTP_HEADER_PARENT_ID, 0),
+                headers.get(HTTP_HEADER_SAMPLING_PRIORITY),
+                headers.get(HTTP_HEADER_ORIGIN, ''),
+                exc_info=True,
+            )
+            return Context()
diff --git a/ddtrace/propagation/utils.py b/ddtrace/propagation/utils.py
new file mode 100644
index 0000000000..1ce6d73df4
--- /dev/null
+++ b/ddtrace/propagation/utils.py
@@ -0,0 +1,6 @@
+def get_wsgi_header(header):
+    """Returns a WSGI compliant HTTP header.
+    See https://www.python.org/dev/peps/pep-3333/#environ-variables for
+    information from the spec.
+    """
+    return 'HTTP_{}'.format(header.upper().replace('-', '_'))
diff --git a/ddtrace/provider.py b/ddtrace/provider.py
new file mode 100644
index 0000000000..1cc1f3b2e0
--- /dev/null
+++ b/ddtrace/provider.py
@@ -0,0 +1,64 @@
+import abc
+from ddtrace.vendor import six
+
+from .internal.context_manager import DefaultContextManager
+
+
+class BaseContextProvider(six.with_metaclass(abc.ABCMeta)):
+    """
+    A ``ContextProvider`` is an interface that provides the blueprint
+    for a callable class, capable of retrieving the current active
+    ``Context`` instance. Context providers must inherit this class
+    and implement:
+    * the ``active`` method, that returns the current active ``Context``
+    * the ``activate`` method, that sets the current active ``Context``
+    """
+    @abc.abstractmethod
+    def _has_active_context(self):
+        pass
+
+    @abc.abstractmethod
+    def activate(self, context):
+        pass
+
+    @abc.abstractmethod
+    def active(self):
+        pass
+
+    def __call__(self, *args, **kwargs):
+        """Method available for backward-compatibility. It proxies the call to
+        ``self.active()`` and must not do anything more.
+        """
+        return self.active()
+
+
+class DefaultContextProvider(BaseContextProvider):
+    """
+    Default context provider that retrieves all contexts from the current
+    thread-local storage. It is suitable for synchronous programming and
+    Python WSGI frameworks.
+    """
+    def __init__(self, reset_context_manager=True):
+        self._local = DefaultContextManager(reset=reset_context_manager)
+
+    def _has_active_context(self):
+        """
+        Check whether we have a currently active context.
+
+        :returns: Whether we have an active context
+        :rtype: bool
+        """
+        return self._local._has_active_context()
+
+    def activate(self, context):
+        """Makes the given ``context`` active, so that the provider calls
+        the thread-local storage implementation.
+        """
+        return self._local.set(context)
+
+    def active(self):
+        """Returns the current active ``Context`` for this tracer. The returned
+        ``Context`` must be thread-safe or thread-local for this specific
+        implementation.
+        """
+        return self._local.get()
diff --git a/ddtrace/sampler.py b/ddtrace/sampler.py
new file mode 100644
index 0000000000..9b3980fbb5
--- /dev/null
+++ b/ddtrace/sampler.py
@@ -0,0 +1,367 @@
+"""Samplers manage the client-side trace sampling
+
+Any `sampled = False` trace won't be written, and can be ignored by the instrumentation.
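+
+A hedged configuration sketch (``tracer.configure(sampler=...)`` is the
+ddtrace entry point used elsewhere in this diff; the 50% rate is illustrative)::
+
+    from ddtrace import tracer
+    from ddtrace.sampler import RateSampler
+
+    # deterministically keep ~50% of traces, keyed on trace_id
+    tracer.configure(sampler=RateSampler(0.5))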
+""" +import abc + +from .compat import iteritems, pattern_type +from .constants import ENV_KEY +from .constants import SAMPLING_AGENT_DECISION, SAMPLING_RULE_DECISION, SAMPLING_LIMIT_DECISION +from .ext.priority import AUTO_KEEP, AUTO_REJECT +from .internal.logger import get_logger +from .internal.rate_limiter import RateLimiter +from .utils.formats import get_env +from .vendor import six + +log = get_logger(__name__) + +MAX_TRACE_ID = 2 ** 64 + +# Has to be the same factor and key as the Agent to allow chained sampling +KNUTH_FACTOR = 1111111111111111111 + + +class BaseSampler(six.with_metaclass(abc.ABCMeta)): + @abc.abstractmethod + def sample(self, span): + pass + + +class BasePrioritySampler(six.with_metaclass(abc.ABCMeta)): + @abc.abstractmethod + def update_rate_by_service_sample_rates(self, sample_rates): + pass + + +class AllSampler(BaseSampler): + """Sampler sampling all the traces""" + + def sample(self, span): + return True + + +class RateSampler(BaseSampler): + """Sampler based on a rate + + Keep (100 * `sample_rate`)% of the traces. + It samples randomly, its main purpose is to reduce the instrumentation footprint. + """ + + def __init__(self, sample_rate=1): + if sample_rate <= 0: + log.error('sample_rate is negative or null, disable the Sampler') + sample_rate = 1 + elif sample_rate > 1: + sample_rate = 1 + + self.set_sample_rate(sample_rate) + + log.debug('initialized RateSampler, sample %s%% of traces', 100 * sample_rate) + + def set_sample_rate(self, sample_rate): + self.sample_rate = float(sample_rate) + self.sampling_id_threshold = self.sample_rate * MAX_TRACE_ID + + def sample(self, span): + return ((span.trace_id * KNUTH_FACTOR) % MAX_TRACE_ID) <= self.sampling_id_threshold + + +class RateByServiceSampler(BaseSampler, BasePrioritySampler): + """Sampler based on a rate, by service + + Keep (100 * `sample_rate`)% of the traces. + The sample rate is kept independently for each service/env tuple. + """ + + @staticmethod + def _key(service=None, env=None): + """Compute a key with the same format used by the Datadog agent API.""" + service = service or '' + env = env or '' + return 'service:' + service + ',env:' + env + + def __init__(self, sample_rate=1): + self.sample_rate = sample_rate + self._by_service_samplers = self._get_new_by_service_sampler() + + def _get_new_by_service_sampler(self): + return { + self._default_key: RateSampler(self.sample_rate) + } + + def set_sample_rate(self, sample_rate, service='', env=''): + self._by_service_samplers[self._key(service, env)] = RateSampler(sample_rate) + + def sample(self, span): + tags = span.tracer.tags + env = tags[ENV_KEY] if ENV_KEY in tags else None + key = self._key(span.service, env) + + sampler = self._by_service_samplers.get( + key, self._by_service_samplers[self._default_key] + ) + span.set_metric(SAMPLING_AGENT_DECISION, sampler.sample_rate) + return sampler.sample(span) + + def update_rate_by_service_sample_rates(self, rate_by_service): + new_by_service_samplers = self._get_new_by_service_sampler() + for key, sample_rate in iteritems(rate_by_service): + new_by_service_samplers[key] = RateSampler(sample_rate) + + self._by_service_samplers = new_by_service_samplers + + +# Default key for service with no specific rate +RateByServiceSampler._default_key = RateByServiceSampler._key() + + +class DatadogSampler(BaseSampler, BasePrioritySampler): + """ + This sampler is currently in ALPHA and it's API may change at any time, use at your own risk. 
+ """ + __slots__ = ('default_sampler', 'limiter', 'rules') + + NO_RATE_LIMIT = -1 + DEFAULT_RATE_LIMIT = 100 + DEFAULT_SAMPLE_RATE = None + + def __init__(self, rules=None, default_sample_rate=None, rate_limit=None): + """ + Constructor for DatadogSampler sampler + + :param rules: List of :class:`SamplingRule` rules to apply to the root span of every trace, default no rules + :type rules: :obj:`list` of :class:`SamplingRule` + :param default_sample_rate: The default sample rate to apply if no rules matched (default: ``None`` / + Use :class:`RateByServiceSampler` only) + :type default_sample_rate: float 0 <= X <= 1.0 + :param rate_limit: Global rate limit (traces per second) to apply to all traces regardless of the rules + applied to them, (default: ``100``) + :type rate_limit: :obj:`int` + """ + if default_sample_rate is None: + # If no sample rate was provided explicitly in code, try to load from environment variable + sample_rate = get_env('trace', 'sample_rate', default=self.DEFAULT_SAMPLE_RATE) + + # If no env variable was found, just use the default + if sample_rate is None: + default_sample_rate = self.DEFAULT_SAMPLE_RATE + + # Otherwise, try to convert it to a float + else: + default_sample_rate = float(sample_rate) + + if rate_limit is None: + rate_limit = int(get_env('trace', 'rate_limit', default=self.DEFAULT_RATE_LIMIT)) + + # Ensure rules is a list + if not rules: + rules = [] + + # Validate that the rules is a list of SampleRules + for rule in rules: + if not isinstance(rule, SamplingRule): + raise TypeError('Rule {!r} must be a sub-class of type ddtrace.sampler.SamplingRules'.format(rule)) + self.rules = rules + + # Configure rate limiter + self.limiter = RateLimiter(rate_limit) + + # Default to previous default behavior of RateByServiceSampler + self.default_sampler = RateByServiceSampler() + if default_sample_rate is not None: + self.default_sampler = SamplingRule(sample_rate=default_sample_rate) + + def update_rate_by_service_sample_rates(self, sample_rates): + # Pass through the call to our RateByServiceSampler + if isinstance(self.default_sampler, RateByServiceSampler): + self.default_sampler.update_rate_by_service_sample_rates(sample_rates) + + def _set_priority(self, span, priority): + if span._context: + span._context.sampling_priority = priority + span.sampled = priority is AUTO_KEEP + + def sample(self, span): + """ + Decide whether the provided span should be sampled or not + + The span provided should be the root span in the trace. 
+
+        :param span: The root span of a trace
+        :type span: :class:`ddtrace.span.Span`
+        :returns: Whether the span was sampled or not
+        :rtype: :obj:`bool`
+        """
+        # If there are rules defined, then iterate through them and find one that wants to sample
+        matching_rule = None
+        # Go through all rules and grab the first one that matched
+        # DEV: This means rules should be ordered by the user from most specific to least specific
+        for rule in self.rules:
+            if rule.matches(span):
+                matching_rule = rule
+                break
+        else:
+            # If this is the old sampler, sample and return
+            if isinstance(self.default_sampler, RateByServiceSampler):
+                if self.default_sampler.sample(span):
+                    self._set_priority(span, AUTO_KEEP)
+                    return True
+                else:
+                    self._set_priority(span, AUTO_REJECT)
+                    return False
+
+            # If no rules match, use our default sampler
+            matching_rule = self.default_sampler
+
+        # Sample with the matching sampling rule
+        span.set_metric(SAMPLING_RULE_DECISION, matching_rule.sample_rate)
+        if not matching_rule.sample(span):
+            self._set_priority(span, AUTO_REJECT)
+            return False
+        else:
+            # Do not return here, we still need to apply the rate limit
+            self._set_priority(span, AUTO_KEEP)
+
+        # Ensure all allowed traces adhere to the global rate limit
+        allowed = self.limiter.is_allowed()
+        # Always set the sample rate metric whether it was allowed or not
+        # DEV: Setting this allows us to properly compute metrics and debug the
+        #      various sample rates that are getting applied to this span
+        span.set_metric(SAMPLING_LIMIT_DECISION, self.limiter.effective_rate)
+        if not allowed:
+            self._set_priority(span, AUTO_REJECT)
+            return False
+
+        # We made it past all of the checks, sample this trace
+        self._set_priority(span, AUTO_KEEP)
+        return True
+
+
+class SamplingRule(BaseSampler):
+    """
+    Definition of a sampling rule used by :class:`DatadogSampler` for applying a sample rate on a span
+    """
+    __slots__ = ('_sample_rate', '_sampling_id_threshold', 'service', 'name')
+
+    NO_RULE = object()
+
+    def __init__(self, sample_rate, service=NO_RULE, name=NO_RULE):
+        """
+        Configure a new :class:`SamplingRule`
+
+        .. code:: python
+
+            DatadogSampler([
+                # Sample 100% of any trace
+                SamplingRule(sample_rate=1.0),
+
+                # Sample no healthcheck traces
+                SamplingRule(sample_rate=0, name='flask.request'),
+
+                # Sample all services ending in `-db` based on a regular expression
+                SamplingRule(sample_rate=0.5, service=re.compile('-db$')),
+
+                # Sample based on service name using a custom function
+                SamplingRule(sample_rate=0.75, service=lambda service: 'my-app' in service),
+            ])
+
+        :param sample_rate: The sample rate to apply to any matching spans
+        :type sample_rate: :obj:`float` greater than or equal to 0.0 and less than or equal to 1.0
+        :param service: Rule to match the `span.service` on, default no rule defined
+        :type service: :obj:`object` to directly compare, :obj:`function` to evaluate, or :class:`re.Pattern` to match
+        :param name: Rule to match the `span.name` on, default no rule defined
+        :type name: :obj:`object` to directly compare, :obj:`function` to evaluate, or :class:`re.Pattern` to match
+        """
+        # Enforce sample rate constraints
+        if not 0.0 <= sample_rate <= 1.0:
+            raise ValueError(
+                'SamplingRule(sample_rate={!r}) must be greater than or equal to 0.0 and less than or equal to 1.0'.format(sample_rate)
+            )
+
+        self.sample_rate = sample_rate
+        self.service = service
+        self.name = name
+
+    @property
+    def sample_rate(self):
+        return self._sample_rate
+
+    @sample_rate.setter
+    def sample_rate(self, sample_rate):
+        self._sample_rate = sample_rate
+        self._sampling_id_threshold = sample_rate * MAX_TRACE_ID
+
+    def _pattern_matches(self, prop, pattern):
+        # If the rule is not set, then assume it matches
+        # DEV: Having no rule and being `None` are different things
+        #      e.g. ignoring `span.service` vs `span.service == None`
+        if pattern is self.NO_RULE:
+            return True
+
+        # If the pattern is callable (e.g. a function) then call it, passing the prop
+        # The expected return value is a boolean, so cast the response in case it isn't
+        if callable(pattern):
+            try:
+                return bool(pattern(prop))
+            except Exception:
+                log.warning('%r pattern %r failed with %r', self, pattern, prop, exc_info=True)
+                # The custom function failed, assume the rule does not match
+                return False
+
+        # The pattern is a regular expression and the prop is a string
+        if isinstance(pattern, pattern_type):
+            try:
+                return bool(pattern.match(str(prop)))
+            except (ValueError, TypeError):
+                # This is to guard us against the casting to a string (shouldn't happen, but still)
+                log.warning('%r pattern %r failed with %r', self, pattern, prop, exc_info=True)
+                return False
+
+        # Exact match on the values
+        return prop == pattern
+
+    def matches(self, span):
+        """
+        Return if this span matches this rule
+
+        :param span: The span to match against
+        :type span: :class:`ddtrace.span.Span`
+        :returns: Whether this span matches or not
+        :rtype: :obj:`bool`
+        """
+        return all(
+            self._pattern_matches(prop, pattern)
+            for prop, pattern in [
+                (span.service, self.service),
+                (span.name, self.name),
+            ]
+        )
+
+    def sample(self, span):
+        """
+        Return if this rule chooses to sample the span
+
+        :param span: The span to sample against
+        :type span: :class:`ddtrace.span.Span`
+        :returns: Whether this span was sampled
+        :rtype: :obj:`bool`
+        """
+        if self.sample_rate == 1:
+            return True
+        elif self.sample_rate == 0:
+            return False
+
+        return ((span.trace_id * KNUTH_FACTOR) % MAX_TRACE_ID) <= self._sampling_id_threshold
+
+    def _no_rule_or_self(self, val):
+        return 'NO_RULE' if val is self.NO_RULE else val
+
+    def __repr__(self):
+        return '{}(sample_rate={!r}, service={!r}, name={!r})'.format(
+            self.__class__.__name__,
+            self.sample_rate,
+            self._no_rule_or_self(self.service),
+            self._no_rule_or_self(self.name),
+        )
+
+    __str__ = __repr__
diff --git a/ddtrace/settings/__init__.py b/ddtrace/settings/__init__.py
new file mode 100644
index 0000000000..2fe82efd32
--- /dev/null
+++ b/ddtrace/settings/__init__.py
@@ -0,0 +1,17 @@
+from .config import Config
+from .exceptions import ConfigException
+from .http import HttpConfig
+from .hooks import Hooks
+from .integration import IntegrationConfig
+
+# Default global config
+config = Config()
+
+__all__ = [
+    'config',
+    'Config',
+    'ConfigException',
+    'HttpConfig',
+    'Hooks',
+    'IntegrationConfig',
+]
diff --git a/ddtrace/settings/config.py b/ddtrace/settings/config.py
new file mode 100644
index 0000000000..67b14aa76a
--- /dev/null
+++ b/ddtrace/settings/config.py
@@ -0,0 +1,112 @@
+from copy import deepcopy
+
+from ..internal.logger import get_logger
+from ..pin import Pin
+from ..utils.formats import asbool, get_env
+from ..utils.merge import deepmerge
+from .http import HttpConfig
+from .integration import IntegrationConfig
+
+log = get_logger(__name__)
+
+
+class Config(object):
+    """Configuration object that exposes an API to set and retrieve
+    global settings for each integration. All integrations must use
+    this instance to register their defaults, so that they're publicly
+    available and can be updated by users.
+    """
+    def __init__(self):
+        # use a dict as the underlying storage mechanism
+        self._config = {}
+        self._http = HttpConfig()
+        # Master switch for turning on and off trace search by default
+        # this weird invocation of get_env is meant to read the DD_ANALYTICS_ENABLED
+        # legacy environment variable.
+        # It should be removed in the future.
+        legacy_config_value = get_env('analytics', 'enabled', default=False)
+
+        self.analytics_enabled = asbool(
+            get_env('trace', 'analytics_enabled', default=legacy_config_value)
+        )
+
+        self.report_hostname = asbool(
+            get_env('trace', 'report_hostname', default=False)
+        )
+
+        self.health_metrics_enabled = asbool(
+            get_env('trace', 'health_metrics_enabled', default=False)
+        )
+
+    def __getattr__(self, name):
+        if name not in self._config:
+            self._config[name] = IntegrationConfig(self, name)
+
+        return self._config[name]
+
+    def get_from(self, obj):
+        """Retrieves the configuration for the given object.
+        Any object that has an attached `Pin` must have a configuration;
+        if a wrong object is given, an empty `dict` is returned
+        for safety reasons.
+        """
+        pin = Pin.get_from(obj)
+        if pin is None:
+            log.debug('No configuration found for %s', obj)
+            return {}
+
+        return pin._config
+
+    def _add(self, integration, settings, merge=True):
+        """Internal API that registers an integration with given default
+        settings.
+
+        :param str integration: The integration name (e.g. `requests`)
+        :param dict settings: A dictionary that contains integration settings;
+            to preserve immutability of these values, the dictionary is copied
+            since it contains integration defaults.
+        :param bool merge: Whether to merge any existing settings with those provided,
+            or if we should overwrite the settings with those provided;
+            Note: when merging, existing settings take precedence.
+        """
+        # DEV: Use `getattr()` to call our `__getattr__` helper
+        existing = getattr(self, integration)
+        settings = deepcopy(settings)
+
+        if merge:
+            # DEV: This may appear backwards keeping `existing` as the "source" and `settings` as
+            #      the "destination", but we do not want to let `_add(..., merge=True)` overwrite any
+            #      existing settings
+            #
+            # >>> config.requests['split_by_domain'] = True
+            # >>> config._add('requests', dict(split_by_domain=False))
+            # >>> config.requests['split_by_domain']
+            # True
+            self._config[integration] = IntegrationConfig(self, integration, deepmerge(existing, settings))
+        else:
+            self._config[integration] = IntegrationConfig(self, integration, settings)
+
+    def trace_headers(self, whitelist):
+        """
+        Registers a set of headers to be traced at global level or integration level.
+
+        :param whitelist: the case-insensitive list of traced headers
+        :type whitelist: list of str or str
+        :return: self
+        :rtype: HttpConfig
+        """
+        self._http.trace_headers(whitelist)
+        return self
+
+    def header_is_traced(self, header_name):
+        """
+        Returns whether or not the current header should be traced.
+
+        :param header_name: the header name
+        :type header_name: str
+        :rtype: bool
+        """
+        return self._http.header_is_traced(header_name)
+
+    def __repr__(self):
+        cls = self.__class__
+        integrations = ', '.join(self._config.keys())
+        return '{}.{}({})'.format(cls.__module__, cls.__name__, integrations)
diff --git a/ddtrace/settings/exceptions.py b/ddtrace/settings/exceptions.py
new file mode 100644
index 0000000000..4fd725c7e3
--- /dev/null
+++ b/ddtrace/settings/exceptions.py
@@ -0,0 +1,5 @@
+class ConfigException(Exception):
+    """Configuration exception raised when an integration that is not
+    available is requested through the `Config` object.
+ """ + pass diff --git a/ddtrace/settings/hooks.py b/ddtrace/settings/hooks.py new file mode 100644 index 0000000000..6713684dd3 --- /dev/null +++ b/ddtrace/settings/hooks.py @@ -0,0 +1,122 @@ +import collections +from copy import deepcopy + +from ..internal.logger import get_logger +from ..span import Span + +log = get_logger(__name__) + + +class Hooks(object): + """ + Hooks configuration object is used for registering and calling hook functions + + Example:: + + @config.falcon.hooks.on('request') + def on_request(span, request, response): + pass + """ + __slots__ = ['_hooks'] + + def __init__(self): + self._hooks = collections.defaultdict(set) + + def __deepcopy__(self, memodict=None): + hooks = Hooks() + hooks._hooks = deepcopy(self._hooks) + return hooks + + def register(self, hook, func=None): + """ + Function used to register a hook for the provided name. + + Example:: + + def on_request(span, request, response): + pass + + config.falcon.hooks.register('request', on_request) + + + If no function is provided then a decorator is returned:: + + @config.falcon.hooks.register('request') + def on_request(span, request, response): + pass + + :param hook: The name of the hook to register the function for + :type hook: str + :param func: The function to register, or ``None`` if a decorator should be returned + :type func: function, None + :returns: Either a function decorator if ``func is None``, otherwise ``None`` + :rtype: function, None + """ + # If they didn't provide a function, then return a decorator + if not func: + def wrapper(func): + self.register(hook, func) + return func + return wrapper + self._hooks[hook].add(func) + + # Provide shorthand `on` method for `register` + # >>> @config.falcon.hooks.on('request') + # def on_request(span, request, response): + # pass + on = register + + def deregister(self, func): + """ + Function to deregister a function from all hooks it was registered under + + Example:: + + @config.falcon.hooks.on('request') + def on_request(span, request, response): + pass + + config.falcon.hooks.deregister(on_request) + + + :param func: Function hook to register + :type func: function + """ + for funcs in self._hooks.values(): + if func in funcs: + funcs.remove(func) + + def _emit(self, hook, span, *args, **kwargs): + """ + Function used to call registered hook functions. 
+
+        :param hook: The hook to call functions for
+        :type hook: str
+        :param span: The span to call the hook with
+        :type span: :class:`ddtrace.span.Span`
+        :param args: Positional arguments to pass to the hook functions
+        :type args: list
+        :param kwargs: Keyword arguments to pass to the hook functions
+        :type kwargs: dict
+        """
+        # Return early if no hooks are registered
+        if hook not in self._hooks:
+            return
+
+        # Return early if we don't have a Span
+        if not isinstance(span, Span):
+            return
+
+        # Call registered hooks
+        for func in self._hooks[hook]:
+            try:
+                func(span, *args, **kwargs)
+            except Exception:
+                # DEV: Use log.debug instead of log.error until we have a throttled logger
+                log.debug('Failed to run hook %s function %s', hook, func, exc_info=True)
+
+    def __repr__(self):
+        """Return string representation of this class instance"""
+        cls = self.__class__
+        hooks = ','.join(self._hooks.keys())
+        return '{}.{}({})'.format(cls.__module__, cls.__name__, hooks)
diff --git a/ddtrace/settings/http.py b/ddtrace/settings/http.py
new file mode 100644
index 0000000000..ccce2c3739
--- /dev/null
+++ b/ddtrace/settings/http.py
@@ -0,0 +1,54 @@
+from ..internal.logger import get_logger
+from ..utils.http import normalize_header_name
+
+log = get_logger(__name__)
+
+
+class HttpConfig(object):
+    """
+    Configuration object that exposes an API to set and retrieve both global and integration specific settings
+    related to the http context.
+    """
+
+    def __init__(self):
+        self._whitelist_headers = set()
+        self.trace_query_string = None
+
+    @property
+    def is_header_tracing_configured(self):
+        return len(self._whitelist_headers) > 0
+
+    def trace_headers(self, whitelist):
+        """
+        Registers a set of headers to be traced at global level or integration level.
+
+        :param whitelist: the case-insensitive list of traced headers
+        :type whitelist: list of str or str
+        :return: self
+        :rtype: HttpConfig
+        """
+        if not whitelist:
+            return
+
+        whitelist = [whitelist] if isinstance(whitelist, str) else whitelist
+        for whitelist_entry in whitelist:
+            normalized_header_name = normalize_header_name(whitelist_entry)
+            if not normalized_header_name:
+                continue
+            self._whitelist_headers.add(normalized_header_name)
+
+        return self
+
+    def header_is_traced(self, header_name):
+        """
+        Returns whether or not the current header should be traced.
+
+        :param header_name: the header name
+        :type header_name: str
+        :rtype: bool
+        """
+        normalized_header_name = normalize_header_name(header_name)
+        log.debug('Checking header \'%s\' tracing in whitelist %s', normalized_header_name, self._whitelist_headers)
+        return normalized_header_name in self._whitelist_headers
+
+    def __repr__(self):
+        return '<{} traced_headers={} trace_query_string={}>'.format(
+            self.__class__.__name__, self._whitelist_headers, self.trace_query_string)
diff --git a/ddtrace/settings/integration.py b/ddtrace/settings/integration.py
new file mode 100644
index 0000000000..3d8b2288e8
--- /dev/null
+++ b/ddtrace/settings/integration.py
@@ -0,0 +1,103 @@
+from copy import deepcopy
+
+from ..utils.attrdict import AttrDict
+from ..utils.formats import asbool, get_env
+from .http import HttpConfig
+from .hooks import Hooks
+
+
+class IntegrationConfig(AttrDict):
+    """
+    Integration specific configuration object.
+
+    This is what you will get when you do::
+
+        from ddtrace import config
+
+        # This is an `IntegrationConfig`
+        config.flask
+
+        # `IntegrationConfig` supports both attribute and item accessors
+        config.flask['service_name'] = 'my-service-name'
+        config.flask.service_name = 'my-service-name'
+    """
+    def __init__(self, global_config, name, *args, **kwargs):
+        """
+        :param global_config: the global (parent) configuration object
+        :type global_config: Config
+        :param name: the integration name
+        :param args: passed through to ``AttrDict``
+        :param kwargs: passed through to ``AttrDict``
+        """
+        super(IntegrationConfig, self).__init__(*args, **kwargs)
+
+        # Set internal properties for this `IntegrationConfig`
+        # DEV: By-pass the `__setattr__` overrides from `AttrDict` to set real properties
+        object.__setattr__(self, 'global_config', global_config)
+        object.__setattr__(self, 'integration_name', name)
+        object.__setattr__(self, 'hooks', Hooks())
+        object.__setattr__(self, 'http', HttpConfig())
+
+        # Set default analytics configuration, default is disabled
+        # DEV: Default to `None` which means do not set this key
+        # Inject environment variables for the integration
+        analytics_enabled_env = get_env(name, 'analytics_enabled')
+        if analytics_enabled_env is not None:
+            analytics_enabled_env = asbool(analytics_enabled_env)
+        self.setdefault('analytics_enabled', analytics_enabled_env)
+        self.setdefault('analytics_sample_rate', float(get_env(name, 'analytics_sample_rate', 1.0)))
+
+    def __deepcopy__(self, memodict=None):
+        # DEV: pass the integration name through, otherwise the copied config
+        #      would receive the settings dict as its `name` argument
+        new = IntegrationConfig(self.global_config, self.integration_name, deepcopy(dict(self)))
+        new.hooks = deepcopy(self.hooks)
+        new.http = deepcopy(self.http)
+        return new
+
+    @property
+    def trace_query_string(self):
+        if self.http.trace_query_string is not None:
+            return self.http.trace_query_string
+        return self.global_config._http.trace_query_string
+
+    def header_is_traced(self, header_name):
+        """
+        Returns whether or not the current header should be traced.
+ :param header_name: the header name + :type header_name: str + :rtype: bool + """ + return ( + self.http.header_is_traced(header_name) + if self.http.is_header_tracing_configured + else self.global_config.header_is_traced(header_name) + ) + + def _is_analytics_enabled(self, use_global_config): + # DEV: analytics flag can be None which should not be taken as + # enabled when global flag is disabled + if use_global_config and self.global_config.analytics_enabled: + return self.analytics_enabled is not False + else: + return self.analytics_enabled is True + + def get_analytics_sample_rate(self, use_global_config=False): + """ + Returns analytics sample rate but only when integration-specific + analytics configuration is enabled with optional override with global + configuration + """ + if self._is_analytics_enabled(use_global_config): + analytics_sample_rate = getattr(self, 'analytics_sample_rate', None) + # return True if attribute is None or attribute not found + if analytics_sample_rate is None: + return True + # otherwise return rate + return analytics_sample_rate + + # Use `None` as a way to say that it was not defined, + # `False` would mean `0` which is a different thing + return None + + def __repr__(self): + cls = self.__class__ + keys = ', '.join(self.keys()) + return '{}.{}({})'.format(cls.__module__, cls.__name__, keys) diff --git a/ddtrace/span.py b/ddtrace/span.py new file mode 100644 index 0000000000..9c9c2e1fb1 --- /dev/null +++ b/ddtrace/span.py @@ -0,0 +1,394 @@ +import math +import random +import sys +import traceback + +from .compat import StringIO, stringify, iteritems, numeric_types, time_ns, is_integer +from .constants import NUMERIC_TAGS, MANUAL_DROP_KEY, MANUAL_KEEP_KEY +from .ext import SpanTypes, errors, priority, net, http +from .internal.logger import get_logger + + +log = get_logger(__name__) + + +if sys.version_info.major < 3: + _getrandbits = random.SystemRandom().getrandbits +else: + _getrandbits = random.getrandbits + + +class Span(object): + + __slots__ = [ + # Public span attributes + 'service', + 'name', + 'resource', + 'span_id', + 'trace_id', + 'parent_id', + 'meta', + 'error', + 'metrics', + 'span_type', + 'start_ns', + 'duration_ns', + 'tracer', + # Sampler attributes + 'sampled', + # Internal attributes + '_context', + 'finished', + '_parent', + '__weakref__', + ] + + def __init__( + self, + tracer, + name, + + service=None, + resource=None, + span_type=None, + trace_id=None, + span_id=None, + parent_id=None, + start=None, + context=None, + ): + """ + Create a new span. Call `finish` once the traced operation is over. + + :param ddtrace.Tracer tracer: the tracer that will submit this span when + finished. + :param str name: the name of the traced operation. + + :param str service: the service name + :param str resource: the resource name + :param str span_type: the span type + + :param int trace_id: the id of this trace's root span. + :param int parent_id: the id of this span's direct parent span. + :param int span_id: the id of this span. + + :param int start: the start time of request as a unix epoch in seconds + :param object context: the Context of the span. 
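+
+        A hedged construction sketch (spans are normally created through the
+        tracer rather than directly; the values are illustrative)::
+
+            span = Span(tracer, 'web.request', service='web', resource='GET /')
+            span.set_tag('http.method', 'GET')
+            span.finish()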
+        """
+        # required span info
+        self.name = name
+        self.service = service
+        self.resource = resource or name
+        self.span_type = span_type.value if isinstance(span_type, SpanTypes) else span_type
+
+        # tags / metadata
+        self.meta = {}
+        self.error = 0
+        self.metrics = {}
+
+        # timing
+        self.start_ns = time_ns() if start is None else int(start * 1e9)
+        self.duration_ns = None
+
+        # tracing
+        self.trace_id = trace_id or _new_id()
+        self.span_id = span_id or _new_id()
+        self.parent_id = parent_id
+        self.tracer = tracer
+
+        # sampling
+        self.sampled = True
+
+        self._context = context
+        self._parent = None
+
+        # state
+        self.finished = False
+
+    @property
+    def start(self):
+        """The start timestamp in Unix epoch seconds."""
+        return self.start_ns / 1e9
+
+    @start.setter
+    def start(self, value):
+        self.start_ns = int(value * 1e9)
+
+    @property
+    def duration(self):
+        """The span duration in seconds."""
+        if self.duration_ns is not None:
+            return self.duration_ns / 1e9
+
+    @duration.setter
+    def duration(self, value):
+        self.duration_ns = value * 1e9
+
+    def finish(self, finish_time=None):
+        """Mark the end time of the span and submit it to the tracer.
+        If the span has already been finished, don't do anything.
+
+        :param int finish_time: The end time of the span, in seconds.
+            Defaults to now.
+        """
+        if self.finished:
+            return
+        self.finished = True
+
+        if self.duration_ns is None:
+            ft = time_ns() if finish_time is None else int(finish_time * 1e9)
+            # be defensive so we don't die if start isn't set
+            self.duration_ns = ft - (self.start_ns or ft)
+
+        if self._context:
+            try:
+                self._context.close_span(self)
+            except Exception:
+                log.exception('error recording finished trace')
+            else:
+                # if a tracer is available to process the current context
+                if self.tracer:
+                    try:
+                        self.tracer.record(self._context)
+                    except Exception:
+                        log.exception('error recording finished trace')
+
+    def set_tag(self, key, value=None):
+        """ Set the given key / value tag pair on the span. Keys and values
+        must be strings (or stringable). If a casting error occurs, it will
+        be ignored.
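+
+        For example (``retries`` is an illustrative tag name)::
+
+            span.set_tag('http.status_code', 200)  # always stored as a string tag
+            span.set_tag('retries', 3)             # small integers are stored as metrics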
+        """
+        # Special case, force `http.status_code` as a string
+        # DEV: `http.status_code` *has* to be in `meta` for metrics
+        #      calculated in the trace agent
+        if key == http.STATUS_CODE:
+            value = str(value)
+
+        # Determine once up front
+        is_an_int = is_integer(value)
+
+        # Explicitly try to convert expected integers to `int`
+        # DEV: Some integrations parse these values from strings, but don't call `int(value)` themselves
+        INT_TYPES = (net.TARGET_PORT, )
+        if key in INT_TYPES and not is_an_int:
+            try:
+                value = int(value)
+                is_an_int = True
+            except (ValueError, TypeError):
+                pass
+
+        # Set integers that are less than or equal to 2^53 as metrics
+        if is_an_int and abs(value) <= 2 ** 53:
+            self.set_metric(key, value)
+            return
+
+        # All floats should be set as a metric
+        elif isinstance(value, float):
+            self.set_metric(key, value)
+            return
+
+        # Key should explicitly be converted to a float if needed
+        elif key in NUMERIC_TAGS:
+            try:
+                # DEV: `set_metric` will try to cast to `float()` for us
+                self.set_metric(key, value)
+            except (TypeError, ValueError):
+                log.debug('error setting numeric metric %s:%s', key, value)
+
+            return
+
+        elif key == MANUAL_KEEP_KEY:
+            self.context.sampling_priority = priority.USER_KEEP
+            return
+        elif key == MANUAL_DROP_KEY:
+            self.context.sampling_priority = priority.USER_REJECT
+            return
+
+        try:
+            self.meta[key] = stringify(value)
+            if key in self.metrics:
+                del self.metrics[key]
+        except Exception:
+            log.debug('error setting tag %s, ignoring it', key, exc_info=True)
+
+    def _remove_tag(self, key):
+        if key in self.meta:
+            del self.meta[key]
+
+    def get_tag(self, key):
+        """ Return the given tag or None if it doesn't exist.
+        """
+        return self.meta.get(key, None)
+
+    def set_tags(self, tags):
+        """ Set a dictionary of tags on the given span. Keys and values
+        must be strings (or stringable).
+        """
+        if tags:
+            for k, v in iter(tags.items()):
+                self.set_tag(k, v)
+
+    def set_meta(self, k, v):
+        self.set_tag(k, v)
+
+    def set_metas(self, kvs):
+        self.set_tags(kvs)
+
+    def set_metric(self, key, value):
+        # This method sets a numeric tag value for the given key. It acts
+        # like `set_meta()` and simply adds a tag without further processing.
+
+        # FIXME[matt] we could push this check to serialization time as well.
+        # only permit types that are commonly serializable (don't use
+        # isinstance so that we convert unserializable types like numpy
+        # numbers)
+        if type(value) not in numeric_types:
+            try:
+                value = float(value)
+            except (ValueError, TypeError):
+                log.debug('ignoring not number metric %s:%s', key, value)
+                return
+
+        # don't allow nan or inf
+        if math.isnan(value) or math.isinf(value):
+            log.debug('ignoring not real metric %s:%s', key, value)
+            return
+
+        if key in self.meta:
+            del self.meta[key]
+        self.metrics[key] = value
+
+    def set_metrics(self, metrics):
+        if metrics:
+            for k, v in iteritems(metrics):
+                self.set_metric(k, v)
+
+    def get_metric(self, key):
+        return self.metrics.get(key)
+
+    def to_dict(self):
+        d = {
+            'trace_id': self.trace_id,
+            'parent_id': self.parent_id,
+            'span_id': self.span_id,
+            'service': self.service,
+            'resource': self.resource,
+            'name': self.name,
+            'error': self.error,
+        }
+
+        # a common mistake is to set the error field to a boolean instead of an
+        # int. let's special case that here, because it's sure to happen in
+        # customer code.
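+        # For example (hypothetical):
+        #   span.error = True
+        #   span.to_dict()['error']  # => 1, not True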
+        err = d.get('error')
+        if err and type(err) == bool:
+            d['error'] = 1
+
+        if self.start_ns:
+            d['start'] = self.start_ns
+
+        if self.duration_ns:
+            d['duration'] = self.duration_ns
+
+        if self.meta:
+            d['meta'] = self.meta
+
+        if self.metrics:
+            d['metrics'] = self.metrics
+
+        if self.span_type:
+            d['type'] = self.span_type
+
+        return d
+
+    def set_traceback(self, limit=20):
+        """ If the current stack has an exception, tag the span with the
+        relevant error info. If not, tag the span with the current Python stack.
+        """
+        (exc_type, exc_val, exc_tb) = sys.exc_info()
+
+        if (exc_type and exc_val and exc_tb):
+            self.set_exc_info(exc_type, exc_val, exc_tb)
+        else:
+            tb = ''.join(traceback.format_stack(limit=limit + 1)[:-1])
+            self.set_tag(errors.ERROR_STACK, tb)  # FIXME[gabin] Want to replace 'error.stack' tag with 'python.stack'
+
+    def set_exc_info(self, exc_type, exc_val, exc_tb):
+        """ Tag the span with an error tuple as from `sys.exc_info()`. """
+        if not (exc_type and exc_val and exc_tb):
+            return  # nothing to do
+
+        self.error = 1
+
+        # get the traceback
+        buff = StringIO()
+        traceback.print_exception(exc_type, exc_val, exc_tb, file=buff, limit=20)
+        tb = buff.getvalue()
+
+        # readable version of type (e.g. exceptions.ZeroDivisionError)
+        exc_type_str = '%s.%s' % (exc_type.__module__, exc_type.__name__)
+
+        self.set_tag(errors.ERROR_MSG, exc_val)
+        self.set_tag(errors.ERROR_TYPE, exc_type_str)
+        self.set_tag(errors.ERROR_STACK, tb)
+
+    def _remove_exc_info(self):
+        """ Remove all exception-related information from the span. """
+        self.error = 0
+        self._remove_tag(errors.ERROR_MSG)
+        self._remove_tag(errors.ERROR_TYPE)
+        self._remove_tag(errors.ERROR_STACK)
+
+    def pprint(self):
+        """ Return a human readable version of the span. """
+        lines = [
+            ('name', self.name),
+            ('id', self.span_id),
+            ('trace_id', self.trace_id),
+            ('parent_id', self.parent_id),
+            ('service', self.service),
+            ('resource', self.resource),
+            ('type', self.span_type),
+            ('start', self.start),
+            ('end', '' if not self.duration else self.start + self.duration),
+            ('duration', '%fs' % (self.duration or 0)),
+            ('error', self.error),
+            ('tags', '')
+        ]
+
+        lines.extend((' ', '%s:%s' % kv) for kv in sorted(self.meta.items()))
+        return '\n'.join('%10s %s' % l for l in lines)
+
+    @property
+    def context(self):
+        """
+        Property that provides access to the ``Context`` associated with this ``Span``.
+        The ``Context`` contains state that propagates from span to span in a
+        larger trace.
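+
+        For example, a minimal sketch (the operation name is illustrative)::
+
+            with tracer.trace('parent') as span:
+                context = span.context
+                # the same Context is shared by every span in this trace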
+        """
+        return self._context
+
+    def __enter__(self):
+        return self
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        try:
+            if exc_type:
+                self.set_exc_info(exc_type, exc_val, exc_tb)
+            self.finish()
+        except Exception:
+            log.exception('error closing trace')
+
+    def __repr__(self):
+        return '<Span(id=%s,trace_id=%s,parent_id=%s,name=%s)>' % (
+            self.span_id,
+            self.trace_id,
+            self.parent_id,
+            self.name,
+        )
+
+
+def _new_id():
+    """Generate a random trace_id or span_id"""
+    return _getrandbits(64)
diff --git a/ddtrace/tracer.py b/ddtrace/tracer.py
new file mode 100644
index 0000000000..112eafe92d
--- /dev/null
+++ b/ddtrace/tracer.py
@@ -0,0 +1,638 @@
+import functools
+import logging
+from os import environ, getpid
+
+from ddtrace.vendor import debtcollector
+
+from .constants import FILTERS_KEY, SAMPLE_RATE_METRIC_KEY
+from .ext import system
+from .ext.priority import AUTO_REJECT, AUTO_KEEP
+from .internal.logger import get_logger
+from .internal.runtime import RuntimeTags, RuntimeWorker
+from .internal.writer import AgentWriter
+from .provider import DefaultContextProvider
+from .context import Context
+from .sampler import DatadogSampler, RateSampler, RateByServiceSampler
+from .span import Span
+from .utils.formats import get_env
+from .utils.deprecation import deprecated, RemovedInDDTrace10Warning
+from .vendor.dogstatsd import DogStatsd
+from . import compat
+
+
+log = get_logger(__name__)
+
+
+def _parse_dogstatsd_url(url):
+    if url is None:
+        return
+
+    # url can be either of the form `udp://<host>:<port>` or `unix://<path>`
+    # also support without url scheme included
+    if url.startswith('/'):
+        url = 'unix://' + url
+    elif '://' not in url:
+        url = 'udp://' + url
+
+    parsed = compat.parse.urlparse(url)
+
+    if parsed.scheme == 'unix':
+        return dict(socket_path=parsed.path)
+    elif parsed.scheme == 'udp':
+        return dict(host=parsed.hostname, port=parsed.port)
+    else:
+        raise ValueError('Unknown scheme `{}` for DogStatsD URL `{}`'.format(parsed.scheme, url))
+
+
+_INTERNAL_APPLICATION_SPAN_TYPES = [
+    "custom",
+    "template",
+    "web",
+    "worker"
+]
+
+
+class Tracer(object):
+    """
+    Tracer is used to create, sample and submit spans that measure the
+    execution time of sections of code.
+
+    If you're running an application that will serve a single trace per thread,
+    you can use the global tracer instance::
+
+        from ddtrace import tracer
+        trace = tracer.trace('app.request', 'web-server').finish()
+    """
+    _RUNTIME_METRICS_INTERVAL = 10
+
+    DEFAULT_HOSTNAME = environ.get('DD_AGENT_HOST', environ.get('DATADOG_TRACE_AGENT_HOSTNAME', 'localhost'))
+    DEFAULT_PORT = int(environ.get('DD_TRACE_AGENT_PORT', 8126))
+    DEFAULT_DOGSTATSD_PORT = int(get_env('dogstatsd', 'port', 8125))
+    DEFAULT_DOGSTATSD_URL = get_env('dogstatsd', 'url', 'udp://{}:{}'.format(DEFAULT_HOSTNAME, DEFAULT_DOGSTATSD_PORT))
+    DEFAULT_AGENT_URL = environ.get('DD_TRACE_AGENT_URL', 'http://%s:%d' % (DEFAULT_HOSTNAME, DEFAULT_PORT))
+
+    def __init__(self, url=DEFAULT_AGENT_URL, dogstatsd_url=DEFAULT_DOGSTATSD_URL):
+        """
+        Create a new ``Tracer`` instance. A global tracer is already initialized
+        for common usage, so there is no need to initialize your own ``Tracer``.
+
+        :param url: The Datadog agent URL.
+        :param dogstatsd_url: The DogStatsD URL.
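+
+        For example (the socket path and port shown are illustrative)::
+
+            tracer = Tracer(url='unix:///var/run/datadog/apm.socket',
+                            dogstatsd_url='udp://localhost:8125')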
+        """
+        self.log = log
+        self.sampler = None
+        self.priority_sampler = None
+        self._runtime_worker = None
+
+        uds_path = None
+        https = None
+        hostname = self.DEFAULT_HOSTNAME
+        port = self.DEFAULT_PORT
+        if url is not None:
+            url_parsed = compat.parse.urlparse(url)
+            if url_parsed.scheme in ('http', 'https'):
+                hostname = url_parsed.hostname
+                port = url_parsed.port
+                https = url_parsed.scheme == 'https'
+                # FIXME: This is needed because of the way configure() works right now, where it considers `port=None`
+                # to be "no port set so let's use the default".
+                # It should go away when we remove configure()
+                if port is None:
+                    if https:
+                        port = 443
+                    else:
+                        port = 80
+            elif url_parsed.scheme == 'unix':
+                uds_path = url_parsed.path
+            else:
+                raise ValueError('Unknown scheme `%s` for agent URL' % url_parsed.scheme)
+
+        # Apply the default configuration
+        self.configure(
+            enabled=True,
+            hostname=hostname,
+            port=port,
+            https=https,
+            uds_path=uds_path,
+            sampler=DatadogSampler(),
+            context_provider=DefaultContextProvider(),
+            dogstatsd_url=dogstatsd_url,
+        )
+
+        # globally set tags
+        self.tags = {}
+
+        # a buffer for service info so we don't perpetually send the same things
+        self._services = set()
+
+        # Runtime id used for associating data collected during runtime with
+        # traces
+        self._pid = getpid()
+
+    @property
+    def debug_logging(self):
+        return self.log.isEnabledFor(logging.DEBUG)
+
+    @debug_logging.setter
+    @deprecated(message='Use logging.setLevel instead', version='1.0.0')
+    def debug_logging(self, value):
+        self.log.setLevel(logging.DEBUG if value else logging.WARN)
+
+    @deprecated('Use .tracer, not .tracer()', '1.0.0')
+    def __call__(self):
+        return self
+
+    def global_excepthook(self, tp, value, traceback):
+        """The global tracer except hook."""
+        self._dogstatsd_client.increment('datadog.tracer.uncaught_exceptions', 1,
+                                         tags=['class:%s' % tp.__name__])
+
+    def get_call_context(self, *args, **kwargs):
+        """
+        Return the current active ``Context`` for this traced execution. This method is
+        automatically called by ``tracer.trace()``, but it can be used in the application
+        code during manual instrumentation like::
+
+            from ddtrace import tracer
+
+            async def web_handler(request):
+                context = tracer.get_call_context()
+                # use the context if needed
+                # ...
+
+        This method makes use of a ``ContextProvider`` that is automatically set during the tracer
+        initialization, or while using a library instrumentation.
+        """
+        return self._context_provider.active(*args, **kwargs)
+
+    @property
+    def context_provider(self):
+        """Returns the current Tracer Context Provider"""
+        return self._context_provider
+
+    # TODO: deprecate this method and make sure users create a new tracer if they need different parameters
+    @debtcollector.removals.removed_kwarg("dogstatsd_host", "Use `dogstatsd_url` instead",
+                                          category=RemovedInDDTrace10Warning)
+    @debtcollector.removals.removed_kwarg("dogstatsd_port", "Use `dogstatsd_url` instead",
+                                          category=RemovedInDDTrace10Warning)
+    def configure(self, enabled=None, hostname=None, port=None, uds_path=None, https=None,
+                  sampler=None, context_provider=None, wrap_executor=None, priority_sampling=None,
+                  settings=None, collect_metrics=None, dogstatsd_host=None, dogstatsd_port=None,
+                  dogstatsd_url=None):
+        """
+        Configure an existing Tracer the easy way.
+        Allows configuring or reconfiguring a Tracer instance.
+
+        :param bool enabled: If True, finished traces will be submitted to the API.
+            Otherwise they'll be dropped.
+        :param str hostname: Hostname running the Trace Agent
+        :param int port: Port of the Trace Agent
+        :param str uds_path: The Unix Domain Socket path of the agent.
+        :param bool https: Whether to use HTTPS or HTTP.
+        :param object sampler: A custom Sampler instance, locally deciding to totally drop the trace or not.
+        :param object context_provider: The ``ContextProvider`` that will be used to retrieve
+            automatically the current call context. This is an advanced option that usually
+            doesn't need to be changed from the default value.
+        :param object wrap_executor: callable that is used when a function is decorated with
+            ``Tracer.wrap()``. This is an advanced option that usually doesn't need to be changed
+            from the default value.
+        :param priority_sampling: enable priority sampling; this is required for
+            complete distributed tracing support. Enabled by default.
+        :param collect_metrics: Whether to enable runtime metrics collection.
+        :param str dogstatsd_host: Host for UDP connection to DogStatsD (deprecated: use dogstatsd_url)
+        :param int dogstatsd_port: Port for UDP connection to DogStatsD (deprecated: use dogstatsd_url)
+        :param str dogstatsd_url: URL for UDP or Unix socket connection to DogStatsD
+        """
+        if enabled is not None:
+            self.enabled = enabled
+
+        filters = None
+        if settings is not None:
+            filters = settings.get(FILTERS_KEY)
+
+        # If priority sampling is not set or is True and no priority sampler is set yet
+        if priority_sampling in (None, True) and not self.priority_sampler:
+            self.priority_sampler = RateByServiceSampler()
+        # Explicitly disable priority sampling
+        elif priority_sampling is False:
+            self.priority_sampler = None
+
+        if sampler is not None:
+            self.sampler = sampler
+
+        if dogstatsd_host is not None and dogstatsd_url is None:
+            dogstatsd_url = 'udp://{}:{}'.format(dogstatsd_host, dogstatsd_port or self.DEFAULT_DOGSTATSD_PORT)
+
+        if dogstatsd_url is not None:
+            dogstatsd_kwargs = _parse_dogstatsd_url(dogstatsd_url)
+            self.log.debug('Connecting to DogStatsd(%s)', dogstatsd_url)
+            self._dogstatsd_client = DogStatsd(**dogstatsd_kwargs)
+
+        if hostname is not None or port is not None or uds_path is not None or https is not None or \
+                filters is not None or priority_sampling is not None or sampler is not None:
+            # Preserve hostname and port when overriding filters or priority sampling
+            # This is clumsy and a good reason to get rid of this configure() API
+            if hasattr(self, 'writer') and hasattr(self.writer, 'api'):
+                default_hostname = self.writer.api.hostname
+                default_port = self.writer.api.port
+                if https is None:
+                    https = self.writer.api.https
+            else:
+                default_hostname = self.DEFAULT_HOSTNAME
+                default_port = self.DEFAULT_PORT
+
+            self.writer = AgentWriter(
+                hostname or default_hostname,
+                port or default_port,
+                uds_path=uds_path,
+                https=https,
+                filters=filters,
+                sampler=self.sampler,
+                priority_sampler=self.priority_sampler,
+                dogstatsd=self._dogstatsd_client,
+            )
+
+        # HACK: since we recreated our dogstatsd client, replace the one used by the old writer
+        self.writer.dogstatsd = self._dogstatsd_client
+
+        if context_provider is not None:
+            self._context_provider = context_provider
+
+        if wrap_executor is not None:
+            self._wrap_executor = wrap_executor
+
+        # Since we've recreated our dogstatsd client, we need to restart metric collection with that new client
+        if self._runtime_worker:
+            runtime_metrics_was_running = True
+            self._runtime_worker.stop()
+            self._runtime_worker.join()
+            self._runtime_worker = None
+        else:
+            runtime_metrics_was_running = False
+
+        if (collect_metrics is None and runtime_metrics_was_running) or collect_metrics:
+            self._start_runtime_worker()
+
+    def start_span(self, name, child_of=None, service=None, resource=None, span_type=None):
+        """
+        Return a span that will trace an operation called `name`. This method allows
+        parenting using the ``child_of`` kwarg. If it's missing, the newly created span is a
+        root span.
+
+        :param str name: the name of the operation being traced.
+        :param object child_of: a ``Span`` or a ``Context`` instance representing the parent for this span.
+        :param str service: the name of the service being traced.
+        :param str resource: an optional name of the resource being tracked.
+        :param str span_type: an optional operation type.
+
+        To start a new root span, simply::
+
+            span = tracer.start_span('web.request')
+
+        If you want to create a child of a root span, just::
+
+            root_span = tracer.start_span('web.request')
+            span = tracer.start_span('web.decoder', child_of=root_span)
+
+        Or if you have a ``Context`` object::
+
+            context = tracer.get_call_context()
+            span = tracer.start_span('web.worker', child_of=context)
+        """
+        if child_of is not None:
+            # determine whether child_of is a Span or a Context
+            child_of_context = isinstance(child_of, Context)
+            context = child_of if child_of_context else child_of.context
+            parent = child_of.get_current_span() if child_of_context else child_of
+        else:
+            context = Context()
+            parent = None
+
+        if parent:
+            trace_id = parent.trace_id
+            parent_span_id = parent.span_id
+        else:
+            trace_id = context.trace_id
+            parent_span_id = context.span_id
+
+        if trace_id:
+            # child_of a non-empty context, so either a local child span or from a remote context
+
+            # when not provided, inherit from parent's service
+            if parent:
+                service = service or parent.service
+
+            span = Span(
+                self,
+                name,
+                trace_id=trace_id,
+                parent_id=parent_span_id,
+                service=service,
+                resource=resource,
+                span_type=span_type,
+            )
+
+            # Extra attributes when from a local parent
+            if parent:
+                span.sampled = parent.sampled
+                span._parent = parent
+
+        else:
+            # this is the root span of a new trace
+            span = Span(
+                self,
+                name,
+                service=service,
+                resource=resource,
+                span_type=span_type,
+            )
+
+        span.sampled = self.sampler.sample(span)
+        # Old behavior
+        # DEV: The new sampler sets metrics and priority sampling on the span for us
+        if not isinstance(self.sampler, DatadogSampler):
+            if span.sampled:
+                # When doing client sampling in the client, keep the sample rate so that we can
+                # scale up statistics in the next steps of the pipeline.
+                if isinstance(self.sampler, RateSampler):
+                    span.set_metric(SAMPLE_RATE_METRIC_KEY, self.sampler.sample_rate)
+
+                if self.priority_sampler:
+                    # At this stage, it's important to have the service set. If unset,
+                    # the priority sampler will use the default sampling rate, which might
+                    # lead to oversampling (that is, dropping too many traces).
+                    if self.priority_sampler.sample(span):
+                        context.sampling_priority = AUTO_KEEP
+                    else:
+                        context.sampling_priority = AUTO_REJECT
+            else:
+                if self.priority_sampler:
+                    # If dropped by the local sampler, distributed instrumentation can drop it too.
+                    context.sampling_priority = AUTO_REJECT
+        else:
+            context.sampling_priority = AUTO_KEEP if span.sampled else AUTO_REJECT
+            # We must always mark the span as sampled so it is forwarded to the agent
+            span.sampled = True
+
+        # add tags to the root span to correlate the trace with runtime metrics;
+        # only applied to spans with types that are internal to applications
+        if self._runtime_worker and span.span_type in _INTERNAL_APPLICATION_SPAN_TYPES:
+            span.set_tag('language', 'python')
+
+        # add common tags
+        if self.tags:
+            span.set_tags(self.tags)
+        if not span._parent:
+            span.set_tag(system.PID, getpid())
+
+        # add it to the current context
+        context.add_span(span)
+
+        # check for new process if runtime metrics worker has already been started
+        self._check_new_process()
+
+        # update the set of services handled by the tracer
+        if service and service not in self._services:
+            self._services.add(service)
+
+            # The constant tags for the dogstatsd client need to be updated with any new
+            # service(s) that may have been added.
+            self._update_dogstatsd_constant_tags()
+
+        return span
+
+    def _update_dogstatsd_constant_tags(self):
+        """ Prepare runtime tags for dogstatsd.
+        """
+        # DEV: dogstatsd expects tags in the form ['key1:value1', 'key2:value2', ...]
+        tags = [
+            '{}:{}'.format(k, v)
+            for k, v in RuntimeTags()
+        ]
+        self.log.debug('Updating constant tags %s', tags)
+        self._dogstatsd_client.constant_tags = tags
+
+    def _start_runtime_worker(self):
+        self._runtime_worker = RuntimeWorker(self._dogstatsd_client, self._RUNTIME_METRICS_INTERVAL)
+        self._runtime_worker.start()
+
+    def _check_new_process(self):
+        """ Checks if the tracer is in a new process (was forked) and performs
+        the necessary updates if it is a new process.
+        """
+        pid = getpid()
+        if self._pid == pid:
+            return
+
+        self._pid = pid
+
+        # Assume that the services of the child are not necessarily a subset of those
+        # of the parent.
+        self._services = set()
+
+        if self._runtime_worker is not None:
+            self._start_runtime_worker()
+
+        # force an immediate update of constant tags since we have reset services
+        # and generated a new runtime id
+        self._update_dogstatsd_constant_tags()
+
+        # Re-create the background writer thread
+        self.writer = self.writer.recreate()
+
+    def trace(self, name, service=None, resource=None, span_type=None):
+        """
+        Return a span that will trace an operation called `name`. The context that created
+        the span, as well as the span parenting, are automatically handled by the tracing
+        function.
+
+        :param str name: the name of the operation being traced
+        :param str service: the name of the service being traced. If not set,
+            it will inherit the service from its parent.
+        :param str resource: an optional name of the resource being tracked.
+        :param str span_type: an optional operation type.
+
+        You must call `finish` on all spans, either directly or with a context
+        manager::
+
+            >>> span = tracer.trace('web.request')
+                try:
+                    # do something
+                finally:
+                    span.finish()
+
+            >>> with tracer.trace('web.request') as span:
+                # do something
+
+        Trace will store the current active span and subsequent child traces will
+        become its children::
+
+            parent = tracer.trace('parent')     # has no parent span
+            child = tracer.trace('child')       # is a child of a parent
+            child.finish()
+            parent.finish()
+
+            parent2 = tracer.trace('parent2')   # has no parent span
+            parent2.finish()
+        """
+        # retrieve the Context using the context provider and create
+        # a new Span that could be a root or a nested span
+        context = self.get_call_context()
+        return self.start_span(
+            name,
+            child_of=context,
+            service=service,
+            resource=resource,
+            span_type=span_type,
+        )
+
+    def current_root_span(self):
+        """Returns the root span of the current context.
+
+        This is useful for attaching information related to the trace as a
+        whole without needing to add it to child spans.
+
+        Usage is simple, for example::
+
+            # get the root span
+            root_span = tracer.current_root_span()
+            # set the host just once on the root span
+            if root_span:
+                root_span.set_tag('host', '127.0.0.1')
+        """
+        ctx = self.get_call_context()
+        if ctx:
+            return ctx.get_current_root_span()
+        return None
+
+    def current_span(self):
+        """
+        Return the active span for the current call context or ``None``
+        if no spans are available.
+        """
+        ctx = self.get_call_context()
+        if ctx:
+            return ctx.get_current_span()
+        return None
+
+    def record(self, context):
+        """
+        Record the given ``Context`` if it's finished.
+        """
+        # extract and enqueue the trace if it's sampled
+        trace, sampled = context.get()
+        if trace and sampled:
+            self.write(trace)
+
+    def write(self, spans):
+        """
+        Send the trace to the writer to enqueue the spans list in the agent
+        sending queue.
+        """
+        if not spans:
+            return  # nothing to do
+
+        if self.log.isEnabledFor(logging.DEBUG):
+            self.log.debug('writing %s spans (enabled:%s)', len(spans), self.enabled)
+            for span in spans:
+                self.log.debug('\n%s', span.pprint())
+
+        if self.enabled and self.writer:
+            # only submit the spans if we're actually enabled (and don't crash :)
+            self.writer.write(spans=spans)
+
+    @deprecated(message='Manually setting service info is no longer necessary', version='1.0.0')
+    def set_service_info(self, *args, **kwargs):
+        """Set the information about the given service.
+        """
+        return
+
+    def wrap(self, name=None, service=None, resource=None, span_type=None):
+        """
+        A decorator used to trace an entire function. If the traced function
+        is a coroutine, it traces the coroutine execution when it is awaited.
+        If a ``wrap_executor`` callable has been provided in the ``Tracer.configure()``
+        method, it will be called instead of the default one when the function
+        decorator is invoked.
+
+        :param str name: the name of the operation being traced. If not set,
+            defaults to the fully qualified function name.
+        :param str service: the name of the service being traced. If not set,
+            it will inherit the service from its parent.
+        :param str resource: an optional name of the resource being tracked.
+        :param str span_type: an optional operation type.
+
+        >>> @tracer.wrap('my.wrapped.function', service='my.service')
+            def run():
+                return 'run'
+
+        >>> # name will default to 'execute' if unset
+            @tracer.wrap()
+            def execute():
+                return 'executed'
+
+        >>> # or use it in asyncio coroutines
+            @tracer.wrap()
+            async def coroutine():
+                return 'executed'
+
+        >>> @tracer.wrap()
+            @asyncio.coroutine
+            def coroutine():
+                return 'executed'
+
+        You can access the current span using `tracer.current_span()` to set
+        tags:
+
+        >>> @tracer.wrap()
+            def execute():
+                span = tracer.current_span()
+                span.set_tag('a', 'b')
+        """
+        def wrap_decorator(f):
+            # FIXME[matt] include the class name for methods.
+            span_name = name if name else '%s.%s' % (f.__module__, f.__name__)
+
+            # detect if the given function is a coroutine to use the
+            # right decorator; this initial check ensures that the
+            # evaluation is done only once for each @tracer.wrap
+            if compat.iscoroutinefunction(f):
+                # call the async factory that creates a tracing decorator capable
+                # of awaiting the coroutine execution before finishing the span. This
+                # code is used for compatibility reasons to prevent syntax errors
+                # in Python 2
+                func_wrapper = compat.make_async_decorator(
+                    self, f, span_name,
+                    service=service,
+                    resource=resource,
+                    span_type=span_type,
+                )
+            else:
+                @functools.wraps(f)
+                def func_wrapper(*args, **kwargs):
+                    # if a wrap executor has been configured, it is used instead
+                    # of the default tracing function
+                    if getattr(self, '_wrap_executor', None):
+                        return self._wrap_executor(
+                            self,
+                            f, args, kwargs,
+                            span_name,
+                            service=service,
+                            resource=resource,
+                            span_type=span_type,
+                        )
+
+                    # otherwise fall back to the default tracing
+                    with self.trace(span_name, service=service, resource=resource, span_type=span_type):
+                        return f(*args, **kwargs)
+
+            return func_wrapper
+        return wrap_decorator
+
+    def set_tags(self, tags):
+        """ Set some tags at the tracer level.
+        This will append those tags to each span created by the tracer.
+
+        :param dict tags: dict of tags to set at tracer level
+        """
+        self.tags.update(tags)
diff --git a/ddtrace/util.py b/ddtrace/util.py
new file mode 100644
index 0000000000..5151769877
--- /dev/null
+++ b/ddtrace/util.py
@@ -0,0 +1,20 @@
+# [Backward compatibility]: keep importing the moved utility functions
+from .utils.deprecation import deprecated, deprecation
+from .utils.formats import asbool, deep_getattr, get_env
+from .utils.wrappers import safe_patch, unwrap
+
+
+deprecation(
+    name='ddtrace.util',
+    message='Use `ddtrace.utils` package instead',
+    version='1.0.0',
+)
+
+__all__ = [
+    'deprecated',
+    'asbool',
+    'deep_getattr',
+    'get_env',
+    'safe_patch',
+    'unwrap',
+]
diff --git a/ddtrace/utils/__init__.py b/ddtrace/utils/__init__.py
new file mode 100644
index 0000000000..c46c5c7178
--- /dev/null
+++ b/ddtrace/utils/__init__.py
@@ -0,0 +1,27 @@
+from ..vendor import debtcollector
+
+
+# https://stackoverflow.com/a/26853961
+def merge_dicts(x, y):
+    """Returns a copy of y merged into x."""
+    z = x.copy()   # start with x's keys and values
+    z.update(y)    # modifies z with y's keys and values & returns None
+    return z
+
+
+def get_module_name(module):
+    """Returns a module's name or None if one cannot be found.
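+
+    For example::
+
+        import math
+        get_module_name(math)  # 'math'
+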
+    Relevant PEP: https://www.python.org/dev/peps/pep-0451/
+    """
+    if hasattr(module, "__spec__"):
+        return module.__spec__.name
+    return getattr(module, "__name__", None)
+
+
+# Based on: https://stackoverflow.com/a/7864317
+class removed_classproperty(property):
+    def __get__(self, cls, owner):
+        debtcollector.deprecate(
+            "Usage of ddtrace.ext.AppTypes is no longer supported, please use ddtrace.ext.SpanTypes"
+        )
+        return classmethod(self.fget).__get__(None, owner)()
diff --git a/ddtrace/utils/attrdict.py b/ddtrace/utils/attrdict.py
new file mode 100644
index 0000000000..e153e2968c
--- /dev/null
+++ b/ddtrace/utils/attrdict.py
@@ -0,0 +1,37 @@
+class AttrDict(dict):
+    """
+    dict implementation that allows for item attribute access
+
+    Example::
+
+        data = AttrDict()
+        data['key'] = 'value'
+        print(data['key'])
+
+        data.key = 'new-value'
+        print(data.key)
+
+        # Convert an existing `dict`
+        data = AttrDict(dict(key='value'))
+        print(data.key)
+    """
+
+    def __getattr__(self, key):
+        if key in self:
+            return self[key]
+        return object.__getattribute__(self, key)
+
+    def __setattr__(self, key, value):
+        # 1) Ensure that if the key exists as a dict key we always prefer that
+        # 2) If we do not have an existing key but we do have an attr, set that
+        # 3) No existing key or attr exists, so set a key
+        if key in self:
+            # Update any existing key
+            self[key] = value
+        elif hasattr(self, key):
+            # Allow overwriting an existing attribute, e.g. `self.global_config = dict()`
+            object.__setattr__(self, key, value)
+        else:
+            # Set a new key
+            self[key] = value
diff --git a/ddtrace/utils/config.py b/ddtrace/utils/config.py
new file mode 100644
index 0000000000..4322120263
--- /dev/null
+++ b/ddtrace/utils/config.py
@@ -0,0 +1,11 @@
+import sys
+import os
+
+
+def get_application_name():
+    """Attempts to find the application name using system arguments."""
+    if hasattr(sys, "argv") and sys.argv[0]:
+        app_name = os.path.basename(sys.argv[0])
+    else:
+        app_name = None
+    return app_name
diff --git a/ddtrace/utils/deprecation.py b/ddtrace/utils/deprecation.py
new file mode 100644
index 0000000000..c2102500cc
--- /dev/null
+++ b/ddtrace/utils/deprecation.py
@@ -0,0 +1,61 @@
+import warnings
+
+from functools import wraps
+
+
+class RemovedInDDTrace10Warning(DeprecationWarning):
+    pass
+
+
+def format_message(name, message, version):
+    """Message formatter to create `DeprecationWarning` messages
+    such as:
+
+        'fn' is deprecated and will be removed in future versions (1.0).
+    """
+    return "'{}' is deprecated and will be removed in future versions{}. {}".format(
+        name, " ({})".format(version) if version else "", message,
+    )
+
+
+def warn(message, stacklevel=2):
+    """Helper function used to emit a ``DeprecationWarning``."""
+    warnings.warn(message, RemovedInDDTrace10Warning, stacklevel=stacklevel)
+
+
+def deprecation(name="", message="", version=None):
+    """Function to report a ``DeprecationWarning``. Bear in mind that
+    `DeprecationWarning`s are ignored by default, so they're not visible in user
+    logs. To show them, the application must be launched with a special flag:
+
+        $ python -Wall script.py
+
+    This approach is used by most of the frameworks, including Django
+    (ref: https://docs.djangoproject.com/en/2.0/howto/upgrade-version/#resolving-deprecation-warnings)
+    """
+    msg = format_message(name, message, version)
+    warn(msg, stacklevel=4)
+
+
+def deprecated(message="", version=None):
+    """Decorator function to report a ``DeprecationWarning``.
+    Bear in mind that `DeprecationWarning`s are ignored by default, so they're
+    not visible in user logs. To show them, the application must be launched
+    with a special flag:
+
+        $ python -Wall script.py
+
+    This approach is used by most of the frameworks, including Django
+    (ref: https://docs.djangoproject.com/en/2.0/howto/upgrade-version/#resolving-deprecation-warnings)
+    """
+
+    def decorator(func):
+        @wraps(func)
+        def wrapper(*args, **kwargs):
+            msg = format_message(func.__name__, message, version)
+            warn(msg, stacklevel=3)
+            return func(*args, **kwargs)
+
+        return wrapper
+
+    return decorator
diff --git a/ddtrace/utils/formats.py b/ddtrace/utils/formats.py
new file mode 100644
index 0000000000..7fe13dab55
--- /dev/null
+++ b/ddtrace/utils/formats.py
@@ -0,0 +1,82 @@
+import os
+
+from .deprecation import deprecation
+
+
+def get_env(integration, variable, default=None):
+    """Retrieves the environment variable value for the given integration. It must be used
+    for consistency between integrations. The implementation is backward compatible
+    with legacy nomenclature:
+
+    * `DATADOG_` is a legacy prefix with lower priority
+    * `DD_` environment variables have the highest priority
+    * the environment variable name is built by concatenating the `integration`
+      and `variable` arguments
+    * `default` is returned otherwise
+
+    """
+    key = "{}_{}".format(integration, variable).upper()
+    legacy_env = "DATADOG_{}".format(key)
+    env = "DD_{}".format(key)
+
+    value = os.getenv(env)
+    legacy = os.getenv(legacy_env)
+    if legacy:
+        # Deprecation: `DATADOG_` variables are deprecated
+        deprecation(
+            name="DATADOG_", message="Use `DD_` prefix instead", version="1.0.0",
+        )
+
+    value = value or legacy
+    return value if value else default
+
+
+def deep_getattr(obj, attr_string, default=None):
+    """
+    Returns the attribute of `obj` at the dotted path given by `attr_string`.
+    If no such attribute is reachable, returns `default`.
+
+    >>> deep_getattr(cass, 'cluster')
+    <cassandra.cluster.Cluster object at 0xa1a051c0>
+
+    >>> deep_getattr(cass, 'cluster.metadata.partitioner')
+    u'org.apache.cassandra.dht.Murmur3Partitioner'
+
+    >>> deep_getattr(cass, 'i.dont.exist', default='default')
+    'default'
+    """
+    attrs = attr_string.split(".")
+    for attr in attrs:
+        try:
+            obj = getattr(obj, attr)
+        except AttributeError:
+            return default
+
+    return obj
+
+
+def asbool(value):
+    """Convert the given string to a boolean object.
+
+    Accepted values are `True` and `1`.
+    """
+    if value is None:
+        return False
+
+    if isinstance(value, bool):
+        return value
+
+    return value.lower() in ("true", "1")
+
+
+def flatten_dict(d, sep=".", prefix=""):
+    """
+    Returns a normalized dict of depth 1 with keys in order of embedding.
+    """
+    # adapted from https://stackoverflow.com/a/19647596
+    return (
+        {prefix + sep + k if prefix else k: v for kk, vv in d.items() for k, v in flatten_dict(vv, sep, kk).items()}
+        if isinstance(d, dict)
+        else {prefix: d}
+    )
diff --git a/ddtrace/utils/hook.py b/ddtrace/utils/hook.py
new file mode 100644
index 0000000000..af1e776ede
--- /dev/null
+++ b/ddtrace/utils/hook.py
@@ -0,0 +1,197 @@
+"""
+This module is based off of wrapt.importer (wrapt==1.11.0)
+https://github.com/GrahamDumpleton/wrapt/blob/4bcd190457c89e993ffcfec6dad9e9969c033e9e/src/wrapt/importer.py#L127-L136
+
+The reasoning for this is that wrapt.importer does not provide a mechanism to
+remove the import hooks and that wrapt removes the hooks after they are fired.
+
+So this module differs from wrapt.importer in that:
+    - removes unnecessary functionality (like allowing hooks to be import paths)
+    - deregister_post_import_hook is introduced to remove hooks
+    - the values of _post_import_hooks can only be lists (instead of allowing None)
+    - notify_module_loaded is modified to not remove the hooks when they are
+      fired.
+"""
+import sys
+import threading
+
+from ..compat import PY3
+from ..internal.logger import get_logger
+from ..utils import get_module_name
+from ..vendor.wrapt.decorators import synchronized
+
+
+log = get_logger(__name__)
+
+
+_post_import_hooks = {}
+_post_import_hooks_init = False
+_post_import_hooks_lock = threading.RLock()
+
+
+@synchronized(_post_import_hooks_lock)
+def register_post_import_hook(name, hook):
+    """
+    Registers a module import hook, ``hook`` for a module with name ``name``.
+
+    If the module is already imported the hook is called immediately and a
+    debug message is logged, since this should not be expected in our use-case.
+
+    :param name: Name of the module (full dotted path)
+    :type name: str
+    :param hook: Callable to be invoked with the module when it is imported.
+    :type hook: Callable
+    """
+    # Automatically install the import hook finder if it has not already
+    # been installed.
+    global _post_import_hooks_init
+
+    if not _post_import_hooks_init:
+        _post_import_hooks_init = True
+        sys.meta_path.insert(0, ImportHookFinder())
+
+    hooks = _post_import_hooks.get(name, [])
+
+    if hook in hooks:
+        log.debug('hook "%s" already exists on module "%s"', hook, name)
+        return
+
+    module = sys.modules.get(name, None)
+
+    # If the module has already been imported, fire the hook and log a debug msg.
+    if module:
+        log.debug('module "%s" already imported, firing hook', name)
+        hook(module)
+
+    hooks.append(hook)
+    _post_import_hooks[name] = hooks
+
+
+@synchronized(_post_import_hooks_lock)
+def notify_module_loaded(module):
+    """
+    Indicate that a module has been loaded. Any post import hooks which were
+    registered for the target module will be invoked.
+
+    Any raised exceptions will be caught and an error message will be logged
+    indicating that the hook failed.
+
+    :param module: The module being loaded
+    :type module: ``types.ModuleType``
+    """
+    name = get_module_name(module)
+    hooks = _post_import_hooks.get(name, [])
+
+    for hook in hooks:
+        try:
+            hook(module)
+        except Exception:
+            log.warning('hook "%s" for module "%s" failed', hook, name, exc_info=True)
+
+
+class _ImportHookLoader(object):
+    """
+    A custom module import finder. This intercepts attempts to import
+    modules and watches out for attempts to import target modules of
+    interest. When a module of interest is imported, then any post import
+    hooks which are registered will be invoked.
+    """
+
+    def load_module(self, fullname):
+        module = sys.modules[fullname]
+        notify_module_loaded(module)
+        return module
+
+
+class _ImportHookChainedLoader(object):
+    def __init__(self, loader):
+        self.loader = loader
+
+    def load_module(self, fullname):
+        module = self.loader.load_module(fullname)
+        notify_module_loaded(module)
+        return module
+
+
+class ImportHookFinder:
+    def __init__(self):
+        self.in_progress = {}
+
+    @synchronized(_post_import_hooks_lock)
+    def find_module(self, fullname, path=None):
+        # If the module being imported is not one we have registered
+        # post import hooks for, we can return immediately. We will
+        # take no further part in the importing of this module.
+
+        if fullname not in _post_import_hooks:
+            return None
+
+        # When we are interested in a specific module, we will call back
+        # into the import system a second time to defer to the import
+        # finder that is supposed to handle the importing of the module.
+        # We set an in progress flag for the target module so that on
+        # the second time through we don't trigger another call back
+        # into the import system and cause an infinite loop.
+
+        if fullname in self.in_progress:
+            return None
+
+        self.in_progress[fullname] = True
+
+        # Now call back into the import system again.
+
+        try:
+            if PY3:
+                # For Python 3 we need to use find_spec().loader
+                # from the importlib.util module. It doesn't actually
+                # import the target module and only finds the
+                # loader. If a loader is found, we need to return
+                # our own loader which will then in turn call the
+                # real loader to import the module and invoke the
+                # post import hooks.
+                try:
+                    import importlib.util
+
+                    loader = importlib.util.find_spec(fullname).loader
+                except (ImportError, AttributeError):
+                    loader = importlib.find_loader(fullname, path)
+                if loader:
+                    return _ImportHookChainedLoader(loader)
+            else:
+                # For Python 2 we don't have much choice but to
+                # call back in to __import__(). This will
+                # actually cause the module to be imported. If no
+                # module could be found then ImportError will be
+                # raised. Otherwise we return a loader which
+                # returns the already loaded module and invokes
+                # the post import hooks.
+                __import__(fullname)
+                return _ImportHookLoader()
+
+        finally:
+            del self.in_progress[fullname]
+
+
+@synchronized(_post_import_hooks_lock)
+def deregister_post_import_hook(modulename, hook):
+    """
+    Deregisters post import hooks for a module given the module name and a hook
+    that was previously installed.
+
+    :param modulename: Name of the module the hook is installed on.
+    :type modulename: str
+    :param hook: The hook to remove (the function itself)
+    :type hook: Callable
+    :return: whether a hook was removed or not
+    """
+    if modulename not in _post_import_hooks:
+        return False
+
+    hooks = _post_import_hooks[modulename]
+
+    try:
+        hooks.remove(hook)
+        return True
+    except ValueError:
+        return False
diff --git a/ddtrace/utils/http.py b/ddtrace/utils/http.py
new file mode 100644
index 0000000000..c4a433f78a
--- /dev/null
+++ b/ddtrace/utils/http.py
@@ -0,0 +1,9 @@
+def normalize_header_name(header_name):
+    """
+    Normalizes a header name to lower case, stripping all leading and trailing whitespace.
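+
+    For example::
+
+        normalize_header_name('  Content-Type  ')  # 'content-type'
+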
+ :param header_name: the header name to normalize + :type header_name: str + :return: the normalized header name + :rtype: str + """ + return header_name.strip().lower() if header_name is not None else None diff --git a/ddtrace/utils/importlib.py b/ddtrace/utils/importlib.py new file mode 100644 index 0000000000..107b15ff6d --- /dev/null +++ b/ddtrace/utils/importlib.py @@ -0,0 +1,33 @@ +from __future__ import absolute_import + +from importlib import import_module + + +class require_modules(object): + """Context manager to check the availability of required modules.""" + + def __init__(self, modules): + self._missing_modules = [] + for module in modules: + try: + import_module(module) + except ImportError: + self._missing_modules.append(module) + + def __enter__(self): + return self._missing_modules + + def __exit__(self, exc_type, exc_value, traceback): + return False + + +def func_name(f): + """Return a human readable version of the function's name.""" + if hasattr(f, "__module__"): + return "%s.%s" % (f.__module__, getattr(f, "__name__", f.__class__.__name__)) + return getattr(f, "__name__", f.__class__.__name__) + + +def module_name(instance): + """Return the instance module name.""" + return instance.__class__.__module__.split(".")[0] diff --git a/ddtrace/utils/merge.py b/ddtrace/utils/merge.py new file mode 100644 index 0000000000..5ac6110d9b --- /dev/null +++ b/ddtrace/utils/merge.py @@ -0,0 +1,19 @@ +# Borrowed from: https://stackoverflow.com/questions/20656135/python-deep-merge-dictionary-data#20666342 +def deepmerge(source, destination): + """ + Merge the first provided ``dict`` into the second. + + :param dict source: The ``dict`` to merge into ``destination`` + :param dict destination: The ``dict`` that should get updated + :rtype: dict + :returns: ``destination`` modified + """ + for key, value in source.items(): + if isinstance(value, dict): + # get node or create one + node = destination.setdefault(key, {}) + deepmerge(value, node) + else: + destination[key] = value + + return destination diff --git a/ddtrace/utils/time.py b/ddtrace/utils/time.py new file mode 100644 index 0000000000..b563b65d41 --- /dev/null +++ b/ddtrace/utils/time.py @@ -0,0 +1,57 @@ +from ..vendor import monotonic + + +class StopWatch(object): + """A simple timer/stopwatch helper class. + + Not thread-safe (when a single watch is mutated by multiple threads at + the same time). Thread-safe when used by a single thread (not shared) or + when operations are performed in a thread-safe manner on these objects by + wrapping those operations with locks. + + It will use the `monotonic`_ pypi library to find an appropriate + monotonically increasing time providing function (which typically varies + depending on operating system and Python version). + + .. _monotonic: https://pypi.python.org/pypi/monotonic/ + """ + + def __init__(self): + self._started_at = None + self._stopped_at = None + + def start(self): + """Starts the watch.""" + self._started_at = monotonic.monotonic() + return self + + def elapsed(self): + """Get how many seconds have elapsed. 
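+
+        For example, a minimal sketch (``do_work`` is a hypothetical workload)::
+
+            with StopWatch() as sw:
+                do_work()
+            print(sw.elapsed())  # seconds elapsed, as a float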
+
+        :return: Number of seconds elapsed
+        :rtype: float
+        """
+        # NOTE: datetime.timedelta does not support nanoseconds, so keep a float here
+        if self._started_at is None:
+            raise RuntimeError("Can not get the elapsed time of a stopwatch"
+                               " if it has not been started/stopped")
+        if self._stopped_at is None:
+            now = monotonic.monotonic()
+        else:
+            now = self._stopped_at
+        return now - self._started_at
+
+    def __enter__(self):
+        """Starts the watch."""
+        self.start()
+        return self
+
+    def __exit__(self, tp, value, traceback):
+        """Stops the watch."""
+        self.stop()
+
+    def stop(self):
+        """Stops the watch."""
+        if self._started_at is None:
+            raise RuntimeError("Can not stop a stopwatch that has not been"
+                               " started")
+        self._stopped_at = monotonic.monotonic()
+        return self
diff --git a/ddtrace/utils/wrappers.py b/ddtrace/utils/wrappers.py
new file mode 100644
index 0000000000..bfcf69a47e
--- /dev/null
+++ b/ddtrace/utils/wrappers.py
@@ -0,0 +1,62 @@
+from ddtrace.vendor import wrapt
+import inspect
+
+from .deprecation import deprecated
+
+
+def unwrap(obj, attr):
+    f = getattr(obj, attr, None)
+    if f and isinstance(f, wrapt.ObjectProxy) and hasattr(f, "__wrapped__"):
+        setattr(obj, attr, f.__wrapped__)
+
+
+@deprecated("`wrapt` library is used instead", version="1.0.0")
+def safe_patch(patchable, key, patch_func, service, meta, tracer):
+    """ Takes patch_func (signature: takes the orig_method that is
+    wrapped in the monkey patch == UNBOUND + service and meta) and
+    attaches the patched result to patchable at patchable.key
+
+    - If this is the module/class we can rely on methods being unbound, and just have to
+      update the __dict__
+    - If this is an instance, we have to unbind the current and rebind our
+      patched method
+    - If patchable is an instance and if we've already patched at the module/class level
+      then patchable[key] contains an already patched command!
+
+    To work around this, check if patchable or patchable.__class__ are ``_dogtraced``.
+    If it isn't, nothing to worry about; patch the key as usual.
+    But if it is, search for a '__dd_orig_{key}' method on the class, which is
+    the original unpatched method we wish to trace.
+    """
+
+    def _get_original_method(thing, key):
+        orig = None
+        if hasattr(thing, "_dogtraced"):
+            # Search for original method
+            orig = getattr(thing, "__dd_orig_{}".format(key), None)
+        else:
+            orig = getattr(thing, key)
+            # Set it for the next time we attempt to patch `thing`
+            setattr(thing, "__dd_orig_{}".format(key), orig)
+
+        return orig
+
+    if inspect.isclass(patchable) or inspect.ismodule(patchable):
+        orig = _get_original_method(patchable, key)
+        if not orig:
+            # Should never happen
+            return
+    elif hasattr(patchable, "__class__"):
+        orig = _get_original_method(patchable.__class__, key)
+        if not orig:
+            # Should never happen
+            return
+    else:
+        return
+
+    dest = patch_func(orig, service, meta, tracer)
+
+    if inspect.isclass(patchable) or inspect.ismodule(patchable):
+        setattr(patchable, key, dest)
+    elif hasattr(patchable, "__class__"):
+        setattr(patchable, key, dest.__get__(patchable, patchable.__class__))
diff --git a/ddtrace/vendor/__init__.py b/ddtrace/vendor/__init__.py
new file mode 100644
index 0000000000..d3d436403d
--- /dev/null
+++ b/ddtrace/vendor/__init__.py
@@ -0,0 +1,95 @@
+"""
+ddtrace.vendor
+==============
+Install vendored dependencies under a different top level package to avoid importing `ddtrace/__init__.py`
+whenever a dependency is imported. Doing this allows us to have a little more control over import order.
+ + +Dependencies +============ + +msgpack +------- + +Website: https://msgpack.org/ +Source: https://github.com/msgpack/msgpack-python +Version: 0.6.1 +License: Apache License, Version 2.0 + +Notes: + If you need to update any `*.pyx` files, be sure to run `cython --cplus msgpack/_cmsgpack.pyx` to regenerate `_cmsgpack.cpp` + + `_packer.pyx` and `_unpacker.pyx` were updated to import from `ddtrace.vendor.msgpack` + +six +--- + +Website: https://six.readthedocs.io/ +Source: https://github.com/benjaminp/six +Version: 1.11.0 +License: MIT + +Notes: + `six/__init__.py` is just the source code's `six.py` + `curl https://raw.githubusercontent.com/benjaminp/six/1.11.0/six.py > ddtrace/vendor/six/__init__.py` + + +wrapt +----- + +Website: https://wrapt.readthedocs.io/en/latest/ +Source: https://github.com/GrahamDumpleton/wrapt/ +Version: 1.11.1 +License: BSD 2-Clause "Simplified" License + +Notes: + `wrapt/__init__.py` was updated to include a copy of `wrapt`'s license: https://github.com/GrahamDumpleton/wrapt/blob/1.11.1/LICENSE + + `setup.py` will attempt to build the `wrapt/_wrappers.c` C module + +dogstatsd +--------- + +Website: https://datadogpy.readthedocs.io/en/latest/ +Source: https://github.com/DataDog/datadogpy +Version: 0.28.0 +License: Copyright (c) 2015, Datadog + +Notes: + `dogstatsd/__init__.py` was updated to include a copy of the `datadogpy` license: https://github.com/DataDog/datadogpy/blob/master/LICENSE + Only `datadog.dogstatsd` module was vendored to avoid unnecessary dependencies + `datadog/util/compat.py` was copied to `dogstatsd/compat.py` + +monotonic +--------- + +Website: https://pypi.org/project/monotonic/ +Source: https://github.com/atdt/monotonic +Version: 1.5 +License: Apache License 2.0 + +Notes: + The source `monotonic.py` was added as `monotonic/__init__.py` + + No other changes were made + +debtcollector +------------- + +Website: https://docs.openstack.org/debtcollector/latest/index.html +Source: https://github.com/openstack/debtcollector +Version: 1.22.0 +License: Apache License 2.0 + +Notes: + Removed dependency on `pbr` and manually set `__version__` + +""" + +# Initialize `ddtrace.vendor.datadog.base.log` logger with our custom rate limited logger +# DEV: This helps ensure if there are connection issues we do not spam their logs +# DEV: Overwrite `base.log` instead of `get_logger('datadog.dogstatsd')` so we do +# not conflict with any non-vendored datadog.dogstatsd logger +from ..internal.logger import get_logger +from .dogstatsd import base +base.log = get_logger('ddtrace.vendor.dogstatsd') diff --git a/ddtrace/vendor/debtcollector/__init__.py b/ddtrace/vendor/debtcollector/__init__.py new file mode 100644 index 0000000000..2fc6fded42 --- /dev/null +++ b/ddtrace/vendor/debtcollector/__init__.py @@ -0,0 +1,45 @@ +# -*- coding: utf-8 -*- + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from . 
import _utils, moves, removals, renames, updating + +__version__ = "1.22.0" + + +def deprecate(prefix, postfix=None, message=None, + version=None, removal_version=None, + stacklevel=3, category=DeprecationWarning): + """Helper to deprecate some thing using generated message format. + + :param prefix: prefix string used as the prefix of the output message + :param postfix: postfix string used as the postfix of the output message + :param message: message string used as ending contents of the deprecate + message + :param version: version string (represents the version this + deprecation was created in) + :param removal_version: version string (represents the version this + deprecation will be removed in); a string of '?' + will denote this will be removed in some future + unknown version + :param stacklevel: stacklevel used in the :func:`warnings.warn` function + to locate where the users code is in the + :func:`warnings.warn` call + :param category: the :mod:`warnings` category to use, defaults to + :py:class:`DeprecationWarning` if not provided + """ + out_message = _utils.generate_message(prefix, postfix=postfix, + version=version, message=message, + removal_version=removal_version) + _utils.deprecation(out_message, stacklevel=stacklevel, + category=category) diff --git a/ddtrace/vendor/debtcollector/_utils.py b/ddtrace/vendor/debtcollector/_utils.py new file mode 100644 index 0000000000..45691ab14d --- /dev/null +++ b/ddtrace/vendor/debtcollector/_utils.py @@ -0,0 +1,180 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import functools +import inspect +import types +import warnings + +from .. import six + +try: + _TYPE_TYPE = types.TypeType +except AttributeError: + _TYPE_TYPE = type + + +# See: https://docs.python.org/2/library/__builtin__.html#module-__builtin__ +# and see https://docs.python.org/2/reference/executionmodel.html (and likely +# others)... +_BUILTIN_MODULES = ('builtins', '__builtin__', '__builtins__', 'exceptions') +_enabled = True + + +def deprecation(message, stacklevel=None, category=None): + """Warns about some type of deprecation that has been (or will be) made. + + This helper function makes it easier to interact with the warnings module + by standardizing the arguments that the warning function receives so that + it is easier to use. + + This should be used to emit warnings to users (users can easily turn these + warnings off/on, see https://docs.python.org/2/library/warnings.html + as they see fit so that the messages do not fill up the users logs with + warnings that they do not wish to see in production) about functions, + methods, attributes or other code that is deprecated and will be removed + in a future release (this is done using these warnings to avoid breaking + existing users of those functions, methods, code; which a library should + avoid doing by always giving at *least* N + 1 release for users to address + the deprecation warnings). 
+ """ + if not _enabled: + return + if category is None: + category = DeprecationWarning + if stacklevel is None: + warnings.warn(message, category=category) + else: + warnings.warn(message, category=category, stacklevel=stacklevel) + + +def get_qualified_name(obj): + # Prefer the py3.x name (if we can get at it...) + try: + return (True, obj.__qualname__) + except AttributeError: + return (False, obj.__name__) + + +def generate_message(prefix, postfix=None, message=None, + version=None, removal_version=None): + """Helper to generate a common message 'style' for deprecation helpers.""" + message_components = [prefix] + if version: + message_components.append(" in version '%s'" % version) + if removal_version: + if removal_version == "?": + message_components.append(" and will be removed in a future" + " version") + else: + message_components.append(" and will be removed in version '%s'" + % removal_version) + if postfix: + message_components.append(postfix) + if message: + message_components.append(": %s" % message) + return ''.join(message_components) + + +def get_assigned(decorator): + """Helper to fix/workaround https://bugs.python.org/issue3445""" + if six.PY3: + return functools.WRAPPER_ASSIGNMENTS + else: + assigned = [] + for attr_name in functools.WRAPPER_ASSIGNMENTS: + if hasattr(decorator, attr_name): + assigned.append(attr_name) + return tuple(assigned) + + +def get_class_name(obj, fully_qualified=True): + """Get class name for object. + + If object is a type, fully qualified name of the type is returned. + Else, fully qualified name of the type of the object is returned. + For builtin types, just name is returned. + """ + if not isinstance(obj, six.class_types): + obj = type(obj) + try: + built_in = obj.__module__ in _BUILTIN_MODULES + except AttributeError: + pass + else: + if built_in: + return obj.__name__ + + if fully_qualified and hasattr(obj, '__module__'): + return '%s.%s' % (obj.__module__, obj.__name__) + else: + return obj.__name__ + + +def get_method_self(method): + """Gets the ``self`` object attached to this method (or none).""" + if not inspect.ismethod(method): + return None + try: + return six.get_method_self(method) + except AttributeError: + return None + + +def get_callable_name(function): + """Generate a name from callable. + + Tries to do the best to guess fully qualified callable name. + """ + method_self = get_method_self(function) + if method_self is not None: + # This is a bound method. + if isinstance(method_self, six.class_types): + # This is a bound class method. + im_class = method_self + else: + im_class = type(method_self) + try: + parts = (im_class.__module__, function.__qualname__) + except AttributeError: + parts = (im_class.__module__, im_class.__name__, function.__name__) + elif inspect.ismethod(function) or inspect.isfunction(function): + # This could be a function, a static method, a unbound method... + try: + parts = (function.__module__, function.__qualname__) + except AttributeError: + if hasattr(function, 'im_class'): + # This is a unbound method, which exists only in python 2.x + im_class = function.im_class + parts = (im_class.__module__, + im_class.__name__, function.__name__) + else: + parts = (function.__module__, function.__name__) + else: + im_class = type(function) + if im_class is _TYPE_TYPE: + im_class = function + try: + parts = (im_class.__module__, im_class.__qualname__) + except AttributeError: + parts = (im_class.__module__, im_class.__name__) + # When running under sphinx it appears this can be none? 
if so just + # don't include it... + mod, rest = (parts[0], parts[1:]) + if not mod: + return '.'.join(rest) + else: + return '.'.join(parts) diff --git a/ddtrace/vendor/debtcollector/moves.py b/ddtrace/vendor/debtcollector/moves.py new file mode 100644 index 0000000000..e0965930bb --- /dev/null +++ b/ddtrace/vendor/debtcollector/moves.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import inspect + +from .. import six +from .. import wrapt + +from . import _utils + +_KIND_MOVED_PREFIX_TPL = "%s '%s' has moved to '%s'" +_CLASS_MOVED_PREFIX_TPL = "Class '%s' has moved to '%s'" +_MOVED_CALLABLE_POSTFIX = "()" +_FUNC_MOVED_PREFIX_TPL = "Function '%s' has moved to '%s'" + + +def _moved_decorator(kind, new_attribute_name, message=None, + version=None, removal_version=None, stacklevel=3, + attr_postfix=None, category=None): + """Decorates a method/property that was moved to another location.""" + + def decorator(f): + fully_qualified, old_attribute_name = _utils.get_qualified_name(f) + if attr_postfix: + old_attribute_name += attr_postfix + + @wrapt.decorator + def wrapper(wrapped, instance, args, kwargs): + base_name = _utils.get_class_name(wrapped, fully_qualified=False) + if fully_qualified: + old_name = old_attribute_name + else: + old_name = ".".join((base_name, old_attribute_name)) + new_name = ".".join((base_name, new_attribute_name)) + prefix = _KIND_MOVED_PREFIX_TPL % (kind, old_name, new_name) + out_message = _utils.generate_message( + prefix, message=message, + version=version, removal_version=removal_version) + _utils.deprecation(out_message, stacklevel=stacklevel, + category=category) + return wrapped(*args, **kwargs) + + return wrapper(f) + + return decorator + + +def moved_function(new_func, old_func_name, old_module_name, + message=None, version=None, removal_version=None, + stacklevel=3, category=None): + """Deprecates a function that was moved to another location. + + This generates a wrapper around ``new_func`` that will emit a deprecation + warning when called. The warning message will include the new location + to obtain the function from. 
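+
+    The returned wrapper also keeps ``old_func_name`` and ``old_module_name``
+    as its ``__name__`` and ``__module__``, so introspection keeps pointing
+    at the old (deprecated) location.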
+ """ + new_func_full_name = _utils.get_callable_name(new_func) + new_func_full_name += _MOVED_CALLABLE_POSTFIX + old_func_full_name = ".".join([old_module_name, old_func_name]) + old_func_full_name += _MOVED_CALLABLE_POSTFIX + prefix = _FUNC_MOVED_PREFIX_TPL % (old_func_full_name, new_func_full_name) + out_message = _utils.generate_message(prefix, + message=message, version=version, + removal_version=removal_version) + + @six.wraps(new_func, assigned=_utils.get_assigned(new_func)) + def old_new_func(*args, **kwargs): + _utils.deprecation(out_message, stacklevel=stacklevel, + category=category) + return new_func(*args, **kwargs) + + old_new_func.__name__ = old_func_name + old_new_func.__module__ = old_module_name + return old_new_func + + +class moved_read_only_property(object): + """Descriptor for read-only properties moved to another location. + + This works like the ``@property`` descriptor but can be used instead to + provide the same functionality and also interact with the :mod:`warnings` + module to warn when a property is accessed, so that users of those + properties can know that a previously read-only property at a prior + location/name has moved to another location/name. + + :param old_name: old attribute location/name + :param new_name: new attribute location/name + :param version: version string (represents the version this deprecation + was created in) + :param removal_version: version string (represents the version this + deprecation will be removed in); a string + of '?' will denote this will be removed in + some future unknown version + :param stacklevel: stacklevel used in the :func:`warnings.warn` function + to locate where the users code is when reporting the + deprecation call (the default being 3) + :param category: the :mod:`warnings` category to use, defaults to + :py:class:`DeprecationWarning` if not provided + """ + + def __init__(self, old_name, new_name, + version=None, removal_version=None, + stacklevel=3, category=None): + self._old_name = old_name + self._new_name = new_name + self._message = _utils.generate_message( + "Read-only property '%s' has moved" + " to '%s'" % (self._old_name, self._new_name), + version=version, removal_version=removal_version) + self._stacklevel = stacklevel + self._category = category + + def __get__(self, instance, owner): + _utils.deprecation(self._message, + stacklevel=self._stacklevel, + category=self._category) + # This handles the descriptor being applied on a + # instance or a class and makes both work correctly... 
+ if instance is not None: + real_owner = instance + else: + real_owner = owner + return getattr(real_owner, self._new_name) + + +def moved_method(new_method_name, message=None, + version=None, removal_version=None, stacklevel=3, + category=None): + """Decorates an *instance* method that was moved to another location.""" + if not new_method_name.endswith(_MOVED_CALLABLE_POSTFIX): + new_method_name += _MOVED_CALLABLE_POSTFIX + return _moved_decorator('Method', new_method_name, message=message, + version=version, removal_version=removal_version, + stacklevel=stacklevel, + attr_postfix=_MOVED_CALLABLE_POSTFIX, + category=category) + + +def moved_property(new_attribute_name, message=None, + version=None, removal_version=None, stacklevel=3, + category=None): + """Decorates an *instance* property that was moved to another location.""" + return _moved_decorator('Property', new_attribute_name, message=message, + version=version, removal_version=removal_version, + stacklevel=stacklevel, category=category) + + +def moved_class(new_class, old_class_name, old_module_name, + message=None, version=None, removal_version=None, + stacklevel=3, category=None): + """Deprecates a class that was moved to another location. + + This creates a 'new-old' type that can be used for a + deprecation period that can be inherited from. This will emit warnings + when the old locations class is initialized, telling where the new and + improved location for the old class now is. + """ + + if not inspect.isclass(new_class): + _qual, type_name = _utils.get_qualified_name(type(new_class)) + raise TypeError("Unexpected class type '%s' (expected" + " class type only)" % type_name) + + old_name = ".".join((old_module_name, old_class_name)) + new_name = _utils.get_class_name(new_class) + prefix = _CLASS_MOVED_PREFIX_TPL % (old_name, new_name) + out_message = _utils.generate_message( + prefix, message=message, version=version, + removal_version=removal_version) + + def decorator(f): + + @six.wraps(f, assigned=_utils.get_assigned(f)) + def wrapper(self, *args, **kwargs): + _utils.deprecation(out_message, stacklevel=stacklevel, + category=category) + return f(self, *args, **kwargs) + + return wrapper + + old_class = type(old_class_name, (new_class,), {}) + old_class.__module__ = old_module_name + old_class.__init__ = decorator(old_class.__init__) + return old_class diff --git a/ddtrace/vendor/debtcollector/removals.py b/ddtrace/vendor/debtcollector/removals.py new file mode 100644 index 0000000000..0add069e76 --- /dev/null +++ b/ddtrace/vendor/debtcollector/removals.py @@ -0,0 +1,334 @@ +# Copyright 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import functools +import inspect + +from .. import six +from .. import wrapt + +from . 
import _utils + + +def _get_qualified_name(obj): + return _utils.get_qualified_name(obj)[1] + + +def _fetch_first_result(fget, fset, fdel, apply_func, value_not_found=None): + """Fetch first non-none/empty result of applying ``apply_func``.""" + for f in filter(None, (fget, fset, fdel)): + result = apply_func(f) + if result: + return result + return value_not_found + + +class removed_property(object): + """Property descriptor that deprecates a property. + + This works like the ``@property`` descriptor but can be used instead to + provide the same functionality and also interact with the :mod:`warnings` + module to warn when a property is accessed, set and/or deleted. + + :param message: string used as ending contents of the deprecate message + :param version: version string (represents the version this deprecation + was created in) + :param removal_version: version string (represents the version this + deprecation will be removed in); a string + of '?' will denote this will be removed in + some future unknown version + :param stacklevel: stacklevel used in the :func:`warnings.warn` function + to locate where the users code is when reporting the + deprecation call (the default being 3) + :param category: the :mod:`warnings` category to use, defaults to + :py:class:`DeprecationWarning` if not provided + """ + + # Message templates that will be turned into real messages as needed. + _PROPERTY_GONE_TPLS = { + 'set': "Setting the '%s' property is deprecated", + 'get': "Reading the '%s' property is deprecated", + 'delete': "Deleting the '%s' property is deprecated", + } + + def __init__(self, fget=None, fset=None, fdel=None, doc=None, + stacklevel=3, category=DeprecationWarning, + version=None, removal_version=None, message=None): + self.fset = fset + self.fget = fget + self.fdel = fdel + self.stacklevel = stacklevel + self.category = category + self.version = version + self.removal_version = removal_version + self.message = message + if doc is None and inspect.isfunction(fget): + doc = getattr(fget, '__doc__', None) + self._message_cache = {} + self.__doc__ = doc + + def _fetch_message_from_cache(self, kind): + try: + out_message = self._message_cache[kind] + except KeyError: + prefix_tpl = self._PROPERTY_GONE_TPLS[kind] + prefix = prefix_tpl % _fetch_first_result( + self.fget, self.fset, self.fdel, _get_qualified_name, + value_not_found="???") + out_message = _utils.generate_message( + prefix, message=self.message, version=self.version, + removal_version=self.removal_version) + self._message_cache[kind] = out_message + return out_message + + def __call__(self, fget, **kwargs): + self.fget = fget + self.message = kwargs.get('message', self.message) + self.version = kwargs.get('version', self.version) + self.removal_version = kwargs.get('removal_version', + self.removal_version) + self.stacklevel = kwargs.get('stacklevel', self.stacklevel) + self.category = kwargs.get('category', self.category) + self.__doc__ = kwargs.get('doc', + getattr(fget, '__doc__', self.__doc__)) + # Regenerate all the messages... 
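+        # (the kwargs above may have changed version/message/removal_version,
+        # so any previously generated deprecation strings would be stale)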
+ self._message_cache.clear() + return self + + def __delete__(self, obj): + if self.fdel is None: + raise AttributeError("can't delete attribute") + out_message = self._fetch_message_from_cache('delete') + _utils.deprecation(out_message, stacklevel=self.stacklevel, + category=self.category) + self.fdel(obj) + + def __set__(self, obj, value): + if self.fset is None: + raise AttributeError("can't set attribute") + out_message = self._fetch_message_from_cache('set') + _utils.deprecation(out_message, stacklevel=self.stacklevel, + category=self.category) + self.fset(obj, value) + + def __get__(self, obj, value): + if obj is None: + return self + if self.fget is None: + raise AttributeError("unreadable attribute") + out_message = self._fetch_message_from_cache('get') + _utils.deprecation(out_message, stacklevel=self.stacklevel, + category=self.category) + return self.fget(obj) + + def getter(self, fget): + o = type(self)(fget, self.fset, self.fdel, self.__doc__) + o.message = self.message + o.version = self.version + o.stacklevel = self.stacklevel + o.removal_version = self.removal_version + o.category = self.category + return o + + def setter(self, fset): + o = type(self)(self.fget, fset, self.fdel, self.__doc__) + o.message = self.message + o.version = self.version + o.stacklevel = self.stacklevel + o.removal_version = self.removal_version + o.category = self.category + return o + + def deleter(self, fdel): + o = type(self)(self.fget, self.fset, fdel, self.__doc__) + o.message = self.message + o.version = self.version + o.stacklevel = self.stacklevel + o.removal_version = self.removal_version + o.category = self.category + return o + + +def remove(f=None, message=None, version=None, removal_version=None, + stacklevel=3, category=None): + """Decorates a function, method, or class to emit a deprecation warning + + Due to limitations of the wrapt library (and python) itself, if this + is applied to subclasses of metaclasses then it likely will not work + as expected. More information can be found at bug #1520397 to see if + this situation affects your usage of this *universal* decorator, for + this specific scenario please use :py:func:`.removed_class` instead. + + :param str message: A message to include in the deprecation warning + :param str version: Specify what version the removed function is present in + :param str removal_version: What version the function will be removed. If + '?' 
is used this implies an undefined future + version + :param int stacklevel: How many entries deep in the call stack before + ignoring + :param type category: warnings message category (this defaults to + ``DeprecationWarning`` when none is provided) + """ + if f is None: + return functools.partial(remove, message=message, + version=version, + removal_version=removal_version, + stacklevel=stacklevel, + category=category) + + @wrapt.decorator + def wrapper(f, instance, args, kwargs): + qualified, f_name = _utils.get_qualified_name(f) + if qualified: + if inspect.isclass(f): + prefix_pre = "Using class" + thing_post = '' + else: + prefix_pre = "Using function/method" + thing_post = '()' + if not qualified: + prefix_pre = "Using function/method" + base_name = None + if instance is None: + # Decorator was used on a class + if inspect.isclass(f): + prefix_pre = "Using class" + thing_post = '' + module_name = _get_qualified_name(inspect.getmodule(f)) + if module_name == '__main__': + f_name = _utils.get_class_name( + f, fully_qualified=False) + else: + f_name = _utils.get_class_name( + f, fully_qualified=True) + # Decorator was a used on a function + else: + thing_post = '()' + module_name = _get_qualified_name(inspect.getmodule(f)) + if module_name != '__main__': + f_name = _utils.get_callable_name(f) + # Decorator was used on a classmethod or instancemethod + else: + thing_post = '()' + base_name = _utils.get_class_name(instance, + fully_qualified=False) + if base_name: + thing_name = ".".join([base_name, f_name]) + else: + thing_name = f_name + else: + thing_name = f_name + if thing_post: + thing_name += thing_post + prefix = prefix_pre + " '%s' is deprecated" % (thing_name) + out_message = _utils.generate_message( + prefix, + version=version, + removal_version=removal_version, + message=message) + _utils.deprecation(out_message, + stacklevel=stacklevel, category=category) + return f(*args, **kwargs) + return wrapper(f) + + +def removed_kwarg(old_name, message=None, + version=None, removal_version=None, stacklevel=3, + category=None): + """Decorates a kwarg accepting function to deprecate a removed kwarg.""" + + prefix = "Using the '%s' argument is deprecated" % old_name + out_message = _utils.generate_message( + prefix, postfix=None, message=message, version=version, + removal_version=removal_version) + + @wrapt.decorator + def wrapper(f, instance, args, kwargs): + if old_name in kwargs: + _utils.deprecation(out_message, + stacklevel=stacklevel, category=category) + return f(*args, **kwargs) + + return wrapper + + +def removed_class(cls_name, replacement=None, message=None, + version=None, removal_version=None, stacklevel=3, + category=None): + """Decorates a class to denote that it will be removed at some point.""" + + def _wrap_it(old_init, out_message): + + @six.wraps(old_init, assigned=_utils.get_assigned(old_init)) + def new_init(self, *args, **kwargs): + _utils.deprecation(out_message, stacklevel=stacklevel, + category=category) + return old_init(self, *args, **kwargs) + + return new_init + + def _check_it(cls): + if not inspect.isclass(cls): + _qual, type_name = _utils.get_qualified_name(type(cls)) + raise TypeError("Unexpected class type '%s' (expected" + " class type only)" % type_name) + + def _cls_decorator(cls): + _check_it(cls) + out_message = _utils.generate_message( + "Using class '%s' (either directly or via inheritance)" + " is deprecated" % cls_name, postfix=None, message=message, + version=version, removal_version=removal_version) + cls.__init__ = _wrap_it(cls.__init__, 
out_message)
+        return cls
+
+    return _cls_decorator
+
+
+def removed_module(module, replacement=None, message=None,
+                   version=None, removal_version=None, stacklevel=3,
+                   category=None):
+    """Helper to be called inside a module to emit a deprecation warning
+
+    :param str replacement: A location (or information about) of any potential
+                            replacement for the removed module (if applicable)
+    :param str message: A message to include in the deprecation warning
+    :param str version: Specify what version the removed module is present in
+    :param str removal_version: What version the module will be removed. If
+                                '?' is used this implies an undefined future
+                                version
+    :param int stacklevel: How many entries deep in the call stack before
+                           ignoring
+    :param type category: warnings message category (this defaults to
+                          ``DeprecationWarning`` when none is provided)
+    """
+    if inspect.ismodule(module):
+        module_name = _get_qualified_name(module)
+    elif isinstance(module, six.string_types):
+        module_name = module
+    else:
+        _qual, type_name = _utils.get_qualified_name(type(module))
+        raise TypeError("Unexpected module type '%s' (expected string or"
+                        " module type only)" % type_name)
+    prefix = "The '%s' module usage is deprecated" % module_name
+    if replacement:
+        postfix = ", please use %s instead" % replacement
+    else:
+        postfix = None
+    out_message = _utils.generate_message(prefix,
+                                          postfix=postfix, message=message,
+                                          version=version,
+                                          removal_version=removal_version)
+    _utils.deprecation(out_message,
+                       stacklevel=stacklevel, category=category)
diff --git a/ddtrace/vendor/debtcollector/renames.py b/ddtrace/vendor/debtcollector/renames.py
new file mode 100644
index 0000000000..d31853aa6f
--- /dev/null
+++ b/ddtrace/vendor/debtcollector/renames.py
@@ -0,0 +1,45 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from .. import wrapt
+
+from . 
import _utils + +_KWARG_RENAMED_POSTFIX_TPL = ", please use the '%s' argument instead" +_KWARG_RENAMED_PREFIX_TPL = "Using the '%s' argument is deprecated" + + +def renamed_kwarg(old_name, new_name, message=None, + version=None, removal_version=None, stacklevel=3, + category=None, replace=False): + """Decorates a kwarg accepting function to deprecate a renamed kwarg.""" + + prefix = _KWARG_RENAMED_PREFIX_TPL % old_name + postfix = _KWARG_RENAMED_POSTFIX_TPL % new_name + out_message = _utils.generate_message( + prefix, postfix=postfix, message=message, version=version, + removal_version=removal_version) + + @wrapt.decorator + def decorator(wrapped, instance, args, kwargs): + if old_name in kwargs: + _utils.deprecation(out_message, + stacklevel=stacklevel, category=category) + if replace: + kwargs.setdefault(new_name, kwargs.pop(old_name)) + return wrapped(*args, **kwargs) + + return decorator diff --git a/ddtrace/vendor/debtcollector/updating.py b/ddtrace/vendor/debtcollector/updating.py new file mode 100644 index 0000000000..d89eafd206 --- /dev/null +++ b/ddtrace/vendor/debtcollector/updating.py @@ -0,0 +1,68 @@ +# -*- coding: utf-8 -*- + +# Copyright (C) 2015 Yahoo! Inc. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from .. import six +from .. import wrapt +if six.PY3: + import inspect + Parameter = inspect.Parameter + Signature = inspect.Signature + get_signature = inspect.signature +else: + # Provide an equivalent but use funcsigs instead... + import funcsigs + Parameter = funcsigs.Parameter + Signature = funcsigs.Signature + get_signature = funcsigs.signature + +from . import _utils + +_KWARG_UPDATED_POSTFIX_TPL = (', please update the code to explicitly set %s ' + 'as the value') +_KWARG_UPDATED_PREFIX_TPL = ('The %s argument is changing its default value ' + 'to %s') + + +def updated_kwarg_default_value(name, old_value, new_value, message=None, + version=None, stacklevel=3, + category=FutureWarning): + + """Decorates a kwarg accepting function to change the default value""" + + prefix = _KWARG_UPDATED_PREFIX_TPL % (name, new_value) + postfix = _KWARG_UPDATED_POSTFIX_TPL % old_value + out_message = _utils.generate_message( + prefix, postfix=postfix, message=message, version=version) + + def decorator(f): + sig = get_signature(f) + varnames = list(six.iterkeys(sig.parameters)) + + @wrapt.decorator + def wrapper(wrapped, instance, args, kwargs): + explicit_params = set( + varnames[:len(args)] + list(kwargs.keys()) + ) + allparams = set(varnames) + default_params = set(allparams - explicit_params) + if name in default_params: + _utils.deprecation(out_message, + stacklevel=stacklevel, category=category) + return wrapped(*args, **kwargs) + + return wrapper(f) + + return decorator diff --git a/ddtrace/vendor/dogstatsd/__init__.py b/ddtrace/vendor/dogstatsd/__init__.py new file mode 100644 index 0000000000..0e93d75234 --- /dev/null +++ b/ddtrace/vendor/dogstatsd/__init__.py @@ -0,0 +1,28 @@ +""" +Copyright (c) 2015, Datadog +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in the + documentation and/or other materials provided with the distribution. + * Neither the name of Datadog nor the + names of its contributors may be used to endorse or promote products + derived from this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL BE LIABLE FOR ANY +DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND +ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +""" + +from .base import DogStatsd, statsd # noqa diff --git a/ddtrace/vendor/dogstatsd/base.py b/ddtrace/vendor/dogstatsd/base.py new file mode 100644 index 0000000000..f0167cf8d6 --- /dev/null +++ b/ddtrace/vendor/dogstatsd/base.py @@ -0,0 +1,425 @@ +#!/usr/bin/env python +""" +DogStatsd is a Python client for DogStatsd, a Statsd fork for Datadog. +""" +# stdlib +from random import random +import logging +import os +import socket +from threading import Lock + +# datadog +from .context import TimedContextManagerDecorator +from .route import get_default_route +from .compat import text + +# Logging +log = logging.getLogger('datadog.dogstatsd') + +# Default config +DEFAULT_HOST = 'localhost' +DEFAULT_PORT = 8125 + +# Tag name of entity_id +ENTITY_ID_TAG_NAME = "dd.internal.entity_id" + + +class DogStatsd(object): + OK, WARNING, CRITICAL, UNKNOWN = (0, 1, 2, 3) + + def __init__(self, host=DEFAULT_HOST, port=DEFAULT_PORT, max_buffer_size=50, namespace=None, + constant_tags=None, use_ms=False, use_default_route=False, + socket_path=None): + """ + Initialize a DogStatsd object. + + >>> statsd = DogStatsd() + + :envvar DD_AGENT_HOST: the host of the DogStatsd server. + If set, it overrides default value. + :type DD_AGENT_HOST: string + + :envvar DD_DOGSTATSD_PORT: the port of the DogStatsd server. + If set, it overrides default value. + :type DD_DOGSTATSD_PORT: integer + + :param host: the host of the DogStatsd server. + :type host: string + + :param port: the port of the DogStatsd server. 
+ :type port: integer + + :param max_buffer_size: Maximum number of metrics to buffer before sending to the server + if sending metrics in batch + :type max_buffer_size: integer + + :param namespace: Namespace to prefix all metric names + :type namespace: string + + :param constant_tags: Tags to attach to all metrics + :type constant_tags: list of strings + + :param use_ms: Report timed values in milliseconds instead of seconds (default False) + :type use_ms: boolean + + :envvar DATADOG_TAGS: Tags to attach to every metric reported by dogstatsd client + :type DATADOG_TAGS: list of strings + + :envvar DD_ENTITY_ID: Tag to identify the client entity. + :type DD_ENTITY_ID: string + + :param use_default_route: Dynamically set the DogStatsd host to the default route + (Useful when running the client in a container) (Linux only) + :type use_default_route: boolean + + :param socket_path: Communicate with dogstatsd through a UNIX socket instead of + UDP. If set, disables UDP transmission (Linux only) + :type socket_path: string + """ + + self.lock = Lock() + + # Check host and port env vars + agent_host = os.environ.get('DD_AGENT_HOST') + if agent_host and host == DEFAULT_HOST: + host = agent_host + + dogstatsd_port = os.environ.get('DD_DOGSTATSD_PORT') + if dogstatsd_port and port == DEFAULT_PORT: + try: + port = int(dogstatsd_port) + except ValueError: + log.warning("Port number provided in DD_DOGSTATSD_PORT env var is not an integer: \ + %s, using %s as port number", dogstatsd_port, port) + + # Connection + if socket_path is not None: + self.socket_path = socket_path + self.host = None + self.port = None + else: + self.socket_path = None + self.host = self.resolve_host(host, use_default_route) + self.port = int(port) + + # Socket + self.socket = None + self.max_buffer_size = max_buffer_size + self._send = self._send_to_server + self.encoding = 'utf-8' + + # Options + env_tags = [tag for tag in os.environ.get('DATADOG_TAGS', '').split(',') if tag] + if constant_tags is None: + constant_tags = [] + self.constant_tags = constant_tags + env_tags + entity_id = os.environ.get('DD_ENTITY_ID') + if entity_id: + entity_tag = '{name}:{value}'.format(name=ENTITY_ID_TAG_NAME, value=entity_id) + self.constant_tags.append(entity_tag) + if namespace is not None: + namespace = text(namespace) + self.namespace = namespace + self.use_ms = use_ms + + def __enter__(self): + self.open_buffer(self.max_buffer_size) + return self + + def __exit__(self, type, value, traceback): + self.close_buffer() + + @staticmethod + def resolve_host(host, use_default_route): + """ + Resolve the DogStatsd host. + + Args: + host (string): host + use_default_route (bool): use the system default route as host + (overrides the `host` parameter) + """ + if not use_default_route: + return host + + return get_default_route() + + def get_socket(self): + """ + Return a connected socket. + + Note: connect the socket before assigning it to the class instance to + avoid bad thread race conditions. + """ + with self.lock: + if not self.socket: + if self.socket_path is not None: + sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) + sock.connect(self.socket_path) + sock.setblocking(0) + self.socket = sock + else: + sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + sock.connect((self.host, self.port)) + self.socket = sock + + return self.socket + + def open_buffer(self, max_buffer_size=50): + """ + Open a buffer to send a batch of metrics in one packet. + + You can also use this as a context manager. 
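+        (exiting the ``with`` block flushes any buffered metrics via
+        ``close_buffer()``)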
+ + >>> with DogStatsd() as batch: + >>> batch.gauge('users.online', 123) + >>> batch.gauge('active.connections', 1001) + """ + self.max_buffer_size = max_buffer_size + self.buffer = [] + self._send = self._send_to_buffer + + def close_buffer(self): + """ + Flush the buffer and switch back to single metric packets. + """ + self._send = self._send_to_server + + if self.buffer: + # Only send packets if there are packets to send + self._flush_buffer() + + def gauge(self, metric, value, tags=None, sample_rate=1): + """ + Record the value of a gauge, optionally setting a list of tags and a + sample rate. + + >>> statsd.gauge('users.online', 123) + >>> statsd.gauge('active.connections', 1001, tags=["protocol:http"]) + """ + return self._report(metric, 'g', value, tags, sample_rate) + + def increment(self, metric, value=1, tags=None, sample_rate=1): + """ + Increment a counter, optionally setting a value, tags and a sample + rate. + + >>> statsd.increment('page.views') + >>> statsd.increment('files.transferred', 124) + """ + self._report(metric, 'c', value, tags, sample_rate) + + def decrement(self, metric, value=1, tags=None, sample_rate=1): + """ + Decrement a counter, optionally setting a value, tags and a sample + rate. + + >>> statsd.decrement('files.remaining') + >>> statsd.decrement('active.connections', 2) + """ + metric_value = -value if value else value + self._report(metric, 'c', metric_value, tags, sample_rate) + + def histogram(self, metric, value, tags=None, sample_rate=1): + """ + Sample a histogram value, optionally setting tags and a sample rate. + + >>> statsd.histogram('uploaded.file.size', 1445) + >>> statsd.histogram('album.photo.count', 26, tags=["gender:female"]) + """ + self._report(metric, 'h', value, tags, sample_rate) + + def distribution(self, metric, value, tags=None, sample_rate=1): + """ + Send a global distribution value, optionally setting tags and a sample rate. + + >>> statsd.distribution('uploaded.file.size', 1445) + >>> statsd.distribution('album.photo.count', 26, tags=["gender:female"]) + + This is a beta feature that must be enabled specifically for your organization. + """ + self._report(metric, 'd', value, tags, sample_rate) + + def timing(self, metric, value, tags=None, sample_rate=1): + """ + Record a timing, optionally setting tags and a sample rate. + + >>> statsd.timing("query.response.time", 1234) + """ + self._report(metric, 'ms', value, tags, sample_rate) + + def timed(self, metric=None, tags=None, sample_rate=1, use_ms=None): + """ + A decorator or context manager that will measure the distribution of a + function's/context's run time. Optionally specify a list of tags or a + sample rate. If the metric is not defined as a decorator, the module + name and function name will be used. The metric is required as a context + manager. + :: + + @statsd.timed('user.query.time', sample_rate=0.5) + def get_user(user_id): + # Do what you need to ... + pass + + # Is equivalent to ... + with statsd.timed('user.query.time', sample_rate=0.5): + # Do what you need to ... + pass + + # Is equivalent to ... + start = time.time() + try: + get_user(user_id) + finally: + statsd.timing('user.query.time', time.time() - start) + """ + return TimedContextManagerDecorator(self, metric, tags, sample_rate, use_ms) + + def set(self, metric, value, tags=None, sample_rate=1): + """ + Sample a set value. + + >>> statsd.set('visitors.uniques', 999) + """ + self._report(metric, 's', value, tags, sample_rate) + + def close_socket(self): + """ + Closes connected socket if connected. 
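+
+        A new socket will be created lazily by ``get_socket()`` on the next
+        send.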
+ """ + if self.socket: + self.socket.close() + self.socket = None + + def _report(self, metric, metric_type, value, tags, sample_rate): + """ + Create a metric packet and send it. + + More information about the packets' format: http://docs.datadoghq.com/guides/dogstatsd/ + """ + if value is None: + return + + if sample_rate != 1 and random() > sample_rate: + return + + # Resolve the full tag list + tags = self._add_constant_tags(tags) + + # Create/format the metric packet + payload = "%s%s:%s|%s%s%s" % ( + (self.namespace + ".") if self.namespace else "", + metric, + value, + metric_type, + ("|@" + text(sample_rate)) if sample_rate != 1 else "", + ("|#" + ",".join(tags)) if tags else "", + ) + + # Send it + self._send(payload) + + def _send_to_server(self, packet): + try: + # If set, use socket directly + (self.socket or self.get_socket()).send(packet.encode(self.encoding)) + except socket.timeout: + # dogstatsd is overflowing, drop the packets (mimicks the UDP behaviour) + return + except (socket.error, socket.herror, socket.gaierror) as se: + log.warning("Error submitting packet: {}, dropping the packet and closing the socket".format(se)) + self.close_socket() + except Exception as e: + log.error("Unexpected error: %s", str(e)) + return + + def _send_to_buffer(self, packet): + self.buffer.append(packet) + if len(self.buffer) >= self.max_buffer_size: + self._flush_buffer() + + def _flush_buffer(self): + self._send_to_server("\n".join(self.buffer)) + self.buffer = [] + + def _escape_event_content(self, string): + return string.replace('\n', '\\n') + + def _escape_service_check_message(self, string): + return string.replace('\n', '\\n').replace('m:', 'm\\:') + + def event(self, title, text, alert_type=None, aggregation_key=None, + source_type_name=None, date_happened=None, priority=None, + tags=None, hostname=None): + """ + Send an event. Attributes are the same as the Event API. + http://docs.datadoghq.com/api/ + + >>> statsd.event('Man down!', 'This server needs assistance.') + >>> statsd.event('The web server restarted', 'The web server is up again', alert_type='success') # NOQA + """ + title = self._escape_event_content(title) + text = self._escape_event_content(text) + + # Append all client level tags to every event + tags = self._add_constant_tags(tags) + + string = u'_e{%d,%d}:%s|%s' % (len(title), len(text), title, text) + if date_happened: + string = '%s|d:%d' % (string, date_happened) + if hostname: + string = '%s|h:%s' % (string, hostname) + if aggregation_key: + string = '%s|k:%s' % (string, aggregation_key) + if priority: + string = '%s|p:%s' % (string, priority) + if source_type_name: + string = '%s|s:%s' % (string, source_type_name) + if alert_type: + string = '%s|t:%s' % (string, alert_type) + if tags: + string = '%s|#%s' % (string, ','.join(tags)) + + if len(string) > 8 * 1024: + raise Exception(u'Event "%s" payload is too big (more than 8KB), ' + 'event discarded' % title) + + self._send(string) + + def service_check(self, check_name, status, tags=None, timestamp=None, + hostname=None, message=None): + """ + Send a service check run. 
+ + >>> statsd.service_check('my_service.check_name', DogStatsd.WARNING) + """ + message = self._escape_service_check_message(message) if message is not None else '' + + string = u'_sc|{0}|{1}'.format(check_name, status) + + # Append all client level tags to every status check + tags = self._add_constant_tags(tags) + + if timestamp: + string = u'{0}|d:{1}'.format(string, timestamp) + if hostname: + string = u'{0}|h:{1}'.format(string, hostname) + if tags: + string = u'{0}|#{1}'.format(string, ','.join(tags)) + if message: + string = u'{0}|m:{1}'.format(string, message) + + self._send(string) + + def _add_constant_tags(self, tags): + if self.constant_tags: + if tags: + return tags + self.constant_tags + else: + return self.constant_tags + return tags + + +statsd = DogStatsd() diff --git a/ddtrace/vendor/dogstatsd/compat.py b/ddtrace/vendor/dogstatsd/compat.py new file mode 100644 index 0000000000..bff3899ae8 --- /dev/null +++ b/ddtrace/vendor/dogstatsd/compat.py @@ -0,0 +1,107 @@ +# flake8: noqa +""" +Imports for compatibility with Python 2, Python 3 and Google App Engine. +""" +from functools import wraps +import logging +import socket +import sys + + +def _is_py_version_higher_than(major, minor=0): + """ + Assert that the Python version is higher than `$maj.$min`. + """ + return sys.version_info >= (major, minor) + + +def is_p3k(): + """ + Assert that Python is version 3 or higher. + """ + return _is_py_version_higher_than(3) + + +def is_higher_py35(): + """ + Assert that Python is version 3.5 or higher. + """ + return _is_py_version_higher_than(3, 5) + + +get_input = input + +# Python 3.x +if is_p3k(): + from io import StringIO + import builtins + import configparser + import urllib.request as url_lib, urllib.error, urllib.parse + + imap = map + text = str + + def iteritems(d): + return iter(d.items()) + + def iternext(iter): + return next(iter) + + +# Python 2.x +else: + import __builtin__ as builtins + from cStringIO import StringIO + from itertools import imap + import ConfigParser as configparser + import urllib2 as url_lib + + get_input = raw_input + text = unicode + + def iteritems(d): + return d.iteritems() + + def iternext(iter): + return iter.next() + + +# Python > 3.5 +if is_higher_py35(): + from asyncio import iscoroutinefunction + +# Others +else: + def iscoroutinefunction(*args, **kwargs): + return False + +# Optional requirements +try: + from UserDict import IterableUserDict +except ImportError: + from collections import UserDict as IterableUserDict + +try: + from configparser import ConfigParser +except ImportError: + from ConfigParser import ConfigParser + +try: + from urllib.parse import urlparse +except ImportError: + from urlparse import urlparse + +try: + import pkg_resources as pkg +except ImportError: + pkg = None + +#Python 2.6.x +try: + from logging import NullHandler +except ImportError: + from logging import Handler + + class NullHandler(Handler): + def emit(self, record): + pass diff --git a/ddtrace/vendor/dogstatsd/context.py b/ddtrace/vendor/dogstatsd/context.py new file mode 100644 index 0000000000..f4e7a57a86 --- /dev/null +++ b/ddtrace/vendor/dogstatsd/context.py @@ -0,0 +1,79 @@ +# stdlib +from functools import wraps +from time import time + +# datadog +from .compat import ( + is_higher_py35, + iscoroutinefunction, +) + + +if is_higher_py35(): + from .context_async import _get_wrapped_co +else: + def _get_wrapped_co(self, func): + raise NotImplementedError( + u"Decorator `timed` compatibility with coroutine functions" + u" requires Python 3.5 or higher." 
+        )
+
+
+class TimedContextManagerDecorator(object):
+    """
+    A context manager and a decorator which will report the elapsed time in
+    the context OR in a function call.
+    """
+    def __init__(self, statsd, metric=None, tags=None, sample_rate=1, use_ms=None):
+        self.statsd = statsd
+        self.metric = metric
+        self.tags = tags
+        self.sample_rate = sample_rate
+        self.use_ms = use_ms
+        self.elapsed = None
+
+    def __call__(self, func):
+        """
+        Decorator which returns the elapsed time of the function call.
+
+        Defaults to the function name if a metric was not provided.
+        """
+        if not self.metric:
+            self.metric = '%s.%s' % (func.__module__, func.__name__)
+
+        # Coroutines
+        if iscoroutinefunction(func):
+            return _get_wrapped_co(self, func)
+
+        # Others
+        @wraps(func)
+        def wrapped(*args, **kwargs):
+            start = time()
+            try:
+                return func(*args, **kwargs)
+            finally:
+                self._send(start)
+        return wrapped
+
+    def __enter__(self):
+        if not self.metric:
+            raise TypeError("Cannot use timed without a metric!")
+        self._start = time()
+        return self
+
+    def __exit__(self, type, value, traceback):
+        # Report the elapsed time of the context manager.
+        self._send(self._start)
+
+    def _send(self, start):
+        elapsed = time() - start
+        use_ms = self.use_ms if self.use_ms is not None else self.statsd.use_ms
+        elapsed = int(round(1000 * elapsed)) if use_ms else elapsed
+        self.statsd.timing(self.metric, elapsed, self.tags, self.sample_rate)
+        self.elapsed = elapsed
+
+    def start(self):
+        self.__enter__()
+
+    def stop(self):
+        self.__exit__(None, None, None)
diff --git a/ddtrace/vendor/dogstatsd/context_async.py b/ddtrace/vendor/dogstatsd/context_async.py
new file mode 100644
index 0000000000..97debc881f
--- /dev/null
+++ b/ddtrace/vendor/dogstatsd/context_async.py
@@ -0,0 +1,23 @@
+"""
+Decorator `timed` for coroutine methods.
+
+Warning: requires Python 3.5 or higher.
+"""
+# stdlib
+from functools import wraps
+from time import time
+
+
+def _get_wrapped_co(self, func):
+    """
+    `timed` wrapper for coroutine methods.
+    """
+    @wraps(func)
+    async def wrapped_co(*args, **kwargs):
+        start = time()
+        try:
+            result = await func(*args, **kwargs)
+            return result
+        finally:
+            self._send(start)
+    return wrapped_co
diff --git a/ddtrace/vendor/dogstatsd/route.py b/ddtrace/vendor/dogstatsd/route.py
new file mode 100644
index 0000000000..59c2396748
--- /dev/null
+++ b/ddtrace/vendor/dogstatsd/route.py
@@ -0,0 +1,38 @@
+"""
+Helper(s) to resolve the system's default interface.
+"""
+# stdlib
+import socket
+import struct
+
+
+class UnresolvableDefaultRoute(Exception):
+    """
+    Unable to resolve the system's default route.
+    """
+
+
+def get_default_route():
+    """
+    Return the system default interface using the proc filesystem.
+
+    Returns:
+        string: default route
+
+    Raises:
+        `NotImplementedError`: No proc filesystem is found (non-Linux systems)
+        `StopIteration`: No default route found
+    """
+    try:
+        with open('/proc/net/route') as f:
+            for line in f.readlines():
+                fields = line.strip().split()
+                if fields[1] == '00000000':
+                    return socket.inet_ntoa(struct.pack('<L', int(fields[2], 16)))
diff --git a/ddtrace/vendor/monotonic/__init__.py b/ddtrace/vendor/monotonic/__init__.py
new file mode 100644
--- /dev/null
+++ b/ddtrace/vendor/monotonic/__init__.py
+# -*- coding: utf-8 -*-
+"""
+  monotonic
+  ~~~~~~~~~
+
+  This module provides a ``monotonic()`` function which returns the
+  value (in fractional seconds) of a clock which never goes backwards.
+
+  Copyright 2014, 2015, 2016 Ori Livneh <ori@wikimedia.org>
+
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+      http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License.
+
+"""
+import time
+
+
+__all__ = ('monotonic',)
+
+
+try:
+    monotonic = time.monotonic
+except AttributeError:
+    import ctypes
+    import ctypes.util
+    import os
+    import sys
+    import threading
+    try:
+        if sys.platform == 'darwin':  # OS X, iOS
+            # See Technical Q&A QA1398 of the Mac Developer Library:
+            #  <https://developer.apple.com/library/mac/qa/qa1398/>
+            libc = ctypes.CDLL('/usr/lib/libc.dylib', use_errno=True)
+
+            class mach_timebase_info_data_t(ctypes.Structure):
+                """System timebase info. Defined in <mach/mach_time.h>."""
+                _fields_ = (('numer', ctypes.c_uint32),
+                            ('denom', ctypes.c_uint32))
+
+            mach_absolute_time = libc.mach_absolute_time
+            mach_absolute_time.restype = ctypes.c_uint64
+
+            timebase = mach_timebase_info_data_t()
+            libc.mach_timebase_info(ctypes.byref(timebase))
+            ticks_per_second = timebase.numer / timebase.denom * 1.0e9
+
+            def monotonic():
+                """Monotonic clock, cannot go backward."""
+                return mach_absolute_time() / ticks_per_second
+
+        elif sys.platform.startswith('win32') or sys.platform.startswith('cygwin'):
+            if sys.platform.startswith('cygwin'):
+                # Note: cygwin implements clock_gettime (CLOCK_MONOTONIC = 4) since
+                # version 1.7.6. Using raw WinAPI for maximum version compatibility.
+
+                # Ugly hack using the wrong calling convention (in 32-bit mode)
+                # because ctypes has no windll under cygwin (and it also seems that
+                # the code letting you select stdcall in _ctypes doesn't exist under
+                # the preprocessor definitions relevant to cygwin).
+                # This is 'safe' because:
+                # 1. The ABI of GetTickCount and GetTickCount64 is identical for
+                #    both calling conventions because they both have no parameters.
+                # 2. libffi masks the problem because after making the call it doesn't
+                #    touch anything through esp and epilogue code restores a correct
+                #    esp from ebp afterwards.
+                try:
+                    kernel32 = ctypes.cdll.kernel32
+                except OSError:  # 'No such file or directory'
+                    kernel32 = ctypes.cdll.LoadLibrary('kernel32.dll')
+            else:
+                kernel32 = ctypes.windll.kernel32
+
+            GetTickCount64 = getattr(kernel32, 'GetTickCount64', None)
+            if GetTickCount64:
+                # Windows Vista / Windows Server 2008 or newer.
+                GetTickCount64.restype = ctypes.c_ulonglong
+
+                def monotonic():
+                    """Monotonic clock, cannot go backward."""
+                    return GetTickCount64() / 1000.0
+
+            else:
+                # Before Windows Vista.
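+                # GetTickCount returns an unsigned 32-bit millisecond counter
+                # that wraps roughly every 49.7 days (2**32 ms); the
+                # lock-protected bookkeeping below counts wraparounds so the
+                # clock stays monotonic across them.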
+ GetTickCount = kernel32.GetTickCount + GetTickCount.restype = ctypes.c_uint32 + + get_tick_count_lock = threading.Lock() + get_tick_count_last_sample = 0 + get_tick_count_wraparounds = 0 + + def monotonic(): + """Monotonic clock, cannot go backward.""" + global get_tick_count_last_sample + global get_tick_count_wraparounds + + with get_tick_count_lock: + current_sample = GetTickCount() + if current_sample < get_tick_count_last_sample: + get_tick_count_wraparounds += 1 + get_tick_count_last_sample = current_sample + + final_milliseconds = get_tick_count_wraparounds << 32 + final_milliseconds += get_tick_count_last_sample + return final_milliseconds / 1000.0 + + else: + try: + clock_gettime = ctypes.CDLL(ctypes.util.find_library('c'), + use_errno=True).clock_gettime + except Exception: + clock_gettime = ctypes.CDLL(ctypes.util.find_library('rt'), + use_errno=True).clock_gettime + + class timespec(ctypes.Structure): + """Time specification, as described in clock_gettime(3).""" + _fields_ = (('tv_sec', ctypes.c_long), + ('tv_nsec', ctypes.c_long)) + + if sys.platform.startswith('linux'): + CLOCK_MONOTONIC = 1 + elif sys.platform.startswith('freebsd'): + CLOCK_MONOTONIC = 4 + elif sys.platform.startswith('sunos5'): + CLOCK_MONOTONIC = 4 + elif 'bsd' in sys.platform: + CLOCK_MONOTONIC = 3 + elif sys.platform.startswith('aix'): + CLOCK_MONOTONIC = ctypes.c_longlong(10) + + def monotonic(): + """Monotonic clock, cannot go backward.""" + ts = timespec() + if clock_gettime(CLOCK_MONOTONIC, ctypes.pointer(ts)): + errno = ctypes.get_errno() + raise OSError(errno, os.strerror(errno)) + return ts.tv_sec + ts.tv_nsec / 1.0e9 + + # Perform a sanity-check. + if monotonic() - monotonic() > 0: + raise ValueError('monotonic() is not monotonic!') + + except Exception as e: + raise RuntimeError('no suitable implementation for this system: ' + repr(e)) \ No newline at end of file diff --git a/ddtrace/vendor/msgpack/__init__.py b/ddtrace/vendor/msgpack/__init__.py new file mode 100644 index 0000000000..4ad9c1a5e1 --- /dev/null +++ b/ddtrace/vendor/msgpack/__init__.py @@ -0,0 +1,65 @@ +# coding: utf-8 +from ._version import version +from .exceptions import * + +from collections import namedtuple + + +class ExtType(namedtuple('ExtType', 'code data')): + """ExtType represents ext type in msgpack.""" + def __new__(cls, code, data): + if not isinstance(code, int): + raise TypeError("code must be int") + if not isinstance(data, bytes): + raise TypeError("data must be bytes") + if not 0 <= code <= 127: + raise ValueError("code must be 0~127") + return super(ExtType, cls).__new__(cls, code, data) + + +import os +if os.environ.get('MSGPACK_PUREPYTHON'): + from .fallback import Packer, unpackb, Unpacker +else: + try: + from ._cmsgpack import Packer, unpackb, Unpacker + except ImportError: + from .fallback import Packer, unpackb, Unpacker + + +def pack(o, stream, **kwargs): + """ + Pack object `o` and write it to `stream` + + See :class:`Packer` for options. + """ + packer = Packer(**kwargs) + stream.write(packer.pack(o)) + + +def packb(o, **kwargs): + """ + Pack object `o` and return packed bytes + + See :class:`Packer` for options. + """ + return Packer(**kwargs).pack(o) + + +def unpack(stream, **kwargs): + """ + Unpack an object from `stream`. + + Raises `ExtraData` when `stream` contains extra bytes. + See :class:`Unpacker` for options. + """ + data = stream.read() + return unpackb(data, **kwargs) + + +# alias for compatibility to simplejson/marshal/pickle. 
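+# e.g. `unpackb(packb([1, 2]))` returns `[1, 2]`, mirroring
+# `pickle.loads(pickle.dumps([1, 2]))`.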
+load = unpack +loads = unpackb + +dump = pack +dumps = packb diff --git a/ddtrace/vendor/msgpack/_cmsgpack.cpp b/ddtrace/vendor/msgpack/_cmsgpack.cpp new file mode 100644 index 0000000000..c5506e4b1b --- /dev/null +++ b/ddtrace/vendor/msgpack/_cmsgpack.cpp @@ -0,0 +1,15777 @@ +/* Generated by Cython 0.29.6 */ + +#define PY_SSIZE_T_CLEAN +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02060000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.6+ or Python 3.3+. +#else +#define CYTHON_ABI "0_29_6" +#define CYTHON_HEX_VERSION 0x001D06F0 +#define CYTHON_FUTURE_DIVISION 1 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #if PY_VERSION_HEX >= 0x02070000 + #define HAVE_LONG_LONG + #endif +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#ifdef PYPY_VERSION + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#elif defined(PYSTON_VERSION) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 
+ #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_PYSTON 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #elif !defined(CYTHON_USE_PYTYPE_LOOKUP) + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #if PY_VERSION_HEX < 0x02070000 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #elif !defined(CYTHON_USE_PYLONG_INTERNALS) + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT (PY_VERSION_HEX >= 0x03050000) + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1) + #endif + #ifndef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX >= 0x030600B1) + #endif + #ifndef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK (PY_VERSION_HEX >= 0x030700A3) + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #include "longintrepr.h" + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define 
CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR +# if defined(__cplusplus) + template<class T> void CYTHON_MAYBE_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_MAYBE_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int32 uint32_t; + #endif + #endif +#else + #include <stdint.h> +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) && __cplusplus >= 201103L + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #elif __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH [[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__ ) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif + +#ifndef __cplusplus + #error "Cython files generated with the C++ option must be compiled with a C++ compiler."
+#endif +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #else + #define CYTHON_INLINE inline + #endif +#endif +template<typename T> +void __Pyx_call_destructor(T& x) { + x.~T(); +} +template<typename T> +class __Pyx_FakeReference { + public: + __Pyx_FakeReference() : ptr(NULL) { } + __Pyx_FakeReference(const T& ref) : ptr(const_cast<T*>(&ref)) { } + T *operator->() { return ptr; } + T *operator&() { return ptr; } + operator T&() { return *ptr; } + template<typename U> bool operator ==(U other) { return *ptr == other; } + template<typename U> bool operator !=(U other) { return *ptr != other; } + private: + T *ptr; +}; + +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define Py_OptimizeFlag 0 +#endif +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyClass_Type +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" + #define __Pyx_PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) + #define __Pyx_DefaultClassType PyType_Type +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_FAST_PYCCALL +#define __Pyx_PyFastCFunction_Check(func)\ + ((PyCFunction_Check(func) && (METH_FASTCALL == (PyCFunction_GET_FLAGS(func) & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))))) +#else +#define __Pyx_PyFastCFunction_Check(func) 0 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030400A1 + #define PyMem_RawMalloc(n) PyMem_Malloc(n) + #define PyMem_RawRealloc(p, n) PyMem_Realloc(p, n) + #define PyMem_RawFree(p) PyMem_Free(p) +#endif +#if CYTHON_COMPILING_IN_PYSTON + #define __Pyx_PyCode_HasFreeVars(co) PyCode_HasFreeVars(co) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if !CYTHON_FAST_THREAD_STATE || PY_VERSION_HEX < 0x02070000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif
PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStr(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +#else +#define __Pyx_PyDict_GetItemStr(dict, name) PyDict_GetItem(dict, name) +#endif +#if PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) PyUnicode_KIND(u) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 
65535 : 1114111) + #define __Pyx_PyUnicode_KIND(u) (sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyUnicode_Contains) + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact + #define PyObject_Unicode PyObject_Str +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) (Py_TYPE(obj) == &PySet_Type) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) +#else + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define PyNumber_Int PyNumber_Long +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + 
#ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t PyInt_AsLong +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t PyInt_AsSsize_t +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyMethod_New(func, self, klass) ((self) ? PyMethod_New(func, self) : (Py_INCREF(func), func)) +#else + #define __Pyx_PyMethod_New(func, self, klass) PyMethod_New(func, self, klass) +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(WIN32) || defined(MS_WINDOWS) + #define _USE_MATH_DEFINES +#endif +#include <math.h> +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + + +#define __PYX_ERR(f_index, lineno, Ln_error) \ +{ \ + __pyx_filename = __pyx_f[f_index]; __pyx_lineno = lineno; __pyx_clineno = __LINE__; goto Ln_error; \ +} + +#ifndef __PYX_EXTERN_C + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__msgpack___cmsgpack +#define __PYX_HAVE_API__msgpack___cmsgpack +/* Early includes */ +#include +#include +#include "pythread.h" +#include "pack.h" +#include "buff_converter.h" +#include +#include +#include "unpack.h" +#ifdef _OPENMP +#include <omp.h> +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 1 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "ascii" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include <cstdlib> + #define __Pyx_sst_abs(value) std::abs(value) +#elif
SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? -value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) { + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? 
__Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#define __Pyx_PyNumber_Float(x) (PyFloat_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Float(x)) +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) 
&& (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +static PyObject *__pyx_m = NULL; +static PyObject *__pyx_d; +static PyObject *__pyx_b; +static PyObject *__pyx_cython_runtime = NULL; +static PyObject *__pyx_empty_tuple; +static PyObject *__pyx_empty_bytes; +static PyObject *__pyx_empty_unicode; +static int __pyx_lineno; +static int __pyx_clineno = 0; +static const char * __pyx_cfilenm= __FILE__; +static const char *__pyx_filename; + + +static const char *__pyx_f[] = { + "msgpack/_packer.pyx", + "msgpack/_unpacker.pyx", + "stringsource", + "msgpack/_cmsgpack.pyx", + "type.pxd", + "bool.pxd", + "complex.pxd", +}; + +/* "msgpack/_unpacker.pyx":13 + * from libc.string cimport * + * from libc.limits cimport * + * ctypedef unsigned long long uint64_t # <<<<<<<<<<<<<< + * + * from ddtrace.vendor.msgpack.exceptions import ( + */ +typedef unsigned PY_LONG_LONG __pyx_t_7msgpack_9_cmsgpack_uint64_t; + +/*--- Type declarations ---*/ +struct __pyx_obj_7msgpack_9_cmsgpack_Packer; +struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker; +struct __pyx_opt_args_7msgpack_9_cmsgpack_6Packer__pack; +struct __pyx_opt_args_7msgpack_9_cmsgpack_8Unpacker__unpack; + +/* "msgpack/_packer.pyx":148 + * self.pk.buf = NULL + * + * cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1: # <<<<<<<<<<<<<< + * cdef long long llval + * cdef unsigned long long ullval + */ +struct __pyx_opt_args_7msgpack_9_cmsgpack_6Packer__pack { + int __pyx_n; + int nest_limit; +}; + +/* "msgpack/_unpacker.pyx":477 + * self.file_like = None + * + * cdef object _unpack(self, execute_fn execute, bint iter=0): # <<<<<<<<<<<<<< + * cdef int ret + * cdef object obj + */ +struct __pyx_opt_args_7msgpack_9_cmsgpack_8Unpacker__unpack { + int __pyx_n; + int iter; +}; + +/* "msgpack/_packer.pyx":54 + * + * + * cdef class Packer(object): # <<<<<<<<<<<<<< + * """ + * MessagePack Packer + */ +struct __pyx_obj_7msgpack_9_cmsgpack_Packer { + PyObject_HEAD + struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer *__pyx_vtab; + struct msgpack_packer pk; + PyObject *_default; + PyObject *_bencoding; + PyObject *_berrors; + char const *encoding; + char const *unicode_errors; + int strict_types; + PyBoolObject *use_float; + int autoreset; +}; + + +/* "msgpack/_unpacker.pyx":229 + * + * + * cdef class Unpacker(object): # <<<<<<<<<<<<<< + * """Streaming unpacker. 
+ * + */ +struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker { + PyObject_HEAD + struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker *__pyx_vtab; + unpack_context ctx; + char *buf; + Py_ssize_t buf_size; + Py_ssize_t buf_head; + Py_ssize_t buf_tail; + PyObject *file_like; + PyObject *file_like_read; + Py_ssize_t read_size; + PyObject *object_hook; + PyObject *object_pairs_hook; + PyObject *list_hook; + PyObject *ext_hook; + PyObject *encoding; + PyObject *unicode_errors; + Py_ssize_t max_buffer_size; + __pyx_t_7msgpack_9_cmsgpack_uint64_t stream_offset; +}; + + + +/* "msgpack/_packer.pyx":54 + * + * + * cdef class Packer(object): # <<<<<<<<<<<<<< + * """ + * MessagePack Packer + */ + +struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer { + int (*_pack)(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *, PyObject *, struct __pyx_opt_args_7msgpack_9_cmsgpack_6Packer__pack *__pyx_optional_args); + PyObject *(*pack)(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *, PyObject *, int __pyx_skip_dispatch); +}; +static struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer *__pyx_vtabptr_7msgpack_9_cmsgpack_Packer; + + +/* "msgpack/_unpacker.pyx":229 + * + * + * cdef class Unpacker(object): # <<<<<<<<<<<<<< + * """Streaming unpacker. + * + */ + +struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker { + PyObject *(*append_buffer)(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *, void *, Py_ssize_t); + PyObject *(*read_from_file)(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *); + PyObject *(*_unpack)(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *, execute_fn, struct __pyx_opt_args_7msgpack_9_cmsgpack_8Unpacker__unpack *__pyx_optional_args); +}; +static struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker *__pyx_vtabptr_7msgpack_9_cmsgpack_Unpacker; + +/* --- Runtime support code (head) --- */ +/* Refnanny.proto */ +#ifndef CYTHON_REFNANNY + #define CYTHON_REFNANNY 0 +#endif +#if CYTHON_REFNANNY + typedef struct { + void (*INCREF)(void*, PyObject*, int); + void (*DECREF)(void*, PyObject*, int); + void (*GOTREF)(void*, PyObject*, int); + void (*GIVEREF)(void*, PyObject*, int); + void* (*SetupContext)(const char*, int, const char*); + void (*FinishContext)(void**); + } __Pyx_RefNannyAPIStruct; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL; + static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname); + #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL; +#ifdef WITH_THREAD + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + if (acquire_gil) {\ + PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + PyGILState_Release(__pyx_gilstate_save);\ + } else {\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__);\ + } +#else + #define __Pyx_RefNannySetupContext(name, acquire_gil)\ + __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), __LINE__, __FILE__) +#endif + #define __Pyx_RefNannyFinishContext()\ + __Pyx_RefNanny->FinishContext(&__pyx_refnanny) + #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), __LINE__) + #define __Pyx_XINCREF(r) do { if((r) != NULL) {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) != NULL) {__Pyx_DECREF(r); }} while(0) + 
#define __Pyx_XGOTREF(r) do { if((r) != NULL) {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) != NULL) {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyObjectGetAttrStr.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n) +#endif + +/* GetBuiltinName.proto */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name); + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* KeywordStringCheck.proto */ +static int __Pyx_CheckKeywordStrings(PyObject *kwdict, const char* function_name, int kw_allowed); + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#define __Pyx_PyErr_Occurred() __pyx_tstate->curexc_type +#else +#define __Pyx_PyThreadState_declare +#define __Pyx_PyThreadState_assign +#define __Pyx_PyErr_Occurred() PyErr_Occurred() +#endif + +/* PyErrFetchRestore.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL) +#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL)) +#else +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#endif +#else +#define __Pyx_PyErr_Clear() PyErr_Clear() +#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc) +#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb) 
+#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb) +#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb) +#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb) +#endif + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject **argnames[],\ + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args,\ + const char* function_name); + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +/* GetTopmostException.proto */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* PyCFunctionFastCall.proto */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject *__Pyx_PyCFunction_FastCall(PyObject *func, PyObject **args, Py_ssize_t nargs); +#else +#define __Pyx_PyCFunction_FastCall(func, args, nargs) (assert(0), NULL) +#endif + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs); +#else +#define __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs) _PyFunction_FastCallDict(func, args, nargs, kwargs) +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + 
Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif + +/* PyObjectCall2Args.proto */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2); + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* SwapException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* IterFinish.proto */ +static CYTHON_INLINE int __Pyx_IterFinish(void); + +/* PyObjectCallNoArg.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func); +#else +#define __Pyx_PyObject_CallNoArg(func) __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL) +#endif + +/* PyObjectGetMethod.proto */ +static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method); + +/* PyObjectCallMethod0.proto */ +static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* UnpackItemEndCheck.proto */ +static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* UnpackTupleError.proto */ +static void __Pyx_UnpackTupleError(PyObject *, Py_ssize_t index); + +/* UnpackTuple2.proto */ +#define __Pyx_unpack_tuple2(tuple, value1, value2, is_tuple, has_known_size, decref_tuple)\ + (likely(is_tuple || PyTuple_Check(tuple)) ?\ + (likely(has_known_size || PyTuple_GET_SIZE(tuple) == 2) ?\ + __Pyx_unpack_tuple2_exact(tuple, value1, value2, decref_tuple) :\ + (__Pyx_UnpackTupleError(tuple, 2), -1)) :\ + __Pyx_unpack_tuple2_generic(tuple, value1, value2, has_known_size, decref_tuple)) +static CYTHON_INLINE int __Pyx_unpack_tuple2_exact( + PyObject* tuple, PyObject** value1, PyObject** value2, int decref_tuple); +static int __Pyx_unpack_tuple2_generic( + PyObject* tuple, PyObject** value1, PyObject** value2, int has_known_size, int decref_tuple); + +/* dict_iter.proto */ +static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* dict, int is_dict, PyObject* method_name, + Py_ssize_t* p_orig_length, int* p_is_dict); +static CYTHON_INLINE int __Pyx_dict_iter_next(PyObject* dict_or_iter, Py_ssize_t orig_length, Py_ssize_t* ppos, + PyObject** pkey, PyObject** pvalue, PyObject** pitem, int is_dict); + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define __PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) 
+#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +#define __Pyx_GetModuleGlobalNameUncached(var, name) {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* ReRaiseException.proto */ +static CYTHON_INLINE void __Pyx_ReraiseException(void); + +/* None.proto */ +static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); + +/* BuildPyUnicode.proto */ +static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, + int prepend_sign, char padding_char); + +/* CIntToPyUnicode.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char, char format_char); + +/* PyObject_GenericGetAttrNoDict.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr +#endif + +/* PyObject_GenericGetAttr.proto */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name); +#else +#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr +#endif + +/* SetVTable.proto */ +static int __Pyx_SetVtable(PyObject *dict, void *vtable); + +/* SetupReduce.proto */ +static int __Pyx_setup_reduce(PyObject* type_obj); + +/* TypeImport.proto */ +#ifndef __PYX_HAVE_RT_ImportType_proto +#define __PYX_HAVE_RT_ImportType_proto +enum __Pyx_ImportType_CheckSize { + 
__Pyx_ImportType_CheckSize_Error = 0, + __Pyx_ImportType_CheckSize_Warn = 1, + __Pyx_ImportType_CheckSize_Ignore = 2 +}; +static PyTypeObject *__Pyx_ImportType(PyObject* module, const char *module_name, const char *class_name, size_t size, enum __Pyx_ImportType_CheckSize check_size); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* ImportFrom.proto */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_PY_LONG_LONG(unsigned PY_LONG_LONG value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_As_PY_LONG_LONG(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_As_unsigned_PY_LONG_LONG(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + +static int __pyx_f_7msgpack_9_cmsgpack_6Packer__pack(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_o, struct __pyx_opt_args_7msgpack_9_cmsgpack_6Packer__pack *__pyx_optional_args); /* proto*/ +static PyObject *__pyx_f_7msgpack_9_cmsgpack_6Packer_pack(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_skip_dispatch); /* proto*/ +static PyObject 
*__pyx_f_7msgpack_9_cmsgpack_8Unpacker_append_buffer(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, void *__pyx_v__buf, Py_ssize_t __pyx_v__buf_len); /* proto*/ +static PyObject *__pyx_f_7msgpack_9_cmsgpack_8Unpacker_read_from_file(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self); /* proto*/ +static PyObject *__pyx_f_7msgpack_9_cmsgpack_8Unpacker__unpack(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, execute_fn __pyx_v_execute, struct __pyx_opt_args_7msgpack_9_cmsgpack_8Unpacker__unpack *__pyx_optional_args); /* proto*/ + +/* Module declarations from 'cpython.version' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.type' */ +static PyTypeObject *__pyx_ptype_7cpython_4type_type = 0; + +/* Module declarations from 'libc.string' */ + +/* Module declarations from 'libc.stdio' */ + +/* Module declarations from 'cpython.object' */ + +/* Module declarations from 'cpython.ref' */ + +/* Module declarations from 'cpython.exc' */ + +/* Module declarations from 'cpython.module' */ + +/* Module declarations from 'cpython.mem' */ + +/* Module declarations from 'cpython.tuple' */ + +/* Module declarations from 'cpython.list' */ + +/* Module declarations from 'cpython.sequence' */ + +/* Module declarations from 'cpython.mapping' */ + +/* Module declarations from 'cpython.iterator' */ + +/* Module declarations from 'cpython.number' */ + +/* Module declarations from 'cpython.int' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.bool' */ +static PyTypeObject *__pyx_ptype_7cpython_4bool_bool = 0; + +/* Module declarations from 'cpython.long' */ + +/* Module declarations from 'cpython.float' */ + +/* Module declarations from '__builtin__' */ + +/* Module declarations from 'cpython.complex' */ +static PyTypeObject *__pyx_ptype_7cpython_7complex_complex = 0; + +/* Module declarations from 'cpython.string' */ + +/* Module declarations from 'cpython.unicode' */ + +/* Module declarations from 'cpython.dict' */ + +/* Module declarations from 'cpython.instance' */ + +/* Module declarations from 'cpython.function' */ + +/* Module declarations from 'cpython.method' */ + +/* Module declarations from 'cpython.weakref' */ + +/* Module declarations from 'cpython.getargs' */ + +/* Module declarations from 'cpython.pythread' */ + +/* Module declarations from 'cpython.pystate' */ + +/* Module declarations from 'cpython.cobject' */ + +/* Module declarations from 'cpython.oldbuffer' */ + +/* Module declarations from 'cpython.set' */ + +/* Module declarations from 'cpython.buffer' */ + +/* Module declarations from 'cpython.bytes' */ + +/* Module declarations from 'cpython.pycapsule' */ + +/* Module declarations from 'cpython' */ + +/* Module declarations from 'cpython.bytearray' */ + +/* Module declarations from 'libc.stdlib' */ + +/* Module declarations from 'libc.limits' */ + +/* Module declarations from 'msgpack._cmsgpack' */ +static PyTypeObject *__pyx_ptype_7msgpack_9_cmsgpack_Packer = 0; +static PyTypeObject *__pyx_ptype_7msgpack_9_cmsgpack_Unpacker = 0; +static int __pyx_v_7msgpack_9_cmsgpack_DEFAULT_RECURSE_LIMIT; +static PY_LONG_LONG __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT; +static CYTHON_INLINE int __pyx_f_7msgpack_9_cmsgpack_PyBytesLike_Check(PyObject *); /*proto*/ +static CYTHON_INLINE int __pyx_f_7msgpack_9_cmsgpack_PyBytesLike_CheckExact(PyObject *); /*proto*/ +static CYTHON_INLINE PyObject *__pyx_f_7msgpack_9_cmsgpack_init_ctx(unpack_context *, PyObject *, PyObject *, PyObject *, PyObject *, 
int, int, int, char const *, char const *, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t, Py_ssize_t); /*proto*/ +static CYTHON_INLINE int __pyx_f_7msgpack_9_cmsgpack_get_data_from_buffer(PyObject *, Py_buffer *, char **, Py_ssize_t *, int *); /*proto*/ +#define __Pyx_MODULE_NAME "msgpack._cmsgpack" +extern int __pyx_module_is_main_msgpack___cmsgpack; +int __pyx_module_is_main_msgpack___cmsgpack = 0; + +/* Implementation of 'msgpack._cmsgpack' */ +static PyObject *__pyx_builtin_MemoryError; +static PyObject *__pyx_builtin_DeprecationWarning; +static PyObject *__pyx_builtin_TypeError; +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_OverflowError; +static PyObject *__pyx_builtin_RuntimeError; +static PyObject *__pyx_builtin_NotImplementedError; +static PyObject *__pyx_builtin_BufferError; +static PyObject *__pyx_builtin_RuntimeWarning; +static PyObject *__pyx_builtin_AssertionError; +static PyObject *__pyx_builtin_StopIteration; +static const char __pyx_k_d[] = "d"; +static const char __pyx_k_buf[] = "buf"; +static const char __pyx_k_ctx[] = "ctx"; +static const char __pyx_k_obj[] = "obj"; +static const char __pyx_k_off[] = "off"; +static const char __pyx_k_raw[] = "raw"; +static const char __pyx_k_ret[] = "ret"; +static const char __pyx_k_cenc[] = "cenc"; +static const char __pyx_k_cerr[] = "cerr"; +static const char __pyx_k_code[] = "code"; +static const char __pyx_k_data[] = "data"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_name[] = "__name__"; +static const char __pyx_k_pack[] = "pack"; +static const char __pyx_k_read[] = "read"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_view[] = "view"; +static const char __pyx_k_items[] = "items"; +static const char __pyx_k_Packer[] = "Packer"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_kwargs[] = "kwargs"; +static const char __pyx_k_packed[] = "packed"; +static const char __pyx_k_reduce[] = "__reduce__"; +static const char __pyx_k_stream[] = "stream"; +static const char __pyx_k_unpack[] = "unpack"; +static const char __pyx_k_ExtType[] = "ExtType"; +static const char __pyx_k_buf_len[] = "buf_len"; +static const char __pyx_k_default[] = "default"; +static const char __pyx_k_unpackb[] = "unpackb"; +static const char __pyx_k_Unpacker[] = "Unpacker"; +static const char __pyx_k_encoding[] = "encoding"; +static const char __pyx_k_ext_hook[] = "ext_hook"; +static const char __pyx_k_getstate[] = "__getstate__"; +static const char __pyx_k_setstate[] = "__setstate__"; +static const char __pyx_k_typecode[] = "typecode"; +static const char __pyx_k_use_list[] = "use_list"; +static const char __pyx_k_ExtraData[] = "ExtraData"; +static const char __pyx_k_OutOfData[] = "OutOfData"; +static const char __pyx_k_TypeError[] = "TypeError"; +static const char __pyx_k_autoreset[] = "autoreset"; +static const char __pyx_k_file_like[] = "file_like"; +static const char __pyx_k_list_hook[] = "list_hook"; +static const char __pyx_k_read_size[] = "read_size"; +static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; +static const char __pyx_k_BufferFull[] = "BufferFull"; +static const char __pyx_k_StackError[] = "StackError"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; +static const char __pyx_k_BufferError[] = "BufferError"; +static const char __pyx_k_FormatError[] = "FormatError"; +static const char __pyx_k_MemoryError[] = "MemoryError"; +static const char __pyx_k_max_bin_len[] = 
"max_bin_len"; +static const char __pyx_k_max_ext_len[] = "max_ext_len"; +static const char __pyx_k_max_map_len[] = "max_map_len"; +static const char __pyx_k_max_str_len[] = "max_str_len"; +static const char __pyx_k_object_hook[] = "object_hook"; +static const char __pyx_k_RuntimeError[] = "RuntimeError"; +static const char __pyx_k_new_protocol[] = "new_protocol"; +static const char __pyx_k_strict_types[] = "strict_types"; +static const char __pyx_k_use_bin_type[] = "use_bin_type"; +static const char __pyx_k_OverflowError[] = "OverflowError"; +static const char __pyx_k_StopIteration[] = "StopIteration"; +static const char __pyx_k_max_array_len[] = "max_array_len"; +static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; +static const char __pyx_k_AssertionError[] = "AssertionError"; +static const char __pyx_k_RuntimeWarning[] = "RuntimeWarning"; +static const char __pyx_k_internal_error[] = "internal error"; +static const char __pyx_k_strict_map_key[] = "strict_map_key"; +static const char __pyx_k_unicode_errors[] = "unicode_errors"; +static const char __pyx_k_max_buffer_size[] = "max_buffer_size"; +static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; +static const char __pyx_k_use_single_float[] = "use_single_float"; +static const char __pyx_k_dict_is_too_large[] = "dict is too large"; +static const char __pyx_k_list_is_too_large[] = "list is too large"; +static const char __pyx_k_msgpack__cmsgpack[] = "msgpack._cmsgpack"; +static const char __pyx_k_object_pairs_hook[] = "object_pairs_hook"; +static const char __pyx_k_DeprecationWarning[] = "DeprecationWarning"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_NotImplementedError[] = "NotImplementedError"; +static const char __pyx_k_Unpack_failed_error[] = "Unpack failed: error = "; +static const char __pyx_k_EXT_data_is_too_large[] = "EXT data is too large"; +static const char __pyx_k_msgpack__unpacker_pyx[] = "msgpack/_unpacker.pyx"; +static const char __pyx_k_No_more_data_to_unpack[] = "No more data to unpack."; +static const char __pyx_k_ddtrace_vendor_msgpack[] = "ddtrace.vendor.msgpack"; +static const char __pyx_k_memoryview_is_too_large[] = "memoryview is too large"; +static const char __pyx_k_could_not_get_memoryview[] = "could not get memoryview"; +static const char __pyx_k_recursion_limit_exceeded[] = "recursion limit exceeded."; +static const char __pyx_k_Integer_value_out_of_range[] = "Integer value out of range"; +static const char __pyx_k_default_must_be_a_callable[] = "default must be a callable."; +static const char __pyx_k_default_read_extended_type[] = "default_read_extended_type"; +static const char __pyx_k_ext_hook_must_be_a_callable[] = "ext_hook must be a callable."; +static const char __pyx_k_unicode_string_is_too_large[] = "unicode string is too large"; +static const char __pyx_k_list_hook_must_be_a_callable[] = "list_hook must be a callable."; +static const char __pyx_k_Unpack_failed_incomplete_input[] = "Unpack failed: incomplete input"; +static const char __pyx_k_object_hook_must_be_a_callable[] = "object_hook must be a callable."; +static const char __pyx_k_file_like_read_must_be_a_callab[] = "`file_like.read` must be a callable."; +static const char __pyx_k_unpacker_feed_is_not_be_able_to[] = "unpacker.feed() is not be able to use with `file_like`."; +static const char __pyx_k_Cannot_decode_extended_type_with[] = "Cannot decode extended type with typecode=%d"; +static const char __pyx_k_Unable_to_allocate_internal_buff[] = "Unable to 
allocate internal buffer."; +static const char __pyx_k_Unable_to_enlarge_internal_buffe[] = "Unable to enlarge internal buffer."; +static const char __pyx_k_cannot_unpack_from_multi_byte_ob[] = "cannot unpack from multi-byte object"; +static const char __pyx_k_could_not_get_buffer_for_memoryv[] = "could not get buffer for memoryview"; +static const char __pyx_k_ddtrace_vendor_msgpack_exception[] = "ddtrace.vendor.msgpack.exceptions"; +static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; +static const char __pyx_k_object_pairs_hook_and_object_hoo[] = "object_pairs_hook and object_hook are mutually exclusive."; +static const char __pyx_k_object_pairs_hook_must_be_a_call[] = "object_pairs_hook must be a callable."; +static const char __pyx_k_read_size_should_be_less_or_equa[] = "read_size should be less or equal to max_buffer_size"; +static const char __pyx_k_using_old_buffer_interface_to_un[] = "using old buffer interface to unpack %s; this leads to unpacking errors if slicing is used and will be removed in a future version"; +static PyObject *__pyx_n_s_AssertionError; +static PyObject *__pyx_n_s_BufferError; +static PyObject *__pyx_n_s_BufferFull; +static PyObject *__pyx_kp_u_Cannot_decode_extended_type_with; +static PyObject *__pyx_n_s_DeprecationWarning; +static PyObject *__pyx_kp_u_EXT_data_is_too_large; +static PyObject *__pyx_n_s_ExtType; +static PyObject *__pyx_n_s_ExtraData; +static PyObject *__pyx_n_s_FormatError; +static PyObject *__pyx_kp_u_Integer_value_out_of_range; +static PyObject *__pyx_n_s_MemoryError; +static PyObject *__pyx_kp_u_No_more_data_to_unpack; +static PyObject *__pyx_n_s_NotImplementedError; +static PyObject *__pyx_n_s_OutOfData; +static PyObject *__pyx_n_s_OverflowError; +static PyObject *__pyx_n_s_Packer; +static PyObject *__pyx_n_s_RuntimeError; +static PyObject *__pyx_n_s_RuntimeWarning; +static PyObject *__pyx_n_s_StackError; +static PyObject *__pyx_n_s_StopIteration; +static PyObject *__pyx_n_s_TypeError; +static PyObject *__pyx_kp_u_Unable_to_allocate_internal_buff; +static PyObject *__pyx_kp_u_Unable_to_enlarge_internal_buffe; +static PyObject *__pyx_kp_u_Unpack_failed_error; +static PyObject *__pyx_kp_u_Unpack_failed_incomplete_input; +static PyObject *__pyx_n_s_Unpacker; +static PyObject *__pyx_n_s_ValueError; +static PyObject *__pyx_n_s_autoreset; +static PyObject *__pyx_n_s_buf; +static PyObject *__pyx_n_s_buf_len; +static PyObject *__pyx_kp_u_cannot_unpack_from_multi_byte_ob; +static PyObject *__pyx_n_s_cenc; +static PyObject *__pyx_n_s_cerr; +static PyObject *__pyx_n_s_cline_in_traceback; +static PyObject *__pyx_n_s_code; +static PyObject *__pyx_kp_u_could_not_get_buffer_for_memoryv; +static PyObject *__pyx_kp_u_could_not_get_memoryview; +static PyObject *__pyx_n_s_ctx; +static PyObject *__pyx_n_u_d; +static PyObject *__pyx_n_s_data; +static PyObject *__pyx_n_s_ddtrace_vendor_msgpack; +static PyObject *__pyx_n_s_ddtrace_vendor_msgpack_exception; +static PyObject *__pyx_n_s_default; +static PyObject *__pyx_kp_u_default_must_be_a_callable; +static PyObject *__pyx_n_s_default_read_extended_type; +static PyObject *__pyx_kp_u_dict_is_too_large; +static PyObject *__pyx_n_s_encoding; +static PyObject *__pyx_n_s_ext_hook; +static PyObject *__pyx_kp_u_ext_hook_must_be_a_callable; +static PyObject *__pyx_n_s_file_like; +static PyObject *__pyx_kp_u_file_like_read_must_be_a_callab; +static PyObject *__pyx_n_s_getstate; +static PyObject *__pyx_n_s_import; +static PyObject *__pyx_kp_u_internal_error; 
+static PyObject *__pyx_n_s_items; +static PyObject *__pyx_n_s_kwargs; +static PyObject *__pyx_n_s_list_hook; +static PyObject *__pyx_kp_u_list_hook_must_be_a_callable; +static PyObject *__pyx_kp_u_list_is_too_large; +static PyObject *__pyx_n_s_main; +static PyObject *__pyx_n_s_max_array_len; +static PyObject *__pyx_n_s_max_bin_len; +static PyObject *__pyx_n_s_max_buffer_size; +static PyObject *__pyx_n_s_max_ext_len; +static PyObject *__pyx_n_s_max_map_len; +static PyObject *__pyx_n_s_max_str_len; +static PyObject *__pyx_kp_u_memoryview_is_too_large; +static PyObject *__pyx_n_s_msgpack__cmsgpack; +static PyObject *__pyx_kp_s_msgpack__unpacker_pyx; +static PyObject *__pyx_n_s_name; +static PyObject *__pyx_n_s_new_protocol; +static PyObject *__pyx_kp_s_no_default___reduce___due_to_non; +static PyObject *__pyx_n_s_obj; +static PyObject *__pyx_n_s_object_hook; +static PyObject *__pyx_kp_u_object_hook_must_be_a_callable; +static PyObject *__pyx_n_s_object_pairs_hook; +static PyObject *__pyx_kp_u_object_pairs_hook_and_object_hoo; +static PyObject *__pyx_kp_u_object_pairs_hook_must_be_a_call; +static PyObject *__pyx_n_s_off; +static PyObject *__pyx_n_s_pack; +static PyObject *__pyx_n_s_packed; +static PyObject *__pyx_n_s_pyx_vtable; +static PyObject *__pyx_n_s_raw; +static PyObject *__pyx_n_s_read; +static PyObject *__pyx_n_s_read_size; +static PyObject *__pyx_kp_u_read_size_should_be_less_or_equa; +static PyObject *__pyx_kp_u_recursion_limit_exceeded; +static PyObject *__pyx_n_s_reduce; +static PyObject *__pyx_n_s_reduce_cython; +static PyObject *__pyx_n_s_reduce_ex; +static PyObject *__pyx_n_s_ret; +static PyObject *__pyx_n_s_setstate; +static PyObject *__pyx_n_s_setstate_cython; +static PyObject *__pyx_n_s_stream; +static PyObject *__pyx_n_s_strict_map_key; +static PyObject *__pyx_n_s_strict_types; +static PyObject *__pyx_n_s_test; +static PyObject *__pyx_n_s_typecode; +static PyObject *__pyx_n_s_unicode_errors; +static PyObject *__pyx_kp_u_unicode_string_is_too_large; +static PyObject *__pyx_n_s_unpack; +static PyObject *__pyx_n_s_unpackb; +static PyObject *__pyx_kp_u_unpacker_feed_is_not_be_able_to; +static PyObject *__pyx_n_s_use_bin_type; +static PyObject *__pyx_n_s_use_list; +static PyObject *__pyx_n_s_use_single_float; +static PyObject *__pyx_kp_u_using_old_buffer_interface_to_un; +static PyObject *__pyx_n_s_view; +static int __pyx_pf_7msgpack_9_cmsgpack_6Packer___cinit__(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self); /* proto */ +static int __pyx_pf_7msgpack_9_cmsgpack_6Packer_2__init__(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_default, PyObject *__pyx_v_encoding, PyObject *__pyx_v_unicode_errors, int __pyx_v_use_single_float, int __pyx_v_autoreset, int __pyx_v_use_bin_type, int __pyx_v_strict_types); /* proto */ +static void __pyx_pf_7msgpack_9_cmsgpack_6Packer_4__dealloc__(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_6pack(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_obj); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_8pack_ext_type(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_typecode, PyObject *__pyx_v_data); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_10pack_array_header(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PY_LONG_LONG __pyx_v_size); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_12pack_map_header(struct 
__pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PY_LONG_LONG __pyx_v_size); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_14pack_map_pairs(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_pairs); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_16reset(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_18bytes(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_20getbuffer(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_22__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_24__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_default_read_extended_type(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_typecode, CYTHON_UNUSED PyObject *__pyx_v_data); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_2unpackb(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_packed, PyObject *__pyx_v_object_hook, PyObject *__pyx_v_list_hook, int __pyx_v_use_list, int __pyx_v_raw, int __pyx_v_strict_map_key, PyObject *__pyx_v_encoding, PyObject *__pyx_v_unicode_errors, PyObject *__pyx_v_object_pairs_hook, PyObject *__pyx_v_ext_hook, Py_ssize_t __pyx_v_max_str_len, Py_ssize_t __pyx_v_max_bin_len, Py_ssize_t __pyx_v_max_array_len, Py_ssize_t __pyx_v_max_map_len, Py_ssize_t __pyx_v_max_ext_len); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_4unpack(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream, PyObject *__pyx_v_kwargs); /* proto */ +static int __pyx_pf_7msgpack_9_cmsgpack_8Unpacker___cinit__(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self); /* proto */ +static void __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_2__dealloc__(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self); /* proto */ +static int __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_4__init__(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, PyObject *__pyx_v_file_like, Py_ssize_t __pyx_v_read_size, int __pyx_v_use_list, int __pyx_v_raw, int __pyx_v_strict_map_key, PyObject *__pyx_v_object_hook, PyObject *__pyx_v_object_pairs_hook, PyObject *__pyx_v_list_hook, PyObject *__pyx_v_encoding, PyObject *__pyx_v_unicode_errors, Py_ssize_t __pyx_v_max_buffer_size, PyObject *__pyx_v_ext_hook, Py_ssize_t __pyx_v_max_str_len, Py_ssize_t __pyx_v_max_bin_len, Py_ssize_t __pyx_v_max_array_len, Py_ssize_t __pyx_v_max_map_len, Py_ssize_t __pyx_v_max_ext_len); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_6feed(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, PyObject *__pyx_v_next_bytes); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_8read_bytes(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, Py_ssize_t __pyx_v_nbytes); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_10unpack(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_12skip(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_14read_array_header(struct 
__pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_16read_map_header(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_18tell(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_20__iter__(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_22__next__(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_24__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_26__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_tp_new_7msgpack_9_cmsgpack_Packer(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_7msgpack_9_cmsgpack_Unpacker(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_int_0; +static int __pyx_k__3; +static PyObject *__pyx_k__22; +static PyObject *__pyx_k__24; +static PyObject *__pyx_tuple_; +static PyObject *__pyx_tuple__2; +static PyObject *__pyx_tuple__4; +static PyObject *__pyx_tuple__5; +static PyObject *__pyx_tuple__6; +static PyObject *__pyx_tuple__7; +static PyObject *__pyx_tuple__8; +static PyObject *__pyx_tuple__9; +static PyObject *__pyx_tuple__10; +static PyObject *__pyx_tuple__11; +static PyObject *__pyx_tuple__12; +static PyObject *__pyx_tuple__13; +static PyObject *__pyx_tuple__14; +static PyObject *__pyx_tuple__15; +static PyObject *__pyx_tuple__16; +static PyObject *__pyx_tuple__17; +static PyObject *__pyx_tuple__18; +static PyObject *__pyx_tuple__19; +static PyObject *__pyx_tuple__20; +static PyObject *__pyx_tuple__21; +static PyObject *__pyx_tuple__23; +static PyObject *__pyx_tuple__25; +static PyObject *__pyx_tuple__26; +static PyObject *__pyx_tuple__27; +static PyObject *__pyx_tuple__28; +static PyObject *__pyx_tuple__29; +static PyObject *__pyx_tuple__30; +static PyObject *__pyx_tuple__31; +static PyObject *__pyx_tuple__32; +static PyObject *__pyx_tuple__34; +static PyObject *__pyx_tuple__36; +static PyObject *__pyx_codeobj__33; +static PyObject *__pyx_codeobj__35; +static PyObject *__pyx_codeobj__37; +/* Late includes */ + +/* "msgpack/_packer.pyx":46 + * + * + * cdef inline int PyBytesLike_Check(object o): # <<<<<<<<<<<<<< + * return PyBytes_Check(o) or PyByteArray_Check(o) + * + */ + +static CYTHON_INLINE int __pyx_f_7msgpack_9_cmsgpack_PyBytesLike_Check(PyObject *__pyx_v_o) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + __Pyx_RefNannySetupContext("PyBytesLike_Check", 0); + + /* "msgpack/_packer.pyx":47 + * + * cdef inline int PyBytesLike_Check(object o): + * return PyBytes_Check(o) or PyByteArray_Check(o) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = PyBytes_Check(__pyx_v_o); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L3_bool_binop_done; + } + __pyx_t_2 = PyByteArray_Check(__pyx_v_o); + __pyx_t_1 = __pyx_t_2; + __pyx_L3_bool_binop_done:; + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* "msgpack/_packer.pyx":46 + * + * + * cdef inline int PyBytesLike_Check(object o): # <<<<<<<<<<<<<< + * return PyBytes_Check(o) or 
PyByteArray_Check(o) + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_packer.pyx":50 + * + * + * cdef inline int PyBytesLike_CheckExact(object o): # <<<<<<<<<<<<<< + * return PyBytes_CheckExact(o) or PyByteArray_CheckExact(o) + * + */ + +static CYTHON_INLINE int __pyx_f_7msgpack_9_cmsgpack_PyBytesLike_CheckExact(PyObject *__pyx_v_o) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + __Pyx_RefNannySetupContext("PyBytesLike_CheckExact", 0); + + /* "msgpack/_packer.pyx":51 + * + * cdef inline int PyBytesLike_CheckExact(object o): + * return PyBytes_CheckExact(o) or PyByteArray_CheckExact(o) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = PyBytes_CheckExact(__pyx_v_o); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L3_bool_binop_done; + } + __pyx_t_2 = PyByteArray_CheckExact(__pyx_v_o); + __pyx_t_1 = __pyx_t_2; + __pyx_L3_bool_binop_done:; + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* "msgpack/_packer.pyx":50 + * + * + * cdef inline int PyBytesLike_CheckExact(object o): # <<<<<<<<<<<<<< + * return PyBytes_CheckExact(o) or PyByteArray_CheckExact(o) + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_packer.pyx":107 + * cdef bint autoreset + * + * def __cinit__(self): # <<<<<<<<<<<<<< + * cdef int buf_size = 1024*1024 + * self.pk.buf = PyMem_Malloc(buf_size) + */ + +/* Python wrapper */ +static int __pyx_pw_7msgpack_9_cmsgpack_6Packer_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_pw_7msgpack_9_cmsgpack_6Packer_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + if (unlikely(PyTuple_GET_SIZE(__pyx_args) > 0)) { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); return -1;} + if (unlikely(__pyx_kwds) && unlikely(PyDict_Size(__pyx_kwds) > 0) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__cinit__", 0))) return -1; + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer___cinit__(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_7msgpack_9_cmsgpack_6Packer___cinit__(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self) { + int __pyx_v_buf_size; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannySetupContext("__cinit__", 0); + + /* "msgpack/_packer.pyx":108 + * + * def __cinit__(self): + * cdef int buf_size = 1024*1024 # <<<<<<<<<<<<<< + * self.pk.buf = PyMem_Malloc(buf_size) + * if self.pk.buf == NULL: + */ + __pyx_v_buf_size = 0x100000; + + /* "msgpack/_packer.pyx":109 + * def __cinit__(self): + * cdef int buf_size = 1024*1024 + * self.pk.buf = PyMem_Malloc(buf_size) # <<<<<<<<<<<<<< + * if self.pk.buf == NULL: + * raise MemoryError("Unable to allocate internal buffer.") + */ + __pyx_v_self->pk.buf = ((char *)PyMem_Malloc(__pyx_v_buf_size)); + + /* "msgpack/_packer.pyx":110 + * cdef int buf_size = 1024*1024 + * self.pk.buf = PyMem_Malloc(buf_size) + * if self.pk.buf == NULL: # <<<<<<<<<<<<<< + * raise MemoryError("Unable to allocate internal buffer.") + * self.pk.buf_size = buf_size + */ + __pyx_t_1 = ((__pyx_v_self->pk.buf == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_packer.pyx":111 + * 
self.pk.buf = PyMem_Malloc(buf_size) + * if self.pk.buf == NULL: + * raise MemoryError("Unable to allocate internal buffer.") # <<<<<<<<<<<<<< + * self.pk.buf_size = buf_size + * self.pk.length = 0 + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(0, 111, __pyx_L1_error) + + /* "msgpack/_packer.pyx":110 + * cdef int buf_size = 1024*1024 + * self.pk.buf = PyMem_Malloc(buf_size) + * if self.pk.buf == NULL: # <<<<<<<<<<<<<< + * raise MemoryError("Unable to allocate internal buffer.") + * self.pk.buf_size = buf_size + */ + } + + /* "msgpack/_packer.pyx":112 + * if self.pk.buf == NULL: + * raise MemoryError("Unable to allocate internal buffer.") + * self.pk.buf_size = buf_size # <<<<<<<<<<<<<< + * self.pk.length = 0 + * + */ + __pyx_v_self->pk.buf_size = __pyx_v_buf_size; + + /* "msgpack/_packer.pyx":113 + * raise MemoryError("Unable to allocate internal buffer.") + * self.pk.buf_size = buf_size + * self.pk.length = 0 # <<<<<<<<<<<<<< + * + * def __init__(self, default=None, encoding=None, unicode_errors=None, + */ + __pyx_v_self->pk.length = 0; + + /* "msgpack/_packer.pyx":107 + * cdef bint autoreset + * + * def __cinit__(self): # <<<<<<<<<<<<<< + * cdef int buf_size = 1024*1024 + * self.pk.buf = PyMem_Malloc(buf_size) + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_packer.pyx":115 + * self.pk.length = 0 + * + * def __init__(self, default=None, encoding=None, unicode_errors=None, # <<<<<<<<<<<<<< + * bint use_single_float=False, bint autoreset=True, bint use_bin_type=False, + * bint strict_types=False): + */ + +/* Python wrapper */ +static int __pyx_pw_7msgpack_9_cmsgpack_6Packer_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_pw_7msgpack_9_cmsgpack_6Packer_3__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_default = 0; + PyObject *__pyx_v_encoding = 0; + PyObject *__pyx_v_unicode_errors = 0; + int __pyx_v_use_single_float; + int __pyx_v_autoreset; + int __pyx_v_use_bin_type; + int __pyx_v_strict_types; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_default,&__pyx_n_s_encoding,&__pyx_n_s_unicode_errors,&__pyx_n_s_use_single_float,&__pyx_n_s_autoreset,&__pyx_n_s_use_bin_type,&__pyx_n_s_strict_types,0}; + PyObject* values[7] = {0,0,0,0,0,0,0}; + values[0] = ((PyObject *)Py_None); + values[1] = ((PyObject *)Py_None); + values[2] = ((PyObject *)Py_None); + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = 
PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_default); + if (value) { values[0] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 1: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_encoding); + if (value) { values[1] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 2: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_unicode_errors); + if (value) { values[2] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 3: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_use_single_float); + if (value) { values[3] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 4: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_autoreset); + if (value) { values[4] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 5: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_use_bin_type); + if (value) { values[5] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 6: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_strict_types); + if (value) { values[6] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(0, 115, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_default = values[0]; + __pyx_v_encoding = values[1]; + __pyx_v_unicode_errors = values[2]; + if (values[3]) { + __pyx_v_use_single_float = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_use_single_float == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 116, __pyx_L3_error) + } else { + + /* "msgpack/_packer.pyx":116 + * + * def __init__(self, default=None, encoding=None, unicode_errors=None, + * bint use_single_float=False, bint autoreset=True, bint use_bin_type=False, # <<<<<<<<<<<<<< + * bint strict_types=False): + * if encoding is not None: + */ + __pyx_v_use_single_float = ((int)0); + } + if (values[4]) { + __pyx_v_autoreset = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_autoreset == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 116, __pyx_L3_error) + } else { + __pyx_v_autoreset = ((int)1); + } + if (values[5]) { + __pyx_v_use_bin_type = __Pyx_PyObject_IsTrue(values[5]); if (unlikely((__pyx_v_use_bin_type == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 116, __pyx_L3_error) + } else { + __pyx_v_use_bin_type = ((int)0); + } + if (values[6]) { + __pyx_v_strict_types = __Pyx_PyObject_IsTrue(values[6]); if (unlikely((__pyx_v_strict_types == (int)-1) 
&& PyErr_Occurred())) __PYX_ERR(0, 117, __pyx_L3_error) + } else { + + /* "msgpack/_packer.pyx":117 + * def __init__(self, default=None, encoding=None, unicode_errors=None, + * bint use_single_float=False, bint autoreset=True, bint use_bin_type=False, + * bint strict_types=False): # <<<<<<<<<<<<<< + * if encoding is not None: + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated.", 1) + */ + __pyx_v_strict_types = ((int)0); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 7, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 115, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer_2__init__(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self), __pyx_v_default, __pyx_v_encoding, __pyx_v_unicode_errors, __pyx_v_use_single_float, __pyx_v_autoreset, __pyx_v_use_bin_type, __pyx_v_strict_types); + + /* "msgpack/_packer.pyx":115 + * self.pk.length = 0 + * + * def __init__(self, default=None, encoding=None, unicode_errors=None, # <<<<<<<<<<<<<< + * bint use_single_float=False, bint autoreset=True, bint use_bin_type=False, + * bint strict_types=False): + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_7msgpack_9_cmsgpack_6Packer_2__init__(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_default, PyObject *__pyx_v_encoding, PyObject *__pyx_v_unicode_errors, int __pyx_v_use_single_float, int __pyx_v_autoreset, int __pyx_v_use_bin_type, int __pyx_v_strict_types) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + char const *__pyx_t_5; + char const *__pyx_t_6; + __Pyx_RefNannySetupContext("__init__", 0); + + /* "msgpack/_packer.pyx":118 + * bint use_single_float=False, bint autoreset=True, bint use_bin_type=False, + * bint strict_types=False): + * if encoding is not None: # <<<<<<<<<<<<<< + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated.", 1) + * self.use_float = use_single_float + */ + __pyx_t_1 = (__pyx_v_encoding != Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "msgpack/_packer.pyx":119 + * bint strict_types=False): + * if encoding is not None: + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated.", 1) # <<<<<<<<<<<<<< + * self.use_float = use_single_float + * self.strict_types = strict_types + */ + __pyx_t_3 = PyErr_WarnEx(__pyx_builtin_DeprecationWarning, ((char *)"encoding is deprecated."), 1); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 119, __pyx_L1_error) + + /* "msgpack/_packer.pyx":118 + * bint use_single_float=False, bint autoreset=True, bint use_bin_type=False, + * bint strict_types=False): + * if encoding is not None: # <<<<<<<<<<<<<< + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated.", 1) + * self.use_float = use_single_float + */ + } + + /* "msgpack/_packer.pyx":120 + * if encoding is not None: + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated.", 1) + * self.use_float = use_single_float # <<<<<<<<<<<<<< + * self.strict_types = strict_types + * self.autoreset = autoreset + */ + __pyx_t_4 = __Pyx_PyBool_FromLong(__pyx_v_use_single_float); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 120, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if 
(!(likely(__Pyx_TypeTest(__pyx_t_4, __pyx_ptype_7cpython_4bool_bool)))) __PYX_ERR(0, 120, __pyx_L1_error) + __Pyx_GIVEREF(__pyx_t_4); + __Pyx_GOTREF(__pyx_v_self->use_float); + __Pyx_DECREF(((PyObject *)__pyx_v_self->use_float)); + __pyx_v_self->use_float = ((PyBoolObject *)__pyx_t_4); + __pyx_t_4 = 0; + + /* "msgpack/_packer.pyx":121 + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated.", 1) + * self.use_float = use_single_float + * self.strict_types = strict_types # <<<<<<<<<<<<<< + * self.autoreset = autoreset + * self.pk.use_bin_type = use_bin_type + */ + __pyx_v_self->strict_types = __pyx_v_strict_types; + + /* "msgpack/_packer.pyx":122 + * self.use_float = use_single_float + * self.strict_types = strict_types + * self.autoreset = autoreset # <<<<<<<<<<<<<< + * self.pk.use_bin_type = use_bin_type + * if default is not None: + */ + __pyx_v_self->autoreset = __pyx_v_autoreset; + + /* "msgpack/_packer.pyx":123 + * self.strict_types = strict_types + * self.autoreset = autoreset + * self.pk.use_bin_type = use_bin_type # <<<<<<<<<<<<<< + * if default is not None: + * if not PyCallable_Check(default): + */ + __pyx_v_self->pk.use_bin_type = __pyx_v_use_bin_type; + + /* "msgpack/_packer.pyx":124 + * self.autoreset = autoreset + * self.pk.use_bin_type = use_bin_type + * if default is not None: # <<<<<<<<<<<<<< + * if not PyCallable_Check(default): + * raise TypeError("default must be a callable.") + */ + __pyx_t_2 = (__pyx_v_default != Py_None); + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "msgpack/_packer.pyx":125 + * self.pk.use_bin_type = use_bin_type + * if default is not None: + * if not PyCallable_Check(default): # <<<<<<<<<<<<<< + * raise TypeError("default must be a callable.") + * self._default = default + */ + __pyx_t_1 = ((!(PyCallable_Check(__pyx_v_default) != 0)) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_packer.pyx":126 + * if default is not None: + * if not PyCallable_Check(default): + * raise TypeError("default must be a callable.") # <<<<<<<<<<<<<< + * self._default = default + * + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__2, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(0, 126, __pyx_L1_error) + + /* "msgpack/_packer.pyx":125 + * self.pk.use_bin_type = use_bin_type + * if default is not None: + * if not PyCallable_Check(default): # <<<<<<<<<<<<<< + * raise TypeError("default must be a callable.") + * self._default = default + */ + } + + /* "msgpack/_packer.pyx":124 + * self.autoreset = autoreset + * self.pk.use_bin_type = use_bin_type + * if default is not None: # <<<<<<<<<<<<<< + * if not PyCallable_Check(default): + * raise TypeError("default must be a callable.") + */ + } + + /* "msgpack/_packer.pyx":127 + * if not PyCallable_Check(default): + * raise TypeError("default must be a callable.") + * self._default = default # <<<<<<<<<<<<<< + * + * self._bencoding = encoding + */ + __Pyx_INCREF(__pyx_v_default); + __Pyx_GIVEREF(__pyx_v_default); + __Pyx_GOTREF(__pyx_v_self->_default); + __Pyx_DECREF(__pyx_v_self->_default); + __pyx_v_self->_default = __pyx_v_default; + + /* "msgpack/_packer.pyx":129 + * self._default = default + * + * self._bencoding = encoding # <<<<<<<<<<<<<< + * if encoding is None: + * if PY_MAJOR_VERSION < 3: + */ + __Pyx_INCREF(__pyx_v_encoding); + __Pyx_GIVEREF(__pyx_v_encoding); + __Pyx_GOTREF(__pyx_v_self->_bencoding); + 
__Pyx_DECREF(__pyx_v_self->_bencoding); + __pyx_v_self->_bencoding = __pyx_v_encoding; + + /* "msgpack/_packer.pyx":130 + * + * self._bencoding = encoding + * if encoding is None: # <<<<<<<<<<<<<< + * if PY_MAJOR_VERSION < 3: + * self.encoding = 'utf-8' + */ + __pyx_t_1 = (__pyx_v_encoding == Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "msgpack/_packer.pyx":131 + * self._bencoding = encoding + * if encoding is None: + * if PY_MAJOR_VERSION < 3: # <<<<<<<<<<<<<< + * self.encoding = 'utf-8' + * else: + */ + __pyx_t_2 = ((PY_MAJOR_VERSION < 3) != 0); + if (__pyx_t_2) { + + /* "msgpack/_packer.pyx":132 + * if encoding is None: + * if PY_MAJOR_VERSION < 3: + * self.encoding = 'utf-8' # <<<<<<<<<<<<<< + * else: + * self.encoding = NULL + */ + __pyx_v_self->encoding = ((char const *)"utf-8"); + + /* "msgpack/_packer.pyx":131 + * self._bencoding = encoding + * if encoding is None: + * if PY_MAJOR_VERSION < 3: # <<<<<<<<<<<<<< + * self.encoding = 'utf-8' + * else: + */ + goto __pyx_L7; + } + + /* "msgpack/_packer.pyx":134 + * self.encoding = 'utf-8' + * else: + * self.encoding = NULL # <<<<<<<<<<<<<< + * else: + * self.encoding = self._bencoding + */ + /*else*/ { + __pyx_v_self->encoding = NULL; + } + __pyx_L7:; + + /* "msgpack/_packer.pyx":130 + * + * self._bencoding = encoding + * if encoding is None: # <<<<<<<<<<<<<< + * if PY_MAJOR_VERSION < 3: + * self.encoding = 'utf-8' + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":136 + * self.encoding = NULL + * else: + * self.encoding = self._bencoding # <<<<<<<<<<<<<< + * + * self._berrors = unicode_errors + */ + /*else*/ { + __pyx_t_5 = __Pyx_PyObject_AsString(__pyx_v_self->_bencoding); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) __PYX_ERR(0, 136, __pyx_L1_error) + __pyx_v_self->encoding = __pyx_t_5; + } + __pyx_L6:; + + /* "msgpack/_packer.pyx":138 + * self.encoding = self._bencoding + * + * self._berrors = unicode_errors # <<<<<<<<<<<<<< + * if unicode_errors is None: + * self.unicode_errors = NULL + */ + __Pyx_INCREF(__pyx_v_unicode_errors); + __Pyx_GIVEREF(__pyx_v_unicode_errors); + __Pyx_GOTREF(__pyx_v_self->_berrors); + __Pyx_DECREF(__pyx_v_self->_berrors); + __pyx_v_self->_berrors = __pyx_v_unicode_errors; + + /* "msgpack/_packer.pyx":139 + * + * self._berrors = unicode_errors + * if unicode_errors is None: # <<<<<<<<<<<<<< + * self.unicode_errors = NULL + * else: + */ + __pyx_t_2 = (__pyx_v_unicode_errors == Py_None); + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "msgpack/_packer.pyx":140 + * self._berrors = unicode_errors + * if unicode_errors is None: + * self.unicode_errors = NULL # <<<<<<<<<<<<<< + * else: + * self.unicode_errors = self._berrors + */ + __pyx_v_self->unicode_errors = NULL; + + /* "msgpack/_packer.pyx":139 + * + * self._berrors = unicode_errors + * if unicode_errors is None: # <<<<<<<<<<<<<< + * self.unicode_errors = NULL + * else: + */ + goto __pyx_L8; + } + + /* "msgpack/_packer.pyx":142 + * self.unicode_errors = NULL + * else: + * self.unicode_errors = self._berrors # <<<<<<<<<<<<<< + * + * def __dealloc__(self): + */ + /*else*/ { + __pyx_t_6 = __Pyx_PyObject_AsString(__pyx_v_self->_berrors); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(0, 142, __pyx_L1_error) + __pyx_v_self->unicode_errors = __pyx_t_6; + } + __pyx_L8:; + + /* "msgpack/_packer.pyx":115 + * self.pk.length = 0 + * + * def __init__(self, default=None, encoding=None, unicode_errors=None, # <<<<<<<<<<<<<< + * bint use_single_float=False, bint autoreset=True, bint use_bin_type=False, + * bint 
strict_types=False): + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_packer.pyx":144 + * self.unicode_errors = self._berrors + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * PyMem_Free(self.pk.buf) + * self.pk.buf = NULL + */ + +/* Python wrapper */ +static void __pyx_pw_7msgpack_9_cmsgpack_6Packer_5__dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_pw_7msgpack_9_cmsgpack_6Packer_5__dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_pf_7msgpack_9_cmsgpack_6Packer_4__dealloc__(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_pf_7msgpack_9_cmsgpack_6Packer_4__dealloc__(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "msgpack/_packer.pyx":145 + * + * def __dealloc__(self): + * PyMem_Free(self.pk.buf) # <<<<<<<<<<<<<< + * self.pk.buf = NULL + * + */ + PyMem_Free(__pyx_v_self->pk.buf); + + /* "msgpack/_packer.pyx":146 + * def __dealloc__(self): + * PyMem_Free(self.pk.buf) + * self.pk.buf = NULL # <<<<<<<<<<<<<< + * + * cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1: + */ + __pyx_v_self->pk.buf = NULL; + + /* "msgpack/_packer.pyx":144 + * self.unicode_errors = self._berrors + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * PyMem_Free(self.pk.buf) + * self.pk.buf = NULL + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "msgpack/_packer.pyx":148 + * self.pk.buf = NULL + * + * cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1: # <<<<<<<<<<<<<< + * cdef long long llval + * cdef unsigned long long ullval + */ + +static int __pyx_f_7msgpack_9_cmsgpack_6Packer__pack(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_o, struct __pyx_opt_args_7msgpack_9_cmsgpack_6Packer__pack *__pyx_optional_args) { + int __pyx_v_nest_limit = __pyx_k__3; + PY_LONG_LONG __pyx_v_llval; + unsigned PY_LONG_LONG __pyx_v_ullval; + long __pyx_v_longval; + float __pyx_v_fval; + double __pyx_v_dval; + char *__pyx_v_rawval; + int __pyx_v_ret; + PyObject *__pyx_v_d = 0; + Py_ssize_t __pyx_v_L; + int __pyx_v_default_used; + int __pyx_v_strict_types; + Py_buffer __pyx_v_view; + CYTHON_UNUSED PyObject *__pyx_v_oe = NULL; + PyObject *__pyx_v_k = NULL; + PyObject *__pyx_v_v = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + unsigned PY_LONG_LONG __pyx_t_7; + PY_LONG_LONG __pyx_t_8; + int __pyx_t_9; + PyObject *__pyx_t_10 = NULL; + PyObject *__pyx_t_11 = NULL; + int __pyx_t_12; + PyObject *__pyx_t_13 = NULL; + PyObject *__pyx_t_14 = NULL; + PyObject *__pyx_t_15 = NULL; + int __pyx_t_16; + char const *__pyx_t_17; + PyObject *__pyx_t_18 = NULL; + PyObject *__pyx_t_19 = NULL; + PyObject *__pyx_t_20 = NULL; + PyObject *__pyx_t_21 = NULL; + PyObject *__pyx_t_22 = NULL; + PyObject *__pyx_t_23 = NULL; + long __pyx_t_24; + float __pyx_t_25; + double __pyx_t_26; + Py_ssize_t __pyx_t_27; + PyObject *__pyx_t_28; + char 
*__pyx_t_29; + Py_ssize_t __pyx_t_30; + struct __pyx_opt_args_7msgpack_9_cmsgpack_6Packer__pack __pyx_t_31; + PyObject *(*__pyx_t_32)(PyObject *); + __Pyx_RefNannySetupContext("_pack", 0); + if (__pyx_optional_args) { + if (__pyx_optional_args->__pyx_n > 0) { + __pyx_v_nest_limit = __pyx_optional_args->nest_limit; + } + } + __Pyx_INCREF(__pyx_v_o); + + /* "msgpack/_packer.pyx":158 + * cdef dict d + * cdef Py_ssize_t L + * cdef int default_used = 0 # <<<<<<<<<<<<<< + * cdef bint strict_types = self.strict_types + * cdef Py_buffer view + */ + __pyx_v_default_used = 0; + + /* "msgpack/_packer.pyx":159 + * cdef Py_ssize_t L + * cdef int default_used = 0 + * cdef bint strict_types = self.strict_types # <<<<<<<<<<<<<< + * cdef Py_buffer view + * + */ + __pyx_t_1 = __pyx_v_self->strict_types; + __pyx_v_strict_types = __pyx_t_1; + + /* "msgpack/_packer.pyx":162 + * cdef Py_buffer view + * + * if nest_limit < 0: # <<<<<<<<<<<<<< + * raise ValueError("recursion limit exceeded.") + * + */ + __pyx_t_1 = ((__pyx_v_nest_limit < 0) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_packer.pyx":163 + * + * if nest_limit < 0: + * raise ValueError("recursion limit exceeded.") # <<<<<<<<<<<<<< + * + * while True: + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__4, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 163, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(0, 163, __pyx_L1_error) + + /* "msgpack/_packer.pyx":162 + * cdef Py_buffer view + * + * if nest_limit < 0: # <<<<<<<<<<<<<< + * raise ValueError("recursion limit exceeded.") + * + */ + } + + /* "msgpack/_packer.pyx":165 + * raise ValueError("recursion limit exceeded.") + * + * while True: # <<<<<<<<<<<<<< + * if o is None: + * ret = msgpack_pack_nil(&self.pk) + */ + while (1) { + + /* "msgpack/_packer.pyx":166 + * + * while True: + * if o is None: # <<<<<<<<<<<<<< + * ret = msgpack_pack_nil(&self.pk) + * elif PyBool_Check(o) if strict_types else isinstance(o, bool): + */ + __pyx_t_1 = (__pyx_v_o == Py_None); + __pyx_t_3 = (__pyx_t_1 != 0); + if (__pyx_t_3) { + + /* "msgpack/_packer.pyx":167 + * while True: + * if o is None: + * ret = msgpack_pack_nil(&self.pk) # <<<<<<<<<<<<<< + * elif PyBool_Check(o) if strict_types else isinstance(o, bool): + * if o: + */ + __pyx_v_ret = msgpack_pack_nil((&__pyx_v_self->pk)); + + /* "msgpack/_packer.pyx":166 + * + * while True: + * if o is None: # <<<<<<<<<<<<<< + * ret = msgpack_pack_nil(&self.pk) + * elif PyBool_Check(o) if strict_types else isinstance(o, bool): + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":168 + * if o is None: + * ret = msgpack_pack_nil(&self.pk) + * elif PyBool_Check(o) if strict_types else isinstance(o, bool): # <<<<<<<<<<<<<< + * if o: + * ret = msgpack_pack_true(&self.pk) + */ + if ((__pyx_v_strict_types != 0)) { + __pyx_t_3 = PyBool_Check(__pyx_v_o); + } else { + __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_ptype_7cpython_4bool_bool); + __pyx_t_3 = __pyx_t_1; + } + __pyx_t_1 = (__pyx_t_3 != 0); + if (__pyx_t_1) { + + /* "msgpack/_packer.pyx":169 + * ret = msgpack_pack_nil(&self.pk) + * elif PyBool_Check(o) if strict_types else isinstance(o, bool): + * if o: # <<<<<<<<<<<<<< + * ret = msgpack_pack_true(&self.pk) + * else: + */ + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_o); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(0, 169, __pyx_L1_error) + if (__pyx_t_1) { + + /* "msgpack/_packer.pyx":170 + * elif PyBool_Check(o) if strict_types else isinstance(o, bool): + * if o: + * 
ret = msgpack_pack_true(&self.pk) # <<<<<<<<<<<<<< + * else: + * ret = msgpack_pack_false(&self.pk) + */ + __pyx_v_ret = msgpack_pack_true((&__pyx_v_self->pk)); + + /* "msgpack/_packer.pyx":169 + * ret = msgpack_pack_nil(&self.pk) + * elif PyBool_Check(o) if strict_types else isinstance(o, bool): + * if o: # <<<<<<<<<<<<<< + * ret = msgpack_pack_true(&self.pk) + * else: + */ + goto __pyx_L7; + } + + /* "msgpack/_packer.pyx":172 + * ret = msgpack_pack_true(&self.pk) + * else: + * ret = msgpack_pack_false(&self.pk) # <<<<<<<<<<<<<< + * elif PyLong_CheckExact(o) if strict_types else PyLong_Check(o): + * # PyInt_Check(long) is True for Python 3. + */ + /*else*/ { + __pyx_v_ret = msgpack_pack_false((&__pyx_v_self->pk)); + } + __pyx_L7:; + + /* "msgpack/_packer.pyx":168 + * if o is None: + * ret = msgpack_pack_nil(&self.pk) + * elif PyBool_Check(o) if strict_types else isinstance(o, bool): # <<<<<<<<<<<<<< + * if o: + * ret = msgpack_pack_true(&self.pk) + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":173 + * else: + * ret = msgpack_pack_false(&self.pk) + * elif PyLong_CheckExact(o) if strict_types else PyLong_Check(o): # <<<<<<<<<<<<<< + * # PyInt_Check(long) is True for Python 3. + * # So we should test long before int. + */ + if ((__pyx_v_strict_types != 0)) { + __pyx_t_1 = PyLong_CheckExact(__pyx_v_o); + } else { + __pyx_t_1 = PyLong_Check(__pyx_v_o); + } + __pyx_t_3 = (__pyx_t_1 != 0); + if (__pyx_t_3) { + + /* "msgpack/_packer.pyx":176 + * # PyInt_Check(long) is True for Python 3. + * # So we should test long before int. + * try: # <<<<<<<<<<<<<< + * if o > 0: + * ullval = o + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_4, &__pyx_t_5, &__pyx_t_6); + __Pyx_XGOTREF(__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_6); + /*try:*/ { + + /* "msgpack/_packer.pyx":177 + * # So we should test long before int. + * try: + * if o > 0: # <<<<<<<<<<<<<< + * ullval = o + * ret = msgpack_pack_unsigned_long_long(&self.pk, ullval) + */ + __pyx_t_2 = PyObject_RichCompare(__pyx_v_o, __pyx_int_0, Py_GT); __Pyx_XGOTREF(__pyx_t_2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 177, __pyx_L8_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_2); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 177, __pyx_L8_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (__pyx_t_3) { + + /* "msgpack/_packer.pyx":178 + * try: + * if o > 0: + * ullval = o # <<<<<<<<<<<<<< + * ret = msgpack_pack_unsigned_long_long(&self.pk, ullval) + * else: + */ + __pyx_t_7 = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(__pyx_v_o); if (unlikely((__pyx_t_7 == (unsigned PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 178, __pyx_L8_error) + __pyx_v_ullval = __pyx_t_7; + + /* "msgpack/_packer.pyx":179 + * if o > 0: + * ullval = o + * ret = msgpack_pack_unsigned_long_long(&self.pk, ullval) # <<<<<<<<<<<<<< + * else: + * llval = o + */ + __pyx_v_ret = msgpack_pack_unsigned_long_long((&__pyx_v_self->pk), __pyx_v_ullval); + + /* "msgpack/_packer.pyx":177 + * # So we should test long before int. 
+ * try: + * if o > 0: # <<<<<<<<<<<<<< + * ullval = o + * ret = msgpack_pack_unsigned_long_long(&self.pk, ullval) + */ + goto __pyx_L16; + } + + /* "msgpack/_packer.pyx":181 + * ret = msgpack_pack_unsigned_long_long(&self.pk, ullval) + * else: + * llval = o # <<<<<<<<<<<<<< + * ret = msgpack_pack_long_long(&self.pk, llval) + * except OverflowError as oe: + */ + /*else*/ { + __pyx_t_8 = __Pyx_PyInt_As_PY_LONG_LONG(__pyx_v_o); if (unlikely((__pyx_t_8 == (PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 181, __pyx_L8_error) + __pyx_v_llval = __pyx_t_8; + + /* "msgpack/_packer.pyx":182 + * else: + * llval = o + * ret = msgpack_pack_long_long(&self.pk, llval) # <<<<<<<<<<<<<< + * except OverflowError as oe: + * if not default_used and self._default is not None: + */ + __pyx_v_ret = msgpack_pack_long_long((&__pyx_v_self->pk), __pyx_v_llval); + } + __pyx_L16:; + + /* "msgpack/_packer.pyx":176 + * # PyInt_Check(long) is True for Python 3. + * # So we should test long before int. + * try: # <<<<<<<<<<<<<< + * if o > 0: + * ullval = o + */ + } + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + goto __pyx_L15_try_end; + __pyx_L8_error:; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "msgpack/_packer.pyx":183 + * llval = o + * ret = msgpack_pack_long_long(&self.pk, llval) + * except OverflowError as oe: # <<<<<<<<<<<<<< + * if not default_used and self._default is not None: + * o = self._default(o) + */ + __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_OverflowError); + if (__pyx_t_9) { + __Pyx_AddTraceback("msgpack._cmsgpack.Packer._pack", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_2, &__pyx_t_10, &__pyx_t_11) < 0) __PYX_ERR(0, 183, __pyx_L10_except_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GOTREF(__pyx_t_10); + __Pyx_GOTREF(__pyx_t_11); + __Pyx_INCREF(__pyx_t_10); + __pyx_v_oe = __pyx_t_10; + /*try:*/ { + + /* "msgpack/_packer.pyx":184 + * ret = msgpack_pack_long_long(&self.pk, llval) + * except OverflowError as oe: + * if not default_used and self._default is not None: # <<<<<<<<<<<<<< + * o = self._default(o) + * default_used = True + */ + __pyx_t_1 = ((!(__pyx_v_default_used != 0)) != 0); + if (__pyx_t_1) { + } else { + __pyx_t_3 = __pyx_t_1; + goto __pyx_L25_bool_binop_done; + } + __pyx_t_1 = (__pyx_v_self->_default != Py_None); + __pyx_t_12 = (__pyx_t_1 != 0); + __pyx_t_3 = __pyx_t_12; + __pyx_L25_bool_binop_done:; + if (likely(__pyx_t_3)) { + + /* "msgpack/_packer.pyx":185 + * except OverflowError as oe: + * if not default_used and self._default is not None: + * o = self._default(o) # <<<<<<<<<<<<<< + * default_used = True + * continue + */ + __Pyx_INCREF(__pyx_v_self->_default); + __pyx_t_14 = __pyx_v_self->_default; __pyx_t_15 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_14))) { + __pyx_t_15 = PyMethod_GET_SELF(__pyx_t_14); + if (likely(__pyx_t_15)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_14); + __Pyx_INCREF(__pyx_t_15); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_14, function); + } + } + __pyx_t_13 = (__pyx_t_15) ? 
__Pyx_PyObject_Call2Args(__pyx_t_14, __pyx_t_15, __pyx_v_o) : __Pyx_PyObject_CallOneArg(__pyx_t_14, __pyx_v_o); + __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; + if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 185, __pyx_L22_error) + __Pyx_GOTREF(__pyx_t_13); + __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; + __Pyx_DECREF_SET(__pyx_v_o, __pyx_t_13); + __pyx_t_13 = 0; + + /* "msgpack/_packer.pyx":186 + * if not default_used and self._default is not None: + * o = self._default(o) + * default_used = True # <<<<<<<<<<<<<< + * continue + * else: + */ + __pyx_v_default_used = 1; + + /* "msgpack/_packer.pyx":187 + * o = self._default(o) + * default_used = True + * continue # <<<<<<<<<<<<<< + * else: + * raise OverflowError("Integer value out of range") + */ + goto __pyx_L19_continue; + + /* "msgpack/_packer.pyx":184 + * ret = msgpack_pack_long_long(&self.pk, llval) + * except OverflowError as oe: + * if not default_used and self._default is not None: # <<<<<<<<<<<<<< + * o = self._default(o) + * default_used = True + */ + } + + /* "msgpack/_packer.pyx":189 + * continue + * else: + * raise OverflowError("Integer value out of range") # <<<<<<<<<<<<<< + * elif PyInt_CheckExact(o) if strict_types else PyInt_Check(o): + * longval = o + */ + /*else*/ { + __pyx_t_13 = __Pyx_PyObject_Call(__pyx_builtin_OverflowError, __pyx_tuple__5, NULL); if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 189, __pyx_L22_error) + __Pyx_GOTREF(__pyx_t_13); + __Pyx_Raise(__pyx_t_13, 0, 0, 0); + __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; + __PYX_ERR(0, 189, __pyx_L22_error) + } + } + + /* "msgpack/_packer.pyx":183 + * llval = o + * ret = msgpack_pack_long_long(&self.pk, llval) + * except OverflowError as oe: # <<<<<<<<<<<<<< + * if not default_used and self._default is not None: + * o = self._default(o) + */ + /*finally:*/ { + __pyx_L22_error:; + /*exception exit:*/{ + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __pyx_t_18 = 0; __pyx_t_19 = 0; __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; __pyx_t_23 = 0; + __Pyx_XDECREF(__pyx_t_13); __pyx_t_13 = 0; + __Pyx_XDECREF(__pyx_t_14); __pyx_t_14 = 0; + __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; + if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_21, &__pyx_t_22, &__pyx_t_23); + if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_18, &__pyx_t_19, &__pyx_t_20) < 0)) __Pyx_ErrFetch(&__pyx_t_18, &__pyx_t_19, &__pyx_t_20); + __Pyx_XGOTREF(__pyx_t_18); + __Pyx_XGOTREF(__pyx_t_19); + __Pyx_XGOTREF(__pyx_t_20); + __Pyx_XGOTREF(__pyx_t_21); + __Pyx_XGOTREF(__pyx_t_22); + __Pyx_XGOTREF(__pyx_t_23); + __pyx_t_9 = __pyx_lineno; __pyx_t_16 = __pyx_clineno; __pyx_t_17 = __pyx_filename; + { + __Pyx_DECREF(__pyx_v_oe); + __pyx_v_oe = NULL; + } + if (PY_MAJOR_VERSION >= 3) { + __Pyx_XGIVEREF(__pyx_t_21); + __Pyx_XGIVEREF(__pyx_t_22); + __Pyx_XGIVEREF(__pyx_t_23); + __Pyx_ExceptionReset(__pyx_t_21, __pyx_t_22, __pyx_t_23); + } + __Pyx_XGIVEREF(__pyx_t_18); + __Pyx_XGIVEREF(__pyx_t_19); + __Pyx_XGIVEREF(__pyx_t_20); + __Pyx_ErrRestore(__pyx_t_18, __pyx_t_19, __pyx_t_20); + __pyx_t_18 = 0; __pyx_t_19 = 0; __pyx_t_20 = 0; __pyx_t_21 = 0; __pyx_t_22 = 0; __pyx_t_23 = 0; + __pyx_lineno = __pyx_t_9; __pyx_clineno = __pyx_t_16; __pyx_filename = __pyx_t_17; + goto __pyx_L10_except_error; + } + __pyx_L19_continue: { + __Pyx_DECREF(__pyx_v_oe); + __pyx_v_oe = NULL; + goto __pyx_L18_except_continue; + } + } + __pyx_L18_except_continue:; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + goto __pyx_L14_try_continue; + } + 
goto __pyx_L10_except_error; + __pyx_L10_except_error:; + + /* "msgpack/_packer.pyx":176 + * # PyInt_Check(long) is True for Python 3. + * # So we should test long before int. + * try: # <<<<<<<<<<<<<< + * if o > 0: + * ullval = o + */ + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_XGIVEREF(__pyx_t_6); + __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); + goto __pyx_L1_error; + __pyx_L14_try_continue:; + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_XGIVEREF(__pyx_t_6); + __Pyx_ExceptionReset(__pyx_t_4, __pyx_t_5, __pyx_t_6); + goto __pyx_L4_continue; + __pyx_L15_try_end:; + } + + /* "msgpack/_packer.pyx":173 + * else: + * ret = msgpack_pack_false(&self.pk) + * elif PyLong_CheckExact(o) if strict_types else PyLong_Check(o): # <<<<<<<<<<<<<< + * # PyInt_Check(long) is True for Python 3. + * # So we should test long before int. + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":190 + * else: + * raise OverflowError("Integer value out of range") + * elif PyInt_CheckExact(o) if strict_types else PyInt_Check(o): # <<<<<<<<<<<<<< + * longval = o + * ret = msgpack_pack_long(&self.pk, longval) + */ + if ((__pyx_v_strict_types != 0)) { + __pyx_t_3 = PyInt_CheckExact(__pyx_v_o); + } else { + __pyx_t_3 = PyInt_Check(__pyx_v_o); + } + __pyx_t_12 = (__pyx_t_3 != 0); + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":191 + * raise OverflowError("Integer value out of range") + * elif PyInt_CheckExact(o) if strict_types else PyInt_Check(o): + * longval = o # <<<<<<<<<<<<<< + * ret = msgpack_pack_long(&self.pk, longval) + * elif PyFloat_CheckExact(o) if strict_types else PyFloat_Check(o): + */ + __pyx_t_24 = __Pyx_PyInt_As_long(__pyx_v_o); if (unlikely((__pyx_t_24 == (long)-1) && PyErr_Occurred())) __PYX_ERR(0, 191, __pyx_L1_error) + __pyx_v_longval = __pyx_t_24; + + /* "msgpack/_packer.pyx":192 + * elif PyInt_CheckExact(o) if strict_types else PyInt_Check(o): + * longval = o + * ret = msgpack_pack_long(&self.pk, longval) # <<<<<<<<<<<<<< + * elif PyFloat_CheckExact(o) if strict_types else PyFloat_Check(o): + * if self.use_float: + */ + __pyx_v_ret = msgpack_pack_long((&__pyx_v_self->pk), __pyx_v_longval); + + /* "msgpack/_packer.pyx":190 + * else: + * raise OverflowError("Integer value out of range") + * elif PyInt_CheckExact(o) if strict_types else PyInt_Check(o): # <<<<<<<<<<<<<< + * longval = o + * ret = msgpack_pack_long(&self.pk, longval) + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":193 + * longval = o + * ret = msgpack_pack_long(&self.pk, longval) + * elif PyFloat_CheckExact(o) if strict_types else PyFloat_Check(o): # <<<<<<<<<<<<<< + * if self.use_float: + * fval = o + */ + if ((__pyx_v_strict_types != 0)) { + __pyx_t_12 = PyFloat_CheckExact(__pyx_v_o); + } else { + __pyx_t_12 = PyFloat_Check(__pyx_v_o); + } + __pyx_t_3 = (__pyx_t_12 != 0); + if (__pyx_t_3) { + + /* "msgpack/_packer.pyx":194 + * ret = msgpack_pack_long(&self.pk, longval) + * elif PyFloat_CheckExact(o) if strict_types else PyFloat_Check(o): + * if self.use_float: # <<<<<<<<<<<<<< + * fval = o + * ret = msgpack_pack_float(&self.pk, fval) + */ + __pyx_t_3 = __Pyx_PyObject_IsTrue(((PyObject *)__pyx_v_self->use_float)); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 194, __pyx_L1_error) + if (__pyx_t_3) { + + /* "msgpack/_packer.pyx":195 + * elif PyFloat_CheckExact(o) if strict_types else PyFloat_Check(o): + * if self.use_float: + * fval = o # <<<<<<<<<<<<<< + * ret = msgpack_pack_float(&self.pk, fval) + * else: + */ + __pyx_t_25 = __pyx_PyFloat_AsFloat(__pyx_v_o); if 
(unlikely((__pyx_t_25 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 195, __pyx_L1_error) + __pyx_v_fval = __pyx_t_25; + + /* "msgpack/_packer.pyx":196 + * if self.use_float: + * fval = o + * ret = msgpack_pack_float(&self.pk, fval) # <<<<<<<<<<<<<< + * else: + * dval = o + */ + __pyx_v_ret = msgpack_pack_float((&__pyx_v_self->pk), __pyx_v_fval); + + /* "msgpack/_packer.pyx":194 + * ret = msgpack_pack_long(&self.pk, longval) + * elif PyFloat_CheckExact(o) if strict_types else PyFloat_Check(o): + * if self.use_float: # <<<<<<<<<<<<<< + * fval = o + * ret = msgpack_pack_float(&self.pk, fval) + */ + goto __pyx_L31; + } + + /* "msgpack/_packer.pyx":198 + * ret = msgpack_pack_float(&self.pk, fval) + * else: + * dval = o # <<<<<<<<<<<<<< + * ret = msgpack_pack_double(&self.pk, dval) + * elif PyBytesLike_CheckExact(o) if strict_types else PyBytesLike_Check(o): + */ + /*else*/ { + __pyx_t_26 = __pyx_PyFloat_AsDouble(__pyx_v_o); if (unlikely((__pyx_t_26 == (double)-1) && PyErr_Occurred())) __PYX_ERR(0, 198, __pyx_L1_error) + __pyx_v_dval = __pyx_t_26; + + /* "msgpack/_packer.pyx":199 + * else: + * dval = o + * ret = msgpack_pack_double(&self.pk, dval) # <<<<<<<<<<<<<< + * elif PyBytesLike_CheckExact(o) if strict_types else PyBytesLike_Check(o): + * L = len(o) + */ + __pyx_v_ret = msgpack_pack_double((&__pyx_v_self->pk), __pyx_v_dval); + } + __pyx_L31:; + + /* "msgpack/_packer.pyx":193 + * longval = o + * ret = msgpack_pack_long(&self.pk, longval) + * elif PyFloat_CheckExact(o) if strict_types else PyFloat_Check(o): # <<<<<<<<<<<<<< + * if self.use_float: + * fval = o + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":200 + * dval = o + * ret = msgpack_pack_double(&self.pk, dval) + * elif PyBytesLike_CheckExact(o) if strict_types else PyBytesLike_Check(o): # <<<<<<<<<<<<<< + * L = len(o) + * if L > ITEM_LIMIT: + */ + if ((__pyx_v_strict_types != 0)) { + __pyx_t_16 = __pyx_f_7msgpack_9_cmsgpack_PyBytesLike_CheckExact(__pyx_v_o); + } else { + __pyx_t_16 = __pyx_f_7msgpack_9_cmsgpack_PyBytesLike_Check(__pyx_v_o); + } + __pyx_t_3 = (__pyx_t_16 != 0); + if (__pyx_t_3) { + + /* "msgpack/_packer.pyx":201 + * ret = msgpack_pack_double(&self.pk, dval) + * elif PyBytesLike_CheckExact(o) if strict_types else PyBytesLike_Check(o): + * L = len(o) # <<<<<<<<<<<<<< + * if L > ITEM_LIMIT: + * PyErr_Format(ValueError, b"%.200s object is too large", Py_TYPE(o).tp_name) + */ + __pyx_t_27 = PyObject_Length(__pyx_v_o); if (unlikely(__pyx_t_27 == ((Py_ssize_t)-1))) __PYX_ERR(0, 201, __pyx_L1_error) + __pyx_v_L = __pyx_t_27; + + /* "msgpack/_packer.pyx":202 + * elif PyBytesLike_CheckExact(o) if strict_types else PyBytesLike_Check(o): + * L = len(o) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * PyErr_Format(ValueError, b"%.200s object is too large", Py_TYPE(o).tp_name) + * rawval = o + */ + __pyx_t_3 = ((__pyx_v_L > __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT) != 0); + if (__pyx_t_3) { + + /* "msgpack/_packer.pyx":203 + * L = len(o) + * if L > ITEM_LIMIT: + * PyErr_Format(ValueError, b"%.200s object is too large", Py_TYPE(o).tp_name) # <<<<<<<<<<<<<< + * rawval = o + * ret = msgpack_pack_bin(&self.pk, L) + */ + __pyx_t_28 = PyErr_Format(__pyx_builtin_ValueError, ((char *)"%.200s object is too large"), Py_TYPE(__pyx_v_o)->tp_name); if (unlikely(__pyx_t_28 == ((PyObject *)NULL))) __PYX_ERR(0, 203, __pyx_L1_error) + + /* "msgpack/_packer.pyx":202 + * elif PyBytesLike_CheckExact(o) if strict_types else PyBytesLike_Check(o): + * L = len(o) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * PyErr_Format(ValueError, b"%.200s object is 
too large", Py_TYPE(o).tp_name) + * rawval = o + */ + } + + /* "msgpack/_packer.pyx":204 + * if L > ITEM_LIMIT: + * PyErr_Format(ValueError, b"%.200s object is too large", Py_TYPE(o).tp_name) + * rawval = o # <<<<<<<<<<<<<< + * ret = msgpack_pack_bin(&self.pk, L) + * if ret == 0: + */ + __pyx_t_29 = __Pyx_PyObject_AsWritableString(__pyx_v_o); if (unlikely((!__pyx_t_29) && PyErr_Occurred())) __PYX_ERR(0, 204, __pyx_L1_error) + __pyx_v_rawval = __pyx_t_29; + + /* "msgpack/_packer.pyx":205 + * PyErr_Format(ValueError, b"%.200s object is too large", Py_TYPE(o).tp_name) + * rawval = o + * ret = msgpack_pack_bin(&self.pk, L) # <<<<<<<<<<<<<< + * if ret == 0: + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + */ + __pyx_v_ret = msgpack_pack_bin((&__pyx_v_self->pk), __pyx_v_L); + + /* "msgpack/_packer.pyx":206 + * rawval = o + * ret = msgpack_pack_bin(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyUnicode_CheckExact(o) if strict_types else PyUnicode_Check(o): + */ + __pyx_t_3 = ((__pyx_v_ret == 0) != 0); + if (__pyx_t_3) { + + /* "msgpack/_packer.pyx":207 + * ret = msgpack_pack_bin(&self.pk, L) + * if ret == 0: + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) # <<<<<<<<<<<<<< + * elif PyUnicode_CheckExact(o) if strict_types else PyUnicode_Check(o): + * if self.encoding == NULL and self.unicode_errors == NULL: + */ + __pyx_v_ret = msgpack_pack_raw_body((&__pyx_v_self->pk), __pyx_v_rawval, __pyx_v_L); + + /* "msgpack/_packer.pyx":206 + * rawval = o + * ret = msgpack_pack_bin(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyUnicode_CheckExact(o) if strict_types else PyUnicode_Check(o): + */ + } + + /* "msgpack/_packer.pyx":200 + * dval = o + * ret = msgpack_pack_double(&self.pk, dval) + * elif PyBytesLike_CheckExact(o) if strict_types else PyBytesLike_Check(o): # <<<<<<<<<<<<<< + * L = len(o) + * if L > ITEM_LIMIT: + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":208 + * if ret == 0: + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyUnicode_CheckExact(o) if strict_types else PyUnicode_Check(o): # <<<<<<<<<<<<<< + * if self.encoding == NULL and self.unicode_errors == NULL: + * ret = msgpack_pack_unicode(&self.pk, o, ITEM_LIMIT); + */ + if ((__pyx_v_strict_types != 0)) { + __pyx_t_3 = PyUnicode_CheckExact(__pyx_v_o); + } else { + __pyx_t_3 = PyUnicode_Check(__pyx_v_o); + } + __pyx_t_12 = (__pyx_t_3 != 0); + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":209 + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyUnicode_CheckExact(o) if strict_types else PyUnicode_Check(o): + * if self.encoding == NULL and self.unicode_errors == NULL: # <<<<<<<<<<<<<< + * ret = msgpack_pack_unicode(&self.pk, o, ITEM_LIMIT); + * if ret == -2: + */ + __pyx_t_3 = ((__pyx_v_self->encoding == NULL) != 0); + if (__pyx_t_3) { + } else { + __pyx_t_12 = __pyx_t_3; + goto __pyx_L35_bool_binop_done; + } + __pyx_t_3 = ((__pyx_v_self->unicode_errors == NULL) != 0); + __pyx_t_12 = __pyx_t_3; + __pyx_L35_bool_binop_done:; + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":210 + * elif PyUnicode_CheckExact(o) if strict_types else PyUnicode_Check(o): + * if self.encoding == NULL and self.unicode_errors == NULL: + * ret = msgpack_pack_unicode(&self.pk, o, ITEM_LIMIT); # <<<<<<<<<<<<<< + * if ret == -2: + * raise ValueError("unicode string is too large") + */ + __pyx_v_ret = msgpack_pack_unicode((&__pyx_v_self->pk), __pyx_v_o, __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT); + + /* 
"msgpack/_packer.pyx":211 + * if self.encoding == NULL and self.unicode_errors == NULL: + * ret = msgpack_pack_unicode(&self.pk, o, ITEM_LIMIT); + * if ret == -2: # <<<<<<<<<<<<<< + * raise ValueError("unicode string is too large") + * else: + */ + __pyx_t_12 = ((__pyx_v_ret == -2L) != 0); + if (unlikely(__pyx_t_12)) { + + /* "msgpack/_packer.pyx":212 + * ret = msgpack_pack_unicode(&self.pk, o, ITEM_LIMIT); + * if ret == -2: + * raise ValueError("unicode string is too large") # <<<<<<<<<<<<<< + * else: + * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors) + */ + __pyx_t_11 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 212, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_Raise(__pyx_t_11, 0, 0, 0); + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + __PYX_ERR(0, 212, __pyx_L1_error) + + /* "msgpack/_packer.pyx":211 + * if self.encoding == NULL and self.unicode_errors == NULL: + * ret = msgpack_pack_unicode(&self.pk, o, ITEM_LIMIT); + * if ret == -2: # <<<<<<<<<<<<<< + * raise ValueError("unicode string is too large") + * else: + */ + } + + /* "msgpack/_packer.pyx":209 + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyUnicode_CheckExact(o) if strict_types else PyUnicode_Check(o): + * if self.encoding == NULL and self.unicode_errors == NULL: # <<<<<<<<<<<<<< + * ret = msgpack_pack_unicode(&self.pk, o, ITEM_LIMIT); + * if ret == -2: + */ + goto __pyx_L34; + } + + /* "msgpack/_packer.pyx":214 + * raise ValueError("unicode string is too large") + * else: + * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors) # <<<<<<<<<<<<<< + * L = len(o) + * if L > ITEM_LIMIT: + */ + /*else*/ { + __pyx_t_11 = PyUnicode_AsEncodedString(__pyx_v_o, __pyx_v_self->encoding, __pyx_v_self->unicode_errors); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 214, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_DECREF_SET(__pyx_v_o, __pyx_t_11); + __pyx_t_11 = 0; + + /* "msgpack/_packer.pyx":215 + * else: + * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors) + * L = len(o) # <<<<<<<<<<<<<< + * if L > ITEM_LIMIT: + * raise ValueError("unicode string is too large") + */ + __pyx_t_27 = PyObject_Length(__pyx_v_o); if (unlikely(__pyx_t_27 == ((Py_ssize_t)-1))) __PYX_ERR(0, 215, __pyx_L1_error) + __pyx_v_L = __pyx_t_27; + + /* "msgpack/_packer.pyx":216 + * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors) + * L = len(o) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError("unicode string is too large") + * ret = msgpack_pack_raw(&self.pk, L) + */ + __pyx_t_12 = ((__pyx_v_L > __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT) != 0); + if (unlikely(__pyx_t_12)) { + + /* "msgpack/_packer.pyx":217 + * L = len(o) + * if L > ITEM_LIMIT: + * raise ValueError("unicode string is too large") # <<<<<<<<<<<<<< + * ret = msgpack_pack_raw(&self.pk, L) + * if ret == 0: + */ + __pyx_t_11 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__6, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 217, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_Raise(__pyx_t_11, 0, 0, 0); + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + __PYX_ERR(0, 217, __pyx_L1_error) + + /* "msgpack/_packer.pyx":216 + * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors) + * L = len(o) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError("unicode string is too large") + * ret = msgpack_pack_raw(&self.pk, L) + */ + } + + /* "msgpack/_packer.pyx":218 + * if L > ITEM_LIMIT: + * raise ValueError("unicode 
string is too large") + * ret = msgpack_pack_raw(&self.pk, L) # <<<<<<<<<<<<<< + * if ret == 0: + * rawval = o + */ + __pyx_v_ret = msgpack_pack_raw((&__pyx_v_self->pk), __pyx_v_L); + + /* "msgpack/_packer.pyx":219 + * raise ValueError("unicode string is too large") + * ret = msgpack_pack_raw(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * rawval = o + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + */ + __pyx_t_12 = ((__pyx_v_ret == 0) != 0); + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":220 + * ret = msgpack_pack_raw(&self.pk, L) + * if ret == 0: + * rawval = o # <<<<<<<<<<<<<< + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyDict_CheckExact(o): + */ + __pyx_t_29 = __Pyx_PyObject_AsWritableString(__pyx_v_o); if (unlikely((!__pyx_t_29) && PyErr_Occurred())) __PYX_ERR(0, 220, __pyx_L1_error) + __pyx_v_rawval = __pyx_t_29; + + /* "msgpack/_packer.pyx":221 + * if ret == 0: + * rawval = o + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) # <<<<<<<<<<<<<< + * elif PyDict_CheckExact(o): + * d = o + */ + __pyx_v_ret = msgpack_pack_raw_body((&__pyx_v_self->pk), __pyx_v_rawval, __pyx_v_L); + + /* "msgpack/_packer.pyx":219 + * raise ValueError("unicode string is too large") + * ret = msgpack_pack_raw(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * rawval = o + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + */ + } + } + __pyx_L34:; + + /* "msgpack/_packer.pyx":208 + * if ret == 0: + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyUnicode_CheckExact(o) if strict_types else PyUnicode_Check(o): # <<<<<<<<<<<<<< + * if self.encoding == NULL and self.unicode_errors == NULL: + * ret = msgpack_pack_unicode(&self.pk, o, ITEM_LIMIT); + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":222 + * rawval = o + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyDict_CheckExact(o): # <<<<<<<<<<<<<< + * d = o + * L = len(d) + */ + __pyx_t_12 = (PyDict_CheckExact(__pyx_v_o) != 0); + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":223 + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyDict_CheckExact(o): + * d = o # <<<<<<<<<<<<<< + * L = len(d) + * if L > ITEM_LIMIT: + */ + __pyx_t_11 = __pyx_v_o; + __Pyx_INCREF(__pyx_t_11); + __pyx_v_d = ((PyObject*)__pyx_t_11); + __pyx_t_11 = 0; + + /* "msgpack/_packer.pyx":224 + * elif PyDict_CheckExact(o): + * d = o + * L = len(d) # <<<<<<<<<<<<<< + * if L > ITEM_LIMIT: + * raise ValueError("dict is too large") + */ + if (unlikely(__pyx_v_d == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(0, 224, __pyx_L1_error) + } + __pyx_t_27 = PyDict_Size(__pyx_v_d); if (unlikely(__pyx_t_27 == ((Py_ssize_t)-1))) __PYX_ERR(0, 224, __pyx_L1_error) + __pyx_v_L = __pyx_t_27; + + /* "msgpack/_packer.pyx":225 + * d = o + * L = len(d) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError("dict is too large") + * ret = msgpack_pack_map(&self.pk, L) + */ + __pyx_t_12 = ((__pyx_v_L > __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT) != 0); + if (unlikely(__pyx_t_12)) { + + /* "msgpack/_packer.pyx":226 + * L = len(d) + * if L > ITEM_LIMIT: + * raise ValueError("dict is too large") # <<<<<<<<<<<<<< + * ret = msgpack_pack_map(&self.pk, L) + * if ret == 0: + */ + __pyx_t_11 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 226, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_Raise(__pyx_t_11, 0, 0, 0); + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + __PYX_ERR(0, 226, __pyx_L1_error) + + /* "msgpack/_packer.pyx":225 
+ * d = o + * L = len(d) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError("dict is too large") + * ret = msgpack_pack_map(&self.pk, L) + */ + } + + /* "msgpack/_packer.pyx":227 + * if L > ITEM_LIMIT: + * raise ValueError("dict is too large") + * ret = msgpack_pack_map(&self.pk, L) # <<<<<<<<<<<<<< + * if ret == 0: + * for k, v in d.items(): + */ + __pyx_v_ret = msgpack_pack_map((&__pyx_v_self->pk), __pyx_v_L); + + /* "msgpack/_packer.pyx":228 + * raise ValueError("dict is too large") + * ret = msgpack_pack_map(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * for k, v in d.items(): + * ret = self._pack(k, nest_limit-1) + */ + __pyx_t_12 = ((__pyx_v_ret == 0) != 0); + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":229 + * ret = msgpack_pack_map(&self.pk, L) + * if ret == 0: + * for k, v in d.items(): # <<<<<<<<<<<<<< + * ret = self._pack(k, nest_limit-1) + * if ret != 0: break + */ + __pyx_t_27 = 0; + if (unlikely(__pyx_v_d == Py_None)) { + PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "items"); + __PYX_ERR(0, 229, __pyx_L1_error) + } + __pyx_t_10 = __Pyx_dict_iterator(__pyx_v_d, 1, __pyx_n_s_items, (&__pyx_t_30), (&__pyx_t_16)); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 229, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_XDECREF(__pyx_t_11); + __pyx_t_11 = __pyx_t_10; + __pyx_t_10 = 0; + while (1) { + __pyx_t_9 = __Pyx_dict_iter_next(__pyx_t_11, __pyx_t_30, &__pyx_t_27, &__pyx_t_10, &__pyx_t_2, NULL, __pyx_t_16); + if (unlikely(__pyx_t_9 == 0)) break; + if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(0, 229, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_GOTREF(__pyx_t_2); + __Pyx_XDECREF_SET(__pyx_v_k, __pyx_t_10); + __pyx_t_10 = 0; + __Pyx_XDECREF_SET(__pyx_v_v, __pyx_t_2); + __pyx_t_2 = 0; + + /* "msgpack/_packer.pyx":230 + * if ret == 0: + * for k, v in d.items(): + * ret = self._pack(k, nest_limit-1) # <<<<<<<<<<<<<< + * if ret != 0: break + * ret = self._pack(v, nest_limit-1) + */ + __pyx_t_31.__pyx_n = 1; + __pyx_t_31.nest_limit = (__pyx_v_nest_limit - 1); + __pyx_t_9 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_k, &__pyx_t_31); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(0, 230, __pyx_L1_error) + __pyx_v_ret = __pyx_t_9; + + /* "msgpack/_packer.pyx":231 + * for k, v in d.items(): + * ret = self._pack(k, nest_limit-1) + * if ret != 0: break # <<<<<<<<<<<<<< + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break + */ + __pyx_t_12 = ((__pyx_v_ret != 0) != 0); + if (__pyx_t_12) { + goto __pyx_L43_break; + } + + /* "msgpack/_packer.pyx":232 + * ret = self._pack(k, nest_limit-1) + * if ret != 0: break + * ret = self._pack(v, nest_limit-1) # <<<<<<<<<<<<<< + * if ret != 0: break + * elif not strict_types and PyDict_Check(o): + */ + __pyx_t_31.__pyx_n = 1; + __pyx_t_31.nest_limit = (__pyx_v_nest_limit - 1); + __pyx_t_9 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_v, &__pyx_t_31); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(0, 232, __pyx_L1_error) + __pyx_v_ret = __pyx_t_9; + + /* "msgpack/_packer.pyx":233 + * if ret != 0: break + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break # <<<<<<<<<<<<<< + * elif not strict_types and PyDict_Check(o): + * L = len(o) + */ + __pyx_t_12 = ((__pyx_v_ret != 0) != 0); + if (__pyx_t_12) { + goto __pyx_L43_break; + } + } + __pyx_L43_break:; + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + + /* "msgpack/_packer.pyx":228 + * raise ValueError("dict is too 
large") + * ret = msgpack_pack_map(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * for k, v in d.items(): + * ret = self._pack(k, nest_limit-1) + */ + } + + /* "msgpack/_packer.pyx":222 + * rawval = o + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyDict_CheckExact(o): # <<<<<<<<<<<<<< + * d = o + * L = len(d) + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":234 + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break + * elif not strict_types and PyDict_Check(o): # <<<<<<<<<<<<<< + * L = len(o) + * if L > ITEM_LIMIT: + */ + __pyx_t_3 = ((!(__pyx_v_strict_types != 0)) != 0); + if (__pyx_t_3) { + } else { + __pyx_t_12 = __pyx_t_3; + goto __pyx_L46_bool_binop_done; + } + __pyx_t_3 = (PyDict_Check(__pyx_v_o) != 0); + __pyx_t_12 = __pyx_t_3; + __pyx_L46_bool_binop_done:; + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":235 + * if ret != 0: break + * elif not strict_types and PyDict_Check(o): + * L = len(o) # <<<<<<<<<<<<<< + * if L > ITEM_LIMIT: + * raise ValueError("dict is too large") + */ + __pyx_t_30 = PyObject_Length(__pyx_v_o); if (unlikely(__pyx_t_30 == ((Py_ssize_t)-1))) __PYX_ERR(0, 235, __pyx_L1_error) + __pyx_v_L = __pyx_t_30; + + /* "msgpack/_packer.pyx":236 + * elif not strict_types and PyDict_Check(o): + * L = len(o) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError("dict is too large") + * ret = msgpack_pack_map(&self.pk, L) + */ + __pyx_t_12 = ((__pyx_v_L > __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT) != 0); + if (unlikely(__pyx_t_12)) { + + /* "msgpack/_packer.pyx":237 + * L = len(o) + * if L > ITEM_LIMIT: + * raise ValueError("dict is too large") # <<<<<<<<<<<<<< + * ret = msgpack_pack_map(&self.pk, L) + * if ret == 0: + */ + __pyx_t_11 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__7, NULL); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 237, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_Raise(__pyx_t_11, 0, 0, 0); + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + __PYX_ERR(0, 237, __pyx_L1_error) + + /* "msgpack/_packer.pyx":236 + * elif not strict_types and PyDict_Check(o): + * L = len(o) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError("dict is too large") + * ret = msgpack_pack_map(&self.pk, L) + */ + } + + /* "msgpack/_packer.pyx":238 + * if L > ITEM_LIMIT: + * raise ValueError("dict is too large") + * ret = msgpack_pack_map(&self.pk, L) # <<<<<<<<<<<<<< + * if ret == 0: + * for k, v in o.items(): + */ + __pyx_v_ret = msgpack_pack_map((&__pyx_v_self->pk), __pyx_v_L); + + /* "msgpack/_packer.pyx":239 + * raise ValueError("dict is too large") + * ret = msgpack_pack_map(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * for k, v in o.items(): + * ret = self._pack(k, nest_limit-1) + */ + __pyx_t_12 = ((__pyx_v_ret == 0) != 0); + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":240 + * ret = msgpack_pack_map(&self.pk, L) + * if ret == 0: + * for k, v in o.items(): # <<<<<<<<<<<<<< + * ret = self._pack(k, nest_limit-1) + * if ret != 0: break + */ + __pyx_t_30 = 0; + if (unlikely(__pyx_v_o == Py_None)) { + PyErr_Format(PyExc_AttributeError, "'NoneType' object has no attribute '%.30s'", "items"); + __PYX_ERR(0, 240, __pyx_L1_error) + } + __pyx_t_2 = __Pyx_dict_iterator(__pyx_v_o, 0, __pyx_n_s_items, (&__pyx_t_27), (&__pyx_t_16)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 240, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_11); + __pyx_t_11 = __pyx_t_2; + __pyx_t_2 = 0; + while (1) { + __pyx_t_9 = __Pyx_dict_iter_next(__pyx_t_11, __pyx_t_27, &__pyx_t_30, &__pyx_t_2, &__pyx_t_10, NULL, __pyx_t_16); + if 
(unlikely(__pyx_t_9 == 0)) break; + if (unlikely(__pyx_t_9 == -1)) __PYX_ERR(0, 240, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GOTREF(__pyx_t_10); + __Pyx_XDECREF_SET(__pyx_v_k, __pyx_t_2); + __pyx_t_2 = 0; + __Pyx_XDECREF_SET(__pyx_v_v, __pyx_t_10); + __pyx_t_10 = 0; + + /* "msgpack/_packer.pyx":241 + * if ret == 0: + * for k, v in o.items(): + * ret = self._pack(k, nest_limit-1) # <<<<<<<<<<<<<< + * if ret != 0: break + * ret = self._pack(v, nest_limit-1) + */ + __pyx_t_31.__pyx_n = 1; + __pyx_t_31.nest_limit = (__pyx_v_nest_limit - 1); + __pyx_t_9 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_k, &__pyx_t_31); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(0, 241, __pyx_L1_error) + __pyx_v_ret = __pyx_t_9; + + /* "msgpack/_packer.pyx":242 + * for k, v in o.items(): + * ret = self._pack(k, nest_limit-1) + * if ret != 0: break # <<<<<<<<<<<<<< + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break + */ + __pyx_t_12 = ((__pyx_v_ret != 0) != 0); + if (__pyx_t_12) { + goto __pyx_L51_break; + } + + /* "msgpack/_packer.pyx":243 + * ret = self._pack(k, nest_limit-1) + * if ret != 0: break + * ret = self._pack(v, nest_limit-1) # <<<<<<<<<<<<<< + * if ret != 0: break + * elif type(o) is ExtType if strict_types else isinstance(o, ExtType): + */ + __pyx_t_31.__pyx_n = 1; + __pyx_t_31.nest_limit = (__pyx_v_nest_limit - 1); + __pyx_t_9 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_v, &__pyx_t_31); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(0, 243, __pyx_L1_error) + __pyx_v_ret = __pyx_t_9; + + /* "msgpack/_packer.pyx":244 + * if ret != 0: break + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break # <<<<<<<<<<<<<< + * elif type(o) is ExtType if strict_types else isinstance(o, ExtType): + * # This should be before Tuple because ExtType is namedtuple. + */ + __pyx_t_12 = ((__pyx_v_ret != 0) != 0); + if (__pyx_t_12) { + goto __pyx_L51_break; + } + } + __pyx_L51_break:; + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + + /* "msgpack/_packer.pyx":239 + * raise ValueError("dict is too large") + * ret = msgpack_pack_map(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * for k, v in o.items(): + * ret = self._pack(k, nest_limit-1) + */ + } + + /* "msgpack/_packer.pyx":234 + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break + * elif not strict_types and PyDict_Check(o): # <<<<<<<<<<<<<< + * L = len(o) + * if L > ITEM_LIMIT: + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":245 + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break + * elif type(o) is ExtType if strict_types else isinstance(o, ExtType): # <<<<<<<<<<<<<< + * # This should be before Tuple because ExtType is namedtuple. 
+ * longval = o.code + */ + if ((__pyx_v_strict_types != 0)) { + __pyx_t_11 = __Pyx_PyObject_CallOneArg(((PyObject *)__pyx_ptype_7cpython_4type_type), __pyx_v_o); if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 245, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_ExtType); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 245, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __pyx_t_3 = (__pyx_t_11 == __pyx_t_10); + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_t_12 = __pyx_t_3; + } else { + __Pyx_GetModuleGlobalName(__pyx_t_10, __pyx_n_s_ExtType); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 245, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __pyx_t_3 = PyObject_IsInstance(__pyx_v_o, __pyx_t_10); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(0, 245, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_t_12 = __pyx_t_3; + } + __pyx_t_3 = (__pyx_t_12 != 0); + if (__pyx_t_3) { + + /* "msgpack/_packer.pyx":247 + * elif type(o) is ExtType if strict_types else isinstance(o, ExtType): + * # This should be before Tuple because ExtType is namedtuple. + * longval = o.code # <<<<<<<<<<<<<< + * rawval = o.data + * L = len(o.data) + */ + __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_o, __pyx_n_s_code); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 247, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __pyx_t_24 = __Pyx_PyInt_As_long(__pyx_t_10); if (unlikely((__pyx_t_24 == (long)-1) && PyErr_Occurred())) __PYX_ERR(0, 247, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_v_longval = __pyx_t_24; + + /* "msgpack/_packer.pyx":248 + * # This should be before Tuple because ExtType is namedtuple. + * longval = o.code + * rawval = o.data # <<<<<<<<<<<<<< + * L = len(o.data) + * if L > ITEM_LIMIT: + */ + __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_o, __pyx_n_s_data); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 248, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __pyx_t_29 = __Pyx_PyObject_AsWritableString(__pyx_t_10); if (unlikely((!__pyx_t_29) && PyErr_Occurred())) __PYX_ERR(0, 248, __pyx_L1_error) + __pyx_v_rawval = __pyx_t_29; + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "msgpack/_packer.pyx":249 + * longval = o.code + * rawval = o.data + * L = len(o.data) # <<<<<<<<<<<<<< + * if L > ITEM_LIMIT: + * raise ValueError("EXT data is too large") + */ + __pyx_t_10 = __Pyx_PyObject_GetAttrStr(__pyx_v_o, __pyx_n_s_data); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __pyx_t_27 = PyObject_Length(__pyx_t_10); if (unlikely(__pyx_t_27 == ((Py_ssize_t)-1))) __PYX_ERR(0, 249, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __pyx_v_L = __pyx_t_27; + + /* "msgpack/_packer.pyx":250 + * rawval = o.data + * L = len(o.data) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError("EXT data is too large") + * ret = msgpack_pack_ext(&self.pk, longval, L) + */ + __pyx_t_3 = ((__pyx_v_L > __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT) != 0); + if (unlikely(__pyx_t_3)) { + + /* "msgpack/_packer.pyx":251 + * L = len(o.data) + * if L > ITEM_LIMIT: + * raise ValueError("EXT data is too large") # <<<<<<<<<<<<<< + * ret = msgpack_pack_ext(&self.pk, longval, L) + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + */ + __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__8, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(0, 
251, __pyx_L1_error) + + /* "msgpack/_packer.pyx":250 + * rawval = o.data + * L = len(o.data) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError("EXT data is too large") + * ret = msgpack_pack_ext(&self.pk, longval, L) + */ + } + + /* "msgpack/_packer.pyx":252 + * if L > ITEM_LIMIT: + * raise ValueError("EXT data is too large") + * ret = msgpack_pack_ext(&self.pk, longval, L) # <<<<<<<<<<<<<< + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyList_CheckExact(o) if strict_types else (PyTuple_Check(o) or PyList_Check(o)): + */ + __pyx_v_ret = msgpack_pack_ext((&__pyx_v_self->pk), __pyx_v_longval, __pyx_v_L); + + /* "msgpack/_packer.pyx":253 + * raise ValueError("EXT data is too large") + * ret = msgpack_pack_ext(&self.pk, longval, L) + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) # <<<<<<<<<<<<<< + * elif PyList_CheckExact(o) if strict_types else (PyTuple_Check(o) or PyList_Check(o)): + * L = len(o) + */ + __pyx_v_ret = msgpack_pack_raw_body((&__pyx_v_self->pk), __pyx_v_rawval, __pyx_v_L); + + /* "msgpack/_packer.pyx":245 + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break + * elif type(o) is ExtType if strict_types else isinstance(o, ExtType): # <<<<<<<<<<<<<< + * # This should be before Tuple because ExtType is namedtuple. + * longval = o.code + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":254 + * ret = msgpack_pack_ext(&self.pk, longval, L) + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyList_CheckExact(o) if strict_types else (PyTuple_Check(o) or PyList_Check(o)): # <<<<<<<<<<<<<< + * L = len(o) + * if L > ITEM_LIMIT: + */ + if ((__pyx_v_strict_types != 0)) { + __pyx_t_3 = PyList_CheckExact(__pyx_v_o); + } else { + __pyx_t_1 = PyTuple_Check(__pyx_v_o); + if (!__pyx_t_1) { + } else { + __pyx_t_12 = __pyx_t_1; + goto __pyx_L55_bool_binop_done; + } + __pyx_t_1 = PyList_Check(__pyx_v_o); + __pyx_t_12 = __pyx_t_1; + __pyx_L55_bool_binop_done:; + __pyx_t_3 = __pyx_t_12; + } + __pyx_t_12 = (__pyx_t_3 != 0); + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":255 + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyList_CheckExact(o) if strict_types else (PyTuple_Check(o) or PyList_Check(o)): + * L = len(o) # <<<<<<<<<<<<<< + * if L > ITEM_LIMIT: + * raise ValueError("list is too large") + */ + __pyx_t_27 = PyObject_Length(__pyx_v_o); if (unlikely(__pyx_t_27 == ((Py_ssize_t)-1))) __PYX_ERR(0, 255, __pyx_L1_error) + __pyx_v_L = __pyx_t_27; + + /* "msgpack/_packer.pyx":256 + * elif PyList_CheckExact(o) if strict_types else (PyTuple_Check(o) or PyList_Check(o)): + * L = len(o) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError("list is too large") + * ret = msgpack_pack_array(&self.pk, L) + */ + __pyx_t_12 = ((__pyx_v_L > __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT) != 0); + if (unlikely(__pyx_t_12)) { + + /* "msgpack/_packer.pyx":257 + * L = len(o) + * if L > ITEM_LIMIT: + * raise ValueError("list is too large") # <<<<<<<<<<<<<< + * ret = msgpack_pack_array(&self.pk, L) + * if ret == 0: + */ + __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 257, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(0, 257, __pyx_L1_error) + + /* "msgpack/_packer.pyx":256 + * elif PyList_CheckExact(o) if strict_types else (PyTuple_Check(o) or PyList_Check(o)): + * L = len(o) + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError("list is too large") + * ret = 
msgpack_pack_array(&self.pk, L) + */ + } + + /* "msgpack/_packer.pyx":258 + * if L > ITEM_LIMIT: + * raise ValueError("list is too large") + * ret = msgpack_pack_array(&self.pk, L) # <<<<<<<<<<<<<< + * if ret == 0: + * for v in o: + */ + __pyx_v_ret = msgpack_pack_array((&__pyx_v_self->pk), __pyx_v_L); + + /* "msgpack/_packer.pyx":259 + * raise ValueError("list is too large") + * ret = msgpack_pack_array(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * for v in o: + * ret = self._pack(v, nest_limit-1) + */ + __pyx_t_12 = ((__pyx_v_ret == 0) != 0); + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":260 + * ret = msgpack_pack_array(&self.pk, L) + * if ret == 0: + * for v in o: # <<<<<<<<<<<<<< + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break + */ + if (likely(PyList_CheckExact(__pyx_v_o)) || PyTuple_CheckExact(__pyx_v_o)) { + __pyx_t_10 = __pyx_v_o; __Pyx_INCREF(__pyx_t_10); __pyx_t_27 = 0; + __pyx_t_32 = NULL; + } else { + __pyx_t_27 = -1; __pyx_t_10 = PyObject_GetIter(__pyx_v_o); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 260, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __pyx_t_32 = Py_TYPE(__pyx_t_10)->tp_iternext; if (unlikely(!__pyx_t_32)) __PYX_ERR(0, 260, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_32)) { + if (likely(PyList_CheckExact(__pyx_t_10))) { + if (__pyx_t_27 >= PyList_GET_SIZE(__pyx_t_10)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_11 = PyList_GET_ITEM(__pyx_t_10, __pyx_t_27); __Pyx_INCREF(__pyx_t_11); __pyx_t_27++; if (unlikely(0 < 0)) __PYX_ERR(0, 260, __pyx_L1_error) + #else + __pyx_t_11 = PySequence_ITEM(__pyx_t_10, __pyx_t_27); __pyx_t_27++; if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 260, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + #endif + } else { + if (__pyx_t_27 >= PyTuple_GET_SIZE(__pyx_t_10)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_11 = PyTuple_GET_ITEM(__pyx_t_10, __pyx_t_27); __Pyx_INCREF(__pyx_t_11); __pyx_t_27++; if (unlikely(0 < 0)) __PYX_ERR(0, 260, __pyx_L1_error) + #else + __pyx_t_11 = PySequence_ITEM(__pyx_t_10, __pyx_t_27); __pyx_t_27++; if (unlikely(!__pyx_t_11)) __PYX_ERR(0, 260, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_11); + #endif + } + } else { + __pyx_t_11 = __pyx_t_32(__pyx_t_10); + if (unlikely(!__pyx_t_11)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(0, 260, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_11); + } + __Pyx_XDECREF_SET(__pyx_v_v, __pyx_t_11); + __pyx_t_11 = 0; + + /* "msgpack/_packer.pyx":261 + * if ret == 0: + * for v in o: + * ret = self._pack(v, nest_limit-1) # <<<<<<<<<<<<<< + * if ret != 0: break + * elif PyMemoryView_Check(o): + */ + __pyx_t_31.__pyx_n = 1; + __pyx_t_31.nest_limit = (__pyx_v_nest_limit - 1); + __pyx_t_16 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_v, &__pyx_t_31); if (unlikely(__pyx_t_16 == ((int)-1))) __PYX_ERR(0, 261, __pyx_L1_error) + __pyx_v_ret = __pyx_t_16; + + /* "msgpack/_packer.pyx":262 + * for v in o: + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break # <<<<<<<<<<<<<< + * elif PyMemoryView_Check(o): + * if PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) != 0: + */ + __pyx_t_12 = ((__pyx_v_ret != 0) != 0); + if (__pyx_t_12) { + goto __pyx_L60_break; + } + + /* "msgpack/_packer.pyx":260 + * ret = msgpack_pack_array(&self.pk, L) + * if ret == 0: + * for v in o: # <<<<<<<<<<<<<< + * ret = 
self._pack(v, nest_limit-1) + * if ret != 0: break + */ + } + __pyx_L60_break:; + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + + /* "msgpack/_packer.pyx":259 + * raise ValueError("list is too large") + * ret = msgpack_pack_array(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * for v in o: + * ret = self._pack(v, nest_limit-1) + */ + } + + /* "msgpack/_packer.pyx":254 + * ret = msgpack_pack_ext(&self.pk, longval, L) + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + * elif PyList_CheckExact(o) if strict_types else (PyTuple_Check(o) or PyList_Check(o)): # <<<<<<<<<<<<<< + * L = len(o) + * if L > ITEM_LIMIT: + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":263 + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break + * elif PyMemoryView_Check(o): # <<<<<<<<<<<<<< + * if PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) != 0: + * raise ValueError("could not get buffer for memoryview") + */ + __pyx_t_12 = (PyMemoryView_Check(__pyx_v_o) != 0); + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":264 + * if ret != 0: break + * elif PyMemoryView_Check(o): + * if PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) != 0: # <<<<<<<<<<<<<< + * raise ValueError("could not get buffer for memoryview") + * L = view.len + */ + __pyx_t_16 = PyObject_GetBuffer(__pyx_v_o, (&__pyx_v_view), PyBUF_SIMPLE); if (unlikely(__pyx_t_16 == ((int)-1))) __PYX_ERR(0, 264, __pyx_L1_error) + __pyx_t_12 = ((__pyx_t_16 != 0) != 0); + if (unlikely(__pyx_t_12)) { + + /* "msgpack/_packer.pyx":265 + * elif PyMemoryView_Check(o): + * if PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) != 0: + * raise ValueError("could not get buffer for memoryview") # <<<<<<<<<<<<<< + * L = view.len + * if L > ITEM_LIMIT: + */ + __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 265, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + __Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(0, 265, __pyx_L1_error) + + /* "msgpack/_packer.pyx":264 + * if ret != 0: break + * elif PyMemoryView_Check(o): + * if PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) != 0: # <<<<<<<<<<<<<< + * raise ValueError("could not get buffer for memoryview") + * L = view.len + */ + } + + /* "msgpack/_packer.pyx":266 + * if PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) != 0: + * raise ValueError("could not get buffer for memoryview") + * L = view.len # <<<<<<<<<<<<<< + * if L > ITEM_LIMIT: + * PyBuffer_Release(&view); + */ + __pyx_t_27 = __pyx_v_view.len; + __pyx_v_L = __pyx_t_27; + + /* "msgpack/_packer.pyx":267 + * raise ValueError("could not get buffer for memoryview") + * L = view.len + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * PyBuffer_Release(&view); + * raise ValueError("memoryview is too large") + */ + __pyx_t_12 = ((__pyx_v_L > __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT) != 0); + if (unlikely(__pyx_t_12)) { + + /* "msgpack/_packer.pyx":268 + * L = view.len + * if L > ITEM_LIMIT: + * PyBuffer_Release(&view); # <<<<<<<<<<<<<< + * raise ValueError("memoryview is too large") + * ret = msgpack_pack_bin(&self.pk, L) + */ + PyBuffer_Release((&__pyx_v_view)); + + /* "msgpack/_packer.pyx":269 + * if L > ITEM_LIMIT: + * PyBuffer_Release(&view); + * raise ValueError("memoryview is too large") # <<<<<<<<<<<<<< + * ret = msgpack_pack_bin(&self.pk, L) + * if ret == 0: + */ + __pyx_t_10 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 269, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_Raise(__pyx_t_10, 0, 0, 0); + 
__Pyx_DECREF(__pyx_t_10); __pyx_t_10 = 0; + __PYX_ERR(0, 269, __pyx_L1_error) + + /* "msgpack/_packer.pyx":267 + * raise ValueError("could not get buffer for memoryview") + * L = view.len + * if L > ITEM_LIMIT: # <<<<<<<<<<<<<< + * PyBuffer_Release(&view); + * raise ValueError("memoryview is too large") + */ + } + + /* "msgpack/_packer.pyx":270 + * PyBuffer_Release(&view); + * raise ValueError("memoryview is too large") + * ret = msgpack_pack_bin(&self.pk, L) # <<<<<<<<<<<<<< + * if ret == 0: + * ret = msgpack_pack_raw_body(&self.pk, view.buf, L) + */ + __pyx_v_ret = msgpack_pack_bin((&__pyx_v_self->pk), __pyx_v_L); + + /* "msgpack/_packer.pyx":271 + * raise ValueError("memoryview is too large") + * ret = msgpack_pack_bin(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * ret = msgpack_pack_raw_body(&self.pk, view.buf, L) + * PyBuffer_Release(&view); + */ + __pyx_t_12 = ((__pyx_v_ret == 0) != 0); + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":272 + * ret = msgpack_pack_bin(&self.pk, L) + * if ret == 0: + * ret = msgpack_pack_raw_body(&self.pk, view.buf, L) # <<<<<<<<<<<<<< + * PyBuffer_Release(&view); + * elif not default_used and self._default: + */ + __pyx_v_ret = msgpack_pack_raw_body((&__pyx_v_self->pk), ((char *)__pyx_v_view.buf), __pyx_v_L); + + /* "msgpack/_packer.pyx":271 + * raise ValueError("memoryview is too large") + * ret = msgpack_pack_bin(&self.pk, L) + * if ret == 0: # <<<<<<<<<<<<<< + * ret = msgpack_pack_raw_body(&self.pk, view.buf, L) + * PyBuffer_Release(&view); + */ + } + + /* "msgpack/_packer.pyx":273 + * if ret == 0: + * ret = msgpack_pack_raw_body(&self.pk, view.buf, L) + * PyBuffer_Release(&view); # <<<<<<<<<<<<<< + * elif not default_used and self._default: + * o = self._default(o) + */ + PyBuffer_Release((&__pyx_v_view)); + + /* "msgpack/_packer.pyx":263 + * ret = self._pack(v, nest_limit-1) + * if ret != 0: break + * elif PyMemoryView_Check(o): # <<<<<<<<<<<<<< + * if PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) != 0: + * raise ValueError("could not get buffer for memoryview") + */ + goto __pyx_L6; + } + + /* "msgpack/_packer.pyx":274 + * ret = msgpack_pack_raw_body(&self.pk, view.buf, L) + * PyBuffer_Release(&view); + * elif not default_used and self._default: # <<<<<<<<<<<<<< + * o = self._default(o) + * default_used = 1 + */ + __pyx_t_3 = ((!(__pyx_v_default_used != 0)) != 0); + if (__pyx_t_3) { + } else { + __pyx_t_12 = __pyx_t_3; + goto __pyx_L65_bool_binop_done; + } + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_v_self->_default); if (unlikely(__pyx_t_3 < 0)) __PYX_ERR(0, 274, __pyx_L1_error) + __pyx_t_12 = __pyx_t_3; + __pyx_L65_bool_binop_done:; + if (__pyx_t_12) { + + /* "msgpack/_packer.pyx":275 + * PyBuffer_Release(&view); + * elif not default_used and self._default: + * o = self._default(o) # <<<<<<<<<<<<<< + * default_used = 1 + * continue + */ + __Pyx_INCREF(__pyx_v_self->_default); + __pyx_t_11 = __pyx_v_self->_default; __pyx_t_2 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_11))) { + __pyx_t_2 = PyMethod_GET_SELF(__pyx_t_11); + if (likely(__pyx_t_2)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_11); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_11, function); + } + } + __pyx_t_10 = (__pyx_t_2) ? 
__Pyx_PyObject_Call2Args(__pyx_t_11, __pyx_t_2, __pyx_v_o) : __Pyx_PyObject_CallOneArg(__pyx_t_11, __pyx_v_o); + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(!__pyx_t_10)) __PYX_ERR(0, 275, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_10); + __Pyx_DECREF(__pyx_t_11); __pyx_t_11 = 0; + __Pyx_DECREF_SET(__pyx_v_o, __pyx_t_10); + __pyx_t_10 = 0; + + /* "msgpack/_packer.pyx":276 + * elif not default_used and self._default: + * o = self._default(o) + * default_used = 1 # <<<<<<<<<<<<<< + * continue + * else: + */ + __pyx_v_default_used = 1; + + /* "msgpack/_packer.pyx":277 + * o = self._default(o) + * default_used = 1 + * continue # <<<<<<<<<<<<<< + * else: + * PyErr_Format(TypeError, b"can not serialize '%.200s' object", Py_TYPE(o).tp_name) + */ + goto __pyx_L4_continue; + + /* "msgpack/_packer.pyx":274 + * ret = msgpack_pack_raw_body(&self.pk, view.buf, L) + * PyBuffer_Release(&view); + * elif not default_used and self._default: # <<<<<<<<<<<<<< + * o = self._default(o) + * default_used = 1 + */ + } + + /* "msgpack/_packer.pyx":279 + * continue + * else: + * PyErr_Format(TypeError, b"can not serialize '%.200s' object", Py_TYPE(o).tp_name) # <<<<<<<<<<<<<< + * return ret + * + */ + /*else*/ { + __pyx_t_28 = PyErr_Format(__pyx_builtin_TypeError, ((char *)"can not serialize '%.200s' object"), Py_TYPE(__pyx_v_o)->tp_name); if (unlikely(__pyx_t_28 == ((PyObject *)NULL))) __PYX_ERR(0, 279, __pyx_L1_error) + } + __pyx_L6:; + + /* "msgpack/_packer.pyx":280 + * else: + * PyErr_Format(TypeError, b"can not serialize '%.200s' object", Py_TYPE(o).tp_name) + * return ret # <<<<<<<<<<<<<< + * + * cpdef pack(self, object obj): + */ + __pyx_r = __pyx_v_ret; + goto __pyx_L0; + __pyx_L4_continue:; + } + + /* "msgpack/_packer.pyx":148 + * self.pk.buf = NULL + * + * cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1: # <<<<<<<<<<<<<< + * cdef long long llval + * cdef unsigned long long ullval + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_XDECREF(__pyx_t_11); + __Pyx_XDECREF(__pyx_t_13); + __Pyx_XDECREF(__pyx_t_14); + __Pyx_XDECREF(__pyx_t_15); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer._pack", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_d); + __Pyx_XDECREF(__pyx_v_oe); + __Pyx_XDECREF(__pyx_v_k); + __Pyx_XDECREF(__pyx_v_v); + __Pyx_XDECREF(__pyx_v_o); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_packer.pyx":282 + * return ret + * + * cpdef pack(self, object obj): # <<<<<<<<<<<<<< + * cdef int ret + * try: + */ + +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_7pack(PyObject *__pyx_v_self, PyObject *__pyx_v_obj); /*proto*/ +static PyObject *__pyx_f_7msgpack_9_cmsgpack_6Packer_pack(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_skip_dispatch) { + int __pyx_v_ret; + PyObject *__pyx_v_buf = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + struct __pyx_opt_args_7msgpack_9_cmsgpack_6Packer__pack __pyx_t_9; + int __pyx_t_10; + __Pyx_RefNannySetupContext("pack", 0); + /* Check if called by wrapper */ + if (unlikely(__pyx_skip_dispatch)) ; + /* Check if overridden in Python */ + else if (unlikely((Py_TYPE(((PyObject 
*)__pyx_v_self))->tp_dictoffset != 0) || (Py_TYPE(((PyObject *)__pyx_v_self))->tp_flags & (Py_TPFLAGS_IS_ABSTRACT | Py_TPFLAGS_HEAPTYPE)))) { + #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS + static PY_UINT64_T __pyx_tp_dict_version = __PYX_DICT_VERSION_INIT, __pyx_obj_dict_version = __PYX_DICT_VERSION_INIT; + if (unlikely(!__Pyx_object_dict_version_matches(((PyObject *)__pyx_v_self), __pyx_tp_dict_version, __pyx_obj_dict_version))) { + PY_UINT64_T __pyx_type_dict_guard = __Pyx_get_tp_dict_version(((PyObject *)__pyx_v_self)); + #endif + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 282, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (!PyCFunction_Check(__pyx_t_1) || (PyCFunction_GET_FUNCTION(__pyx_t_1) != (PyCFunction)(void*)__pyx_pw_7msgpack_9_cmsgpack_6Packer_7pack)) { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_t_1); + __pyx_t_3 = __pyx_t_1; __pyx_t_4 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + } + } + __pyx_t_2 = (__pyx_t_4) ? __Pyx_PyObject_Call2Args(__pyx_t_3, __pyx_t_4, __pyx_v_obj) : __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_v_obj); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 282, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + goto __pyx_L0; + } + #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS + __pyx_tp_dict_version = __Pyx_get_tp_dict_version(((PyObject *)__pyx_v_self)); + __pyx_obj_dict_version = __Pyx_get_object_dict_version(((PyObject *)__pyx_v_self)); + if (unlikely(__pyx_type_dict_guard != __pyx_tp_dict_version)) { + __pyx_tp_dict_version = __pyx_obj_dict_version = __PYX_DICT_VERSION_INIT; + } + #endif + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + #if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_PYTYPE_LOOKUP && CYTHON_USE_TYPE_SLOTS + } + #endif + } + + /* "msgpack/_packer.pyx":284 + * cpdef pack(self, object obj): + * cdef int ret + * try: # <<<<<<<<<<<<<< + * ret = self._pack(obj, DEFAULT_RECURSE_LIMIT) + * except: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7); + __Pyx_XGOTREF(__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_6); + __Pyx_XGOTREF(__pyx_t_7); + /*try:*/ { + + /* "msgpack/_packer.pyx":285 + * cdef int ret + * try: + * ret = self._pack(obj, DEFAULT_RECURSE_LIMIT) # <<<<<<<<<<<<<< + * except: + * self.pk.length = 0 + */ + __pyx_t_9.__pyx_n = 1; + __pyx_t_9.nest_limit = __pyx_v_7msgpack_9_cmsgpack_DEFAULT_RECURSE_LIMIT; + __pyx_t_8 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_obj, &__pyx_t_9); if (unlikely(__pyx_t_8 == ((int)-1))) __PYX_ERR(0, 285, __pyx_L3_error) + __pyx_v_ret = __pyx_t_8; + + /* "msgpack/_packer.pyx":284 + * cpdef pack(self, object obj): + * cdef int ret + * try: # <<<<<<<<<<<<<< + * ret = self._pack(obj, DEFAULT_RECURSE_LIMIT) + * except: + */ + } + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 
0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "msgpack/_packer.pyx":286 + * try: + * ret = self._pack(obj, DEFAULT_RECURSE_LIMIT) + * except: # <<<<<<<<<<<<<< + * self.pk.length = 0 + * raise + */ + /*except:*/ { + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.pack", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3) < 0) __PYX_ERR(0, 286, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GOTREF(__pyx_t_3); + + /* "msgpack/_packer.pyx":287 + * ret = self._pack(obj, DEFAULT_RECURSE_LIMIT) + * except: + * self.pk.length = 0 # <<<<<<<<<<<<<< + * raise + * if ret: # should not happen. + */ + __pyx_v_self->pk.length = 0; + + /* "msgpack/_packer.pyx":288 + * except: + * self.pk.length = 0 + * raise # <<<<<<<<<<<<<< + * if ret: # should not happen. + * raise RuntimeError("internal error") + */ + __Pyx_GIVEREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ErrRestoreWithState(__pyx_t_1, __pyx_t_2, __pyx_t_3); + __pyx_t_1 = 0; __pyx_t_2 = 0; __pyx_t_3 = 0; + __PYX_ERR(0, 288, __pyx_L5_except_error) + } + __pyx_L5_except_error:; + + /* "msgpack/_packer.pyx":284 + * cpdef pack(self, object obj): + * cdef int ret + * try: # <<<<<<<<<<<<<< + * ret = self._pack(obj, DEFAULT_RECURSE_LIMIT) + * except: + */ + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_XGIVEREF(__pyx_t_6); + __Pyx_XGIVEREF(__pyx_t_7); + __Pyx_ExceptionReset(__pyx_t_5, __pyx_t_6, __pyx_t_7); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "msgpack/_packer.pyx":289 + * self.pk.length = 0 + * raise + * if ret: # should not happen. # <<<<<<<<<<<<<< + * raise RuntimeError("internal error") + * if self.autoreset: + */ + __pyx_t_10 = (__pyx_v_ret != 0); + if (unlikely(__pyx_t_10)) { + + /* "msgpack/_packer.pyx":290 + * raise + * if ret: # should not happen. + * raise RuntimeError("internal error") # <<<<<<<<<<<<<< + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_RuntimeError, __pyx_tuple__12, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 290, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(0, 290, __pyx_L1_error) + + /* "msgpack/_packer.pyx":289 + * self.pk.length = 0 + * raise + * if ret: # should not happen. # <<<<<<<<<<<<<< + * raise RuntimeError("internal error") + * if self.autoreset: + */ + } + + /* "msgpack/_packer.pyx":291 + * if ret: # should not happen. 
+ * raise RuntimeError("internal error") + * if self.autoreset: # <<<<<<<<<<<<<< + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + */ + __pyx_t_10 = (__pyx_v_self->autoreset != 0); + if (__pyx_t_10) { + + /* "msgpack/_packer.pyx":292 + * raise RuntimeError("internal error") + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) # <<<<<<<<<<<<<< + * self.pk.length = 0 + * return buf + */ + __pyx_t_3 = PyBytes_FromStringAndSize(__pyx_v_self->pk.buf, __pyx_v_self->pk.length); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 292, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_v_buf = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "msgpack/_packer.pyx":293 + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 # <<<<<<<<<<<<<< + * return buf + * + */ + __pyx_v_self->pk.length = 0; + + /* "msgpack/_packer.pyx":294 + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + * return buf # <<<<<<<<<<<<<< + * + * def pack_ext_type(self, typecode, data): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_buf); + __pyx_r = __pyx_v_buf; + goto __pyx_L0; + + /* "msgpack/_packer.pyx":291 + * if ret: # should not happen. + * raise RuntimeError("internal error") + * if self.autoreset: # <<<<<<<<<<<<<< + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + */ + } + + /* "msgpack/_packer.pyx":282 + * return ret + * + * cpdef pack(self, object obj): # <<<<<<<<<<<<<< + * cdef int ret + * try: + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.pack", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_buf); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_7pack(PyObject *__pyx_v_self, PyObject *__pyx_v_obj); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_6Packer_6pack[] = "Packer.pack(self, obj)"; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_7pack(PyObject *__pyx_v_self, PyObject *__pyx_v_obj) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("pack (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer_6pack(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self), ((PyObject *)__pyx_v_obj)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_6pack(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_obj) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("pack", 0); + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_f_7msgpack_9_cmsgpack_6Packer_pack(__pyx_v_self, __pyx_v_obj, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 282, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.pack", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* 
"msgpack/_packer.pyx":296 + * return buf + * + * def pack_ext_type(self, typecode, data): # <<<<<<<<<<<<<< + * msgpack_pack_ext(&self.pk, typecode, len(data)) + * msgpack_pack_raw_body(&self.pk, data, len(data)) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_9pack_ext_type(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_6Packer_8pack_ext_type[] = "Packer.pack_ext_type(self, typecode, data)"; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_9pack_ext_type(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_typecode = 0; + PyObject *__pyx_v_data = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("pack_ext_type (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_typecode,&__pyx_n_s_data,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_typecode)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("pack_ext_type", 1, 2, 2, 1); __PYX_ERR(0, 296, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "pack_ext_type") < 0)) __PYX_ERR(0, 296, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + } + __pyx_v_typecode = values[0]; + __pyx_v_data = values[1]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("pack_ext_type", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(0, 296, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.pack_ext_type", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer_8pack_ext_type(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self), __pyx_v_typecode, __pyx_v_data); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_8pack_ext_type(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_typecode, PyObject *__pyx_v_data) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + char __pyx_t_1; + Py_ssize_t __pyx_t_2; + char *__pyx_t_3; + __Pyx_RefNannySetupContext("pack_ext_type", 0); + + /* "msgpack/_packer.pyx":297 + * + * def pack_ext_type(self, typecode, data): + * msgpack_pack_ext(&self.pk, typecode, len(data)) # <<<<<<<<<<<<<< + * msgpack_pack_raw_body(&self.pk, data, len(data)) + * + */ + __pyx_t_1 = __Pyx_PyInt_As_char(__pyx_v_typecode); if (unlikely((__pyx_t_1 == (char)-1) && PyErr_Occurred())) __PYX_ERR(0, 297, __pyx_L1_error) + __pyx_t_2 = 
PyObject_Length(__pyx_v_data); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 297, __pyx_L1_error) + (void)(msgpack_pack_ext((&__pyx_v_self->pk), __pyx_t_1, __pyx_t_2)); + + /* "msgpack/_packer.pyx":298 + * def pack_ext_type(self, typecode, data): + * msgpack_pack_ext(&self.pk, typecode, len(data)) + * msgpack_pack_raw_body(&self.pk, data, len(data)) # <<<<<<<<<<<<<< + * + * def pack_array_header(self, long long size): + */ + __pyx_t_3 = __Pyx_PyObject_AsWritableString(__pyx_v_data); if (unlikely((!__pyx_t_3) && PyErr_Occurred())) __PYX_ERR(0, 298, __pyx_L1_error) + __pyx_t_2 = PyObject_Length(__pyx_v_data); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 298, __pyx_L1_error) + (void)(msgpack_pack_raw_body((&__pyx_v_self->pk), __pyx_t_3, __pyx_t_2)); + + /* "msgpack/_packer.pyx":296 + * return buf + * + * def pack_ext_type(self, typecode, data): # <<<<<<<<<<<<<< + * msgpack_pack_ext(&self.pk, typecode, len(data)) + * msgpack_pack_raw_body(&self.pk, data, len(data)) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.pack_ext_type", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_packer.pyx":300 + * msgpack_pack_raw_body(&self.pk, data, len(data)) + * + * def pack_array_header(self, long long size): # <<<<<<<<<<<<<< + * if size > ITEM_LIMIT: + * raise ValueError + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_11pack_array_header(PyObject *__pyx_v_self, PyObject *__pyx_arg_size); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_6Packer_10pack_array_header[] = "Packer.pack_array_header(self, long long size)"; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_11pack_array_header(PyObject *__pyx_v_self, PyObject *__pyx_arg_size) { + PY_LONG_LONG __pyx_v_size; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("pack_array_header (wrapper)", 0); + assert(__pyx_arg_size); { + __pyx_v_size = __Pyx_PyInt_As_PY_LONG_LONG(__pyx_arg_size); if (unlikely((__pyx_v_size == (PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 300, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.pack_array_header", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer_10pack_array_header(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self), ((PY_LONG_LONG)__pyx_v_size)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_10pack_array_header(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PY_LONG_LONG __pyx_v_size) { + int __pyx_v_ret; + PyObject *__pyx_v_buf = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannySetupContext("pack_array_header", 0); + + /* "msgpack/_packer.pyx":301 + * + * def pack_array_header(self, long long size): + * if size > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError + * cdef int ret = msgpack_pack_array(&self.pk, size) + */ + __pyx_t_1 = ((__pyx_v_size > __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_packer.pyx":302 + * def 
pack_array_header(self, long long size): + * if size > ITEM_LIMIT: + * raise ValueError # <<<<<<<<<<<<<< + * cdef int ret = msgpack_pack_array(&self.pk, size) + * if ret == -1: + */ + __Pyx_Raise(__pyx_builtin_ValueError, 0, 0, 0); + __PYX_ERR(0, 302, __pyx_L1_error) + + /* "msgpack/_packer.pyx":301 + * + * def pack_array_header(self, long long size): + * if size > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError + * cdef int ret = msgpack_pack_array(&self.pk, size) + */ + } + + /* "msgpack/_packer.pyx":303 + * if size > ITEM_LIMIT: + * raise ValueError + * cdef int ret = msgpack_pack_array(&self.pk, size) # <<<<<<<<<<<<<< + * if ret == -1: + * raise MemoryError + */ + __pyx_v_ret = msgpack_pack_array((&__pyx_v_self->pk), __pyx_v_size); + + /* "msgpack/_packer.pyx":304 + * raise ValueError + * cdef int ret = msgpack_pack_array(&self.pk, size) + * if ret == -1: # <<<<<<<<<<<<<< + * raise MemoryError + * elif ret: # should not happen + */ + __pyx_t_1 = ((__pyx_v_ret == -1L) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_packer.pyx":305 + * cdef int ret = msgpack_pack_array(&self.pk, size) + * if ret == -1: + * raise MemoryError # <<<<<<<<<<<<<< + * elif ret: # should not happen + * raise TypeError + */ + PyErr_NoMemory(); __PYX_ERR(0, 305, __pyx_L1_error) + + /* "msgpack/_packer.pyx":304 + * raise ValueError + * cdef int ret = msgpack_pack_array(&self.pk, size) + * if ret == -1: # <<<<<<<<<<<<<< + * raise MemoryError + * elif ret: # should not happen + */ + } + + /* "msgpack/_packer.pyx":306 + * if ret == -1: + * raise MemoryError + * elif ret: # should not happen # <<<<<<<<<<<<<< + * raise TypeError + * if self.autoreset: + */ + __pyx_t_1 = (__pyx_v_ret != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_packer.pyx":307 + * raise MemoryError + * elif ret: # should not happen + * raise TypeError # <<<<<<<<<<<<<< + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + */ + __Pyx_Raise(__pyx_builtin_TypeError, 0, 0, 0); + __PYX_ERR(0, 307, __pyx_L1_error) + + /* "msgpack/_packer.pyx":306 + * if ret == -1: + * raise MemoryError + * elif ret: # should not happen # <<<<<<<<<<<<<< + * raise TypeError + * if self.autoreset: + */ + } + + /* "msgpack/_packer.pyx":308 + * elif ret: # should not happen + * raise TypeError + * if self.autoreset: # <<<<<<<<<<<<<< + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + */ + __pyx_t_1 = (__pyx_v_self->autoreset != 0); + if (__pyx_t_1) { + + /* "msgpack/_packer.pyx":309 + * raise TypeError + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) # <<<<<<<<<<<<<< + * self.pk.length = 0 + * return buf + */ + __pyx_t_2 = PyBytes_FromStringAndSize(__pyx_v_self->pk.buf, __pyx_v_self->pk.length); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 309, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_buf = ((PyObject*)__pyx_t_2); + __pyx_t_2 = 0; + + /* "msgpack/_packer.pyx":310 + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 # <<<<<<<<<<<<<< + * return buf + * + */ + __pyx_v_self->pk.length = 0; + + /* "msgpack/_packer.pyx":311 + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + * return buf # <<<<<<<<<<<<<< + * + * def pack_map_header(self, long long size): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_buf); + __pyx_r = __pyx_v_buf; + goto __pyx_L0; + + /* "msgpack/_packer.pyx":308 + * elif ret: # should not happen + * raise TypeError + * if self.autoreset: 
# <<<<<<<<<<<<<< + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + */ + } + + /* "msgpack/_packer.pyx":300 + * msgpack_pack_raw_body(&self.pk, data, len(data)) + * + * def pack_array_header(self, long long size): # <<<<<<<<<<<<<< + * if size > ITEM_LIMIT: + * raise ValueError + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.pack_array_header", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_buf); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_packer.pyx":313 + * return buf + * + * def pack_map_header(self, long long size): # <<<<<<<<<<<<<< + * if size > ITEM_LIMIT: + * raise ValueError + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_13pack_map_header(PyObject *__pyx_v_self, PyObject *__pyx_arg_size); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_6Packer_12pack_map_header[] = "Packer.pack_map_header(self, long long size)"; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_13pack_map_header(PyObject *__pyx_v_self, PyObject *__pyx_arg_size) { + PY_LONG_LONG __pyx_v_size; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("pack_map_header (wrapper)", 0); + assert(__pyx_arg_size); { + __pyx_v_size = __Pyx_PyInt_As_PY_LONG_LONG(__pyx_arg_size); if (unlikely((__pyx_v_size == (PY_LONG_LONG)-1) && PyErr_Occurred())) __PYX_ERR(0, 313, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.pack_map_header", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer_12pack_map_header(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self), ((PY_LONG_LONG)__pyx_v_size)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_12pack_map_header(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PY_LONG_LONG __pyx_v_size) { + int __pyx_v_ret; + PyObject *__pyx_v_buf = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannySetupContext("pack_map_header", 0); + + /* "msgpack/_packer.pyx":314 + * + * def pack_map_header(self, long long size): + * if size > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError + * cdef int ret = msgpack_pack_map(&self.pk, size) + */ + __pyx_t_1 = ((__pyx_v_size > __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_packer.pyx":315 + * def pack_map_header(self, long long size): + * if size > ITEM_LIMIT: + * raise ValueError # <<<<<<<<<<<<<< + * cdef int ret = msgpack_pack_map(&self.pk, size) + * if ret == -1: + */ + __Pyx_Raise(__pyx_builtin_ValueError, 0, 0, 0); + __PYX_ERR(0, 315, __pyx_L1_error) + + /* "msgpack/_packer.pyx":314 + * + * def pack_map_header(self, long long size): + * if size > ITEM_LIMIT: # <<<<<<<<<<<<<< + * raise ValueError + * cdef int ret = msgpack_pack_map(&self.pk, size) + */ + } + + /* "msgpack/_packer.pyx":316 + * if size > ITEM_LIMIT: + * raise ValueError + * cdef int ret = msgpack_pack_map(&self.pk, size) # <<<<<<<<<<<<<< + * if ret == -1: + * raise MemoryError + */ + __pyx_v_ret = 
msgpack_pack_map((&__pyx_v_self->pk), __pyx_v_size); + + /* "msgpack/_packer.pyx":317 + * raise ValueError + * cdef int ret = msgpack_pack_map(&self.pk, size) + * if ret == -1: # <<<<<<<<<<<<<< + * raise MemoryError + * elif ret: # should not happen + */ + __pyx_t_1 = ((__pyx_v_ret == -1L) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_packer.pyx":318 + * cdef int ret = msgpack_pack_map(&self.pk, size) + * if ret == -1: + * raise MemoryError # <<<<<<<<<<<<<< + * elif ret: # should not happen + * raise TypeError + */ + PyErr_NoMemory(); __PYX_ERR(0, 318, __pyx_L1_error) + + /* "msgpack/_packer.pyx":317 + * raise ValueError + * cdef int ret = msgpack_pack_map(&self.pk, size) + * if ret == -1: # <<<<<<<<<<<<<< + * raise MemoryError + * elif ret: # should not happen + */ + } + + /* "msgpack/_packer.pyx":319 + * if ret == -1: + * raise MemoryError + * elif ret: # should not happen # <<<<<<<<<<<<<< + * raise TypeError + * if self.autoreset: + */ + __pyx_t_1 = (__pyx_v_ret != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_packer.pyx":320 + * raise MemoryError + * elif ret: # should not happen + * raise TypeError # <<<<<<<<<<<<<< + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + */ + __Pyx_Raise(__pyx_builtin_TypeError, 0, 0, 0); + __PYX_ERR(0, 320, __pyx_L1_error) + + /* "msgpack/_packer.pyx":319 + * if ret == -1: + * raise MemoryError + * elif ret: # should not happen # <<<<<<<<<<<<<< + * raise TypeError + * if self.autoreset: + */ + } + + /* "msgpack/_packer.pyx":321 + * elif ret: # should not happen + * raise TypeError + * if self.autoreset: # <<<<<<<<<<<<<< + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + */ + __pyx_t_1 = (__pyx_v_self->autoreset != 0); + if (__pyx_t_1) { + + /* "msgpack/_packer.pyx":322 + * raise TypeError + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) # <<<<<<<<<<<<<< + * self.pk.length = 0 + * return buf + */ + __pyx_t_2 = PyBytes_FromStringAndSize(__pyx_v_self->pk.buf, __pyx_v_self->pk.length); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 322, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_v_buf = ((PyObject*)__pyx_t_2); + __pyx_t_2 = 0; + + /* "msgpack/_packer.pyx":323 + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 # <<<<<<<<<<<<<< + * return buf + * + */ + __pyx_v_self->pk.length = 0; + + /* "msgpack/_packer.pyx":324 + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + * return buf # <<<<<<<<<<<<<< + * + * def pack_map_pairs(self, object pairs): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_buf); + __pyx_r = __pyx_v_buf; + goto __pyx_L0; + + /* "msgpack/_packer.pyx":321 + * elif ret: # should not happen + * raise TypeError + * if self.autoreset: # <<<<<<<<<<<<<< + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + */ + } + + /* "msgpack/_packer.pyx":313 + * return buf + * + * def pack_map_header(self, long long size): # <<<<<<<<<<<<<< + * if size > ITEM_LIMIT: + * raise ValueError + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.pack_map_header", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_buf); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_packer.pyx":326 
+ * return buf + * + * def pack_map_pairs(self, object pairs): # <<<<<<<<<<<<<< + * """ + * Pack *pairs* as msgpack map type. + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_15pack_map_pairs(PyObject *__pyx_v_self, PyObject *__pyx_v_pairs); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_6Packer_14pack_map_pairs[] = "Packer.pack_map_pairs(self, pairs)\n\n Pack *pairs* as msgpack map type.\n\n *pairs* should be a sequence of pairs.\n (`len(pairs)` and `for k, v in pairs:` should be supported.)\n "; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_15pack_map_pairs(PyObject *__pyx_v_self, PyObject *__pyx_v_pairs) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("pack_map_pairs (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer_14pack_map_pairs(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self), ((PyObject *)__pyx_v_pairs)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_14pack_map_pairs(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, PyObject *__pyx_v_pairs) { + int __pyx_v_ret; + PyObject *__pyx_v_k = NULL; + PyObject *__pyx_v_v = NULL; + PyObject *__pyx_v_buf = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *(*__pyx_t_4)(PyObject *); + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *(*__pyx_t_9)(PyObject *); + int __pyx_t_10; + __Pyx_RefNannySetupContext("pack_map_pairs", 0); + + /* "msgpack/_packer.pyx":333 + * (`len(pairs)` and `for k, v in pairs:` should be supported.) 
+ * """ + * cdef int ret = msgpack_pack_map(&self.pk, len(pairs)) # <<<<<<<<<<<<<< + * if ret == 0: + * for k, v in pairs: + */ + __pyx_t_1 = PyObject_Length(__pyx_v_pairs); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(0, 333, __pyx_L1_error) + __pyx_v_ret = msgpack_pack_map((&__pyx_v_self->pk), __pyx_t_1); + + /* "msgpack/_packer.pyx":334 + * """ + * cdef int ret = msgpack_pack_map(&self.pk, len(pairs)) + * if ret == 0: # <<<<<<<<<<<<<< + * for k, v in pairs: + * ret = self._pack(k) + */ + __pyx_t_2 = ((__pyx_v_ret == 0) != 0); + if (__pyx_t_2) { + + /* "msgpack/_packer.pyx":335 + * cdef int ret = msgpack_pack_map(&self.pk, len(pairs)) + * if ret == 0: + * for k, v in pairs: # <<<<<<<<<<<<<< + * ret = self._pack(k) + * if ret != 0: break + */ + if (likely(PyList_CheckExact(__pyx_v_pairs)) || PyTuple_CheckExact(__pyx_v_pairs)) { + __pyx_t_3 = __pyx_v_pairs; __Pyx_INCREF(__pyx_t_3); __pyx_t_1 = 0; + __pyx_t_4 = NULL; + } else { + __pyx_t_1 = -1; __pyx_t_3 = PyObject_GetIter(__pyx_v_pairs); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 335, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = Py_TYPE(__pyx_t_3)->tp_iternext; if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 335, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_4)) { + if (likely(PyList_CheckExact(__pyx_t_3))) { + if (__pyx_t_1 >= PyList_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyList_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(0, 335, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 335, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } else { + if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_3)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_3, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely(0 < 0)) __PYX_ERR(0, 335, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_3, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 335, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } + } else { + __pyx_t_5 = __pyx_t_4(__pyx_t_3); + if (unlikely(!__pyx_t_5)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(0, 335, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_5); + } + if ((likely(PyTuple_CheckExact(__pyx_t_5))) || (PyList_CheckExact(__pyx_t_5))) { + PyObject* sequence = __pyx_t_5; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(0, 335, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + if (likely(PyTuple_CheckExact(sequence))) { + __pyx_t_6 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_7 = PyTuple_GET_ITEM(sequence, 1); + } else { + __pyx_t_6 = PyList_GET_ITEM(sequence, 0); + __pyx_t_7 = PyList_GET_ITEM(sequence, 1); + } + __Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(__pyx_t_7); + #else + __pyx_t_6 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 335, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 335, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + #endif + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } else { + Py_ssize_t 
index = -1; + __pyx_t_8 = PyObject_GetIter(__pyx_t_5); if (unlikely(!__pyx_t_8)) __PYX_ERR(0, 335, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_9 = Py_TYPE(__pyx_t_8)->tp_iternext; + index = 0; __pyx_t_6 = __pyx_t_9(__pyx_t_8); if (unlikely(!__pyx_t_6)) goto __pyx_L6_unpacking_failed; + __Pyx_GOTREF(__pyx_t_6); + index = 1; __pyx_t_7 = __pyx_t_9(__pyx_t_8); if (unlikely(!__pyx_t_7)) goto __pyx_L6_unpacking_failed; + __Pyx_GOTREF(__pyx_t_7); + if (__Pyx_IternextUnpackEndCheck(__pyx_t_9(__pyx_t_8), 2) < 0) __PYX_ERR(0, 335, __pyx_L1_error) + __pyx_t_9 = NULL; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L7_unpacking_done; + __pyx_L6_unpacking_failed:; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_9 = NULL; + if (__Pyx_IterFinish() == 0) __Pyx_RaiseNeedMoreValuesError(index); + __PYX_ERR(0, 335, __pyx_L1_error) + __pyx_L7_unpacking_done:; + } + __Pyx_XDECREF_SET(__pyx_v_k, __pyx_t_6); + __pyx_t_6 = 0; + __Pyx_XDECREF_SET(__pyx_v_v, __pyx_t_7); + __pyx_t_7 = 0; + + /* "msgpack/_packer.pyx":336 + * if ret == 0: + * for k, v in pairs: + * ret = self._pack(k) # <<<<<<<<<<<<<< + * if ret != 0: break + * ret = self._pack(v) + */ + __pyx_t_10 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_k, NULL); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(0, 336, __pyx_L1_error) + __pyx_v_ret = __pyx_t_10; + + /* "msgpack/_packer.pyx":337 + * for k, v in pairs: + * ret = self._pack(k) + * if ret != 0: break # <<<<<<<<<<<<<< + * ret = self._pack(v) + * if ret != 0: break + */ + __pyx_t_2 = ((__pyx_v_ret != 0) != 0); + if (__pyx_t_2) { + goto __pyx_L5_break; + } + + /* "msgpack/_packer.pyx":338 + * ret = self._pack(k) + * if ret != 0: break + * ret = self._pack(v) # <<<<<<<<<<<<<< + * if ret != 0: break + * if ret == -1: + */ + __pyx_t_10 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer *)__pyx_v_self->__pyx_vtab)->_pack(__pyx_v_self, __pyx_v_v, NULL); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(0, 338, __pyx_L1_error) + __pyx_v_ret = __pyx_t_10; + + /* "msgpack/_packer.pyx":339 + * if ret != 0: break + * ret = self._pack(v) + * if ret != 0: break # <<<<<<<<<<<<<< + * if ret == -1: + * raise MemoryError + */ + __pyx_t_2 = ((__pyx_v_ret != 0) != 0); + if (__pyx_t_2) { + goto __pyx_L5_break; + } + + /* "msgpack/_packer.pyx":335 + * cdef int ret = msgpack_pack_map(&self.pk, len(pairs)) + * if ret == 0: + * for k, v in pairs: # <<<<<<<<<<<<<< + * ret = self._pack(k) + * if ret != 0: break + */ + } + __pyx_L5_break:; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "msgpack/_packer.pyx":334 + * """ + * cdef int ret = msgpack_pack_map(&self.pk, len(pairs)) + * if ret == 0: # <<<<<<<<<<<<<< + * for k, v in pairs: + * ret = self._pack(k) + */ + } + + /* "msgpack/_packer.pyx":340 + * ret = self._pack(v) + * if ret != 0: break + * if ret == -1: # <<<<<<<<<<<<<< + * raise MemoryError + * elif ret: # should not happen + */ + __pyx_t_2 = ((__pyx_v_ret == -1L) != 0); + if (unlikely(__pyx_t_2)) { + + /* "msgpack/_packer.pyx":341 + * if ret != 0: break + * if ret == -1: + * raise MemoryError # <<<<<<<<<<<<<< + * elif ret: # should not happen + * raise TypeError + */ + PyErr_NoMemory(); __PYX_ERR(0, 341, __pyx_L1_error) + + /* "msgpack/_packer.pyx":340 + * ret = self._pack(v) + * if ret != 0: break + * if ret == -1: # <<<<<<<<<<<<<< + * raise MemoryError + * elif ret: # should not happen + */ + } + + /* "msgpack/_packer.pyx":342 + * if ret == -1: + * raise MemoryError + * elif ret: # 
should not happen # <<<<<<<<<<<<<< + * raise TypeError + * if self.autoreset: + */ + __pyx_t_2 = (__pyx_v_ret != 0); + if (unlikely(__pyx_t_2)) { + + /* "msgpack/_packer.pyx":343 + * raise MemoryError + * elif ret: # should not happen + * raise TypeError # <<<<<<<<<<<<<< + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + */ + __Pyx_Raise(__pyx_builtin_TypeError, 0, 0, 0); + __PYX_ERR(0, 343, __pyx_L1_error) + + /* "msgpack/_packer.pyx":342 + * if ret == -1: + * raise MemoryError + * elif ret: # should not happen # <<<<<<<<<<<<<< + * raise TypeError + * if self.autoreset: + */ + } + + /* "msgpack/_packer.pyx":344 + * elif ret: # should not happen + * raise TypeError + * if self.autoreset: # <<<<<<<<<<<<<< + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + */ + __pyx_t_2 = (__pyx_v_self->autoreset != 0); + if (__pyx_t_2) { + + /* "msgpack/_packer.pyx":345 + * raise TypeError + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) # <<<<<<<<<<<<<< + * self.pk.length = 0 + * return buf + */ + __pyx_t_3 = PyBytes_FromStringAndSize(__pyx_v_self->pk.buf, __pyx_v_self->pk.length); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 345, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_v_buf = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "msgpack/_packer.pyx":346 + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 # <<<<<<<<<<<<<< + * return buf + * + */ + __pyx_v_self->pk.length = 0; + + /* "msgpack/_packer.pyx":347 + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + * return buf # <<<<<<<<<<<<<< + * + * def reset(self): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_buf); + __pyx_r = __pyx_v_buf; + goto __pyx_L0; + + /* "msgpack/_packer.pyx":344 + * elif ret: # should not happen + * raise TypeError + * if self.autoreset: # <<<<<<<<<<<<<< + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * self.pk.length = 0 + */ + } + + /* "msgpack/_packer.pyx":326 + * return buf + * + * def pack_map_pairs(self, object pairs): # <<<<<<<<<<<<<< + * """ + * Pack *pairs* as msgpack map type. + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.pack_map_pairs", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_k); + __Pyx_XDECREF(__pyx_v_v); + __Pyx_XDECREF(__pyx_v_buf); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_packer.pyx":349 + * return buf + * + * def reset(self): # <<<<<<<<<<<<<< + * """Reset internal buffer. 
+ * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_17reset(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_6Packer_16reset[] = "Packer.reset(self)\nReset internal buffer.\n\n This method is useful only when autoreset=False.\n "; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_17reset(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("reset (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer_16reset(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_16reset(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("reset", 0); + + /* "msgpack/_packer.pyx":354 + * This method is useful only when autoreset=False. + * """ + * self.pk.length = 0 # <<<<<<<<<<<<<< + * + * def bytes(self): + */ + __pyx_v_self->pk.length = 0; + + /* "msgpack/_packer.pyx":349 + * return buf + * + * def reset(self): # <<<<<<<<<<<<<< + * """Reset internal buffer. + * + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_packer.pyx":356 + * self.pk.length = 0 + * + * def bytes(self): # <<<<<<<<<<<<<< + * """Return internal buffer contents as bytes object""" + * return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_19bytes(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_6Packer_18bytes[] = "Packer.bytes(self)\nReturn internal buffer contents as bytes object"; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_19bytes(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("bytes (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer_18bytes(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_18bytes(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("bytes", 0); + + /* "msgpack/_packer.pyx":358 + * def bytes(self): + * """Return internal buffer contents as bytes object""" + * return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) # <<<<<<<<<<<<<< + * + * def getbuffer(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyBytes_FromStringAndSize(__pyx_v_self->pk.buf, __pyx_v_self->pk.length); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 358, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "msgpack/_packer.pyx":356 + * self.pk.length = 0 + * + * def bytes(self): # <<<<<<<<<<<<<< + * """Return internal buffer contents as bytes object""" + * return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.bytes", __pyx_clineno, __pyx_lineno, __pyx_filename); + 
__pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_packer.pyx":360 + * return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * + * def getbuffer(self): # <<<<<<<<<<<<<< + * """Return view of internal buffer.""" + * return buff_to_buff(self.pk.buf, self.pk.length) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_21getbuffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_6Packer_20getbuffer[] = "Packer.getbuffer(self)\nReturn view of internal buffer."; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_21getbuffer(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("getbuffer (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer_20getbuffer(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_20getbuffer(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("getbuffer", 0); + + /* "msgpack/_packer.pyx":362 + * def getbuffer(self): + * """Return view of internal buffer.""" + * return buff_to_buff(self.pk.buf, self.pk.length) # <<<<<<<<<<<<<< + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = buff_to_buff(__pyx_v_self->pk.buf, __pyx_v_self->pk.length); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 362, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "msgpack/_packer.pyx":360 + * return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + * + * def getbuffer(self): # <<<<<<<<<<<<<< + * """Return view of internal buffer.""" + * return buff_to_buff(self.pk.buf, self.pk.length) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.getbuffer", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_23__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_6Packer_22__reduce_cython__[] = "Packer.__reduce_cython__(self)"; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_23__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer_22__reduce_cython__(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_22__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree 
fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__13, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(2, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_25__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_6Packer_24__setstate_cython__[] = "Packer.__setstate_cython__(self, __pyx_state)"; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_6Packer_25__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_6Packer_24__setstate_cython__(((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_6Packer_24__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7msgpack_9_cmsgpack_Packer *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(2, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Packer.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* 
"msgpack/_unpacker.pyx":57 + * void unpack_clear(unpack_context* ctx) + * + * cdef inline init_ctx(unpack_context *ctx, # <<<<<<<<<<<<<< + * object object_hook, object object_pairs_hook, + * object list_hook, object ext_hook, + */ + +static CYTHON_INLINE PyObject *__pyx_f_7msgpack_9_cmsgpack_init_ctx(unpack_context *__pyx_v_ctx, PyObject *__pyx_v_object_hook, PyObject *__pyx_v_object_pairs_hook, PyObject *__pyx_v_list_hook, PyObject *__pyx_v_ext_hook, int __pyx_v_use_list, int __pyx_v_raw, int __pyx_v_strict_map_key, char const *__pyx_v_encoding, char const *__pyx_v_unicode_errors, Py_ssize_t __pyx_v_max_str_len, Py_ssize_t __pyx_v_max_bin_len, Py_ssize_t __pyx_v_max_array_len, Py_ssize_t __pyx_v_max_map_len, Py_ssize_t __pyx_v_max_ext_len) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + __Pyx_RefNannySetupContext("init_ctx", 0); + + /* "msgpack/_unpacker.pyx":65 + * Py_ssize_t max_array_len, Py_ssize_t max_map_len, + * Py_ssize_t max_ext_len): + * unpack_init(ctx) # <<<<<<<<<<<<<< + * ctx.user.use_list = use_list + * ctx.user.raw = raw + */ + unpack_init(__pyx_v_ctx); + + /* "msgpack/_unpacker.pyx":66 + * Py_ssize_t max_ext_len): + * unpack_init(ctx) + * ctx.user.use_list = use_list # <<<<<<<<<<<<<< + * ctx.user.raw = raw + * ctx.user.strict_map_key = strict_map_key + */ + __pyx_v_ctx->user.use_list = __pyx_v_use_list; + + /* "msgpack/_unpacker.pyx":67 + * unpack_init(ctx) + * ctx.user.use_list = use_list + * ctx.user.raw = raw # <<<<<<<<<<<<<< + * ctx.user.strict_map_key = strict_map_key + * ctx.user.object_hook = ctx.user.list_hook = NULL + */ + __pyx_v_ctx->user.raw = __pyx_v_raw; + + /* "msgpack/_unpacker.pyx":68 + * ctx.user.use_list = use_list + * ctx.user.raw = raw + * ctx.user.strict_map_key = strict_map_key # <<<<<<<<<<<<<< + * ctx.user.object_hook = ctx.user.list_hook = NULL + * ctx.user.max_str_len = max_str_len + */ + __pyx_v_ctx->user.strict_map_key = __pyx_v_strict_map_key; + + /* "msgpack/_unpacker.pyx":69 + * ctx.user.raw = raw + * ctx.user.strict_map_key = strict_map_key + * ctx.user.object_hook = ctx.user.list_hook = NULL # <<<<<<<<<<<<<< + * ctx.user.max_str_len = max_str_len + * ctx.user.max_bin_len = max_bin_len + */ + __pyx_v_ctx->user.object_hook = ((PyObject *)NULL); + __pyx_v_ctx->user.list_hook = ((PyObject *)NULL); + + /* "msgpack/_unpacker.pyx":70 + * ctx.user.strict_map_key = strict_map_key + * ctx.user.object_hook = ctx.user.list_hook = NULL + * ctx.user.max_str_len = max_str_len # <<<<<<<<<<<<<< + * ctx.user.max_bin_len = max_bin_len + * ctx.user.max_array_len = max_array_len + */ + __pyx_v_ctx->user.max_str_len = __pyx_v_max_str_len; + + /* "msgpack/_unpacker.pyx":71 + * ctx.user.object_hook = ctx.user.list_hook = NULL + * ctx.user.max_str_len = max_str_len + * ctx.user.max_bin_len = max_bin_len # <<<<<<<<<<<<<< + * ctx.user.max_array_len = max_array_len + * ctx.user.max_map_len = max_map_len + */ + __pyx_v_ctx->user.max_bin_len = __pyx_v_max_bin_len; + + /* "msgpack/_unpacker.pyx":72 + * ctx.user.max_str_len = max_str_len + * ctx.user.max_bin_len = max_bin_len + * ctx.user.max_array_len = max_array_len # <<<<<<<<<<<<<< + * ctx.user.max_map_len = max_map_len + * ctx.user.max_ext_len = max_ext_len + */ + __pyx_v_ctx->user.max_array_len = __pyx_v_max_array_len; + + /* "msgpack/_unpacker.pyx":73 + * ctx.user.max_bin_len = max_bin_len + * ctx.user.max_array_len = max_array_len + * ctx.user.max_map_len = max_map_len # <<<<<<<<<<<<<< + * ctx.user.max_ext_len = 
max_ext_len + * + */ + __pyx_v_ctx->user.max_map_len = __pyx_v_max_map_len; + + /* "msgpack/_unpacker.pyx":74 + * ctx.user.max_array_len = max_array_len + * ctx.user.max_map_len = max_map_len + * ctx.user.max_ext_len = max_ext_len # <<<<<<<<<<<<<< + * + * if object_hook is not None and object_pairs_hook is not None: + */ + __pyx_v_ctx->user.max_ext_len = __pyx_v_max_ext_len; + + /* "msgpack/_unpacker.pyx":76 + * ctx.user.max_ext_len = max_ext_len + * + * if object_hook is not None and object_pairs_hook is not None: # <<<<<<<<<<<<<< + * raise TypeError("object_pairs_hook and object_hook are mutually exclusive.") + * + */ + __pyx_t_2 = (__pyx_v_object_hook != Py_None); + __pyx_t_3 = (__pyx_t_2 != 0); + if (__pyx_t_3) { + } else { + __pyx_t_1 = __pyx_t_3; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_3 = (__pyx_v_object_pairs_hook != Py_None); + __pyx_t_2 = (__pyx_t_3 != 0); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_unpacker.pyx":77 + * + * if object_hook is not None and object_pairs_hook is not None: + * raise TypeError("object_pairs_hook and object_hook are mutually exclusive.") # <<<<<<<<<<<<<< + * + * if object_hook is not None: + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 77, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":76 + * ctx.user.max_ext_len = max_ext_len + * + * if object_hook is not None and object_pairs_hook is not None: # <<<<<<<<<<<<<< + * raise TypeError("object_pairs_hook and object_hook are mutually exclusive.") + * + */ + } + + /* "msgpack/_unpacker.pyx":79 + * raise TypeError("object_pairs_hook and object_hook are mutually exclusive.") + * + * if object_hook is not None: # <<<<<<<<<<<<<< + * if not PyCallable_Check(object_hook): + * raise TypeError("object_hook must be a callable.") + */ + __pyx_t_1 = (__pyx_v_object_hook != Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "msgpack/_unpacker.pyx":80 + * + * if object_hook is not None: + * if not PyCallable_Check(object_hook): # <<<<<<<<<<<<<< + * raise TypeError("object_hook must be a callable.") + * ctx.user.object_hook = object_hook + */ + __pyx_t_2 = ((!(PyCallable_Check(__pyx_v_object_hook) != 0)) != 0); + if (unlikely(__pyx_t_2)) { + + /* "msgpack/_unpacker.pyx":81 + * if object_hook is not None: + * if not PyCallable_Check(object_hook): + * raise TypeError("object_hook must be a callable.") # <<<<<<<<<<<<<< + * ctx.user.object_hook = object_hook + * + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 81, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 81, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":80 + * + * if object_hook is not None: + * if not PyCallable_Check(object_hook): # <<<<<<<<<<<<<< + * raise TypeError("object_hook must be a callable.") + * ctx.user.object_hook = object_hook + */ + } + + /* "msgpack/_unpacker.pyx":82 + * if not PyCallable_Check(object_hook): + * raise TypeError("object_hook must be a callable.") + * ctx.user.object_hook = object_hook # <<<<<<<<<<<<<< + * + * if object_pairs_hook is None: + */ + __pyx_v_ctx->user.object_hook = ((PyObject *)__pyx_v_object_hook); + + /* "msgpack/_unpacker.pyx":79 + * raise 
TypeError("object_pairs_hook and object_hook are mutually exclusive.") + * + * if object_hook is not None: # <<<<<<<<<<<<<< + * if not PyCallable_Check(object_hook): + * raise TypeError("object_hook must be a callable.") + */ + } + + /* "msgpack/_unpacker.pyx":84 + * ctx.user.object_hook = object_hook + * + * if object_pairs_hook is None: # <<<<<<<<<<<<<< + * ctx.user.has_pairs_hook = False + * else: + */ + __pyx_t_2 = (__pyx_v_object_pairs_hook == Py_None); + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":85 + * + * if object_pairs_hook is None: + * ctx.user.has_pairs_hook = False # <<<<<<<<<<<<<< + * else: + * if not PyCallable_Check(object_pairs_hook): + */ + __pyx_v_ctx->user.has_pairs_hook = 0; + + /* "msgpack/_unpacker.pyx":84 + * ctx.user.object_hook = object_hook + * + * if object_pairs_hook is None: # <<<<<<<<<<<<<< + * ctx.user.has_pairs_hook = False + * else: + */ + goto __pyx_L8; + } + + /* "msgpack/_unpacker.pyx":87 + * ctx.user.has_pairs_hook = False + * else: + * if not PyCallable_Check(object_pairs_hook): # <<<<<<<<<<<<<< + * raise TypeError("object_pairs_hook must be a callable.") + * ctx.user.object_hook = object_pairs_hook + */ + /*else*/ { + __pyx_t_1 = ((!(PyCallable_Check(__pyx_v_object_pairs_hook) != 0)) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_unpacker.pyx":88 + * else: + * if not PyCallable_Check(object_pairs_hook): + * raise TypeError("object_pairs_hook must be a callable.") # <<<<<<<<<<<<<< + * ctx.user.object_hook = object_pairs_hook + * ctx.user.has_pairs_hook = True + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 88, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":87 + * ctx.user.has_pairs_hook = False + * else: + * if not PyCallable_Check(object_pairs_hook): # <<<<<<<<<<<<<< + * raise TypeError("object_pairs_hook must be a callable.") + * ctx.user.object_hook = object_pairs_hook + */ + } + + /* "msgpack/_unpacker.pyx":89 + * if not PyCallable_Check(object_pairs_hook): + * raise TypeError("object_pairs_hook must be a callable.") + * ctx.user.object_hook = object_pairs_hook # <<<<<<<<<<<<<< + * ctx.user.has_pairs_hook = True + * + */ + __pyx_v_ctx->user.object_hook = ((PyObject *)__pyx_v_object_pairs_hook); + + /* "msgpack/_unpacker.pyx":90 + * raise TypeError("object_pairs_hook must be a callable.") + * ctx.user.object_hook = object_pairs_hook + * ctx.user.has_pairs_hook = True # <<<<<<<<<<<<<< + * + * if list_hook is not None: + */ + __pyx_v_ctx->user.has_pairs_hook = 1; + } + __pyx_L8:; + + /* "msgpack/_unpacker.pyx":92 + * ctx.user.has_pairs_hook = True + * + * if list_hook is not None: # <<<<<<<<<<<<<< + * if not PyCallable_Check(list_hook): + * raise TypeError("list_hook must be a callable.") + */ + __pyx_t_1 = (__pyx_v_list_hook != Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "msgpack/_unpacker.pyx":93 + * + * if list_hook is not None: + * if not PyCallable_Check(list_hook): # <<<<<<<<<<<<<< + * raise TypeError("list_hook must be a callable.") + * ctx.user.list_hook = list_hook + */ + __pyx_t_2 = ((!(PyCallable_Check(__pyx_v_list_hook) != 0)) != 0); + if (unlikely(__pyx_t_2)) { + + /* "msgpack/_unpacker.pyx":94 + * if list_hook is not None: + * if not PyCallable_Check(list_hook): + * raise TypeError("list_hook must be a callable.") # <<<<<<<<<<<<<< + * ctx.user.list_hook = 
list_hook + * + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 94, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 94, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":93 + * + * if list_hook is not None: + * if not PyCallable_Check(list_hook): # <<<<<<<<<<<<<< + * raise TypeError("list_hook must be a callable.") + * ctx.user.list_hook = list_hook + */ + } + + /* "msgpack/_unpacker.pyx":95 + * if not PyCallable_Check(list_hook): + * raise TypeError("list_hook must be a callable.") + * ctx.user.list_hook = list_hook # <<<<<<<<<<<<<< + * + * if ext_hook is not None: + */ + __pyx_v_ctx->user.list_hook = ((PyObject *)__pyx_v_list_hook); + + /* "msgpack/_unpacker.pyx":92 + * ctx.user.has_pairs_hook = True + * + * if list_hook is not None: # <<<<<<<<<<<<<< + * if not PyCallable_Check(list_hook): + * raise TypeError("list_hook must be a callable.") + */ + } + + /* "msgpack/_unpacker.pyx":97 + * ctx.user.list_hook = list_hook + * + * if ext_hook is not None: # <<<<<<<<<<<<<< + * if not PyCallable_Check(ext_hook): + * raise TypeError("ext_hook must be a callable.") + */ + __pyx_t_2 = (__pyx_v_ext_hook != Py_None); + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":98 + * + * if ext_hook is not None: + * if not PyCallable_Check(ext_hook): # <<<<<<<<<<<<<< + * raise TypeError("ext_hook must be a callable.") + * ctx.user.ext_hook = ext_hook + */ + __pyx_t_1 = ((!(PyCallable_Check(__pyx_v_ext_hook) != 0)) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_unpacker.pyx":99 + * if ext_hook is not None: + * if not PyCallable_Check(ext_hook): + * raise TypeError("ext_hook must be a callable.") # <<<<<<<<<<<<<< + * ctx.user.ext_hook = ext_hook + * + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 99, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 99, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":98 + * + * if ext_hook is not None: + * if not PyCallable_Check(ext_hook): # <<<<<<<<<<<<<< + * raise TypeError("ext_hook must be a callable.") + * ctx.user.ext_hook = ext_hook + */ + } + + /* "msgpack/_unpacker.pyx":100 + * if not PyCallable_Check(ext_hook): + * raise TypeError("ext_hook must be a callable.") + * ctx.user.ext_hook = ext_hook # <<<<<<<<<<<<<< + * + * ctx.user.encoding = encoding + */ + __pyx_v_ctx->user.ext_hook = ((PyObject *)__pyx_v_ext_hook); + + /* "msgpack/_unpacker.pyx":97 + * ctx.user.list_hook = list_hook + * + * if ext_hook is not None: # <<<<<<<<<<<<<< + * if not PyCallable_Check(ext_hook): + * raise TypeError("ext_hook must be a callable.") + */ + } + + /* "msgpack/_unpacker.pyx":102 + * ctx.user.ext_hook = ext_hook + * + * ctx.user.encoding = encoding # <<<<<<<<<<<<<< + * ctx.user.unicode_errors = unicode_errors + * + */ + __pyx_v_ctx->user.encoding = __pyx_v_encoding; + + /* "msgpack/_unpacker.pyx":103 + * + * ctx.user.encoding = encoding + * ctx.user.unicode_errors = unicode_errors # <<<<<<<<<<<<<< + * + * def default_read_extended_type(typecode, data): + */ + __pyx_v_ctx->user.unicode_errors = __pyx_v_unicode_errors; + + /* "msgpack/_unpacker.pyx":57 + * void unpack_clear(unpack_context* ctx) + * + * cdef inline init_ctx(unpack_context *ctx, # <<<<<<<<<<<<<< + * object object_hook, object object_pairs_hook, + * object list_hook, 
object ext_hook, + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("msgpack._cmsgpack.init_ctx", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":105 + * ctx.user.unicode_errors = unicode_errors + * + * def default_read_extended_type(typecode, data): # <<<<<<<<<<<<<< + * raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_1default_read_extended_type(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_default_read_extended_type[] = "default_read_extended_type(typecode, data)"; +static PyMethodDef __pyx_mdef_7msgpack_9_cmsgpack_1default_read_extended_type = {"default_read_extended_type", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7msgpack_9_cmsgpack_1default_read_extended_type, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7msgpack_9_cmsgpack_default_read_extended_type}; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_1default_read_extended_type(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_typecode = 0; + CYTHON_UNUSED PyObject *__pyx_v_data = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("default_read_extended_type (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_typecode,&__pyx_n_s_data,0}; + PyObject* values[2] = {0,0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_typecode)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_data)) != 0)) kw_args--; + else { + __Pyx_RaiseArgtupleInvalid("default_read_extended_type", 1, 2, 2, 1); __PYX_ERR(1, 105, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "default_read_extended_type") < 0)) __PYX_ERR(1, 105, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 2) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + } + __pyx_v_typecode = values[0]; + __pyx_v_data = values[1]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("default_read_extended_type", 1, 2, 2, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 105, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("msgpack._cmsgpack.default_read_extended_type", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_default_read_extended_type(__pyx_self, __pyx_v_typecode, __pyx_v_data); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; 
+} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_default_read_extended_type(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_typecode, CYTHON_UNUSED PyObject *__pyx_v_data) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannySetupContext("default_read_extended_type", 0); + + /* "msgpack/_unpacker.pyx":106 + * + * def default_read_extended_type(typecode, data): + * raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode) # <<<<<<<<<<<<<< + * + * cdef inline int get_data_from_buffer(object obj, + */ + __pyx_t_1 = __Pyx_PyUnicode_FormatSafe(__pyx_kp_u_Cannot_decode_extended_type_with, __pyx_v_typecode); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 106, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_NotImplementedError, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 106, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 106, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":105 + * ctx.user.unicode_errors = unicode_errors + * + * def default_read_extended_type(typecode, data): # <<<<<<<<<<<<<< + * raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("msgpack._cmsgpack.default_read_extended_type", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":108 + * raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode) + * + * cdef inline int get_data_from_buffer(object obj, # <<<<<<<<<<<<<< + * Py_buffer *view, + * char **buf, + */ + +static CYTHON_INLINE int __pyx_f_7msgpack_9_cmsgpack_get_data_from_buffer(PyObject *__pyx_v_obj, Py_buffer *__pyx_v_view, char **__pyx_v_buf, Py_ssize_t *__pyx_v_buffer_len, int *__pyx_v_new_protocol) { + PyObject *__pyx_v_contiguous = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + Py_ssize_t __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + char *__pyx_t_6; + __Pyx_RefNannySetupContext("get_data_from_buffer", 0); + + /* "msgpack/_unpacker.pyx":115 + * cdef object contiguous + * cdef Py_buffer tmp + * if PyObject_CheckBuffer(obj): # <<<<<<<<<<<<<< + * new_protocol[0] = 1 + * if PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) == -1: + */ + __pyx_t_1 = (PyObject_CheckBuffer(__pyx_v_obj) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":116 + * cdef Py_buffer tmp + * if PyObject_CheckBuffer(obj): + * new_protocol[0] = 1 # <<<<<<<<<<<<<< + * if PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) == -1: + * raise + */ + (__pyx_v_new_protocol[0]) = 1; + + /* "msgpack/_unpacker.pyx":117 + * if PyObject_CheckBuffer(obj): + * new_protocol[0] = 1 + * if PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) == -1: # <<<<<<<<<<<<<< + * raise + * if view.itemsize != 1: + */ + __pyx_t_2 = PyObject_GetBuffer(__pyx_v_obj, __pyx_v_view, PyBUF_FULL_RO); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 117, __pyx_L1_error) + __pyx_t_1 = ((__pyx_t_2 == -1L) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_unpacker.pyx":118 + * new_protocol[0] = 1 + * if PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) == -1: + * raise # <<<<<<<<<<<<<< + * if 
view.itemsize != 1: + * PyBuffer_Release(view) + */ + __Pyx_ReraiseException(); __PYX_ERR(1, 118, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":117 + * if PyObject_CheckBuffer(obj): + * new_protocol[0] = 1 + * if PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) == -1: # <<<<<<<<<<<<<< + * raise + * if view.itemsize != 1: + */ + } + + /* "msgpack/_unpacker.pyx":119 + * if PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) == -1: + * raise + * if view.itemsize != 1: # <<<<<<<<<<<<<< + * PyBuffer_Release(view) + * raise BufferError("cannot unpack from multi-byte object") + */ + __pyx_t_1 = ((__pyx_v_view->itemsize != 1) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_unpacker.pyx":120 + * raise + * if view.itemsize != 1: + * PyBuffer_Release(view) # <<<<<<<<<<<<<< + * raise BufferError("cannot unpack from multi-byte object") + * if PyBuffer_IsContiguous(view, b'A') == 0: + */ + PyBuffer_Release(__pyx_v_view); + + /* "msgpack/_unpacker.pyx":121 + * if view.itemsize != 1: + * PyBuffer_Release(view) + * raise BufferError("cannot unpack from multi-byte object") # <<<<<<<<<<<<<< + * if PyBuffer_IsContiguous(view, b'A') == 0: + * PyBuffer_Release(view) + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_BufferError, __pyx_tuple__20, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 121, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 121, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":119 + * if PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) == -1: + * raise + * if view.itemsize != 1: # <<<<<<<<<<<<<< + * PyBuffer_Release(view) + * raise BufferError("cannot unpack from multi-byte object") + */ + } + + /* "msgpack/_unpacker.pyx":122 + * PyBuffer_Release(view) + * raise BufferError("cannot unpack from multi-byte object") + * if PyBuffer_IsContiguous(view, b'A') == 0: # <<<<<<<<<<<<<< + * PyBuffer_Release(view) + * # create a contiguous copy and get buffer + */ + __pyx_t_1 = ((PyBuffer_IsContiguous(__pyx_v_view, 'A') == 0) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":123 + * raise BufferError("cannot unpack from multi-byte object") + * if PyBuffer_IsContiguous(view, b'A') == 0: + * PyBuffer_Release(view) # <<<<<<<<<<<<<< + * # create a contiguous copy and get buffer + * contiguous = PyMemoryView_GetContiguous(obj, PyBUF_READ, b'C') + */ + PyBuffer_Release(__pyx_v_view); + + /* "msgpack/_unpacker.pyx":125 + * PyBuffer_Release(view) + * # create a contiguous copy and get buffer + * contiguous = PyMemoryView_GetContiguous(obj, PyBUF_READ, b'C') # <<<<<<<<<<<<<< + * PyObject_GetBuffer(contiguous, view, PyBUF_SIMPLE) + * # view must hold the only reference to contiguous, + */ + __pyx_t_3 = PyMemoryView_GetContiguous(__pyx_v_obj, PyBUF_READ, 'C'); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 125, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_v_contiguous = __pyx_t_3; + __pyx_t_3 = 0; + + /* "msgpack/_unpacker.pyx":126 + * # create a contiguous copy and get buffer + * contiguous = PyMemoryView_GetContiguous(obj, PyBUF_READ, b'C') + * PyObject_GetBuffer(contiguous, view, PyBUF_SIMPLE) # <<<<<<<<<<<<<< + * # view must hold the only reference to contiguous, + * # so memory is freed when view is released + */ + __pyx_t_2 = PyObject_GetBuffer(__pyx_v_contiguous, __pyx_v_view, PyBUF_SIMPLE); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 126, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":129 + * # view must hold the only reference to contiguous, + * # so memory is freed when view is released + * Py_DECREF(contiguous) # 
<<<<<<<<<<<<<< + * buffer_len[0] = view.len + * buf[0] = view.buf + */ + Py_DECREF(__pyx_v_contiguous); + + /* "msgpack/_unpacker.pyx":122 + * PyBuffer_Release(view) + * raise BufferError("cannot unpack from multi-byte object") + * if PyBuffer_IsContiguous(view, b'A') == 0: # <<<<<<<<<<<<<< + * PyBuffer_Release(view) + * # create a contiguous copy and get buffer + */ + } + + /* "msgpack/_unpacker.pyx":130 + * # so memory is freed when view is released + * Py_DECREF(contiguous) + * buffer_len[0] = view.len # <<<<<<<<<<<<<< + * buf[0] = view.buf + * return 1 + */ + __pyx_t_4 = __pyx_v_view->len; + (__pyx_v_buffer_len[0]) = __pyx_t_4; + + /* "msgpack/_unpacker.pyx":131 + * Py_DECREF(contiguous) + * buffer_len[0] = view.len + * buf[0] = view.buf # <<<<<<<<<<<<<< + * return 1 + * else: + */ + (__pyx_v_buf[0]) = ((char *)__pyx_v_view->buf); + + /* "msgpack/_unpacker.pyx":132 + * buffer_len[0] = view.len + * buf[0] = view.buf + * return 1 # <<<<<<<<<<<<<< + * else: + * new_protocol[0] = 0 + */ + __pyx_r = 1; + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":115 + * cdef object contiguous + * cdef Py_buffer tmp + * if PyObject_CheckBuffer(obj): # <<<<<<<<<<<<<< + * new_protocol[0] = 1 + * if PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) == -1: + */ + } + + /* "msgpack/_unpacker.pyx":134 + * return 1 + * else: + * new_protocol[0] = 0 # <<<<<<<<<<<<<< + * if PyObject_AsReadBuffer(obj, buf, buffer_len) == -1: + * raise BufferError("could not get memoryview") + */ + /*else*/ { + (__pyx_v_new_protocol[0]) = 0; + + /* "msgpack/_unpacker.pyx":135 + * else: + * new_protocol[0] = 0 + * if PyObject_AsReadBuffer(obj, buf, buffer_len) == -1: # <<<<<<<<<<<<<< + * raise BufferError("could not get memoryview") + * PyErr_WarnEx(RuntimeWarning, + */ + __pyx_t_2 = PyObject_AsReadBuffer(__pyx_v_obj, ((void const **)__pyx_v_buf), __pyx_v_buffer_len); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 135, __pyx_L1_error) + __pyx_t_1 = ((__pyx_t_2 == -1L) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_unpacker.pyx":136 + * new_protocol[0] = 0 + * if PyObject_AsReadBuffer(obj, buf, buffer_len) == -1: + * raise BufferError("could not get memoryview") # <<<<<<<<<<<<<< + * PyErr_WarnEx(RuntimeWarning, + * "using old buffer interface to unpack %s; " + */ + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_builtin_BufferError, __pyx_tuple__21, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 136, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":135 + * else: + * new_protocol[0] = 0 + * if PyObject_AsReadBuffer(obj, buf, buffer_len) == -1: # <<<<<<<<<<<<<< + * raise BufferError("could not get memoryview") + * PyErr_WarnEx(RuntimeWarning, + */ + } + + /* "msgpack/_unpacker.pyx":140 + * "using old buffer interface to unpack %s; " + * "this leads to unpacking errors if slicing is used and " + * "will be removed in a future version" % type(obj), # <<<<<<<<<<<<<< + * 1) + * return 1 + */ + __pyx_t_3 = __Pyx_PyObject_CallOneArg(((PyObject *)__pyx_ptype_7cpython_4type_type), __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = PyUnicode_Format(__pyx_kp_u_using_old_buffer_interface_to_un, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 140, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_6 = __Pyx_PyObject_AsWritableString(__pyx_t_5); if (unlikely((!__pyx_t_6) && PyErr_Occurred())) __PYX_ERR(1, 140, 
__pyx_L1_error) + + /* "msgpack/_unpacker.pyx":137 + * if PyObject_AsReadBuffer(obj, buf, buffer_len) == -1: + * raise BufferError("could not get memoryview") + * PyErr_WarnEx(RuntimeWarning, # <<<<<<<<<<<<<< + * "using old buffer interface to unpack %s; " + * "this leads to unpacking errors if slicing is used and " + */ + __pyx_t_2 = PyErr_WarnEx(__pyx_builtin_RuntimeWarning, __pyx_t_6, 1); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 137, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + + /* "msgpack/_unpacker.pyx":142 + * "will be removed in a future version" % type(obj), + * 1) + * return 1 # <<<<<<<<<<<<<< + * + * def unpackb(object packed, object object_hook=None, object list_hook=None, + */ + __pyx_r = 1; + goto __pyx_L0; + } + + /* "msgpack/_unpacker.pyx":108 + * raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode) + * + * cdef inline int get_data_from_buffer(object obj, # <<<<<<<<<<<<<< + * Py_buffer *view, + * char **buf, + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("msgpack._cmsgpack.get_data_from_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_contiguous); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":144 + * return 1 + * + * def unpackb(object packed, object object_hook=None, object list_hook=None, # <<<<<<<<<<<<<< + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * encoding=None, unicode_errors=None, + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_3unpackb(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_2unpackb[] = "unpackb(packed, object_hook=None, list_hook=None, bool use_list=True, bool raw=True, bool strict_map_key=False, encoding=None, unicode_errors=None, object_pairs_hook=None, ext_hook=ExtType, Py_ssize_t max_str_len=-1, Py_ssize_t max_bin_len=-1, Py_ssize_t max_array_len=-1, Py_ssize_t max_map_len=-1, Py_ssize_t max_ext_len=-1)\n\n Unpack packed_bytes to object. 
Returns an unpacked object.\n\n Raises ``ExtraData`` when *packed* contains extra bytes.\n Raises ``ValueError`` when *packed* is incomplete.\n Raises ``FormatError`` when *packed* is not valid msgpack.\n Raises ``StackError`` when *packed* contains too deeply nested data.\n Other exceptions can be raised during unpacking.\n\n See :class:`Unpacker` for options.\n\n *max_xxx_len* options are configured automatically from ``len(packed)``.\n "; +static PyMethodDef __pyx_mdef_7msgpack_9_cmsgpack_3unpackb = {"unpackb", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7msgpack_9_cmsgpack_3unpackb, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7msgpack_9_cmsgpack_2unpackb}; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_3unpackb(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_packed = 0; + PyObject *__pyx_v_object_hook = 0; + PyObject *__pyx_v_list_hook = 0; + int __pyx_v_use_list; + int __pyx_v_raw; + int __pyx_v_strict_map_key; + PyObject *__pyx_v_encoding = 0; + PyObject *__pyx_v_unicode_errors = 0; + PyObject *__pyx_v_object_pairs_hook = 0; + PyObject *__pyx_v_ext_hook = 0; + Py_ssize_t __pyx_v_max_str_len; + Py_ssize_t __pyx_v_max_bin_len; + Py_ssize_t __pyx_v_max_array_len; + Py_ssize_t __pyx_v_max_map_len; + Py_ssize_t __pyx_v_max_ext_len; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("unpackb (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_packed,&__pyx_n_s_object_hook,&__pyx_n_s_list_hook,&__pyx_n_s_use_list,&__pyx_n_s_raw,&__pyx_n_s_strict_map_key,&__pyx_n_s_encoding,&__pyx_n_s_unicode_errors,&__pyx_n_s_object_pairs_hook,&__pyx_n_s_ext_hook,&__pyx_n_s_max_str_len,&__pyx_n_s_max_bin_len,&__pyx_n_s_max_array_len,&__pyx_n_s_max_map_len,&__pyx_n_s_max_ext_len,0}; + PyObject* values[15] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + values[1] = ((PyObject *)Py_None); + values[2] = ((PyObject *)Py_None); + + /* "msgpack/_unpacker.pyx":146 + * def unpackb(object packed, object object_hook=None, object list_hook=None, + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * encoding=None, unicode_errors=None, # <<<<<<<<<<<<<< + * object_pairs_hook=None, ext_hook=ExtType, + * Py_ssize_t max_str_len=-1, + */ + values[6] = ((PyObject *)Py_None); + values[7] = ((PyObject *)Py_None); + + /* "msgpack/_unpacker.pyx":147 + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * encoding=None, unicode_errors=None, + * object_pairs_hook=None, ext_hook=ExtType, # <<<<<<<<<<<<<< + * Py_ssize_t max_str_len=-1, + * Py_ssize_t max_bin_len=-1, + */ + values[8] = ((PyObject *)Py_None); + values[9] = __pyx_k__22; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14); + CYTHON_FALLTHROUGH; + case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13); + CYTHON_FALLTHROUGH; + case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); + CYTHON_FALLTHROUGH; + case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); + CYTHON_FALLTHROUGH; + case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); + CYTHON_FALLTHROUGH; + case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); + CYTHON_FALLTHROUGH; + case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + CYTHON_FALLTHROUGH; + case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + CYTHON_FALLTHROUGH; + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + 
CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_packed)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_object_hook); + if (value) { values[1] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 2: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_list_hook); + if (value) { values[2] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 3: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_use_list); + if (value) { values[3] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 4: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_raw); + if (value) { values[4] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 5: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_strict_map_key); + if (value) { values[5] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 6: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_encoding); + if (value) { values[6] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 7: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_unicode_errors); + if (value) { values[7] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 8: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_object_pairs_hook); + if (value) { values[8] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 9: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_ext_hook); + if (value) { values[9] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 10: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_str_len); + if (value) { values[10] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 11: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_bin_len); + if (value) { values[11] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 12: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_array_len); + if (value) { values[12] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 13: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_map_len); + if (value) { values[13] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 14: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_ext_len); + if (value) { values[14] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "unpackb") < 0)) __PYX_ERR(1, 144, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14); + 
CYTHON_FALLTHROUGH; + case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13); + CYTHON_FALLTHROUGH; + case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); + CYTHON_FALLTHROUGH; + case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); + CYTHON_FALLTHROUGH; + case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); + CYTHON_FALLTHROUGH; + case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); + CYTHON_FALLTHROUGH; + case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + CYTHON_FALLTHROUGH; + case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + CYTHON_FALLTHROUGH; + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_packed = values[0]; + __pyx_v_object_hook = values[1]; + __pyx_v_list_hook = values[2]; + if (values[3]) { + __pyx_v_use_list = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_use_list == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 145, __pyx_L3_error) + } else { + + /* "msgpack/_unpacker.pyx":145 + * + * def unpackb(object packed, object object_hook=None, object list_hook=None, + * bint use_list=True, bint raw=True, bint strict_map_key=False, # <<<<<<<<<<<<<< + * encoding=None, unicode_errors=None, + * object_pairs_hook=None, ext_hook=ExtType, + */ + __pyx_v_use_list = ((int)1); + } + if (values[4]) { + __pyx_v_raw = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_raw == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 145, __pyx_L3_error) + } else { + __pyx_v_raw = ((int)1); + } + if (values[5]) { + __pyx_v_strict_map_key = __Pyx_PyObject_IsTrue(values[5]); if (unlikely((__pyx_v_strict_map_key == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 145, __pyx_L3_error) + } else { + __pyx_v_strict_map_key = ((int)0); + } + __pyx_v_encoding = values[6]; + __pyx_v_unicode_errors = values[7]; + __pyx_v_object_pairs_hook = values[8]; + __pyx_v_ext_hook = values[9]; + if (values[10]) { + __pyx_v_max_str_len = __Pyx_PyIndex_AsSsize_t(values[10]); if (unlikely((__pyx_v_max_str_len == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 148, __pyx_L3_error) + } else { + __pyx_v_max_str_len = ((Py_ssize_t)-1L); + } + if (values[11]) { + __pyx_v_max_bin_len = __Pyx_PyIndex_AsSsize_t(values[11]); if (unlikely((__pyx_v_max_bin_len == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L3_error) + } else { + __pyx_v_max_bin_len = ((Py_ssize_t)-1L); + } + if (values[12]) { + __pyx_v_max_array_len = __Pyx_PyIndex_AsSsize_t(values[12]); if (unlikely((__pyx_v_max_array_len == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 150, __pyx_L3_error) + } else { + __pyx_v_max_array_len = ((Py_ssize_t)-1L); + } + if (values[13]) { + __pyx_v_max_map_len = __Pyx_PyIndex_AsSsize_t(values[13]); if (unlikely((__pyx_v_max_map_len == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 151, __pyx_L3_error) + } else { + __pyx_v_max_map_len = ((Py_ssize_t)-1L); + } + if (values[14]) { + __pyx_v_max_ext_len = __Pyx_PyIndex_AsSsize_t(values[14]); if (unlikely((__pyx_v_max_ext_len == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 152, __pyx_L3_error) + } else { + 
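+    /* DEV: -1 is a sentinel meaning "no explicit limit was passed". The
+     * max_*_len limits are then derived from len(packed) a little further
+     * down, which is what the docstring means by "*max_xxx_len* options
+     * are configured automatically from ``len(packed)``". */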
__pyx_v_max_ext_len = ((Py_ssize_t)-1L); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("unpackb", 0, 1, 15, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 144, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("msgpack._cmsgpack.unpackb", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_2unpackb(__pyx_self, __pyx_v_packed, __pyx_v_object_hook, __pyx_v_list_hook, __pyx_v_use_list, __pyx_v_raw, __pyx_v_strict_map_key, __pyx_v_encoding, __pyx_v_unicode_errors, __pyx_v_object_pairs_hook, __pyx_v_ext_hook, __pyx_v_max_str_len, __pyx_v_max_bin_len, __pyx_v_max_array_len, __pyx_v_max_map_len, __pyx_v_max_ext_len); + + /* "msgpack/_unpacker.pyx":144 + * return 1 + * + * def unpackb(object packed, object object_hook=None, object list_hook=None, # <<<<<<<<<<<<<< + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * encoding=None, unicode_errors=None, + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_2unpackb(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_packed, PyObject *__pyx_v_object_hook, PyObject *__pyx_v_list_hook, int __pyx_v_use_list, int __pyx_v_raw, int __pyx_v_strict_map_key, PyObject *__pyx_v_encoding, PyObject *__pyx_v_unicode_errors, PyObject *__pyx_v_object_pairs_hook, PyObject *__pyx_v_ext_hook, Py_ssize_t __pyx_v_max_str_len, Py_ssize_t __pyx_v_max_bin_len, Py_ssize_t __pyx_v_max_array_len, Py_ssize_t __pyx_v_max_map_len, Py_ssize_t __pyx_v_max_ext_len) { + unpack_context __pyx_v_ctx; + Py_ssize_t __pyx_v_off; + int __pyx_v_ret; + Py_buffer __pyx_v_view; + char *__pyx_v_buf; + Py_ssize_t __pyx_v_buf_len; + char const *__pyx_v_cenc; + char const *__pyx_v_cerr; + int __pyx_v_new_protocol; + PyObject *__pyx_v_obj = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + char const *__pyx_t_4; + char const *__pyx_t_5; + PyObject *__pyx_t_6 = NULL; + int __pyx_t_7; + char const *__pyx_t_8; + PyObject *__pyx_t_9 = NULL; + PyObject *__pyx_t_10 = NULL; + PyObject *__pyx_t_11 = NULL; + PyObject *__pyx_t_12 = NULL; + PyObject *__pyx_t_13 = NULL; + PyObject *__pyx_t_14 = NULL; + PyObject *__pyx_t_15 = NULL; + PyObject *__pyx_t_16 = NULL; + PyObject *__pyx_t_17 = NULL; + PyObject *__pyx_t_18 = NULL; + __Pyx_RefNannySetupContext("unpackb", 0); + + /* "msgpack/_unpacker.pyx":167 + * """ + * cdef unpack_context ctx + * cdef Py_ssize_t off = 0 # <<<<<<<<<<<<<< + * cdef int ret + * + */ + __pyx_v_off = 0; + + /* "msgpack/_unpacker.pyx":171 + * + * cdef Py_buffer view + * cdef char* buf = NULL # <<<<<<<<<<<<<< + * cdef Py_ssize_t buf_len + * cdef const char* cenc = NULL + */ + __pyx_v_buf = NULL; + + /* "msgpack/_unpacker.pyx":173 + * cdef char* buf = NULL + * cdef Py_ssize_t buf_len + * cdef const char* cenc = NULL # <<<<<<<<<<<<<< + * cdef const char* cerr = NULL + * cdef int new_protocol = 0 + */ + __pyx_v_cenc = NULL; + + /* "msgpack/_unpacker.pyx":174 + * cdef Py_ssize_t buf_len + * cdef const char* cenc = NULL + * cdef const char* cerr = NULL # <<<<<<<<<<<<<< + * cdef int new_protocol = 0 + * + */ + __pyx_v_cerr = NULL; + + /* "msgpack/_unpacker.pyx":175 + * cdef const char* cenc = NULL + * cdef const char* cerr = NULL + * cdef int new_protocol = 0 # <<<<<<<<<<<<<< + * + * if encoding is not None: + */ + __pyx_v_new_protocol = 0; + + /* 
"msgpack/_unpacker.pyx":177 + * cdef int new_protocol = 0 + * + * if encoding is not None: # <<<<<<<<<<<<<< + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1) + * cenc = encoding + */ + __pyx_t_1 = (__pyx_v_encoding != Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (__pyx_t_2) { + + /* "msgpack/_unpacker.pyx":178 + * + * if encoding is not None: + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1) # <<<<<<<<<<<<<< + * cenc = encoding + * + */ + __pyx_t_3 = PyErr_WarnEx(__pyx_builtin_DeprecationWarning, ((char *)"encoding is deprecated, Use raw=False instead."), 1); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 178, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":179 + * if encoding is not None: + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1) + * cenc = encoding # <<<<<<<<<<<<<< + * + * if unicode_errors is not None: + */ + __pyx_t_4 = __Pyx_PyObject_AsString(__pyx_v_encoding); if (unlikely((!__pyx_t_4) && PyErr_Occurred())) __PYX_ERR(1, 179, __pyx_L1_error) + __pyx_v_cenc = __pyx_t_4; + + /* "msgpack/_unpacker.pyx":177 + * cdef int new_protocol = 0 + * + * if encoding is not None: # <<<<<<<<<<<<<< + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1) + * cenc = encoding + */ + } + + /* "msgpack/_unpacker.pyx":181 + * cenc = encoding + * + * if unicode_errors is not None: # <<<<<<<<<<<<<< + * cerr = unicode_errors + * + */ + __pyx_t_2 = (__pyx_v_unicode_errors != Py_None); + __pyx_t_1 = (__pyx_t_2 != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":182 + * + * if unicode_errors is not None: + * cerr = unicode_errors # <<<<<<<<<<<<<< + * + * get_data_from_buffer(packed, &view, &buf, &buf_len, &new_protocol) + */ + __pyx_t_5 = __Pyx_PyObject_AsString(__pyx_v_unicode_errors); if (unlikely((!__pyx_t_5) && PyErr_Occurred())) __PYX_ERR(1, 182, __pyx_L1_error) + __pyx_v_cerr = __pyx_t_5; + + /* "msgpack/_unpacker.pyx":181 + * cenc = encoding + * + * if unicode_errors is not None: # <<<<<<<<<<<<<< + * cerr = unicode_errors + * + */ + } + + /* "msgpack/_unpacker.pyx":184 + * cerr = unicode_errors + * + * get_data_from_buffer(packed, &view, &buf, &buf_len, &new_protocol) # <<<<<<<<<<<<<< + * + * if max_str_len == -1: + */ + __pyx_t_3 = __pyx_f_7msgpack_9_cmsgpack_get_data_from_buffer(__pyx_v_packed, (&__pyx_v_view), (&__pyx_v_buf), (&__pyx_v_buf_len), (&__pyx_v_new_protocol)); if (unlikely(__pyx_t_3 == ((int)0))) __PYX_ERR(1, 184, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":186 + * get_data_from_buffer(packed, &view, &buf, &buf_len, &new_protocol) + * + * if max_str_len == -1: # <<<<<<<<<<<<<< + * max_str_len = buf_len + * if max_bin_len == -1: + */ + __pyx_t_1 = ((__pyx_v_max_str_len == -1L) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":187 + * + * if max_str_len == -1: + * max_str_len = buf_len # <<<<<<<<<<<<<< + * if max_bin_len == -1: + * max_bin_len = buf_len + */ + __pyx_v_max_str_len = __pyx_v_buf_len; + + /* "msgpack/_unpacker.pyx":186 + * get_data_from_buffer(packed, &view, &buf, &buf_len, &new_protocol) + * + * if max_str_len == -1: # <<<<<<<<<<<<<< + * max_str_len = buf_len + * if max_bin_len == -1: + */ + } + + /* "msgpack/_unpacker.pyx":188 + * if max_str_len == -1: + * max_str_len = buf_len + * if max_bin_len == -1: # <<<<<<<<<<<<<< + * max_bin_len = buf_len + * if max_array_len == -1: + */ + __pyx_t_1 = ((__pyx_v_max_bin_len == -1L) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":189 + * max_str_len = 
buf_len + * if max_bin_len == -1: + * max_bin_len = buf_len # <<<<<<<<<<<<<< + * if max_array_len == -1: + * max_array_len = buf_len + */ + __pyx_v_max_bin_len = __pyx_v_buf_len; + + /* "msgpack/_unpacker.pyx":188 + * if max_str_len == -1: + * max_str_len = buf_len + * if max_bin_len == -1: # <<<<<<<<<<<<<< + * max_bin_len = buf_len + * if max_array_len == -1: + */ + } + + /* "msgpack/_unpacker.pyx":190 + * if max_bin_len == -1: + * max_bin_len = buf_len + * if max_array_len == -1: # <<<<<<<<<<<<<< + * max_array_len = buf_len + * if max_map_len == -1: + */ + __pyx_t_1 = ((__pyx_v_max_array_len == -1L) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":191 + * max_bin_len = buf_len + * if max_array_len == -1: + * max_array_len = buf_len # <<<<<<<<<<<<<< + * if max_map_len == -1: + * max_map_len = buf_len//2 + */ + __pyx_v_max_array_len = __pyx_v_buf_len; + + /* "msgpack/_unpacker.pyx":190 + * if max_bin_len == -1: + * max_bin_len = buf_len + * if max_array_len == -1: # <<<<<<<<<<<<<< + * max_array_len = buf_len + * if max_map_len == -1: + */ + } + + /* "msgpack/_unpacker.pyx":192 + * if max_array_len == -1: + * max_array_len = buf_len + * if max_map_len == -1: # <<<<<<<<<<<<<< + * max_map_len = buf_len//2 + * if max_ext_len == -1: + */ + __pyx_t_1 = ((__pyx_v_max_map_len == -1L) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":193 + * max_array_len = buf_len + * if max_map_len == -1: + * max_map_len = buf_len//2 # <<<<<<<<<<<<<< + * if max_ext_len == -1: + * max_ext_len = buf_len + */ + __pyx_v_max_map_len = __Pyx_div_Py_ssize_t(__pyx_v_buf_len, 2); + + /* "msgpack/_unpacker.pyx":192 + * if max_array_len == -1: + * max_array_len = buf_len + * if max_map_len == -1: # <<<<<<<<<<<<<< + * max_map_len = buf_len//2 + * if max_ext_len == -1: + */ + } + + /* "msgpack/_unpacker.pyx":194 + * if max_map_len == -1: + * max_map_len = buf_len//2 + * if max_ext_len == -1: # <<<<<<<<<<<<<< + * max_ext_len = buf_len + * + */ + __pyx_t_1 = ((__pyx_v_max_ext_len == -1L) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":195 + * max_map_len = buf_len//2 + * if max_ext_len == -1: + * max_ext_len = buf_len # <<<<<<<<<<<<<< + * + * try: + */ + __pyx_v_max_ext_len = __pyx_v_buf_len; + + /* "msgpack/_unpacker.pyx":194 + * if max_map_len == -1: + * max_map_len = buf_len//2 + * if max_ext_len == -1: # <<<<<<<<<<<<<< + * max_ext_len = buf_len + * + */ + } + + /* "msgpack/_unpacker.pyx":197 + * max_ext_len = buf_len + * + * try: # <<<<<<<<<<<<<< + * init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, ext_hook, + * use_list, raw, strict_map_key, cenc, cerr, + */ + /*try:*/ { + + /* "msgpack/_unpacker.pyx":198 + * + * try: + * init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, ext_hook, # <<<<<<<<<<<<<< + * use_list, raw, strict_map_key, cenc, cerr, + * max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len) + */ + __pyx_t_6 = __pyx_f_7msgpack_9_cmsgpack_init_ctx((&__pyx_v_ctx), __pyx_v_object_hook, __pyx_v_object_pairs_hook, __pyx_v_list_hook, __pyx_v_ext_hook, __pyx_v_use_list, __pyx_v_raw, __pyx_v_strict_map_key, __pyx_v_cenc, __pyx_v_cerr, __pyx_v_max_str_len, __pyx_v_max_bin_len, __pyx_v_max_array_len, __pyx_v_max_map_len, __pyx_v_max_ext_len); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 198, __pyx_L11_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + + /* "msgpack/_unpacker.pyx":201 + * use_list, raw, strict_map_key, cenc, cerr, + * max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len) + * ret = unpack_construct(&ctx, buf, buf_len, 
&off) # <<<<<<<<<<<<<< + * finally: + * if new_protocol: + */ + __pyx_t_3 = unpack_construct((&__pyx_v_ctx), __pyx_v_buf, __pyx_v_buf_len, (&__pyx_v_off)); if (unlikely(__pyx_t_3 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(1, 201, __pyx_L11_error) + __pyx_v_ret = __pyx_t_3; + } + + /* "msgpack/_unpacker.pyx":203 + * ret = unpack_construct(&ctx, buf, buf_len, &off) + * finally: + * if new_protocol: # <<<<<<<<<<<<<< + * PyBuffer_Release(&view); + * + */ + /*finally:*/ { + /*normal exit:*/{ + __pyx_t_1 = (__pyx_v_new_protocol != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":204 + * finally: + * if new_protocol: + * PyBuffer_Release(&view); # <<<<<<<<<<<<<< + * + * if ret == 1: + */ + PyBuffer_Release((&__pyx_v_view)); + + /* "msgpack/_unpacker.pyx":203 + * ret = unpack_construct(&ctx, buf, buf_len, &off) + * finally: + * if new_protocol: # <<<<<<<<<<<<<< + * PyBuffer_Release(&view); + * + */ + } + goto __pyx_L12; + } + __pyx_L11_error:; + /*exception exit:*/{ + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_12, &__pyx_t_13, &__pyx_t_14); + if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11) < 0)) __Pyx_ErrFetch(&__pyx_t_9, &__pyx_t_10, &__pyx_t_11); + __Pyx_XGOTREF(__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_10); + __Pyx_XGOTREF(__pyx_t_11); + __Pyx_XGOTREF(__pyx_t_12); + __Pyx_XGOTREF(__pyx_t_13); + __Pyx_XGOTREF(__pyx_t_14); + __pyx_t_3 = __pyx_lineno; __pyx_t_7 = __pyx_clineno; __pyx_t_8 = __pyx_filename; + { + __pyx_t_1 = (__pyx_v_new_protocol != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":204 + * finally: + * if new_protocol: + * PyBuffer_Release(&view); # <<<<<<<<<<<<<< + * + * if ret == 1: + */ + PyBuffer_Release((&__pyx_v_view)); + + /* "msgpack/_unpacker.pyx":203 + * ret = unpack_construct(&ctx, buf, buf_len, &off) + * finally: + * if new_protocol: # <<<<<<<<<<<<<< + * PyBuffer_Release(&view); + * + */ + } + } + if (PY_MAJOR_VERSION >= 3) { + __Pyx_XGIVEREF(__pyx_t_12); + __Pyx_XGIVEREF(__pyx_t_13); + __Pyx_XGIVEREF(__pyx_t_14); + __Pyx_ExceptionReset(__pyx_t_12, __pyx_t_13, __pyx_t_14); + } + __Pyx_XGIVEREF(__pyx_t_9); + __Pyx_XGIVEREF(__pyx_t_10); + __Pyx_XGIVEREF(__pyx_t_11); + __Pyx_ErrRestore(__pyx_t_9, __pyx_t_10, __pyx_t_11); + __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; __pyx_t_13 = 0; __pyx_t_14 = 0; + __pyx_lineno = __pyx_t_3; __pyx_clineno = __pyx_t_7; __pyx_filename = __pyx_t_8; + goto __pyx_L1_error; + } + __pyx_L12:; + } + + /* "msgpack/_unpacker.pyx":206 + * PyBuffer_Release(&view); + * + * if ret == 1: # <<<<<<<<<<<<<< + * obj = unpack_data(&ctx) + * if off < buf_len: + */ + __pyx_t_1 = ((__pyx_v_ret == 1) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":207 + * + * if ret == 1: + * obj = unpack_data(&ctx) # <<<<<<<<<<<<<< + * if off < buf_len: + * raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off)) + */ + __pyx_t_6 = unpack_data((&__pyx_v_ctx)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 207, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_v_obj = __pyx_t_6; + __pyx_t_6 = 0; + + /* "msgpack/_unpacker.pyx":208 + * if ret == 1: + * obj = unpack_data(&ctx) + * if off < buf_len: # <<<<<<<<<<<<<< + * raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off)) + * return obj + */ + __pyx_t_1 = ((__pyx_v_off < __pyx_v_buf_len) != 0); + if (unlikely(__pyx_t_1)) { 
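+    /* DEV: A minimal usage sketch (not generated by Cython) of the ExtraData
+     * behaviour raised just below, assuming only the documented msgpack API
+     * (ExtraData carries the already-unpacked object as .unpacked and the
+     * trailing bytes as .extra):
+     *
+     *     >>> import msgpack
+     *     >>> try:
+     *     ...     msgpack.unpackb(b"\x01\x02")  # one packed int, one trailing byte
+     *     ... except msgpack.ExtraData as e:
+     *     ...     (e.unpacked, e.extra)
+     *     (1, b'\x02')
+     */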
+ + /* "msgpack/_unpacker.pyx":209 + * obj = unpack_data(&ctx) + * if off < buf_len: + * raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off)) # <<<<<<<<<<<<<< + * return obj + * unpack_clear(&ctx) + */ + __Pyx_GetModuleGlobalName(__pyx_t_15, __pyx_n_s_ExtraData); if (unlikely(!__pyx_t_15)) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_15); + __pyx_t_16 = PyBytes_FromStringAndSize((__pyx_v_buf + __pyx_v_off), (__pyx_v_buf_len - __pyx_v_off)); if (unlikely(!__pyx_t_16)) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_16); + __pyx_t_17 = NULL; + __pyx_t_7 = 0; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_15))) { + __pyx_t_17 = PyMethod_GET_SELF(__pyx_t_15); + if (likely(__pyx_t_17)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_15); + __Pyx_INCREF(__pyx_t_17); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_15, function); + __pyx_t_7 = 1; + } + } + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(__pyx_t_15)) { + PyObject *__pyx_temp[3] = {__pyx_t_17, __pyx_v_obj, __pyx_t_16}; + __pyx_t_6 = __Pyx_PyFunction_FastCall(__pyx_t_15, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_17); __pyx_t_17 = 0; + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; + } else + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(__pyx_t_15)) { + PyObject *__pyx_temp[3] = {__pyx_t_17, __pyx_v_obj, __pyx_t_16}; + __pyx_t_6 = __Pyx_PyCFunction_FastCall(__pyx_t_15, __pyx_temp+1-__pyx_t_7, 2+__pyx_t_7); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_XDECREF(__pyx_t_17); __pyx_t_17 = 0; + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; + } else + #endif + { + __pyx_t_18 = PyTuple_New(2+__pyx_t_7); if (unlikely(!__pyx_t_18)) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_18); + if (__pyx_t_17) { + __Pyx_GIVEREF(__pyx_t_17); PyTuple_SET_ITEM(__pyx_t_18, 0, __pyx_t_17); __pyx_t_17 = NULL; + } + __Pyx_INCREF(__pyx_v_obj); + __Pyx_GIVEREF(__pyx_v_obj); + PyTuple_SET_ITEM(__pyx_t_18, 0+__pyx_t_7, __pyx_v_obj); + __Pyx_GIVEREF(__pyx_t_16); + PyTuple_SET_ITEM(__pyx_t_18, 1+__pyx_t_7, __pyx_t_16); + __pyx_t_16 = 0; + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_15, __pyx_t_18, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 209, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; + } + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 209, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":208 + * if ret == 1: + * obj = unpack_data(&ctx) + * if off < buf_len: # <<<<<<<<<<<<<< + * raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off)) + * return obj + */ + } + + /* "msgpack/_unpacker.pyx":210 + * if off < buf_len: + * raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off)) + * return obj # <<<<<<<<<<<<<< + * unpack_clear(&ctx) + * if ret == 0: + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_obj); + __pyx_r = __pyx_v_obj; + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":206 + * PyBuffer_Release(&view); + * + * if ret == 1: # <<<<<<<<<<<<<< + * obj = unpack_data(&ctx) + * if off < buf_len: + */ + } + + /* "msgpack/_unpacker.pyx":211 + * raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off)) + * return obj + * unpack_clear(&ctx) # <<<<<<<<<<<<<< + * if ret == 0: + * raise ValueError("Unpack failed: incomplete input") + */ + unpack_clear((&__pyx_v_ctx)); + 
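+  /* DEV: The switch below maps unpack_construct()'s return code onto a
+   * Python exception, exactly as the .pyx source quoted above reads:
+   *     ret ==  0  -> ValueError("Unpack failed: incomplete input")
+   *     ret == -2  -> FormatError
+   *     ret == -3  -> StackError
+   *     otherwise  -> ValueError("Unpack failed: error = %d" % (ret,))
+   */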
+ /* "msgpack/_unpacker.pyx":212 + * return obj + * unpack_clear(&ctx) + * if ret == 0: # <<<<<<<<<<<<<< + * raise ValueError("Unpack failed: incomplete input") + * elif ret == -2: + */ + switch (__pyx_v_ret) { + case 0: + + /* "msgpack/_unpacker.pyx":213 + * unpack_clear(&ctx) + * if ret == 0: + * raise ValueError("Unpack failed: incomplete input") # <<<<<<<<<<<<<< + * elif ret == -2: + * raise FormatError + */ + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__23, NULL); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 213, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 213, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":212 + * return obj + * unpack_clear(&ctx) + * if ret == 0: # <<<<<<<<<<<<<< + * raise ValueError("Unpack failed: incomplete input") + * elif ret == -2: + */ + break; + case -2L: + + /* "msgpack/_unpacker.pyx":215 + * raise ValueError("Unpack failed: incomplete input") + * elif ret == -2: + * raise FormatError # <<<<<<<<<<<<<< + * elif ret == -3: + * raise StackError + */ + __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_FormatError); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 215, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 215, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":214 + * if ret == 0: + * raise ValueError("Unpack failed: incomplete input") + * elif ret == -2: # <<<<<<<<<<<<<< + * raise FormatError + * elif ret == -3: + */ + break; + case -3L: + + /* "msgpack/_unpacker.pyx":217 + * raise FormatError + * elif ret == -3: + * raise StackError # <<<<<<<<<<<<<< + * raise ValueError("Unpack failed: error = %d" % (ret,)) + * + */ + __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_StackError); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 217, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 217, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":216 + * elif ret == -2: + * raise FormatError + * elif ret == -3: # <<<<<<<<<<<<<< + * raise StackError + * raise ValueError("Unpack failed: error = %d" % (ret,)) + */ + break; + default: break; + } + + /* "msgpack/_unpacker.pyx":218 + * elif ret == -3: + * raise StackError + * raise ValueError("Unpack failed: error = %d" % (ret,)) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_6 = __Pyx_PyUnicode_From_int(__pyx_v_ret, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_15 = __Pyx_PyUnicode_Concat(__pyx_kp_u_Unpack_failed_error, __pyx_t_6); if (unlikely(!__pyx_t_15)) __PYX_ERR(1, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_15); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_15); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 218, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + __Pyx_Raise(__pyx_t_6, 0, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 218, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":144 + * return 1 + * + * def unpackb(object packed, object object_hook=None, object list_hook=None, # <<<<<<<<<<<<<< + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * encoding=None, unicode_errors=None, + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_15); + __Pyx_XDECREF(__pyx_t_16); + __Pyx_XDECREF(__pyx_t_17); + 
__Pyx_XDECREF(__pyx_t_18); + __Pyx_AddTraceback("msgpack._cmsgpack.unpackb", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":221 + * + * + * def unpack(object stream, **kwargs): # <<<<<<<<<<<<<< + * PyErr_WarnEx( + * DeprecationWarning, + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_5unpack(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_4unpack[] = "unpack(stream, **kwargs)"; +static PyMethodDef __pyx_mdef_7msgpack_9_cmsgpack_5unpack = {"unpack", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7msgpack_9_cmsgpack_5unpack, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7msgpack_9_cmsgpack_4unpack}; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_5unpack(PyObject *__pyx_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_stream = 0; + PyObject *__pyx_v_kwargs = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("unpack (wrapper)", 0); + __pyx_v_kwargs = PyDict_New(); if (unlikely(!__pyx_v_kwargs)) return NULL; + __Pyx_GOTREF(__pyx_v_kwargs); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_stream,0}; + PyObject* values[1] = {0}; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (likely((values[0] = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_stream)) != 0)) kw_args--; + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, __pyx_v_kwargs, values, pos_args, "unpack") < 0)) __PYX_ERR(1, 221, __pyx_L3_error) + } + } else if (PyTuple_GET_SIZE(__pyx_args) != 1) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + } + __pyx_v_stream = values[0]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("unpack", 1, 1, 1, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 221, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_DECREF(__pyx_v_kwargs); __pyx_v_kwargs = 0; + __Pyx_AddTraceback("msgpack._cmsgpack.unpack", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_4unpack(__pyx_self, __pyx_v_stream, __pyx_v_kwargs); + + /* function exit code */ + __Pyx_XDECREF(__pyx_v_kwargs); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_4unpack(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_stream, PyObject *__pyx_v_kwargs) { + PyObject *__pyx_v_data = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + __Pyx_RefNannySetupContext("unpack", 0); + + /* "msgpack/_unpacker.pyx":222 + * + * def unpack(object stream, **kwargs): + * PyErr_WarnEx( # <<<<<<<<<<<<<< + * DeprecationWarning, + * "Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.", 1) + */ + __pyx_t_1 = 
PyErr_WarnEx(__pyx_builtin_DeprecationWarning, ((char *)"Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead."), 1); if (unlikely(__pyx_t_1 == ((int)-1))) __PYX_ERR(1, 222, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":225 + * DeprecationWarning, + * "Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.", 1) + * data = stream.read() # <<<<<<<<<<<<<< + * return unpackb(data, **kwargs) + * + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_v_stream, __pyx_n_s_read); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 225, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + } + } + __pyx_t_2 = (__pyx_t_4) ? __Pyx_PyObject_CallOneArg(__pyx_t_3, __pyx_t_4) : __Pyx_PyObject_CallNoArg(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 225, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_data = __pyx_t_2; + __pyx_t_2 = 0; + + /* "msgpack/_unpacker.pyx":226 + * "Direct calling implementation's unpack() is deprecated, Use msgpack.unpack() or unpackb() instead.", 1) + * data = stream.read() + * return unpackb(data, **kwargs) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_unpackb); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_data); + __Pyx_GIVEREF(__pyx_v_data); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_data); + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_t_2, __pyx_t_3, __pyx_v_kwargs); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 226, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":221 + * + * + * def unpack(object stream, **kwargs): # <<<<<<<<<<<<<< + * PyErr_WarnEx( + * DeprecationWarning, + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("msgpack._cmsgpack.unpack", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_data); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":337 + * cdef uint64_t stream_offset + * + * def __cinit__(self): # <<<<<<<<<<<<<< + * self.buf = NULL + * + */ + +/* Python wrapper */ +static int __pyx_pw_7msgpack_9_cmsgpack_8Unpacker_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_pw_7msgpack_9_cmsgpack_8Unpacker_1__cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + if (unlikely(PyTuple_GET_SIZE(__pyx_args) > 0)) { + __Pyx_RaiseArgtupleInvalid("__cinit__", 1, 0, 0, PyTuple_GET_SIZE(__pyx_args)); return -1;} + if (unlikely(__pyx_kwds) && unlikely(PyDict_Size(__pyx_kwds) > 0) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, 
"__cinit__", 0))) return -1; + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker___cinit__(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_7msgpack_9_cmsgpack_8Unpacker___cinit__(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__", 0); + + /* "msgpack/_unpacker.pyx":338 + * + * def __cinit__(self): + * self.buf = NULL # <<<<<<<<<<<<<< + * + * def __dealloc__(self): + */ + __pyx_v_self->buf = NULL; + + /* "msgpack/_unpacker.pyx":337 + * cdef uint64_t stream_offset + * + * def __cinit__(self): # <<<<<<<<<<<<<< + * self.buf = NULL + * + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":340 + * self.buf = NULL + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * PyMem_Free(self.buf) + * self.buf = NULL + */ + +/* Python wrapper */ +static void __pyx_pw_7msgpack_9_cmsgpack_8Unpacker_3__dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_pw_7msgpack_9_cmsgpack_8Unpacker_3__dealloc__(PyObject *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_2__dealloc__(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_2__dealloc__(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "msgpack/_unpacker.pyx":341 + * + * def __dealloc__(self): + * PyMem_Free(self.buf) # <<<<<<<<<<<<<< + * self.buf = NULL + * + */ + PyMem_Free(__pyx_v_self->buf); + + /* "msgpack/_unpacker.pyx":342 + * def __dealloc__(self): + * PyMem_Free(self.buf) + * self.buf = NULL # <<<<<<<<<<<<<< + * + * def __init__(self, file_like=None, Py_ssize_t read_size=0, + */ + __pyx_v_self->buf = NULL; + + /* "msgpack/_unpacker.pyx":340 + * self.buf = NULL + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * PyMem_Free(self.buf) + * self.buf = NULL + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "msgpack/_unpacker.pyx":344 + * self.buf = NULL + * + * def __init__(self, file_like=None, Py_ssize_t read_size=0, # <<<<<<<<<<<<<< + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * object object_hook=None, object object_pairs_hook=None, object list_hook=None, + */ + +/* Python wrapper */ +static int __pyx_pw_7msgpack_9_cmsgpack_8Unpacker_5__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_pw_7msgpack_9_cmsgpack_8Unpacker_5__init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_file_like = 0; + Py_ssize_t __pyx_v_read_size; + int __pyx_v_use_list; + int __pyx_v_raw; + int __pyx_v_strict_map_key; + PyObject *__pyx_v_object_hook = 0; + PyObject *__pyx_v_object_pairs_hook = 0; + PyObject *__pyx_v_list_hook = 0; + PyObject *__pyx_v_encoding = 0; + PyObject *__pyx_v_unicode_errors = 0; + Py_ssize_t __pyx_v_max_buffer_size; + PyObject *__pyx_v_ext_hook = 0; + Py_ssize_t __pyx_v_max_str_len; + Py_ssize_t __pyx_v_max_bin_len; + Py_ssize_t __pyx_v_max_array_len; + Py_ssize_t __pyx_v_max_map_len; + Py_ssize_t __pyx_v_max_ext_len; + int __pyx_r; + __Pyx_RefNannyDeclarations + 
__Pyx_RefNannySetupContext("__init__ (wrapper)", 0); + { + static PyObject **__pyx_pyargnames[] = {&__pyx_n_s_file_like,&__pyx_n_s_read_size,&__pyx_n_s_use_list,&__pyx_n_s_raw,&__pyx_n_s_strict_map_key,&__pyx_n_s_object_hook,&__pyx_n_s_object_pairs_hook,&__pyx_n_s_list_hook,&__pyx_n_s_encoding,&__pyx_n_s_unicode_errors,&__pyx_n_s_max_buffer_size,&__pyx_n_s_ext_hook,&__pyx_n_s_max_str_len,&__pyx_n_s_max_bin_len,&__pyx_n_s_max_array_len,&__pyx_n_s_max_map_len,&__pyx_n_s_max_ext_len,0}; + PyObject* values[17] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; + values[0] = ((PyObject *)Py_None); + + /* "msgpack/_unpacker.pyx":346 + * def __init__(self, file_like=None, Py_ssize_t read_size=0, + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * object object_hook=None, object object_pairs_hook=None, object list_hook=None, # <<<<<<<<<<<<<< + * encoding=None, unicode_errors=None, Py_ssize_t max_buffer_size=0, + * object ext_hook=ExtType, + */ + values[5] = ((PyObject *)Py_None); + values[6] = ((PyObject *)Py_None); + values[7] = ((PyObject *)Py_None); + + /* "msgpack/_unpacker.pyx":347 + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * object object_hook=None, object object_pairs_hook=None, object list_hook=None, + * encoding=None, unicode_errors=None, Py_ssize_t max_buffer_size=0, # <<<<<<<<<<<<<< + * object ext_hook=ExtType, + * Py_ssize_t max_str_len=-1, + */ + values[8] = ((PyObject *)Py_None); + values[9] = ((PyObject *)Py_None); + values[11] = __pyx_k__24; + if (unlikely(__pyx_kwds)) { + Py_ssize_t kw_args; + const Py_ssize_t pos_args = PyTuple_GET_SIZE(__pyx_args); + switch (pos_args) { + case 17: values[16] = PyTuple_GET_ITEM(__pyx_args, 16); + CYTHON_FALLTHROUGH; + case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15); + CYTHON_FALLTHROUGH; + case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14); + CYTHON_FALLTHROUGH; + case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13); + CYTHON_FALLTHROUGH; + case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); + CYTHON_FALLTHROUGH; + case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); + CYTHON_FALLTHROUGH; + case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); + CYTHON_FALLTHROUGH; + case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); + CYTHON_FALLTHROUGH; + case 9: values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + CYTHON_FALLTHROUGH; + case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + CYTHON_FALLTHROUGH; + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = PyDict_Size(__pyx_kwds); + switch (pos_args) { + case 0: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_file_like); + if (value) { values[0] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 1: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_read_size); + if (value) { values[1] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 2: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, 
__pyx_n_s_use_list); + if (value) { values[2] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 3: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_raw); + if (value) { values[3] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 4: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_strict_map_key); + if (value) { values[4] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 5: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_object_hook); + if (value) { values[5] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 6: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_object_pairs_hook); + if (value) { values[6] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 7: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_list_hook); + if (value) { values[7] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 8: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_encoding); + if (value) { values[8] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 9: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_unicode_errors); + if (value) { values[9] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 10: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_buffer_size); + if (value) { values[10] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 11: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_ext_hook); + if (value) { values[11] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 12: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_str_len); + if (value) { values[12] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 13: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_bin_len); + if (value) { values[13] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 14: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_array_len); + if (value) { values[14] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 15: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_map_len); + if (value) { values[15] = value; kw_args--; } + } + CYTHON_FALLTHROUGH; + case 16: + if (kw_args > 0) { + PyObject* value = __Pyx_PyDict_GetItemStr(__pyx_kwds, __pyx_n_s_max_ext_len); + if (value) { values[16] = value; kw_args--; } + } + } + if (unlikely(kw_args > 0)) { + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_pyargnames, 0, values, pos_args, "__init__") < 0)) __PYX_ERR(1, 344, __pyx_L3_error) + } + } else { + switch (PyTuple_GET_SIZE(__pyx_args)) { + case 17: values[16] = PyTuple_GET_ITEM(__pyx_args, 16); + CYTHON_FALLTHROUGH; + case 16: values[15] = PyTuple_GET_ITEM(__pyx_args, 15); + CYTHON_FALLTHROUGH; + case 15: values[14] = PyTuple_GET_ITEM(__pyx_args, 14); + CYTHON_FALLTHROUGH; + case 14: values[13] = PyTuple_GET_ITEM(__pyx_args, 13); + CYTHON_FALLTHROUGH; + case 13: values[12] = PyTuple_GET_ITEM(__pyx_args, 12); + CYTHON_FALLTHROUGH; + case 12: values[11] = PyTuple_GET_ITEM(__pyx_args, 11); + CYTHON_FALLTHROUGH; + case 11: values[10] = PyTuple_GET_ITEM(__pyx_args, 10); + CYTHON_FALLTHROUGH; + case 10: values[9] = PyTuple_GET_ITEM(__pyx_args, 9); + CYTHON_FALLTHROUGH; + case 9: 
values[8] = PyTuple_GET_ITEM(__pyx_args, 8); + CYTHON_FALLTHROUGH; + case 8: values[7] = PyTuple_GET_ITEM(__pyx_args, 7); + CYTHON_FALLTHROUGH; + case 7: values[6] = PyTuple_GET_ITEM(__pyx_args, 6); + CYTHON_FALLTHROUGH; + case 6: values[5] = PyTuple_GET_ITEM(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = PyTuple_GET_ITEM(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = PyTuple_GET_ITEM(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = PyTuple_GET_ITEM(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = PyTuple_GET_ITEM(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = PyTuple_GET_ITEM(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_file_like = values[0]; + if (values[1]) { + __pyx_v_read_size = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_read_size == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 344, __pyx_L3_error) + } else { + __pyx_v_read_size = ((Py_ssize_t)0); + } + if (values[2]) { + __pyx_v_use_list = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_use_list == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) + } else { + + /* "msgpack/_unpacker.pyx":345 + * + * def __init__(self, file_like=None, Py_ssize_t read_size=0, + * bint use_list=True, bint raw=True, bint strict_map_key=False, # <<<<<<<<<<<<<< + * object object_hook=None, object object_pairs_hook=None, object list_hook=None, + * encoding=None, unicode_errors=None, Py_ssize_t max_buffer_size=0, + */ + __pyx_v_use_list = ((int)1); + } + if (values[3]) { + __pyx_v_raw = __Pyx_PyObject_IsTrue(values[3]); if (unlikely((__pyx_v_raw == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) + } else { + __pyx_v_raw = ((int)1); + } + if (values[4]) { + __pyx_v_strict_map_key = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_strict_map_key == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 345, __pyx_L3_error) + } else { + __pyx_v_strict_map_key = ((int)0); + } + __pyx_v_object_hook = values[5]; + __pyx_v_object_pairs_hook = values[6]; + __pyx_v_list_hook = values[7]; + __pyx_v_encoding = values[8]; + __pyx_v_unicode_errors = values[9]; + if (values[10]) { + __pyx_v_max_buffer_size = __Pyx_PyIndex_AsSsize_t(values[10]); if (unlikely((__pyx_v_max_buffer_size == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 347, __pyx_L3_error) + } else { + __pyx_v_max_buffer_size = ((Py_ssize_t)0); + } + __pyx_v_ext_hook = values[11]; + if (values[12]) { + __pyx_v_max_str_len = __Pyx_PyIndex_AsSsize_t(values[12]); if (unlikely((__pyx_v_max_str_len == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error) + } else { + __pyx_v_max_str_len = ((Py_ssize_t)-1L); + } + if (values[13]) { + __pyx_v_max_bin_len = __Pyx_PyIndex_AsSsize_t(values[13]); if (unlikely((__pyx_v_max_bin_len == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 350, __pyx_L3_error) + } else { + __pyx_v_max_bin_len = ((Py_ssize_t)-1L); + } + if (values[14]) { + __pyx_v_max_array_len = __Pyx_PyIndex_AsSsize_t(values[14]); if (unlikely((__pyx_v_max_array_len == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 351, __pyx_L3_error) + } else { + __pyx_v_max_array_len = ((Py_ssize_t)-1L); + } + if (values[15]) { + __pyx_v_max_map_len = __Pyx_PyIndex_AsSsize_t(values[15]); if (unlikely((__pyx_v_max_map_len == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 352, __pyx_L3_error) + } else { + __pyx_v_max_map_len = ((Py_ssize_t)-1L); + } + if (values[16]) { + __pyx_v_max_ext_len = 
__Pyx_PyIndex_AsSsize_t(values[16]); if (unlikely((__pyx_v_max_ext_len == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 353, __pyx_L3_error) + } else { + __pyx_v_max_ext_len = ((Py_ssize_t)-1L); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__init__", 0, 0, 17, PyTuple_GET_SIZE(__pyx_args)); __PYX_ERR(1, 344, __pyx_L3_error) + __pyx_L3_error:; + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_4__init__(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self), __pyx_v_file_like, __pyx_v_read_size, __pyx_v_use_list, __pyx_v_raw, __pyx_v_strict_map_key, __pyx_v_object_hook, __pyx_v_object_pairs_hook, __pyx_v_list_hook, __pyx_v_encoding, __pyx_v_unicode_errors, __pyx_v_max_buffer_size, __pyx_v_ext_hook, __pyx_v_max_str_len, __pyx_v_max_bin_len, __pyx_v_max_array_len, __pyx_v_max_map_len, __pyx_v_max_ext_len); + + /* "msgpack/_unpacker.pyx":344 + * self.buf = NULL + * + * def __init__(self, file_like=None, Py_ssize_t read_size=0, # <<<<<<<<<<<<<< + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * object object_hook=None, object object_pairs_hook=None, object list_hook=None, + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_4__init__(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, PyObject *__pyx_v_file_like, Py_ssize_t __pyx_v_read_size, int __pyx_v_use_list, int __pyx_v_raw, int __pyx_v_strict_map_key, PyObject *__pyx_v_object_hook, PyObject *__pyx_v_object_pairs_hook, PyObject *__pyx_v_list_hook, PyObject *__pyx_v_encoding, PyObject *__pyx_v_unicode_errors, Py_ssize_t __pyx_v_max_buffer_size, PyObject *__pyx_v_ext_hook, Py_ssize_t __pyx_v_max_str_len, Py_ssize_t __pyx_v_max_bin_len, Py_ssize_t __pyx_v_max_array_len, Py_ssize_t __pyx_v_max_map_len, Py_ssize_t __pyx_v_max_ext_len) { + char const *__pyx_v_cenc; + char const *__pyx_v_cerr; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + long __pyx_t_5; + int __pyx_t_6; + int __pyx_t_7; + char const *__pyx_t_8; + char const *__pyx_t_9; + __Pyx_RefNannySetupContext("__init__", 0); + + /* "msgpack/_unpacker.pyx":354 + * Py_ssize_t max_map_len=-1, + * Py_ssize_t max_ext_len=-1): + * cdef const char *cenc=NULL, # <<<<<<<<<<<<<< + * cdef const char *cerr=NULL + * + */ + __pyx_v_cenc = NULL; + + /* "msgpack/_unpacker.pyx":355 + * Py_ssize_t max_ext_len=-1): + * cdef const char *cenc=NULL, + * cdef const char *cerr=NULL # <<<<<<<<<<<<<< + * + * self.object_hook = object_hook + */ + __pyx_v_cerr = NULL; + + /* "msgpack/_unpacker.pyx":357 + * cdef const char *cerr=NULL + * + * self.object_hook = object_hook # <<<<<<<<<<<<<< + * self.object_pairs_hook = object_pairs_hook + * self.list_hook = list_hook + */ + __Pyx_INCREF(__pyx_v_object_hook); + __Pyx_GIVEREF(__pyx_v_object_hook); + __Pyx_GOTREF(__pyx_v_self->object_hook); + __Pyx_DECREF(__pyx_v_self->object_hook); + __pyx_v_self->object_hook = __pyx_v_object_hook; + + /* "msgpack/_unpacker.pyx":358 + * + * self.object_hook = object_hook + * self.object_pairs_hook = object_pairs_hook # <<<<<<<<<<<<<< + * self.list_hook = list_hook + * self.ext_hook = ext_hook + */ + __Pyx_INCREF(__pyx_v_object_pairs_hook); + 
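/* [editor's note, not Cython output] Each hook assignment in this stretch
 * uses Cython's standard attribute-store pattern: INCREF the incoming
 * value, GIVEREF it to the instance, then GOTREF/DECREF the old attribute
 * before overwriting. At the Python level it is simply
 * `self.object_pairs_hook = object_pairs_hook`, and so on. Hedged sketch of
 * one of the hooks being stored here:
 *
 *     def pairs_hook(pairs):                  # receives [(key, value), ...]
 *         return dict(pairs)
 *     unpacker = Unpacker(object_pairs_hook=pairs_hook, raw=False)
 */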
__Pyx_GIVEREF(__pyx_v_object_pairs_hook); + __Pyx_GOTREF(__pyx_v_self->object_pairs_hook); + __Pyx_DECREF(__pyx_v_self->object_pairs_hook); + __pyx_v_self->object_pairs_hook = __pyx_v_object_pairs_hook; + + /* "msgpack/_unpacker.pyx":359 + * self.object_hook = object_hook + * self.object_pairs_hook = object_pairs_hook + * self.list_hook = list_hook # <<<<<<<<<<<<<< + * self.ext_hook = ext_hook + * + */ + __Pyx_INCREF(__pyx_v_list_hook); + __Pyx_GIVEREF(__pyx_v_list_hook); + __Pyx_GOTREF(__pyx_v_self->list_hook); + __Pyx_DECREF(__pyx_v_self->list_hook); + __pyx_v_self->list_hook = __pyx_v_list_hook; + + /* "msgpack/_unpacker.pyx":360 + * self.object_pairs_hook = object_pairs_hook + * self.list_hook = list_hook + * self.ext_hook = ext_hook # <<<<<<<<<<<<<< + * + * self.file_like = file_like + */ + __Pyx_INCREF(__pyx_v_ext_hook); + __Pyx_GIVEREF(__pyx_v_ext_hook); + __Pyx_GOTREF(__pyx_v_self->ext_hook); + __Pyx_DECREF(__pyx_v_self->ext_hook); + __pyx_v_self->ext_hook = __pyx_v_ext_hook; + + /* "msgpack/_unpacker.pyx":362 + * self.ext_hook = ext_hook + * + * self.file_like = file_like # <<<<<<<<<<<<<< + * if file_like: + * self.file_like_read = file_like.read + */ + __Pyx_INCREF(__pyx_v_file_like); + __Pyx_GIVEREF(__pyx_v_file_like); + __Pyx_GOTREF(__pyx_v_self->file_like); + __Pyx_DECREF(__pyx_v_self->file_like); + __pyx_v_self->file_like = __pyx_v_file_like; + + /* "msgpack/_unpacker.pyx":363 + * + * self.file_like = file_like + * if file_like: # <<<<<<<<<<<<<< + * self.file_like_read = file_like.read + * if not PyCallable_Check(self.file_like_read): + */ + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_file_like); if (unlikely(__pyx_t_1 < 0)) __PYX_ERR(1, 363, __pyx_L1_error) + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":364 + * self.file_like = file_like + * if file_like: + * self.file_like_read = file_like.read # <<<<<<<<<<<<<< + * if not PyCallable_Check(self.file_like_read): + * raise TypeError("`file_like.read` must be a callable.") + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_file_like, __pyx_n_s_read); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 364, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_2); + __Pyx_GOTREF(__pyx_v_self->file_like_read); + __Pyx_DECREF(__pyx_v_self->file_like_read); + __pyx_v_self->file_like_read = __pyx_t_2; + __pyx_t_2 = 0; + + /* "msgpack/_unpacker.pyx":365 + * if file_like: + * self.file_like_read = file_like.read + * if not PyCallable_Check(self.file_like_read): # <<<<<<<<<<<<<< + * raise TypeError("`file_like.read` must be a callable.") + * + */ + __pyx_t_2 = __pyx_v_self->file_like_read; + __Pyx_INCREF(__pyx_t_2); + __pyx_t_1 = ((!(PyCallable_Check(__pyx_t_2) != 0)) != 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_unpacker.pyx":366 + * self.file_like_read = file_like.read + * if not PyCallable_Check(self.file_like_read): + * raise TypeError("`file_like.read` must be a callable.") # <<<<<<<<<<<<<< + * + * if max_str_len == -1: + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__25, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 366, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 366, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":365 + * if file_like: + * self.file_like_read = file_like.read + * if not PyCallable_Check(self.file_like_read): # <<<<<<<<<<<<<< + * raise TypeError("`file_like.read` must be a callable.") + * + */ + } + + /* "msgpack/_unpacker.pyx":363 + * + * 
self.file_like = file_like + * if file_like: # <<<<<<<<<<<<<< + * self.file_like_read = file_like.read + * if not PyCallable_Check(self.file_like_read): + */ + } + + /* "msgpack/_unpacker.pyx":368 + * raise TypeError("`file_like.read` must be a callable.") + * + * if max_str_len == -1: # <<<<<<<<<<<<<< + * max_str_len = max_buffer_size or 1024*1024 + * if max_bin_len == -1: + */ + __pyx_t_1 = ((__pyx_v_max_str_len == -1L) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":369 + * + * if max_str_len == -1: + * max_str_len = max_buffer_size or 1024*1024 # <<<<<<<<<<<<<< + * if max_bin_len == -1: + * max_bin_len = max_buffer_size or 1024*1024 + */ + if (!__pyx_v_max_buffer_size) { + } else { + __pyx_t_3 = __pyx_v_max_buffer_size; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_3 = 0x100000; + __pyx_L6_bool_binop_done:; + __pyx_v_max_str_len = __pyx_t_3; + + /* "msgpack/_unpacker.pyx":368 + * raise TypeError("`file_like.read` must be a callable.") + * + * if max_str_len == -1: # <<<<<<<<<<<<<< + * max_str_len = max_buffer_size or 1024*1024 + * if max_bin_len == -1: + */ + } + + /* "msgpack/_unpacker.pyx":370 + * if max_str_len == -1: + * max_str_len = max_buffer_size or 1024*1024 + * if max_bin_len == -1: # <<<<<<<<<<<<<< + * max_bin_len = max_buffer_size or 1024*1024 + * if max_array_len == -1: + */ + __pyx_t_1 = ((__pyx_v_max_bin_len == -1L) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":371 + * max_str_len = max_buffer_size or 1024*1024 + * if max_bin_len == -1: + * max_bin_len = max_buffer_size or 1024*1024 # <<<<<<<<<<<<<< + * if max_array_len == -1: + * max_array_len = max_buffer_size or 128*1024 + */ + if (!__pyx_v_max_buffer_size) { + } else { + __pyx_t_3 = __pyx_v_max_buffer_size; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_3 = 0x100000; + __pyx_L9_bool_binop_done:; + __pyx_v_max_bin_len = __pyx_t_3; + + /* "msgpack/_unpacker.pyx":370 + * if max_str_len == -1: + * max_str_len = max_buffer_size or 1024*1024 + * if max_bin_len == -1: # <<<<<<<<<<<<<< + * max_bin_len = max_buffer_size or 1024*1024 + * if max_array_len == -1: + */ + } + + /* "msgpack/_unpacker.pyx":372 + * if max_bin_len == -1: + * max_bin_len = max_buffer_size or 1024*1024 + * if max_array_len == -1: # <<<<<<<<<<<<<< + * max_array_len = max_buffer_size or 128*1024 + * if max_map_len == -1: + */ + __pyx_t_1 = ((__pyx_v_max_array_len == -1L) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":373 + * max_bin_len = max_buffer_size or 1024*1024 + * if max_array_len == -1: + * max_array_len = max_buffer_size or 128*1024 # <<<<<<<<<<<<<< + * if max_map_len == -1: + * max_map_len = max_buffer_size//2 or 32*1024 + */ + if (!__pyx_v_max_buffer_size) { + } else { + __pyx_t_3 = __pyx_v_max_buffer_size; + goto __pyx_L12_bool_binop_done; + } + __pyx_t_3 = 0x20000; + __pyx_L12_bool_binop_done:; + __pyx_v_max_array_len = __pyx_t_3; + + /* "msgpack/_unpacker.pyx":372 + * if max_bin_len == -1: + * max_bin_len = max_buffer_size or 1024*1024 + * if max_array_len == -1: # <<<<<<<<<<<<<< + * max_array_len = max_buffer_size or 128*1024 + * if max_map_len == -1: + */ + } + + /* "msgpack/_unpacker.pyx":374 + * if max_array_len == -1: + * max_array_len = max_buffer_size or 128*1024 + * if max_map_len == -1: # <<<<<<<<<<<<<< + * max_map_len = max_buffer_size//2 or 32*1024 + * if max_ext_len == -1: + */ + __pyx_t_1 = ((__pyx_v_max_map_len == -1L) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":375 + * max_array_len = max_buffer_size or 128*1024 + * if max_map_len == -1: + * max_map_len = max_buffer_size//2 or 
32*1024 # <<<<<<<<<<<<<< + * if max_ext_len == -1: + * max_ext_len = max_buffer_size or 1024*1024 + */ + __pyx_t_4 = __Pyx_div_Py_ssize_t(__pyx_v_max_buffer_size, 2); + if (!__pyx_t_4) { + } else { + __pyx_t_3 = __pyx_t_4; + goto __pyx_L15_bool_binop_done; + } + __pyx_t_3 = 0x8000; + __pyx_L15_bool_binop_done:; + __pyx_v_max_map_len = __pyx_t_3; + + /* "msgpack/_unpacker.pyx":374 + * if max_array_len == -1: + * max_array_len = max_buffer_size or 128*1024 + * if max_map_len == -1: # <<<<<<<<<<<<<< + * max_map_len = max_buffer_size//2 or 32*1024 + * if max_ext_len == -1: + */ + } + + /* "msgpack/_unpacker.pyx":376 + * if max_map_len == -1: + * max_map_len = max_buffer_size//2 or 32*1024 + * if max_ext_len == -1: # <<<<<<<<<<<<<< + * max_ext_len = max_buffer_size or 1024*1024 + * + */ + __pyx_t_1 = ((__pyx_v_max_ext_len == -1L) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":377 + * max_map_len = max_buffer_size//2 or 32*1024 + * if max_ext_len == -1: + * max_ext_len = max_buffer_size or 1024*1024 # <<<<<<<<<<<<<< + * + * if not max_buffer_size: + */ + if (!__pyx_v_max_buffer_size) { + } else { + __pyx_t_3 = __pyx_v_max_buffer_size; + goto __pyx_L18_bool_binop_done; + } + __pyx_t_3 = 0x100000; + __pyx_L18_bool_binop_done:; + __pyx_v_max_ext_len = __pyx_t_3; + + /* "msgpack/_unpacker.pyx":376 + * if max_map_len == -1: + * max_map_len = max_buffer_size//2 or 32*1024 + * if max_ext_len == -1: # <<<<<<<<<<<<<< + * max_ext_len = max_buffer_size or 1024*1024 + * + */ + } + + /* "msgpack/_unpacker.pyx":379 + * max_ext_len = max_buffer_size or 1024*1024 + * + * if not max_buffer_size: # <<<<<<<<<<<<<< + * max_buffer_size = INT_MAX + * if read_size > max_buffer_size: + */ + __pyx_t_1 = ((!(__pyx_v_max_buffer_size != 0)) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":380 + * + * if not max_buffer_size: + * max_buffer_size = INT_MAX # <<<<<<<<<<<<<< + * if read_size > max_buffer_size: + * raise ValueError("read_size should be less or equal to max_buffer_size") + */ + __pyx_v_max_buffer_size = INT_MAX; + + /* "msgpack/_unpacker.pyx":379 + * max_ext_len = max_buffer_size or 1024*1024 + * + * if not max_buffer_size: # <<<<<<<<<<<<<< + * max_buffer_size = INT_MAX + * if read_size > max_buffer_size: + */ + } + + /* "msgpack/_unpacker.pyx":381 + * if not max_buffer_size: + * max_buffer_size = INT_MAX + * if read_size > max_buffer_size: # <<<<<<<<<<<<<< + * raise ValueError("read_size should be less or equal to max_buffer_size") + * if not read_size: + */ + __pyx_t_1 = ((__pyx_v_read_size > __pyx_v_max_buffer_size) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_unpacker.pyx":382 + * max_buffer_size = INT_MAX + * if read_size > max_buffer_size: + * raise ValueError("read_size should be less or equal to max_buffer_size") # <<<<<<<<<<<<<< + * if not read_size: + * read_size = min(max_buffer_size, 1024**2) + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_ValueError, __pyx_tuple__26, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 382, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 382, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":381 + * if not max_buffer_size: + * max_buffer_size = INT_MAX + * if read_size > max_buffer_size: # <<<<<<<<<<<<<< + * raise ValueError("read_size should be less or equal to max_buffer_size") + * if not read_size: + */ + } + + /* "msgpack/_unpacker.pyx":383 + * if read_size > max_buffer_size: + * raise ValueError("read_size should be less or equal to max_buffer_size") + * 
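 *
 * [editor's note, appended inside this quoted-source comment] Paraphrase of
 * the defaulting rules applied above: with the limits left at -1,
 * max_str_len, max_bin_len and max_ext_len fall back to
 * `max_buffer_size or 1024*1024`, max_array_len to
 * `max_buffer_size or 128*1024`, and max_map_len to
 * `max_buffer_size//2 or 32*1024`; a max_buffer_size of 0 is then widened
 * to INT_MAX, and read_size (checked next) defaults to
 * min(max_buffer_size, 1024**2).
 *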
if not read_size: # <<<<<<<<<<<<<< + * read_size = min(max_buffer_size, 1024**2) + * self.max_buffer_size = max_buffer_size + */ + __pyx_t_1 = ((!(__pyx_v_read_size != 0)) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":384 + * raise ValueError("read_size should be less or equal to max_buffer_size") + * if not read_size: + * read_size = min(max_buffer_size, 1024**2) # <<<<<<<<<<<<<< + * self.max_buffer_size = max_buffer_size + * self.read_size = read_size + */ + __pyx_t_5 = 0x100000; + __pyx_t_3 = __pyx_v_max_buffer_size; + if (((__pyx_t_5 < __pyx_t_3) != 0)) { + __pyx_t_4 = __pyx_t_5; + } else { + __pyx_t_4 = __pyx_t_3; + } + __pyx_v_read_size = __pyx_t_4; + + /* "msgpack/_unpacker.pyx":383 + * if read_size > max_buffer_size: + * raise ValueError("read_size should be less or equal to max_buffer_size") + * if not read_size: # <<<<<<<<<<<<<< + * read_size = min(max_buffer_size, 1024**2) + * self.max_buffer_size = max_buffer_size + */ + } + + /* "msgpack/_unpacker.pyx":385 + * if not read_size: + * read_size = min(max_buffer_size, 1024**2) + * self.max_buffer_size = max_buffer_size # <<<<<<<<<<<<<< + * self.read_size = read_size + * self.buf = PyMem_Malloc(read_size) + */ + __pyx_v_self->max_buffer_size = __pyx_v_max_buffer_size; + + /* "msgpack/_unpacker.pyx":386 + * read_size = min(max_buffer_size, 1024**2) + * self.max_buffer_size = max_buffer_size + * self.read_size = read_size # <<<<<<<<<<<<<< + * self.buf = PyMem_Malloc(read_size) + * if self.buf == NULL: + */ + __pyx_v_self->read_size = __pyx_v_read_size; + + /* "msgpack/_unpacker.pyx":387 + * self.max_buffer_size = max_buffer_size + * self.read_size = read_size + * self.buf = PyMem_Malloc(read_size) # <<<<<<<<<<<<<< + * if self.buf == NULL: + * raise MemoryError("Unable to allocate internal buffer.") + */ + __pyx_v_self->buf = ((char *)PyMem_Malloc(__pyx_v_read_size)); + + /* "msgpack/_unpacker.pyx":388 + * self.read_size = read_size + * self.buf = PyMem_Malloc(read_size) + * if self.buf == NULL: # <<<<<<<<<<<<<< + * raise MemoryError("Unable to allocate internal buffer.") + * self.buf_size = read_size + */ + __pyx_t_1 = ((__pyx_v_self->buf == NULL) != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_unpacker.pyx":389 + * self.buf = PyMem_Malloc(read_size) + * if self.buf == NULL: + * raise MemoryError("Unable to allocate internal buffer.") # <<<<<<<<<<<<<< + * self.buf_size = read_size + * self.buf_head = 0 + */ + __pyx_t_2 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple_, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 389, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_Raise(__pyx_t_2, 0, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 389, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":388 + * self.read_size = read_size + * self.buf = PyMem_Malloc(read_size) + * if self.buf == NULL: # <<<<<<<<<<<<<< + * raise MemoryError("Unable to allocate internal buffer.") + * self.buf_size = read_size + */ + } + + /* "msgpack/_unpacker.pyx":390 + * if self.buf == NULL: + * raise MemoryError("Unable to allocate internal buffer.") + * self.buf_size = read_size # <<<<<<<<<<<<<< + * self.buf_head = 0 + * self.buf_tail = 0 + */ + __pyx_v_self->buf_size = __pyx_v_read_size; + + /* "msgpack/_unpacker.pyx":391 + * raise MemoryError("Unable to allocate internal buffer.") + * self.buf_size = read_size + * self.buf_head = 0 # <<<<<<<<<<<<<< + * self.buf_tail = 0 + * self.stream_offset = 0 + */ + __pyx_v_self->buf_head = 0; + + /* "msgpack/_unpacker.pyx":392 + * self.buf_size = read_size + * self.buf_head = 0 + * 
self.buf_tail = 0 # <<<<<<<<<<<<<< + * self.stream_offset = 0 + * + */ + __pyx_v_self->buf_tail = 0; + + /* "msgpack/_unpacker.pyx":393 + * self.buf_head = 0 + * self.buf_tail = 0 + * self.stream_offset = 0 # <<<<<<<<<<<<<< + * + * if encoding is not None: + */ + __pyx_v_self->stream_offset = 0; + + /* "msgpack/_unpacker.pyx":395 + * self.stream_offset = 0 + * + * if encoding is not None: # <<<<<<<<<<<<<< + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1) + * self.encoding = encoding + */ + __pyx_t_1 = (__pyx_v_encoding != Py_None); + __pyx_t_6 = (__pyx_t_1 != 0); + if (__pyx_t_6) { + + /* "msgpack/_unpacker.pyx":396 + * + * if encoding is not None: + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1) # <<<<<<<<<<<<<< + * self.encoding = encoding + * cenc = encoding + */ + __pyx_t_7 = PyErr_WarnEx(__pyx_builtin_DeprecationWarning, ((char *)"encoding is deprecated, Use raw=False instead."), 1); if (unlikely(__pyx_t_7 == ((int)-1))) __PYX_ERR(1, 396, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":397 + * if encoding is not None: + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1) + * self.encoding = encoding # <<<<<<<<<<<<<< + * cenc = encoding + * + */ + __Pyx_INCREF(__pyx_v_encoding); + __Pyx_GIVEREF(__pyx_v_encoding); + __Pyx_GOTREF(__pyx_v_self->encoding); + __Pyx_DECREF(__pyx_v_self->encoding); + __pyx_v_self->encoding = __pyx_v_encoding; + + /* "msgpack/_unpacker.pyx":398 + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1) + * self.encoding = encoding + * cenc = encoding # <<<<<<<<<<<<<< + * + * if unicode_errors is not None: + */ + __pyx_t_8 = __Pyx_PyObject_AsString(__pyx_v_encoding); if (unlikely((!__pyx_t_8) && PyErr_Occurred())) __PYX_ERR(1, 398, __pyx_L1_error) + __pyx_v_cenc = __pyx_t_8; + + /* "msgpack/_unpacker.pyx":395 + * self.stream_offset = 0 + * + * if encoding is not None: # <<<<<<<<<<<<<< + * PyErr_WarnEx(DeprecationWarning, "encoding is deprecated, Use raw=False instead.", 1) + * self.encoding = encoding + */ + } + + /* "msgpack/_unpacker.pyx":400 + * cenc = encoding + * + * if unicode_errors is not None: # <<<<<<<<<<<<<< + * self.unicode_errors = unicode_errors + * cerr = unicode_errors + */ + __pyx_t_6 = (__pyx_v_unicode_errors != Py_None); + __pyx_t_1 = (__pyx_t_6 != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":401 + * + * if unicode_errors is not None: + * self.unicode_errors = unicode_errors # <<<<<<<<<<<<<< + * cerr = unicode_errors + * + */ + __Pyx_INCREF(__pyx_v_unicode_errors); + __Pyx_GIVEREF(__pyx_v_unicode_errors); + __Pyx_GOTREF(__pyx_v_self->unicode_errors); + __Pyx_DECREF(__pyx_v_self->unicode_errors); + __pyx_v_self->unicode_errors = __pyx_v_unicode_errors; + + /* "msgpack/_unpacker.pyx":402 + * if unicode_errors is not None: + * self.unicode_errors = unicode_errors + * cerr = unicode_errors # <<<<<<<<<<<<<< + * + * init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook, + */ + __pyx_t_9 = __Pyx_PyObject_AsString(__pyx_v_unicode_errors); if (unlikely((!__pyx_t_9) && PyErr_Occurred())) __PYX_ERR(1, 402, __pyx_L1_error) + __pyx_v_cerr = __pyx_t_9; + + /* "msgpack/_unpacker.pyx":400 + * cenc = encoding + * + * if unicode_errors is not None: # <<<<<<<<<<<<<< + * self.unicode_errors = unicode_errors + * cerr = unicode_errors + */ + } + + /* "msgpack/_unpacker.pyx":404 + * cerr = unicode_errors + * + * init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook, # <<<<<<<<<<<<<< + * ext_hook, 
use_list, raw, strict_map_key, cenc, cerr, + * max_str_len, max_bin_len, max_array_len, + */ + __pyx_t_2 = __pyx_f_7msgpack_9_cmsgpack_init_ctx((&__pyx_v_self->ctx), __pyx_v_object_hook, __pyx_v_object_pairs_hook, __pyx_v_list_hook, __pyx_v_ext_hook, __pyx_v_use_list, __pyx_v_raw, __pyx_v_strict_map_key, __pyx_v_cenc, __pyx_v_cerr, __pyx_v_max_str_len, __pyx_v_max_bin_len, __pyx_v_max_array_len, __pyx_v_max_map_len, __pyx_v_max_ext_len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 404, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "msgpack/_unpacker.pyx":344 + * self.buf = NULL + * + * def __init__(self, file_like=None, Py_ssize_t read_size=0, # <<<<<<<<<<<<<< + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * object object_hook=None, object object_pairs_hook=None, object list_hook=None, + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":409 + * max_map_len, max_ext_len) + * + * def feed(self, object next_bytes): # <<<<<<<<<<<<<< + * """Append `next_bytes` to internal buffer.""" + * cdef Py_buffer pybuff + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_7feed(PyObject *__pyx_v_self, PyObject *__pyx_v_next_bytes); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_6feed[] = "Unpacker.feed(self, next_bytes)\nAppend `next_bytes` to internal buffer."; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_7feed(PyObject *__pyx_v_self, PyObject *__pyx_v_next_bytes) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("feed (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_6feed(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self), ((PyObject *)__pyx_v_next_bytes)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_6feed(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, PyObject *__pyx_v_next_bytes) { + Py_buffer __pyx_v_pybuff; + int __pyx_v_new_protocol; + char *__pyx_v_buf; + Py_ssize_t __pyx_v_buf_len; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + char const *__pyx_t_6; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + PyObject *__pyx_t_10 = NULL; + PyObject *__pyx_t_11 = NULL; + PyObject *__pyx_t_12 = NULL; + __Pyx_RefNannySetupContext("feed", 0); + + /* "msgpack/_unpacker.pyx":412 + * """Append `next_bytes` to internal buffer.""" + * cdef Py_buffer pybuff + * cdef int new_protocol = 0 # <<<<<<<<<<<<<< + * cdef char* buf + * cdef Py_ssize_t buf_len + */ + __pyx_v_new_protocol = 0; + + /* "msgpack/_unpacker.pyx":416 + * cdef Py_ssize_t buf_len + * + * if self.file_like is not None: # <<<<<<<<<<<<<< + * raise AssertionError( + * "unpacker.feed() is not be able to use with `file_like`.") + */ + __pyx_t_1 = (__pyx_v_self->file_like != Py_None); + __pyx_t_2 = (__pyx_t_1 != 0); + if (unlikely(__pyx_t_2)) { + + /* "msgpack/_unpacker.pyx":417 + * + * if self.file_like is not None: + * raise AssertionError( # <<<<<<<<<<<<<< + * "unpacker.feed() is not be able to use with `file_like`.") + * + */ + __pyx_t_3 
= __Pyx_PyObject_Call(__pyx_builtin_AssertionError, __pyx_tuple__27, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 417, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_Raise(__pyx_t_3, 0, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 417, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":416 + * cdef Py_ssize_t buf_len + * + * if self.file_like is not None: # <<<<<<<<<<<<<< + * raise AssertionError( + * "unpacker.feed() is not be able to use with `file_like`.") + */ + } + + /* "msgpack/_unpacker.pyx":420 + * "unpacker.feed() is not be able to use with `file_like`.") + * + * get_data_from_buffer(next_bytes, &pybuff, &buf, &buf_len, &new_protocol) # <<<<<<<<<<<<<< + * try: + * self.append_buffer(buf, buf_len) + */ + __pyx_t_4 = __pyx_f_7msgpack_9_cmsgpack_get_data_from_buffer(__pyx_v_next_bytes, (&__pyx_v_pybuff), (&__pyx_v_buf), (&__pyx_v_buf_len), (&__pyx_v_new_protocol)); if (unlikely(__pyx_t_4 == ((int)0))) __PYX_ERR(1, 420, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":421 + * + * get_data_from_buffer(next_bytes, &pybuff, &buf, &buf_len, &new_protocol) + * try: # <<<<<<<<<<<<<< + * self.append_buffer(buf, buf_len) + * finally: + */ + /*try:*/ { + + /* "msgpack/_unpacker.pyx":422 + * get_data_from_buffer(next_bytes, &pybuff, &buf, &buf_len, &new_protocol) + * try: + * self.append_buffer(buf, buf_len) # <<<<<<<<<<<<<< + * finally: + * if new_protocol: + */ + __pyx_t_3 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self->__pyx_vtab)->append_buffer(__pyx_v_self, __pyx_v_buf, __pyx_v_buf_len); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 422, __pyx_L5_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + + /* "msgpack/_unpacker.pyx":424 + * self.append_buffer(buf, buf_len) + * finally: + * if new_protocol: # <<<<<<<<<<<<<< + * PyBuffer_Release(&pybuff) + * + */ + /*finally:*/ { + /*normal exit:*/{ + __pyx_t_2 = (__pyx_v_new_protocol != 0); + if (__pyx_t_2) { + + /* "msgpack/_unpacker.pyx":425 + * finally: + * if new_protocol: + * PyBuffer_Release(&pybuff) # <<<<<<<<<<<<<< + * + * cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len): + */ + PyBuffer_Release((&__pyx_v_pybuff)); + + /* "msgpack/_unpacker.pyx":424 + * self.append_buffer(buf, buf_len) + * finally: + * if new_protocol: # <<<<<<<<<<<<<< + * PyBuffer_Release(&pybuff) + * + */ + } + goto __pyx_L6; + } + __pyx_L5_error:; + /*exception exit:*/{ + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); + if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_7); + __Pyx_XGOTREF(__pyx_t_8); + __Pyx_XGOTREF(__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_10); + __Pyx_XGOTREF(__pyx_t_11); + __Pyx_XGOTREF(__pyx_t_12); + __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; + { + __pyx_t_2 = (__pyx_v_new_protocol != 0); + if (__pyx_t_2) { + + /* "msgpack/_unpacker.pyx":425 + * finally: + * if new_protocol: + * PyBuffer_Release(&pybuff) # <<<<<<<<<<<<<< + * + * cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len): + */ + PyBuffer_Release((&__pyx_v_pybuff)); + + /* "msgpack/_unpacker.pyx":424 + * self.append_buffer(buf, buf_len) + * finally: + * if new_protocol: # <<<<<<<<<<<<<< + * PyBuffer_Release(&pybuff) + * + */ 
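/* [editor's note, not Cython output] feed() accepts any object exposing the
 * buffer protocol: get_data_from_buffer() fills `pybuff`, and both the
 * normal-exit and exception-exit paths of the inlined finally release it
 * exactly once when the new protocol was used. feed() is mutually exclusive
 * with the file_like constructor argument, hence the AssertionError quoted
 * at _unpacker.pyx:417. Hedged sketch of the push-style streaming API:
 *
 *     unpacker = Unpacker(raw=False)
 *     for chunk in iter(lambda: sock.recv(4096), b""):  # illustrative I/O
 *         unpacker.feed(chunk)
 *         for obj in unpacker:
 *             handle(obj)                               # hypothetical handler
 */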
+ } + } + if (PY_MAJOR_VERSION >= 3) { + __Pyx_XGIVEREF(__pyx_t_10); + __Pyx_XGIVEREF(__pyx_t_11); + __Pyx_XGIVEREF(__pyx_t_12); + __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); + } + __Pyx_XGIVEREF(__pyx_t_7); + __Pyx_XGIVEREF(__pyx_t_8); + __Pyx_XGIVEREF(__pyx_t_9); + __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; + goto __pyx_L1_error; + } + __pyx_L6:; + } + + /* "msgpack/_unpacker.pyx":409 + * max_map_len, max_ext_len) + * + * def feed(self, object next_bytes): # <<<<<<<<<<<<<< + * """Append `next_bytes` to internal buffer.""" + * cdef Py_buffer pybuff + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.feed", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":427 + * PyBuffer_Release(&pybuff) + * + * cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len): # <<<<<<<<<<<<<< + * cdef: + * char* buf = self.buf + */ + +static PyObject *__pyx_f_7msgpack_9_cmsgpack_8Unpacker_append_buffer(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, void *__pyx_v__buf, Py_ssize_t __pyx_v__buf_len) { + char *__pyx_v_buf; + char *__pyx_v_new_buf; + Py_ssize_t __pyx_v_head; + Py_ssize_t __pyx_v_tail; + Py_ssize_t __pyx_v_buf_size; + Py_ssize_t __pyx_v_new_size; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + char *__pyx_t_1; + Py_ssize_t __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + __Pyx_RefNannySetupContext("append_buffer", 0); + + /* "msgpack/_unpacker.pyx":429 + * cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len): + * cdef: + * char* buf = self.buf # <<<<<<<<<<<<<< + * char* new_buf + * Py_ssize_t head = self.buf_head + */ + __pyx_t_1 = __pyx_v_self->buf; + __pyx_v_buf = __pyx_t_1; + + /* "msgpack/_unpacker.pyx":431 + * char* buf = self.buf + * char* new_buf + * Py_ssize_t head = self.buf_head # <<<<<<<<<<<<<< + * Py_ssize_t tail = self.buf_tail + * Py_ssize_t buf_size = self.buf_size + */ + __pyx_t_2 = __pyx_v_self->buf_head; + __pyx_v_head = __pyx_t_2; + + /* "msgpack/_unpacker.pyx":432 + * char* new_buf + * Py_ssize_t head = self.buf_head + * Py_ssize_t tail = self.buf_tail # <<<<<<<<<<<<<< + * Py_ssize_t buf_size = self.buf_size + * Py_ssize_t new_size + */ + __pyx_t_2 = __pyx_v_self->buf_tail; + __pyx_v_tail = __pyx_t_2; + + /* "msgpack/_unpacker.pyx":433 + * Py_ssize_t head = self.buf_head + * Py_ssize_t tail = self.buf_tail + * Py_ssize_t buf_size = self.buf_size # <<<<<<<<<<<<<< + * Py_ssize_t new_size + * + */ + __pyx_t_2 = __pyx_v_self->buf_size; + __pyx_v_buf_size = __pyx_t_2; + + /* "msgpack/_unpacker.pyx":436 + * Py_ssize_t new_size + * + * if tail + _buf_len > buf_size: # <<<<<<<<<<<<<< + * if ((tail - head) + _buf_len) <= buf_size: + * # move to front. + */ + __pyx_t_3 = (((__pyx_v_tail + __pyx_v__buf_len) > __pyx_v_buf_size) != 0); + if (__pyx_t_3) { + + /* "msgpack/_unpacker.pyx":437 + * + * if tail + _buf_len > buf_size: + * if ((tail - head) + _buf_len) <= buf_size: # <<<<<<<<<<<<<< + * # move to front. 
+ * memmove(buf, buf + head, tail - head) + */ + __pyx_t_3 = ((((__pyx_v_tail - __pyx_v_head) + __pyx_v__buf_len) <= __pyx_v_buf_size) != 0); + if (__pyx_t_3) { + + /* "msgpack/_unpacker.pyx":439 + * if ((tail - head) + _buf_len) <= buf_size: + * # move to front. + * memmove(buf, buf + head, tail - head) # <<<<<<<<<<<<<< + * tail -= head + * head = 0 + */ + (void)(memmove(__pyx_v_buf, (__pyx_v_buf + __pyx_v_head), (__pyx_v_tail - __pyx_v_head))); + + /* "msgpack/_unpacker.pyx":440 + * # move to front. + * memmove(buf, buf + head, tail - head) + * tail -= head # <<<<<<<<<<<<<< + * head = 0 + * else: + */ + __pyx_v_tail = (__pyx_v_tail - __pyx_v_head); + + /* "msgpack/_unpacker.pyx":441 + * memmove(buf, buf + head, tail - head) + * tail -= head + * head = 0 # <<<<<<<<<<<<<< + * else: + * # expand buffer. + */ + __pyx_v_head = 0; + + /* "msgpack/_unpacker.pyx":437 + * + * if tail + _buf_len > buf_size: + * if ((tail - head) + _buf_len) <= buf_size: # <<<<<<<<<<<<<< + * # move to front. + * memmove(buf, buf + head, tail - head) + */ + goto __pyx_L4; + } + + /* "msgpack/_unpacker.pyx":444 + * else: + * # expand buffer. + * new_size = (tail-head) + _buf_len # <<<<<<<<<<<<<< + * if new_size > self.max_buffer_size: + * raise BufferFull + */ + /*else*/ { + __pyx_v_new_size = ((__pyx_v_tail - __pyx_v_head) + __pyx_v__buf_len); + + /* "msgpack/_unpacker.pyx":445 + * # expand buffer. + * new_size = (tail-head) + _buf_len + * if new_size > self.max_buffer_size: # <<<<<<<<<<<<<< + * raise BufferFull + * new_size = min(new_size*2, self.max_buffer_size) + */ + __pyx_t_3 = ((__pyx_v_new_size > __pyx_v_self->max_buffer_size) != 0); + if (unlikely(__pyx_t_3)) { + + /* "msgpack/_unpacker.pyx":446 + * new_size = (tail-head) + _buf_len + * if new_size > self.max_buffer_size: + * raise BufferFull # <<<<<<<<<<<<<< + * new_size = min(new_size*2, self.max_buffer_size) + * new_buf = PyMem_Malloc(new_size) + */ + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_BufferFull); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 446, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 446, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":445 + * # expand buffer. 
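 *
 * [editor's note, appended inside this quoted-source comment] Worked
 * example of the two branches above, with hypothetical sizes: for head=100,
 * tail=900, buf_size=1024 and an incoming _buf_len=300, (tail-head) +
 * _buf_len = 1100 > 1024, so the memmove compaction cannot make room;
 * new_size starts at 1100, passes the max_buffer_size check (else
 * BufferFull), and is grown geometrically to min(1100*2, max_buffer_size)
 * before PyMem_Malloc allocates the replacement buffer.
 *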
+ * new_size = (tail-head) + _buf_len + * if new_size > self.max_buffer_size: # <<<<<<<<<<<<<< + * raise BufferFull + * new_size = min(new_size*2, self.max_buffer_size) + */ + } + + /* "msgpack/_unpacker.pyx":447 + * if new_size > self.max_buffer_size: + * raise BufferFull + * new_size = min(new_size*2, self.max_buffer_size) # <<<<<<<<<<<<<< + * new_buf = PyMem_Malloc(new_size) + * if new_buf == NULL: + */ + __pyx_t_2 = __pyx_v_self->max_buffer_size; + __pyx_t_5 = (__pyx_v_new_size * 2); + if (((__pyx_t_2 < __pyx_t_5) != 0)) { + __pyx_t_6 = __pyx_t_2; + } else { + __pyx_t_6 = __pyx_t_5; + } + __pyx_v_new_size = __pyx_t_6; + + /* "msgpack/_unpacker.pyx":448 + * raise BufferFull + * new_size = min(new_size*2, self.max_buffer_size) + * new_buf = PyMem_Malloc(new_size) # <<<<<<<<<<<<<< + * if new_buf == NULL: + * # self.buf still holds old buffer and will be freed during + */ + __pyx_v_new_buf = ((char *)PyMem_Malloc(__pyx_v_new_size)); + + /* "msgpack/_unpacker.pyx":449 + * new_size = min(new_size*2, self.max_buffer_size) + * new_buf = PyMem_Malloc(new_size) + * if new_buf == NULL: # <<<<<<<<<<<<<< + * # self.buf still holds old buffer and will be freed during + * # obj destruction + */ + __pyx_t_3 = ((__pyx_v_new_buf == NULL) != 0); + if (unlikely(__pyx_t_3)) { + + /* "msgpack/_unpacker.pyx":452 + * # self.buf still holds old buffer and will be freed during + * # obj destruction + * raise MemoryError("Unable to enlarge internal buffer.") # <<<<<<<<<<<<<< + * memcpy(new_buf, buf + head, tail - head) + * PyMem_Free(buf) + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_MemoryError, __pyx_tuple__28, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 452, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 452, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":449 + * new_size = min(new_size*2, self.max_buffer_size) + * new_buf = PyMem_Malloc(new_size) + * if new_buf == NULL: # <<<<<<<<<<<<<< + * # self.buf still holds old buffer and will be freed during + * # obj destruction + */ + } + + /* "msgpack/_unpacker.pyx":453 + * # obj destruction + * raise MemoryError("Unable to enlarge internal buffer.") + * memcpy(new_buf, buf + head, tail - head) # <<<<<<<<<<<<<< + * PyMem_Free(buf) + * + */ + (void)(memcpy(__pyx_v_new_buf, (__pyx_v_buf + __pyx_v_head), (__pyx_v_tail - __pyx_v_head))); + + /* "msgpack/_unpacker.pyx":454 + * raise MemoryError("Unable to enlarge internal buffer.") + * memcpy(new_buf, buf + head, tail - head) + * PyMem_Free(buf) # <<<<<<<<<<<<<< + * + * buf = new_buf + */ + PyMem_Free(__pyx_v_buf); + + /* "msgpack/_unpacker.pyx":456 + * PyMem_Free(buf) + * + * buf = new_buf # <<<<<<<<<<<<<< + * buf_size = new_size + * tail -= head + */ + __pyx_v_buf = __pyx_v_new_buf; + + /* "msgpack/_unpacker.pyx":457 + * + * buf = new_buf + * buf_size = new_size # <<<<<<<<<<<<<< + * tail -= head + * head = 0 + */ + __pyx_v_buf_size = __pyx_v_new_size; + + /* "msgpack/_unpacker.pyx":458 + * buf = new_buf + * buf_size = new_size + * tail -= head # <<<<<<<<<<<<<< + * head = 0 + * + */ + __pyx_v_tail = (__pyx_v_tail - __pyx_v_head); + + /* "msgpack/_unpacker.pyx":459 + * buf_size = new_size + * tail -= head + * head = 0 # <<<<<<<<<<<<<< + * + * memcpy(buf + tail, (_buf), _buf_len) + */ + __pyx_v_head = 0; + } + __pyx_L4:; + + /* "msgpack/_unpacker.pyx":436 + * Py_ssize_t new_size + * + * if tail + _buf_len > buf_size: # <<<<<<<<<<<<<< + * if ((tail - head) + _buf_len) <= buf_size: + * # move to front. 
+ */ + } + + /* "msgpack/_unpacker.pyx":461 + * head = 0 + * + * memcpy(buf + tail, (_buf), _buf_len) # <<<<<<<<<<<<<< + * self.buf = buf + * self.buf_head = head + */ + (void)(memcpy((__pyx_v_buf + __pyx_v_tail), ((char *)__pyx_v__buf), __pyx_v__buf_len)); + + /* "msgpack/_unpacker.pyx":462 + * + * memcpy(buf + tail, (_buf), _buf_len) + * self.buf = buf # <<<<<<<<<<<<<< + * self.buf_head = head + * self.buf_size = buf_size + */ + __pyx_v_self->buf = __pyx_v_buf; + + /* "msgpack/_unpacker.pyx":463 + * memcpy(buf + tail, (_buf), _buf_len) + * self.buf = buf + * self.buf_head = head # <<<<<<<<<<<<<< + * self.buf_size = buf_size + * self.buf_tail = tail + _buf_len + */ + __pyx_v_self->buf_head = __pyx_v_head; + + /* "msgpack/_unpacker.pyx":464 + * self.buf = buf + * self.buf_head = head + * self.buf_size = buf_size # <<<<<<<<<<<<<< + * self.buf_tail = tail + _buf_len + * + */ + __pyx_v_self->buf_size = __pyx_v_buf_size; + + /* "msgpack/_unpacker.pyx":465 + * self.buf_head = head + * self.buf_size = buf_size + * self.buf_tail = tail + _buf_len # <<<<<<<<<<<<<< + * + * cdef read_from_file(self): + */ + __pyx_v_self->buf_tail = (__pyx_v_tail + __pyx_v__buf_len); + + /* "msgpack/_unpacker.pyx":427 + * PyBuffer_Release(&pybuff) + * + * cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len): # <<<<<<<<<<<<<< + * cdef: + * char* buf = self.buf + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.append_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":467 + * self.buf_tail = tail + _buf_len + * + * cdef read_from_file(self): # <<<<<<<<<<<<<< + * next_bytes = self.file_like_read( + * min(self.read_size, + */ + +static PyObject *__pyx_f_7msgpack_9_cmsgpack_8Unpacker_read_from_file(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self) { + PyObject *__pyx_v_next_bytes = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + char *__pyx_t_9; + __Pyx_RefNannySetupContext("read_from_file", 0); + + /* "msgpack/_unpacker.pyx":470 + * next_bytes = self.file_like_read( + * min(self.read_size, + * self.max_buffer_size - (self.buf_tail - self.buf_head) # <<<<<<<<<<<<<< + * )) + * if next_bytes: + */ + __pyx_t_2 = (__pyx_v_self->max_buffer_size - (__pyx_v_self->buf_tail - __pyx_v_self->buf_head)); + + /* "msgpack/_unpacker.pyx":469 + * cdef read_from_file(self): + * next_bytes = self.file_like_read( + * min(self.read_size, # <<<<<<<<<<<<<< + * self.max_buffer_size - (self.buf_tail - self.buf_head) + * )) + */ + __pyx_t_3 = __pyx_v_self->read_size; + + /* "msgpack/_unpacker.pyx":470 + * next_bytes = self.file_like_read( + * min(self.read_size, + * self.max_buffer_size - (self.buf_tail - self.buf_head) # <<<<<<<<<<<<<< + * )) + * if next_bytes: + */ + if (((__pyx_t_2 < __pyx_t_3) != 0)) { + __pyx_t_4 = __pyx_t_2; + } else { + __pyx_t_4 = __pyx_t_3; + } + __pyx_t_5 = PyInt_FromSsize_t(__pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 470, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_INCREF(__pyx_v_self->file_like_read); + __pyx_t_6 = __pyx_v_self->file_like_read; __pyx_t_7 = NULL; + if (CYTHON_UNPACK_METHODS 
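/* [editor's note, not Cython output] read_from_file() asks file_like_read
 * for at most min(read_size, max_buffer_size - (buf_tail - buf_head))
 * bytes, so a stream-backed Unpacker never buffers past max_buffer_size.
 * Hedged sketch of the pull-style mode this call implements:
 *
 *     with open("data.mpack", "rb") as f:     # hypothetical file name
 *         for obj in Unpacker(f, raw=False):
 *             handle(obj)                     # hypothetical handler
 */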
&& likely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + } + } + __pyx_t_1 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_t_5) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_t_5); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 468, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_v_next_bytes = __pyx_t_1; + __pyx_t_1 = 0; + + /* "msgpack/_unpacker.pyx":472 + * self.max_buffer_size - (self.buf_tail - self.buf_head) + * )) + * if next_bytes: # <<<<<<<<<<<<<< + * self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes)) + * else: + */ + __pyx_t_8 = __Pyx_PyObject_IsTrue(__pyx_v_next_bytes); if (unlikely(__pyx_t_8 < 0)) __PYX_ERR(1, 472, __pyx_L1_error) + if (__pyx_t_8) { + + /* "msgpack/_unpacker.pyx":473 + * )) + * if next_bytes: + * self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes)) # <<<<<<<<<<<<<< + * else: + * self.file_like = None + */ + __pyx_t_9 = PyBytes_AsString(__pyx_v_next_bytes); if (unlikely(__pyx_t_9 == ((char *)NULL))) __PYX_ERR(1, 473, __pyx_L1_error) + __pyx_t_4 = PyBytes_Size(__pyx_v_next_bytes); if (unlikely(__pyx_t_4 == ((Py_ssize_t)-1L))) __PYX_ERR(1, 473, __pyx_L1_error) + __pyx_t_1 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self->__pyx_vtab)->append_buffer(__pyx_v_self, __pyx_t_9, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 473, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "msgpack/_unpacker.pyx":472 + * self.max_buffer_size - (self.buf_tail - self.buf_head) + * )) + * if next_bytes: # <<<<<<<<<<<<<< + * self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes)) + * else: + */ + goto __pyx_L3; + } + + /* "msgpack/_unpacker.pyx":475 + * self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes)) + * else: + * self.file_like = None # <<<<<<<<<<<<<< + * + * cdef object _unpack(self, execute_fn execute, bint iter=0): + */ + /*else*/ { + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + __Pyx_GOTREF(__pyx_v_self->file_like); + __Pyx_DECREF(__pyx_v_self->file_like); + __pyx_v_self->file_like = Py_None; + } + __pyx_L3:; + + /* "msgpack/_unpacker.pyx":467 + * self.buf_tail = tail + _buf_len + * + * cdef read_from_file(self): # <<<<<<<<<<<<<< + * next_bytes = self.file_like_read( + * min(self.read_size, + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.read_from_file", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_next_bytes); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":477 + * self.file_like = None + * + * cdef object _unpack(self, execute_fn execute, bint iter=0): # <<<<<<<<<<<<<< + * cdef int ret + * cdef object obj + */ + +static PyObject *__pyx_f_7msgpack_9_cmsgpack_8Unpacker__unpack(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, execute_fn __pyx_v_execute, struct __pyx_opt_args_7msgpack_9_cmsgpack_8Unpacker__unpack 
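/* [editor's note, not Cython output] Summary of the execute() result codes
 * dispatched in the loop below, as read from the quoted .pyx source: 1
 * means a complete object was decoded (returned after unpack_init resets
 * the context); 0 means more input is needed (refill from file_like, else
 * raise StopIteration when iterating or OutOfData otherwise); -2 raises
 * FormatError, -3 raises StackError, and anything else raises ValueError.
 * Hedged caller-side sketch:
 *
 *     try:
 *         obj = unpacker.unpack()
 *     except msgpack.exceptions.OutOfData:   # exception path is an assumption
 *         unpacker.feed(more_bytes)          # hypothetical refill
 */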
*__pyx_optional_args) { + int __pyx_v_iter = ((int)0); + int __pyx_v_ret; + PyObject *__pyx_v_obj = 0; + Py_ssize_t __pyx_v_prev_head; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + Py_ssize_t __pyx_t_5; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + __Pyx_RefNannySetupContext("_unpack", 0); + if (__pyx_optional_args) { + if (__pyx_optional_args->__pyx_n > 0) { + __pyx_v_iter = __pyx_optional_args->iter; + } + } + + /* "msgpack/_unpacker.pyx":482 + * cdef Py_ssize_t prev_head + * + * if self.buf_head >= self.buf_tail and self.file_like is not None: # <<<<<<<<<<<<<< + * self.read_from_file() + * + */ + __pyx_t_2 = ((__pyx_v_self->buf_head >= __pyx_v_self->buf_tail) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = (__pyx_v_self->file_like != Py_None); + __pyx_t_3 = (__pyx_t_2 != 0); + __pyx_t_1 = __pyx_t_3; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":483 + * + * if self.buf_head >= self.buf_tail and self.file_like is not None: + * self.read_from_file() # <<<<<<<<<<<<<< + * + * while 1: + */ + __pyx_t_4 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self->__pyx_vtab)->read_from_file(__pyx_v_self); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 483, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "msgpack/_unpacker.pyx":482 + * cdef Py_ssize_t prev_head + * + * if self.buf_head >= self.buf_tail and self.file_like is not None: # <<<<<<<<<<<<<< + * self.read_from_file() + * + */ + } + + /* "msgpack/_unpacker.pyx":485 + * self.read_from_file() + * + * while 1: # <<<<<<<<<<<<<< + * prev_head = self.buf_head + * if prev_head >= self.buf_tail: + */ + while (1) { + + /* "msgpack/_unpacker.pyx":486 + * + * while 1: + * prev_head = self.buf_head # <<<<<<<<<<<<<< + * if prev_head >= self.buf_tail: + * if iter: + */ + __pyx_t_5 = __pyx_v_self->buf_head; + __pyx_v_prev_head = __pyx_t_5; + + /* "msgpack/_unpacker.pyx":487 + * while 1: + * prev_head = self.buf_head + * if prev_head >= self.buf_tail: # <<<<<<<<<<<<<< + * if iter: + * raise StopIteration("No more data to unpack.") + */ + __pyx_t_1 = ((__pyx_v_prev_head >= __pyx_v_self->buf_tail) != 0); + if (__pyx_t_1) { + + /* "msgpack/_unpacker.pyx":488 + * prev_head = self.buf_head + * if prev_head >= self.buf_tail: + * if iter: # <<<<<<<<<<<<<< + * raise StopIteration("No more data to unpack.") + * else: + */ + __pyx_t_1 = (__pyx_v_iter != 0); + if (unlikely(__pyx_t_1)) { + + /* "msgpack/_unpacker.pyx":489 + * if prev_head >= self.buf_tail: + * if iter: + * raise StopIteration("No more data to unpack.") # <<<<<<<<<<<<<< + * else: + * raise OutOfData("No more data to unpack.") + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_StopIteration, __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 489, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 489, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":488 + * prev_head = self.buf_head + * if prev_head >= self.buf_tail: + * if iter: # <<<<<<<<<<<<<< + * raise StopIteration("No more data to unpack.") + * else: + */ + } + + /* "msgpack/_unpacker.pyx":491 + * raise StopIteration("No more data to unpack.") + * else: + * raise OutOfData("No more data to unpack.") # <<<<<<<<<<<<<< + * + * ret = execute(&self.ctx, self.buf, self.buf_tail, 
&self.buf_head) + */ + /*else*/ { + __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_OutOfData); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 491, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + } + } + __pyx_t_4 = (__pyx_t_7) ? __Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_kp_u_No_more_data_to_unpack) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_kp_u_No_more_data_to_unpack); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 491, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 491, __pyx_L1_error) + } + + /* "msgpack/_unpacker.pyx":487 + * while 1: + * prev_head = self.buf_head + * if prev_head >= self.buf_tail: # <<<<<<<<<<<<<< + * if iter: + * raise StopIteration("No more data to unpack.") + */ + } + + /* "msgpack/_unpacker.pyx":493 + * raise OutOfData("No more data to unpack.") + * + * ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head) # <<<<<<<<<<<<<< + * self.stream_offset += self.buf_head - prev_head + * + */ + __pyx_t_8 = __pyx_v_execute((&__pyx_v_self->ctx), __pyx_v_self->buf, __pyx_v_self->buf_tail, (&__pyx_v_self->buf_head)); if (unlikely(__pyx_t_8 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(1, 493, __pyx_L1_error) + __pyx_v_ret = __pyx_t_8; + + /* "msgpack/_unpacker.pyx":494 + * + * ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head) + * self.stream_offset += self.buf_head - prev_head # <<<<<<<<<<<<<< + * + * if ret == 1: + */ + __pyx_v_self->stream_offset = (__pyx_v_self->stream_offset + (__pyx_v_self->buf_head - __pyx_v_prev_head)); + + /* "msgpack/_unpacker.pyx":496 + * self.stream_offset += self.buf_head - prev_head + * + * if ret == 1: # <<<<<<<<<<<<<< + * obj = unpack_data(&self.ctx) + * unpack_init(&self.ctx) + */ + switch (__pyx_v_ret) { + case 1: + + /* "msgpack/_unpacker.pyx":497 + * + * if ret == 1: + * obj = unpack_data(&self.ctx) # <<<<<<<<<<<<<< + * unpack_init(&self.ctx) + * return obj + */ + __pyx_t_4 = unpack_data((&__pyx_v_self->ctx)); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 497, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_v_obj = __pyx_t_4; + __pyx_t_4 = 0; + + /* "msgpack/_unpacker.pyx":498 + * if ret == 1: + * obj = unpack_data(&self.ctx) + * unpack_init(&self.ctx) # <<<<<<<<<<<<<< + * return obj + * elif ret == 0: + */ + unpack_init((&__pyx_v_self->ctx)); + + /* "msgpack/_unpacker.pyx":499 + * obj = unpack_data(&self.ctx) + * unpack_init(&self.ctx) + * return obj # <<<<<<<<<<<<<< + * elif ret == 0: + * if self.file_like is not None: + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_obj); + __pyx_r = __pyx_v_obj; + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":496 + * self.stream_offset += self.buf_head - prev_head + * + * if ret == 1: # <<<<<<<<<<<<<< + * obj = unpack_data(&self.ctx) + * unpack_init(&self.ctx) + */ + break; + case 0: + + /* "msgpack/_unpacker.pyx":501 + * return obj + * elif ret == 0: + * if self.file_like is not None: # <<<<<<<<<<<<<< + * self.read_from_file() + * continue + */ + __pyx_t_1 = (__pyx_v_self->file_like != Py_None); + __pyx_t_3 = (__pyx_t_1 != 0); + if (__pyx_t_3) { + + /* "msgpack/_unpacker.pyx":502 + * elif ret == 0: + * if 
self.file_like is not None: + * self.read_from_file() # <<<<<<<<<<<<<< + * continue + * if iter: + */ + __pyx_t_4 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self->__pyx_vtab)->read_from_file(__pyx_v_self); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 502, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "msgpack/_unpacker.pyx":503 + * if self.file_like is not None: + * self.read_from_file() + * continue # <<<<<<<<<<<<<< + * if iter: + * raise StopIteration("No more data to unpack.") + */ + goto __pyx_L6_continue; + + /* "msgpack/_unpacker.pyx":501 + * return obj + * elif ret == 0: + * if self.file_like is not None: # <<<<<<<<<<<<<< + * self.read_from_file() + * continue + */ + } + + /* "msgpack/_unpacker.pyx":504 + * self.read_from_file() + * continue + * if iter: # <<<<<<<<<<<<<< + * raise StopIteration("No more data to unpack.") + * else: + */ + __pyx_t_3 = (__pyx_v_iter != 0); + if (unlikely(__pyx_t_3)) { + + /* "msgpack/_unpacker.pyx":505 + * continue + * if iter: + * raise StopIteration("No more data to unpack.") # <<<<<<<<<<<<<< + * else: + * raise OutOfData("No more data to unpack.") + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin_StopIteration, __pyx_tuple__29, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 505, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 505, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":504 + * self.read_from_file() + * continue + * if iter: # <<<<<<<<<<<<<< + * raise StopIteration("No more data to unpack.") + * else: + */ + } + + /* "msgpack/_unpacker.pyx":507 + * raise StopIteration("No more data to unpack.") + * else: + * raise OutOfData("No more data to unpack.") # <<<<<<<<<<<<<< + * elif ret == -2: + * raise FormatError + */ + /*else*/ { + __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_OutOfData); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 507, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = NULL; + if (CYTHON_UNPACK_METHODS && unlikely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + } + } + __pyx_t_4 = (__pyx_t_7) ? 
__Pyx_PyObject_Call2Args(__pyx_t_6, __pyx_t_7, __pyx_kp_u_No_more_data_to_unpack) : __Pyx_PyObject_CallOneArg(__pyx_t_6, __pyx_kp_u_No_more_data_to_unpack); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 507, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 507, __pyx_L1_error) + } + + /* "msgpack/_unpacker.pyx":500 + * unpack_init(&self.ctx) + * return obj + * elif ret == 0: # <<<<<<<<<<<<<< + * if self.file_like is not None: + * self.read_from_file() + */ + break; + case -2L: + + /* "msgpack/_unpacker.pyx":509 + * raise OutOfData("No more data to unpack.") + * elif ret == -2: + * raise FormatError # <<<<<<<<<<<<<< + * elif ret == -3: + * raise StackError + */ + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_FormatError); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 509, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 509, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":508 + * else: + * raise OutOfData("No more data to unpack.") + * elif ret == -2: # <<<<<<<<<<<<<< + * raise FormatError + * elif ret == -3: + */ + break; + case -3L: + + /* "msgpack/_unpacker.pyx":511 + * raise FormatError + * elif ret == -3: + * raise StackError # <<<<<<<<<<<<<< + * else: + * raise ValueError("Unpack failed: error = %d" % (ret,)) + */ + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_StackError); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 511, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 511, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":510 + * elif ret == -2: + * raise FormatError + * elif ret == -3: # <<<<<<<<<<<<<< + * raise StackError + * else: + */ + break; + default: + + /* "msgpack/_unpacker.pyx":513 + * raise StackError + * else: + * raise ValueError("Unpack failed: error = %d" % (ret,)) # <<<<<<<<<<<<<< + * + * def read_bytes(self, Py_ssize_t nbytes): + */ + __pyx_t_4 = __Pyx_PyUnicode_From_int(__pyx_v_ret, 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 513, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_kp_u_Unpack_failed_error, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 513, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_builtin_ValueError, __pyx_t_6); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 513, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_Raise(__pyx_t_4, 0, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 513, __pyx_L1_error) + break; + } + __pyx_L6_continue:; + } + + /* "msgpack/_unpacker.pyx":477 + * self.file_like = None + * + * cdef object _unpack(self, execute_fn execute, bint iter=0): # <<<<<<<<<<<<<< + * cdef int ret + * cdef object obj + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker._unpack", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":515 + * raise ValueError("Unpack failed: error = %d" % (ret,)) + 
* + * def read_bytes(self, Py_ssize_t nbytes): # <<<<<<<<<<<<<< + * """Read a specified number of raw bytes from the stream""" + * cdef Py_ssize_t nread + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_9read_bytes(PyObject *__pyx_v_self, PyObject *__pyx_arg_nbytes); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_8read_bytes[] = "Unpacker.read_bytes(self, Py_ssize_t nbytes)\nRead a specified number of raw bytes from the stream"; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_9read_bytes(PyObject *__pyx_v_self, PyObject *__pyx_arg_nbytes) { + Py_ssize_t __pyx_v_nbytes; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("read_bytes (wrapper)", 0); + assert(__pyx_arg_nbytes); { + __pyx_v_nbytes = __Pyx_PyIndex_AsSsize_t(__pyx_arg_nbytes); if (unlikely((__pyx_v_nbytes == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 515, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L3_error:; + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.read_bytes", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_8read_bytes(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self), ((Py_ssize_t)__pyx_v_nbytes)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_8read_bytes(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, Py_ssize_t __pyx_v_nbytes) { + Py_ssize_t __pyx_v_nread; + PyObject *__pyx_v_ret = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_t_7; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + PyObject *__pyx_t_10 = NULL; + __Pyx_RefNannySetupContext("read_bytes", 0); + + /* "msgpack/_unpacker.pyx":518 + * """Read a specified number of raw bytes from the stream""" + * cdef Py_ssize_t nread + * nread = min(self.buf_tail - self.buf_head, nbytes) # <<<<<<<<<<<<<< + * ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread) + * self.buf_head += nread + */ + __pyx_t_1 = __pyx_v_nbytes; + __pyx_t_2 = (__pyx_v_self->buf_tail - __pyx_v_self->buf_head); + if (((__pyx_t_1 < __pyx_t_2) != 0)) { + __pyx_t_3 = __pyx_t_1; + } else { + __pyx_t_3 = __pyx_t_2; + } + __pyx_v_nread = __pyx_t_3; + + /* "msgpack/_unpacker.pyx":519 + * cdef Py_ssize_t nread + * nread = min(self.buf_tail - self.buf_head, nbytes) + * ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread) # <<<<<<<<<<<<<< + * self.buf_head += nread + * if len(ret) < nbytes and self.file_like is not None: + */ + __pyx_t_4 = PyBytes_FromStringAndSize((__pyx_v_self->buf + __pyx_v_self->buf_head), __pyx_v_nread); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 519, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_v_ret = __pyx_t_4; + __pyx_t_4 = 0; + + /* "msgpack/_unpacker.pyx":520 + * nread = min(self.buf_tail - self.buf_head, nbytes) + * ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread) + * self.buf_head += nread # <<<<<<<<<<<<<< + * if len(ret) < nbytes and self.file_like is not None: + * ret += self.file_like.read(nbytes - len(ret)) + */ + __pyx_v_self->buf_head = (__pyx_v_self->buf_head + __pyx_v_nread); + + /* "msgpack/_unpacker.pyx":521 + * ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread) + * 
self.buf_head += nread + * if len(ret) < nbytes and self.file_like is not None: # <<<<<<<<<<<<<< + * ret += self.file_like.read(nbytes - len(ret)) + * return ret + */ + __pyx_t_3 = PyObject_Length(__pyx_v_ret); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 521, __pyx_L1_error) + __pyx_t_6 = ((__pyx_t_3 < __pyx_v_nbytes) != 0); + if (__pyx_t_6) { + } else { + __pyx_t_5 = __pyx_t_6; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_6 = (__pyx_v_self->file_like != Py_None); + __pyx_t_7 = (__pyx_t_6 != 0); + __pyx_t_5 = __pyx_t_7; + __pyx_L4_bool_binop_done:; + if (__pyx_t_5) { + + /* "msgpack/_unpacker.pyx":522 + * self.buf_head += nread + * if len(ret) < nbytes and self.file_like is not None: + * ret += self.file_like.read(nbytes - len(ret)) # <<<<<<<<<<<<<< + * return ret + * + */ + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_self->file_like, __pyx_n_s_read); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 522, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_3 = PyObject_Length(__pyx_v_ret); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 522, __pyx_L1_error) + __pyx_t_9 = PyInt_FromSsize_t((__pyx_v_nbytes - __pyx_t_3)); if (unlikely(!__pyx_t_9)) __PYX_ERR(1, 522, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_9); + __pyx_t_10 = NULL; + if (CYTHON_UNPACK_METHODS && likely(PyMethod_Check(__pyx_t_8))) { + __pyx_t_10 = PyMethod_GET_SELF(__pyx_t_8); + if (likely(__pyx_t_10)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_8); + __Pyx_INCREF(__pyx_t_10); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_8, function); + } + } + __pyx_t_4 = (__pyx_t_10) ? __Pyx_PyObject_Call2Args(__pyx_t_8, __pyx_t_10, __pyx_t_9) : __Pyx_PyObject_CallOneArg(__pyx_t_8, __pyx_t_9); + __Pyx_XDECREF(__pyx_t_10); __pyx_t_10 = 0; + __Pyx_DECREF(__pyx_t_9); __pyx_t_9 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 522, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_t_8 = PyNumber_InPlaceAdd(__pyx_v_ret, __pyx_t_4); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 522, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF_SET(__pyx_v_ret, __pyx_t_8); + __pyx_t_8 = 0; + + /* "msgpack/_unpacker.pyx":521 + * ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread) + * self.buf_head += nread + * if len(ret) < nbytes and self.file_like is not None: # <<<<<<<<<<<<<< + * ret += self.file_like.read(nbytes - len(ret)) + * return ret + */ + } + + /* "msgpack/_unpacker.pyx":523 + * if len(ret) < nbytes and self.file_like is not None: + * ret += self.file_like.read(nbytes - len(ret)) + * return ret # <<<<<<<<<<<<<< + * + * def unpack(self): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_ret); + __pyx_r = __pyx_v_ret; + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":515 + * raise ValueError("Unpack failed: error = %d" % (ret,)) + * + * def read_bytes(self, Py_ssize_t nbytes): # <<<<<<<<<<<<<< + * """Read a specified number of raw bytes from the stream""" + * cdef Py_ssize_t nread + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_XDECREF(__pyx_t_9); + __Pyx_XDECREF(__pyx_t_10); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.read_bytes", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_ret); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":525 + * return ret + * + * def unpack(self): # <<<<<<<<<<<<<< + * """Unpack one object + * + */ + +/* Python 
wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_11unpack(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_10unpack[] = "Unpacker.unpack(self)\nUnpack one object\n\n Raises `OutOfData` when there are no more bytes to unpack.\n "; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_11unpack(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("unpack (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_10unpack(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_10unpack(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("unpack", 0); + + /* "msgpack/_unpacker.pyx":530 + * Raises `OutOfData` when there are no more bytes to unpack. + * """ + * return self._unpack(unpack_construct) # <<<<<<<<<<<<<< + * + * def skip(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self->__pyx_vtab)->_unpack(__pyx_v_self, unpack_construct, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 530, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":525 + * return ret + * + * def unpack(self): # <<<<<<<<<<<<<< + * """Unpack one object + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.unpack", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":532 + * return self._unpack(unpack_construct) + * + * def skip(self): # <<<<<<<<<<<<<< + * """Read and ignore one object, returning None + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_13skip(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_12skip[] = "Unpacker.skip(self)\nRead and ignore one object, returning None\n\n Raises `OutOfData` when there are no more bytes to unpack.\n "; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_13skip(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("skip (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_12skip(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_12skip(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("skip", 0); + + /* "msgpack/_unpacker.pyx":537 + * Raises `OutOfData` when there are no more bytes to unpack. 
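+ */
+
+ /* A usage sketch (hypothetical, for illustration only; `packed` is assumed
+  * to be a bytes object holding one or more serialized msgpack objects):
+  *
+  *     unpacker = Unpacker()
+  *     unpacker.feed(packed)
+  *     unpacker.skip()    # decode and discard the next object
+  */
+
+ /* "msgpack/_unpacker.pyx":537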
+ * """ + * return self._unpack(unpack_skip) # <<<<<<<<<<<<<< + * + * def read_array_header(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self->__pyx_vtab)->_unpack(__pyx_v_self, unpack_skip, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 537, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":532 + * return self._unpack(unpack_construct) + * + * def skip(self): # <<<<<<<<<<<<<< + * """Read and ignore one object, returning None + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.skip", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":539 + * return self._unpack(unpack_skip) + * + * def read_array_header(self): # <<<<<<<<<<<<<< + * """assuming the next object is an array, return its size n, such that + * the next n unpack() calls will iterate over its contents. + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_15read_array_header(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_14read_array_header[] = "Unpacker.read_array_header(self)\nassuming the next object is an array, return its size n, such that\n the next n unpack() calls will iterate over its contents.\n\n Raises `OutOfData` when there are no more bytes to unpack.\n "; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_15read_array_header(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("read_array_header (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_14read_array_header(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_14read_array_header(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("read_array_header", 0); + + /* "msgpack/_unpacker.pyx":545 + * Raises `OutOfData` when there are no more bytes to unpack. + * """ + * return self._unpack(read_array_header) # <<<<<<<<<<<<<< + * + * def read_map_header(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self->__pyx_vtab)->_unpack(__pyx_v_self, read_array_header, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 545, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":539 + * return self._unpack(unpack_skip) + * + * def read_array_header(self): # <<<<<<<<<<<<<< + * """assuming the next object is an array, return its size n, such that + * the next n unpack() calls will iterate over its contents. 
+ */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.read_array_header", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":547 + * return self._unpack(read_array_header) + * + * def read_map_header(self): # <<<<<<<<<<<<<< + * """assuming the next object is a map, return its size n, such that the + * next n * 2 unpack() calls will iterate over its key-value pairs. + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_17read_map_header(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_16read_map_header[] = "Unpacker.read_map_header(self)\nassuming the next object is a map, return its size n, such that the\n next n * 2 unpack() calls will iterate over its key-value pairs.\n\n Raises `OutOfData` when there are no more bytes to unpack.\n "; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_17read_map_header(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("read_map_header (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_16read_map_header(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_16read_map_header(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("read_map_header", 0); + + /* "msgpack/_unpacker.pyx":553 + * Raises `OutOfData` when there are no more bytes to unpack. + * """ + * return self._unpack(read_map_header) # <<<<<<<<<<<<<< + * + * def tell(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self->__pyx_vtab)->_unpack(__pyx_v_self, read_map_header, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 553, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":547 + * return self._unpack(read_array_header) + * + * def read_map_header(self): # <<<<<<<<<<<<<< + * """assuming the next object is a map, return its size n, such that the + * next n * 2 unpack() calls will iterate over its key-value pairs. 
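+ */
+
+ /* A usage sketch (hypothetical, for illustration only): the header gives
+  * the number of key-value pairs, so the next n * 2 unpack() calls
+  * alternate key, value:
+  *
+  *     n = unpacker.read_map_header()
+  *     d = {}
+  *     for _ in range(n):
+  *         key = unpacker.unpack()
+  *         d[key] = unpacker.unpack()
+  */
+
+ /* "msgpack/_unpacker.pyx":547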
+ */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.read_map_header", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":555 + * return self._unpack(read_map_header) + * + * def tell(self): # <<<<<<<<<<<<<< + * return self.stream_offset + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_19tell(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_18tell[] = "Unpacker.tell(self)"; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_19tell(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("tell (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_18tell(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_18tell(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("tell", 0); + + /* "msgpack/_unpacker.pyx":556 + * + * def tell(self): + * return self.stream_offset # <<<<<<<<<<<<<< + * + * def __iter__(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_unsigned_PY_LONG_LONG(__pyx_v_self->stream_offset); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 556, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":555 + * return self._unpack(read_map_header) + * + * def tell(self): # <<<<<<<<<<<<<< + * return self.stream_offset + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.tell", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":558 + * return self.stream_offset + * + * def __iter__(self): # <<<<<<<<<<<<<< + * return self + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_21__iter__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_21__iter__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__iter__ (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_20__iter__(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_20__iter__(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__iter__", 0); + + /* "msgpack/_unpacker.pyx":559 + * + * def __iter__(self): + * return self # <<<<<<<<<<<<<< + * + * def __next__(self): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_self)); + __pyx_r = ((PyObject *)__pyx_v_self); + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":558 + * return self.stream_offset + * + * def __iter__(self): # <<<<<<<<<<<<<< + * return self + * + */ + + /* function exit 
code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "msgpack/_unpacker.pyx":561 + * return self + * + * def __next__(self): # <<<<<<<<<<<<<< + * return self._unpack(unpack_construct, 1) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_23__next__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_23__next__(PyObject *__pyx_v_self) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__next__ (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_22__next__(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_22__next__(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + struct __pyx_opt_args_7msgpack_9_cmsgpack_8Unpacker__unpack __pyx_t_2; + __Pyx_RefNannySetupContext("__next__", 0); + + /* "msgpack/_unpacker.pyx":562 + * + * def __next__(self): + * return self._unpack(unpack_construct, 1) # <<<<<<<<<<<<<< + * + * # for debug. + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2.__pyx_n = 1; + __pyx_t_2.iter = 1; + __pyx_t_1 = ((struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self->__pyx_vtab)->_unpack(__pyx_v_self, unpack_construct, &__pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 562, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "msgpack/_unpacker.pyx":561 + * return self + * + * def __next__(self): # <<<<<<<<<<<<<< + * return self._unpack(unpack_construct, 1) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.__next__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_25__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_24__reduce_cython__[] = "Unpacker.__reduce_cython__(self)"; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_25__reduce_cython__(PyObject *__pyx_v_self, CYTHON_UNUSED PyObject *unused) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_24__reduce_cython__(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_24__reduce_cython__(CYTHON_UNUSED struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * 
raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__30, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(2, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_27__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state); /*proto*/ +static char __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_26__setstate_cython__[] = "Unpacker.__setstate_cython__(self, __pyx_state)"; +static PyObject *__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_27__setstate_cython__(PyObject *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + __pyx_r = __pyx_pf_7msgpack_9_cmsgpack_8Unpacker_26__setstate_cython__(((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)__pyx_v_self), ((PyObject *)__pyx_v___pyx_state)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_7msgpack_9_cmsgpack_8Unpacker_26__setstate_cython__(CYTHON_UNUSED struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_t_1 = __Pyx_PyObject_Call(__pyx_builtin_TypeError, __pyx_tuple__31, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_Raise(__pyx_t_1, 0, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(2, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("msgpack._cmsgpack.Unpacker.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} +static struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Packer __pyx_vtable_7msgpack_9_cmsgpack_Packer; + +static PyObject *__pyx_tp_new_7msgpack_9_cmsgpack_Packer(PyTypeObject *t, 
CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { + struct __pyx_obj_7msgpack_9_cmsgpack_Packer *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)o); + p->__pyx_vtab = __pyx_vtabptr_7msgpack_9_cmsgpack_Packer; + p->_default = Py_None; Py_INCREF(Py_None); + p->_bencoding = Py_None; Py_INCREF(Py_None); + p->_berrors = Py_None; Py_INCREF(Py_None); + p->use_float = ((PyBoolObject *)Py_None); Py_INCREF(Py_None); + if (unlikely(__pyx_pw_7msgpack_9_cmsgpack_6Packer_1__cinit__(o, __pyx_empty_tuple, NULL) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_7msgpack_9_cmsgpack_Packer(PyObject *o) { + struct __pyx_obj_7msgpack_9_cmsgpack_Packer *p = (struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + ++Py_REFCNT(o); + __pyx_pw_7msgpack_9_cmsgpack_6Packer_5__dealloc__(o); + --Py_REFCNT(o); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->_default); + Py_CLEAR(p->_bencoding); + Py_CLEAR(p->_berrors); + Py_CLEAR(p->use_float); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_7msgpack_9_cmsgpack_Packer(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_obj_7msgpack_9_cmsgpack_Packer *p = (struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)o; + if (p->_default) { + e = (*v)(p->_default, a); if (e) return e; + } + if (p->_bencoding) { + e = (*v)(p->_bencoding, a); if (e) return e; + } + if (p->_berrors) { + e = (*v)(p->_berrors, a); if (e) return e; + } + if (p->use_float) { + e = (*v)(((PyObject *)p->use_float), a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_7msgpack_9_cmsgpack_Packer(PyObject *o) { + PyObject* tmp; + struct __pyx_obj_7msgpack_9_cmsgpack_Packer *p = (struct __pyx_obj_7msgpack_9_cmsgpack_Packer *)o; + tmp = ((PyObject*)p->_default); + p->_default = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_bencoding); + p->_bencoding = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_berrors); + p->_berrors = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->use_float); + p->use_float = ((PyBoolObject *)Py_None); Py_INCREF(Py_None); + Py_XDECREF(tmp); + return 0; +} + +static PyMethodDef __pyx_methods_7msgpack_9_cmsgpack_Packer[] = { + {"pack", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_6Packer_7pack, METH_O, __pyx_doc_7msgpack_9_cmsgpack_6Packer_6pack}, + {"pack_ext_type", (PyCFunction)(void*)(PyCFunctionWithKeywords)__pyx_pw_7msgpack_9_cmsgpack_6Packer_9pack_ext_type, METH_VARARGS|METH_KEYWORDS, __pyx_doc_7msgpack_9_cmsgpack_6Packer_8pack_ext_type}, + {"pack_array_header", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_6Packer_11pack_array_header, METH_O, __pyx_doc_7msgpack_9_cmsgpack_6Packer_10pack_array_header}, + {"pack_map_header", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_6Packer_13pack_map_header, METH_O, __pyx_doc_7msgpack_9_cmsgpack_6Packer_12pack_map_header}, + {"pack_map_pairs", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_6Packer_15pack_map_pairs, METH_O, 
__pyx_doc_7msgpack_9_cmsgpack_6Packer_14pack_map_pairs}, + {"reset", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_6Packer_17reset, METH_NOARGS, __pyx_doc_7msgpack_9_cmsgpack_6Packer_16reset}, + {"bytes", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_6Packer_19bytes, METH_NOARGS, __pyx_doc_7msgpack_9_cmsgpack_6Packer_18bytes}, + {"getbuffer", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_6Packer_21getbuffer, METH_NOARGS, __pyx_doc_7msgpack_9_cmsgpack_6Packer_20getbuffer}, + {"__reduce_cython__", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_6Packer_23__reduce_cython__, METH_NOARGS, __pyx_doc_7msgpack_9_cmsgpack_6Packer_22__reduce_cython__}, + {"__setstate_cython__", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_6Packer_25__setstate_cython__, METH_O, __pyx_doc_7msgpack_9_cmsgpack_6Packer_24__setstate_cython__}, + {0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type_7msgpack_9_cmsgpack_Packer = { + PyVarObject_HEAD_INIT(0, 0) + "msgpack._cmsgpack.Packer", /*tp_name*/ + sizeof(struct __pyx_obj_7msgpack_9_cmsgpack_Packer), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_7msgpack_9_cmsgpack_Packer, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + "Packer(default=None, encoding=None, unicode_errors=None, bool use_single_float=False, bool autoreset=True, bool use_bin_type=False, bool strict_types=False)\n\n MessagePack Packer\n\n usage::\n\n packer = Packer()\n astream.write(packer.pack(a))\n astream.write(packer.pack(b))\n\n Packer's constructor has some keyword arguments:\n\n :param callable default:\n Convert user type to builtin type that Packer supports.\n See also simplejson's document.\n\n :param bool use_single_float:\n Use single precision float type for float. (default: False)\n\n :param bool autoreset:\n Reset buffer after each pack and return its content as `bytes`. (default: True).\n If set this to false, use `bytes()` to get content and `.reset()` to clear buffer.\n\n :param bool use_bin_type:\n Use bin type introduced in msgpack spec 2.0 for bytes.\n It also enables str8 type for unicode.\n Current default value is false, but it will be changed to true\n in future version. You should specify it explicitly.\n\n :param bool strict_types:\n If set to true, types will be checked to be exact. Derived classes\n from serializeable types will not be serialized and will be\n treated as unsupported type and forwarded to default.\n Additionally tuples will not be serialized as lists.\n This is useful when trying to implement accurate serialization\n for python types.\n\n :param str unicode_errors:\n Error handler for encoding unicode. (default: 'strict')\n\n :param str encoding:\n (deprecated) Convert unicode to bytes with this encoding. 
(default: 'utf-8')\n ", /*tp_doc*/ + __pyx_tp_traverse_7msgpack_9_cmsgpack_Packer, /*tp_traverse*/ + __pyx_tp_clear_7msgpack_9_cmsgpack_Packer, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_7msgpack_9_cmsgpack_Packer, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + __pyx_pw_7msgpack_9_cmsgpack_6Packer_3__init__, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_7msgpack_9_cmsgpack_Packer, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif +}; +static struct __pyx_vtabstruct_7msgpack_9_cmsgpack_Unpacker __pyx_vtable_7msgpack_9_cmsgpack_Unpacker; + +static PyObject *__pyx_tp_new_7msgpack_9_cmsgpack_Unpacker(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { + struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *p; + PyObject *o; + if (likely((t->tp_flags & Py_TPFLAGS_IS_ABSTRACT) == 0)) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + p = ((struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)o); + p->__pyx_vtab = __pyx_vtabptr_7msgpack_9_cmsgpack_Unpacker; + p->file_like = Py_None; Py_INCREF(Py_None); + p->file_like_read = Py_None; Py_INCREF(Py_None); + p->object_hook = Py_None; Py_INCREF(Py_None); + p->object_pairs_hook = Py_None; Py_INCREF(Py_None); + p->list_hook = Py_None; Py_INCREF(Py_None); + p->ext_hook = Py_None; Py_INCREF(Py_None); + p->encoding = Py_None; Py_INCREF(Py_None); + p->unicode_errors = Py_None; Py_INCREF(Py_None); + if (unlikely(__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_1__cinit__(o, __pyx_empty_tuple, NULL) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_7msgpack_9_cmsgpack_Unpacker(PyObject *o) { + struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *p = (struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely(PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE) && Py_TYPE(o)->tp_finalize) && !_PyGC_FINALIZED(o)) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + ++Py_REFCNT(o); + __pyx_pw_7msgpack_9_cmsgpack_8Unpacker_3__dealloc__(o); + --Py_REFCNT(o); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->file_like); + Py_CLEAR(p->file_like_read); + Py_CLEAR(p->object_hook); + Py_CLEAR(p->object_pairs_hook); + Py_CLEAR(p->list_hook); + Py_CLEAR(p->ext_hook); + Py_CLEAR(p->encoding); + Py_CLEAR(p->unicode_errors); + (*Py_TYPE(o)->tp_free)(o); +} + +static int __pyx_tp_traverse_7msgpack_9_cmsgpack_Unpacker(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *p = (struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)o; + if (p->file_like) { + e = (*v)(p->file_like, a); if (e) return e; + } + if (p->file_like_read) { + e = (*v)(p->file_like_read, a); if (e) return e; + } + if (p->object_hook) { + e = (*v)(p->object_hook, a); if (e) return e; + } + if (p->object_pairs_hook) { + e = (*v)(p->object_pairs_hook, a); if (e) return e; + } + if (p->list_hook) { + e = (*v)(p->list_hook, a); if (e) return e; + } + if (p->ext_hook) { + e = (*v)(p->ext_hook, a); if (e) 
return e; + } + if (p->encoding) { + e = (*v)(p->encoding, a); if (e) return e; + } + if (p->unicode_errors) { + e = (*v)(p->unicode_errors, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_7msgpack_9_cmsgpack_Unpacker(PyObject *o) { + PyObject* tmp; + struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *p = (struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *)o; + tmp = ((PyObject*)p->file_like); + p->file_like = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->file_like_read); + p->file_like_read = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->object_hook); + p->object_hook = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->object_pairs_hook); + p->object_pairs_hook = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->list_hook); + p->list_hook = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->ext_hook); + p->ext_hook = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->encoding); + p->encoding = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->unicode_errors); + p->unicode_errors = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + return 0; +} + +static PyObject *__pyx_specialmethod___pyx_pw_7msgpack_9_cmsgpack_8Unpacker_23__next__(PyObject *self, CYTHON_UNUSED PyObject *arg) {return __pyx_pw_7msgpack_9_cmsgpack_8Unpacker_23__next__(self);} + +static PyMethodDef __pyx_methods_7msgpack_9_cmsgpack_Unpacker[] = { + {"feed", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_7feed, METH_O, __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_6feed}, + {"read_bytes", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_9read_bytes, METH_O, __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_8read_bytes}, + {"unpack", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_11unpack, METH_NOARGS, __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_10unpack}, + {"skip", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_13skip, METH_NOARGS, __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_12skip}, + {"read_array_header", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_15read_array_header, METH_NOARGS, __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_14read_array_header}, + {"read_map_header", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_17read_map_header, METH_NOARGS, __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_16read_map_header}, + {"tell", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_19tell, METH_NOARGS, __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_18tell}, + {"__next__", (PyCFunction)__pyx_specialmethod___pyx_pw_7msgpack_9_cmsgpack_8Unpacker_23__next__, METH_NOARGS|METH_COEXIST, 0}, + {"__reduce_cython__", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_25__reduce_cython__, METH_NOARGS, __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_24__reduce_cython__}, + {"__setstate_cython__", (PyCFunction)__pyx_pw_7msgpack_9_cmsgpack_8Unpacker_27__setstate_cython__, METH_O, __pyx_doc_7msgpack_9_cmsgpack_8Unpacker_26__setstate_cython__}, + {0, 0, 0, 0} +}; + +static PyTypeObject __pyx_type_7msgpack_9_cmsgpack_Unpacker = { + PyVarObject_HEAD_INIT(0, 0) + "msgpack._cmsgpack.Unpacker", /*tp_name*/ + sizeof(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_7msgpack_9_cmsgpack_Unpacker, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, 
/*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + "Unpacker(file_like=None, Py_ssize_t read_size=0, bool use_list=True, bool raw=True, bool strict_map_key=False, object_hook=None, object_pairs_hook=None, list_hook=None, encoding=None, unicode_errors=None, Py_ssize_t max_buffer_size=0, ext_hook=ExtType, Py_ssize_t max_str_len=-1, Py_ssize_t max_bin_len=-1, Py_ssize_t max_array_len=-1, Py_ssize_t max_map_len=-1, Py_ssize_t max_ext_len=-1)\nStreaming unpacker.\n\n Arguments:\n\n :param file_like:\n File-like object having `.read(n)` method.\n If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.\n\n :param int read_size:\n Used as `file_like.read(read_size)`. (default: `min(1024**2, max_buffer_size)`)\n\n :param bool use_list:\n If true, unpack msgpack array to Python list.\n Otherwise, unpack to Python tuple. (default: True)\n\n :param bool raw:\n If true, unpack msgpack raw to Python bytes (default).\n Otherwise, unpack to Python str (or unicode on Python 2) by decoding\n with UTF-8 encoding (recommended).\n Currently, the default is true, but it will be changed to false in\n near future. So you must specify it explicitly for keeping backward\n compatibility.\n\n *encoding* option which is deprecated overrides this option.\n\n :param bool strict_map_key:\n If true, only str or bytes are accepted for map (dict) keys.\n It's False by default for backward-compatibility.\n But it will be True from msgpack 1.0.\n\n :param callable object_hook:\n When specified, it should be callable.\n Unpacker calls it with a dict argument after unpacking msgpack map.\n (See also simplejson)\n\n :param callable object_pairs_hook:\n When specified, it should be callable.\n Unpacker calls it with a list of key-value pairs after unpacking msgpack map.\n (See also simplejson)\n\n :param int max_buffer_size:\n Limits size of data w""aiting unpacked. 0 means system's INT_MAX (default).\n Raises `BufferFull` exception when it is insufficient.\n You should set this parameter when unpacking data from untrusted source.\n\n :param int max_str_len:\n Deprecated, use *max_buffer_size* instead.\n Limits max length of str. (default: max_buffer_size or 1024*1024)\n\n :param int max_bin_len:\n Deprecated, use *max_buffer_size* instead.\n Limits max length of bin. (default: max_buffer_size or 1024*1024)\n\n :param int max_array_len:\n Limits max length of array. (default: max_buffer_size or 128*1024)\n\n :param int max_map_len:\n Limits max length of map. (default: max_buffer_size//2 or 32*1024)\n\n :param int max_ext_len:\n Deprecated, use *max_buffer_size* instead.\n Limits max size of ext type. (default: max_buffer_size or 1024*1024)\n\n :param str encoding:\n Deprecated, use ``raw=False`` instead.\n Encoding used for decoding msgpack raw.\n If it is None (default), msgpack raw is deserialized to Python bytes.\n\n :param str unicode_errors:\n Error handler used for decoding str type. 
(default: `'strict'`)\n\n\n Example of streaming deserialize from file-like object::\n\n unpacker = Unpacker(file_like, raw=False, max_buffer_size=10*1024*1024)\n for o in unpacker:\n process(o)\n\n Example of streaming deserialize from socket::\n\n unpacker = Unpacker(raw=False, max_buffer_size=10*1024*1024)\n while True:\n buf = sock.recv(1024**2)\n if not buf:\n break\n unpacker.feed(buf)\n for o in unpacker:\n process(o)\n\n Raises ``ExtraData`` when *packed* contains extra bytes.\n Raises ``OutOfData`` when *packed* is incomplete.\n Raises ``FormatError`` when *packed* is not valid msgpack.\n Raises ``StackError`` when *packed* contains too nested.\n Other exceptions ca""n be raised during unpacking.\n ", /*tp_doc*/ + __pyx_tp_traverse_7msgpack_9_cmsgpack_Unpacker, /*tp_traverse*/ + __pyx_tp_clear_7msgpack_9_cmsgpack_Unpacker, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + __pyx_pw_7msgpack_9_cmsgpack_8Unpacker_21__iter__, /*tp_iter*/ + __pyx_pw_7msgpack_9_cmsgpack_8Unpacker_23__next__, /*tp_iternext*/ + __pyx_methods_7msgpack_9_cmsgpack_Unpacker, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + __pyx_pw_7msgpack_9_cmsgpack_8Unpacker_5__init__, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_7msgpack_9_cmsgpack_Unpacker, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + 0, /*tp_finalize*/ + #endif +}; + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec__cmsgpack(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec__cmsgpack}, + {0, NULL} +}; +#endif + +static struct PyModuleDef __pyx_moduledef = { + PyModuleDef_HEAD_INIT, + "_cmsgpack", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ +}; +#endif +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif + +static __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_n_s_AssertionError, __pyx_k_AssertionError, sizeof(__pyx_k_AssertionError), 0, 0, 1, 1}, + {&__pyx_n_s_BufferError, __pyx_k_BufferError, sizeof(__pyx_k_BufferError), 0, 0, 1, 1}, + {&__pyx_n_s_BufferFull, __pyx_k_BufferFull, sizeof(__pyx_k_BufferFull), 0, 0, 1, 1}, + {&__pyx_kp_u_Cannot_decode_extended_type_with, __pyx_k_Cannot_decode_extended_type_with, sizeof(__pyx_k_Cannot_decode_extended_type_with), 0, 1, 0, 0}, + {&__pyx_n_s_DeprecationWarning, __pyx_k_DeprecationWarning, sizeof(__pyx_k_DeprecationWarning), 0, 0, 1, 1}, + {&__pyx_kp_u_EXT_data_is_too_large, __pyx_k_EXT_data_is_too_large, sizeof(__pyx_k_EXT_data_is_too_large), 0, 1, 0, 0}, + {&__pyx_n_s_ExtType, __pyx_k_ExtType, sizeof(__pyx_k_ExtType), 0, 0, 1, 
1}, + {&__pyx_n_s_ExtraData, __pyx_k_ExtraData, sizeof(__pyx_k_ExtraData), 0, 0, 1, 1}, + {&__pyx_n_s_FormatError, __pyx_k_FormatError, sizeof(__pyx_k_FormatError), 0, 0, 1, 1}, + {&__pyx_kp_u_Integer_value_out_of_range, __pyx_k_Integer_value_out_of_range, sizeof(__pyx_k_Integer_value_out_of_range), 0, 1, 0, 0}, + {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, + {&__pyx_kp_u_No_more_data_to_unpack, __pyx_k_No_more_data_to_unpack, sizeof(__pyx_k_No_more_data_to_unpack), 0, 1, 0, 0}, + {&__pyx_n_s_NotImplementedError, __pyx_k_NotImplementedError, sizeof(__pyx_k_NotImplementedError), 0, 0, 1, 1}, + {&__pyx_n_s_OutOfData, __pyx_k_OutOfData, sizeof(__pyx_k_OutOfData), 0, 0, 1, 1}, + {&__pyx_n_s_OverflowError, __pyx_k_OverflowError, sizeof(__pyx_k_OverflowError), 0, 0, 1, 1}, + {&__pyx_n_s_Packer, __pyx_k_Packer, sizeof(__pyx_k_Packer), 0, 0, 1, 1}, + {&__pyx_n_s_RuntimeError, __pyx_k_RuntimeError, sizeof(__pyx_k_RuntimeError), 0, 0, 1, 1}, + {&__pyx_n_s_RuntimeWarning, __pyx_k_RuntimeWarning, sizeof(__pyx_k_RuntimeWarning), 0, 0, 1, 1}, + {&__pyx_n_s_StackError, __pyx_k_StackError, sizeof(__pyx_k_StackError), 0, 0, 1, 1}, + {&__pyx_n_s_StopIteration, __pyx_k_StopIteration, sizeof(__pyx_k_StopIteration), 0, 0, 1, 1}, + {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, + {&__pyx_kp_u_Unable_to_allocate_internal_buff, __pyx_k_Unable_to_allocate_internal_buff, sizeof(__pyx_k_Unable_to_allocate_internal_buff), 0, 1, 0, 0}, + {&__pyx_kp_u_Unable_to_enlarge_internal_buffe, __pyx_k_Unable_to_enlarge_internal_buffe, sizeof(__pyx_k_Unable_to_enlarge_internal_buffe), 0, 1, 0, 0}, + {&__pyx_kp_u_Unpack_failed_error, __pyx_k_Unpack_failed_error, sizeof(__pyx_k_Unpack_failed_error), 0, 1, 0, 0}, + {&__pyx_kp_u_Unpack_failed_incomplete_input, __pyx_k_Unpack_failed_incomplete_input, sizeof(__pyx_k_Unpack_failed_incomplete_input), 0, 1, 0, 0}, + {&__pyx_n_s_Unpacker, __pyx_k_Unpacker, sizeof(__pyx_k_Unpacker), 0, 0, 1, 1}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_n_s_autoreset, __pyx_k_autoreset, sizeof(__pyx_k_autoreset), 0, 0, 1, 1}, + {&__pyx_n_s_buf, __pyx_k_buf, sizeof(__pyx_k_buf), 0, 0, 1, 1}, + {&__pyx_n_s_buf_len, __pyx_k_buf_len, sizeof(__pyx_k_buf_len), 0, 0, 1, 1}, + {&__pyx_kp_u_cannot_unpack_from_multi_byte_ob, __pyx_k_cannot_unpack_from_multi_byte_ob, sizeof(__pyx_k_cannot_unpack_from_multi_byte_ob), 0, 1, 0, 0}, + {&__pyx_n_s_cenc, __pyx_k_cenc, sizeof(__pyx_k_cenc), 0, 0, 1, 1}, + {&__pyx_n_s_cerr, __pyx_k_cerr, sizeof(__pyx_k_cerr), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_code, __pyx_k_code, sizeof(__pyx_k_code), 0, 0, 1, 1}, + {&__pyx_kp_u_could_not_get_buffer_for_memoryv, __pyx_k_could_not_get_buffer_for_memoryv, sizeof(__pyx_k_could_not_get_buffer_for_memoryv), 0, 1, 0, 0}, + {&__pyx_kp_u_could_not_get_memoryview, __pyx_k_could_not_get_memoryview, sizeof(__pyx_k_could_not_get_memoryview), 0, 1, 0, 0}, + {&__pyx_n_s_ctx, __pyx_k_ctx, sizeof(__pyx_k_ctx), 0, 0, 1, 1}, + {&__pyx_n_u_d, __pyx_k_d, sizeof(__pyx_k_d), 0, 1, 0, 1}, + {&__pyx_n_s_data, __pyx_k_data, sizeof(__pyx_k_data), 0, 0, 1, 1}, + {&__pyx_n_s_ddtrace_vendor_msgpack, __pyx_k_ddtrace_vendor_msgpack, sizeof(__pyx_k_ddtrace_vendor_msgpack), 0, 0, 1, 1}, + {&__pyx_n_s_ddtrace_vendor_msgpack_exception, __pyx_k_ddtrace_vendor_msgpack_exception, sizeof(__pyx_k_ddtrace_vendor_msgpack_exception), 0, 0, 1, 1}, + 
{&__pyx_n_s_default, __pyx_k_default, sizeof(__pyx_k_default), 0, 0, 1, 1}, + {&__pyx_kp_u_default_must_be_a_callable, __pyx_k_default_must_be_a_callable, sizeof(__pyx_k_default_must_be_a_callable), 0, 1, 0, 0}, + {&__pyx_n_s_default_read_extended_type, __pyx_k_default_read_extended_type, sizeof(__pyx_k_default_read_extended_type), 0, 0, 1, 1}, + {&__pyx_kp_u_dict_is_too_large, __pyx_k_dict_is_too_large, sizeof(__pyx_k_dict_is_too_large), 0, 1, 0, 0}, + {&__pyx_n_s_encoding, __pyx_k_encoding, sizeof(__pyx_k_encoding), 0, 0, 1, 1}, + {&__pyx_n_s_ext_hook, __pyx_k_ext_hook, sizeof(__pyx_k_ext_hook), 0, 0, 1, 1}, + {&__pyx_kp_u_ext_hook_must_be_a_callable, __pyx_k_ext_hook_must_be_a_callable, sizeof(__pyx_k_ext_hook_must_be_a_callable), 0, 1, 0, 0}, + {&__pyx_n_s_file_like, __pyx_k_file_like, sizeof(__pyx_k_file_like), 0, 0, 1, 1}, + {&__pyx_kp_u_file_like_read_must_be_a_callab, __pyx_k_file_like_read_must_be_a_callab, sizeof(__pyx_k_file_like_read_must_be_a_callab), 0, 1, 0, 0}, + {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_kp_u_internal_error, __pyx_k_internal_error, sizeof(__pyx_k_internal_error), 0, 1, 0, 0}, + {&__pyx_n_s_items, __pyx_k_items, sizeof(__pyx_k_items), 0, 0, 1, 1}, + {&__pyx_n_s_kwargs, __pyx_k_kwargs, sizeof(__pyx_k_kwargs), 0, 0, 1, 1}, + {&__pyx_n_s_list_hook, __pyx_k_list_hook, sizeof(__pyx_k_list_hook), 0, 0, 1, 1}, + {&__pyx_kp_u_list_hook_must_be_a_callable, __pyx_k_list_hook_must_be_a_callable, sizeof(__pyx_k_list_hook_must_be_a_callable), 0, 1, 0, 0}, + {&__pyx_kp_u_list_is_too_large, __pyx_k_list_is_too_large, sizeof(__pyx_k_list_is_too_large), 0, 1, 0, 0}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_max_array_len, __pyx_k_max_array_len, sizeof(__pyx_k_max_array_len), 0, 0, 1, 1}, + {&__pyx_n_s_max_bin_len, __pyx_k_max_bin_len, sizeof(__pyx_k_max_bin_len), 0, 0, 1, 1}, + {&__pyx_n_s_max_buffer_size, __pyx_k_max_buffer_size, sizeof(__pyx_k_max_buffer_size), 0, 0, 1, 1}, + {&__pyx_n_s_max_ext_len, __pyx_k_max_ext_len, sizeof(__pyx_k_max_ext_len), 0, 0, 1, 1}, + {&__pyx_n_s_max_map_len, __pyx_k_max_map_len, sizeof(__pyx_k_max_map_len), 0, 0, 1, 1}, + {&__pyx_n_s_max_str_len, __pyx_k_max_str_len, sizeof(__pyx_k_max_str_len), 0, 0, 1, 1}, + {&__pyx_kp_u_memoryview_is_too_large, __pyx_k_memoryview_is_too_large, sizeof(__pyx_k_memoryview_is_too_large), 0, 1, 0, 0}, + {&__pyx_n_s_msgpack__cmsgpack, __pyx_k_msgpack__cmsgpack, sizeof(__pyx_k_msgpack__cmsgpack), 0, 0, 1, 1}, + {&__pyx_kp_s_msgpack__unpacker_pyx, __pyx_k_msgpack__unpacker_pyx, sizeof(__pyx_k_msgpack__unpacker_pyx), 0, 0, 1, 0}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_n_s_new_protocol, __pyx_k_new_protocol, sizeof(__pyx_k_new_protocol), 0, 0, 1, 1}, + {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, + {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, + {&__pyx_n_s_object_hook, __pyx_k_object_hook, sizeof(__pyx_k_object_hook), 0, 0, 1, 1}, + {&__pyx_kp_u_object_hook_must_be_a_callable, __pyx_k_object_hook_must_be_a_callable, sizeof(__pyx_k_object_hook_must_be_a_callable), 0, 1, 0, 0}, + {&__pyx_n_s_object_pairs_hook, __pyx_k_object_pairs_hook, sizeof(__pyx_k_object_pairs_hook), 0, 0, 1, 1}, + {&__pyx_kp_u_object_pairs_hook_and_object_hoo, __pyx_k_object_pairs_hook_and_object_hoo, 
sizeof(__pyx_k_object_pairs_hook_and_object_hoo), 0, 1, 0, 0}, + {&__pyx_kp_u_object_pairs_hook_must_be_a_call, __pyx_k_object_pairs_hook_must_be_a_call, sizeof(__pyx_k_object_pairs_hook_must_be_a_call), 0, 1, 0, 0}, + {&__pyx_n_s_off, __pyx_k_off, sizeof(__pyx_k_off), 0, 0, 1, 1}, + {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, + {&__pyx_n_s_packed, __pyx_k_packed, sizeof(__pyx_k_packed), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, + {&__pyx_n_s_raw, __pyx_k_raw, sizeof(__pyx_k_raw), 0, 0, 1, 1}, + {&__pyx_n_s_read, __pyx_k_read, sizeof(__pyx_k_read), 0, 0, 1, 1}, + {&__pyx_n_s_read_size, __pyx_k_read_size, sizeof(__pyx_k_read_size), 0, 0, 1, 1}, + {&__pyx_kp_u_read_size_should_be_less_or_equa, __pyx_k_read_size_should_be_less_or_equa, sizeof(__pyx_k_read_size_should_be_less_or_equa), 0, 1, 0, 0}, + {&__pyx_kp_u_recursion_limit_exceeded, __pyx_k_recursion_limit_exceeded, sizeof(__pyx_k_recursion_limit_exceeded), 0, 1, 0, 0}, + {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, + {&__pyx_n_s_ret, __pyx_k_ret, sizeof(__pyx_k_ret), 0, 0, 1, 1}, + {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, + {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, + {&__pyx_n_s_stream, __pyx_k_stream, sizeof(__pyx_k_stream), 0, 0, 1, 1}, + {&__pyx_n_s_strict_map_key, __pyx_k_strict_map_key, sizeof(__pyx_k_strict_map_key), 0, 0, 1, 1}, + {&__pyx_n_s_strict_types, __pyx_k_strict_types, sizeof(__pyx_k_strict_types), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_n_s_typecode, __pyx_k_typecode, sizeof(__pyx_k_typecode), 0, 0, 1, 1}, + {&__pyx_n_s_unicode_errors, __pyx_k_unicode_errors, sizeof(__pyx_k_unicode_errors), 0, 0, 1, 1}, + {&__pyx_kp_u_unicode_string_is_too_large, __pyx_k_unicode_string_is_too_large, sizeof(__pyx_k_unicode_string_is_too_large), 0, 1, 0, 0}, + {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, + {&__pyx_n_s_unpackb, __pyx_k_unpackb, sizeof(__pyx_k_unpackb), 0, 0, 1, 1}, + {&__pyx_kp_u_unpacker_feed_is_not_be_able_to, __pyx_k_unpacker_feed_is_not_be_able_to, sizeof(__pyx_k_unpacker_feed_is_not_be_able_to), 0, 1, 0, 0}, + {&__pyx_n_s_use_bin_type, __pyx_k_use_bin_type, sizeof(__pyx_k_use_bin_type), 0, 0, 1, 1}, + {&__pyx_n_s_use_list, __pyx_k_use_list, sizeof(__pyx_k_use_list), 0, 0, 1, 1}, + {&__pyx_n_s_use_single_float, __pyx_k_use_single_float, sizeof(__pyx_k_use_single_float), 0, 0, 1, 1}, + {&__pyx_kp_u_using_old_buffer_interface_to_un, __pyx_k_using_old_buffer_interface_to_un, sizeof(__pyx_k_using_old_buffer_interface_to_un), 0, 1, 0, 0}, + {&__pyx_n_s_view, __pyx_k_view, sizeof(__pyx_k_view), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} +}; +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(0, 111, __pyx_L1_error) + __pyx_builtin_DeprecationWarning = __Pyx_GetBuiltinName(__pyx_n_s_DeprecationWarning); if (!__pyx_builtin_DeprecationWarning) __PYX_ERR(0, 119, __pyx_L1_error) + __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(0, 126, __pyx_L1_error) + __pyx_builtin_ValueError = 
__Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(0, 163, __pyx_L1_error) + __pyx_builtin_OverflowError = __Pyx_GetBuiltinName(__pyx_n_s_OverflowError); if (!__pyx_builtin_OverflowError) __PYX_ERR(0, 183, __pyx_L1_error) + __pyx_builtin_RuntimeError = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeError); if (!__pyx_builtin_RuntimeError) __PYX_ERR(0, 290, __pyx_L1_error) + __pyx_builtin_NotImplementedError = __Pyx_GetBuiltinName(__pyx_n_s_NotImplementedError); if (!__pyx_builtin_NotImplementedError) __PYX_ERR(1, 106, __pyx_L1_error) + __pyx_builtin_BufferError = __Pyx_GetBuiltinName(__pyx_n_s_BufferError); if (!__pyx_builtin_BufferError) __PYX_ERR(1, 121, __pyx_L1_error) + __pyx_builtin_RuntimeWarning = __Pyx_GetBuiltinName(__pyx_n_s_RuntimeWarning); if (!__pyx_builtin_RuntimeWarning) __PYX_ERR(1, 137, __pyx_L1_error) + __pyx_builtin_AssertionError = __Pyx_GetBuiltinName(__pyx_n_s_AssertionError); if (!__pyx_builtin_AssertionError) __PYX_ERR(1, 417, __pyx_L1_error) + __pyx_builtin_StopIteration = __Pyx_GetBuiltinName(__pyx_n_s_StopIteration); if (!__pyx_builtin_StopIteration) __PYX_ERR(1, 489, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "msgpack/_packer.pyx":111 + * self.pk.buf = <char*> PyMem_Malloc(buf_size) + * if self.pk.buf == NULL: + * raise MemoryError("Unable to allocate internal buffer.") # <<<<<<<<<<<<<< + * self.pk.buf_size = buf_size + * self.pk.length = 0 + */ + __pyx_tuple_ = PyTuple_Pack(1, __pyx_kp_u_Unable_to_allocate_internal_buff); if (unlikely(!__pyx_tuple_)) __PYX_ERR(0, 111, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple_); + __Pyx_GIVEREF(__pyx_tuple_); + + /* "msgpack/_packer.pyx":126 + * if default is not None: + * if not PyCallable_Check(default): + * raise TypeError("default must be a callable.") # <<<<<<<<<<<<<< + * self._default = default + * + */ + __pyx_tuple__2 = PyTuple_Pack(1, __pyx_kp_u_default_must_be_a_callable); if (unlikely(!__pyx_tuple__2)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__2); + __Pyx_GIVEREF(__pyx_tuple__2); + + /* "msgpack/_packer.pyx":163 + * + * if nest_limit < 0: + * raise ValueError("recursion limit exceeded.") # <<<<<<<<<<<<<< + * + * while True: + */ + __pyx_tuple__4 = PyTuple_Pack(1, __pyx_kp_u_recursion_limit_exceeded); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(0, 163, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "msgpack/_packer.pyx":189 + * continue + * else: + * raise OverflowError("Integer value out of range") # <<<<<<<<<<<<<< + * elif PyInt_CheckExact(o) if strict_types else PyInt_Check(o): + * longval = o + */ + __pyx_tuple__5 = PyTuple_Pack(1, __pyx_kp_u_Integer_value_out_of_range); if (unlikely(!__pyx_tuple__5)) __PYX_ERR(0, 189, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__5); + __Pyx_GIVEREF(__pyx_tuple__5); + + /* "msgpack/_packer.pyx":212 + * ret = msgpack_pack_unicode(&self.pk, o, ITEM_LIMIT); + * if ret == -2: + * raise ValueError("unicode string is too large") # <<<<<<<<<<<<<< + * else: + * o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors) + */ + __pyx_tuple__6 = PyTuple_Pack(1, __pyx_kp_u_unicode_string_is_too_large); if (unlikely(!__pyx_tuple__6)) __PYX_ERR(0, 212, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__6); + __Pyx_GIVEREF(__pyx_tuple__6); + + /* "msgpack/_packer.pyx":226 + * L = len(d) + * if L > ITEM_LIMIT: + * raise ValueError("dict is 
too large") # <<<<<<<<<<<<<< + * ret = msgpack_pack_map(&self.pk, L) + * if ret == 0: + */ + __pyx_tuple__7 = PyTuple_Pack(1, __pyx_kp_u_dict_is_too_large); if (unlikely(!__pyx_tuple__7)) __PYX_ERR(0, 226, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__7); + __Pyx_GIVEREF(__pyx_tuple__7); + + /* "msgpack/_packer.pyx":251 + * L = len(o.data) + * if L > ITEM_LIMIT: + * raise ValueError("EXT data is too large") # <<<<<<<<<<<<<< + * ret = msgpack_pack_ext(&self.pk, longval, L) + * ret = msgpack_pack_raw_body(&self.pk, rawval, L) + */ + __pyx_tuple__8 = PyTuple_Pack(1, __pyx_kp_u_EXT_data_is_too_large); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(0, 251, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "msgpack/_packer.pyx":257 + * L = len(o) + * if L > ITEM_LIMIT: + * raise ValueError("list is too large") # <<<<<<<<<<<<<< + * ret = msgpack_pack_array(&self.pk, L) + * if ret == 0: + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_u_list_is_too_large); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(0, 257, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "msgpack/_packer.pyx":265 + * elif PyMemoryView_Check(o): + * if PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) != 0: + * raise ValueError("could not get buffer for memoryview") # <<<<<<<<<<<<<< + * L = view.len + * if L > ITEM_LIMIT: + */ + __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_u_could_not_get_buffer_for_memoryv); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(0, 265, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + + /* "msgpack/_packer.pyx":269 + * if L > ITEM_LIMIT: + * PyBuffer_Release(&view); + * raise ValueError("memoryview is too large") # <<<<<<<<<<<<<< + * ret = msgpack_pack_bin(&self.pk, L) + * if ret == 0: + */ + __pyx_tuple__11 = PyTuple_Pack(1, __pyx_kp_u_memoryview_is_too_large); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(0, 269, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__11); + __Pyx_GIVEREF(__pyx_tuple__11); + + /* "msgpack/_packer.pyx":290 + * raise + * if ret: # should not happen. 
+ * raise RuntimeError("internal error") # <<<<<<<<<<<<<< + * if self.autoreset: + * buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length) + */ + __pyx_tuple__12 = PyTuple_Pack(1, __pyx_kp_u_internal_error); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(0, 290, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__12); + __Pyx_GIVEREF(__pyx_tuple__12); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(2, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__13); + __Pyx_GIVEREF(__pyx_tuple__13); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__14 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(2, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__14); + __Pyx_GIVEREF(__pyx_tuple__14); + + /* "msgpack/_unpacker.pyx":77 + * + * if object_hook is not None and object_pairs_hook is not None: + * raise TypeError("object_pairs_hook and object_hook are mutually exclusive.") # <<<<<<<<<<<<<< + * + * if object_hook is not None: + */ + __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_u_object_pairs_hook_and_object_hoo); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 77, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + + /* "msgpack/_unpacker.pyx":81 + * if object_hook is not None: + * if not PyCallable_Check(object_hook): + * raise TypeError("object_hook must be a callable.") # <<<<<<<<<<<<<< + * ctx.user.object_hook = object_hook + * + */ + __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_u_object_hook_must_be_a_callable); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 81, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__16); + __Pyx_GIVEREF(__pyx_tuple__16); + + /* "msgpack/_unpacker.pyx":88 + * else: + * if not PyCallable_Check(object_pairs_hook): + * raise TypeError("object_pairs_hook must be a callable.") # <<<<<<<<<<<<<< + * ctx.user.object_hook = object_pairs_hook + * ctx.user.has_pairs_hook = True + */ + __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_u_object_pairs_hook_must_be_a_call); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 88, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__17); + __Pyx_GIVEREF(__pyx_tuple__17); + + /* "msgpack/_unpacker.pyx":94 + * if list_hook is not None: + * if not PyCallable_Check(list_hook): + * raise TypeError("list_hook must be a callable.") # <<<<<<<<<<<<<< + * ctx.user.list_hook = list_hook + * + */ + __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_u_list_hook_must_be_a_callable); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 94, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__18); + __Pyx_GIVEREF(__pyx_tuple__18); + + /* "msgpack/_unpacker.pyx":99 + * if ext_hook is not None: + * if not PyCallable_Check(ext_hook): + * raise TypeError("ext_hook must be a callable.") # <<<<<<<<<<<<<< + * ctx.user.ext_hook = ext_hook + * + */ + __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_u_ext_hook_must_be_a_callable); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 99, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__19); + __Pyx_GIVEREF(__pyx_tuple__19); + + /* "msgpack/_unpacker.pyx":121 + * if 
view.itemsize != 1: + * PyBuffer_Release(view) + * raise BufferError("cannot unpack from multi-byte object") # <<<<<<<<<<<<<< + * if PyBuffer_IsContiguous(view, b'A') == 0: + * PyBuffer_Release(view) + */ + __pyx_tuple__20 = PyTuple_Pack(1, __pyx_kp_u_cannot_unpack_from_multi_byte_ob); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 121, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__20); + __Pyx_GIVEREF(__pyx_tuple__20); + + /* "msgpack/_unpacker.pyx":136 + * new_protocol[0] = 0 + * if PyObject_AsReadBuffer(obj, <const void**>buf, buffer_len) == -1: + * raise BufferError("could not get memoryview") # <<<<<<<<<<<<<< + * PyErr_WarnEx(RuntimeWarning, + * "using old buffer interface to unpack %s; " + */ + __pyx_tuple__21 = PyTuple_Pack(1, __pyx_kp_u_could_not_get_memoryview); if (unlikely(!__pyx_tuple__21)) __PYX_ERR(1, 136, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__21); + __Pyx_GIVEREF(__pyx_tuple__21); + + /* "msgpack/_unpacker.pyx":213 + * unpack_clear(&ctx) + * if ret == 0: + * raise ValueError("Unpack failed: incomplete input") # <<<<<<<<<<<<<< + * elif ret == -2: + * raise FormatError + */ + __pyx_tuple__23 = PyTuple_Pack(1, __pyx_kp_u_Unpack_failed_incomplete_input); if (unlikely(!__pyx_tuple__23)) __PYX_ERR(1, 213, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__23); + __Pyx_GIVEREF(__pyx_tuple__23); + + /* "msgpack/_unpacker.pyx":366 + * self.file_like_read = file_like.read + * if not PyCallable_Check(self.file_like_read): + * raise TypeError("`file_like.read` must be a callable.") # <<<<<<<<<<<<<< + * + * if max_str_len == -1: + */ + __pyx_tuple__25 = PyTuple_Pack(1, __pyx_kp_u_file_like_read_must_be_a_callab); if (unlikely(!__pyx_tuple__25)) __PYX_ERR(1, 366, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__25); + __Pyx_GIVEREF(__pyx_tuple__25); + + /* "msgpack/_unpacker.pyx":382 + * max_buffer_size = INT_MAX + * if read_size > max_buffer_size: + * raise ValueError("read_size should be less or equal to max_buffer_size") # <<<<<<<<<<<<<< + * if not read_size: + * read_size = min(max_buffer_size, 1024**2) + */ + __pyx_tuple__26 = PyTuple_Pack(1, __pyx_kp_u_read_size_should_be_less_or_equa); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(1, 382, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__26); + __Pyx_GIVEREF(__pyx_tuple__26); + + /* "msgpack/_unpacker.pyx":417 + * + * if self.file_like is not None: + * raise AssertionError( # <<<<<<<<<<<<<< + * "unpacker.feed() is not be able to use with `file_like`.") + * + */ + __pyx_tuple__27 = PyTuple_Pack(1, __pyx_kp_u_unpacker_feed_is_not_be_able_to); if (unlikely(!__pyx_tuple__27)) __PYX_ERR(1, 417, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__27); + __Pyx_GIVEREF(__pyx_tuple__27); + + /* "msgpack/_unpacker.pyx":452 + * # self.buf still holds old buffer and will be freed during + * # obj destruction + * raise MemoryError("Unable to enlarge internal buffer.") # <<<<<<<<<<<<<< + * memcpy(new_buf, buf + head, tail - head) + * PyMem_Free(buf) + */ + __pyx_tuple__28 = PyTuple_Pack(1, __pyx_kp_u_Unable_to_enlarge_internal_buffe); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(1, 452, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__28); + __Pyx_GIVEREF(__pyx_tuple__28); + + /* "msgpack/_unpacker.pyx":489 + * if prev_head >= self.buf_tail: + * if iter: + * raise StopIteration("No more data to unpack.") # <<<<<<<<<<<<<< + * else: + * raise OutOfData("No more data to unpack.") + */ + __pyx_tuple__29 = PyTuple_Pack(1, __pyx_kp_u_No_more_data_to_unpack); if (unlikely(!__pyx_tuple__29)) __PYX_ERR(1, 489, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__29); + __Pyx_GIVEREF(__pyx_tuple__29); + + /* 
"(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + */ + __pyx_tuple__30 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__30)) __PYX_ERR(2, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__30); + __Pyx_GIVEREF(__pyx_tuple__30); + + /* "(tree fragment)":4 + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") + * def __setstate_cython__(self, __pyx_state): + * raise TypeError("no default __reduce__ due to non-trivial __cinit__") # <<<<<<<<<<<<<< + */ + __pyx_tuple__31 = PyTuple_Pack(1, __pyx_kp_s_no_default___reduce___due_to_non); if (unlikely(!__pyx_tuple__31)) __PYX_ERR(2, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__31); + __Pyx_GIVEREF(__pyx_tuple__31); + + /* "msgpack/_unpacker.pyx":105 + * ctx.user.unicode_errors = unicode_errors + * + * def default_read_extended_type(typecode, data): # <<<<<<<<<<<<<< + * raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode) + * + */ + __pyx_tuple__32 = PyTuple_Pack(2, __pyx_n_s_typecode, __pyx_n_s_data); if (unlikely(!__pyx_tuple__32)) __PYX_ERR(1, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__32); + __Pyx_GIVEREF(__pyx_tuple__32); + __pyx_codeobj__33 = (PyObject*)__Pyx_PyCode_New(2, 0, 2, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__32, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_msgpack__unpacker_pyx, __pyx_n_s_default_read_extended_type, 105, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__33)) __PYX_ERR(1, 105, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":144 + * return 1 + * + * def unpackb(object packed, object object_hook=None, object list_hook=None, # <<<<<<<<<<<<<< + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * encoding=None, unicode_errors=None, + */ + __pyx_tuple__34 = PyTuple_Pack(25, __pyx_n_s_packed, __pyx_n_s_object_hook, __pyx_n_s_list_hook, __pyx_n_s_use_list, __pyx_n_s_raw, __pyx_n_s_strict_map_key, __pyx_n_s_encoding, __pyx_n_s_unicode_errors, __pyx_n_s_object_pairs_hook, __pyx_n_s_ext_hook, __pyx_n_s_max_str_len, __pyx_n_s_max_bin_len, __pyx_n_s_max_array_len, __pyx_n_s_max_map_len, __pyx_n_s_max_ext_len, __pyx_n_s_ctx, __pyx_n_s_off, __pyx_n_s_ret, __pyx_n_s_view, __pyx_n_s_buf, __pyx_n_s_buf_len, __pyx_n_s_cenc, __pyx_n_s_cerr, __pyx_n_s_new_protocol, __pyx_n_s_obj); if (unlikely(!__pyx_tuple__34)) __PYX_ERR(1, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__34); + __Pyx_GIVEREF(__pyx_tuple__34); + __pyx_codeobj__35 = (PyObject*)__Pyx_PyCode_New(15, 0, 25, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__34, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_msgpack__unpacker_pyx, __pyx_n_s_unpackb, 144, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__35)) __PYX_ERR(1, 144, __pyx_L1_error) + + /* "msgpack/_unpacker.pyx":221 + * + * + * def unpack(object stream, **kwargs): # <<<<<<<<<<<<<< + * PyErr_WarnEx( + * DeprecationWarning, + */ + __pyx_tuple__36 = PyTuple_Pack(3, __pyx_n_s_stream, __pyx_n_s_kwargs, __pyx_n_s_data); if (unlikely(!__pyx_tuple__36)) __PYX_ERR(1, 221, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__36); + __Pyx_GIVEREF(__pyx_tuple__36); + __pyx_codeobj__37 = (PyObject*)__Pyx_PyCode_New(1, 0, 3, 0, CO_OPTIMIZED|CO_NEWLOCALS|CO_VARKEYWORDS, __pyx_empty_bytes, 
__pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__36, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_msgpack__unpacker_pyx, __pyx_n_s_unpack, 221, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__37)) __PYX_ERR(1, 221, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + if (__Pyx_InitStrings(__pyx_string_tab) < 0) __PYX_ERR(3, 1, __pyx_L1_error); + __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(3, 1, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __pyx_vtabptr_7msgpack_9_cmsgpack_Packer = &__pyx_vtable_7msgpack_9_cmsgpack_Packer; + __pyx_vtable_7msgpack_9_cmsgpack_Packer._pack = (int (*)(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *, PyObject *, struct __pyx_opt_args_7msgpack_9_cmsgpack_6Packer__pack *__pyx_optional_args))__pyx_f_7msgpack_9_cmsgpack_6Packer__pack; + __pyx_vtable_7msgpack_9_cmsgpack_Packer.pack = (PyObject *(*)(struct __pyx_obj_7msgpack_9_cmsgpack_Packer *, PyObject *, int __pyx_skip_dispatch))__pyx_f_7msgpack_9_cmsgpack_6Packer_pack; + if (PyType_Ready(&__pyx_type_7msgpack_9_cmsgpack_Packer) < 0) __PYX_ERR(0, 54, __pyx_L1_error) + __pyx_type_7msgpack_9_cmsgpack_Packer.tp_print = 0; + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7msgpack_9_cmsgpack_Packer.tp_dictoffset && __pyx_type_7msgpack_9_cmsgpack_Packer.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type_7msgpack_9_cmsgpack_Packer.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type_7msgpack_9_cmsgpack_Packer.tp_dict, __pyx_vtabptr_7msgpack_9_cmsgpack_Packer) < 0) __PYX_ERR(0, 54, __pyx_L1_error) + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_Packer, (PyObject *)&__pyx_type_7msgpack_9_cmsgpack_Packer) < 0) __PYX_ERR(0, 54, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type_7msgpack_9_cmsgpack_Packer) < 0) __PYX_ERR(0, 54, __pyx_L1_error) + __pyx_ptype_7msgpack_9_cmsgpack_Packer = &__pyx_type_7msgpack_9_cmsgpack_Packer; + 
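/* [annotation, not part of the Cython-generated output] The block above registered the Packer extension type: the vtable carries the C-level fast paths (_pack and pack), PyType_Ready finalizes the type, __Pyx_SetVtable stashes the vtable in tp_dict, PyObject_SetAttr publishes the type object on the module, and __Pyx_setup_reduce installs the __reduce_cython__/__setstate_cython__ helpers. The block below repeats the identical sequence for the Unpacker type. */ +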
__pyx_vtabptr_7msgpack_9_cmsgpack_Unpacker = &__pyx_vtable_7msgpack_9_cmsgpack_Unpacker; + __pyx_vtable_7msgpack_9_cmsgpack_Unpacker.append_buffer = (PyObject *(*)(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *, void *, Py_ssize_t))__pyx_f_7msgpack_9_cmsgpack_8Unpacker_append_buffer; + __pyx_vtable_7msgpack_9_cmsgpack_Unpacker.read_from_file = (PyObject *(*)(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *))__pyx_f_7msgpack_9_cmsgpack_8Unpacker_read_from_file; + __pyx_vtable_7msgpack_9_cmsgpack_Unpacker._unpack = (PyObject *(*)(struct __pyx_obj_7msgpack_9_cmsgpack_Unpacker *, execute_fn, struct __pyx_opt_args_7msgpack_9_cmsgpack_8Unpacker__unpack *__pyx_optional_args))__pyx_f_7msgpack_9_cmsgpack_8Unpacker__unpack; + if (PyType_Ready(&__pyx_type_7msgpack_9_cmsgpack_Unpacker) < 0) __PYX_ERR(1, 229, __pyx_L1_error) + __pyx_type_7msgpack_9_cmsgpack_Unpacker.tp_print = 0; + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_type_7msgpack_9_cmsgpack_Unpacker.tp_dictoffset && __pyx_type_7msgpack_9_cmsgpack_Unpacker.tp_getattro == PyObject_GenericGetAttr)) { + __pyx_type_7msgpack_9_cmsgpack_Unpacker.tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + if (__Pyx_SetVtable(__pyx_type_7msgpack_9_cmsgpack_Unpacker.tp_dict, __pyx_vtabptr_7msgpack_9_cmsgpack_Unpacker) < 0) __PYX_ERR(1, 229, __pyx_L1_error) + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_Unpacker, (PyObject *)&__pyx_type_7msgpack_9_cmsgpack_Unpacker) < 0) __PYX_ERR(1, 229, __pyx_L1_error) + if (__Pyx_setup_reduce((PyObject*)&__pyx_type_7msgpack_9_cmsgpack_Unpacker) < 0) __PYX_ERR(1, 229, __pyx_L1_error) + __pyx_ptype_7msgpack_9_cmsgpack_Unpacker = &__pyx_type_7msgpack_9_cmsgpack_Unpacker; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(4, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", + #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 + sizeof(PyTypeObject), + #else + sizeof(PyHeapTypeObject), + #endif + __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(4, 9, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(5, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_4bool_bool = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "bool", sizeof(PyBoolObject), __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_4bool_bool) __PYX_ERR(5, 8, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(6, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_7complex_complex = __Pyx_ImportType(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "complex", sizeof(PyComplexObject), __Pyx_ImportType_CheckSize_Warn); + if (!__pyx_ptype_7cpython_7complex_complex) __PYX_ERR(6, 15, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int 
__Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#if PY_MAJOR_VERSION < 3 +#ifdef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC void +#else +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#endif +#else +#ifdef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC init_cmsgpack(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC init_cmsgpack(void) +#else +__Pyx_PyMODINIT_FUNC PyInit__cmsgpack(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit__cmsgpack(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? -1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) { + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { + result = PyDict_SetItemString(moddict, to_name, value); + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, CYTHON_UNUSED PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec__cmsgpack(PyObject 
*__pyx_pyinit_module) +#endif +#endif +{ + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module '_cmsgpack' has already been imported. Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit__cmsgpack(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(3, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(3, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(3, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + #ifdef WITH_THREAD /* Python build with threading support? */ + PyEval_InitThreads(); + #endif + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("_cmsgpack", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + #endif + if (unlikely(!__pyx_m)) __PYX_ERR(3, 1, __pyx_L1_error) + #endif + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(3, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(3, 1, __pyx_L1_error) + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(3, 1, __pyx_L1_error) + #if CYTHON_COMPILING_IN_PYPY + Py_INCREF(__pyx_b); + #endif + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(3, 1, __pyx_L1_error); + /*--- Initialize various global constants etc. 
---*/ + if (__Pyx_InitGlobals() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_msgpack___cmsgpack) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name, __pyx_n_s_main) < 0) __PYX_ERR(3, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(3, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "msgpack._cmsgpack")) { + if (unlikely(PyDict_SetItemString(modules, "msgpack._cmsgpack", __pyx_m) < 0)) __PYX_ERR(3, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + if (unlikely(__Pyx_modinit_type_init_code() != 0)) goto __pyx_L1_error; + if (unlikely(__Pyx_modinit_type_import_code() != 0)) goto __pyx_L1_error; + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(3, 1, __pyx_L1_error) + #endif + + /* "msgpack/_packer.pyx":6 + * from cpython.bytearray cimport PyByteArray_Check, PyByteArray_CheckExact + * + * from ddtrace.vendor.msgpack import ExtType # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_n_s_ExtType); + __Pyx_GIVEREF(__pyx_n_s_ExtType); + PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_ExtType); + __pyx_t_2 = __Pyx_Import(__pyx_n_s_ddtrace_vendor_msgpack, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_ExtType); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_ExtType, __pyx_t_1) < 0) __PYX_ERR(0, 6, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "msgpack/_packer.pyx":42 + * object buff_to_buff(char *, Py_ssize_t) + * + * cdef int DEFAULT_RECURSE_LIMIT=511 # <<<<<<<<<<<<<< + * cdef long long ITEM_LIMIT = (2**32)-1 + * + */ + __pyx_v_7msgpack_9_cmsgpack_DEFAULT_RECURSE_LIMIT = 0x1FF; + + /* "msgpack/_packer.pyx":43 + * + * cdef int DEFAULT_RECURSE_LIMIT=511 + * cdef long long ITEM_LIMIT = (2**32)-1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_7msgpack_9_cmsgpack_ITEM_LIMIT = 0xFFFFFFFF; + + /* "msgpack/_packer.pyx":148 + * self.pk.buf = NULL + * + * cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1: # <<<<<<<<<<<<<< + * cdef long long llval + * cdef unsigned long long ullval + */ + __pyx_k__3 = __pyx_v_7msgpack_9_cmsgpack_DEFAULT_RECURSE_LIMIT; + + /* "msgpack/_unpacker.pyx":16 + * + * from ddtrace.vendor.msgpack.exceptions import ( + * BufferFull, # <<<<<<<<<<<<<< + * OutOfData, + * ExtraData, + */ + __pyx_t_2 = PyList_New(5); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + 
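/* [annotation, not part of the Cython-generated output] The INCREF/SET_ITEM pairs below fill the five-element from-list for the "from ddtrace.vendor.msgpack.exceptions import (...)" statement quoted in the following source comment; __Pyx_Import then performs the import and __Pyx_ImportFrom copies each imported name into the module dict. */ +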
__Pyx_INCREF(__pyx_n_s_BufferFull); + __Pyx_GIVEREF(__pyx_n_s_BufferFull); + PyList_SET_ITEM(__pyx_t_2, 0, __pyx_n_s_BufferFull); + __Pyx_INCREF(__pyx_n_s_OutOfData); + __Pyx_GIVEREF(__pyx_n_s_OutOfData); + PyList_SET_ITEM(__pyx_t_2, 1, __pyx_n_s_OutOfData); + __Pyx_INCREF(__pyx_n_s_ExtraData); + __Pyx_GIVEREF(__pyx_n_s_ExtraData); + PyList_SET_ITEM(__pyx_t_2, 2, __pyx_n_s_ExtraData); + __Pyx_INCREF(__pyx_n_s_FormatError); + __Pyx_GIVEREF(__pyx_n_s_FormatError); + PyList_SET_ITEM(__pyx_t_2, 3, __pyx_n_s_FormatError); + __Pyx_INCREF(__pyx_n_s_StackError); + __Pyx_GIVEREF(__pyx_n_s_StackError); + PyList_SET_ITEM(__pyx_t_2, 4, __pyx_n_s_StackError); + + /* "msgpack/_unpacker.pyx":15 + * ctypedef unsigned long long uint64_t + * + * from ddtrace.vendor.msgpack.exceptions import ( # <<<<<<<<<<<<<< + * BufferFull, + * OutOfData, + */ + __pyx_t_1 = __Pyx_Import(__pyx_n_s_ddtrace_vendor_msgpack_exception, __pyx_t_2, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_BufferFull); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_BufferFull, __pyx_t_2) < 0) __PYX_ERR(1, 16, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_OutOfData); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_OutOfData, __pyx_t_2) < 0) __PYX_ERR(1, 17, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_ExtraData); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_ExtraData, __pyx_t_2) < 0) __PYX_ERR(1, 18, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_FormatError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_FormatError, __pyx_t_2) < 0) __PYX_ERR(1, 19, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_ImportFrom(__pyx_t_1, __pyx_n_s_StackError); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_StackError, __pyx_t_2) < 0) __PYX_ERR(1, 20, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "msgpack/_unpacker.pyx":22 + * StackError, + * ) + * from ddtrace.vendor.msgpack import ExtType # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_n_s_ExtType); + __Pyx_GIVEREF(__pyx_n_s_ExtType); + PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_ExtType); + __pyx_t_2 = __Pyx_Import(__pyx_n_s_ddtrace_vendor_msgpack, __pyx_t_1, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_2, __pyx_n_s_ExtType); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 22, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_ExtType, __pyx_t_1) < 0) __PYX_ERR(1, 22, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "msgpack/_unpacker.pyx":105 + * ctx.user.unicode_errors = unicode_errors + * + 
* def default_read_extended_type(typecode, data): # <<<<<<<<<<<<<< + * raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode) + * + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7msgpack_9_cmsgpack_1default_read_extended_type, NULL, __pyx_n_s_msgpack__cmsgpack); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 105, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_default_read_extended_type, __pyx_t_2) < 0) __PYX_ERR(1, 105, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "msgpack/_unpacker.pyx":147 + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * encoding=None, unicode_errors=None, + * object_pairs_hook=None, ext_hook=ExtType, # <<<<<<<<<<<<<< + * Py_ssize_t max_str_len=-1, + * Py_ssize_t max_bin_len=-1, + */ + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_ExtType); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 147, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_k__22 = __pyx_t_2; + __Pyx_GIVEREF(__pyx_t_2); + __pyx_t_2 = 0; + + /* "msgpack/_unpacker.pyx":144 + * return 1 + * + * def unpackb(object packed, object object_hook=None, object list_hook=None, # <<<<<<<<<<<<<< + * bint use_list=True, bint raw=True, bint strict_map_key=False, + * encoding=None, unicode_errors=None, + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7msgpack_9_cmsgpack_3unpackb, NULL, __pyx_n_s_msgpack__cmsgpack); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 144, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_unpackb, __pyx_t_2) < 0) __PYX_ERR(1, 144, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "msgpack/_unpacker.pyx":221 + * + * + * def unpack(object stream, **kwargs): # <<<<<<<<<<<<<< + * PyErr_WarnEx( + * DeprecationWarning, + */ + __pyx_t_2 = PyCFunction_NewEx(&__pyx_mdef_7msgpack_9_cmsgpack_5unpack, NULL, __pyx_n_s_msgpack__cmsgpack); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 221, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_unpack, __pyx_t_2) < 0) __PYX_ERR(1, 221, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "msgpack/_unpacker.pyx":348 + * object object_hook=None, object object_pairs_hook=None, object list_hook=None, + * encoding=None, unicode_errors=None, Py_ssize_t max_buffer_size=0, + * object ext_hook=ExtType, # <<<<<<<<<<<<<< + * Py_ssize_t max_str_len=-1, + * Py_ssize_t max_bin_len=-1, + */ + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_ExtType); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 348, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_k__24 = __pyx_t_2; + __Pyx_GIVEREF(__pyx_t_2); + __pyx_t_2 = 0; + + /* "msgpack/_cmsgpack.pyx":1 + * # coding: utf-8 # <<<<<<<<<<<<<< + * #cython: embedsignature=True, c_string_encoding=ascii, language_level=3 + * include "_packer.pyx" + */ + __pyx_t_2 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(3, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_2) < 0) __PYX_ERR(3, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + if (__pyx_m) { + if (__pyx_d) { + __Pyx_AddTraceback("init msgpack._cmsgpack", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + Py_CLEAR(__pyx_m); + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init msgpack._cmsgpack"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m 
!= NULL) ? 0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStr(__pyx_b, name); + if (unlikely(!result)) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* KeywordStringCheck */ +static int __Pyx_CheckKeywordStrings( + PyObject *kwdict, + const char* function_name, + int kw_allowed) +{ + PyObject* key = 0; + Py_ssize_t pos = 0; +#if CYTHON_COMPILING_IN_PYPY + if (!kw_allowed && PyDict_Next(kwdict, &pos, &key, 0)) + goto invalid_keyword; + return 1; +#else + while (PyDict_Next(kwdict, &pos, &key, 0)) { + #if PY_MAJOR_VERSION < 3 + if (unlikely(!PyString_Check(key))) + #endif + if (unlikely(!PyUnicode_Check(key))) + goto invalid_keyword_type; + } + if ((!kw_allowed) && unlikely(key)) + goto invalid_keyword; + return 1; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + return 0; +#endif +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif + return 0; +} + +/* PyObjectCall */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = func->ob_type->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyErrFetchRestore */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +} +#endif + +/* RaiseException */ +#if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, + CYTHON_UNUSED PyObject *cause) { + __Pyx_PyThreadState_declare + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + 
__Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { +#if CYTHON_COMPILING_IN_PYPY + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#else + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + 
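/* [annotation, not part of the Cython-generated output] The loop below first tries pointer-identity matches against the interned argument-name table, then falls back to string comparison; a keyword that duplicates an already-filled positional argument jumps to arg_passed_twice, and unknown names either go into kwds2 (the **kwargs dict) or raise the "unexpected keyword argument" TypeError. */ +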
while (PyDict_Next(kwds, &pos, &key, &value)) { + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; + continue; + } + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_CheckExact(key)) || likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = (**name == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**name) != PyUnicode_GET_SIZE(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (PyUnicode_GET_SIZE(**argname) != PyUnicode_GET_SIZE(key)) ? 1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION < 3 + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + return -1; +} + +/* ExtTypeTest */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + PyErr_Format(PyExc_TypeError, "Cannot convert %.200s to %.200s", + Py_TYPE(obj)->tp_name, type->tp_name); + return 0; +} + +/* GetTopmostException */ +#if CYTHON_USE_EXC_INFO_STACK +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_type == NULL || exc_info->exc_type == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + +/* SaveResetException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + #endif + 
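/* [annotation, not part of the Cython-generated output] The exception slots read above are borrowed references owned by the thread state, so owned references are taken here before they are handed back to the caller. */ +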
Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +} +#endif + +/* PyErrExceptionMatches */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { + PyObject *exc_type = tstate->curexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; + if (unlikely(PyTuple_Check(err))) + return __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + return __Pyx_PyErr_GivenExceptionMatches(exc_type, err); +} +#endif + +/* GetException */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ + PyObject *local_type, *local_value, *local_tb; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* PyCFunctionFastCall */ +#if CYTHON_FAST_PYCCALL +static CYTHON_INLINE PyObject * __Pyx_PyCFunction_FastCall(PyObject *func_obj, PyObject **args, Py_ssize_t nargs) { + PyCFunctionObject *func = (PyCFunctionObject*)func_obj; + PyCFunction meth = PyCFunction_GET_FUNCTION(func); + PyObject *self = 
PyCFunction_GET_SELF(func); + int flags = PyCFunction_GET_FLAGS(func); + assert(PyCFunction_Check(func)); + assert(METH_FASTCALL == (flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_KEYWORDS | METH_STACKLESS))); + assert(nargs >= 0); + assert(nargs == 0 || args != NULL); + /* _PyCFunction_FastCallDict() must not be called with an exception set, + because it may clear it (directly or indirectly) and so the + caller loses its exception */ + assert(!PyErr_Occurred()); + if ((PY_VERSION_HEX < 0x030700A0) || unlikely(flags & METH_KEYWORDS)) { + return (*((__Pyx_PyCFunctionFastWithKeywords)(void*)meth)) (self, args, nargs, NULL); + } else { + return (*((__Pyx_PyCFunctionFast)(void*)meth)) (self, args, nargs); + } +} +#endif + +/* PyFunctionFastCall */ +#if CYTHON_FAST_PYCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. + */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +#if 1 || PY_VERSION_HEX < 0x030600B1 +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, int nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? 
PyDict_Size(kwargs) : 0; + if (Py_EnterRecursiveCall((char*)" while calling a Python object")) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif +#endif + +/* PyObjectCall2Args */ +static CYTHON_UNUSED PyObject* __Pyx_PyObject_Call2Args(PyObject* function, PyObject* arg1, PyObject* arg2) { + PyObject *args, *result = NULL; + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyFunction_FastCall(function, args, 2); + } + #endif + #if CYTHON_FAST_PYCCALL + if (__Pyx_PyFastCFunction_Check(function)) { + PyObject *args[2] = {arg1, arg2}; + return __Pyx_PyCFunction_FastCall(function, args, 2); + } + #endif + args = PyTuple_New(2); + if (unlikely(!args)) goto done; + Py_INCREF(arg1); + PyTuple_SET_ITEM(args, 0, arg1); + Py_INCREF(arg2); + PyTuple_SET_ITEM(args, 1, arg2); + Py_INCREF(function); + result = __Pyx_PyObject_Call(function, args, NULL); + Py_DECREF(args); + Py_DECREF(function); +done: + return result; +} + +/* PyObjectCallMethO */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallOneArg */ +#if CYTHON_COMPILING_IN_CPYTHON +static PyObject* __Pyx__PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_New(1); + if (unlikely(!args)) return NULL; + Py_INCREF(arg); + PyTuple_SET_ITEM(args, 0, arg); + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return 
result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, &arg, 1); + } +#endif + if (likely(PyCFunction_Check(func))) { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, arg); +#if CYTHON_FAST_PYCCALL + } else if (PyCFunction_GET_FLAGS(func) & METH_FASTCALL) { + return __Pyx_PyCFunction_FastCall(func, &arg, 1); +#endif + } + } + return __Pyx__PyObject_CallOneArg(func, arg); +} +#else +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *result; + PyObject *args = PyTuple_Pack(1, arg); + if (unlikely(!args)) return NULL; + result = __Pyx_PyObject_Call(func, args, NULL); + Py_DECREF(args); + return result; +} +#endif + +/* SwapException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = *type; + exc_info->exc_value = *value; + exc_info->exc_traceback = *tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = *type; + tstate->exc_value = *value; + tstate->exc_traceback = *tb; + #endif + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); + PyErr_SetExcInfo(*type, *value, *tb); + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#endif + +/* IterFinish */ +static CYTHON_INLINE int __Pyx_IterFinish(void) { +#if CYTHON_FAST_THREAD_STATE + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* exc_type = tstate->curexc_type; + if (unlikely(exc_type)) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) { + PyObject *exc_value, *exc_tb; + exc_value = tstate->curexc_value; + exc_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; + Py_DECREF(exc_type); + Py_XDECREF(exc_value); + Py_XDECREF(exc_tb); + return 0; + } else { + return -1; + } + } + return 0; +#else + if (unlikely(PyErr_Occurred())) { + if (likely(PyErr_ExceptionMatches(PyExc_StopIteration))) { + PyErr_Clear(); + return 0; + } else { + return -1; + } + } + return 0; +#endif +} + +/* PyObjectCallNoArg */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { +#if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCall(func, NULL, 0); + } +#endif +#ifdef __Pyx_CyFunction_USED + if (likely(PyCFunction_Check(func) || __Pyx_CyFunction_Check(func))) +#else + if (likely(PyCFunction_Check(func))) +#endif + { + if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { + return __Pyx_PyObject_CallMethO(func, NULL); + } + } + return __Pyx_PyObject_Call(func, __pyx_empty_tuple, NULL); +} +#endif + +/* PyObjectGetMethod */ +static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { + PyObject *attr; +#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && 
CYTHON_USE_PYTYPE_LOOKUP + PyTypeObject *tp = Py_TYPE(obj); + PyObject *descr; + descrgetfunc f = NULL; + PyObject **dictptr, *dict; + int meth_found = 0; + assert (*method == NULL); + if (unlikely(tp->tp_getattro != PyObject_GenericGetAttr)) { + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; + } + if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { + return 0; + } + descr = _PyType_Lookup(tp, name); + if (likely(descr != NULL)) { + Py_INCREF(descr); +#if PY_MAJOR_VERSION >= 3 + #ifdef __Pyx_CyFunction_USED + if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) + #else + if (likely(PyFunction_Check(descr) || (Py_TYPE(descr) == &PyMethodDescr_Type))) + #endif +#else + #ifdef __Pyx_CyFunction_USED + if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) + #else + if (likely(PyFunction_Check(descr))) + #endif +#endif + { + meth_found = 1; + } else { + f = Py_TYPE(descr)->tp_descr_get; + if (f != NULL && PyDescr_IsData(descr)) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + } + } + dictptr = _PyObject_GetDictPtr(obj); + if (dictptr != NULL && (dict = *dictptr) != NULL) { + Py_INCREF(dict); + attr = __Pyx_PyDict_GetItemStr(dict, name); + if (attr != NULL) { + Py_INCREF(attr); + Py_DECREF(dict); + Py_XDECREF(descr); + goto try_unpack; + } + Py_DECREF(dict); + } + if (meth_found) { + *method = descr; + return 1; + } + if (f != NULL) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + if (descr != NULL) { + *method = descr; + return 0; + } + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'%.50s' object has no attribute '%U'", + tp->tp_name, name); +#else + "'%.50s' object has no attribute '%.400s'", + tp->tp_name, PyString_AS_STRING(name)); +#endif + return 0; +#else + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; +#endif +try_unpack: +#if CYTHON_UNPACK_METHODS + if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { + PyObject *function = PyMethod_GET_FUNCTION(attr); + Py_INCREF(function); + Py_DECREF(attr); + *method = function; + return 1; + } +#endif + *method = attr; + return 0; +} + +/* PyObjectCallMethod0 */ +static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { + PyObject *method = NULL, *result = NULL; + int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); + if (likely(is_method)) { + result = __Pyx_PyObject_CallOneArg(method, obj); + Py_DECREF(method); + return result; + } + if (unlikely(!method)) goto bad; + result = __Pyx_PyObject_CallNoArg(method); + Py_DECREF(method); +bad: + return result; +} + +/* RaiseNeedMoreValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? 
"" : "s"); +} + +/* RaiseTooManyValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* UnpackItemEndCheck */ +static int __Pyx_IternextUnpackEndCheck(PyObject *retval, Py_ssize_t expected) { + if (unlikely(retval)) { + Py_DECREF(retval); + __Pyx_RaiseTooManyValuesError(expected); + return -1; + } else { + return __Pyx_IterFinish(); + } + return 0; +} + +/* RaiseNoneIterError */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); +} + +/* UnpackTupleError */ +static void __Pyx_UnpackTupleError(PyObject *t, Py_ssize_t index) { + if (t == Py_None) { + __Pyx_RaiseNoneNotIterableError(); + } else if (PyTuple_GET_SIZE(t) < index) { + __Pyx_RaiseNeedMoreValuesError(PyTuple_GET_SIZE(t)); + } else { + __Pyx_RaiseTooManyValuesError(index); + } +} + +/* UnpackTuple2 */ +static CYTHON_INLINE int __Pyx_unpack_tuple2_exact( + PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, int decref_tuple) { + PyObject *value1 = NULL, *value2 = NULL; +#if CYTHON_COMPILING_IN_PYPY + value1 = PySequence_ITEM(tuple, 0); if (unlikely(!value1)) goto bad; + value2 = PySequence_ITEM(tuple, 1); if (unlikely(!value2)) goto bad; +#else + value1 = PyTuple_GET_ITEM(tuple, 0); Py_INCREF(value1); + value2 = PyTuple_GET_ITEM(tuple, 1); Py_INCREF(value2); +#endif + if (decref_tuple) { + Py_DECREF(tuple); + } + *pvalue1 = value1; + *pvalue2 = value2; + return 0; +#if CYTHON_COMPILING_IN_PYPY +bad: + Py_XDECREF(value1); + Py_XDECREF(value2); + if (decref_tuple) { Py_XDECREF(tuple); } + return -1; +#endif +} +static int __Pyx_unpack_tuple2_generic(PyObject* tuple, PyObject** pvalue1, PyObject** pvalue2, + int has_known_size, int decref_tuple) { + Py_ssize_t index; + PyObject *value1 = NULL, *value2 = NULL, *iter = NULL; + iternextfunc iternext; + iter = PyObject_GetIter(tuple); + if (unlikely(!iter)) goto bad; + if (decref_tuple) { Py_DECREF(tuple); tuple = NULL; } + iternext = Py_TYPE(iter)->tp_iternext; + value1 = iternext(iter); if (unlikely(!value1)) { index = 0; goto unpacking_failed; } + value2 = iternext(iter); if (unlikely(!value2)) { index = 1; goto unpacking_failed; } + if (!has_known_size && unlikely(__Pyx_IternextUnpackEndCheck(iternext(iter), 2))) goto bad; + Py_DECREF(iter); + *pvalue1 = value1; + *pvalue2 = value2; + return 0; +unpacking_failed: + if (!has_known_size && __Pyx_IterFinish() == 0) + __Pyx_RaiseNeedMoreValuesError(index); +bad: + Py_XDECREF(iter); + Py_XDECREF(value1); + Py_XDECREF(value2); + if (decref_tuple) { Py_XDECREF(tuple); } + return -1; +} + +/* dict_iter */ +static CYTHON_INLINE PyObject* __Pyx_dict_iterator(PyObject* iterable, int is_dict, PyObject* method_name, + Py_ssize_t* p_orig_length, int* p_source_is_dict) { + is_dict = is_dict || likely(PyDict_CheckExact(iterable)); + *p_source_is_dict = is_dict; + if (is_dict) { +#if !CYTHON_COMPILING_IN_PYPY + *p_orig_length = PyDict_Size(iterable); + Py_INCREF(iterable); + return iterable; +#elif PY_MAJOR_VERSION >= 3 + static PyObject *py_items = NULL, *py_keys = NULL, *py_values = NULL; + PyObject **pp = NULL; + if (method_name) { + const char *name = PyUnicode_AsUTF8(method_name); + if (strcmp(name, "iteritems") == 0) pp = &py_items; + else if (strcmp(name, "iterkeys") == 0) pp = &py_keys; + else if (strcmp(name, "itervalues") == 0) pp = &py_values; + if (pp) { + if (!*pp) { + *pp = 
PyUnicode_FromString(name + 4); + if (!*pp) + return NULL; + } + method_name = *pp; + } + } +#endif + } + *p_orig_length = 0; + if (method_name) { + PyObject* iter; + iterable = __Pyx_PyObject_CallMethod0(iterable, method_name); + if (!iterable) + return NULL; +#if !CYTHON_COMPILING_IN_PYPY + if (PyTuple_CheckExact(iterable) || PyList_CheckExact(iterable)) + return iterable; +#endif + iter = PyObject_GetIter(iterable); + Py_DECREF(iterable); + return iter; + } + return PyObject_GetIter(iterable); +} +static CYTHON_INLINE int __Pyx_dict_iter_next( + PyObject* iter_obj, CYTHON_NCP_UNUSED Py_ssize_t orig_length, CYTHON_NCP_UNUSED Py_ssize_t* ppos, + PyObject** pkey, PyObject** pvalue, PyObject** pitem, int source_is_dict) { + PyObject* next_item; +#if !CYTHON_COMPILING_IN_PYPY + if (source_is_dict) { + PyObject *key, *value; + if (unlikely(orig_length != PyDict_Size(iter_obj))) { + PyErr_SetString(PyExc_RuntimeError, "dictionary changed size during iteration"); + return -1; + } + if (unlikely(!PyDict_Next(iter_obj, ppos, &key, &value))) { + return 0; + } + if (pitem) { + PyObject* tuple = PyTuple_New(2); + if (unlikely(!tuple)) { + return -1; + } + Py_INCREF(key); + Py_INCREF(value); + PyTuple_SET_ITEM(tuple, 0, key); + PyTuple_SET_ITEM(tuple, 1, value); + *pitem = tuple; + } else { + if (pkey) { + Py_INCREF(key); + *pkey = key; + } + if (pvalue) { + Py_INCREF(value); + *pvalue = value; + } + } + return 1; + } else if (PyTuple_CheckExact(iter_obj)) { + Py_ssize_t pos = *ppos; + if (unlikely(pos >= PyTuple_GET_SIZE(iter_obj))) return 0; + *ppos = pos + 1; + next_item = PyTuple_GET_ITEM(iter_obj, pos); + Py_INCREF(next_item); + } else if (PyList_CheckExact(iter_obj)) { + Py_ssize_t pos = *ppos; + if (unlikely(pos >= PyList_GET_SIZE(iter_obj))) return 0; + *ppos = pos + 1; + next_item = PyList_GET_ITEM(iter_obj, pos); + Py_INCREF(next_item); + } else +#endif + { + next_item = PyIter_Next(iter_obj); + if (unlikely(!next_item)) { + return __Pyx_IterFinish(); + } + } + if (pitem) { + *pitem = next_item; + } else if (pkey && pvalue) { + if (__Pyx_unpack_tuple2(next_item, pkey, pvalue, source_is_dict, source_is_dict, 1)) + return -1; + } else if (pkey) { + *pkey = next_item; + } else { + *pvalue = next_item; + } + return 1; +} + +/* PyDictVersioning */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* ReRaiseException */ +static CYTHON_INLINE void __Pyx_ReraiseException(void) { + PyObject *type = NULL, *value = NULL, *tb = NULL; +#if CYTHON_FAST_THREAD_STATE + PyThreadState *tstate = PyThreadState_GET(); + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + type = exc_info->exc_type; + value = exc_info->exc_value; + tb = exc_info->exc_traceback; + #else + type = tstate->exc_type; + value = tstate->exc_value; + tb = tstate->exc_traceback; + #endif +#else + PyErr_GetExcInfo(&type, &value, &tb); +#endif + if (!type || type == Py_None) { +#if !CYTHON_FAST_THREAD_STATE + Py_XDECREF(type); + Py_XDECREF(value); + Py_XDECREF(tb); +#endif + PyErr_SetString(PyExc_RuntimeError, + "No active exception to reraise"); + } else { +#if CYTHON_FAST_THREAD_STATE + Py_INCREF(type); + Py_XINCREF(value); + Py_XINCREF(tb); +#endif + PyErr_Restore(type, value, tb); + } +} + +/* None */ +static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { + Py_ssize_t q = a / b; + Py_ssize_t r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* CIntToDigits */ +static const char DIGIT_PAIRS_10[2*10*10+1] = { + "00010203040506070809" + "10111213141516171819" + "20212223242526272829" + "30313233343536373839" + "40414243444546474849" + "50515253545556575859" + "60616263646566676869" + "70717273747576777879" + "80818283848586878889" + "90919293949596979899" +}; +static const char DIGIT_PAIRS_8[2*8*8+1] = { + "0001020304050607" + "1011121314151617" + "2021222324252627" + "3031323334353637" + "4041424344454647" + "5051525354555657" + "6061626364656667" + "7071727374757677" +}; +static const char DIGITS_HEX[2*16+1] = { + "0123456789abcdef" + "0123456789ABCDEF" +}; + +/* BuildPyUnicode */ +static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, + int prepend_sign, char padding_char) { + PyObject *uval; + Py_ssize_t uoffset = ulength - clength; +#if CYTHON_USE_UNICODE_INTERNALS + Py_ssize_t i; +#if CYTHON_PEP393_ENABLED 
+    void *udata;
+    uval = PyUnicode_New(ulength, 127);
+    if (unlikely(!uval)) return NULL;
+    udata = PyUnicode_DATA(uval);
+#else
+    Py_UNICODE *udata;
+    uval = PyUnicode_FromUnicode(NULL, ulength);
+    if (unlikely(!uval)) return NULL;
+    udata = PyUnicode_AS_UNICODE(uval);
+#endif
+    if (uoffset > 0) {
+        i = 0;
+        if (prepend_sign) {
+            __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, 0, '-');
+            i++;
+        }
+        for (; i < uoffset; i++) {
+            __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, i, padding_char);
+        }
+    }
+    for (i=0; i < clength; i++) {
+        __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, uoffset+i, chars[i]);
+    }
+#else
+    {
+        PyObject *sign = NULL, *padding = NULL;
+        uval = NULL;
+        if (uoffset > 0) {
+            prepend_sign = !!prepend_sign;
+            if (uoffset > prepend_sign) {
+                padding = PyUnicode_FromOrdinal(padding_char);
+                if (likely(padding) && uoffset > prepend_sign + 1) {
+                    PyObject *tmp;
+                    PyObject *repeat = PyInt_FromSize_t(uoffset - prepend_sign);
+                    if (unlikely(!repeat)) goto done_or_error;
+                    tmp = PyNumber_Multiply(padding, repeat);
+                    Py_DECREF(repeat);
+                    Py_DECREF(padding);
+                    padding = tmp;
+                }
+                if (unlikely(!padding)) goto done_or_error;
+            }
+            if (prepend_sign) {
+                sign = PyUnicode_FromOrdinal('-');
+                if (unlikely(!sign)) goto done_or_error;
+            }
+        }
+        uval = PyUnicode_DecodeASCII(chars, clength, NULL);
+        if (likely(uval) && padding) {
+            PyObject *tmp = PyNumber_Add(padding, uval);
+            Py_DECREF(uval);
+            uval = tmp;
+        }
+        if (likely(uval) && sign) {
+            PyObject *tmp = PyNumber_Add(sign, uval);
+            Py_DECREF(uval);
+            uval = tmp;
+        }
+done_or_error:
+        Py_XDECREF(padding);
+        Py_XDECREF(sign);
+    }
+#endif
+    return uval;
+}
+
+/* CIntToPyUnicode */
+#ifdef _MSC_VER
+    #ifndef _MSC_STDINT_H_
+        #if _MSC_VER < 1300
+            typedef unsigned short uint16_t;
+        #else
+            typedef unsigned __int16 uint16_t;
+        #endif
+    #endif
+#else
+    #include <stdint.h>
+#endif
+#if defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6))
+#define GCC_DIAGNOSTIC
+#endif
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char, char format_char) {
+    char digits[sizeof(int)*3+2];
+    char *dpos, *end = digits + sizeof(int)*3+2;
+    const char *hex_digits = DIGITS_HEX;
+    Py_ssize_t length, ulength;
+    int prepend_sign, last_one_off;
+    int remaining;
+#ifdef GCC_DIAGNOSTIC
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wconversion"
+#endif
+    const int neg_one = (int) -1, const_zero = (int) 0;
+#ifdef GCC_DIAGNOSTIC
+#pragma GCC diagnostic pop
+#endif
+    const int is_unsigned = neg_one > const_zero;
+    if (format_char == 'X') {
+        hex_digits += 16;
+        format_char = 'x';
+    }
+    remaining = value;
+    last_one_off = 0;
+    dpos = end;
+    do {
+        int digit_pos;
+        switch (format_char) {
+        case 'o':
+            digit_pos = abs((int)(remaining % (8*8)));
+            remaining = (int) (remaining / (8*8));
+            dpos -= 2;
+            *(uint16_t*)dpos = ((const uint16_t*)DIGIT_PAIRS_8)[digit_pos];
+            last_one_off = (digit_pos < 8);
+            break;
+        case 'd':
+            digit_pos = abs((int)(remaining % (10*10)));
+            remaining = (int) (remaining / (10*10));
+            dpos -= 2;
+            *(uint16_t*)dpos = ((const uint16_t*)DIGIT_PAIRS_10)[digit_pos];
+            last_one_off = (digit_pos < 10);
+            break;
+        case 'x':
+            *(--dpos) = hex_digits[abs((int)(remaining % 16))];
+            remaining = (int) (remaining / 16);
+            break;
+        default:
+            assert(0);
+            break;
+        }
+    } while (unlikely(remaining != 0));
+    if (last_one_off) {
+        assert(*dpos == '0');
+        dpos++;
+    }
+    length = end - dpos;
+    ulength = length;
+    prepend_sign = 0;
+    if (!is_unsigned && value <= neg_one) {
+        if
(padding_char == ' ' || width <= length + 1) { + *(--dpos) = '-'; + ++length; + } else { + prepend_sign = 1; + } + ++ulength; + } + if (width > ulength) { + ulength = width; + } + if (ulength == 1) { + return PyUnicode_FromOrdinal(*dpos); + } + return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char); +} + +/* PyObject_GenericGetAttrNoDict */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'%.50s' object has no attribute '%U'", + tp->tp_name, attr_name); +#else + "'%.50s' object has no attribute '%.400s'", + tp->tp_name, PyString_AS_STRING(attr_name)); +#endif + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { + PyObject *descr; + PyTypeObject *tp = Py_TYPE(obj); + if (unlikely(!PyString_Check(attr_name))) { + return PyObject_GenericGetAttr(obj, attr_name); + } + assert(!tp->tp_dictoffset); + descr = _PyType_Lookup(tp, attr_name); + if (unlikely(!descr)) { + return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); + } + Py_INCREF(descr); + #if PY_MAJOR_VERSION < 3 + if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) + #endif + { + descrgetfunc f = Py_TYPE(descr)->tp_descr_get; + if (unlikely(f)) { + PyObject *res = f(descr, obj, (PyObject *)tp); + Py_DECREF(descr); + return res; + } + } + return descr; +} +#endif + +/* PyObject_GenericGetAttr */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { + if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { + return PyObject_GenericGetAttr(obj, attr_name); + } + return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); +} +#endif + +/* SetVTable */ +static int __Pyx_SetVtable(PyObject *dict, void *vtable) { +#if PY_VERSION_HEX >= 0x02070000 + PyObject *ob = PyCapsule_New(vtable, 0, 0); +#else + PyObject *ob = PyCObject_FromVoidPtr(vtable, 0); +#endif + if (!ob) + goto bad; + if (PyDict_SetItem(dict, __pyx_n_s_pyx_vtable, ob) < 0) + goto bad; + Py_DECREF(ob); + return 0; +bad: + Py_XDECREF(ob); + return -1; +} + +/* SetupReduce */ +static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { + int ret; + PyObject *name_attr; + name_attr = __Pyx_PyObject_GetAttrStr(meth, __pyx_n_s_name); + if (likely(name_attr)) { + ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); + } else { + ret = -1; + } + if (unlikely(ret < 0)) { + PyErr_Clear(); + ret = 0; + } + Py_XDECREF(name_attr); + return ret; +} +static int __Pyx_setup_reduce(PyObject* type_obj) { + int ret = 0; + PyObject *object_reduce = NULL; + PyObject *object_reduce_ex = NULL; + PyObject *reduce = NULL; + PyObject *reduce_ex = NULL; + PyObject *reduce_cython = NULL; + PyObject *setstate = NULL; + PyObject *setstate_cython = NULL; +#if CYTHON_USE_PYTYPE_LOOKUP + if (_PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate)) goto GOOD; +#else + if (PyObject_HasAttr(type_obj, __pyx_n_s_getstate)) goto GOOD; +#endif +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; +#else + object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto BAD; +#endif + reduce_ex = 
__Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto BAD; + if (reduce_ex == object_reduce_ex) { +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; +#else + object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto BAD; +#endif + reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto BAD; + if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { + reduce_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_cython); if (unlikely(!reduce_cython)) goto BAD; + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto BAD; + setstate = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate); + if (!setstate) PyErr_Clear(); + if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { + setstate_cython = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_setstate_cython); if (unlikely(!setstate_cython)) goto BAD; + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto BAD; + } + PyType_Modified((PyTypeObject*)type_obj); + } + } + goto GOOD; +BAD: + if (!PyErr_Occurred()) + PyErr_Format(PyExc_RuntimeError, "Unable to initialize pickling for %s", ((PyTypeObject*)type_obj)->tp_name); + ret = -1; +GOOD: +#if !CYTHON_USE_PYTYPE_LOOKUP + Py_XDECREF(object_reduce); + Py_XDECREF(object_reduce_ex); +#endif + Py_XDECREF(reduce); + Py_XDECREF(reduce_ex); + Py_XDECREF(reduce_cython); + Py_XDECREF(setstate); + Py_XDECREF(setstate_cython); + return ret; +} + +/* TypeImport */ +#ifndef __PYX_HAVE_RT_ImportType +#define __PYX_HAVE_RT_ImportType +static PyTypeObject *__Pyx_ImportType(PyObject *module, const char *module_name, const char *class_name, + size_t size, enum __Pyx_ImportType_CheckSize check_size) +{ + PyObject *result = 0; + char warning[200]; + Py_ssize_t basicsize; +#ifdef Py_LIMITED_API + PyObject *py_basicsize; +#endif + result = PyObject_GetAttrString(module, class_name); + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#ifndef Py_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if ((size_t)basicsize < size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + if (check_size == __Pyx_ImportType_CheckSize_Error && (size_t)basicsize != size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. 
" + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + goto bad; + } + else if (check_size == __Pyx_ImportType_CheckSize_Warn && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(result); + return NULL; +} +#endif + +/* Import */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *empty_list = 0; + PyObject *module = 0; + PyObject *global_dict = 0; + PyObject *empty_dict = 0; + PyObject *list; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (!py_import) + goto bad; + #endif + if (from_list) + list = from_list; + else { + empty_list = PyList_New(0); + if (!empty_list) + goto bad; + list = empty_list; + } + global_dict = PyModule_GetDict(__pyx_m); + if (!global_dict) + goto bad; + empty_dict = PyDict_New(); + if (!empty_dict) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if (strchr(__Pyx_MODULE_NAME, '.')) { + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, 1); + if (!module) { + if (!PyErr_ExceptionMatches(PyExc_ImportError)) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (!py_level) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, global_dict, empty_dict, list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, global_dict, empty_dict, list, level); + #endif + } + } +bad: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + Py_XDECREF(empty_list); + Py_XDECREF(empty_dict); + return module; +} + +/* ImportFrom */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { + PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); + if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Format(PyExc_ImportError, + #if PY_MAJOR_VERSION < 3 + "cannot import name %.230s", PyString_AS_STRING(name)); + #else + "cannot import name %S", name); + #endif + } + return value; +} + +/* CLineInTraceback */ +#ifndef CYTHON_CLINE_IN_TRACEBACK +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? 
Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, (size_t)new_max*sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} + +/* AddTraceback */ +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyObject *py_srcfile = 0; + PyObject *py_funcname = 0; + #if PY_MAJOR_VERSION < 3 + py_srcfile = PyString_FromString(filename); + #else + py_srcfile = PyUnicode_FromString(filename); + #endif + if (!py_srcfile) goto bad; + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = 
PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + #else + py_funcname = PyUnicode_FromString(funcname); + #endif + } + if (!py_funcname) goto bad; + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + Py_DECREF(py_funcname); + return py_code; +bad: + Py_XDECREF(py_srcfile); + Py_XDECREF(py_funcname); + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) goto bad; + __pyx_insert_code_object(c_line ? -c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} + +/* CIntFromPyVerify */ +#define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_unsigned_PY_LONG_LONG(unsigned PY_LONG_LONG value) { + const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG) ((unsigned PY_LONG_LONG) 0 - (unsigned PY_LONG_LONG) 1), const_zero = (unsigned PY_LONG_LONG) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(unsigned PY_LONG_LONG) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(unsigned PY_LONG_LONG) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned PY_LONG_LONG) 
<= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(unsigned PY_LONG_LONG), + little, !is_unsigned); + } +} + +/* CIntFromPy */ +static CYTHON_INLINE PY_LONG_LONG __Pyx_PyInt_As_PY_LONG_LONG(PyObject *x) { + const PY_LONG_LONG neg_one = (PY_LONG_LONG) ((PY_LONG_LONG) 0 - (PY_LONG_LONG) 1), const_zero = (PY_LONG_LONG) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(PY_LONG_LONG) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (PY_LONG_LONG) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (PY_LONG_LONG) 0; + case 1: __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, digit, digits[0]) + case 2: + if (8 * sizeof(PY_LONG_LONG) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(PY_LONG_LONG) >= 2 * PyLong_SHIFT) { + return (PY_LONG_LONG) (((((PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(PY_LONG_LONG) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(PY_LONG_LONG) >= 3 * PyLong_SHIFT) { + return (PY_LONG_LONG) (((((((PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(PY_LONG_LONG) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(PY_LONG_LONG) >= 4 * PyLong_SHIFT) { + return (PY_LONG_LONG) (((((((((PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (PY_LONG_LONG) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(PY_LONG_LONG) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(PY_LONG_LONG, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(PY_LONG_LONG) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(PY_LONG_LONG, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (PY_LONG_LONG) 0; + case -1: 
__PYX_VERIFY_RETURN_INT(PY_LONG_LONG, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, digit, +digits[0]) + case -2: + if (8 * sizeof(PY_LONG_LONG) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + return (PY_LONG_LONG) (((PY_LONG_LONG)-1)*(((((PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(PY_LONG_LONG) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + return (PY_LONG_LONG) ((((((PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + return (PY_LONG_LONG) (((PY_LONG_LONG)-1)*(((((((PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(PY_LONG_LONG) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + return (PY_LONG_LONG) ((((((((PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + return (PY_LONG_LONG) (((PY_LONG_LONG)-1)*(((((((((PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(PY_LONG_LONG) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(PY_LONG_LONG, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + return (PY_LONG_LONG) ((((((((((PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (PY_LONG_LONG)digits[0]))); + } + } + break; + } +#endif + if (sizeof(PY_LONG_LONG) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(PY_LONG_LONG, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(PY_LONG_LONG) <= 
sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(PY_LONG_LONG, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + PY_LONG_LONG val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (PY_LONG_LONG) -1; + } + } else { + PY_LONG_LONG val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (PY_LONG_LONG) -1; + val = __Pyx_PyInt_As_PY_LONG_LONG(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to PY_LONG_LONG"); + return (PY_LONG_LONG) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to PY_LONG_LONG"); + return (PY_LONG_LONG) -1; +} + +/* CIntFromPy */ +static CYTHON_INLINE unsigned PY_LONG_LONG __Pyx_PyInt_As_unsigned_PY_LONG_LONG(PyObject *x) { + const unsigned PY_LONG_LONG neg_one = (unsigned PY_LONG_LONG) ((unsigned PY_LONG_LONG) 0 - (unsigned PY_LONG_LONG) 1), const_zero = (unsigned PY_LONG_LONG) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(unsigned PY_LONG_LONG) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (unsigned PY_LONG_LONG) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned PY_LONG_LONG) 0; + case 1: __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, digit, digits[0]) + case 2: + if (8 * sizeof(unsigned PY_LONG_LONG) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned PY_LONG_LONG) >= 2 * PyLong_SHIFT) { + return (unsigned PY_LONG_LONG) (((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(unsigned PY_LONG_LONG) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned PY_LONG_LONG) >= 3 * PyLong_SHIFT) { + return (unsigned PY_LONG_LONG) (((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(unsigned PY_LONG_LONG) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((((unsigned 
long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned PY_LONG_LONG) >= 4 * PyLong_SHIFT) { + return (unsigned PY_LONG_LONG) (((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (unsigned PY_LONG_LONG) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (unsigned PY_LONG_LONG) 0; + case -1: __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, digit, +digits[0]) + case -2: + if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + return (unsigned PY_LONG_LONG) (((unsigned PY_LONG_LONG)-1)*(((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(unsigned PY_LONG_LONG) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + return (unsigned PY_LONG_LONG) ((((((unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + return (unsigned PY_LONG_LONG) (((unsigned PY_LONG_LONG)-1)*(((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(unsigned PY_LONG_LONG) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + return (unsigned PY_LONG_LONG) 
((((((((unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + return (unsigned PY_LONG_LONG) (((unsigned PY_LONG_LONG)-1)*(((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(unsigned PY_LONG_LONG) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(unsigned PY_LONG_LONG, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(unsigned PY_LONG_LONG) - 1 > 4 * PyLong_SHIFT) { + return (unsigned PY_LONG_LONG) ((((((((((unsigned PY_LONG_LONG)digits[3]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[2]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[1]) << PyLong_SHIFT) | (unsigned PY_LONG_LONG)digits[0]))); + } + } + break; + } +#endif + if (sizeof(unsigned PY_LONG_LONG) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(unsigned PY_LONG_LONG) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(unsigned PY_LONG_LONG, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + unsigned PY_LONG_LONG val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (unsigned PY_LONG_LONG) -1; + } + } else { + unsigned PY_LONG_LONG val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (unsigned PY_LONG_LONG) -1; + val = __Pyx_PyInt_As_unsigned_PY_LONG_LONG(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to unsigned PY_LONG_LONG"); + return (unsigned PY_LONG_LONG) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to unsigned PY_LONG_LONG"); + return (unsigned PY_LONG_LONG) -1; +} + +/* CIntFromPy */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(long) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } 
else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case 1: __PYX_VERIFY_RETURN_INT(long, digit, digits[0]) + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 2 * PyLong_SHIFT) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 3 * PyLong_SHIFT) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) >= 4 * PyLong_SHIFT) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(long) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (long) 0; + case -1: __PYX_VERIFY_RETURN_INT(long, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(long, digit, +digits[0]) + case -2: + if (8 * sizeof(long) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(long) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(long) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned 
long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(long) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(long) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(long) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(long) - 1 > 4 * PyLong_SHIFT) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } +#endif + if (sizeof(long) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* CIntFromPy */ +static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { + const char neg_one = (char) ((char) 0 - (char) 1), const_zero = (char) 0; + const int 
is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(char) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (char) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (char) 0; + case 1: __PYX_VERIFY_RETURN_INT(char, digit, digits[0]) + case 2: + if (8 * sizeof(char) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 2 * PyLong_SHIFT) { + return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(char) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 3 * PyLong_SHIFT) { + return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(char) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) >= 4 * PyLong_SHIFT) { + return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (char) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(char) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(char) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (char) 0; + case -1: __PYX_VERIFY_RETURN_INT(char, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(char, digit, +digits[0]) + case -2: + if (8 * sizeof(char) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(char) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 2 * 
PyLong_SHIFT) { + return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(char) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(char) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(char) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { + return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(char) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(char) - 1 > 4 * PyLong_SHIFT) { + return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + } +#endif + if (sizeof(char) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(char) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + char val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (char) -1; + } + } else { + char val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (char) -1; + val = __Pyx_PyInt_As_char(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to char"); + return (char) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to char"); + 
return (char) -1; +} + +/* CIntToPy */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { + const long neg_one = (long) ((long) 0 - (long) 1), const_zero = (long) 0; + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); + } +} + +/* CIntFromPy */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { + const int neg_one = (int) ((int) 0 - (int) 1), const_zero = (int) 0; + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if (sizeof(int) < sizeof(long)) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case 1: __PYX_VERIFY_RETURN_INT(int, digit, digits[0]) + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 2 * PyLong_SHIFT) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 3 * PyLong_SHIFT) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) >= 4 * PyLong_SHIFT) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if (sizeof(int) <= sizeof(unsigned long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, 
PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)x)->ob_digit; + switch (Py_SIZE(x)) { + case 0: return (int) 0; + case -1: __PYX_VERIFY_RETURN_INT(int, sdigit, (sdigit) (-(sdigit)digits[0])) + case 1: __PYX_VERIFY_RETURN_INT(int, digit, +digits[0]) + case -2: + if (8 * sizeof(int) - 1 > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if (8 * sizeof(int) > 1 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if (8 * sizeof(int) - 1 > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if (8 * sizeof(int) > 2 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if (8 * sizeof(int) - 1 > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if (8 * sizeof(int) > 3 * PyLong_SHIFT) { + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if (8 * sizeof(int) - 1 > 4 * PyLong_SHIFT) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } +#endif + if (sizeof(int) <= sizeof(long)) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, 
PyLong_AsLongLong(x)) +#endif + } + } + { +#if CYTHON_COMPILING_IN_PYPY && !defined(_PyLong_AsByteArray) + PyErr_SetString(PyExc_RuntimeError, + "_PyLong_AsByteArray() not available in PyPy, cannot convert large numbers"); +#else + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); + #if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } + #endif + if (likely(v)) { + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + int ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); + Py_DECREF(v); + if (likely(!ret)) + return val; + } +#endif + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* FastTypeChecks */ +#if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = a->tp_base; + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject *exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + int res = exc_type1 ? 
__Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type1) : 0;
+    if (!res) {
+        res = __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2);
+    }
+    return res;
+}
+#endif
+static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) {
+    Py_ssize_t i, n;
+    assert(PyExceptionClass_Check(exc_type));
+    n = PyTuple_GET_SIZE(tuple);
+#if PY_MAJOR_VERSION >= 3
+    for (i=0; i<n; i++) {
+        if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1;
+    }
+#endif
+    for (i=0; i<n; i++) {
+        if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, PyTuple_GET_ITEM(tuple, i))) return 1;
+    }
+    return 0;
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) {
+    if (likely(err == exc_type)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        if (likely(PyExceptionClass_Check(exc_type))) {
+            return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type);
+        } else if (likely(PyTuple_Check(exc_type))) {
+            return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type);
+        } else {
+        }
+    }
+    return PyErr_GivenExceptionMatches(err, exc_type);
+}
+static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) {
+    assert(PyExceptionClass_Check(exc_type1));
+    assert(PyExceptionClass_Check(exc_type2));
+    if (likely(err == exc_type1 || err == exc_type2)) return 1;
+    if (likely(PyExceptionClass_Check(err))) {
+        return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2);
+    }
+    return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2));
+}
+#endif
+
+/* CheckBinaryVersion */
+static int __Pyx_check_binary_version(void) {
+    char ctversion[4], rtversion[4];
+    PyOS_snprintf(ctversion, 4, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION);
+    PyOS_snprintf(rtversion, 4, "%s", Py_GetVersion());
+    if (ctversion[0] != rtversion[0] || ctversion[2] != rtversion[2]) {
+        char message[200];
+        PyOS_snprintf(message, sizeof(message),
+                      "compiletime version %s of module '%.100s' "
+                      "does not match runtime version %s",
+                      ctversion, __Pyx_MODULE_NAME, rtversion);
+        return PyErr_WarnEx(NULL, message, 1);
+    }
+    return 0;
+}
+
+/* InitStrings */
+static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) {
+    while (t->p) {
+    #if PY_MAJOR_VERSION < 3
+        if (t->is_unicode) {
+            *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL);
+        } else if (t->intern) {
+            *t->p = PyString_InternFromString(t->s);
+        } else {
+            *t->p = PyString_FromStringAndSize(t->s, t->n - 1);
+        }
+    #else
+        if (t->is_unicode | t->is_str) {
+            if (t->intern) {
+                *t->p = PyUnicode_InternFromString(t->s);
+            } else if (t->encoding) {
+                *t->p = PyUnicode_Decode(t->s, t->n - 1, t->encoding, NULL);
+            } else {
+                *t->p = PyUnicode_FromStringAndSize(t->s, t->n - 1);
+            }
+        } else {
+            *t->p = PyBytes_FromStringAndSize(t->s, t->n - 1);
+        }
+    #endif
+        if (!*t->p)
+            return -1;
+        if (PyObject_Hash(*t->p) == -1)
+            return -1;
+        ++t;
+    }
+    return 0;
+}
+
+static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) {
+    return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str));
+}
+static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) {
+    Py_ssize_t ignore;
+    return __Pyx_PyObject_AsStringAndSize(o, &ignore);
+}
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+#if !CYTHON_PEP393_ENABLED
+static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+    char* defenc_c;
+    PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL);
+    if (!defenc) return NULL;
+    defenc_c = PyBytes_AS_STRING(defenc);
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+    {
+        char* end = defenc_c + PyBytes_GET_SIZE(defenc);
+        char* c;
+        for (c = defenc_c; c < end; c++) {
+            if ((unsigned char) (*c) >= 128) {
+                PyUnicode_AsASCIIString(o);
+                return NULL;
+            }
+        }
+    }
+#endif
+    *length = PyBytes_GET_SIZE(defenc);
+    return defenc_c;
+}
+#else
+static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+    if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL;
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+    if (likely(PyUnicode_IS_ASCII(o))) {
+        *length = PyUnicode_GET_LENGTH(o);
+        return PyUnicode_AsUTF8(o);
+    } else {
+        PyUnicode_AsASCIIString(o);
+        return NULL;
+    }
+#else
+    return PyUnicode_AsUTF8AndSize(o, length);
+#endif
+}
+#endif
+#endif
+static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) {
+#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT
+    if (
+#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII
+            __Pyx_sys_getdefaultencoding_not_ascii &&
+#endif
+            PyUnicode_Check(o)) {
+        return __Pyx_PyUnicode_AsStringAndSize(o, length);
+    } else
+#endif
+#if (!CYTHON_COMPILING_IN_PYPY) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE))
+    if (PyByteArray_Check(o)) {
+        *length = PyByteArray_GET_SIZE(o);
+        return PyByteArray_AS_STRING(o);
+    } else
+#endif
+    {
+        char* result;
+        int r = PyBytes_AsStringAndSize(o, &result, length);
+        if (unlikely(r < 0)) {
+            return NULL;
+        } else {
+            return result;
+        }
+    }
+}
+static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) {
+    int is_true = x == Py_True;
+    if (is_true | (x == Py_False) | (x == Py_None)) return is_true;
+    else return PyObject_IsTrue(x);
+}
+static CYTHON_INLINE int
__Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type %.200s). " + "The ability to return an instance of a strict subclass of int " + "is deprecated, and may be removed in a future version of Python.", + Py_TYPE(result)->tp_name)) { + Py_DECREF(result); + return NULL; + } + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type %.200s)", + type_name, type_name, Py_TYPE(result)->tp_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + const digit* digits = ((PyLongObject*)b)->ob_digit; + const Py_ssize_t size = Py_SIZE(b); + if (likely(__Pyx_sst_abs(size) <= 1)) { + ival = likely(size) ? 
digits[0] : 0;
+            if (size == -1) ival = -ival;
+            return ival;
+        } else {
+            switch (size) {
+            case 2:
+                if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+                    return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                }
+                break;
+            case -2:
+                if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) {
+                    return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                }
+                break;
+            case 3:
+                if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+                    return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                }
+                break;
+            case -3:
+                if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) {
+                    return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                }
+                break;
+            case 4:
+                if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+                    return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                }
+                break;
+            case -4:
+                if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) {
+                    return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0]));
+                }
+                break;
+            }
+        }
+        #endif
+        return PyLong_AsSsize_t(b);
+    }
+    x = PyNumber_Index(b);
+    if (!x) return -1;
+    ival = PyInt_AsSsize_t(x);
+    Py_DECREF(x);
+    return ival;
+}
+static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) {
+    return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False);
+}
+static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) {
+    return PyInt_FromSize_t(ival);
+}
+
+
+#endif /* Py_PYTHON_H */
diff --git a/ddtrace/vendor/msgpack/_cmsgpack.pyx b/ddtrace/vendor/msgpack/_cmsgpack.pyx
new file mode 100644
index 0000000000..8ebdbf58b2
--- /dev/null
+++ b/ddtrace/vendor/msgpack/_cmsgpack.pyx
@@ -0,0 +1,4 @@
+# coding: utf-8
+#cython: embedsignature=True, c_string_encoding=ascii, language_level=3
+include "_packer.pyx"
+include "_unpacker.pyx"
diff --git a/ddtrace/vendor/msgpack/_packer.pyx b/ddtrace/vendor/msgpack/_packer.pyx
new file mode 100644
index 0000000000..c0e5a5c4b5
--- /dev/null
+++ b/ddtrace/vendor/msgpack/_packer.pyx
@@ -0,0 +1,362 @@
+# coding: utf-8
+
+from cpython cimport *
+from cpython.bytearray cimport PyByteArray_Check, PyByteArray_CheckExact
+
+from ddtrace.vendor.msgpack import ExtType
+
+
+cdef extern from "Python.h":
+
+    int PyMemoryView_Check(object obj)
+    char* PyUnicode_AsUTF8AndSize(object obj, Py_ssize_t *l) except NULL
+
+
+cdef extern from "pack.h":
+    struct msgpack_packer:
+        char* buf
+        size_t length
+        size_t buf_size
+        bint use_bin_type
+
+    int msgpack_pack_int(msgpack_packer* pk, int d)
+    int msgpack_pack_nil(msgpack_packer* pk)
+    int msgpack_pack_true(msgpack_packer* pk)
+    int msgpack_pack_false(msgpack_packer* pk)
+    int msgpack_pack_long(msgpack_packer* pk, long d)
+    int msgpack_pack_long_long(msgpack_packer* pk, long long d)
+    int msgpack_pack_unsigned_long_long(msgpack_packer* pk, unsigned long long d)
+    int msgpack_pack_float(msgpack_packer* pk, float d)
+    int msgpack_pack_double(msgpack_packer* pk, double d)
+    int msgpack_pack_array(msgpack_packer* pk, size_t l)
+    int msgpack_pack_map(msgpack_packer* pk, size_t l)
+    int msgpack_pack_raw(msgpack_packer* pk, size_t l)
+    int msgpack_pack_bin(msgpack_packer* pk, size_t l)
+    int msgpack_pack_raw_body(msgpack_packer* pk, char* body, size_t l)
+    int msgpack_pack_ext(msgpack_packer* pk, char typecode, size_t l)
+    int msgpack_pack_unicode(msgpack_packer* pk, object o, long long limit)
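+    # These declarations are thin bindings to msgpack-c's packing routines;
+    # as they are used below, each returns 0 on success and a nonzero status
+    # on failure (-1 is treated as an allocation failure by the callers).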
+
+cdef extern from "buff_converter.h":
+    object buff_to_buff(char *, Py_ssize_t)
+
+cdef int DEFAULT_RECURSE_LIMIT=511
+cdef long long ITEM_LIMIT = (2**32)-1
+
+
+cdef inline int PyBytesLike_Check(object o):
+    return PyBytes_Check(o) or PyByteArray_Check(o)
+
+
+cdef inline int PyBytesLike_CheckExact(object o):
+    return PyBytes_CheckExact(o) or PyByteArray_CheckExact(o)
+
+
+cdef class Packer(object):
+    """
+    MessagePack Packer
+
+    usage::
+
+        packer = Packer()
+        astream.write(packer.pack(a))
+        astream.write(packer.pack(b))
+
+    Packer's constructor has some keyword arguments:
+
+    :param callable default:
+        Convert a user type to a builtin type that Packer supports.
+        See also simplejson's documentation.
+
+    :param bool use_single_float:
+        Use single precision float type for float. (default: False)
+
+    :param bool autoreset:
+        Reset buffer after each pack and return its content as `bytes`. (default: True).
+        If set to false, use `bytes()` to get content and `.reset()` to clear buffer.
+
+    :param bool use_bin_type:
+        Use bin type introduced in msgpack spec 2.0 for bytes.
+        It also enables str8 type for unicode.
+        The current default value is false, but it will be changed to true
+        in a future version. You should specify it explicitly.
+
+    :param bool strict_types:
+        If set to true, types will be checked to be exact. Derived classes
+        from serializable types will not be serialized and will be
+        treated as unsupported types and forwarded to default.
+        Additionally, tuples will not be serialized as lists.
+        This is useful when trying to implement accurate serialization
+        for Python types.
+
+    :param str unicode_errors:
+        Error handler for encoding unicode. (default: 'strict')
+
+    :param str encoding:
+        (deprecated) Convert unicode to bytes with this encoding. (default: 'utf-8')
+    """
+    cdef msgpack_packer pk
+    cdef object _default
+    cdef object _bencoding
+    cdef object _berrors
+    cdef const char *encoding
+    cdef const char *unicode_errors
+    cdef bint strict_types
+    cdef bool use_float
+    cdef bint autoreset
+
+    def __cinit__(self):
+        cdef int buf_size = 1024*1024
+        self.pk.buf = <char*> PyMem_Malloc(buf_size)
+        if self.pk.buf == NULL:
+            raise MemoryError("Unable to allocate internal buffer.")
+        self.pk.buf_size = buf_size
+        self.pk.length = 0
+
+    def __init__(self, default=None, encoding=None, unicode_errors=None,
+                 bint use_single_float=False, bint autoreset=True, bint use_bin_type=False,
+                 bint strict_types=False):
+        if encoding is not None:
+            PyErr_WarnEx(DeprecationWarning, "encoding is deprecated.", 1)
+        self.use_float = use_single_float
+        self.strict_types = strict_types
+        self.autoreset = autoreset
+        self.pk.use_bin_type = use_bin_type
+        if default is not None:
+            if not PyCallable_Check(default):
+                raise TypeError("default must be a callable.")
+        self._default = default
+
+        self._bencoding = encoding
+        if encoding is None:
+            if PY_MAJOR_VERSION < 3:
+                self.encoding = 'utf-8'
+            else:
+                self.encoding = NULL
+        else:
+            self.encoding = self._bencoding
+
+        self._berrors = unicode_errors
+        if unicode_errors is None:
+            self.unicode_errors = NULL
+        else:
+            self.unicode_errors = self._berrors
+
+    def __dealloc__(self):
+        PyMem_Free(self.pk.buf)
+        self.pk.buf = NULL
+
+    cdef int _pack(self, object o, int nest_limit=DEFAULT_RECURSE_LIMIT) except -1:
+        cdef long long llval
+        cdef unsigned long long ullval
+        cdef long longval
+        cdef float fval
+        cdef double dval
+        cdef char* rawval
+        cdef int ret
+        cdef dict d
+        cdef Py_ssize_t L
+        cdef int default_used = 0
+        cdef bint strict_types = self.strict_types
+        cdef Py_buffer view
+
+        if nest_limit < 0:
+            raise ValueError("recursion limit exceeded.")
+
+        while True:
+            if o is None:
+                ret = msgpack_pack_nil(&self.pk)
+            elif PyBool_Check(o) if strict_types else isinstance(o, bool):
+                if o:
+                    ret = msgpack_pack_true(&self.pk)
+                else:
+                    ret = msgpack_pack_false(&self.pk)
+            elif PyLong_CheckExact(o) if strict_types else PyLong_Check(o):
+                # PyInt_Check(long) is True for Python 3.
+                # So we should test long before int.
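+                # Positive values are packed via msgpack_pack_unsigned_long_long
+                # (msgpack uint64) and negative values via msgpack_pack_long_long
+                # (msgpack int64); anything outside the 64-bit range raises
+                # OverflowError, which is offered once to the user-supplied
+                # `default` hook below before being re-raised.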
+                try:
+                    if o > 0:
+                        ullval = o
+                        ret = msgpack_pack_unsigned_long_long(&self.pk, ullval)
+                    else:
+                        llval = o
+                        ret = msgpack_pack_long_long(&self.pk, llval)
+                except OverflowError as oe:
+                    if not default_used and self._default is not None:
+                        o = self._default(o)
+                        default_used = True
+                        continue
+                    else:
+                        raise OverflowError("Integer value out of range")
+            elif PyInt_CheckExact(o) if strict_types else PyInt_Check(o):
+                longval = o
+                ret = msgpack_pack_long(&self.pk, longval)
+            elif PyFloat_CheckExact(o) if strict_types else PyFloat_Check(o):
+                if self.use_float:
+                    fval = o
+                    ret = msgpack_pack_float(&self.pk, fval)
+                else:
+                    dval = o
+                    ret = msgpack_pack_double(&self.pk, dval)
+            elif PyBytesLike_CheckExact(o) if strict_types else PyBytesLike_Check(o):
+                L = len(o)
+                if L > ITEM_LIMIT:
+                    PyErr_Format(ValueError, b"%.200s object is too large", Py_TYPE(o).tp_name)
+                rawval = o
+                ret = msgpack_pack_bin(&self.pk, L)
+                if ret == 0:
+                    ret = msgpack_pack_raw_body(&self.pk, rawval, L)
+            elif PyUnicode_CheckExact(o) if strict_types else PyUnicode_Check(o):
+                if self.encoding == NULL and self.unicode_errors == NULL:
+                    ret = msgpack_pack_unicode(&self.pk, o, ITEM_LIMIT);
+                    if ret == -2:
+                        raise ValueError("unicode string is too large")
+                else:
+                    o = PyUnicode_AsEncodedString(o, self.encoding, self.unicode_errors)
+                    L = len(o)
+                    if L > ITEM_LIMIT:
+                        raise ValueError("unicode string is too large")
+                    ret = msgpack_pack_raw(&self.pk, L)
+                    if ret == 0:
+                        rawval = o
+                        ret = msgpack_pack_raw_body(&self.pk, rawval, L)
+            elif PyDict_CheckExact(o):
+                d = o
+                L = len(d)
+                if L > ITEM_LIMIT:
+                    raise ValueError("dict is too large")
+                ret = msgpack_pack_map(&self.pk, L)
+                if ret == 0:
+                    for k, v in d.items():
+                        ret = self._pack(k, nest_limit-1)
+                        if ret != 0: break
+                        ret = self._pack(v, nest_limit-1)
+                        if ret != 0: break
+            elif not strict_types and PyDict_Check(o):
+                L = len(o)
+                if L > ITEM_LIMIT:
+                    raise ValueError("dict is too large")
+                ret = msgpack_pack_map(&self.pk, L)
+                if ret == 0:
+                    for k, v in o.items():
+                        ret = self._pack(k, nest_limit-1)
+                        if ret != 0: break
+                        ret = self._pack(v, nest_limit-1)
+                        if ret != 0: break
+            elif type(o) is ExtType if strict_types else isinstance(o, ExtType):
+                # This should be before Tuple because ExtType is namedtuple.
+                longval = o.code
+                rawval = o.data
+                L = len(o.data)
+                if L > ITEM_LIMIT:
+                    raise ValueError("EXT data is too large")
+                ret = msgpack_pack_ext(&self.pk, longval, L)
+                ret = msgpack_pack_raw_body(&self.pk, rawval, L)
+            elif PyList_CheckExact(o) if strict_types else (PyTuple_Check(o) or PyList_Check(o)):
+                L = len(o)
+                if L > ITEM_LIMIT:
+                    raise ValueError("list is too large")
+                ret = msgpack_pack_array(&self.pk, L)
+                if ret == 0:
+                    for v in o:
+                        ret = self._pack(v, nest_limit-1)
+                        if ret != 0: break
+            elif PyMemoryView_Check(o):
+                if PyObject_GetBuffer(o, &view, PyBUF_SIMPLE) != 0:
+                    raise ValueError("could not get buffer for memoryview")
+                L = view.len
+                if L > ITEM_LIMIT:
+                    PyBuffer_Release(&view);
+                    raise ValueError("memoryview is too large")
+                ret = msgpack_pack_bin(&self.pk, L)
+                if ret == 0:
+                    ret = msgpack_pack_raw_body(&self.pk, <char*>view.buf, L)
+                PyBuffer_Release(&view);
+            elif not default_used and self._default:
+                o = self._default(o)
+                default_used = 1
+                continue
+            else:
+                PyErr_Format(TypeError, b"can not serialize '%.200s' object", Py_TYPE(o).tp_name)
+            return ret
+
+    cpdef pack(self, object obj):
+        cdef int ret
+        try:
+            ret = self._pack(obj, DEFAULT_RECURSE_LIMIT)
+        except:
+            self.pk.length = 0
+            raise
+        if ret:  # should not happen.
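+            # A nonzero status at this point means the C-level packer failed
+            # without raising a Python exception, which indicates an internal
+            # bug rather than bad input.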
+            raise RuntimeError("internal error")
+        if self.autoreset:
+            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+            self.pk.length = 0
+            return buf
+
+    def pack_ext_type(self, typecode, data):
+        msgpack_pack_ext(&self.pk, typecode, len(data))
+        msgpack_pack_raw_body(&self.pk, data, len(data))
+
+    def pack_array_header(self, long long size):
+        if size > ITEM_LIMIT:
+            raise ValueError
+        cdef int ret = msgpack_pack_array(&self.pk, size)
+        if ret == -1:
+            raise MemoryError
+        elif ret:  # should not happen
+            raise TypeError
+        if self.autoreset:
+            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+            self.pk.length = 0
+            return buf
+
+    def pack_map_header(self, long long size):
+        if size > ITEM_LIMIT:
+            raise ValueError
+        cdef int ret = msgpack_pack_map(&self.pk, size)
+        if ret == -1:
+            raise MemoryError
+        elif ret:  # should not happen
+            raise TypeError
+        if self.autoreset:
+            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+            self.pk.length = 0
+            return buf
+
+    def pack_map_pairs(self, object pairs):
+        """
+        Pack *pairs* as msgpack map type.
+
+        *pairs* should be a sequence of pairs.
+        (`len(pairs)` and `for k, v in pairs:` should be supported.)
+        """
+        cdef int ret = msgpack_pack_map(&self.pk, len(pairs))
+        if ret == 0:
+            for k, v in pairs:
+                ret = self._pack(k)
+                if ret != 0: break
+                ret = self._pack(v)
+                if ret != 0: break
+        if ret == -1:
+            raise MemoryError
+        elif ret:  # should not happen
+            raise TypeError
+        if self.autoreset:
+            buf = PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+            self.pk.length = 0
+            return buf
+
+    def reset(self):
+        """Reset internal buffer.
+
+        This method is useful only when autoreset=False.
+        """
+        self.pk.length = 0
+
+    def bytes(self):
+        """Return internal buffer contents as bytes object"""
+        return PyBytes_FromStringAndSize(self.pk.buf, self.pk.length)
+
+    def getbuffer(self):
+        """Return view of internal buffer."""
+        return buff_to_buff(self.pk.buf, self.pk.length)
diff --git a/ddtrace/vendor/msgpack/_unpacker.pyx b/ddtrace/vendor/msgpack/_unpacker.pyx
new file mode 100644
index 0000000000..5239ba7bc0
--- /dev/null
+++ b/ddtrace/vendor/msgpack/_unpacker.pyx
@@ -0,0 +1,569 @@
+# coding: utf-8
+
+from cpython cimport *
+
+cdef extern from "Python.h":
+    ctypedef struct PyObject
+    cdef int PyObject_AsReadBuffer(object o, const void** buff, Py_ssize_t* buf_len) except -1
+    object PyMemoryView_GetContiguous(object obj, int buffertype, char order)
+
+from libc.stdlib cimport *
+from libc.string cimport *
+from libc.limits cimport *
+ctypedef unsigned long long uint64_t
+
+from ddtrace.vendor.msgpack.exceptions import (
+    BufferFull,
+    OutOfData,
+    ExtraData,
+    FormatError,
+    StackError,
+)
+from ddtrace.vendor.msgpack import ExtType
+
+
+cdef extern from "unpack.h":
+    ctypedef struct msgpack_user:
+        bint use_list
+        bint raw
+        bint has_pairs_hook  # call object_hook with k-v pairs
+        bint strict_map_key
+        PyObject* object_hook
+        PyObject* list_hook
+        PyObject* ext_hook
+        char *encoding
+        char *unicode_errors
+        Py_ssize_t max_str_len
+        Py_ssize_t max_bin_len
+        Py_ssize_t max_array_len
+        Py_ssize_t max_map_len
+        Py_ssize_t max_ext_len
+
+    ctypedef struct unpack_context:
+        msgpack_user user
+        PyObject* obj
+        Py_ssize_t count
+
+    ctypedef int (*execute_fn)(unpack_context* ctx, const char* data,
+                               Py_ssize_t len, Py_ssize_t* off) except? -1
+    execute_fn unpack_construct
+    execute_fn unpack_skip
+    execute_fn read_array_header
+    execute_fn read_map_header
+    void unpack_init(unpack_context* ctx)
+    object unpack_data(unpack_context* ctx)
+    void unpack_clear(unpack_context* ctx)
+
+cdef inline init_ctx(unpack_context *ctx,
+                     object object_hook, object object_pairs_hook,
+                     object list_hook, object ext_hook,
+                     bint use_list, bint raw, bint strict_map_key,
+                     const char* encoding, const char* unicode_errors,
+                     Py_ssize_t max_str_len, Py_ssize_t max_bin_len,
+                     Py_ssize_t max_array_len, Py_ssize_t max_map_len,
+                     Py_ssize_t max_ext_len):
+    unpack_init(ctx)
+    ctx.user.use_list = use_list
+    ctx.user.raw = raw
+    ctx.user.strict_map_key = strict_map_key
+    ctx.user.object_hook = ctx.user.list_hook = <PyObject*>NULL
+    ctx.user.max_str_len = max_str_len
+    ctx.user.max_bin_len = max_bin_len
+    ctx.user.max_array_len = max_array_len
+    ctx.user.max_map_len = max_map_len
+    ctx.user.max_ext_len = max_ext_len
+
+    if object_hook is not None and object_pairs_hook is not None:
+        raise TypeError("object_pairs_hook and object_hook are mutually exclusive.")
+
+    if object_hook is not None:
+        if not PyCallable_Check(object_hook):
+            raise TypeError("object_hook must be a callable.")
+        ctx.user.object_hook = <PyObject*>object_hook
+
+    if object_pairs_hook is None:
+        ctx.user.has_pairs_hook = False
+    else:
+        if not PyCallable_Check(object_pairs_hook):
+            raise TypeError("object_pairs_hook must be a callable.")
+        ctx.user.object_hook = <PyObject*>object_pairs_hook
+        ctx.user.has_pairs_hook = True
+
+    if list_hook is not None:
+        if not PyCallable_Check(list_hook):
+            raise TypeError("list_hook must be a callable.")
+        ctx.user.list_hook = <PyObject*>list_hook
+
+    if ext_hook is not None:
+        if not PyCallable_Check(ext_hook):
+            raise TypeError("ext_hook must be a callable.")
+        ctx.user.ext_hook = <PyObject*>ext_hook
+
+    ctx.user.encoding = encoding
+    ctx.user.unicode_errors = unicode_errors
+
+def default_read_extended_type(typecode, data):
+    raise NotImplementedError("Cannot decode extended type with typecode=%d" % typecode)
+
+cdef inline int get_data_from_buffer(object obj,
+                                     Py_buffer *view,
+                                     char **buf,
+                                     Py_ssize_t *buffer_len,
+                                     int *new_protocol) except 0:
+    cdef object contiguous
+    cdef Py_buffer tmp
+    if PyObject_CheckBuffer(obj):
+        new_protocol[0] = 1
+        if PyObject_GetBuffer(obj, view, PyBUF_FULL_RO) == -1:
+            raise
+        if view.itemsize != 1:
+            PyBuffer_Release(view)
+            raise BufferError("cannot unpack from multi-byte object")
+        if PyBuffer_IsContiguous(view, b'A') == 0:
+            PyBuffer_Release(view)
+            # create a contiguous copy and get buffer
+            contiguous = PyMemoryView_GetContiguous(obj, PyBUF_READ, b'C')
+            PyObject_GetBuffer(contiguous, view, PyBUF_SIMPLE)
+            # view must hold the only reference to contiguous,
+            # so memory is freed when view is released
+            Py_DECREF(contiguous)
+        buffer_len[0] = view.len
+        buf[0] = <char*>view.buf
+        return 1
+    else:
+        new_protocol[0] = 0
+        if PyObject_AsReadBuffer(obj, <const void**>buf, buffer_len) == -1:
+            raise BufferError("could not get memoryview")
+        PyErr_WarnEx(RuntimeWarning,
+                     "using old buffer interface to unpack %s; "
+                     "this leads to unpacking errors if slicing is used and "
+                     "will be removed in a future version" % type(obj),
+                     1)
+        return 1
+
+def unpackb(object packed, object object_hook=None, object list_hook=None,
+            bint use_list=True, bint raw=True, bint strict_map_key=False,
+            encoding=None, unicode_errors=None,
+            object_pairs_hook=None, ext_hook=ExtType,
+            Py_ssize_t max_str_len=-1,
+            Py_ssize_t max_bin_len=-1,
+            Py_ssize_t max_array_len=-1,
+            Py_ssize_t max_map_len=-1,
+            Py_ssize_t max_ext_len=-1):
+    """
+    Unpack *packed* and return the unpacked object.
+
+    Raises ``ExtraData`` when *packed* contains extra bytes.
+    Raises ``ValueError`` when *packed* is incomplete.
+    Raises ``FormatError`` when *packed* is not valid msgpack.
+    Raises ``StackError`` when *packed* contains too deeply nested data.
+    Other exceptions can be raised during unpacking.
+
+    See :class:`Unpacker` for options.
+
+    *max_xxx_len* options are configured automatically from ``len(packed)``.
+    """
+    cdef unpack_context ctx
+    cdef Py_ssize_t off = 0
+    cdef int ret
+
+    cdef Py_buffer view
+    cdef char* buf = NULL
+    cdef Py_ssize_t buf_len
+    cdef const char* cenc = NULL
+    cdef const char* cerr = NULL
+    cdef int new_protocol = 0
+
+    if encoding is not None:
+        PyErr_WarnEx(DeprecationWarning, "encoding is deprecated; use raw=False instead.", 1)
+        cenc = encoding
+
+    if unicode_errors is not None:
+        cerr = unicode_errors
+
+    get_data_from_buffer(packed, &view, &buf, &buf_len, &new_protocol)
+
+    if max_str_len == -1:
+        max_str_len = buf_len
+    if max_bin_len == -1:
+        max_bin_len = buf_len
+    if max_array_len == -1:
+        max_array_len = buf_len
+    if max_map_len == -1:
+        max_map_len = buf_len//2
+    if max_ext_len == -1:
+        max_ext_len = buf_len
+
+    try:
+        init_ctx(&ctx, object_hook, object_pairs_hook, list_hook, ext_hook,
+                 use_list, raw, strict_map_key, cenc, cerr,
+                 max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len)
+        ret = unpack_construct(&ctx, buf, buf_len, &off)
+    finally:
+        if new_protocol:
+            PyBuffer_Release(&view);
+
+    if ret == 1:
+        obj = unpack_data(&ctx)
+        if off < buf_len:
+            raise ExtraData(obj, PyBytes_FromStringAndSize(buf+off, buf_len-off))
+        return obj
+    unpack_clear(&ctx)
+    if ret == 0:
+        raise ValueError("Unpack failed: incomplete input")
+    elif ret == -2:
+        raise FormatError
+    elif ret == -3:
+        raise StackError
+    raise ValueError("Unpack failed: error = %d" % (ret,))
+
+
+def unpack(object stream, **kwargs):
+    PyErr_WarnEx(
+        DeprecationWarning,
+        "Calling the implementation's unpack() directly is deprecated; use msgpack.unpack() or unpackb() instead.", 1)
+    data = stream.read()
+    return unpackb(data, **kwargs)
+
+
+cdef class Unpacker(object):
+    """Streaming unpacker.
+
+    Arguments:
+
+    :param file_like:
+        File-like object having `.read(n)` method.
+        If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.
+
+    :param int read_size:
+        Used as `file_like.read(read_size)`. (default: `min(1024**2, max_buffer_size)`)
+
+    :param bool use_list:
+        If true, unpack msgpack array to Python list.
+        Otherwise, unpack to Python tuple. (default: True)
+
+    :param bool raw:
+        If true, unpack msgpack raw to Python bytes (default).
+        Otherwise, unpack to Python str (or unicode on Python 2) by decoding
+        with UTF-8 encoding (recommended).
+        Currently the default is true, but it will be changed to false in the
+        near future, so you should specify it explicitly to keep backward
+        compatibility.
+
+        The deprecated *encoding* option overrides this option.
+
+    :param bool strict_map_key:
+        If true, only str or bytes are accepted for map (dict) keys.
+        It is False by default for backward compatibility, but will become
+        True from msgpack 1.0.
+
+    :param callable object_hook:
+        When specified, it should be callable.
+        Unpacker calls it with a dict argument after unpacking msgpack map.
+        (See also simplejson)
+
+    :param callable object_pairs_hook:
+        When specified, it should be callable.
+        Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
+        (See also simplejson)
+
+    :param int max_buffer_size:
+        Limits the size of data waiting to be unpacked.  0 means the system's
+        INT_MAX (default).
+        Raises `BufferFull` when the amount of buffered data exceeds this limit.
+        You should set this parameter when unpacking data from an untrusted source.
+
+    :param int max_str_len:
+        Deprecated, use *max_buffer_size* instead.
+        Limits max length of str. (default: max_buffer_size or 1024*1024)
+
+    :param int max_bin_len:
+        Deprecated, use *max_buffer_size* instead.
+        Limits max length of bin. (default: max_buffer_size or 1024*1024)
+
+    :param int max_array_len:
+        Limits max length of array. (default: max_buffer_size or 128*1024)
+
+    :param int max_map_len:
+        Limits max length of map. (default: max_buffer_size//2 or 32*1024)
+
+    :param int max_ext_len:
+        Deprecated, use *max_buffer_size* instead.
+        Limits max size of ext type. (default: max_buffer_size or 1024*1024)
+
+    :param str encoding:
+        Deprecated, use ``raw=False`` instead.
+        Encoding used for decoding msgpack raw.
+        If it is None (default), msgpack raw is deserialized to Python bytes.
+
+    :param str unicode_errors:
+        Error handler used for decoding str type. (default: `'strict'`)
+
+
+    Example of streaming deserialization from a file-like object::
+
+        unpacker = Unpacker(file_like, raw=False, max_buffer_size=10*1024*1024)
+        for o in unpacker:
+            process(o)
+
+    Example of streaming deserialization from a socket::
+
+        unpacker = Unpacker(raw=False, max_buffer_size=10*1024*1024)
+        while True:
+            buf = sock.recv(1024**2)
+            if not buf:
+                break
+            unpacker.feed(buf)
+            for o in unpacker:
+                process(o)
+
+    Raises ``ExtraData`` when *packed* contains extra bytes.
+    Raises ``OutOfData`` when *packed* is incomplete.
+    Raises ``FormatError`` when *packed* is not valid msgpack.
+    Raises ``StackError`` when *packed* contains too deeply nested data.
+    Other exceptions can be raised during unpacking.
+    """
+    cdef unpack_context ctx
+    cdef char* buf
+    cdef Py_ssize_t buf_size, buf_head, buf_tail
+    cdef object file_like
+    cdef object file_like_read
+    cdef Py_ssize_t read_size
+    # To maintain refcnt.
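+    # The unpack_context above stores only borrowed PyObject* pointers to the
+    # hook callables, so owning references are kept on the instance to keep
+    # those objects alive for the lifetime of the Unpacker.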
+    cdef unpack_context ctx
+    cdef char* buf
+    cdef Py_ssize_t buf_size, buf_head, buf_tail
+    cdef object file_like
+    cdef object file_like_read
+    cdef Py_ssize_t read_size
+    # To maintain refcnt.
+    cdef object object_hook, object_pairs_hook, list_hook, ext_hook
+    cdef object encoding, unicode_errors
+    cdef Py_ssize_t max_buffer_size
+    cdef uint64_t stream_offset
+
+    def __cinit__(self):
+        self.buf = NULL
+
+    def __dealloc__(self):
+        PyMem_Free(self.buf)
+        self.buf = NULL
+
+    def __init__(self, file_like=None, Py_ssize_t read_size=0,
+                 bint use_list=True, bint raw=True, bint strict_map_key=False,
+                 object object_hook=None, object object_pairs_hook=None, object list_hook=None,
+                 encoding=None, unicode_errors=None, Py_ssize_t max_buffer_size=0,
+                 object ext_hook=ExtType,
+                 Py_ssize_t max_str_len=-1,
+                 Py_ssize_t max_bin_len=-1,
+                 Py_ssize_t max_array_len=-1,
+                 Py_ssize_t max_map_len=-1,
+                 Py_ssize_t max_ext_len=-1):
+        cdef const char *cenc = NULL
+        cdef const char *cerr = NULL
+
+        self.object_hook = object_hook
+        self.object_pairs_hook = object_pairs_hook
+        self.list_hook = list_hook
+        self.ext_hook = ext_hook
+
+        self.file_like = file_like
+        if file_like:
+            self.file_like_read = file_like.read
+            if not PyCallable_Check(self.file_like_read):
+                raise TypeError("`file_like.read` must be a callable.")
+
+        if max_str_len == -1:
+            max_str_len = max_buffer_size or 1024*1024
+        if max_bin_len == -1:
+            max_bin_len = max_buffer_size or 1024*1024
+        if max_array_len == -1:
+            max_array_len = max_buffer_size or 128*1024
+        if max_map_len == -1:
+            max_map_len = max_buffer_size//2 or 32*1024
+        if max_ext_len == -1:
+            max_ext_len = max_buffer_size or 1024*1024
+
+        if not max_buffer_size:
+            max_buffer_size = INT_MAX
+        if read_size > max_buffer_size:
+            raise ValueError("read_size should be less than or equal to max_buffer_size")
+        if not read_size:
+            read_size = min(max_buffer_size, 1024**2)
+        self.max_buffer_size = max_buffer_size
+        self.read_size = read_size
+        self.buf = <char*>PyMem_Malloc(read_size)
+        if self.buf == NULL:
+            raise MemoryError("Unable to allocate internal buffer.")
+        self.buf_size = read_size
+        self.buf_head = 0
+        self.buf_tail = 0
+        self.stream_offset = 0
+
+        if encoding is not None:
+            PyErr_WarnEx(DeprecationWarning, "encoding is deprecated; use raw=False instead.", 1)
+            self.encoding = encoding
+            cenc = encoding
+
+        if unicode_errors is not None:
+            self.unicode_errors = unicode_errors
+            cerr = unicode_errors
+
+        init_ctx(&self.ctx, object_hook, object_pairs_hook, list_hook,
+                 ext_hook, use_list, raw, strict_map_key, cenc, cerr,
+                 max_str_len, max_bin_len, max_array_len,
+                 max_map_len, max_ext_len)
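
The header-reading helpers defined further down (read_array_header, read_map_header, tell) can drive manual, element-at-a-time decoding. A hedged sketch of that pattern, again assuming the vendored import path:

    from ddtrace.vendor import msgpack

    unpacker = msgpack.Unpacker(raw=False)
    unpacker.feed(msgpack.packb([1, 2, 3], use_bin_type=True))

    n = unpacker.read_array_header()               # fixarray header -> 3
    assert [unpacker.unpack() for _ in range(n)] == [1, 2, 3]
    assert unpacker.tell() == 4                    # 1 header byte + 3 fixint bytes consumed
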
+    def feed(self, object next_bytes):
+        """Append `next_bytes` to internal buffer."""
+        cdef Py_buffer pybuff
+        cdef int new_protocol = 0
+        cdef char* buf
+        cdef Py_ssize_t buf_len
+
+        if self.file_like is not None:
+            raise AssertionError(
+                "unpacker.feed() cannot be used with `file_like`.")
+
+        get_data_from_buffer(next_bytes, &pybuff, &buf, &buf_len, &new_protocol)
+        try:
+            self.append_buffer(buf, buf_len)
+        finally:
+            if new_protocol:
+                PyBuffer_Release(&pybuff)
+
+    cdef append_buffer(self, void* _buf, Py_ssize_t _buf_len):
+        cdef:
+            char* buf = self.buf
+            char* new_buf
+            Py_ssize_t head = self.buf_head
+            Py_ssize_t tail = self.buf_tail
+            Py_ssize_t buf_size = self.buf_size
+            Py_ssize_t new_size
+
+        if tail + _buf_len > buf_size:
+            if ((tail - head) + _buf_len) <= buf_size:
+                # move to front.
+                memmove(buf, buf + head, tail - head)
+                tail -= head
+                head = 0
+            else:
+                # expand buffer.
+                new_size = (tail-head) + _buf_len
+                if new_size > self.max_buffer_size:
+                    raise BufferFull
+                new_size = min(new_size*2, self.max_buffer_size)
+                new_buf = <char*>PyMem_Malloc(new_size)
+                if new_buf == NULL:
+                    # self.buf still holds old buffer and will be freed during
+                    # obj destruction
+                    raise MemoryError("Unable to enlarge internal buffer.")
+                memcpy(new_buf, buf + head, tail - head)
+                PyMem_Free(buf)
+
+                buf = new_buf
+                buf_size = new_size
+                tail -= head
+                head = 0
+
+        memcpy(buf + tail, <char*>(_buf), _buf_len)
+        self.buf = buf
+        self.buf_head = head
+        self.buf_size = buf_size
+        self.buf_tail = tail + _buf_len
+
+    cdef read_from_file(self):
+        next_bytes = self.file_like_read(
+            min(self.read_size,
+                self.max_buffer_size - (self.buf_tail - self.buf_head)
+                ))
+        if next_bytes:
+            self.append_buffer(PyBytes_AsString(next_bytes), PyBytes_Size(next_bytes))
+        else:
+            self.file_like = None
+
+    cdef object _unpack(self, execute_fn execute, bint iter=0):
+        cdef int ret
+        cdef object obj
+        cdef Py_ssize_t prev_head
+
+        if self.buf_head >= self.buf_tail and self.file_like is not None:
+            self.read_from_file()
+
+        while 1:
+            prev_head = self.buf_head
+            if prev_head >= self.buf_tail:
+                if iter:
+                    raise StopIteration("No more data to unpack.")
+                else:
+                    raise OutOfData("No more data to unpack.")
+
+            ret = execute(&self.ctx, self.buf, self.buf_tail, &self.buf_head)
+            self.stream_offset += self.buf_head - prev_head
+
+            if ret == 1:
+                obj = unpack_data(&self.ctx)
+                unpack_init(&self.ctx)
+                return obj
+            elif ret == 0:
+                if self.file_like is not None:
+                    self.read_from_file()
+                    continue
+                if iter:
+                    raise StopIteration("No more data to unpack.")
+                else:
+                    raise OutOfData("No more data to unpack.")
+            elif ret == -2:
+                raise FormatError
+            elif ret == -3:
+                raise StackError
+            else:
+                raise ValueError("Unpack failed: error = %d" % (ret,))
+
+    def read_bytes(self, Py_ssize_t nbytes):
+        """Read a specified number of raw bytes from the stream"""
+        cdef Py_ssize_t nread
+        nread = min(self.buf_tail - self.buf_head, nbytes)
+        ret = PyBytes_FromStringAndSize(self.buf + self.buf_head, nread)
+        self.buf_head += nread
+        if len(ret) < nbytes and self.file_like is not None:
+            ret += self.file_like.read(nbytes - len(ret))
+        return ret
+
+    def unpack(self):
+        """Unpack one object
+
+        Raises `OutOfData` when there are no more bytes to unpack.
+        """
+        return self._unpack(unpack_construct)
+
+    def skip(self):
+        """Read and ignore one object, returning None
+
+        Raises `OutOfData` when there are no more bytes to unpack.
+        """
+        return self._unpack(unpack_skip)
+
+    def read_array_header(self):
+        """Assuming the next object is an array, return its size n, such that
+        the next n unpack() calls will iterate over its contents.
+
+        Raises `OutOfData` when there are no more bytes to unpack.
+        """
+        return self._unpack(read_array_header)
+
+    def read_map_header(self):
+        """Assuming the next object is a map, return its size n, such that the
+        next n * 2 unpack() calls will iterate over its key-value pairs.
+
+        Raises `OutOfData` when there are no more bytes to unpack.
+        """
+        return self._unpack(read_map_header)
+
+    def tell(self):
+        return self.stream_offset
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        return self._unpack(unpack_construct, 1)
+
+    # for debug.
+ #def _buf(self): + # return PyString_FromStringAndSize(self.buf, self.buf_tail) + + #def _off(self): + # return self.buf_head diff --git a/ddtrace/vendor/msgpack/_version.py b/ddtrace/vendor/msgpack/_version.py new file mode 100644 index 0000000000..926c5e7b02 --- /dev/null +++ b/ddtrace/vendor/msgpack/_version.py @@ -0,0 +1 @@ +version = (0, 6, 1) diff --git a/ddtrace/vendor/msgpack/buff_converter.h b/ddtrace/vendor/msgpack/buff_converter.h new file mode 100644 index 0000000000..bc7227ae9d --- /dev/null +++ b/ddtrace/vendor/msgpack/buff_converter.h @@ -0,0 +1,28 @@ +#include "Python.h" + +/* cython does not support this preprocessor check => write it in raw C */ +#if PY_MAJOR_VERSION == 2 +static PyObject * +buff_to_buff(char *buff, Py_ssize_t size) +{ + return PyBuffer_FromMemory(buff, size); +} + +#elif (PY_MAJOR_VERSION == 3) && (PY_MINOR_VERSION >= 3) +static PyObject * +buff_to_buff(char *buff, Py_ssize_t size) +{ + return PyMemoryView_FromMemory(buff, size, PyBUF_READ); +} +#else +static PyObject * +buff_to_buff(char *buff, Py_ssize_t size) +{ + Py_buffer pybuf; + if (PyBuffer_FillInfo(&pybuf, NULL, buff, size, 1, PyBUF_FULL_RO) == -1) { + return NULL; + } + + return PyMemoryView_FromBuffer(&pybuf); +} +#endif diff --git a/ddtrace/vendor/msgpack/exceptions.py b/ddtrace/vendor/msgpack/exceptions.py new file mode 100644 index 0000000000..d6d2615cfd --- /dev/null +++ b/ddtrace/vendor/msgpack/exceptions.py @@ -0,0 +1,48 @@ +class UnpackException(Exception): + """Base class for some exceptions raised while unpacking. + + NOTE: unpack may raise exception other than subclass of + UnpackException. If you want to catch all error, catch + Exception instead. + """ + + +class BufferFull(UnpackException): + pass + + +class OutOfData(UnpackException): + pass + + +class FormatError(ValueError, UnpackException): + """Invalid msgpack format""" + + +class StackError(ValueError, UnpackException): + """Too nested""" + + +# Deprecated. Use ValueError instead +UnpackValueError = ValueError + + +class ExtraData(UnpackValueError): + """ExtraData is raised when there is trailing data. + + This exception is raised while only one-shot (not streaming) + unpack. + """ + + def __init__(self, unpacked, extra): + self.unpacked = unpacked + self.extra = extra + + def __str__(self): + return "unpack(b) received extra data." + + +# Deprecated. Use Exception instead to catch all exception during packing. +PackException = Exception +PackValueError = ValueError +PackOverflowError = OverflowError diff --git a/ddtrace/vendor/msgpack/fallback.py b/ddtrace/vendor/msgpack/fallback.py new file mode 100644 index 0000000000..3836e830b8 --- /dev/null +++ b/ddtrace/vendor/msgpack/fallback.py @@ -0,0 +1,1027 @@ +"""Fallback pure Python implementation of msgpack""" + +import sys +import struct +import warnings + + +if sys.version_info[0] == 2: + PY2 = True + int_types = (int, long) + def dict_iteritems(d): + return d.iteritems() +else: + PY2 = False + int_types = int + unicode = str + xrange = range + def dict_iteritems(d): + return d.items() + +if sys.version_info < (3, 5): + # Ugly hack... + RecursionError = RuntimeError + + def _is_recursionerror(e): + return len(e.args) == 1 and isinstance(e.args[0], str) and \ + e.args[0].startswith('maximum recursion depth exceeded') +else: + def _is_recursionerror(e): + return True + +if hasattr(sys, 'pypy_version_info'): + # cStringIO is slow on PyPy, StringIO is faster. However: PyPy's own + # StringBuilder is fastest. 
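
Because FormatError and StackError above multiply inherit from both ValueError and UnpackException, callers can catch malformed input at either level of the hierarchy. A hedged sketch, assuming the vendored package exposes the standard top-level unpackb:

    from ddtrace.vendor import msgpack
    from ddtrace.vendor.msgpack.exceptions import FormatError, UnpackException

    try:
        msgpack.unpackb(b"\xc1")  # 0xc1 is never used by the msgpack spec
    except FormatError:
        pass

    try:
        msgpack.unpackb(b"\xc1")
    except (UnpackException, ValueError):  # either ancestor catches it
        pass
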
+    from __pypy__ import newlist_hint
+    try:
+        from __pypy__.builders import BytesBuilder as StringBuilder
+    except ImportError:
+        from __pypy__.builders import StringBuilder
+    USING_STRINGBUILDER = True
+    class StringIO(object):
+        def __init__(self, s=b''):
+            if s:
+                self.builder = StringBuilder(len(s))
+                self.builder.append(s)
+            else:
+                self.builder = StringBuilder()
+        def write(self, s):
+            if isinstance(s, memoryview):
+                s = s.tobytes()
+            elif isinstance(s, bytearray):
+                s = bytes(s)
+            self.builder.append(s)
+        def getvalue(self):
+            return self.builder.build()
+else:
+    USING_STRINGBUILDER = False
+    from io import BytesIO as StringIO
+    newlist_hint = lambda size: []
+
+
+from .exceptions import (
+    BufferFull,
+    OutOfData,
+    ExtraData,
+    FormatError,
+    StackError,
+)
+
+from . import ExtType
+
+
+EX_SKIP = 0
+EX_CONSTRUCT = 1
+EX_READ_ARRAY_HEADER = 2
+EX_READ_MAP_HEADER = 3
+
+TYPE_IMMEDIATE = 0
+TYPE_ARRAY = 1
+TYPE_MAP = 2
+TYPE_RAW = 3
+TYPE_BIN = 4
+TYPE_EXT = 5
+
+DEFAULT_RECURSE_LIMIT = 511
+
+
+def _check_type_strict(obj, t, type=type, tuple=tuple):
+    if type(t) is tuple:
+        return type(obj) in t
+    else:
+        return type(obj) is t
+
+
+def _get_data_from_buffer(obj):
+    try:
+        view = memoryview(obj)
+    except TypeError:
+        # try to use legacy buffer protocol if 2.7, otherwise re-raise
+        if PY2:
+            view = memoryview(buffer(obj))
+            warnings.warn("using old buffer interface to unpack %s; "
+                          "this leads to unpacking errors if slicing is used and "
+                          "will be removed in a future version" % type(obj),
+                          RuntimeWarning, stacklevel=3)
+        else:
+            raise
+    if view.itemsize != 1:
+        raise ValueError("cannot unpack from multi-byte object")
+    return view
+
+
+def unpack(stream, **kwargs):
+    warnings.warn(
+        "Calling the implementation's unpack() directly is deprecated; use msgpack.unpack() or unpackb() instead.",
+        DeprecationWarning, stacklevel=2)
+    data = stream.read()
+    return unpackb(data, **kwargs)
+
+
+def unpackb(packed, **kwargs):
+    """
+    Unpack an object from `packed`.
+
+    Raises ``ExtraData`` when *packed* contains extra bytes.
+    Raises ``ValueError`` when *packed* is incomplete.
+    Raises ``FormatError`` when *packed* is not valid msgpack.
+    Raises ``StackError`` when *packed* contains too deeply nested data.
+    Other exceptions can be raised during unpacking.
+
+    See :class:`Unpacker` for options.
+    """
+    unpacker = Unpacker(None, max_buffer_size=len(packed), **kwargs)
+    unpacker.feed(packed)
+    try:
+        ret = unpacker._unpack()
+    except OutOfData:
+        raise ValueError("Unpack failed: incomplete input")
+    except RecursionError as e:
+        if _is_recursionerror(e):
+            raise StackError
+        raise
+    if unpacker._got_extradata():
+        raise ExtraData(ret, unpacker._get_extradata())
+    return ret
+
+
+if sys.version_info < (2, 7, 6):
+    def _unpack_from(f, b, o=0):
+        """Explicit typecast for legacy struct.unpack_from"""
+        return struct.unpack_from(f, bytes(b), o)
+else:
+    _unpack_from = struct.unpack_from
+
+
+class Unpacker(object):
+    """Streaming unpacker.
+
+    Arguments:
+
+    :param file_like:
+        File-like object having `.read(n)` method.
+        If specified, unpacker reads serialized data from it and :meth:`feed()` is not usable.
+
+    :param int read_size:
+        Used as `file_like.read(read_size)`. (default: `min(16*1024, max_buffer_size)`)
+
+    :param bool use_list:
+        If true, unpack msgpack array to Python list.
+        Otherwise, unpack to Python tuple. (default: True)
+
+    :param bool raw:
+        If true, unpack msgpack raw to Python bytes (default).
+        Otherwise, unpack to Python str (or unicode on Python 2) by decoding
+        with UTF-8 encoding (recommended).
+        The default is currently true, but it will change to false in the
+        near future, so specify it explicitly to keep backward compatibility.
+
+        The deprecated *encoding* option overrides this option.
+
+    :param bool strict_map_key:
+        If true, only str or bytes are accepted for map (dict) keys.
+        It is False by default for backward compatibility, but it will be
+        True from msgpack 1.0.
+
+    :param callable object_hook:
+        When specified, it should be callable.
+        Unpacker calls it with a dict argument after unpacking msgpack map.
+        (See also simplejson)
+
+    :param callable object_pairs_hook:
+        When specified, it should be callable.
+        Unpacker calls it with a list of key-value pairs after unpacking msgpack map.
+        (See also simplejson)
+
+    :param str encoding:
+        Encoding used for decoding msgpack raw.
+        If it is None (default), msgpack raw is deserialized to Python bytes.
+
+    :param str unicode_errors:
+        (deprecated) Used for decoding msgpack raw with *encoding*.
+        (default: `'strict'`)
+
+    :param int max_buffer_size:
+        Limits the size of data waiting to be unpacked. 0 means system's INT_MAX (default).
+        Raises a `BufferFull` exception when it is insufficient.
+        You should set this parameter when unpacking data from an untrusted source.
+
+    :param int max_str_len:
+        Deprecated, use *max_buffer_size* instead.
+        Limits max length of str. (default: max_buffer_size or 1024*1024)
+
+    :param int max_bin_len:
+        Deprecated, use *max_buffer_size* instead.
+        Limits max length of bin. (default: max_buffer_size or 1024*1024)
+
+    :param int max_array_len:
+        Limits max length of array.
+        (default: max_buffer_size or 128*1024)
+
+    :param int max_map_len:
+        Limits max length of map.
+        (default: max_buffer_size//2 or 32*1024)
+
+    :param int max_ext_len:
+        Deprecated, use *max_buffer_size* instead.
+        Limits max size of ext type. (default: max_buffer_size or 1024*1024)
+
+    Example of streaming deserialize from file-like object::
+
+        unpacker = Unpacker(file_like, raw=False, max_buffer_size=10*1024*1024)
+        for o in unpacker:
+            process(o)
+
+    Example of streaming deserialize from socket::
+
+        unpacker = Unpacker(raw=False, max_buffer_size=10*1024*1024)
+        while True:
+            buf = sock.recv(1024**2)
+            if not buf:
+                break
+            unpacker.feed(buf)
+            for o in unpacker:
+                process(o)
+
+    Raises ``ExtraData`` when *packed* contains extra bytes.
+    Raises ``OutOfData`` when *packed* is incomplete.
+    Raises ``FormatError`` when *packed* is not valid msgpack.
+    Raises ``StackError`` when *packed* contains too deeply nested data.
+    Other exceptions can be raised during unpacking.
+    """
+
+    def __init__(self, file_like=None, read_size=0, use_list=True, raw=True, strict_map_key=False,
+                 object_hook=None, object_pairs_hook=None, list_hook=None,
+                 encoding=None, unicode_errors=None, max_buffer_size=0,
+                 ext_hook=ExtType,
+                 max_str_len=-1,
+                 max_bin_len=-1,
+                 max_array_len=-1,
+                 max_map_len=-1,
+                 max_ext_len=-1):
+        if encoding is not None:
+            warnings.warn(
+                "encoding is deprecated; use raw=False instead.",
+                DeprecationWarning, stacklevel=2)
+
+        if unicode_errors is None:
+            unicode_errors = 'strict'
+
+        if file_like is None:
+            self._feeding = True
+        else:
+            if not callable(file_like.read):
+                raise TypeError("`file_like.read` must be callable")
+            self.file_like = file_like
+            self._feeding = False
+
+        #: array of bytes fed.
+        self._buffer = bytearray()
+        #: Current read position in the buffer.
+        self._buff_i = 0
+
+        # When Unpacker is used as an iterable, between the calls to next(),
+        # the buffer is not "consumed" completely, for efficiency's sake.
+        # Instead, it is done sloppily. To make sure we raise BufferFull at
+        # the correct moments, we have to keep track of how sloppy we were.
+        # Furthermore, when the buffer is incomplete (that is: in the case
+        # we raise an OutOfData) we need to roll back the buffer to the
+        # correct state, which _buf_checkpoint records.
+        self._buf_checkpoint = 0
+
+        if max_str_len == -1:
+            max_str_len = max_buffer_size or 1024*1024
+        if max_bin_len == -1:
+            max_bin_len = max_buffer_size or 1024*1024
+        if max_array_len == -1:
+            max_array_len = max_buffer_size or 128*1024
+        if max_map_len == -1:
+            max_map_len = max_buffer_size//2 or 32*1024
+        if max_ext_len == -1:
+            max_ext_len = max_buffer_size or 1024*1024
+
+        self._max_buffer_size = max_buffer_size or 2**31-1
+        if read_size > self._max_buffer_size:
+            raise ValueError("read_size must be smaller than max_buffer_size")
+        self._read_size = read_size or min(self._max_buffer_size, 16*1024)
+        self._raw = bool(raw)
+        self._strict_map_key = bool(strict_map_key)
+        self._encoding = encoding
+        self._unicode_errors = unicode_errors
+        self._use_list = use_list
+        self._list_hook = list_hook
+        self._object_hook = object_hook
+        self._object_pairs_hook = object_pairs_hook
+        self._ext_hook = ext_hook
+        self._max_str_len = max_str_len
+        self._max_bin_len = max_bin_len
+        self._max_array_len = max_array_len
+        self._max_map_len = max_map_len
+        self._max_ext_len = max_ext_len
+        self._stream_offset = 0
+
+        if list_hook is not None and not callable(list_hook):
+            raise TypeError('`list_hook` is not callable')
+        if object_hook is not None and not callable(object_hook):
+            raise TypeError('`object_hook` is not callable')
+        if object_pairs_hook is not None and not callable(object_pairs_hook):
+            raise TypeError('`object_pairs_hook` is not callable')
+        if object_hook is not None and object_pairs_hook is not None:
+            raise TypeError("object_pairs_hook and object_hook are mutually "
+                            "exclusive")
+        if not callable(ext_hook):
+            raise TypeError("`ext_hook` is not callable")
+
+    def feed(self, next_bytes):
+        assert self._feeding
+        view = _get_data_from_buffer(next_bytes)
+        if (len(self._buffer) - self._buff_i + len(view) > self._max_buffer_size):
+            raise BufferFull
+
+        # Strip buffer before checkpoint before reading file.
+        if self._buf_checkpoint > 0:
+            del self._buffer[:self._buf_checkpoint]
+            self._buff_i -= self._buf_checkpoint
+            self._buf_checkpoint = 0
+
+        # Use extend here: INPLACE_ADD += doesn't reliably typecast memoryview in jython
+        self._buffer.extend(view)
+
+    def _consume(self):
+        """ Gets rid of the used parts of the buffer. """
+        self._stream_offset += self._buff_i - self._buf_checkpoint
+        self._buf_checkpoint = self._buff_i
+
+    def _got_extradata(self):
+        return self._buff_i < len(self._buffer)
+
+    def _get_extradata(self):
+        return self._buffer[self._buff_i:]
+
+    def read_bytes(self, n):
+        return self._read(n)
+
+    def _read(self, n):
+        # (int) -> bytearray
+        self._reserve(n)
+        i = self._buff_i
+        self._buff_i = i+n
+        return self._buffer[i:i+n]
+
+    def _reserve(self, n):
+        remain_bytes = len(self._buffer) - self._buff_i - n
+
+        # Fast path: buffer has n bytes already
+        if remain_bytes >= 0:
+            return
+
+        if self._feeding:
+            self._buff_i = self._buf_checkpoint
+            raise OutOfData
+
+        # Strip buffer before checkpoint before reading file.
+        if self._buf_checkpoint > 0:
+            del self._buffer[:self._buf_checkpoint]
+            self._buff_i -= self._buf_checkpoint
+            self._buf_checkpoint = 0
+
+        # Read from file
+        remain_bytes = -remain_bytes
+        while remain_bytes > 0:
+            to_read_bytes = max(self._read_size, remain_bytes)
+            read_data = self.file_like.read(to_read_bytes)
+            if not read_data:
+                break
+            assert isinstance(read_data, bytes)
+            self._buffer += read_data
+            remain_bytes -= len(read_data)
+
+        if len(self._buffer) < n + self._buff_i:
+            self._buff_i = 0  # rollback
+            raise OutOfData
+
+    def _read_header(self, execute=EX_CONSTRUCT):
+        typ = TYPE_IMMEDIATE
+        n = 0
+        obj = None
+        self._reserve(1)
+        b = self._buffer[self._buff_i]
+        self._buff_i += 1
+        if b & 0b10000000 == 0:
+            obj = b
+        elif b & 0b11100000 == 0b11100000:
+            obj = -1 - (b ^ 0xff)
+        elif b & 0b11100000 == 0b10100000:
+            n = b & 0b00011111
+            typ = TYPE_RAW
+            if n > self._max_str_len:
+                raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len))
+            obj = self._read(n)
+        elif b & 0b11110000 == 0b10010000:
+            n = b & 0b00001111
+            typ = TYPE_ARRAY
+            if n > self._max_array_len:
+                raise ValueError("%s exceeds max_array_len(%s)" % (n, self._max_array_len))
+        elif b & 0b11110000 == 0b10000000:
+            n = b & 0b00001111
+            typ = TYPE_MAP
+            if n > self._max_map_len:
+                raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len))
+        elif b == 0xc0:
+            obj = None
+        elif b == 0xc2:
+            obj = False
+        elif b == 0xc3:
+            obj = True
+        elif b == 0xc4:
+            typ = TYPE_BIN
+            self._reserve(1)
+            n = self._buffer[self._buff_i]
+            self._buff_i += 1
+            if n > self._max_bin_len:
+                raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
+            obj = self._read(n)
+        elif b == 0xc5:
+            typ = TYPE_BIN
+            self._reserve(2)
+            n = _unpack_from(">H", self._buffer, self._buff_i)[0]
+            self._buff_i += 2
+            if n > self._max_bin_len:
+                raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
+            obj = self._read(n)
+        elif b == 0xc6:
+            typ = TYPE_BIN
+            self._reserve(4)
+            n = _unpack_from(">I", self._buffer, self._buff_i)[0]
+            self._buff_i += 4
+            if n > self._max_bin_len:
+                raise ValueError("%s exceeds max_bin_len(%s)" % (n, self._max_bin_len))
+            obj = self._read(n)
+        elif b == 0xc7:  # ext 8
+            typ = TYPE_EXT
+            self._reserve(2)
+            L, n = _unpack_from('Bb', self._buffer, self._buff_i)
+            self._buff_i += 2
+            if L > self._max_ext_len:
+                raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
+            obj = self._read(L)
+        elif b == 0xc8:  # ext 16
+            typ = TYPE_EXT
+            self._reserve(3)
+            L, n = _unpack_from('>Hb', self._buffer, self._buff_i)
+            self._buff_i += 3
+            if L > self._max_ext_len:
+                raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
+            obj = self._read(L)
+        elif b == 0xc9:  # ext 32
+            typ = TYPE_EXT
+            self._reserve(5)
+            L, n = _unpack_from('>Ib', self._buffer, self._buff_i)
+            self._buff_i += 5
+            if L > self._max_ext_len:
+                raise ValueError("%s exceeds max_ext_len(%s)" % (L, self._max_ext_len))
+            obj = self._read(L)
+        elif b == 0xca:
+            self._reserve(4)
+            obj = _unpack_from(">f", self._buffer, self._buff_i)[0]
+            self._buff_i += 4
+        elif b == 0xcb:
+            self._reserve(8)
+            obj = _unpack_from(">d", self._buffer, self._buff_i)[0]
+            self._buff_i += 8
+        elif b == 0xcc:
+            self._reserve(1)
+            obj = self._buffer[self._buff_i]
+            self._buff_i += 1
+        elif b == 0xcd:
+            self._reserve(2)
+            obj = _unpack_from(">H", self._buffer, self._buff_i)[0]
+            self._buff_i += 2
+        elif b == 0xce:
+            self._reserve(4)
+            obj = _unpack_from(">I", self._buffer, self._buff_i)[0]
+            self._buff_i += 4
+        elif b == 0xcf:
+            self._reserve(8)
+            obj = _unpack_from(">Q", self._buffer, self._buff_i)[0]
+            self._buff_i += 8
+        elif b == 0xd0:
+            self._reserve(1)
+            obj = _unpack_from("b", self._buffer, self._buff_i)[0]
+            self._buff_i += 1
+        elif b == 0xd1:
+            self._reserve(2)
+            obj = _unpack_from(">h", self._buffer, self._buff_i)[0]
+            self._buff_i += 2
+        elif b == 0xd2:
+            self._reserve(4)
+            obj = _unpack_from(">i", self._buffer, self._buff_i)[0]
+            self._buff_i += 4
+        elif b == 0xd3:
+            self._reserve(8)
+            obj = _unpack_from(">q", self._buffer, self._buff_i)[0]
+            self._buff_i += 8
+        elif b == 0xd4:  # fixext 1
+            typ = TYPE_EXT
+            if self._max_ext_len < 1:
+                raise ValueError("%s exceeds max_ext_len(%s)" % (1, self._max_ext_len))
+            self._reserve(2)
+            n, obj = _unpack_from("b1s", self._buffer, self._buff_i)
+            self._buff_i += 2
+        elif b == 0xd5:  # fixext 2
+            typ = TYPE_EXT
+            if self._max_ext_len < 2:
+                raise ValueError("%s exceeds max_ext_len(%s)" % (2, self._max_ext_len))
+            self._reserve(3)
+            n, obj = _unpack_from("b2s", self._buffer, self._buff_i)
+            self._buff_i += 3
+        elif b == 0xd6:  # fixext 4
+            typ = TYPE_EXT
+            if self._max_ext_len < 4:
+                raise ValueError("%s exceeds max_ext_len(%s)" % (4, self._max_ext_len))
+            self._reserve(5)
+            n, obj = _unpack_from("b4s", self._buffer, self._buff_i)
+            self._buff_i += 5
+        elif b == 0xd7:  # fixext 8
+            typ = TYPE_EXT
+            if self._max_ext_len < 8:
+                raise ValueError("%s exceeds max_ext_len(%s)" % (8, self._max_ext_len))
+            self._reserve(9)
+            n, obj = _unpack_from("b8s", self._buffer, self._buff_i)
+            self._buff_i += 9
+        elif b == 0xd8:  # fixext 16
+            typ = TYPE_EXT
+            if self._max_ext_len < 16:
+                raise ValueError("%s exceeds max_ext_len(%s)" % (16, self._max_ext_len))
+            self._reserve(17)
+            n, obj = _unpack_from("b16s", self._buffer, self._buff_i)
+            self._buff_i += 17
+        elif b == 0xd9:
+            typ = TYPE_RAW
+            self._reserve(1)
+            n = self._buffer[self._buff_i]
+            self._buff_i += 1
+            if n > self._max_str_len:
+                raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len))
+            obj = self._read(n)
+        elif b == 0xda:
+            typ = TYPE_RAW
+            self._reserve(2)
+            n, = _unpack_from(">H", self._buffer, self._buff_i)
+            self._buff_i += 2
+            if n > self._max_str_len:
+                raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len))
+            obj = self._read(n)
+        elif b == 0xdb:
+            typ = TYPE_RAW
+            self._reserve(4)
+            n, = _unpack_from(">I", self._buffer, self._buff_i)
+            self._buff_i += 4
+            if n > self._max_str_len:
+                raise ValueError("%s exceeds max_str_len(%s)" % (n, self._max_str_len))
+            obj = self._read(n)
+        elif b == 0xdc:
+            typ = TYPE_ARRAY
+            self._reserve(2)
+            n, = _unpack_from(">H", self._buffer, self._buff_i)
+            self._buff_i += 2
+            if n > self._max_array_len:
+                raise ValueError("%s exceeds max_array_len(%s)" % (n, self._max_array_len))
+        elif b == 0xdd:
+            typ = TYPE_ARRAY
+            self._reserve(4)
+            n, = _unpack_from(">I", self._buffer, self._buff_i)
+            self._buff_i += 4
+            if n > self._max_array_len:
+                raise ValueError("%s exceeds max_array_len(%s)" % (n, self._max_array_len))
+        elif b == 0xde:
+            self._reserve(2)
+            n, = _unpack_from(">H", self._buffer, self._buff_i)
+            self._buff_i += 2
+            if n > self._max_map_len:
+                raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len))
+            typ = TYPE_MAP
+        elif b == 0xdf:
+            self._reserve(4)
+            n, = _unpack_from(">I", self._buffer, self._buff_i)
+            self._buff_i += 4
+            if n > self._max_map_len:
+                raise ValueError("%s exceeds max_map_len(%s)" % (n, self._max_map_len))
+            typ = TYPE_MAP
+        else:
+            raise FormatError("Unknown header: 0x%x" % b)
+        return typ, n, obj
+    def _unpack(self, execute=EX_CONSTRUCT):
+        typ, n, obj = self._read_header(execute)
+
+        if execute == EX_READ_ARRAY_HEADER:
+            if typ != TYPE_ARRAY:
+                raise ValueError("Expected array")
+            return n
+        if execute == EX_READ_MAP_HEADER:
+            if typ != TYPE_MAP:
+                raise ValueError("Expected map")
+            return n
+        # TODO should we eliminate the recursion?
+        if typ == TYPE_ARRAY:
+            if execute == EX_SKIP:
+                for i in xrange(n):
+                    # TODO check whether we need to call `list_hook`
+                    self._unpack(EX_SKIP)
+                return
+            ret = newlist_hint(n)
+            for i in xrange(n):
+                ret.append(self._unpack(EX_CONSTRUCT))
+            if self._list_hook is not None:
+                ret = self._list_hook(ret)
+            # TODO is the interaction between `list_hook` and `use_list` ok?
+            return ret if self._use_list else tuple(ret)
+        if typ == TYPE_MAP:
+            if execute == EX_SKIP:
+                for i in xrange(n):
+                    # TODO check whether we need to call hooks
+                    self._unpack(EX_SKIP)
+                    self._unpack(EX_SKIP)
+                return
+            if self._object_pairs_hook is not None:
+                ret = self._object_pairs_hook(
+                    (self._unpack(EX_CONSTRUCT),
+                     self._unpack(EX_CONSTRUCT))
+                    for _ in xrange(n))
+            else:
+                ret = {}
+                for _ in xrange(n):
+                    key = self._unpack(EX_CONSTRUCT)
+                    if self._strict_map_key and type(key) not in (unicode, bytes):
+                        raise ValueError("%s is not allowed for map key" % str(type(key)))
+                    ret[key] = self._unpack(EX_CONSTRUCT)
+                if self._object_hook is not None:
+                    ret = self._object_hook(ret)
+            return ret
+        if execute == EX_SKIP:
+            return
+        if typ == TYPE_RAW:
+            if self._encoding is not None:
+                obj = obj.decode(self._encoding, self._unicode_errors)
+            elif self._raw:
+                obj = bytes(obj)
+            else:
+                obj = obj.decode('utf_8')
+            return obj
+        if typ == TYPE_EXT:
+            return self._ext_hook(n, bytes(obj))
+        if typ == TYPE_BIN:
+            return bytes(obj)
+        assert typ == TYPE_IMMEDIATE
+        return obj
+
+    def __iter__(self):
+        return self
+
+    def __next__(self):
+        try:
+            ret = self._unpack(EX_CONSTRUCT)
+            self._consume()
+            return ret
+        except OutOfData:
+            self._consume()
+            raise StopIteration
+        except RecursionError:
+            raise StackError
+
+    next = __next__
+
+    def skip(self):
+        self._unpack(EX_SKIP)
+        self._consume()
+
+    def unpack(self):
+        try:
+            ret = self._unpack(EX_CONSTRUCT)
+        except RecursionError:
+            raise StackError
+        self._consume()
+        return ret
+
+    def read_array_header(self):
+        ret = self._unpack(EX_READ_ARRAY_HEADER)
+        self._consume()
+        return ret
+
+    def read_map_header(self):
+        ret = self._unpack(EX_READ_MAP_HEADER)
+        self._consume()
+        return ret
+
+    def tell(self):
+        return self._stream_offset
+
+
+class Packer(object):
+    """
+    MessagePack Packer
+
+    Usage:
+
+        packer = Packer()
+        astream.write(packer.pack(a))
+        astream.write(packer.pack(b))
+
+    Packer's constructor has some keyword arguments:
+
+    :param callable default:
+        Convert user type to builtin type that Packer supports.
+        See also simplejson's document.
+
+    :param bool use_single_float:
+        Use single precision float type for float. (default: False)
+
+    :param bool autoreset:
+        Reset buffer after each pack and return its content as `bytes`. (default: True).
+        If set to false, use `bytes()` to get content and `.reset()` to clear buffer.
+
+    :param bool use_bin_type:
+        Use bin type introduced in msgpack spec 2.0 for bytes.
+        It also enables str8 type for unicode.
+
+    :param bool strict_types:
+        If set to true, types will be checked to be exact. Derived classes
+        from serializable types will not be serialized and will be
+        treated as unsupported type and forwarded to default.
+        Additionally tuples will not be serialized as lists.
+        This is useful when trying to implement accurate serialization
+        for Python types.
+
+    :param str encoding:
+        (deprecated) Convert unicode to bytes with this encoding. (default: 'utf-8')
+
+    :param str unicode_errors:
+        Error handler for encoding unicode. (default: 'strict')
+    """
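
A hedged sketch of the constructor options documented above, assuming the vendored package re-exports Packer at the top level as upstream msgpack does:

    from ddtrace.vendor import msgpack

    # autoreset=True (the default): each pack() call returns bytes.
    packer = msgpack.Packer(use_bin_type=True)
    frame = packer.pack({"a": 1}) + packer.pack([1, 2])

    class MyList(list):          # hypothetical subclass, for illustration only
        pass

    # With strict_types, a subclass is not packed as a list; it is routed
    # through `default` (here: plain list conversion).
    strict = msgpack.Packer(strict_types=True, default=list)
    strict.pack(MyList([1]))

    try:
        msgpack.Packer(strict_types=True).pack(MyList([1]))
    except TypeError:
        pass                     # without a default, the exact-type check rejects it
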
+    def __init__(self, default=None, encoding=None, unicode_errors=None,
+                 use_single_float=False, autoreset=True, use_bin_type=False,
+                 strict_types=False):
+        if encoding is None:
+            encoding = 'utf_8'
+        else:
+            warnings.warn(
+                "encoding is deprecated; use raw=False instead.",
+                DeprecationWarning, stacklevel=2)
+
+        if unicode_errors is None:
+            unicode_errors = 'strict'
+
+        self._strict_types = strict_types
+        self._use_float = use_single_float
+        self._autoreset = autoreset
+        self._use_bin_type = use_bin_type
+        self._encoding = encoding
+        self._unicode_errors = unicode_errors
+        self._buffer = StringIO()
+        if default is not None:
+            if not callable(default):
+                raise TypeError("default must be callable")
+        self._default = default
+
+    def _pack(self, obj, nest_limit=DEFAULT_RECURSE_LIMIT,
+              check=isinstance, check_type_strict=_check_type_strict):
+        default_used = False
+        if self._strict_types:
+            check = check_type_strict
+            list_types = list
+        else:
+            list_types = (list, tuple)
+        while True:
+            if nest_limit < 0:
+                raise ValueError("recursion limit exceeded")
+            if obj is None:
+                return self._buffer.write(b"\xc0")
+            if check(obj, bool):
+                if obj:
+                    return self._buffer.write(b"\xc3")
+                return self._buffer.write(b"\xc2")
+            if check(obj, int_types):
+                if 0 <= obj < 0x80:
+                    return self._buffer.write(struct.pack("B", obj))
+                if -0x20 <= obj < 0:
+                    return self._buffer.write(struct.pack("b", obj))
+                if 0x80 <= obj <= 0xff:
+                    return self._buffer.write(struct.pack("BB", 0xcc, obj))
+                if -0x80 <= obj < 0:
+                    return self._buffer.write(struct.pack(">Bb", 0xd0, obj))
+                if 0xff < obj <= 0xffff:
+                    return self._buffer.write(struct.pack(">BH", 0xcd, obj))
+                if -0x8000 <= obj < -0x80:
+                    return self._buffer.write(struct.pack(">Bh", 0xd1, obj))
+                if 0xffff < obj <= 0xffffffff:
+                    return self._buffer.write(struct.pack(">BI", 0xce, obj))
+                if -0x80000000 <= obj < -0x8000:
+                    return self._buffer.write(struct.pack(">Bi", 0xd2, obj))
+                if 0xffffffff < obj <= 0xffffffffffffffff:
+                    return self._buffer.write(struct.pack(">BQ", 0xcf, obj))
+                if -0x8000000000000000 <= obj < -0x80000000:
+                    return self._buffer.write(struct.pack(">Bq", 0xd3, obj))
+                if not default_used and self._default is not None:
+                    obj = self._default(obj)
+                    default_used = True
+                    continue
+                raise OverflowError("Integer value out of range")
+            if check(obj, (bytes, bytearray)):
+                n = len(obj)
+                if n >= 2**32:
+                    raise ValueError("%s is too large" % type(obj).__name__)
+                self._pack_bin_header(n)
+                return self._buffer.write(obj)
+            if check(obj, unicode):
+                if self._encoding is None:
+                    raise TypeError(
+                        "Can't encode unicode string: "
+                        "no encoding is specified")
+                obj = obj.encode(self._encoding, self._unicode_errors)
+                n = len(obj)
+                if n >= 2**32:
+                    raise ValueError("String is too large")
+                self._pack_raw_header(n)
+                return self._buffer.write(obj)
+            if check(obj, memoryview):
+                n = len(obj) * obj.itemsize
+                if n >= 2**32:
+                    raise ValueError("Memoryview is too large")
+                self._pack_bin_header(n)
+                return self._buffer.write(obj)
+            if check(obj, float):
+                if self._use_float:
+                    return self._buffer.write(struct.pack(">Bf", 0xca, obj))
+                return self._buffer.write(struct.pack(">Bd", 0xcb, obj))
+            if check(obj, ExtType):
+                code = obj.code
+                data = obj.data
+                assert isinstance(code, int)
+                assert
isinstance(data, bytes) + L = len(data) + if L == 1: + self._buffer.write(b'\xd4') + elif L == 2: + self._buffer.write(b'\xd5') + elif L == 4: + self._buffer.write(b'\xd6') + elif L == 8: + self._buffer.write(b'\xd7') + elif L == 16: + self._buffer.write(b'\xd8') + elif L <= 0xff: + self._buffer.write(struct.pack(">BB", 0xc7, L)) + elif L <= 0xffff: + self._buffer.write(struct.pack(">BH", 0xc8, L)) + else: + self._buffer.write(struct.pack(">BI", 0xc9, L)) + self._buffer.write(struct.pack("b", code)) + self._buffer.write(data) + return + if check(obj, list_types): + n = len(obj) + self._pack_array_header(n) + for i in xrange(n): + self._pack(obj[i], nest_limit - 1) + return + if check(obj, dict): + return self._pack_map_pairs(len(obj), dict_iteritems(obj), + nest_limit - 1) + if not default_used and self._default is not None: + obj = self._default(obj) + default_used = 1 + continue + raise TypeError("Cannot serialize %r" % (obj, )) + + def pack(self, obj): + try: + self._pack(obj) + except: + self._buffer = StringIO() # force reset + raise + if self._autoreset: + ret = self._buffer.getvalue() + self._buffer = StringIO() + return ret + + def pack_map_pairs(self, pairs): + self._pack_map_pairs(len(pairs), pairs) + if self._autoreset: + ret = self._buffer.getvalue() + self._buffer = StringIO() + return ret + + def pack_array_header(self, n): + if n >= 2**32: + raise ValueError + self._pack_array_header(n) + if self._autoreset: + ret = self._buffer.getvalue() + self._buffer = StringIO() + return ret + + def pack_map_header(self, n): + if n >= 2**32: + raise ValueError + self._pack_map_header(n) + if self._autoreset: + ret = self._buffer.getvalue() + self._buffer = StringIO() + return ret + + def pack_ext_type(self, typecode, data): + if not isinstance(typecode, int): + raise TypeError("typecode must have int type.") + if not 0 <= typecode <= 127: + raise ValueError("typecode should be 0-127") + if not isinstance(data, bytes): + raise TypeError("data must have bytes type") + L = len(data) + if L > 0xffffffff: + raise ValueError("Too large data") + if L == 1: + self._buffer.write(b'\xd4') + elif L == 2: + self._buffer.write(b'\xd5') + elif L == 4: + self._buffer.write(b'\xd6') + elif L == 8: + self._buffer.write(b'\xd7') + elif L == 16: + self._buffer.write(b'\xd8') + elif L <= 0xff: + self._buffer.write(b'\xc7' + struct.pack('B', L)) + elif L <= 0xffff: + self._buffer.write(b'\xc8' + struct.pack('>H', L)) + else: + self._buffer.write(b'\xc9' + struct.pack('>I', L)) + self._buffer.write(struct.pack('B', typecode)) + self._buffer.write(data) + + def _pack_array_header(self, n): + if n <= 0x0f: + return self._buffer.write(struct.pack('B', 0x90 + n)) + if n <= 0xffff: + return self._buffer.write(struct.pack(">BH", 0xdc, n)) + if n <= 0xffffffff: + return self._buffer.write(struct.pack(">BI", 0xdd, n)) + raise ValueError("Array is too large") + + def _pack_map_header(self, n): + if n <= 0x0f: + return self._buffer.write(struct.pack('B', 0x80 + n)) + if n <= 0xffff: + return self._buffer.write(struct.pack(">BH", 0xde, n)) + if n <= 0xffffffff: + return self._buffer.write(struct.pack(">BI", 0xdf, n)) + raise ValueError("Dict is too large") + + def _pack_map_pairs(self, n, pairs, nest_limit=DEFAULT_RECURSE_LIMIT): + self._pack_map_header(n) + for (k, v) in pairs: + self._pack(k, nest_limit - 1) + self._pack(v, nest_limit - 1) + + def _pack_raw_header(self, n): + if n <= 0x1f: + self._buffer.write(struct.pack('B', 0xa0 + n)) + elif self._use_bin_type and n <= 0xff: + self._buffer.write(struct.pack('>BB', 
0xd9, n))
+        elif n <= 0xffff:
+            self._buffer.write(struct.pack(">BH", 0xda, n))
+        elif n <= 0xffffffff:
+            self._buffer.write(struct.pack(">BI", 0xdb, n))
+        else:
+            raise ValueError('Raw is too large')
+
+    def _pack_bin_header(self, n):
+        if not self._use_bin_type:
+            return self._pack_raw_header(n)
+        elif n <= 0xff:
+            return self._buffer.write(struct.pack('>BB', 0xc4, n))
+        elif n <= 0xffff:
+            return self._buffer.write(struct.pack(">BH", 0xc5, n))
+        elif n <= 0xffffffff:
+            return self._buffer.write(struct.pack(">BI", 0xc6, n))
+        else:
+            raise ValueError('Bin is too large')
+
+    def bytes(self):
+        """Return internal buffer contents as bytes object"""
+        return self._buffer.getvalue()
+
+    def reset(self):
+        """Reset internal buffer.
+
+        This method is useful only when autoreset=False.
+        """
+        self._buffer = StringIO()
+
+    def getbuffer(self):
+        """Return view of internal buffer."""
+        if USING_STRINGBUILDER or PY2:
+            return memoryview(self.bytes())
+        else:
+            return self._buffer.getbuffer()
diff --git a/ddtrace/vendor/msgpack/pack.h b/ddtrace/vendor/msgpack/pack.h
new file mode 100644
index 0000000000..4f3ce1d99e
--- /dev/null
+++ b/ddtrace/vendor/msgpack/pack.h
@@ -0,0 +1,119 @@
+/*
+ * MessagePack for Python packing routine
+ *
+ * Copyright (C) 2009 Naoki INADA
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ + +#include +#include +#include "sysdep.h" +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + +#ifdef _MSC_VER +#define inline __inline +#endif + +typedef struct msgpack_packer { + char *buf; + size_t length; + size_t buf_size; + bool use_bin_type; +} msgpack_packer; + +typedef struct Packer Packer; + +static inline int msgpack_pack_write(msgpack_packer* pk, const char *data, size_t l) +{ + char* buf = pk->buf; + size_t bs = pk->buf_size; + size_t len = pk->length; + + if (len + l > bs) { + bs = (len + l) * 2; + buf = (char*)PyMem_Realloc(buf, bs); + if (!buf) { + PyErr_NoMemory(); + return -1; + } + } + memcpy(buf + len, data, l); + len += l; + + pk->buf = buf; + pk->buf_size = bs; + pk->length = len; + return 0; +} + +#define msgpack_pack_append_buffer(user, buf, len) \ + return msgpack_pack_write(user, (const char*)buf, len) + +#include "pack_template.h" + +// return -2 when o is too long +static inline int +msgpack_pack_unicode(msgpack_packer *pk, PyObject *o, long long limit) +{ +#if PY_MAJOR_VERSION >= 3 + assert(PyUnicode_Check(o)); + + Py_ssize_t len; + const char* buf = PyUnicode_AsUTF8AndSize(o, &len); + if (buf == NULL) + return -1; + + if (len > limit) { + return -2; + } + + int ret = msgpack_pack_raw(pk, len); + if (ret) return ret; + + return msgpack_pack_raw_body(pk, buf, len); +#else + PyObject *bytes; + Py_ssize_t len; + int ret; + + // py2 + bytes = PyUnicode_AsUTF8String(o); + if (bytes == NULL) + return -1; + + len = PyString_GET_SIZE(bytes); + if (len > limit) { + Py_DECREF(bytes); + return -2; + } + + ret = msgpack_pack_raw(pk, len); + if (ret) { + Py_DECREF(bytes); + return -1; + } + ret = msgpack_pack_raw_body(pk, PyString_AS_STRING(bytes), len); + Py_DECREF(bytes); + return ret; +#endif +} + +#ifdef __cplusplus +} +#endif diff --git a/ddtrace/vendor/msgpack/pack_template.h b/ddtrace/vendor/msgpack/pack_template.h new file mode 100644 index 0000000000..69982f4d29 --- /dev/null +++ b/ddtrace/vendor/msgpack/pack_template.h @@ -0,0 +1,778 @@ +/* + * MessagePack packing routine template + * + * Copyright (C) 2008-2010 FURUHASHI Sadayuki + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +#if defined(__LITTLE_ENDIAN__) +#define TAKE8_8(d) ((uint8_t*)&d)[0] +#define TAKE8_16(d) ((uint8_t*)&d)[0] +#define TAKE8_32(d) ((uint8_t*)&d)[0] +#define TAKE8_64(d) ((uint8_t*)&d)[0] +#elif defined(__BIG_ENDIAN__) +#define TAKE8_8(d) ((uint8_t*)&d)[0] +#define TAKE8_16(d) ((uint8_t*)&d)[1] +#define TAKE8_32(d) ((uint8_t*)&d)[3] +#define TAKE8_64(d) ((uint8_t*)&d)[7] +#endif + +#ifndef msgpack_pack_append_buffer +#error msgpack_pack_append_buffer callback is not defined +#endif + + +/* + * Integer + */ + +#define msgpack_pack_real_uint8(x, d) \ +do { \ + if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \ + } else { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_8(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ +} while(0) + +#define msgpack_pack_real_uint16(x, d) \ +do { \ + if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \ + } else if(d < (1<<8)) { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } else { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } \ +} while(0) + +#define msgpack_pack_real_uint32(x, d) \ +do { \ + if(d < (1<<8)) { \ + if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \ + } else { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ + } else { \ + if(d < (1<<16)) { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } else { \ + /* unsigned 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } \ + } \ +} while(0) + +#define msgpack_pack_real_uint64(x, d) \ +do { \ + if(d < (1ULL<<8)) { \ + if(d < (1ULL<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \ + } else { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ + } else { \ + if(d < (1ULL<<16)) { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } else if(d < (1ULL<<32)) { \ + /* unsigned 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } else { \ + /* unsigned 64 */ \ + unsigned char buf[9]; \ + buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \ + msgpack_pack_append_buffer(x, buf, 9); \ + } \ + } \ +} while(0) + +#define msgpack_pack_real_int8(x, d) \ +do { \ + if(d < -(1<<5)) { \ + /* signed 8 */ \ + unsigned char buf[2] = {0xd0, TAKE8_8(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } else { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); \ + } \ +} while(0) + +#define msgpack_pack_real_int16(x, d) \ +do { \ + if(d < -(1<<5)) { \ + if(d < -(1<<7)) { \ + /* signed 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } else { \ + /* signed 8 */ \ + unsigned char buf[2] = {0xd0, TAKE8_16(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ + } else if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_16(d), 1); \ + } else { \ + if(d < (1<<8)) 
{ \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_16(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } else { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } \ + } \ +} while(0) + +#define msgpack_pack_real_int32(x, d) \ +do { \ + if(d < -(1<<5)) { \ + if(d < -(1<<15)) { \ + /* signed 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } else if(d < -(1<<7)) { \ + /* signed 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } else { \ + /* signed 8 */ \ + unsigned char buf[2] = {0xd0, TAKE8_32(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ + } else if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_32(d), 1); \ + } else { \ + if(d < (1<<8)) { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_32(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } else if(d < (1<<16)) { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } else { \ + /* unsigned 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } \ + } \ +} while(0) + +#define msgpack_pack_real_int64(x, d) \ +do { \ + if(d < -(1LL<<5)) { \ + if(d < -(1LL<<15)) { \ + if(d < -(1LL<<31)) { \ + /* signed 64 */ \ + unsigned char buf[9]; \ + buf[0] = 0xd3; _msgpack_store64(&buf[1], d); \ + msgpack_pack_append_buffer(x, buf, 9); \ + } else { \ + /* signed 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xd2; _msgpack_store32(&buf[1], (int32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } \ + } else { \ + if(d < -(1<<7)) { \ + /* signed 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xd1; _msgpack_store16(&buf[1], (int16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } else { \ + /* signed 8 */ \ + unsigned char buf[2] = {0xd0, TAKE8_64(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } \ + } \ + } else if(d < (1<<7)) { \ + /* fixnum */ \ + msgpack_pack_append_buffer(x, &TAKE8_64(d), 1); \ + } else { \ + if(d < (1LL<<16)) { \ + if(d < (1<<8)) { \ + /* unsigned 8 */ \ + unsigned char buf[2] = {0xcc, TAKE8_64(d)}; \ + msgpack_pack_append_buffer(x, buf, 2); \ + } else { \ + /* unsigned 16 */ \ + unsigned char buf[3]; \ + buf[0] = 0xcd; _msgpack_store16(&buf[1], (uint16_t)d); \ + msgpack_pack_append_buffer(x, buf, 3); \ + } \ + } else { \ + if(d < (1LL<<32)) { \ + /* unsigned 32 */ \ + unsigned char buf[5]; \ + buf[0] = 0xce; _msgpack_store32(&buf[1], (uint32_t)d); \ + msgpack_pack_append_buffer(x, buf, 5); \ + } else { \ + /* unsigned 64 */ \ + unsigned char buf[9]; \ + buf[0] = 0xcf; _msgpack_store64(&buf[1], d); \ + msgpack_pack_append_buffer(x, buf, 9); \ + } \ + } \ + } \ +} while(0) + + +static inline int msgpack_pack_uint8(msgpack_packer* x, uint8_t d) +{ + msgpack_pack_real_uint8(x, d); +} + +static inline int msgpack_pack_uint16(msgpack_packer* x, uint16_t d) +{ + msgpack_pack_real_uint16(x, d); +} + +static inline int msgpack_pack_uint32(msgpack_packer* x, uint32_t d) +{ + msgpack_pack_real_uint32(x, d); +} + +static inline int msgpack_pack_uint64(msgpack_packer* x, uint64_t d) +{ + msgpack_pack_real_uint64(x, d); +} + +static inline int msgpack_pack_int8(msgpack_packer* x, int8_t d) +{ + 
msgpack_pack_real_int8(x, d); +} + +static inline int msgpack_pack_int16(msgpack_packer* x, int16_t d) +{ + msgpack_pack_real_int16(x, d); +} + +static inline int msgpack_pack_int32(msgpack_packer* x, int32_t d) +{ + msgpack_pack_real_int32(x, d); +} + +static inline int msgpack_pack_int64(msgpack_packer* x, int64_t d) +{ + msgpack_pack_real_int64(x, d); +} + + +//#ifdef msgpack_pack_inline_func_cint + +static inline int msgpack_pack_short(msgpack_packer* x, short d) +{ +#if defined(SIZEOF_SHORT) +#if SIZEOF_SHORT == 2 + msgpack_pack_real_int16(x, d); +#elif SIZEOF_SHORT == 4 + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#elif defined(SHRT_MAX) +#if SHRT_MAX == 0x7fff + msgpack_pack_real_int16(x, d); +#elif SHRT_MAX == 0x7fffffff + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#else +if(sizeof(short) == 2) { + msgpack_pack_real_int16(x, d); +} else if(sizeof(short) == 4) { + msgpack_pack_real_int32(x, d); +} else { + msgpack_pack_real_int64(x, d); +} +#endif +} + +static inline int msgpack_pack_int(msgpack_packer* x, int d) +{ +#if defined(SIZEOF_INT) +#if SIZEOF_INT == 2 + msgpack_pack_real_int16(x, d); +#elif SIZEOF_INT == 4 + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#elif defined(INT_MAX) +#if INT_MAX == 0x7fff + msgpack_pack_real_int16(x, d); +#elif INT_MAX == 0x7fffffff + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#else +if(sizeof(int) == 2) { + msgpack_pack_real_int16(x, d); +} else if(sizeof(int) == 4) { + msgpack_pack_real_int32(x, d); +} else { + msgpack_pack_real_int64(x, d); +} +#endif +} + +static inline int msgpack_pack_long(msgpack_packer* x, long d) +{ +#if defined(SIZEOF_LONG) +#if SIZEOF_LONG == 2 + msgpack_pack_real_int16(x, d); +#elif SIZEOF_LONG == 4 + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#elif defined(LONG_MAX) +#if LONG_MAX == 0x7fffL + msgpack_pack_real_int16(x, d); +#elif LONG_MAX == 0x7fffffffL + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#else +if(sizeof(long) == 2) { + msgpack_pack_real_int16(x, d); +} else if(sizeof(long) == 4) { + msgpack_pack_real_int32(x, d); +} else { + msgpack_pack_real_int64(x, d); +} +#endif +} + +static inline int msgpack_pack_long_long(msgpack_packer* x, long long d) +{ +#if defined(SIZEOF_LONG_LONG) +#if SIZEOF_LONG_LONG == 2 + msgpack_pack_real_int16(x, d); +#elif SIZEOF_LONG_LONG == 4 + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#elif defined(LLONG_MAX) +#if LLONG_MAX == 0x7fffL + msgpack_pack_real_int16(x, d); +#elif LLONG_MAX == 0x7fffffffL + msgpack_pack_real_int32(x, d); +#else + msgpack_pack_real_int64(x, d); +#endif + +#else +if(sizeof(long long) == 2) { + msgpack_pack_real_int16(x, d); +} else if(sizeof(long long) == 4) { + msgpack_pack_real_int32(x, d); +} else { + msgpack_pack_real_int64(x, d); +} +#endif +} + +static inline int msgpack_pack_unsigned_short(msgpack_packer* x, unsigned short d) +{ +#if defined(SIZEOF_SHORT) +#if SIZEOF_SHORT == 2 + msgpack_pack_real_uint16(x, d); +#elif SIZEOF_SHORT == 4 + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#elif defined(USHRT_MAX) +#if USHRT_MAX == 0xffffU + msgpack_pack_real_uint16(x, d); +#elif USHRT_MAX == 0xffffffffU + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#else +if(sizeof(unsigned short) == 2) { + 
msgpack_pack_real_uint16(x, d); +} else if(sizeof(unsigned short) == 4) { + msgpack_pack_real_uint32(x, d); +} else { + msgpack_pack_real_uint64(x, d); +} +#endif +} + +static inline int msgpack_pack_unsigned_int(msgpack_packer* x, unsigned int d) +{ +#if defined(SIZEOF_INT) +#if SIZEOF_INT == 2 + msgpack_pack_real_uint16(x, d); +#elif SIZEOF_INT == 4 + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#elif defined(UINT_MAX) +#if UINT_MAX == 0xffffU + msgpack_pack_real_uint16(x, d); +#elif UINT_MAX == 0xffffffffU + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#else +if(sizeof(unsigned int) == 2) { + msgpack_pack_real_uint16(x, d); +} else if(sizeof(unsigned int) == 4) { + msgpack_pack_real_uint32(x, d); +} else { + msgpack_pack_real_uint64(x, d); +} +#endif +} + +static inline int msgpack_pack_unsigned_long(msgpack_packer* x, unsigned long d) +{ +#if defined(SIZEOF_LONG) +#if SIZEOF_LONG == 2 + msgpack_pack_real_uint16(x, d); +#elif SIZEOF_LONG == 4 + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#elif defined(ULONG_MAX) +#if ULONG_MAX == 0xffffUL + msgpack_pack_real_uint16(x, d); +#elif ULONG_MAX == 0xffffffffUL + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#else +if(sizeof(unsigned long) == 2) { + msgpack_pack_real_uint16(x, d); +} else if(sizeof(unsigned long) == 4) { + msgpack_pack_real_uint32(x, d); +} else { + msgpack_pack_real_uint64(x, d); +} +#endif +} + +static inline int msgpack_pack_unsigned_long_long(msgpack_packer* x, unsigned long long d) +{ +#if defined(SIZEOF_LONG_LONG) +#if SIZEOF_LONG_LONG == 2 + msgpack_pack_real_uint16(x, d); +#elif SIZEOF_LONG_LONG == 4 + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#elif defined(ULLONG_MAX) +#if ULLONG_MAX == 0xffffUL + msgpack_pack_real_uint16(x, d); +#elif ULLONG_MAX == 0xffffffffUL + msgpack_pack_real_uint32(x, d); +#else + msgpack_pack_real_uint64(x, d); +#endif + +#else +if(sizeof(unsigned long long) == 2) { + msgpack_pack_real_uint16(x, d); +} else if(sizeof(unsigned long long) == 4) { + msgpack_pack_real_uint32(x, d); +} else { + msgpack_pack_real_uint64(x, d); +} +#endif +} + +//#undef msgpack_pack_inline_func_cint +//#endif + + + +/* + * Float + */ + +static inline int msgpack_pack_float(msgpack_packer* x, float d) +{ + unsigned char buf[5]; + buf[0] = 0xca; + _PyFloat_Pack4(d, &buf[1], 0); + msgpack_pack_append_buffer(x, buf, 5); +} + +static inline int msgpack_pack_double(msgpack_packer* x, double d) +{ + unsigned char buf[9]; + buf[0] = 0xcb; + _PyFloat_Pack8(d, &buf[1], 0); + msgpack_pack_append_buffer(x, buf, 9); +} + + +/* + * Nil + */ + +static inline int msgpack_pack_nil(msgpack_packer* x) +{ + static const unsigned char d = 0xc0; + msgpack_pack_append_buffer(x, &d, 1); +} + + +/* + * Boolean + */ + +static inline int msgpack_pack_true(msgpack_packer* x) +{ + static const unsigned char d = 0xc3; + msgpack_pack_append_buffer(x, &d, 1); +} + +static inline int msgpack_pack_false(msgpack_packer* x) +{ + static const unsigned char d = 0xc2; + msgpack_pack_append_buffer(x, &d, 1); +} + + +/* + * Array + */ + +static inline int msgpack_pack_array(msgpack_packer* x, unsigned int n) +{ + if(n < 16) { + unsigned char d = 0x90 | n; + msgpack_pack_append_buffer(x, &d, 1); + } else if(n < 65536) { + unsigned char buf[3]; + buf[0] = 0xdc; _msgpack_store16(&buf[1], (uint16_t)n); + msgpack_pack_append_buffer(x, buf, 3); + } else { + unsigned 
char buf[5]; + buf[0] = 0xdd; _msgpack_store32(&buf[1], (uint32_t)n); + msgpack_pack_append_buffer(x, buf, 5); + } +} + + +/* + * Map + */ + +static inline int msgpack_pack_map(msgpack_packer* x, unsigned int n) +{ + if(n < 16) { + unsigned char d = 0x80 | n; + msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); + } else if(n < 65536) { + unsigned char buf[3]; + buf[0] = 0xde; _msgpack_store16(&buf[1], (uint16_t)n); + msgpack_pack_append_buffer(x, buf, 3); + } else { + unsigned char buf[5]; + buf[0] = 0xdf; _msgpack_store32(&buf[1], (uint32_t)n); + msgpack_pack_append_buffer(x, buf, 5); + } +} + + +/* + * Raw + */ + +static inline int msgpack_pack_raw(msgpack_packer* x, size_t l) +{ + if (l < 32) { + unsigned char d = 0xa0 | (uint8_t)l; + msgpack_pack_append_buffer(x, &TAKE8_8(d), 1); + } else if (x->use_bin_type && l < 256) { // str8 is new format introduced with bin. + unsigned char buf[2] = {0xd9, (uint8_t)l}; + msgpack_pack_append_buffer(x, buf, 2); + } else if (l < 65536) { + unsigned char buf[3]; + buf[0] = 0xda; _msgpack_store16(&buf[1], (uint16_t)l); + msgpack_pack_append_buffer(x, buf, 3); + } else { + unsigned char buf[5]; + buf[0] = 0xdb; _msgpack_store32(&buf[1], (uint32_t)l); + msgpack_pack_append_buffer(x, buf, 5); + } +} + +/* + * bin + */ +static inline int msgpack_pack_bin(msgpack_packer *x, size_t l) +{ + if (!x->use_bin_type) { + return msgpack_pack_raw(x, l); + } + if (l < 256) { + unsigned char buf[2] = {0xc4, (unsigned char)l}; + msgpack_pack_append_buffer(x, buf, 2); + } else if (l < 65536) { + unsigned char buf[3] = {0xc5}; + _msgpack_store16(&buf[1], (uint16_t)l); + msgpack_pack_append_buffer(x, buf, 3); + } else { + unsigned char buf[5] = {0xc6}; + _msgpack_store32(&buf[1], (uint32_t)l); + msgpack_pack_append_buffer(x, buf, 5); + } +} + +static inline int msgpack_pack_raw_body(msgpack_packer* x, const void* b, size_t l) +{ + if (l > 0) msgpack_pack_append_buffer(x, (const unsigned char*)b, l); + return 0; +} + +/* + * Ext + */ +static inline int msgpack_pack_ext(msgpack_packer* x, char typecode, size_t l) +{ + if (l == 1) { + unsigned char buf[2]; + buf[0] = 0xd4; + buf[1] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 2); + } + else if(l == 2) { + unsigned char buf[2]; + buf[0] = 0xd5; + buf[1] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 2); + } + else if(l == 4) { + unsigned char buf[2]; + buf[0] = 0xd6; + buf[1] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 2); + } + else if(l == 8) { + unsigned char buf[2]; + buf[0] = 0xd7; + buf[1] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 2); + } + else if(l == 16) { + unsigned char buf[2]; + buf[0] = 0xd8; + buf[1] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 2); + } + else if(l < 256) { + unsigned char buf[3]; + buf[0] = 0xc7; + buf[1] = l; + buf[2] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 3); + } else if(l < 65536) { + unsigned char buf[4]; + buf[0] = 0xc8; + _msgpack_store16(&buf[1], (uint16_t)l); + buf[3] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 4); + } else { + unsigned char buf[6]; + buf[0] = 0xc9; + _msgpack_store32(&buf[1], (uint32_t)l); + buf[5] = (unsigned char)typecode; + msgpack_pack_append_buffer(x, buf, 6); + } + +} + + + +#undef msgpack_pack_append_buffer + +#undef TAKE8_8 +#undef TAKE8_16 +#undef TAKE8_32 +#undef TAKE8_64 + +#undef msgpack_pack_real_uint8 +#undef msgpack_pack_real_uint16 +#undef msgpack_pack_real_uint32 +#undef msgpack_pack_real_uint64 +#undef 
msgpack_pack_real_int8 +#undef msgpack_pack_real_int16 +#undef msgpack_pack_real_int32 +#undef msgpack_pack_real_int64 diff --git a/ddtrace/vendor/msgpack/setup.py b/ddtrace/vendor/msgpack/setup.py new file mode 100644 index 0000000000..addc81cbd9 --- /dev/null +++ b/ddtrace/vendor/msgpack/setup.py @@ -0,0 +1,26 @@ +__all__ = ["get_extensions"] + +from setuptools import Extension +import sys + + +def get_extensions(): + libraries = [] + if sys.platform == "win32": + libraries.append("ws2_32") + + macros = [] + if sys.byteorder == "big": + macros = [("__BIG_ENDIAN__", "1")] + else: + macros = [("__LITTLE_ENDIAN__", "1")] + + ext = Extension( + "ddtrace.vendor.msgpack._cmsgpack", + sources=["ddtrace/vendor/msgpack/_cmsgpack.cpp"], + libraries=libraries, + include_dirs=["ddtrace/vendor/"], + define_macros=macros, + ) + + return [ext] diff --git a/ddtrace/vendor/msgpack/sysdep.h b/ddtrace/vendor/msgpack/sysdep.h new file mode 100644 index 0000000000..ed9c1bc0b8 --- /dev/null +++ b/ddtrace/vendor/msgpack/sysdep.h @@ -0,0 +1,194 @@ +/* + * MessagePack system dependencies + * + * Copyright (C) 2008-2010 FURUHASHI Sadayuki + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MSGPACK_SYSDEP_H__ +#define MSGPACK_SYSDEP_H__ + +#include +#include +#if defined(_MSC_VER) && _MSC_VER < 1600 +typedef __int8 int8_t; +typedef unsigned __int8 uint8_t; +typedef __int16 int16_t; +typedef unsigned __int16 uint16_t; +typedef __int32 int32_t; +typedef unsigned __int32 uint32_t; +typedef __int64 int64_t; +typedef unsigned __int64 uint64_t; +#elif defined(_MSC_VER) // && _MSC_VER >= 1600 +#include +#else +#include +#include +#endif + +#ifdef _WIN32 +#define _msgpack_atomic_counter_header +typedef long _msgpack_atomic_counter_t; +#define _msgpack_sync_decr_and_fetch(ptr) InterlockedDecrement(ptr) +#define _msgpack_sync_incr_and_fetch(ptr) InterlockedIncrement(ptr) +#elif defined(__GNUC__) && ((__GNUC__*10 + __GNUC_MINOR__) < 41) +#define _msgpack_atomic_counter_header "gcc_atomic.h" +#else +typedef unsigned int _msgpack_atomic_counter_t; +#define _msgpack_sync_decr_and_fetch(ptr) __sync_sub_and_fetch(ptr, 1) +#define _msgpack_sync_incr_and_fetch(ptr) __sync_add_and_fetch(ptr, 1) +#endif + +#ifdef _WIN32 + +#ifdef __cplusplus +/* numeric_limits::min,max */ +#ifdef max +#undef max +#endif +#ifdef min +#undef min +#endif +#endif + +#else +#include /* __BYTE_ORDER */ +#endif + +#if !defined(__LITTLE_ENDIAN__) && !defined(__BIG_ENDIAN__) +#if __BYTE_ORDER == __LITTLE_ENDIAN +#define __LITTLE_ENDIAN__ +#elif __BYTE_ORDER == __BIG_ENDIAN +#define __BIG_ENDIAN__ +#elif _WIN32 +#define __LITTLE_ENDIAN__ +#endif +#endif + + +#ifdef __LITTLE_ENDIAN__ + +#ifdef _WIN32 +# if defined(ntohs) +# define _msgpack_be16(x) ntohs(x) +# elif defined(_byteswap_ushort) || (defined(_MSC_VER) && _MSC_VER >= 1400) +# define _msgpack_be16(x) ((uint16_t)_byteswap_ushort((unsigned short)x)) +# else +# define _msgpack_be16(x) ( \ + ((((uint16_t)x) << 8) ) | \ + ((((uint16_t)x) >> 8) ) ) +# endif +#else +# define _msgpack_be16(x) 
ntohs(x) +#endif + +#ifdef _WIN32 +# if defined(ntohl) +# define _msgpack_be32(x) ntohl(x) +# elif defined(_byteswap_ulong) || (defined(_MSC_VER) && _MSC_VER >= 1400) +# define _msgpack_be32(x) ((uint32_t)_byteswap_ulong((unsigned long)x)) +# else +# define _msgpack_be32(x) \ + ( ((((uint32_t)x) << 24) ) | \ + ((((uint32_t)x) << 8) & 0x00ff0000U ) | \ + ((((uint32_t)x) >> 8) & 0x0000ff00U ) | \ + ((((uint32_t)x) >> 24) ) ) +# endif +#else +# define _msgpack_be32(x) ntohl(x) +#endif + +#if defined(_byteswap_uint64) || (defined(_MSC_VER) && _MSC_VER >= 1400) +# define _msgpack_be64(x) (_byteswap_uint64(x)) +#elif defined(bswap_64) +# define _msgpack_be64(x) bswap_64(x) +#elif defined(__DARWIN_OSSwapInt64) +# define _msgpack_be64(x) __DARWIN_OSSwapInt64(x) +#else +#define _msgpack_be64(x) \ + ( ((((uint64_t)x) << 56) ) | \ + ((((uint64_t)x) << 40) & 0x00ff000000000000ULL ) | \ + ((((uint64_t)x) << 24) & 0x0000ff0000000000ULL ) | \ + ((((uint64_t)x) << 8) & 0x000000ff00000000ULL ) | \ + ((((uint64_t)x) >> 8) & 0x00000000ff000000ULL ) | \ + ((((uint64_t)x) >> 24) & 0x0000000000ff0000ULL ) | \ + ((((uint64_t)x) >> 40) & 0x000000000000ff00ULL ) | \ + ((((uint64_t)x) >> 56) ) ) +#endif + +#define _msgpack_load16(cast, from) ((cast)( \ + (((uint16_t)((uint8_t*)(from))[0]) << 8) | \ + (((uint16_t)((uint8_t*)(from))[1]) ) )) + +#define _msgpack_load32(cast, from) ((cast)( \ + (((uint32_t)((uint8_t*)(from))[0]) << 24) | \ + (((uint32_t)((uint8_t*)(from))[1]) << 16) | \ + (((uint32_t)((uint8_t*)(from))[2]) << 8) | \ + (((uint32_t)((uint8_t*)(from))[3]) ) )) + +#define _msgpack_load64(cast, from) ((cast)( \ + (((uint64_t)((uint8_t*)(from))[0]) << 56) | \ + (((uint64_t)((uint8_t*)(from))[1]) << 48) | \ + (((uint64_t)((uint8_t*)(from))[2]) << 40) | \ + (((uint64_t)((uint8_t*)(from))[3]) << 32) | \ + (((uint64_t)((uint8_t*)(from))[4]) << 24) | \ + (((uint64_t)((uint8_t*)(from))[5]) << 16) | \ + (((uint64_t)((uint8_t*)(from))[6]) << 8) | \ + (((uint64_t)((uint8_t*)(from))[7]) ) )) + +#else + +#define _msgpack_be16(x) (x) +#define _msgpack_be32(x) (x) +#define _msgpack_be64(x) (x) + +#define _msgpack_load16(cast, from) ((cast)( \ + (((uint16_t)((uint8_t*)from)[0]) << 8) | \ + (((uint16_t)((uint8_t*)from)[1]) ) )) + +#define _msgpack_load32(cast, from) ((cast)( \ + (((uint32_t)((uint8_t*)from)[0]) << 24) | \ + (((uint32_t)((uint8_t*)from)[1]) << 16) | \ + (((uint32_t)((uint8_t*)from)[2]) << 8) | \ + (((uint32_t)((uint8_t*)from)[3]) ) )) + +#define _msgpack_load64(cast, from) ((cast)( \ + (((uint64_t)((uint8_t*)from)[0]) << 56) | \ + (((uint64_t)((uint8_t*)from)[1]) << 48) | \ + (((uint64_t)((uint8_t*)from)[2]) << 40) | \ + (((uint64_t)((uint8_t*)from)[3]) << 32) | \ + (((uint64_t)((uint8_t*)from)[4]) << 24) | \ + (((uint64_t)((uint8_t*)from)[5]) << 16) | \ + (((uint64_t)((uint8_t*)from)[6]) << 8) | \ + (((uint64_t)((uint8_t*)from)[7]) ) )) +#endif + + +#define _msgpack_store16(to, num) \ + do { uint16_t val = _msgpack_be16(num); memcpy(to, &val, 2); } while(0) +#define _msgpack_store32(to, num) \ + do { uint32_t val = _msgpack_be32(num); memcpy(to, &val, 4); } while(0) +#define _msgpack_store64(to, num) \ + do { uint64_t val = _msgpack_be64(num); memcpy(to, &val, 8); } while(0) + +/* +#define _msgpack_load16(cast, from) \ + ({ cast val; memcpy(&val, (char*)from, 2); _msgpack_be16(val); }) +#define _msgpack_load32(cast, from) \ + ({ cast val; memcpy(&val, (char*)from, 4); _msgpack_be32(val); }) +#define _msgpack_load64(cast, from) \ + ({ cast val; memcpy(&val, (char*)from, 8); _msgpack_be64(val); }) +*/ + + 
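+/* Example of the store/load helpers above: on a little-endian host,
+ * _msgpack_store16(to, 0x0102) swaps via _msgpack_be16 and memcpy's the
+ * bytes {0x01, 0x02} -- big-endian (network) order, as the MessagePack
+ * wire format requires. On big-endian hosts the _msgpack_be* macros are
+ * identities and no swap occurs. */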
+#endif /* msgpack/sysdep.h */ diff --git a/ddtrace/vendor/msgpack/unpack.h b/ddtrace/vendor/msgpack/unpack.h new file mode 100644 index 0000000000..85dbbed5b6 --- /dev/null +++ b/ddtrace/vendor/msgpack/unpack.h @@ -0,0 +1,287 @@ +/* + * MessagePack for Python unpacking routine + * + * Copyright (C) 2009 Naoki INADA + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#define MSGPACK_EMBED_STACK_SIZE (1024) +#include "unpack_define.h" + +typedef struct unpack_user { + bool use_list; + bool raw; + bool has_pairs_hook; + bool strict_map_key; + PyObject *object_hook; + PyObject *list_hook; + PyObject *ext_hook; + const char *encoding; + const char *unicode_errors; + Py_ssize_t max_str_len, max_bin_len, max_array_len, max_map_len, max_ext_len; +} unpack_user; + +typedef PyObject* msgpack_unpack_object; +struct unpack_context; +typedef struct unpack_context unpack_context; +typedef int (*execute_fn)(unpack_context *ctx, const char* data, Py_ssize_t len, Py_ssize_t* off); + +static inline msgpack_unpack_object unpack_callback_root(unpack_user* u) +{ + return NULL; +} + +static inline int unpack_callback_uint16(unpack_user* u, uint16_t d, msgpack_unpack_object* o) +{ + PyObject *p = PyInt_FromLong((long)d); + if (!p) + return -1; + *o = p; + return 0; +} +static inline int unpack_callback_uint8(unpack_user* u, uint8_t d, msgpack_unpack_object* o) +{ + return unpack_callback_uint16(u, d, o); +} + + +static inline int unpack_callback_uint32(unpack_user* u, uint32_t d, msgpack_unpack_object* o) +{ + PyObject *p = PyInt_FromSize_t((size_t)d); + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_uint64(unpack_user* u, uint64_t d, msgpack_unpack_object* o) +{ + PyObject *p; + if (d > LONG_MAX) { + p = PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG)d); + } else { + p = PyInt_FromLong((long)d); + } + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_int32(unpack_user* u, int32_t d, msgpack_unpack_object* o) +{ + PyObject *p = PyInt_FromLong(d); + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_int16(unpack_user* u, int16_t d, msgpack_unpack_object* o) +{ + return unpack_callback_int32(u, d, o); +} + +static inline int unpack_callback_int8(unpack_user* u, int8_t d, msgpack_unpack_object* o) +{ + return unpack_callback_int32(u, d, o); +} + +static inline int unpack_callback_int64(unpack_user* u, int64_t d, msgpack_unpack_object* o) +{ + PyObject *p; + if (d > LONG_MAX || d < LONG_MIN) { + p = PyLong_FromLongLong((PY_LONG_LONG)d); + } else { + p = PyInt_FromLong((long)d); + } + *o = p; + return 0; +} + +static inline int unpack_callback_double(unpack_user* u, double d, msgpack_unpack_object* o) +{ + PyObject *p = PyFloat_FromDouble(d); + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_float(unpack_user* u, float d, msgpack_unpack_object* o) +{ + return unpack_callback_double(u, d, o); +} + +static inline int unpack_callback_nil(unpack_user* u, 
msgpack_unpack_object* o) +{ Py_INCREF(Py_None); *o = Py_None; return 0; } + +static inline int unpack_callback_true(unpack_user* u, msgpack_unpack_object* o) +{ Py_INCREF(Py_True); *o = Py_True; return 0; } + +static inline int unpack_callback_false(unpack_user* u, msgpack_unpack_object* o) +{ Py_INCREF(Py_False); *o = Py_False; return 0; } + +static inline int unpack_callback_array(unpack_user* u, unsigned int n, msgpack_unpack_object* o) +{ + if (n > u->max_array_len) { + PyErr_Format(PyExc_ValueError, "%u exceeds max_array_len(%zd)", n, u->max_array_len); + return -1; + } + PyObject *p = u->use_list ? PyList_New(n) : PyTuple_New(n); + + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_array_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object o) +{ + if (u->use_list) + PyList_SET_ITEM(*c, current, o); + else + PyTuple_SET_ITEM(*c, current, o); + return 0; +} + +static inline int unpack_callback_array_end(unpack_user* u, msgpack_unpack_object* c) +{ + if (u->list_hook) { + PyObject *new_c = PyObject_CallFunctionObjArgs(u->list_hook, *c, NULL); + if (!new_c) + return -1; + Py_DECREF(*c); + *c = new_c; + } + return 0; +} + +static inline int unpack_callback_map(unpack_user* u, unsigned int n, msgpack_unpack_object* o) +{ + if (n > u->max_map_len) { + PyErr_Format(PyExc_ValueError, "%u exceeds max_map_len(%zd)", n, u->max_map_len); + return -1; + } + PyObject *p; + if (u->has_pairs_hook) { + p = PyList_New(n); // Or use tuple? + } + else { + p = PyDict_New(); + } + if (!p) + return -1; + *o = p; + return 0; +} + +static inline int unpack_callback_map_item(unpack_user* u, unsigned int current, msgpack_unpack_object* c, msgpack_unpack_object k, msgpack_unpack_object v) +{ + if (u->strict_map_key && !PyUnicode_CheckExact(k) && !PyBytes_CheckExact(k)) { + PyErr_Format(PyExc_ValueError, "%.100s is not allowed for map key", Py_TYPE(k)->tp_name); + return -1; + } + if (u->has_pairs_hook) { + msgpack_unpack_object item = PyTuple_Pack(2, k, v); + if (!item) + return -1; + Py_DECREF(k); + Py_DECREF(v); + PyList_SET_ITEM(*c, current, item); + return 0; + } + else if (PyDict_SetItem(*c, k, v) == 0) { + Py_DECREF(k); + Py_DECREF(v); + return 0; + } + return -1; +} + +static inline int unpack_callback_map_end(unpack_user* u, msgpack_unpack_object* c) +{ + if (u->object_hook) { + PyObject *new_c = PyObject_CallFunctionObjArgs(u->object_hook, *c, NULL); + if (!new_c) + return -1; + + Py_DECREF(*c); + *c = new_c; + } + return 0; +} + +static inline int unpack_callback_raw(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o) +{ + if (l > u->max_str_len) { + PyErr_Format(PyExc_ValueError, "%u exceeds max_str_len(%zd)", l, u->max_str_len); + return -1; + } + + PyObject *py; + + if (u->encoding) { + py = PyUnicode_Decode(p, l, u->encoding, u->unicode_errors); + } else if (u->raw) { + py = PyBytes_FromStringAndSize(p, l); + } else { + py = PyUnicode_DecodeUTF8(p, l, u->unicode_errors); + } + if (!py) + return -1; + *o = py; + return 0; +} + +static inline int unpack_callback_bin(unpack_user* u, const char* b, const char* p, unsigned int l, msgpack_unpack_object* o) +{ + if (l > u->max_bin_len) { + PyErr_Format(PyExc_ValueError, "%u exceeds max_bin_len(%zd)", l, u->max_bin_len); + return -1; + } + + PyObject *py = PyBytes_FromStringAndSize(p, l); + if (!py) + return -1; + *o = py; + return 0; +} + +static inline int unpack_callback_ext(unpack_user* u, const char* base, const char* pos, + unsigned int 
length, msgpack_unpack_object* o) +{ + PyObject *py; + int8_t typecode = (int8_t)*pos++; + if (!u->ext_hook) { + PyErr_SetString(PyExc_AssertionError, "u->ext_hook cannot be NULL"); + return -1; + } + if (length-1 > u->max_ext_len) { + PyErr_Format(PyExc_ValueError, "%u exceeds max_ext_len(%zd)", length, u->max_ext_len); + return -1; + } + // length also includes the typecode, so the actual data is length-1 +#if PY_MAJOR_VERSION == 2 + py = PyObject_CallFunction(u->ext_hook, "(is#)", (int)typecode, pos, (Py_ssize_t)length-1); +#else + py = PyObject_CallFunction(u->ext_hook, "(iy#)", (int)typecode, pos, (Py_ssize_t)length-1); +#endif + if (!py) + return -1; + *o = py; + return 0; +} + +#include "unpack_template.h" diff --git a/ddtrace/vendor/msgpack/unpack_define.h b/ddtrace/vendor/msgpack/unpack_define.h new file mode 100644 index 0000000000..0dd708d17c --- /dev/null +++ b/ddtrace/vendor/msgpack/unpack_define.h @@ -0,0 +1,95 @@ +/* + * MessagePack unpacking routine template + * + * Copyright (C) 2008-2010 FURUHASHI Sadayuki + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +#ifndef MSGPACK_UNPACK_DEFINE_H__ +#define MSGPACK_UNPACK_DEFINE_H__ + +#include "msgpack/sysdep.h" +#include +#include +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + + +#ifndef MSGPACK_EMBED_STACK_SIZE +#define MSGPACK_EMBED_STACK_SIZE 32 +#endif + + +// CS is first byte & 0x1f +typedef enum { + CS_HEADER = 0x00, // nil + + //CS_ = 0x01, + //CS_ = 0x02, // false + //CS_ = 0x03, // true + + CS_BIN_8 = 0x04, + CS_BIN_16 = 0x05, + CS_BIN_32 = 0x06, + + CS_EXT_8 = 0x07, + CS_EXT_16 = 0x08, + CS_EXT_32 = 0x09, + + CS_FLOAT = 0x0a, + CS_DOUBLE = 0x0b, + CS_UINT_8 = 0x0c, + CS_UINT_16 = 0x0d, + CS_UINT_32 = 0x0e, + CS_UINT_64 = 0x0f, + CS_INT_8 = 0x10, + CS_INT_16 = 0x11, + CS_INT_32 = 0x12, + CS_INT_64 = 0x13, + + //CS_FIXEXT1 = 0x14, + //CS_FIXEXT2 = 0x15, + //CS_FIXEXT4 = 0x16, + //CS_FIXEXT8 = 0x17, + //CS_FIXEXT16 = 0x18, + + CS_RAW_8 = 0x19, + CS_RAW_16 = 0x1a, + CS_RAW_32 = 0x1b, + CS_ARRAY_16 = 0x1c, + CS_ARRAY_32 = 0x1d, + CS_MAP_16 = 0x1e, + CS_MAP_32 = 0x1f, + + ACS_RAW_VALUE, + ACS_BIN_VALUE, + ACS_EXT_VALUE, +} msgpack_unpack_state; + + +typedef enum { + CT_ARRAY_ITEM, + CT_MAP_KEY, + CT_MAP_VALUE, +} msgpack_container_type; + + +#ifdef __cplusplus +} +#endif + +#endif /* msgpack/unpack_define.h */ diff --git a/ddtrace/vendor/msgpack/unpack_template.h b/ddtrace/vendor/msgpack/unpack_template.h new file mode 100644 index 0000000000..9924b9c6f2 --- /dev/null +++ b/ddtrace/vendor/msgpack/unpack_template.h @@ -0,0 +1,454 @@ +/* + * MessagePack unpacking routine template + * + * Copyright (C) 2008-2010 FURUHASHI Sadayuki + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +#ifndef USE_CASE_RANGE +#if !defined(_MSC_VER) +#define USE_CASE_RANGE +#endif +#endif + +typedef struct unpack_stack { + PyObject* obj; + Py_ssize_t size; + Py_ssize_t count; + unsigned int ct; + PyObject* map_key; +} unpack_stack; + +struct unpack_context { + unpack_user user; + unsigned int cs; + unsigned int trail; + unsigned int top; + /* + unpack_stack* stack; + unsigned int stack_size; + unpack_stack embed_stack[MSGPACK_EMBED_STACK_SIZE]; + */ + unpack_stack stack[MSGPACK_EMBED_STACK_SIZE]; +}; + + +static inline void unpack_init(unpack_context* ctx) +{ + ctx->cs = CS_HEADER; + ctx->trail = 0; + ctx->top = 0; + /* + ctx->stack = ctx->embed_stack; + ctx->stack_size = MSGPACK_EMBED_STACK_SIZE; + */ + ctx->stack[0].obj = unpack_callback_root(&ctx->user); +} + +/* +static inline void unpack_destroy(unpack_context* ctx) +{ + if(ctx->stack_size != MSGPACK_EMBED_STACK_SIZE) { + free(ctx->stack); + } +} +*/ + +static inline PyObject* unpack_data(unpack_context* ctx) +{ + return (ctx)->stack[0].obj; +} + +static inline void unpack_clear(unpack_context *ctx) +{ + Py_CLEAR(ctx->stack[0].obj); +} + +template +static inline int unpack_execute(unpack_context* ctx, const char* data, Py_ssize_t len, Py_ssize_t* off) +{ + assert(len >= *off); + + const unsigned char* p = (unsigned char*)data + *off; + const unsigned char* const pe = (unsigned char*)data + len; + const void* n = p; + + unsigned int trail = ctx->trail; + unsigned int cs = ctx->cs; + unsigned int top = ctx->top; + unpack_stack* stack = ctx->stack; + /* + unsigned int stack_size = ctx->stack_size; + */ + unpack_user* user = &ctx->user; + + PyObject* obj = NULL; + unpack_stack* c = NULL; + + int ret; + +#define construct_cb(name) \ + construct && unpack_callback ## name + +#define push_simple_value(func) \ + if(construct_cb(func)(user, &obj) < 0) { goto _failed; } \ + goto _push +#define push_fixed_value(func, arg) \ + if(construct_cb(func)(user, arg, &obj) < 0) { goto _failed; } \ + goto _push +#define push_variable_value(func, base, pos, len) \ + if(construct_cb(func)(user, \ + (const char*)base, (const char*)pos, len, &obj) < 0) { goto _failed; } \ + goto _push + +#define again_fixed_trail(_cs, trail_len) \ + trail = trail_len; \ + cs = _cs; \ + goto _fixed_trail_again +#define again_fixed_trail_if_zero(_cs, trail_len, ifzero) \ + trail = trail_len; \ + if(trail == 0) { goto ifzero; } \ + cs = _cs; \ + goto _fixed_trail_again + +#define start_container(func, count_, ct_) \ + if(top >= MSGPACK_EMBED_STACK_SIZE) { ret = -3; goto _end; } \ + if(construct_cb(func)(user, count_, &stack[top].obj) < 0) { goto _failed; } \ + if((count_) == 0) { obj = stack[top].obj; \ + if (construct_cb(func##_end)(user, &obj) < 0) { goto _failed; } \ + goto _push; } \ + stack[top].ct = ct_; \ + stack[top].size = count_; \ + stack[top].count = 0; \ + ++top; \ + goto _header_again + +#define NEXT_CS(p) ((unsigned int)*p & 0x1f) + +#ifdef USE_CASE_RANGE +#define SWITCH_RANGE_BEGIN switch(*p) { +#define SWITCH_RANGE(FROM, TO) case FROM ... 
TO: +#define SWITCH_RANGE_DEFAULT default: +#define SWITCH_RANGE_END } +#else +#define SWITCH_RANGE_BEGIN { if(0) { +#define SWITCH_RANGE(FROM, TO) } else if(FROM <= *p && *p <= TO) { +#define SWITCH_RANGE_DEFAULT } else { +#define SWITCH_RANGE_END } } +#endif + + if(p == pe) { goto _out; } + do { + switch(cs) { + case CS_HEADER: + SWITCH_RANGE_BEGIN + SWITCH_RANGE(0x00, 0x7f) // Positive Fixnum + push_fixed_value(_uint8, *(uint8_t*)p); + SWITCH_RANGE(0xe0, 0xff) // Negative Fixnum + push_fixed_value(_int8, *(int8_t*)p); + SWITCH_RANGE(0xc0, 0xdf) // Variable + switch(*p) { + case 0xc0: // nil + push_simple_value(_nil); + //case 0xc1: // never used + case 0xc2: // false + push_simple_value(_false); + case 0xc3: // true + push_simple_value(_true); + case 0xc4: // bin 8 + again_fixed_trail(NEXT_CS(p), 1); + case 0xc5: // bin 16 + again_fixed_trail(NEXT_CS(p), 2); + case 0xc6: // bin 32 + again_fixed_trail(NEXT_CS(p), 4); + case 0xc7: // ext 8 + again_fixed_trail(NEXT_CS(p), 1); + case 0xc8: // ext 16 + again_fixed_trail(NEXT_CS(p), 2); + case 0xc9: // ext 32 + again_fixed_trail(NEXT_CS(p), 4); + case 0xca: // float + case 0xcb: // double + case 0xcc: // unsigned int 8 + case 0xcd: // unsigned int 16 + case 0xce: // unsigned int 32 + case 0xcf: // unsigned int 64 + case 0xd0: // signed int 8 + case 0xd1: // signed int 16 + case 0xd2: // signed int 32 + case 0xd3: // signed int 64 + again_fixed_trail(NEXT_CS(p), 1 << (((unsigned int)*p) & 0x03)); + case 0xd4: // fixext 1 + case 0xd5: // fixext 2 + case 0xd6: // fixext 4 + case 0xd7: // fixext 8 + again_fixed_trail_if_zero(ACS_EXT_VALUE, + (1 << (((unsigned int)*p) & 0x03))+1, + _ext_zero); + case 0xd8: // fixext 16 + again_fixed_trail_if_zero(ACS_EXT_VALUE, 16+1, _ext_zero); + case 0xd9: // str 8 + again_fixed_trail(NEXT_CS(p), 1); + case 0xda: // raw 16 + case 0xdb: // raw 32 + case 0xdc: // array 16 + case 0xdd: // array 32 + case 0xde: // map 16 + case 0xdf: // map 32 + again_fixed_trail(NEXT_CS(p), 2 << (((unsigned int)*p) & 0x01)); + default: + ret = -2; + goto _end; + } + SWITCH_RANGE(0xa0, 0xbf) // FixRaw + again_fixed_trail_if_zero(ACS_RAW_VALUE, ((unsigned int)*p & 0x1f), _raw_zero); + SWITCH_RANGE(0x90, 0x9f) // FixArray + start_container(_array, ((unsigned int)*p) & 0x0f, CT_ARRAY_ITEM); + SWITCH_RANGE(0x80, 0x8f) // FixMap + start_container(_map, ((unsigned int)*p) & 0x0f, CT_MAP_KEY); + + SWITCH_RANGE_DEFAULT + ret = -2; + goto _end; + SWITCH_RANGE_END + // end CS_HEADER + + + _fixed_trail_again: + ++p; + + default: + if((size_t)(pe - p) < trail) { goto _out; } + n = p; p += trail - 1; + switch(cs) { + case CS_EXT_8: + again_fixed_trail_if_zero(ACS_EXT_VALUE, *(uint8_t*)n+1, _ext_zero); + case CS_EXT_16: + again_fixed_trail_if_zero(ACS_EXT_VALUE, + _msgpack_load16(uint16_t,n)+1, + _ext_zero); + case CS_EXT_32: + again_fixed_trail_if_zero(ACS_EXT_VALUE, + _msgpack_load32(uint32_t,n)+1, + _ext_zero); + case CS_FLOAT: { + double f = _PyFloat_Unpack4((unsigned char*)n, 0); + push_fixed_value(_float, f); } + case CS_DOUBLE: { + double f = _PyFloat_Unpack8((unsigned char*)n, 0); + push_fixed_value(_double, f); } + case CS_UINT_8: + push_fixed_value(_uint8, *(uint8_t*)n); + case CS_UINT_16: + push_fixed_value(_uint16, _msgpack_load16(uint16_t,n)); + case CS_UINT_32: + push_fixed_value(_uint32, _msgpack_load32(uint32_t,n)); + case CS_UINT_64: + push_fixed_value(_uint64, _msgpack_load64(uint64_t,n)); + + case CS_INT_8: + push_fixed_value(_int8, *(int8_t*)n); + case CS_INT_16: + push_fixed_value(_int16, _msgpack_load16(int16_t,n)); + case 
CS_INT_32: + push_fixed_value(_int32, _msgpack_load32(int32_t,n)); + case CS_INT_64: + push_fixed_value(_int64, _msgpack_load64(int64_t,n)); + + case CS_BIN_8: + again_fixed_trail_if_zero(ACS_BIN_VALUE, *(uint8_t*)n, _bin_zero); + case CS_BIN_16: + again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load16(uint16_t,n), _bin_zero); + case CS_BIN_32: + again_fixed_trail_if_zero(ACS_BIN_VALUE, _msgpack_load32(uint32_t,n), _bin_zero); + case ACS_BIN_VALUE: + _bin_zero: + push_variable_value(_bin, data, n, trail); + + case CS_RAW_8: + again_fixed_trail_if_zero(ACS_RAW_VALUE, *(uint8_t*)n, _raw_zero); + case CS_RAW_16: + again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load16(uint16_t,n), _raw_zero); + case CS_RAW_32: + again_fixed_trail_if_zero(ACS_RAW_VALUE, _msgpack_load32(uint32_t,n), _raw_zero); + case ACS_RAW_VALUE: + _raw_zero: + push_variable_value(_raw, data, n, trail); + + case ACS_EXT_VALUE: + _ext_zero: + push_variable_value(_ext, data, n, trail); + + case CS_ARRAY_16: + start_container(_array, _msgpack_load16(uint16_t,n), CT_ARRAY_ITEM); + case CS_ARRAY_32: + /* FIXME security guard */ + start_container(_array, _msgpack_load32(uint32_t,n), CT_ARRAY_ITEM); + + case CS_MAP_16: + start_container(_map, _msgpack_load16(uint16_t,n), CT_MAP_KEY); + case CS_MAP_32: + /* FIXME security guard */ + start_container(_map, _msgpack_load32(uint32_t,n), CT_MAP_KEY); + + default: + goto _failed; + } + } + +_push: + if(top == 0) { goto _finish; } + c = &stack[top-1]; + switch(c->ct) { + case CT_ARRAY_ITEM: + if(construct_cb(_array_item)(user, c->count, &c->obj, obj) < 0) { goto _failed; } + if(++c->count == c->size) { + obj = c->obj; + if (construct_cb(_array_end)(user, &obj) < 0) { goto _failed; } + --top; + /*printf("stack pop %d\n", top);*/ + goto _push; + } + goto _header_again; + case CT_MAP_KEY: + c->map_key = obj; + c->ct = CT_MAP_VALUE; + goto _header_again; + case CT_MAP_VALUE: + if(construct_cb(_map_item)(user, c->count, &c->obj, c->map_key, obj) < 0) { goto _failed; } + if(++c->count == c->size) { + obj = c->obj; + if (construct_cb(_map_end)(user, &obj) < 0) { goto _failed; } + --top; + /*printf("stack pop %d\n", top);*/ + goto _push; + } + c->ct = CT_MAP_KEY; + goto _header_again; + + default: + goto _failed; + } + +_header_again: + cs = CS_HEADER; + ++p; + } while(p != pe); + goto _out; + + +_finish: + if (!construct) + unpack_callback_nil(user, &obj); + stack[0].obj = obj; + ++p; + ret = 1; + /*printf("-- finish --\n"); */ + goto _end; + +_failed: + /*printf("** FAILED **\n"); */ + ret = -1; + goto _end; + +_out: + ret = 0; + goto _end; + +_end: + ctx->cs = cs; + ctx->trail = trail; + ctx->top = top; + *off = p - (const unsigned char*)data; + + return ret; +#undef construct_cb +} + +#undef SWITCH_RANGE_BEGIN +#undef SWITCH_RANGE +#undef SWITCH_RANGE_DEFAULT +#undef SWITCH_RANGE_END +#undef push_simple_value +#undef push_fixed_value +#undef push_variable_value +#undef again_fixed_trail +#undef again_fixed_trail_if_zero +#undef start_container + +template +static inline int unpack_container_header(unpack_context* ctx, const char* data, Py_ssize_t len, Py_ssize_t* off) +{ + assert(len >= *off); + uint32_t size; + const unsigned char *const p = (unsigned char*)data + *off; + +#define inc_offset(inc) \ + if (len - *off < inc) \ + return 0; \ + *off += inc; + + switch (*p) { + case var_offset: + inc_offset(3); + size = _msgpack_load16(uint16_t, p + 1); + break; + case var_offset + 1: + inc_offset(5); + size = _msgpack_load32(uint32_t, p + 1); + break; +#ifdef USE_CASE_RANGE + case fixed_offset 
+ 0x0 ... fixed_offset + 0xf: +#else + case fixed_offset + 0x0: + case fixed_offset + 0x1: + case fixed_offset + 0x2: + case fixed_offset + 0x3: + case fixed_offset + 0x4: + case fixed_offset + 0x5: + case fixed_offset + 0x6: + case fixed_offset + 0x7: + case fixed_offset + 0x8: + case fixed_offset + 0x9: + case fixed_offset + 0xa: + case fixed_offset + 0xb: + case fixed_offset + 0xc: + case fixed_offset + 0xd: + case fixed_offset + 0xe: + case fixed_offset + 0xf: +#endif + ++*off; + size = ((unsigned int)*p) & 0x0f; + break; + default: + PyErr_SetString(PyExc_ValueError, "Unexpected type header on stream"); + return -1; + } + unpack_callback_uint32(&ctx->user, size, &ctx->stack[0].obj); + return 1; +} + +#undef SWITCH_RANGE_BEGIN +#undef SWITCH_RANGE +#undef SWITCH_RANGE_DEFAULT +#undef SWITCH_RANGE_END + +static const execute_fn unpack_construct = &unpack_execute; +static const execute_fn unpack_skip = &unpack_execute; +static const execute_fn read_array_header = &unpack_container_header<0x90, 0xdc>; +static const execute_fn read_map_header = &unpack_container_header<0x80, 0xde>; + +#undef NEXT_CS + +/* vim: set ts=4 sw=4 sts=4 expandtab */ diff --git a/ddtrace/vendor/six/__init__.py b/ddtrace/vendor/six/__init__.py new file mode 100644 index 0000000000..52409e85bd --- /dev/null +++ b/ddtrace/vendor/six/__init__.py @@ -0,0 +1,891 @@ +# Copyright (c) 2010-2017 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +# SOFTWARE. + +"""Utilities for writing code that runs on Python 2 and 3""" + +from __future__ import absolute_import + +import functools +import itertools +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.11.0" + + +# Useful for very coarse version differentiation. +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 +PY34 = sys.version_info[0:2] >= (3, 4) + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). 
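+        # The probe class below reports a length of 2**31; len() raises
+        # OverflowError when Py_ssize_t is only 32 bits wide, which is how
+        # MAXSIZE is chosen between 2**31 - 1 and 2**63 - 1.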
+ class X(object): + + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) # Invokes __set__. + try: + # This is a bit ugly, but it avoids running this again by + # removing this descriptor. + delattr(obj.__class__, self.name) + except AttributeError: + pass + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + def __getattr__(self, attr): + _module = self._resolve() + value = getattr(_module, attr) + setattr(self, attr, value) + return value + + +class _LazyModule(types.ModuleType): + + def __init__(self, name): + super(_LazyModule, self).__init__(name) + self.__doc__ = self.__class__.__doc__ + + def __dir__(self): + attrs = ["__doc__", "__name__"] + attrs += [attr.name for attr in self._moved_attributes] + return attrs + + # Subclasses should override this + _moved_attributes = [] + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + +class _SixMetaPathImporter(object): + + """ + A meta path importer to import six.moves and its submodules. + + This class implements a PEP302 finder and loader. It should be compatible + with Python 2.5 and all existing versions of Python3 + """ + + def __init__(self, six_module_name): + self.name = six_module_name + self.known_modules = {} + + def _add_module(self, mod, *fullnames): + for fullname in fullnames: + self.known_modules[self.name + "." + fullname] = mod + + def _get_module(self, fullname): + return self.known_modules[self.name + "." + fullname] + + def find_module(self, fullname, path=None): + if fullname in self.known_modules: + return self + return None + + def __get_module(self, fullname): + try: + return self.known_modules[fullname] + except KeyError: + raise ImportError("This loader does not know module " + fullname) + + def load_module(self, fullname): + try: + # in case of a reload + return sys.modules[fullname] + except KeyError: + pass + mod = self.__get_module(fullname) + if isinstance(mod, MovedModule): + mod = mod._resolve() + else: + mod.__loader__ = self + sys.modules[fullname] = mod + return mod + + def is_package(self, fullname): + """ + Return true, if the named module is a package. 
+ + We need this method to get correct spec objects with + Python 3.4 (see PEP451) + """ + return hasattr(self.__get_module(fullname), "__path__") + + def get_code(self, fullname): + """Return None + + Required, if is_package is implemented""" + self.__get_module(fullname) # eventually raises ImportError + return None + get_source = get_code # same as get_code + +_importer = _SixMetaPathImporter(__name__) + + +class _MovedItems(_LazyModule): + + """Lazy loading of moved objects""" + __path__ = [] # mark as package + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("filterfalse", "itertools", "itertools", "ifilterfalse", "filterfalse"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("intern", "__builtin__", "sys"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("getcwd", "os", "os", "getcwdu", "getcwd"), + MovedAttribute("getcwdb", "os", "os", "getcwd", "getcwdb"), + MovedAttribute("getoutput", "commands", "subprocess"), + MovedAttribute("range", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("reload_module", "__builtin__", "importlib" if PY34 else "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("shlex_quote", "pipes", "shlex", "quote"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("UserDict", "UserDict", "collections"), + MovedAttribute("UserList", "UserList", "collections"), + MovedAttribute("UserString", "UserString", "collections"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + MovedAttribute("zip_longest", "itertools", "itertools", "izip_longest", "zip_longest"), + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("dbm_gnu", "gdbm", "dbm.gnu"), + MovedModule("_dummy_thread", "dummy_thread", "_dummy_thread"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("email_mime_image", "email.MIMEImage", "email.mime.image"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_nonmultipart", "email.MIMENonMultipart", "email.mime.nonmultipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("_thread", "thread", "_thread"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + 
MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_ttk", "ttk", "tkinter.ttk"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + MovedModule("urllib_parse", __name__ + ".moves.urllib_parse", "urllib.parse"), + MovedModule("urllib_error", __name__ + ".moves.urllib_error", "urllib.error"), + MovedModule("urllib", __name__ + ".moves.urllib", __name__ + ".moves.urllib"), + MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("xmlrpc_client", "xmlrpclib", "xmlrpc.client"), + MovedModule("xmlrpc_server", "SimpleXMLRPCServer", "xmlrpc.server"), +] +# Add windows specific modules. +if sys.platform == "win32": + _moved_attributes += [ + MovedModule("winreg", "_winreg"), + ] + +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) + if isinstance(attr, MovedModule): + _importer._add_module(attr, "moves." + attr.name) +del attr + +_MovedItems._moved_attributes = _moved_attributes + +moves = _MovedItems(__name__ + ".moves") +_importer._add_module(moves, "moves") + + +class Module_six_moves_urllib_parse(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_parse""" + + +_urllib_parse_moved_attributes = [ + MovedAttribute("ParseResult", "urlparse", "urllib.parse"), + MovedAttribute("SplitResult", "urlparse", "urllib.parse"), + MovedAttribute("parse_qs", "urlparse", "urllib.parse"), + MovedAttribute("parse_qsl", "urlparse", "urllib.parse"), + MovedAttribute("urldefrag", "urlparse", "urllib.parse"), + MovedAttribute("urljoin", "urlparse", "urllib.parse"), + MovedAttribute("urlparse", "urlparse", "urllib.parse"), + MovedAttribute("urlsplit", "urlparse", "urllib.parse"), + MovedAttribute("urlunparse", "urlparse", "urllib.parse"), + MovedAttribute("urlunsplit", "urlparse", "urllib.parse"), + MovedAttribute("quote", "urllib", "urllib.parse"), + MovedAttribute("quote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote", "urllib", "urllib.parse"), + MovedAttribute("unquote_plus", "urllib", "urllib.parse"), + MovedAttribute("unquote_to_bytes", "urllib", "urllib.parse", "unquote", "unquote_to_bytes"), + MovedAttribute("urlencode", "urllib", "urllib.parse"), + MovedAttribute("splitquery", "urllib", "urllib.parse"), + MovedAttribute("splittag", "urllib", "urllib.parse"), + MovedAttribute("splituser", "urllib", "urllib.parse"), + MovedAttribute("splitvalue", "urllib", "urllib.parse"), + MovedAttribute("uses_fragment", "urlparse", "urllib.parse"), + MovedAttribute("uses_netloc", "urlparse", "urllib.parse"), + MovedAttribute("uses_params", "urlparse", "urllib.parse"), + MovedAttribute("uses_query", "urlparse", "urllib.parse"), + MovedAttribute("uses_relative", "urlparse", "urllib.parse"), +] +for attr in _urllib_parse_moved_attributes: + setattr(Module_six_moves_urllib_parse, attr.name, attr) +del attr + +Module_six_moves_urllib_parse._moved_attributes = _urllib_parse_moved_attributes + +_importer._add_module(Module_six_moves_urllib_parse(__name__ + ".moves.urllib_parse"), + 
"moves.urllib_parse", "moves.urllib.parse") + + +class Module_six_moves_urllib_error(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_error""" + + +_urllib_error_moved_attributes = [ + MovedAttribute("URLError", "urllib2", "urllib.error"), + MovedAttribute("HTTPError", "urllib2", "urllib.error"), + MovedAttribute("ContentTooShortError", "urllib", "urllib.error"), +] +for attr in _urllib_error_moved_attributes: + setattr(Module_six_moves_urllib_error, attr.name, attr) +del attr + +Module_six_moves_urllib_error._moved_attributes = _urllib_error_moved_attributes + +_importer._add_module(Module_six_moves_urllib_error(__name__ + ".moves.urllib.error"), + "moves.urllib_error", "moves.urllib.error") + + +class Module_six_moves_urllib_request(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_request""" + + +_urllib_request_moved_attributes = [ + MovedAttribute("urlopen", "urllib2", "urllib.request"), + MovedAttribute("install_opener", "urllib2", "urllib.request"), + MovedAttribute("build_opener", "urllib2", "urllib.request"), + MovedAttribute("pathname2url", "urllib", "urllib.request"), + MovedAttribute("url2pathname", "urllib", "urllib.request"), + MovedAttribute("getproxies", "urllib", "urllib.request"), + MovedAttribute("Request", "urllib2", "urllib.request"), + MovedAttribute("OpenerDirector", "urllib2", "urllib.request"), + MovedAttribute("HTTPDefaultErrorHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPRedirectHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPCookieProcessor", "urllib2", "urllib.request"), + MovedAttribute("ProxyHandler", "urllib2", "urllib.request"), + MovedAttribute("BaseHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgr", "urllib2", "urllib.request"), + MovedAttribute("HTTPPasswordMgrWithDefaultRealm", "urllib2", "urllib.request"), + MovedAttribute("AbstractBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyBasicAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("AbstractDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("ProxyDigestAuthHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPSHandler", "urllib2", "urllib.request"), + MovedAttribute("FileHandler", "urllib2", "urllib.request"), + MovedAttribute("FTPHandler", "urllib2", "urllib.request"), + MovedAttribute("CacheFTPHandler", "urllib2", "urllib.request"), + MovedAttribute("UnknownHandler", "urllib2", "urllib.request"), + MovedAttribute("HTTPErrorProcessor", "urllib2", "urllib.request"), + MovedAttribute("urlretrieve", "urllib", "urllib.request"), + MovedAttribute("urlcleanup", "urllib", "urllib.request"), + MovedAttribute("URLopener", "urllib", "urllib.request"), + MovedAttribute("FancyURLopener", "urllib", "urllib.request"), + MovedAttribute("proxy_bypass", "urllib", "urllib.request"), + MovedAttribute("parse_http_list", "urllib2", "urllib.request"), + MovedAttribute("parse_keqv_list", "urllib2", "urllib.request"), +] +for attr in _urllib_request_moved_attributes: + setattr(Module_six_moves_urllib_request, attr.name, attr) +del attr + +Module_six_moves_urllib_request._moved_attributes = _urllib_request_moved_attributes + +_importer._add_module(Module_six_moves_urllib_request(__name__ + ".moves.urllib.request"), + "moves.urllib_request", "moves.urllib.request") + + 
+class Module_six_moves_urllib_response(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_response""" + + +_urllib_response_moved_attributes = [ + MovedAttribute("addbase", "urllib", "urllib.response"), + MovedAttribute("addclosehook", "urllib", "urllib.response"), + MovedAttribute("addinfo", "urllib", "urllib.response"), + MovedAttribute("addinfourl", "urllib", "urllib.response"), +] +for attr in _urllib_response_moved_attributes: + setattr(Module_six_moves_urllib_response, attr.name, attr) +del attr + +Module_six_moves_urllib_response._moved_attributes = _urllib_response_moved_attributes + +_importer._add_module(Module_six_moves_urllib_response(__name__ + ".moves.urllib.response"), + "moves.urllib_response", "moves.urllib.response") + + +class Module_six_moves_urllib_robotparser(_LazyModule): + + """Lazy loading of moved objects in six.moves.urllib_robotparser""" + + +_urllib_robotparser_moved_attributes = [ + MovedAttribute("RobotFileParser", "robotparser", "urllib.robotparser"), +] +for attr in _urllib_robotparser_moved_attributes: + setattr(Module_six_moves_urllib_robotparser, attr.name, attr) +del attr + +Module_six_moves_urllib_robotparser._moved_attributes = _urllib_robotparser_moved_attributes + +_importer._add_module(Module_six_moves_urllib_robotparser(__name__ + ".moves.urllib.robotparser"), + "moves.urllib_robotparser", "moves.urllib.robotparser") + + +class Module_six_moves_urllib(types.ModuleType): + + """Create a six.moves.urllib namespace that resembles the Python 3 namespace""" + __path__ = [] # mark as package + parse = _importer._get_module("moves.urllib_parse") + error = _importer._get_module("moves.urllib_error") + request = _importer._get_module("moves.urllib_request") + response = _importer._get_module("moves.urllib_response") + robotparser = _importer._get_module("moves.urllib_robotparser") + + def __dir__(self): + return ['parse', 'error', 'request', 'response', 'robotparser'] + +_importer._add_module(Module_six_moves_urllib(__name__ + ".moves.urllib"), + "moves.urllib") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_closure = "__closure__" + _func_code = "__code__" + _func_defaults = "__defaults__" + _func_globals = "__globals__" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_closure = "func_closure" + _func_code = "func_code" + _func_defaults = "func_defaults" + _func_globals = "func_globals" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +try: + callable = callable +except NameError: + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) + + +if PY3: + def get_unbound_function(unbound): + return unbound + + create_bound_method = types.MethodType + + def create_unbound_method(func, cls): + return func + + Iterator = object +else: + def get_unbound_function(unbound): + return unbound.im_func + + def create_bound_method(func, obj): + return types.MethodType(func, obj, obj.__class__) + + def create_unbound_method(func, cls): + return types.MethodType(func, None, cls) + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + 
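+    # Python 2 subclasses implement __next__ (the Python 3 protocol) and
+    # inherit this next() shim, so a single class body stays iterable under
+    # both major versions.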
callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_closure = operator.attrgetter(_func_closure) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) +get_function_globals = operator.attrgetter(_func_globals) + + +if PY3: + def iterkeys(d, **kw): + return iter(d.keys(**kw)) + + def itervalues(d, **kw): + return iter(d.values(**kw)) + + def iteritems(d, **kw): + return iter(d.items(**kw)) + + def iterlists(d, **kw): + return iter(d.lists(**kw)) + + viewkeys = operator.methodcaller("keys") + + viewvalues = operator.methodcaller("values") + + viewitems = operator.methodcaller("items") +else: + def iterkeys(d, **kw): + return d.iterkeys(**kw) + + def itervalues(d, **kw): + return d.itervalues(**kw) + + def iteritems(d, **kw): + return d.iteritems(**kw) + + def iterlists(d, **kw): + return d.iterlists(**kw) + + viewkeys = operator.methodcaller("viewkeys") + + viewvalues = operator.methodcaller("viewvalues") + + viewitems = operator.methodcaller("viewitems") + +_add_doc(iterkeys, "Return an iterator over the keys of a dictionary.") +_add_doc(itervalues, "Return an iterator over the values of a dictionary.") +_add_doc(iteritems, + "Return an iterator over the (key, value) pairs of a dictionary.") +_add_doc(iterlists, + "Return an iterator over the (key, [values]) pairs of a dictionary.") + + +if PY3: + def b(s): + return s.encode("latin-1") + + def u(s): + return s + unichr = chr + import struct + int2byte = struct.Struct(">B").pack + del struct + byte2int = operator.itemgetter(0) + indexbytes = operator.getitem + iterbytes = iter + import io + StringIO = io.StringIO + BytesIO = io.BytesIO + _assertCountEqual = "assertCountEqual" + if sys.version_info[1] <= 1: + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" + else: + _assertRaisesRegex = "assertRaisesRegex" + _assertRegex = "assertRegex" +else: + def b(s): + return s + # Workaround for standalone backslash + + def u(s): + return unicode(s.replace(r'\\', r'\\\\'), "unicode_escape") + unichr = unichr + int2byte = chr + + def byte2int(bs): + return ord(bs[0]) + + def indexbytes(buf, i): + return ord(buf[i]) + iterbytes = functools.partial(itertools.imap, ord) + import StringIO + StringIO = BytesIO = StringIO.StringIO + _assertCountEqual = "assertItemsEqual" + _assertRaisesRegex = "assertRaisesRegexp" + _assertRegex = "assertRegexpMatches" +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +def assertCountEqual(self, *args, **kwargs): + return getattr(self, _assertCountEqual)(*args, **kwargs) + + +def assertRaisesRegex(self, *args, **kwargs): + return getattr(self, _assertRaisesRegex)(*args, **kwargs) + + +def assertRegex(self, *args, **kwargs): + return getattr(self, _assertRegex)(*args, **kwargs) + + +if PY3: + exec_ = getattr(moves.builtins, "exec") + + def reraise(tp, value, tb=None): + try: + if value is None: + value = tp() + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + finally: + value = None + tb = None + +else: + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, 
_locs_""") + + exec_("""def reraise(tp, value, tb=None): + try: + raise tp, value, tb + finally: + tb = None +""") + + +if sys.version_info[:2] == (3, 2): + exec_("""def raise_from(value, from_value): + try: + if from_value is None: + raise value + raise value from from_value + finally: + value = None +""") +elif sys.version_info[:2] > (3, 2): + exec_("""def raise_from(value, from_value): + try: + raise value from from_value + finally: + value = None +""") +else: + def raise_from(value, from_value): + raise value + + +print_ = getattr(moves.builtins, "print", None) +if print_ is None: + def print_(*args, **kwargs): + """The new-style print function for Python 2.4 and 2.5.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + + def write(data): + if not isinstance(data, basestring): + data = str(data) + # If the file has an encoding, encode unicode with it. + if (isinstance(fp, file) and + isinstance(data, unicode) and + fp.encoding is not None): + errors = getattr(fp, "errors", None) + if errors is None: + errors = "strict" + data = data.encode(fp.encoding, errors) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) +if sys.version_info[:2] < (3, 3): + _print = print_ + + def print_(*args, **kwargs): + fp = kwargs.get("file", sys.stdout) + flush = kwargs.pop("flush", False) + _print(*args, **kwargs) + if flush and fp is not None: + fp.flush() + +_add_doc(reraise, """Reraise an exception.""") + +if sys.version_info[0:2] < (3, 4): + def wraps(wrapped, assigned=functools.WRAPPER_ASSIGNMENTS, + updated=functools.WRAPPER_UPDATES): + def wrapper(f): + f = functools.wraps(wrapped, assigned, updated)(f) + f.__wrapped__ = wrapped + return f + return wrapper +else: + wraps = functools.wraps + + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + # This requires a bit of explanation: the basic idea is to make a dummy + # metaclass for one level of class instantiation that replaces itself with + # the actual metaclass. 
+ class metaclass(type): + + def __new__(cls, name, this_bases, d): + return meta(name, bases, d) + + @classmethod + def __prepare__(cls, name, this_bases): + return meta.__prepare__(name, bases) + return type.__new__(metaclass, 'temporary_class', (), {}) + + +def add_metaclass(metaclass): + """Class decorator for creating a class with a metaclass.""" + def wrapper(cls): + orig_vars = cls.__dict__.copy() + slots = orig_vars.get('__slots__') + if slots is not None: + if isinstance(slots, str): + slots = [slots] + for slots_var in slots: + orig_vars.pop(slots_var) + orig_vars.pop('__dict__', None) + orig_vars.pop('__weakref__', None) + return metaclass(cls.__name__, cls.__bases__, orig_vars) + return wrapper + + +def python_2_unicode_compatible(klass): + """ + A decorator that defines __unicode__ and __str__ methods under Python 2. + Under Python 3 it does nothing. + + To support Python 2 and 3 with a single code base, define a __str__ method + returning text and apply this decorator to the class. + """ + if PY2: + if '__str__' not in klass.__dict__: + raise ValueError("@python_2_unicode_compatible cannot be applied " + "to %s because it doesn't define __str__()." % + klass.__name__) + klass.__unicode__ = klass.__str__ + klass.__str__ = lambda self: self.__unicode__().encode('utf-8') + return klass + + +# Complete the moves implementation. +# This code is at the end of this module to speed up module loading. +# Turn this module into a package. +__path__ = [] # required for PEP 302 and PEP 451 +__package__ = __name__ # see PEP 366 @ReservedAssignment +if globals().get("__spec__") is not None: + __spec__.submodule_search_locations = [] # PEP 451 @UndefinedVariable +# Remove other six meta path importers, since they cause problems. This can +# happen if six is removed from sys.modules and then reloaded. (Setuptools does +# this for some reason.) +if sys.meta_path: + for i, importer in enumerate(sys.meta_path): + # Here's some real nastiness: Another "instance" of the six module might + # be floating around. Therefore, we can't use isinstance() to check for + # the six meta path importer, since the other six instance will have + # inserted an importer with different class. + if (type(importer).__name__ == "_SixMetaPathImporter" and + importer.name == __name__): + del sys.meta_path[i] + break + del i, importer +# Finally, add the importer to the meta path import hook. +sys.meta_path.append(_importer) \ No newline at end of file diff --git a/ddtrace/vendor/wrapt/__init__.py b/ddtrace/vendor/wrapt/__init__.py new file mode 100644 index 0000000000..dbfd5b4003 --- /dev/null +++ b/ddtrace/vendor/wrapt/__init__.py @@ -0,0 +1,42 @@ +""" +Copyright (c) 2013-2019, Graham Dumpleton +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. +""" +__version_info__ = ('1', '11', '1') +__version__ = '.'.join(__version_info__) + +from .wrappers import (ObjectProxy, CallableObjectProxy, FunctionWrapper, + BoundFunctionWrapper, WeakFunctionProxy, PartialCallableObjectProxy, + resolve_path, apply_patch, wrap_object, wrap_object_attribute, + function_wrapper, wrap_function_wrapper, patch_function_wrapper, + transient_function_wrapper) + +from .decorators import (adapter_factory, AdapterFactory, decorator, + synchronized) + +from .importer import (register_post_import_hook, when_imported, + notify_module_loaded, discover_post_import_hooks) + +from inspect import getcallargs diff --git a/ddtrace/vendor/wrapt/_wrappers.c b/ddtrace/vendor/wrapt/_wrappers.c new file mode 100644 index 0000000000..0569a5a7f9 --- /dev/null +++ b/ddtrace/vendor/wrapt/_wrappers.c @@ -0,0 +1,3066 @@ +/* ------------------------------------------------------------------------- */ + +#include "Python.h" + +#include "structmember.h" + +#ifndef PyVarObject_HEAD_INIT +#define PyVarObject_HEAD_INIT(type, size) PyObject_HEAD_INIT(type) size, +#endif + +/* ------------------------------------------------------------------------- */ + +typedef struct { + PyObject_HEAD + + PyObject *dict; + PyObject *wrapped; + PyObject *weakreflist; +} WraptObjectProxyObject; + +PyTypeObject WraptObjectProxy_Type; +PyTypeObject WraptCallableObjectProxy_Type; + +typedef struct { + WraptObjectProxyObject object_proxy; + + PyObject *args; + PyObject *kwargs; +} WraptPartialCallableObjectProxyObject; + +PyTypeObject WraptPartialCallableObjectProxy_Type; + +typedef struct { + WraptObjectProxyObject object_proxy; + + PyObject *instance; + PyObject *wrapper; + PyObject *enabled; + PyObject *binding; + PyObject *parent; +} WraptFunctionWrapperObject; + +PyTypeObject WraptFunctionWrapperBase_Type; +PyTypeObject WraptBoundFunctionWrapper_Type; +PyTypeObject WraptFunctionWrapper_Type; + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_new(PyTypeObject *type, + PyObject *args, PyObject *kwds) +{ + WraptObjectProxyObject *self; + + self = (WraptObjectProxyObject *)type->tp_alloc(type, 0); + + if (!self) + return NULL; + + self->dict = PyDict_New(); + self->wrapped = NULL; + self->weakreflist = NULL; + + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_raw_init(WraptObjectProxyObject *self, + PyObject *wrapped) +{ + static PyObject *module_str = NULL; + static PyObject *doc_str = NULL; + + PyObject *object = NULL; + + Py_INCREF(wrapped); + Py_XDECREF(self->wrapped); + self->wrapped = wrapped; + + if (!module_str) { +#if PY_MAJOR_VERSION >= 3 + module_str = PyUnicode_InternFromString("__module__"); +#else + module_str = PyString_InternFromString("__module__"); +#endif + } + + if (!doc_str) { +#if PY_MAJOR_VERSION >= 3 + doc_str = PyUnicode_InternFromString("__doc__"); +#else + doc_str = PyString_InternFromString("__doc__"); 
+#endif + } + + object = PyObject_GetAttr(wrapped, module_str); + + if (object) { + if (PyDict_SetItem(self->dict, module_str, object) == -1) { + Py_DECREF(object); + return -1; + } + Py_DECREF(object); + } + else + PyErr_Clear(); + + object = PyObject_GetAttr(wrapped, doc_str); + + if (object) { + if (PyDict_SetItem(self->dict, doc_str, object) == -1) { + Py_DECREF(object); + return -1; + } + Py_DECREF(object); + } + else + PyErr_Clear(); + + return 0; +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_init(WraptObjectProxyObject *self, + PyObject *args, PyObject *kwds) +{ + PyObject *wrapped = NULL; + + static char *kwlist[] = { "wrapped", NULL }; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "O:ObjectProxy", + kwlist, &wrapped)) { + return -1; + } + + return WraptObjectProxy_raw_init(self, wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_traverse(WraptObjectProxyObject *self, + visitproc visit, void *arg) +{ + Py_VISIT(self->dict); + Py_VISIT(self->wrapped); + + return 0; +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_clear(WraptObjectProxyObject *self) +{ + Py_CLEAR(self->dict); + Py_CLEAR(self->wrapped); + + return 0; +} + +/* ------------------------------------------------------------------------- */ + +static void WraptObjectProxy_dealloc(WraptObjectProxyObject *self) +{ + PyObject_GC_UnTrack(self); + + if (self->weakreflist != NULL) + PyObject_ClearWeakRefs((PyObject *)self); + + WraptObjectProxy_clear(self); + + Py_TYPE(self)->tp_free(self); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_repr(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + +#if PY_MAJOR_VERSION >= 3 + return PyUnicode_FromFormat("<%s at %p for %s at %p>", + Py_TYPE(self)->tp_name, self, + Py_TYPE(self->wrapped)->tp_name, self->wrapped); +#else + return PyString_FromFormat("<%s at %p for %s at %p>", + Py_TYPE(self)->tp_name, self, + Py_TYPE(self->wrapped)->tp_name, self->wrapped); +#endif +} + +/* ------------------------------------------------------------------------- */ + +#if PY_MAJOR_VERSION < 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION < 3) +typedef long Py_hash_t; +#endif + +static Py_hash_t WraptObjectProxy_hash(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return -1; + } + + return PyObject_Hash(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_str(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_Str(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_add(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if 
(!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_Add(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_subtract(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + + return PyNumber_Subtract(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_multiply(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_Multiply(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + +#if PY_MAJOR_VERSION < 3 +static PyObject *WraptObjectProxy_divide(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_Divide(o1, o2); +} +#endif + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_remainder(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_Remainder(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_divmod(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return 
NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_Divmod(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_power(PyObject *o1, PyObject *o2, + PyObject *modulo) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_Power(o1, o2, modulo); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_negative(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyNumber_Negative(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_positive(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyNumber_Positive(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_absolute(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyNumber_Absolute(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_bool(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return -1; + } + + return PyObject_IsTrue(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_invert(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyNumber_Invert(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_lshift(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_Lshift(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + 
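+/* NOTE: Each of the binary operator slots in this file repeats one pattern: + if either operand is an ObjectProxy, insist that it has been initialized, + substitute its wrapped object, and delegate to the matching abstract + PyNumber_* call. This is what makes an expression such as + "ObjectProxy(3) >> 1" evaluate exactly like "3 >> 1". */ +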
+static PyObject *WraptObjectProxy_rshift(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_Rshift(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_and(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_And(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_xor(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_Xor(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_or(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_Or(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + +#if PY_MAJOR_VERSION < 3 +static PyObject *WraptObjectProxy_int(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyNumber_Int(self->wrapped); +} +#endif + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_long(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyNumber_Long(self->wrapped); +} + +/* 
------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_float(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyNumber_Float(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +#if PY_MAJOR_VERSION < 3 +static PyObject *WraptObjectProxy_oct(WraptObjectProxyObject *self) +{ + PyNumberMethods *nb; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if ((nb = self->wrapped->ob_type->tp_as_number) == NULL || + nb->nb_oct == NULL) { + PyErr_SetString(PyExc_TypeError, + "oct() argument can't be converted to oct"); + return NULL; + } + + return (*nb->nb_oct)(self->wrapped); +} +#endif + +/* ------------------------------------------------------------------------- */ + +#if PY_MAJOR_VERSION < 3 +static PyObject *WraptObjectProxy_hex(WraptObjectProxyObject *self) +{ + PyNumberMethods *nb; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if ((nb = self->wrapped->ob_type->tp_as_number) == NULL || + nb->nb_hex == NULL) { + PyErr_SetString(PyExc_TypeError, + "hex() argument can't be converted to hex"); + return NULL; + } + + return (*nb->nb_hex)(self->wrapped); +} +#endif + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_add(WraptObjectProxyObject *self, + PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlaceAdd(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_subtract( + WraptObjectProxyObject *self, PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlaceSubtract(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_multiply( + WraptObjectProxyObject *self, PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlaceMultiply(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +#if PY_MAJOR_VERSION < 3 +static PyObject 
*WraptObjectProxy_inplace_divide( + WraptObjectProxyObject *self, PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlaceDivide(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} +#endif + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_remainder( + WraptObjectProxyObject *self, PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlaceRemainder(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_power(WraptObjectProxyObject *self, + PyObject *other, PyObject *modulo) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlacePower(self->wrapped, other, modulo); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_lshift(WraptObjectProxyObject *self, + PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlaceLshift(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_rshift(WraptObjectProxyObject *self, + PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlaceRshift(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_and(WraptObjectProxyObject *self, + PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if 
(PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlaceAnd(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_xor(WraptObjectProxyObject *self, + PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlaceXor(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_or(WraptObjectProxyObject *self, + PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlaceOr(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_floor_divide(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_FloorDivide(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_true_divide(PyObject *o1, PyObject *o2) +{ + if (PyObject_IsInstance(o1, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o1)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o1 = ((WraptObjectProxyObject *)o1)->wrapped; + } + + if (PyObject_IsInstance(o2, (PyObject *)&WraptObjectProxy_Type)) { + if (!((WraptObjectProxyObject *)o2)->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + o2 = ((WraptObjectProxyObject *)o2)->wrapped; + } + + return PyNumber_TrueDivide(o1, o2); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_floor_divide( + WraptObjectProxyObject *self, PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; 
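+ /* Delegate the in-place operation to the wrapped object, then rebind the + proxy to the result: after "p //= x" the proxy itself survives and only + its wrapped target changes (the old target is released, the new one + kept). Every nb_inplace_* slot in this file follows this same pattern. */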
+ + object = PyNumber_InPlaceFloorDivide(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_inplace_true_divide( + WraptObjectProxyObject *self, PyObject *other) +{ + PyObject *object = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + if (PyObject_IsInstance(other, (PyObject *)&WraptObjectProxy_Type)) + other = ((WraptObjectProxyObject *)other)->wrapped; + + object = PyNumber_InPlaceTrueDivide(self->wrapped, other); + + if (!object) + return NULL; + + Py_DECREF(self->wrapped); + self->wrapped = object; + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_index(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyNumber_Index(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static Py_ssize_t WraptObjectProxy_length(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return -1; + } + + return PyObject_Length(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_contains(WraptObjectProxyObject *self, + PyObject *value) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return -1; + } + + return PySequence_Contains(self->wrapped, value); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_getitem(WraptObjectProxyObject *self, + PyObject *key) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_GetItem(self->wrapped, key); +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_setitem(WraptObjectProxyObject *self, + PyObject *key, PyObject* value) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return -1; + } + + if (value == NULL) + return PyObject_DelItem(self->wrapped, key); + else + return PyObject_SetItem(self->wrapped, key, value); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_dir( + WraptObjectProxyObject *self, PyObject *args) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_Dir(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_enter( + WraptObjectProxyObject *self, PyObject *args, PyObject *kwds) +{ + PyObject *method = NULL; + PyObject *result = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + method = PyObject_GetAttrString(self->wrapped, "__enter__"); + + if (!method) + return NULL; + + result = PyObject_Call(method, args, kwds); + + Py_DECREF(method); + + return result; +} + +/* 
------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_exit( + WraptObjectProxyObject *self, PyObject *args, PyObject *kwds) +{ + PyObject *method = NULL; + PyObject *result = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + method = PyObject_GetAttrString(self->wrapped, "__exit__"); + + if (!method) + return NULL; + + result = PyObject_Call(method, args, kwds); + + Py_DECREF(method); + + return result; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_copy( + WraptObjectProxyObject *self, PyObject *args, PyObject *kwds) +{ + PyErr_SetString(PyExc_NotImplementedError, + "object proxy must define __copy__()"); + + return NULL; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_deepcopy( + WraptObjectProxyObject *self, PyObject *args, PyObject *kwds) +{ + PyErr_SetString(PyExc_NotImplementedError, + "object proxy must define __deepcopy__()"); + + return NULL; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_reduce( + WraptObjectProxyObject *self, PyObject *args, PyObject *kwds) +{ + PyErr_SetString(PyExc_NotImplementedError, + "object proxy must define __reduce_ex__()"); + + return NULL; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_reduce_ex( + WraptObjectProxyObject *self, PyObject *args, PyObject *kwds) +{ + PyErr_SetString(PyExc_NotImplementedError, + "object proxy must define __reduce_ex__()"); + + return NULL; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_bytes( + WraptObjectProxyObject *self, PyObject *args) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_Bytes(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_reversed( + WraptObjectProxyObject *self, PyObject *args) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_CallFunctionObjArgs((PyObject *)&PyReversed_Type, + self->wrapped, NULL); +} + +/* ------------------------------------------------------------------------- */ + +#if PY_MAJOR_VERSION >= 3 +static PyObject *WraptObjectProxy_round( + WraptObjectProxyObject *self, PyObject *args) +{ + PyObject *module = NULL; + PyObject *dict = NULL; + PyObject *round = NULL; + + PyObject *result = NULL; + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + module = PyImport_ImportModule("builtins"); + + if (!module) + return NULL; + + dict = PyModule_GetDict(module); + round = PyDict_GetItemString(dict, "round"); + + if (!round) { + Py_DECREF(module); + return NULL; + } + + Py_INCREF(round); + Py_DECREF(module); + + result = PyObject_CallFunctionObjArgs(round, self->wrapped, NULL); + + Py_DECREF(round); + + return result; +} +#endif + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_complex( + WraptObjectProxyObject *self, PyObject *args) +{ + if (!self->wrapped) { + 
PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_CallFunctionObjArgs((PyObject *)&PyComplex_Type, + self->wrapped, NULL); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_get_name( + WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_GetAttrString(self->wrapped, "__name__"); +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_set_name(WraptObjectProxyObject *self, + PyObject *value) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return -1; + } + + return PyObject_SetAttrString(self->wrapped, "__name__", value); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_get_qualname( + WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_GetAttrString(self->wrapped, "__qualname__"); +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_set_qualname(WraptObjectProxyObject *self, + PyObject *value) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return -1; + } + + return PyObject_SetAttrString(self->wrapped, "__qualname__", value); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_get_module( + WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_GetAttrString(self->wrapped, "__module__"); +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_set_module(WraptObjectProxyObject *self, + PyObject *value) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return -1; + } + + if (PyObject_SetAttrString(self->wrapped, "__module__", value) == -1) + return -1; + + return PyDict_SetItemString(self->dict, "__module__", value); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_get_doc( + WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_GetAttrString(self->wrapped, "__doc__"); +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_set_doc(WraptObjectProxyObject *self, + PyObject *value) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return -1; + } + + if (PyObject_SetAttrString(self->wrapped, "__doc__", value) == -1) + return -1; + + return PyDict_SetItemString(self->dict, "__doc__", value); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_get_class( + WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_GetAttrString(self->wrapped, "__class__"); +} + +/* 
------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_get_annotations( + WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_GetAttrString(self->wrapped, "__annotations__"); +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_set_annotations(WraptObjectProxyObject *self, + PyObject *value) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return -1; + } + + return PyObject_SetAttrString(self->wrapped, "__annotations__", value); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_get_wrapped( + WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + Py_INCREF(self->wrapped); + return self->wrapped; +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_set_wrapped(WraptObjectProxyObject *self, + PyObject *value) +{ + if (!value) { + PyErr_SetString(PyExc_TypeError, "__wrapped__ must be an object"); + return -1; + } + + Py_INCREF(value); + Py_XDECREF(self->wrapped); + + self->wrapped = value; + + return 0; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_getattro( + WraptObjectProxyObject *self, PyObject *name) +{ + PyObject *object = NULL; + PyObject *result = NULL; + + static PyObject *getattr_str = NULL; + + object = PyObject_GenericGetAttr((PyObject *)self, name); + + if (object) + return object; + + PyErr_Clear(); + + if (!getattr_str) { +#if PY_MAJOR_VERSION >= 3 + getattr_str = PyUnicode_InternFromString("__getattr__"); +#else + getattr_str = PyString_InternFromString("__getattr__"); +#endif + } + + object = PyObject_GenericGetAttr((PyObject *)self, getattr_str); + + if (!object) + return NULL; + + result = PyObject_CallFunctionObjArgs(object, name, NULL); + + Py_DECREF(object); + + return result; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_getattr( + WraptObjectProxyObject *self, PyObject *args) +{ + PyObject *name = NULL; + +#if PY_MAJOR_VERSION >= 3 + if (!PyArg_ParseTuple(args, "U:__getattr__", &name)) + return NULL; +#else + if (!PyArg_ParseTuple(args, "S:__getattr__", &name)) + return NULL; +#endif + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_GetAttr(self->wrapped, name); +} + +/* ------------------------------------------------------------------------- */ + +static int WraptObjectProxy_setattro( + WraptObjectProxyObject *self, PyObject *name, PyObject *value) +{ + static PyObject *self_str = NULL; + static PyObject *wrapped_str = NULL; + static PyObject *startswith_str = NULL; + + PyObject *match = NULL; + + if (!startswith_str) { +#if PY_MAJOR_VERSION >= 3 + startswith_str = PyUnicode_InternFromString("startswith"); +#else + startswith_str = PyString_InternFromString("startswith"); +#endif + } + + if (!self_str) { +#if PY_MAJOR_VERSION >= 3 + self_str = PyUnicode_InternFromString("_self_"); +#else + self_str = PyString_InternFromString("_self_"); +#endif + } + + match = PyObject_CallMethodObjArgs(name, startswith_str, 
self_str, NULL); + + if (match == Py_True) { + Py_DECREF(match); + + return PyObject_GenericSetAttr((PyObject *)self, name, value); + } + else if (!match) + PyErr_Clear(); + + Py_XDECREF(match); + + if (!wrapped_str) { +#if PY_MAJOR_VERSION >= 3 + wrapped_str = PyUnicode_InternFromString("__wrapped__"); +#else + wrapped_str = PyString_InternFromString("__wrapped__"); +#endif + } + + if (PyObject_HasAttr((PyObject *)Py_TYPE(self), name)) + return PyObject_GenericSetAttr((PyObject *)self, name, value); + + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return -1; + } + + return PyObject_SetAttr(self->wrapped, name, value); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_richcompare(WraptObjectProxyObject *self, + PyObject *other, int opcode) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_RichCompare(self->wrapped, other, opcode); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptObjectProxy_iter(WraptObjectProxyObject *self) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_GetIter(self->wrapped); +} + +/* ------------------------------------------------------------------------- */ + +static PyNumberMethods WraptObjectProxy_as_number = { + (binaryfunc)WraptObjectProxy_add, /*nb_add*/ + (binaryfunc)WraptObjectProxy_subtract, /*nb_subtract*/ + (binaryfunc)WraptObjectProxy_multiply, /*nb_multiply*/ +#if PY_MAJOR_VERSION < 3 + (binaryfunc)WraptObjectProxy_divide, /*nb_divide*/ +#endif + (binaryfunc)WraptObjectProxy_remainder, /*nb_remainder*/ + (binaryfunc)WraptObjectProxy_divmod, /*nb_divmod*/ + (ternaryfunc)WraptObjectProxy_power, /*nb_power*/ + (unaryfunc)WraptObjectProxy_negative, /*nb_negative*/ + (unaryfunc)WraptObjectProxy_positive, /*nb_positive*/ + (unaryfunc)WraptObjectProxy_absolute, /*nb_absolute*/ + (inquiry)WraptObjectProxy_bool, /*nb_nonzero/nb_bool*/ + (unaryfunc)WraptObjectProxy_invert, /*nb_invert*/ + (binaryfunc)WraptObjectProxy_lshift, /*nb_lshift*/ + (binaryfunc)WraptObjectProxy_rshift, /*nb_rshift*/ + (binaryfunc)WraptObjectProxy_and, /*nb_and*/ + (binaryfunc)WraptObjectProxy_xor, /*nb_xor*/ + (binaryfunc)WraptObjectProxy_or, /*nb_or*/ +#if PY_MAJOR_VERSION < 3 + 0, /*nb_coerce*/ +#endif +#if PY_MAJOR_VERSION < 3 + (unaryfunc)WraptObjectProxy_int, /*nb_int*/ + (unaryfunc)WraptObjectProxy_long, /*nb_long*/ +#else + (unaryfunc)WraptObjectProxy_long, /*nb_int*/ + 0, /*nb_long/nb_reserved*/ +#endif + (unaryfunc)WraptObjectProxy_float, /*nb_float*/ +#if PY_MAJOR_VERSION < 3 + (unaryfunc)WraptObjectProxy_oct, /*nb_oct*/ + (unaryfunc)WraptObjectProxy_hex, /*nb_hex*/ +#endif + (binaryfunc)WraptObjectProxy_inplace_add, /*nb_inplace_add*/ + (binaryfunc)WraptObjectProxy_inplace_subtract, /*nb_inplace_subtract*/ + (binaryfunc)WraptObjectProxy_inplace_multiply, /*nb_inplace_multiply*/ +#if PY_MAJOR_VERSION < 3 + (binaryfunc)WraptObjectProxy_inplace_divide, /*nb_inplace_divide*/ +#endif + (binaryfunc)WraptObjectProxy_inplace_remainder, /*nb_inplace_remainder*/ + (ternaryfunc)WraptObjectProxy_inplace_power, /*nb_inplace_power*/ + (binaryfunc)WraptObjectProxy_inplace_lshift, /*nb_inplace_lshift*/ + (binaryfunc)WraptObjectProxy_inplace_rshift, /*nb_inplace_rshift*/ + (binaryfunc)WraptObjectProxy_inplace_and, /*nb_inplace_and*/ + 
(binaryfunc)WraptObjectProxy_inplace_xor, /*nb_inplace_xor*/ + (binaryfunc)WraptObjectProxy_inplace_or, /*nb_inplace_or*/ + (binaryfunc)WraptObjectProxy_floor_divide, /*nb_floor_divide*/ + (binaryfunc)WraptObjectProxy_true_divide, /*nb_true_divide*/ + (binaryfunc)WraptObjectProxy_inplace_floor_divide, /*nb_inplace_floor_divide*/ + (binaryfunc)WraptObjectProxy_inplace_true_divide, /*nb_inplace_true_divide*/ + (unaryfunc)WraptObjectProxy_index, /*nb_index*/ +}; + +static PySequenceMethods WraptObjectProxy_as_sequence = { + (lenfunc)WraptObjectProxy_length, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + 0, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + (objobjproc)WraptObjectProxy_contains, /* sq_contains */ +}; + +static PyMappingMethods WraptObjectProxy_as_mapping = { + (lenfunc)WraptObjectProxy_length, /*mp_length*/ + (binaryfunc)WraptObjectProxy_getitem, /*mp_subscript*/ + (objobjargproc)WraptObjectProxy_setitem, /*mp_ass_subscript*/ +}; + +static PyMethodDef WraptObjectProxy_methods[] = { + { "__dir__", (PyCFunction)WraptObjectProxy_dir, METH_NOARGS, 0 }, + { "__enter__", (PyCFunction)WraptObjectProxy_enter, + METH_VARARGS | METH_KEYWORDS, 0 }, + { "__exit__", (PyCFunction)WraptObjectProxy_exit, + METH_VARARGS | METH_KEYWORDS, 0 }, + { "__copy__", (PyCFunction)WraptObjectProxy_copy, + METH_NOARGS, 0 }, + { "__deepcopy__", (PyCFunction)WraptObjectProxy_deepcopy, + METH_VARARGS | METH_KEYWORDS, 0 }, + { "__reduce__", (PyCFunction)WraptObjectProxy_reduce, + METH_NOARGS, 0 }, + { "__reduce_ex__", (PyCFunction)WraptObjectProxy_reduce_ex, + METH_VARARGS | METH_KEYWORDS, 0 }, + { "__getattr__", (PyCFunction)WraptObjectProxy_getattr, + METH_VARARGS , 0 }, + { "__bytes__", (PyCFunction)WraptObjectProxy_bytes, METH_NOARGS, 0 }, + { "__reversed__", (PyCFunction)WraptObjectProxy_reversed, METH_NOARGS, 0 }, +#if PY_MAJOR_VERSION >= 3 + { "__round__", (PyCFunction)WraptObjectProxy_round, METH_NOARGS, 0 }, +#endif + { "__complex__", (PyCFunction)WraptObjectProxy_complex, METH_NOARGS, 0 }, + { NULL, NULL }, +}; + +static PyGetSetDef WraptObjectProxy_getset[] = { + { "__name__", (getter)WraptObjectProxy_get_name, + (setter)WraptObjectProxy_set_name, 0 }, + { "__qualname__", (getter)WraptObjectProxy_get_qualname, + (setter)WraptObjectProxy_set_qualname, 0 }, + { "__module__", (getter)WraptObjectProxy_get_module, + (setter)WraptObjectProxy_set_module, 0 }, + { "__doc__", (getter)WraptObjectProxy_get_doc, + (setter)WraptObjectProxy_set_doc, 0 }, + { "__class__", (getter)WraptObjectProxy_get_class, + NULL, 0 }, + { "__annotations__", (getter)WraptObjectProxy_get_annotations, + (setter)WraptObjectProxy_set_annotations, 0 }, + { "__wrapped__", (getter)WraptObjectProxy_get_wrapped, + (setter)WraptObjectProxy_set_wrapped, 0 }, + { NULL }, +}; + +PyTypeObject WraptObjectProxy_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "ObjectProxy", /*tp_name*/ + sizeof(WraptObjectProxyObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + /* methods */ + (destructor)WraptObjectProxy_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + (unaryfunc)WraptObjectProxy_repr, /*tp_repr*/ + &WraptObjectProxy_as_number, /*tp_as_number*/ + &WraptObjectProxy_as_sequence, /*tp_as_sequence*/ + &WraptObjectProxy_as_mapping, /*tp_as_mapping*/ + (hashfunc)WraptObjectProxy_hash, /*tp_hash*/ + 0, /*tp_call*/ + (unaryfunc)WraptObjectProxy_str, /*tp_str*/ + (getattrofunc)WraptObjectProxy_getattro, /*tp_getattro*/ + (setattrofunc)WraptObjectProxy_setattro, 
/*tp_setattro*/ + 0, /*tp_as_buffer*/ +#if PY_MAJOR_VERSION < 3 + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_CHECKTYPES, /*tp_flags*/ +#else + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_HAVE_GC, /*tp_flags*/ +#endif + 0, /*tp_doc*/ + (traverseproc)WraptObjectProxy_traverse, /*tp_traverse*/ + (inquiry)WraptObjectProxy_clear, /*tp_clear*/ + (richcmpfunc)WraptObjectProxy_richcompare, /*tp_richcompare*/ + offsetof(WraptObjectProxyObject, weakreflist), /*tp_weaklistoffset*/ + (getiterfunc)WraptObjectProxy_iter, /*tp_iter*/ + 0, /*tp_iternext*/ + WraptObjectProxy_methods, /*tp_methods*/ + 0, /*tp_members*/ + WraptObjectProxy_getset, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + offsetof(WraptObjectProxyObject, dict), /*tp_dictoffset*/ + (initproc)WraptObjectProxy_init, /*tp_init*/ + PyType_GenericAlloc, /*tp_alloc*/ + WraptObjectProxy_new, /*tp_new*/ + PyObject_GC_Del, /*tp_free*/ + 0, /*tp_is_gc*/ +}; + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptCallableObjectProxy_call( + WraptObjectProxyObject *self, PyObject *args, PyObject *kwds) +{ + if (!self->wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + return PyObject_Call(self->wrapped, args, kwds); +} + +/* ------------------------------------------------------------------------- */; + +static PyGetSetDef WraptCallableObjectProxy_getset[] = { + { "__module__", (getter)WraptObjectProxy_get_module, + (setter)WraptObjectProxy_set_module, 0 }, + { "__doc__", (getter)WraptObjectProxy_get_doc, + (setter)WraptObjectProxy_set_doc, 0 }, + { NULL }, +}; + +PyTypeObject WraptCallableObjectProxy_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "CallableObjectProxy", /*tp_name*/ + sizeof(WraptObjectProxyObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + /* methods */ + 0, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + (ternaryfunc)WraptCallableObjectProxy_call, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ +#if PY_MAJOR_VERSION < 3 + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_CHECKTYPES, /*tp_flags*/ +#else + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ +#endif + 0, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + offsetof(WraptObjectProxyObject, weakreflist), /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + 0, /*tp_members*/ + WraptCallableObjectProxy_getset, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + (initproc)WraptObjectProxy_init, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ +}; + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptPartialCallableObjectProxy_new(PyTypeObject *type, + PyObject *args, PyObject *kwds) +{ + WraptPartialCallableObjectProxyObject *self; + + self = (WraptPartialCallableObjectProxyObject *)WraptObjectProxy_new(type, + args, kwds); + + if (!self) + return NULL; + + self->args = NULL; + self->kwargs = NULL; + + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static int WraptPartialCallableObjectProxy_raw_init( + 
WraptPartialCallableObjectProxyObject *self, + PyObject *wrapped, PyObject *args, PyObject *kwargs) +{ + int result = 0; + + result = WraptObjectProxy_raw_init((WraptObjectProxyObject *)self, + wrapped); + + if (result == 0) { + Py_INCREF(args); + Py_XDECREF(self->args); + self->args = args; + + Py_XINCREF(kwargs); + Py_XDECREF(self->kwargs); + self->kwargs = kwargs; + } + + return result; +} + +/* ------------------------------------------------------------------------- */ + +static int WraptPartialCallableObjectProxy_init( + WraptPartialCallableObjectProxyObject *self, PyObject *args, + PyObject *kwds) +{ + PyObject *wrapped = NULL; + PyObject *fnargs = NULL; + + int result = 0; + + if (!PyObject_Length(args)) { + PyErr_SetString(PyExc_TypeError, + "__init__ of partial needs an argument"); + return -1; + } + + if (PyObject_Length(args) < 1) { + PyErr_SetString(PyExc_TypeError, + "partial type takes at least one argument"); + return -1; + } + + wrapped = PyTuple_GetItem(args, 0); + + if (!PyCallable_Check(wrapped)) { + PyErr_SetString(PyExc_TypeError, + "the first argument must be callable"); + return -1; + } + + fnargs = PyTuple_GetSlice(args, 1, PyTuple_Size(args)); + + if (!fnargs) + return -1; + + result = WraptPartialCallableObjectProxy_raw_init(self, wrapped, + fnargs, kwds); + + Py_DECREF(fnargs); + + return result; +} + +/* ------------------------------------------------------------------------- */ + +static int WraptPartialCallableObjectProxy_traverse( + WraptPartialCallableObjectProxyObject *self, + visitproc visit, void *arg) +{ + WraptObjectProxy_traverse((WraptObjectProxyObject *)self, visit, arg); + + Py_VISIT(self->args); + Py_VISIT(self->kwargs); + + return 0; +} + +/* ------------------------------------------------------------------------- */ + +static int WraptPartialCallableObjectProxy_clear( + WraptPartialCallableObjectProxyObject *self) +{ + WraptObjectProxy_clear((WraptObjectProxyObject *)self); + + Py_CLEAR(self->args); + Py_CLEAR(self->kwargs); + + return 0; +} + +/* ------------------------------------------------------------------------- */ + +static void WraptPartialCallableObjectProxy_dealloc( + WraptPartialCallableObjectProxyObject *self) +{ + WraptPartialCallableObjectProxy_clear(self); + + WraptObjectProxy_dealloc((WraptObjectProxyObject *)self); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptPartialCallableObjectProxy_call( + WraptPartialCallableObjectProxyObject *self, PyObject *args, + PyObject *kwds) +{ + PyObject *fnargs = NULL; + PyObject *fnkwargs = NULL; + + PyObject *result = NULL; + + long i; + long offset; + + if (!self->object_proxy.wrapped) { + PyErr_SetString(PyExc_ValueError, "wrapper has not been initialized"); + return NULL; + } + + fnargs = PyTuple_New(PyTuple_Size(self->args)+PyTuple_Size(args)); + + for (i=0; i<PyTuple_Size(self->args); i++) { + PyObject *item; + item = PyTuple_GetItem(self->args, i); + Py_INCREF(item); + PyTuple_SetItem(fnargs, i, item); + } + + offset = PyTuple_Size(self->args); + + for (i=0; i<PyTuple_Size(args); i++) { + PyObject *item; + item = PyTuple_GetItem(args, i); + Py_INCREF(item); + PyTuple_SetItem(fnargs, offset+i, item); + } + + fnkwargs = PyDict_New(); + + if (self->kwargs && PyDict_Update(fnkwargs, self->kwargs) == -1) { + Py_DECREF(fnargs); + Py_DECREF(fnkwargs); + return NULL; + } + + if (kwds && PyDict_Update(fnkwargs, kwds) == -1) { + Py_DECREF(fnargs); + Py_DECREF(fnkwargs); + return NULL; + } + + result = PyObject_Call(self->object_proxy.wrapped, + fnargs, fnkwargs); + + Py_DECREF(fnargs); + Py_DECREF(fnkwargs); + + return result; +} + +/* ------------------------------------------------------------------------- */; + +static PyGetSetDef
WraptPartialCallableObjectProxy_getset[] = { + { "__module__", (getter)WraptObjectProxy_get_module, + (setter)WraptObjectProxy_set_module, 0 }, + { "__doc__", (getter)WraptObjectProxy_get_doc, + (setter)WraptObjectProxy_set_doc, 0 }, + { NULL }, +}; + +PyTypeObject WraptPartialCallableObjectProxy_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "PartialCallableObjectProxy", /*tp_name*/ + sizeof(WraptPartialCallableObjectProxyObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + /* methods */ + (destructor)WraptPartialCallableObjectProxy_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + (ternaryfunc)WraptPartialCallableObjectProxy_call, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ +#if PY_MAJOR_VERSION < 3 + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_CHECKTYPES, /*tp_flags*/ +#else + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_HAVE_GC, /*tp_flags*/ +#endif + 0, /*tp_doc*/ + (traverseproc)WraptPartialCallableObjectProxy_traverse, /*tp_traverse*/ + (inquiry)WraptPartialCallableObjectProxy_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ + offsetof(WraptObjectProxyObject, weakreflist), /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + 0, /*tp_members*/ + WraptPartialCallableObjectProxy_getset, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + (initproc)WraptPartialCallableObjectProxy_init, /*tp_init*/ + 0, /*tp_alloc*/ + WraptPartialCallableObjectProxy_new, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ +}; + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptFunctionWrapperBase_new(PyTypeObject *type, + PyObject *args, PyObject *kwds) +{ + WraptFunctionWrapperObject *self; + + self = (WraptFunctionWrapperObject *)WraptObjectProxy_new(type, + args, kwds); + + if (!self) + return NULL; + + self->instance = NULL; + self->wrapper = NULL; + self->enabled = NULL; + self->binding = NULL; + self->parent = NULL; + + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static int WraptFunctionWrapperBase_raw_init(WraptFunctionWrapperObject *self, + PyObject *wrapped, PyObject *instance, PyObject *wrapper, + PyObject *enabled, PyObject *binding, PyObject *parent) +{ + int result = 0; + + result = WraptObjectProxy_raw_init((WraptObjectProxyObject *)self, + wrapped); + + if (result == 0) { + Py_INCREF(instance); + Py_XDECREF(self->instance); + self->instance = instance; + + Py_INCREF(wrapper); + Py_XDECREF(self->wrapper); + self->wrapper = wrapper; + + Py_INCREF(enabled); + Py_XDECREF(self->enabled); + self->enabled = enabled; + + Py_INCREF(binding); + Py_XDECREF(self->binding); + self->binding = binding; + + Py_INCREF(parent); + Py_XDECREF(self->parent); + self->parent = parent; + } + + return result; +} + +/* ------------------------------------------------------------------------- */ + +static int WraptFunctionWrapperBase_init(WraptFunctionWrapperObject *self, + PyObject *args, PyObject *kwds) +{ + PyObject *wrapped = NULL; + PyObject *instance = NULL; + PyObject *wrapper = NULL; + PyObject *enabled = Py_None; + PyObject *binding = NULL; + PyObject *parent = Py_None; + + static PyObject *function_str = NULL; + + static char *kwlist[] = { "wrapped", "instance", 
"wrapper", + "enabled", "binding", "parent", NULL }; + + if (!function_str) { +#if PY_MAJOR_VERSION >= 3 + function_str = PyUnicode_InternFromString("function"); +#else + function_str = PyString_InternFromString("function"); +#endif + } + + if (!PyArg_ParseTupleAndKeywords(args, kwds, + "OOO|OOO:FunctionWrapperBase", kwlist, &wrapped, &instance, + &wrapper, &enabled, &binding, &parent)) { + return -1; + } + + if (!binding) + binding = function_str; + + return WraptFunctionWrapperBase_raw_init(self, wrapped, instance, wrapper, + enabled, binding, parent); +} + +/* ------------------------------------------------------------------------- */ + +static int WraptFunctionWrapperBase_traverse(WraptFunctionWrapperObject *self, + visitproc visit, void *arg) +{ + WraptObjectProxy_traverse((WraptObjectProxyObject *)self, visit, arg); + + Py_VISIT(self->instance); + Py_VISIT(self->wrapper); + Py_VISIT(self->enabled); + Py_VISIT(self->binding); + Py_VISIT(self->parent); + + return 0; +} + +/* ------------------------------------------------------------------------- */ + +static int WraptFunctionWrapperBase_clear(WraptFunctionWrapperObject *self) +{ + WraptObjectProxy_clear((WraptObjectProxyObject *)self); + + Py_CLEAR(self->instance); + Py_CLEAR(self->wrapper); + Py_CLEAR(self->enabled); + Py_CLEAR(self->binding); + Py_CLEAR(self->parent); + + return 0; +} + +/* ------------------------------------------------------------------------- */ + +static void WraptFunctionWrapperBase_dealloc(WraptFunctionWrapperObject *self) +{ + WraptFunctionWrapperBase_clear(self); + + WraptObjectProxy_dealloc((WraptObjectProxyObject *)self); +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptFunctionWrapperBase_call( + WraptFunctionWrapperObject *self, PyObject *args, PyObject *kwds) +{ + PyObject *param_kwds = NULL; + + PyObject *result = NULL; + + static PyObject *function_str = NULL; + + if (!function_str) { +#if PY_MAJOR_VERSION >= 3 + function_str = PyUnicode_InternFromString("function"); +#else + function_str = PyString_InternFromString("function"); +#endif + } + + if (self->enabled != Py_None) { + if (PyCallable_Check(self->enabled)) { + PyObject *object = NULL; + + object = PyObject_CallFunctionObjArgs(self->enabled, NULL); + + if (!object) + return NULL; + + if (PyObject_Not(object)) { + Py_DECREF(object); + return PyObject_Call(self->object_proxy.wrapped, args, kwds); + } + + Py_DECREF(object); + } + else if (PyObject_Not(self->enabled)) { + return PyObject_Call(self->object_proxy.wrapped, args, kwds); + } + } + + if (!kwds) { + param_kwds = PyDict_New(); + kwds = param_kwds; + } + + if (self->instance == Py_None && (self->binding == function_str || + PyObject_RichCompareBool(self->binding, function_str, + Py_EQ) == 1)) { + + PyObject *instance = NULL; + + instance = PyObject_GetAttrString(self->object_proxy.wrapped, + "__self__"); + + if (instance) { + result = PyObject_CallFunctionObjArgs(self->wrapper, + self->object_proxy.wrapped, instance, args, kwds, NULL); + + Py_XDECREF(param_kwds); + + Py_DECREF(instance); + + return result; + } + else + PyErr_Clear(); + } + + result = PyObject_CallFunctionObjArgs(self->wrapper, + self->object_proxy.wrapped, self->instance, args, kwds, NULL); + + Py_XDECREF(param_kwds); + + return result; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptFunctionWrapperBase_descr_get( + WraptFunctionWrapperObject *self, PyObject *obj, PyObject *type) +{ + PyObject 
*bound_type = NULL; + PyObject *descriptor = NULL; + PyObject *result = NULL; + + static PyObject *bound_type_str = NULL; + static PyObject *function_str = NULL; + + if (!bound_type_str) { +#if PY_MAJOR_VERSION >= 3 + bound_type_str = PyUnicode_InternFromString( + "__bound_function_wrapper__"); +#else + bound_type_str = PyString_InternFromString( + "__bound_function_wrapper__"); +#endif + } + + if (!function_str) { +#if PY_MAJOR_VERSION >= 3 + function_str = PyUnicode_InternFromString("function"); +#else + function_str = PyString_InternFromString("function"); +#endif + } + + if (self->parent == Py_None) { +#if PY_MAJOR_VERSION < 3 + if (PyObject_IsInstance(self->object_proxy.wrapped, + (PyObject *)&PyClass_Type) || PyObject_IsInstance( + self->object_proxy.wrapped, (PyObject *)&PyType_Type)) { + Py_INCREF(self); + return (PyObject *)self; + } +#else + if (PyObject_IsInstance(self->object_proxy.wrapped, + (PyObject *)&PyType_Type)) { + Py_INCREF(self); + return (PyObject *)self; + } +#endif + + if (Py_TYPE(self->object_proxy.wrapped)->tp_descr_get == NULL) { + PyErr_Format(PyExc_AttributeError, + "'%s' object has no attribute '__get__'", + Py_TYPE(self->object_proxy.wrapped)->tp_name); + return NULL; + } + + descriptor = (Py_TYPE(self->object_proxy.wrapped)->tp_descr_get)( + self->object_proxy.wrapped, obj, type); + + if (!descriptor) + return NULL; + + if (Py_TYPE(self) != &WraptFunctionWrapper_Type) { + bound_type = PyObject_GenericGetAttr((PyObject *)self, + bound_type_str); + + if (!bound_type) + PyErr_Clear(); + } + + if (obj == NULL) + obj = Py_None; + + result = PyObject_CallFunctionObjArgs(bound_type ? bound_type : + (PyObject *)&WraptBoundFunctionWrapper_Type, descriptor, + obj, self->wrapper, self->enabled, self->binding, + self, NULL); + + Py_XDECREF(bound_type); + Py_DECREF(descriptor); + + return result; + } + + if (self->instance == Py_None && (self->binding == function_str || + PyObject_RichCompareBool(self->binding, function_str, + Py_EQ) == 1)) { + + PyObject *wrapped = NULL; + + static PyObject *wrapped_str = NULL; + + if (!wrapped_str) { +#if PY_MAJOR_VERSION >= 3 + wrapped_str = PyUnicode_InternFromString("__wrapped__"); +#else + wrapped_str = PyString_InternFromString("__wrapped__"); +#endif + } + + wrapped = PyObject_GetAttr(self->parent, wrapped_str); + + if (!wrapped) + return NULL; + + if (Py_TYPE(wrapped)->tp_descr_get == NULL) { + PyErr_Format(PyExc_AttributeError, + "'%s' object has no attribute '__get__'", + Py_TYPE(wrapped)->tp_name); + Py_DECREF(wrapped); + return NULL; + } + + descriptor = (Py_TYPE(wrapped)->tp_descr_get)(wrapped, obj, type); + + Py_DECREF(wrapped); + + if (!descriptor) + return NULL; + + if (Py_TYPE(self->parent) != &WraptFunctionWrapper_Type) { + bound_type = PyObject_GenericGetAttr((PyObject *)self->parent, + bound_type_str); + + if (!bound_type) + PyErr_Clear(); + } + + if (obj == NULL) + obj = Py_None; + + result = PyObject_CallFunctionObjArgs(bound_type ? 
bound_type : + (PyObject *)&WraptBoundFunctionWrapper_Type, descriptor, + obj, self->wrapper, self->enabled, self->binding, + self->parent, NULL); + + Py_XDECREF(bound_type); + Py_DECREF(descriptor); + + return result; + } + + Py_INCREF(self); + return (PyObject *)self; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptFunctionWrapperBase_get_self_instance( + WraptFunctionWrapperObject *self, void *closure) +{ + if (!self->instance) { + Py_INCREF(Py_None); + return Py_None; + } + + Py_INCREF(self->instance); + return self->instance; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptFunctionWrapperBase_get_self_wrapper( + WraptFunctionWrapperObject *self, void *closure) +{ + if (!self->wrapper) { + Py_INCREF(Py_None); + return Py_None; + } + + Py_INCREF(self->wrapper); + return self->wrapper; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptFunctionWrapperBase_get_self_enabled( + WraptFunctionWrapperObject *self, void *closure) +{ + if (!self->enabled) { + Py_INCREF(Py_None); + return Py_None; + } + + Py_INCREF(self->enabled); + return self->enabled; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptFunctionWrapperBase_get_self_binding( + WraptFunctionWrapperObject *self, void *closure) +{ + if (!self->binding) { + Py_INCREF(Py_None); + return Py_None; + } + + Py_INCREF(self->binding); + return self->binding; +} + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptFunctionWrapperBase_get_self_parent( + WraptFunctionWrapperObject *self, void *closure) +{ + if (!self->parent) { + Py_INCREF(Py_None); + return Py_None; + } + + Py_INCREF(self->parent); + return self->parent; +} + +/* ------------------------------------------------------------------------- */; + +static PyGetSetDef WraptFunctionWrapperBase_getset[] = { + { "__module__", (getter)WraptObjectProxy_get_module, + (setter)WraptObjectProxy_set_module, 0 }, + { "__doc__", (getter)WraptObjectProxy_get_doc, + (setter)WraptObjectProxy_set_doc, 0 }, + { "_self_instance", (getter)WraptFunctionWrapperBase_get_self_instance, + NULL, 0 }, + { "_self_wrapper", (getter)WraptFunctionWrapperBase_get_self_wrapper, + NULL, 0 }, + { "_self_enabled", (getter)WraptFunctionWrapperBase_get_self_enabled, + NULL, 0 }, + { "_self_binding", (getter)WraptFunctionWrapperBase_get_self_binding, + NULL, 0 }, + { "_self_parent", (getter)WraptFunctionWrapperBase_get_self_parent, + NULL, 0 }, + { NULL }, +}; + +PyTypeObject WraptFunctionWrapperBase_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "_FunctionWrapperBase", /*tp_name*/ + sizeof(WraptFunctionWrapperObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + /* methods */ + (destructor)WraptFunctionWrapperBase_dealloc, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + (ternaryfunc)WraptFunctionWrapperBase_call, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ +#if PY_MAJOR_VERSION < 3 + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_CHECKTYPES, /*tp_flags*/ +#else + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | + Py_TPFLAGS_HAVE_GC, /*tp_flags*/ +#endif + 0, /*tp_doc*/ + 
(traverseproc)WraptFunctionWrapperBase_traverse, /*tp_traverse*/ + (inquiry)WraptFunctionWrapperBase_clear, /*tp_clear*/ + 0, /*tp_richcompare*/ + offsetof(WraptObjectProxyObject, weakreflist), /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + 0, /*tp_members*/ + WraptFunctionWrapperBase_getset, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + (descrgetfunc)WraptFunctionWrapperBase_descr_get, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + (initproc)WraptFunctionWrapperBase_init, /*tp_init*/ + 0, /*tp_alloc*/ + WraptFunctionWrapperBase_new, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ +}; + +/* ------------------------------------------------------------------------- */ + +static PyObject *WraptBoundFunctionWrapper_call( + WraptFunctionWrapperObject *self, PyObject *args, PyObject *kwds) +{ + PyObject *param_args = NULL; + PyObject *param_kwds = NULL; + + PyObject *wrapped = NULL; + PyObject *instance = NULL; + + PyObject *result = NULL; + + static PyObject *function_str = NULL; + + if (self->enabled != Py_None) { + if (PyCallable_Check(self->enabled)) { + PyObject *object = NULL; + + object = PyObject_CallFunctionObjArgs(self->enabled, NULL); + + if (!object) + return NULL; + + if (PyObject_Not(object)) { + Py_DECREF(object); + return PyObject_Call(self->object_proxy.wrapped, args, kwds); + } + + Py_DECREF(object); + } + else if (PyObject_Not(self->enabled)) { + return PyObject_Call(self->object_proxy.wrapped, args, kwds); + } + } + + if (!function_str) { +#if PY_MAJOR_VERSION >= 3 + function_str = PyUnicode_InternFromString("function"); +#else + function_str = PyString_InternFromString("function"); +#endif + } + + /* + * We need to do things different depending on whether we are likely + * wrapping an instance method vs a static method or class method. + */ + + if (self->binding == function_str || PyObject_RichCompareBool( + self->binding, function_str, Py_EQ) == 1) { + + if (self->instance == Py_None) { + /* + * This situation can occur where someone is calling the + * instancemethod via the class type and passing the + * instance as the first argument. We need to shift the args + * before making the call to the wrapper and effectively + * bind the instance to the wrapped function using a partial + * so the wrapper doesn't see anything as being different. 
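 + * + * Illustrative example (not from the original wrapt source): for a + * wrapped method m on class C with instance c, the call C.m(c, 1) + * arrives here with instance of None and args of (c, 1); the code + * below shifts the first argument out so the wrapper still sees + * instance c and args (1,), exactly as it would for c.m(1).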
+ */ + + if (PyTuple_Size(args) == 0) { + PyErr_SetString(PyExc_TypeError, + "missing 1 required positional argument"); + return NULL; + } + + instance = PyTuple_GetItem(args, 0); + + if (!instance) + return NULL; + + wrapped = PyObject_CallFunctionObjArgs( + (PyObject *)&WraptPartialCallableObjectProxy_Type, + self->object_proxy.wrapped, instance, NULL); + + if (!wrapped) + return NULL; + + param_args = PyTuple_GetSlice(args, 1, PyTuple_Size(args)); + + if (!param_args) { + Py_DECREF(wrapped); + return NULL; + } + + args = param_args; + } + else + instance = self->instance; + + if (!wrapped) { + Py_INCREF(self->object_proxy.wrapped); + wrapped = self->object_proxy.wrapped; + } + + if (!kwds) { + param_kwds = PyDict_New(); + kwds = param_kwds; + } + + result = PyObject_CallFunctionObjArgs(self->wrapper, wrapped, + instance, args, kwds, NULL); + + Py_XDECREF(param_args); + Py_XDECREF(param_kwds); + Py_DECREF(wrapped); + + return result; + } + else { + /* + * As in this case we would be dealing with a classmethod or + * staticmethod, _self_instance will only tell us whether the + * classmethod or staticmethod was called via an instance of the + * class it is bound to, and not the case where it was called via + * the class type itself. We thus ignore _self_instance and use + * the __self__ attribute of the bound function instead. For a + * classmethod, this means instance will be the class type and + * for a staticmethod it will be None. This is probably the more + * useful thing we can pass through even though we lose knowledge + * of whether they were called on the instance vs the class type, + * as it reflects what they have available in the decorated + * function. + */ + + instance = PyObject_GetAttrString(self->object_proxy.wrapped, + "__self__"); + + if (!instance) { + PyErr_Clear(); + Py_INCREF(Py_None); + instance = Py_None; + } + + if (!kwds) { + param_kwds = PyDict_New(); + kwds = param_kwds; + } + + result = PyObject_CallFunctionObjArgs(self->wrapper, + self->object_proxy.wrapped, instance, args, kwds, NULL); + + Py_XDECREF(param_kwds); + + Py_DECREF(instance); + + return result; + } +} + +/* ------------------------------------------------------------------------- */ + +static PyGetSetDef WraptBoundFunctionWrapper_getset[] = { + { "__module__", (getter)WraptObjectProxy_get_module, + (setter)WraptObjectProxy_set_module, 0 }, + { "__doc__", (getter)WraptObjectProxy_get_doc, + (setter)WraptObjectProxy_set_doc, 0 }, + { NULL }, +}; + +PyTypeObject WraptBoundFunctionWrapper_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "BoundFunctionWrapper", /*tp_name*/ + sizeof(WraptFunctionWrapperObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + /* methods */ + 0, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + (ternaryfunc)WraptBoundFunctionWrapper_call, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ +#if PY_MAJOR_VERSION < 3 + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_CHECKTYPES, /*tp_flags*/ +#else + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ +#endif + 0, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + offsetof(WraptObjectProxyObject, weakreflist), /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + 0, /*tp_members*/ + WraptBoundFunctionWrapper_getset, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, 
/*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + 0, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ +}; + +/* ------------------------------------------------------------------------- */ + +static int WraptFunctionWrapper_init(WraptFunctionWrapperObject *self, + PyObject *args, PyObject *kwds) +{ + PyObject *wrapped = NULL; + PyObject *wrapper = NULL; + PyObject *enabled = Py_None; + PyObject *binding = NULL; + PyObject *instance = NULL; + + static PyObject *classmethod_str = NULL; + static PyObject *staticmethod_str = NULL; + static PyObject *function_str = NULL; + + int result = 0; + + static char *kwlist[] = { "wrapped", "wrapper", "enabled", NULL }; + + if (!PyArg_ParseTupleAndKeywords(args, kwds, "OO|O:FunctionWrapper", + kwlist, &wrapped, &wrapper, &enabled)) { + return -1; + } + + if (!classmethod_str) { +#if PY_MAJOR_VERSION >= 3 + classmethod_str = PyUnicode_InternFromString("classmethod"); +#else + classmethod_str = PyString_InternFromString("classmethod"); +#endif + } + + if (!staticmethod_str) { +#if PY_MAJOR_VERSION >= 3 + staticmethod_str = PyUnicode_InternFromString("staticmethod"); +#else + staticmethod_str = PyString_InternFromString("staticmethod"); +#endif + } + + if (!function_str) { +#if PY_MAJOR_VERSION >= 3 + function_str = PyUnicode_InternFromString("function"); +#else + function_str = PyString_InternFromString("function"); +#endif + } + + if (PyObject_IsInstance(wrapped, (PyObject *)&PyClassMethod_Type)) { + binding = classmethod_str; + } + else if (PyObject_IsInstance(wrapped, (PyObject *)&PyStaticMethod_Type)) { + binding = staticmethod_str; + } + else if ((instance = PyObject_GetAttrString(wrapped, "__self__")) != 0) { +#if PY_MAJOR_VERSION < 3 + if (PyObject_IsInstance(instance, (PyObject *)&PyClass_Type) || + PyObject_IsInstance(instance, (PyObject *)&PyType_Type)) { + binding = classmethod_str; + } +#else + if (PyObject_IsInstance(instance, (PyObject *)&PyType_Type)) { + binding = classmethod_str; + } +#endif + else + binding = function_str; + + Py_DECREF(instance); + } + else { + PyErr_Clear(); + + binding = function_str; + } + + result = WraptFunctionWrapperBase_raw_init(self, wrapped, Py_None, + wrapper, enabled, binding, Py_None); + + return result; +} + +/* ------------------------------------------------------------------------- */ + +static PyGetSetDef WraptFunctionWrapper_getset[] = { + { "__module__", (getter)WraptObjectProxy_get_module, + (setter)WraptObjectProxy_set_module, 0 }, + { "__doc__", (getter)WraptObjectProxy_get_doc, + (setter)WraptObjectProxy_set_doc, 0 }, + { NULL }, +}; + +PyTypeObject WraptFunctionWrapper_Type = { + PyVarObject_HEAD_INIT(NULL, 0) + "FunctionWrapper", /*tp_name*/ + sizeof(WraptFunctionWrapperObject), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + /* methods */ + 0, /*tp_dealloc*/ + 0, /*tp_print*/ + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + 0, /*tp_compare*/ + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ +#if PY_MAJOR_VERSION < 3 + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE | Py_TPFLAGS_CHECKTYPES, /*tp_flags*/ +#else + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/ +#endif + 0, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + offsetof(WraptObjectProxyObject, weakreflist), /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + 0, /*tp_methods*/ + 0, /*tp_members*/ + 
WraptFunctionWrapper_getset, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + 0, /*tp_dictoffset*/ + (initproc)WraptFunctionWrapper_init, /*tp_init*/ + 0, /*tp_alloc*/ + 0, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ +}; + +/* ------------------------------------------------------------------------- */ + +#if PY_MAJOR_VERSION >= 3 +static struct PyModuleDef moduledef = { + PyModuleDef_HEAD_INIT, + "_wrappers", /* m_name */ + NULL, /* m_doc */ + -1, /* m_size */ + NULL, /* m_methods */ + NULL, /* m_reload */ + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL, /* m_free */ +}; +#endif + +static PyObject * +moduleinit(void) +{ + PyObject *module; + +#if PY_MAJOR_VERSION >= 3 + module = PyModule_Create(&moduledef); +#else + module = Py_InitModule3("_wrappers", NULL, NULL); +#endif + + if (module == NULL) + return NULL; + + if (PyType_Ready(&WraptObjectProxy_Type) < 0) + return NULL; + + /* Ensure that inheritance relationships specified. */ + + WraptCallableObjectProxy_Type.tp_base = &WraptObjectProxy_Type; + WraptPartialCallableObjectProxy_Type.tp_base = &WraptObjectProxy_Type; + WraptFunctionWrapperBase_Type.tp_base = &WraptObjectProxy_Type; + WraptBoundFunctionWrapper_Type.tp_base = &WraptFunctionWrapperBase_Type; + WraptFunctionWrapper_Type.tp_base = &WraptFunctionWrapperBase_Type; + + if (PyType_Ready(&WraptCallableObjectProxy_Type) < 0) + return NULL; + if (PyType_Ready(&WraptPartialCallableObjectProxy_Type) < 0) + return NULL; + if (PyType_Ready(&WraptFunctionWrapperBase_Type) < 0) + return NULL; + if (PyType_Ready(&WraptBoundFunctionWrapper_Type) < 0) + return NULL; + if (PyType_Ready(&WraptFunctionWrapper_Type) < 0) + return NULL; + + Py_INCREF(&WraptObjectProxy_Type); + PyModule_AddObject(module, "ObjectProxy", + (PyObject *)&WraptObjectProxy_Type); + Py_INCREF(&WraptCallableObjectProxy_Type); + PyModule_AddObject(module, "CallableObjectProxy", + (PyObject *)&WraptCallableObjectProxy_Type); + PyModule_AddObject(module, "PartialCallableObjectProxy", + (PyObject *)&WraptPartialCallableObjectProxy_Type); + Py_INCREF(&WraptFunctionWrapper_Type); + PyModule_AddObject(module, "FunctionWrapper", + (PyObject *)&WraptFunctionWrapper_Type); + + Py_INCREF(&WraptFunctionWrapperBase_Type); + PyModule_AddObject(module, "_FunctionWrapperBase", + (PyObject *)&WraptFunctionWrapperBase_Type); + Py_INCREF(&WraptBoundFunctionWrapper_Type); + PyModule_AddObject(module, "BoundFunctionWrapper", + (PyObject *)&WraptBoundFunctionWrapper_Type); + + return module; +} + +#if PY_MAJOR_VERSION < 3 +PyMODINIT_FUNC init_wrappers(void) +{ + moduleinit(); +} +#else +PyMODINIT_FUNC PyInit__wrappers(void) +{ + return moduleinit(); +} +#endif + +/* ------------------------------------------------------------------------- */ diff --git a/ddtrace/vendor/wrapt/decorators.py b/ddtrace/vendor/wrapt/decorators.py new file mode 100644 index 0000000000..9b569f8cb9 --- /dev/null +++ b/ddtrace/vendor/wrapt/decorators.py @@ -0,0 +1,511 @@ +"""This module implements decorators for implementing other decorators +as well as some commonly used decorators. 
+ +""" + +import sys + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, + + import builtins + exec_ = getattr(builtins, "exec") + del builtins + +else: + string_types = basestring, + + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") + +from functools import partial +from inspect import ismethod, isclass, formatargspec +from collections import namedtuple +from threading import Lock, RLock + +try: + from inspect import signature +except ImportError: + pass + +from .wrappers import (FunctionWrapper, BoundFunctionWrapper, ObjectProxy, + CallableObjectProxy) + +# Adapter wrapper for the wrapped function which will overlay certain +# properties from the adapter function onto the wrapped function so that +# functions such as inspect.getargspec(), inspect.getfullargspec(), +# inspect.signature() and inspect.getsource() return the correct results +# one would expect. + +class _AdapterFunctionCode(CallableObjectProxy): + + def __init__(self, wrapped_code, adapter_code): + super(_AdapterFunctionCode, self).__init__(wrapped_code) + self._self_adapter_code = adapter_code + + @property + def co_argcount(self): + return self._self_adapter_code.co_argcount + + @property + def co_code(self): + return self._self_adapter_code.co_code + + @property + def co_flags(self): + return self._self_adapter_code.co_flags + + @property + def co_kwonlyargcount(self): + return self._self_adapter_code.co_kwonlyargcount + + @property + def co_varnames(self): + return self._self_adapter_code.co_varnames + +class _AdapterFunctionSurrogate(CallableObjectProxy): + + def __init__(self, wrapped, adapter): + super(_AdapterFunctionSurrogate, self).__init__(wrapped) + self._self_adapter = adapter + + @property + def __code__(self): + return _AdapterFunctionCode(self.__wrapped__.__code__, + self._self_adapter.__code__) + + @property + def __defaults__(self): + return self._self_adapter.__defaults__ + + @property + def __kwdefaults__(self): + return self._self_adapter.__kwdefaults__ + + @property + def __signature__(self): + if 'signature' not in globals(): + return self._self_adapter.__signature__ + else: + # Can't allow this to fail on Python 3 else it falls + # through to using __wrapped__, but that will be the + # wrong function we want to derive the signature + # from. Thus generate the signature ourselves. 
+ + return signature(self._self_adapter) + + if PY2: + func_code = __code__ + func_defaults = __defaults__ + +class _BoundAdapterWrapper(BoundFunctionWrapper): + + @property + def __func__(self): + return _AdapterFunctionSurrogate(self.__wrapped__.__func__, + self._self_parent._self_adapter) + + if PY2: + im_func = __func__ + +class AdapterWrapper(FunctionWrapper): + + __bound_function_wrapper__ = _BoundAdapterWrapper + + def __init__(self, *args, **kwargs): + adapter = kwargs.pop('adapter') + super(AdapterWrapper, self).__init__(*args, **kwargs) + self._self_surrogate = _AdapterFunctionSurrogate( + self.__wrapped__, adapter) + self._self_adapter = adapter + + @property + def __code__(self): + return self._self_surrogate.__code__ + + @property + def __defaults__(self): + return self._self_surrogate.__defaults__ + + @property + def __kwdefaults__(self): + return self._self_surrogate.__kwdefaults__ + + if PY2: + func_code = __code__ + func_defaults = __defaults__ + + @property + def __signature__(self): + return self._self_surrogate.__signature__ + +class AdapterFactory(object): + def __call__(self, wrapped): + raise NotImplementedError() + +class DelegatedAdapterFactory(AdapterFactory): + def __init__(self, factory): + super(DelegatedAdapterFactory, self).__init__() + self.factory = factory + def __call__(self, wrapped): + return self.factory(wrapped) + +adapter_factory = DelegatedAdapterFactory + +# Decorator for creating other decorators. This decorator and the +# wrappers which it uses are designed to properly preserve any name +# attributes, function signatures etc, in addition to the wrappers +# themselves acting like a transparent proxy for the original wrapped +# function so the wrapper is effectively indistinguishable from the +# original wrapped function. + +def decorator(wrapper=None, enabled=None, adapter=None): + # The decorator should be supplied with a single positional argument + # which is the wrapper function to be used to implement the + # decorator. This may be preceded by a step whereby the keyword + # arguments are supplied to customise the behaviour of the + # decorator. The 'adapter' argument is used to optionally denote a + # separate function which is notionally used by an adapter + # decorator. In that case parts of the function '__code__' and + # '__defaults__' attributes are used from the adapter function + # rather than those of the wrapped function. This allows for the + # argument specification from inspect.getargspec() and similar + # functions to be overridden with a prototype for a different + # function than what was wrapped. The 'enabled' argument provides a + # way to enable/disable the use of the decorator. If the type of + # 'enabled' is a boolean, then it is evaluated immediately and the + # wrapper is not even applied if it is False. If not a boolean, it + # will be evaluated when the wrapper is called for an unbound + # wrapper, and when binding occurs for a bound wrapper. When being + # evaluated, if 'enabled' is callable it will be called to obtain + # the value to be checked. If False, the wrapper will not be called + # and the original wrapped function will be called directly instead. + + if wrapper is not None: + # Helper function for creating wrapper of the appropriate + # type when we need it down below.
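 + # + # Hypothetical usage sketch (names not from the original wrapt + # source): + # + # @decorator(adapter='(self, arg)') + # def passthrough(wrapped, instance, args, kwargs): + # return wrapped(*args, **kwargs) + # + # When adapter is given as a plain string like this, _build() + # below compiles it into a stub 'def adapter(self, arg): pass' + # whose signature is overlaid onto whatever passthrough is + # applied to.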
+ + def _build(wrapped, wrapper, enabled=None, adapter=None): + if adapter: + if isinstance(adapter, AdapterFactory): + adapter = adapter(wrapped) + + if not callable(adapter): + ns = {} + if not isinstance(adapter, string_types): + adapter = formatargspec(*adapter) + exec_('def adapter{}: pass'.format(adapter), ns, ns) + adapter = ns['adapter'] + + return AdapterWrapper(wrapped=wrapped, wrapper=wrapper, + enabled=enabled, adapter=adapter) + + return FunctionWrapper(wrapped=wrapped, wrapper=wrapper, + enabled=enabled) + + # The wrapper has been provided so return the final decorator. + # The decorator is itself one of our function wrappers so we + # can determine when it is applied to functions, instance methods + # or class methods. This allows us to bind the instance or class + # method so the appropriate self or cls attribute is supplied + # when it is finally called. + + def _wrapper(wrapped, instance, args, kwargs): + # We first check for the case where the decorator was applied + # to a class type. + # + # @decorator + # class mydecoratorclass(object): + # def __init__(self, arg=None): + # self.arg = arg + # def __call__(self, wrapped, instance, args, kwargs): + # return wrapped(*args, **kwargs) + # + # @mydecoratorclass(arg=1) + # def function(): + # pass + # + # In this case an instance of the class is to be used as the + # decorator wrapper function. If args was empty at this point, + # then it means that there were optional keyword arguments + # supplied to be used when creating an instance of the class + # to be used as the wrapper function. + + if instance is None and isclass(wrapped) and not args: + # We still need to be passed the target function to be + # wrapped as yet, so we need to return a further function + # to be able to capture it. + + def _capture(target_wrapped): + # Now have the target function to be wrapped and need + # to create an instance of the class which is to act + # as the decorator wrapper function. Before we do that, + # we need to first check that use of the decorator + # hadn't been disabled by a simple boolean. If it was, + # the target function to be wrapped is returned instead. + + _enabled = enabled + if type(_enabled) is bool: + if not _enabled: + return target_wrapped + _enabled = None + + # Now create an instance of the class which is to act + # as the decorator wrapper function. Any arguments had + # to be supplied as keyword only arguments so that is + # all we pass when creating it. + + target_wrapper = wrapped(**kwargs) + + # Finally build the wrapper itself and return it. + + return _build(target_wrapped, target_wrapper, + _enabled, adapter) + + return _capture + + # We should always have the target function to be wrapped at + # this point as the first (and only) value in args. + + target_wrapped = args[0] + + # Need to now check that use of the decorator hadn't been + # disabled by a simple boolean. If it was, then target + # function to be wrapped is returned instead. + + _enabled = enabled + if type(_enabled) is bool: + if not _enabled: + return target_wrapped + _enabled = None + + # We now need to build the wrapper, but there are a couple of + # different cases we need to consider. + + if instance is None: + if isclass(wrapped): + # In this case the decorator was applied to a class + # type but optional keyword arguments were not supplied + # for initialising an instance of the class to be used + # as the decorator wrapper function. 
# + # @decorator + # class mydecoratorclass(object): + # def __init__(self, arg=None): + # self.arg = arg + # def __call__(self, wrapped, instance, + # args, kwargs): + # return wrapped(*args, **kwargs) + # + # @mydecoratorclass + # def function(): + # pass + # + # We still need to create an instance of the class to + # be used as the decorator wrapper function, but no + # arguments are passed. + + target_wrapper = wrapped() + + else: + # In this case the decorator was applied to a normal + # function, or possibly a static method of a class. + # + # @decorator + # def mydecoratorfunction(wrapped, instance, + # args, kwargs): + # return wrapped(*args, **kwargs) + # + # @mydecoratorfunction + # def function(): + # pass + # + # That normal function becomes the decorator wrapper + # function. + + target_wrapper = wrapper + + else: + if isclass(instance): + # In this case the decorator was applied to a class + # method. + # + # class myclass(object): + # @decorator + # @classmethod + # def decoratorclassmethod(cls, wrapped, + # instance, args, kwargs): + # return wrapped(*args, **kwargs) + # + # instance = myclass() + # + # @instance.decoratorclassmethod + # def function(): + # pass + # + # This one is a bit strange because binding was actually + # performed on the wrapper created by our decorator + # factory. We need to apply that binding to the decorator + # wrapper function to which the decorator factory + # was applied. + + target_wrapper = wrapper.__get__(None, instance) + + else: + # In this case the decorator was applied to an instance + # method. + # + # class myclass(object): + # @decorator + # def decoratorclassmethod(self, wrapped, + # instance, args, kwargs): + # return wrapped(*args, **kwargs) + # + # instance = myclass() + # + # @instance.decoratorclassmethod + # def function(): + # pass + # + # This one is a bit strange because binding was actually + # performed on the wrapper created by our decorator + # factory. We need to apply that binding to the decorator + # wrapper function to which the decorator factory + # was applied. + + target_wrapper = wrapper.__get__(instance, type(instance)) + + # Finally build the wrapper itself and return it. + + return _build(target_wrapped, target_wrapper, _enabled, adapter) + + # We first return our magic function wrapper here so we can + # determine in what context the decorator factory was used. In + # other words, it is itself a universal decorator. + + return _build(wrapper, _wrapper) + + else: + # The wrapper still has not been provided, so we are just + # collecting the optional keyword arguments. Return the + # decorator again wrapped in a partial using the collected + # arguments. + + return partial(decorator, enabled=enabled, adapter=adapter) + +# Decorator for implementing thread synchronization. It can be used as a +# decorator, in which case the synchronization context is determined by +# what type of function is wrapped, or it can also be used as a context +# manager, where the user needs to supply the correct synchronization +# context. It is also possible to supply an object which appears to be a +# synchronization primitive of some sort, by virtue of having release() +# and acquire() methods. In that case it will be used directly as the +# synchronization primitive without creating a separate lock against the +# derived or supplied context. + +def synchronized(wrapped): + # Determine if being passed an object which is a synchronization + # primitive.
We can't check by type for Lock, RLock, Semaphore etc, + # as the means of creating them isn't the type. Therefore use the + # existence of acquire() and release() methods. This is more + # extensible anyway as it allows custom synchronization mechanisms. + + if hasattr(wrapped, 'acquire') and hasattr(wrapped, 'release'): + # We remember what the original lock is and then return a new + # decorator which accesses and locks it. When returning the new + # decorator we wrap it with an object proxy so we can override + # the context manager methods in case it is being used to wrap + # synchronized statements with a 'with' statement. + + lock = wrapped + + @decorator + def _synchronized(wrapped, instance, args, kwargs): + # Execute the wrapped function while the original supplied + # lock is held. + + with lock: + return wrapped(*args, **kwargs) + + class _PartialDecorator(CallableObjectProxy): + + def __enter__(self): + lock.acquire() + return lock + + def __exit__(self, *args): + lock.release() + + return _PartialDecorator(wrapped=_synchronized) + + # Following only apply when the lock is being created automatically + # based on the context of what was supplied. In this case we supply + # a final decorator, but need to use FunctionWrapper directly as we + # want to derive from it to add context manager methods in case it is + # being used to wrap synchronized statements with a 'with' statement. + + def _synchronized_lock(context): + # Attempt to retrieve the lock for the specific context. + + lock = vars(context).get('_synchronized_lock', None) + + if lock is None: + # There is no existing lock defined for the context we + # are dealing with so we need to create one. This needs + # to be done in a way to guarantee there is only one + # created, even if multiple threads try and create it at + # the same time. We can't always use the setdefault() + # method on the __dict__ for the context. This is the + # case where the context is a class, as __dict__ is + # actually a dictproxy. What we therefore do is use a + # meta lock on this wrapper itself, to control the + # creation and assignment of the lock attribute against + # the context. + + with synchronized._synchronized_meta_lock: + # We need to check again for whether the lock we want + # exists in case two threads were trying to create it + # at the same time and were competing to create the + # meta lock. + + lock = vars(context).get('_synchronized_lock', None) + + if lock is None: + lock = RLock() + setattr(context, '_synchronized_lock', lock) + + return lock + + def _synchronized_wrapper(wrapped, instance, args, kwargs): + # Execute the wrapped function while the lock for the + # desired context is held. If instance is None then the + # wrapped function is used as the context. + + with _synchronized_lock(instance or wrapped): + return wrapped(*args, **kwargs) + + class _FinalDecorator(FunctionWrapper): + + def __enter__(self): + self._self_lock = _synchronized_lock(self.__wrapped__) + self._self_lock.acquire() + return self._self_lock + + def __exit__(self, *args): + self._self_lock.release() + + return _FinalDecorator(wrapped=wrapped, wrapper=_synchronized_wrapper) + +synchronized._synchronized_meta_lock = Lock() diff --git a/ddtrace/vendor/wrapt/importer.py b/ddtrace/vendor/wrapt/importer.py new file mode 100644 index 0000000000..9e617cdddc --- /dev/null +++ b/ddtrace/vendor/wrapt/importer.py @@ -0,0 +1,230 @@ +"""This module implements a post import hook mechanism styled after what is +described in PEP-369. 
Note that it doesn't cope with modules being reloaded. + +""" + +import sys +import threading + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +if PY3: + import importlib + string_types = str, +else: + string_types = basestring, + +from .decorators import synchronized + +# The dictionary registering any post import hooks to be triggered once +# the target module has been imported. Once a module has been imported +# and the hooks fired, the list of hooks recorded against the target +# module will be truncated, but the list is left in the dictionary. This +# acts as a flag to indicate that the module had already been imported. + +_post_import_hooks = {} +_post_import_hooks_init = False +_post_import_hooks_lock = threading.RLock() + +# Register a new post import hook for the target module name. This +# differs from the PEP-369 implementation in that it also allows the +# hook function to be specified as a string consisting of the name of +# the callback in the form 'module:function'. This will result in a +# proxy callback being registered which will defer loading of the +# specified module containing the callback function until required. + +def _create_import_hook_from_string(name): + def import_hook(module): + module_name, function = name.split(':') + attrs = function.split('.') + __import__(module_name) + callback = sys.modules[module_name] + for attr in attrs: + callback = getattr(callback, attr) + return callback(module) + return import_hook + +@synchronized(_post_import_hooks_lock) +def register_post_import_hook(hook, name): + # Create a deferred import hook if hook is a string name rather than + # a callable function. + + if isinstance(hook, string_types): + hook = _create_import_hook_from_string(hook) + + # Automatically install the import hook finder if it has not already + # been installed. + + global _post_import_hooks_init + + if not _post_import_hooks_init: + _post_import_hooks_init = True + sys.meta_path.insert(0, ImportHookFinder()) + + # Determine if any prior registration of a post import hook for + # the target module has occurred and act appropriately. + + hooks = _post_import_hooks.get(name, None) + + if hooks is None: + # No prior registration of post import hooks for the target + # module. We need to check whether the module has already been + # imported. If it has we fire the hook immediately and add an + # empty list to the registry to indicate that the module has + # already been imported and hooks have fired. Otherwise add + # the post import hook to the registry. + + module = sys.modules.get(name, None) + + if module is not None: + _post_import_hooks[name] = [] + hook(module) + + else: + _post_import_hooks[name] = [hook] + + elif hooks == []: + # A prior registration of post import hooks for the target + # module was done and the hooks already fired. Fire the hook + # immediately. + + module = sys.modules[name] + hook(module) + + else: + # A prior registration of post import hooks for the target + # module was done but the module has not yet been imported. + + _post_import_hooks[name].append(hook) + +# Register post import hooks defined as package entry points.
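 + # + # Hypothetical example (names not from the original wrapt source): a + # package exposing + # + # entry_points={ + # 'my.hooks': ['requests = mypkg.hooks:patch_requests'], + # } + # + # in its setup() would have mypkg.hooks.patch_requests() invoked when + # the 'requests' module is first imported, once + # discover_post_import_hooks('my.hooks') has been called.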
+ +def _create_import_hook_from_entrypoint(entrypoint): + def import_hook(module): + __import__(entrypoint.module_name) + callback = sys.modules[entrypoint.module_name] + for attr in entrypoint.attrs: + callback = getattr(callback, attr) + return callback(module) + return import_hook + +def discover_post_import_hooks(group): + try: + import pkg_resources + except ImportError: + return + + for entrypoint in pkg_resources.iter_entry_points(group=group): + callback = _create_import_hook_from_entrypoint(entrypoint) + register_post_import_hook(callback, entrypoint.name) + +# Indicate that a module has been loaded. Any post import hooks which +# were registered against the target module will be invoked. If an +# exception is raised in any of the post import hooks, that will cause +# the import of the target module to fail. + +@synchronized(_post_import_hooks_lock) +def notify_module_loaded(module): + name = getattr(module, '__name__', None) + hooks = _post_import_hooks.get(name, None) + + if hooks: + _post_import_hooks[name] = [] + + for hook in hooks: + hook(module) + +# A custom module import finder. This intercepts attempts to import +# modules and watches out for attempts to import target modules of +# interest. When a module of interest is imported, then any post import +# hooks which are registered will be invoked. + +class _ImportHookLoader: + + def load_module(self, fullname): + module = sys.modules[fullname] + notify_module_loaded(module) + + return module + +class _ImportHookChainedLoader: + + def __init__(self, loader): + self.loader = loader + + def load_module(self, fullname): + module = self.loader.load_module(fullname) + notify_module_loaded(module) + + return module + +class ImportHookFinder: + + def __init__(self): + self.in_progress = {} + + @synchronized(_post_import_hooks_lock) + def find_module(self, fullname, path=None): + # If the module being imported is not one we have registered + # post import hooks for, we can return immediately. We will + # take no further part in the importing of this module. + + if not fullname in _post_import_hooks: + return None + + # When we are interested in a specific module, we will call back + # into the import system a second time to defer to the import + # finder that is supposed to handle the importing of the module. + # We set an in progress flag for the target module so that on + # the second time through we don't trigger another call back + # into the import system and cause an infinite loop. + + if fullname in self.in_progress: + return None + + self.in_progress[fullname] = True + + # Now call back into the import system again. + + try: + if PY3: + # For Python 3 we need to use find_spec().loader + # from the importlib.util module. It doesn't actually + # import the target module and only finds the + # loader. If a loader is found, we need to return + # our own loader which will then in turn call the + # real loader to import the module and invoke the + # post import hooks. + try: + import importlib.util + loader = importlib.util.find_spec(fullname).loader + except (ImportError, AttributeError): + loader = importlib.find_loader(fullname, path) + if loader: + return _ImportHookChainedLoader(loader) + + else: + # For Python 2 we don't have much choice but to + # call back into __import__(). This will + # actually cause the module to be imported. If no + # module could be found then ImportError will be + # raised. Otherwise we return a loader which + # returns the already loaded module and invokes + # the post import hooks.
+ + __import__(fullname) + + return _ImportHookLoader() + + finally: + del self.in_progress[fullname] + +# Decorator for marking that a function should be called as a post +# import hook when the target module is imported. + +def when_imported(name): + def register(hook): + register_post_import_hook(hook, name) + return hook + return register diff --git a/ddtrace/vendor/wrapt/setup.py b/ddtrace/vendor/wrapt/setup.py new file mode 100644 index 0000000000..dae324bd23 --- /dev/null +++ b/ddtrace/vendor/wrapt/setup.py @@ -0,0 +1,7 @@ +__all__ = ["get_extensions"] + +from setuptools import Extension + + +def get_extensions(): + return [Extension("ddtrace.vendor.wrapt._wrappers", sources=["ddtrace/vendor/wrapt/_wrappers.c"],)] diff --git a/ddtrace/vendor/wrapt/wrappers.py b/ddtrace/vendor/wrapt/wrappers.py new file mode 100644 index 0000000000..1d6131d853 --- /dev/null +++ b/ddtrace/vendor/wrapt/wrappers.py @@ -0,0 +1,943 @@ +import os +import sys +import functools +import operator +import weakref +import inspect + +PY2 = sys.version_info[0] == 2 +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, +else: + string_types = basestring, + +def with_metaclass(meta, *bases): + """Create a base class with a metaclass.""" + return meta("NewBase", bases, {}) + +class _ObjectProxyMethods(object): + + # We use properties to override the values of __module__ and + # __doc__. If we add these in ObjectProxy, the derived class + # __dict__ will still be set up to have string variants of these + # attributes and the rules of descriptors mean that they appear to + # take precedence over the properties in the base class. To avoid + # that, we copy the properties into the derived class type itself + # via a meta class. In that way the properties will always take + # precedence. + + @property + def __module__(self): + return self.__wrapped__.__module__ + + @__module__.setter + def __module__(self, value): + self.__wrapped__.__module__ = value + + @property + def __doc__(self): + return self.__wrapped__.__doc__ + + @__doc__.setter + def __doc__(self, value): + self.__wrapped__.__doc__ = value + + # We similarly use a property for __dict__. We need __dict__ to be + # explicit to ensure that vars() works as expected. + + @property + def __dict__(self): + return self.__wrapped__.__dict__ + + # We also need to propagate the special __weakref__ attribute for the + # case where we are decorating classes, which will define this. If we + # do not define it and a function like inspect.getmembers() is used + # on a decorator class it will fail. This can't be in the derived + # classes. + + @property + def __weakref__(self): + return self.__wrapped__.__weakref__ + +class _ObjectProxyMetaType(type): + def __new__(cls, name, bases, dictionary): + # Copy our special properties into the class so that they + # always take precedence over attributes of the same name added + # during construction of a derived class. This is to save + # duplicating the implementation for them in all derived classes. + + dictionary.update(vars(_ObjectProxyMethods)) + + return type.__new__(cls, name, bases, dictionary) + +class ObjectProxy(with_metaclass(_ObjectProxyMetaType)): + + __slots__ = '__wrapped__' + + def __init__(self, wrapped): + object.__setattr__(self, '__wrapped__', wrapped) + + # Python 3.2+ has the __qualname__ attribute, but it does not + # allow it to be overridden using a property and it must instead + # be an actual string object.
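 + # + # (Illustrative note, not from the original wrapt source: after + # this copy, ObjectProxy(f).__qualname__ == f.__qualname__ for + # any function f defining __qualname__.)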
+ + try: + object.__setattr__(self, '__qualname__', wrapped.__qualname__) + except AttributeError: + pass + + @property + def __name__(self): + return self.__wrapped__.__name__ + + @__name__.setter + def __name__(self, value): + self.__wrapped__.__name__ = value + + @property + def __class__(self): + return self.__wrapped__.__class__ + + @__class__.setter + def __class__(self, value): + self.__wrapped__.__class__ = value + + @property + def __annotations__(self): + return self.__wrapped__.__annotations__ + + @__annotations__.setter + def __annotations__(self, value): + self.__wrapped__.__annotations__ = value + + def __dir__(self): + return dir(self.__wrapped__) + + def __str__(self): + return str(self.__wrapped__) + + if PY3: + def __bytes__(self): + return bytes(self.__wrapped__) + + def __repr__(self): + return '<{} at 0x{:x} for {} at 0x{:x}>'.format( + type(self).__name__, id(self), + type(self.__wrapped__).__name__, + id(self.__wrapped__)) + + def __reversed__(self): + return reversed(self.__wrapped__) + + if PY3: + def __round__(self): + return round(self.__wrapped__) + + def __lt__(self, other): + return self.__wrapped__ < other + + def __le__(self, other): + return self.__wrapped__ <= other + + def __eq__(self, other): + return self.__wrapped__ == other + + def __ne__(self, other): + return self.__wrapped__ != other + + def __gt__(self, other): + return self.__wrapped__ > other + + def __ge__(self, other): + return self.__wrapped__ >= other + + def __hash__(self): + return hash(self.__wrapped__) + + def __nonzero__(self): + return bool(self.__wrapped__) + + def __bool__(self): + return bool(self.__wrapped__) + + def __setattr__(self, name, value): + if name.startswith('_self_'): + object.__setattr__(self, name, value) + + elif name == '__wrapped__': + object.__setattr__(self, name, value) + try: + object.__delattr__(self, '__qualname__') + except AttributeError: + pass + try: + object.__setattr__(self, '__qualname__', value.__qualname__) + except AttributeError: + pass + + elif name == '__qualname__': + setattr(self.__wrapped__, name, value) + object.__setattr__(self, name, value) + + elif hasattr(type(self), name): + object.__setattr__(self, name, value) + + else: + setattr(self.__wrapped__, name, value) + + def __getattr__(self, name): + # If we are being asked to look up '__wrapped__' then the + # '__init__()' method cannot have been called.
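 + # + # (Illustrative, not from the original wrapt source: for a proxy + # created via ObjectProxy.__new__(ObjectProxy) with __init__ never + # run, any attribute access funnels through here and ends up + # raising the ValueError below.)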
+ + if name == '__wrapped__': + raise ValueError('wrapper has not been initialised') + + return getattr(self.__wrapped__, name) + + def __delattr__(self, name): + if name.startswith('_self_'): + object.__delattr__(self, name) + + elif name == '__wrapped__': + raise TypeError('__wrapped__ must be an object') + + elif name == '__qualname__': + object.__delattr__(self, name) + delattr(self.__wrapped__, name) + + elif hasattr(type(self), name): + object.__delattr__(self, name) + + else: + delattr(self.__wrapped__, name) + + def __add__(self, other): + return self.__wrapped__ + other + + def __sub__(self, other): + return self.__wrapped__ - other + + def __mul__(self, other): + return self.__wrapped__ * other + + def __div__(self, other): + return operator.div(self.__wrapped__, other) + + def __truediv__(self, other): + return operator.truediv(self.__wrapped__, other) + + def __floordiv__(self, other): + return self.__wrapped__ // other + + def __mod__(self, other): + return self.__wrapped__ % other + + def __divmod__(self, other): + return divmod(self.__wrapped__, other) + + def __pow__(self, other, *args): + return pow(self.__wrapped__, other, *args) + + def __lshift__(self, other): + return self.__wrapped__ << other + + def __rshift__(self, other): + return self.__wrapped__ >> other + + def __and__(self, other): + return self.__wrapped__ & other + + def __xor__(self, other): + return self.__wrapped__ ^ other + + def __or__(self, other): + return self.__wrapped__ | other + + def __radd__(self, other): + return other + self.__wrapped__ + + def __rsub__(self, other): + return other - self.__wrapped__ + + def __rmul__(self, other): + return other * self.__wrapped__ + + def __rdiv__(self, other): + return operator.div(other, self.__wrapped__) + + def __rtruediv__(self, other): + return operator.truediv(other, self.__wrapped__) + + def __rfloordiv__(self, other): + return other // self.__wrapped__ + + def __rmod__(self, other): + return other % self.__wrapped__ + + def __rdivmod__(self, other): + return divmod(other, self.__wrapped__) + + def __rpow__(self, other, *args): + return pow(other, self.__wrapped__, *args) + + def __rlshift__(self, other): + return other << self.__wrapped__ + + def __rrshift__(self, other): + return other >> self.__wrapped__ + + def __rand__(self, other): + return other & self.__wrapped__ + + def __rxor__(self, other): + return other ^ self.__wrapped__ + + def __ror__(self, other): + return other | self.__wrapped__ + + def __iadd__(self, other): + self.__wrapped__ += other + return self + + def __isub__(self, other): + self.__wrapped__ -= other + return self + + def __imul__(self, other): + self.__wrapped__ *= other + return self + + def __idiv__(self, other): + self.__wrapped__ = operator.idiv(self.__wrapped__, other) + return self + + def __itruediv__(self, other): + self.__wrapped__ = operator.itruediv(self.__wrapped__, other) + return self + + def __ifloordiv__(self, other): + self.__wrapped__ //= other + return self + + def __imod__(self, other): + self.__wrapped__ %= other + return self + + def __ipow__(self, other): + self.__wrapped__ **= other + return self + + def __ilshift__(self, other): + self.__wrapped__ <<= other + return self + + def __irshift__(self, other): + self.__wrapped__ >>= other + return self + + def __iand__(self, other): + self.__wrapped__ &= other + return self + + def __ixor__(self, other): + self.__wrapped__ ^= other + return self + + def __ior__(self, other): + self.__wrapped__ |= other + return self + + def __neg__(self): + return 
-self.__wrapped__ + + def __pos__(self): + return +self.__wrapped__ + + def __abs__(self): + return abs(self.__wrapped__) + + def __invert__(self): + return ~self.__wrapped__ + + def __int__(self): + return int(self.__wrapped__) + + def __long__(self): + return long(self.__wrapped__) + + def __float__(self): + return float(self.__wrapped__) + + def __complex__(self): + return complex(self.__wrapped__) + + def __oct__(self): + return oct(self.__wrapped__) + + def __hex__(self): + return hex(self.__wrapped__) + + def __index__(self): + return operator.index(self.__wrapped__) + + def __len__(self): + return len(self.__wrapped__) + + def __contains__(self, value): + return value in self.__wrapped__ + + def __getitem__(self, key): + return self.__wrapped__[key] + + def __setitem__(self, key, value): + self.__wrapped__[key] = value + + def __delitem__(self, key): + del self.__wrapped__[key] + + def __getslice__(self, i, j): + return self.__wrapped__[i:j] + + def __setslice__(self, i, j, value): + self.__wrapped__[i:j] = value + + def __delslice__(self, i, j): + del self.__wrapped__[i:j] + + def __enter__(self): + return self.__wrapped__.__enter__() + + def __exit__(self, *args, **kwargs): + return self.__wrapped__.__exit__(*args, **kwargs) + + def __iter__(self): + return iter(self.__wrapped__) + + def __copy__(self): + raise NotImplementedError('object proxy must define __copy__()') + + def __deepcopy__(self, memo): + raise NotImplementedError('object proxy must define __deepcopy__()') + + def __reduce__(self): + raise NotImplementedError( + 'object proxy must define __reduce_ex__()') + + def __reduce_ex__(self, protocol): + raise NotImplementedError( + 'object proxy must define __reduce_ex__()') + +class CallableObjectProxy(ObjectProxy): + + def __call__(self, *args, **kwargs): + return self.__wrapped__(*args, **kwargs) + +class PartialCallableObjectProxy(ObjectProxy): + + def __init__(self, *args, **kwargs): + if len(args) < 1: + raise TypeError('partial type takes at least one argument') + + wrapped, args = args[0], args[1:] + + if not callable(wrapped): + raise TypeError('the first argument must be callable') + + super(PartialCallableObjectProxy, self).__init__(wrapped) + + self._self_args = args + self._self_kwargs = kwargs + + def __call__(self, *args, **kwargs): + _args = self._self_args + args + + _kwargs = dict(self._self_kwargs) + _kwargs.update(kwargs) + + return self.__wrapped__(*_args, **_kwargs) + +class _FunctionWrapperBase(ObjectProxy): + + __slots__ = ('_self_instance', '_self_wrapper', '_self_enabled', + '_self_binding', '_self_parent') + + def __init__(self, wrapped, instance, wrapper, enabled=None, + binding='function', parent=None): + + super(_FunctionWrapperBase, self).__init__(wrapped) + + object.__setattr__(self, '_self_instance', instance) + object.__setattr__(self, '_self_wrapper', wrapper) + object.__setattr__(self, '_self_enabled', enabled) + object.__setattr__(self, '_self_binding', binding) + object.__setattr__(self, '_self_parent', parent) + + def __get__(self, instance, owner): + # This method is actually doing double duty for both unbound and + # bound derived wrapper classes. It should possibly be broken up + # and the distinct functionality moved into the derived classes. + # Can't do that straight away due to some legacy code which is + # relying on it being here in this base class. + # + # The distinguishing attribute which determines whether we are + # being called in an unbound or bound wrapper is the parent + # attribute. 
If binding has never occurred, then the parent will + # be None. + # + # First therefore, is if we are called in an unbound wrapper. In + # this case we perform the binding. + # + # We have one special case to worry about here. This is where we + # are decorating a nested class. In this case the wrapped class + # would not have a __get__() method to call. In that case we + # simply return self. + # + # Note that we otherwise still do binding even if instance is + # None and accessing an unbound instance method from a class. + # This is because we need to be able to later detect that + # specific case as we will need to extract the instance from the + # first argument of those passed in. + + if self._self_parent is None: + if not inspect.isclass(self.__wrapped__): + descriptor = self.__wrapped__.__get__(instance, owner) + + return self.__bound_function_wrapper__(descriptor, instance, + self._self_wrapper, self._self_enabled, + self._self_binding, self) + + return self + + # Now we have the case of binding occurring a second time on what + # was already a bound function. In this case we would usually + # return ourselves again. This mirrors what Python does. + # + # The special case this time is where we were originally bound + # with an instance of None and we were likely an instance + # method. In that case we rebind against the original wrapped + # function from the parent again. + + if self._self_instance is None and self._self_binding == 'function': + descriptor = self._self_parent.__wrapped__.__get__( + instance, owner) + + return self._self_parent.__bound_function_wrapper__( + descriptor, instance, self._self_wrapper, + self._self_enabled, self._self_binding, + self._self_parent) + + return self + + def __call__(self, *args, **kwargs): + # If enabled has been specified, then evaluate it at this point + # and if the wrapper is not to be executed, then simply return + # the bound function rather than a bound wrapper for the bound + # function. When evaluating enabled, if it is callable we call + # it, otherwise we evaluate it as a boolean. + + if self._self_enabled is not None: + if callable(self._self_enabled): + if not self._self_enabled(): + return self.__wrapped__(*args, **kwargs) + elif not self._self_enabled: + return self.__wrapped__(*args, **kwargs) + + # This can occur where initial function wrapper was applied to + # a function that was already bound to an instance. In that case + # we want to extract the instance from the function and use it. + + if self._self_binding == 'function': + if self._self_instance is None: + instance = getattr(self.__wrapped__, '__self__', None) + if instance is not None: + return self._self_wrapper(self.__wrapped__, instance, + args, kwargs) + + # This is generally invoked when the wrapped function is being + # called as a normal function and is not bound to a class as an + # instance method. This is also invoked in the case where the + # wrapped function was a method, but this wrapper was in turn + # wrapped using the staticmethod decorator. + + return self._self_wrapper(self.__wrapped__, self._self_instance, + args, kwargs) + +class BoundFunctionWrapper(_FunctionWrapperBase): + + def __call__(self, *args, **kwargs): + # If enabled has been specified, then evaluate it at this point + # and if the wrapper is not to be executed, then simply return + # the bound function rather than a bound wrapper for the bound + # function. When evaluating enabled, if it is callable we call + # it, otherwise we evaluate it as a boolean. 
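+        # As a hypothetical example, a wrapper constructed with
+        # enabled=lambda: TRACING_ENABLED allows the instrumentation to
+        # be toggled at runtime without removing the wrapper itself.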
+
+        if self._self_enabled is not None:
+            if callable(self._self_enabled):
+                if not self._self_enabled():
+                    return self.__wrapped__(*args, **kwargs)
+            elif not self._self_enabled:
+                return self.__wrapped__(*args, **kwargs)
+
+        # We need to do things differently depending on whether we are
+        # likely wrapping an instance method vs a static method or class
+        # method.
+
+        if self._self_binding == 'function':
+            if self._self_instance is None:
+                # This situation can occur where someone is calling the
+                # instancemethod via the class type and passing the instance
+                # as the first argument. We need to shift the args before
+                # making the call to the wrapper and effectively bind the
+                # instance to the wrapped function using a partial so the
+                # wrapper doesn't see anything as being different.
+
+                if not args:
+                    raise TypeError('missing 1 required positional argument')
+
+                instance, args = args[0], args[1:]
+                wrapped = PartialCallableObjectProxy(self.__wrapped__, instance)
+                return self._self_wrapper(wrapped, instance, args, kwargs)
+
+            return self._self_wrapper(self.__wrapped__, self._self_instance,
+                    args, kwargs)
+
+        else:
+            # Since in this case we would be dealing with a classmethod or
+            # staticmethod, _self_instance will only tell us whether the
+            # classmethod or staticmethod was called via an instance of the
+            # class it is bound to, and not the case where it was called
+            # via the class type itself. We thus ignore _self_instance
+            # and use the __self__ attribute of the bound function instead.
+            # For a classmethod, this means instance will be the class type
+            # and for a staticmethod it will be None. This is probably the
+            # more useful thing we can pass through even though we lose
+            # knowledge of whether they were called on the instance vs the
+            # class type, as it reflects what they have available in the
+            # decorated function.
+
+            instance = getattr(self.__wrapped__, '__self__', None)
+
+            return self._self_wrapper(self.__wrapped__, instance, args,
+                    kwargs)
+
+class FunctionWrapper(_FunctionWrapperBase):
+
+    __bound_function_wrapper__ = BoundFunctionWrapper
+
+    def __init__(self, wrapped, wrapper, enabled=None):
+        # What it is we are wrapping here could be anything. We need to
+        # try and detect specific cases though. In particular, we need
+        # to detect when we are given something that is a method of a
+        # class. Further, we need to know when it is likely an instance
+        # method, as opposed to a class or static method. This can
+        # become problematic though as there isn't strictly a foolproof
+        # method of knowing.
+        #
+        # The situations we could encounter when wrapping a method are:
+        #
+        # 1. The wrapper is being applied as part of a decorator which
+        # is a part of the class definition. In this case what we are
+        # given is the raw unbound function, classmethod or staticmethod
+        # wrapper objects.
+        #
+        # The problem here is that we will not know we are being applied
+        # in the context of the class being set up. This becomes
+        # important later for the case of an instance method, because in
+        # that case we just see it as a raw function and can't
+        # distinguish it from wrapping a normal function outside of
+        # a class context.
+        #
+        # 2. The wrapper is being applied when performing monkey
+        # patching of the class type afterwards and the method to be
+        # wrapped was retrieved directly from the __dict__ of the class
+        # type. This is effectively the same as (1) above.
+        #
+        # 3. The wrapper is being applied when performing monkey
+        # patching of the class type afterwards and the method to be
+        # wrapped was retrieved from the class type. In this case
+        # binding will have been performed where the instance against
+        # which the method is bound will be None at that point.
+        #
+        # This case is a problem because we can no longer tell if the
+        # method was a static method, plus, if using Python 3, we cannot
+        # tell if it was an instance method as the concept of an
+        # unbound method no longer exists.
+        #
+        # 4. The wrapper is being applied when performing monkey
+        # patching of an instance of a class. In this case binding will
+        # have been performed where the instance was not None.
+        #
+        # This case is a problem because we can no longer tell if the
+        # method was a static method.
+        #
+        # Overall, the best we can do is look at the original type of the
+        # object which was wrapped prior to any binding being done and
+        # see if it is an instance of classmethod or staticmethod. In
+        # the case where other decorators are between us and them, if
+        # they do not propagate the __class__ attribute so that the
+        # isinstance() check works, then likely this will do the wrong
+        # thing where classmethod and staticmethod are used.
+        #
+        # Since it is likely to be very rare that anyone even puts
+        # decorators around classmethod and staticmethod, the likelihood
+        # of that being an issue is very small, so we accept it and
+        # suggest that those other decorators be fixed. It is also only
+        # an issue if a decorator wants to actually do things with the
+        # arguments.
+        #
+        # As to not being able to identify static methods properly, we
+        # just hope that that isn't something people are going to want
+        # to wrap, or if they do, suggest they do it the correct way by
+        # ensuring that it is decorated in the class definition itself,
+        # or patch it in the __dict__ of the class type.
+        #
+        # So to get the best outcome we can, whenever we aren't sure what
+        # it is, we label it as a 'function'. If it was already bound and
+        # is rebound later, we assume that it will be an instance
+        # method and try and cope with the possibility that the 'self'
+        # argument is being passed as an explicit argument and shuffle
+        # the arguments around to extract 'self' for use as the instance.
+
+        if isinstance(wrapped, classmethod):
+            binding = 'classmethod'
+
+        elif isinstance(wrapped, staticmethod):
+            binding = 'staticmethod'
+
+        elif hasattr(wrapped, '__self__'):
+            if inspect.isclass(wrapped.__self__):
+                binding = 'classmethod'
+            else:
+                binding = 'function'
+
+        else:
+            binding = 'function'
+
+        super(FunctionWrapper, self).__init__(wrapped, None, wrapper,
+                enabled, binding)
+
+try:
+    if not os.environ.get('WRAPT_DISABLE_EXTENSIONS'):
+        from ._wrappers import (ObjectProxy, CallableObjectProxy,
+            PartialCallableObjectProxy, FunctionWrapper,
+            BoundFunctionWrapper, _FunctionWrapperBase)
+except ImportError:
+    pass
+
+# Helper functions for applying wrappers to existing functions.
+
+def resolve_path(module, name):
+    if isinstance(module, string_types):
+        __import__(module)
+        module = sys.modules[module]
+
+    parent = module
+
+    path = name.split('.')
+    attribute = path[0]
+
+    original = getattr(parent, attribute)
+    for attribute in path[1:]:
+        parent = original
+
+        # We can't just always use getattr() because in doing
+        # that on a class it will cause binding to occur which
+        # will complicate things later and cause some things not
+        # to work. For the case of a class we therefore access
+        # the __dict__ directly. To cope with the wrong class
+        # being given to us, or a method being moved into
+        # a base class, we need to walk the class hierarchy to
+        # work out exactly which __dict__ the method was defined
+        # in, as accessing it from __dict__ will fail if it was
+        # not actually on the class given. Fall back to using
+        # getattr() if we can't find it. If it truly doesn't
+        # exist, then that will fail.
+
+        if inspect.isclass(original):
+            for cls in inspect.getmro(original):
+                if attribute in vars(cls):
+                    original = vars(cls)[attribute]
+                    break
+            else:
+                original = getattr(original, attribute)
+
+        else:
+            original = getattr(original, attribute)
+
+    return (parent, attribute, original)
+
+def apply_patch(parent, attribute, replacement):
+    setattr(parent, attribute, replacement)
+
+def wrap_object(module, name, factory, args=(), kwargs={}):
+    (parent, attribute, original) = resolve_path(module, name)
+    wrapper = factory(original, *args, **kwargs)
+    apply_patch(parent, attribute, wrapper)
+    return wrapper
+
+# Function for applying a proxy object to an attribute of a class
+# instance. The wrapper works by defining an attribute of the same name
+# on the class which is a descriptor and which intercepts access to the
+# instance attribute. Note that this cannot be used on attributes which
+# are themselves defined by a property object.
+
+class AttributeWrapper(object):
+
+    def __init__(self, attribute, factory, args, kwargs):
+        self.attribute = attribute
+        self.factory = factory
+        self.args = args
+        self.kwargs = kwargs
+
+    def __get__(self, instance, owner):
+        value = instance.__dict__[self.attribute]
+        return self.factory(value, *self.args, **self.kwargs)
+
+    def __set__(self, instance, value):
+        instance.__dict__[self.attribute] = value
+
+    def __delete__(self, instance):
+        del instance.__dict__[self.attribute]
+
+def wrap_object_attribute(module, name, factory, args=(), kwargs={}):
+    path, attribute = name.rsplit('.', 1)
+    parent = resolve_path(module, path)[2]
+    wrapper = AttributeWrapper(attribute, factory, args, kwargs)
+    apply_patch(parent, attribute, wrapper)
+    return wrapper
+
+# Functions for creating a simple decorator using a FunctionWrapper,
+# plus shortcut functions for applying wrappers to functions. These are
+# for use when doing monkey patching. For a more featured way of
+# creating decorators see the 'decorator' decorator instead.
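+#
+# As an illustrative sketch (the module and attribute names here are
+# hypothetical), a monkey patch applied with these helpers could look
+# like:
+#
+#   @patch_function_wrapper('example.module', 'ExampleClass.method')
+#   def intercept(wrapped, instance, args, kwargs):
+#       # inspect or adjust args/kwargs here before delegating
+#       return wrapped(*args, **kwargs)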
+ +def function_wrapper(wrapper): + def _wrapper(wrapped, instance, args, kwargs): + target_wrapped = args[0] + if instance is None: + target_wrapper = wrapper + elif inspect.isclass(instance): + target_wrapper = wrapper.__get__(None, instance) + else: + target_wrapper = wrapper.__get__(instance, type(instance)) + return FunctionWrapper(target_wrapped, target_wrapper) + return FunctionWrapper(wrapper, _wrapper) + +def wrap_function_wrapper(module, name, wrapper): + return wrap_object(module, name, FunctionWrapper, (wrapper,)) + +def patch_function_wrapper(module, name): + def _wrapper(wrapper): + return wrap_object(module, name, FunctionWrapper, (wrapper,)) + return _wrapper + +def transient_function_wrapper(module, name): + def _decorator(wrapper): + def _wrapper(wrapped, instance, args, kwargs): + target_wrapped = args[0] + if instance is None: + target_wrapper = wrapper + elif inspect.isclass(instance): + target_wrapper = wrapper.__get__(None, instance) + else: + target_wrapper = wrapper.__get__(instance, type(instance)) + def _execute(wrapped, instance, args, kwargs): + (parent, attribute, original) = resolve_path(module, name) + replacement = FunctionWrapper(original, target_wrapper) + setattr(parent, attribute, replacement) + try: + return wrapped(*args, **kwargs) + finally: + setattr(parent, attribute, original) + return FunctionWrapper(target_wrapped, _execute) + return FunctionWrapper(wrapper, _wrapper) + return _decorator + +# A weak function proxy. This will work on instance methods, class +# methods, static methods and regular functions. Special treatment is +# needed for the method types because the bound method is effectively a +# transient object and applying a weak reference to one will immediately +# result in it being destroyed and the weakref callback called. The weak +# reference is therefore applied to the instance the method is bound to +# and the original function. The function is then rebound at the point +# of a call via the weak function proxy. + +def _weak_function_proxy_callback(ref, proxy, callback): + if proxy._self_expired: + return + + proxy._self_expired = True + + # This could raise an exception. We let it propagate back and let + # the weakref.proxy() deal with it, at which point it generally + # prints out a short error message direct to stderr and keeps going. + + if callback is not None: + callback(proxy) + +class WeakFunctionProxy(ObjectProxy): + + __slots__ = ('_self_expired', '_self_instance') + + def __init__(self, wrapped, callback=None): + # We need to determine if the wrapped function is actually a + # bound method. In the case of a bound method, we need to keep a + # reference to the original unbound function and the instance. + # This is necessary because if we hold a reference to the bound + # function, it will be the only reference and given it is a + # temporary object, it will almost immediately expire and + # the weakref callback triggered. So what is done is that we + # hold a reference to the instance and unbound function and + # when called bind the function to the instance once again and + # then call it. Note that we avoid using a nested function for + # the callback here so as not to cause any odd reference cycles. 
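+        # Note that the callback is guarded by the _self_expired flag,
+        # so even though weak references are taken out against both the
+        # instance and the function below, the callback will only ever
+        # be invoked once.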
+ + _callback = callback and functools.partial( + _weak_function_proxy_callback, proxy=self, + callback=callback) + + self._self_expired = False + + if isinstance(wrapped, _FunctionWrapperBase): + self._self_instance = weakref.ref(wrapped._self_instance, + _callback) + + if wrapped._self_parent is not None: + super(WeakFunctionProxy, self).__init__( + weakref.proxy(wrapped._self_parent, _callback)) + + else: + super(WeakFunctionProxy, self).__init__( + weakref.proxy(wrapped, _callback)) + + return + + try: + self._self_instance = weakref.ref(wrapped.__self__, _callback) + + super(WeakFunctionProxy, self).__init__( + weakref.proxy(wrapped.__func__, _callback)) + + except AttributeError: + self._self_instance = None + + super(WeakFunctionProxy, self).__init__( + weakref.proxy(wrapped, _callback)) + + def __call__(self, *args, **kwargs): + # We perform a boolean check here on the instance and wrapped + # function as that will trigger the reference error prior to + # calling if the reference had expired. + + instance = self._self_instance and self._self_instance() + function = self.__wrapped__ and self.__wrapped__ + + # If the wrapped function was originally a bound function, for + # which we retained a reference to the instance and the unbound + # function we need to rebind the function and then call it. If + # not just called the wrapped function. + + if instance is None: + return self.__wrapped__(*args, **kwargs) + + return function.__get__(instance, type(instance))(*args, **kwargs) diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000000..2fb599cca1 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,125 @@ +version: "3" +# remember to use this compose file __ONLY__ for development/testing purposes + +services: + elasticsearch: + image: elasticsearch:2.4-alpine + ports: + - "127.0.0.1:9200:9200" + cassandra: + image: spotify/cassandra:latest + environment: + - MAX_HEAP_SIZE=512M + - HEAP_NEWSIZE=256M + ports: + - "127.0.0.1:9042:9042" + consul: + image: consul:1.6.0 + ports: + - "127.0.0.1:8500:8500" + postgres: + image: postgres:10.5-alpine + environment: + - POSTGRES_PASSWORD=postgres + - POSTGRES_USER=postgres + - POSTGRES_DB=postgres + ports: + - "127.0.0.1:5432:5432" + mysql: + image: mysql:5.7 + environment: + - MYSQL_ROOT_PASSWORD=admin + - MYSQL_PASSWORD=test + - MYSQL_USER=test + - MYSQL_DATABASE=test + ports: + - "127.0.0.1:3306:3306" + redis: + image: redis:4.0-alpine + ports: + - "127.0.0.1:6379:6379" + rediscluster: + image: grokzen/redis-cluster:4.0.9 + environment: + - IP=0.0.0.0 + ports: + - "127.0.0.1:7000:7000" + - "127.0.0.1:7001:7001" + - "127.0.0.1:7002:7002" + - "127.0.0.1:7003:7003" + - "127.0.0.1:7004:7004" + - "127.0.0.1:7005:7005" + mongo: + image: mongo:3.6 + ports: + - "127.0.0.1:27017:27017" + memcached: + image: memcached:1.5-alpine + ports: + - "127.0.0.1:11211:11211" + moto: + # container that executes mocked AWS services; this is a custom + # build that runs all of them in a single container. 
It is built + # using this fork: https://github.com/palazzem/moto/tree/palazzem/docker-service + image: datadog/docker-library:moto_1_0_1 + ports: + - "127.0.0.1:5000:5000" + - "127.0.0.1:5001:5001" + - "127.0.0.1:5002:5002" + - "127.0.0.1:5003:5003" + - "127.0.0.1:5004:5004" + - "127.0.0.1:5005:5005" + rabbitmq: + image: rabbitmq:3.7-alpine + ports: + - "127.0.0.1:5672:5672" + ddagent: + image: datadog/agent-dev:gbbr-apm-build + environment: + - DD_BIND_HOST=0.0.0.0 + - DD_API_KEY=invalid_key_but_this_is_fine + - DD_APM_RECEIVER_SOCKET=/tmp/ddagent/trace.sock + ports: + - "127.0.0.1:8126:8126" + volumes: + - ddagent:/tmp/ddagent + + vertica: + image: sumitchawla/vertica + environment: + - VP_TEST_USER=dbadmin + - VP_TEST_PASSWORD=abc123 + - VP_TEST_DATABASE=docker + ports: + - "127.0.0.1:5433:5433" + + testrunner: + image: datadog/docker-library:ddtrace_py + environment: + - TOX_SKIP_DIST=True + - TEST_DATADOG_INTEGRATION=1 + - TEST_DATADOG_INTEGRATION_UDS=1 + network_mode: host + working_dir: /src + volumes: + - ddagent:/tmp/ddagent + - ./ddtrace:/src/ddtrace:ro + # DEV: Make ddtrace/vendor rw so Tox can build C-extensions + - ./ddtrace/vendor:/src/ddtrace/vendor:rw + - ./tests:/src/tests:ro + - ./setup.cfg:/src/setup.cfg:ro + - ./setup.py:/src/setup.py:ro + - ./conftest.py:/src/conftest.py:ro + - ./tox.ini:/src/tox.ini:ro + - ./docs:/src/docs:ro + - ./pyproject.toml:/src/pyproject.toml:ro + - ./.ddtox:/src/.tox + - ./scripts:/src/scripts + # setuptools_scm needs `.git` to figure out what version we are on + # DEV: We could use `SETUPTOOLS_SCM_PRETEND_VERSION` but prefer `.git` + # to get the same behavior as during releases + - ./.git:/src/.git:ro + command: bash + +volumes: + ddagent: diff --git a/docs/Makefile b/docs/Makefile new file mode 100644 index 0000000000..7b1ce33c23 --- /dev/null +++ b/docs/Makefile @@ -0,0 +1,225 @@ +# Makefile for Sphinx documentation +# + +# You can set these variables from the command line. +SPHINXOPTS = +SPHINXBUILD = sphinx-build +PAPER = +BUILDDIR = _build + +# Internal variables. +PAPEROPT_a4 = -D latex_paper_size=a4 +PAPEROPT_letter = -D latex_paper_size=letter +ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . +# the i18n builder cannot share the environment and doctrees with the others +I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
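+
+# As the header comment notes, these variables can be overridden from
+# the command line, for example:
+#
+#   make html SPHINXOPTS=-W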
+ +.PHONY: help +help: + @echo "Please use \`make ' where is one of" + @echo " html to make standalone HTML files" + @echo " dirhtml to make HTML files named index.html in directories" + @echo " singlehtml to make a single large HTML file" + @echo " pickle to make pickle files" + @echo " json to make JSON files" + @echo " htmlhelp to make HTML files and a HTML help project" + @echo " qthelp to make HTML files and a qthelp project" + @echo " applehelp to make an Apple Help Book" + @echo " devhelp to make HTML files and a Devhelp project" + @echo " epub to make an epub" + @echo " epub3 to make an epub3" + @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" + @echo " latexpdf to make LaTeX files and run them through pdflatex" + @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" + @echo " text to make text files" + @echo " man to make manual pages" + @echo " texinfo to make Texinfo files" + @echo " info to make Texinfo files and run them through makeinfo" + @echo " gettext to make PO message catalogs" + @echo " changes to make an overview of all changed/added/deprecated items" + @echo " xml to make Docutils-native XML files" + @echo " pseudoxml to make pseudoxml-XML files for display purposes" + @echo " linkcheck to check all external links for integrity" + @echo " doctest to run all doctests embedded in the documentation (if enabled)" + @echo " coverage to run coverage check of the documentation (if enabled)" + @echo " dummy to check syntax errors of document sources" + +.PHONY: clean +clean: + rm -rf $(BUILDDIR)/* + +.PHONY: html +html: + $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." + +.PHONY: dirhtml +dirhtml: + $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml + @echo + @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." + +.PHONY: singlehtml +singlehtml: + $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml + @echo + @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." + +.PHONY: pickle +pickle: + $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle + @echo + @echo "Build finished; now you can process the pickle files." + +.PHONY: json +json: + $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json + @echo + @echo "Build finished; now you can process the JSON files." + +.PHONY: htmlhelp +htmlhelp: + $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp + @echo + @echo "Build finished; now you can run HTML Help Workshop with the" \ + ".hhp project file in $(BUILDDIR)/htmlhelp." + +.PHONY: qthelp +qthelp: + $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp + @echo + @echo "Build finished; now you can run "qcollectiongenerator" with the" \ + ".qhcp project file in $(BUILDDIR)/qthelp, like this:" + @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/ddtrace.qhcp" + @echo "To view the help file:" + @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/ddtrace.qhc" + +.PHONY: applehelp +applehelp: + $(SPHINXBUILD) -b applehelp $(ALLSPHINXOPTS) $(BUILDDIR)/applehelp + @echo + @echo "Build finished. The help book is in $(BUILDDIR)/applehelp." + @echo "N.B. You won't be able to view it unless you put it in" \ + "~/Library/Documentation/Help or install it in your application" \ + "bundle." + +.PHONY: devhelp +devhelp: + $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp + @echo + @echo "Build finished." 
+ @echo "To view the help file:" + @echo "# mkdir -p $$HOME/.local/share/devhelp/ddtrace" + @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/ddtrace" + @echo "# devhelp" + +.PHONY: epub +epub: + $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub + @echo + @echo "Build finished. The epub file is in $(BUILDDIR)/epub." + +.PHONY: epub3 +epub3: + $(SPHINXBUILD) -b epub3 $(ALLSPHINXOPTS) $(BUILDDIR)/epub3 + @echo + @echo "Build finished. The epub3 file is in $(BUILDDIR)/epub3." + +.PHONY: latex +latex: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo + @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." + @echo "Run \`make' in that directory to run these through (pdf)latex" \ + "(use \`make latexpdf' here to do that automatically)." + +.PHONY: latexpdf +latexpdf: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through pdflatex..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: latexpdfja +latexpdfja: + $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex + @echo "Running LaTeX files through platex and dvipdfmx..." + $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja + @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." + +.PHONY: text +text: + $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text + @echo + @echo "Build finished. The text files are in $(BUILDDIR)/text." + +.PHONY: man +man: + $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man + @echo + @echo "Build finished. The manual pages are in $(BUILDDIR)/man." + +.PHONY: texinfo +texinfo: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo + @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." + @echo "Run \`make' in that directory to run these through makeinfo" \ + "(use \`make info' here to do that automatically)." + +.PHONY: info +info: + $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo + @echo "Running Texinfo files through makeinfo..." + make -C $(BUILDDIR)/texinfo info + @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." + +.PHONY: gettext +gettext: + $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale + @echo + @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." + +.PHONY: changes +changes: + $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes + @echo + @echo "The overview file is in $(BUILDDIR)/changes." + +.PHONY: linkcheck +linkcheck: + $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck + @echo + @echo "Link check complete; look for any errors in the above output " \ + "or in $(BUILDDIR)/linkcheck/output.txt." + +.PHONY: doctest +doctest: + $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest + @echo "Testing of doctests in the sources finished, look at the " \ + "results in $(BUILDDIR)/doctest/output.txt." + +.PHONY: coverage +coverage: + $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(BUILDDIR)/coverage + @echo "Testing of coverage in the sources finished, look at the " \ + "results in $(BUILDDIR)/coverage/python.txt." + +.PHONY: xml +xml: + $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml + @echo + @echo "Build finished. The XML files are in $(BUILDDIR)/xml." + +.PHONY: pseudoxml +pseudoxml: + $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml + @echo + @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." 
+ +.PHONY: dummy +dummy: + $(SPHINXBUILD) -b dummy $(ALLSPHINXOPTS) $(BUILDDIR)/dummy + @echo + @echo "Build finished. Dummy builder generates no files." diff --git a/docs/_templates/nav.html b/docs/_templates/nav.html new file mode 100644 index 0000000000..c7c5d4b64f --- /dev/null +++ b/docs/_templates/nav.html @@ -0,0 +1,9 @@ +{{ toctree(includehidden=theme_sidebar_includehidden, collapse=theme_sidebar_collapse) }} +{% if theme_extra_nav_links %} +
+<hr />
+<ul>
+    {% for text, uri in theme_extra_nav_links.items() %}
+    <li class="extra-nav-link"><a href="{{ uri }}">{{ text }}</a></li>
+    {% endfor %}
+</ul>
+{% endif %}
diff --git a/docs/advanced_usage.rst b/docs/advanced_usage.rst
new file mode 100644
index 0000000000..10671be58e
--- /dev/null
+++ b/docs/advanced_usage.rst
@@ -0,0 +1,639 @@
+Advanced Usage
+==============
+
+Agent Configuration
+-------------------
+
+If the Datadog Agent is on a separate host from your application, you can modify
+the default ``ddtrace.tracer`` object to utilize another hostname and port. Here
+is a small example showcasing this::
+
+    from ddtrace import tracer
+
+    tracer.configure(hostname=<YOUR_HOST>, port=<YOUR_PORT>, https=<True/False>)
+
+By default, these will be set to ``localhost``, ``8126``, and ``False`` respectively.
+
+You can also use a Unix Domain Socket to connect to the agent::
+
+    from ddtrace import tracer
+
+    tracer.configure(uds_path="/path/to/socket")
+
+
+Distributed Tracing
+-------------------
+
+To trace requests across hosts, the spans on the secondary hosts must be linked together by setting `trace_id`, `parent_id` and `sampling_priority`.
+
+- On the server side, this means reading the propagated attributes and setting them on the active tracing context.
+- On the client side, this means propagating the attributes, commonly as a header/metadata.
+
+`ddtrace` already provides default propagators but you can also implement your own.
+
+Web Frameworks
+^^^^^^^^^^^^^^
+
+Some web framework integrations support distributed tracing out of the box.
+
+Supported web frameworks:
+
+
++-------------------+---------+
+| Framework/Library | Enabled |
++===================+=========+
+| :ref:`aiohttp`    | True    |
++-------------------+---------+
+| :ref:`bottle`     | True    |
++-------------------+---------+
+| :ref:`django`     | True    |
++-------------------+---------+
+| :ref:`falcon`     | True    |
++-------------------+---------+
+| :ref:`flask`      | True    |
++-------------------+---------+
+| :ref:`pylons`     | True    |
++-------------------+---------+
+| :ref:`pyramid`    | True    |
++-------------------+---------+
+| :ref:`requests`   | True    |
++-------------------+---------+
+| :ref:`tornado`    | True    |
++-------------------+---------+
+
+
+HTTP Client
+^^^^^^^^^^^
+
+For distributed tracing to work, necessary tracing information must be passed
+alongside a request as it flows through the system. When the request is handled
+on the other side, the metadata is retrieved and the trace can continue.
+
+To propagate the tracing information, HTTP headers are used to transmit the
+required metadata to piece together the trace.
+
+.. autoclass:: ddtrace.propagation.http.HTTPPropagator
+    :members:
+
+Custom
+^^^^^^
+
+You can manually propagate your tracing context over your RPC protocol. Here is
+an example assuming that you have an `rpc.call` function that calls a `method` and
+propagates a `rpc_metadata` dictionary over the wire::
+
+
+    from ddtrace import tracer
+    from ddtrace.context import Context
+
+    # Implement your own context propagator
+    class MyRPCPropagator(object):
+        def inject(self, span_context, rpc_metadata):
+            rpc_metadata.update({
+                'trace_id': span_context.trace_id,
+                'span_id': span_context.span_id,
+                'sampling_priority': span_context.sampling_priority,
+            })
+
+        def extract(self, rpc_metadata):
+            return Context(
+                trace_id=rpc_metadata['trace_id'],
+                span_id=rpc_metadata['span_id'],
+                sampling_priority=rpc_metadata['sampling_priority'],
+            )
+
+    # On the parent side
+    def parent_rpc_call():
+        with tracer.trace("parent_span") as span:
+            rpc_metadata = {}
+            propagator = MyRPCPropagator()
+            propagator.inject(span.context, rpc_metadata)
+            method = "<my_rpc_method>"
+            rpc.call(method, rpc_metadata)
+
+    # On the child side
+    def child_rpc_call(method, rpc_metadata):
+        propagator = MyRPCPropagator()
+        context = propagator.extract(rpc_metadata)
+        tracer.context_provider.activate(context)
+
+        with tracer.trace("child_span") as span:
+            span.set_meta('my_rpc_method', method)
+
+
+Sampling
+--------
+
+.. _`Priority Sampling`:
+
+Priority Sampling
+^^^^^^^^^^^^^^^^^
+
+To learn about what sampling is, check out our sampling documentation.
+
+By default priorities are set on a trace by a sampler. The sampler can set the
+priority to the following values:
+
+- ``AUTO_REJECT``: the sampler automatically rejects the trace
+- ``AUTO_KEEP``: the sampler automatically keeps the trace
+
+Priority sampling is enabled by default.
+When enabled, the sampler will automatically assign a priority to your traces,
+depending on their service and volume.
+This ensures that your sampled distributed traces will be complete.
+
+You can also set this priority manually to either drop an uninteresting trace or
+to keep an important one.
+To do this, set the ``context.sampling_priority`` to one of the following:
+
+- ``USER_REJECT``: the user asked to reject the trace
+- ``USER_KEEP``: the user asked to keep the trace
+
+When not using distributed tracing, you may change the priority at any time, as
+long as the trace is not finished yet.
+In a distributed context, however, it has to be done before any context
+propagation (fork, RPC calls) to be effective.
+Changing the priority after context has been propagated causes different parts
+of a distributed trace to use different priorities. Some parts might be kept,
+some parts might be rejected, and this can cause the trace to be partially
+stored and remain incomplete.
+
+If you change the priority, we recommend you do it as soon as possible, when the
+root span has just been created::
+
+    from ddtrace.ext.priority import USER_REJECT, USER_KEEP
+
+    context = tracer.context_provider.active()
+
+    # indicate to not keep the trace
+    context.sampling_priority = USER_REJECT
+
+
+Client Sampling
+^^^^^^^^^^^^^^^
+
+Client sampling enables the sampling of traces before they are sent to the
+Agent. This can provide some performance benefit as the traces will be
+dropped in the client.
+
+The ``RateSampler`` randomly samples a percentage of traces::
+
+    from ddtrace.sampler import RateSampler
+
+    # Sample rate is between 0 (nothing sampled) and 1 (everything sampled).
+    # Keep 20% of the traces.
+    sample_rate = 0.2
+    tracer.sampler = RateSampler(sample_rate)
+
+
+Trace Search & Analytics
+------------------------
+
+Use Trace Search & Analytics to filter application performance metrics and APM
+Events by user-defined tags.
An APM event is generated every time a trace is generated. + +Enabling APM events for all web frameworks can be accomplished by setting the environment variable ``DD_TRACE_ANALYTICS_ENABLED=true``: + +* :ref:`aiohttp` +* :ref:`bottle` +* :ref:`django` +* :ref:`falcon` +* :ref:`flask` +* :ref:`molten` +* :ref:`pylons` +* :ref:`pyramid` +* :ref:`requests` +* :ref:`tornado` + + +For most libraries, APM events can be enabled with the environment variable ``DD_{INTEGRATION}_ANALYTICS_ENABLED=true``: + ++----------------------+----------------------------------------+ +| Library | Environment Variable | ++======================+========================================+ +| :ref:`aiobotocore` | ``DD_AIOBOTOCORE_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`aiopg` | ``DD_AIOPG_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`boto` | ``DD_BOTO_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`botocore` | ``DD_BOTOCORE_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`bottle` | ``DD_BOTTLE_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`cassandra` | ``DD_CASSANDRA_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`elasticsearch` | ``DD_ELASTICSEARCH_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`falcon` | ``DD_FALCON_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`flask` | ``DD_FLASK_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`flask_cache` | ``DD_FLASK_CACHE_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`grpc` | ``DD_GRPC_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`httplib` | ``DD_HTTPLIB_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`kombu` | ``DD_KOMBU_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`molten` | ``DD_MOLTEN_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`pylibmc` | ``DD_PYLIBMC_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`pylons` | ``DD_PYLONS_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`pymemcache` | ``DD_PYMEMCACHE_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`pymongo` | ``DD_PYMONGO_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`redis` | ``DD_REDIS_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`rediscluster` | ``DD_REDISCLUSTER_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`sqlalchemy` | ``DD_SQLALCHEMY_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ +| :ref:`vertica` | ``DD_VERTICA_ANALYTICS_ENABLED`` | ++----------------------+----------------------------------------+ + +For datastore libraries that extend another, use the setting for the underlying library: + ++------------------------+----------------------------------+ +| Library | 
Environment Variable             |
++========================+==================================+
+| :ref:`mongoengine`     | ``DD_PYMONGO_ANALYTICS_ENABLED`` |
++------------------------+----------------------------------+
+| :ref:`mysql-connector` | ``DD_DBAPI2_ANALYTICS_ENABLED``  |
++------------------------+----------------------------------+
+| :ref:`mysqldb`         | ``DD_DBAPI2_ANALYTICS_ENABLED``  |
++------------------------+----------------------------------+
+| :ref:`psycopg2`        | ``DD_DBAPI2_ANALYTICS_ENABLED``  |
++------------------------+----------------------------------+
+| :ref:`pymysql`         | ``DD_DBAPI2_ANALYTICS_ENABLED``  |
++------------------------+----------------------------------+
+| :ref:`sqllite`         | ``DD_DBAPI2_ANALYTICS_ENABLED``  |
++------------------------+----------------------------------+
+
+Where environment variables are not used for configuring the tracer, the
+instructions for configuring trace analytics are provided in the library
+documentation:
+
+* :ref:`aiohttp`
+* :ref:`django`
+* :ref:`pyramid`
+* :ref:`requests`
+* :ref:`tornado`
+
+Resolving deprecation warnings
+------------------------------
+Before upgrading, it's a good idea to resolve any deprecation warnings raised by your project.
+These warnings must be fixed before upgrading, otherwise the ``ddtrace`` library
+will not work as expected. Our deprecation messages include the version where
+the behavior is altered or removed.
+
+In Python, deprecation warnings are silenced by default. To enable them you may
+add the following flag or environment variable::
+
+    $ python -Wall app.py
+
+    # or
+
+    $ PYTHONWARNINGS=all python app.py
+
+
+Trace Filtering
+---------------
+
+It is possible to filter or modify traces before they are sent to the Agent by
+configuring the tracer with a filters list. For instance, to filter out
+all traces of incoming requests to a specific url::
+
+    tracer.configure(settings={
+        'FILTERS': [
+            FilterRequestsOnUrl(r'http://test\.example\.com'),
+        ],
+    })
+
+All the filters in the filters list will be evaluated sequentially
+for each trace and the resulting trace will either be sent to the Agent or
+discarded depending on the output.
+
+**Use the standard filters**
+
+The library comes with a ``FilterRequestsOnUrl`` filter that can be used to
+filter out incoming requests to specific urls:
+
+.. autoclass:: ddtrace.filters.FilterRequestsOnUrl
+    :members:
+
+**Write a custom filter**
+
+Creating your own filters is as simple as implementing a class with a
+``process_trace`` method and adding it to the filters parameter of
+``tracer.configure``. ``process_trace`` should either return a trace to be fed
+to the next step of the pipeline or ``None`` if the trace should be discarded::
+
+    class FilterExample(object):
+        def process_trace(self, trace):
+            # write here your logic to return the `trace` or None;
+            # `trace` instance is owned by the thread and you can alter
+            # each single span or the whole trace if needed
+
+    # And then instantiate it with
+    filters = [FilterExample()]
+    tracer.configure(settings={'FILTERS': filters})
+
+(see filters.py for other example implementations)
+
+.. _`Logs Injection`:
+
+Logs Injection
+--------------
+
+.. automodule:: ddtrace.contrib.logging
+
+HTTP layer
+----------
+
+Query String Tracing
+^^^^^^^^^^^^^^^^^^^^
+
+It is possible to store the query string of the URL — the part after the ``?``
+in your URL — in the ``url.query.string`` tag.
+
+Configuration can be provided both at the global level and at the integration level.
+ +Examples:: + + from ddtrace import config + + # Global config + config.http.trace_query_string = True + + # Integration level config, e.g. 'falcon' + config.falcon.http.trace_query_string = True + +.. _http-headers-tracing: + +Headers tracing +^^^^^^^^^^^^^^^ + + +For a selected set of integrations, it is possible to store http headers from both requests and responses in tags. + +Configuration can be provided both at the global level and at the integration level. + +Examples:: + + from ddtrace import config + + # Global config + config.trace_headers([ + 'user-agent', + 'transfer-encoding', + ]) + + # Integration level config, e.g. 'falcon' + config.falcon.http.trace_headers([ + 'user-agent', + 'some-other-header', + ]) + +The following rules apply: + - headers configuration is based on a whitelist. If a header does not appear in the whitelist, it won't be traced. + - headers configuration is case-insensitive. + - if you configure a specific integration, e.g. 'requests', then such configuration overrides the default global + configuration, only for the specific integration. + - if you do not configure a specific integration, then the default global configuration applies, if any. + - if no configuration is provided (neither global nor integration-specific), then headers are not traced. + +Once you configure your application for tracing, you will have the headers attached to the trace as tags, with a +structure like in the following example:: + + http { + method GET + request { + headers { + user_agent my-app/0.0.1 + } + } + response { + headers { + transfer_encoding chunked + } + } + status_code 200 + url https://api.github.com/events + } + + +.. _adv_opentracing: + +OpenTracing +----------- + + +The Datadog opentracer can be configured via the ``config`` dictionary +parameter to the tracer which accepts the following described fields. See below +for usage. 
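+
+For example, a minimal sketch that only overrides where the agent is
+reachable (the hostname here is purely illustrative; the remaining keys
+take the defaults listed below)::
+
+    from ddtrace.opentracer import Tracer
+
+    config = {
+        'agent_hostname': 'agent.example.com',
+        'agent_port': 8126,
+    }
+    tracer = Tracer('my_service_name', config=config)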
+ ++---------------------+----------------------------------------+---------------+ +| Configuration Key | Description | Default Value | ++=====================+========================================+===============+ +| `enabled` | enable or disable the tracer | `True` | ++---------------------+----------------------------------------+---------------+ +| `debug` | enable debug logging | `False` | ++---------------------+----------------------------------------+---------------+ +| `agent_hostname` | hostname of the Datadog agent to use | `localhost` | ++---------------------+----------------------------------------+---------------+ +| `agent_https` | use https to connect to the agent | `False` | ++---------------------+----------------------------------------+---------------+ +| `agent_port` | port the Datadog agent is listening on | `8126` | ++---------------------+----------------------------------------+---------------+ +| `global_tags` | tags that will be applied to each span | `{}` | ++---------------------+----------------------------------------+---------------+ +| `sampler` | see `Sampling`_ | `AllSampler` | ++---------------------+----------------------------------------+---------------+ +| `priority_sampling` | see `Priority Sampling`_ | `True` | ++---------------------+----------------------------------------+---------------+ +| `settings` | see `Advanced Usage`_ | `{}` | ++---------------------+----------------------------------------+---------------+ + + +Usage +^^^^^ + +**Manual tracing** + +To explicitly trace:: + + import time + import opentracing + from ddtrace.opentracer import Tracer, set_global_tracer + + def init_tracer(service_name): + config = { + 'agent_hostname': 'localhost', + 'agent_port': 8126, + } + tracer = Tracer(service_name, config=config) + set_global_tracer(tracer) + return tracer + + def my_operation(): + span = opentracing.tracer.start_span('my_operation_name') + span.set_tag('my_interesting_tag', 'my_interesting_value') + time.sleep(0.05) + span.finish() + + init_tracer('my_service_name') + my_operation() + +**Context Manager Tracing** + +To trace a function using the span context manager:: + + import time + import opentracing + from ddtrace.opentracer import Tracer, set_global_tracer + + def init_tracer(service_name): + config = { + 'agent_hostname': 'localhost', + 'agent_port': 8126, + } + tracer = Tracer(service_name, config=config) + set_global_tracer(tracer) + return tracer + + def my_operation(): + with opentracing.tracer.start_span('my_operation_name') as span: + span.set_tag('my_interesting_tag', 'my_interesting_value') + time.sleep(0.05) + + init_tracer('my_service_name') + my_operation() + +See our tracing trace-examples_ repository for concrete, runnable examples of +the Datadog opentracer. + +.. _trace-examples: https://github.com/DataDog/trace-examples/tree/master/python + +See also the `Python OpenTracing`_ repository for usage of the tracer. + +.. _Python OpenTracing: https://github.com/opentracing/opentracing-python + + +**Alongside Datadog tracer** + +The Datadog OpenTracing tracer can be used alongside the Datadog tracer. This +provides the advantage of providing tracing information collected by +``ddtrace`` in addition to OpenTracing. The simplest way to do this is to use +the :ref:`ddtrace-run` command to invoke your OpenTraced +application. + + +**Opentracer API** + +.. autoclass:: ddtrace.opentracer.Tracer + :members: + :special-members: __init__ + + +.. 
_ddtracerun:
+
+``ddtrace-run``
+---------------
+
+``ddtrace-run`` will trace :ref:`supported` web frameworks
+and database modules without the need for changing your code::
+
+    $ ddtrace-run -h
+
+    Execute the given Python program, after configuring it
+    to emit Datadog traces.
+
+    Append command line arguments to your program as usual.
+
+    Usage: [ENV_VARS] ddtrace-run <my_program>
+
+
+The available environment variables for ``ddtrace-run`` are:
+
+* ``DATADOG_TRACE_ENABLED=true|false`` (default: true): Enable web framework and
+  library instrumentation. When false, your application code will not generate
+  any traces.
+* ``DATADOG_ENV`` (no default): Set an application's environment e.g. ``prod``,
+  ``pre-prod``, ``stage``
+* ``DATADOG_TRACE_DEBUG=true|false`` (default: false): Enable debug logging in
+  the tracer
+* ``DATADOG_SERVICE_NAME`` (no default): override the service name to be used
+  for this program. This value is passed through when setting up middleware for
+  web framework integrations (e.g. pylons, flask, django). For tracing without a
+  web integration, prefer setting the service name in code.
+* ``DATADOG_PATCH_MODULES=module:patch,module:patch...`` e.g.
+  ``boto:true,redis:false``: override the modules patched for this execution of
+  the program (default: none)
+* ``DATADOG_TRACE_AGENT_HOSTNAME=localhost``: override the address of the trace
+  agent host that the default tracer will attempt to submit to (default:
+  ``localhost``)
+* ``DATADOG_TRACE_AGENT_PORT=8126``: override the port that the default tracer
+  will submit to (default: 8126)
+* ``DATADOG_PRIORITY_SAMPLING`` (default: true): enables :ref:`Priority
+  Sampling`
+* ``DD_LOGS_INJECTION`` (default: false): enables :ref:`Logs Injection`
+
+``ddtrace-run`` respects a variety of common entrypoints for web applications:
+
+- ``ddtrace-run python my_app.py``
+- ``ddtrace-run python manage.py runserver``
+- ``ddtrace-run gunicorn myapp.wsgi:application``
+- ``ddtrace-run uwsgi --http :9090 --wsgi-file my_app.py``
+
+
+Pass along command-line arguments as your program would normally expect them::
+
+    $ ddtrace-run gunicorn myapp.wsgi:application --max-requests 1000 --statsd-host localhost:8125
+
+If you're running in a Kubernetes cluster and still don't see your traces, make
+sure your application has a route to the tracing Agent. An easy way to test
+this is::
+
+    $ pip install ipython
+    $ DATADOG_TRACE_DEBUG=true ddtrace-run ipython
+
+Because IPython uses SQLite, it will be automatically instrumented and your
+traces should be sent off. If an error occurs, a message will be displayed in
+the console, and changes can be made as needed.
+
+
+API
+---
+
+``Tracer``
+^^^^^^^^^^
+.. autoclass:: ddtrace.Tracer
+    :members:
+    :special-members: __init__
+
+
+``Span``
+^^^^^^^^
+.. autoclass:: ddtrace.Span
+    :members:
+    :special-members: __init__
+
+``Pin``
+^^^^^^^
+.. autoclass:: ddtrace.Pin
+    :members:
+    :special-members: __init__
+
+.. _patch_all:
+
+``patch_all``
+^^^^^^^^^^^^^
+
+.. autofunction:: ddtrace.monkey.patch_all
+
+``patch``
+^^^^^^^^^
+.. autofunction:: ddtrace.monkey.patch
+
+.. toctree::
+    :maxdepth: 2
diff --git a/docs/async_integrations.rst b/docs/async_integrations.rst
new file mode 100644
index 0000000000..6be816c1cb
--- /dev/null
+++ b/docs/async_integrations.rst
@@ -0,0 +1,18 @@
+Asynchronous Libraries
+----------------------
+
+.. _asyncio:
+
+asyncio
+^^^^^^^
+
+.. automodule:: ddtrace.contrib.asyncio
+
+
+.. _gevent:
+
+gevent
+^^^^^^
+
+..
automodule:: ddtrace.contrib.gevent + diff --git a/docs/basic_usage.rst b/docs/basic_usage.rst new file mode 100644 index 0000000000..9c79426730 --- /dev/null +++ b/docs/basic_usage.rst @@ -0,0 +1,107 @@ +.. _`basic usage`: + +Basic Usage +=========== + +With ``ddtrace`` installed, the application can be instrumented. + + +Auto Instrumentation +-------------------- + +``ddtrace-run`` +^^^^^^^^^^^^^^^ + +Python applications can easily be instrumented with ``ddtrace`` by using the +included ``ddtrace-run`` command. Simply prefix your Python execution command +with ``ddtrace-run`` in order to auto-instrument the libraries in your +application. + +For example, if the command to run your application is:: + +$ python app.py + +then to auto-instrument using Datadog, the corresponding command is:: + +$ ddtrace-run python app.py + +For more advanced usage of ``ddtrace-run`` refer to the documentation :ref:`here`. + +``patch_all`` +^^^^^^^^^^^^^ + +To manually invoke the automatic instrumentation use ``patch_all``:: + + from ddtrace import patch_all + patch_all() + +To toggle instrumentation for a particular module:: + + from ddtrace import patch_all + patch_all(redis=False, cassandra=False) + +By default all supported libraries will be patched when +``patch_all`` is invoked. + +**Note:** To ensure that the supported libraries are instrumented properly in +the application, they must be patched *prior* to being imported. So make sure +to call ``patch_all`` *before* importing libraries that are to be instrumented. + +More information about ``patch_all`` is available in our :ref:`patch_all` API +documentation. + + +Manual Instrumentation +---------------------- + +If you would like to extend the functionality of the ``ddtrace`` library or gain +finer control over instrumenting your application, several techniques are +provided by the library. + +Decorator +^^^^^^^^^ + +``ddtrace`` provides a decorator that can be used to trace a particular method +in your application:: + + @tracer.wrap() + def business_logic(): + """A method that would be of interest to trace.""" + # ... + # ... + +API details of the decorator can be found here :py:meth:`ddtrace.Tracer.wrap`. + +Context Manager +^^^^^^^^^^^^^^^ + +To trace an arbitrary block of code, you can use :py:meth:`ddtrace.Tracer.trace` +that returns a :py:mod:`ddtrace.Span` which can be used as a context manager:: + + # trace some interesting operation + with tracer.trace('interesting.operations'): + # do some interesting operation(s) + # ... + # ... + +Further API details can be found here :py:meth:`ddtrace.Tracer`. + +Using the API +^^^^^^^^^^^^^ + +If the above methods are still not enough to satisfy your tracing needs, a +manual API is provided which will allow you to start and finish spans however +you may require:: + + span = tracer.trace('operations.of.interest') + + # do some operation(s) of interest in between + + # NOTE: make sure to call span.finish() or the entire trace will not be sent + # to Datadog + span.finish() + +API details of the decorator can be found here: + +- :py:meth:`ddtrace.Tracer.trace` +- :py:meth:`ddtrace.Span.finish`. diff --git a/docs/conf.py b/docs/conf.py new file mode 100644 index 0000000000..9357dcd83c --- /dev/null +++ b/docs/conf.py @@ -0,0 +1,347 @@ +# -*- coding: utf-8 -*- +# +# ddtrace documentation build configuration file, created by +# sphinx-quickstart on Thu Jul 7 17:25:05 2016. +# +# This file is execfile()d with the current directory set to its +# containing dir. 
+# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# + +import os +import sys +from datetime import datetime + + +# append the ddtrace path to syspath +sys.path.insert(0, os.path.abspath("..")) + + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + "sphinx.ext.autodoc", + "sphinx.ext.extlinks", +] + +# Add any paths that contain templates here, relative to this directory. +templates_path = ["_templates"] + +# The suffix(es) of source filenames. +# You can specify multiple suffix as a list of string: +# +# source_suffix = ['.rst', '.md'] +source_suffix = ".rst" + +# The encoding of source files. +# +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = "index" + +# General information about the project. +year = datetime.now().year +project = u"ddtrace" +copyright = u"2016-{}, Datadog, Inc.".format(year) # noqa: A001 +author = u"Datadog, Inc." + +# document in order of source +autodoc_member_order = "bysource" + + +# The version info for the project you're documenting, acts as replacement for +# |version| and |release|, also used in various other places throughout the +# built documents. +# +# The short X.Y version. +# version = u'0.2' +# The full version, including alpha/beta/rc tags. +# release = u'0.2' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. +# +# This is also used if you do content translation via gettext catalogs. +# Usually you set "language" from the command line for these cases. +language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# +# today = '' +# +# Else, today_fmt is used as the format for a strftime call. +# +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +# This patterns also effect to html_static_path and html_extra_path +exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = "sphinx" + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. 
+# keep_warnings = False + +# If true, `todo` and `todoList` produce output, else they produce nothing. +todo_include_todos = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +# +html_theme = "alabaster" + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# +html_theme_options = { + "description": "Datadog's Python tracing client", +} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. +# " v documentation" by default. +# +# html_title = u'ddtrace v0.2' + +# A shorter title for the navigation bar. Default is the same as html_title. +# +# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# +# html_logo = None + +# The name of an image file (relative to this directory) to use as a favicon of +# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +# html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# +# html_extra_path = [] + +# If not None, a 'Last updated on:' timestamp is inserted at every page +# bottom, using the given strftime format. +# The empty string is equivalent to '%b %d, %Y'. +# +# html_last_updated_fmt = None + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# +html_sidebars = {"**": ["about.html", "nav.html", "relations.html", "searchbox.html"]} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# +# html_additional_pages = {} + +# If false, no module index is generated. +# +# html_domain_indices = True + +# If false, no index is generated. +# +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. +# +# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Language to be used for generating the HTML full-text search index. 
+# Sphinx supports the following languages:
+# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
+# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr', 'zh'
+#
+# html_search_language = 'en'
+
+# A dictionary with options for the search language support, empty by default.
+# 'ja' uses this config value.
+# 'zh' user can custom change `jieba` dictionary path.
+#
+# html_search_options = {'type': 'default'}
+
+# The name of a javascript file (relative to the configuration directory) that
+# implements a search results scorer. If empty, the default will be used.
+#
+# html_search_scorer = 'scorer.js'
+
+# Output file base name for HTML help builder.
+htmlhelp_basename = "ddtracedoc"
+
+# -- Options for LaTeX output ---------------------------------------------
+
+latex_elements = {
+    # The paper size ('letterpaper' or 'a4paper').
+    #
+    # 'papersize': 'letterpaper',
+    # The font size ('10pt', '11pt' or '12pt').
+    #
+    # 'pointsize': '10pt',
+    # Additional stuff for the LaTeX preamble.
+    #
+    # 'preamble': '',
+    # Latex figure (float) alignment
+    #
+    # 'figure_align': 'htbp',
+}
+
+# Grouping the document tree into LaTeX files. List of tuples
+# (source start file, target name, title,
+#  author, documentclass [howto, manual, or own class]).
+latex_documents = [
+    (master_doc, "ddtrace.tex", u"ddtrace Documentation", u"Datadog, Inc", "manual"),
+]
+
+# The name of an image file (relative to this directory) to place at the top of
+# the title page.
+#
+# latex_logo = None
+
+# For "manual" documents, if this is true, then toplevel headings are parts,
+# not chapters.
+#
+# latex_use_parts = False
+
+# If true, show page references after internal links.
+#
+# latex_show_pagerefs = False
+
+# If true, show URL addresses after external links.
+#
+# latex_show_urls = False
+
+# Documents to append as an appendix to all manuals.
+#
+# latex_appendices = []
+
+# If false, no module index is generated.
+#
+# latex_domain_indices = True
+
+
+# -- Options for manual page output ---------------------------------------
+
+# One entry per manual page. List of tuples
+# (source start file, name, description, authors, manual section).
+man_pages = [(master_doc, "ddtrace", u"ddtrace Documentation", [author], 1)]
+
+# If true, show URL addresses after external links.
+#
+# man_show_urls = False
+
+
+# -- Options for Texinfo output -------------------------------------------
+
+# Grouping the document tree into Texinfo files. List of tuples
+# (source start file, target name, title, author,
+#  dir menu entry, description, category)
+texinfo_documents = [
+    (
+        master_doc,
+        "ddtrace",
+        u"ddtrace Documentation",
+        author,
+        "ddtrace",
+        "One line description of project.",
+        "Miscellaneous",
+    ),
+]
+
+# Documents to append as an appendix to all manuals.
+#
+# texinfo_appendices = []
+
+# If false, no module index is generated.
+#
+# texinfo_domain_indices = True
+
+# How to display URL addresses: 'footnote', 'no', or 'inline'.
+#
+# texinfo_show_urls = 'footnote'
+
+# If true, do not generate a @detailmenu in the "Top" node's menu.
+#
+# texinfo_no_detailmenu = False diff --git a/docs/contributing.rst b/docs/contributing.rst new file mode 100644 index 0000000000..d7e8af6121 --- /dev/null +++ b/docs/contributing.rst @@ -0,0 +1,87 @@
+==============
+ Contributing
+==============
+
+When contributing to this repository, we advise you to discuss the change you
+wish to make via an `issue <https://github.com/DataDog/dd-trace-py/issues>`_.
+
+Branches
+========
+
+Development happens in the `master` branch. When all the features for the next
+milestone are merged, the next version is released and tagged on the `master`
+branch as `vVERSION`.
+
+Your pull request should target the `master` branch.
+
+Once a new version is released, a `release/VERSION` branch might be created to
+support micro releases to `VERSION`. Patches should be cherry-picked from the
+`master` branch where possible — or otherwise created from scratch.
+
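+As a hedged sketch of that backport flow (the branch name and commit SHA below
+are placeholders)::
+
+    $ git checkout release/0.30
+    $ git cherry-pick -x 1a2b3c4
+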
+
+Pull Request Process
+====================
+
+In order to be merged, a pull request needs to meet the following
+conditions:
+
+1. The test suite must pass.
+2. One of the repository Members must approve the pull request.
+3. Proper unit and integration testing must be implemented.
+4. Proper documentation must be written.
+
+Splitting Pull Requests
+=======================
+
+If you discussed your feature within an issue (as advised), there's a great
+chance that the implementation appears doable in several steps. In order to
+facilitate the review process, we strongly advise splitting your feature
+implementation into small pull requests (if that is possible) so they contain a
+very small number of commits (a single commit per pull request being optimal).
+
+That ensures that:
+
+1. Each commit passes the test suite.
+2. The code reviewing process done by humans is easier as there is less code to
+   understand at a glance.
+
+Internal API
+============
+
+The `ddtrace.internal` module contains code that must only be used inside
+`ddtrace` itself. Relying on the API of this module is dangerous and can break
+at any time. Don't do it.
+
+Python Versions and Implementations Support
+===========================================
+
+The following Python implementations are supported:
+
+- CPython
+
+Versions of those implementations that are supported are the Python versions
+that are currently supported by the community.
+
+Libraries Support
+=================
+
+External libraries support is implemented in submodules of the `ddtrace.contrib`
+module.
+
+Our goal is to support:
+
+- The latest version of a library.
+- All versions of a library that have been released less than 1 year ago.
+
+Support for older versions of a library will be kept as long as possible, as
+long as it can be done without too much pain and without breaking backward
+compatibility — on a best-effort basis. Therefore, support for old versions of
+a library might be dropped from the testing pipeline at any time.
+
+Code Style
+==========
+
+The code style is enforced by `flake8 <https://flake8.pycqa.org/>`_, its
+configuration, and possibly extensions. No code style review should be done by
+a human. All code style enforcement must be automated to avoid bikeshedding
+and wasting time. diff --git a/docs/db_integrations.rst b/docs/db_integrations.rst new file mode 100644 index 0000000000..d48758ca40 --- /dev/null +++ b/docs/db_integrations.rst @@ -0,0 +1,177 @@
+Datastore Libraries
+===================
+
+.. _algoliasearch:
+
+Algoliasearch
+-------------
+
+.. automodule:: ddtrace.contrib.algoliasearch
+
+
+.. _cassandra:
+
+Cassandra
+---------
+
+.. automodule:: ddtrace.contrib.cassandra
+
+
+.. _consul:
+
+Consul
+------
+
+.. automodule:: ddtrace.contrib.consul
+
+
+.. _dogpile.cache:
+
+dogpile.cache
+-------------
+
+.. automodule:: ddtrace.contrib.dogpile_cache
+
+
+.. _elasticsearch:
+
+Elasticsearch
+-------------
+
+.. automodule:: ddtrace.contrib.elasticsearch
+
+
+.. _flask_cache:
+
+Flask Cache
+-----------
+
+.. automodule:: ddtrace.contrib.flask_cache
+
+
+.. _mongodb:
+
+MongoDB
+-------
+
+.. 
_mongoengine: + +Mongoengine +^^^^^^^^^^^ + +.. automodule:: ddtrace.contrib.mongoengine + + +.. _pymongo: + +Pymongo +^^^^^^^ + +.. automodule:: ddtrace.contrib.pymongo + + +Memcached +--------- + +.. _pylibmc: + +pylibmc +^^^^^^^ + +.. automodule:: ddtrace.contrib.pylibmc + +.. _pymemcache: + +pymemcache +^^^^^^^^^^ + +.. automodule:: ddtrace.contrib.pymemcache + +MySQL +----- + +.. _mysql-connector: + +mysql-connector +^^^^^^^^^^^^^^^ + +.. automodule:: ddtrace.contrib.mysql + + +.. _mysqlclient: +.. _MySQL-python: +.. _mysqldb: + +mysqlclient/MySQL-python +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. automodule:: ddtrace.contrib.mysqldb + +.. _pymysql: + +pymysql +^^^^^^^ + +.. automodule:: ddtrace.contrib.pymysql + + +Postgres +-------- + +.. _aiopg: + +aiopg +^^^^^ + +.. automodule:: ddtrace.contrib.aiopg + + +.. _psycopg2: + +psycopg +^^^^^^^ + +.. automodule:: ddtrace.contrib.psycopg + + + +Redis +----- + +.. _redis: + +redis +^^^^^ + +.. automodule:: ddtrace.contrib.redis + + +.. _rediscluster: + +redis-py-cluster +^^^^^^^^^^^^^^^^ + +.. automodule:: ddtrace.contrib.rediscluster + + +.. _sqlalchemy: + +SQLAlchemy +---------- + +.. automodule:: ddtrace.contrib.sqlalchemy + + +.. _sqllite: + +SQLite +------ + +.. automodule:: ddtrace.contrib.sqlite3 + +.. _vertica: + +Vertica +------- + +.. automodule:: ddtrace.contrib.vertica diff --git a/docs/index.rst b/docs/index.rst new file mode 100644 index 0000000000..e3166228ed --- /dev/null +++ b/docs/index.rst @@ -0,0 +1,148 @@ +.. include:: ./shared.rst + +Datadog Python Trace Client +=========================== + +``ddtrace`` is Datadog's Python tracing client. It is used to trace requests as +they flow across web servers, databases and microservices. This enables +developers to have greater visibility into bottlenecks and troublesome requests +in their application. + +Getting Started +--------------- + +For a basic product overview: check out the `setup documentation`_. + +For details about developing and contributing: refer to the `development +guide`_. + +For descriptions of the terminology of Datadog APM: take a look at the `official +documentation`_. + + +.. _`Supported Libraries`: + +Supported Libraries +------------------- + +We officially support Python 2.7, 3.4 and above. + +The versions listed are the versions that we have tested, but ``ddtrace`` can +still be compatible with other versions of these libraries. If a version of a +library you use is unsupported, feel free to contribute or request it by +contacting support. + + +.. |SUPPVER| replace:: Supported Version +.. 
|AUTO| replace:: Automatically Instrumented + + ++--------------------------------------------------+---------------+----------------+ +| Integration | |SUPPVER| | |AUTO| [1]_ | ++==================================================+===============+================+ +| :ref:`aiobotocore` | >= 0.2.3 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`aiohttp` | >= 1.2 | Yes [2]_ | ++--------------------------------------------------+---------------+----------------+ +| :ref:`aiopg` | >= 0.12.0 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`algoliasearch` | >= 1.20.0 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`boto2` | >= 2.29.0 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`botocore` | >= 1.4.51 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`bottle` | >= 0.11 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`celery` | >= 3.1 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`cassandra` | >= 3.5 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`consul` | >= 0.7 | Yes [3]_ | ++--------------------------------------------------+---------------+----------------+ +| :ref:`django` | >= 1.8 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`djangorestframework ` | >= 3.4 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`elasticsearch` | >= 1.6 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`falcon` | >= 1.0 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`flask` | >= 0.10 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`flask_cache` | >= 0.12 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`gevent` | >= 1.0 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`grpc` | >= 1.8.0 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`jinja2` | >= 2.7 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`mako` | >= 0.1.0 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`kombu` | >= 4.0 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`molten` | >= 0.7.0 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`mongoengine` | >= 0.11 | Yes | ++--------------------------------------------------+---------------+----------------+ +| :ref:`mysql-connector` | >= 2.1 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`MySQL-python ` | >= 1.2.3 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`mysqlclient ` | >= 1.3 | No | ++--------------------------------------------------+---------------+----------------+ +| :ref:`psycopg2` | >= 2.4 | Yes | 
++--------------------------------------------------+---------------+----------------+
+| :ref:`pylibmc`                                   | >= 1.4        | Yes            |
++--------------------------------------------------+---------------+----------------+
+| :ref:`pylons`                                    | >= 0.9.6      | No             |
++--------------------------------------------------+---------------+----------------+
+| :ref:`pymemcache`                                | >= 1.3        | Yes            |
++--------------------------------------------------+---------------+----------------+
+| :ref:`pymongo`                                   | >= 3.0        | Yes            |
++--------------------------------------------------+---------------+----------------+
+| :ref:`pyramid`                                   | >= 1.7        | No             |
++--------------------------------------------------+---------------+----------------+
+| :ref:`redis`                                     | >= 2.6        | Yes            |
++--------------------------------------------------+---------------+----------------+
+| :ref:`rediscluster`                              | >= 1.3.5      | Yes            |
++--------------------------------------------------+---------------+----------------+
+| :ref:`requests`                                  | >= 2.08       | Yes            |
++--------------------------------------------------+---------------+----------------+
+| :ref:`sqlalchemy`                                | >= 1.0        | No             |
++--------------------------------------------------+---------------+----------------+
+| :ref:`tornado`                                   | >= 4.0        | No             |
++--------------------------------------------------+---------------+----------------+
+| :ref:`vertica`                                   | >= 0.6        | Yes            |
++--------------------------------------------------+---------------+----------------+
+
+
+.. [1] Libraries that are automatically instrumented when the
+   :ref:`ddtrace-run <ddtracerun>` command is used or the ``patch_all()`` method
+   is called. Always use ``patch()`` and ``patch_all()`` as soon as possible in
+   your Python entrypoint.
+
+.. [2] only third-party modules such as aiohttp_jinja2
+
+.. [3] only the synchronous client
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
+
+.. toctree::
+    :hidden:
+
+    installation_quickstart
+    web_integrations
+    db_integrations
+    async_integrations
+    other_integrations
+    basic_usage
+    advanced_usage
+    contributing diff --git a/docs/installation_quickstart.rst b/docs/installation_quickstart.rst new file mode 100644 index 0000000000..0f158f30ff --- /dev/null +++ b/docs/installation_quickstart.rst @@ -0,0 +1,107 @@
+.. include:: ./shared.rst
+
+
+.. _Installation:
+
+Installation + Quickstart
+=========================
+
+Before installing be sure to read through the `setup documentation`_ to ensure
+your environment is ready to receive traces.
+
+
+Installation
+------------
+
+Install with :code:`pip`::
+
+    $ pip install ddtrace
+
+We strongly suggest pinning the version of the library you deploy.
+
+Quickstart
+----------
+
+Getting started with ``ddtrace`` is as easy as prefixing your Python
+entry-point command with ``ddtrace-run``.
+
+For example if you start your application with ``python app.py`` then run::
+
+    $ ddtrace-run python app.py
+
+For more advanced usage of ``ddtrace-run`` refer to the documentation :ref:`here <ddtracerun>`.
+
+To find out how to trace your own code manually refer to the documentation :ref:`here <basic usage>`.
+
+
+Configuration
+~~~~~~~~~~~~~
+
+You can configure some parameters of the library by setting environment
+variables before starting your application and importing the library:
+
+.. list-table::
+   :header-rows: 1
+   :widths: 1 1 1 2
+
+   * - Configuration Variable
+     - Configuration Type
+     - Default Value
+     - Value Description
+   * - ``DD_TRACE_AGENT_URL``
+     - URL
+     - ``http://localhost:8126``
+     - The URL to use to connect the Datadog agent. The URL can start with
+       ``http://`` to connect using HTTP or with ``unix://`` to use a Unix
+       Domain Socket.
+
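+For example (a hedged illustration; substitute your agent's actual address or
+socket path)::
+
+    $ DD_TRACE_AGENT_URL=unix:///var/run/datadog/apm.socket ddtrace-run python app.py
+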
+
+OpenTracing
+-----------
+
+``ddtrace`` also provides an OpenTracing API to the Datadog tracer so
+that you can use the Datadog tracer in your OpenTracing-compatible
+applications.
+
+Installation
+^^^^^^^^^^^^
+
+Include OpenTracing with ``ddtrace``::
+
+    $ pip install ddtrace[opentracing]
+
+To include the OpenTracing dependency in your project with ``ddtrace``, ensure
+you have the following in ``setup.py``::
+
+    install_requires=[
+        "ddtrace[opentracing]",
+    ],
+
+Configuration
+^^^^^^^^^^^^^
+
+The OpenTracing convention for initializing a tracer is to define an
+initialization method that will configure and instantiate a new tracer and
+overwrite the global ``opentracing.tracer`` reference.
+
+Typically this method looks something like::
+
+    from ddtrace.opentracer import Tracer, set_global_tracer
+
+    def init_tracer(service_name):
+        """
+        Initialize a new Datadog opentracer and set it as the
+        global tracer.
+
+        This overwrites the opentracing.tracer reference.
+        """
+        config = {
+          'agent_hostname': 'localhost',
+          'agent_port': 8126,
+        }
+        tracer = Tracer(service_name, config=config)
+        set_global_tracer(tracer)
+        return tracer
+
+For more advanced usage of OpenTracing in ``ddtrace`` refer to the
+documentation :ref:`here`. diff --git a/docs/other_integrations.rst b/docs/other_integrations.rst new file mode 100644 index 0000000000..6687531157 --- /dev/null +++ b/docs/other_integrations.rst @@ -0,0 +1,91 @@
+Other Libraries
+===============
+
+.. _boto:
+
+Boto
+----
+
+.. _aiobotocore:
+
+aiobotocore
+^^^^^^^^^^^
+
+.. automodule:: ddtrace.contrib.aiobotocore
+
+
+.. _boto2:
+
+Boto2
+^^^^^
+
+.. automodule:: ddtrace.contrib.boto
+
+
+.. _botocore:
+
+Botocore
+^^^^^^^^
+
+.. automodule:: ddtrace.contrib.botocore
+
+
+
+.. _futures:
+
+Futures
+-------
+
+.. automodule:: ddtrace.contrib.futures
+
+
+.. _celery:
+
+Celery
+------
+
+.. automodule:: ddtrace.contrib.celery
+
+
+.. _kombu:
+
+Kombu
+------
+
+.. automodule:: ddtrace.contrib.kombu
+
+
+.. _httplib:
+
+httplib
+-------
+
+.. automodule:: ddtrace.contrib.httplib
+
+.. _requests:
+
+Requests
+--------
+
+.. automodule:: ddtrace.contrib.requests
+
+.. _grpc:
+
+Grpc
+----
+
+.. automodule:: ddtrace.contrib.grpc
+
+.. _jinja2:
+
+Jinja2
+------
+
+.. automodule:: ddtrace.contrib.jinja2
+
+.. _mako:
+
+Mako
+------
+
+.. automodule:: ddtrace.contrib.mako diff --git a/docs/shared.rst b/docs/shared.rst new file mode 100644 index 0000000000..b5d591eefe --- /dev/null +++ b/docs/shared.rst @@ -0,0 +1,5 @@
+.. _setup documentation: https://docs.datadoghq.com/tracing/setup/python/
+
+.. _official documentation: https://docs.datadoghq.com/tracing/visualization/
+
+.. _development guide: https://github.com/datadog/dd-trace-py#development diff --git a/docs/web_integrations.rst b/docs/web_integrations.rst new file mode 100644 index 0000000000..4145558cca --- /dev/null +++ b/docs/web_integrations.rst @@ -0,0 +1,84 @@
+Web Frameworks
+--------------
+
+``ddtrace`` provides tracing support for many Python web frameworks. For each
+framework ``ddtrace`` supports:
+
+- tracing of requests [*]_: trace requests through middleware and back
+- distributed tracing [*]_: trace requests across application boundaries
+- automatic error tagging [*]_: spans will be marked with any errors that occur
+
+.. [*] https://docs.datadoghq.com/tracing/
+.. [*] https://docs.datadoghq.com/tracing/faq/distributed-tracing/
+.. 
[*] "erroneous HTTP return codes" are defined as being greater than 500 + +.. _aiohttp: + +aiohttp +^^^^^^^ + +.. automodule:: ddtrace.contrib.aiohttp + + +.. _bottle: + +Bottle +^^^^^^ + +.. automodule:: ddtrace.contrib.bottle + +.. _djangorestframework: +.. _django: + +Django +^^^^^^ + +.. automodule:: ddtrace.contrib.django + + +.. _falcon: + +Falcon +^^^^^^ + +.. automodule:: ddtrace.contrib.falcon + + +.. _flask: + +Flask +^^^^^ + + +.. automodule:: ddtrace.contrib.flask + +.. _molten: + +Molten +^^^^^^ + +.. automodule:: ddtrace.contrib.molten + +.. _pylons: + +Pylons +^^^^^^ + +.. automodule:: ddtrace.contrib.pylons + + +.. _pyramid: + +Pyramid +^^^^^^^ + +.. automodule:: ddtrace.contrib.pyramid + + +.. _tornado: + +Tornado +^^^^^^^ + +.. automodule:: ddtrace.contrib.tornado + diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000000..8790a1e7ba --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,211 @@ +[tool.black] +line-length = 120 +target_version = ['py27', 'py34', 'py35', 'py36', 'py37', 'py38'] +exclude = ''' +( + \.eggs + | \.git + | \.hg + | \.mypy_cache + | \.nox + | \.tox + | \.venv + | _build/ + | buck-out/ + | build/ + | dist/ + | ddtrace/( + [^/]+\.py + | commands/ + | contrib/ + ( + aiobotocore + | aiohttp + | aiopg + | algoliasearch + | asyncio + | boto + | botocore + | bottle + | cassandra + | celery + | consul + | dbapi + | django/ + ( + __init__.py + | apps.py + | cache.py + | compat.py + | conf.py + | db.py + | patch.py + | restframework.py + | templates.py + | utils.py + ) + | dogpile_cache + | elasticsearch + | falcon + | flask + | flask_cache + | futures + | gevent + | grpc/ + ( + __init__.py + | constants.py + | patch.py + | server_interceptor.py + | utils.py + ) + | httplib + | jinja2 + | kombu + | logging + | mako + | molten + | mongoengine + | mysql + | mysqldb + | psycopg + | pylibmc + | pylons + | pymemcache + | pymongo + | pymysql + | pyramid + | redis + | rediscluster + | requests/ + ( + __init__.py + | constants.py + | legacy.py + | patch.py + | session.py + ) + | sqlalchemy + | sqlite3 + | tornado + | util.py + | vertica + ) + | ext/ + | http/ + | opentracer/ + | propagation/ + | settings/ + | vendor/ + ) + | tests/ + ( + base + | benchmark.py + | commands + | contrib/ + ( + aiobotocore + | aiohttp + | aiopg + | algoliasearch + | asyncio + | boto + | botocore + | bottle + | cassandra + | celery + | config.py + | consul + | dbapi + | django + | djangorestframework + | elasticsearch + | falcon + | flask + | flask_autopatch + | flask_cache + | futures + | gevent + | grpc + | httplib + | jinja2 + | kombu + | logging + | mako + | molten + | mongoengine + | mysql + | mysqldb + | patch.py + | psycopg + | pylibmc + | pylons + | pymemcache + | pymongo + | pymysql + | pyramid/ + ( + app/web.py + | __init__.py + | test_pyramid.py + | test_pyramid_autopatch.py + ) + | redis + | rediscluster + | requests + | requests_gevent + | sqlalchemy + | sqlite3 + | test_utils.py + | tornado + | vertica + ) + | ddtrace_run.py + | internal/ + ( + runtime/ + | test_context_manager.py + | test_hostname.py + | test_logger.py + | test_rate_limiter.py + ) + | memory.py + | opentracer/ + ( + conftest.py + | test_dd_compatibility.py + | test_span.py + | test_span_context.py + | test_tracer_asyncio.py + | test_tracer_gevent.py + | test_tracer_tornado.py + | test_utils.py + ) + | propagation/test_utils.py + | subprocesstest.py + | test_api.py + | test_compat.py + | test_context.py + | test_encoders.py + | test_filters.py + | test_global_config.py + | test_helpers.py 
+      | test_hook.py
+      | test_instance_config.py
+      | test_integration.py
+      | test_payload.py
+      | test_pin.py
+      | test_sampler.py
+      | test_span.py
+      | test_tracer.py
+      | test_utils.py
+      | test_worker.py
+      | unit
+      | util.py
+      | utils
+      | vendor
+      | wait-for-services.py
+    )
+)
+'''
\ No newline at end of file diff --git a/scripts/build-dist b/scripts/build-dist new file mode 100755 index 0000000000..8fc5345665 --- /dev/null +++ b/scripts/build-dist @@ -0,0 +1,39 @@
+#!/usr/bin/env bash
+set -ex
+
+# Determine where "../dist" is
+PARENT_DIR="$( cd "$(dirname "${0}")/../" ; pwd -P )"
+DIST_DIR="${PARENT_DIR}/dist"
+
+# Remove and recreate dist/ directory where our release wheels/source distribution will go
+rm -rf "${DIST_DIR}"
+mkdir "${DIST_DIR}"
+
+build_script=$(cat <<'EOF'
+set -ex
+
+# Build linux wheels from the source distribution we created
+for PYBIN in /opt/python/*/bin;
+do
+    "${PYBIN}/pip" wheel --no-deps /dd-trace-py/dist/*.tar.gz -w /dd-trace-py/dist
+done
+
+# Build manylinux wheels from the linux wheels we just created
+for whl in /dd-trace-py/dist/*-linux_${ARCH}.whl;
+do
+    auditwheel repair "${whl}" -w /dd-trace-py/dist
+
+    # Remove linux wheel since we only want the manylinux wheels
+    rm "${whl}"
+done
+EOF
+)
+
+# First build a source distribution for our package
+python setup.py sdist --dist-dir dist
+
+# Build x86_64 linux and manylinux wheels
+docker run -it --rm -v "${PARENT_DIR}:/dd-trace-py" -e "ARCH=x86_64" quay.io/pypa/manylinux1_x86_64 /bin/bash -c "${build_script}"
+
+# Build i686 linux and manylinux wheels
+docker run -it --rm -v "${PARENT_DIR}:/dd-trace-py" -e "ARCH=i686" quay.io/pypa/manylinux1_i686 linux32 /bin/bash -c "${build_script}" diff --git a/scripts/ddtest b/scripts/ddtest new file mode 100755 index 0000000000..3126984a8a --- /dev/null +++ b/scripts/ddtest @@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+
+set -e
+
+docker-compose run --rm testrunner "$@" diff --git a/scripts/mkwheelhouse b/scripts/mkwheelhouse new file mode 100755 index 0000000000..0305c2bf06 --- /dev/null +++ b/scripts/mkwheelhouse @@ -0,0 +1,57 @@
+#!/usr/bin/env python
+import os
+import shutil
+import tempfile
+
+import mkwheelhouse
+
+S3_BUCKET = 'pypi.datadoghq.com'
+S3_DIR = os.environ['S3_DIR']
+
+
+# DEV: This is the same `mkwheelhouse.build_wheels` except we are running `python setup.py sdist` instead
+def build_sdist():
+    build_dir = tempfile.mkdtemp(prefix='mkwheelhouse-')
+    args = [
+        'python', 'setup.py', 'sdist',
+        '--dist-dir', build_dir,
+    ]
+    mkwheelhouse.spawn(args)
+    return build_dir
+
+
+# DEV: This is the same as `mkwheelhouse.Bucket.make_index`, except we include `*.whl` and `*.tar.gz` files
+def make_index(bucket):
+    doc, tag, text = mkwheelhouse.yattag.Doc().tagtext()
+    with tag('html'):
+        for key in bucket.list():
+            # Skip over any non-wheel or non-source dist
+            if not key.name.endswith('.whl') and not key.name.endswith('.tar.gz'):
+                continue
+
+            with tag('a', href=bucket.generate_url(key)):
+                text(key.name)
+            doc.stag('br')
+
+    return doc.getvalue()
+
+
+# DEV: This is the same as `mkwheelhouse.run` except we hard code some values and use our custom functions instead
+def run():
+    s3_url = 's3://{0}/{1}'.format(S3_BUCKET, S3_DIR)
+    acl = 'private'
+    bucket = mkwheelhouse.Bucket(s3_url)
+
+    if not bucket.has_key('index.html'):  # noqa
+        bucket.put('', 'index.html', acl=acl)
+
+    index_url = bucket.generate_url('index.html')
+    build_dir = build_sdist()
+    bucket.sync(build_dir, acl=acl)
+    bucket.put(make_index(bucket), key='index.html', acl=acl)
+    shutil.rmtree(build_dir)
+    print('mkwheelhouse: index written to', index_url)
+
+
+if __name__ == '__main__':
+    run() diff --git a/scripts/run-tox-scenario b/scripts/run-tox-scenario new file mode 100755 index 0000000000..26e3e6894a --- /dev/null +++ b/scripts/run-tox-scenario @@ -0,0 +1,8 @@
+#!/usr/bin/env bash
+set -e
+PATTERN="$1"
+shift
+
+# CircleCI has a bug in its workspace code where it can't handle filenames with some chars
+CLEANED_PATTERN=`echo "$PATTERN" | tr '^?()$' '_'`
+exec tox -l | grep "$PATTERN" | tr '\n' ',' | xargs -I ARGS tox --result-json /tmp/"$CLEANED_PATTERN".results -e ARGS -- "$@"
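+
+# Example usage (a hedged illustration; the pattern is any regular expression
+# matched against `tox -l` output):
+#   scripts/run-tox-scenario '^flask_contrib-'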
 diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 0000000000..3c6e79cf31 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@
+[bdist_wheel]
+universal=1 diff --git a/setup.py b/setup.py new file mode 100644 index 0000000000..a21be5dd98 --- /dev/null +++ b/setup.py @@ -0,0 +1,185 @@
+import copy
+import os
+import sys
+
+from distutils.command.build_ext import build_ext
+from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
+from setuptools import setup, find_packages
+from setuptools.command.test import test as TestCommand
+
+
+HERE = os.path.dirname(os.path.abspath(__file__))
+
+
+def load_module_from_project_file(mod_name, fname):
+    """
+    Helper used to load a module from a file in this project
+
+    DEV: Loading this way will bypass loading all parent modules
+         e.g. importing `ddtrace.vendor.psutil.setup` will load `ddtrace/__init__.py`
+         which has side effects like loading the tracer
+    """
+    fpath = os.path.join(HERE, fname)
+
+    if sys.version_info >= (3, 5):
+        import importlib.util
+
+        spec = importlib.util.spec_from_file_location(mod_name, fpath)
+        mod = importlib.util.module_from_spec(spec)
+        spec.loader.exec_module(mod)
+        return mod
+    elif sys.version_info >= (3, 3):
+        from importlib.machinery import SourceFileLoader
+
+        return SourceFileLoader(mod_name, fpath).load_module()
+    else:
+        import imp
+
+        return imp.load_source(mod_name, fpath)
+
+
+class Tox(TestCommand):
+
+    user_options = [("tox-args=", "a", "Arguments to pass to tox")]
+
+    def initialize_options(self):
+        TestCommand.initialize_options(self)
+        self.tox_args = None
+
+    def finalize_options(self):
+        TestCommand.finalize_options(self)
+        self.test_args = []
+        self.test_suite = True
+
+    def run_tests(self):
+        # import here, because outside the eggs aren't loaded
+        import tox
+        import shlex
+
+        args = self.tox_args
+        if args:
+            args = shlex.split(self.tox_args)
+        errno = tox.cmdline(args=args)
+        sys.exit(errno)
+
+
+long_description = """
+# dd-trace-py
+
+`ddtrace` is Datadog's tracing library for Python. It is used to trace requests
+as they flow across web servers, databases and microservices so that developers
+have great visibility into bottlenecks and troublesome requests.
+
+## Getting Started
+
+For a basic product overview, installation and quick start, check out our
+[setup documentation][setup docs].
+
+For more advanced usage and configuration, check out our [API
+documentation][pypi docs].
+
+For descriptions of terminology used in APM, take a look at the [official
+documentation][visualization docs].
+
+[setup docs]: https://docs.datadoghq.com/tracing/setup/python/
+[pypi docs]: http://pypi.datadoghq.com/trace/docs/
+[visualization docs]: https://docs.datadoghq.com/tracing/visualization/
+"""
+
+# enum34 is an enum backport for earlier versions of python
+# funcsigs backport required for vendored debtcollector
+install_requires = ["psutil>=5.0.0", "enum34; python_version<'3.4'", "funcsigs>=1.0.0;python_version=='2.7'"]
+
+# Base `setup()` kwargs without any C-extension registering
+setup_kwargs = dict(
+    name="ddtrace",
+    description="Datadog tracing code",
+    url="https://github.com/DataDog/dd-trace-py",
+    author="Datadog, Inc.",
+    author_email="dev@datadoghq.com",
+    long_description=long_description,
+    long_description_content_type="text/markdown",
+    license="BSD",
+    packages=find_packages(exclude=["tests*"]),
+    install_requires=install_requires,
+    extras_require={
+        # users can include opentracing by having:
+        # install_requires=['ddtrace[opentracing]', ...]
+        "opentracing": ["opentracing>=2.0.0"],
+    },
+    # plugin tox
+    tests_require=["tox", "flake8"],
+    cmdclass={"test": Tox},
+    entry_points={"console_scripts": ["ddtrace-run = ddtrace.commands.ddtrace_run:main"]},
+    classifiers=[
+        "Programming Language :: Python",
+        "Programming Language :: Python :: 2.7",
+        "Programming Language :: Python :: 3.4",
+        "Programming Language :: Python :: 3.5",
+        "Programming Language :: Python :: 3.6",
+        "Programming Language :: Python :: 3.7",
+    ],
+    use_scm_version=True,
+    setup_requires=["setuptools_scm"],
+)
+
+
+if sys.platform == "win32":
+    build_ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError, OSError)
+else:
+    build_ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
+
+
+class BuildExtFailed(Exception):
+    pass
+
+
+# Attempt to build a C-extension, catch exceptions so failed building skips the extension
+# DEV: This is basically what `distutils`'s `Extension(optional=True)` does
+class optional_build_ext(build_ext):
+    def run(self):
+        try:
+            build_ext.run(self)
+        except DistutilsPlatformError as e:
+            extensions = [ext.name for ext in self.extensions]
+            print("WARNING: Failed to build extensions %r, skipping: %s" % (extensions, e))
+
+    def build_extension(self, ext):
+        try:
+            build_ext.build_extension(self, ext)
+        except build_ext_errors as e:
+            print("WARNING: Failed to build extension %s, skipping: %s" % (ext.name, e))
+
+
+def get_exts_for(name):
+    try:
+        mod = load_module_from_project_file(
+            "ddtrace.vendor.{}.setup".format(name), "ddtrace/vendor/{}/setup.py".format(name)
+        )
+        return mod.get_extensions()
+    except Exception as e:
+        print("WARNING: Failed to load %s extensions, skipping: %s" % (name, e))
+        return []
+
+
+# Try to build with C extensions first, fallback to only pure-Python if building fails
+try:
+    all_exts = []
+    for extname in ("msgpack", "wrapt"):
+        exts = get_exts_for(extname)
+        if exts:
+            all_exts.extend(exts)
+
+    kwargs = copy.deepcopy(setup_kwargs)
+    kwargs["ext_modules"] = all_exts
+    # DEV: Make sure `cmdclass` exists
+    kwargs.setdefault("cmdclass", dict())
+    kwargs["cmdclass"]["build_ext"] = optional_build_ext
+    setup(**kwargs)
+except Exception as e:
+    # Set `DDTRACE_BUILD_RAISE=TRUE` in CI to raise any build errors
+    if os.environ.get("DDTRACE_BUILD_RAISE") == "TRUE":
+        raise
+
+    print("WARNING: Failed to install with ddtrace C-extensions, falling back to pure-Python only extensions: %s" % e)
+    setup(**setup_kwargs)
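+
+# Hedged note: in environments where the silent pure-Python fallback is
+# undesirable (e.g. CI), setting `DDTRACE_BUILD_RAISE=TRUE` before running this
+# setup makes C-extension build failures raise instead of being swallowed.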
 diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000000..5874e1c361 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,5 @@
+# Do *NOT* `import ddtrace` in here
+# DEV: Some tests rely on import order of modules
+#      in order to properly function. Importing `ddtrace`
+#      here would mess with those tests since everyone
+#      will load this file by default diff --git a/tests/base/__init__.py b/tests/base/__init__.py new file mode 100644 index 0000000000..cd266e8640 --- /dev/null +++ b/tests/base/__init__.py @@ -0,0 +1,170 @@
+import contextlib
+import sys
+import unittest
+
+import ddtrace
+
+from ..utils import override_env
+from ..utils.tracer import DummyTracer
+from ..utils.span import TestSpanContainer, TestSpan, NO_CHILDREN
+
+
+class BaseTestCase(unittest.TestCase):
+    """
+    BaseTestCase extends ``unittest.TestCase`` to provide some useful helpers/assertions
+
+
+    Example::
+
+        from tests import BaseTestCase
+
+
+        class MyTestCase(BaseTestCase):
+            def test_case(self):
+                with self.override_config('flask', dict(distributed_tracing_enabled=True)):
+                    pass
+    """
+
+    # Expose `override_env` as `self.override_env`
+    override_env = staticmethod(override_env)
+
+    @staticmethod
+    @contextlib.contextmanager
+    def override_global_config(values):
+        """
+        Temporarily override a global configuration::
+
+            >>> with self.override_global_config(dict(name=value,...)):
+                # Your test
+        """
+        # DEV: Uses dict as interface but internally handled as attributes on Config instance
+        analytics_enabled_original = ddtrace.config.analytics_enabled
+        report_hostname_original = ddtrace.config.report_hostname
+        health_metrics_enabled_original = ddtrace.config.health_metrics_enabled
+
+        ddtrace.config.analytics_enabled = values.get('analytics_enabled', analytics_enabled_original)
+        ddtrace.config.report_hostname = values.get('report_hostname', report_hostname_original)
+        ddtrace.config.health_metrics_enabled = values.get('health_metrics_enabled', health_metrics_enabled_original)
+        try:
+            yield
+        finally:
+            ddtrace.config.analytics_enabled = analytics_enabled_original
+            ddtrace.config.report_hostname = report_hostname_original
+            ddtrace.config.health_metrics_enabled = health_metrics_enabled_original
+
+    @staticmethod
+    @contextlib.contextmanager
+    def override_config(integration, values):
+        """
+        Temporarily override an integration configuration value::
+
+            >>> with self.override_config('flask', dict(service_name='test-service')):
+                # Your test
+        """
+        options = getattr(ddtrace.config, integration)
+
+        original = dict(
+            (key, options.get(key))
+            for key in values.keys()
+        )
+
+        options.update(values)
+        try:
+            yield
+        finally:
+            options.update(original)
+
+    @staticmethod
+    @contextlib.contextmanager
+    def override_http_config(integration, values):
+        """
+        Temporarily override an integration's HTTP configuration value::
+
+            >>> with self.override_http_config('flask', dict(trace_query_string=True)):
+                # Your test
+        """
+        options = getattr(ddtrace.config, integration).http
+
+        original = {}
+        for key, value in values.items():
+            original[key] = getattr(options, key)
+            setattr(options, key, value)
+
+        try:
+            yield
+        finally:
+            for key, value in original.items():
+                setattr(options, key, value)
+
+    @staticmethod
+    @contextlib.contextmanager
+    def override_sys_modules(modules):
+        """
+        Temporarily override ``sys.modules`` with provided dictionary of modules::
+
+            >>> mock_module = mock.MagicMock()
+            >>> mock_module.fn.side_effect = lambda: 'test'
+            >>> with self.override_sys_modules(dict(A=mock_module)):
+                # Your test
+        """
+        original = dict(sys.modules)
+
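+        # layer the overrides on top of the real module registry; the original
+        # mapping is restored in the finally block below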
+        sys.modules.update(modules)
+        try:
+            yield
+        finally:
+            sys.modules.clear()
+            sys.modules.update(original)
+
+
+# TODO[tbutt]: Remove this once all tests are properly using BaseTracerTestCase
+override_config = BaseTestCase.override_config
+
+
+class BaseTracerTestCase(TestSpanContainer, BaseTestCase):
+    """
+    BaseTracerTestCase is a base test case for when you need access to a dummy tracer and span assertions
+    """
+    def setUp(self):
+        """Before each test case, set up a dummy tracer to use"""
+        self.tracer = DummyTracer()
+
+        super(BaseTracerTestCase, self).setUp()
+
+    def tearDown(self):
+        """After each test case, reset and remove the dummy tracer"""
+        super(BaseTracerTestCase, self).tearDown()
+
+        self.reset()
+        delattr(self, 'tracer')
+
+    def get_spans(self):
+        """Required subclass method for TestSpanContainer"""
+        return self.tracer.writer.spans
+
+    def reset(self):
+        """Helper to reset the existing list of spans created"""
+        self.tracer.writer.pop()
+
+    def trace(self, *args, **kwargs):
+        """Wrapper for self.tracer.trace that returns a TestSpan"""
+        return TestSpan(self.tracer.trace(*args, **kwargs))
+
+    def start_span(self, *args, **kwargs):
+        """Helper for self.tracer.start_span that returns a TestSpan"""
+        return TestSpan(self.tracer.start_span(*args, **kwargs))
+
+    def assert_structure(self, root, children=NO_CHILDREN):
+        """Helper to call TestSpanNode.assert_structure on the current root span"""
+        root_span = self.get_root_span()
+        root_span.assert_structure(root, children)
+
+    @contextlib.contextmanager
+    def override_global_tracer(self, tracer=None):
+        original = ddtrace.tracer
+        tracer = tracer or self.tracer
+        setattr(ddtrace, 'tracer', tracer)
+        try:
+            yield
+        finally:
+            setattr(ddtrace, 'tracer', original) diff --git a/tests/benchmark.py b/tests/benchmark.py new file mode 100644 index 0000000000..5d7a738533 --- /dev/null +++ b/tests/benchmark.py @@ -0,0 +1,92 @@
+from ddtrace import Tracer
+import pytest
+
+from .test_tracer import DummyWriter
+
+
+@pytest.fixture
+def tracer():
+    tracer = Tracer()
+    tracer.writer = DummyWriter()
+    return tracer
+
+
+def test_tracer_context(benchmark, tracer):
+    def func(tracer):
+        with tracer.trace('a', service='s', resource='r', span_type='t'):
+            pass
+
+    benchmark(func, tracer)
+
+
+def test_tracer_wrap_staticmethod(benchmark, tracer):
+    class Foo(object):
+        @staticmethod
+        @tracer.wrap()
+        def func():
+            return 0
+
+    f = Foo()
+    benchmark(f.func)
+
+
+def test_tracer_wrap_classmethod(benchmark, tracer):
+    class Foo(object):
+        @classmethod
+        @tracer.wrap()
+        def func(cls):
+            return 0
+
+    f = Foo()
+    benchmark(f.func)
+
+
+def test_tracer_wrap_instancemethod(benchmark, tracer):
+    class Foo(object):
+        @tracer.wrap()
+        def func(self):
+            return 0
+
+    f = Foo()
+    benchmark(f.func)
+
+
+def test_tracer_start_span(benchmark, tracer):
+    benchmark(tracer.start_span, 'benchmark')
+
+
+def test_tracer_start_finish_span(benchmark, tracer):
+    def func(tracer):
+        s = tracer.start_span('benchmark')
+        s.finish()
+
+    benchmark(func, tracer)
+
+
+def test_trace_simple_trace(benchmark, tracer):
+    def func(tracer):
+        with tracer.trace('parent'):
+            for i in range(5):
+                with tracer.trace('child') as c:
+                    c.set_tag('i', i)
+
+    benchmark(func, tracer)
+
+
+def test_tracer_large_trace(benchmark, tracer):
+    import random
+
+    # generate a trace with 2047 spans (a complete binary tree, 11 levels deep)
+    @tracer.wrap()
+    def func(tracer, level=0):
+        span = tracer.current_span()
+
+        # do some work
+        num = random.randint(1, 10)
+        span.set_tag('num', num)
+
+        if level < 10:
+            func(tracer, level + 1)
+
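+            # recurse twice so every non-leaf span gets two children
+            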
func(tracer, level + 1) + + benchmark(func, tracer) diff --git a/tests/commands/__init__.py b/tests/commands/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/commands/bootstrap/__init__.py b/tests/commands/bootstrap/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/commands/bootstrap/sitecustomize.py b/tests/commands/bootstrap/sitecustomize.py new file mode 100644 index 0000000000..42b2dd3728 --- /dev/null +++ b/tests/commands/bootstrap/sitecustomize.py @@ -0,0 +1 @@ +CORRECT_IMPORT = True diff --git a/tests/commands/ddtrace_minimal.py b/tests/commands/ddtrace_minimal.py new file mode 100644 index 0000000000..2f5caf50b7 --- /dev/null +++ b/tests/commands/ddtrace_minimal.py @@ -0,0 +1,5 @@ +import ddtrace.bootstrap.sitecustomize as module + + +if __name__ == '__main__': + print(module.loaded) diff --git a/tests/commands/ddtrace_run_app_name.py b/tests/commands/ddtrace_run_app_name.py new file mode 100644 index 0000000000..b48266b71b --- /dev/null +++ b/tests/commands/ddtrace_run_app_name.py @@ -0,0 +1,5 @@ +from ddtrace.opentracer import Tracer + +if __name__ == '__main__': + tracer = Tracer() + print(tracer._service_name) diff --git a/tests/commands/ddtrace_run_argv.py b/tests/commands/ddtrace_run_argv.py new file mode 100644 index 0000000000..0473e422b3 --- /dev/null +++ b/tests/commands/ddtrace_run_argv.py @@ -0,0 +1,5 @@ +import sys + +if __name__ == '__main__': + assert sys.argv[1:] == ['foo', 'bar'] + print('Test success') diff --git a/tests/commands/ddtrace_run_debug.py b/tests/commands/ddtrace_run_debug.py new file mode 100644 index 0000000000..543a858567 --- /dev/null +++ b/tests/commands/ddtrace_run_debug.py @@ -0,0 +1,7 @@ +import logging + +from ddtrace import tracer + +if __name__ == '__main__': + assert tracer.log.isEnabledFor(logging.DEBUG) + print('Test success') diff --git a/tests/commands/ddtrace_run_disabled.py b/tests/commands/ddtrace_run_disabled.py new file mode 100644 index 0000000000..95d13a52ee --- /dev/null +++ b/tests/commands/ddtrace_run_disabled.py @@ -0,0 +1,6 @@ +from ddtrace import tracer, monkey + +if __name__ == '__main__': + assert not tracer.enabled + assert len(monkey.get_patched_modules()) == 0 + print('Test success') diff --git a/tests/commands/ddtrace_run_dogstatsd.py b/tests/commands/ddtrace_run_dogstatsd.py new file mode 100644 index 0000000000..2f2f7e48db --- /dev/null +++ b/tests/commands/ddtrace_run_dogstatsd.py @@ -0,0 +1,12 @@ +from __future__ import print_function + +from ddtrace import tracer + +if __name__ == '__main__': + # check both configurations with host:port or unix socket + if tracer._dogstatsd_client.socket_path is None: + assert tracer._dogstatsd_client.host == '172.10.0.1' + assert tracer._dogstatsd_client.port == 8120 + else: + assert tracer._dogstatsd_client.socket_path.endswith('dogstatsd.sock') + print('Test success') diff --git a/tests/commands/ddtrace_run_enabled.py b/tests/commands/ddtrace_run_enabled.py new file mode 100644 index 0000000000..cbe4673a9b --- /dev/null +++ b/tests/commands/ddtrace_run_enabled.py @@ -0,0 +1,5 @@ +from ddtrace import tracer + +if __name__ == '__main__': + assert tracer.enabled + print('Test success') diff --git a/tests/commands/ddtrace_run_env.py b/tests/commands/ddtrace_run_env.py new file mode 100644 index 0000000000..45db8cca15 --- /dev/null +++ b/tests/commands/ddtrace_run_env.py @@ -0,0 +1,5 @@ +from ddtrace import tracer + +if __name__ == '__main__': + assert tracer.tags['env'] == 'test' + print('Test success') diff --git 
a/tests/commands/ddtrace_run_global_tags.py b/tests/commands/ddtrace_run_global_tags.py new file mode 100644 index 0000000000..5f62e14d24 --- /dev/null +++ b/tests/commands/ddtrace_run_global_tags.py @@ -0,0 +1,7 @@ +from ddtrace import tracer + +if __name__ == '__main__': + assert tracer.tags['a'] == 'True' + assert tracer.tags['b'] == '0' + assert tracer.tags['c'] == 'C' + print('Test success') diff --git a/tests/commands/ddtrace_run_hostname.py b/tests/commands/ddtrace_run_hostname.py new file mode 100644 index 0000000000..c2f084c583 --- /dev/null +++ b/tests/commands/ddtrace_run_hostname.py @@ -0,0 +1,6 @@ +from ddtrace import tracer + +if __name__ == '__main__': + assert tracer.writer.api.hostname == '172.10.0.1' + assert tracer.writer.api.port == 8120 + print('Test success') diff --git a/tests/commands/ddtrace_run_integration.py b/tests/commands/ddtrace_run_integration.py new file mode 100644 index 0000000000..875742789e --- /dev/null +++ b/tests/commands/ddtrace_run_integration.py @@ -0,0 +1,43 @@ +""" +An integration test that uses a real Redis client +that we expect to be implicitly traced via `ddtrace-run` +""" + +import redis + +from ddtrace import Pin +from tests.contrib.config import REDIS_CONFIG +from tests.test_tracer import DummyWriter + +if __name__ == '__main__': + r = redis.Redis(port=REDIS_CONFIG['port']) + pin = Pin.get_from(r) + assert pin + assert pin.app == 'redis' + assert pin.service == 'redis' + + pin.tracer.writer = DummyWriter() + r.flushall() + spans = pin.tracer.writer.pop() + + assert len(spans) == 1 + assert spans[0].service == 'redis' + assert spans[0].resource == 'FLUSHALL' + + long_cmd = 'mget %s' % ' '.join(map(str, range(1000))) + us = r.execute_command(long_cmd) + + spans = pin.tracer.writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.service == 'redis' + assert span.name == 'redis.command' + assert span.span_type == 'redis' + assert span.error == 0 + assert span.get_metric('out.port') == REDIS_CONFIG['port'] + assert span.get_metric('out.redis_db') == 0 + assert span.get_tag('out.host') == 'localhost' + assert span.get_tag('redis.raw_command').startswith(u'mget 0 1 2 3') + assert span.get_tag('redis.raw_command').endswith(u'...') + + print('Test success') diff --git a/tests/commands/ddtrace_run_logs_injection.py b/tests/commands/ddtrace_run_logs_injection.py new file mode 100644 index 0000000000..06d5cb6e31 --- /dev/null +++ b/tests/commands/ddtrace_run_logs_injection.py @@ -0,0 +1,11 @@ +import logging + +if __name__ == '__main__': + # Ensure if module is patched then default log formatter is set up for logs + if getattr(logging, '_datadog_patch'): + assert '[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s]' in \ + logging.root.handlers[0].formatter._fmt + else: + assert '[dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s]' not in \ + logging.root.handlers[0].formatter._fmt + print('Test success') diff --git a/tests/commands/ddtrace_run_no_debug.py b/tests/commands/ddtrace_run_no_debug.py new file mode 100644 index 0000000000..3e19f9d11c --- /dev/null +++ b/tests/commands/ddtrace_run_no_debug.py @@ -0,0 +1,7 @@ +import logging + +from ddtrace import tracer + +if __name__ == '__main__': + assert not tracer.log.isEnabledFor(logging.DEBUG) + print('Test success') diff --git a/tests/commands/ddtrace_run_patched_modules.py b/tests/commands/ddtrace_run_patched_modules.py new file mode 100644 index 0000000000..bcddba07d3 --- /dev/null +++ b/tests/commands/ddtrace_run_patched_modules.py @@ -0,0 +1,5 @@ +from ddtrace import 
monkey + +if __name__ == '__main__': + assert 'redis' in monkey.get_patched_modules() + print('Test success') diff --git a/tests/commands/ddtrace_run_priority_sampling.py b/tests/commands/ddtrace_run_priority_sampling.py new file mode 100644 index 0000000000..d4a32ed774 --- /dev/null +++ b/tests/commands/ddtrace_run_priority_sampling.py @@ -0,0 +1,5 @@ +from ddtrace import tracer + +if __name__ == '__main__': + assert tracer.priority_sampler is not None + print('Test success') diff --git a/tests/commands/ddtrace_run_service.py b/tests/commands/ddtrace_run_service.py new file mode 100644 index 0000000000..7062006d47 --- /dev/null +++ b/tests/commands/ddtrace_run_service.py @@ -0,0 +1,5 @@ +import os + +if __name__ == '__main__': + assert os.environ['DATADOG_SERVICE_NAME'] == 'my_test_service' + print('Test success') diff --git a/tests/commands/ddtrace_run_sitecustomize.py b/tests/commands/ddtrace_run_sitecustomize.py new file mode 100644 index 0000000000..19436f1e23 --- /dev/null +++ b/tests/commands/ddtrace_run_sitecustomize.py @@ -0,0 +1,15 @@ +import sys + + +if __name__ == '__main__': + # detect if `-S` is used + suppress = len(sys.argv) == 2 and sys.argv[1] == '-S' + if suppress: + assert 'sitecustomize' not in sys.modules + else: + assert 'sitecustomize' in sys.modules + + # ensure the right `sitecustomize` will be imported + import sitecustomize + assert sitecustomize.CORRECT_IMPORT + print('Test success') diff --git a/tests/commands/test_runner.py b/tests/commands/test_runner.py new file mode 100644 index 0000000000..8596b5be6d --- /dev/null +++ b/tests/commands/test_runner.py @@ -0,0 +1,299 @@ +import os +import subprocess +import sys + +from ..base import BaseTestCase + + +def inject_sitecustomize(path): + """Creates a new environment, injecting a ``sitecustomize.py`` module in + the current PYTHONPATH. + + :param path: package path containing ``sitecustomize.py`` module, starting + from the ddtrace root folder + :returns: a cloned environment that includes an altered PYTHONPATH with + the given `sitecustomize.py` + """ + from ddtrace import __file__ as root_file + root_folder = os.path.dirname(root_file) + # Copy the current environment and replace the PYTHONPATH. 
This is + # required, otherwise `ddtrace` scripts are not found when the `env` kwarg is + # passed + env = os.environ.copy() + sitecustomize = os.path.join(root_folder, '..', path) + + # Add `bootstrap` module so that `sitecustomize.py` is at the bottom + # of the PYTHONPATH + python_path = list(sys.path) + [sitecustomize] + env['PYTHONPATH'] = ':'.join(python_path)[1:] + return env + + +class DdtraceRunTest(BaseTestCase): + def test_service_name_passthrough(self): + """ + $DATADOG_SERVICE_NAME gets passed through to the program + """ + with self.override_env(dict(DATADOG_SERVICE_NAME='my_test_service')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_service.py'] + ) + assert out.startswith(b'Test success') + + def test_env_name_passthrough(self): + """ + $DATADOG_ENV gets passed through to the global tracer as an 'env' tag + """ + with self.override_env(dict(DATADOG_ENV='test')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_env.py'] + ) + assert out.startswith(b'Test success') + + def test_env_enabling(self): + """ + DATADOG_TRACE_ENABLED=false allows disabling the global tracer + """ + with self.override_env(dict(DATADOG_TRACE_ENABLED='false')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_disabled.py'] + ) + assert out.startswith(b'Test success') + + with self.override_env(dict(DATADOG_TRACE_ENABLED='true')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_enabled.py'] + ) + assert out.startswith(b'Test success') + + def test_patched_modules(self): + """ + Using `ddtrace-run` registers some generic patched modules + """ + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_patched_modules.py'] + ) + assert out.startswith(b'Test success') + + def test_integration(self): + out = subprocess.check_output( + ['ddtrace-run', 'python', '-m', 'tests.commands.ddtrace_run_integration'] + ) + assert out.startswith(b'Test success') + + def test_debug_enabling(self): + """ + DATADOG_TRACE_DEBUG=true enables debug logging on the global tracer + """ + with self.override_env(dict(DATADOG_TRACE_DEBUG='false')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_no_debug.py'] + ) + assert out.startswith(b'Test success') + + with self.override_env(dict(DATADOG_TRACE_DEBUG='true')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_debug.py'] + ) + assert out.startswith(b'Test success') + + def test_host_port_from_env(self): + """ + DATADOG_TRACE_AGENT_HOSTNAME|PORT point the tracer + to the correct host/port for submission + """ + with self.override_env(dict(DATADOG_TRACE_AGENT_HOSTNAME='172.10.0.1', + DATADOG_TRACE_AGENT_PORT='8120')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_hostname.py'] + ) + assert out.startswith(b'Test success') + + def test_host_port_from_env_dd(self): + """ + DD_AGENT_HOST|DD_TRACE_AGENT_PORT point the tracer + to the correct host/port for submission + """ + with self.override_env(dict(DD_AGENT_HOST='172.10.0.1', + DD_TRACE_AGENT_PORT='8120')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_hostname.py'] + ) + assert out.startswith(b'Test success') + + # Do we get the same results without `ddtrace-run`?
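+ # DEV: we expect so, since `DD_AGENT_HOST`/`DD_TRACE_AGENT_PORT` are read by + # the tracer itself at import time, so a plain `python` run should honor them too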
+ out = subprocess.check_output( + ['python', 'tests/commands/ddtrace_run_hostname.py'] + ) + assert out.startswith(b'Test success') + + def test_dogstatsd_client_env_host_and_port(self): + """ + DD_AGENT_HOST and DD_DOGSTATSD_PORT are used to configure dogstatsd with UDP in the tracer + """ + with self.override_env(dict(DD_AGENT_HOST='172.10.0.1', + DD_DOGSTATSD_PORT='8120')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_dogstatsd.py'] + ) + assert out.startswith(b'Test success') + + def test_dogstatsd_client_env_url_host_and_port(self): + """ + DD_DOGSTATSD_URL=<host>:<port> is used to configure dogstatsd with UDP in the tracer + """ + with self.override_env(dict(DD_DOGSTATSD_URL='172.10.0.1:8120')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_dogstatsd.py'] + ) + assert out.startswith(b'Test success') + + def test_dogstatsd_client_env_url_udp(self): + """ + DD_DOGSTATSD_URL=udp://<host>:<port> is used to configure dogstatsd with UDP in the tracer + """ + with self.override_env(dict(DD_DOGSTATSD_URL='udp://172.10.0.1:8120')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_dogstatsd.py'] + ) + assert out.startswith(b'Test success') + + def test_dogstatsd_client_env_url_unix(self): + """ + DD_DOGSTATSD_URL=unix://<path> is used to configure dogstatsd with a socket path in the tracer + """ + with self.override_env(dict(DD_DOGSTATSD_URL='unix:///dogstatsd.sock')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_dogstatsd.py'] + ) + assert out.startswith(b'Test success') + + def test_dogstatsd_client_env_url_path(self): + """ + DD_DOGSTATSD_URL=<path> is used to configure dogstatsd with a socket path in the tracer + """ + with self.override_env(dict(DD_DOGSTATSD_URL='/dogstatsd.sock')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_dogstatsd.py'] + ) + assert out.startswith(b'Test success') + + def test_priority_sampling_from_env(self): + """ + DATADOG_PRIORITY_SAMPLING enables priority sampling + """ + with self.override_env(dict(DATADOG_PRIORITY_SAMPLING='True')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_priority_sampling.py'] + ) + assert out.startswith(b'Test success') + + def test_patch_modules_from_env(self): + """ + DATADOG_PATCH_MODULES overrides the defaults for patch_all() + """ + from ddtrace.bootstrap.sitecustomize import EXTRA_PATCHED_MODULES, update_patched_modules + orig = EXTRA_PATCHED_MODULES.copy() + + # empty / malformed strings are no-ops + with self.override_env(dict(DATADOG_PATCH_MODULES='')): + update_patched_modules() + assert orig == EXTRA_PATCHED_MODULES + + with self.override_env(dict(DATADOG_PATCH_MODULES=':')): + update_patched_modules() + assert orig == EXTRA_PATCHED_MODULES + + with self.override_env(dict(DATADOG_PATCH_MODULES=',')): + update_patched_modules() + assert orig == EXTRA_PATCHED_MODULES + + with self.override_env(dict(DATADOG_PATCH_MODULES=',:')): + update_patched_modules() + assert orig == EXTRA_PATCHED_MODULES + + # overrides work in either direction + with self.override_env(dict(DATADOG_PATCH_MODULES='django:false')): + update_patched_modules() + assert EXTRA_PATCHED_MODULES['django'] is False + + with self.override_env(dict(DATADOG_PATCH_MODULES='boto:true')): + update_patched_modules() + assert EXTRA_PATCHED_MODULES['boto'] is True + + with self.override_env(dict(DATADOG_PATCH_MODULES='django:true,boto:false')): + update_patched_modules() + assert
EXTRA_PATCHED_MODULES['boto'] is False + assert EXTRA_PATCHED_MODULES['django'] is True + + with self.override_env(dict(DATADOG_PATCH_MODULES='django:false,boto:true')): + update_patched_modules() + assert EXTRA_PATCHED_MODULES['boto'] is True + assert EXTRA_PATCHED_MODULES['django'] is False + + def test_sitecustomize_without_ddtrace_run_command(self): + # [Regression test]: ensure `sitecustomize` path is removed only if it's + # present, otherwise it will cause: + # ValueError: list.remove(x): x not in list + # as mentioned here: https://github.com/DataDog/dd-trace-py/pull/516 + env = inject_sitecustomize('') + out = subprocess.check_output( + ['python', 'tests/commands/ddtrace_minimal.py'], + env=env, + ) + # `out` contains the `loaded` status of the module + result = out[:-1] == b'True' + self.assertTrue(result) + + def test_sitecustomize_run(self): + # [Regression test]: ensure the user's `sitecustomize.py` is properly loaded, + # so that our `bootstrap/sitecustomize.py` doesn't override the one + # defined in users' PYTHONPATH. + env = inject_sitecustomize('tests/commands/bootstrap') + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_sitecustomize.py'], + env=env, + ) + assert out.startswith(b'Test success') + + def test_sitecustomize_run_suppressed(self): + # ensure `sitecustomize.py` is not loaded if `-S` is used + env = inject_sitecustomize('tests/commands/bootstrap') + out = subprocess.check_output( + ['ddtrace-run', 'python', '-S', 'tests/commands/ddtrace_run_sitecustomize.py', '-S'], + env=env, + ) + assert out.startswith(b'Test success') + + def test_argv_passed(self): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_argv.py', 'foo', 'bar'] + ) + assert out.startswith(b'Test success') + + def test_got_app_name(self): + """ + apps run with ddtrace-run have a proper app name + """ + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_app_name.py'] + ) + assert out.startswith(b'ddtrace_run_app_name.py') + + def test_global_trace_tags(self): + """ Ensure global tags are passed in from environment + """ + with self.override_env(dict(DD_TRACE_GLOBAL_TAGS='a:True,b:0,c:C')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_global_tags.py'] + ) + assert out.startswith(b'Test success') + + def test_logs_injection(self): + """ Ensure logs injection works + """ + with self.override_env(dict(DD_LOGS_INJECTION='true')): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/commands/ddtrace_run_logs_injection.py'] + ) + assert out.startswith(b'Test success') diff --git a/tests/contrib/__init__.py b/tests/contrib/__init__.py new file mode 100644 index 0000000000..5874e1c361 --- /dev/null +++ b/tests/contrib/__init__.py @@ -0,0 +1,5 @@ +# Do *NOT* `import ddtrace` in here +# DEV: Some tests rely on import order of modules +# in order to properly function.
Importing `ddtrace` + here would mess with those tests since every test + will load this file by default diff --git a/tests/contrib/aiobotocore/__init__.py b/tests/contrib/aiobotocore/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/aiobotocore/py35/__init__.py b/tests/contrib/aiobotocore/py35/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/aiobotocore/py35/test.py b/tests/contrib/aiobotocore/py35/test.py new file mode 100644 index 0000000000..fb12611b08 --- /dev/null +++ b/tests/contrib/aiobotocore/py35/test.py @@ -0,0 +1,74 @@ +import aiobotocore + +from ddtrace.contrib.aiobotocore.patch import patch, unpatch + +from ..utils import aiobotocore_client +from ...asyncio.utils import AsyncioTestCase, mark_asyncio +from ....test_tracer import get_dummy_tracer +from ....utils import assert_span_http_status_code + + +class AIOBotocoreTest(AsyncioTestCase): + """Botocore integration testsuite""" + def setUp(self): + super(AIOBotocoreTest, self).setUp() + patch() + self.tracer = get_dummy_tracer() + + def tearDown(self): + super(AIOBotocoreTest, self).tearDown() + unpatch() + self.tracer = None + + @mark_asyncio + async def test_response_context_manager(self): + # the client should call the wrapped __aenter__ and return the + # object proxy + with aiobotocore_client('s3', self.tracer) as s3: + # prepare S3 and flush traces if any + await s3.create_bucket(Bucket='tracing') + await s3.put_object(Bucket='tracing', Key='apm', Body=b'') + self.tracer.writer.pop_traces() + # `async with` under test + response = await s3.get_object(Bucket='tracing', Key='apm') + async with response['Body'] as stream: + await stream.read() + + traces = self.tracer.writer.pop_traces() + + version = aiobotocore.__version__.split('.') + pre_08 = int(version[0]) == 0 and int(version[1]) < 8 + # Version 0.8+ generates only one span for reading an object.
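+ # DEV: pre-0.8 versions also trace the body read as a separate + # `s3.command.read` span parented to the `GetObject` span, which + # is why we expect two single-span traces below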
+ if pre_08: + assert len(traces) == 2 + assert len(traces[0]) == 1 + assert len(traces[1]) == 1 + + span = traces[0][0] + assert span.get_tag('aws.operation') == 'GetObject' + assert_span_http_status_code(span, 200) + assert span.service == 'aws.s3' + assert span.resource == 's3.getobject' + + read_span = traces[1][0] + assert read_span.get_tag('aws.operation') == 'GetObject' + assert_span_http_status_code(read_span, 200) + assert read_span.service == 'aws.s3' + assert read_span.resource == 's3.getobject' + assert read_span.name == 's3.command.read' + # enforce parenting + assert read_span.parent_id == span.span_id + assert read_span.trace_id == span.trace_id + else: + assert len(traces) == 1 + assert len(traces[0]) == 1 + + span = traces[0][0] + assert span.get_tag('aws.operation') == 'GetObject' + assert_span_http_status_code(span, 200) + assert span.service == 'aws.s3' + assert span.resource == 's3.getobject' + assert span.name == 's3.command' diff --git a/tests/contrib/aiobotocore/test.py b/tests/contrib/aiobotocore/test.py new file mode 100644 index 0000000000..700d593c8b --- /dev/null +++ b/tests/contrib/aiobotocore/test.py @@ -0,0 +1,316 @@ +import aiobotocore +from botocore.errorfactory import ClientError + +from ddtrace.contrib.aiobotocore.patch import patch, unpatch +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.compat import stringify + +from .utils import aiobotocore_client +from ..asyncio.utils import AsyncioTestCase, mark_asyncio +from ...test_tracer import get_dummy_tracer +from ...utils import assert_span_http_status_code + + +class AIOBotocoreTest(AsyncioTestCase): + """Botocore integration testsuite""" + def setUp(self): + super(AIOBotocoreTest, self).setUp() + patch() + self.tracer = get_dummy_tracer() + + def tearDown(self): + super(AIOBotocoreTest, self).tearDown() + unpatch() + self.tracer = None + + @mark_asyncio + def test_traced_client(self): + with aiobotocore_client('ec2', self.tracer) as ec2: + yield from ec2.describe_instances() + + traces = self.tracer.writer.pop_traces() + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 1) + span = traces[0][0] + + self.assertEqual(span.get_tag('aws.agent'), 'aiobotocore') + self.assertEqual(span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(span.get_tag('aws.operation'), 'DescribeInstances') + assert_span_http_status_code(span, 200) + self.assertEqual(span.get_metric('retry_attempts'), 0) + self.assertEqual(span.service, 'aws.ec2') + self.assertEqual(span.resource, 'ec2.describeinstances') + self.assertEqual(span.name, 'ec2.command') + self.assertEqual(span.span_type, 'http') + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + @mark_asyncio + def test_traced_client_analytics(self): + with self.override_config( + 'aiobotocore', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + with aiobotocore_client('ec2', self.tracer) as ec2: + yield from ec2.describe_instances() + + spans = self.get_spans() + assert spans + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + @mark_asyncio + def test_s3_client(self): + with aiobotocore_client('s3', self.tracer) as s3: + yield from s3.list_buckets() + yield from s3.list_buckets() + + traces = self.tracer.writer.pop_traces() + self.assertEqual(len(traces), 2) + self.assertEqual(len(traces[0]), 1) + span = traces[0][0] + + self.assertEqual(span.get_tag('aws.operation'), 'ListBuckets') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'aws.s3') +
self.assertEqual(span.resource, 's3.listbuckets') + self.assertEqual(span.name, 's3.command') + + @mark_asyncio + def test_s3_put(self): + params = dict(Key='foo', Bucket='mybucket', Body=b'bar') + + with aiobotocore_client('s3', self.tracer) as s3: + yield from s3.create_bucket(Bucket='mybucket') + yield from s3.put_object(**params) + + spans = [trace[0] for trace in self.tracer.writer.pop_traces()] + assert spans + self.assertEqual(len(spans), 2) + self.assertEqual(spans[0].get_tag('aws.operation'), 'CreateBucket') + assert_span_http_status_code(spans[0], 200) + self.assertEqual(spans[0].service, 'aws.s3') + self.assertEqual(spans[0].resource, 's3.createbucket') + self.assertEqual(spans[1].get_tag('aws.operation'), 'PutObject') + self.assertEqual(spans[1].resource, 's3.putobject') + self.assertEqual(spans[1].get_tag('params.Key'), stringify(params['Key'])) + self.assertEqual(spans[1].get_tag('params.Bucket'), stringify(params['Bucket'])) + self.assertIsNone(spans[1].get_tag('params.Body')) + + @mark_asyncio + def test_s3_client_error(self): + with aiobotocore_client('s3', self.tracer) as s3: + with self.assertRaises(ClientError): + # FIXME: add proper clean-up to tearDown + yield from s3.list_objects(Bucket='doesnotexist') + + traces = self.tracer.writer.pop_traces() + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 1) + span = traces[0][0] + + self.assertEqual(span.resource, 's3.listobjects') + self.assertEqual(span.error, 1) + self.assertTrue('NoSuchBucket' in span.get_tag('error.msg')) + + @mark_asyncio + def test_s3_client_read(self): + with aiobotocore_client('s3', self.tracer) as s3: + # prepare S3 and flush traces if any + yield from s3.create_bucket(Bucket='tracing') + yield from s3.put_object(Bucket='tracing', Key='apm', Body=b'') + self.tracer.writer.pop_traces() + # calls under test + response = yield from s3.get_object(Bucket='tracing', Key='apm') + yield from response['Body'].read() + + traces = self.tracer.writer.pop_traces() + version = aiobotocore.__version__.split('.') + pre_08 = int(version[0]) == 0 and int(version[1]) < 8 + if pre_08: + self.assertEqual(len(traces), 2) + self.assertEqual(len(traces[1]), 1) + else: + self.assertEqual(len(traces), 1) + + self.assertEqual(len(traces[0]), 1) + + span = traces[0][0] + self.assertEqual(span.get_tag('aws.operation'), 'GetObject') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'aws.s3') + self.assertEqual(span.resource, 's3.getobject') + + if pre_08: + read_span = traces[1][0] + self.assertEqual(read_span.get_tag('aws.operation'), 'GetObject') + assert_span_http_status_code(read_span, 200) + self.assertEqual(read_span.service, 'aws.s3') + self.assertEqual(read_span.resource, 's3.getobject') + self.assertEqual(read_span.name, 's3.command.read') + # enforce parenting + self.assertEqual(read_span.parent_id, span.span_id) + self.assertEqual(read_span.trace_id, span.trace_id) + + @mark_asyncio + def test_sqs_client(self): + with aiobotocore_client('sqs', self.tracer) as sqs: + yield from sqs.list_queues() + + traces = self.tracer.writer.pop_traces() + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 1) + + span = traces[0][0] + self.assertEqual(span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(span.get_tag('aws.operation'), 'ListQueues') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'aws.sqs') + self.assertEqual(span.resource, 'sqs.listqueues') + + @mark_asyncio + def test_kinesis_client(self): + with 
aiobotocore_client('kinesis', self.tracer) as kinesis: + yield from kinesis.list_streams() + + traces = self.tracer.writer.pop_traces() + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 1) + + span = traces[0][0] + self.assertEqual(span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(span.get_tag('aws.operation'), 'ListStreams') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'aws.kinesis') + self.assertEqual(span.resource, 'kinesis.liststreams') + + @mark_asyncio + def test_lambda_client(self): + with aiobotocore_client('lambda', self.tracer) as lambda_client: + # https://github.com/spulec/moto/issues/906 + yield from lambda_client.list_functions(MaxItems=5) + + traces = self.tracer.writer.pop_traces() + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 1) + + span = traces[0][0] + self.assertEqual(span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(span.get_tag('aws.operation'), 'ListFunctions') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'aws.lambda') + self.assertEqual(span.resource, 'lambda.listfunctions') + + @mark_asyncio + def test_kms_client(self): + with aiobotocore_client('kms', self.tracer) as kms: + yield from kms.list_keys(Limit=21) + + traces = self.tracer.writer.pop_traces() + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 1) + + span = traces[0][0] + self.assertEqual(span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(span.get_tag('aws.operation'), 'ListKeys') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'aws.kms') + self.assertEqual(span.resource, 'kms.listkeys') + # checking that no command parameters are stored as tags (security leak protection) + self.assertEqual(span.get_tag('params'), None) + + @mark_asyncio + def test_unpatch(self): + unpatch() + with aiobotocore_client('kinesis', self.tracer) as kinesis: + yield from kinesis.list_streams() + + traces = self.tracer.writer.pop_traces() + self.assertEqual(len(traces), 0) + + @mark_asyncio + def test_double_patch(self): + patch() + with aiobotocore_client('sqs', self.tracer) as sqs: + yield from sqs.list_queues() + + traces = self.tracer.writer.pop_traces() + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 1) + + @mark_asyncio + def test_opentraced_client(self): + from tests.opentracer.utils import init_tracer + + ot_tracer = init_tracer('my_svc', self.tracer) + + with ot_tracer.start_active_span('ot_outer_span'): + with aiobotocore_client('ec2', self.tracer) as ec2: + yield from ec2.describe_instances() + + traces = self.tracer.writer.pop_traces() + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 2) + ot_span = traces[0][0] + dd_span = traces[0][1] + + self.assertEqual(ot_span.resource, 'ot_outer_span') + self.assertEqual(ot_span.service, 'my_svc') + + # confirm the parenting + self.assertEqual(ot_span.parent_id, None) + self.assertEqual(dd_span.parent_id, ot_span.span_id) + + self.assertEqual(dd_span.get_tag('aws.agent'), 'aiobotocore') + self.assertEqual(dd_span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(dd_span.get_tag('aws.operation'), 'DescribeInstances') + assert_span_http_status_code(dd_span, 200) + self.assertEqual(dd_span.get_metric('retry_attempts'), 0) + self.assertEqual(dd_span.service, 'aws.ec2') + self.assertEqual(dd_span.resource, 'ec2.describeinstances') + self.assertEqual(dd_span.name, 'ec2.command') + + @mark_asyncio + def test_opentraced_s3_client(self): + from tests.opentracer.utils import
init_tracer + + ot_tracer = init_tracer('my_svc', self.tracer) + + with ot_tracer.start_active_span('ot_outer_span'): + with aiobotocore_client('s3', self.tracer) as s3: + yield from s3.list_buckets() + with ot_tracer.start_active_span('ot_inner_span1'): + yield from s3.list_buckets() + with ot_tracer.start_active_span('ot_inner_span2'): + pass + + traces = self.tracer.writer.pop_traces() + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 5) + ot_outer_span = traces[0][0] + dd_span = traces[0][1] + ot_inner_span = traces[0][2] + dd_span2 = traces[0][3] + ot_inner_span2 = traces[0][4] + + self.assertEqual(ot_outer_span.resource, 'ot_outer_span') + self.assertEqual(ot_inner_span.resource, 'ot_inner_span1') + self.assertEqual(ot_inner_span2.resource, 'ot_inner_span2') + + # confirm the parenting + self.assertEqual(ot_outer_span.parent_id, None) + self.assertEqual(dd_span.parent_id, ot_outer_span.span_id) + self.assertEqual(ot_inner_span.parent_id, ot_outer_span.span_id) + self.assertEqual(dd_span2.parent_id, ot_inner_span.span_id) + self.assertEqual(ot_inner_span2.parent_id, ot_outer_span.span_id) + + self.assertEqual(dd_span.get_tag('aws.operation'), 'ListBuckets') + assert_span_http_status_code(dd_span, 200) + self.assertEqual(dd_span.service, 'aws.s3') + self.assertEqual(dd_span.resource, 's3.listbuckets') + self.assertEqual(dd_span.name, 's3.command') + + self.assertEqual(dd_span2.get_tag('aws.operation'), 'ListBuckets') + assert_span_http_status_code(dd_span2, 200) + self.assertEqual(dd_span2.service, 'aws.s3') + self.assertEqual(dd_span2.resource, 's3.listbuckets') + self.assertEqual(dd_span2.name, 's3.command') diff --git a/tests/contrib/aiobotocore/utils.py b/tests/contrib/aiobotocore/utils.py new file mode 100644 index 0000000000..a57d5545c9 --- /dev/null +++ b/tests/contrib/aiobotocore/utils.py @@ -0,0 +1,41 @@ +import aiobotocore.session + +from ddtrace import Pin +from contextlib import contextmanager + + +LOCALSTACK_ENDPOINT_URL = { + 's3': 'http://localhost:5000', + 'ec2': 'http://localhost:5001', + 'kms': 'http://localhost:5002', + 'sqs': 'http://localhost:5003', + 'lambda': 'http://localhost:5004', + 'kinesis': 'http://localhost:5005', +} + + +@contextmanager +def aiobotocore_client(service, tracer): + """Helper function that creates a new aiobotocore client and ensures + it is closed at the end of the context manager.
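+ + Example (mirroring how the tests above use it):: + + with aiobotocore_client('ec2', tracer) as ec2: + yield from ec2.describe_instances()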
+ """ + session = aiobotocore.session.get_session() + endpoint = LOCALSTACK_ENDPOINT_URL[service] + client = session.create_client( + service, + region_name='us-west-2', + endpoint_url=endpoint, + aws_access_key_id='aws', + aws_secret_access_key='aws', + aws_session_token='aws', + ) + Pin.override(client, tracer=tracer) + try: + yield client + finally: + client.close() diff --git a/tests/contrib/aiohttp/__init__.py b/tests/contrib/aiohttp/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/aiohttp/app/__init__.py b/tests/contrib/aiohttp/app/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/aiohttp/app/statics/empty.txt b/tests/contrib/aiohttp/app/statics/empty.txt new file mode 100644 index 0000000000..3083bfa69c --- /dev/null +++ b/tests/contrib/aiohttp/app/statics/empty.txt @@ -0,0 +1 @@ +Static file diff --git a/tests/contrib/aiohttp/app/templates/__init__.py b/tests/contrib/aiohttp/app/templates/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/aiohttp/app/templates/template.jinja2 b/tests/contrib/aiohttp/app/templates/template.jinja2 new file mode 100644 index 0000000000..8d15644bd4 --- /dev/null +++ b/tests/contrib/aiohttp/app/templates/template.jinja2 @@ -0,0 +1 @@ +{{text}} diff --git a/tests/contrib/aiohttp/app/web.py b/tests/contrib/aiohttp/app/web.py new file mode 100644 index 0000000000..1436aeba82 --- /dev/null +++ b/tests/contrib/aiohttp/app/web.py @@ -0,0 +1,168 @@ +import os +import jinja2 +import asyncio +import aiohttp_jinja2 + +from aiohttp import web + + +BASE_DIR = os.path.dirname(os.path.realpath(__file__)) +STATIC_DIR = os.path.join(BASE_DIR, 'statics') +TEMPLATE_DIR = os.path.join(BASE_DIR, 'templates') + + +@asyncio.coroutine +def home(request): + return web.Response(text="What's tracing?") + + +@asyncio.coroutine +def name(request): + name = request.match_info.get('name', 'Anonymous') + return web.Response(text='Hello {}'.format(name)) + + +@asyncio.coroutine +def coroutine_chaining(request): + tracer = get_tracer(request) + span = tracer.trace('aiohttp.coro_1') + text = yield from coro_2(request) + span.finish() + return web.Response(text=text) + + +def route_exception(request): + raise Exception('error') + + +@asyncio.coroutine +def route_async_exception(request): + raise Exception('error') + + +@asyncio.coroutine +def route_wrapped_coroutine(request): + tracer = get_tracer(request) + + @tracer.wrap('nested') + @asyncio.coroutine + def nested(): + yield from asyncio.sleep(0.25) + + yield from nested() + return web.Response(text='OK') + + +@asyncio.coroutine +def route_sub_span(request): + tracer = get_tracer(request) + with tracer.trace('aiohttp.sub_span') as span: + span.set_tag('sub_span', 'true') + return web.Response(text='OK') + + +@asyncio.coroutine +def uncaught_server_error(request): + return 1 / 0 + + +@asyncio.coroutine +def caught_server_error(request): + return web.Response(text='NOT OK', status=503) + + +@asyncio.coroutine +def coro_2(request): + tracer = get_tracer(request) + with tracer.trace('aiohttp.coro_2') as span: + span.set_tag('aiohttp.worker', 'pending') + return 'OK' + + +@asyncio.coroutine +def template_handler(request): + return aiohttp_jinja2.render_template('template.jinja2', request, {'text': 'OK'}) + + +@aiohttp_jinja2.template('template.jinja2') +@asyncio.coroutine +def template_decorator(request): + return {'text': 'OK'} + + +@aiohttp_jinja2.template('error.jinja2') +@asyncio.coroutine +def template_error(request): + 
return {} + + +@asyncio.coroutine +def delayed_handler(request): + yield from asyncio.sleep(0.01) + return web.Response(text='Done') + + +@asyncio.coroutine +def noop_middleware(app, handler): + @asyncio.coroutine + def middleware_handler(request): + # noop middleware + response = yield from handler(request) + return response + return middleware_handler + + +def setup_app(loop): + """ + Use this method to create the app. It must receive + the ``loop`` provided by the ``get_app`` method of + the ``AioHTTPTestCase`` class. + """ + # configure the app + app = web.Application( + loop=loop, + middlewares=[ + noop_middleware, + ], + ) + app.router.add_get('/', home) + app.router.add_get('/delayed/', delayed_handler) + app.router.add_get('/echo/{name}', name) + app.router.add_get('/chaining/', coroutine_chaining) + app.router.add_get('/exception', route_exception) + app.router.add_get('/async_exception', route_async_exception) + app.router.add_get('/wrapped_coroutine', route_wrapped_coroutine) + app.router.add_get('/sub_span', route_sub_span) + app.router.add_get('/uncaught_server_error', uncaught_server_error) + app.router.add_get('/caught_server_error', caught_server_error) + app.router.add_static('/statics', STATIC_DIR) + # configure templates + set_memory_loader(app) + app.router.add_get('/template/', template_handler) + app.router.add_get('/template_decorator/', template_decorator) + app.router.add_get('/template_error/', template_error) + + return app + + +def set_memory_loader(app): + aiohttp_jinja2.setup(app, loader=jinja2.DictLoader({ + 'template.jinja2': '{{text}}', + 'error.jinja2': '{{1/0}}', + })) + + +def set_filesystem_loader(app): + aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader(TEMPLATE_DIR)) + + +def set_package_loader(app): + aiohttp_jinja2.setup(app, loader=jinja2.PackageLoader('tests.contrib.aiohttp.app', 'templates')) + + +def get_tracer(request): + """ + Utility function to retrieve the tracer from the given ``request``. + It is meant to be used only for testing purposes. + """ + return request['__datadog_request_span'].tracer diff --git a/tests/contrib/aiohttp/test_middleware.py b/tests/contrib/aiohttp/test_middleware.py new file mode 100644 index 0000000000..77eefc68ca --- /dev/null +++ b/tests/contrib/aiohttp/test_middleware.py @@ -0,0 +1,490 @@ +import asyncio + +from aiohttp.test_utils import unittest_run_loop + +from ddtrace.contrib.aiohttp.middlewares import trace_app, trace_middleware, CONFIG_KEY +from ddtrace.ext import http +from ddtrace.sampler import RateSampler +from ddtrace.constants import SAMPLING_PRIORITY_KEY, ANALYTICS_SAMPLE_RATE_KEY + +from opentracing.scope_managers.asyncio import AsyncioScopeManager +from tests.opentracer.utils import init_tracer +from .utils import TraceTestCase +from .app.web import setup_app, noop_middleware +from ...utils import assert_span_http_status_code + + +class TestTraceMiddleware(TraceTestCase): + """ + Ensures that the tracing middleware creates root spans at + the beginning of a request. + """ + def enable_tracing(self): + trace_app(self.app, self.tracer) + + @unittest_run_loop + @asyncio.coroutine + def test_handler(self): + # it should create a root span when there is a handler hit + # with the proper tags + request = yield from self.client.request('GET', '/') + assert 200 == request.status + text = yield from request.text() + assert 'What\'s tracing?'
== text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # with the right fields + assert 'aiohttp.request' == span.name + assert 'aiohttp-web' == span.service + assert 'web' == span.span_type + assert 'GET /' == span.resource + assert str(self.client.make_url('/')) == span.get_tag(http.URL) + assert 'GET' == span.get_tag('http.method') + assert_span_http_status_code(span, 200) + assert 0 == span.error + + @asyncio.coroutine + def _test_param_handler(self, query_string=''): + if query_string: + fqs = '?' + query_string + else: + fqs = '' + # it should properly manage handlers with params + request = yield from self.client.request('GET', '/echo/team' + fqs) + assert 200 == request.status + text = yield from request.text() + assert 'Hello team' == text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # with the right fields + assert 'GET /echo/{name}' == span.resource + assert str(self.client.make_url('/echo/team')) == span.get_tag(http.URL) + assert_span_http_status_code(span, 200) + if self.app[CONFIG_KEY].get('trace_query_string'): + assert query_string == span.get_tag(http.QUERY_STRING) + else: + assert http.QUERY_STRING not in span.meta + + @unittest_run_loop + def test_param_handler(self): + return self._test_param_handler() + + @unittest_run_loop + def test_query_string(self): + return self._test_param_handler('foo=bar') + + @unittest_run_loop + def test_query_string_duplicate_keys(self): + return self._test_param_handler('foo=bar&foo=baz&x=y') + + @unittest_run_loop + def test_param_handler_trace(self): + self.app[CONFIG_KEY]['trace_query_string'] = True + return self._test_param_handler() + + @unittest_run_loop + def test_query_string_trace(self): + self.app[CONFIG_KEY]['trace_query_string'] = True + return self._test_param_handler('foo=bar') + + @unittest_run_loop + def test_query_string_duplicate_keys_trace(self): + self.app[CONFIG_KEY]['trace_query_string'] = True + return self._test_param_handler('foo=bar&foo=baz&x=y') + + @unittest_run_loop + @asyncio.coroutine + def test_404_handler(self): + # it should not pollute the resource space + request = yield from self.client.request('GET', '/404/not_found') + assert 404 == request.status + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # with the right fields + assert '404' == span.resource + assert str(self.client.make_url('/404/not_found')) == span.get_tag(http.URL) + assert 'GET' == span.get_tag('http.method') + assert_span_http_status_code(span, 404) + + @unittest_run_loop + @asyncio.coroutine + def test_server_error(self): + """ + When a server error occurs (uncaught exception) + The span should be flagged as an error + """ + request = yield from self.client.request('GET', '/uncaught_server_error') + assert request.status == 500 + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.get_tag('http.method') == 'GET' + assert_span_http_status_code(span, 500) + assert span.error == 1 + + @unittest_run_loop + @asyncio.coroutine + def test_500_response_code(self): + """ + When a 5XX response code is returned + The span should be flagged as an error + """ + request = yield from self.client.request('GET', '/caught_server_error') + assert request.status == 503 + traces =
self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.get_tag('http.method') == 'GET' + assert_span_http_status_code(span, 503) + assert span.error == 1 + + @unittest_run_loop + @asyncio.coroutine + def test_coroutine_chaining(self): + # it should create a trace with multiple spans + request = yield from self.client.request('GET', '/chaining/') + assert 200 == request.status + text = yield from request.text() + assert 'OK' == text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 3 == len(traces[0]) + root = traces[0][0] + handler = traces[0][1] + coroutine = traces[0][2] + # root span created in the middleware + assert 'aiohttp.request' == root.name + assert 'GET /chaining/' == root.resource + assert str(self.client.make_url('/chaining/')) == root.get_tag(http.URL) + assert 'GET' == root.get_tag('http.method') + assert_span_http_status_code(root, 200) + # span created in the coroutine_chaining handler + assert 'aiohttp.coro_1' == handler.name + assert root.span_id == handler.parent_id + assert root.trace_id == handler.trace_id + # span created in the coro_2 handler + assert 'aiohttp.coro_2' == coroutine.name + assert handler.span_id == coroutine.parent_id + assert root.trace_id == coroutine.trace_id + + @unittest_run_loop + @asyncio.coroutine + def test_static_handler(self): + # it should create a trace with multiple spans + request = yield from self.client.request('GET', '/statics/empty.txt') + assert 200 == request.status + text = yield from request.text() + assert 'Static file\n' == text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # root span created in the middleware + assert 'aiohttp.request' == span.name + assert 'GET /statics' == span.resource + assert str(self.client.make_url('/statics/empty.txt')) == span.get_tag(http.URL) + assert 'GET' == span.get_tag('http.method') + assert_span_http_status_code(span, 200) + + @unittest_run_loop + @asyncio.coroutine + def test_middleware_applied_twice(self): + # it should be idempotent + app = setup_app(self.app.loop) + # the middleware is not present + assert 1 == len(app.middlewares) + assert noop_middleware == app.middlewares[0] + # the middleware is present (with the noop middleware) + trace_app(app, self.tracer) + assert 2 == len(app.middlewares) + # applying the middleware twice doesn't add it again + trace_app(app, self.tracer) + assert 2 == len(app.middlewares) + # and the middleware is always the first + assert trace_middleware == app.middlewares[0] + assert noop_middleware == app.middlewares[1] + + @unittest_run_loop + @asyncio.coroutine + def test_exception(self): + request = yield from self.client.request('GET', '/exception') + assert 500 == request.status + yield from request.text() + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + spans = traces[0] + assert 1 == len(spans) + span = spans[0] + assert 1 == span.error + assert 'GET /exception' == span.resource + assert 'error' == span.get_tag('error.msg') + assert 'Exception: error' in span.get_tag('error.stack') + + @unittest_run_loop + @asyncio.coroutine + def test_async_exception(self): + request = yield from self.client.request('GET', '/async_exception') + assert 500 == request.status + yield from request.text() + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + spans = traces[0] + assert 1 == len(spans) + span = 
spans[0] + assert 1 == span.error + assert 'GET /async_exception' == span.resource + assert 'error' == span.get_tag('error.msg') + assert 'Exception: error' in span.get_tag('error.stack') + + @unittest_run_loop + @asyncio.coroutine + def test_wrapped_coroutine(self): + request = yield from self.client.request('GET', '/wrapped_coroutine') + assert 200 == request.status + text = yield from request.text() + assert 'OK' == text + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + spans = traces[0] + assert 2 == len(spans) + span = spans[0] + assert 'GET /wrapped_coroutine' == span.resource + span = spans[1] + assert 'nested' == span.name + assert span.duration > 0.25, 'span.duration={0}'.format(span.duration) + + @unittest_run_loop + @asyncio.coroutine + def test_distributed_tracing(self): + # distributed tracing is enabled by default + tracing_headers = { + 'x-datadog-trace-id': '100', + 'x-datadog-parent-id': '42', + } + + request = yield from self.client.request('GET', '/', headers=tracing_headers) + assert 200 == request.status + text = yield from request.text() + assert "What's tracing?" == text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # with the right trace_id and parent_id + assert span.trace_id == 100 + assert span.parent_id == 42 + assert span.get_metric(SAMPLING_PRIORITY_KEY) is None + + @unittest_run_loop + @asyncio.coroutine + def test_distributed_tracing_with_sampling_true(self): + self.tracer.priority_sampler = RateSampler(0.1) + + tracing_headers = { + 'x-datadog-trace-id': '100', + 'x-datadog-parent-id': '42', + 'x-datadog-sampling-priority': '1', + } + + request = yield from self.client.request('GET', '/', headers=tracing_headers) + assert 200 == request.status + text = yield from request.text() + assert "What's tracing?" == text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # with the right trace_id and parent_id + assert 100 == span.trace_id + assert 42 == span.parent_id + assert 1 == span.get_metric(SAMPLING_PRIORITY_KEY) + + @unittest_run_loop + @asyncio.coroutine + def test_distributed_tracing_with_sampling_false(self): + self.tracer.priority_sampler = RateSampler(0.9) + + tracing_headers = { + 'x-datadog-trace-id': '100', + 'x-datadog-parent-id': '42', + 'x-datadog-sampling-priority': '0', + } + + request = yield from self.client.request('GET', '/', headers=tracing_headers) + assert 200 == request.status + text = yield from request.text() + assert "What's tracing?" == text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # with the right trace_id and parent_id + assert 100 == span.trace_id + assert 42 == span.parent_id + assert 0 == span.get_metric(SAMPLING_PRIORITY_KEY) + + @unittest_run_loop + @asyncio.coroutine + def test_distributed_tracing_disabled(self): + # pass headers for distributed tracing + self.app['datadog_trace']['distributed_tracing_enabled'] = False + tracing_headers = { + 'x-datadog-trace-id': '100', + 'x-datadog-parent-id': '42', + } + + request = yield from self.client.request('GET', '/', headers=tracing_headers) + assert 200 == request.status + text = yield from request.text() + assert "What's tracing?" 
== text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # distributed tracing must be ignored when disabled + assert span.trace_id != 100 + assert span.parent_id != 42 + + @unittest_run_loop + @asyncio.coroutine + def test_distributed_tracing_sub_span(self): + self.tracer.priority_sampler = RateSampler(1.0) + + # activate distributed tracing + tracing_headers = { + 'x-datadog-trace-id': '100', + 'x-datadog-parent-id': '42', + 'x-datadog-sampling-priority': '0', + } + + request = yield from self.client.request('GET', '/sub_span', headers=tracing_headers) + assert 200 == request.status + text = yield from request.text() + assert 'OK' == text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + span, sub_span = traces[0][0], traces[0][1] + # with the right trace_id and parent_id + assert 100 == span.trace_id + assert 42 == span.parent_id + assert 0 == span.get_metric(SAMPLING_PRIORITY_KEY) + # check parenting is OK with custom sub-span created within server code + assert 100 == sub_span.trace_id + assert span.span_id == sub_span.parent_id + assert sub_span.get_metric(SAMPLING_PRIORITY_KEY) is None + + def _assert_200_parenting(self, traces): + """Helper to assert parenting when handling aiohttp requests. + + This is used to ensure that parenting is consistent between Datadog + and OpenTracing implementations of tracing. + """ + assert 2 == len(traces) + assert 1 == len(traces[0]) + + # the inner span will be the first trace since it completes before the + # outer span does + inner_span = traces[0][0] + outer_span = traces[1][0] + + # confirm the parenting + assert outer_span.parent_id is None + assert inner_span.parent_id is None + + assert outer_span.name == 'aiohttp_op' + + # with the right fields + assert 'aiohttp.request' == inner_span.name + assert 'aiohttp-web' == inner_span.service + assert 'web' == inner_span.span_type + assert 'GET /' == inner_span.resource + assert str(self.client.make_url('/')) == inner_span.get_tag(http.URL) + assert 'GET' == inner_span.get_tag('http.method') + assert_span_http_status_code(inner_span, 200) + assert 0 == inner_span.error + + @unittest_run_loop + @asyncio.coroutine + def test_parenting_200_dd(self): + with self.tracer.trace('aiohttp_op'): + request = yield from self.client.request('GET', '/') + assert 200 == request.status + text = yield from request.text() + + assert "What's tracing?" == text + traces = self.tracer.writer.pop_traces() + self._assert_200_parenting(traces) + + @unittest_run_loop + @asyncio.coroutine + def test_parenting_200_ot(self): + """OpenTracing version of test_handler.""" + ot_tracer = init_tracer('aiohttp_svc', self.tracer, scope_manager=AsyncioScopeManager()) + + with ot_tracer.start_active_span('aiohttp_op'): + request = yield from self.client.request('GET', '/') + assert 200 == request.status + text = yield from request.text() + + assert "What's tracing?"
== text + traces = self.tracer.writer.pop_traces() + self._assert_200_parenting(traces) + + @unittest_run_loop + @asyncio.coroutine + def test_analytics_integration_enabled(self): + """ Check trace has analytics sample rate set """ + self.app['datadog_trace']['analytics_enabled'] = True + self.app['datadog_trace']['analytics_sample_rate'] = 0.5 + request = yield from self.client.request('GET', '/template/') + yield from request.text() + + # Assert root span sets the appropriate metric + self.assert_structure( + dict(name='aiohttp.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}) + ) + + @unittest_run_loop + @asyncio.coroutine + def test_analytics_integration_default(self): + """ Check trace does not have analytics sample rate set by default """ + request = yield from self.client.request('GET', '/template/') + yield from request.text() + + # Assert root span does not have the appropriate metric + root = self.get_root_span() + self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + @unittest_run_loop + @asyncio.coroutine + def test_analytics_integration_disabled(self): + """ Check trace does not have analytics sample rate set when disabled """ + self.app['datadog_trace']['analytics_enabled'] = False + request = yield from self.client.request('GET', '/template/') + yield from request.text() + + # Assert root span does not have the appropriate metric + root = self.get_root_span() + self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) diff --git a/tests/contrib/aiohttp/test_request.py b/tests/contrib/aiohttp/test_request.py new file mode 100644 index 0000000000..b13e848f40 --- /dev/null +++ b/tests/contrib/aiohttp/test_request.py @@ -0,0 +1,81 @@ +import threading +import asyncio +import aiohttp_jinja2 + +from urllib import request +from aiohttp.test_utils import unittest_run_loop + +from ddtrace.pin import Pin +from ddtrace.contrib.aiohttp.patch import patch, unpatch +from ddtrace.contrib.aiohttp.middlewares import trace_app + +from .utils import TraceTestCase + + +class TestRequestTracing(TraceTestCase): + """ + Ensures that the trace includes all traced components.
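+ + Both the tracing middleware and the ``aiohttp_jinja2`` integration are + enabled here, so a request to a template route should produce a request + span with a child template span.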
+ """ + def enable_tracing(self): + # enabled tracing: + # * middleware + # * templates + trace_app(self.app, self.tracer) + patch() + Pin.override(aiohttp_jinja2, tracer=self.tracer) + + def disable_tracing(self): + unpatch() + + @unittest_run_loop + @asyncio.coroutine + def test_full_request(self): + # it should create a root span when there is a handler hit + # with the proper tags + request = yield from self.client.request('GET', '/template/') + assert 200 == request.status + yield from request.text() + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + request_span = traces[0][0] + template_span = traces[0][1] + # request + assert 'aiohttp-web' == request_span.service + assert 'aiohttp.request' == request_span.name + assert 'GET /template/' == request_span.resource + # template + assert 'aiohttp-web' == template_span.service + assert 'aiohttp.template' == template_span.name + assert 'aiohttp.template' == template_span.resource + + @unittest_run_loop + @asyncio.coroutine + def test_multiple_full_request(self): + # it should handle multiple requests using the same loop + def make_requests(): + url = self.client.make_url('/delayed/') + response = request.urlopen(str(url)).read().decode('utf-8') + assert 'Done' == response + + # blocking call executed in different threads + threads = [threading.Thread(target=make_requests) for _ in range(10)] + for t in threads: + t.daemon = True + t.start() + + # we should yield so that this loop can handle + # threads' requests + yield from asyncio.sleep(0.5) + for t in threads: + t.join(timeout=0.5) + + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 10 == len(traces) + assert 1 == len(traces[0]) diff --git a/tests/contrib/aiohttp/test_request_safety.py b/tests/contrib/aiohttp/test_request_safety.py new file mode 100644 index 0000000000..6f4c93947f --- /dev/null +++ b/tests/contrib/aiohttp/test_request_safety.py @@ -0,0 +1,87 @@ +import threading +import asyncio +import aiohttp_jinja2 + +from urllib import request +from aiohttp.test_utils import unittest_run_loop + +from ddtrace.pin import Pin +from ddtrace.provider import DefaultContextProvider +from ddtrace.contrib.aiohttp.patch import patch, unpatch +from ddtrace.contrib.aiohttp.middlewares import trace_app + +from .utils import TraceTestCase + + +class TestAiohttpSafety(TraceTestCase): + """ + Ensure that if the ``AsyncioTracer`` is not properly configured, + bad traces are produced but the ``Context`` object will not + leak memory. 
+ """ + def enable_tracing(self): + # aiohttp TestCase with the wrong context provider + trace_app(self.app, self.tracer) + patch() + Pin.override(aiohttp_jinja2, tracer=self.tracer) + self.tracer.configure(context_provider=DefaultContextProvider()) + + def disable_tracing(self): + unpatch() + + @unittest_run_loop + @asyncio.coroutine + def test_full_request(self): + # it should create a root span when there is a handler hit + # with the proper tags + request = yield from self.client.request('GET', '/template/') + assert 200 == request.status + yield from request.text() + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + request_span = traces[0][0] + template_span = traces[0][1] + # request + assert 'aiohttp-web' == request_span.service + assert 'aiohttp.request' == request_span.name + assert 'GET /template/' == request_span.resource + # template + assert 'aiohttp-web' == template_span.service + assert 'aiohttp.template' == template_span.name + assert 'aiohttp.template' == template_span.resource + + @unittest_run_loop + @asyncio.coroutine + def test_multiple_full_request(self): + NUMBER_REQUESTS = 10 + responses = [] + + # it should produce a wrong trace, but the Context must + # be finished + def make_requests(): + url = self.client.make_url('/delayed/') + response = request.urlopen(str(url)).read().decode('utf-8') + responses.append(response) + + # blocking call executed in different threads + ctx = self.tracer.get_call_context() + threads = [threading.Thread(target=make_requests) for _ in range(NUMBER_REQUESTS)] + for t in threads: + t.start() + + # yield back to the event loop until all requests are processed + while len(responses) < NUMBER_REQUESTS: + yield from asyncio.sleep(0.001) + + for response in responses: + assert 'Done' == response + + for t in threads: + t.join() + + # the trace is wrong but the Context is finished + spans = self.tracer.writer.pop() + assert NUMBER_REQUESTS == len(spans) + assert 0 == len(ctx._trace) diff --git a/tests/contrib/aiohttp/test_templates.py b/tests/contrib/aiohttp/test_templates.py new file mode 100644 index 0000000000..bf2c1f0a46 --- /dev/null +++ b/tests/contrib/aiohttp/test_templates.py @@ -0,0 +1,120 @@ +import asyncio +import aiohttp_jinja2 + +from aiohttp.test_utils import unittest_run_loop + +from ddtrace.pin import Pin +from ddtrace.contrib.aiohttp.patch import patch, unpatch + +from .utils import TraceTestCase +from .app.web import set_filesystem_loader, set_package_loader + + +class TestTraceTemplate(TraceTestCase): + """ + Ensures that the aiohttp_jinja2 library is properly traced. 
+ """ + def enable_tracing(self): + patch() + Pin.override(aiohttp_jinja2, tracer=self.tracer) + + def disable_tracing(self): + unpatch() + + @unittest_run_loop + @asyncio.coroutine + def test_template_rendering(self): + # it should trace a template rendering + request = yield from self.client.request('GET', '/template/') + assert 200 == request.status + text = yield from request.text() + assert 'OK' == text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # with the right fields + assert 'aiohttp.template' == span.name + assert 'template' == span.span_type + assert '/template.jinja2' == span.get_tag('aiohttp.template') + assert 0 == span.error + + @unittest_run_loop + @asyncio.coroutine + def test_template_rendering_filesystem(self): + # it should trace a template rendering with a FileSystemLoader + set_filesystem_loader(self.app) + request = yield from self.client.request('GET', '/template/') + assert 200 == request.status + text = yield from request.text() + assert 'OK' == text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # with the right fields + assert 'aiohttp.template' == span.name + assert 'template' == span.span_type + assert '/template.jinja2' == span.get_tag('aiohttp.template') + assert 0 == span.error + + @unittest_run_loop + @asyncio.coroutine + def test_template_rendering_package(self): + # it should trace a template rendering with a PackageLoader + set_package_loader(self.app) + request = yield from self.client.request('GET', '/template/') + assert 200 == request.status + text = yield from request.text() + assert 'OK' == text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # with the right fields + assert 'aiohttp.template' == span.name + assert 'template' == span.span_type + assert 'templates/template.jinja2' == span.get_tag('aiohttp.template') + assert 0 == span.error + + @unittest_run_loop + @asyncio.coroutine + def test_template_decorator(self): + # it should trace a template rendering + request = yield from self.client.request('GET', '/template_decorator/') + assert 200 == request.status + text = yield from request.text() + assert 'OK' == text + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # with the right fields + assert 'aiohttp.template' == span.name + assert 'template' == span.span_type + assert '/template.jinja2' == span.get_tag('aiohttp.template') + assert 0 == span.error + + @unittest_run_loop + @asyncio.coroutine + def test_template_error(self): + # it should trace a template rendering + request = yield from self.client.request('GET', '/template_error/') + assert 500 == request.status + yield from request.text() + # the trace is created + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + # with the right fields + assert 'aiohttp.template' == span.name + assert 'template' == span.span_type + assert '/error.jinja2' == span.get_tag('aiohttp.template') + assert 1 == span.error + assert 'division by zero' == span.get_tag('error.msg') + assert 'ZeroDivisionError: division by zero' in span.get_tag('error.stack') diff --git a/tests/contrib/aiohttp/utils.py b/tests/contrib/aiohttp/utils.py new file mode 100644 
index 0000000000..bb5ce46c92 --- /dev/null +++ b/tests/contrib/aiohttp/utils.py @@ -0,0 +1,37 @@ +import asyncio + +from aiohttp.test_utils import AioHTTPTestCase + +from .app.web import setup_app +from ...base import BaseTracerTestCase + + +class TraceTestCase(BaseTracerTestCase, AioHTTPTestCase): + """ + Base class that provides a valid ``aiohttp`` application with + the async tracer. + """ + def enable_tracing(self): + pass + + def disable_tracing(self): + pass + + def tearDown(self): + # unpatch the aiohttp_jinja2 module + super(TraceTestCase, self).tearDown() + self.disable_tracing() + + def get_app(self, loop=None): + """ + Override the get_app method to return the test application + """ + # aiohttp 2.0+ stores the loop instance in self.loop; for + # backward compatibility, we should expect a `loop` argument + loop = loop or self.loop + # create the app with the testing loop + self.app = setup_app(loop) + asyncio.set_event_loop(loop) + # trace the app + self.enable_tracing() + return self.app diff --git a/tests/contrib/aiopg/__init__.py b/tests/contrib/aiopg/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/aiopg/py35/__init__.py b/tests/contrib/aiopg/py35/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/aiopg/py35/test.py b/tests/contrib/aiopg/py35/test.py new file mode 100644 index 0000000000..8c32de754d --- /dev/null +++ b/tests/contrib/aiopg/py35/test.py @@ -0,0 +1,63 @@ +# stdlib +import asyncio + +# 3p +import aiopg + +# project +from ddtrace.contrib.aiopg.patch import patch, unpatch +from ddtrace import Pin + +# testing +from tests.contrib.config import POSTGRES_CONFIG +from tests.contrib.asyncio.utils import AsyncioTestCase, mark_asyncio + + +TEST_PORT = str(POSTGRES_CONFIG['port']) + + +class TestPsycopgPatch(AsyncioTestCase): + # default service + TEST_SERVICE = 'postgres' + + def setUp(self): + super().setUp() + self._conn = None + patch() + + def tearDown(self): + super().tearDown() + if self._conn and not self._conn.closed: + self._conn.close() + + unpatch() + + @asyncio.coroutine + def _get_conn_and_tracer(self): + conn = self._conn = yield from aiopg.connect(**POSTGRES_CONFIG) + Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + + return conn, self.tracer + + async def _test_cursor_ctx_manager(self): + conn, tracer = await self._get_conn_and_tracer() + cur = await conn.cursor() + t = type(cur) + + async with conn.cursor() as cur: + assert t == type(cur), '%s != %s' % (t, type(cur)) + await cur.execute(query='select \'blah\'') + rows = await cur.fetchall() + assert len(rows) == 1 + assert rows[0][0] == 'blah' + + spans = tracer.writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.name == 'postgres.query' + + @mark_asyncio + def test_cursor_ctx_manager(self): + # ensure cursors work with context managers + # https://github.com/DataDog/dd-trace-py/issues/228 + yield from self._test_cursor_ctx_manager() diff --git a/tests/contrib/aiopg/py37/__init__.py b/tests/contrib/aiopg/py37/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/aiopg/py37/test.py b/tests/contrib/aiopg/py37/test.py new file mode 100644 index 0000000000..493786b9dd --- /dev/null +++ b/tests/contrib/aiopg/py37/test.py @@ -0,0 +1,52 @@ +# 3p +import aiopg + +# project +from ddtrace.contrib.aiopg.patch import patch, unpatch +from ddtrace import Pin + +# testing +from tests.contrib.config import POSTGRES_CONFIG +from tests.contrib.asyncio.utils import AsyncioTestCase, 
mark_asyncio + + +TEST_PORT = str(POSTGRES_CONFIG['port']) + + +class AiopgTestCase(AsyncioTestCase): + # default service + TEST_SERVICE = 'postgres' + + def setUp(self): + super().setUp() + self._conn = None + patch() + + def tearDown(self): + super().tearDown() + if self._conn and not self._conn.closed: + self._conn.close() + + unpatch() + + async def _get_conn_and_tracer(self): + conn = self._conn = await aiopg.connect(**POSTGRES_CONFIG) + Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + + return conn, self.tracer + + @mark_asyncio + async def test_async_generator(self): + conn, tracer = await self._get_conn_and_tracer() + cursor = await conn.cursor() + q = 'select \'foobarblah\'' + await cursor.execute(q) + rows = [] + async for row in cursor: + rows.append(row) + + assert rows == [('foobarblah',)] + spans = tracer.writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.name == 'postgres.query' diff --git a/tests/contrib/aiopg/test.py b/tests/contrib/aiopg/test.py new file mode 100644 index 0000000000..faf7f17a12 --- /dev/null +++ b/tests/contrib/aiopg/test.py @@ -0,0 +1,238 @@ +# stdlib +import time +import asyncio + +# 3p +import aiopg +from psycopg2 import extras + +# project +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.aiopg.patch import patch, unpatch +from ddtrace import Pin + +# testing +from tests.opentracer.utils import init_tracer +from tests.contrib.config import POSTGRES_CONFIG +from tests.test_tracer import get_dummy_tracer +from tests.contrib.asyncio.utils import AsyncioTestCase, mark_asyncio + + +TEST_PORT = POSTGRES_CONFIG['port'] + + +class AiopgTestCase(AsyncioTestCase): + # default service + TEST_SERVICE = 'postgres' + + def setUp(self): + super().setUp() + self._conn = None + patch() + + def tearDown(self): + super().tearDown() + if self._conn and not self._conn.closed: + self._conn.close() + + unpatch() + + @asyncio.coroutine + def _get_conn_and_tracer(self): + conn = self._conn = yield from aiopg.connect(**POSTGRES_CONFIG) + Pin.get_from(conn).clone(tracer=self.tracer).onto(conn) + + return conn, self.tracer + + @asyncio.coroutine + def assert_conn_is_traced(self, tracer, db, service): + + # ensure the trace aiopg client doesn't add non-standard + # methods + try: + yield from db.execute('select \'foobar\'') + except AttributeError: + pass + + writer = tracer.writer + # Ensure we can run a query and it's correctly traced + q = 'select \'foobarblah\'' + start = time.time() + cursor = yield from db.cursor() + yield from cursor.execute(q) + rows = yield from cursor.fetchall() + end = time.time() + assert rows == [('foobarblah',)] + assert rows + spans = writer.pop() + assert spans + assert len(spans) == 1 + span = spans[0] + assert span.name == 'postgres.query' + assert span.resource == q + assert span.service == service + assert span.meta['sql.query'] == q + assert span.error == 0 + assert span.span_type == 'sql' + assert start <= span.start <= end + assert span.duration <= end - start + + # Ensure OpenTracing compatibility + ot_tracer = init_tracer('aiopg_svc', tracer) + with ot_tracer.start_active_span('aiopg_op'): + cursor = yield from db.cursor() + yield from cursor.execute(q) + rows = yield from cursor.fetchall() + assert rows == [('foobarblah',)] + spans = writer.pop() + assert len(spans) == 2 + ot_span, dd_span = spans + # confirm the parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + assert ot_span.name == 'aiopg_op' + assert ot_span.service == 'aiopg_svc' + 
assert dd_span.name == 'postgres.query' + assert dd_span.resource == q + assert dd_span.service == service + assert dd_span.meta['sql.query'] == q + assert dd_span.error == 0 + assert dd_span.span_type == 'sql' + + # run a query with an error and ensure all is well + q = 'select * from some_non_existant_table' + cur = yield from db.cursor() + try: + yield from cur.execute(q) + except Exception: + pass + else: + assert 0, 'should have an error' + spans = writer.pop() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + assert span.name == 'postgres.query' + assert span.resource == q + assert span.service == service + assert span.meta['sql.query'] == q + assert span.error == 1 + # assert span.meta['out.host'] == 'localhost' + assert span.metrics['out.port'] == TEST_PORT + assert span.span_type == 'sql' + + @mark_asyncio + def test_disabled_execute(self): + conn, tracer = yield from self._get_conn_and_tracer() + tracer.enabled = False + # these calls were crashing with a previous version of the code. + yield from (yield from conn.cursor()).execute(query='select \'blah\'') + yield from (yield from conn.cursor()).execute('select \'blah\'') + assert not tracer.writer.pop() + + @mark_asyncio + def test_manual_wrap_extension_types(self): + conn, _ = yield from self._get_conn_and_tracer() + # NOTE: this will crash if it doesn't work. + # _ext.register_type(_ext.UUID, conn_or_curs) + # TypeError: argument 2 must be a connection, cursor or None + extras.register_uuid(conn_or_curs=conn) + + @mark_asyncio + def test_connect_factory(self): + tracer = get_dummy_tracer() + + services = ['db', 'another'] + for service in services: + conn, _ = yield from self._get_conn_and_tracer() + Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn) + yield from self.assert_conn_is_traced(tracer, conn, service) + conn.close() + + # ensure we have the service types + service_meta = tracer.writer.pop_services() + expected = {} + assert service_meta == expected + + @mark_asyncio + def test_patch_unpatch(self): + tracer = get_dummy_tracer() + writer = tracer.writer + + # Test patch idempotence + patch() + patch() + + service = 'fo' + + conn = yield from aiopg.connect(**POSTGRES_CONFIG) + Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn) + yield from (yield from conn.cursor()).execute('select \'blah\'') + conn.close() + + spans = writer.pop() + assert spans, spans + assert len(spans) == 1 + + # Test unpatch + unpatch() + + conn = yield from aiopg.connect(**POSTGRES_CONFIG) + yield from (yield from conn.cursor()).execute('select \'blah\'') + conn.close() + + spans = writer.pop() + assert not spans, spans + + # Test patch again + patch() + + conn = yield from aiopg.connect(**POSTGRES_CONFIG) + Pin.get_from(conn).clone(service=service, tracer=tracer).onto(conn) + yield from (yield from conn.cursor()).execute('select \'blah\'') + conn.close() + + spans = writer.pop() + assert spans, spans + assert len(spans) == 1 + + +class AiopgAnalyticsTestCase(AiopgTestCase): + @asyncio.coroutine + def trace_spans(self): + conn, _ = yield from self._get_conn_and_tracer() + + Pin.get_from(conn).clone(service='db', tracer=self.tracer).onto(conn) + + cursor = yield from conn.cursor() + yield from cursor.execute('select \'foobar\'') + rows = yield from cursor.fetchall() + assert rows + + return self.get_spans() + + @mark_asyncio + def test_analytics_default(self): + spans = yield from self.trace_spans() + self.assertEqual(len(spans), 1) + 
self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + @mark_asyncio + def test_analytics_with_rate(self): + with self.override_config( + 'aiopg', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + spans = yield from self.trace_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + @mark_asyncio + def test_analytics_without_rate(self): + with self.override_config( + 'aiopg', + dict(analytics_enabled=True) + ): + spans = yield from self.trace_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) diff --git a/tests/contrib/algoliasearch/__init__.py b/tests/contrib/algoliasearch/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/algoliasearch/test.py b/tests/contrib/algoliasearch/test.py new file mode 100644 index 0000000000..45ecb0db3b --- /dev/null +++ b/tests/contrib/algoliasearch/test.py @@ -0,0 +1,148 @@ +from ddtrace import config, patch_all +from ddtrace.contrib.algoliasearch.patch import (patch, unpatch, algoliasearch_version) +from ddtrace.pin import Pin +from tests.base import BaseTracerTestCase + + +class AlgoliasearchTest(BaseTracerTestCase): + def setUp(self): + super(AlgoliasearchTest, self).setUp() + + # dummy values + def search(self, query, args=None, request_options=None): + return { + 'hits': [ + { + 'dummy': 'dummy' + } + ], + 'processingTimeMS': 23, + 'nbHits': 1, + 'hitsPerPage': 20, + 'exhaustiveNbHits': True, + 'params': 'query=xxx', + 'nbPages': 1, + 'query': 'xxx', + 'page': 0 + } + + # Algolia search is a non free SaaS application, it isn't possible to add it to the + # docker environment to enable a full-fledged integration test. The next best option + # is to mock out the search method to prevent it from making server requests + if algoliasearch_version < (2, 0) and algoliasearch_version >= (1, 0): + import algoliasearch + import algoliasearch.index as index_module + index_module.Index.search = search + client = algoliasearch.algoliasearch.Client('X', 'X') + else: + import algoliasearch.search_index as index_module + from algoliasearch.search_client import SearchClient + index_module.SearchIndex.search = search + client = SearchClient.create('X', 'X') + + # use this index only to properly test stuff + self.index = client.init_index('test_index') + + def patch_algoliasearch(self): + patch() + Pin.override(self.index, tracer=self.tracer) + + def tearDown(self): + super(AlgoliasearchTest, self).tearDown() + unpatch() + if hasattr(self, 'tracer'): + self.reset() + + def perform_search(self, query_text, query_args=None): + if algoliasearch_version < (2, 0) and algoliasearch_version >= (1, 0): + self.index.search(query_text, args=query_args) + else: + self.index.search(query_text, request_options=query_args) + + def test_algoliasearch(self): + self.patch_algoliasearch() + self.perform_search( + 'test search', + {'attributesToRetrieve': 'firstname,lastname', 'unsupportedTotallyNewArgument': 'ignore'} + ) + + spans = self.get_spans() + self.reset() + + assert len(spans) == 1 + span = spans[0] + assert span.service == 'algoliasearch' + assert span.name == 'algoliasearch.search' + assert span.span_type is None + assert span.error == 0 + assert span.get_tag('query.args.attributes_to_retrieve') == 'firstname,lastname' + # Verify that adding new arguments to the search API will simply be ignored and not cause + # errors + assert span.get_tag('query.args.unsupported_totally_new_argument') is 
None + assert span.get_metric('processing_time_ms') == 23 + assert span.get_metric('number_of_hits') == 1 + + # Verify query_text, which may contain sensitive data, is not passed along + # unless the config value is appropriately set + assert span.get_tag('query.text') is None + + def test_algoliasearch_with_query_text(self): + self.patch_algoliasearch() + config.algoliasearch.collect_query_text = True + + self.perform_search( + 'test search', + {'attributesToRetrieve': 'firstname,lastname', 'unsupportedTotallyNewArgument': 'ignore'} + ) + spans = self.get_spans() + span = spans[0] + assert span.get_tag('query.text') == 'test search' + + def test_patch_unpatch(self): + self.patch_algoliasearch() + # Test patch idempotence + patch() + patch() + + self.perform_search('test search') + + spans = self.get_spans() + self.reset() + assert spans, spans + assert len(spans) == 1 + + # Test unpatch + unpatch() + + self.index.search('test search') + + spans = self.get_spans() + self.reset() + assert not spans, spans + + # Test patch again + self.reset() + patch() + + self.index.search('test search') + + spans = self.get_spans() + assert spans, spans + assert len(spans) == 1 + + def test_patch_all_auto_enable(self): + patch_all() + Pin.override(self.index, tracer=self.tracer) + self.perform_search('test search') + + spans = self.get_spans() + self.reset() + assert spans, spans + assert len(spans) == 1 + + unpatch() + + self.perform_search('test search') + + spans = self.get_spans() + assert not spans, spans diff --git a/tests/contrib/asyncio/__init__.py b/tests/contrib/asyncio/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/asyncio/test_helpers.py b/tests/contrib/asyncio/test_helpers.py new file mode 100644 index 0000000000..2fc6eb7c04 --- /dev/null +++ b/tests/contrib/asyncio/test_helpers.py @@ -0,0 +1,91 @@ +import asyncio +import pytest + +from ddtrace.context import Context +from ddtrace.internal.context_manager import CONTEXTVARS_IS_AVAILABLE +from ddtrace.contrib.asyncio import helpers +from .utils import AsyncioTestCase, mark_asyncio + + +@pytest.mark.skipif( + CONTEXTVARS_IS_AVAILABLE, + reason='only applicable to legacy asyncio integration' +) +class TestAsyncioHelpers(AsyncioTestCase): + """ + Ensure that helpers set the ``Context`` properly when creating + new ``Task`` or threads. 
+ """ + @mark_asyncio + def test_set_call_context(self): + # a different Context is set for the current logical execution + task = asyncio.Task.current_task() + ctx = Context() + helpers.set_call_context(task, ctx) + assert ctx == self.tracer.get_call_context() + + @mark_asyncio + def test_ensure_future(self): + # the wrapper should create a new Future that has the Context attached + @asyncio.coroutine + def future_work(): + # the ctx is available in this task + ctx = self.tracer.get_call_context() + assert 1 == len(ctx._trace) + assert 'coroutine' == ctx._trace[0].name + return ctx._trace[0].name + + self.tracer.trace('coroutine') + # schedule future work and wait for a result + delayed_task = helpers.ensure_future(future_work(), tracer=self.tracer) + result = yield from asyncio.wait_for(delayed_task, timeout=1) + assert 'coroutine' == result + + @mark_asyncio + def test_run_in_executor_proxy(self): + # the wrapper should pass arguments and results properly + def future_work(number, name): + assert 42 == number + assert 'john' == name + return True + + future = helpers.run_in_executor(self.loop, None, future_work, 42, 'john', tracer=self.tracer) + result = yield from future + assert result + + @mark_asyncio + def test_run_in_executor_traces(self): + # the wrapper should create a different Context when the Thread + # is started; the new Context creates a new trace + def future_work(): + # the Context is empty but the reference to the latest + # span is here to keep the parenting + ctx = self.tracer.get_call_context() + assert 0 == len(ctx._trace) + assert 'coroutine' == ctx._current_span.name + return True + + span = self.tracer.trace('coroutine') + future = helpers.run_in_executor(self.loop, None, future_work, tracer=self.tracer) + # we close the Context + span.finish() + result = yield from future + assert result + + @mark_asyncio + def test_create_task(self): + # the helper should create a new Task that has the Context attached + @asyncio.coroutine + def future_work(): + # the ctx is available in this task + ctx = self.tracer.get_call_context() + assert 0 == len(ctx._trace) + child_span = self.tracer.trace('child_task') + return child_span + + root_span = self.tracer.trace('main_task') + # schedule future work and wait for a result + task = helpers.create_task(future_work()) + result = yield from task + assert root_span.trace_id == result.trace_id + assert root_span.span_id == result.parent_id diff --git a/tests/contrib/asyncio/test_tracer.py b/tests/contrib/asyncio/test_tracer.py new file mode 100644 index 0000000000..9d84783243 --- /dev/null +++ b/tests/contrib/asyncio/test_tracer.py @@ -0,0 +1,392 @@ +import asyncio +import pytest +import time + + +from ddtrace.context import Context +from ddtrace.internal.context_manager import CONTEXTVARS_IS_AVAILABLE +from ddtrace.provider import DefaultContextProvider +from ddtrace.contrib.asyncio.patch import patch, unpatch +from ddtrace.contrib.asyncio.helpers import set_call_context + +from tests.opentracer.utils import init_tracer +from .utils import AsyncioTestCase, mark_asyncio + + +_orig_create_task = asyncio.BaseEventLoop.create_task + + +class TestAsyncioTracer(AsyncioTestCase): + """Ensure that the tracer works with asynchronous executions within + the same ``IOLoop``. 
+ """ + @mark_asyncio + @pytest.mark.skipif( + CONTEXTVARS_IS_AVAILABLE, + reason='only applicable to legacy asyncio provider' + ) + def test_get_call_context(self): + # it should return the context attached to the current Task + # or create a new one + task = asyncio.Task.current_task() + ctx = getattr(task, '__datadog_context', None) + assert ctx is None + # get the context from the loop creates a new one that + # is attached to the Task object + ctx = self.tracer.get_call_context() + assert ctx == getattr(task, '__datadog_context', None) + + @mark_asyncio + def test_get_call_context_twice(self): + # it should return the same Context if called twice + assert self.tracer.get_call_context() == self.tracer.get_call_context() + + @mark_asyncio + def test_trace_coroutine(self): + # it should use the task context when invoked in a coroutine + with self.tracer.trace('coroutine') as span: + span.resource = 'base' + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + assert 'coroutine' == traces[0][0].name + assert 'base' == traces[0][0].resource + + @mark_asyncio + def test_trace_multiple_coroutines(self): + # if multiple coroutines have nested tracing, they must belong + # to the same trace + @asyncio.coroutine + def coro(): + # another traced coroutine + with self.tracer.trace('coroutine_2'): + return 42 + + with self.tracer.trace('coroutine_1'): + value = yield from coro() + + # the coroutine has been called correctly + assert 42 == value + # a single trace has been properly reported + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + assert 'coroutine_1' == traces[0][0].name + assert 'coroutine_2' == traces[0][1].name + # the parenting is correct + assert traces[0][0] == traces[0][1]._parent + assert traces[0][0].trace_id == traces[0][1].trace_id + + @mark_asyncio + def test_event_loop_exception(self): + # it should handle a loop exception + asyncio.set_event_loop(None) + ctx = self.tracer.get_call_context() + assert ctx is not None + + def test_context_task_none(self): + # it should handle the case where a Task is not available + # Note: the @mark_asyncio is missing to simulate an execution + # without a Task + task = asyncio.Task.current_task() + # the task is not available + assert task is None + # but a new Context is still created making the operation safe + ctx = self.tracer.get_call_context() + assert ctx is not None + + @mark_asyncio + def test_exception(self): + @asyncio.coroutine + def f1(): + with self.tracer.trace('f1'): + raise Exception('f1 error') + + with self.assertRaises(Exception): + yield from f1() + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + spans = traces[0] + assert 1 == len(spans) + span = spans[0] + assert 1 == span.error + assert 'f1 error' == span.get_tag('error.msg') + assert 'Exception: f1 error' in span.get_tag('error.stack') + + @mark_asyncio + def test_nested_exceptions(self): + @asyncio.coroutine + def f1(): + with self.tracer.trace('f1'): + raise Exception('f1 error') + + @asyncio.coroutine + def f2(): + with self.tracer.trace('f2'): + yield from f1() + + with self.assertRaises(Exception): + yield from f2() + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + spans = traces[0] + assert 2 == len(spans) + span = spans[0] + assert 'f2' == span.name + assert 1 == span.error # f2 did not catch the exception + assert 'f1 error' == span.get_tag('error.msg') + assert 'Exception: f1 error' in span.get_tag('error.stack') + span 
= spans[1] + assert 'f1' == span.name + assert 1 == span.error + assert 'f1 error' == span.get_tag('error.msg') + assert 'Exception: f1 error' in span.get_tag('error.stack') + + @mark_asyncio + def test_handled_nested_exceptions(self): + @asyncio.coroutine + def f1(): + with self.tracer.trace('f1'): + raise Exception('f1 error') + + @asyncio.coroutine + def f2(): + with self.tracer.trace('f2'): + try: + yield from f1() + except Exception: + pass + + yield from f2() + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + spans = traces[0] + assert 2 == len(spans) + span = spans[0] + assert 'f2' == span.name + assert 0 == span.error # f2 caught the exception + span = spans[1] + assert 'f1' == span.name + assert 1 == span.error + assert 'f1 error' == span.get_tag('error.msg') + assert 'Exception: f1 error' in span.get_tag('error.stack') + + @mark_asyncio + def test_trace_multiple_calls(self): + # create multiple futures so that we expect multiple + # traces instead of a single one (helper not used) + @asyncio.coroutine + def coro(): + # another traced coroutine + with self.tracer.trace('coroutine'): + yield from asyncio.sleep(0.01) + + futures = [asyncio.ensure_future(coro()) for x in range(10)] + for future in futures: + yield from future + + traces = self.tracer.writer.pop_traces() + assert 10 == len(traces) + assert 1 == len(traces[0]) + assert 'coroutine' == traces[0][0].name + + @mark_asyncio + def test_wrapped_coroutine(self): + @self.tracer.wrap('f1') + @asyncio.coroutine + def f1(): + yield from asyncio.sleep(0.25) + + yield from f1() + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + spans = traces[0] + assert 1 == len(spans) + span = spans[0] + assert span.duration > 0.25, 'span.duration={}'.format(span.duration) + + +class TestAsyncioPropagation(AsyncioTestCase): + """Ensure that asyncio context propagation works between different tasks""" + def setUp(self): + # patch asyncio event loop + super(TestAsyncioPropagation, self).setUp() + patch() + + def tearDown(self): + # unpatch asyncio event loop + super(TestAsyncioPropagation, self).tearDown() + unpatch() + + @mark_asyncio + def test_tasks_chaining(self): + # ensures that the context is propagated between different tasks + @self.tracer.wrap('spawn_task') + @asyncio.coroutine + def coro_2(): + yield from asyncio.sleep(0.01) + + @self.tracer.wrap('main_task') + @asyncio.coroutine + def coro_1(): + yield from asyncio.ensure_future(coro_2()) + + yield from coro_1() + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 2 + assert len(traces[0]) == 1 + assert len(traces[1]) == 1 + spawn_task = traces[0][0] + main_task = traces[1][0] + # check if the context has been correctly propagated + assert spawn_task.trace_id == main_task.trace_id + assert spawn_task.parent_id == main_task.span_id + + @mark_asyncio + def test_concurrent_chaining(self): + # ensures that the context is correctly propagated when + # concurrent tasks are created from a common tracing block + @self.tracer.wrap('f1') + @asyncio.coroutine + def f1(): + yield from asyncio.sleep(0.01) + + @self.tracer.wrap('f2') + @asyncio.coroutine + def f2(): + yield from asyncio.sleep(0.01) + + with self.tracer.trace('main_task'): + yield from asyncio.gather(f1(), f2()) + # do additional synchronous work to confirm main context is + # correctly handled + with self.tracer.trace('main_task_child'): + time.sleep(0.01) + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 3 + assert len(traces[0]) == 1 + assert 
len(traces[1]) == 1 + assert len(traces[2]) == 2 + child_1 = traces[0][0] + child_2 = traces[1][0] + main_task = traces[2][0] + main_task_child = traces[2][1] + # check if the context has been correctly propagated + assert child_1.trace_id == main_task.trace_id + assert child_1.parent_id == main_task.span_id + assert child_2.trace_id == main_task.trace_id + assert child_2.parent_id == main_task.span_id + assert main_task_child.trace_id == main_task.trace_id + assert main_task_child.parent_id == main_task.span_id + + @pytest.mark.skipif( + CONTEXTVARS_IS_AVAILABLE, + reason='only applicable to legacy asyncio provider' + ) + @mark_asyncio + def test_propagation_with_set_call_context(self): + # ensures that if a new Context is attached to the current + # running Task via helpers, a previous trace is resumed + task = asyncio.Task.current_task() + ctx = Context(trace_id=100, span_id=101) + set_call_context(task, ctx) + + with self.tracer.trace('async_task'): + yield from asyncio.sleep(0.01) + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.trace_id == 100 + assert span.parent_id == 101 + + @mark_asyncio + def test_propagation_with_new_context(self): + # ensures that if a new Context is activated, a trace + # with the Context arguments is created + ctx = Context(trace_id=100, span_id=101) + self.tracer.context_provider.activate(ctx) + + with self.tracer.trace('async_task'): + yield from asyncio.sleep(0.01) + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.trace_id == 100 + assert span.parent_id == 101 + + @mark_asyncio + def test_event_loop_unpatch(self): + # ensures that the event loop can be unpatched + unpatch() + assert isinstance(self.tracer._context_provider, DefaultContextProvider) + assert asyncio.BaseEventLoop.create_task == _orig_create_task + + def test_event_loop_double_patch(self): + # ensures that double patching will not double instrument + # the event loop + patch() + self.test_tasks_chaining() + + @mark_asyncio + def test_trace_multiple_coroutines_ot_outer(self): + """OpenTracing version of test_trace_multiple_coroutines.""" + # if multiple coroutines have nested tracing, they must belong + # to the same trace + @asyncio.coroutine + def coro(): + # another traced coroutine + with self.tracer.trace('coroutine_2'): + return 42 + + ot_tracer = init_tracer('asyncio_svc', self.tracer) + with ot_tracer.start_active_span('coroutine_1'): + value = yield from coro() + + # the coroutine has been called correctly + assert 42 == value + # a single trace has been properly reported + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + assert 'coroutine_1' == traces[0][0].name + assert 'coroutine_2' == traces[0][1].name + # the parenting is correct + assert traces[0][0] == traces[0][1]._parent + assert traces[0][0].trace_id == traces[0][1].trace_id + + @mark_asyncio + def test_trace_multiple_coroutines_ot_inner(self): + """OpenTracing version of test_trace_multiple_coroutines.""" + # if multiple coroutines have nested tracing, they must belong + # to the same trace + ot_tracer = init_tracer('asyncio_svc', self.tracer) + @asyncio.coroutine + def coro(): + # another traced coroutine + with ot_tracer.start_active_span('coroutine_2'): + return 42 + + with self.tracer.trace('coroutine_1'): + value = yield from coro() + + # the coroutine has been called correctly + assert 42 == value + 
# a single trace has been properly reported + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + assert 'coroutine_1' == traces[0][0].name + assert 'coroutine_2' == traces[0][1].name + # the parenting is correct + assert traces[0][0] == traces[0][1]._parent + assert traces[0][0].trace_id == traces[0][1].trace_id diff --git a/tests/contrib/asyncio/test_tracer_safety.py b/tests/contrib/asyncio/test_tracer_safety.py new file mode 100644 index 0000000000..54acebdffe --- /dev/null +++ b/tests/contrib/asyncio/test_tracer_safety.py @@ -0,0 +1,57 @@ +import asyncio + +from ddtrace.provider import DefaultContextProvider +from .utils import AsyncioTestCase, mark_asyncio + + +class TestAsyncioSafety(AsyncioTestCase): + """ + Ensure that if the ``AsyncioTracer`` is not properly configured, + bad traces are produced but the ``Context`` object will not + leak memory. + """ + def setUp(self): + # Asyncio TestCase with the wrong context provider + super(TestAsyncioSafety, self).setUp() + self.tracer.configure(context_provider=DefaultContextProvider()) + + @mark_asyncio + def test_get_call_context(self): + # it should return a context even if not attached to the Task + ctx = self.tracer.get_call_context() + assert ctx is not None + # test that it behaves the wrong way + task = asyncio.Task.current_task() + task_ctx = getattr(task, '__datadog_context', None) + assert task_ctx is None + + @mark_asyncio + def test_trace_coroutine(self): + # it should use the task context when invoked in a coroutine + with self.tracer.trace('coroutine') as span: + span.resource = 'base' + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + assert 'coroutine' == traces[0][0].name + assert 'base' == traces[0][0].resource + + @mark_asyncio + def test_trace_multiple_calls(self): + @asyncio.coroutine + def coro(): + # another traced coroutine + with self.tracer.trace('coroutine'): + yield from asyncio.sleep(0.01) + + ctx = self.tracer.get_call_context() + futures = [asyncio.ensure_future(coro()) for x in range(1000)] + for future in futures: + yield from future + + # the trace is wrong but the Context is finished + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1000 == len(traces[0]) + assert 0 == len(ctx._trace) diff --git a/tests/contrib/asyncio/utils.py b/tests/contrib/asyncio/utils.py new file mode 100644 index 0000000000..3ffe81aa64 --- /dev/null +++ b/tests/contrib/asyncio/utils.py @@ -0,0 +1,48 @@ +import asyncio + +from functools import wraps + +from ddtrace.contrib.asyncio import context_provider + +from ...base import BaseTracerTestCase + + +class AsyncioTestCase(BaseTracerTestCase): + """ + Base TestCase for asyncio framework that setup a new loop + for each test, preserving the original (not started) main + loop. + """ + def setUp(self): + super(AsyncioTestCase, self).setUp() + + self.tracer.configure(context_provider=context_provider) + + # each test must have its own event loop + self._main_loop = asyncio.get_event_loop() + self.loop = asyncio.new_event_loop() + asyncio.set_event_loop(self.loop) + + def tearDown(self): + super(AsyncioTestCase, self).tearDown() + + # restore the main loop + asyncio.set_event_loop(self._main_loop) + self.loop = None + self._main_loop = None + + +def mark_asyncio(f): + """ + Test decorator that wraps a function so that it can be executed + as an asynchronous coroutine. This uses the event loop set in the + ``TestCase`` class, and runs the loop until it's completed. 
+ """ + @wraps(f) + def wrapper(*args, **kwargs): + coro = asyncio.coroutine(f) + future = coro(*args, **kwargs) + loop = asyncio.get_event_loop() + loop.run_until_complete(future) + loop.close() + return wrapper diff --git a/tests/contrib/boto/__init__.py b/tests/contrib/boto/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/boto/test.py b/tests/contrib/boto/test.py new file mode 100644 index 0000000000..d785a5021a --- /dev/null +++ b/tests/contrib/boto/test.py @@ -0,0 +1,310 @@ +# 3p +import boto.ec2 +import boto.s3 +import boto.awslambda +import boto.sqs +import boto.kms +import boto.sts +import boto.elasticache +from moto import mock_s3, mock_ec2, mock_lambda, mock_sts + +# project +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.boto.patch import patch, unpatch +from ddtrace.ext import http + +# testing +from unittest import skipUnless +from tests.opentracer.utils import init_tracer +from ...base import BaseTracerTestCase +from ...utils import assert_span_http_status_code + + +class BotoTest(BaseTracerTestCase): + """Botocore integration testsuite""" + + TEST_SERVICE = 'test-boto-tracing' + + def setUp(self): + super(BotoTest, self).setUp() + patch() + + @mock_ec2 + def test_ec2_client(self): + ec2 = boto.ec2.connect_to_region('us-west-2') + writer = self.tracer.writer + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + + ec2.get_all_instances() + spans = writer.pop() + assert spans + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_tag('aws.operation'), 'DescribeInstances') + assert_span_http_status_code(span, 200) + self.assertEqual(span.get_tag(http.METHOD), 'POST') + self.assertEqual(span.get_tag('aws.region'), 'us-west-2') + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + # Create an instance + ec2.run_instances(21) + spans = writer.pop() + assert spans + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_tag('aws.operation'), 'RunInstances') + assert_span_http_status_code(span, 200) + self.assertEqual(span.get_tag(http.METHOD), 'POST') + self.assertEqual(span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(span.service, 'test-boto-tracing.ec2') + self.assertEqual(span.resource, 'ec2.runinstances') + self.assertEqual(span.name, 'ec2.command') + self.assertEqual(span.span_type, 'http') + + @mock_ec2 + def test_analytics_enabled_with_rate(self): + with self.override_config( + 'boto', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + ec2 = boto.ec2.connect_to_region('us-west-2') + writer = self.tracer.writer + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + + ec2.get_all_instances() + + spans = writer.pop() + assert spans + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + @mock_ec2 + def test_analytics_enabled_without_rate(self): + with self.override_config( + 'boto', + dict(analytics_enabled=True) + ): + ec2 = boto.ec2.connect_to_region('us-west-2') + writer = self.tracer.writer + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + + ec2.get_all_instances() + + spans = writer.pop() + assert spans + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) + + @mock_s3 + def test_s3_client(self): + s3 = boto.s3.connect_to_region('us-east-1') + + writer = self.tracer.writer + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + + s3.get_all_buckets() + spans = writer.pop() + assert 
spans + self.assertEqual(len(spans), 1) + span = spans[0] + assert_span_http_status_code(span, 200) + self.assertEqual(span.get_tag(http.METHOD), 'GET') + self.assertEqual(span.get_tag('aws.operation'), 'get_all_buckets') + + # Create a bucket command + s3.create_bucket('cheese') + spans = writer.pop() + assert spans + self.assertEqual(len(spans), 1) + span = spans[0] + assert_span_http_status_code(span, 200) + self.assertEqual(span.get_tag(http.METHOD), 'PUT') + self.assertEqual(span.get_tag('path'), '/') + self.assertEqual(span.get_tag('aws.operation'), 'create_bucket') + + # Get the created bucket + s3.get_bucket('cheese') + spans = writer.pop() + assert spans + self.assertEqual(len(spans), 1) + span = spans[0] + assert_span_http_status_code(span, 200) + self.assertEqual(span.get_tag(http.METHOD), 'HEAD') + self.assertEqual(span.get_tag('aws.operation'), 'head_bucket') + self.assertEqual(span.service, 'test-boto-tracing.s3') + self.assertEqual(span.resource, 's3.head') + self.assertEqual(span.name, 's3.command') + + # Checking for resource incase of error + try: + s3.get_bucket('big_bucket') + except Exception: + spans = writer.pop() + assert spans + span = spans[0] + self.assertEqual(span.resource, 's3.head') + + @mock_s3 + def test_s3_put(self): + s3 = boto.s3.connect_to_region('us-east-1') + + writer = self.tracer.writer + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + s3.create_bucket('mybucket') + bucket = s3.get_bucket('mybucket') + k = boto.s3.key.Key(bucket) + k.key = 'foo' + k.set_contents_from_string('bar') + + spans = writer.pop() + assert spans + # create bucket + self.assertEqual(len(spans), 3) + self.assertEqual(spans[0].get_tag('aws.operation'), 'create_bucket') + assert_span_http_status_code(spans[0], 200) + self.assertEqual(spans[0].service, 'test-boto-tracing.s3') + self.assertEqual(spans[0].resource, 's3.put') + # get bucket + self.assertEqual(spans[1].get_tag('aws.operation'), 'head_bucket') + self.assertEqual(spans[1].resource, 's3.head') + # put object + self.assertEqual(spans[2].get_tag('aws.operation'), '_send_file_internal') + self.assertEqual(spans[2].resource, 's3.put') + + @mock_lambda + def test_unpatch(self): + lamb = boto.awslambda.connect_to_region('us-east-2') + + writer = self.tracer.writer + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(lamb) + unpatch() + + # multiple calls + lamb.list_functions() + spans = writer.pop() + assert not spans, spans + + @mock_s3 + def test_double_patch(self): + s3 = boto.s3.connect_to_region('us-east-1') + + writer = self.tracer.writer + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + + patch() + patch() + + # Get the created bucket + s3.create_bucket('cheese') + spans = writer.pop() + assert spans + self.assertEqual(len(spans), 1) + + @mock_lambda + def test_lambda_client(self): + lamb = boto.awslambda.connect_to_region('us-east-2') + + writer = self.tracer.writer + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(lamb) + + # multiple calls + lamb.list_functions() + lamb.list_functions() + + spans = writer.pop() + assert spans + self.assertEqual(len(spans), 2) + span = spans[0] + assert_span_http_status_code(span, 200) + self.assertEqual(span.get_tag(http.METHOD), 'GET') + self.assertEqual(span.get_tag('aws.region'), 'us-east-2') + self.assertEqual(span.get_tag('aws.operation'), 'list_functions') + self.assertEqual(span.service, 'test-boto-tracing.lambda') + self.assertEqual(span.resource, 'lambda.get') + + @mock_sts + def test_sts_client(self): + sts = 
boto.sts.connect_to_region('us-west-2') + + writer = self.tracer.writer + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sts) + + sts.get_federation_token(12, duration=10) + + spans = writer.pop() + assert spans + span = spans[0] + self.assertEqual(span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(span.get_tag('aws.operation'), 'GetFederationToken') + self.assertEqual(span.service, 'test-boto-tracing.sts') + self.assertEqual(span.resource, 'sts.getfederationtoken') + + # checking for protection on sts against security leak + self.assertIsNone(span.get_tag('args.path')) + + @skipUnless( + False, + ('Test to reproduce the case where args sent to patched function are None,' + 'can\'t be mocked: needs AWS crendentials'), + ) + def test_elasticache_client(self): + elasticache = boto.elasticache.connect_to_region('us-west-2') + + writer = self.tracer.writer + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(elasticache) + + elasticache.describe_cache_clusters() + + spans = writer.pop() + assert spans + span = spans[0] + self.assertEqual(span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(span.service, 'test-boto-tracing.elasticache') + self.assertEqual(span.resource, 'elasticache') + + @mock_ec2 + def test_ec2_client_ot(self): + """OpenTracing compatibility check of the test_ec2_client test.""" + + ec2 = boto.ec2.connect_to_region('us-west-2') + + ot_tracer = init_tracer('my_svc', self.tracer) + writer = self.tracer.writer + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + + with ot_tracer.start_active_span('ot_span'): + ec2.get_all_instances() + spans = writer.pop() + assert spans + self.assertEqual(len(spans), 2) + ot_span, dd_span = spans + + # confirm the parenting + self.assertIsNone(ot_span.parent_id) + self.assertEqual(dd_span.parent_id, ot_span.span_id) + + self.assertEqual(ot_span.resource, 'ot_span') + self.assertEqual(dd_span.get_tag('aws.operation'), 'DescribeInstances') + assert_span_http_status_code(dd_span, 200) + self.assertEqual(dd_span.get_tag(http.METHOD), 'POST') + self.assertEqual(dd_span.get_tag('aws.region'), 'us-west-2') + + with ot_tracer.start_active_span('ot_span'): + ec2.run_instances(21) + spans = writer.pop() + assert spans + self.assertEqual(len(spans), 2) + ot_span, dd_span = spans + + # confirm the parenting + self.assertIsNone(ot_span.parent_id) + self.assertEqual(dd_span.parent_id, ot_span.span_id) + + self.assertEqual(dd_span.get_tag('aws.operation'), 'RunInstances') + assert_span_http_status_code(dd_span, 200) + self.assertEqual(dd_span.get_tag(http.METHOD), 'POST') + self.assertEqual(dd_span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(dd_span.service, 'test-boto-tracing.ec2') + self.assertEqual(dd_span.resource, 'ec2.runinstances') + self.assertEqual(dd_span.name, 'ec2.command') diff --git a/tests/contrib/botocore/__init__.py b/tests/contrib/botocore/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/botocore/test.py b/tests/contrib/botocore/test.py new file mode 100644 index 0000000000..e99aa8ce8b --- /dev/null +++ b/tests/contrib/botocore/test.py @@ -0,0 +1,249 @@ +# 3p +import botocore.session +from moto import mock_s3, mock_ec2, mock_lambda, mock_sqs, mock_kinesis, mock_kms + +# project +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.botocore.patch import patch, unpatch +from ddtrace.compat import stringify + +# testing +from tests.opentracer.utils import init_tracer +from ...base import 
BaseTracerTestCase +from ...utils import assert_span_http_status_code + + +class BotocoreTest(BaseTracerTestCase): + """Botocore integration testsuite""" + + TEST_SERVICE = 'test-botocore-tracing' + + def setUp(self): + patch() + + self.session = botocore.session.get_session() + self.session.set_credentials(access_key='access-key', secret_key='secret-key') + + super(BotocoreTest, self).setUp() + + def tearDown(self): + super(BotocoreTest, self).tearDown() + + unpatch() + + @mock_ec2 + def test_traced_client(self): + ec2 = self.session.create_client('ec2', region_name='us-west-2') + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + + ec2.describe_instances() + + spans = self.get_spans() + assert spans + span = spans[0] + self.assertEqual(len(spans), 1) + self.assertEqual(span.get_tag('aws.agent'), 'botocore') + self.assertEqual(span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(span.get_tag('aws.operation'), 'DescribeInstances') + assert_span_http_status_code(span, 200) + self.assertEqual(span.get_metric('retry_attempts'), 0) + self.assertEqual(span.service, 'test-botocore-tracing.ec2') + self.assertEqual(span.resource, 'ec2.describeinstances') + self.assertEqual(span.name, 'ec2.command') + self.assertEqual(span.span_type, 'http') + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + @mock_ec2 + def test_traced_client_analytics(self): + with self.override_config( + 'botocore', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + ec2 = self.session.create_client('ec2', region_name='us-west-2') + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + ec2.describe_instances() + + spans = self.get_spans() + assert spans + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + @mock_s3 + def test_s3_client(self): + s3 = self.session.create_client('s3', region_name='us-west-2') + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + + s3.list_buckets() + s3.list_buckets() + + spans = self.get_spans() + assert spans + span = spans[0] + self.assertEqual(len(spans), 2) + self.assertEqual(span.get_tag('aws.operation'), 'ListBuckets') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'test-botocore-tracing.s3') + self.assertEqual(span.resource, 's3.listbuckets') + + # testing for span error + self.reset() + try: + s3.list_objects(bucket='mybucket') + except Exception: + spans = self.get_spans() + assert spans + span = spans[0] + self.assertEqual(span.error, 1) + self.assertEqual(span.resource, 's3.listobjects') + + @mock_s3 + def test_s3_put(self): + params = dict(Key='foo', Bucket='mybucket', Body=b'bar') + s3 = self.session.create_client('s3', region_name='us-west-2') + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(s3) + s3.create_bucket(Bucket='mybucket') + s3.put_object(**params) + + spans = self.get_spans() + assert spans + span = spans[0] + self.assertEqual(len(spans), 2) + self.assertEqual(span.get_tag('aws.operation'), 'CreateBucket') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'test-botocore-tracing.s3') + self.assertEqual(span.resource, 's3.createbucket') + self.assertEqual(spans[1].get_tag('aws.operation'), 'PutObject') + self.assertEqual(spans[1].resource, 's3.putobject') + self.assertEqual(spans[1].get_tag('params.Key'), stringify(params['Key'])) + self.assertEqual(spans[1].get_tag('params.Bucket'), stringify(params['Bucket'])) + # confirm blacklisted + self.assertIsNone(spans[1].get_tag('params.Body')) + + @mock_sqs + def 
test_sqs_client(self): + sqs = self.session.create_client('sqs', region_name='us-east-1') + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sqs) + + sqs.list_queues() + + spans = self.get_spans() + assert spans + span = spans[0] + self.assertEqual(len(spans), 1) + self.assertEqual(span.get_tag('aws.region'), 'us-east-1') + self.assertEqual(span.get_tag('aws.operation'), 'ListQueues') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'test-botocore-tracing.sqs') + self.assertEqual(span.resource, 'sqs.listqueues') + + @mock_kinesis + def test_kinesis_client(self): + kinesis = self.session.create_client('kinesis', region_name='us-east-1') + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(kinesis) + + kinesis.list_streams() + + spans = self.get_spans() + assert spans + span = spans[0] + self.assertEqual(len(spans), 1) + self.assertEqual(span.get_tag('aws.region'), 'us-east-1') + self.assertEqual(span.get_tag('aws.operation'), 'ListStreams') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'test-botocore-tracing.kinesis') + self.assertEqual(span.resource, 'kinesis.liststreams') + + @mock_kinesis + def test_unpatch(self): + kinesis = self.session.create_client('kinesis', region_name='us-east-1') + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(kinesis) + + unpatch() + + kinesis.list_streams() + spans = self.get_spans() + assert not spans, spans + + @mock_sqs + def test_double_patch(self): + sqs = self.session.create_client('sqs', region_name='us-east-1') + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(sqs) + + patch() + patch() + + sqs.list_queues() + + spans = self.get_spans() + assert spans + self.assertEqual(len(spans), 1) + + @mock_lambda + def test_lambda_client(self): + lamb = self.session.create_client('lambda', region_name='us-east-1') + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(lamb) + + lamb.list_functions() + + spans = self.get_spans() + assert spans + span = spans[0] + self.assertEqual(len(spans), 1) + self.assertEqual(span.get_tag('aws.region'), 'us-east-1') + self.assertEqual(span.get_tag('aws.operation'), 'ListFunctions') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'test-botocore-tracing.lambda') + self.assertEqual(span.resource, 'lambda.listfunctions') + + @mock_kms + def test_kms_client(self): + kms = self.session.create_client('kms', region_name='us-east-1') + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(kms) + + kms.list_keys(Limit=21) + + spans = self.get_spans() + assert spans + span = spans[0] + self.assertEqual(len(spans), 1) + self.assertEqual(span.get_tag('aws.region'), 'us-east-1') + self.assertEqual(span.get_tag('aws.operation'), 'ListKeys') + assert_span_http_status_code(span, 200) + self.assertEqual(span.service, 'test-botocore-tracing.kms') + self.assertEqual(span.resource, 'kms.listkeys') + + # checking for protection on sts against security leak + self.assertIsNone(span.get_tag('params')) + + @mock_ec2 + def test_traced_client_ot(self): + """OpenTracing version of test_traced_client.""" + ot_tracer = init_tracer('ec2_svc', self.tracer) + + with ot_tracer.start_active_span('ec2_op'): + ec2 = self.session.create_client('ec2', region_name='us-west-2') + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(ec2) + ec2.describe_instances() + + spans = self.get_spans() + assert spans + self.assertEqual(len(spans), 2) + + ot_span, dd_span = spans + + # confirm the parenting + self.assertIsNone(ot_span.parent_id) + 
self.assertEqual(dd_span.parent_id, ot_span.span_id) + + self.assertEqual(ot_span.name, 'ec2_op') + self.assertEqual(ot_span.service, 'ec2_svc') + + self.assertEqual(dd_span.get_tag('aws.agent'), 'botocore') + self.assertEqual(dd_span.get_tag('aws.region'), 'us-west-2') + self.assertEqual(dd_span.get_tag('aws.operation'), 'DescribeInstances') + assert_span_http_status_code(dd_span, 200) + self.assertEqual(dd_span.get_metric('retry_attempts'), 0) + self.assertEqual(dd_span.service, 'test-botocore-tracing.ec2') + self.assertEqual(dd_span.resource, 'ec2.describeinstances') + self.assertEqual(dd_span.name, 'ec2.command') diff --git a/tests/contrib/bottle/__init__.py b/tests/contrib/bottle/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/bottle/test.py b/tests/contrib/bottle/test.py new file mode 100644 index 0000000000..28af1a808f --- /dev/null +++ b/tests/contrib/bottle/test.py @@ -0,0 +1,413 @@ +import bottle +import ddtrace +import webtest + +from tests.opentracer.utils import init_tracer +from ...base import BaseTracerTestCase +from ...utils import assert_span_http_status_code + +from ddtrace import compat +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.bottle import TracePlugin +from ddtrace.ext import http + +SERVICE = 'bottle-app' + + +class TraceBottleTest(BaseTracerTestCase): + """ + Ensures that Bottle is properly traced. + """ + def setUp(self): + super(TraceBottleTest, self).setUp() + + # provide a dummy tracer + self._original_tracer = ddtrace.tracer + ddtrace.tracer = self.tracer + # provide a Bottle app + self.app = bottle.Bottle() + + def tearDown(self): + # restore the tracer + ddtrace.tracer = self._original_tracer + + def _trace_app(self, tracer=None): + self.app.install(TracePlugin(service=SERVICE, tracer=tracer)) + self.app = webtest.TestApp(self.app) + + def test_200(self, query_string=''): + if query_string: + fqs = '?' 
+ query_string + else: + fqs = '' + + # setup our test app + @self.app.route('/hi/') + def hi(name): + return 'hi %s' % name + self._trace_app(self.tracer) + + # make a request + resp = self.app.get('/hi/dougie' + fqs) + assert resp.status_int == 200 + assert compat.to_unicode(resp.body) == u'hi dougie' + # validate it's traced + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.name == 'bottle.request' + assert s.service == 'bottle-app' + assert s.span_type == 'web' + assert s.resource == 'GET /hi/' + assert_span_http_status_code(s, 200) + assert s.get_tag('http.method') == 'GET' + assert s.get_tag(http.URL) == 'http://localhost:80/hi/dougie' + if ddtrace.config.bottle.trace_query_string: + assert s.get_tag(http.QUERY_STRING) == query_string + else: + assert http.QUERY_STRING not in s.meta + + services = self.tracer.writer.pop_services() + assert services == {} + + def test_query_string(self): + return self.test_200('foo=bar') + + def test_query_string_multi_keys(self): + return self.test_200('foo=bar&foo=baz&x=y') + + def test_query_string_trace(self): + with self.override_http_config('bottle', dict(trace_query_string=True)): + return self.test_200('foo=bar') + + def test_query_string_multi_keys_trace(self): + with self.override_http_config('bottle', dict(trace_query_string=True)): + return self.test_200('foo=bar&foo=baz&x=y') + + def test_2xx(self): + @self.app.route('/2xx') + def handled(): + return bottle.HTTPResponse("", status=202) + self._trace_app(self.tracer) + + # make a request + try: + self.app.get('/2xx') + except webtest.AppError: + pass + + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.resource == 'GET /2xx' + assert_span_http_status_code(s, 202) + assert s.error == 0 + + def test_400_return(self): + @self.app.route('/400_return') + def handled400(): + return bottle.HTTPResponse(status=400) + self._trace_app(self.tracer) + + # make a request + try: + self.app.get('/400_return') + except webtest.AppError: + pass + + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.name == 'bottle.request' + assert s.service == 'bottle-app' + assert s.resource == 'GET /400_return' + assert_span_http_status_code(s, 400) + assert s.get_tag('http.method') == 'GET' + assert s.get_tag(http.URL) == 'http://localhost:80/400_return' + assert s.error == 0 + + def test_400_raise(self): + @self.app.route('/400_raise') + def handled400(): + raise bottle.HTTPResponse(status=400) + self._trace_app(self.tracer) + + # make a request + try: + self.app.get('/400_raise') + except webtest.AppError: + pass + + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.name == 'bottle.request' + assert s.service == 'bottle-app' + assert s.resource == 'GET /400_raise' + assert_span_http_status_code(s, 400) + assert s.get_tag('http.method') == 'GET' + assert s.get_tag(http.URL) == 'http://localhost:80/400_raise' + assert s.error == 1 + + def test_500(self): + @self.app.route('/hi') + def hi(): + raise Exception('oh no') + self._trace_app(self.tracer) + + # make a request + try: + self.app.get('/hi') + except webtest.AppError: + pass + + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.name == 'bottle.request' + assert s.service == 'bottle-app' + assert s.resource == 'GET /hi' + assert_span_http_status_code(s, 500) + assert s.get_tag('http.method') == 'GET' + assert s.get_tag(http.URL) == 'http://localhost:80/hi' + assert s.error == 1 + + def 
test_5XX_response(self): + """ + When a 5XX response is returned + The span error attribute should be 1 + """ + @self.app.route('/5XX-1') + def handled500_1(): + raise bottle.HTTPResponse(status=503) + + @self.app.route('/5XX-2') + def handled500_2(): + raise bottle.HTTPError(status=502) + + @self.app.route('/5XX-3') + def handled500_3(): + bottle.response.status = 503 + return 'hmmm' + + self._trace_app(self.tracer) + + try: + self.app.get('/5XX-1') + except webtest.AppError: + pass + spans = self.tracer.writer.pop() + assert len(spans) == 1 + assert spans[0].error == 1 + + try: + self.app.get('/5XX-2') + except webtest.AppError: + pass + spans = self.tracer.writer.pop() + assert len(spans) == 1 + assert spans[0].error == 1 + + try: + self.app.get('/5XX-3') + except webtest.AppError: + pass + spans = self.tracer.writer.pop() + assert len(spans) == 1 + assert spans[0].error == 1 + + def test_abort(self): + @self.app.route('/hi') + def hi(): + raise bottle.abort(420, 'Enhance Your Calm') + self._trace_app(self.tracer) + + # make a request + try: + self.app.get('/hi') + except webtest.AppError: + pass + + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.name == 'bottle.request' + assert s.service == 'bottle-app' + assert s.resource == 'GET /hi' + assert_span_http_status_code(s, 420) + assert s.get_tag('http.method') == 'GET' + assert s.get_tag(http.URL) == 'http://localhost:80/hi' + + def test_bottle_global_tracer(self): + # without providing a Tracer instance, it should work + @self.app.route('/home/') + def home(): + return 'Hello world' + self._trace_app() + + # make a request + resp = self.app.get('/home/') + assert resp.status_int == 200 + # validate it's traced + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.name == 'bottle.request' + assert s.service == 'bottle-app' + assert s.resource == 'GET /home/' + assert_span_http_status_code(s, 200) + assert s.get_tag('http.method') == 'GET' + assert s.get_tag(http.URL) == 'http://localhost:80/home/' + + def test_analytics_global_on_integration_default(self): + """ + When making a request + When an integration trace search is not event sample rate is not set and globally trace search is enabled + We expect the root span to have the appropriate tag + """ + # setup our test app + @self.app.route('/hi/') + def hi(name): + return 'hi %s' % name + self._trace_app(self.tracer) + + with self.override_global_config(dict(analytics_enabled=True)): + resp = self.app.get('/hi/dougie') + assert resp.status_int == 200 + assert compat.to_unicode(resp.body) == u'hi dougie' + + root = self.get_root_span() + root.assert_matches( + name='bottle.request', + metrics={ + ANALYTICS_SAMPLE_RATE_KEY: 1.0, + }, + ) + + for span in self.spans: + if span == root: + continue + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_global_on_integration_on(self): + """ + When making a request + When an integration trace search is enabled and sample rate is set and globally trace search is enabled + We expect the root span to have the appropriate tag + """ + # setup our test app + @self.app.route('/hi/') + def hi(name): + return 'hi %s' % name + self._trace_app(self.tracer) + + with self.override_global_config(dict(analytics_enabled=True)): + with self.override_config('bottle', dict(analytics_enabled=True, analytics_sample_rate=0.5)): + resp = self.app.get('/hi/dougie') + assert resp.status_int == 200 + assert compat.to_unicode(resp.body) == u'hi dougie' + + root = 
self.get_root_span() + root.assert_matches( + name='bottle.request', + metrics={ + ANALYTICS_SAMPLE_RATE_KEY: 0.5, + }, + ) + + for span in self.spans: + if span == root: + continue + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_global_off_integration_default(self): + """ + When making a request + When integration trace search is not enabled and the sample rate is not set, and trace search is disabled globally + We expect the root span not to include the tag + """ + # setup our test app + @self.app.route('/hi/<name>') + def hi(name): + return 'hi %s' % name + self._trace_app(self.tracer) + + with self.override_global_config(dict(analytics_enabled=False)): + resp = self.app.get('/hi/dougie') + assert resp.status_int == 200 + assert compat.to_unicode(resp.body) == u'hi dougie' + + root = self.get_root_span() + self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + for span in self.spans: + if span == root: + continue + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_global_off_integration_on(self): + """ + When making a request + When integration trace search is enabled with a sample rate set, but trace search is disabled globally + We expect the root span to have the appropriate tag + """ + # setup our test app + @self.app.route('/hi/<name>') + def hi(name): + return 'hi %s' % name + self._trace_app(self.tracer) + + with self.override_global_config(dict(analytics_enabled=False)): + with self.override_config('bottle', dict(analytics_enabled=True, analytics_sample_rate=0.5)): + resp = self.app.get('/hi/dougie') + assert resp.status_int == 200 + assert compat.to_unicode(resp.body) == u'hi dougie' + + root = self.get_root_span() + root.assert_matches( + name='bottle.request', + metrics={ + ANALYTICS_SAMPLE_RATE_KEY: 0.5, + }, + ) + + for span in self.spans: + if span == root: + continue + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_200_ot(self): + ot_tracer = init_tracer('my_svc', self.tracer) + + # setup our test app + @self.app.route('/hi/<name>') + def hi(name): + return 'hi %s' % name + self._trace_app(self.tracer) + + # make a request + with ot_tracer.start_active_span('ot_span'): + resp = self.app.get('/hi/dougie') + + assert resp.status_int == 200 + assert compat.to_unicode(resp.body) == u'hi dougie' + # validate it's traced + spans = self.tracer.writer.pop() + assert len(spans) == 2 + ot_span, dd_span = spans + + # confirm the parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.resource == 'ot_span' + + assert dd_span.name == 'bottle.request' + assert dd_span.service == 'bottle-app' + assert dd_span.resource == 'GET /hi/<name>' + assert_span_http_status_code(dd_span, 200) + assert dd_span.get_tag('http.method') == 'GET' + assert dd_span.get_tag(http.URL) == 'http://localhost:80/hi/dougie' + + services = self.tracer.writer.pop_services() + assert services == {} diff --git a/tests/contrib/bottle/test_autopatch.py b/tests/contrib/bottle/test_autopatch.py new file mode 100644 index 0000000000..e4ca346860 --- /dev/null +++ b/tests/contrib/bottle/test_autopatch.py @@ -0,0 +1,98 @@ +import bottle +import ddtrace +import webtest + +from unittest import TestCase +from tests.test_tracer import get_dummy_tracer +from ...utils import assert_span_http_status_code + +from ddtrace import compat + + +SERVICE = 'bottle-app' + + +class TraceBottleTest(TestCase): + """ + Ensures that Bottle is properly traced. 
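+ Unlike the suite in test.py, no TracePlugin is installed manually here: the app is expected to be + patched automatically (e.g. when run under ddtrace-run), so _trace_app only wraps the app with WebTest.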
+ """ + def setUp(self): + # provide a dummy tracer + self.tracer = get_dummy_tracer() + self._original_tracer = ddtrace.tracer + ddtrace.tracer = self.tracer + # provide a Bottle app + self.app = bottle.Bottle() + + def tearDown(self): + # restore the tracer + ddtrace.tracer = self._original_tracer + + def _trace_app(self, tracer=None): + self.app = webtest.TestApp(self.app) + + def test_200(self): + # setup our test app + @self.app.route('/hi/') + def hi(name): + return 'hi %s' % name + self._trace_app(self.tracer) + + # make a request + resp = self.app.get('/hi/dougie') + assert resp.status_int == 200 + assert compat.to_unicode(resp.body) == u'hi dougie' + # validate it's traced + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.name == 'bottle.request' + assert s.service == 'bottle-app' + assert s.resource == 'GET /hi/' + assert_span_http_status_code(s, 200) + assert s.get_tag('http.method') == 'GET' + + services = self.tracer.writer.pop_services() + assert services == {} + + def test_500(self): + @self.app.route('/hi') + def hi(): + raise Exception('oh no') + self._trace_app(self.tracer) + + # make a request + try: + resp = self.app.get('/hi') + assert resp.status_int == 500 + except Exception: + pass + + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.name == 'bottle.request' + assert s.service == 'bottle-app' + assert s.resource == 'GET /hi' + assert_span_http_status_code(s, 500) + assert s.get_tag('http.method') == 'GET' + + def test_bottle_global_tracer(self): + # without providing a Tracer instance, it should work + @self.app.route('/home/') + def home(): + return 'Hello world' + self._trace_app() + + # make a request + resp = self.app.get('/home/') + assert resp.status_int == 200 + # validate it's traced + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.name == 'bottle.request' + assert s.service == 'bottle-app' + assert s.resource == 'GET /home/' + assert_span_http_status_code(s, 200) + assert s.get_tag('http.method') == 'GET' diff --git a/tests/contrib/bottle/test_distributed.py b/tests/contrib/bottle/test_distributed.py new file mode 100644 index 0000000000..742852c3ab --- /dev/null +++ b/tests/contrib/bottle/test_distributed.py @@ -0,0 +1,91 @@ +import bottle +import webtest + +import ddtrace +from ddtrace import compat +from ddtrace.contrib.bottle import TracePlugin + +from ...base import BaseTracerTestCase +from ...utils import assert_span_http_status_code + +SERVICE = 'bottle-app' + + +class TraceBottleDistributedTest(BaseTracerTestCase): + """ + Ensures that Bottle is properly traced. 
+ """ + def setUp(self): + super(TraceBottleDistributedTest, self).setUp() + + # provide a dummy tracer + self._original_tracer = ddtrace.tracer + ddtrace.tracer = self.tracer + # provide a Bottle app + self.app = bottle.Bottle() + + def tearDown(self): + # restore the tracer + ddtrace.tracer = self._original_tracer + + def _trace_app_distributed(self, tracer=None): + self.app.install(TracePlugin(service=SERVICE, tracer=tracer)) + self.app = webtest.TestApp(self.app) + + def _trace_app_not_distributed(self, tracer=None): + self.app.install(TracePlugin(service=SERVICE, tracer=tracer, distributed_tracing=False)) + self.app = webtest.TestApp(self.app) + + def test_distributed(self): + # setup our test app + @self.app.route('/hi/') + def hi(name): + return 'hi %s' % name + self._trace_app_distributed(self.tracer) + + # make a request + headers = {'x-datadog-trace-id': '123', + 'x-datadog-parent-id': '456'} + resp = self.app.get('/hi/dougie', headers=headers) + assert resp.status_int == 200 + assert compat.to_unicode(resp.body) == u'hi dougie' + + # validate it's traced + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.name == 'bottle.request' + assert s.service == 'bottle-app' + assert s.resource == 'GET /hi/' + assert_span_http_status_code(s, 200) + assert s.get_tag('http.method') == 'GET' + # check distributed headers + assert 123 == s.trace_id + assert 456 == s.parent_id + + def test_not_distributed(self): + # setup our test app + @self.app.route('/hi/') + def hi(name): + return 'hi %s' % name + self._trace_app_not_distributed(self.tracer) + + # make a request + headers = {'x-datadog-trace-id': '123', + 'x-datadog-parent-id': '456'} + resp = self.app.get('/hi/dougie', headers=headers) + assert resp.status_int == 200 + assert compat.to_unicode(resp.body) == u'hi dougie' + + # validate it's traced + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.name == 'bottle.request' + assert s.service == 'bottle-app' + assert s.resource == 'GET /hi/' + assert_span_http_status_code(s, 200) + assert s.get_tag('http.method') == 'GET' + # check distributed headers + assert 123 != s.trace_id + assert 456 != s.parent_id diff --git a/tests/contrib/cassandra/__init__.py b/tests/contrib/cassandra/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/cassandra/test.py b/tests/contrib/cassandra/test.py new file mode 100644 index 0000000000..e028836658 --- /dev/null +++ b/tests/contrib/cassandra/test.py @@ -0,0 +1,458 @@ +# stdlib +import contextlib +import logging +import unittest +from threading import Event + +# 3p +from cassandra.cluster import Cluster, ResultSet +from cassandra.query import BatchStatement, SimpleStatement + +# project +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.cassandra.patch import patch, unpatch +from ddtrace.contrib.cassandra.session import get_traced_cassandra, SERVICE +from ddtrace.ext import net, cassandra as cassx, errors +from ddtrace import config, Pin + +# testing +from tests.contrib.config import CASSANDRA_CONFIG +from tests.opentracer.utils import init_tracer +from tests.test_tracer import get_dummy_tracer + +# Oftentimes our tests fails because Cassandra connection timeouts during keyspace drop. Slowness in keyspace drop +# is known and is due to 'auto_snapshot' configuration. 
In our test env we should disable it, but the official cassandra +# image that we are using only allows us to configure a few settings: +# https://github.com/docker-library/cassandra/blob/4474c6c5cc2a81ee57c5615aae00555fca7e26a6/3.11/docker-entrypoint.sh#L51 +# So for now we just increase the timeout; if this is not enough, we may want to extend the official image with our own +# custom image. +CONNECTION_TIMEOUT_SECS = 20 # override the default value of 5 + +logging.getLogger('cassandra').setLevel(logging.INFO) + + +def setUpModule(): + # skip the whole module if the Cluster is not available + if not Cluster: + raise unittest.SkipTest('cassandra.cluster.Cluster is not available.') + + # create the KEYSPACE for this test module + cluster = Cluster(port=CASSANDRA_CONFIG['port'], connect_timeout=CONNECTION_TIMEOUT_SECS) + session = cluster.connect() + session.execute('DROP KEYSPACE IF EXISTS test', timeout=10) + session.execute( + "CREATE KEYSPACE if not exists test WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor': 1};" + ) + session.execute('CREATE TABLE if not exists test.person (name text PRIMARY KEY, age int, description text)') + session.execute('CREATE TABLE if not exists test.person_write (name text PRIMARY KEY, age int, description text)') + session.execute("INSERT INTO test.person (name, age, description) VALUES ('Cassandra', 100, 'A cruel mistress')") + session.execute( + "INSERT INTO test.person (name, age, description) VALUES ('Athena', 100, 'Whose shield is thunder')" + ) + session.execute("INSERT INTO test.person (name, age, description) VALUES ('Calypso', 100, 'Softly-braided nymph')") + + +def tearDownModule(): + # destroy the KEYSPACE + cluster = Cluster(port=CASSANDRA_CONFIG['port'], connect_timeout=CONNECTION_TIMEOUT_SECS) + session = cluster.connect() + session.execute('DROP TABLE IF EXISTS test.person') + session.execute('DROP TABLE IF EXISTS test.person_write') + session.execute('DROP KEYSPACE IF EXISTS test', timeout=10) + + +class CassandraBase(object): + """ + Needs a running Cassandra + """ + TEST_QUERY = "SELECT * from test.person WHERE name = 'Cassandra'" + TEST_QUERY_PAGINATED = 'SELECT * from test.person' + TEST_KEYSPACE = 'test' + TEST_PORT = CASSANDRA_CONFIG['port'] + TEST_SERVICE = 'test-cassandra' + + def _traced_session(self): + # implement me + pass + + @contextlib.contextmanager + def override_config(self, integration, values): + """ + Temporarily override an integration configuration value + >>> with self.override_config('flask', dict(service_name='test-service')): + ... 
# Your test + """ + options = getattr(config, integration) + + original = dict( + (key, options.get(key)) + for key in values.keys() + ) + + options.update(values) + try: + yield + finally: + options.update(original) + + def setUp(self): + self.cluster = Cluster(port=CASSANDRA_CONFIG['port']) + self.session = self.cluster.connect() + + def _assert_result_correct(self, result): + assert len(result.current_rows) == 1 + for r in result: + assert r.name == 'Cassandra' + assert r.age == 100 + assert r.description == 'A cruel mistress' + + def _test_query_base(self, execute_fn): + session, tracer = self._traced_session() + writer = tracer.writer + result = execute_fn(session, self.TEST_QUERY) + self._assert_result_correct(result) + + spans = writer.pop() + assert spans, spans + + # one span for the actual query + assert len(spans) == 1 + + query = spans[0] + assert query.service == self.TEST_SERVICE + assert query.resource == self.TEST_QUERY + assert query.span_type == 'cassandra' + + assert query.get_tag(cassx.KEYSPACE) == self.TEST_KEYSPACE + assert query.get_metric(net.TARGET_PORT) == self.TEST_PORT + assert query.get_metric(cassx.ROW_COUNT) == 1 + assert query.get_tag(cassx.PAGE_NUMBER) is None + assert query.get_tag(cassx.PAGINATED) == 'False' + assert query.get_tag(net.TARGET_HOST) == '127.0.0.1' + + # confirm no analytics sample rate set by default + assert query.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None + + def test_query(self): + def execute_fn(session, query): + return session.execute(query) + self._test_query_base(execute_fn) + + def test_query_analytics_with_rate(self): + with self.override_config( + 'cassandra', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + session, tracer = self._traced_session() + session.execute(self.TEST_QUERY) + + writer = tracer.writer + spans = writer.pop() + assert spans, spans + # one span for the actual query + assert len(spans) == 1 + query = spans[0] + # confirm the configured analytics sample rate is set + assert query.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.5 + + def test_query_analytics_without_rate(self): + with self.override_config( + 'cassandra', + dict(analytics_enabled=True) + ): + session, tracer = self._traced_session() + session.execute(self.TEST_QUERY) + + writer = tracer.writer + spans = writer.pop() + assert spans, spans + # one span for the actual query + assert len(spans) == 1 + query = spans[0] + # confirm the analytics sample rate defaults to 1.0 when enabled without an explicit rate + assert query.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0 + + def test_query_ot(self): + """Ensure that cassandra works with the opentracer.""" + def execute_fn(session, query): + return session.execute(query) + + session, tracer = self._traced_session() + ot_tracer = init_tracer('cass_svc', tracer) + writer = tracer.writer + + with ot_tracer.start_active_span('cass_op'): + result = execute_fn(session, self.TEST_QUERY) + self._assert_result_correct(result) + + spans = writer.pop() + assert spans, spans + + # one span for the OT operation and one for the actual query + assert len(spans) == 2 + ot_span, dd_span = spans + + # confirm parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.name == 'cass_op' + assert ot_span.service == 'cass_svc' + + assert dd_span.service == self.TEST_SERVICE + assert dd_span.resource == self.TEST_QUERY + assert dd_span.span_type == 'cassandra' + + assert dd_span.get_tag(cassx.KEYSPACE) == self.TEST_KEYSPACE + assert dd_span.get_metric(net.TARGET_PORT) == self.TEST_PORT + assert dd_span.get_metric(cassx.ROW_COUNT) == 1 + 
assert dd_span.get_tag(cassx.PAGE_NUMBER) is None + assert dd_span.get_tag(cassx.PAGINATED) == 'False' + assert dd_span.get_tag(net.TARGET_HOST) == '127.0.0.1' + + def test_query_async(self): + def execute_fn(session, query): + event = Event() + result = [] + future = session.execute_async(query) + + def callback(results): + result.append(ResultSet(future, results)) + event.set() + + future.add_callback(callback) + event.wait() + return result[0] + self._test_query_base(execute_fn) + + def test_query_async_clearing_callbacks(self): + def execute_fn(session, query): + future = session.execute_async(query) + future.clear_callbacks() + return future.result() + self._test_query_base(execute_fn) + + def test_span_is_removed_from_future(self): + session, tracer = self._traced_session() + future = session.execute_async(self.TEST_QUERY) + future.result() + span = getattr(future, '_ddtrace_current_span', None) + assert span is None + + def test_paginated_query(self): + session, tracer = self._traced_session() + writer = tracer.writer + statement = SimpleStatement(self.TEST_QUERY_PAGINATED, fetch_size=1) + result = session.execute(statement) + # iterate over all pages + results = list(result) + assert len(results) == 3 + + spans = writer.pop() + assert spans, spans + + # There are 4 spans for 3 results since the driver makes a request with + # no result to check that it has reached the last page + assert len(spans) == 4 + + for i in range(4): + query = spans[i] + assert query.service == self.TEST_SERVICE + assert query.resource == self.TEST_QUERY_PAGINATED + assert query.span_type == 'cassandra' + + assert query.get_tag(cassx.KEYSPACE) == self.TEST_KEYSPACE + assert query.get_metric(net.TARGET_PORT) == self.TEST_PORT + if i == 3: + assert query.get_metric(cassx.ROW_COUNT) == 0 + else: + assert query.get_metric(cassx.ROW_COUNT) == 1 + assert query.get_tag(net.TARGET_HOST) == '127.0.0.1' + assert query.get_tag(cassx.PAGINATED) == 'True' + assert query.get_metric(cassx.PAGE_NUMBER) == i + 1 + + def test_trace_with_service(self): + session, tracer = self._traced_session() + writer = tracer.writer + session.execute(self.TEST_QUERY) + spans = writer.pop() + assert spans + assert len(spans) == 1 + query = spans[0] + assert query.service == self.TEST_SERVICE + + def test_trace_error(self): + session, tracer = self._traced_session() + writer = tracer.writer + + try: + session.execute('select * from test.i_dont_exist limit 1') + except Exception: + pass + else: + assert 0 + + spans = writer.pop() + assert spans + query = spans[0] + assert query.error == 1 + for k in (errors.ERROR_MSG, errors.ERROR_TYPE): + assert query.get_tag(k) + + def test_bound_statement(self): + session, tracer = self._traced_session() + writer = tracer.writer + + query = 'INSERT INTO test.person_write (name, age, description) VALUES (?, ?, ?)' + prepared = session.prepare(query) + session.execute(prepared, ('matt', 34, 'can')) + + prepared = session.prepare(query) + bound_stmt = prepared.bind(('leo', 16, 'fr')) + session.execute(bound_stmt) + + spans = writer.pop() + assert len(spans) == 2 + for s in spans: + assert s.resource == query + + def test_batch_statement(self): + session, tracer = self._traced_session() + writer = tracer.writer + + batch = BatchStatement() + batch.add( + SimpleStatement('INSERT INTO test.person_write (name, age, description) VALUES (%s, %s, %s)'), + ('Joe', 1, 'a'), + ) + batch.add( + SimpleStatement('INSERT INTO test.person_write (name, age, description) VALUES (%s, %s, %s)'), + ('Jane', 2, 'b'), + ) + 
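# the whole batch is expected to execute as a single span with resource 'BatchStatement' +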
session.execute(batch) + + spans = writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.resource == 'BatchStatement' + assert s.get_metric('cassandra.batch_size') == 2 + assert 'test.person' in s.get_tag('cassandra.query') + + def test_batched_bound_statement(self): + session, tracer = self._traced_session() + writer = tracer.writer + + batch = BatchStatement() + + prepared_statement = session.prepare('INSERT INTO test.person_write (name, age, description) VALUES (?, ?, ?)') + batch.add( + prepared_statement.bind(('matt', 34, 'can')) + ) + session.execute(batch) + + spans = writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.resource == 'BatchStatement' + assert s.get_tag('cassandra.query') == '' + + +class TestCassPatchDefault(unittest.TestCase, CassandraBase): + """Test Cassandra instrumentation with patching and default configuration""" + + TEST_SERVICE = SERVICE + + def tearDown(self): + unpatch() + + def setUp(self): + CassandraBase.setUp(self) + patch() + + def _traced_session(self): + tracer = get_dummy_tracer() + Pin.get_from(self.cluster).clone(tracer=tracer).onto(self.cluster) + return self.cluster.connect(self.TEST_KEYSPACE), tracer + + +class TestCassPatchAll(TestCassPatchDefault): + """Test Cassandra instrumentation with patching and custom service on all clusters""" + + TEST_SERVICE = 'test-cassandra-patch-all' + + def tearDown(self): + unpatch() + + def setUp(self): + CassandraBase.setUp(self) + patch() + + def _traced_session(self): + tracer = get_dummy_tracer() + # pin the global Cluster to test if they will conflict + Pin(service=self.TEST_SERVICE, tracer=tracer).onto(Cluster) + self.cluster = Cluster(port=CASSANDRA_CONFIG['port']) + + return self.cluster.connect(self.TEST_KEYSPACE), tracer + + +class TestCassPatchOne(TestCassPatchDefault): + """Test Cassandra instrumentation with patching and custom service on one cluster""" + + TEST_SERVICE = 'test-cassandra-patch-one' + + def tearDown(self): + unpatch() + + def setUp(self): + CassandraBase.setUp(self) + patch() + + def _traced_session(self): + tracer = get_dummy_tracer() + # pin the global Cluster to test if they will conflict + Pin(service='not-%s' % self.TEST_SERVICE).onto(Cluster) + self.cluster = Cluster(port=CASSANDRA_CONFIG['port']) + + Pin(service=self.TEST_SERVICE, tracer=tracer).onto(self.cluster) + return self.cluster.connect(self.TEST_KEYSPACE), tracer + + def test_patch_unpatch(self): + # Test patch idempotence + patch() + patch() + + tracer = get_dummy_tracer() + Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster) + + session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE) + session.execute(self.TEST_QUERY) + + spans = tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + + # Test unpatch + unpatch() + + session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE) + session.execute(self.TEST_QUERY) + + spans = tracer.writer.pop() + assert not spans, spans + + # Test patch again + patch() + Pin.get_from(Cluster).clone(tracer=tracer).onto(Cluster) + + session = Cluster(port=CASSANDRA_CONFIG['port']).connect(self.TEST_KEYSPACE) + session.execute(self.TEST_QUERY) + + spans = tracer.writer.pop() + assert spans, spans + + +def test_backwards_compat_get_traced_cassandra(): + cluster = get_traced_cassandra() + session = cluster(port=CASSANDRA_CONFIG['port']).connect() + session.execute('drop table if exists test.person') diff --git a/tests/contrib/celery/__init__.py b/tests/contrib/celery/__init__.py new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/tests/contrib/celery/autopatch.py b/tests/contrib/celery/autopatch.py new file mode 100644 index 0000000000..b66dfa2a99 --- /dev/null +++ b/tests/contrib/celery/autopatch.py @@ -0,0 +1,9 @@ +from ddtrace import Pin + +if __name__ == '__main__': + # have to import celery in order to have the post-import hooks run + import celery + + # now celery.Celery should be patched and should have a pin + assert Pin.get_from(celery.Celery) + print('Test success') diff --git a/tests/contrib/celery/base.py b/tests/contrib/celery/base.py new file mode 100644 index 0000000000..8c12977fe2 --- /dev/null +++ b/tests/contrib/celery/base.py @@ -0,0 +1,41 @@ +from celery import Celery + +from ddtrace import Pin +from ddtrace.compat import PY2 +from ddtrace.contrib.celery import patch, unpatch + +from ..config import REDIS_CONFIG +from ...base import BaseTracerTestCase + + +REDIS_URL = 'redis://127.0.0.1:{port}'.format(port=REDIS_CONFIG['port']) +BROKER_URL = '{redis}/{db}'.format(redis=REDIS_URL, db=0) +BACKEND_URL = '{redis}/{db}'.format(redis=REDIS_URL, db=1) + + +class CeleryBaseTestCase(BaseTracerTestCase): + """Test case that handles a full-fledged Celery application with a + custom tracer. It patches the new Celery application. + """ + + def setUp(self): + super(CeleryBaseTestCase, self).setUp() + + # instrument Celery and create an app with Broker and Result backends + patch() + self.pin = Pin(service='celery-unittest', tracer=self.tracer) + self.app = Celery('celery.test_app', broker=BROKER_URL, backend=BACKEND_URL) + # override pins to use our Dummy Tracer + Pin.override(self.app, tracer=self.tracer) + + def tearDown(self): + # remove instrumentation from Celery + unpatch() + self.app = None + + super(CeleryBaseTestCase, self).tearDown() + + def assert_items_equal(self, a, b): + if PY2: + return self.assertItemsEqual(a, b) + return self.assertCountEqual(a, b) diff --git a/tests/contrib/celery/test_app.py b/tests/contrib/celery/test_app.py new file mode 100644 index 0000000000..ed26f00878 --- /dev/null +++ b/tests/contrib/celery/test_app.py @@ -0,0 +1,21 @@ +import celery + +from ddtrace import Pin +from ddtrace.contrib.celery import unpatch_app + +from .base import CeleryBaseTestCase + + +class CeleryAppTest(CeleryBaseTestCase): + """Ensures the default application is properly instrumented""" + + def test_patch_app(self): + # When celery.App is patched it must include a `Pin` instance + app = celery.Celery() + assert Pin.get_from(app) is not None + + def test_unpatch_app(self): + # When celery.App is unpatched it must not include a `Pin` instance + unpatch_app(celery.Celery) + app = celery.Celery() + assert Pin.get_from(app) is None diff --git a/tests/contrib/celery/test_autopatch.py b/tests/contrib/celery/test_autopatch.py new file mode 100644 index 0000000000..de31352ffc --- /dev/null +++ b/tests/contrib/celery/test_autopatch.py @@ -0,0 +1,13 @@ +#!/usr/bin/env python +import subprocess +import unittest + + +class DdtraceRunTest(unittest.TestCase): + """Test that celery is patched successfully if run with ddtrace-run.""" + + def test_autopatch(self): + out = subprocess.check_output( + ['ddtrace-run', 'python', 'tests/contrib/celery/autopatch.py'] + ) + assert out.startswith(b'Test success') diff --git a/tests/contrib/celery/test_integration.py b/tests/contrib/celery/test_integration.py new file mode 100644 index 0000000000..a93ec0f95f --- /dev/null +++ b/tests/contrib/celery/test_integration.py @@ -0,0 +1,428 @@ +import celery +from celery.exceptions import Retry + 
+from ddtrace.contrib.celery import patch, unpatch + +from .base import CeleryBaseTestCase + +from tests.opentracer.utils import init_tracer + + +class MyException(Exception): + pass + + +class CeleryIntegrationTask(CeleryBaseTestCase): + """Ensures that the tracer works properly with a real Celery application + without breaking the Application or Task API. + """ + + def test_concurrent_delays(self): + # it should create one trace for each delayed execution + @self.app.task + def fn_task(): + return 42 + + for x in range(100): + fn_task.delay() + + traces = self.tracer.writer.pop_traces() + assert 100 == len(traces) + + def test_idempotent_patch(self): + # calling patch() twice doesn't have side effects + patch() + + @self.app.task + def fn_task(): + return 42 + + t = fn_task.apply() + assert t.successful() + assert 42 == t.result + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + def test_idempotent_unpatch(self): + # calling unpatch() twice doesn't have side effects + unpatch() + unpatch() + + @self.app.task + def fn_task(): + return 42 + + t = fn_task.apply() + assert t.successful() + assert 42 == t.result + + traces = self.tracer.writer.pop_traces() + assert 0 == len(traces) + + def test_fn_task_run(self): + # the body of the function is not instrumented so calling it + # directly doesn't create a trace + @self.app.task + def fn_task(): + return 42 + + t = fn_task.run() + assert t == 42 + + traces = self.tracer.writer.pop_traces() + assert 0 == len(traces) + + def test_fn_task_call(self): + # the body of the function is not instrumented so calling it + # directly doesn't create a trace + @self.app.task + def fn_task(): + return 42 + + t = fn_task() + assert t == 42 + + traces = self.tracer.writer.pop_traces() + assert 0 == len(traces) + + def test_fn_task_apply(self): + # it should execute a traced task with a returning value + @self.app.task + def fn_task(): + return 42 + + t = fn_task.apply() + assert t.successful() + assert 42 == t.result + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert span.error == 0 + assert span.name == 'celery.run' + assert span.resource == 'tests.contrib.celery.test_integration.fn_task' + assert span.service == 'celery-worker' + assert span.span_type == 'worker' + assert span.get_tag('celery.id') == t.task_id + assert span.get_tag('celery.action') == 'run' + assert span.get_tag('celery.state') == 'SUCCESS' + + def test_fn_task_apply_bind(self): + # it should execute a traced task with a returning value + @self.app.task(bind=True) + def fn_task(self): + return self + + t = fn_task.apply() + assert t.successful() + assert 'fn_task' in t.result.name + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert span.error == 0 + assert span.name == 'celery.run' + assert span.resource == 'tests.contrib.celery.test_integration.fn_task' + assert span.service == 'celery-worker' + assert span.get_tag('celery.id') == t.task_id + assert span.get_tag('celery.action') == 'run' + assert span.get_tag('celery.state') == 'SUCCESS' + + def test_fn_task_apply_async(self): + # it should execute a traced async task that has parameters + @self.app.task + def fn_task_parameters(user, force_logout=False): + return (user, force_logout) + + t = fn_task_parameters.apply_async(args=['user'], kwargs={'force_logout': True}) + assert 'PENDING' == t.status + + traces = 
self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert span.error == 0 + assert span.name == 'celery.apply' + assert span.resource == 'tests.contrib.celery.test_integration.fn_task_parameters' + assert span.service == 'celery-producer' + assert span.get_tag('celery.id') == t.task_id + assert span.get_tag('celery.action') == 'apply_async' + assert span.get_tag('celery.routing_key') == 'celery' + + def test_fn_task_delay(self): + # using delay shorthand must preserve arguments + @self.app.task + def fn_task_parameters(user, force_logout=False): + return (user, force_logout) + + t = fn_task_parameters.delay('user', force_logout=True) + assert 'PENDING' == t.status + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert span.error == 0 + assert span.name == 'celery.apply' + assert span.resource == 'tests.contrib.celery.test_integration.fn_task_parameters' + assert span.service == 'celery-producer' + assert span.get_tag('celery.id') == t.task_id + assert span.get_tag('celery.action') == 'apply_async' + assert span.get_tag('celery.routing_key') == 'celery' + + def test_fn_exception(self): + # it should catch exceptions in task functions + @self.app.task + def fn_exception(): + raise Exception('Task class is failing') + + t = fn_exception.apply() + assert t.failed() + assert 'Task class is failing' in t.traceback + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert span.name == 'celery.run' + assert span.resource == 'tests.contrib.celery.test_integration.fn_exception' + assert span.service == 'celery-worker' + assert span.get_tag('celery.id') == t.task_id + assert span.get_tag('celery.action') == 'run' + assert span.get_tag('celery.state') == 'FAILURE' + assert span.error == 1 + assert span.get_tag('error.msg') == 'Task class is failing' + assert 'Traceback (most recent call last)' in span.get_tag('error.stack') + assert 'Task class is failing' in span.get_tag('error.stack') + + def test_fn_exception_expected(self): + # it should catch exceptions in task functions + @self.app.task(throws=(MyException,)) + def fn_exception(): + raise MyException('Task class is failing') + + t = fn_exception.apply() + assert t.failed() + assert 'Task class is failing' in t.traceback + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert span.name == 'celery.run' + assert span.resource == 'tests.contrib.celery.test_integration.fn_exception' + assert span.service == 'celery-worker' + assert span.get_tag('celery.id') == t.task_id + assert span.get_tag('celery.action') == 'run' + assert span.get_tag('celery.state') == 'FAILURE' + assert span.error == 0 + + def test_fn_retry_exception(self): + # it should not catch retry exceptions in task functions + @self.app.task + def fn_exception(): + raise Retry('Task class is being retried') + + t = fn_exception.apply() + assert not t.failed() + assert 'Task class is being retried' in t.traceback + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert span.name == 'celery.run' + assert span.resource == 'tests.contrib.celery.test_integration.fn_exception' + assert span.service == 'celery-worker' + assert span.get_tag('celery.id') == t.task_id + assert span.get_tag('celery.action') == 'run' + assert 
span.get_tag('celery.state') == 'RETRY' + assert span.get_tag('celery.retry.reason') == 'Task class is being retried' + + # This type of retrying should not be marked as an exception + assert span.error == 0 + assert not span.get_tag('error.msg') + assert not span.get_tag('error.stack') + + def test_class_task(self): + # it should execute class based tasks with a returning value + class BaseTask(self.app.Task): + def run(self): + return 42 + + t = BaseTask() + # register the Task class if it's available (required in Celery 4.0+) + register_task = getattr(self.app, 'register_task', None) + if register_task is not None: + register_task(t) + + r = t.apply() + assert r.successful() + assert 42 == r.result + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert span.error == 0 + assert span.name == 'celery.run' + assert span.resource == 'tests.contrib.celery.test_integration.BaseTask' + assert span.service == 'celery-worker' + assert span.get_tag('celery.id') == r.task_id + assert span.get_tag('celery.action') == 'run' + assert span.get_tag('celery.state') == 'SUCCESS' + + def test_class_task_exception(self): + # it should catch exceptions in class based tasks + class BaseTask(self.app.Task): + def run(self): + raise Exception('Task class is failing') + + t = BaseTask() + # register the Task class if it's available (required in Celery 4.0+) + register_task = getattr(self.app, 'register_task', None) + if register_task is not None: + register_task(t) + + r = t.apply() + assert r.failed() + assert 'Task class is failing' in r.traceback + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert span.name == 'celery.run' + assert span.resource == 'tests.contrib.celery.test_integration.BaseTask' + assert span.service == 'celery-worker' + assert span.get_tag('celery.id') == r.task_id + assert span.get_tag('celery.action') == 'run' + assert span.get_tag('celery.state') == 'FAILURE' + assert span.error == 1 + assert span.get_tag('error.msg') == 'Task class is failing' + assert 'Traceback (most recent call last)' in span.get_tag('error.stack') + assert 'Task class is failing' in span.get_tag('error.stack') + + def test_class_task_exception_expected(self): + # it should catch exceptions in class based tasks + class BaseTask(self.app.Task): + throws = (MyException,) + + def run(self): + raise MyException('Task class is failing') + + t = BaseTask() + # register the Task class if it's available (required in Celery 4.0+) + register_task = getattr(self.app, 'register_task', None) + if register_task is not None: + register_task(t) + + r = t.apply() + assert r.failed() + assert 'Task class is failing' in r.traceback + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert span.name == 'celery.run' + assert span.resource == 'tests.contrib.celery.test_integration.BaseTask' + assert span.service == 'celery-worker' + assert span.get_tag('celery.id') == r.task_id + assert span.get_tag('celery.action') == 'run' + assert span.get_tag('celery.state') == 'FAILURE' + assert span.error == 0 + + def test_shared_task(self): + # Ensure Django shared tasks are supported + @celery.shared_task + def add(x, y): + return x + y + + res = add.apply([2, 2]) + assert res.result == 4 + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert 
span.error == 0 + assert span.name == 'celery.run' + assert span.service == 'celery-worker' + assert span.resource == 'tests.contrib.celery.test_integration.add' + assert span.parent_id is None + assert span.get_tag('celery.id') == res.task_id + assert span.get_tag('celery.action') == 'run' + assert span.get_tag('celery.state') == 'SUCCESS' + + def test_worker_service_name(self): + @self.app.task + def fn_task(): + return 42 + + # Ensure worker service name can be changed via + # configuration object + with self.override_config('celery', dict(worker_service_name='worker-notify')): + t = fn_task.apply() + self.assertTrue(t.successful()) + self.assertEqual(42, t.result) + + traces = self.tracer.writer.pop_traces() + self.assertEqual(1, len(traces)) + self.assertEqual(1, len(traces[0])) + span = traces[0][0] + self.assertEqual(span.service, 'worker-notify') + + def test_producer_service_name(self): + @self.app.task + def fn_task(): + return 42 + + # Ensure producer service name can be changed via + # configuration object + with self.override_config('celery', dict(producer_service_name='task-queue')): + t = fn_task.delay() + self.assertEqual('PENDING', t.status) + + traces = self.tracer.writer.pop_traces() + self.assertEqual(1, len(traces)) + self.assertEqual(1, len(traces[0])) + span = traces[0][0] + self.assertEqual(span.service, 'task-queue') + + def test_fn_task_apply_async_ot(self): + """OpenTracing version of test_fn_task_apply_async.""" + ot_tracer = init_tracer('celery_svc', self.tracer) + + # it should execute a traced async task that has parameters + @self.app.task + def fn_task_parameters(user, force_logout=False): + return (user, force_logout) + + with ot_tracer.start_active_span('celery_op'): + t = fn_task_parameters.apply_async(args=['user'], kwargs={'force_logout': True}) + assert 'PENDING' == t.status + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + ot_span, dd_span = traces[0] + + # confirm the parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.name == 'celery_op' + assert ot_span.service == 'celery_svc' + + assert dd_span.error == 0 + assert dd_span.name == 'celery.apply' + assert dd_span.resource == 'tests.contrib.celery.test_integration.fn_task_parameters' + assert dd_span.service == 'celery-producer' + assert dd_span.get_tag('celery.id') == t.task_id + assert dd_span.get_tag('celery.action') == 'apply_async' + assert dd_span.get_tag('celery.routing_key') == 'celery' diff --git a/tests/contrib/celery/test_old_style_task.py b/tests/contrib/celery/test_old_style_task.py new file mode 100644 index 0000000000..74c646d074 --- /dev/null +++ b/tests/contrib/celery/test_old_style_task.py @@ -0,0 +1,50 @@ +import celery + +from .base import CeleryBaseTestCase + + +class CeleryOldStyleTaskTest(CeleryBaseTestCase): + """Ensure Old Style Tasks are properly instrumented""" + + def test_apply_async_previous_style_tasks(self): + # ensures apply_async is properly patched if Celery 1.0 style tasks + # are used even in newer versions. This should extend support to + # previous versions of Celery. 
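+ # (These "old style" tasks subclass celery.task.Task directly and override apply_async as a + # classmethod, which is the pattern exercised below.)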
+ # Regression test: https://github.com/DataDog/dd-trace-py/pull/449 + class CelerySuperClass(celery.task.Task): + abstract = True + + @classmethod + def apply_async(cls, args=None, kwargs=None, **kwargs_): + return super(CelerySuperClass, cls).apply_async(args=args, kwargs=kwargs, **kwargs_) + + def run(self, *args, **kwargs): + if 'stop' in kwargs: + # avoid call loop + return + CelerySubClass.apply_async(args=[], kwargs={'stop': True}) + + class CelerySubClass(CelerySuperClass): + pass + + t = CelerySubClass() + res = t.apply() + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + run_span = traces[0][0] + assert run_span.error == 0 + assert run_span.name == 'celery.run' + assert run_span.resource == 'tests.contrib.celery.test_old_style_task.CelerySubClass' + assert run_span.service == 'celery-worker' + assert run_span.get_tag('celery.id') == res.task_id + assert run_span.get_tag('celery.action') == 'run' + assert run_span.get_tag('celery.state') == 'SUCCESS' + apply_span = traces[0][1] + assert apply_span.error == 0 + assert apply_span.name == 'celery.apply' + assert apply_span.resource == 'tests.contrib.celery.test_old_style_task.CelerySubClass' + assert apply_span.service == 'celery-producer' + assert apply_span.get_tag('celery.action') == 'apply_async' + assert apply_span.get_tag('celery.routing_key') == 'celery' diff --git a/tests/contrib/celery/test_patch.py b/tests/contrib/celery/test_patch.py new file mode 100644 index 0000000000..1fd676c073 --- /dev/null +++ b/tests/contrib/celery/test_patch.py @@ -0,0 +1,20 @@ +import unittest +from ddtrace import Pin + + +class CeleryPatchTest(unittest.TestCase): + def test_patch_after_import(self): + import celery + from ddtrace import patch + patch(celery=True) + + app = celery.Celery() + assert Pin.get_from(app) is not None + + def test_patch_before_import(self): + from ddtrace import patch + patch(celery=True) + import celery + + app = celery.Celery() + assert Pin.get_from(app) is not None diff --git a/tests/contrib/celery/test_task_deprecation.py b/tests/contrib/celery/test_task_deprecation.py new file mode 100644 index 0000000000..89daf3b231 --- /dev/null +++ b/tests/contrib/celery/test_task_deprecation.py @@ -0,0 +1,50 @@ +import warnings +import unittest + +from celery import Celery + +from ddtrace.contrib.celery import patch_task, unpatch_task, unpatch + + +class CeleryDeprecatedTaskPatch(unittest.TestCase): + """Ensures that the previous Task instrumentation is available + as a deprecated API. 
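+ patch_task now simply enables the instrumentation globally, while unpatch_task is a no-op; both + emit a DeprecationWarning pointing to patch(celery=True) and unpatch().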
+ """ + def setUp(self): + # create a not instrumented Celery App + self.app = Celery('celery.test_app') + + def tearDown(self): + # be sure the system is always unpatched + unpatch() + self.app = None + + def test_patch_signals_connect(self): + # calling `patch_task` enables instrumentation globally + # while raising a Deprecation warning + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + + @patch_task + @self.app.task + def fn_task(): + return 42 + + assert len(w) == 1 + assert issubclass(w[-1].category, DeprecationWarning) + assert 'patch(celery=True)' in str(w[-1].message) + + def test_unpatch_signals_diconnect(self): + # calling `unpatch_task` is a no-op that raises a Deprecation + # warning + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + + @unpatch_task + @self.app.task + def fn_task(): + return 42 + + assert len(w) == 1 + assert issubclass(w[-1].category, DeprecationWarning) + assert 'unpatch()' in str(w[-1].message) diff --git a/tests/contrib/celery/test_utils.py b/tests/contrib/celery/test_utils.py new file mode 100644 index 0000000000..884560870b --- /dev/null +++ b/tests/contrib/celery/test_utils.py @@ -0,0 +1,195 @@ +import gc + +from ddtrace.contrib.celery.utils import ( + tags_from_context, + retrieve_task_id, + attach_span, + detach_span, + retrieve_span, +) + +from .base import CeleryBaseTestCase + + +class CeleryTagsTest(CeleryBaseTestCase): + """Ensures that Celery doesn't extract too much meta + data when executing tasks asynchronously. + """ + def test_tags_from_context(self): + # it should extract only relevant keys + context = { + 'correlation_id': '44b7f305', + 'delivery_info': '{"eager": "True"}', + 'eta': 'soon', + 'expires': 'later', + 'hostname': 'localhost', + 'id': '44b7f305', + 'reply_to': '44b7f305', + 'retries': 4, + 'timelimit': ('now', 'later'), + 'custom_meta': 'custom_value', + } + + metas = tags_from_context(context) + assert metas['celery.correlation_id'] == '44b7f305' + assert metas['celery.delivery_info'] == '{"eager": "True"}' + assert metas['celery.eta'] == 'soon' + assert metas['celery.expires'] == 'later' + assert metas['celery.hostname'] == 'localhost' + assert metas['celery.id'] == '44b7f305' + assert metas['celery.reply_to'] == '44b7f305' + assert metas['celery.retries'] == 4 + assert metas['celery.timelimit'] == ('now', 'later') + assert metas.get('custom_meta', None) is None + + def test_tags_from_context_empty_keys(self): + # it should not extract empty keys + context = { + 'correlation_id': None, + 'exchange': '', + 'timelimit': (None, None), + 'retries': 0, + } + + tags = tags_from_context(context) + assert {} == tags + # edge case: `timelimit` can also be a list of None values + context = { + 'timelimit': [None, None], + } + + tags = tags_from_context(context) + assert {} == tags + + def test_span_propagation(self): + # ensure spans getter and setter works properly + @self.app.task + def fn_task(): + return 42 + + # propagate and retrieve a Span + task_id = '7c6731af-9533-40c3-83a9-25b58f0d837f' + span_before = self.tracer.trace('celery.run') + attach_span(fn_task, task_id, span_before) + span_after = retrieve_span(fn_task, task_id) + assert span_before is span_after + + def test_span_delete(self): + # ensure the helper removes properly a propagated Span + @self.app.task + def fn_task(): + return 42 + + # propagate a Span + task_id = '7c6731af-9533-40c3-83a9-25b58f0d837f' + span = self.tracer.trace('celery.run') + attach_span(fn_task, task_id, span) + # delete the 
Span + weak_dict = getattr(fn_task, '__dd_task_span') + detach_span(fn_task, task_id) + assert weak_dict.get((task_id, False)) is None + + def test_span_delete_empty(self): + # ensure the helper works even if the Task doesn't have + # any propagated Span + @self.app.task + def fn_task(): + return 42 + + # delete the Span + exception = None + task_id = '7c6731af-9533-40c3-83a9-25b58f0d837f' + try: + detach_span(fn_task, task_id) + except Exception as e: + exception = e + assert exception is None + + def test_memory_leak_safety(self): + # Spans are shared between signals using a Dictionary (task_id -> span). + # This test ensures the GC correctly cleans finished spans. If this test + # fails, there is a real memory leak. + @self.app.task + def fn_task(): + return 42 + + # propagate and finish a Span for `fn_task` + task_id = '7c6731af-9533-40c3-83a9-25b58f0d837f' + attach_span(fn_task, task_id, self.tracer.trace('celery.run')) + weak_dict = getattr(fn_task, '__dd_task_span') + key = (task_id, False) + assert weak_dict.get(key) + # flush data and force the GC + weak_dict.get(key).finish() + self.tracer.writer.pop() + self.tracer.writer.pop_traces() + gc.collect() + assert weak_dict.get(key) is None + + def test_task_id_from_protocol_v1(self): + # ensures a `task_id` is properly returned when Protocol v1 is used. + # `context` is an example of an emitted Signal with Protocol v1 + context = { + 'body': { + 'expires': None, + 'utc': True, + 'args': ['user'], + 'chord': None, + 'callbacks': None, + 'errbacks': None, + 'taskset': None, + 'id': 'dffcaec1-dd92-4a1a-b3ab-d6512f4beeb7', + 'retries': 0, + 'task': 'tests.contrib.celery.test_integration.fn_task_parameters', + 'timelimit': (None, None), + 'eta': None, + 'kwargs': {'force_logout': True} + }, + 'sender': 'tests.contrib.celery.test_integration.fn_task_parameters', + 'exchange': 'celery', + 'routing_key': 'celery', + 'retry_policy': None, + 'headers': {}, + 'properties': {}, + } + + task_id = retrieve_task_id(context) + assert task_id == 'dffcaec1-dd92-4a1a-b3ab-d6512f4beeb7' + + def test_task_id_from_protocol_v2(self): + # ensures a `task_id` is properly returned when Protocol v2 is used. + # `context` is an example of an emitted Signal with Protocol v2 + context = { + 'body': ( + ['user'], + {'force_logout': True}, + {u'chord': None, u'callbacks': None, u'errbacks': None, u'chain': None}, + ), + 'sender': u'tests.contrib.celery.test_integration.fn_task_parameters', + 'exchange': u'', + 'routing_key': u'celery', + 'retry_policy': None, + 'headers': { + u'origin': u'gen83744@hostname', + u'root_id': '7e917b83-4018-431d-9832-73a28e1fb6c0', + u'expires': None, + u'shadow': None, + u'id': '7e917b83-4018-431d-9832-73a28e1fb6c0', + u'kwargsrepr': u"{'force_logout': True}", + u'lang': u'py', + u'retries': 0, + u'task': u'tests.contrib.celery.test_integration.fn_task_parameters', + u'group': None, + u'timelimit': [None, None], + u'parent_id': None, + u'argsrepr': u"['user']", + u'eta': None, + }, + 'properties': { + u'reply_to': 'c3054a07-5b28-3855-b18c-1623a24aaeca', + u'correlation_id': '7e917b83-4018-431d-9832-73a28e1fb6c0', + }, + } + + task_id = retrieve_task_id(context) + assert task_id == '7e917b83-4018-431d-9832-73a28e1fb6c0' diff --git a/tests/contrib/config.py b/tests/contrib/config.py new file mode 100644 index 0000000000..c69b9aa83b --- /dev/null +++ b/tests/contrib/config.py @@ -0,0 +1,74 @@ +""" +testing config. 
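+ +Most values below can be overridden through TEST_* environment variables +(for example, exporting TEST_CASSANDRA_PORT=9043 before running the tests).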
+""" + +import os + + +# default config for backing services +# NOTE: defaults may be duplicated in the .env file; update both or +# simply write down a function that parses the .env file + +ELASTICSEARCH_CONFIG = { + 'port': int(os.getenv('TEST_ELASTICSEARCH_PORT', 9200)), +} + +CASSANDRA_CONFIG = { + 'port': int(os.getenv('TEST_CASSANDRA_PORT', 9042)), +} + +CONSUL_CONFIG = { + 'host': '127.0.0.1', + 'port': int(os.getenv('TEST_CONSUL_PORT', 8500)), +} + +# Use host=127.0.0.1 since local docker testing breaks with localhost + +POSTGRES_CONFIG = { + 'host': '127.0.0.1', + 'port': int(os.getenv('TEST_POSTGRES_PORT', 5432)), + 'user': os.getenv('TEST_POSTGRES_USER', 'postgres'), + 'password': os.getenv('TEST_POSTGRES_PASSWORD', 'postgres'), + 'dbname': os.getenv('TEST_POSTGRES_DB', 'postgres'), +} + +MYSQL_CONFIG = { + 'host': '127.0.0.1', + 'port': int(os.getenv('TEST_MYSQL_PORT', 3306)), + 'user': os.getenv('TEST_MYSQL_USER', 'test'), + 'password': os.getenv('TEST_MYSQL_PASSWORD', 'test'), + 'database': os.getenv('TEST_MYSQL_DATABASE', 'test'), +} + +REDIS_CONFIG = { + 'port': int(os.getenv('TEST_REDIS_PORT', 6379)), +} + +REDISCLUSTER_CONFIG = { + 'host': '127.0.0.1', + 'ports': os.getenv('TEST_REDISCLUSTER_PORTS', '7000,7001,7002,7003,7004,7005'), +} + +MONGO_CONFIG = { + 'port': int(os.getenv('TEST_MONGO_PORT', 27017)), +} + +MEMCACHED_CONFIG = { + 'host': os.getenv('TEST_MEMCACHED_HOST', '127.0.0.1'), + 'port': int(os.getenv('TEST_MEMCACHED_PORT', 11211)), +} + +VERTICA_CONFIG = { + 'host': os.getenv('TEST_VERTICA_HOST', '127.0.0.1'), + 'port': os.getenv('TEST_VERTICA_PORT', 5433), + 'user': os.getenv('TEST_VERTICA_USER', 'dbadmin'), + 'password': os.getenv('TEST_VERTICA_PASSWORD', 'abc123'), + 'database': os.getenv('TEST_VERTICA_DATABASE', 'docker'), +} + +RABBITMQ_CONFIG = { + 'host': os.getenv('TEST_RABBITMQ_HOST', '127.0.0.1'), + 'user': os.getenv('TEST_RABBITMQ_USER', 'guest'), + 'password': os.getenv('TEST_RABBITMQ_PASSWORD', 'guest'), + 'port': int(os.getenv('TEST_RABBITMQ_PORT', 5672)), +} diff --git a/tests/contrib/consul/__init__.py b/tests/contrib/consul/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/consul/test.py b/tests/contrib/consul/test.py new file mode 100644 index 0000000000..b68f6b85ab --- /dev/null +++ b/tests/contrib/consul/test.py @@ -0,0 +1,170 @@ +import consul +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.ext import consul as consulx +from ddtrace.vendor.wrapt import BoundFunctionWrapper +from ddtrace.contrib.consul.patch import patch, unpatch + +from ..config import CONSUL_CONFIG +from ...base import BaseTracerTestCase + + +class TestConsulPatch(BaseTracerTestCase): + + TEST_SERVICE = 'test-consul' + + def setUp(self): + super(TestConsulPatch, self).setUp() + patch() + c = consul.Consul( + host=CONSUL_CONFIG['host'], + port=CONSUL_CONFIG['port'], + ) + Pin.override(consul.Consul, service=self.TEST_SERVICE, tracer=self.tracer) + Pin.override(consul.Consul.KV, service=self.TEST_SERVICE, tracer=self.tracer) + self.c = c + + def tearDown(self): + unpatch() + super(TestConsulPatch, self).tearDown() + + def test_put(self): + key = 'test/put/consul' + value = 'test_value' + + self.c.kv.put(key, value) + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == consulx.CMD + assert span.resource == 'PUT' + assert span.error == 0 + tags = { + consulx.KEY: key, + consulx.CMD: 'PUT', + } + for k, v in 
tags.items(): + assert span.get_tag(k) == v + + def test_get(self): + key = 'test/get/consul' + + self.c.kv.get(key) + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == consulx.CMD + assert span.resource == 'GET' + assert span.error == 0 + tags = { + consulx.KEY: key, + consulx.CMD: 'GET', + } + for k, v in tags.items(): + assert span.get_tag(k) == v + + def test_delete(self): + key = 'test/delete/consul' + + self.c.kv.delete(key) + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == consulx.CMD + assert span.resource == 'DELETE' + assert span.error == 0 + tags = { + consulx.KEY: key, + consulx.CMD: 'DELETE', + } + for k, v in tags.items(): + assert span.get_tag(k) == v + + def test_kwargs(self): + key = 'test/kwargs/consul' + value = 'test_value' + + self.c.kv.put(key=key, value=value) + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == consulx.CMD + assert span.resource == 'PUT' + assert span.error == 0 + tags = { + consulx.KEY: key, + } + for k, v in tags.items(): + assert span.get_tag(k) == v + + def test_patch_idempotence(self): + key = 'test/patch/idempotence' + + patch() + patch() + + self.c.kv.get(key) + assert self.spans + assert isinstance(self.c.kv.get, BoundFunctionWrapper) + + unpatch() + self.reset() + + self.c.kv.get(key) + assert not self.spans + assert not isinstance(self.c.kv.get, BoundFunctionWrapper) + + def test_patch_preserves_functionality(self): + key = 'test/functionality' + value = b'test_value' + + self.c.kv.put(key, value) + _, data = self.c.kv.get(key) + assert data['Value'] == value + self.c.kv.delete(key) + _, data = self.c.kv.get(key) + assert data is None + + def test_analytics_without_rate(self): + with self.override_config('consul', {'analytics_enabled': True}): + key = 'test/kwargs/consul' + value = 'test_value' + + self.c.kv.put(key=key, value=value) + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0 + + def test_analytics_with_rate(self): + with self.override_config('consul', {'analytics_enabled': True, 'analytics_sample_rate': 0.5}): + key = 'test/kwargs/consul' + value = 'test_value' + + self.c.kv.put(key=key, value=value) + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.5 + + def test_analytics_disabled(self): + with self.override_config('consul', {'analytics_enabled': False}): + key = 'test/kwargs/consul' + value = 'test_value' + + self.c.kv.put(key=key, value=value) + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None diff --git a/tests/contrib/dbapi/__init__.py b/tests/contrib/dbapi/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/dbapi/test_unit.py b/tests/contrib/dbapi/test_unit.py new file mode 100644 index 0000000000..18b78f6051 --- /dev/null +++ b/tests/contrib/dbapi/test_unit.py @@ -0,0 +1,523 @@ +import mock + +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.dbapi import FetchTracedCursor, TracedCursor, TracedConnection +from ddtrace.span import Span +from ...base import BaseTracerTestCase + + +class TestTracedCursor(BaseTracerTestCase): + + def setUp(self): + 
super(TestTracedCursor, self).setUp() + self.cursor = mock.Mock() + + def test_execute_wrapped_is_called_and_returned(self): + cursor = self.cursor + cursor.rowcount = 0 + cursor.execute.return_value = '__result__' + + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = TracedCursor(cursor, pin) + # DEV: We always pass through the result + assert '__result__' == traced_cursor.execute('__query__', 'arg_1', kwarg1='kwarg1') + cursor.execute.assert_called_once_with('__query__', 'arg_1', kwarg1='kwarg1') + + def test_executemany_wrapped_is_called_and_returned(self): + cursor = self.cursor + cursor.rowcount = 0 + cursor.executemany.return_value = '__result__' + + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = TracedCursor(cursor, pin) + # DEV: We always pass through the result + assert '__result__' == traced_cursor.executemany('__query__', 'arg_1', kwarg1='kwarg1') + cursor.executemany.assert_called_once_with('__query__', 'arg_1', kwarg1='kwarg1') + + def test_fetchone_wrapped_is_called_and_returned(self): + cursor = self.cursor + cursor.rowcount = 0 + cursor.fetchone.return_value = '__result__' + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = TracedCursor(cursor, pin) + assert '__result__' == traced_cursor.fetchone('arg_1', kwarg1='kwarg1') + cursor.fetchone.assert_called_once_with('arg_1', kwarg1='kwarg1') + + def test_fetchall_wrapped_is_called_and_returned(self): + cursor = self.cursor + cursor.rowcount = 0 + cursor.fetchall.return_value = '__result__' + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = TracedCursor(cursor, pin) + assert '__result__' == traced_cursor.fetchall('arg_1', kwarg1='kwarg1') + cursor.fetchall.assert_called_once_with('arg_1', kwarg1='kwarg1') + + def test_fetchmany_wrapped_is_called_and_returned(self): + cursor = self.cursor + cursor.rowcount = 0 + cursor.fetchmany.return_value = '__result__' + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = TracedCursor(cursor, pin) + assert '__result__' == traced_cursor.fetchmany('arg_1', kwarg1='kwarg1') + cursor.fetchmany.assert_called_once_with('arg_1', kwarg1='kwarg1') + + def test_correct_span_names(self): + cursor = self.cursor + tracer = self.tracer + cursor.rowcount = 0 + pin = Pin('pin_name', tracer=tracer) + traced_cursor = TracedCursor(cursor, pin) + + traced_cursor.execute('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='sql.query')) + self.reset() + + traced_cursor.executemany('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='sql.query')) + self.reset() + + traced_cursor.callproc('arg_1', 'arg2') + self.assert_structure(dict(name='sql.query')) + self.reset() + + traced_cursor.fetchone('arg_1', kwarg1='kwarg1') + self.assert_has_no_spans() + + traced_cursor.fetchmany('arg_1', kwarg1='kwarg1') + self.assert_has_no_spans() + + traced_cursor.fetchall('arg_1', kwarg1='kwarg1') + self.assert_has_no_spans() + + def test_correct_span_names_can_be_overridden_by_pin(self): + cursor = self.cursor + tracer = self.tracer + cursor.rowcount = 0 + pin = Pin('pin_name', app='changed', tracer=tracer) + traced_cursor = TracedCursor(cursor, pin) + + traced_cursor.execute('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='changed.query')) + self.reset() + + traced_cursor.executemany('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='changed.query')) + self.reset() + + traced_cursor.callproc('arg_1', 'arg2') + self.assert_structure(dict(name='changed.query')) + self.reset() + + traced_cursor.fetchone('arg_1', kwarg1='kwarg1') + 
self.assert_has_no_spans() + + traced_cursor.fetchmany('arg_1', kwarg1='kwarg1') + self.assert_has_no_spans() + + traced_cursor.fetchall('arg_1', kwarg1='kwarg1') + self.assert_has_no_spans() + + def test_when_pin_disabled_then_no_tracing(self): + cursor = self.cursor + tracer = self.tracer + cursor.rowcount = 0 + cursor.execute.return_value = '__result__' + cursor.executemany.return_value = '__result__' + + tracer.enabled = False + pin = Pin('pin_name', tracer=tracer) + traced_cursor = TracedCursor(cursor, pin) + + assert '__result__' == traced_cursor.execute('arg_1', kwarg1='kwarg1') + assert len(tracer.writer.pop()) == 0 + + assert '__result__' == traced_cursor.executemany('arg_1', kwarg1='kwarg1') + assert len(tracer.writer.pop()) == 0 + + cursor.callproc.return_value = 'callproc' + assert 'callproc' == traced_cursor.callproc('arg_1', 'arg_2') + assert len(tracer.writer.pop()) == 0 + + cursor.fetchone.return_value = 'fetchone' + assert 'fetchone' == traced_cursor.fetchone('arg_1', 'arg_2') + assert len(tracer.writer.pop()) == 0 + + cursor.fetchmany.return_value = 'fetchmany' + assert 'fetchmany' == traced_cursor.fetchmany('arg_1', 'arg_2') + assert len(tracer.writer.pop()) == 0 + + cursor.fetchall.return_value = 'fetchall' + assert 'fetchall' == traced_cursor.fetchall('arg_1', 'arg_2') + assert len(tracer.writer.pop()) == 0 + + def test_span_info(self): + cursor = self.cursor + tracer = self.tracer + cursor.rowcount = 123 + pin = Pin('my_service', app='my_app', tracer=tracer, tags={'pin1': 'value_pin1'}) + traced_cursor = TracedCursor(cursor, pin) + + def method(): + pass + + traced_cursor._trace_method(method, 'my_name', 'my_resource', {'extra1': 'value_extra1'}) + span = tracer.writer.pop()[0] # type: Span + assert span.meta['pin1'] == 'value_pin1', 'Pin tags are preserved' + assert span.meta['extra1'] == 'value_extra1', 'Extra tags are merged into pin tags' + assert span.name == 'my_name', 'Span name is respected' + assert span.service == 'my_service', 'Service from pin' + assert span.resource == 'my_resource', 'Resource is respected' + assert span.span_type == 'sql', 'Span has the correct span type' + # Row count + assert span.get_metric('db.rowcount') == 123, 'Row count is set as a metric' + assert span.get_metric('sql.rows') == 123, 'Row count is set as a tag (for legacy django cursor replacement)' + + def test_django_traced_cursor_backward_compatibility(self): + cursor = self.cursor + tracer = self.tracer + # Django integration used to have its own TracedCursor implementation. When we replaced such custom + # implementation with the generic dbapi traced cursor, we had to make sure to add the tag 'sql.rows' that was + # set by the legacy replaced implementation. 
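+        # Both the 'db.rowcount' metric and the legacy 'sql.rows' value are
+        # asserted below to guard that backward compatibility.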
+ cursor.rowcount = 123 + pin = Pin('my_service', app='my_app', tracer=tracer, tags={'pin1': 'value_pin1'}) + traced_cursor = TracedCursor(cursor, pin) + + def method(): + pass + + traced_cursor._trace_method(method, 'my_name', 'my_resource', {'extra1': 'value_extra1'}) + span = tracer.writer.pop()[0] # type: Span + # Row count + assert span.get_metric('db.rowcount') == 123, 'Row count is set as a metric' + assert span.get_metric('sql.rows') == 123, 'Row count is set as a tag (for legacy django cursor replacement)' + + def test_cursor_analytics_default(self): + cursor = self.cursor + cursor.rowcount = 0 + cursor.execute.return_value = '__result__' + + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = TracedCursor(cursor, pin) + # DEV: We always pass through the result + assert '__result__' == traced_cursor.execute('__query__', 'arg_1', kwarg1='kwarg1') + + span = self.tracer.writer.pop()[0] + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_cursor_analytics_with_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + cursor = self.cursor + cursor.rowcount = 0 + cursor.execute.return_value = '__result__' + + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = TracedCursor(cursor, pin) + # DEV: We always pass through the result + assert '__result__' == traced_cursor.execute('__query__', 'arg_1', kwarg1='kwarg1') + + span = self.tracer.writer.pop()[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_cursor_analytics_without_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True) + ): + cursor = self.cursor + cursor.rowcount = 0 + cursor.execute.return_value = '__result__' + + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = TracedCursor(cursor, pin) + # DEV: We always pass through the result + assert '__result__' == traced_cursor.execute('__query__', 'arg_1', kwarg1='kwarg1') + + span = self.tracer.writer.pop()[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) + + +class TestFetchTracedCursor(BaseTracerTestCase): + + def setUp(self): + super(TestFetchTracedCursor, self).setUp() + self.cursor = mock.Mock() + + def test_execute_wrapped_is_called_and_returned(self): + cursor = self.cursor + cursor.rowcount = 0 + cursor.execute.return_value = '__result__' + + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = FetchTracedCursor(cursor, pin) + assert '__result__' == traced_cursor.execute('__query__', 'arg_1', kwarg1='kwarg1') + cursor.execute.assert_called_once_with('__query__', 'arg_1', kwarg1='kwarg1') + + def test_executemany_wrapped_is_called_and_returned(self): + cursor = self.cursor + cursor.rowcount = 0 + cursor.executemany.return_value = '__result__' + + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = FetchTracedCursor(cursor, pin) + assert '__result__' == traced_cursor.executemany('__query__', 'arg_1', kwarg1='kwarg1') + cursor.executemany.assert_called_once_with('__query__', 'arg_1', kwarg1='kwarg1') + + def test_fetchone_wrapped_is_called_and_returned(self): + cursor = self.cursor + cursor.rowcount = 0 + cursor.fetchone.return_value = '__result__' + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = FetchTracedCursor(cursor, pin) + assert '__result__' == traced_cursor.fetchone('arg_1', kwarg1='kwarg1') + cursor.fetchone.assert_called_once_with('arg_1', kwarg1='kwarg1') + + def test_fetchall_wrapped_is_called_and_returned(self): + cursor = self.cursor + 
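+        # rowcount is stubbed with a real integer so the traced cursor can
+        # report it as a span metric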
cursor.rowcount = 0 + cursor.fetchall.return_value = '__result__' + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = FetchTracedCursor(cursor, pin) + assert '__result__' == traced_cursor.fetchall('arg_1', kwarg1='kwarg1') + cursor.fetchall.assert_called_once_with('arg_1', kwarg1='kwarg1') + + def test_fetchmany_wrapped_is_called_and_returned(self): + cursor = self.cursor + cursor.rowcount = 0 + cursor.fetchmany.return_value = '__result__' + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = FetchTracedCursor(cursor, pin) + assert '__result__' == traced_cursor.fetchmany('arg_1', kwarg1='kwarg1') + cursor.fetchmany.assert_called_once_with('arg_1', kwarg1='kwarg1') + + def test_correct_span_names(self): + cursor = self.cursor + tracer = self.tracer + cursor.rowcount = 0 + pin = Pin('pin_name', tracer=tracer) + traced_cursor = FetchTracedCursor(cursor, pin) + + traced_cursor.execute('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='sql.query')) + self.reset() + + traced_cursor.executemany('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='sql.query')) + self.reset() + + traced_cursor.callproc('arg_1', 'arg2') + self.assert_structure(dict(name='sql.query')) + self.reset() + + traced_cursor.fetchone('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='sql.query.fetchone')) + self.reset() + + traced_cursor.fetchmany('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='sql.query.fetchmany')) + self.reset() + + traced_cursor.fetchall('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='sql.query.fetchall')) + self.reset() + + def test_correct_span_names_can_be_overridden_by_pin(self): + cursor = self.cursor + tracer = self.tracer + cursor.rowcount = 0 + pin = Pin('pin_name', app='changed', tracer=tracer) + traced_cursor = FetchTracedCursor(cursor, pin) + + traced_cursor.execute('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='changed.query')) + self.reset() + + traced_cursor.executemany('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='changed.query')) + self.reset() + + traced_cursor.callproc('arg_1', 'arg2') + self.assert_structure(dict(name='changed.query')) + self.reset() + + traced_cursor.fetchone('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='changed.query.fetchone')) + self.reset() + + traced_cursor.fetchmany('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='changed.query.fetchmany')) + self.reset() + + traced_cursor.fetchall('arg_1', kwarg1='kwarg1') + self.assert_structure(dict(name='changed.query.fetchall')) + self.reset() + + def test_when_pin_disabled_then_no_tracing(self): + cursor = self.cursor + tracer = self.tracer + cursor.rowcount = 0 + cursor.execute.return_value = '__result__' + cursor.executemany.return_value = '__result__' + + tracer.enabled = False + pin = Pin('pin_name', tracer=tracer) + traced_cursor = FetchTracedCursor(cursor, pin) + + assert '__result__' == traced_cursor.execute('arg_1', kwarg1='kwarg1') + assert len(tracer.writer.pop()) == 0 + + assert '__result__' == traced_cursor.executemany('arg_1', kwarg1='kwarg1') + assert len(tracer.writer.pop()) == 0 + + cursor.callproc.return_value = 'callproc' + assert 'callproc' == traced_cursor.callproc('arg_1', 'arg_2') + assert len(tracer.writer.pop()) == 0 + + cursor.fetchone.return_value = 'fetchone' + assert 'fetchone' == traced_cursor.fetchone('arg_1', 'arg_2') + assert len(tracer.writer.pop()) == 0 + + cursor.fetchmany.return_value = 'fetchmany' + assert 'fetchmany' == traced_cursor.fetchmany('arg_1', 
'arg_2') + assert len(tracer.writer.pop()) == 0 + + cursor.fetchall.return_value = 'fetchall' + assert 'fetchall' == traced_cursor.fetchall('arg_1', 'arg_2') + assert len(tracer.writer.pop()) == 0 + + def test_span_info(self): + cursor = self.cursor + tracer = self.tracer + cursor.rowcount = 123 + pin = Pin('my_service', app='my_app', tracer=tracer, tags={'pin1': 'value_pin1'}) + traced_cursor = FetchTracedCursor(cursor, pin) + + def method(): + pass + + traced_cursor._trace_method(method, 'my_name', 'my_resource', {'extra1': 'value_extra1'}) + span = tracer.writer.pop()[0] # type: Span + assert span.meta['pin1'] == 'value_pin1', 'Pin tags are preserved' + assert span.meta['extra1'] == 'value_extra1', 'Extra tags are merged into pin tags' + assert span.name == 'my_name', 'Span name is respected' + assert span.service == 'my_service', 'Service from pin' + assert span.resource == 'my_resource', 'Resource is respected' + assert span.span_type == 'sql', 'Span has the correct span type' + # Row count + assert span.get_metric('db.rowcount') == 123, 'Row count is set as a metric' + assert span.get_metric('sql.rows') == 123, 'Row count is set as a tag (for legacy django cursor replacement)' + + def test_django_traced_cursor_backward_compatibility(self): + cursor = self.cursor + tracer = self.tracer + # Django integration used to have its own FetchTracedCursor implementation. When we replaced such custom + # implementation with the generic dbapi traced cursor, we had to make sure to add the tag 'sql.rows' that was + # set by the legacy replaced implementation. + cursor.rowcount = 123 + pin = Pin('my_service', app='my_app', tracer=tracer, tags={'pin1': 'value_pin1'}) + traced_cursor = FetchTracedCursor(cursor, pin) + + def method(): + pass + + traced_cursor._trace_method(method, 'my_name', 'my_resource', {'extra1': 'value_extra1'}) + span = tracer.writer.pop()[0] # type: Span + # Row count + assert span.get_metric('db.rowcount') == 123, 'Row count is set as a metric' + assert span.get_metric('sql.rows') == 123, 'Row count is set as a tag (for legacy django cursor replacement)' + + def test_fetch_no_analytics(self): + """ Confirm fetch* methods do not have analytics sample rate metric """ + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True) + ): + cursor = self.cursor + cursor.rowcount = 0 + cursor.fetchone.return_value = '__result__' + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = FetchTracedCursor(cursor, pin) + assert '__result__' == traced_cursor.fetchone('arg_1', kwarg1='kwarg1') + + span = self.tracer.writer.pop()[0] + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + cursor = self.cursor + cursor.rowcount = 0 + cursor.fetchall.return_value = '__result__' + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = FetchTracedCursor(cursor, pin) + assert '__result__' == traced_cursor.fetchall('arg_1', kwarg1='kwarg1') + + span = self.tracer.writer.pop()[0] + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + cursor = self.cursor + cursor.rowcount = 0 + cursor.fetchmany.return_value = '__result__' + pin = Pin('pin_name', tracer=self.tracer) + traced_cursor = FetchTracedCursor(cursor, pin) + assert '__result__' == traced_cursor.fetchmany('arg_1', kwarg1='kwarg1') + + span = self.tracer.writer.pop()[0] + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + +class TestTracedConnection(BaseTracerTestCase): + def setUp(self): + super(TestTracedConnection, self).setUp() + self.connection = mock.Mock() + + def 
test_cursor_class(self): + pin = Pin('pin_name', tracer=self.tracer) + + # Default + traced_connection = TracedConnection(self.connection, pin=pin) + self.assertTrue(traced_connection._self_cursor_cls is TracedCursor) + + # Trace fetched methods + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + traced_connection = TracedConnection(self.connection, pin=pin) + self.assertTrue(traced_connection._self_cursor_cls is FetchTracedCursor) + + # Manually provided cursor class + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + traced_connection = TracedConnection(self.connection, pin=pin, cursor_cls=TracedCursor) + self.assertTrue(traced_connection._self_cursor_cls is TracedCursor) + + def test_commit_is_traced(self): + connection = self.connection + tracer = self.tracer + connection.commit.return_value = None + pin = Pin('pin_name', tracer=tracer) + traced_connection = TracedConnection(connection, pin) + traced_connection.commit() + assert tracer.writer.pop()[0].name == 'mock.connection.commit' + connection.commit.assert_called_with() + + def test_rollback_is_traced(self): + connection = self.connection + tracer = self.tracer + connection.rollback.return_value = None + pin = Pin('pin_name', tracer=tracer) + traced_connection = TracedConnection(connection, pin) + traced_connection.rollback() + assert tracer.writer.pop()[0].name == 'mock.connection.rollback' + connection.rollback.assert_called_with() + + def test_connection_analytics_with_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + connection = self.connection + tracer = self.tracer + connection.commit.return_value = None + pin = Pin('pin_name', tracer=tracer) + traced_connection = TracedConnection(connection, pin) + traced_connection.commit() + span = tracer.writer.pop()[0] + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) diff --git a/tests/contrib/django/__init__.py b/tests/contrib/django/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/django/app/__init__.py b/tests/contrib/django/app/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/django/app/middlewares.py b/tests/contrib/django/app/middlewares.py new file mode 100644 index 0000000000..c316997291 --- /dev/null +++ b/tests/contrib/django/app/middlewares.py @@ -0,0 +1,36 @@ +from django.http import HttpResponse + +try: + from django.utils.deprecation import MiddlewareMixin + MiddlewareClass = MiddlewareMixin +except ImportError: + MiddlewareClass = object + + +class CatchExceptionMiddleware(MiddlewareClass): + def process_exception(self, request, exception): + return HttpResponse(status=500) + + +class HandleErrorMiddlewareSuccess(MiddlewareClass): + """ Converts an HttpError (that may be returned from an exception handler) + generated by a view or previous middleware and returns a 200 + HttpResponse. + """ + def process_response(self, request, response): + if response.status_code == 500: + return HttpResponse(status=200) + + return response + + +class HandleErrorMiddlewareClientError(MiddlewareClass): + """ Converts an HttpError (that may be returned from an exception handler) + generated by a view or previous middleware and returns a 404 + HttpResponse. 
+ """ + def process_response(self, request, response): + if response.status_code == 500: + return HttpResponse(status=404) + + return response diff --git a/tests/contrib/django/app/settings.py b/tests/contrib/django/app/settings.py new file mode 100644 index 0000000000..b17e604e49 --- /dev/null +++ b/tests/contrib/django/app/settings.py @@ -0,0 +1,132 @@ +""" +Settings configuration for the Django web framework. Update this +configuration if you need to change the default behavior of +Django during tests +""" +import os +import django + + +BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': ':memory:' + } +} + +CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + 'LOCATION': 'unique-snowflake', + }, + 'redis': { + 'BACKEND': 'django_redis.cache.RedisCache', + 'LOCATION': 'redis://127.0.0.1:6379/1', + }, + 'pylibmc': { + 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache', + 'LOCATION': '127.0.0.1:11211', + }, + 'python_memcached': { + 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', + 'LOCATION': '127.0.0.1:11211', + }, + 'django_pylibmc': { + 'BACKEND': 'django_pylibmc.memcached.PyLibMCCache', + 'LOCATION': '127.0.0.1:11211', + 'BINARY': True, + 'OPTIONS': { + 'tcp_nodelay': True, + 'ketama': True + } + }, +} + +SITE_ID = 1 +SECRET_KEY = 'not_very_secret_in_tests' +USE_I18N = True +USE_L10N = True +STATIC_URL = '/static/' +ROOT_URLCONF = 'tests.contrib.django.app.views' + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [ + os.path.join(BASE_DIR, 'app', 'templates'), + ], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.template.context_processors.debug', + 'django.template.context_processors.request', + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] + +if (1, 10) <= django.VERSION < (2, 0): + MIDDLEWARE = [ + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', + 'django.middleware.security.SecurityMiddleware', + + 'tests.contrib.django.app.middlewares.CatchExceptionMiddleware', + ] + +# Django 2.0 has different defaults +elif django.VERSION >= (2, 0): + MIDDLEWARE = [ + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', + 'django.middleware.security.SecurityMiddleware', + + 'tests.contrib.django.app.middlewares.CatchExceptionMiddleware', + ] + +# Pre 1.10 style +else: + MIDDLEWARE_CLASSES = [ + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 
'django.middleware.clickjacking.XFrameOptionsMiddleware', + 'django.middleware.security.SecurityMiddleware', + + 'tests.contrib.django.app.middlewares.CatchExceptionMiddleware', + ] + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + + # tracer app + 'ddtrace.contrib.django', +] + +DATADOG_TRACE = { + # tracer with a DummyWriter + 'TRACER': 'tests.contrib.django.utils.tracer', + 'ENABLED': True, + 'TAGS': { + 'env': 'test', + }, +} diff --git a/tests/contrib/django/app/settings_untraced.py b/tests/contrib/django/app/settings_untraced.py new file mode 100644 index 0000000000..eb9f878b85 --- /dev/null +++ b/tests/contrib/django/app/settings_untraced.py @@ -0,0 +1,106 @@ +""" +Settings configuration for the Django web framework. Update this +configuration if you need to change the default behavior of +Django during tests +""" +import os + + +BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': ':memory:' + } +} + +CACHES = { + 'default': { + 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', + 'LOCATION': 'unique-snowflake', + }, + 'redis': { + 'BACKEND': 'django_redis.cache.RedisCache', + 'LOCATION': 'redis://127.0.0.1:6379/1', + }, + 'pylibmc': { + 'BACKEND': 'django.core.cache.backends.memcached.PyLibMCCache', + 'LOCATION': '127.0.0.1:11211', + }, + 'python_memcached': { + 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', + 'LOCATION': '127.0.0.1:11211', + }, + 'django_pylibmc': { + 'BACKEND': 'django_pylibmc.memcached.PyLibMCCache', + 'LOCATION': '127.0.0.1:11211', + 'BINARY': True, + 'OPTIONS': { + 'tcp_nodelay': True, + 'ketama': True + } + }, +} + +SITE_ID = 1 +SECRET_KEY = 'not_very_secret_in_tests' +USE_I18N = True +USE_L10N = True +STATIC_URL = '/static/' +ROOT_URLCONF = 'tests.contrib.django.app.views' + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [ + os.path.join(BASE_DIR, 'app', 'templates'), + ], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.template.context_processors.debug', + 'django.template.context_processors.request', + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] + +# 1.10+ style +MIDDLEWARE = [ + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', + 'django.middleware.security.SecurityMiddleware', +] + +# Pre 1.10 style +MIDDLEWARE_CLASSES = [ + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', + 'django.middleware.security.SecurityMiddleware', +] + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', +] + +DATADOG_TRACE = { + # tracer with a DummyWriter + 
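+    # the dotted path is resolved by the Django integration, so test spans are
+    # collected by the dummy writer instead of being flushed to an agent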
'TRACER': 'tests.contrib.django.utils.tracer', + 'ENABLED': True, +} diff --git a/tests/contrib/django/app/templates/cached_list.html b/tests/contrib/django/app/templates/cached_list.html new file mode 100644 index 0000000000..b36c1c6829 --- /dev/null +++ b/tests/contrib/django/app/templates/cached_list.html @@ -0,0 +1,7 @@ +{% load cache %} + +{% cache 60 users_list %} + {% for user in object_list %} + {{ user }} + {% endfor %} +{% endcache %} diff --git a/tests/contrib/django/app/templates/users_list.html b/tests/contrib/django/app/templates/users_list.html new file mode 100644 index 0000000000..8661f5e6ee --- /dev/null +++ b/tests/contrib/django/app/templates/users_list.html @@ -0,0 +1,3 @@ +{% for user in object_list %} + {{ user }} +{% endfor %} diff --git a/tests/contrib/django/app/views.py b/tests/contrib/django/app/views.py new file mode 100644 index 0000000000..8e570a536b --- /dev/null +++ b/tests/contrib/django/app/views.py @@ -0,0 +1,74 @@ +""" +Class based views used for Django tests. +""" + +from functools import partial + +from django.http import HttpResponse +from django.conf.urls import url + +from django.views.generic import ListView, TemplateView +from django.views.decorators.cache import cache_page + +from django.contrib.auth.models import User +from django.contrib.syndication.views import Feed + + +class UserList(ListView): + model = User + template_name = 'users_list.html' + + +class TemplateCachedUserList(ListView): + model = User + template_name = 'cached_list.html' + + +class ForbiddenView(TemplateView): + def get(self, request, *args, **kwargs): + return HttpResponse(status=403) + + +def function_view(request): + return HttpResponse(status=200) + + +def error_500(request): + raise Exception('Error 500') + + +class FeedView(Feed): + """ + A callable view that is part of the Django framework + """ + title = 'Police beat site news' + link = '/sitenews/' + description = 'Updates on changes and additions to police beat central.' 
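+    # DEV: registered below as ``FeedView()`` so the urlconf exercises a
+    #      callable instance rather than a plain function view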
+ + def items(self): + return [] + + def item_title(self, item): + return 'empty' + + def item_description(self, item): + return 'empty' + + +partial_view = partial(function_view) + +# disabling flake8 test below, yes, declaring a func like this is bad, we know +lambda_view = lambda request: function_view(request) # NOQA + +# use this url patterns for tests +urlpatterns = [ + url(r'^users/$', UserList.as_view(), name='users-list'), + url(r'^cached-template/$', TemplateCachedUserList.as_view(), name='cached-template-list'), + url(r'^cached-users/$', cache_page(60)(UserList.as_view()), name='cached-users-list'), + url(r'^fail-view/$', ForbiddenView.as_view(), name='forbidden-view'), + url(r'^fn-view/$', function_view, name='fn-view'), + url(r'^feed-view/$', FeedView(), name='feed-view'), + url(r'^partial-view/$', partial_view, name='partial-view'), + url(r'^lambda-view/$', lambda_view, name='lambda-view'), + url(r'^error-500/$', error_500, name='error-500'), +] diff --git a/tests/contrib/django/compat.py b/tests/contrib/django/compat.py new file mode 100644 index 0000000000..c591277ff7 --- /dev/null +++ b/tests/contrib/django/compat.py @@ -0,0 +1,6 @@ +__all__ = ['reverse'] + +try: + from django.core.urlresolvers import reverse +except ImportError: + from django.urls import reverse diff --git a/tests/contrib/django/conftest.py b/tests/contrib/django/conftest.py new file mode 100644 index 0000000000..37511609f7 --- /dev/null +++ b/tests/contrib/django/conftest.py @@ -0,0 +1,16 @@ +import os +import django +from django.conf import settings + +# We manually designate which settings we will be using in an environment variable +# This is similar to what occurs in the `manage.py` +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.contrib.django.app.settings') + + +# `pytest` automatically calls this function once when tests are run. 
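+# Django >= 1.7 needs django.setup() to populate the app registry; older
+# versions only require settings.configure().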
+def pytest_configure(): + settings.DEBUG = False + if django.VERSION < (1, 7, 0): + settings.configure() + else: + django.setup() diff --git a/tests/contrib/django/runtests.py b/tests/contrib/django/runtests.py new file mode 100755 index 0000000000..0ece0b6956 --- /dev/null +++ b/tests/contrib/django/runtests.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python +import os +import sys + + +if __name__ == '__main__': + # define django defaults + app_to_test = 'tests/contrib/django' + + # append the project root to the PYTHONPATH: + # this is required because we don't want to put the current file + # in the project_root + current_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + project_root = os.path.join(current_dir, '..', '..') + sys.path.append(project_root) + + from django.core.management import execute_from_command_line + execute_from_command_line([sys.argv[0], 'test', app_to_test]) diff --git a/tests/contrib/django/test_autopatching.py b/tests/contrib/django/test_autopatching.py new file mode 100644 index 0000000000..270bdd2573 --- /dev/null +++ b/tests/contrib/django/test_autopatching.py @@ -0,0 +1,98 @@ +import django + +from ddtrace.monkey import patch +from .utils import DjangoTraceTestCase +from django.conf import settings +from unittest import skipIf + + +class DjangoAutopatchTest(DjangoTraceTestCase): + def setUp(self): + super(DjangoAutopatchTest, self).setUp() + patch(django=True) + django.setup() + + @skipIf(django.VERSION >= (1, 10), 'skip if version above 1.10') + def test_autopatching_middleware_classes(self): + assert django._datadog_patch + assert 'ddtrace.contrib.django' in settings.INSTALLED_APPS + assert settings.MIDDLEWARE_CLASSES[0] == 'ddtrace.contrib.django.TraceMiddleware' + assert settings.MIDDLEWARE_CLASSES[-1] == 'ddtrace.contrib.django.TraceExceptionMiddleware' + + @skipIf(django.VERSION >= (1, 10), 'skip if version above 1.10') + def test_autopatching_twice_middleware_classes(self): + assert django._datadog_patch + # Call django.setup() twice and ensure we don't add a duplicate tracer + django.setup() + + found_app = settings.INSTALLED_APPS.count('ddtrace.contrib.django') + assert found_app == 1 + + assert settings.MIDDLEWARE_CLASSES[0] == 'ddtrace.contrib.django.TraceMiddleware' + assert settings.MIDDLEWARE_CLASSES[-1] == 'ddtrace.contrib.django.TraceExceptionMiddleware' + + found_mw = settings.MIDDLEWARE_CLASSES.count('ddtrace.contrib.django.TraceMiddleware') + assert found_mw == 1 + found_mw = settings.MIDDLEWARE_CLASSES.count('ddtrace.contrib.django.TraceExceptionMiddleware') + assert found_mw == 1 + + @skipIf(django.VERSION < (1, 10), 'skip if version is below 1.10') + def test_autopatching_middleware(self): + assert django._datadog_patch + assert 'ddtrace.contrib.django' in settings.INSTALLED_APPS + assert settings.MIDDLEWARE[0] == 'ddtrace.contrib.django.TraceMiddleware' + # MIDDLEWARE_CLASSES gets created internally in django 1.10 & 1.11 but doesn't + # exist at all in 2.0. 
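+        # so the assertions below only inspect MIDDLEWARE_CLASSES when it exists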
+ assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \ + 'ddtrace.contrib.django.TraceMiddleware' \ + not in settings.MIDDLEWARE_CLASSES + assert settings.MIDDLEWARE[-1] == 'ddtrace.contrib.django.TraceExceptionMiddleware' + assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \ + 'ddtrace.contrib.django.TraceExceptionMiddleware' \ + not in settings.MIDDLEWARE_CLASSES + + @skipIf(django.VERSION < (1, 10), 'skip if version is below 1.10') + def test_autopatching_twice_middleware(self): + assert django._datadog_patch + # Call django.setup() twice and ensure we don't add a duplicate tracer + django.setup() + + found_app = settings.INSTALLED_APPS.count('ddtrace.contrib.django') + assert found_app == 1 + + assert settings.MIDDLEWARE[0] == 'ddtrace.contrib.django.TraceMiddleware' + # MIDDLEWARE_CLASSES gets created internally in django 1.10 & 1.11 but doesn't + # exist at all in 2.0. + assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \ + 'ddtrace.contrib.django.TraceMiddleware' \ + not in settings.MIDDLEWARE_CLASSES + assert settings.MIDDLEWARE[-1] == 'ddtrace.contrib.django.TraceExceptionMiddleware' + assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \ + 'ddtrace.contrib.django.TraceExceptionMiddleware' \ + not in settings.MIDDLEWARE_CLASSES + + found_mw = settings.MIDDLEWARE.count('ddtrace.contrib.django.TraceMiddleware') + assert found_mw == 1 + + found_mw = settings.MIDDLEWARE.count('ddtrace.contrib.django.TraceExceptionMiddleware') + assert found_mw == 1 + + +class DjangoAutopatchCustomMiddlewareTest(DjangoTraceTestCase): + @skipIf(django.VERSION < (1, 10), 'skip if version is below 1.10') + def test_autopatching_empty_middleware(self): + with self.settings(MIDDLEWARE=[]): + patch(django=True) + django.setup() + assert django._datadog_patch + assert 'ddtrace.contrib.django' in settings.INSTALLED_APPS + assert settings.MIDDLEWARE[0] == 'ddtrace.contrib.django.TraceMiddleware' + # MIDDLEWARE_CLASSES gets created internally in django 1.10 & 1.11 but doesn't + # exist at all in 2.0. 
+ assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \ + 'ddtrace.contrib.django.TraceMiddleware' \ + not in settings.MIDDLEWARE_CLASSES + assert settings.MIDDLEWARE[-1] == 'ddtrace.contrib.django.TraceExceptionMiddleware' + assert not getattr(settings, 'MIDDLEWARE_CLASSES', None) or \ + 'ddtrace.contrib.django.TraceExceptionMiddleware' \ + not in settings.MIDDLEWARE_CLASSES diff --git a/tests/contrib/django/test_cache_backends.py b/tests/contrib/django/test_cache_backends.py new file mode 100644 index 0000000000..c0cf349090 --- /dev/null +++ b/tests/contrib/django/test_cache_backends.py @@ -0,0 +1,246 @@ +import time + +# 3rd party +from django.core.cache import caches + +# testing +from .utils import DjangoTraceTestCase +from ...util import assert_dict_issuperset + + +class DjangoCacheRedisTest(DjangoTraceTestCase): + """ + Ensures that the cache system is properly traced in + different cache backend + """ + def test_cache_redis_get(self): + # get the redis cache + cache = caches['redis'] + + # (trace) the cache miss + start = time.time() + cache.get('missing_key') + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'get' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 0 + + expected_meta = { + 'django.cache.backend': 'django_redis.cache.RedisCache', + 'django.cache.key': 'missing_key', + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end + + def test_cache_redis_get_many(self): + # get the redis cache + cache = caches['redis'] + + # (trace) the cache miss + start = time.time() + cache.get_many(['missing_key', 'another_key']) + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'get_many' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 0 + + expected_meta = { + 'django.cache.backend': 'django_redis.cache.RedisCache', + 'django.cache.key': str(['missing_key', 'another_key']), + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end + + def test_cache_pylibmc_get(self): + # get the redis cache + cache = caches['pylibmc'] + + # (trace) the cache miss + start = time.time() + cache.get('missing_key') + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'get' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 0 + + expected_meta = { + 'django.cache.backend': 'django.core.cache.backends.memcached.PyLibMCCache', + 'django.cache.key': 'missing_key', + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end + + def test_cache_pylibmc_get_many(self): + # get the redis cache + cache = caches['pylibmc'] + + # (trace) the cache miss + start = time.time() + cache.get_many(['missing_key', 'another_key']) + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'get_many' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert 
span.error == 0 + + expected_meta = { + 'django.cache.backend': 'django.core.cache.backends.memcached.PyLibMCCache', + 'django.cache.key': str(['missing_key', 'another_key']), + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end + + def test_cache_memcached_get(self): + # get the redis cache + cache = caches['python_memcached'] + + # (trace) the cache miss + start = time.time() + cache.get('missing_key') + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'get' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 0 + + expected_meta = { + 'django.cache.backend': 'django.core.cache.backends.memcached.MemcachedCache', + 'django.cache.key': 'missing_key', + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end + + def test_cache_memcached_get_many(self): + # get the redis cache + cache = caches['python_memcached'] + + # (trace) the cache miss + start = time.time() + cache.get_many(['missing_key', 'another_key']) + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'get_many' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 0 + + expected_meta = { + 'django.cache.backend': 'django.core.cache.backends.memcached.MemcachedCache', + 'django.cache.key': str(['missing_key', 'another_key']), + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end + + def test_cache_django_pylibmc_get(self): + # get the redis cache + cache = caches['django_pylibmc'] + + # (trace) the cache miss + start = time.time() + cache.get('missing_key') + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'get' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 0 + + expected_meta = { + 'django.cache.backend': 'django_pylibmc.memcached.PyLibMCCache', + 'django.cache.key': 'missing_key', + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end + + def test_cache_django_pylibmc_get_many(self): + # get the redis cache + cache = caches['django_pylibmc'] + + # (trace) the cache miss + start = time.time() + cache.get_many(['missing_key', 'another_key']) + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'get_many' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 0 + + expected_meta = { + 'django.cache.backend': 'django_pylibmc.memcached.PyLibMCCache', + 'django.cache.key': str(['missing_key', 'another_key']), + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end diff --git a/tests/contrib/django/test_cache_client.py b/tests/contrib/django/test_cache_client.py new file mode 100644 index 0000000000..8972507308 --- /dev/null +++ b/tests/contrib/django/test_cache_client.py @@ -0,0 +1,364 @@ 
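+# These tests exercise the cache client wrapper against the in-memory
+# LocMemCache backend configured as 'default' in the test settings.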
+import time + +# 3rd party +from django.core.cache import caches + +# testing +from .utils import DjangoTraceTestCase, override_ddtrace_settings +from ...util import assert_dict_issuperset + + +class DjangoCacheWrapperTest(DjangoTraceTestCase): + """ + Ensures that the cache system is properly traced + """ + def test_cache_get(self): + # get the default cache + cache = caches['default'] + + # (trace) the cache miss + start = time.time() + cache.get('missing_key') + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'get' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 0 + + expected_meta = { + 'django.cache.backend': 'django.core.cache.backends.locmem.LocMemCache', + 'django.cache.key': 'missing_key', + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end + + @override_ddtrace_settings(DEFAULT_CACHE_SERVICE='foo') + def test_cache_service_can_be_overriden(self): + # get the default cache + cache = caches['default'] + + # (trace) the cache miss + cache.get('missing_key') + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'foo' + + @override_ddtrace_settings(INSTRUMENT_CACHE=False) + def test_cache_disabled(self): + # get the default cache + cache = caches['default'] + + # (trace) the cache miss + cache.get('missing_key') + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 0 + + def test_cache_set(self): + # get the default cache + cache = caches['default'] + + # (trace) the cache miss + start = time.time() + cache.set('a_new_key', 50) + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'set' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 0 + + expected_meta = { + 'django.cache.backend': 'django.core.cache.backends.locmem.LocMemCache', + 'django.cache.key': 'a_new_key', + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end + + def test_cache_add(self): + # get the default cache + cache = caches['default'] + + # (trace) the cache miss + start = time.time() + cache.add('a_new_key', 50) + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'add' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 0 + + expected_meta = { + 'django.cache.backend': 'django.core.cache.backends.locmem.LocMemCache', + 'django.cache.key': 'a_new_key', + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end + + def test_cache_delete(self): + # get the default cache + cache = caches['default'] + + # (trace) the cache miss + start = time.time() + cache.delete('an_existing_key') + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == 'django' + assert span.resource == 'delete' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 0 + + expected_meta = { + 'django.cache.backend': 
'django.core.cache.backends.locmem.LocMemCache', + 'django.cache.key': 'an_existing_key', + 'env': 'test', + } + + assert_dict_issuperset(span.meta, expected_meta) + assert start < span.start < span.start + span.duration < end + + def test_cache_incr(self): + # get the default cache, set the value and reset the spans + cache = caches['default'] + cache.set('value', 0) + self.tracer.writer.spans = [] + + # (trace) the cache miss + start = time.time() + cache.incr('value') + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 2 + + span_incr = spans[0] + span_get = spans[1] + + # LocMemCache doesn't provide an atomic operation + assert span_get.service == 'django' + assert span_get.resource == 'get' + assert span_get.name == 'django.cache' + assert span_get.span_type == 'cache' + assert span_get.error == 0 + assert span_incr.service == 'django' + assert span_incr.resource == 'incr' + assert span_incr.name == 'django.cache' + assert span_incr.span_type == 'cache' + assert span_incr.error == 0 + + expected_meta = { + 'django.cache.backend': 'django.core.cache.backends.locmem.LocMemCache', + 'django.cache.key': 'value', + 'env': 'test', + } + + assert_dict_issuperset(span_get.meta, expected_meta) + assert_dict_issuperset(span_incr.meta, expected_meta) + assert start < span_incr.start < span_incr.start + span_incr.duration < end + + def test_cache_decr(self): + # get the default cache, set the value and reset the spans + cache = caches['default'] + cache.set('value', 0) + self.tracer.writer.spans = [] + + # (trace) the cache miss + start = time.time() + cache.decr('value') + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 3 + + span_decr = spans[0] + span_incr = spans[1] + span_get = spans[2] + + # LocMemCache doesn't provide an atomic operation + assert span_get.service == 'django' + assert span_get.resource == 'get' + assert span_get.name == 'django.cache' + assert span_get.span_type == 'cache' + assert span_get.error == 0 + assert span_incr.service == 'django' + assert span_incr.resource == 'incr' + assert span_incr.name == 'django.cache' + assert span_incr.span_type == 'cache' + assert span_incr.error == 0 + assert span_decr.service == 'django' + assert span_decr.resource == 'decr' + assert span_decr.name == 'django.cache' + assert span_decr.span_type == 'cache' + assert span_decr.error == 0 + + expected_meta = { + 'django.cache.backend': 'django.core.cache.backends.locmem.LocMemCache', + 'django.cache.key': 'value', + 'env': 'test', + } + + assert_dict_issuperset(span_get.meta, expected_meta) + assert_dict_issuperset(span_incr.meta, expected_meta) + assert_dict_issuperset(span_decr.meta, expected_meta) + assert start < span_decr.start < span_decr.start + span_decr.duration < end + + def test_cache_get_many(self): + # get the default cache + cache = caches['default'] + + # (trace) the cache miss + start = time.time() + cache.get_many(['missing_key', 'another_key']) + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 3 + + span_get_many = spans[0] + span_get_first = spans[1] + span_get_second = spans[2] + + # LocMemCache doesn't provide an atomic operation + assert span_get_first.service == 'django' + assert span_get_first.resource == 'get' + assert span_get_first.name == 'django.cache' + assert span_get_first.span_type == 'cache' + assert span_get_first.error == 0 + assert span_get_second.service == 'django' + assert span_get_second.resource == 'get' + assert span_get_second.name 
== 'django.cache' + assert span_get_second.span_type == 'cache' + assert span_get_second.error == 0 + assert span_get_many.service == 'django' + assert span_get_many.resource == 'get_many' + assert span_get_many.name == 'django.cache' + assert span_get_many.span_type == 'cache' + assert span_get_many.error == 0 + + expected_meta = { + 'django.cache.backend': 'django.core.cache.backends.locmem.LocMemCache', + 'django.cache.key': str(['missing_key', 'another_key']), + 'env': 'test', + } + + assert_dict_issuperset(span_get_many.meta, expected_meta) + assert start < span_get_many.start < span_get_many.start + span_get_many.duration < end + + def test_cache_set_many(self): + # get the default cache + cache = caches['default'] + + # (trace) the cache miss + start = time.time() + cache.set_many({'first_key': 1, 'second_key': 2}) + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 3 + + span_set_many = spans[0] + span_set_first = spans[1] + span_set_second = spans[2] + + # LocMemCache doesn't provide an atomic operation + assert span_set_first.service == 'django' + assert span_set_first.resource == 'set' + assert span_set_first.name == 'django.cache' + assert span_set_first.span_type == 'cache' + assert span_set_first.error == 0 + assert span_set_second.service == 'django' + assert span_set_second.resource == 'set' + assert span_set_second.name == 'django.cache' + assert span_set_second.span_type == 'cache' + assert span_set_second.error == 0 + assert span_set_many.service == 'django' + assert span_set_many.resource == 'set_many' + assert span_set_many.name == 'django.cache' + assert span_set_many.span_type == 'cache' + assert span_set_many.error == 0 + + assert span_set_many.meta['django.cache.backend'] == 'django.core.cache.backends.locmem.LocMemCache' + assert 'first_key' in span_set_many.meta['django.cache.key'] + assert 'second_key' in span_set_many.meta['django.cache.key'] + assert start < span_set_many.start < span_set_many.start + span_set_many.duration < end + + def test_cache_delete_many(self): + # get the default cache + cache = caches['default'] + + # (trace) the cache miss + start = time.time() + cache.delete_many(['missing_key', 'another_key']) + end = time.time() + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 3 + + span_delete_many = spans[0] + span_delete_first = spans[1] + span_delete_second = spans[2] + + # LocMemCache doesn't provide an atomic operation + assert span_delete_first.service == 'django' + assert span_delete_first.resource == 'delete' + assert span_delete_first.name == 'django.cache' + assert span_delete_first.span_type == 'cache' + assert span_delete_first.error == 0 + assert span_delete_second.service == 'django' + assert span_delete_second.resource == 'delete' + assert span_delete_second.name == 'django.cache' + assert span_delete_second.span_type == 'cache' + assert span_delete_second.error == 0 + assert span_delete_many.service == 'django' + assert span_delete_many.resource == 'delete_many' + assert span_delete_many.name == 'django.cache' + assert span_delete_many.span_type == 'cache' + assert span_delete_many.error == 0 + + assert span_delete_many.meta['django.cache.backend'] == 'django.core.cache.backends.locmem.LocMemCache' + assert 'missing_key' in span_delete_many.meta['django.cache.key'] + assert 'another_key' in span_delete_many.meta['django.cache.key'] + assert start < span_delete_many.start < span_delete_many.start + span_delete_many.duration < end diff --git 
a/tests/contrib/django/test_cache_views.py b/tests/contrib/django/test_cache_views.py new file mode 100644 index 0000000000..a26394809e --- /dev/null +++ b/tests/contrib/django/test_cache_views.py @@ -0,0 +1,93 @@ +# testing +from .compat import reverse +from .utils import DjangoTraceTestCase + + +class DjangoCacheViewTest(DjangoTraceTestCase): + """ + Ensures that the cache system is properly traced + """ + def test_cached_view(self): + # make the first request so that the view is cached + url = reverse('cached-users-list') + response = self.client.get(url) + assert response.status_code == 200 + + # check the first call for a non-cached view + spans = self.tracer.writer.pop() + assert len(spans) == 6 + # the cache miss + assert spans[1].resource == 'get' + # store the result in the cache + assert spans[4].resource == 'set' + assert spans[5].resource == 'set' + + # check if the cache hit is traced + response = self.client.get(url) + spans = self.tracer.writer.pop() + assert len(spans) == 3 + + span_header = spans[1] + span_view = spans[2] + assert span_view.service == 'django' + assert span_view.resource == 'get' + assert span_view.name == 'django.cache' + assert span_view.span_type == 'cache' + assert span_view.error == 0 + assert span_header.service == 'django' + assert span_header.resource == 'get' + assert span_header.name == 'django.cache' + assert span_header.span_type == 'cache' + assert span_header.error == 0 + + expected_meta_view = { + 'django.cache.backend': 'django.core.cache.backends.locmem.LocMemCache', + 'django.cache.key': ( + 'views.decorators.cache.cache_page..' + 'GET.03cdc1cc4aab71b038a6764e5fcabb82.d41d8cd98f00b204e9800998ecf8427e.en-us' + ), + 'env': 'test', + } + + expected_meta_header = { + 'django.cache.backend': 'django.core.cache.backends.locmem.LocMemCache', + 'django.cache.key': 'views.decorators.cache.cache_header..03cdc1cc4aab71b038a6764e5fcabb82.en-us', + 'env': 'test', + } + + assert span_view.meta == expected_meta_view + assert span_header.meta == expected_meta_header + + def test_cached_template(self): + # make the first request so that the view is cached + url = reverse('cached-template-list') + response = self.client.get(url) + assert response.status_code == 200 + + # check the first call for a non-cached view + spans = self.tracer.writer.pop() + assert len(spans) == 5 + # the cache miss + assert spans[2].resource == 'get' + # store the result in the cache + assert spans[4].resource == 'set' + + # check if the cache hit is traced + response = self.client.get(url) + spans = self.tracer.writer.pop() + assert len(spans) == 3 + + span_template_cache = spans[2] + assert span_template_cache.service == 'django' + assert span_template_cache.resource == 'get' + assert span_template_cache.name == 'django.cache' + assert span_template_cache.span_type == 'cache' + assert span_template_cache.error == 0 + + expected_meta = { + 'django.cache.backend': 'django.core.cache.backends.locmem.LocMemCache', + 'django.cache.key': 'template.cache.users_list.d41d8cd98f00b204e9800998ecf8427e', + 'env': 'test', + } + + assert span_template_cache.meta == expected_meta diff --git a/tests/contrib/django/test_cache_wrapper.py b/tests/contrib/django/test_cache_wrapper.py new file mode 100644 index 0000000000..11c314c009 --- /dev/null +++ b/tests/contrib/django/test_cache_wrapper.py @@ -0,0 +1,133 @@ +# 3rd party +from django.core.cache import caches +import pytest + +# testing +from .utils import DjangoTraceTestCase + + +class DjangoCacheTest(DjangoTraceTestCase): + """ + Ensures that 
the tracing doesn't break the Django + cache framework + """ + def test_wrapper_get_and_set(self): + # get the default cache + cache = caches['default'] + + value = cache.get('missing_key') + assert value is None + + cache.set('a_key', 50) + value = cache.get('a_key') + assert value == 50 + + def test_wrapper_add(self): + # get the default cache + cache = caches['default'] + + cache.add('a_key', 50) + value = cache.get('a_key') + assert value == 50 + + # add should not update a key if it's present + cache.add('a_key', 40) + value = cache.get('a_key') + assert value == 50 + + def test_wrapper_delete(self): + # get the default cache + cache = caches['default'] + + cache.set('a_key', 50) + cache.delete('a_key') + value = cache.get('a_key') + assert value is None + + def test_wrapper_incr_safety(self): + # get the default cache + cache = caches['default'] + + # it should fail not because of our wrapper + with pytest.raises(ValueError) as ex: + cache.incr('missing_key') + + # the error is not caused by our tracer + assert ex.value.args[0] == "Key 'missing_key' not found" + # an error trace must be sent + spans = self.tracer.writer.pop() + assert len(spans) == 2 + span = spans[0] + assert span.resource == 'incr' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 1 + + def test_wrapper_incr(self): + # get the default cache + cache = caches['default'] + + cache.set('value', 0) + value = cache.incr('value') + assert value == 1 + value = cache.get('value') + assert value == 1 + + def test_wrapper_decr_safety(self): + # get the default cache + cache = caches['default'] + + # it should fail not because of our wrapper + with pytest.raises(ValueError) as ex: + cache.decr('missing_key') + + # the error is not caused by our tracer + assert ex.value.args[0] == "Key 'missing_key' not found" + # an error trace must be sent + spans = self.tracer.writer.pop() + assert len(spans) == 3 + span = spans[0] + assert span.resource == 'decr' + assert span.name == 'django.cache' + assert span.span_type == 'cache' + assert span.error == 1 + + def test_wrapper_decr(self): + # get the default cache + cache = caches['default'] + + cache.set('value', 0) + value = cache.decr('value') + assert value == -1 + value = cache.get('value') + assert value == -1 + + def test_wrapper_get_many(self): + # get the default cache + cache = caches['default'] + + cache.set('a_key', 50) + cache.set('another_key', 60) + + values = cache.get_many(['a_key', 'another_key']) + assert isinstance(values, dict) + assert values['a_key'] == 50 + assert values['another_key'] == 60 + + def test_wrapper_set_many(self): + # get the default cache + cache = caches['default'] + + cache.set_many({'a_key': 50, 'another_key': 60}) + assert cache.get('a_key') == 50 + assert cache.get('another_key') == 60 + + def test_wrapper_delete_many(self): + # get the default cache + cache = caches['default'] + + cache.set('a_key', 50) + cache.set('another_key', 60) + cache.delete_many(['a_key', 'another_key']) + assert cache.get('a_key') is None + assert cache.get('another_key') is None diff --git a/tests/contrib/django/test_connection.py b/tests/contrib/django/test_connection.py new file mode 100644 index 0000000000..a18ffc222c --- /dev/null +++ b/tests/contrib/django/test_connection.py @@ -0,0 +1,72 @@ +import mock +import time + +# 3rd party +from django.contrib.auth.models import User + +from ddtrace.contrib.django.conf import settings +from ddtrace.contrib.django.patch import apply_django_patches, connections + +# testing 
+from .utils import DjangoTraceTestCase, override_ddtrace_settings
+
+
+class DjangoConnectionTest(DjangoTraceTestCase):
+    """
+    Ensures that database connections are properly traced
+    """
+    def test_connection(self):
+        # trace a simple query
+        start = time.time()
+        users = User.objects.count()
+        assert users == 0
+        end = time.time()
+
+        # tests
+        spans = self.tracer.writer.pop()
+        assert spans, spans
+        assert len(spans) == 1
+
+        span = spans[0]
+        assert span.name == 'sqlite.query'
+        assert span.service == 'defaultdb'
+        assert span.span_type == 'sql'
+        assert span.get_tag('django.db.vendor') == 'sqlite'
+        assert span.get_tag('django.db.alias') == 'default'
+        assert start < span.start < span.start + span.duration < end
+
+    def test_django_db_query_in_resource_not_in_tags(self):
+        User.objects.count()
+        spans = self.tracer.writer.pop()
+        assert spans[0].name == 'sqlite.query'
+        assert spans[0].resource == 'SELECT COUNT(*) AS "__count" FROM "auth_user"'
+        assert spans[0].get_tag('sql.query') is None
+
+    @override_ddtrace_settings(INSTRUMENT_DATABASE=False)
+    def test_connection_disabled(self):
+        # trace a simple query
+        users = User.objects.count()
+        assert users == 0
+
+        # tests
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 0
+
+    def test_should_append_database_prefix(self):
+        # trace a simple query and check if the prefix is correctly
+        # loaded from Django settings
+        settings.DEFAULT_DATABASE_PREFIX = 'my_prefix_db'
+        User.objects.count()
+
+        traces = self.tracer.writer.pop_traces()
+        assert len(traces) == 1
+        assert len(traces[0]) == 1
+        span = traces[0][0]
+        assert span.service == 'my_prefix_db-defaultdb'
+
+    def test_apply_django_patches_calls_connections_all(self):
+        with mock.patch.object(connections, 'all') as mock_connections:
+            apply_django_patches(patch_rest_framework=False)
+
+        assert mock_connections.call_count == 1
+        assert mock_connections.mock_calls == [mock.call()]
diff --git a/tests/contrib/django/test_instrumentation.py b/tests/contrib/django/test_instrumentation.py
new file mode 100644
index 0000000000..a8960efcb5
--- /dev/null
+++ b/tests/contrib/django/test_instrumentation.py
@@ -0,0 +1,35 @@
+# project
+from ddtrace.contrib.django.conf import DatadogSettings
+
+# testing
+from .utils import DjangoTraceTestCase
+
+
+class DjangoInstrumentationTest(DjangoTraceTestCase):
+    """
+    Ensures that Django is correctly configured according to
+    the user's settings
+    """
+    def test_tracer_flags(self):
+        assert self.tracer.enabled
+        assert self.tracer.writer.api.hostname == 'localhost'
+        assert self.tracer.writer.api.port == 8126
+        assert self.tracer.tags == {'env': 'test'}
+
+    def test_environment_vars(self):
+        # Django defaults can be overridden by env vars, ensuring that
+        # environment strings are properly converted
+        with self.override_env(dict(
+            DATADOG_TRACE_AGENT_HOSTNAME='agent.consul.local',
+            DATADOG_TRACE_AGENT_PORT='58126'
+        )):
+            settings = DatadogSettings()
+            assert settings.AGENT_HOSTNAME == 'agent.consul.local'
+            assert settings.AGENT_PORT == 58126
+
+    def test_environment_var_wrong_port(self):
+        # ensures that a wrong agent port doesn't crash the system
+        # and defaults to 8126
+        with self.override_env(dict(DATADOG_TRACE_AGENT_PORT='something')):
+            settings = DatadogSettings()
+            assert settings.AGENT_PORT == 8126
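+
+# Illustrative sketch: these are the same environment variables a user would
+# set when running a real Django process (hostname/port below are placeholder
+# values):
+#
+#     DATADOG_TRACE_AGENT_HOSTNAME=agent.example.com \
+#     DATADOG_TRACE_AGENT_PORT=8126 python manage.py runserver
diff --git a/tests/contrib/django/test_middleware.py b/tests/contrib/django/test_middleware.py
new file mode 100644
index 0000000000..fb4271a077
--- /dev/null
+++ b/tests/contrib/django/test_middleware.py
@@ -0,0 +1,460 @@
+# 3rd party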
+from django.test import modify_settings
+from django.db import connections
+
+# project
+from ddtrace import config
+from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY, SAMPLING_PRIORITY_KEY
+from ddtrace.contrib.django.db import unpatch_conn
+from ddtrace.ext import errors, http
+
+# testing
+from tests.opentracer.utils import init_tracer
+from .compat import reverse
+from .utils import DjangoTraceTestCase, override_ddtrace_settings
+from ...utils import assert_span_http_status_code
+
+
+class DjangoMiddlewareTest(DjangoTraceTestCase):
+    """
+    Ensures that the middleware traces all Django internals
+    """
+    def test_middleware_trace_request(self, query_string=''):
+        # ensures that the internals are properly traced
+        url = reverse('users-list')
+        if query_string:
+            fqs = '?' + query_string
+        else:
+            fqs = ''
+        response = self.client.get(url + fqs)
+        assert response.status_code == 200
+
+        # check for spans
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 3
+        sp_request = spans[0]
+        sp_template = spans[1]
+        sp_database = spans[2]
+        assert sp_database.get_tag('django.db.vendor') == 'sqlite'
+        assert sp_template.get_tag('django.template_name') == 'users_list.html'
+        assert_span_http_status_code(sp_request, 200)
+        assert sp_request.get_tag(http.URL) == 'http://testserver/users/'
+        assert sp_request.get_tag('django.user.is_authenticated') == 'False'
+        assert sp_request.get_tag('http.method') == 'GET'
+        assert sp_request.span_type == 'web'
+        assert sp_request.resource == 'tests.contrib.django.app.views.UserList'
+        if config.django.trace_query_string:
+            assert sp_request.get_tag(http.QUERY_STRING) == query_string
+        else:
+            assert http.QUERY_STRING not in sp_request.meta
+
+    def test_middleware_trace_request_qs(self):
+        return self.test_middleware_trace_request('foo=bar')
+
+    def test_middleware_trace_request_multi_qs(self):
+        return self.test_middleware_trace_request('foo=bar&foo=baz&x=y')
+
+    def test_middleware_trace_request_no_qs_trace(self):
+        with self.override_global_config(dict(trace_query_string=True)):
+            return self.test_middleware_trace_request()
+
+    def test_middleware_trace_request_qs_trace(self):
+        with self.override_global_config(dict(trace_query_string=True)):
+            return self.test_middleware_trace_request('foo=bar')
+
+    def test_middleware_trace_request_multi_qs_trace(self):
+        with self.override_global_config(dict(trace_query_string=True)):
+            return self.test_middleware_trace_request('foo=bar&foo=baz&x=y')
+
+    def test_analytics_global_on_integration_default(self):
+        """
+        When making a request
+        When an integration trace search sample rate is not set and trace search is globally enabled
+        We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=True)):
+            url = reverse('users-list')
+            response = self.client.get(url)
+            self.assertEqual(response.status_code, 200)
+
+            spans = self.tracer.writer.pop()
+            assert len(spans) == 3
+            sp_request = spans[0]
+            sp_template = spans[1]
+            sp_database = spans[2]
+            self.assertEqual(sp_request.name, 'django.request')
+            self.assertEqual(sp_request.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
+            self.assertIsNone(sp_template.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+            self.assertIsNone(sp_database.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    @override_ddtrace_settings(ANALYTICS_ENABLED=True, ANALYTICS_SAMPLE_RATE=0.5)
+    def test_analytics_global_on_integration_on(self):
+        """
+        When making a request
+        When an integration trace search is enabled with a sample rate set and trace search is globally enabled
+        We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=True)):
+            url = reverse('users-list')
+            response = self.client.get(url)
+            self.assertEqual(response.status_code, 200)
+
+            spans = self.tracer.writer.pop()
+            assert len(spans) == 3
+            sp_request = spans[0]
+            sp_template = spans[1]
+            sp_database = spans[2]
+            self.assertEqual(sp_request.name, 'django.request')
+            self.assertEqual(sp_request.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
+            self.assertIsNone(sp_template.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+            self.assertIsNone(sp_database.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    def test_analytics_global_off_integration_default(self):
+        """
+        When making a request
+        When an integration trace search sample rate is not set and trace search is globally disabled
+        We expect the root span to not include the tag
+        """
+        with self.override_global_config(dict(analytics_enabled=False)):
+            url = reverse('users-list')
+            response = self.client.get(url)
+            self.assertEqual(response.status_code, 200)
+
+            spans = self.tracer.writer.pop()
+            assert len(spans) == 3
+            sp_request = spans[0]
+            sp_template = spans[1]
+            sp_database = spans[2]
+            self.assertEqual(sp_request.name, 'django.request')
+            self.assertIsNone(sp_request.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+            self.assertIsNone(sp_template.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+            self.assertIsNone(sp_database.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    @override_ddtrace_settings(ANALYTICS_ENABLED=True, ANALYTICS_SAMPLE_RATE=0.5)
+    def test_analytics_global_off_integration_on(self):
+        """
+        When making a request
+        When an integration trace search is enabled with a sample rate set and trace search is globally disabled
+        We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=False)):
+            url = reverse('users-list')
+            response = self.client.get(url)
+            self.assertEqual(response.status_code, 200)
+
+            spans = self.tracer.writer.pop()
+            assert len(spans) == 3
+            sp_request = spans[0]
+            sp_template = spans[1]
+            sp_database = spans[2]
+            self.assertEqual(sp_request.name, 'django.request')
+            self.assertEqual(sp_request.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
+            self.assertIsNone(sp_template.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+            self.assertIsNone(sp_database.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    @override_ddtrace_settings(ANALYTICS_ENABLED=True, ANALYTICS_SAMPLE_RATE=None)
+    def test_analytics_global_off_integration_on_and_none(self):
+        """
+        When making a request
+        When an integration trace search is enabled
+        The sample rate is set to None
+        Trace search is globally disabled
+        We expect the root span to not include the tag
+        """
+        with self.override_global_config(dict(analytics_enabled=False)):
+            url = reverse('users-list')
+            response = self.client.get(url)
+            self.assertEqual(response.status_code, 200)
+
+            spans = self.tracer.writer.pop()
+            assert len(spans) == 3
+            sp_request = spans[0]
+            sp_template = spans[1]
+            sp_database = spans[2]
+            self.assertEqual(sp_request.name, 'django.request')
+            assert sp_request.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
+            assert sp_template.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
+            assert sp_database.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
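+
+    # Illustrative sketch (an assumption mirroring the override keys used
+    # above, not a documented example): the user-facing equivalent of these
+    # overrides would be the Django setting
+    #
+    #     DATADOG_TRACE = {
+    #         'ANALYTICS_ENABLED': True,
+    #         'ANALYTICS_SAMPLE_RATE': 0.5,
+    #     }
+
+    def test_database_patch(self):
+        # We want to test that a connection-recreation event causes connections
+        # to get repatched. However, since Django tests run inside an atomic
+        # transaction, we can't change the connection. Instead we test that the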
+        # connection does get repatched if it's not patched.
+        for conn in connections.all():
+            unpatch_conn(conn)
+        # ensures that the internals are properly traced
+        url = reverse('users-list')
+        response = self.client.get(url)
+        assert response.status_code == 200
+
+        # We would be missing span #3, the database span, if the connection
+        # wasn't patched.
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 3
+        assert spans[0].name == 'django.request'
+        assert spans[1].name == 'django.template'
+        assert spans[2].name == 'sqlite.query'
+
+    def test_middleware_trace_errors(self):
+        # ensures that the internals are properly traced
+        url = reverse('forbidden-view')
+        response = self.client.get(url)
+        assert response.status_code == 403
+
+        # check for spans
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        span = spans[0]
+        assert_span_http_status_code(span, 403)
+        assert span.get_tag(http.URL) == 'http://testserver/fail-view/'
+        assert span.resource == 'tests.contrib.django.app.views.ForbiddenView'
+
+    def test_middleware_trace_function_based_view(self):
+        # ensures that the internals are properly traced when using function-based views
+        url = reverse('fn-view')
+        response = self.client.get(url)
+        assert response.status_code == 200
+
+        # check for spans
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        span = spans[0]
+        assert_span_http_status_code(span, 200)
+        assert span.get_tag(http.URL) == 'http://testserver/fn-view/'
+        assert span.resource == 'tests.contrib.django.app.views.function_view'
+
+    def test_middleware_trace_error_500(self):
+        # ensures we trace exceptions generated by views
+        url = reverse('error-500')
+        response = self.client.get(url)
+        assert response.status_code == 500
+
+        # check for spans
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        span = spans[0]
+        assert span.error == 1
+        assert_span_http_status_code(span, 500)
+        assert span.get_tag(http.URL) == 'http://testserver/error-500/'
+        assert span.resource == 'tests.contrib.django.app.views.error_500'
+        assert 'Error 500' in span.get_tag('error.stack')
+
+    def test_middleware_trace_callable_view(self):
+        # ensures that the internals are properly traced when using callable views
+        url = reverse('feed-view')
+        response = self.client.get(url)
+        assert response.status_code == 200
+
+        # check for spans
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        span = spans[0]
+        assert_span_http_status_code(span, 200)
+        assert span.get_tag(http.URL) == 'http://testserver/feed-view/'
+        assert span.resource == 'tests.contrib.django.app.views.FeedView'
+
+    def test_middleware_trace_partial_based_view(self):
+        # ensures that the internals are properly traced when using partial-based views
+        url = reverse('partial-view')
+        response = self.client.get(url)
+        assert response.status_code == 200
+
+        # check for spans
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        span = spans[0]
+        assert_span_http_status_code(span, 200)
+        assert span.get_tag(http.URL) == 'http://testserver/partial-view/'
+        assert span.resource == 'partial'
+
+    def test_middleware_trace_lambda_based_view(self):
+        # ensures that the internals are properly traced when using lambda-based views
+        url = reverse('lambda-view')
+        response = self.client.get(url)
+        assert response.status_code == 200
+
+        # check for spans
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        span = spans[0]
+        assert_span_http_status_code(span, 200)
+        assert span.get_tag(http.URL) == 'http://testserver/lambda-view/'
+        assert span.resource == 'tests.contrib.django.app.views.'
+
+    @modify_settings(
+        MIDDLEWARE={
+            'remove': 'django.contrib.auth.middleware.AuthenticationMiddleware',
+        },
+        MIDDLEWARE_CLASSES={
+            'remove': 'django.contrib.auth.middleware.AuthenticationMiddleware',
+        },
+    )
+    def test_middleware_without_user(self):
+        # remove the AuthenticationMiddleware so that the ``request``
+        # object doesn't have the ``user`` field
+        url = reverse('users-list')
+        response = self.client.get(url)
+        assert response.status_code == 200
+
+        # check for spans
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 3
+        sp_request = spans[0]
+        assert_span_http_status_code(sp_request, 200)
+        assert sp_request.get_tag('django.user.is_authenticated') is None
+
+    def test_middleware_propagation(self):
+        # ensures that we properly propagate the http context
+        url = reverse('users-list')
+        headers = {
+            'x-datadog-trace-id': '100',
+            'x-datadog-parent-id': '42',
+            'x-datadog-sampling-priority': '2',
+        }
+        response = self.client.get(url, **headers)
+        assert response.status_code == 200
+
+        # check for spans
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 3
+        sp_request = spans[0]
+
+        # Check for proper propagated attributes
+        assert sp_request.trace_id == 100
+        assert sp_request.parent_id == 42
+        assert sp_request.get_metric(SAMPLING_PRIORITY_KEY) == 2
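+
+    # For context, an illustrative sketch (not part of this test): an
+    # upstream ddtrace-instrumented client would generate the headers above
+    # with the HTTP propagator rather than by hand:
+    #
+    #     from ddtrace.propagation.http import HTTPPropagator
+    #     headers = {}
+    #     HTTPPropagator().inject(span.context, headers)
+
+    @override_ddtrace_settings(DISTRIBUTED_TRACING=False)
+    def test_middleware_no_propagation(self):
+        # ensures that the http context is not propagated when
+        # distributed tracing is disabled
+        url = reverse('users-list')
+        headers = {
+            'x-datadog-trace-id': '100',
+            'x-datadog-parent-id': '42',
+            'x-datadog-sampling-priority': '2',
+        }
+        response = self.client.get(url, **headers)
+        assert response.status_code == 200
+
+        # check for spans
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 3
+        sp_request = spans[0]
+
+        # Check that propagation didn't happen
+        assert sp_request.trace_id != 100
+        assert sp_request.parent_id != 42
+        assert sp_request.get_metric(SAMPLING_PRIORITY_KEY) != 2
+
+    @modify_settings(
+        MIDDLEWARE={
+            'append': 'tests.contrib.django.app.middlewares.HandleErrorMiddlewareSuccess',
+        },
+        MIDDLEWARE_CLASSES={
+            'append': 'tests.contrib.django.app.middlewares.HandleErrorMiddlewareSuccess',
+        },
+    )
+    def test_middleware_handled_view_exception_success(self):
+        """ Test that when an exception is raised in a view and then handled,
+        the resulting span does not possess error properties.
+        """
+        url = reverse('error-500')
+        response = self.client.get(url)
+        assert response.status_code == 200
+
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+
+        sp_request = spans[0]
+
+        assert sp_request.error == 0
+        assert sp_request.get_tag(errors.ERROR_STACK) is None
+        assert sp_request.get_tag(errors.ERROR_MSG) is None
+        assert sp_request.get_tag(errors.ERROR_TYPE) is None
+
+    @modify_settings(
+        MIDDLEWARE={
+            'append': 'tests.contrib.django.app.middlewares.HandleErrorMiddlewareClientError',
+        },
+        MIDDLEWARE_CLASSES={
+            'append': 'tests.contrib.django.app.middlewares.HandleErrorMiddlewareClientError',
+        },
+    )
+    def test_middleware_handled_view_exception_client_error(self):
+        """ Test that when an exception is raised in a view and then handled,
+        the resulting span does not possess error properties.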
+ """ + url = reverse('error-500') + response = self.client.get(url) + assert response.status_code == 404 + + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + sp_request = spans[0] + + assert sp_request.error == 0 + assert sp_request.get_tag(errors.ERROR_STACK) is None + assert sp_request.get_tag(errors.ERROR_MSG) is None + assert sp_request.get_tag(errors.ERROR_TYPE) is None + + def test_middleware_trace_request_ot(self): + """OpenTracing version of test_middleware_trace_request.""" + ot_tracer = init_tracer('my_svc', self.tracer) + + # ensures that the internals are properly traced + url = reverse('users-list') + with ot_tracer.start_active_span('ot_span'): + response = self.client.get(url) + assert response.status_code == 200 + + # check for spans + spans = self.tracer.writer.pop() + assert len(spans) == 4 + ot_span = spans[0] + sp_request = spans[1] + sp_template = spans[2] + sp_database = spans[3] + + # confirm parenting + assert ot_span.parent_id is None + assert sp_request.parent_id == ot_span.span_id + + assert ot_span.resource == 'ot_span' + assert ot_span.service == 'my_svc' + + assert sp_database.get_tag('django.db.vendor') == 'sqlite' + assert sp_template.get_tag('django.template_name') == 'users_list.html' + assert_span_http_status_code(sp_request, 200) + assert sp_request.get_tag(http.URL) == 'http://testserver/users/' + assert sp_request.get_tag('django.user.is_authenticated') == 'False' + assert sp_request.get_tag('http.method') == 'GET' + + def test_middleware_trace_request_404(self): + """ + When making a request to an unknown url in django + when we do not have a 404 view handler set + we set a resource name for the default view handler + """ + response = self.client.get('/unknown-url') + assert response.status_code == 404 + + # check for spans + spans = self.tracer.writer.pop() + assert len(spans) == 2 + sp_request = spans[0] + sp_template = spans[1] + + # Template + # DEV: The template name is `unknown` because unless they define a `404.html` + # django generates the template from a string, which will not have a `Template.name` set + assert sp_template.get_tag('django.template_name') == 'unknown' + + # Request + assert_span_http_status_code(sp_request, 404) + assert sp_request.get_tag(http.URL) == 'http://testserver/unknown-url' + assert sp_request.get_tag('django.user.is_authenticated') == 'False' + assert sp_request.get_tag('http.method') == 'GET' + assert sp_request.span_type == 'web' + assert sp_request.resource == 'django.views.defaults.page_not_found' diff --git a/tests/contrib/django/test_templates.py b/tests/contrib/django/test_templates.py new file mode 100644 index 0000000000..8e8b6ea021 --- /dev/null +++ b/tests/contrib/django/test_templates.py @@ -0,0 +1,46 @@ +import time + +# 3rd party +from django.template import Context, Template + +# testing +from .utils import DjangoTraceTestCase, override_ddtrace_settings + + +class DjangoTemplateTest(DjangoTraceTestCase): + """ + Ensures that the template system is properly traced + """ + def test_template(self): + # prepare a base template using the default engine + template = Template('Hello {{name}}!') + ctx = Context({'name': 'Django'}) + + # (trace) the template rendering + start = time.time() + assert template.render(ctx) == 'Hello Django!' 
+        end = time.time()
+
+        # tests
+        spans = self.tracer.writer.pop()
+        assert spans, spans
+        assert len(spans) == 1
+
+        span = spans[0]
+        assert span.span_type == 'template'
+        assert span.name == 'django.template'
+        assert span.get_tag('django.template_name') == 'unknown'
+        assert start < span.start < span.start + span.duration < end
+
+    @override_ddtrace_settings(INSTRUMENT_TEMPLATE=False)
+    def test_template_disabled(self):
+        # prepare a base template using the default engine
+        template = Template('Hello {{name}}!')
+        ctx = Context({'name': 'Django'})
+
+        # (trace) the template rendering
+        assert template.render(ctx) == 'Hello Django!'
+
+        # tests
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 0
diff --git a/tests/contrib/django/test_tracing_disabled.py b/tests/contrib/django/test_tracing_disabled.py
new file mode 100644
index 0000000000..61605f2dea
--- /dev/null
+++ b/tests/contrib/django/test_tracing_disabled.py
@@ -0,0 +1,42 @@
+# 3rd party
+from django.apps import apps
+from django.test import TestCase
+
+# project
+from ddtrace.tracer import Tracer
+from ddtrace.contrib.django.conf import settings
+
+# testing
+from ...test_tracer import DummyWriter
+
+
+class DjangoTracingDisabledTest(TestCase):
+    def setUp(self):
+        # backup previous conf
+        self.backupEnabled = settings.ENABLED
+        self.backupTracer = settings.TRACER
+
+        # Use a new tracer to be sure that a new service
+        # would be sent to the writer
+        self.tracer = Tracer()
+        self.tracer.writer = DummyWriter()
+
+        # Restart app with tracing disabled
+        settings.ENABLED = False
+        self.app = apps.get_app_config('datadog_django')
+        self.app.ready()
+
+    def tearDown(self):
+        # Reset the original settings
+        settings.ENABLED = self.backupEnabled
+        settings.TRACER = self.backupTracer
+        self.app.ready()
+
+    def test_no_service_info_is_written(self):
+        services = self.tracer.writer.pop_services()
+        assert len(services) == 0
+
+    def test_no_trace_is_written(self):
+        settings.TRACER.trace('client.testing').finish()
+        traces = self.tracer.writer.pop_traces()
+        assert len(traces) == 0
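+
+# Illustrative sketch: DummyWriter is the in-memory test double used across
+# this suite; it buffers spans instead of flushing them to an agent, e.g.:
+#
+#     tracer = Tracer()
+#     tracer.writer = DummyWriter()
+#     tracer.trace('client.testing').finish()
+#     spans = tracer.writer.pop()  # inspect the buffered spans
diff --git a/tests/contrib/django/test_utils.py b/tests/contrib/django/test_utils.py
new file mode 100644
index 0000000000..2d7cf07405
--- /dev/null
+++ b/tests/contrib/django/test_utils.py
@@ -0,0 +1,17 @@
+# 3rd party
+from django.test import TestCase
+
+# project
+from ddtrace.contrib.django.utils import quantize_key_values
+
+
+class DjangoUtilsTest(TestCase):
+    def test_quantize_key_values(self):
+        """
+        Ensure that the utility functions properly convert a dictionary object
+        """
+        key = {'second_key': 2, 'first_key': 1}
+        result = quantize_key_values(key)
+        assert len(result) == 2
+        assert 'first_key' in result
+        assert 'second_key' in result
diff --git a/tests/contrib/django/utils.py b/tests/contrib/django/utils.py
new file mode 100644
index 0000000000..9cd2c420f3
--- /dev/null
+++ b/tests/contrib/django/utils.py
@@ -0,0 +1,87 @@
+from functools import wraps
+
+# 3rd party
+from django.apps import apps
+from django.test import TestCase
+
+# project
+from ddtrace.tracer import Tracer
+from ddtrace.contrib.django.conf import settings
+from ddtrace.contrib.django.db import patch_db, unpatch_db
+from ddtrace.contrib.django.cache import unpatch_cache
+from ddtrace.contrib.django.templates import unpatch_template
+from ddtrace.contrib.django.middleware import remove_exception_middleware, remove_trace_middleware
+
+# testing
+from ...base import BaseTestCase
+from ...test_tracer import DummyWriter
+
+
+# testing tracer
+tracer = Tracer()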
+tracer.writer = DummyWriter()
+
+
+class DjangoTraceTestCase(BaseTestCase, TestCase):
+    """
+    Base class that provides an internal tracer according to given
+    Datadog settings. This class ensures that the tracer spans are
+    properly reset after each run. The tracer is available in
+    the ``self.tracer`` attribute.
+    """
+    def setUp(self):
+        # assign the default tracer
+        self.tracer = settings.TRACER
+        # empty the tracer spans from previous operations
+        # such as database creation queries
+        self.tracer.writer.spans = []
+        self.tracer.writer.pop_traces()
+        # the DB gets unpatched by some tests, so ensure it is patched here
+        patch_db(self.tracer)
+
+    def tearDown(self):
+        # empty the tracer spans from test operations
+        self.tracer.writer.spans = []
+        self.tracer.writer.pop_traces()
+
+
+class override_ddtrace_settings(object):
+    def __init__(self, *args, **kwargs):
+        self.items = list(kwargs.items())
+
+    def unpatch_all(self):
+        unpatch_cache()
+        unpatch_db()
+        unpatch_template()
+        remove_trace_middleware()
+        remove_exception_middleware()
+
+    def __enter__(self):
+        self.enable()
+
+    def __exit__(self, exc_type, exc_value, traceback):
+        self.disable()
+
+    def enable(self):
+        self.backup = {}
+        for name, value in self.items:
+            self.backup[name] = getattr(settings, name)
+            setattr(settings, name, value)
+        self.unpatch_all()
+        app = apps.get_app_config('datadog_django')
+        app.ready()
+
+    def disable(self):
+        for name, value in self.items:
+            setattr(settings, name, self.backup[name])
+        self.unpatch_all()
+        remove_exception_middleware()
+        app = apps.get_app_config('datadog_django')
+        app.ready()
+
+    def __call__(self, func):
+        @wraps(func)
+        def inner(*args, **kwargs):
+            with self:
+                return func(*args, **kwargs)
+        return inner
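+
+# Illustrative usage sketch: the override works both as a function decorator
+# and as a context manager, re-initializing the Django app config on
+# enter/exit so the new settings take effect:
+#
+#     @override_ddtrace_settings(INSTRUMENT_DATABASE=False)
+#     def test_database_disabled(self):
+#         ...
+#
+#     with override_ddtrace_settings(DISTRIBUTED_TRACING=False):
+#         ...
diff --git a/tests/contrib/djangorestframework/__init__.py b/tests/contrib/djangorestframework/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/contrib/djangorestframework/app/__init__.py b/tests/contrib/djangorestframework/app/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/contrib/djangorestframework/app/exceptions.py b/tests/contrib/djangorestframework/app/exceptions.py
new file mode 100644
index 0000000000..0f4fce70e4
--- /dev/null
+++ b/tests/contrib/djangorestframework/app/exceptions.py
@@ -0,0 +1,13 @@
+from rest_framework.views import exception_handler
+from rest_framework.response import Response
+from rest_framework import status
+
+
+def custom_exception_handler(exc, context):
+    response = exception_handler(exc, context)
+
+    # We overwrite the response status code to 500
+    if response is not None:
+        return Response({'detail': str(exc)}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
+
+    return response
diff --git a/tests/contrib/djangorestframework/app/settings.py b/tests/contrib/djangorestframework/app/settings.py
new file mode 100644
index 0000000000..ac24bd45fd
--- /dev/null
+++ b/tests/contrib/djangorestframework/app/settings.py
@@ -0,0 +1,115 @@
+"""
+Settings configuration for the Django web framework.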
Update this +configuration if you need to change the default behavior of +Django during tests +""" +import os +import django + + +BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) + +DATABASES = { + 'default': { + 'ENGINE': 'django.db.backends.sqlite3', + 'NAME': ':memory:' + } +} + +SITE_ID = 1 +SECRET_KEY = 'not_very_secret_in_tests' +USE_I18N = True +USE_L10N = True +STATIC_URL = '/static/' +ROOT_URLCONF = 'tests.contrib.djangorestframework.app.views' + +TEMPLATES = [ + { + 'BACKEND': 'django.template.backends.django.DjangoTemplates', + 'DIRS': [ + os.path.join(BASE_DIR, 'app', 'templates'), + ], + 'APP_DIRS': True, + 'OPTIONS': { + 'context_processors': [ + 'django.template.context_processors.debug', + 'django.template.context_processors.request', + 'django.contrib.auth.context_processors.auth', + 'django.contrib.messages.context_processors.messages', + ], + }, + }, +] + +if (1, 10) <= django.VERSION < (2, 0): + MIDDLEWARE = [ + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', + 'django.middleware.security.SecurityMiddleware', + + 'tests.contrib.django.app.middlewares.CatchExceptionMiddleware', + ] + +# Django 2.0 has different defaults +elif django.VERSION >= (2, 0): + MIDDLEWARE = [ + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', + 'django.middleware.security.SecurityMiddleware', + + 'tests.contrib.django.app.middlewares.CatchExceptionMiddleware', + ] + +# Pre 1.10 style +else: + MIDDLEWARE_CLASSES = [ + 'django.contrib.sessions.middleware.SessionMiddleware', + 'django.middleware.common.CommonMiddleware', + 'django.middleware.csrf.CsrfViewMiddleware', + 'django.contrib.auth.middleware.AuthenticationMiddleware', + 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', + 'django.contrib.messages.middleware.MessageMiddleware', + 'django.middleware.clickjacking.XFrameOptionsMiddleware', + 'django.middleware.security.SecurityMiddleware', + + 'tests.contrib.django.app.middlewares.CatchExceptionMiddleware', + ] + +INSTALLED_APPS = [ + 'django.contrib.admin', + 'django.contrib.auth', + 'django.contrib.contenttypes', + 'django.contrib.sessions', + + # tracer app + 'ddtrace.contrib.django', + + # djangorestframework + 'rest_framework' +] + +DATADOG_TRACE = { + # tracer with a DummyWriter + 'TRACER': 'tests.contrib.django.utils.tracer', + 'ENABLED': True, + 'TAGS': { + 'env': 'test', + }, +} + +REST_FRAMEWORK = { + 'DEFAULT_PERMISSION_CLASSES': [ + 'rest_framework.permissions.IsAdminUser', + ], + + 'EXCEPTION_HANDLER': 'tests.contrib.djangorestframework.app.exceptions.custom_exception_handler' +} diff --git a/tests/contrib/djangorestframework/app/views.py b/tests/contrib/djangorestframework/app/views.py new file mode 100644 index 0000000000..88179c6771 --- /dev/null +++ b/tests/contrib/djangorestframework/app/views.py @@ -0,0 +1,29 @@ +from django.conf.urls import url, include +from django.contrib.auth.models import User + +from rest_framework 
import viewsets, routers, serializers + + +class UserSerializer(serializers.HyperlinkedModelSerializer): + class Meta: + model = User + fields = ('url', 'username', 'email', 'groups') + + +class UserViewSet(viewsets.ModelViewSet): + """ + API endpoint that allows users to be viewed or edited. + """ + queryset = User.objects.all().order_by('-date_joined') + serializer_class = UserSerializer + + +router = routers.DefaultRouter() +router.register(r'users', UserViewSet) + +# Wire up our API using automatic URL routing. +# Additionally, we include login URLs for the browsable API. +urlpatterns = [ + url(r'^', include(router.urls)), + url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')), +] diff --git a/tests/contrib/djangorestframework/conftest.py b/tests/contrib/djangorestframework/conftest.py new file mode 100644 index 0000000000..a30ef07cfd --- /dev/null +++ b/tests/contrib/djangorestframework/conftest.py @@ -0,0 +1,16 @@ +import os +import django +from django.conf import settings + +# We manually designate which settings we will be using in an environment variable +# This is similar to what occurs in the `manage.py` +os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'tests.contrib.djangorestframework.app.settings') + + +# `pytest` automatically calls this function once when tests are run. +def pytest_configure(): + settings.DEBUG = False + if django.VERSION < (1, 7, 0): + settings.configure() + else: + django.setup() diff --git a/tests/contrib/djangorestframework/test_djangorestframework.py b/tests/contrib/djangorestframework/test_djangorestframework.py new file mode 100644 index 0000000000..a91c9f9f01 --- /dev/null +++ b/tests/contrib/djangorestframework/test_djangorestframework.py @@ -0,0 +1,62 @@ +import django +from django.apps import apps +from unittest import skipIf + +from tests.contrib.django.utils import DjangoTraceTestCase +from ...utils import assert_span_http_status_code + + +@skipIf(django.VERSION < (1, 10), 'requires django version >= 1.10') +class RestFrameworkTest(DjangoTraceTestCase): + def setUp(self): + super(RestFrameworkTest, self).setUp() + + # would raise an exception + from rest_framework.views import APIView + from ddtrace.contrib.django.restframework import unpatch_restframework + + self.APIView = APIView + self.unpatch_restframework = unpatch_restframework + + def test_setup(self): + assert apps.is_installed('rest_framework') + assert hasattr(self.APIView, '_datadog_patch') + + def test_unpatch(self): + self.unpatch_restframework() + assert not getattr(self.APIView, '_datadog_patch') + + response = self.client.get('/users/') + + # Our custom exception handler is setting the status code to 500 + assert response.status_code == 500 + + # check for spans + spans = self.tracer.writer.pop() + assert len(spans) == 1 + sp = spans[0] + assert sp.name == 'django.request' + assert sp.resource == 'tests.contrib.djangorestframework.app.views.UserViewSet' + assert sp.error == 0 + assert sp.span_type == 'web' + assert_span_http_status_code(sp, 500) + assert sp.get_tag('error.msg') is None + + def test_trace_exceptions(self): + response = self.client.get('/users/') + + # Our custom exception handler is setting the status code to 500 + assert response.status_code == 500 + + # check for spans + spans = self.tracer.writer.pop() + assert len(spans) == 1 + sp = spans[0] + assert sp.name == 'django.request' + assert sp.resource == 'tests.contrib.djangorestframework.app.views.UserViewSet' + assert sp.error == 1 + assert sp.span_type == 'web' + assert 
sp.get_tag('http.method') == 'GET' + assert_span_http_status_code(sp, 500) + assert sp.get_tag('error.msg') == 'Authentication credentials were not provided.' + assert 'NotAuthenticated' in sp.get_tag('error.stack') diff --git a/tests/contrib/dogpile_cache/__init__.py b/tests/contrib/dogpile_cache/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/dogpile_cache/test_tracing.py b/tests/contrib/dogpile_cache/test_tracing.py new file mode 100644 index 0000000000..1548bf24e1 --- /dev/null +++ b/tests/contrib/dogpile_cache/test_tracing.py @@ -0,0 +1,182 @@ +import dogpile +import pytest + +from ddtrace import Pin +from ddtrace.contrib.dogpile_cache.patch import patch, unpatch + +from tests.test_tracer import get_dummy_tracer + + +@pytest.fixture +def tracer(): + return get_dummy_tracer() + + +@pytest.fixture +def region(tracer): + patch() + # Setup a simple dogpile cache region for testing. + # The backend is trivial so we can use memory to simplify test setup. + test_region = dogpile.cache.make_region(name="TestRegion") + test_region.configure("dogpile.cache.memory") + Pin.override(dogpile.cache, tracer=tracer) + return test_region + + +@pytest.fixture(autouse=True) +def cleanup(): + yield + unpatch() + + +@pytest.fixture +def single_cache(region): + @region.cache_on_arguments() + def fn(x): + return x * 2 + + return fn + + +@pytest.fixture +def multi_cache(region): + @region.cache_multi_on_arguments() + def fn(*x): + return [i * 2 for i in x] + + return fn + + +def test_doesnt_trace_with_no_pin(tracer, single_cache, multi_cache): + # No pin is set + unpatch() + + assert single_cache(1) == 2 + assert tracer.writer.pop_traces() == [] + + assert multi_cache(2, 3) == [4, 6] + assert tracer.writer.pop_traces() == [] + + +def test_doesnt_trace_with_disabled_pin(tracer, single_cache, multi_cache): + tracer.enabled = False + + assert single_cache(1) == 2 + assert tracer.writer.pop_traces() == [] + + assert multi_cache(2, 3) == [4, 6] + assert tracer.writer.pop_traces() == [] + + +def test_traces_get_or_create(tracer, single_cache): + assert single_cache(1) == 2 + traces = tracer.writer.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.name == "dogpile.cache" + assert span.resource == "get_or_create" + assert span.meta["key"] == "tests.contrib.dogpile_cache.test_tracing:fn|1" + assert span.meta["hit"] == "False" + assert span.meta["expired"] == "True" + assert span.meta["backend"] == "MemoryBackend" + assert span.meta["region"] == "TestRegion" + + # Now the results should be cached. 
+ assert single_cache(1) == 2 + traces = tracer.writer.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.name == "dogpile.cache" + assert span.resource == "get_or_create" + assert span.meta["key"] == "tests.contrib.dogpile_cache.test_tracing:fn|1" + assert span.meta["hit"] == "True" + assert span.meta["expired"] == "False" + assert span.meta["backend"] == "MemoryBackend" + assert span.meta["region"] == "TestRegion" + + +def test_traces_get_or_create_multi(tracer, multi_cache): + assert multi_cache(2, 3) == [4, 6] + traces = tracer.writer.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.meta["keys"] == ( + "['tests.contrib.dogpile_cache.test_tracing:fn|2', " + "'tests.contrib.dogpile_cache.test_tracing:fn|3']" + ) + assert span.meta["hit"] == "False" + assert span.meta["expired"] == "True" + assert span.meta["backend"] == "MemoryBackend" + assert span.meta["region"] == "TestRegion" + + # Partial hit + assert multi_cache(2, 4) == [4, 8] + traces = tracer.writer.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.meta["keys"] == ( + "['tests.contrib.dogpile_cache.test_tracing:fn|2', " + "'tests.contrib.dogpile_cache.test_tracing:fn|4']" + ) + assert span.meta["hit"] == "False" + assert span.meta["expired"] == "True" + assert span.meta["backend"] == "MemoryBackend" + assert span.meta["region"] == "TestRegion" + + # Full hit + assert multi_cache(2, 4) == [4, 8] + traces = tracer.writer.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.meta["keys"] == ( + "['tests.contrib.dogpile_cache.test_tracing:fn|2', " + "'tests.contrib.dogpile_cache.test_tracing:fn|4']" + ) + assert span.meta["hit"] == "True" + assert span.meta["expired"] == "False" + assert span.meta["backend"] == "MemoryBackend" + assert span.meta["region"] == "TestRegion" + + +class TestInnerFunctionCalls(object): + def single_cache(self, x): + return x * 2 + + def multi_cache(self, *x): + return [i * 2 for i in x] + + def test_calls_inner_functions_correctly(self, region, mocker): + """ This ensures the get_or_create behavior of dogpile is not altered. """ + spy_single_cache = mocker.spy(self, "single_cache") + spy_multi_cache = mocker.spy(self, "multi_cache") + + single_cache = region.cache_on_arguments()(self.single_cache) + multi_cache = region.cache_multi_on_arguments()(self.multi_cache) + + assert 2 == single_cache(1) + spy_single_cache.assert_called_once_with(1) + + # It's now cached - shouldn't need to call the inner function. + spy_single_cache.reset_mock() + assert 2 == single_cache(1) + assert spy_single_cache.call_count == 0 + + assert [6, 8] == multi_cache(3, 4) + spy_multi_cache.assert_called_once_with(3, 4) + + # Partial hit. Only the "new" key should be passed to the inner function. + spy_multi_cache.reset_mock() + assert [6, 10] == multi_cache(3, 5) + spy_multi_cache.assert_called_once_with(5) + + # Full hit. No call to inner function. 
+        spy_multi_cache.reset_mock()
+        assert [6, 10] == multi_cache(3, 5)
+        assert spy_multi_cache.call_count == 0
diff --git a/tests/contrib/elasticsearch/__init__.py b/tests/contrib/elasticsearch/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/contrib/elasticsearch/test.py b/tests/contrib/elasticsearch/test.py
new file mode 100644
index 0000000000..3d705b3e46
--- /dev/null
+++ b/tests/contrib/elasticsearch/test.py
@@ -0,0 +1,381 @@
+import datetime
+import unittest
+
+# project
+from ddtrace import Pin
+from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
+from ddtrace.ext import http
+from ddtrace.contrib.elasticsearch import get_traced_transport
+from ddtrace.contrib.elasticsearch.elasticsearch import elasticsearch
+from ddtrace.contrib.elasticsearch.patch import patch, unpatch
+
+# testing
+from tests.opentracer.utils import init_tracer
+from ..config import ELASTICSEARCH_CONFIG
+from ...test_tracer import get_dummy_tracer
+from ...base import BaseTracerTestCase
+from ...utils import assert_span_http_status_code
+
+
+class ElasticsearchTest(unittest.TestCase):
+    """
+    Elasticsearch integration test suite.
+    Needs a running Elasticsearch.
+    """
+    ES_INDEX = 'ddtrace_index'
+    ES_TYPE = 'ddtrace_type'
+
+    TEST_SERVICE = 'test'
+    TEST_PORT = str(ELASTICSEARCH_CONFIG['port'])
+
+    def setUp(self):
+        """Prepare ES"""
+        es = elasticsearch.Elasticsearch(port=ELASTICSEARCH_CONFIG['port'])
+        es.indices.delete(index=self.ES_INDEX, ignore=[400, 404])
+
+    def tearDown(self):
+        """Clean ES"""
+        es = elasticsearch.Elasticsearch(port=ELASTICSEARCH_CONFIG['port'])
+        es.indices.delete(index=self.ES_INDEX, ignore=[400, 404])
+
+    def test_elasticsearch(self):
+        """Test the elasticsearch integration
+
+        Everything is in this one test for now; it will be split later.
+        """
+        tracer = get_dummy_tracer()
+        writer = tracer.writer
+        transport_class = get_traced_transport(
+            datadog_tracer=tracer,
+            datadog_service=self.TEST_SERVICE,
+        )
+
+        es = elasticsearch.Elasticsearch(transport_class=transport_class, port=ELASTICSEARCH_CONFIG['port'])
+
+        # Test index creation
+        mapping = {'mapping': {'properties': {'created': {'type': 'date', 'format': 'yyyy-MM-dd'}}}}
+        es.indices.create(index=self.ES_INDEX, ignore=400, body=mapping)
+
+        spans = writer.pop()
+        assert spans
+        assert len(spans) == 1
+        span = spans[0]
+        assert span.service == self.TEST_SERVICE
+        assert span.name == 'elasticsearch.query'
+        assert span.span_type == 'elasticsearch'
+        assert span.error == 0
+        assert span.get_tag('elasticsearch.method') == 'PUT'
+        assert span.get_tag('elasticsearch.url') == '/%s' % self.ES_INDEX
+        assert span.resource == 'PUT /%s' % self.ES_INDEX
+
+        # Put data
+        args = {'index': self.ES_INDEX, 'doc_type': self.ES_TYPE}
+        es.index(id=10, body={'name': 'ten', 'created': datetime.date(2016, 1, 1)}, **args)
+        es.index(id=11, body={'name': 'eleven', 'created': datetime.date(2016, 2, 1)}, **args)
+        es.index(id=12, body={'name': 'twelve', 'created': datetime.date(2016, 3, 1)}, **args)
+
+        spans = writer.pop()
+        assert spans
+        assert len(spans) == 3
+        span = spans[0]
+        assert span.error == 0
+        assert span.get_tag('elasticsearch.method') == 'PUT'
+        assert span.get_tag('elasticsearch.url') == '/%s/%s/%s' % (self.ES_INDEX, self.ES_TYPE, 10)
+        assert span.resource == 'PUT /%s/%s/?' 
% (self.ES_INDEX, self.ES_TYPE) + + # Make the data available + es.indices.refresh(index=self.ES_INDEX) + + spans = writer.pop() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + assert span.resource == 'POST /%s/_refresh' % self.ES_INDEX + assert span.get_tag('elasticsearch.method') == 'POST' + assert span.get_tag('elasticsearch.url') == '/%s/_refresh' % self.ES_INDEX + + # Search data + result = es.search( + sort=['name:desc'], size=100, + body={'query': {'match_all': {}}}, + **args + ) + + assert len(result['hits']['hits']) == 3, result + + spans = writer.pop() + assert spans + assert len(spans) == 1 + span = spans[0] + assert span.resource == 'GET /%s/%s/_search' % (self.ES_INDEX, self.ES_TYPE) + assert span.get_tag('elasticsearch.method') == 'GET' + assert span.get_tag('elasticsearch.url') == '/%s/%s/_search' % (self.ES_INDEX, self.ES_TYPE) + + assert span.get_tag('elasticsearch.body').replace(' ', '') == '{"query":{"match_all":{}}}' + assert set(span.get_tag('elasticsearch.params').split('&')) == {'sort=name%3Adesc', 'size=100'} + assert http.QUERY_STRING not in span.meta + + self.assertTrue(span.get_metric('elasticsearch.took') > 0) + + # Search by type not supported by default json encoder + query = {'range': {'created': {'gte': datetime.date(2016, 2, 1)}}} + result = es.search(size=100, body={'query': query}, **args) + + assert len(result['hits']['hits']) == 2, result + + # Raise error 404 with a non existent index + writer.pop() + try: + es.get(index='non_existent_index', id=100, doc_type='_all') + assert 'error_not_raised' == 'elasticsearch.exceptions.TransportError' + except elasticsearch.exceptions.TransportError: + spans = writer.pop() + assert spans + span = spans[0] + assert_span_http_status_code(span, 404) + + # Raise error 400, the index 10 is created twice + try: + es.indices.create(index=10) + es.indices.create(index=10) + assert 'error_not_raised' == 'elasticsearch.exceptions.TransportError' + except elasticsearch.exceptions.TransportError: + spans = writer.pop() + assert spans + span = spans[-1] + assert_span_http_status_code(span, 400) + + # Drop the index, checking it won't raise exception on success or failure + es.indices.delete(index=self.ES_INDEX, ignore=[400, 404]) + es.indices.delete(index=self.ES_INDEX, ignore=[400, 404]) + + def test_elasticsearch_ot(self): + """Shortened OpenTracing version of test_elasticsearch.""" + tracer = get_dummy_tracer() + writer = tracer.writer + ot_tracer = init_tracer('my_svc', tracer) + + transport_class = get_traced_transport( + datadog_tracer=tracer, + datadog_service=self.TEST_SERVICE, + ) + + es = elasticsearch.Elasticsearch(transport_class=transport_class, port=ELASTICSEARCH_CONFIG['port']) + + # Test index creation + mapping = {'mapping': {'properties': {'created': {'type': 'date', 'format': 'yyyy-MM-dd'}}}} + + with ot_tracer.start_active_span('ot_span'): + es.indices.create(index=self.ES_INDEX, ignore=400, body=mapping) + + spans = writer.pop() + assert spans + assert len(spans) == 2 + ot_span, dd_span = spans + + # confirm the parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.service == 'my_svc' + assert ot_span.resource == 'ot_span' + + assert dd_span.service == self.TEST_SERVICE + assert dd_span.name == 'elasticsearch.query' + assert dd_span.span_type == 'elasticsearch' + assert dd_span.error == 0 + assert dd_span.get_tag('elasticsearch.method') == 'PUT' + assert dd_span.get_tag('elasticsearch.url') == '/%s' % self.ES_INDEX + assert 
dd_span.resource == 'PUT /%s' % self.ES_INDEX + + +class ElasticsearchPatchTest(BaseTracerTestCase): + """ + Elasticsearch integration test suite. + Need a running ElasticSearch. + Test cases with patching. + Will merge when patching will be the default/only way. + """ + ES_INDEX = 'ddtrace_index' + ES_TYPE = 'ddtrace_type' + + TEST_SERVICE = 'test' + TEST_PORT = str(ELASTICSEARCH_CONFIG['port']) + + def setUp(self): + """Prepare ES""" + super(ElasticsearchPatchTest, self).setUp() + + es = elasticsearch.Elasticsearch(port=ELASTICSEARCH_CONFIG['port']) + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(es.transport) + mapping = {'mapping': {'properties': {'created': {'type': 'date', 'format': 'yyyy-MM-dd'}}}} + es.indices.create(index=self.ES_INDEX, ignore=400, body=mapping) + + patch() + + self.es = es + + def tearDown(self): + """Clean ES""" + super(ElasticsearchPatchTest, self).tearDown() + + unpatch() + self.es.indices.delete(index=self.ES_INDEX, ignore=[400, 404]) + + def test_elasticsearch(self): + es = self.es + mapping = {'mapping': {'properties': {'created': {'type': 'date', 'format': 'yyyy-MM-dd'}}}} + es.indices.create(index=self.ES_INDEX, ignore=400, body=mapping) + + spans = self.get_spans() + self.reset() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'elasticsearch.query' + assert span.span_type == 'elasticsearch' + assert span.error == 0 + assert span.get_tag('elasticsearch.method') == 'PUT' + assert span.get_tag('elasticsearch.url') == '/%s' % self.ES_INDEX + assert span.resource == 'PUT /%s' % self.ES_INDEX + + args = {'index': self.ES_INDEX, 'doc_type': self.ES_TYPE} + es.index(id=10, body={'name': 'ten', 'created': datetime.date(2016, 1, 1)}, **args) + es.index(id=11, body={'name': 'eleven', 'created': datetime.date(2016, 2, 1)}, **args) + es.index(id=12, body={'name': 'twelve', 'created': datetime.date(2016, 3, 1)}, **args) + + spans = self.get_spans() + self.reset() + assert spans, spans + assert len(spans) == 3 + span = spans[0] + assert span.error == 0 + assert span.get_tag('elasticsearch.method') == 'PUT' + assert span.get_tag('elasticsearch.url') == '/%s/%s/%s' % (self.ES_INDEX, self.ES_TYPE, 10) + assert span.resource == 'PUT /%s/%s/?' 
% (self.ES_INDEX, self.ES_TYPE) + + args = {'index': self.ES_INDEX, 'doc_type': self.ES_TYPE} + es.indices.refresh(index=self.ES_INDEX) + + spans = self.get_spans() + self.reset() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + assert span.resource == 'POST /%s/_refresh' % self.ES_INDEX + assert span.get_tag('elasticsearch.method') == 'POST' + assert span.get_tag('elasticsearch.url') == '/%s/_refresh' % self.ES_INDEX + + # search data + args = {'index': self.ES_INDEX, 'doc_type': self.ES_TYPE} + with self.override_http_config('elasticsearch', dict(trace_query_string=True)): + es.index(id=10, body={'name': 'ten', 'created': datetime.date(2016, 1, 1)}, **args) + es.index(id=11, body={'name': 'eleven', 'created': datetime.date(2016, 2, 1)}, **args) + es.index(id=12, body={'name': 'twelve', 'created': datetime.date(2016, 3, 1)}, **args) + result = es.search( + sort=['name:desc'], + size=100, + body={'query': {'match_all': {}}}, + **args + ) + + assert len(result['hits']['hits']) == 3, result + spans = self.get_spans() + self.reset() + assert spans, spans + assert len(spans) == 4 + span = spans[-1] + assert span.resource == 'GET /%s/%s/_search' % (self.ES_INDEX, self.ES_TYPE) + assert span.get_tag('elasticsearch.method') == 'GET' + assert span.get_tag('elasticsearch.url') == '/%s/%s/_search' % (self.ES_INDEX, self.ES_TYPE) + assert span.get_tag('elasticsearch.body').replace(' ', '') == '{"query":{"match_all":{}}}' + assert set(span.get_tag('elasticsearch.params').split('&')) == {'sort=name%3Adesc', 'size=100'} + assert set(span.get_tag(http.QUERY_STRING).split('&')) == {'sort=name%3Adesc', 'size=100'} + + self.assertTrue(span.get_metric('elasticsearch.took') > 0) + + # Search by type not supported by default json encoder + query = {'range': {'created': {'gte': datetime.date(2016, 2, 1)}}} + result = es.search(size=100, body={'query': query}, **args) + + assert len(result['hits']['hits']) == 2, result + + def test_analytics_default(self): + es = self.es + mapping = {'mapping': {'properties': {'created': {'type': 'date', 'format': 'yyyy-MM-dd'}}}} + es.indices.create(index=self.ES_INDEX, ignore=400, body=mapping) + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with self.override_config( + 'elasticsearch', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + es = self.es + mapping = {'mapping': {'properties': {'created': {'type': 'date', 'format': 'yyyy-MM-dd'}}}} + es.indices.create(index=self.ES_INDEX, ignore=400, body=mapping) + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with self.override_config( + 'elasticsearch', + dict(analytics_enabled=True) + ): + es = self.es + mapping = {'mapping': {'properties': {'created': {'type': 'date', 'format': 'yyyy-MM-dd'}}}} + es.indices.create(index=self.ES_INDEX, ignore=400, body=mapping) + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) + + def test_patch_unpatch(self): + # Test patch idempotence + patch() + patch() + + es = elasticsearch.Elasticsearch(port=ELASTICSEARCH_CONFIG['port']) + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(es.transport) + + # Test index creation + es.indices.create(index=self.ES_INDEX, ignore=400) + + spans = self.get_spans() + self.reset() + assert 
spans, spans + assert len(spans) == 1 + + # Test unpatch + self.reset() + unpatch() + + es = elasticsearch.Elasticsearch(port=ELASTICSEARCH_CONFIG['port']) + + # Test index creation + es.indices.create(index=self.ES_INDEX, ignore=400) + + spans = self.get_spans() + self.reset() + assert not spans, spans + + # Test patch again + self.reset() + patch() + + es = elasticsearch.Elasticsearch(port=ELASTICSEARCH_CONFIG['port']) + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(es.transport) + + # Test index creation + es.indices.create(index=self.ES_INDEX, ignore=400) + + spans = self.get_spans() + self.reset() + assert spans, spans + assert len(spans) == 1 diff --git a/tests/contrib/falcon/__init__.py b/tests/contrib/falcon/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/falcon/app/__init__.py b/tests/contrib/falcon/app/__init__.py new file mode 100644 index 0000000000..457c3b50bc --- /dev/null +++ b/tests/contrib/falcon/app/__init__.py @@ -0,0 +1 @@ +from .app import get_app # noqa diff --git a/tests/contrib/falcon/app/app.py b/tests/contrib/falcon/app/app.py new file mode 100644 index 0000000000..4f1cb65d74 --- /dev/null +++ b/tests/contrib/falcon/app/app.py @@ -0,0 +1,20 @@ +import falcon + +from ddtrace.contrib.falcon import TraceMiddleware + +from . import resources + + +def get_app(tracer=None, distributed_tracing=True): + # initialize a traced Falcon application + middleware = [TraceMiddleware( + tracer, distributed_tracing=distributed_tracing)] if tracer else [] + app = falcon.API(middleware=middleware) + + # add resource routing + app.add_route('/200', resources.Resource200()) + app.add_route('/201', resources.Resource201()) + app.add_route('/500', resources.Resource500()) + app.add_route('/exception', resources.ResourceException()) + app.add_route('/not_found', resources.ResourceNotFound()) + return app diff --git a/tests/contrib/falcon/app/resources.py b/tests/contrib/falcon/app/resources.py new file mode 100644 index 0000000000..132456baa4 --- /dev/null +++ b/tests/contrib/falcon/app/resources.py @@ -0,0 +1,40 @@ +import falcon + + +class Resource200(object): + """Throw a handled exception here to ensure our use of + set_traceback() doesn't affect 200s + """ + def on_get(self, req, resp, **kwargs): + try: + 1 / 0 + except Exception: + pass + + resp.status = falcon.HTTP_200 + resp.body = 'Success' + resp.append_header('my-response-header', 'my_response_value') + + +class Resource201(object): + def on_post(self, req, resp, **kwargs): + resp.status = falcon.HTTP_201 + resp.body = 'Success' + + +class Resource500(object): + def on_get(self, req, resp, **kwargs): + resp.status = falcon.HTTP_500 + resp.body = 'Failure' + + +class ResourceException(object): + def on_get(self, req, resp, **kwargs): + raise Exception('Ouch!') + + +class ResourceNotFound(object): + def on_get(self, req, resp, **kwargs): + # simulate that the endpoint is hit but raise a 404 because + # the object isn't found in the database + raise falcon.HTTPNotFound() diff --git a/tests/contrib/falcon/test_autopatch.py b/tests/contrib/falcon/test_autopatch.py new file mode 100644 index 0000000000..4dd9e0b91b --- /dev/null +++ b/tests/contrib/falcon/test_autopatch.py @@ -0,0 +1,36 @@ +from falcon import testing + +import ddtrace + +from ...base import BaseTracerTestCase +from .app import get_app +from .test_suite import FalconTestCase + + +class AutoPatchTestCase(BaseTracerTestCase, testing.TestCase, FalconTestCase): + + # Added because falcon 1.3 and 1.4 test clients 
(falcon.testing.client.TestClient) expect this property to be
+    # defined. It would normally be initialized by calling 'TestClient.__init__(self, None)', but falcon 1.0.x
+    # does not have that module and the call would fail. Once we stop supporting falcon 1.0.x we can use the
+    # cleaner __init__ invocation
+    _default_headers = None
+
+    def setUp(self):
+        super(AutoPatchTestCase, self).setUp()
+
+        self._service = 'my-falcon'
+
+        # Since most integrations do `from ddtrace import tracer` we cannot simply do `ddtrace.tracer = self.tracer`
+        self.original_writer = ddtrace.tracer.writer
+        ddtrace.tracer.writer = self.tracer.writer
+        self.tracer = ddtrace.tracer
+
+        # build a test app without adding a tracer middleware;
+        # reconfigure the global tracer since the autopatch mode
+        # uses it
+        self.api = get_app(tracer=None)
+
+    def tearDown(self):
+        super(AutoPatchTestCase, self).tearDown()
+
+        ddtrace.tracer.writer = self.original_writer
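+
+# For context, an illustrative sketch (placeholder app names): this autopatch
+# mode mirrors launching an application through the `ddtrace-run` entrypoint,
+# e.g.:
+#
+#     ddtrace-run gunicorn myapp.wsgi:application
diff --git a/tests/contrib/falcon/test_distributed_tracing.py b/tests/contrib/falcon/test_distributed_tracing.py
new file mode 100644
index 0000000000..809563f88c
--- /dev/null
+++ b/tests/contrib/falcon/test_distributed_tracing.py
@@ -0,0 +1,52 @@
+from falcon import testing
+from tests.test_tracer import get_dummy_tracer
+
+from .app import get_app
+
+
+class DistributedTracingTestCase(testing.TestCase):
+    """Executes tests using the manual instrumentation so a middleware
+    is explicitly added.
+    """
+
+    def setUp(self):
+        super(DistributedTracingTestCase, self).setUp()
+        self._service = 'falcon'
+        self.tracer = get_dummy_tracer()
+        self.api = get_app(tracer=self.tracer)
+
+    def test_distributed_tracing(self):
+        headers = {
+            'x-datadog-trace-id': '100',
+            'x-datadog-parent-id': '42',
+        }
+        out = self.simulate_get('/200', headers=headers)
+        assert out.status_code == 200
+        assert out.content.decode('utf-8') == 'Success'
+
+        traces = self.tracer.writer.pop_traces()
+
+        assert len(traces) == 1
+        assert len(traces[0]) == 1
+
+        assert traces[0][0].parent_id == 42
+        assert traces[0][0].trace_id == 100
+
+    def test_distributed_tracing_disabled(self):
+        self.tracer = get_dummy_tracer()
+        self.api = get_app(tracer=self.tracer, distributed_tracing=False)
+        headers = {
+            'x-datadog-trace-id': '100',
+            'x-datadog-parent-id': '42',
+        }
+        out = self.simulate_get('/200', headers=headers)
+        assert out.status_code == 200
+        assert out.content.decode('utf-8') == 'Success'
+
+        traces = self.tracer.writer.pop_traces()
+
+        assert len(traces) == 1
+        assert len(traces[0]) == 1
+
+        assert traces[0][0].parent_id != 42
+        assert traces[0][0].trace_id != 100
diff --git a/tests/contrib/falcon/test_middleware.py b/tests/contrib/falcon/test_middleware.py
new file mode 100644
index 0000000000..1218b05c0c
--- /dev/null
+++ b/tests/contrib/falcon/test_middleware.py
@@ -0,0 +1,17 @@
+from falcon import testing
+
+from .app import get_app
+from .test_suite import FalconTestCase
+from ...base import BaseTracerTestCase
+
+
+class MiddlewareTestCase(BaseTracerTestCase, testing.TestCase, FalconTestCase):
+    """Executes tests using the manual instrumentation so a middleware
+    is explicitly added.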
+ """ + def setUp(self): + super(MiddlewareTestCase, self).setUp() + + # build a test app with a dummy tracer + self._service = 'falcon' + self.api = get_app(tracer=self.tracer) diff --git a/tests/contrib/falcon/test_suite.py b/tests/contrib/falcon/test_suite.py new file mode 100644 index 0000000000..6797ce62d3 --- /dev/null +++ b/tests/contrib/falcon/test_suite.py @@ -0,0 +1,266 @@ +from ddtrace import config +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.ext import errors as errx, http as httpx + +from tests.opentracer.utils import init_tracer +from ...utils import assert_span_http_status_code + + +class FalconTestCase(object): + """Falcon mixin test case that includes all possible tests. If you need + to add new tests, add them here so that they're shared across manual + and automatic instrumentation. + """ + def test_404(self): + out = self.simulate_get('/fake_endpoint') + assert out.status_code == 404 + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.name == 'falcon.request' + assert span.service == self._service + assert span.resource == 'GET 404' + assert_span_http_status_code(span, 404) + assert span.get_tag(httpx.URL) == 'http://falconframework.org/fake_endpoint' + assert httpx.QUERY_STRING not in span.meta + assert span.parent_id is None + + def test_exception(self): + try: + self.simulate_get('/exception') + except Exception: + pass + else: + assert 0 + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.name == 'falcon.request' + assert span.service == self._service + assert span.resource == 'GET tests.contrib.falcon.app.resources.ResourceException' + assert_span_http_status_code(span, 500) + assert span.get_tag(httpx.URL) == 'http://falconframework.org/exception' + assert span.parent_id is None + + def test_200(self, query_string=''): + out = self.simulate_get('/200', query_string=query_string) + assert out.status_code == 200 + assert out.content.decode('utf-8') == 'Success' + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.name == 'falcon.request' + assert span.service == self._service + assert span.resource == 'GET tests.contrib.falcon.app.resources.Resource200' + assert_span_http_status_code(span, 200) + fqs = ('?' 
+ query_string) if query_string else '' + assert span.get_tag(httpx.URL) == 'http://falconframework.org/200' + fqs + if config.falcon.trace_query_string: + assert span.get_tag(httpx.QUERY_STRING) == query_string + else: + assert httpx.QUERY_STRING not in span.meta + assert span.parent_id is None + assert span.span_type == 'web' + + def test_200_qs(self): + return self.test_200('foo=bar') + + def test_200_multi_qs(self): + return self.test_200('foo=bar&foo=baz&x=y') + + def test_200_qs_trace(self): + with self.override_http_config('falcon', dict(trace_query_string=True)): + return self.test_200('foo=bar') + + def test_200_multi_qs_trace(self): + with self.override_http_config('falcon', dict(trace_query_string=True)): + return self.test_200('foo=bar&foo=baz&x=y') + + def test_analytics_global_on_integration_default(self): + """ + When making a request + When integration trace search is not enabled, the sample rate is not set, and trace search is enabled globally + We expect the root span to have the appropriate tag + """ + with self.override_global_config(dict(analytics_enabled=True)): + out = self.simulate_get('/200') + self.assertEqual(out.status_code, 200) + self.assertEqual(out.content.decode('utf-8'), 'Success') + + self.assert_structure( + dict(name='falcon.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 1.0}) + ) + + def test_analytics_global_on_integration_on(self): + """ + When making a request + When integration trace search is enabled, the sample rate is set, and trace search is enabled globally + We expect the root span to have the appropriate tag + """ + with self.override_global_config(dict(analytics_enabled=True)): + with self.override_config('falcon', dict(analytics_enabled=True, analytics_sample_rate=0.5)): + out = self.simulate_get('/200') + self.assertEqual(out.status_code, 200) + self.assertEqual(out.content.decode('utf-8'), 'Success') + + self.assert_structure( + dict(name='falcon.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}) + ) + + def test_analytics_global_off_integration_default(self): + """ + When making a request + When integration trace search is not enabled, the sample rate is not set, and trace search is disabled globally + We expect the root span to not include the tag + """ + with self.override_global_config(dict(analytics_enabled=False)): + out = self.simulate_get('/200') + self.assertEqual(out.status_code, 200) + self.assertEqual(out.content.decode('utf-8'), 'Success') + + root = self.get_root_span() + self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_global_off_integration_on(self): + """ + When making a request + When integration trace search is enabled, the sample rate is set, and trace search is disabled globally + We expect the root span to have the appropriate tag + """ + with self.override_global_config(dict(analytics_enabled=False)): + with self.override_config('falcon', dict(analytics_enabled=True, analytics_sample_rate=0.5)): + out = self.simulate_get('/200') + self.assertEqual(out.status_code, 200) + self.assertEqual(out.content.decode('utf-8'), 'Success') + + self.assert_structure( + dict(name='falcon.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}) + ) + +
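# DEV: the four analytics cases above encode the matrix: ANALYTICS_SAMPLE_RATE_KEY is + # 1.0 when only the global flag is on, it is the integration's configured rate (0.5 here) + # whenever the integration opts in explicitly, and it is absent when neither side enables + # trace search. A rough sketch of opting in at runtime (indicative only; these tests use + # the override_config helpers instead): + #     from ddtrace import config + #     config.falcon.analytics_enabled = True + #     config.falcon.analytics_sample_rate = 0.5 + +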
def test_201(self): + out = self.simulate_post('/201') + assert out.status_code == 201 + assert out.content.decode('utf-8') == 'Success' + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.name == 'falcon.request' + assert span.service == self._service + assert span.resource == 'POST tests.contrib.falcon.app.resources.Resource201' + assert_span_http_status_code(span, 201) + assert span.get_tag(httpx.URL) == 'http://falconframework.org/201' + assert span.parent_id is None + + def test_500(self): + out = self.simulate_get('/500') + assert out.status_code == 500 + assert out.content.decode('utf-8') == 'Failure' + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.name == 'falcon.request' + assert span.service == self._service + assert span.resource == 'GET tests.contrib.falcon.app.resources.Resource500' + assert_span_http_status_code(span, 500) + assert span.get_tag(httpx.URL) == 'http://falconframework.org/500' + assert span.parent_id is None + + def test_404_exception(self): + out = self.simulate_get('/not_found') + assert out.status_code == 404 + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.name == 'falcon.request' + assert span.service == self._service + assert span.resource == 'GET tests.contrib.falcon.app.resources.ResourceNotFound' + assert_span_http_status_code(span, 404) + assert span.get_tag(httpx.URL) == 'http://falconframework.org/not_found' + assert span.parent_id is None + + def test_404_exception_no_stacktrace(self): + # it should not have the stacktrace when a 404 exception is raised + out = self.simulate_get('/not_found') + assert out.status_code == 404 + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.name == 'falcon.request' + assert span.service == self._service + assert_span_http_status_code(span, 404) + assert span.get_tag(errx.ERROR_TYPE) is None + assert span.parent_id is None + + def test_200_ot(self): + """OpenTracing version of test_200.""" + ot_tracer = init_tracer('my_svc', self.tracer) + + with ot_tracer.start_active_span('ot_span'): + out = self.simulate_get('/200') + + assert out.status_code == 200 + assert out.content.decode('utf-8') == 'Success' + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 2 + ot_span, dd_span = traces[0] + + # confirm the parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.service == 'my_svc' + assert ot_span.resource == 'ot_span' + + assert dd_span.name == 'falcon.request' + assert dd_span.service == self._service + assert dd_span.resource == 'GET tests.contrib.falcon.app.resources.Resource200' + assert_span_http_status_code(dd_span, 200) + assert dd_span.get_tag(httpx.URL) == 'http://falconframework.org/200' + + def test_falcon_request_hook(self): + @config.falcon.hooks.on('request') + def on_falcon_request(span, request, response): + span.set_tag('my.custom', 'tag') + + out = self.simulate_get('/200') + assert out.status_code == 200 + assert out.content.decode('utf-8') == 'Success' + + traces = self.tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.get_tag('http.request.headers.my_header') is None + assert span.get_tag('http.response.headers.my_response_header') is None + + assert span.name == 'falcon.request' + + assert span.get_tag('my.custom') == 'tag' + + def test_http_header_tracing(self): + with self.override_config('falcon', {}): + config.falcon.http.trace_headers(['my-header', 'my-response-header']) + self.simulate_get('/200', headers={'my-header': 'my_value'}) + traces = 
self.tracer.writer.pop_traces() + + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + assert span.get_tag('http.request.headers.my-header') == 'my_value' + assert span.get_tag('http.response.headers.my-response-header') == 'my_response_value' diff --git a/tests/contrib/flask/__init__.py b/tests/contrib/flask/__init__.py new file mode 100644 index 0000000000..582f0d4489 --- /dev/null +++ b/tests/contrib/flask/__init__.py @@ -0,0 +1,47 @@ +from ddtrace import Pin +from ddtrace.contrib.flask import patch, unpatch +import flask +from ddtrace.vendor import wrapt + +from ...base import BaseTracerTestCase + + +class BaseFlaskTestCase(BaseTracerTestCase): + def setUp(self): + super(BaseFlaskTestCase, self).setUp() + + patch() + + self.app = flask.Flask(__name__, template_folder="test_templates/") + self.client = self.app.test_client() + Pin.override(self.app, tracer=self.tracer) + + def tearDown(self): + # Remove any remaining spans + self.tracer.writer.pop() + + # Unpatch Flask + unpatch() + + def get_spans(self): + return self.tracer.writer.pop() + + def assert_is_wrapped(self, obj): + self.assertTrue(isinstance(obj, wrapt.ObjectProxy), "{} is not wrapped".format(obj)) + + def assert_is_not_wrapped(self, obj): + self.assertFalse(isinstance(obj, wrapt.ObjectProxy), "{} is wrapped".format(obj)) + + def find_span_by_name(self, spans, name, required=True): + """Helper to find the first span with a given name from a list""" + span = next((s for s in spans if s.name == name), None) + if required: + self.assertIsNotNone(span, "could not find span with name {}".format(name)) + return span + + def find_span_parent(self, spans, span, required=True): + """Helper to search for a span's parent in a given list of spans""" + parent = next((s for s in spans if s.span_id == span.parent_id), None) + if required: + self.assertIsNotNone(parent, "could not find parent span {}".format(span)) + return parent diff --git a/tests/contrib/flask/static/test.txt b/tests/contrib/flask/static/test.txt new file mode 100644 index 0000000000..f11db5cd24 --- /dev/null +++ b/tests/contrib/flask/static/test.txt @@ -0,0 +1 @@ +Hello Flask diff --git a/tests/contrib/flask/test_blueprint.py b/tests/contrib/flask/test_blueprint.py new file mode 100644 index 0000000000..c0ed7bc297 --- /dev/null +++ b/tests/contrib/flask/test_blueprint.py @@ -0,0 +1,155 @@ +import flask + +from ddtrace import Pin +from ddtrace.contrib.flask import unpatch + +from . 
import BaseFlaskTestCase + + +class FlaskBlueprintTestCase(BaseFlaskTestCase): + def test_patch(self): + """ + When we patch Flask + Then ``flask.Blueprint.register`` is patched + Then ``flask.Blueprint.add_url_rule`` is patched + """ + # DEV: We call `patch` in `setUp` + self.assert_is_wrapped(flask.Blueprint.register) + self.assert_is_wrapped(flask.Blueprint.add_url_rule) + + def test_unpatch(self): + """ + When we unpatch Flask + Then ``flask.Blueprint.register`` is not patched + Then ``flask.Blueprint.add_url_rule`` is not patched + """ + unpatch() + self.assert_is_not_wrapped(flask.Blueprint.register) + self.assert_is_not_wrapped(flask.Blueprint.add_url_rule) + + def test_blueprint_register(self): + """ + When we register a ``flask.Blueprint`` to a ``flask.Flask`` + When no ``Pin`` is attached to the ``Blueprint`` + We attach the pin from the ``flask.Flask`` app + When a ``Pin`` is manually added to the ``Blueprint`` + We do not use the ``flask.Flask`` app ``Pin`` + """ + bp = flask.Blueprint('pinned', __name__) + Pin(service='flask-bp', tracer=self.tracer).onto(bp) + + # DEV: This is more common than calling ``flask.Blueprint.register`` directly + self.app.register_blueprint(bp) + pin = Pin.get_from(bp) + self.assertEqual(pin.service, 'flask-bp') + + bp = flask.Blueprint('not-pinned', __name__) + self.app.register_blueprint(bp) + pin = Pin.get_from(bp) + self.assertEqual(pin.service, 'flask') + + def test_blueprint_add_url_rule(self): + """ + When we call ``flask.Blueprint.add_url_rule`` + When the ``Blueprint`` has a ``Pin`` attached + We clone the Blueprint's ``Pin`` to the view + When the ``Blueprint`` does not have a ``Pin`` attached + We do not attach a ``Pin`` to the func + """ + # When the Blueprint has a Pin attached + bp = flask.Blueprint('pinned', __name__) + Pin(service='flask-bp', tracer=self.tracer).onto(bp) + + @bp.route('/') + def test_view(): + pass + + # Assert the view func has a `Pin` attached with the Blueprint's service name + pin = Pin.get_from(test_view) + self.assertIsNotNone(pin) + self.assertEqual(pin.service, 'flask-bp') + + # When the Blueprint does not have a Pin attached + bp = flask.Blueprint('not-pinned', __name__) + + @bp.route('/') + def test_view(): + pass + + # Assert the view does not have a `Pin` attached + pin = Pin.get_from(test_view) + self.assertIsNone(pin) + + def test_blueprint_request(self): + """ + When making a request to a Blueprint's endpoint + We create the expected spans + """ + bp = flask.Blueprint('bp', __name__) + + @bp.route('/') + def test(): + return 'test' + + self.app.register_blueprint(bp) + + # Request the endpoint + self.client.get('/') + + # Only extract the span we care about + # DEV: Making a request creates a bunch of lifecycle spans, + # ignore them, we test them elsewhere + span = self.find_span_by_name(self.get_spans(), 'bp.test') + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'bp.test') + self.assertEqual(span.resource, '/') + self.assertEqual(span.meta, dict()) + + def test_blueprint_request_pin_override(self): + """ + When making a request to a Blueprint's endpoint + When we attach a ``Pin`` to the Blueprint + We create the expected spans + """ + bp = flask.Blueprint('bp', __name__) + Pin.override(bp, service='flask-bp', tracer=self.tracer) + + @bp.route('/') + def test(): + return 'test' + + self.app.register_blueprint(bp) + + # Request the endpoint + self.client.get('/') + + # Only extract the span we care about + # DEV: Making a request creates a bunch of lifecycle spans, + # ignore 
them, we test them elsewhere + span = self.find_span_by_name(self.get_spans(), 'bp.test') + self.assertEqual(span.service, 'flask-bp') + self.assertEqual(span.name, 'bp.test') + self.assertEqual(span.resource, '/') + self.assertEqual(span.meta, dict()) + + def test_blueprint_request_pin_disabled(self): + """ + When making a request to a Blueprint's endpoint + When the app's ``Pin`` is disabled + We do not create any spans + """ + pin = Pin.get_from(self.app) + pin.tracer.enabled = False + + bp = flask.Blueprint('bp', __name__) + + @bp.route('/') + def test(): + return 'test' + + self.app.register_blueprint(bp) + + # Request the endpoint + self.client.get('/') + + self.assertEqual(len(self.get_spans()), 0) diff --git a/tests/contrib/flask/test_errorhandler.py b/tests/contrib/flask/test_errorhandler.py new file mode 100644 index 0000000000..bd4d875910 --- /dev/null +++ b/tests/contrib/flask/test_errorhandler.py @@ -0,0 +1,293 @@ +import flask + +from . import BaseFlaskTestCase +from ...utils import assert_span_http_status_code + + +class FlaskErrorhandlerTestCase(BaseFlaskTestCase): + def test_default_404_handler(self): + """ + When making a 404 request + And no user defined error handler is defined + We create the expected spans + """ + # Make our 404 request + res = self.client.get('/unknown') + self.assertEqual(res.status_code, 404) + + spans = self.get_spans() + + req_span = self.find_span_by_name(spans, 'flask.request') + dispatch_span = self.find_span_by_name(spans, 'flask.dispatch_request') + user_ex_span = self.find_span_by_name(spans, 'flask.handle_user_exception') + http_ex_span = self.find_span_by_name(spans, 'flask.handle_http_exception') + + # flask.request span + self.assertEqual(req_span.error, 0) + assert_span_http_status_code(req_span, 404) + self.assertIsNone(req_span.get_tag('flask.endpoint')) + self.assertIsNone(req_span.get_tag('flask.url_rule')) + + # flask.dispatch_request span + self.assertEqual(dispatch_span.error, 1) + error_msg = dispatch_span.get_tag('error.msg') + self.assertTrue(error_msg.startswith('404 Not Found')) + error_stack = dispatch_span.get_tag('error.stack') + self.assertTrue(error_stack.startswith('Traceback (most recent call last):')) + error_type = dispatch_span.get_tag('error.type') + self.assertEqual(error_type, 'werkzeug.exceptions.NotFound') + + # flask.handle_user_exception span + self.assertEqual(user_ex_span.meta, dict()) + self.assertEqual(user_ex_span.error, 0) + + # flask.handle_http_exception span + self.assertEqual(http_ex_span.meta, dict()) + self.assertEqual(http_ex_span.error, 0) + + def test_abort_500(self): + """ + When making a 500 request + And no user defined error handler is defined + We create the expected spans + """ + @self.app.route('/500') + def endpoint_500(): + flask.abort(500) + + # Make our 500 request + res = self.client.get('/500') + self.assertEqual(res.status_code, 500) + + spans = self.get_spans() + + req_span = self.find_span_by_name(spans, 'flask.request') + dispatch_span = self.find_span_by_name(spans, 'flask.dispatch_request') + endpoint_span = self.find_span_by_name(spans, 'tests.contrib.flask.test_errorhandler.endpoint_500') + user_ex_span = self.find_span_by_name(spans, 'flask.handle_user_exception') + http_ex_span = self.find_span_by_name(spans, 'flask.handle_http_exception') + + # flask.request span + self.assertEqual(req_span.error, 1) + + assert_span_http_status_code(req_span, 500) + self.assertEqual(req_span.get_tag('flask.endpoint'), 'endpoint_500') + 
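# DEV: 'flask.endpoint' is the name of the matched view function, while + # 'flask.url_rule' below is the raw route pattern it was registered under. + 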
self.assertEqual(req_span.get_tag('flask.url_rule'), '/500') + + # flask.dispatch_request span + self.assertEqual(dispatch_span.error, 1) + error_msg = dispatch_span.get_tag('error.msg') + self.assertTrue(error_msg.startswith('500 Internal Server Error')) + error_stack = dispatch_span.get_tag('error.stack') + self.assertTrue(error_stack.startswith('Traceback (most recent call last):')) + error_type = dispatch_span.get_tag('error.type') + self.assertEqual(error_type, 'werkzeug.exceptions.InternalServerError') + + # tests.contrib.flask.test_errorhandler.endpoint_500 span + self.assertEqual(endpoint_span.error, 1) + error_msg = endpoint_span.get_tag('error.msg') + self.assertTrue(error_msg.startswith('500 Internal Server Error')) + error_stack = endpoint_span.get_tag('error.stack') + self.assertTrue(error_stack.startswith('Traceback (most recent call last):')) + error_type = endpoint_span.get_tag('error.type') + self.assertEqual(error_type, 'werkzeug.exceptions.InternalServerError') + + # flask.handle_user_exception span + self.assertEqual(user_ex_span.meta, dict()) + self.assertEqual(user_ex_span.error, 0) + + # flask.handle_http_exception span + self.assertEqual(http_ex_span.meta, dict()) + self.assertEqual(http_ex_span.error, 0) + + def test_abort_500_custom_handler(self): + """ + When making a 500 request + And a user defined error handler is defined + We create the expected spans + """ + @self.app.errorhandler(500) + def handle_500(e): + return 'whoops', 200 + + @self.app.route('/500') + def endpoint_500(): + flask.abort(500) + + # Make our 500 request + res = self.client.get('/500') + self.assertEqual(res.status_code, 200) + self.assertEqual(res.data, b'whoops') + + spans = self.get_spans() + + req_span = self.find_span_by_name(spans, 'flask.request') + dispatch_span = self.find_span_by_name(spans, 'flask.dispatch_request') + endpoint_span = self.find_span_by_name(spans, 'tests.contrib.flask.test_errorhandler.endpoint_500') + handler_span = self.find_span_by_name(spans, 'tests.contrib.flask.test_errorhandler.handle_500') + user_ex_span = self.find_span_by_name(spans, 'flask.handle_user_exception') + http_ex_span = self.find_span_by_name(spans, 'flask.handle_http_exception') + + # flask.request span + self.assertEqual(req_span.error, 0) + assert_span_http_status_code(req_span, 200) + self.assertEqual(req_span.get_tag('flask.endpoint'), 'endpoint_500') + self.assertEqual(req_span.get_tag('flask.url_rule'), '/500') + + # flask.dispatch_request span + self.assertEqual(dispatch_span.error, 1) + error_msg = dispatch_span.get_tag('error.msg') + self.assertTrue(error_msg.startswith('500 Internal Server Error')) + error_stack = dispatch_span.get_tag('error.stack') + self.assertTrue(error_stack.startswith('Traceback (most recent call last):')) + error_type = dispatch_span.get_tag('error.type') + self.assertEqual(error_type, 'werkzeug.exceptions.InternalServerError') + + # tests.contrib.flask.test_errorhandler.endpoint_500 span + self.assertEqual(endpoint_span.error, 1) + error_msg = endpoint_span.get_tag('error.msg') + self.assertTrue(error_msg.startswith('500 Internal Server Error')) + error_stack = endpoint_span.get_tag('error.stack') + self.assertTrue(error_stack.startswith('Traceback (most recent call last):')) + error_type = endpoint_span.get_tag('error.type') + self.assertEqual(error_type, 'werkzeug.exceptions.InternalServerError') + + # tests.contrib.flask.test_errorhandler.handle_500 span + self.assertEqual(handler_span.error, 0) + self.assertIsNone(handler_span.get_tag('error.msg')) + 
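# DEV: the user-defined handler returned a 200 response, so none of the error + # tags should be attached to its span. + 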
self.assertIsNone(handler_span.get_tag('error.stack')) + self.assertIsNone(handler_span.get_tag('error.type')) + + # flask.handle_user_exception span + self.assertEqual(user_ex_span.meta, dict()) + self.assertEqual(user_ex_span.error, 0) + + # flask.handle_http_exception span + self.assertEqual(http_ex_span.meta, dict()) + self.assertEqual(http_ex_span.error, 0) + + def test_raise_user_exception(self): + """ + When raising a custom user exception + And no user defined error handler is defined + We create the expected spans + """ + class FlaskTestException(Exception): + pass + + @self.app.route('/error') + def endpoint_error(): + raise FlaskTestException('custom error message') + + # Make our 500 request + res = self.client.get('/error') + self.assertEqual(res.status_code, 500) + + spans = self.get_spans() + + req_span = self.find_span_by_name(spans, 'flask.request') + dispatch_span = self.find_span_by_name(spans, 'flask.dispatch_request') + endpoint_span = self.find_span_by_name(spans, 'tests.contrib.flask.test_errorhandler.endpoint_error') + user_ex_span = self.find_span_by_name(spans, 'flask.handle_user_exception') + http_ex_span = self.find_span_by_name(spans, 'flask.handle_http_exception', required=False) + + # flask.request span + self.assertEqual(req_span.error, 1) + assert_span_http_status_code(req_span, 500) + self.assertEqual(req_span.get_tag('flask.endpoint'), 'endpoint_error') + self.assertEqual(req_span.get_tag('flask.url_rule'), '/error') + + # flask.dispatch_request span + self.assertEqual(dispatch_span.error, 1) + error_msg = dispatch_span.get_tag('error.msg') + self.assertTrue(error_msg.startswith('custom error message')) + error_stack = dispatch_span.get_tag('error.stack') + self.assertTrue(error_stack.startswith('Traceback (most recent call last):')) + error_type = dispatch_span.get_tag('error.type') + self.assertEqual(error_type, 'tests.contrib.flask.test_errorhandler.FlaskTestException') + + # tests.contrib.flask.test_errorhandler.endpoint_error span + self.assertEqual(endpoint_span.error, 1) + error_msg = endpoint_span.get_tag('error.msg') + self.assertTrue(error_msg.startswith('custom error message')) + error_stack = endpoint_span.get_tag('error.stack') + self.assertTrue(error_stack.startswith('Traceback (most recent call last):')) + error_type = endpoint_span.get_tag('error.type') + self.assertEqual(error_type, 'tests.contrib.flask.test_errorhandler.FlaskTestException') + + # flask.handle_user_exception span + self.assertEqual(user_ex_span.error, 1) + error_msg = user_ex_span.get_tag('error.msg') + self.assertTrue(error_msg.startswith('custom error message')) + error_stack = user_ex_span.get_tag('error.stack') + self.assertTrue(error_stack.startswith('Traceback (most recent call last):')) + error_type = user_ex_span.get_tag('error.type') + self.assertEqual(error_type, 'tests.contrib.flask.test_errorhandler.FlaskTestException') + + # flask.handle_http_exception span + self.assertIsNone(http_ex_span) + +
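# DEV: with no errorhandler registered the custom exception is never converted into + # an HTTP exception, so no 'flask.handle_http_exception' span is expected above. + +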
def test_raise_user_exception_handler(self): + """ + When raising a custom user exception + And a user defined error handler is defined + We create the expected spans + """ + class FlaskTestException(Exception): + pass + + @self.app.errorhandler(FlaskTestException) + def handle_error(e): + return 'whoops', 200 + + @self.app.route('/error') + def endpoint_error(): + raise FlaskTestException('custom error message') + + # Make our request (the registered handler converts the exception into a 200) + res = self.client.get('/error') + self.assertEqual(res.status_code, 200) + self.assertEqual(res.data, b'whoops') + + spans = self.get_spans() + + req_span = self.find_span_by_name(spans, 'flask.request') + dispatch_span = self.find_span_by_name(spans, 'flask.dispatch_request') + endpoint_span = self.find_span_by_name(spans, 'tests.contrib.flask.test_errorhandler.endpoint_error') + handler_span = self.find_span_by_name(spans, 'tests.contrib.flask.test_errorhandler.handle_error') + user_ex_span = self.find_span_by_name(spans, 'flask.handle_user_exception') + http_ex_span = self.find_span_by_name(spans, 'flask.handle_http_exception', required=False) + + # flask.request span + self.assertEqual(req_span.error, 0) + assert_span_http_status_code(req_span, 200) + self.assertEqual(req_span.get_tag('flask.endpoint'), 'endpoint_error') + self.assertEqual(req_span.get_tag('flask.url_rule'), '/error') + + # flask.dispatch_request span + self.assertEqual(dispatch_span.error, 1) + error_msg = dispatch_span.get_tag('error.msg') + self.assertTrue(error_msg.startswith('custom error message')) + error_stack = dispatch_span.get_tag('error.stack') + self.assertTrue(error_stack.startswith('Traceback (most recent call last):')) + error_type = dispatch_span.get_tag('error.type') + self.assertEqual(error_type, 'tests.contrib.flask.test_errorhandler.FlaskTestException') + + # tests.contrib.flask.test_errorhandler.endpoint_error span + self.assertEqual(endpoint_span.error, 1) + error_msg = endpoint_span.get_tag('error.msg') + self.assertTrue(error_msg.startswith('custom error message')) + error_stack = endpoint_span.get_tag('error.stack') + self.assertTrue(error_stack.startswith('Traceback (most recent call last):')) + error_type = endpoint_span.get_tag('error.type') + self.assertEqual(error_type, 'tests.contrib.flask.test_errorhandler.FlaskTestException') + + # tests.contrib.flask.test_errorhandler.handle_error span + self.assertEqual(handler_span.error, 0) + + # flask.handle_user_exception span + self.assertEqual(user_ex_span.error, 0) + self.assertEqual(user_ex_span.meta, dict()) + + # flask.handle_http_exception span + self.assertIsNone(http_ex_span) diff --git a/tests/contrib/flask/test_flask_helpers.py b/tests/contrib/flask/test_flask_helpers.py new file mode 100644 index 0000000000..847dee8396 --- /dev/null +++ b/tests/contrib/flask/test_flask_helpers.py @@ -0,0 +1,121 @@ +import flask + +from ddtrace import Pin +from ddtrace.contrib.flask import unpatch +from ddtrace.compat import StringIO + +from . import BaseFlaskTestCase + + class FlaskHelpersTestCase(BaseFlaskTestCase): + def test_patch(self): + """ + When we patch Flask + Then ``flask.jsonify`` is patched + Then ``flask.send_file`` is patched + """ + # DEV: We call `patch` in `setUp` + self.assert_is_wrapped(flask.jsonify) + self.assert_is_wrapped(flask.send_file) + + def test_unpatch(self): + """ + When we unpatch Flask + Then ``flask.jsonify`` is unpatched + Then ``flask.send_file`` is unpatched + """ + unpatch() + self.assert_is_not_wrapped(flask.jsonify) + self.assert_is_not_wrapped(flask.send_file) + +
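# DEV: both helpers below only emit spans while Flask application and request contexts + # are active, which is why each test pushes app_context() and test_request_context(). + +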
def test_jsonify(self): + """ + When we call a patched ``flask.jsonify`` + We create a span as expected + """ + # DEV: `jsonify` requires active app and request contexts + with self.app.app_context(): + with self.app.test_request_context('/'): + response = flask.jsonify(dict(key='value')) + self.assertTrue(isinstance(response, flask.Response)) + self.assertEqual(response.status_code, 200) + + # 1 span for `jsonify` + # 1 span for tearing down the request context we created + # 1 span for tearing down the app context we created + spans = self.get_spans() + self.assertEqual(len(spans), 3) + + self.assertIsNone(spans[0].service) + self.assertEqual(spans[0].name, 'flask.jsonify') + self.assertEqual(spans[0].resource, 'flask.jsonify') + assert spans[0].meta == dict() + + self.assertEqual(spans[1].name, 'flask.do_teardown_request') + self.assertEqual(spans[2].name, 'flask.do_teardown_appcontext') + + def test_jsonify_pin_disabled(self): + """ + When we call a patched ``flask.jsonify`` + When the ``flask.Flask`` ``Pin`` is disabled + We do not create a span + """ + # Disable the pin on the app + pin = Pin.get_from(self.app) + pin.tracer.enabled = False + + # DEV: `jsonify` requires active app and request contexts + with self.app.app_context(): + with self.app.test_request_context('/'): + response = flask.jsonify(dict(key='value')) + self.assertTrue(isinstance(response, flask.Response)) + self.assertEqual(response.status_code, 200) + + self.assertEqual(len(self.get_spans()), 0) + + def test_send_file(self): + """ + When calling a patched ``flask.send_file`` + We create the expected spans + """ + fp = StringIO('static file') + + with self.app.app_context(): + with self.app.test_request_context('/'): + # DEV: Flask >= (0, 12, 0) tries to infer mimetype, so set explicitly + response = flask.send_file(fp, mimetype='text/plain') + self.assertTrue(isinstance(response, flask.Response)) + self.assertEqual(response.status_code, 200) + + # 1 for `send_file` + # 1 for tearing down the request context we created + # 1 for tearing down the app context we created + spans = self.get_spans() + self.assertEqual(len(spans), 3) + + self.assertEqual(spans[0].service, 'flask') + self.assertEqual(spans[0].name, 'flask.send_file') + self.assertEqual(spans[0].resource, 'flask.send_file') + assert spans[0].meta == dict() + + self.assertEqual(spans[1].name, 'flask.do_teardown_request') + self.assertEqual(spans[2].name, 'flask.do_teardown_appcontext') + + def test_send_file_pin_disabled(self): + """ + When calling a patched ``flask.send_file`` + When the app's ``Pin`` has been disabled + We do not create any spans + """ + pin = Pin.get_from(self.app) + pin.tracer.enabled = False + + fp = StringIO('static file') + with self.app.app_context(): + with self.app.test_request_context('/'): + # DEV: Flask >= (0, 12, 0) tries to infer mimetype, so set explicitly + response = flask.send_file(fp, mimetype='text/plain') + self.assertTrue(isinstance(response, 
flask.Response)) + self.assertEqual(response.status_code, 200) + + self.assertEqual(len(self.get_spans()), 0) diff --git a/tests/contrib/flask/test_hooks.py b/tests/contrib/flask/test_hooks.py new file mode 100644 index 0000000000..43daaa6f67 --- /dev/null +++ b/tests/contrib/flask/test_hooks.py @@ -0,0 +1,469 @@ +from flask import Blueprint + +from ddtrace.ext import http +from . import BaseFlaskTestCase +from ...utils import assert_span_http_status_code + + +class FlaskHookTestCase(BaseFlaskTestCase): + def setUp(self): + super(FlaskHookTestCase, self).setUp() + + @self.app.route('/') + def index(): + return 'Hello Flask', 200 + + self.bp = Blueprint(__name__, 'bp') + + @self.bp.route('/bp') + def bp(): + return 'Hello Blueprint', 200 + + def test_before_request(self): + """ + When Flask before_request hook is registered + We create the expected spans + """ + @self.app.before_request + def before_request(): + pass + + req = self.client.get('/') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + # DEV: This will raise an exception if this span doesn't exist + self.find_span_by_name(spans, 'flask.dispatch_request') + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.before_request') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.before_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.before_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.preprocess_request') + + def test_before_request_return(self): + """ + When Flask before_request hook is registered + When the hook handles the request + We create the expected spans + """ + @self.app.before_request + def before_request(): + return 'Not Allowed', 401 + + req = self.client.get('/') + self.assertEqual(req.status_code, 401) + self.assertEqual(req.data, b'Not Allowed') + + spans = self.get_spans() + self.assertEqual(len(spans), 7) + + dispatch = self.find_span_by_name(spans, 'flask.dispatch_request', required=False) + self.assertIsNone(dispatch) + + root = self.find_span_by_name(spans, 'flask.request') + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.before_request') + parent = self.find_span_parent(spans, span) + + # Assert root hook + # DEV: This is the main thing we need to check with this test + self.assertEqual(root.service, 'flask') + self.assertEqual(root.name, 'flask.request') + self.assertEqual(root.resource, 'GET /') + self.assertEqual(root.get_tag('flask.endpoint'), 'index') + self.assertEqual(root.get_tag('flask.url_rule'), '/') + self.assertEqual(root.get_tag('http.method'), 'GET') + assert_span_http_status_code(root, 401) + self.assertEqual(root.get_tag(http.URL), 'http://localhost/') + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.before_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.before_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.preprocess_request') + + def test_before_first_request(self): + """ + When Flask before_first_request hook is registered + We create the expected spans + """ + @self.app.before_first_request + def before_first_request(): + pass + + req = 
self.client.get('/') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.before_first_request') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.before_first_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.before_first_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.try_trigger_before_first_request_functions') + + # Make a second request to ensure a span isn't created + req = self.client.get('/') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 8) + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.before_first_request', required=False) + self.assertIsNone(span) + + def test_after_request(self): + """ + When Flask after_request hook is registered + We create the expected spans + """ + @self.app.after_request + def after_request(response): + return response + + req = self.client.get('/') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.after_request') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.after_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.after_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.process_response') + + def test_after_request_change_status(self): + """ + When Flask after_request hook is registered + We create the expected spans + """ + @self.app.after_request + def after_request(response): + response.status_code = 401 + return response + + req = self.client.get('/') + self.assertEqual(req.status_code, 401) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + root = self.find_span_by_name(spans, 'flask.request') + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.after_request') + parent = self.find_span_parent(spans, span) + + # Assert root span + assert_span_http_status_code(root, 401) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.after_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.after_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.process_response') + + def test_teardown_request(self): + """ + When Flask teardown_request hook is registered + We create the expected spans + """ + @self.app.teardown_request + def teardown_request(request): + pass + + req = self.client.get('/') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.teardown_request') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 
'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.teardown_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.teardown_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.do_teardown_request') + + def test_teardown_appcontext(self): + """ + When Flask teardown_appcontext hook is registered + We create the expected spans + """ + @self.app.teardown_appcontext + def teardown_appcontext(appctx): + pass + + req = self.client.get('/') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.teardown_appcontext') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.teardown_appcontext') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.teardown_appcontext') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.do_teardown_appcontext') + + def test_bp_before_request(self): + """ + When Blueprint before_request hook is registered + We create the expected spans + """ + @self.bp.before_request + def bp_before_request(): + pass + + self.app.register_blueprint(self.bp) + req = self.client.get('/bp') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Blueprint') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.bp_before_request') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.bp_before_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.bp_before_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.preprocess_request') + + def test_bp_before_app_request(self): + """ + When Blueprint before_app_request hook is registered + We create the expected spans + """ + @self.bp.before_app_request + def bp_before_app_request(): + pass + + self.app.register_blueprint(self.bp) + req = self.client.get('/') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.bp_before_app_request') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.bp_before_app_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.bp_before_app_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.preprocess_request') + + def test_before_app_first_request(self): + """ + When Blueprint before_app_first_request hook is registered + We create the expected spans + """ + @self.bp.before_app_first_request + def bp_before_app_first_request(): + pass + + self.app.register_blueprint(self.bp) + req = self.client.get('/') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + span = 
self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.bp_before_app_first_request') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.bp_before_app_first_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.bp_before_app_first_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.try_trigger_before_first_request_functions') + + # Make a second request to ensure a span isn't created + req = self.client.get('/') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 8) + + span = self.find_span_by_name( + spans, + 'tests.contrib.flask.test_hooks.bp_before_app_first_request', + required=False, + ) + self.assertIsNone(span) + + def test_bp_after_request(self): + """ + When Blueprint after_request hook is registered + We create the expected spans + """ + @self.bp.after_request + def bp_after_request(response): + return response + + self.app.register_blueprint(self.bp) + req = self.client.get('/bp') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Blueprint') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.bp_after_request') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.bp_after_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.bp_after_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.process_response') + + def test_bp_after_app_request(self): + """ + When Blueprint after_app_request hook is registered + We create the expected spans + """ + @self.bp.after_app_request + def bp_after_app_request(response): + return response + + self.app.register_blueprint(self.bp) + req = self.client.get('/') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.bp_after_app_request') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.bp_after_app_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.bp_after_app_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.process_response') + + def test_bp_teardown_request(self): + """ + When Blueprint teardown_request hook is registered + We create the expected spans + """ + @self.bp.teardown_request + def bp_teardown_request(request): + pass + + self.app.register_blueprint(self.bp) + req = self.client.get('/bp') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Blueprint') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.bp_teardown_request') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.bp_teardown_request') + 
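# DEV: hook spans use the fully-qualified function name for both the span + # name and the resource. + 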
self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.bp_teardown_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.do_teardown_request') + + def test_bp_teardown_app_request(self): + """ + When Blueprint teardown_app_request hook is registered + We create the expected spans + """ + @self.bp.teardown_app_request + def bp_teardown_app_request(request): + pass + + self.app.register_blueprint(self.bp) + req = self.client.get('/') + self.assertEqual(req.status_code, 200) + self.assertEqual(req.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + span = self.find_span_by_name(spans, 'tests.contrib.flask.test_hooks.bp_teardown_app_request') + parent = self.find_span_parent(spans, span) + + # Assert hook span + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.test_hooks.bp_teardown_app_request') + self.assertEqual(span.resource, 'tests.contrib.flask.test_hooks.bp_teardown_app_request') + self.assertEqual(span.meta, dict()) + + # Assert correct parent span + self.assertEqual(parent.name, 'flask.do_teardown_request') diff --git a/tests/contrib/flask/test_idempotency.py b/tests/contrib/flask/test_idempotency.py new file mode 100644 index 0000000000..b9c21ffbe0 --- /dev/null +++ b/tests/contrib/flask/test_idempotency.py @@ -0,0 +1,80 @@ +import mock +import unittest + +import flask +from ddtrace.vendor import wrapt + +from ddtrace.contrib.flask import patch, unpatch +from ddtrace.contrib.flask.patch import _w, _u + + +class FlaskIdempotencyTestCase(unittest.TestCase): + def tearDown(self): + # Double check we unpatch after every test + unpatch() + + def assert_is_patched(self): + self.assertTrue(flask._datadog_patch) + self.assertTrue(isinstance(flask.render_template, wrapt.ObjectProxy)) + + def assert_is_not_patched(self): + self.assertFalse(flask._datadog_patch) + self.assertFalse(isinstance(flask.render_template, wrapt.ObjectProxy)) + + def test_datadog_patch(self): + # If we have been patching/testing in other files, + # at least make sure this is where we want it + if hasattr(flask, '_datadog_patch'): + self.assertFalse(flask._datadog_patch) + + # Patching sets `_datadog_patch` to `True` + patch() + self.assert_is_patched() + + # Unpatching sets `_datadog_patch` to `False` + unpatch() + self.assert_is_not_patched() + + # DEV: Use `side_effect` so the original function still gets called + @mock.patch('ddtrace.contrib.flask._patch._w', side_effect=_w) + def test_patch_idempotency(self, _w): + # Ensure we didn't do any patching automatically + _w.assert_not_called() + self.assert_is_not_patched() + + # Patch for the first time + patch() + _w.assert_called() + self.assert_is_patched() + + # Reset the mock so we can assert call count + _w.reset_mock() + + # Call patch a second time + patch() + _w.assert_not_called() + self.assert_is_patched() + + # DEV: Use `side_effect` so the original function still gets called + @mock.patch('ddtrace.contrib.flask._patch._w', side_effect=_w) + @mock.patch('ddtrace.contrib.flask._patch._u', side_effect=_u) + def test_unpatch_idempotency(self, _u, _w): + # We need to patch in order to unpatch + patch() + _w.assert_called() + self.assert_is_patched() + + # Ensure we didn't do any unpatching automatically + _u.assert_not_called() + + unpatch() + _u.assert_called() + self.assert_is_not_patched() + + # Reset the mock so we can assert call count + _u.reset_mock() + + # Call unpatch a second time + unpatch() + 
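# DEV: flask._datadog_patch is already False at this point, so the second + # unpatch() above should not call _u again. + 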
_u.assert_not_called() + self.assert_is_not_patched() diff --git a/tests/contrib/flask/test_middleware.py b/tests/contrib/flask/test_middleware.py new file mode 100644 index 0000000000..4261c31678 --- /dev/null +++ b/tests/contrib/flask/test_middleware.py @@ -0,0 +1,387 @@ +# -*- coding: utf-8 -*- +import time +import re + +from unittest import TestCase + +from ddtrace.contrib.flask import TraceMiddleware +from ddtrace.constants import SAMPLING_PRIORITY_KEY +from ddtrace.ext import http, errors + +from tests.opentracer.utils import init_tracer +from .web import create_app +from ...test_tracer import get_dummy_tracer +from ...utils import assert_span_http_status_code + + +class TestFlask(TestCase): + """Ensures Flask is properly instrumented.""" + + def setUp(self): + self.tracer = get_dummy_tracer() + self.flask_app = create_app() + self.traced_app = TraceMiddleware( + self.flask_app, + self.tracer, + service='test.flask.service', + distributed_tracing=True, + ) + + # make the app testable + self.flask_app.config['TESTING'] = True + self.app = self.flask_app.test_client() + + def test_double_instrumentation(self): + # ensure Flask is never instrumented twice when `ddtrace-run` + # and `TraceMiddleware` are used together. `traced_app` MUST + # be assigned otherwise it's not possible to reproduce the + # problem (the test scope must keep a strong reference) + traced_app = TraceMiddleware(self.flask_app, self.tracer) # noqa: F841 + rv = self.app.get('/child') + assert rv.status_code == 200 + spans = self.tracer.writer.pop() + assert len(spans) == 2 + + def test_double_instrumentation_config(self): + # ensure Flask uses the last set configuration to be sure + # there are no breaking changes for who uses `ddtrace-run` + # with the `TraceMiddleware` + TraceMiddleware( + self.flask_app, + self.tracer, + service='new-intake', + distributed_tracing=False, + ) + assert self.flask_app._service == 'new-intake' + assert self.flask_app._use_distributed_tracing is False + rv = self.app.get('/child') + assert rv.status_code == 200 + spans = self.tracer.writer.pop() + assert len(spans) == 2 + + def test_child(self): + start = time.time() + rv = self.app.get('/child') + end = time.time() + # ensure request worked + assert rv.status_code == 200 + assert rv.data == b'child' + # ensure trace worked + spans = self.tracer.writer.pop() + assert len(spans) == 2 + + spans_by_name = {s.name: s for s in spans} + + s = spans_by_name['flask.request'] + assert s.span_id + assert s.trace_id + assert not s.parent_id + assert s.service == 'test.flask.service' + assert s.resource == 'child' + assert s.start >= start + assert s.duration <= end - start + assert s.error == 0 + + c = spans_by_name['child'] + assert c.span_id + assert c.trace_id == s.trace_id + assert c.parent_id == s.span_id + assert c.service == 'test.flask.service' + assert c.resource == 'child' + assert c.start >= start + assert c.duration <= end - start + assert c.error == 0 + + def test_success(self): + start = time.time() + rv = self.app.get('/') + end = time.time() + + # ensure request worked + assert rv.status_code == 200 + assert rv.data == b'hello' + + # ensure trace worked + assert not self.tracer.current_span(), self.tracer.current_span().pprint() + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == 'test.flask.service' + assert s.resource == 'index' + assert s.start >= start + assert s.duration <= end - start + assert s.error == 0 + assert_span_http_status_code(s, 200) + assert s.meta.get(http.METHOD) == 
'GET' + + services = self.tracer.writer.pop_services() + expected = {} + assert services == expected + + def test_template(self): + start = time.time() + rv = self.app.get('/tmpl') + end = time.time() + + # ensure request worked + assert rv.status_code == 200 + assert rv.data == b'hello earth' + + # ensure trace worked + assert not self.tracer.current_span(), self.tracer.current_span().pprint() + spans = self.tracer.writer.pop() + assert len(spans) == 2 + by_name = {s.name: s for s in spans} + s = by_name['flask.request'] + assert s.service == 'test.flask.service' + assert s.resource == 'tmpl' + assert s.start >= start + assert s.duration <= end - start + assert s.error == 0 + assert_span_http_status_code(s, 200) + assert s.meta.get(http.METHOD) == 'GET' + + t = by_name['flask.template'] + assert t.get_tag('flask.template') == 'test.html' + assert t.parent_id == s.span_id + assert t.trace_id == s.trace_id + assert s.start < t.start < t.start + t.duration < end + + def test_handleme(self): + start = time.time() + rv = self.app.get('/handleme') + end = time.time() + + # ensure request worked + assert rv.status_code == 202 + assert rv.data == b'handled' + + # ensure trace worked + assert not self.tracer.current_span(), self.tracer.current_span().pprint() + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == 'test.flask.service' + assert s.resource == 'handle_me' + assert s.start >= start + assert s.duration <= end - start + assert s.error == 0 + assert_span_http_status_code(s, 202) + assert s.meta.get(http.METHOD) == 'GET' + + def test_template_err(self): + start = time.time() + try: + self.app.get('/tmpl/err') + except Exception: + pass + else: + assert 0 + end = time.time() + + # ensure trace worked + assert not self.tracer.current_span(), self.tracer.current_span().pprint() + spans = self.tracer.writer.pop() + assert len(spans) == 1 + by_name = {s.name: s for s in spans} + s = by_name['flask.request'] + assert s.service == 'test.flask.service' + assert s.resource == 'tmpl_err' + assert s.start >= start + assert s.duration <= end - start + assert s.error == 1 + assert_span_http_status_code(s, 500) + assert s.meta.get(http.METHOD) == 'GET' + + def test_template_render_err(self): + start = time.time() + try: + self.app.get('/tmpl/render_err') + except Exception: + pass + else: + assert 0 + end = time.time() + + # ensure trace worked + assert not self.tracer.current_span(), self.tracer.current_span().pprint() + spans = self.tracer.writer.pop() + assert len(spans) == 2 + by_name = {s.name: s for s in spans} + s = by_name['flask.request'] + assert s.service == 'test.flask.service' + assert s.resource == 'tmpl_render_err' + assert s.start >= start + assert s.duration <= end - start + assert s.error == 1 + assert_span_http_status_code(s, 500) + assert s.meta.get(http.METHOD) == 'GET' + t = by_name['flask.template'] + assert t.get_tag('flask.template') == 'render_err.html' + assert t.error == 1 + assert t.parent_id == s.span_id + assert t.trace_id == s.trace_id + + def test_error(self): + start = time.time() + rv = self.app.get('/error') + end = time.time() + + # ensure the request itself worked + assert rv.status_code == 500 + assert rv.data == b'error' + + # ensure the request was traced. 
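+ # DEV: current_span() should be None here because the request span finished when + # the response was returned.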
+ assert not self.tracer.current_span() + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == 'test.flask.service' + assert s.resource == 'error' + assert s.start >= start + assert s.duration <= end - start + assert_span_http_status_code(s, 500) + assert s.meta.get(http.METHOD) == 'GET' + + def test_fatal(self): + if not self.traced_app.use_signals: + return + + start = time.time() + try: + self.app.get('/fatal') + except ZeroDivisionError: + pass + else: + assert 0 + end = time.time() + + # ensure the request was traced. + assert not self.tracer.current_span() + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == 'test.flask.service' + assert s.resource == 'fatal' + assert s.start >= start + assert s.duration <= end - start + assert_span_http_status_code(s, 500) + assert s.meta.get(http.METHOD) == 'GET' + assert 'ZeroDivisionError' in s.meta.get(errors.ERROR_TYPE), s.meta + assert 'by zero' in s.meta.get(errors.ERROR_MSG) + assert re.search('File ".*/contrib/flask/web.py", line [0-9]+, in fatal', s.meta.get(errors.ERROR_STACK)) + + def test_unicode(self): + start = time.time() + rv = self.app.get(u'/üŋïĉóđē') + end = time.time() + + # ensure request worked + assert rv.status_code == 200 + assert rv.data == b'\xc3\xbc\xc5\x8b\xc3\xaf\xc4\x89\xc3\xb3\xc4\x91\xc4\x93' + + # ensure trace worked + assert not self.tracer.current_span(), self.tracer.current_span().pprint() + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == 'test.flask.service' + assert s.resource == u'üŋïĉóđē' + assert s.start >= start + assert s.duration <= end - start + assert s.error == 0 + assert_span_http_status_code(s, 200) + assert s.meta.get(http.METHOD) == 'GET' + assert s.meta.get(http.URL) == u'http://localhost/üŋïĉóđē' + + def test_404(self): + start = time.time() + rv = self.app.get(u'/404/üŋïĉóđē') + end = time.time() + + # ensure that we hit a 404 + assert rv.status_code == 404 + + # ensure trace worked + assert not self.tracer.current_span(), self.tracer.current_span().pprint() + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == 'test.flask.service' + assert s.resource == u'404' + assert s.start >= start + assert s.duration <= end - start + assert s.error == 0 + assert_span_http_status_code(s, 404) + assert s.meta.get(http.METHOD) == 'GET' + assert s.meta.get(http.URL) == u'http://localhost/404/üŋïĉóđē' + + def test_propagation(self): + rv = self.app.get('/', headers={ + 'x-datadog-trace-id': '1234', + 'x-datadog-parent-id': '4567', + 'x-datadog-sampling-priority': '2' + }) + + # ensure request worked + assert rv.status_code == 200 + assert rv.data == b'hello' + + # ensure trace worked + assert not self.tracer.current_span(), self.tracer.current_span().pprint() + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + + # ensure the propagation worked well + assert s.trace_id == 1234 + assert s.parent_id == 4567 + assert s.get_metric(SAMPLING_PRIORITY_KEY) == 2 + + def test_custom_span(self): + rv = self.app.get('/custom_span') + assert rv.status_code == 200 + # ensure trace worked + assert not self.tracer.current_span(), self.tracer.current_span().pprint() + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == 'test.flask.service' + assert s.resource == 'overridden' + assert s.error == 0 + assert_span_http_status_code(s, 200) + assert s.meta.get(http.METHOD) == 'GET' + + def 
test_success_200_ot(self):
+        """OpenTracing version of test_success_200."""
+        ot_tracer = init_tracer('my_svc', self.tracer)
+        writer = self.tracer.writer
+
+        with ot_tracer.start_active_span('ot_span'):
+            start = time.time()
+            rv = self.app.get('/')
+            end = time.time()
+
+        # ensure request worked
+        assert rv.status_code == 200
+        assert rv.data == b'hello'
+
+        # ensure trace worked
+        assert not self.tracer.current_span(), self.tracer.current_span().pprint()
+        spans = writer.pop()
+        assert len(spans) == 2
+        ot_span, dd_span = spans
+
+        # confirm the parenting
+        assert ot_span.parent_id is None
+        assert dd_span.parent_id == ot_span.span_id
+
+        assert ot_span.resource == 'ot_span'
+        assert ot_span.service == 'my_svc'
+
+        assert dd_span.resource == 'index'
+        assert dd_span.start >= start
+        assert dd_span.duration <= end - start
+        assert dd_span.error == 0
+        assert_span_http_status_code(dd_span, 200)
+        assert dd_span.meta.get(http.METHOD) == 'GET'
diff --git a/tests/contrib/flask/test_request.py b/tests/contrib/flask/test_request.py
new file mode 100644
index 0000000000..86c0f5e44b
--- /dev/null
+++ b/tests/contrib/flask/test_request.py
@@ -0,0 +1,771 @@
+# -*- coding: utf-8 -*-
+from ddtrace.compat import PY2
+from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
+from ddtrace.contrib.flask.patch import flask_version
+from ddtrace.ext import http
+from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID
+from flask import abort
+
+from . import BaseFlaskTestCase
+from ...utils import assert_span_http_status_code
+
+
+base_exception_name = 'builtins.Exception'
+if PY2:
+    base_exception_name = 'exceptions.Exception'
+
+
+class FlaskRequestTestCase(BaseFlaskTestCase):
+    def test_request(self):
+        """
+        When making a request
+            We create the expected spans
+        """
+        @self.app.route('/')
+        def index():
+            return 'Hello Flask', 200
+
+        res = self.client.get('/')
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.data, b'Hello Flask')
+
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 8)
+
+        # Assert the order of the spans created
+        self.assertListEqual(
+            [
+                'flask.request',
+                'flask.try_trigger_before_first_request_functions',
+                'flask.preprocess_request',
+                'flask.dispatch_request',
+                'tests.contrib.flask.test_request.index',
+                'flask.process_response',
+                'flask.do_teardown_request',
+                'flask.do_teardown_appcontext',
+            ],
+            [s.name for s in spans],
+        )
+
+        # Assert span services
+        for span in spans:
+            self.assertEqual(span.service, 'flask')
+
+        # Root request span
+        req_span = spans[0]
+        self.assertEqual(req_span.service, 'flask')
+        self.assertEqual(req_span.name, 'flask.request')
+        self.assertEqual(req_span.resource, 'GET /')
+        self.assertEqual(req_span.span_type, 'web')
+        self.assertEqual(req_span.error, 0)
+        self.assertIsNone(req_span.parent_id)
+
+        # Request tags
+        self.assertEqual(req_span.get_tag('flask.endpoint'), 'index')
+        self.assertEqual(req_span.get_tag('flask.url_rule'), '/')
+        self.assertEqual(req_span.get_tag('http.method'), 'GET')
+        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/')
+        assert_span_http_status_code(req_span, 200)
+        assert http.QUERY_STRING not in req_span.meta
+
+        # Handler span
+        handler_span = spans[4]
+        self.assertEqual(handler_span.service, 'flask')
+        self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index')
+        self.assertEqual(handler_span.resource, '/')
+        self.assertEqual(handler_span.error, 0)
+
+    def test_request_query_string_trace(self):
+        """Make sure that when making a request we create the expected spans and capture the query string."""
+        @self.app.route('/')
+        def index():
+            return 'Hello Flask', 200
+
+        with self.override_http_config('flask', dict(trace_query_string=True)):
+            self.client.get('/?foo=bar&baz=biz')
+        spans = self.get_spans()
+
+        # Request tags
+        assert spans[0].get_tag(http.QUERY_STRING) == 'foo=bar&baz=biz'
+
+    def test_analytics_global_on_integration_default(self):
+        """
+        When making a request
+            When integration trace search is not set and the sample rate is not set and trace search is enabled globally
+                We expect the root span to have the appropriate tag
+        """
+        @self.app.route('/')
+        def index():
+            return 'Hello Flask', 200
+
+        with self.override_global_config(dict(analytics_enabled=True)):
+            res = self.client.get('/')
+            self.assertEqual(res.status_code, 200)
+            self.assertEqual(res.data, b'Hello Flask')
+
+        root = self.get_root_span()
+        root.assert_matches(
+            name='flask.request',
+            metrics={
+                ANALYTICS_SAMPLE_RATE_KEY: 1.0,
+            },
+        )
+
+        for span in self.spans:
+            if span == root:
+                continue
+            self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    def test_analytics_global_on_integration_on(self):
+        """
+        When making a request
+            When integration trace search is enabled and the sample rate is set and trace search is enabled globally
+                We expect the root span to have the appropriate tag
+        """
+        @self.app.route('/')
+        def index():
+            return 'Hello Flask', 200
+
+        with self.override_global_config(dict(analytics_enabled=True)):
+            with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
+                res = self.client.get('/')
+                self.assertEqual(res.status_code, 200)
+                self.assertEqual(res.data, b'Hello Flask')
+
+        root = self.get_root_span()
+        root.assert_matches(
+            name='flask.request',
+            metrics={
+                ANALYTICS_SAMPLE_RATE_KEY: 0.5,
+            },
+        )
+
+        for span in self.spans:
+            if span == root:
+                continue
+            self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    def test_analytics_global_off_integration_default(self):
+        """
+        When making a request
+            When integration trace search is not set and the sample rate is not set and trace search is disabled globally
+                We expect the root span to not include the tag
+        """
+        @self.app.route('/')
+        def index():
+            return 'Hello Flask', 200
+
+        with self.override_global_config(dict(analytics_enabled=False)):
+            res = self.client.get('/')
+            self.assertEqual(res.status_code, 200)
+            self.assertEqual(res.data, b'Hello Flask')
+
+        root = self.get_root_span()
+        self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+        for span in self.spans:
+            if span == root:
+                continue
+            self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    def test_analytics_global_off_integration_on(self):
+        """
+        When making a request
+            When integration trace search is enabled and the sample rate is set and trace search is disabled globally
+                We expect the root span to have the appropriate tag
+        """
+        @self.app.route('/')
+        def index():
+            return 'Hello Flask', 200
+
+        with self.override_global_config(dict(analytics_enabled=False)):
+            with self.override_config('flask', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
+                res = self.client.get('/')
+                self.assertEqual(res.status_code, 200)
+                self.assertEqual(res.data, b'Hello Flask')
+                root = self.get_root_span()
+                root.assert_matches(
+                    name='flask.request',
+                    metrics={
+                        ANALYTICS_SAMPLE_RATE_KEY: 0.5,
+                    },
+                )
+
+        for span in self.spans:
+            if span == root:
+                continue
+            self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    def
test_distributed_tracing(self): + """ + When making a request + When distributed tracing headers are present + We create the expected spans + """ + @self.app.route('/') + def index(): + return 'Hello Flask', 200 + + # Default: distributed tracing enabled + res = self.client.get('/', headers={ + HTTP_HEADER_PARENT_ID: '12345', + HTTP_HEADER_TRACE_ID: '678910', + }) + self.assertEqual(res.status_code, 200) + self.assertEqual(res.data, b'Hello Flask') + + # Assert parent and trace id are properly set on the root span + span = self.find_span_by_name(self.get_spans(), 'flask.request') + self.assertEqual(span.trace_id, 678910) + self.assertEqual(span.parent_id, 12345) + + # Explicitly enable distributed tracing + with self.override_config('flask', dict(distributed_tracing_enabled=True)): + res = self.client.get('/', headers={ + HTTP_HEADER_PARENT_ID: '12345', + HTTP_HEADER_TRACE_ID: '678910', + }) + self.assertEqual(res.status_code, 200) + self.assertEqual(res.data, b'Hello Flask') + + # Assert parent and trace id are properly set on the root span + span = self.find_span_by_name(self.get_spans(), 'flask.request') + self.assertEqual(span.trace_id, 678910) + self.assertEqual(span.parent_id, 12345) + + # With distributed tracing disabled + with self.override_config('flask', dict(distributed_tracing_enabled=False)): + res = self.client.get('/', headers={ + HTTP_HEADER_PARENT_ID: '12345', + HTTP_HEADER_TRACE_ID: '678910', + }) + self.assertEqual(res.status_code, 200) + self.assertEqual(res.data, b'Hello Flask') + + # Assert parent and trace id are properly set on the root span + span = self.find_span_by_name(self.get_spans(), 'flask.request') + self.assertNotEqual(span.trace_id, 678910) + self.assertIsNone(span.parent_id) + + def test_request_query_string(self): + """ + When making a request + When the request contains a query string + We create the expected spans + """ + @self.app.route('/') + def index(): + return 'Hello Flask', 200 + + res = self.client.get('/', query_string=dict(hello='flask')) + self.assertEqual(res.status_code, 200) + self.assertEqual(res.data, b'Hello Flask') + + spans = self.get_spans() + self.assertEqual(len(spans), 8) + + # Assert the order of the spans created + self.assertListEqual( + [ + 'flask.request', + 'flask.try_trigger_before_first_request_functions', + 'flask.preprocess_request', + 'flask.dispatch_request', + 'tests.contrib.flask.test_request.index', + 'flask.process_response', + 'flask.do_teardown_request', + 'flask.do_teardown_appcontext', + ], + [s.name for s in spans], + ) + + # Assert span services + for span in spans: + self.assertEqual(span.service, 'flask') + + # Root request span + req_span = spans[0] + self.assertEqual(req_span.service, 'flask') + self.assertEqual(req_span.name, 'flask.request') + # Note: contains no query string + self.assertEqual(req_span.resource, 'GET /') + self.assertEqual(req_span.span_type, 'web') + self.assertEqual(req_span.error, 0) + self.assertIsNone(req_span.parent_id) + + # Request tags + self.assertEqual(req_span.get_tag('flask.endpoint'), 'index') + # Note: contains no query string + self.assertEqual(req_span.get_tag('flask.url_rule'), '/') + self.assertEqual(req_span.get_tag('http.method'), 'GET') + # Note: contains no query string + self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/') + assert_span_http_status_code(req_span, 200) + + # Handler span + handler_span = spans[4] + self.assertEqual(handler_span.service, 'flask') + self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.index') + # 
Note: contains no query string
+        self.assertEqual(handler_span.resource, '/')
+        self.assertEqual(handler_span.error, 0)
+
+    def test_request_unicode(self):
+        """
+        When making a request
+            When the url contains unicode
+                We create the expected spans
+        """
+        @self.app.route(u'/üŋïĉóđē')
+        def unicode():
+            return 'üŋïĉóđē', 200
+
+        res = self.client.get(u'/üŋïĉóđē')
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.data, b'\xc3\xbc\xc5\x8b\xc3\xaf\xc4\x89\xc3\xb3\xc4\x91\xc4\x93')
+
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 8)
+
+        # Assert the order of the spans created
+        self.assertListEqual(
+            [
+                'flask.request',
+                'flask.try_trigger_before_first_request_functions',
+                'flask.preprocess_request',
+                'flask.dispatch_request',
+                'tests.contrib.flask.test_request.unicode',
+                'flask.process_response',
+                'flask.do_teardown_request',
+                'flask.do_teardown_appcontext',
+            ],
+            [s.name for s in spans],
+        )
+
+        # Assert span services
+        for span in spans:
+            self.assertEqual(span.service, 'flask')
+
+        # Root request span
+        req_span = spans[0]
+        self.assertEqual(req_span.service, 'flask')
+        self.assertEqual(req_span.name, 'flask.request')
+        self.assertEqual(req_span.resource, u'GET /üŋïĉóđē')
+        self.assertEqual(req_span.span_type, 'web')
+        self.assertEqual(req_span.error, 0)
+        self.assertIsNone(req_span.parent_id)
+
+        # Request tags
+        self.assertEqual(req_span.get_tag('flask.endpoint'), 'unicode')
+        self.assertEqual(req_span.get_tag('flask.url_rule'), u'/üŋïĉóđē')
+        self.assertEqual(req_span.get_tag('http.method'), 'GET')
+        self.assertEqual(req_span.get_tag(http.URL), u'http://localhost/üŋïĉóđē')
+        assert_span_http_status_code(req_span, 200)
+
+        # Handler span
+        handler_span = spans[4]
+        self.assertEqual(handler_span.service, 'flask')
+        self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.unicode')
+        self.assertEqual(handler_span.resource, u'/üŋïĉóđē')
+        self.assertEqual(handler_span.error, 0)
+
+    def test_request_404(self):
+        """
+        When making a request
+            When the requested endpoint was not found
+                We create the expected spans
+        """
+        res = self.client.get('/not-found')
+        self.assertEqual(res.status_code, 404)
+
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 9)
+
+        # Assert the order of the spans created
+        self.assertListEqual(
+            [
+                'flask.request',
+                'flask.try_trigger_before_first_request_functions',
+                'flask.preprocess_request',
+                'flask.dispatch_request',
+                'flask.handle_user_exception',
+                'flask.handle_http_exception',
+                'flask.process_response',
+                'flask.do_teardown_request',
+                'flask.do_teardown_appcontext',
+            ],
+            [s.name for s in spans],
+        )
+
+        # Assert span services
+        for span in spans:
+            self.assertEqual(span.service, 'flask')
+
+        # Root request span
+        req_span = spans[0]
+        self.assertEqual(req_span.service, 'flask')
+        self.assertEqual(req_span.name, 'flask.request')
+        self.assertEqual(req_span.resource, 'GET 404')
+        self.assertEqual(req_span.span_type, 'web')
+        self.assertEqual(req_span.error, 0)
+        self.assertIsNone(req_span.parent_id)
+
+        # Request tags
+        self.assertEqual(req_span.get_tag('http.method'), 'GET')
+        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found')
+        assert_span_http_status_code(req_span, 404)
+
+        # Dispatch span
+        dispatch_span = spans[3]
+        self.assertEqual(dispatch_span.service, 'flask')
+        self.assertEqual(dispatch_span.name, 'flask.dispatch_request')
+        self.assertEqual(dispatch_span.resource, 'flask.dispatch_request')
+        self.assertEqual(dispatch_span.error, 1)
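+        # DEV: routing a 404 raises werkzeug.exceptions.NotFound, so the dispatch span is an error even though the root request span is not +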
self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) + self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) + self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') + + def test_request_abort_404(self): + """ + When making a request + When the requested endpoint calls `abort(404)` + We create the expected spans + """ + @self.app.route('/not-found') + def not_found(): + abort(404) + + res = self.client.get('/not-found') + self.assertEqual(res.status_code, 404) + + spans = self.get_spans() + self.assertEqual(len(spans), 10) + + # Assert the order of the spans created + self.assertListEqual( + [ + 'flask.request', + 'flask.try_trigger_before_first_request_functions', + 'flask.preprocess_request', + 'flask.dispatch_request', + 'tests.contrib.flask.test_request.not_found', + 'flask.handle_user_exception', + 'flask.handle_http_exception', + 'flask.process_response', + 'flask.do_teardown_request', + 'flask.do_teardown_appcontext', + ], + [s.name for s in spans], + ) + + # Assert span services + for span in spans: + self.assertEqual(span.service, 'flask') + + # Root request span + req_span = spans[0] + self.assertEqual(req_span.service, 'flask') + self.assertEqual(req_span.name, 'flask.request') + self.assertEqual(req_span.resource, 'GET /not-found') + self.assertEqual(req_span.span_type, 'web') + self.assertEqual(req_span.error, 0) + self.assertIsNone(req_span.parent_id) + + # Request tags + self.assertEqual(req_span.get_tag('http.method'), 'GET') + self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/not-found') + assert_span_http_status_code(req_span, 404) + self.assertEqual(req_span.get_tag('flask.endpoint'), 'not_found') + self.assertEqual(req_span.get_tag('flask.url_rule'), '/not-found') + + # Dispatch span + dispatch_span = spans[3] + self.assertEqual(dispatch_span.service, 'flask') + self.assertEqual(dispatch_span.name, 'flask.dispatch_request') + self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') + self.assertEqual(dispatch_span.error, 1) + self.assertTrue(dispatch_span.get_tag('error.msg').startswith('404 Not Found')) + self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) + self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') + + # Handler span + handler_span = spans[4] + self.assertEqual(handler_span.service, 'flask') + self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.not_found') + self.assertEqual(handler_span.resource, '/not-found') + self.assertEqual(handler_span.error, 1) + self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found')) + self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) + self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound') + + def test_request_500(self): + """ + When making a request + When the requested endpoint raises an exception + We create the expected spans + """ + @self.app.route('/500') + def fivehundred(): + raise Exception('500 error') + + res = self.client.get('/500') + self.assertEqual(res.status_code, 500) + + spans = self.get_spans() + self.assertEqual(len(spans), 9) + + # Assert the order of the spans created + self.assertListEqual( + [ + 'flask.request', + 'flask.try_trigger_before_first_request_functions', + 'flask.preprocess_request', + 'flask.dispatch_request', + 'tests.contrib.flask.test_request.fivehundred', + 'flask.handle_user_exception', + 'flask.handle_exception', + 
'flask.do_teardown_request', + 'flask.do_teardown_appcontext', + ], + [s.name for s in spans], + ) + + # Assert span services + for span in spans: + self.assertEqual(span.service, 'flask') + + # Root request span + req_span = spans[0] + self.assertEqual(req_span.service, 'flask') + self.assertEqual(req_span.name, 'flask.request') + self.assertEqual(req_span.resource, 'GET /500') + self.assertEqual(req_span.span_type, 'web') + self.assertEqual(req_span.error, 1) + self.assertIsNone(req_span.parent_id) + + # Request tags + self.assertEqual(req_span.get_tag('http.method'), 'GET') + self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500') + assert_span_http_status_code(req_span, 500) + self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred') + self.assertEqual(req_span.get_tag('flask.url_rule'), '/500') + + # Dispatch span + dispatch_span = spans[3] + self.assertEqual(dispatch_span.service, 'flask') + self.assertEqual(dispatch_span.name, 'flask.dispatch_request') + self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') + self.assertEqual(dispatch_span.error, 1) + self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error')) + self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) + self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name) + + # Handler span + handler_span = spans[4] + self.assertEqual(handler_span.service, 'flask') + self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred') + self.assertEqual(handler_span.resource, '/500') + self.assertEqual(handler_span.error, 1) + self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error')) + self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) + self.assertEqual(handler_span.get_tag('error.type'), base_exception_name) + + # User exception span + user_ex_span = spans[5] + self.assertEqual(user_ex_span.service, 'flask') + self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') + self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') + self.assertEqual(user_ex_span.error, 1) + self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error')) + self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback')) + self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name) + + def test_request_501(self): + """ + When making a request + When the requested endpoint calls `abort(501)` + We create the expected spans + """ + @self.app.route('/501') + def fivehundredone(): + abort(501) + + res = self.client.get('/501') + self.assertEqual(res.status_code, 501) + + spans = self.get_spans() + self.assertEqual(len(spans), 10) + + # Assert the order of the spans created + self.assertListEqual( + [ + 'flask.request', + 'flask.try_trigger_before_first_request_functions', + 'flask.preprocess_request', + 'flask.dispatch_request', + 'tests.contrib.flask.test_request.fivehundredone', + 'flask.handle_user_exception', + 'flask.handle_http_exception', + 'flask.process_response', + 'flask.do_teardown_request', + 'flask.do_teardown_appcontext', + ], + [s.name for s in spans], + ) + + # Assert span services + for span in spans: + self.assertEqual(span.service, 'flask') + + # Root request span + req_span = spans[0] + self.assertEqual(req_span.service, 'flask') + self.assertEqual(req_span.name, 'flask.request') + self.assertEqual(req_span.resource, 'GET /501') + self.assertEqual(req_span.span_type, 'web') + self.assertEqual(req_span.error, 1) + 
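# DEV: unlike a plain 404, a 5xx status marks the root request span itself as an error +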
self.assertIsNone(req_span.parent_id) + + # Request tags + self.assertEqual(req_span.get_tag('http.method'), 'GET') + self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/501') + assert_span_http_status_code(req_span, 501) + self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundredone') + self.assertEqual(req_span.get_tag('flask.url_rule'), '/501') + + # Dispatch span + dispatch_span = spans[3] + self.assertEqual(dispatch_span.service, 'flask') + self.assertEqual(dispatch_span.name, 'flask.dispatch_request') + self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') + self.assertEqual(dispatch_span.error, 1) + self.assertTrue(dispatch_span.get_tag('error.msg').startswith('501 Not Implemented')) + self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) + self.assertEqual(dispatch_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') + + # Handler span + handler_span = spans[4] + self.assertEqual(handler_span.service, 'flask') + self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundredone') + self.assertEqual(handler_span.resource, '/501') + self.assertEqual(handler_span.error, 1) + self.assertTrue(handler_span.get_tag('error.msg').startswith('501 Not Implemented')) + self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) + self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotImplemented') + + # User exception span + user_ex_span = spans[5] + self.assertEqual(user_ex_span.service, 'flask') + self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') + self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') + self.assertEqual(user_ex_span.error, 0) + + def test_request_error_handler(self): + """ + When making a request + When the requested endpoint raises an exception + We create the expected spans + """ + @self.app.errorhandler(500) + def error_handler(e): + return 'Whoops', 500 + + @self.app.route('/500') + def fivehundred(): + raise Exception('500 error') + + res = self.client.get('/500') + self.assertEqual(res.status_code, 500) + self.assertEqual(res.data, b'Whoops') + + spans = self.get_spans() + + if flask_version >= (0, 12, 0): + self.assertEqual(len(spans), 11) + + # Assert the order of the spans created + self.assertListEqual( + [ + 'flask.request', + 'flask.try_trigger_before_first_request_functions', + 'flask.preprocess_request', + 'flask.dispatch_request', + 'tests.contrib.flask.test_request.fivehundred', + 'flask.handle_user_exception', + 'flask.handle_exception', + 'tests.contrib.flask.test_request.error_handler', + 'flask.process_response', + 'flask.do_teardown_request', + 'flask.do_teardown_appcontext', + ], + [s.name for s in spans], + ) + else: + self.assertEqual(len(spans), 10) + + # Assert the order of the spans created + self.assertListEqual( + [ + 'flask.request', + 'flask.try_trigger_before_first_request_functions', + 'flask.preprocess_request', + 'flask.dispatch_request', + 'tests.contrib.flask.test_request.fivehundred', + 'flask.handle_user_exception', + 'flask.handle_exception', + 'tests.contrib.flask.test_request.error_handler', + 'flask.do_teardown_request', + 'flask.do_teardown_appcontext', + ], + [s.name for s in spans], + ) + + # Assert span services + for span in spans: + self.assertEqual(span.service, 'flask') + + # Root request span + req_span = spans[0] + self.assertEqual(req_span.service, 'flask') + self.assertEqual(req_span.name, 'flask.request') + self.assertEqual(req_span.resource, 'GET /500') + 
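# DEV: the user error handler returns a response, but the request span is still flagged as an error below +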
self.assertEqual(req_span.span_type, 'web') + self.assertEqual(req_span.error, 1) + self.assertIsNone(req_span.parent_id) + + # Request tags + self.assertEqual(req_span.get_tag('http.method'), 'GET') + self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/500') + assert_span_http_status_code(req_span, 500) + self.assertEqual(req_span.get_tag('flask.endpoint'), 'fivehundred') + self.assertEqual(req_span.get_tag('flask.url_rule'), '/500') + + # Dispatch span + dispatch_span = spans[3] + self.assertEqual(dispatch_span.service, 'flask') + self.assertEqual(dispatch_span.name, 'flask.dispatch_request') + self.assertEqual(dispatch_span.resource, 'flask.dispatch_request') + self.assertEqual(dispatch_span.error, 1) + self.assertTrue(dispatch_span.get_tag('error.msg').startswith('500 error')) + self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback')) + self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name) + + # Handler span + handler_span = spans[4] + self.assertEqual(handler_span.service, 'flask') + self.assertEqual(handler_span.name, 'tests.contrib.flask.test_request.fivehundred') + self.assertEqual(handler_span.resource, '/500') + self.assertEqual(handler_span.error, 1) + self.assertTrue(handler_span.get_tag('error.msg').startswith('500 error')) + self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback')) + self.assertEqual(handler_span.get_tag('error.type'), base_exception_name) + + # User exception span + user_ex_span = spans[5] + self.assertEqual(user_ex_span.service, 'flask') + self.assertEqual(user_ex_span.name, 'flask.handle_user_exception') + self.assertEqual(user_ex_span.resource, 'flask.handle_user_exception') + self.assertEqual(user_ex_span.error, 1) + self.assertTrue(user_ex_span.get_tag('error.msg').startswith('500 error')) + self.assertTrue(user_ex_span.get_tag('error.stack').startswith('Traceback')) + self.assertEqual(user_ex_span.get_tag('error.type'), base_exception_name) diff --git a/tests/contrib/flask/test_signals.py b/tests/contrib/flask/test_signals.py new file mode 100644 index 0000000000..fc6e7b1206 --- /dev/null +++ b/tests/contrib/flask/test_signals.py @@ -0,0 +1,176 @@ +import mock + +import flask + +from ddtrace import Pin +from ddtrace.contrib.flask import unpatch +from ddtrace.contrib.flask.patch import flask_version + +from . 
import BaseFlaskTestCase + + +class FlaskSignalsTestCase(BaseFlaskTestCase): + def get_signal(self, signal_name): + # v0.9 missed importing `appcontext_tearing_down` in `flask/__init__.py` + # https://github.com/pallets/flask/blob/0.9/flask/__init__.py#L35-L37 + # https://github.com/pallets/flask/blob/0.9/flask/signals.py#L52 + # DEV: Version 0.9 doesn't have a patch version + if flask_version <= (0, 9) and signal_name == 'appcontext_tearing_down': + return getattr(flask.signals, signal_name) + return getattr(flask, signal_name) + + def signal_function(self, name): + def signal(*args, **kwargs): + pass + + func = mock.Mock(signal, name=name) + func.__module__ = 'tests.contrib.flask' + func.__name__ = name + return func + + def call_signal(self, signal_name, *args, **kwargs): + """Context manager helper used for generating a mock signal function and registering with flask""" + func = self.signal_function(signal_name) + + signal = self.get_signal(signal_name) + signal.connect(func, self.app) + + try: + signal.send(*args, **kwargs) + return func + finally: + # DEV: There is a bug in `blinker.Signal.disconnect` :( + signal.receivers.clear() + + def signals(self): + """Helper to get the signals for the current Flask version being tested""" + signals = [ + 'request_started', + 'request_finished', + 'request_tearing_down', + + 'template_rendered', + + 'got_request_exception', + 'appcontext_tearing_down', + ] + # This signal was added in 0.11.0 + if flask_version >= (0, 11): + signals.append('before_render_template') + + # These were added in 0.10 + if flask_version >= (0, 10): + signals.append('appcontext_pushed') + signals.append('appcontext_popped') + signals.append('message_flashed') + + return signals + + def test_patched(self): + """ + When the signals are patched + Their ``receivers_for`` method is wrapped as a ``wrapt.ObjectProxy`` + """ + # DEV: We call `patch()` in `setUp` + for signal_name in self.signals(): + signal = self.get_signal(signal_name) + receivers_for = getattr(signal, 'receivers_for') + self.assert_is_wrapped(receivers_for) + + def test_unpatch(self): + """ + When the signals are unpatched + Their ``receivers_for`` method is not a ``wrapt.ObjectProxy`` + """ + unpatch() + + for signal_name in self.signals(): + signal = self.get_signal(signal_name) + receivers_for = getattr(signal, 'receivers_for') + self.assert_is_not_wrapped(receivers_for) + + def test_signals(self): + """ + When a signal is connected + We create a span whenever that signal is sent + """ + for signal_name in self.signals(): + func = self.call_signal(signal_name, self.app) + + # Ensure our handler was called + func.assert_called_once_with(self.app) + + # Assert number of spans created + spans = self.get_spans() + self.assertEqual(len(spans), 1) + + # Assert the span that was created + span = spans[0] + self.assertEqual(span.service, 'flask') + self.assertEqual(span.name, 'tests.contrib.flask.{}'.format(signal_name)) + self.assertEqual(span.resource, 'tests.contrib.flask.{}'.format(signal_name)) + self.assertEqual(set(span.meta.keys()), set(['flask.signal'])) + self.assertEqual(span.meta['flask.signal'], signal_name) + + def test_signals_multiple(self): + """ + When a signal is connected + When multiple functions are registered for that signal + We create a span whenever that signal is sent + """ + # Our signal handlers + request_started_a = self.signal_function('request_started_a') + request_started_b = self.signal_function('request_started_b') + + flask.request_started.connect(request_started_a, self.app) 
+ flask.request_started.connect(request_started_b, self.app) + + try: + flask.request_started.send(self.app) + finally: + # DEV: There is a bug in `blinker.Signal.disconnect` :( + flask.request_started.receivers.clear() + + # Ensure our handlers were called only once + request_started_a.assert_called_once_with(self.app) + request_started_b.assert_called_once_with(self.app) + + # Assert number of spans created + spans = self.get_spans() + self.assertEqual(len(spans), 2) + + # Assert the span that was created + span_a = spans[0] + self.assertEqual(span_a.service, 'flask') + self.assertEqual(span_a.name, 'tests.contrib.flask.request_started_a') + self.assertEqual(span_a.resource, 'tests.contrib.flask.request_started_a') + self.assertEqual(set(span_a.meta.keys()), set(['flask.signal'])) + self.assertEqual(span_a.meta['flask.signal'], 'request_started') + + # Assert the span that was created + span_b = spans[1] + self.assertEqual(span_b.service, 'flask') + self.assertEqual(span_b.name, 'tests.contrib.flask.request_started_b') + self.assertEqual(span_b.resource, 'tests.contrib.flask.request_started_b') + self.assertEqual(set(span_b.meta.keys()), set(['flask.signal'])) + self.assertEqual(span_b.meta['flask.signal'], 'request_started') + + def test_signals_pin_disabled(self): + """ + When a signal is connected + When the app pin is disabled + We do not create any spans when the signal is sent + """ + # Disable the pin on the app + pin = Pin.get_from(self.app) + pin.tracer.enabled = False + + for signal_name in self.signals(): + func = self.call_signal(signal_name, self.app) + + # Ensure our function was called by the signal + func.assert_called_once_with(self.app) + + # Assert number of spans created + spans = self.get_spans() + self.assertEqual(len(spans), 0) diff --git a/tests/contrib/flask/test_static.py b/tests/contrib/flask/test_static.py new file mode 100644 index 0000000000..9ad8be9053 --- /dev/null +++ b/tests/contrib/flask/test_static.py @@ -0,0 +1,94 @@ +from ddtrace.ext import http + +from . 
import BaseFlaskTestCase
+from ...utils import assert_span_http_status_code
+
+
+class FlaskStaticFileTestCase(BaseFlaskTestCase):
+    def test_serve_static_file(self):
+        """
+        When fetching a static file
+            We create the expected spans
+        """
+        # DEV: By default a static handler for `./static/` is configured for us
+        res = self.client.get('/static/test.txt')
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.data, b'Hello Flask\n')
+
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 9)
+
+        req_span = self.find_span_by_name(spans, 'flask.request')
+        handler_span = self.find_span_by_name(spans, 'static')
+        send_file_span = self.find_span_by_name(spans, 'flask.send_static_file')
+
+        # flask.request span
+        self.assertEqual(req_span.error, 0)
+        self.assertEqual(req_span.service, 'flask')
+        self.assertEqual(req_span.name, 'flask.request')
+        self.assertEqual(req_span.resource, 'GET /static/<path:filename>')
+        self.assertEqual(req_span.get_tag('flask.endpoint'), 'static')
+        self.assertEqual(req_span.get_tag('flask.url_rule'), '/static/<path:filename>')
+        self.assertEqual(req_span.get_tag('flask.view_args.filename'), 'test.txt')
+        assert_span_http_status_code(req_span, 200)
+        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/static/test.txt')
+        self.assertEqual(req_span.get_tag('http.method'), 'GET')
+
+        # static span
+        self.assertEqual(handler_span.error, 0)
+        self.assertEqual(handler_span.service, 'flask')
+        self.assertEqual(handler_span.name, 'static')
+        self.assertEqual(handler_span.resource, '/static/<path:filename>')
+
+        # flask.send_static_file span
+        self.assertEqual(send_file_span.error, 0)
+        self.assertEqual(send_file_span.service, 'flask')
+        self.assertEqual(send_file_span.name, 'flask.send_static_file')
+        self.assertEqual(send_file_span.resource, 'flask.send_static_file')
+
+    def test_serve_static_file_404(self):
+        """
+        When fetching a static file
+            When the file does not exist
+                We create the expected spans
+        """
+        # DEV: By default a static handler for `./static/` is configured for us
+        res = self.client.get('/static/unknown-file')
+        self.assertEqual(res.status_code, 404)
+
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 11)
+
+        req_span = self.find_span_by_name(spans, 'flask.request')
+        handler_span = self.find_span_by_name(spans, 'static')
+        send_file_span = self.find_span_by_name(spans, 'flask.send_static_file')
+
+        # flask.request span
+        self.assertEqual(req_span.error, 0)
+        self.assertEqual(req_span.service, 'flask')
+        self.assertEqual(req_span.name, 'flask.request')
+        self.assertEqual(req_span.resource, 'GET /static/<path:filename>')
+        self.assertEqual(req_span.get_tag('flask.endpoint'), 'static')
+        self.assertEqual(req_span.get_tag('flask.url_rule'), '/static/<path:filename>')
+        self.assertEqual(req_span.get_tag('flask.view_args.filename'), 'unknown-file')
+        assert_span_http_status_code(req_span, 404)
+        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/static/unknown-file')
+        self.assertEqual(req_span.get_tag('http.method'), 'GET')
+
+        # static span
+        self.assertEqual(handler_span.error, 1)
+        self.assertEqual(handler_span.service, 'flask')
+        self.assertEqual(handler_span.name, 'static')
+        self.assertEqual(handler_span.resource, '/static/<path:filename>')
+        self.assertTrue(handler_span.get_tag('error.msg').startswith('404 Not Found'))
+        self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback'))
+        self.assertEqual(handler_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')
+
+        # flask.send_static_file span
+        self.assertEqual(send_file_span.error, 1)
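+        # DEV: the NotFound raised by flask.send_static_file is recorded on this span as well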
+        self.assertEqual(send_file_span.service, 'flask')
+        self.assertEqual(send_file_span.name, 'flask.send_static_file')
+        self.assertEqual(send_file_span.resource, 'flask.send_static_file')
+        self.assertTrue(send_file_span.get_tag('error.msg').startswith('404 Not Found'))
+        self.assertTrue(send_file_span.get_tag('error.stack').startswith('Traceback'))
+        self.assertEqual(send_file_span.get_tag('error.type'), 'werkzeug.exceptions.NotFound')
diff --git a/tests/contrib/flask/test_template.py b/tests/contrib/flask/test_template.py
new file mode 100644
index 0000000000..e159315694
--- /dev/null
+++ b/tests/contrib/flask/test_template.py
@@ -0,0 +1,114 @@
+import flask
+
+from ddtrace import Pin
+from ddtrace.contrib.flask import unpatch
+
+from . import BaseFlaskTestCase
+
+
+class FlaskTemplateTestCase(BaseFlaskTestCase):
+    def test_patch(self):
+        """
+        When we patch Flask
+            Then ``flask.render_template`` is patched
+            Then ``flask.render_template_string`` is patched
+            Then ``flask.templating._render`` is patched
+        """
+        # DEV: We call `patch` in `setUp`
+        self.assert_is_wrapped(flask.render_template)
+        self.assert_is_wrapped(flask.render_template_string)
+        self.assert_is_wrapped(flask.templating._render)
+
+    def test_unpatch(self):
+        """
+        When we unpatch Flask
+            Then ``flask.render_template`` is unpatched
+            Then ``flask.render_template_string`` is unpatched
+            Then ``flask.templating._render`` is unpatched
+        """
+        unpatch()
+        self.assert_is_not_wrapped(flask.render_template)
+        self.assert_is_not_wrapped(flask.render_template_string)
+        self.assert_is_not_wrapped(flask.templating._render)
+
+    def test_render_template(self):
+        """
+        When we call a patched ``flask.render_template``
+            We create the expected spans
+        """
+        with self.app.app_context():
+            with self.app.test_request_context('/'):
+                response = flask.render_template('test.html', world='world')
+                self.assertEqual(response, 'hello world')
+
+        # 1 for calling `flask.render_template`
+        # 1 for tearing down the request
+        # 1 for tearing down the app context we created
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 3)
+
+        self.assertIsNone(spans[0].service)
+        self.assertEqual(spans[0].name, 'flask.render_template')
+        self.assertEqual(spans[0].resource, 'test.html')
+        self.assertEqual(set(spans[0].meta.keys()), set(['flask.template_name']))
+        self.assertEqual(spans[0].meta['flask.template_name'], 'test.html')
+
+        self.assertEqual(spans[1].name, 'flask.do_teardown_request')
+        self.assertEqual(spans[2].name, 'flask.do_teardown_appcontext')
+
+    def test_render_template_pin_disabled(self):
+        """
+        When we call a patched ``flask.render_template``
+            When the app's ``Pin`` is disabled
+                We do not create any spans
+        """
+        pin = Pin.get_from(self.app)
+        pin.tracer.enabled = False
+
+        with self.app.app_context():
+            with self.app.test_request_context('/'):
+                response = flask.render_template('test.html', world='world')
+                self.assertEqual(response, 'hello world')
+
+        self.assertEqual(len(self.get_spans()), 0)
+
+    def test_render_template_string(self):
+        """
+        When we call a patched ``flask.render_template_string``
+            We create the expected spans
+        """
+        with self.app.app_context():
+            with self.app.test_request_context('/'):
+                response = flask.render_template_string('hello {{world}}', world='world')
+                self.assertEqual(response, 'hello world')
+
+        # 1 for calling `flask.render_template_string`
+        # 1 for tearing down the request
+        # 1 for tearing down the app context we created
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 3)
+
+        self.assertIsNone(spans[0].service)
+        self.assertEqual(spans[0].name, 'flask.render_template_string')
+        self.assertEqual(spans[0].resource, '<memory>')
+        self.assertEqual(set(spans[0].meta.keys()), set(['flask.template_name']))
+        self.assertEqual(spans[0].meta['flask.template_name'], '<memory>')
+
+        self.assertEqual(spans[1].name, 'flask.do_teardown_request')
+        self.assertEqual(spans[2].name, 'flask.do_teardown_appcontext')
+
+    def test_render_template_string_pin_disabled(self):
+        """
+        When we call a patched ``flask.render_template_string``
+            When the app's ``Pin`` is disabled
+                We do not create any spans
+        """
+        pin = Pin.get_from(self.app)
+        pin.tracer.enabled = False
+
+        with self.app.app_context():
+            with self.app.test_request_context('/'):
+                response = flask.render_template_string('hello {{world}}', world='world')
+                self.assertEqual(response, 'hello world')
+
+        self.assertEqual(len(self.get_spans()), 0)
diff --git a/tests/contrib/flask/test_templates/err.html b/tests/contrib/flask/test_templates/err.html
new file mode 100644
index 0000000000..fc310aeb9e
--- /dev/null
+++ b/tests/contrib/flask/test_templates/err.html
@@ -0,0 +1,2 @@
+
+oh {{no
diff --git a/tests/contrib/flask/test_templates/render_err.html b/tests/contrib/flask/test_templates/render_err.html
new file mode 100644
index 0000000000..b11f8041e1
--- /dev/null
+++ b/tests/contrib/flask/test_templates/render_err.html
@@ -0,0 +1 @@
+hello {{object.method()}}
diff --git a/tests/contrib/flask/test_templates/test.html b/tests/contrib/flask/test_templates/test.html
new file mode 100644
index 0000000000..d3f694cd1e
--- /dev/null
+++ b/tests/contrib/flask/test_templates/test.html
@@ -0,0 +1 @@
+hello {{world}}
diff --git a/tests/contrib/flask/test_views.py b/tests/contrib/flask/test_views.py
new file mode 100644
index 0000000000..c45fa4446c
--- /dev/null
+++ b/tests/contrib/flask/test_views.py
@@ -0,0 +1,172 @@
+from flask.views import MethodView, View
+
+from ddtrace.compat import PY2
+from ddtrace.ext import http
+
+from . import BaseFlaskTestCase
+from ...utils import assert_span_http_status_code
+
+
+base_exception_name = 'builtins.Exception'
+if PY2:
+    base_exception_name = 'exceptions.Exception'
+
+
+class FlaskViewTestCase(BaseFlaskTestCase):
+    def test_view_handler(self):
+        """
+        When using a flask.views.View
+            We create spans as expected
+        """
+        class TestView(View):
+            methods = ['GET']
+
+            def dispatch_request(self, name):
+                return 'Hello {}'.format(name)
+
+        self.app.add_url_rule('/hello/<name>', view_func=TestView.as_view('hello'))
+
+        res = self.client.get('/hello/flask')
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.data, b'Hello flask')
+
+        spans = self.get_spans()
+
+        req_span = self.find_span_by_name(spans, 'flask.request')
+        handler_span = self.find_span_by_name(spans, 'tests.contrib.flask.test_views.hello')
+
+        # flask.request
+        self.assertEqual(req_span.error, 0)
+        self.assertEqual(req_span.get_tag('flask.endpoint'), 'hello')
+        self.assertEqual(req_span.get_tag('flask.url_rule'), '/hello/<name>')
+        self.assertEqual(req_span.get_tag('flask.view_args.name'), 'flask')
+        self.assertEqual(req_span.get_tag('http.method'), 'GET')
+        assert_span_http_status_code(req_span, 200)
+        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/hello/flask')
+
+        # tests.contrib.flask.test_views.hello
+        # DEV: We do not add any additional metadata to view spans
+        self.assertEqual(handler_span.error, 0)
+        self.assertEqual(handler_span.meta, dict())
+
+    def test_view_handler_error(self):
+        """
+        When using a flask.views.View
+            When it raises an exception
+                We create spans as expected
+        """
+        class TestView(View):
+            methods = ['GET']
+
+            def dispatch_request(self, name):
+                raise Exception('an error')
+
+        self.app.add_url_rule('/hello/<name>', view_func=TestView.as_view('hello'))
+
+        res = self.client.get('/hello/flask')
+        self.assertEqual(res.status_code, 500)
+
+        spans = self.get_spans()
+
+        req_span = self.find_span_by_name(spans, 'flask.request')
+        dispatch_span = self.find_span_by_name(spans, 'flask.dispatch_request')
+        handler_span = self.find_span_by_name(spans, 'tests.contrib.flask.test_views.hello')
+
+        # flask.request
+        self.assertEqual(req_span.error, 1)
+        self.assertEqual(req_span.get_tag('flask.endpoint'), 'hello')
+        self.assertEqual(req_span.get_tag('flask.url_rule'), '/hello/<name>')
+        self.assertEqual(req_span.get_tag('flask.view_args.name'), 'flask')
+        self.assertEqual(req_span.get_tag('http.method'), 'GET')
+        assert_span_http_status_code(req_span, 500)
+        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/hello/flask')
+
+        # flask.dispatch_request
+        self.assertEqual(dispatch_span.error, 1)
+        self.assertEqual(dispatch_span.get_tag('error.msg'), 'an error')
+        self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback (most recent call last):'))
+        self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name)
+
+        # tests.contrib.flask.test_views.hello
+        # DEV: We do not add any additional metadata to view spans
+        self.assertEqual(handler_span.error, 1)
+        self.assertEqual(handler_span.get_tag('error.msg'), 'an error')
+        self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback (most recent call last):'))
+        self.assertEqual(handler_span.get_tag('error.type'), base_exception_name)
+
+    def test_method_view_handler(self):
+        """
+        When using a flask.views.MethodView
+            We create spans as expected
+        """
+        class TestView(MethodView):
+            def get(self, name):
+                return 'Hello {}'.format(name)
+
+        self.app.add_url_rule('/hello/<name>', view_func=TestView.as_view('hello'))
+
+        res = self.client.get('/hello/flask')
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.data, b'Hello flask')
+
+        spans = self.get_spans()
+
+        req_span = self.find_span_by_name(spans, 'flask.request')
+        handler_span = self.find_span_by_name(spans, 'tests.contrib.flask.test_views.hello')
+
+        # flask.request
+        self.assertEqual(req_span.error, 0)
+        self.assertEqual(req_span.get_tag('flask.endpoint'), 'hello')
+        self.assertEqual(req_span.get_tag('flask.url_rule'), '/hello/<name>')
+        self.assertEqual(req_span.get_tag('flask.view_args.name'), 'flask')
+        self.assertEqual(req_span.get_tag('http.method'), 'GET')
+        assert_span_http_status_code(req_span, 200)
+        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/hello/flask')
+
+        # tests.contrib.flask.test_views.hello
+        # DEV: We do not add any additional metadata to view spans
+        self.assertEqual(handler_span.error, 0)
+        self.assertEqual(handler_span.meta, dict())
+
+    def test_method_view_handler_error(self):
+        """
+        When using a flask.views.MethodView
+            When it raises an exception
+                We create spans as expected
+        """
+        class TestView(MethodView):
+            def get(self, name):
+                raise Exception('an error')
+
+        self.app.add_url_rule('/hello/<name>', view_func=TestView.as_view('hello'))
+
+        res = self.client.get('/hello/flask')
+        self.assertEqual(res.status_code, 500)
+
+        spans = self.get_spans()
+
+        req_span = self.find_span_by_name(spans, 'flask.request')
+        dispatch_span = self.find_span_by_name(spans, 'flask.dispatch_request')
+        handler_span = self.find_span_by_name(spans, 'tests.contrib.flask.test_views.hello')
+
+        # flask.request
+        self.assertEqual(req_span.error, 1)
+        self.assertEqual(req_span.get_tag('flask.endpoint'), 'hello')
+        self.assertEqual(req_span.get_tag('flask.url_rule'), '/hello/<name>')
+        self.assertEqual(req_span.get_tag('flask.view_args.name'), 'flask')
+        self.assertEqual(req_span.get_tag('http.method'), 'GET')
+        assert_span_http_status_code(req_span, 500)
+        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/hello/flask')
+
+        # flask.dispatch_request
+        self.assertEqual(dispatch_span.error, 1)
+        self.assertEqual(dispatch_span.get_tag('error.msg'), 'an error')
+        self.assertTrue(dispatch_span.get_tag('error.stack').startswith('Traceback (most recent call last):'))
+        self.assertEqual(dispatch_span.get_tag('error.type'), base_exception_name)
+
+        # tests.contrib.flask.test_views.hello
+        # DEV: We do not add any additional metadata to view spans
+        self.assertEqual(handler_span.error, 1)
+        self.assertEqual(handler_span.get_tag('error.msg'), 'an error')
+        self.assertTrue(handler_span.get_tag('error.stack').startswith('Traceback (most recent call last):'))
+        self.assertEqual(handler_span.get_tag('error.type'), base_exception_name)
diff --git a/tests/contrib/flask/web.py b/tests/contrib/flask/web.py
new file mode 100644
index 0000000000..3fb746fa13
--- /dev/null
+++ b/tests/contrib/flask/web.py
@@ -0,0 +1,84 @@
+# -*- coding: utf-8 -*-
+import os
+
+from flask import Flask, render_template
+
+
+class TestError(Exception):
+    pass
+
+
+class HandleMe(Exception):
+    pass
+
+
+def create_app():
+    """Initializes a new Flask application. This helper is required to
+    make sure that every test gets a brand new Flask app, without any
+    tracing side effects left over from a previous execution.
+    """
+    cur_dir = os.path.dirname(os.path.realpath(__file__))
+    tmpl_path = os.path.join(cur_dir, 'test_templates')
+    app = Flask(__name__, template_folder=tmpl_path)
+
+    @app.route('/')
+    def index():
+        return 'hello'
+
+    @app.route('/error')
+    def error():
+        raise TestError()
+
+    @app.route('/handleme')
+    def handle_me():
+        raise HandleMe()
+
+    @app.route('/fatal')
+    def fatal():
+        1 / 0
+
+    @app.route('/tmpl')
+    def tmpl():
+        return render_template('test.html', world='earth')
+
+    @app.route('/tmpl/err')
+    def tmpl_err():
+        return render_template('err.html')
+
+    @app.route('/tmpl/render_err')
+    def tmpl_render_err():
+        return render_template('render_err.html')
+
+    @app.route('/child')
+    def child():
+        with app._tracer.trace('child') as span:
+            span.set_tag('a', 'b')
+        return 'child'
+
+    @app.route('/custom_span')
+    def custom_span():
+        span = app._tracer.current_span()
+        assert span
+        span.resource = 'overridden'
+        return 'hiya'
+
+    def unicode_view():
+        return u'üŋïĉóđē'
+
+    app.add_url_rule(
+        u'/üŋïĉóđē',
+        u'üŋïĉóđē',
+        unicode_view,
+    )
+
+    @app.errorhandler(TestError)
+    def handle_my_exception(e):
+        assert isinstance(e, TestError)
+        return 'error', 500
+
+    @app.errorhandler(HandleMe)
+    def err_to_202(e):
+        assert isinstance(e, HandleMe)
+        return 'handled', 202
+
+    return app
diff --git a/tests/contrib/flask_autopatch/__init__.py b/tests/contrib/flask_autopatch/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/contrib/flask_autopatch/test_flask_autopatch.py b/tests/contrib/flask_autopatch/test_flask_autopatch.py
new file mode 100644
index 0000000000..1ce50499b9
--- /dev/null
+++ b/tests/contrib/flask_autopatch/test_flask_autopatch.py
@@ -0,0 +1,101 @@
+# -*- coding: utf-8 -*-
+import unittest
+
+import flask
+
+from ddtrace.vendor import wrapt
+from ddtrace.ext import http
+from ddtrace import Pin
+
+from ...test_tracer import get_dummy_tracer
+from ...utils import assert_span_http_status_code
+
+
+class FlaskAutopatchTestCase(unittest.TestCase):
+    def setUp(self):
+        self.tracer = get_dummy_tracer()
+        self.app = flask.Flask(__name__)
+        Pin.override(self.app, service='test-flask', tracer=self.tracer)
+        self.client = self.app.test_client()
+
+    def test_patched(self):
+        """
+        When using ddtrace-run
+            Then the `flask` module is patched
+        """
+        # DEV: We have great test coverage in tests.contrib.flask,
+        # we only need basic tests here to assert `ddtrace-run` patched things
+
+        # Assert module is marked as patched
+        self.assertTrue(flask._datadog_patch)
+
+        # Assert our instance of flask.app.Flask is patched
+        self.assertTrue(isinstance(self.app.add_url_rule, wrapt.ObjectProxy))
+        self.assertTrue(isinstance(self.app.wsgi_app, wrapt.ObjectProxy))
+
+        # Assert the base module flask.app.Flask methods are patched
+        self.assertTrue(isinstance(flask.app.Flask.add_url_rule, wrapt.ObjectProxy))
+        self.assertTrue(isinstance(flask.app.Flask.wsgi_app, wrapt.ObjectProxy))
+
+    def test_request(self):
+        """
+        When using ddtrace-run
+            When making a request to the flask app
+                We generate the expected spans
+        """
+        @self.app.route('/')
+        def index():
+            return 'Hello Flask', 200
+
+        res = self.client.get('/')
+        self.assertEqual(res.status_code, 200)
+        self.assertEqual(res.data, b'Hello Flask')
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 8)
+
+        self.assertListEqual(
+            [
+                'flask.request',
+                'flask.try_trigger_before_first_request_functions',
+                'flask.preprocess_request',
+                'flask.dispatch_request',
+                'tests.contrib.flask_autopatch.test_flask_autopatch.index',
+                'flask.process_response',
+                'flask.do_teardown_request',
+                'flask.do_teardown_appcontext',
+            ],
+            [s.name for s in spans],
+        )
+
+        # Assert span services
+        for span in spans:
+            self.assertEqual(span.service, 'test-flask')
+
+        # Root request span
+        req_span = spans[0]
+        self.assertEqual(req_span.service, 'test-flask')
+        self.assertEqual(req_span.name, 'flask.request')
+        self.assertEqual(req_span.resource, 'GET /')
+        self.assertEqual(req_span.span_type, 'web')
+        self.assertEqual(req_span.error, 0)
+        self.assertIsNone(req_span.parent_id)
+
+        # Request tags
+        self.assertEqual(
+            set(['flask.version', 'http.url', 'http.method', 'http.status_code',
+                 'flask.endpoint', 'flask.url_rule']),
+            set(req_span.meta.keys()),
+        )
+        self.assertEqual(req_span.get_tag('flask.endpoint'), 'index')
+        self.assertEqual(req_span.get_tag('flask.url_rule'), '/')
+        self.assertEqual(req_span.get_tag('http.method'), 'GET')
+        self.assertEqual(req_span.get_tag(http.URL), 'http://localhost/')
+        assert_span_http_status_code(req_span, 200)
+
+        # Handler span
+        handler_span = spans[4]
+        self.assertEqual(handler_span.service, 'test-flask')
+        self.assertEqual(handler_span.name, 'tests.contrib.flask_autopatch.test_flask_autopatch.index')
+        self.assertEqual(handler_span.resource, '/')
+        self.assertEqual(handler_span.error, 0)
diff --git a/tests/contrib/flask_autopatch/test_templates/err.html b/tests/contrib/flask_autopatch/test_templates/err.html
new file mode 100644
index 0000000000..fc310aeb9e
--- /dev/null
+++ b/tests/contrib/flask_autopatch/test_templates/err.html
@@ -0,0 +1,2 @@
+
+oh {{no
diff --git a/tests/contrib/flask_autopatch/test_templates/test.html b/tests/contrib/flask_autopatch/test_templates/test.html
new file mode 100644
index 0000000000..d3f694cd1e
--- /dev/null
+++ b/tests/contrib/flask_autopatch/test_templates/test.html
@@ -0,0 +1 @@
+hello {{world}}
diff --git a/tests/contrib/flask_cache/__init__.py b/tests/contrib/flask_cache/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/contrib/flask_cache/test.py b/tests/contrib/flask_cache/test.py
new file mode 100644
index 0000000000..1515cd93f8
--- /dev/null
+++ b/tests/contrib/flask_cache/test.py
@@ -0,0 +1,280 @@
+# -*- coding: utf-8 -*-
+
+# project
+from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
+from ddtrace.ext import net
+from ddtrace.contrib.flask_cache import get_traced_cache
+from ddtrace.contrib.flask_cache.tracers import CACHE_BACKEND
+
+# 3rd party
+from flask import Flask
+
+# testing
+from tests.opentracer.utils import init_tracer
+from ..config import REDIS_CONFIG, MEMCACHED_CONFIG
+from ...base import BaseTracerTestCase
+from ...util import assert_dict_issuperset
+
+
+class FlaskCacheTest(BaseTracerTestCase):
+    SERVICE = 'test-flask-cache'
+    TEST_REDIS_PORT = REDIS_CONFIG['port']
+    TEST_MEMCACHED_PORT = MEMCACHED_CONFIG['port']
+
+    def setUp(self):
+        super(FlaskCacheTest, self).setUp()
+
+        # create the TracedCache instance for a Flask app
+        Cache = get_traced_cache(self.tracer, service=self.SERVICE)
+        app = Flask(__name__)
+        self.cache = Cache(app, config={'CACHE_TYPE': 'simple'})
+
+    def test_simple_cache_get(self):
+        self.cache.get(u'á_complex_operation')
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 1)
+        span = spans[0]
+        self.assertEqual(span.service, self.SERVICE)
+        self.assertEqual(span.resource, 'get')
+        self.assertEqual(span.name, 'flask_cache.cmd')
+        self.assertEqual(span.span_type, 'cache')
+        self.assertEqual(span.error, 0)
+
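+        # DEV: assert_dict_issuperset only requires the keys below; the span meta may carry extras +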
expected_meta = { + 'flask_cache.key': u'á_complex_operation', + 'flask_cache.backend': 'simple', + } + + assert_dict_issuperset(span.meta, expected_meta) + + def test_simple_cache_set(self): + self.cache.set(u'á_complex_operation', u'with_á_value\nin two lines') + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.resource, 'set') + self.assertEqual(span.name, 'flask_cache.cmd') + self.assertEqual(span.span_type, 'cache') + self.assertEqual(span.error, 0) + + expected_meta = { + 'flask_cache.key': u'á_complex_operation', + 'flask_cache.backend': 'simple', + } + + assert_dict_issuperset(span.meta, expected_meta) + + def test_simple_cache_add(self): + self.cache.add(u'á_complex_number', 50) + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.resource, 'add') + self.assertEqual(span.name, 'flask_cache.cmd') + self.assertEqual(span.span_type, 'cache') + self.assertEqual(span.error, 0) + + expected_meta = { + 'flask_cache.key': u'á_complex_number', + 'flask_cache.backend': 'simple', + } + + assert_dict_issuperset(span.meta, expected_meta) + + def test_simple_cache_delete(self): + self.cache.delete(u'á_complex_operation') + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.resource, 'delete') + self.assertEqual(span.name, 'flask_cache.cmd') + self.assertEqual(span.span_type, 'cache') + self.assertEqual(span.error, 0) + + expected_meta = { + 'flask_cache.key': u'á_complex_operation', + 'flask_cache.backend': 'simple', + } + + assert_dict_issuperset(span.meta, expected_meta) + + def test_simple_cache_delete_many(self): + self.cache.delete_many('complex_operation', 'another_complex_op') + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.resource, 'delete_many') + self.assertEqual(span.name, 'flask_cache.cmd') + self.assertEqual(span.span_type, 'cache') + self.assertEqual(span.error, 0) + + expected_meta = { + 'flask_cache.key': "['complex_operation', 'another_complex_op']", + 'flask_cache.backend': 'simple', + } + + assert_dict_issuperset(span.meta, expected_meta) + + def test_simple_cache_clear(self): + self.cache.clear() + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.resource, 'clear') + self.assertEqual(span.name, 'flask_cache.cmd') + self.assertEqual(span.span_type, 'cache') + self.assertEqual(span.error, 0) + + expected_meta = { + 'flask_cache.backend': 'simple', + } + + assert_dict_issuperset(span.meta, expected_meta) + + def test_simple_cache_get_many(self): + self.cache.get_many('first_complex_op', 'second_complex_op') + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.resource, 'get_many') + self.assertEqual(span.name, 'flask_cache.cmd') + self.assertEqual(span.span_type, 'cache') + self.assertEqual(span.error, 0) + + expected_meta = { + 'flask_cache.key': "['first_complex_op', 'second_complex_op']", + 'flask_cache.backend': 'simple', + } + + assert_dict_issuperset(span.meta, expected_meta) + + def test_simple_cache_set_many(self): + self.cache.set_many({ + 'first_complex_op': 10, + 'second_complex_op': 
20, + }) + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.resource, 'set_many') + self.assertEqual(span.name, 'flask_cache.cmd') + self.assertEqual(span.span_type, 'cache') + self.assertEqual(span.error, 0) + + self.assertEqual(span.meta['flask_cache.backend'], 'simple') + self.assertTrue('first_complex_op' in span.meta['flask_cache.key']) + self.assertTrue('second_complex_op' in span.meta['flask_cache.key']) + + def test_default_span_tags(self): + # test tags and attributes + with self.cache._TracedCache__trace('flask_cache.cmd') as span: + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.span_type, 'cache') + self.assertEqual(span.meta[CACHE_BACKEND], 'simple') + self.assertTrue(net.TARGET_HOST not in span.meta) + self.assertTrue(net.TARGET_PORT not in span.meta) + + def test_default_span_tags_for_redis(self): + # create the TracedCache instance for a Flask app + Cache = get_traced_cache(self.tracer, service=self.SERVICE) + app = Flask(__name__) + config = { + 'CACHE_TYPE': 'redis', + 'CACHE_REDIS_PORT': self.TEST_REDIS_PORT, + } + cache = Cache(app, config=config) + # test tags and attributes + with cache._TracedCache__trace('flask_cache.cmd') as span: + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.span_type, 'cache') + self.assertEqual(span.meta[CACHE_BACKEND], 'redis') + self.assertEqual(span.meta[net.TARGET_HOST], 'localhost') + self.assertEqual(span.metrics[net.TARGET_PORT], self.TEST_REDIS_PORT) + + def test_default_span_tags_memcached(self): + # create the TracedCache instance for a Flask app + Cache = get_traced_cache(self.tracer, service=self.SERVICE) + app = Flask(__name__) + config = { + 'CACHE_TYPE': 'memcached', + 'CACHE_MEMCACHED_SERVERS': ['127.0.0.1:{}'.format(self.TEST_MEMCACHED_PORT)], + } + cache = Cache(app, config=config) + # test tags and attributes + with cache._TracedCache__trace('flask_cache.cmd') as span: + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.span_type, 'cache') + self.assertEqual(span.meta[CACHE_BACKEND], 'memcached') + self.assertEqual(span.meta[net.TARGET_HOST], '127.0.0.1') + self.assertEqual(span.metrics[net.TARGET_PORT], self.TEST_MEMCACHED_PORT) + + def test_simple_cache_get_ot(self): + """OpenTracing version of test_simple_cache_get.""" + ot_tracer = init_tracer('my_svc', self.tracer) + + # create the TracedCache instance for a Flask app + Cache = get_traced_cache(self.tracer, service=self.SERVICE) + app = Flask(__name__) + cache = Cache(app, config={'CACHE_TYPE': 'simple'}) + + with ot_tracer.start_active_span('ot_span'): + cache.get(u'á_complex_operation') + + spans = self.get_spans() + self.assertEqual(len(spans), 2) + ot_span, dd_span = spans + + # confirm the parenting + self.assertIsNone(ot_span.parent_id) + self.assertEqual(dd_span.parent_id, ot_span.span_id) + + self.assertEqual(ot_span.resource, 'ot_span') + self.assertEqual(ot_span.service, 'my_svc') + + self.assertEqual(dd_span.service, self.SERVICE) + self.assertEqual(dd_span.resource, 'get') + self.assertEqual(dd_span.name, 'flask_cache.cmd') + self.assertEqual(dd_span.span_type, 'cache') + self.assertEqual(dd_span.error, 0) + + expected_meta = { + 'flask_cache.key': u'á_complex_operation', + 'flask_cache.backend': 'simple', + } + + assert_dict_issuperset(dd_span.meta, expected_meta) + + def test_analytics_default(self): + self.cache.get(u'á_complex_operation') + spans = self.get_spans() + 
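+        # no 'flask_cache' config override is set here, so analytics is
+        # assumed to be disabled by default and the sample rate metric unset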
self.assertEqual(len(spans), 1) + self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with self.override_config( + 'flask_cache', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + self.cache.get(u'á_complex_operation') + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with self.override_config( + 'flask_cache', + dict(analytics_enabled=True) + ): + self.cache.get(u'á_complex_operation') + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) diff --git a/tests/contrib/flask_cache/test_utils.py b/tests/contrib/flask_cache/test_utils.py new file mode 100644 index 0000000000..db487d06c3 --- /dev/null +++ b/tests/contrib/flask_cache/test_utils.py @@ -0,0 +1,110 @@ +import unittest + +# project +from ddtrace.tracer import Tracer +from ddtrace.contrib.flask_cache import get_traced_cache +from ddtrace.contrib.flask_cache.utils import _extract_conn_tags, _resource_from_cache_prefix + +# 3rd party +from flask import Flask + +# testing +from ..config import REDIS_CONFIG, MEMCACHED_CONFIG + + +class FlaskCacheUtilsTest(unittest.TestCase): + SERVICE = 'test-flask-cache' + + def test_extract_redis_connection_metadata(self): + # create the TracedCache instance for a Flask app + tracer = Tracer() + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + config = { + 'CACHE_TYPE': 'redis', + 'CACHE_REDIS_PORT': REDIS_CONFIG['port'], + } + traced_cache = Cache(app, config=config) + # extract client data + meta = _extract_conn_tags(traced_cache.cache._client) + expected_meta = {'out.host': 'localhost', 'out.port': REDIS_CONFIG['port'], 'out.redis_db': 0} + assert meta == expected_meta + + def test_extract_memcached_connection_metadata(self): + # create the TracedCache instance for a Flask app + tracer = Tracer() + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + config = { + 'CACHE_TYPE': 'memcached', + 'CACHE_MEMCACHED_SERVERS': ['127.0.0.1:{}'.format(MEMCACHED_CONFIG['port'])], + } + traced_cache = Cache(app, config=config) + # extract client data + meta = _extract_conn_tags(traced_cache.cache._client) + expected_meta = {'out.host': '127.0.0.1', 'out.port': MEMCACHED_CONFIG['port']} + assert meta == expected_meta + + def test_extract_memcached_multiple_connection_metadata(self): + # create the TracedCache instance for a Flask app + tracer = Tracer() + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + config = { + 'CACHE_TYPE': 'memcached', + 'CACHE_MEMCACHED_SERVERS': [ + '127.0.0.1:{}'.format(MEMCACHED_CONFIG['port']), + 'localhost:{}'.format(MEMCACHED_CONFIG['port']), + ], + } + traced_cache = Cache(app, config=config) + # extract client data + meta = _extract_conn_tags(traced_cache.cache._client) + expected_meta = { + 'out.host': '127.0.0.1', + 'out.port': MEMCACHED_CONFIG['port'], + } + assert meta == expected_meta + + def test_resource_from_cache_with_prefix(self): + # create the TracedCache instance for a Flask app + tracer = Tracer() + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + config = { + 'CACHE_TYPE': 'redis', + 'CACHE_REDIS_PORT': REDIS_CONFIG['port'], + 'CACHE_KEY_PREFIX': 'users', + } + traced_cache = Cache(app, config=config) + # expect a resource with a prefix + expected_resource = 'get 
users' + resource = _resource_from_cache_prefix('GET', traced_cache.cache) + assert resource == expected_resource + + def test_resource_from_cache_with_empty_prefix(self): + # create the TracedCache instance for a Flask app + tracer = Tracer() + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + config = { + 'CACHE_TYPE': 'redis', + 'CACHE_REDIS_PORT': REDIS_CONFIG['port'], + 'CACHE_KEY_PREFIX': '', + } + traced_cache = Cache(app, config=config) + # expect a resource with a prefix + expected_resource = 'get' + resource = _resource_from_cache_prefix('GET', traced_cache.cache) + assert resource == expected_resource + + def test_resource_from_cache_without_prefix(self): + # create the TracedCache instance for a Flask app + tracer = Tracer() + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + traced_cache = Cache(app, config={'CACHE_TYPE': 'redis'}) + # expect only the resource name + expected_resource = 'get' + resource = _resource_from_cache_prefix('GET', traced_cache.config) + assert resource == expected_resource diff --git a/tests/contrib/flask_cache/test_wrapper_safety.py b/tests/contrib/flask_cache/test_wrapper_safety.py new file mode 100644 index 0000000000..302e3a2dbf --- /dev/null +++ b/tests/contrib/flask_cache/test_wrapper_safety.py @@ -0,0 +1,231 @@ +# -*- coding: utf-8 -*- +import unittest + +# project +from ddtrace.ext import net +from ddtrace.tracer import Tracer +from ddtrace.contrib.flask_cache import get_traced_cache +from ddtrace.contrib.flask_cache.tracers import CACHE_BACKEND + +# 3rd party +from flask import Flask +from redis.exceptions import ConnectionError +import pytest + +# testing +from ...test_tracer import DummyWriter + + +class FlaskCacheWrapperTest(unittest.TestCase): + SERVICE = 'test-flask-cache' + + def test_cache_get_without_arguments(self): + # initialize the dummy writer + writer = DummyWriter() + tracer = Tracer() + tracer.writer = writer + + # create the TracedCache instance for a Flask app + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + cache = Cache(app, config={'CACHE_TYPE': 'simple'}) + + # make a wrong call + with pytest.raises(TypeError) as ex: + cache.get() + + # ensure that the error is not caused by our tracer + assert 'get()' in ex.value.args[0] + assert 'argument' in ex.value.args[0] + spans = writer.pop() + # an error trace must be sent + assert len(spans) == 1 + span = spans[0] + assert span.service == self.SERVICE + assert span.resource == 'get' + assert span.name == 'flask_cache.cmd' + assert span.span_type == 'cache' + assert span.error == 1 + + def test_cache_set_without_arguments(self): + # initialize the dummy writer + writer = DummyWriter() + tracer = Tracer() + tracer.writer = writer + + # create the TracedCache instance for a Flask app + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + cache = Cache(app, config={'CACHE_TYPE': 'simple'}) + + # make a wrong call + with pytest.raises(TypeError) as ex: + cache.set() + + # ensure that the error is not caused by our tracer + assert 'set()' in ex.value.args[0] + assert 'argument' in ex.value.args[0] + spans = writer.pop() + # an error trace must be sent + assert len(spans) == 1 + span = spans[0] + assert span.service == self.SERVICE + assert span.resource == 'set' + assert span.name == 'flask_cache.cmd' + assert span.span_type == 'cache' + assert span.error == 1 + + def test_cache_add_without_arguments(self): + # initialize the dummy writer + writer = 
DummyWriter() + tracer = Tracer() + tracer.writer = writer + + # create the TracedCache instance for a Flask app + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + cache = Cache(app, config={'CACHE_TYPE': 'simple'}) + + # make a wrong call + with pytest.raises(TypeError) as ex: + cache.add() + + # ensure that the error is not caused by our tracer + assert 'add()' in ex.value.args[0] + assert 'argument' in ex.value.args[0] + spans = writer.pop() + # an error trace must be sent + assert len(spans) == 1 + span = spans[0] + assert span.service == self.SERVICE + assert span.resource == 'add' + assert span.name == 'flask_cache.cmd' + assert span.span_type == 'cache' + assert span.error == 1 + + def test_cache_delete_without_arguments(self): + # initialize the dummy writer + writer = DummyWriter() + tracer = Tracer() + tracer.writer = writer + + # create the TracedCache instance for a Flask app + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + cache = Cache(app, config={'CACHE_TYPE': 'simple'}) + + # make a wrong call + with pytest.raises(TypeError) as ex: + cache.delete() + + # ensure that the error is not caused by our tracer + assert 'delete()' in ex.value.args[0] + assert 'argument' in ex.value.args[0] + spans = writer.pop() + # an error trace must be sent + assert len(spans) == 1 + span = spans[0] + assert span.service == self.SERVICE + assert span.resource == 'delete' + assert span.name == 'flask_cache.cmd' + assert span.span_type == 'cache' + assert span.error == 1 + + def test_cache_set_many_without_arguments(self): + # initialize the dummy writer + writer = DummyWriter() + tracer = Tracer() + tracer.writer = writer + + # create the TracedCache instance for a Flask app + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + cache = Cache(app, config={'CACHE_TYPE': 'simple'}) + + # make a wrong call + with pytest.raises(TypeError) as ex: + cache.set_many() + + # ensure that the error is not caused by our tracer + assert 'set_many()' in ex.value.args[0] + assert 'argument' in ex.value.args[0] + spans = writer.pop() + # an error trace must be sent + assert len(spans) == 1 + span = spans[0] + assert span.service == self.SERVICE + assert span.resource == 'set_many' + assert span.name == 'flask_cache.cmd' + assert span.span_type == 'cache' + assert span.error == 1 + + def test_redis_cache_tracing_with_a_wrong_connection(self): + # initialize the dummy writer + writer = DummyWriter() + tracer = Tracer() + tracer.writer = writer + + # create the TracedCache instance for a Flask app + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + config = { + 'CACHE_TYPE': 'redis', + 'CACHE_REDIS_PORT': 2230, + 'CACHE_REDIS_HOST': '127.0.0.1' + } + cache = Cache(app, config=config) + + # use a wrong redis connection + with pytest.raises(ConnectionError) as ex: + cache.get(u'á_complex_operation') + + # ensure that the error is not caused by our tracer + assert '127.0.0.1:2230. Connection refused.' 
in ex.value.args[0] + spans = writer.pop() + # an error trace must be sent + assert len(spans) == 1 + span = spans[0] + assert span.service == self.SERVICE + assert span.resource == 'get' + assert span.name == 'flask_cache.cmd' + assert span.span_type == 'cache' + assert span.meta[CACHE_BACKEND] == 'redis' + assert span.meta[net.TARGET_HOST] == '127.0.0.1' + assert span.metrics[net.TARGET_PORT] == 2230 + assert span.error == 1 + + def test_memcached_cache_tracing_with_a_wrong_connection(self): + # initialize the dummy writer + writer = DummyWriter() + tracer = Tracer() + tracer.writer = writer + + # create the TracedCache instance for a Flask app + Cache = get_traced_cache(tracer, service=self.SERVICE) + app = Flask(__name__) + config = { + 'CACHE_TYPE': 'memcached', + 'CACHE_MEMCACHED_SERVERS': ['localhost:2230'], + } + cache = Cache(app, config=config) + + # use a wrong memcached connection + try: + cache.get(u'á_complex_operation') + except Exception: + pass + + # ensure that the error is not caused by our tracer + spans = writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.SERVICE + assert span.resource == 'get' + assert span.name == 'flask_cache.cmd' + assert span.span_type == 'cache' + assert span.meta[CACHE_BACKEND] == 'memcached' + assert span.meta[net.TARGET_HOST] == 'localhost' + assert span.metrics[net.TARGET_PORT] == 2230 + + # the pylibmc backend raises an exception and memcached backend does + # not, so don't test anything about the status. diff --git a/tests/contrib/futures/__init__.py b/tests/contrib/futures/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/futures/test_propagation.py b/tests/contrib/futures/test_propagation.py new file mode 100644 index 0000000000..530d7b7699 --- /dev/null +++ b/tests/contrib/futures/test_propagation.py @@ -0,0 +1,356 @@ +import time +import concurrent + +from ddtrace.contrib.futures import patch, unpatch + +from tests.opentracer.utils import init_tracer +from ...base import BaseTracerTestCase + + +class PropagationTestCase(BaseTracerTestCase): + """Ensures the Context Propagation works between threads + when the ``futures`` library is used, or when the + ``concurrent`` module is available (Python 3 only) + """ + def setUp(self): + super(PropagationTestCase, self).setUp() + + # instrument ``concurrent`` + patch() + + def tearDown(self): + # remove instrumentation + unpatch() + + super(PropagationTestCase, self).tearDown() + + def test_propagation(self): + # it must propagate the tracing context if available + + def fn(): + # an active context must be available + # DEV: With `ContextManager` `.active()` will never be `None` + self.assertIsNotNone(self.tracer.context_provider.active()) + with self.tracer.trace('executor.thread'): + return 42 + + with self.override_global_tracer(): + with self.tracer.trace('main.thread'): + with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: + future = executor.submit(fn) + result = future.result() + # assert the right result + self.assertEqual(result, 42) + + # the trace must be completed + self.assert_structure( + dict(name='main.thread'), + ( + dict(name='executor.thread'), + ), + ) + + def test_propagation_with_params(self): + # instrumentation must proxy arguments if available + + def fn(value, key=None): + # an active context must be available + # DEV: With `ThreadLocalContext` `.active()` will never be `None` + self.assertIsNotNone(self.tracer.context_provider.active()) + with 
self.tracer.trace('executor.thread'): + return value, key + + with self.override_global_tracer(): + with self.tracer.trace('main.thread'): + with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: + future = executor.submit(fn, 42, 'CheeseShop') + value, key = future.result() + # assert the right result + self.assertEqual(value, 42) + self.assertEqual(key, 'CheeseShop') + + # the trace must be completed + self.assert_structure( + dict(name='main.thread'), + ( + dict(name='executor.thread'), + ), + ) + + def test_disabled_instrumentation(self): + # it must not propagate if the module is disabled + unpatch() + + def fn(): + # an active context must be available + # DEV: With `ThreadLocalContext` `.active()` will never be `None` + self.assertIsNotNone(self.tracer.context_provider.active()) + with self.tracer.trace('executor.thread'): + return 42 + + with self.override_global_tracer(): + with self.tracer.trace('main.thread'): + with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: + future = executor.submit(fn) + result = future.result() + # assert the right result + self.assertEqual(result, 42) + + # we provide two different traces + self.assert_span_count(2) + + # Retrieve the root spans (no parents) + # DEV: Results are sorted based on root span start time + traces = self.get_root_spans() + self.assertEqual(len(traces), 2) + + traces[0].assert_structure(dict(name='main.thread')) + traces[1].assert_structure(dict(name='executor.thread')) + + def test_double_instrumentation(self): + # double instrumentation must not happen + patch() + + def fn(): + with self.tracer.trace('executor.thread'): + return 42 + + with self.override_global_tracer(): + with self.tracer.trace('main.thread'): + with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: + future = executor.submit(fn) + result = future.result() + # assert the right result + self.assertEqual(result, 42) + + # the trace must be completed + self.assert_structure( + dict(name='main.thread'), + ( + dict(name='executor.thread'), + ), + ) + + def test_no_parent_span(self): + def fn(): + with self.tracer.trace('executor.thread'): + return 42 + + with self.override_global_tracer(): + with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: + future = executor.submit(fn) + result = future.result() + # assert the right result + self.assertEqual(result, 42) + + # the trace must be completed + self.assert_structure(dict(name='executor.thread')) + + def test_multiple_futures(self): + def fn(): + with self.tracer.trace('executor.thread'): + return 42 + + with self.override_global_tracer(): + with self.tracer.trace('main.thread'): + with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: + futures = [executor.submit(fn) for _ in range(4)] + for future in futures: + result = future.result() + # assert the right result + self.assertEqual(result, 42) + + # the trace must be completed + self.assert_structure( + dict(name='main.thread'), + ( + dict(name='executor.thread'), + dict(name='executor.thread'), + dict(name='executor.thread'), + dict(name='executor.thread'), + ), + ) + + def test_multiple_futures_no_parent(self): + def fn(): + with self.tracer.trace('executor.thread'): + return 42 + + with self.override_global_tracer(): + with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor: + futures = [executor.submit(fn) for _ in range(4)] + for future in futures: + result = future.result() + # assert the right result + self.assertEqual(result, 42) + + # the trace must be 
completed
+        self.assert_span_count(4)
+        traces = self.get_root_spans()
+        self.assertEqual(len(traces), 4)
+        for trace in traces:
+            trace.assert_structure(dict(name='executor.thread'))
+
+    def test_nested_futures(self):
+        def fn2():
+            with self.tracer.trace('nested.thread'):
+                return 42
+
+        def fn():
+            with self.tracer.trace('executor.thread'):
+                with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
+                    future = executor.submit(fn2)
+                    result = future.result()
+                    self.assertEqual(result, 42)
+            return result
+
+        with self.override_global_tracer():
+            with self.tracer.trace('main.thread'):
+                with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
+                    future = executor.submit(fn)
+                    result = future.result()
+                    # assert the right result
+                    self.assertEqual(result, 42)
+
+        # the trace must be completed
+        self.assert_span_count(3)
+        self.assert_structure(
+            dict(name='main.thread'),
+            (
+                (
+                    dict(name='executor.thread'),
+                    (
+                        dict(name='nested.thread'),
+                    ),
+                ),
+            ),
+        )
+
+    def test_multiple_nested_futures(self):
+        def fn2():
+            with self.tracer.trace('nested.thread'):
+                return 42
+
+        def fn():
+            with self.tracer.trace('executor.thread'):
+                with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
+                    futures = [executor.submit(fn2) for _ in range(4)]
+                    for future in futures:
+                        result = future.result()
+                        self.assertEqual(result, 42)
+            return result
+
+        with self.override_global_tracer():
+            with self.tracer.trace('main.thread'):
+                with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
+                    futures = [executor.submit(fn) for _ in range(4)]
+                    for future in futures:
+                        result = future.result()
+                        # assert the right result
+                        self.assertEqual(result, 42)
+
+        # the trace must be completed
+        self.assert_structure(
+            dict(name='main.thread'),
+            (
+                (
+                    dict(name='executor.thread'),
+                    (
+                        dict(name='nested.thread'),
+                    ) * 4,
+                ),
+            ) * 4,
+        )
+
+    def test_multiple_nested_futures_no_parent(self):
+        def fn2():
+            with self.tracer.trace('nested.thread'):
+                return 42
+
+        def fn():
+            with self.tracer.trace('executor.thread'):
+                with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
+                    futures = [executor.submit(fn2) for _ in range(4)]
+                    for future in futures:
+                        result = future.result()
+                        self.assertEqual(result, 42)
+            return result
+
+        with self.override_global_tracer():
+            with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor:
+                futures = [executor.submit(fn) for _ in range(4)]
+                for future in futures:
+                    result = future.result()
+                    # assert the right result
+                    self.assertEqual(result, 42)
+
+        # the trace must be completed
+        traces = self.get_root_spans()
+        self.assertEqual(len(traces), 4)
+
+        for trace in traces:
+            trace.assert_structure(
+                dict(name='executor.thread'),
+                (
+                    dict(name='nested.thread'),
+                ) * 4,
+            )
+
+    def test_send_trace_when_finished(self):
+        # it must send the trace only when all threads are finished
+
+        def fn():
+            with self.tracer.trace('executor.thread'):
+                # wait before returning
+                time.sleep(0.05)
+                return 42
+
+        with self.override_global_tracer():
+            with self.tracer.trace('main.thread'):
+                # don't wait for the execution
+                executor = concurrent.futures.ThreadPoolExecutor(max_workers=2)
+                future = executor.submit(fn)
+                time.sleep(0.01)
+
+        # assert the main thread span is finished first
+        self.assert_span_count(1)
+        self.assert_structure(dict(name='main.thread'))
+
+        # then wait for the second thread and send the trace
+        result = future.result()
+        self.assertEqual(result, 42)
+
+        self.assert_span_count(2)
+
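+        # with both threads finished, the complete two-span structure can be
+        # asserted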
self.assert_structure( + dict(name='main.thread'), + ( + dict(name='executor.thread'), + ), + ) + + def test_propagation_ot(self): + """OpenTracing version of test_propagation.""" + # it must propagate the tracing context if available + ot_tracer = init_tracer('my_svc', self.tracer) + + def fn(): + # an active context must be available + self.assertTrue(self.tracer.context_provider.active() is not None) + with self.tracer.trace('executor.thread'): + return 42 + + with self.override_global_tracer(): + with ot_tracer.start_active_span('main.thread'): + with concurrent.futures.ThreadPoolExecutor(max_workers=2) as executor: + future = executor.submit(fn) + result = future.result() + # assert the right result + self.assertEqual(result, 42) + + # the trace must be completed + self.assert_structure( + dict(name='main.thread'), + ( + dict(name='executor.thread'), + ), + ) diff --git a/tests/contrib/gevent/__init__.py b/tests/contrib/gevent/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/gevent/test_tracer.py b/tests/contrib/gevent/test_tracer.py new file mode 100644 index 0000000000..3f2ac54a5f --- /dev/null +++ b/tests/contrib/gevent/test_tracer.py @@ -0,0 +1,438 @@ +import gevent +import gevent.pool +import ddtrace + +from ddtrace.constants import SAMPLING_PRIORITY_KEY +from ddtrace.context import Context +from ddtrace.contrib.gevent import patch, unpatch +from ddtrace.ext.priority import USER_KEEP + +from unittest import TestCase +from opentracing.scope_managers.gevent import GeventScopeManager +from tests.opentracer.utils import init_tracer +from tests.test_tracer import get_dummy_tracer + +from .utils import silence_errors + + +class TestGeventTracer(TestCase): + """ + Ensures that greenlets are properly traced when using + the default Tracer. 
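+    Instrumentation is enabled with ``patch()`` in ``setUp`` and removed with
+    ``unpatch()`` in ``tearDown``, so every test starts from a clean state.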
+ """ + def setUp(self): + # use a dummy tracer + self.tracer = get_dummy_tracer() + self._original_tracer = ddtrace.tracer + ddtrace.tracer = self.tracer + # trace gevent + patch() + + def tearDown(self): + # clean the active Context + self.tracer.context_provider.activate(None) + # restore the original tracer + ddtrace.tracer = self._original_tracer + # untrace gevent + unpatch() + + def test_main_greenlet(self): + # the main greenlet must not be affected by the tracer + main_greenlet = gevent.getcurrent() + ctx = getattr(main_greenlet, '__datadog_context', None) + assert ctx is None + + def test_main_greenlet_context(self): + # the main greenlet must have a ``Context`` if called + ctx_tracer = self.tracer.get_call_context() + main_greenlet = gevent.getcurrent() + ctx_greenlet = getattr(main_greenlet, '__datadog_context', None) + assert ctx_tracer is ctx_greenlet + assert len(ctx_tracer._trace) == 0 + + def test_get_call_context(self): + # it should return the context attached to the provider + def greenlet(): + return self.tracer.get_call_context() + + g = gevent.spawn(greenlet) + g.join() + ctx = g.value + stored_ctx = getattr(g, '__datadog_context', None) + assert stored_ctx is not None + assert ctx == stored_ctx + + def test_get_call_context_twice(self): + # it should return the same Context if called twice + def greenlet(): + assert self.tracer.get_call_context() == self.tracer.get_call_context() + return True + + g = gevent.spawn(greenlet) + g.join() + assert g.value + + def test_spawn_greenlet_no_context(self): + # a greenlet will not have a context if the tracer is not used + def greenlet(): + gevent.sleep(0.01) + + g = gevent.spawn(greenlet) + g.join() + ctx = getattr(g, '__datadog_context', None) + assert ctx is None + + def test_spawn_greenlet(self): + # a greenlet will have a context if the tracer is used + def greenlet(): + self.tracer.get_call_context() + + g = gevent.spawn(greenlet) + g.join() + ctx = getattr(g, '__datadog_context', None) + assert ctx is not None + assert 0 == len(ctx._trace) + + def test_spawn_later_greenlet(self): + # a greenlet will have a context if the tracer is used even + # if it's spawned later + def greenlet(): + self.tracer.get_call_context() + + g = gevent.spawn_later(0.01, greenlet) + g.join() + ctx = getattr(g, '__datadog_context', None) + assert ctx is not None + assert 0 == len(ctx._trace) + + def test_trace_greenlet(self): + # a greenlet can be traced using the trace API + def greenlet(): + with self.tracer.trace('greenlet') as span: + span.resource = 'base' + + gevent.spawn(greenlet).join() + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + assert 'greenlet' == traces[0][0].name + assert 'base' == traces[0][0].resource + + def test_trace_map_greenlet(self): + # a greenlet can be traced using the trace API + def greenlet(_): + with self.tracer.trace('greenlet', resource='base'): + gevent.sleep(0.01) + + funcs = [ + gevent.pool.Group().map, + gevent.pool.Group().imap, + gevent.pool.Group().imap_unordered, + gevent.pool.Pool(2).map, + gevent.pool.Pool(2).imap, + gevent.pool.Pool(2).imap_unordered, + ] + for func in funcs: + with self.tracer.trace('outer', resource='base') as span: + # Use a list to force evaluation + list(func(greenlet, [0, 1, 2])) + traces = self.tracer.writer.pop_traces() + + assert 4 == len(traces) + spans = [] + outer_span = None + for t in traces: + assert 1 == len(t) + span = t[0] + spans.append(span) + if span.name == 'outer': + outer_span = span + + assert outer_span is 
not None + assert 'base' == outer_span.resource + inner_spans = [s for s in spans if s is not outer_span] + for s in inner_spans: + assert 'greenlet' == s.name + assert 'base' == s.resource + assert outer_span.trace_id == s.trace_id + assert outer_span.span_id == s.parent_id + + def test_trace_later_greenlet(self): + # a greenlet can be traced using the trace API + def greenlet(): + with self.tracer.trace('greenlet') as span: + span.resource = 'base' + + gevent.spawn_later(0.01, greenlet).join() + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + assert 'greenlet' == traces[0][0].name + assert 'base' == traces[0][0].resource + + def test_trace_sampling_priority_spawn_multiple_greenlets_multiple_traces(self): + # multiple greenlets must be part of the same trace + def entrypoint(): + with self.tracer.trace('greenlet.main') as span: + span.context.sampling_priority = USER_KEEP + span.resource = 'base' + jobs = [gevent.spawn(green_1), gevent.spawn(green_2)] + gevent.joinall(jobs) + + def green_1(): + with self.tracer.trace('greenlet.worker') as span: + span.set_tag('worker_id', '1') + gevent.sleep(0.01) + + def green_2(): + with self.tracer.trace('greenlet.worker') as span: + span.set_tag('worker_id', '2') + gevent.sleep(0.01) + + gevent.spawn(entrypoint).join() + traces = self.tracer.writer.pop_traces() + assert 3 == len(traces) + assert 1 == len(traces[0]) + parent_span = traces[2][0] + worker_1 = traces[0][0] + worker_2 = traces[1][0] + # check sampling priority + assert parent_span.get_metric(SAMPLING_PRIORITY_KEY) == USER_KEEP + assert worker_1.get_metric(SAMPLING_PRIORITY_KEY) == USER_KEEP + assert worker_2.get_metric(SAMPLING_PRIORITY_KEY) == USER_KEEP + + def test_trace_spawn_multiple_greenlets_multiple_traces(self): + # multiple greenlets must be part of the same trace + def entrypoint(): + with self.tracer.trace('greenlet.main') as span: + span.resource = 'base' + jobs = [gevent.spawn(green_1), gevent.spawn(green_2)] + gevent.joinall(jobs) + + def green_1(): + with self.tracer.trace('greenlet.worker') as span: + span.set_tag('worker_id', '1') + gevent.sleep(0.01) + + def green_2(): + with self.tracer.trace('greenlet.worker') as span: + span.set_tag('worker_id', '2') + gevent.sleep(0.01) + + gevent.spawn(entrypoint).join() + traces = self.tracer.writer.pop_traces() + assert 3 == len(traces) + assert 1 == len(traces[0]) + parent_span = traces[2][0] + worker_1 = traces[0][0] + worker_2 = traces[1][0] + # check spans data and hierarchy + assert parent_span.name == 'greenlet.main' + assert parent_span.resource == 'base' + assert worker_1.get_tag('worker_id') == '1' + assert worker_1.name == 'greenlet.worker' + assert worker_1.resource == 'greenlet.worker' + assert worker_1.parent_id == parent_span.span_id + assert worker_2.get_tag('worker_id') == '2' + assert worker_2.name == 'greenlet.worker' + assert worker_2.resource == 'greenlet.worker' + assert worker_2.parent_id == parent_span.span_id + + def test_trace_spawn_later_multiple_greenlets_multiple_traces(self): + # multiple greenlets must be part of the same trace + def entrypoint(): + with self.tracer.trace('greenlet.main') as span: + span.resource = 'base' + jobs = [gevent.spawn_later(0.01, green_1), gevent.spawn_later(0.01, green_2)] + gevent.joinall(jobs) + + def green_1(): + with self.tracer.trace('greenlet.worker') as span: + span.set_tag('worker_id', '1') + gevent.sleep(0.01) + + def green_2(): + with self.tracer.trace('greenlet.worker') as span: + span.set_tag('worker_id', '2') + 
gevent.sleep(0.01) + + gevent.spawn(entrypoint).join() + traces = self.tracer.writer.pop_traces() + assert 3 == len(traces) + assert 1 == len(traces[0]) + parent_span = traces[2][0] + worker_1 = traces[0][0] + worker_2 = traces[1][0] + # check spans data and hierarchy + assert parent_span.name == 'greenlet.main' + assert parent_span.resource == 'base' + assert worker_1.get_tag('worker_id') == '1' + assert worker_1.name == 'greenlet.worker' + assert worker_1.resource == 'greenlet.worker' + assert worker_1.parent_id == parent_span.span_id + assert worker_2.get_tag('worker_id') == '2' + assert worker_2.name == 'greenlet.worker' + assert worker_2.resource == 'greenlet.worker' + assert worker_2.parent_id == parent_span.span_id + + def test_trace_concurrent_calls(self): + # create multiple futures so that we expect multiple + # traces instead of a single one + def greenlet(): + with self.tracer.trace('greenlet'): + gevent.sleep(0.01) + + jobs = [gevent.spawn(greenlet) for x in range(100)] + gevent.joinall(jobs) + + traces = self.tracer.writer.pop_traces() + assert 100 == len(traces) + assert 1 == len(traces[0]) + assert 'greenlet' == traces[0][0].name + + def test_propagation_with_new_context(self): + # create multiple futures so that we expect multiple + # traces instead of a single one + ctx = Context(trace_id=100, span_id=101) + self.tracer.context_provider.activate(ctx) + + def greenlet(): + with self.tracer.trace('greenlet'): + gevent.sleep(0.01) + + jobs = [gevent.spawn(greenlet) for x in range(1)] + gevent.joinall(jobs) + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + assert traces[0][0].trace_id == 100 + assert traces[0][0].parent_id == 101 + + def test_trace_concurrent_spawn_later_calls(self): + # create multiple futures so that we expect multiple + # traces instead of a single one, even if greenlets + # are delayed + def greenlet(): + with self.tracer.trace('greenlet'): + gevent.sleep(0.01) + + jobs = [gevent.spawn_later(0.01, greenlet) for x in range(100)] + gevent.joinall(jobs) + + traces = self.tracer.writer.pop_traces() + assert 100 == len(traces) + assert 1 == len(traces[0]) + assert 'greenlet' == traces[0][0].name + + @silence_errors + def test_exception(self): + # it should catch the exception like usual + def greenlet(): + with self.tracer.trace('greenlet'): + raise Exception('Custom exception') + + g = gevent.spawn(greenlet) + g.join() + assert isinstance(g.exception, Exception) + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + span = traces[0][0] + assert 1 == span.error + assert 'Custom exception' == span.get_tag('error.msg') + assert 'Traceback (most recent call last)' in span.get_tag('error.stack') + + def _assert_spawn_multiple_greenlets(self, spans): + """A helper to assert the parenting of a trace when greenlets are + spawned within another greenlet. + + This is meant to help maintain compatibility between the Datadog and + OpenTracing tracer implementations. + + Note that for gevent there is differing behaviour between the context + management so the traces are not identical in form. However, the + parenting of the spans must remain the same. 
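+        Spans are looked up by name rather than by position below, since the
+        two implementations do not report them in a deterministic order.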
+ """ + assert len(spans) == 3 + + parent = None + worker_1 = None + worker_2 = None + # get the spans since they can be in any order + for span in spans: + if span.name == 'greenlet.main': + parent = span + if span.name == 'greenlet.worker1': + worker_1 = span + if span.name == 'greenlet.worker2': + worker_2 = span + assert parent + assert worker_1 + assert worker_2 + + # confirm the parenting + assert worker_1.parent_id == parent.span_id + assert worker_2.parent_id == parent.span_id + + # check spans data and hierarchy + assert parent.name == 'greenlet.main' + assert worker_1.get_tag('worker_id') == '1' + assert worker_1.name == 'greenlet.worker1' + assert worker_1.resource == 'greenlet.worker1' + assert worker_2.get_tag('worker_id') == '2' + assert worker_2.name == 'greenlet.worker2' + assert worker_2.resource == 'greenlet.worker2' + + def test_trace_spawn_multiple_greenlets_multiple_traces_dd(self): + """Datadog version of the same test.""" + def entrypoint(): + with self.tracer.trace('greenlet.main') as span: + span.resource = 'base' + jobs = [gevent.spawn(green_1), gevent.spawn(green_2)] + gevent.joinall(jobs) + + def green_1(): + with self.tracer.trace('greenlet.worker1') as span: + span.set_tag('worker_id', '1') + gevent.sleep(0.01) + + # note that replacing the `tracer.trace` call here with the + # OpenTracing equivalent will cause the checks to fail + def green_2(): + with self.tracer.trace('greenlet.worker2') as span: + span.set_tag('worker_id', '2') + gevent.sleep(0.01) + + gevent.spawn(entrypoint).join() + spans = self.tracer.writer.pop() + self._assert_spawn_multiple_greenlets(spans) + + def test_trace_spawn_multiple_greenlets_multiple_traces_ot(self): + """OpenTracing version of the same test.""" + + ot_tracer = init_tracer('my_svc', self.tracer, scope_manager=GeventScopeManager()) + + def entrypoint(): + with ot_tracer.start_active_span('greenlet.main') as span: + span.resource = 'base' + jobs = [gevent.spawn(green_1), gevent.spawn(green_2)] + gevent.joinall(jobs) + + def green_1(): + with self.tracer.trace('greenlet.worker1') as span: + span.set_tag('worker_id', '1') + gevent.sleep(0.01) + + # note that replacing the `tracer.trace` call here with the + # OpenTracing equivalent will cause the checks to fail + def green_2(): + with ot_tracer.start_active_span('greenlet.worker2') as scope: + scope.span.set_tag('worker_id', '2') + gevent.sleep(0.01) + + gevent.spawn(entrypoint).join() + + spans = self.tracer.writer.pop() + self._assert_spawn_multiple_greenlets(spans) diff --git a/tests/contrib/gevent/utils.py b/tests/contrib/gevent/utils.py new file mode 100644 index 0000000000..39118564d6 --- /dev/null +++ b/tests/contrib/gevent/utils.py @@ -0,0 +1,19 @@ +import gevent + +from functools import wraps + + +_NOT_ERROR = gevent.hub.Hub.NOT_ERROR + + +def silence_errors(f): + """ + Test decorator for gevent that silences all errors when + a greenlet raises an exception. 
+ """ + @wraps(f) + def wrapper(*args, **kwargs): + gevent.hub.Hub.NOT_ERROR = (Exception,) + f(*args, **kwargs) + gevent.hub.Hub.NOT_ERROR = _NOT_ERROR + return wrapper diff --git a/tests/contrib/grpc/__init__.py b/tests/contrib/grpc/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/grpc/hello.proto b/tests/contrib/grpc/hello.proto new file mode 100644 index 0000000000..38726c79bf --- /dev/null +++ b/tests/contrib/grpc/hello.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; +package helloworld; + +service Hello { + // Sends a greeting + rpc SayHello (HelloRequest) returns (HelloReply) {} + rpc SayHelloTwice (HelloRequest) returns (stream HelloReply) {} + rpc SayHelloRepeatedly (stream HelloRequest) returns (stream HelloReply) {} + rpc SayHelloLast (stream HelloRequest) returns (HelloReply) {} +} + +// The request message containing the user's name. +message HelloRequest { + string name = 1; +} + +// The response message containing the greetings +message HelloReply { + string message = 1; +} diff --git a/tests/contrib/grpc/hello_pb2.py b/tests/contrib/grpc/hello_pb2.py new file mode 100644 index 0000000000..3b8bfb011e --- /dev/null +++ b/tests/contrib/grpc/hello_pb2.py @@ -0,0 +1,161 @@ +# flake8: noqa +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: hello.proto + +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='hello.proto', + package='helloworld', + syntax='proto3', + serialized_options=None, + serialized_pb=_b('\n\x0bhello.proto\x12\nhelloworld\"\x1c\n\x0cHelloRequest\x12\x0c\n\x04name\x18\x01 \x01(\t\"\x1d\n\nHelloReply\x12\x0f\n\x07message\x18\x01 \x01(\t2\xa2\x02\n\x05Hello\x12>\n\x08SayHello\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x12\x45\n\rSayHelloTwice\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00\x30\x01\x12L\n\x12SayHelloRepeatedly\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00(\x01\x30\x01\x12\x44\n\x0cSayHelloLast\x12\x18.helloworld.HelloRequest\x1a\x16.helloworld.HelloReply\"\x00(\x01\x62\x06proto3') +) + + + + +_HELLOREQUEST = _descriptor.Descriptor( + name='HelloRequest', + full_name='helloworld.HelloRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='name', full_name='helloworld.HelloRequest.name', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=27, + serialized_end=55, +) + + +_HELLOREPLY = _descriptor.Descriptor( + name='HelloReply', + full_name='helloworld.HelloReply', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='message', full_name='helloworld.HelloReply.message', index=0, + number=1, type=9, 
cpp_type=9, label=1, + has_default_value=False, default_value=_b("").decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=57, + serialized_end=86, +) + +DESCRIPTOR.message_types_by_name['HelloRequest'] = _HELLOREQUEST +DESCRIPTOR.message_types_by_name['HelloReply'] = _HELLOREPLY +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +HelloRequest = _reflection.GeneratedProtocolMessageType('HelloRequest', (_message.Message,), { + 'DESCRIPTOR' : _HELLOREQUEST, + '__module__' : 'hello_pb2' + # @@protoc_insertion_point(class_scope:helloworld.HelloRequest) + }) +_sym_db.RegisterMessage(HelloRequest) + +HelloReply = _reflection.GeneratedProtocolMessageType('HelloReply', (_message.Message,), { + 'DESCRIPTOR' : _HELLOREPLY, + '__module__' : 'hello_pb2' + # @@protoc_insertion_point(class_scope:helloworld.HelloReply) + }) +_sym_db.RegisterMessage(HelloReply) + + + +_HELLO = _descriptor.ServiceDescriptor( + name='Hello', + full_name='helloworld.Hello', + file=DESCRIPTOR, + index=0, + serialized_options=None, + serialized_start=89, + serialized_end=379, + methods=[ + _descriptor.MethodDescriptor( + name='SayHello', + full_name='helloworld.Hello.SayHello', + index=0, + containing_service=None, + input_type=_HELLOREQUEST, + output_type=_HELLOREPLY, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='SayHelloTwice', + full_name='helloworld.Hello.SayHelloTwice', + index=1, + containing_service=None, + input_type=_HELLOREQUEST, + output_type=_HELLOREPLY, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='SayHelloRepeatedly', + full_name='helloworld.Hello.SayHelloRepeatedly', + index=2, + containing_service=None, + input_type=_HELLOREQUEST, + output_type=_HELLOREPLY, + serialized_options=None, + ), + _descriptor.MethodDescriptor( + name='SayHelloLast', + full_name='helloworld.Hello.SayHelloLast', + index=3, + containing_service=None, + input_type=_HELLOREQUEST, + output_type=_HELLOREPLY, + serialized_options=None, + ), +]) +_sym_db.RegisterServiceDescriptor(_HELLO) + +DESCRIPTOR.services_by_name['Hello'] = _HELLO + +# @@protoc_insertion_point(module_scope) diff --git a/tests/contrib/grpc/hello_pb2_grpc.py b/tests/contrib/grpc/hello_pb2_grpc.py new file mode 100644 index 0000000000..046ae5d756 --- /dev/null +++ b/tests/contrib/grpc/hello_pb2_grpc.py @@ -0,0 +1,98 @@ +# flake8: noqa +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +import grpc + +from . import hello_pb2 as hello__pb2 + + +class HelloStub(object): + # missing associated documentation comment in .proto file + pass + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.SayHello = channel.unary_unary( + '/helloworld.Hello/SayHello', + request_serializer=hello__pb2.HelloRequest.SerializeToString, + response_deserializer=hello__pb2.HelloReply.FromString, + ) + self.SayHelloTwice = channel.unary_stream( + '/helloworld.Hello/SayHelloTwice', + request_serializer=hello__pb2.HelloRequest.SerializeToString, + response_deserializer=hello__pb2.HelloReply.FromString, + ) + self.SayHelloRepeatedly = channel.stream_stream( + '/helloworld.Hello/SayHelloRepeatedly', + request_serializer=hello__pb2.HelloRequest.SerializeToString, + response_deserializer=hello__pb2.HelloReply.FromString, + ) + self.SayHelloLast = channel.stream_unary( + '/helloworld.Hello/SayHelloLast', + request_serializer=hello__pb2.HelloRequest.SerializeToString, + response_deserializer=hello__pb2.HelloReply.FromString, + ) + + +class HelloServicer(object): + # missing associated documentation comment in .proto file + pass + + def SayHello(self, request, context): + """Sends a greeting + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SayHelloTwice(self, request, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SayHelloRepeatedly(self, request_iterator, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SayHelloLast(self, request_iterator, context): + # missing associated documentation comment in .proto file + pass + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_HelloServicer_to_server(servicer, server): + rpc_method_handlers = { + 'SayHello': grpc.unary_unary_rpc_method_handler( + servicer.SayHello, + request_deserializer=hello__pb2.HelloRequest.FromString, + response_serializer=hello__pb2.HelloReply.SerializeToString, + ), + 'SayHelloTwice': grpc.unary_stream_rpc_method_handler( + servicer.SayHelloTwice, + request_deserializer=hello__pb2.HelloRequest.FromString, + response_serializer=hello__pb2.HelloReply.SerializeToString, + ), + 'SayHelloRepeatedly': grpc.stream_stream_rpc_method_handler( + servicer.SayHelloRepeatedly, + request_deserializer=hello__pb2.HelloRequest.FromString, + response_serializer=hello__pb2.HelloReply.SerializeToString, + ), + 'SayHelloLast': grpc.stream_unary_rpc_method_handler( + servicer.SayHelloLast, + request_deserializer=hello__pb2.HelloRequest.FromString, + response_serializer=hello__pb2.HelloReply.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'helloworld.Hello', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) diff --git a/tests/contrib/grpc/test_grpc.py b/tests/contrib/grpc/test_grpc.py new file mode 100644 index 0000000000..56dc9eb514 --- /dev/null +++ b/tests/contrib/grpc/test_grpc.py @@ -0,0 +1,508 @@ +import grpc +from grpc._grpcio_metadata import __version__ as _GRPC_VERSION +import time +from grpc.framework.foundation import logging_pool +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.grpc import patch, unpatch +from ddtrace.contrib.grpc import 
constants +from ddtrace.contrib.grpc.patch import _unpatch_server +from ddtrace.ext import errors +from ddtrace import Pin + +from ...base import BaseTracerTestCase + +from .hello_pb2 import HelloRequest, HelloReply +from .hello_pb2_grpc import add_HelloServicer_to_server, HelloStub, HelloServicer + +_GRPC_PORT = 50531 +_GRPC_VERSION = tuple([int(i) for i in _GRPC_VERSION.split('.')]) + + +class GrpcTestCase(BaseTracerTestCase): + def setUp(self): + super(GrpcTestCase, self).setUp() + patch() + Pin.override(constants.GRPC_PIN_MODULE_SERVER, tracer=self.tracer) + Pin.override(constants.GRPC_PIN_MODULE_CLIENT, tracer=self.tracer) + self._start_server() + + def tearDown(self): + self._stop_server() + # Remove any remaining spans + self.tracer.writer.pop() + # Unpatch grpc + unpatch() + super(GrpcTestCase, self).tearDown() + + def get_spans_with_sync_and_assert(self, size=0, retry=20): + # testing instrumentation with grpcio < 1.14.0 presents a problem for + # checking spans written to the dummy tracer + # see https://github.com/grpc/grpc/issues/14621 + + spans = super(GrpcTestCase, self).get_spans() + + if _GRPC_VERSION >= (1, 14): + assert len(spans) == size + return spans + + for _ in range(retry): + if len(spans) == size: + assert len(spans) == size + return spans + time.sleep(0.1) + + return spans + + def _start_server(self): + self._server = grpc.server(logging_pool.pool(2)) + self._server.add_insecure_port('[::]:%d' % (_GRPC_PORT)) + add_HelloServicer_to_server(_HelloServicer(), self._server) + self._server.start() + + def _stop_server(self): + self._server.stop(0) + + def _check_client_span(self, span, service, method_name, method_kind): + assert span.name == 'grpc' + assert span.resource == '/helloworld.Hello/{}'.format(method_name) + assert span.service == service + assert span.error == 0 + assert span.span_type == 'grpc' + assert span.get_tag('grpc.method.path') == '/helloworld.Hello/{}'.format(method_name) + assert span.get_tag('grpc.method.package') == 'helloworld' + assert span.get_tag('grpc.method.service') == 'Hello' + assert span.get_tag('grpc.method.name') == method_name + assert span.get_tag('grpc.method.kind') == method_kind + assert span.get_tag('grpc.status.code') == 'StatusCode.OK' + assert span.get_tag('grpc.host') == 'localhost' + assert span.get_tag('grpc.port') == '50531' + + def _check_server_span(self, span, service, method_name, method_kind): + assert span.name == 'grpc' + assert span.resource == '/helloworld.Hello/{}'.format(method_name) + assert span.service == service + assert span.error == 0 + assert span.span_type == 'grpc' + assert span.get_tag('grpc.method.path') == '/helloworld.Hello/{}'.format(method_name) + assert span.get_tag('grpc.method.package') == 'helloworld' + assert span.get_tag('grpc.method.service') == 'Hello' + assert span.get_tag('grpc.method.name') == method_name + assert span.get_tag('grpc.method.kind') == method_kind + + def test_insecure_channel_using_args_parameter(self): + def insecure_channel_using_args(target): + return grpc.insecure_channel(target) + self._test_insecure_channel(insecure_channel_using_args) + + def test_insecure_channel_using_kwargs_parameter(self): + def insecure_channel_using_kwargs(target): + return grpc.insecure_channel(target=target) + self._test_insecure_channel(insecure_channel_using_kwargs) + + def _test_insecure_channel(self, insecure_channel_function): + target = 'localhost:%d' % (_GRPC_PORT) + with insecure_channel_function(target) as channel: + stub = HelloStub(channel) + 
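+            # a single unary-unary call; the instrumentation is expected to
+            # produce exactly one client span and one server span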
stub.SayHello(HelloRequest(name='test'))
+
+        spans = self.get_spans_with_sync_and_assert(size=2)
+        server_span, client_span = spans
+
+        self._check_client_span(client_span, 'grpc-client', 'SayHello', 'unary')
+        self._check_server_span(server_span, 'grpc-server', 'SayHello', 'unary')
+
+    def test_secure_channel_using_args_parameter(self):
+        def secure_channel_using_args(target, **kwargs):
+            return grpc.secure_channel(target, **kwargs)
+        self._test_secure_channel(secure_channel_using_args)
+
+    def test_secure_channel_using_kwargs_parameter(self):
+        def secure_channel_using_kwargs(target, **kwargs):
+            return grpc.secure_channel(target=target, **kwargs)
+        self._test_secure_channel(secure_channel_using_kwargs)
+
+    def _test_secure_channel(self, secure_channel_function):
+        target = 'localhost:%d' % (_GRPC_PORT)
+        with secure_channel_function(target, credentials=grpc.ChannelCredentials(None)) as channel:
+            stub = HelloStub(channel)
+            stub.SayHello(HelloRequest(name='test'))
+
+        spans = self.get_spans_with_sync_and_assert(size=2)
+        server_span, client_span = spans
+
+        self._check_client_span(client_span, 'grpc-client', 'SayHello', 'unary')
+        self._check_server_span(server_span, 'grpc-server', 'SayHello', 'unary')
+
+    def test_pin_not_activated(self):
+        self.tracer.configure(enabled=False)
+        with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel:
+            stub = HelloStub(channel)
+            stub.SayHello(HelloRequest(name='test'))
+
+        spans = self.get_spans_with_sync_and_assert()
+        assert len(spans) == 0
+
+    def test_pin_tags_are_put_in_span(self):
+        # DEV: stop and restart server to catch overridden pin
+        self._stop_server()
+        Pin.override(constants.GRPC_PIN_MODULE_SERVER, service='server1')
+        Pin.override(constants.GRPC_PIN_MODULE_SERVER, tags={'tag1': 'server'})
+        Pin.override(constants.GRPC_PIN_MODULE_CLIENT, tags={'tag2': 'client'})
+        self._start_server()
+        with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel:
+            stub = HelloStub(channel)
+            stub.SayHello(HelloRequest(name='test'))
+
+        spans = self.get_spans_with_sync_and_assert(size=2)
+        assert spans[0].service == 'server1'
+        assert spans[0].get_tag('tag1') == 'server'
+        assert spans[1].get_tag('tag2') == 'client'
+
+    def test_pin_can_be_defined_per_channel(self):
+        Pin.override(constants.GRPC_PIN_MODULE_CLIENT, service='grpc1')
+        channel1 = grpc.insecure_channel('localhost:%d' % (_GRPC_PORT))
+
+        Pin.override(constants.GRPC_PIN_MODULE_CLIENT, service='grpc2')
+        channel2 = grpc.insecure_channel('localhost:%d' % (_GRPC_PORT))
+
+        stub1 = HelloStub(channel1)
+        stub1.SayHello(HelloRequest(name='test'))
+        channel1.close()
+
+        # DEV: make sure we have two spans before proceeding
+        spans = self.get_spans_with_sync_and_assert(size=2)
+
+        stub2 = HelloStub(channel2)
+        stub2.SayHello(HelloRequest(name='test'))
+        channel2.close()
+
+        spans = self.get_spans_with_sync_and_assert(size=4)
+
+        # DEV: Server service default, client services override
+        self._check_server_span(spans[0], 'grpc-server', 'SayHello', 'unary')
+        self._check_client_span(spans[1], 'grpc1', 'SayHello', 'unary')
+        self._check_server_span(spans[2], 'grpc-server', 'SayHello', 'unary')
+        self._check_client_span(spans[3], 'grpc2', 'SayHello', 'unary')
+
+    def test_analytics_default(self):
+        with grpc.secure_channel('localhost:%d' % (_GRPC_PORT), credentials=grpc.ChannelCredentials(None)) as channel:
+            stub = HelloStub(channel)
+            stub.SayHello(HelloRequest(name='test'))
+
+        spans = self.get_spans_with_sync_and_assert(size=2)
+        assert
spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None + assert spans[1].get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None + + def test_analytics_with_rate(self): + with self.override_config( + 'grpc_server', + dict(analytics_enabled=True, analytics_sample_rate=0.75) + ): + with self.override_config( + 'grpc', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + with grpc.secure_channel( + 'localhost:%d' % (_GRPC_PORT), + credentials=grpc.ChannelCredentials(None) + ) as channel: + stub = HelloStub(channel) + stub.SayHello(HelloRequest(name='test')) + + spans = self.get_spans_with_sync_and_assert(size=2) + assert spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.75 + assert spans[1].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.5 + + def test_analytics_without_rate(self): + with self.override_config( + 'grpc_server', + dict(analytics_enabled=True) + ): + with self.override_config( + 'grpc', + dict(analytics_enabled=True) + ): + with grpc.secure_channel( + 'localhost:%d' % (_GRPC_PORT), + credentials=grpc.ChannelCredentials(None) + ) as channel: + stub = HelloStub(channel) + stub.SayHello(HelloRequest(name='test')) + + spans = self.get_spans_with_sync_and_assert(size=2) + assert spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0 + assert spans[1].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0 + + def test_server_stream(self): + with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel: + stub = HelloStub(channel) + responses_iterator = stub.SayHelloTwice(HelloRequest(name='test')) + assert len(list(responses_iterator)) == 2 + + spans = self.get_spans_with_sync_and_assert(size=2) + server_span, client_span = spans + self._check_client_span(client_span, 'grpc-client', 'SayHelloTwice', 'server_streaming') + self._check_server_span(server_span, 'grpc-server', 'SayHelloTwice', 'server_streaming') + + def test_client_stream(self): + requests_iterator = iter( + HelloRequest(name=name) for name in + ['first', 'second'] + ) + + with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel: + stub = HelloStub(channel) + response = stub.SayHelloLast(requests_iterator) + assert response.message == 'first;second' + + spans = self.get_spans_with_sync_and_assert(size=2) + server_span, client_span = spans + self._check_client_span(client_span, 'grpc-client', 'SayHelloLast', 'client_streaming') + self._check_server_span(server_span, 'grpc-server', 'SayHelloLast', 'client_streaming') + + def test_bidi_stream(self): + requests_iterator = iter( + HelloRequest(name=name) for name in + ['first', 'second', 'third', 'fourth', 'fifth'] + ) + + with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel: + stub = HelloStub(channel) + responses = stub.SayHelloRepeatedly(requests_iterator) + messages = [r.message for r in responses] + assert list(messages) == ['first;second', 'third;fourth', 'fifth'] + + spans = self.get_spans_with_sync_and_assert(size=2) + server_span, client_span = spans + self._check_client_span(client_span, 'grpc-client', 'SayHelloRepeatedly', 'bidi_streaming') + self._check_server_span(server_span, 'grpc-server', 'SayHelloRepeatedly', 'bidi_streaming') + + def test_priority_sampling(self): + # DEV: Priority sampling is enabled by default + # Setting priority sampling reset the writer, we need to re-override it + + with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel: + stub = HelloStub(channel) + response = stub.SayHello(HelloRequest(name='propogator')) + + spans = self.get_spans_with_sync_and_assert(size=2) + server_span, client_span = spans + + 
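+        # the test servicer is assumed to echo the propagation metadata it
+        # receives back in the reply message; the assertions below verify that
+        # trace id, parent id and sampling priority crossed the wire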
assert 'x-datadog-trace-id={}'.format(client_span.trace_id) in response.message + assert 'x-datadog-parent-id={}'.format(client_span.span_id) in response.message + assert 'x-datadog-sampling-priority=1' in response.message + + def test_unary_abort(self): + with grpc.secure_channel('localhost:%d' % (_GRPC_PORT), credentials=grpc.ChannelCredentials(None)) as channel: + stub = HelloStub(channel) + with self.assertRaises(grpc.RpcError): + stub.SayHello(HelloRequest(name='abort')) + + spans = self.get_spans_with_sync_and_assert(size=2) + server_span, client_span = spans + + assert client_span.resource == '/helloworld.Hello/SayHello' + assert client_span.error == 1 + assert client_span.get_tag(errors.ERROR_MSG) == 'aborted' + assert client_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.ABORTED' + assert client_span.get_tag('grpc.status.code') == 'StatusCode.ABORTED' + + def test_custom_interceptor_exception(self): + # add an interceptor that raises a custom exception and check error tags + # are added to spans + raise_exception_interceptor = _RaiseExceptionClientInterceptor() + with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel: + with self.assertRaises(_CustomException): + intercept_channel = grpc.intercept_channel( + channel, + raise_exception_interceptor + ) + stub = HelloStub(intercept_channel) + stub.SayHello(HelloRequest(name='custom-exception')) + + spans = self.get_spans_with_sync_and_assert(size=2) + server_span, client_span = spans + + assert client_span.resource == '/helloworld.Hello/SayHello' + assert client_span.error == 1 + assert client_span.get_tag(errors.ERROR_MSG) == 'custom' + assert client_span.get_tag(errors.ERROR_TYPE) == 'tests.contrib.grpc.test_grpc._CustomException' + assert client_span.get_tag(errors.ERROR_STACK) is not None + assert client_span.get_tag('grpc.status.code') == 'StatusCode.INTERNAL' + + # no exception on server end + assert server_span.resource == '/helloworld.Hello/SayHello' + assert server_span.error == 0 + assert server_span.get_tag(errors.ERROR_MSG) is None + assert server_span.get_tag(errors.ERROR_TYPE) is None + assert server_span.get_tag(errors.ERROR_STACK) is None + + def test_client_cancellation(self): + # unpatch and restart server since we are only testing here caller cancellation + self._stop_server() + _unpatch_server() + self._start_server() + + # have servicer sleep whenever request is handled to ensure we can cancel before server responds + # to requests + requests_iterator = iter( + HelloRequest(name=name) for name in + ['sleep'] + ) + + with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel: + with self.assertRaises(grpc.RpcError): + stub = HelloStub(channel) + responses = stub.SayHelloRepeatedly(requests_iterator) + responses.cancel() + next(responses) + + spans = self.get_spans_with_sync_and_assert(size=1) + client_span = spans[0] + + assert client_span.resource == '/helloworld.Hello/SayHelloRepeatedly' + assert client_span.error == 1 + assert client_span.get_tag(errors.ERROR_MSG) == 'Locally cancelled by application!' 
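+        # a client-side cancellation should surface as CANCELLED with no server stack trace recorded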
+ assert client_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.CANCELLED' + assert client_span.get_tag(errors.ERROR_STACK) is None + assert client_span.get_tag('grpc.status.code') == 'StatusCode.CANCELLED' + + def test_unary_exception(self): + with grpc.secure_channel('localhost:%d' % (_GRPC_PORT), credentials=grpc.ChannelCredentials(None)) as channel: + stub = HelloStub(channel) + with self.assertRaises(grpc.RpcError): + stub.SayHello(HelloRequest(name='exception')) + + spans = self.get_spans_with_sync_and_assert(size=2) + server_span, client_span = spans + + assert client_span.resource == '/helloworld.Hello/SayHello' + assert client_span.error == 1 + assert client_span.get_tag(errors.ERROR_MSG) == 'exception' + assert client_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.INVALID_ARGUMENT' + assert client_span.get_tag('grpc.status.code') == 'StatusCode.INVALID_ARGUMENT' + + assert server_span.resource == '/helloworld.Hello/SayHello' + assert server_span.error == 1 + assert server_span.get_tag(errors.ERROR_MSG) == 'exception' + assert server_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.INVALID_ARGUMENT' + assert 'Traceback' in server_span.get_tag(errors.ERROR_STACK) + assert 'grpc.StatusCode.INVALID_ARGUMENT' in server_span.get_tag(errors.ERROR_STACK) + + def test_client_stream_exception(self): + requests_iterator = iter( + HelloRequest(name=name) for name in + ['first', 'exception'] + ) + + with grpc.insecure_channel('localhost:%d' % (_GRPC_PORT)) as channel: + stub = HelloStub(channel) + with self.assertRaises(grpc.RpcError): + stub.SayHelloLast(requests_iterator) + + spans = self.get_spans_with_sync_and_assert(size=2) + server_span, client_span = spans + + assert client_span.resource == '/helloworld.Hello/SayHelloLast' + assert client_span.error == 1 + assert client_span.get_tag(errors.ERROR_MSG) == 'exception' + assert client_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.INVALID_ARGUMENT' + assert client_span.get_tag('grpc.status.code') == 'StatusCode.INVALID_ARGUMENT' + + assert server_span.resource == '/helloworld.Hello/SayHelloLast' + assert server_span.error == 1 + assert server_span.get_tag(errors.ERROR_MSG) == 'exception' + assert server_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.INVALID_ARGUMENT' + assert 'Traceback' in server_span.get_tag(errors.ERROR_STACK) + assert 'grpc.StatusCode.INVALID_ARGUMENT' in server_span.get_tag(errors.ERROR_STACK) + + def test_server_stream_exception(self): + with grpc.secure_channel('localhost:%d' % (_GRPC_PORT), credentials=grpc.ChannelCredentials(None)) as channel: + stub = HelloStub(channel) + with self.assertRaises(grpc.RpcError): + list(stub.SayHelloTwice(HelloRequest(name='exception'))) + + spans = self.get_spans_with_sync_and_assert(size=2) + server_span, client_span = spans + + assert client_span.resource == '/helloworld.Hello/SayHelloTwice' + assert client_span.error == 1 + assert client_span.get_tag(errors.ERROR_MSG) == 'exception' + assert client_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.RESOURCE_EXHAUSTED' + assert client_span.get_tag('grpc.status.code') == 'StatusCode.RESOURCE_EXHAUSTED' + + assert server_span.resource == '/helloworld.Hello/SayHelloTwice' + assert server_span.error == 1 + assert server_span.get_tag(errors.ERROR_MSG) == 'exception' + assert server_span.get_tag(errors.ERROR_TYPE) == 'StatusCode.RESOURCE_EXHAUSTED' + assert 'Traceback' in server_span.get_tag(errors.ERROR_STACK) + assert 'grpc.StatusCode.RESOURCE_EXHAUSTED' in server_span.get_tag(errors.ERROR_STACK) + + +class _HelloServicer(HelloServicer): + 
def SayHello(self, request, context): + if request.name == 'propogator': + metadata = context.invocation_metadata() + context.set_code(grpc.StatusCode.OK) + message = ';'.join( + w.key + '=' + w.value + for w in metadata + if w.key.startswith('x-datadog') + ) + return HelloReply(message=message) + + if request.name == 'abort': + context.abort(grpc.StatusCode.ABORTED, 'aborted') + + if request.name == 'exception': + context.abort(grpc.StatusCode.INVALID_ARGUMENT, 'exception') + + return HelloReply(message='Hello {}'.format(request.name)) + + def SayHelloTwice(self, request, context): + yield HelloReply(message='first response') + + if request.name == 'exception': + context.abort(grpc.StatusCode.RESOURCE_EXHAUSTED, 'exception') + + yield HelloReply(message='secondresponse') + + def SayHelloLast(self, request_iterator, context): + names = [r.name for r in list(request_iterator)] + + if 'exception' in names: + context.abort(grpc.StatusCode.INVALID_ARGUMENT, 'exception') + + return HelloReply(message='{}'.format( + ';'.join(names))) + + def SayHelloRepeatedly(self, request_iterator, context): + last_request = None + for request in request_iterator: + if last_request is not None: + yield HelloReply(message='{}'.format( + ';'.join([last_request.name, request.name]) + )) + last_request = None + else: + last_request = request + + # response for dangling request + if last_request is not None: + yield HelloReply(message='{}'.format(last_request.name)) + + +class _CustomException(Exception): + pass + + +class _RaiseExceptionClientInterceptor(grpc.UnaryUnaryClientInterceptor): + def _intercept_call(self, continuation, client_call_details, + request_or_iterator): + # allow computation to complete + continuation(client_call_details, request_or_iterator).result() + + raise _CustomException('custom') + + def intercept_unary_unary(self, continuation, client_call_details, request): + return self._intercept_call(continuation, client_call_details, request) diff --git a/tests/contrib/grpc/test_grpc_utils.py b/tests/contrib/grpc/test_grpc_utils.py new file mode 100644 index 0000000000..5cd078889c --- /dev/null +++ b/tests/contrib/grpc/test_grpc_utils.py @@ -0,0 +1,13 @@ +from ddtrace.contrib.grpc.utils import parse_method_path + + +def test_parse_method_path_with_package(): + method_path = '/package.service/method' + parsed = parse_method_path(method_path) + assert parsed == ('package', 'service', 'method') + + +def test_parse_method_path_without_package(): + method_path = '/service/method' + parsed = parse_method_path(method_path) + assert parsed == (None, 'service', 'method') diff --git a/tests/contrib/httplib/__init__.py b/tests/contrib/httplib/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/httplib/test_httplib.py b/tests/contrib/httplib/test_httplib.py new file mode 100644 index 0000000000..9f0037c27e --- /dev/null +++ b/tests/contrib/httplib/test_httplib.py @@ -0,0 +1,584 @@ +# Standard library +import contextlib +import sys + +# Third party +from ddtrace.vendor import wrapt + +# Project +from ddtrace import config +from ddtrace.compat import httplib, PY2 +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.httplib import patch, unpatch +from ddtrace.contrib.httplib.patch import should_skip_request +from ddtrace.ext import http +from ddtrace.pin import Pin + +from tests.opentracer.utils import init_tracer + +from ...base import BaseTracerTestCase +from ...util import override_global_tracer +from ...utils import assert_span_http_status_code 
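+# urllib2 on Python 2 and urllib.request on Python 3 expose the same helpers under different module paths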
+ +if PY2: + from urllib2 import urlopen, build_opener, Request +else: + from urllib.request import urlopen, build_opener, Request + + +# socket name comes from https://english.stackexchange.com/a/44048 +SOCKET = 'httpbin.org' +URL_200 = 'http://{}/status/200'.format(SOCKET) +URL_500 = 'http://{}/status/500'.format(SOCKET) +URL_404 = 'http://{}/status/404'.format(SOCKET) + + +# Base test mixin for shared tests between Py2 and Py3 +class HTTPLibBaseMixin(object): + SPAN_NAME = 'httplib.request' if PY2 else 'http.client.request' + + def to_str(self, value): + return value.decode('utf-8') + + def setUp(self): + super(HTTPLibBaseMixin, self).setUp() + + patch() + Pin.override(httplib, tracer=self.tracer) + + def tearDown(self): + unpatch() + + super(HTTPLibBaseMixin, self).tearDown() + + +# Main test cases for httplib/http.client and urllib2/urllib.request +class HTTPLibTestCase(HTTPLibBaseMixin, BaseTracerTestCase): + SPAN_NAME = 'httplib.request' if PY2 else 'http.client.request' + + def to_str(self, value): + """Helper method to decode a string or byte object to a string""" + return value.decode('utf-8') + + def get_http_connection(self, *args, **kwargs): + conn = httplib.HTTPConnection(*args, **kwargs) + Pin.override(conn, tracer=self.tracer) + return conn + + def get_https_connection(self, *args, **kwargs): + conn = httplib.HTTPSConnection(*args, **kwargs) + Pin.override(conn, tracer=self.tracer) + return conn + + def test_patch(self): + """ + When patching httplib + we patch the correct module/methods + """ + self.assertIsInstance(httplib.HTTPConnection.__init__, wrapt.BoundFunctionWrapper) + self.assertIsInstance(httplib.HTTPConnection.putrequest, wrapt.BoundFunctionWrapper) + self.assertIsInstance(httplib.HTTPConnection.getresponse, wrapt.BoundFunctionWrapper) + + def test_unpatch(self): + """ + When unpatching httplib + we restore the correct module/methods + """ + original_init = httplib.HTTPConnection.__init__.__wrapped__ + original_putrequest = httplib.HTTPConnection.putrequest.__wrapped__ + original_getresponse = httplib.HTTPConnection.getresponse.__wrapped__ + unpatch() + + self.assertEqual(httplib.HTTPConnection.__init__, original_init) + self.assertEqual(httplib.HTTPConnection.putrequest, original_putrequest) + self.assertEqual(httplib.HTTPConnection.getresponse, original_getresponse) + + def test_should_skip_request(self): + """ + When calling should_skip_request + with an enabled Pin and non-internal request + returns False + with a disabled Pin and non-internal request + returns True + with an enabled Pin and internal request + returns True + with a disabled Pin and internal request + returns True + """ + # Enabled Pin and non-internal request + self.tracer.enabled = True + request = self.get_http_connection(SOCKET) + pin = Pin.get_from(request) + self.assertFalse(should_skip_request(pin, request)) + + # Disabled Pin and non-internal request + self.tracer.enabled = False + request = self.get_http_connection(SOCKET) + pin = Pin.get_from(request) + self.assertTrue(should_skip_request(pin, request)) + + # Enabled Pin and internal request + self.tracer.enabled = True + request = self.get_http_connection(self.tracer.writer.api.hostname, self.tracer.writer.api.port) + pin = Pin.get_from(request) + self.assertTrue(should_skip_request(pin, request)) + + # Disabled Pin and internal request + self.tracer.enabled = False + request = self.get_http_connection(self.tracer.writer.api.hostname, self.tracer.writer.api.port) + pin = Pin.get_from(request) + 
self.assertTrue(should_skip_request(pin, request))
+
+    def test_httplib_request_get_request(self, query_string=''):
+        """
+        When making a GET request via httplib.HTTPConnection.request
+            we return the original response
+            we capture a span for the request
+        """
+        if query_string:
+            fqs = '?' + query_string
+        else:
+            fqs = ''
+        conn = self.get_http_connection(SOCKET)
+        with contextlib.closing(conn):
+            conn.request('GET', '/status/200' + fqs)
+            resp = conn.getresponse()
+            self.assertEqual(self.to_str(resp.read()), '')
+            self.assertEqual(resp.status, 200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        span = spans[0]
+        self.assertEqual(span.span_type, 'http')
+        self.assertIsNone(span.service)
+        self.assertEqual(span.name, self.SPAN_NAME)
+        self.assertEqual(span.error, 0)
+        assert span.get_tag('http.method') == 'GET'
+        assert span.get_tag('http.url') == URL_200
+        assert_span_http_status_code(span, 200)
+        if config.httplib.trace_query_string:
+            assert span.get_tag(http.QUERY_STRING) == query_string
+        else:
+            assert http.QUERY_STRING not in span.meta
+
+    def test_httplib_request_get_request_qs(self):
+        with self.override_http_config('httplib', dict(trace_query_string=True)):
+            return self.test_httplib_request_get_request('foo=bar')
+
+    def test_httplib_request_get_request_multiqs(self):
+        with self.override_http_config('httplib', dict(trace_query_string=True)):
+            return self.test_httplib_request_get_request('foo=bar&foo=baz&x=y')
+
+    def test_httplib_request_get_request_https(self):
+        """
+        When making a GET request via httplib.HTTPConnection.request
+            when making an HTTPS connection
+            we return the original response
+            we capture a span for the request
+        """
+        conn = self.get_https_connection('httpbin.org')
+        with contextlib.closing(conn):
+            conn.request('GET', '/status/200')
+            resp = conn.getresponse()
+            self.assertEqual(self.to_str(resp.read()), '')
+            self.assertEqual(resp.status, 200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        span = spans[0]
+        self.assertEqual(span.span_type, 'http')
+        self.assertIsNone(span.service)
+        self.assertEqual(span.name, self.SPAN_NAME)
+        self.assertEqual(span.error, 0)
+        assert span.get_tag('http.method') == 'GET'
+        assert_span_http_status_code(span, 200)
+        assert span.get_tag('http.url') == 'https://httpbin.org/status/200'
+
+    def test_httplib_request_post_request(self):
+        """
+        When making a POST request via httplib.HTTPConnection.request
+            we return the original response
+            we capture a span for the request
+        """
+        conn = self.get_http_connection(SOCKET)
+        with contextlib.closing(conn):
+            conn.request('POST', '/status/200', body='key=value')
+            resp = conn.getresponse()
+            self.assertEqual(self.to_str(resp.read()), '')
+            self.assertEqual(resp.status, 200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        span = spans[0]
+        self.assertEqual(span.span_type, 'http')
+        self.assertIsNone(span.service)
+        self.assertEqual(span.name, self.SPAN_NAME)
+        self.assertEqual(span.error, 0)
+        assert span.get_tag('http.method') == 'POST'
+        assert_span_http_status_code(span, 200)
+        assert span.get_tag('http.url') == URL_200
+
+    def test_httplib_request_get_request_query_string(self):
+        """
+        When making a GET request with a query string via httplib.HTTPConnection.request
+            we capture all of the url in the span except for the query string
+        """
+        conn = self.get_http_connection(SOCKET)
+        with contextlib.closing(conn):
+            conn.request('GET', '/status/200?key=value&key2=value2')
+            resp = conn.getresponse()
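+            # with trace_query_string off (the default), the query string should be dropped from http.url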
+            self.assertEqual(self.to_str(resp.read()), '')
+            self.assertEqual(resp.status, 200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        span = spans[0]
+        self.assertEqual(span.span_type, 'http')
+        self.assertIsNone(span.service)
+        self.assertEqual(span.name, self.SPAN_NAME)
+        self.assertEqual(span.error, 0)
+        assert span.get_tag('http.method') == 'GET'
+        assert_span_http_status_code(span, 200)
+        assert span.get_tag('http.url') == URL_200
+
+    def test_httplib_request_500_request(self):
+        """
+        When making a GET request via httplib.HTTPConnection.request
+            when the response is a 500
+            we raise the original exception
+            we mark the span as an error
+            we capture the correct span tags
+        """
+        try:
+            conn = self.get_http_connection(SOCKET)
+            with contextlib.closing(conn):
+                conn.request('GET', '/status/500')
+                conn.getresponse()
+        except httplib.HTTPException:
+            resp = sys.exc_info()[1]
+            self.assertEqual(self.to_str(resp.read()), '500 Internal Server Error')
+            self.assertEqual(resp.status, 500)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        span = spans[0]
+        self.assertEqual(span.span_type, 'http')
+        self.assertIsNone(span.service)
+        self.assertEqual(span.name, self.SPAN_NAME)
+        self.assertEqual(span.error, 1)
+        self.assertEqual(span.get_tag('http.method'), 'GET')
+        assert_span_http_status_code(span, 500)
+        self.assertEqual(span.get_tag('http.url'), URL_500)
+
+    def test_httplib_request_non_200_request(self):
+        """
+        When making a GET request via httplib.HTTPConnection.request
+            when the response is a non-200
+            we raise the original exception
+            we mark the span as an error
+            we capture the correct span tags
+        """
+        try:
+            conn = self.get_http_connection(SOCKET)
+            with contextlib.closing(conn):
+                conn.request('GET', '/status/404')
+                conn.getresponse()
+        except httplib.HTTPException:
+            resp = sys.exc_info()[1]
+            self.assertEqual(self.to_str(resp.read()), '404 Not Found')
+            self.assertEqual(resp.status, 404)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        span = spans[0]
+        self.assertEqual(span.span_type, 'http')
+        self.assertIsNone(span.service)
+        self.assertEqual(span.name, self.SPAN_NAME)
+        self.assertEqual(span.error, 0)
+        self.assertEqual(span.get_tag('http.method'), 'GET')
+        assert_span_http_status_code(span, 404)
+        self.assertEqual(span.get_tag('http.url'), URL_404)
+
+    def test_httplib_request_get_request_disabled(self):
+        """
+        When making a GET request via httplib.HTTPConnection.request
+            when the tracer is disabled
+            we do not capture any spans
+        """
+        self.tracer.enabled = False
+        conn = self.get_http_connection(SOCKET)
+        with contextlib.closing(conn):
+            conn.request('GET', '/status/200')
+            resp = conn.getresponse()
+            self.assertEqual(self.to_str(resp.read()), '')
+            self.assertEqual(resp.status, 200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 0)
+
+    def test_httplib_request_get_request_disabled_and_enabled(self):
+        """
+        When making a GET request via httplib.HTTPConnection.request
+            when the tracer is disabled when the request starts
+            but re-enabled before the response is read
+            we still do not capture any spans
+        """
+        self.tracer.enabled = False
+        conn = self.get_http_connection(SOCKET)
+        with contextlib.closing(conn):
+            conn.request('GET', '/status/200')
+            self.tracer.enabled = True
+            resp = conn.getresponse()
+            self.assertEqual(self.to_str(resp.read()), '')
+            self.assertEqual(resp.status, 200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 0)
+
+    def test_httplib_request_and_response_headers(self):
+
+        # Disabled when not configured
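+        # no header tags are expected until trace_headers() whitelists them below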
+        conn = self.get_http_connection(SOCKET)
+        with contextlib.closing(conn):
+            conn.request('GET', '/status/200', headers={'my-header': 'my_value'})
+            conn.getresponse()
+        spans = self.tracer.writer.pop()
+        s = spans[0]
+        self.assertEqual(s.get_tag('http.request.headers.my_header'), None)
+        self.assertEqual(s.get_tag('http.response.headers.access_control_allow_origin'), None)
+
+        # Enabled when configured
+        with self.override_config('httplib', {}):
+            from ddtrace.settings import IntegrationConfig
+            integration_config = config.httplib  # type: IntegrationConfig
+            integration_config.http.trace_headers(['my-header', 'access-control-allow-origin'])
+            conn = self.get_http_connection(SOCKET)
+            with contextlib.closing(conn):
+                conn.request('GET', '/status/200', headers={'my-header': 'my_value'})
+                conn.getresponse()
+            spans = self.tracer.writer.pop()
+        s = spans[0]
+        self.assertEqual(s.get_tag('http.request.headers.my-header'), 'my_value')
+        self.assertEqual(s.get_tag('http.response.headers.access-control-allow-origin'), '*')
+
+    def test_urllib_request(self):
+        """
+        When making a request via urllib.request.urlopen
+            we return the original response
+            we capture a span for the request
+        """
+        with override_global_tracer(self.tracer):
+            resp = urlopen(URL_200)
+
+        self.assertEqual(self.to_str(resp.read()), '')
+        self.assertEqual(resp.getcode(), 200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        span = spans[0]
+        self.assertEqual(span.span_type, 'http')
+        self.assertIsNone(span.service)
+        self.assertEqual(span.name, self.SPAN_NAME)
+        self.assertEqual(span.error, 0)
+        self.assertEqual(span.get_tag('http.method'), 'GET')
+        assert_span_http_status_code(span, 200)
+        self.assertEqual(span.get_tag('http.url'), URL_200)
+
+    def test_urllib_request_https(self):
+        """
+        When making a request via urllib.request.urlopen
+            when making an HTTPS connection
+            we return the original response
+            we capture a span for the request
+        """
+        with override_global_tracer(self.tracer):
+            resp = urlopen('https://httpbin.org/status/200')
+
+        self.assertEqual(self.to_str(resp.read()), '')
+        self.assertEqual(resp.getcode(), 200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        span = spans[0]
+        self.assertEqual(span.span_type, 'http')
+        self.assertIsNone(span.service)
+        self.assertEqual(span.name, self.SPAN_NAME)
+        self.assertEqual(span.error, 0)
+        self.assertEqual(span.get_tag('http.method'), 'GET')
+        assert_span_http_status_code(span, 200)
+        self.assertEqual(span.get_tag('http.url'), 'https://httpbin.org/status/200')
+
+    def test_urllib_request_object(self):
+        """
+        When making a request via urllib.request.urlopen
+            with a urllib.request.Request object
+            we return the original response
+            we capture a span for the request
+        """
+        req = Request(URL_200)
+        with override_global_tracer(self.tracer):
+            resp = urlopen(req)
+
+        self.assertEqual(self.to_str(resp.read()), '')
+        self.assertEqual(resp.getcode(), 200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        span = spans[0]
+        self.assertEqual(span.span_type, 'http')
+        self.assertIsNone(span.service)
+        self.assertEqual(span.name, self.SPAN_NAME)
+        self.assertEqual(span.error, 0)
+        self.assertEqual(span.get_tag('http.method'), 'GET')
+        assert_span_http_status_code(span, 200)
+        self.assertEqual(span.get_tag('http.url'), URL_200)
+
+    def test_urllib_request_opener(self):
+        """
+        When making a request via urllib.request.OpenerDirector
+            we return the original response
+            we capture a span for the request
+        """
opener = build_opener() + with override_global_tracer(self.tracer): + resp = opener.open(URL_200) + + self.assertEqual(self.to_str(resp.read()), '') + self.assertEqual(resp.getcode(), 200) + + spans = self.tracer.writer.pop() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.span_type, 'http') + self.assertIsNone(span.service) + self.assertEqual(span.name, self.SPAN_NAME) + self.assertEqual(span.error, 0) + self.assertEqual(span.get_tag('http.method'), 'GET') + assert_span_http_status_code(span, 200) + self.assertEqual(span.get_tag('http.url'), URL_200) + + def test_httplib_request_get_request_ot(self): + """ OpenTracing version of test with same name. """ + ot_tracer = init_tracer('my_svc', self.tracer) + + with ot_tracer.start_active_span('ot_span'): + conn = self.get_http_connection(SOCKET) + with contextlib.closing(conn): + conn.request('GET', '/status/200') + resp = conn.getresponse() + self.assertEqual(self.to_str(resp.read()), '') + self.assertEqual(resp.status, 200) + + spans = self.tracer.writer.pop() + self.assertEqual(len(spans), 2) + ot_span, dd_span = spans + + # confirm the parenting + self.assertEqual(ot_span.parent_id, None) + self.assertEqual(dd_span.parent_id, ot_span.span_id) + + self.assertEqual(ot_span.service, 'my_svc') + self.assertEqual(ot_span.name, 'ot_span') + + self.assertEqual(dd_span.span_type, 'http') + self.assertEqual(dd_span.name, self.SPAN_NAME) + self.assertEqual(dd_span.error, 0) + assert dd_span.get_tag('http.method') == 'GET' + assert_span_http_status_code(dd_span, 200) + assert dd_span.get_tag('http.url') == URL_200 + + def test_analytics_default(self): + conn = self.get_http_connection(SOCKET) + with contextlib.closing(conn): + conn.request('GET', '/status/200') + resp = conn.getresponse() + self.assertEqual(self.to_str(resp.read()), '') + self.assertEqual(resp.status, 200) + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with self.override_config( + 'httplib', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + conn = self.get_http_connection(SOCKET) + with contextlib.closing(conn): + conn.request('GET', '/status/200') + resp = conn.getresponse() + self.assertEqual(self.to_str(resp.read()), '') + self.assertEqual(resp.status, 200) + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with self.override_config( + 'httplib', + dict(analytics_enabled=True) + ): + conn = self.get_http_connection(SOCKET) + with contextlib.closing(conn): + conn.request('GET', '/status/200') + resp = conn.getresponse() + self.assertEqual(self.to_str(resp.read()), '') + self.assertEqual(resp.status, 200) + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) + + +# Additional Python2 test cases for urllib +if PY2: + import urllib + + class HTTPLibPython2Test(HTTPLibBaseMixin, BaseTracerTestCase): + def test_urllib_request(self): + """ + When making a request via urllib.urlopen + we return the original response + we capture a span for the request + """ + with override_global_tracer(self.tracer): + resp = urllib.urlopen(URL_200) + + self.assertEqual(resp.read(), '') + self.assertEqual(resp.getcode(), 200) + + spans = self.tracer.writer.pop() + self.assertEqual(len(spans), 1) + span = spans[0] + 
self.assertEqual(span.span_type, 'http') + self.assertIsNone(span.service) + self.assertEqual(span.name, 'httplib.request') + self.assertEqual(span.error, 0) + self.assertEqual(span.get_tag('http.method'), 'GET') + assert_span_http_status_code(span, 200) + self.assertEqual(span.get_tag('http.url'), URL_200) + + def test_urllib_request_https(self): + """ + When making a request via urllib.urlopen + when making an HTTPS connection + we return the original response + we capture a span for the request + """ + with override_global_tracer(self.tracer): + resp = urllib.urlopen('https://httpbin.org/status/200') + + self.assertEqual(resp.read(), '') + self.assertEqual(resp.getcode(), 200) + + spans = self.tracer.writer.pop() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.span_type, 'http') + self.assertIsNone(span.service) + self.assertEqual(span.name, 'httplib.request') + self.assertEqual(span.error, 0) + self.assertEqual(span.get_tag('http.method'), 'GET') + assert_span_http_status_code(span, 200) + self.assertEqual(span.get_tag('http.url'), 'https://httpbin.org/status/200') diff --git a/tests/contrib/jinja2/templates/base.html b/tests/contrib/jinja2/templates/base.html new file mode 100644 index 0000000000..05490d0c02 --- /dev/null +++ b/tests/contrib/jinja2/templates/base.html @@ -0,0 +1 @@ +Message: {% block content %}{% endblock %} diff --git a/tests/contrib/jinja2/templates/template.html b/tests/contrib/jinja2/templates/template.html new file mode 100644 index 0000000000..ab28182415 --- /dev/null +++ b/tests/contrib/jinja2/templates/template.html @@ -0,0 +1,2 @@ +{% extends 'base.html' %} +{% block content %}Hello {{name}}!{% endblock %} diff --git a/tests/contrib/jinja2/test_jinja2.py b/tests/contrib/jinja2/test_jinja2.py new file mode 100644 index 0000000000..f7d1d0eea6 --- /dev/null +++ b/tests/contrib/jinja2/test_jinja2.py @@ -0,0 +1,120 @@ +import os.path +import unittest + +# 3rd party +import jinja2 + +from ddtrace import Pin, config +from ddtrace.contrib.jinja2 import patch, unpatch +from tests.test_tracer import get_dummy_tracer + +TEST_DIR = os.path.dirname(os.path.realpath(__file__)) +TMPL_DIR = os.path.join(TEST_DIR, 'templates') + + +class Jinja2Test(unittest.TestCase): + def setUp(self): + patch() + # prevent cache effects when using Template('code...') + jinja2.environment._spontaneous_environments.clear() + # provide a dummy tracer + self.tracer = get_dummy_tracer() + Pin.override(jinja2.environment.Environment, tracer=self.tracer) + + def tearDown(self): + # restore the tracer + unpatch() + + def test_render_inline_template(self): + t = jinja2.environment.Template('Hello {{name}}!') + assert t.render(name='Jinja') == 'Hello Jinja!' + + # tests + spans = self.tracer.writer.pop() + assert len(spans) == 2 + + for span in spans: + assert span.service is None + assert span.span_type == 'template' + assert span.get_tag('jinja2.template_name') == '' + + assert spans[0].name == 'jinja2.compile' + assert spans[1].name == 'jinja2.render' + + def test_generate_inline_template(self): + t = jinja2.environment.Template('Hello {{name}}!') + assert ''.join(t.generate(name='Jinja')) == 'Hello Jinja!' 
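+        # generate() streams output, but should still produce the same compile/render span pair as render()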
+
+        # tests
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 2
+
+        for span in spans:
+            assert span.service is None
+            assert span.span_type == 'template'
+            assert span.get_tag('jinja2.template_name') == ''
+
+        assert spans[0].name == 'jinja2.compile'
+        assert spans[1].name == 'jinja2.render'
+
+    def test_file_template(self):
+        loader = jinja2.loaders.FileSystemLoader(TMPL_DIR)
+        env = jinja2.Environment(loader=loader)
+        t = env.get_template('template.html')
+        assert t.render(name='Jinja') == 'Message: Hello Jinja!'
+
+        # tests
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 5
+
+        for span in spans:
+            assert span.span_type == 'template'
+            assert span.service is None
+
+        # template.html extends base.html
+        def get_def(s):
+            return s.name, s.get_tag('jinja2.template_name')
+
+        assert get_def(spans[0]) == ('jinja2.load', 'template.html')
+        assert get_def(spans[1]) == ('jinja2.compile', 'template.html')
+        assert get_def(spans[2]) == ('jinja2.render', 'template.html')
+        assert get_def(spans[3]) == ('jinja2.load', 'base.html')
+        assert get_def(spans[4]) == ('jinja2.compile', 'base.html')
+
+        # additional checks for jinja2.load
+        assert spans[0].get_tag('jinja2.template_path') == os.path.join(TMPL_DIR, 'template.html')
+        assert spans[3].get_tag('jinja2.template_path') == os.path.join(TMPL_DIR, 'base.html')
+
+    def test_service_name(self):
+        # don't inherit the service name from the parent span, but force the value
+        loader = jinja2.loaders.FileSystemLoader(TMPL_DIR)
+        env = jinja2.Environment(loader=loader)
+
+        cfg = config.get_from(env)
+        cfg['service_name'] = 'renderer'
+
+        t = env.get_template('template.html')
+        assert t.render(name='Jinja') == 'Message: Hello Jinja!'
+
+        # tests
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 5
+
+        for span in spans:
+            assert span.service == 'renderer'
+
+    def test_inherit_service(self):
+        # When there is a parent span and no custom service_name, the service name is inherited
+        loader = jinja2.loaders.FileSystemLoader(TMPL_DIR)
+        env = jinja2.Environment(loader=loader)
+
+        with self.tracer.trace('parent.span', service='web'):
+            t = env.get_template('template.html')
+            assert t.render(name='Jinja') == 'Message: Hello Jinja!'
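+        # every template span created under the parent should inherit its 'web' service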
+
+        # tests
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 6
+
+        for span in spans:
+            assert span.service == 'web'
diff --git a/tests/contrib/kombu/__init__.py b/tests/contrib/kombu/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/contrib/kombu/test.py b/tests/contrib/kombu/test.py
new file mode 100644
index 0000000000..82fd8a93ab
--- /dev/null
+++ b/tests/contrib/kombu/test.py
@@ -0,0 +1,116 @@
+# -*- coding: utf-8 -*-
+import kombu
+
+from ddtrace import Pin
+from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
+from ddtrace.contrib.kombu.patch import patch, unpatch
+from ddtrace.contrib.kombu import utils
+from ddtrace.ext import kombu as kombux
+from ..config import RABBITMQ_CONFIG
+from ...base import BaseTracerTestCase
+
+
+class TestKombuPatch(BaseTracerTestCase):
+
+    TEST_SERVICE = 'kombu-patch'
+    TEST_PORT = RABBITMQ_CONFIG['port']
+
+    def setUp(self):
+        super(TestKombuPatch, self).setUp()
+
+        conn = kombu.Connection('amqp://guest:guest@127.0.0.1:{p}//'.format(p=self.TEST_PORT))
+        conn.connect()
+        producer = conn.Producer()
+        Pin.override(producer, service=self.TEST_SERVICE, tracer=self.tracer)
+
+        self.conn = conn
+        self.producer = producer
+
+        patch()
+
+    def tearDown(self):
+        unpatch()
+
+        super(TestKombuPatch, self).tearDown()
+
+    def test_basics(self):
+        self._publish_consume()
+        self._assert_spans()
+
+    def test_extract_conn_tags(self):
+        result = utils.extract_conn_tags(self.conn)
+        assert result['out.host'] == '127.0.0.1'
+        assert result['out.port'] == str(self.TEST_PORT)
+
+    def _publish_consume(self):
+        results = []
+
+        def process_message(body, message):
+            results.append(body)
+            message.ack()
+
+        task_queue = kombu.Queue('tasks', kombu.Exchange('tasks'), routing_key='tasks')
+        to_publish = {'hello': 'world'}
+        self.producer.publish(to_publish,
+                              exchange=task_queue.exchange,
+                              routing_key=task_queue.routing_key,
+                              declare=[task_queue])
+
+        with kombu.Consumer(self.conn, [task_queue], accept=['json'], callbacks=[process_message]) as consumer:
+            Pin.override(consumer, service='kombu-patch', tracer=self.tracer)
+            self.conn.drain_events(timeout=2)
+
+        self.assertEqual(results[0], to_publish)
+
+    def _assert_spans(self):
+        """Tests both producer and consumer tracing"""
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 2)
+        # the publish (producer-side) span is emitted first
+        producer_span = spans[0]
+        self.assertEqual(producer_span.service, self.TEST_SERVICE)
+        self.assertEqual(producer_span.name, kombux.PUBLISH_NAME)
+        self.assertEqual(producer_span.span_type, 'worker')
+        self.assertEqual(producer_span.error, 0)
+        self.assertEqual(producer_span.get_tag('out.vhost'), '/')
+        self.assertEqual(producer_span.get_tag('out.host'), '127.0.0.1')
+        self.assertEqual(producer_span.get_tag('kombu.exchange'), u'tasks')
+        self.assertEqual(producer_span.get_metric('kombu.body_length'), 18)
+        self.assertEqual(producer_span.get_tag('kombu.routing_key'), u'tasks')
+        self.assertEqual(producer_span.resource, 'tasks')
+
+        # the receive (consumer-side) span follows
+        consumer_span = spans[1]
+        self.assertEqual(consumer_span.service, self.TEST_SERVICE)
+        self.assertEqual(consumer_span.name, kombux.RECEIVE_NAME)
+        self.assertEqual(consumer_span.span_type, 'worker')
+        self.assertEqual(consumer_span.error, 0)
+        self.assertEqual(consumer_span.get_tag('kombu.exchange'), u'tasks')
+        self.assertEqual(consumer_span.get_tag('kombu.routing_key'), u'tasks')
+
+    def test_analytics_default(self):
+        self._publish_consume()
+
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 2)
+        self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
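+    # The analytics tests below mirror the other integrations: no metric by default,
+    # the configured rate when set, and 1.0 when enabled without an explicit rate.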
+    def test_analytics_with_rate(self):
+        with self.override_config(
+            'kombu',
+            dict(analytics_enabled=True, analytics_sample_rate=0.5)
+        ):
+            self._publish_consume()
+
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 2)
+        self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
+
+    def test_analytics_without_rate(self):
+        with self.override_config(
+            'kombu',
+            dict(analytics_enabled=True)
+        ):
+            self._publish_consume()
+
+        spans = self.get_spans()
+        self.assertEqual(len(spans), 2)
+        self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
diff --git a/tests/contrib/logging/__init__.py b/tests/contrib/logging/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/contrib/logging/test_logging.py b/tests/contrib/logging/test_logging.py
new file mode 100644
index 0000000000..b236b2bcef
--- /dev/null
+++ b/tests/contrib/logging/test_logging.py
@@ -0,0 +1,95 @@
+import logging
+
+from ddtrace.helpers import get_correlation_ids
+from ddtrace.compat import StringIO
+from ddtrace.contrib.logging import patch, unpatch
+from ddtrace.vendor import wrapt
+
+from ...base import BaseTracerTestCase
+
+
+logger = logging.getLogger()
+logger.level = logging.INFO
+
+
+def capture_function_log(func, fmt):
+    # add stream handler to capture output
+    out = StringIO()
+    sh = logging.StreamHandler(out)
+
+    try:
+        formatter = logging.Formatter(fmt)
+        sh.setFormatter(formatter)
+        logger.addHandler(sh)
+        result = func()
+    finally:
+        logger.removeHandler(sh)
+
+    return out.getvalue().strip(), result
+
+
+class LoggingTestCase(BaseTracerTestCase):
+    def setUp(self):
+        patch()
+        super(LoggingTestCase, self).setUp()
+
+    def tearDown(self):
+        unpatch()
+        super(LoggingTestCase, self).tearDown()
+
+    def test_patch(self):
+        """
+        Confirm patching was successful
+        """
+        patch()
+        log = logging.getLogger()
+        self.assertTrue(isinstance(log.makeRecord, wrapt.BoundFunctionWrapper))
+
+    def test_log_trace(self):
+        """
+        Check that logging is patched and the formatter includes trace info
+        when the log call happens inside a traced function
+        """
+        @self.tracer.wrap()
+        def func():
+            logger.info('Hello!')
+            return get_correlation_ids(tracer=self.tracer)
+
+        with self.override_config('logging', dict(tracer=self.tracer)):
+            # with format string for trace info
+            output, result = capture_function_log(
+                func,
+                fmt='%(message)s - dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s',
+            )
+            self.assertEqual(
+                output,
+                'Hello! - dd.trace_id={} dd.span_id={}'.format(*result),
+            )
+
+            # without format string
+            output, _ = capture_function_log(
+                func,
+                fmt='%(message)s',
+            )
+            self.assertEqual(
+                output,
+                'Hello!',
+            )
+
+    def test_log_no_trace(self):
+        """
+        Check that logging is patched and the formatter reports zeroed
+        correlation ids when the function is not traced
+        """
+        def func():
+            logger.info('Hello!')
+            return get_correlation_ids()
+
+        with self.override_config('logging', dict(tracer=self.tracer)):
+            # with format string for trace info
+            output, _ = capture_function_log(
+                func,
+                fmt='%(message)s - dd.trace_id=%(dd.trace_id)s dd.span_id=%(dd.span_id)s',
+            )
+            self.assertEqual(
+                output,
+                'Hello! - dd.trace_id=0 dd.span_id=0',
+            )
diff --git a/tests/contrib/mako/templates/template.html b/tests/contrib/mako/templates/template.html
new file mode 100644
index 0000000000..62d75dc53d
--- /dev/null
+++ b/tests/contrib/mako/templates/template.html
@@ -0,0 +1 @@
+Hello ${name}!
diff --git a/tests/contrib/mako/test_mako.py b/tests/contrib/mako/test_mako.py new file mode 100644 index 0000000000..b663fbcb1a --- /dev/null +++ b/tests/contrib/mako/test_mako.py @@ -0,0 +1,80 @@ +import os.path +import unittest + +# 3rd party +from mako.template import Template +from mako.lookup import TemplateLookup +from mako.runtime import Context + +from ddtrace import Pin +from ddtrace.contrib.mako import patch, unpatch +from ddtrace.compat import StringIO, to_unicode +from tests.test_tracer import get_dummy_tracer + +TEST_DIR = os.path.dirname(os.path.realpath(__file__)) +TMPL_DIR = os.path.join(TEST_DIR, 'templates') + + +class MakoTest(unittest.TestCase): + def setUp(self): + patch() + self.tracer = get_dummy_tracer() + Pin.override(Template, tracer=self.tracer) + + def tearDown(self): + unpatch() + + def test_render(self): + # render + t = Template('Hello ${name}!') + self.assertEqual(t.render(name='mako'), 'Hello mako!') + + spans = self.tracer.writer.pop() + self.assertEqual(len(spans), 1) + + self.assertEqual(spans[0].service, 'mako') + self.assertEqual(spans[0].span_type, 'template') + self.assertEqual(spans[0].get_tag('mako.template_name'), '') + self.assertEqual(spans[0].name, 'mako.template.render') + self.assertEqual(spans[0].resource, '') + + # render_unicode + t = Template('Hello ${name}!') + self.assertEqual(t.render_unicode(name='mako'), to_unicode('Hello mako!')) + spans = self.tracer.writer.pop() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].service, 'mako') + self.assertEqual(spans[0].span_type, 'template') + self.assertEqual(spans[0].get_tag('mako.template_name'), '') + self.assertEqual(spans[0].name, 'mako.template.render_unicode') + self.assertEqual(spans[0].resource, '') + + # render_context + t = Template('Hello ${name}!') + buf = StringIO() + c = Context(buf, name='mako') + t.render_context(c) + self.assertEqual(buf.getvalue(), 'Hello mako!') + spans = self.tracer.writer.pop() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].service, 'mako') + self.assertEqual(spans[0].span_type, 'template') + self.assertEqual(spans[0].get_tag('mako.template_name'), '') + self.assertEqual(spans[0].name, 'mako.template.render_context') + self.assertEqual(spans[0].resource, '') + + def test_file_template(self): + tmpl_lookup = TemplateLookup(directories=[TMPL_DIR]) + t = tmpl_lookup.get_template('template.html') + self.assertEqual(t.render(name='mako'), 'Hello mako!\n') + + spans = self.tracer.writer.pop() + self.assertEqual(len(spans), 1) + + template_name = os.path.join(TMPL_DIR, 'template.html') + + self.assertEqual(spans[0].span_type, 'template') + self.assertEqual(spans[0].service, 'mako') + self.assertEqual(spans[0].get_tag('mako.template_name'), template_name) + self.assertEqual(spans[0].name, 'mako.template.render') + self.assertEqual(spans[0].resource, template_name) diff --git a/tests/contrib/molten/__init__.py b/tests/contrib/molten/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/molten/test_molten.py b/tests/contrib/molten/test_molten.py new file mode 100644 index 0000000000..8564234bd5 --- /dev/null +++ b/tests/contrib/molten/test_molten.py @@ -0,0 +1,315 @@ +import molten +from molten.testing import TestClient + +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.ext import errors, http +from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID, HTTP_HEADER_PARENT_ID +from ddtrace.contrib.molten import patch, unpatch +from 
ddtrace.contrib.molten.patch import MOLTEN_VERSION
+
+from ...base import BaseTracerTestCase
+from ...utils import assert_span_http_status_code
+
+
+# NOTE: Type annotations required by molten otherwise parameters cannot be coerced
+def hello(name: str, age: int) -> str:
+    return f'Hello {age} year old named {name}!'
+
+
+def molten_client(headers=None, params=None):
+    app = molten.App(routes=[molten.Route('/hello/{name}/{age}', hello)])
+    client = TestClient(app)
+    uri = app.reverse_uri('hello', name='Jim', age=24)
+    return client.request('GET', uri, headers=headers, params=params)
+
+
+class TestMolten(BaseTracerTestCase):
+    """Ensures Molten is properly instrumented."""
+
+    TEST_SERVICE = 'molten-patch'
+
+    def setUp(self):
+        super(TestMolten, self).setUp()
+        patch()
+        Pin.override(molten, tracer=self.tracer)
+
+    def tearDown(self):
+        super(TestMolten, self).tearDown()
+        unpatch()
+
+    def test_route_success(self):
+        """ Tests request was a success with the expected span tags """
+        response = molten_client()
+        spans = self.tracer.writer.pop()
+        self.assertEqual(response.status_code, 200)
+        # TestResponse from TestClient is wrapper around Response so we must
+        # access data property
+        self.assertEqual(response.data, '"Hello 24 year old named Jim!"')
+        span = spans[0]
+        self.assertEqual(span.service, 'molten')
+        self.assertEqual(span.name, 'molten.request')
+        self.assertEqual(span.span_type, 'web')
+        self.assertEqual(span.resource, 'GET /hello/{name}/{age}')
+        self.assertEqual(span.get_tag('http.method'), 'GET')
+        self.assertEqual(span.get_tag(http.URL), 'http://127.0.0.1:8000/hello/Jim/24')
+        assert_span_http_status_code(span, 200)
+        assert http.QUERY_STRING not in span.meta
+
+        # See test_resources below for specifics of this difference
+        if MOLTEN_VERSION >= (0, 7, 2):
+            self.assertEqual(len(spans), 18)
+        else:
+            self.assertEqual(len(spans), 16)
+
+        # test override of service name
+        Pin.override(molten, service=self.TEST_SERVICE)
+        response = molten_client()
+        spans = self.tracer.writer.pop()
+        self.assertEqual(spans[0].service, 'molten-patch')
+
+    def test_route_success_query_string(self):
+        with self.override_http_config('molten', dict(trace_query_string=True)):
+            response = molten_client(params={'foo': 'bar'})
+        spans = self.tracer.writer.pop()
+        self.assertEqual(response.status_code, 200)
+        # TestResponse from TestClient is wrapper around Response so we must
+        # access data property
+        self.assertEqual(response.data, '"Hello 24 year old named Jim!"')
+        span = spans[0]
+        self.assertEqual(span.service, 'molten')
+        self.assertEqual(span.name, 'molten.request')
+        self.assertEqual(span.resource, 'GET /hello/{name}/{age}')
+        self.assertEqual(span.get_tag('http.method'), 'GET')
+        self.assertEqual(span.get_tag(http.URL), 'http://127.0.0.1:8000/hello/Jim/24')
+        assert_span_http_status_code(span, 200)
+        self.assertEqual(span.get_tag(http.QUERY_STRING), 'foo=bar')
+
+    def test_analytics_global_on_integration_default(self):
+        """
+        When making a request
+            When integration trace search is not configured and sample rate is not set and globally trace search is enabled
+            We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=True)):
+            response = molten_client()
+            self.assertEqual(response.status_code, 200)
+            # TestResponse from TestClient is wrapper around Response so we must
+            # access data property
+            self.assertEqual(response.data, '"Hello 24 year old named Jim!"')
+
+        root_span = self.get_root_span()
+        root_span.assert_matches(
+            name='molten.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 1.0},
+        )
+
+    def test_analytics_global_on_integration_on(self):
+        """
+        When making a request
+            When integration trace search is enabled with a set sample rate and globally trace search is enabled
+            We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=True)):
+            with self.override_config('molten', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
+                response = molten_client()
+                self.assertEqual(response.status_code, 200)
+                # TestResponse from TestClient is wrapper around Response so we must
+                # access data property
+                self.assertEqual(response.data, '"Hello 24 year old named Jim!"')
+
+        root_span = self.get_root_span()
+        root_span.assert_matches(
+            name='molten.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5},
+        )
+
+    def test_analytics_global_off_integration_default(self):
+        """
+        When making a request
+            When integration trace search is not configured and sample rate is not set and globally trace search is disabled
+            We expect the root span to not include the tag
+        """
+        with self.override_global_config(dict(analytics_enabled=False)):
+            response = molten_client()
+            self.assertEqual(response.status_code, 200)
+            # TestResponse from TestClient is wrapper around Response so we must
+            # access data property
+            self.assertEqual(response.data, '"Hello 24 year old named Jim!"')
+
+        root_span = self.get_root_span()
+        self.assertIsNone(root_span.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    def test_analytics_global_off_integration_on(self):
+        """
+        When making a request
+            When integration trace search is enabled with a set sample rate and globally trace search is disabled
+            We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=False)):
+            with self.override_config('molten', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
+                response = molten_client()
+                self.assertEqual(response.status_code, 200)
+                # TestResponse from TestClient is wrapper around Response so we must
+                # access data property
+                self.assertEqual(response.data, '"Hello 24 year old named Jim!"')
+
+        root_span = self.get_root_span()
+        root_span.assert_matches(
+            name='molten.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5},
+        )
+
+    def test_route_failure(self):
+        app = molten.App(routes=[molten.Route('/hello/{name}/{age}', hello)])
+        client = TestClient(app)
+        response = client.get('/goodbye')
+        spans = self.tracer.writer.pop()
+        self.assertEqual(response.status_code, 404)
+        span = spans[0]
+        self.assertEqual(span.service, 'molten')
+        self.assertEqual(span.name, 'molten.request')
+        self.assertEqual(span.resource, 'GET 404')
+        self.assertEqual(span.get_tag(http.URL), 'http://127.0.0.1:8000/goodbye')
+        self.assertEqual(span.get_tag('http.method'), 'GET')
+        assert_span_http_status_code(span, 404)
+
+    def test_route_exception(self):
+        def route_error() -> str:
+            raise Exception('Error message')
+        app = molten.App(routes=[molten.Route('/error', route_error)])
+        client = TestClient(app)
+        response = client.get('/error')
+        spans = self.tracer.writer.pop()
+        self.assertEqual(response.status_code, 500)
+        span = spans[0]
+        route_error_span = spans[-1]
+        self.assertEqual(span.service, 'molten')
+        self.assertEqual(span.name, 'molten.request')
+        self.assertEqual(span.resource, 'GET /error')
+        self.assertEqual(span.error, 1)
+        # error tags only set for route function span and not root span
+        self.assertIsNone(span.get_tag(errors.ERROR_MSG))
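+        # the exception details should be recorded on the innermost route-function span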
+        self.assertEqual(route_error_span.get_tag(errors.ERROR_MSG), 'Error message')
+
+    def test_resources(self):
+        """ Tests request has expected span resources """
+        molten_client()
+        spans = self.tracer.writer.pop()
+
+        # `can_handle_parameter` appears twice since two parameters are in request
+        # TODO[tahir]: missing `resolve` method for components
+
+        expected = [
+            'GET /hello/{name}/{age}',
+            'molten.middleware.ResponseRendererMiddleware',
+            'molten.components.HeaderComponent.can_handle_parameter',
+            'molten.components.CookiesComponent.can_handle_parameter',
+            'molten.components.QueryParamComponent.can_handle_parameter',
+            'molten.components.RequestBodyComponent.can_handle_parameter',
+            'molten.components.RequestDataComponent.can_handle_parameter',
+            'molten.components.SchemaComponent.can_handle_parameter',
+            'molten.components.UploadedFileComponent.can_handle_parameter',
+            'molten.components.HeaderComponent.can_handle_parameter',
+            'molten.components.CookiesComponent.can_handle_parameter',
+            'molten.components.QueryParamComponent.can_handle_parameter',
+            'molten.components.RequestBodyComponent.can_handle_parameter',
+            'molten.components.RequestDataComponent.can_handle_parameter',
+            'molten.components.SchemaComponent.can_handle_parameter',
+            'molten.components.UploadedFileComponent.can_handle_parameter',
+            'tests.contrib.molten.test_molten.hello',
+            'molten.renderers.JSONRenderer.render'
+        ]
+
+        # Addition of `UploadedFileComponent` in 0.7.2 changes expected spans
+        if MOLTEN_VERSION < (0, 7, 2):
+            expected = [
+                r
+                for r in expected
+                if not r.startswith('molten.components.UploadedFileComponent')
+            ]
+
+        self.assertEqual(len(spans), len(expected))
+        self.assertEqual([s.resource for s in spans], expected)
+
+    def test_distributed_tracing(self):
+        """ Tests whether span IDs are propagated when distributed tracing is on """
+        # Default: distributed tracing enabled
+        response = molten_client(headers={
+            HTTP_HEADER_TRACE_ID: '100',
+            HTTP_HEADER_PARENT_ID: '42',
+        })
+        self.assertEqual(response.status_code, 200)
+        self.assertEqual(response.json(), 'Hello 24 year old named Jim!')
+
+        spans = self.tracer.writer.pop()
+        span = spans[0]
+        self.assertEqual(span.name, 'molten.request')
+        self.assertEqual(span.trace_id, 100)
+        self.assertEqual(span.parent_id, 42)
+
+        # Explicitly enable distributed tracing
+        with self.override_config('molten', dict(distributed_tracing=True)):
+            response = molten_client(headers={
+                HTTP_HEADER_TRACE_ID: '100',
+                HTTP_HEADER_PARENT_ID: '42',
+            })
+            self.assertEqual(response.status_code, 200)
+            self.assertEqual(response.json(), 'Hello 24 year old named Jim!')
+
+        spans = self.tracer.writer.pop()
+        span = spans[0]
+        self.assertEqual(span.name, 'molten.request')
+        self.assertEqual(span.trace_id, 100)
+        self.assertEqual(span.parent_id, 42)
+
+        # Now with distributed tracing disabled
+        with self.override_config('molten', dict(distributed_tracing=False)):
+            response = molten_client(headers={
+                HTTP_HEADER_TRACE_ID: '100',
+                HTTP_HEADER_PARENT_ID: '42',
+            })
+            self.assertEqual(response.status_code, 200)
+            self.assertEqual(response.json(), 'Hello 24 year old named Jim!')
+
+        spans = self.tracer.writer.pop()
+        span = spans[0]
+        self.assertEqual(span.name, 'molten.request')
+        self.assertNotEqual(span.trace_id, 100)
+        self.assertNotEqual(span.parent_id, 42)
+
+    def test_unpatch_patch(self):
+        """ Tests unpatch-patch cycle """
+        unpatch()
+        self.assertIsNone(Pin.get_from(molten))
+        molten_client()
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 0)
+
+        patch()
+        # Need to override Pin here as we do in setUp
+        Pin.override(molten, tracer=self.tracer)
+        self.assertTrue(Pin.get_from(molten) is not None)
+        molten_client()
+        spans = self.tracer.writer.pop()
+        self.assertTrue(len(spans) > 0)
+
+    def test_patch_unpatch(self):
+        """ Tests repatch-unpatch cycle """
+        # Already call patch in setUp
+        self.assertTrue(Pin.get_from(molten) is not None)
+        molten_client()
+        spans = self.tracer.writer.pop()
+        self.assertTrue(len(spans) > 0)
+
+        # Test unpatch
+        unpatch()
+        self.assertTrue(Pin.get_from(molten) is None)
+        molten_client()
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 0)
+
+    def test_patch_idempotence(self):
+        """ Tests repatching """
+        # Already call patch in setUp but patch again
+        patch()
+        molten_client()
+        spans = self.tracer.writer.pop()
+        self.assertTrue(len(spans) > 0)
diff --git a/tests/contrib/molten/test_molten_di.py b/tests/contrib/molten/test_molten_di.py
new file mode 100644
index 0000000000..238f1bcc45
--- /dev/null
+++ b/tests/contrib/molten/test_molten_di.py
@@ -0,0 +1,118 @@
+from unittest import TestCase
+
+# Test base adapted from molten/tests/test_dependency_injection.py
+
+from inspect import Parameter
+
+import molten
+from molten import DependencyInjector
+
+from ddtrace import Pin
+from ddtrace.contrib.molten import patch, unpatch
+
+from ...test_tracer import get_dummy_tracer
+
+
+class Settings(dict):
+    pass
+
+
+class SettingsComponent:
+    is_singleton = True
+
+    def can_handle_parameter(self, parameter: Parameter) -> bool:
+        return parameter.annotation is Settings
+
+    def resolve(self) -> Settings:
+        return Settings()
+
+
+class Metrics:
+    __slots__ = ['settings']
+
+    def __init__(self, settings: Settings) -> None:
+        self.settings = settings
+
+
+class MetricsComponent:
+    is_singleton = True
+
+    def can_handle_parameter(self, parameter: Parameter) -> bool:
+        return parameter.annotation is Metrics
+
+    def resolve(self, settings: Settings) -> Metrics:
+        return Metrics(settings)
+
+
+class DB:
+    __slots__ = ['settings', 'metrics']
+
+    def __init__(self, settings: Settings, metrics: Metrics) -> None:
+        self.settings = settings
+        self.metrics = metrics
+
+
+class DBComponent:
+    is_singleton = True
+
+    def can_handle_parameter(self, parameter: Parameter) -> bool:
+        return parameter.annotation is DB
+
+    def resolve(self, settings: Settings, metrics: Metrics) -> DB:
+        return DB(settings, metrics)
+
+
+class Accounts:
+    def __init__(self, db: DB) -> None:
+        self.db = db
+
+    def get_all(self):
+        return []
+
+
+class AccountsComponent:
+    def can_handle_parameter(self, parameter: Parameter) -> bool:
+        return parameter.annotation is Accounts
+
+    def resolve(self, db: DB) -> Accounts:
+        return Accounts(db)
+
+
+class TestMoltenDI(TestCase):
+    """Ensures Molten dependency injection is properly instrumented."""
+
+    TEST_SERVICE = 'molten-patch-di'
+
+    def setUp(self):
+        patch()
+        self.tracer = get_dummy_tracer()
+        Pin.override(molten, tracer=self.tracer, service=self.TEST_SERVICE)
+
+    def tearDown(self):
+        unpatch()
+        self.tracer.writer.pop()
+
+    def test_di_can_inject_dependencies(self):
+        # Given that I have a DI instance
+        di = DependencyInjector(components=[
+            SettingsComponent(),
+            MetricsComponent(),
+            DBComponent(),
+            AccountsComponent(),
+        ])
+
+        # And a function that uses DI
+        def example(accounts: Accounts):
+            assert accounts.get_all() == []
+            return accounts
+
+        # When I resolve that function
+        # Then all the parameters should resolve as expected
+        resolver = di.get_resolver()
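+        # resolving example() should walk the annotations transitively: Accounts -> DB -> (Settings, Metrics)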
resolver.resolve(example) + resolved_example() + + spans = self.tracer.writer.pop() + + # TODO[tahir]: we could, in the future, trace the resolve method on components + self.assertEqual(len(spans), 0) diff --git a/tests/contrib/mongoengine/__init__.py b/tests/contrib/mongoengine/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/mongoengine/test.py b/tests/contrib/mongoengine/test.py new file mode 100644 index 0000000000..fad28a549f --- /dev/null +++ b/tests/contrib/mongoengine/test.py @@ -0,0 +1,303 @@ +# stdlib +import time +import unittest + +# 3p +import mongoengine +import pymongo + +# project +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.mongoengine.patch import patch, unpatch +from ddtrace.ext import mongo as mongox + +# testing +from tests.opentracer.utils import init_tracer +from ..config import MONGO_CONFIG +from ...base import override_config +from ...test_tracer import get_dummy_tracer + + +class Artist(mongoengine.Document): + first_name = mongoengine.StringField(max_length=50) + last_name = mongoengine.StringField(max_length=50) + + +class MongoEngineCore(object): + + # Define the service at the class level, so that each test suite can use a different service + # and therefore catch anything that was sneakily left unpatched. + TEST_SERVICE = 'deadbeef' + + def get_tracer_and_connect(self): + # implement me + pass + + def test_insert_update_delete_query(self): + tracer = self.get_tracer_and_connect() + + start = time.time() + Artist.drop_collection() + end = time.time() + + # ensure we get a drop collection span + spans = tracer.writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.resource == 'drop artist' + assert span.span_type == 'mongodb' + assert span.service == self.TEST_SERVICE + _assert_timing(span, start, end) + + start = end + joni = Artist() + joni.first_name = 'Joni' + joni.last_name = 'Mitchell' + joni.save() + end = time.time() + + # ensure we get an insert span + spans = tracer.writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.resource == 'insert artist' + assert span.span_type == 'mongodb' + assert span.service == self.TEST_SERVICE + _assert_timing(span, start, end) + + # ensure full scans work + start = time.time() + artists = [a for a in Artist.objects] + end = time.time() + assert len(artists) == 1 + assert artists[0].first_name == 'Joni' + assert artists[0].last_name == 'Mitchell' + + # the 'find' query name is used in pymongo>=3.1; older versions use 'query' + name = 'find' if pymongo.version_tuple >= (3, 1, 0) else 'query' + + spans = tracer.writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.resource == '{} artist'.format(name) + assert span.span_type == 'mongodb' + assert span.service == self.TEST_SERVICE + _assert_timing(span, start, end) + + # ensure filtered queries work + start = time.time() + artists = [a for a in Artist.objects(first_name='Joni')] + end = time.time() + assert len(artists) == 1 + joni = artists[0] + assert artists[0].first_name == 'Joni' + assert artists[0].last_name == 'Mitchell' + + spans = tracer.writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.resource == '{} artist {{"first_name": "?"}}'.format(name) + assert span.span_type == 'mongodb' + assert span.service == self.TEST_SERVICE + _assert_timing(span, start, end) + + # ensure updates work + start = time.time() + joni.last_name = 'From Saskatoon' + joni.save() + end = time.time() + + spans = tracer.writer.pop() + assert len(spans) == 1 + span = spans[0]
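+ # (Editorial note, not in the original test) the resource strings checked + # here and above use the normalized form of the query: the pymongo + # integration obfuscates query values to '?' so span resources stay + # low-cardinality. +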
assert span.resource == 'update artist {"_id": "?"}' + assert span.span_type == 'mongodb' + assert span.service == self.TEST_SERVICE + _assert_timing(span, start, end) + + # ensure deletes + start = time.time() + joni.delete() + end = time.time() + + spans = tracer.writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.resource == 'delete artist {"_id": "?"}' + assert span.span_type == 'mongodb' + assert span.service == self.TEST_SERVICE + _assert_timing(span, start, end) + + def test_opentracing(self): + """Ensure the opentracer works with mongoengine.""" + tracer = self.get_tracer_and_connect() + ot_tracer = init_tracer('my_svc', tracer) + + with ot_tracer.start_active_span('ot_span'): + start = time.time() + Artist.drop_collection() + end = time.time() + + # ensure we get a drop collection span + spans = tracer.writer.pop() + assert len(spans) == 2 + ot_span, dd_span = spans + + # confirm the parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.name == 'ot_span' + assert ot_span.service == 'my_svc' + + assert dd_span.resource == 'drop artist' + assert dd_span.span_type == 'mongodb' + assert dd_span.service == self.TEST_SERVICE + _assert_timing(dd_span, start, end) + + def test_analytics_default(self): + tracer = self.get_tracer_and_connect() + Artist.drop_collection() + + spans = tracer.writer.pop() + assert len(spans) == 1 + assert spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None + + def test_analytics_with_rate(self): + with override_config( + 'pymongo', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + tracer = self.get_tracer_and_connect() + Artist.drop_collection() + + spans = tracer.writer.pop() + assert len(spans) == 1 + assert spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.5 + + def test_analytics_without_rate(self): + with override_config( + 'pymongo', + dict(analytics_enabled=True) + ): + tracer = self.get_tracer_and_connect() + Artist.drop_collection() + + spans = tracer.writer.pop() + assert len(spans) == 1 + assert spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0 + + +class TestMongoEnginePatchConnectDefault(unittest.TestCase, MongoEngineCore): + """Test suite with a global Pin for the connect function with the default configuration""" + + TEST_SERVICE = mongox.SERVICE + + def setUp(self): + patch() + + def tearDown(self): + unpatch() + # Disconnect and remove the client + mongoengine.connection.disconnect() + + def get_tracer_and_connect(self): + tracer = get_dummy_tracer() + Pin.get_from(mongoengine.connect).clone( + tracer=tracer).onto(mongoengine.connect) + mongoengine.connect(port=MONGO_CONFIG['port']) + + return tracer + + +class TestMongoEnginePatchConnect(TestMongoEnginePatchConnectDefault): + """Test suite with a global Pin for the connect function with custom service""" + + TEST_SERVICE = 'test-mongo-patch-connect' + + def get_tracer_and_connect(self): + tracer = TestMongoEnginePatchConnectDefault.get_tracer_and_connect(self) + Pin(service=self.TEST_SERVICE, tracer=tracer).onto(mongoengine.connect) + mongoengine.connect(port=MONGO_CONFIG['port']) + + return tracer + + +class TestMongoEnginePatchClientDefault(unittest.TestCase, MongoEngineCore): + """Test suite with a Pin local to a specific client with default configuration""" + + TEST_SERVICE = mongox.SERVICE + + def setUp(self): + patch() + + def tearDown(self): + unpatch() + # Disconnect and remove the client + mongoengine.connection.disconnect() + + def get_tracer_and_connect(self): + tracer = 
get_dummy_tracer() + client = mongoengine.connect(port=MONGO_CONFIG['port']) + Pin.get_from(client).clone(tracer=tracer).onto(client) + + return tracer + + +class TestMongoEnginePatchClient(TestMongoEnginePatchClientDefault): + """Test suite with a Pin local to a specific client with custom service""" + + TEST_SERVICE = 'test-mongo-patch-client' + + def get_tracer_and_connect(self): + tracer = get_dummy_tracer() + # Set a connect-level service, to check that we properly override it + Pin(service='not-%s' % self.TEST_SERVICE).onto(mongoengine.connect) + client = mongoengine.connect(port=MONGO_CONFIG['port']) + Pin(service=self.TEST_SERVICE, tracer=tracer).onto(client) + + return tracer + + def test_patch_unpatch(self): + tracer = get_dummy_tracer() + + # Test patch idempotence + patch() + patch() + + client = mongoengine.connect(port=MONGO_CONFIG['port']) + Pin.get_from(client).clone(tracer=tracer).onto(client) + + Artist.drop_collection() + spans = tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + + mongoengine.connection.disconnect() + tracer.writer.pop() + + # Test unpatch + unpatch() + + mongoengine.connect(port=MONGO_CONFIG['port']) + + Artist.drop_collection() + spans = tracer.writer.pop() + assert not spans, spans + + # Test patch again + patch() + + client = mongoengine.connect(port=MONGO_CONFIG['port']) + Pin.get_from(client).clone(tracer=tracer).onto(client) + + Artist.drop_collection() + spans = tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + + +def _assert_timing(span, start, end): + assert start < span.start < end + assert span.duration < end - start diff --git a/tests/contrib/mongoengine/test_backwards.py b/tests/contrib/mongoengine/test_backwards.py new file mode 100644 index 0000000000..ff396ec3de --- /dev/null +++ b/tests/contrib/mongoengine/test_backwards.py @@ -0,0 +1,26 @@ +""" +ensure old interfaces exist and won't break things. 
+""" +import mongoengine + +from tests.test_tracer import get_dummy_tracer +from tests.contrib import config + + +class Singer(mongoengine.Document): + first_name = mongoengine.StringField(max_length=50) + last_name = mongoengine.StringField(max_length=50) + + +def test_less_than_v04(): + # interface from < v0.4 + from ddtrace.contrib.mongoengine import trace_mongoengine + tracer = get_dummy_tracer() + + connect = trace_mongoengine(tracer, service='my-mongo-db', patch=False) + connect(port=config.MONGO_CONFIG['port']) + + lc = Singer() + lc.first_name = 'leonard' + lc.last_name = 'cohen' + lc.save() diff --git a/tests/contrib/mysql/__init__.py b/tests/contrib/mysql/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/mysql/test_backwards_compatibility.py b/tests/contrib/mysql/test_backwards_compatibility.py new file mode 100644 index 0000000000..302cb0fdce --- /dev/null +++ b/tests/contrib/mysql/test_backwards_compatibility.py @@ -0,0 +1,13 @@ + +from ddtrace.contrib.mysql import get_traced_mysql_connection +from tests.test_tracer import get_dummy_tracer +from tests.contrib import config + + +def test_pre_v4(): + tracer = get_dummy_tracer() + MySQL = get_traced_mysql_connection(tracer, service='my-mysql-server') + conn = MySQL(**config.MYSQL_CONFIG) + cursor = conn.cursor() + cursor.execute('SELECT 1') + assert cursor.fetchone()[0] == 1 diff --git a/tests/contrib/mysql/test_mysql.py b/tests/contrib/mysql/test_mysql.py new file mode 100644 index 0000000000..d9872c3f92 --- /dev/null +++ b/tests/contrib/mysql/test_mysql.py @@ -0,0 +1,437 @@ +# 3p +import mysql + +# project +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.mysql.patch import patch, unpatch + +# tests +from tests.contrib.config import MYSQL_CONFIG +from tests.opentracer.utils import init_tracer +from ...base import BaseTracerTestCase +from ...util import assert_dict_issuperset + + +class MySQLCore(object): + """Base test case for MySQL drivers""" + conn = None + TEST_SERVICE = 'test-mysql' + + def tearDown(self): + super(MySQLCore, self).tearDown() + + # Reuse the connection across tests + if self.conn: + try: + self.conn.ping() + except mysql.InterfaceError: + pass + else: + self.conn.close() + unpatch() + + def _get_conn_tracer(self): + # implement me + pass + + def test_simple_query(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == 3306 + assert_dict_issuperset(span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + + def test_simple_query_fetchll(self): + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + assert len(spans) == 2 + + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == 3306 + assert_dict_issuperset(span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': 
u'test', + }) + + assert spans[1].name == 'mysql.query.fetchall' + + def test_query_with_several_rows(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + query = 'SELECT n FROM (SELECT 42 n UNION SELECT 421 UNION SELECT 4210) m' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 3 + spans = writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.get_tag('sql.query') is None + + def test_query_with_several_rows_fetchall(self): + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + query = 'SELECT n FROM (SELECT 42 n UNION SELECT 421 UNION SELECT 4210) m' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 3 + spans = writer.pop() + assert len(spans) == 2 + span = spans[0] + assert span.get_tag('sql.query') is None + assert spans[1].name == 'mysql.query.fetchall' + + def test_query_many(self): + # tests that the executemany method is correctly wrapped. + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + tracer.enabled = False + cursor = conn.cursor() + + cursor.execute(""" + create table if not exists dummy ( + dummy_key VARCHAR(32) PRIMARY KEY, + dummy_value TEXT NOT NULL)""") + tracer.enabled = True + + stmt = 'INSERT INTO dummy (dummy_key, dummy_value) VALUES (%s, %s)' + data = [ + ('foo', 'this is foo'), + ('bar', 'this is bar'), + ] + cursor.executemany(stmt, data) + query = 'SELECT dummy_key, dummy_value FROM dummy ORDER BY dummy_key' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 2 + assert rows[0][0] == 'bar' + assert rows[0][1] == 'this is bar' + assert rows[1][0] == 'foo' + assert rows[1][1] == 'this is foo' + + spans = writer.pop() + assert len(spans) == 2 + span = spans[-1] + assert span.get_tag('sql.query') is None + cursor.execute('drop table if exists dummy') + + def test_query_many_fetchall(self): + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + # tests that the executemany method is correctly wrapped. 
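+ # (Editorial note, not in the original test) executemany is expected to + # produce a single span for the whole batch; with trace_fetch_methods + # enabled the fetchall adds one more span, which is why three spans are + # asserted below.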
+ conn, tracer = self._get_conn_tracer() + writer = tracer.writer + tracer.enabled = False + cursor = conn.cursor() + + cursor.execute(""" + create table if not exists dummy ( + dummy_key VARCHAR(32) PRIMARY KEY, + dummy_value TEXT NOT NULL)""") + tracer.enabled = True + + stmt = 'INSERT INTO dummy (dummy_key, dummy_value) VALUES (%s, %s)' + data = [ + ('foo', 'this is foo'), + ('bar', 'this is bar'), + ] + cursor.executemany(stmt, data) + query = 'SELECT dummy_key, dummy_value FROM dummy ORDER BY dummy_key' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 2 + assert rows[0][0] == 'bar' + assert rows[0][1] == 'this is bar' + assert rows[1][0] == 'foo' + assert rows[1][1] == 'this is foo' + + spans = writer.pop() + assert len(spans) == 3 + span = spans[-1] + assert span.get_tag('sql.query') is None + cursor.execute('drop table if exists dummy') + + assert spans[2].name == 'mysql.query.fetchall' + + def test_query_proc(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + + # create a procedure + tracer.enabled = False + cursor = conn.cursor() + cursor.execute('DROP PROCEDURE IF EXISTS sp_sum') + cursor.execute(""" + CREATE PROCEDURE sp_sum (IN p1 INTEGER, IN p2 INTEGER, OUT p3 INTEGER) + BEGIN + SET p3 := p1 + p2; + END;""") + + tracer.enabled = True + proc = 'sp_sum' + data = (40, 2, None) + output = cursor.callproc(proc, data) + assert len(output) == 3 + assert output[2] == 42 + + spans = writer.pop() + assert spans, spans + + # The number of spans depends on MySQL implementation details (typically + # internal calls to execute), but at a minimum we can expect the last + # closed span to be our procedure call. + span = spans[len(spans) - 1] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == 3306 + assert_dict_issuperset(span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + assert span.get_tag('sql.query') is None + + def test_simple_query_ot(self): + """OpenTracing version of test_simple_query.""" + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + + ot_tracer = init_tracer('mysql_svc', tracer) + + with ot_tracer.start_active_span('mysql_op'): + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + + spans = writer.pop() + assert len(spans) == 2 + + ot_span, dd_span = spans + + # confirm parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.service == 'mysql_svc' + assert ot_span.name == 'mysql_op' + + assert dd_span.service == self.TEST_SERVICE + assert dd_span.name == 'mysql.query' + assert dd_span.span_type == 'sql' + assert dd_span.error == 0 + assert dd_span.get_metric('out.port') == 3306 + assert_dict_issuperset(dd_span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + + def test_simple_query_ot_fetchall(self): + """OpenTracing version of test_simple_query.""" + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + + ot_tracer = init_tracer('mysql_svc', tracer) + + with ot_tracer.start_active_span('mysql_op'): + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + + spans = writer.pop() + assert len(spans) == 3 + + ot_span, dd_span, fetch_span = spans + + # confirm parenting + assert ot_span.parent_id is
None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.service == 'mysql_svc' + assert ot_span.name == 'mysql_op' + + assert dd_span.service == self.TEST_SERVICE + assert dd_span.name == 'mysql.query' + assert dd_span.span_type == 'sql' + assert dd_span.error == 0 + assert dd_span.get_metric('out.port') == 3306 + assert_dict_issuperset(dd_span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + + assert fetch_span.name == 'mysql.query.fetchall' + + def test_commit(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + conn.commit() + spans = writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.connection.commit' + + def test_rollback(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + conn.rollback() + spans = writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.connection.rollback' + + def test_analytics_default(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True) + ): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) + + +class TestMysqlPatch(MySQLCore, BaseTracerTestCase): + + def setUp(self): + super(TestMysqlPatch, self).setUp() + patch() + + def tearDown(self): + super(TestMysqlPatch, self).tearDown() + unpatch() + + def _get_conn_tracer(self): + if not self.conn: + self.conn = mysql.connector.connect(**MYSQL_CONFIG) + assert self.conn.is_connected() + # Ensure that the default pin is there, with its default value + pin = Pin.get_from(self.conn) + assert pin + assert pin.service == 'mysql' + # Customize the service + # we have to apply it on the existing one since new one won't inherit `app` + pin.clone( + service=self.TEST_SERVICE, tracer=self.tracer).onto(self.conn) + + return self.conn, self.tracer + + def test_patch_unpatch(self): + unpatch() + # assert we start unpatched + conn = mysql.connector.connect(**MYSQL_CONFIG) + assert not Pin.get_from(conn) + conn.close() + + patch() + try: + writer = self.tracer.writer + conn = mysql.connector.connect(**MYSQL_CONFIG) + pin = Pin.get_from(conn) + assert pin + pin.clone( + service=self.TEST_SERVICE, tracer=self.tracer).onto(conn) + assert conn.is_connected() + + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + assert len(spans) == 1 + 
+ span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == 3306 + assert_dict_issuperset(span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + assert span.get_tag('sql.query') is None + + finally: + unpatch() + + # assert we finish unpatched + conn = mysql.connector.connect(**MYSQL_CONFIG) + assert not Pin.get_from(conn) + conn.close() + + patch() diff --git a/tests/contrib/mysqldb/__init__.py b/tests/contrib/mysqldb/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/mysqldb/test_mysql.py b/tests/contrib/mysqldb/test_mysql.py new file mode 100644 index 0000000000..15581e1f1d --- /dev/null +++ b/tests/contrib/mysqldb/test_mysql.py @@ -0,0 +1,505 @@ +import MySQLdb + +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.mysqldb.patch import patch, unpatch + +from tests.opentracer.utils import init_tracer +from ..config import MYSQL_CONFIG +from ...base import BaseTracerTestCase +from ...util import assert_dict_issuperset + + +class MySQLCore(object): + """Base test case for MySQL drivers""" + conn = None + TEST_SERVICE = 'test-mysql' + + def setUp(self): + super(MySQLCore, self).setUp() + + patch() + + def tearDown(self): + super(MySQLCore, self).tearDown() + + # Reuse the connection across tests + if self.conn: + try: + self.conn.ping() + except MySQLdb.InterfaceError: + pass + else: + self.conn.close() + unpatch() + + def _get_conn_tracer(self): + # implement me + pass + + def test_simple_query(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + rowcount = cursor.execute('SELECT 1') + assert rowcount == 1 + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == 3306 + assert_dict_issuperset(span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + + def test_simple_query_fetchall(self): + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + assert len(spans) == 2 + + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == 3306 + assert_dict_issuperset(span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + fetch_span = spans[1] + assert fetch_span.name == 'mysql.query.fetchall' + + def test_simple_query_with_positional_args(self): + conn, tracer = self._get_conn_tracer_with_positional_args() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == 3306 + assert_dict_issuperset(span.meta, { + 'out.host': u'127.0.0.1', + 
'db.name': u'test', + 'db.user': u'test', + }) + + def test_simple_query_with_positional_args_fetchall(self): + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + conn, tracer = self._get_conn_tracer_with_positional_args() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + assert len(spans) == 2 + + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == 3306 + assert_dict_issuperset(span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + fetch_span = spans[1] + assert fetch_span.name == 'mysql.query.fetchall' + + def test_query_with_several_rows(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + query = 'SELECT n FROM (SELECT 42 n UNION SELECT 421 UNION SELECT 4210) m' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 3 + spans = writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.get_tag('sql.query') is None + + def test_query_with_several_rows_fetchall(self): + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + query = 'SELECT n FROM (SELECT 42 n UNION SELECT 421 UNION SELECT 4210) m' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 3 + spans = writer.pop() + assert len(spans) == 2 + span = spans[0] + assert span.get_tag('sql.query') is None + fetch_span = spans[1] + assert fetch_span.name == 'mysql.query.fetchall' + + def test_query_many(self): + # tests that the executemany method is correctly wrapped. + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + tracer.enabled = False + cursor = conn.cursor() + + cursor.execute(""" + create table if not exists dummy ( + dummy_key VARCHAR(32) PRIMARY KEY, + dummy_value TEXT NOT NULL)""") + tracer.enabled = True + + stmt = 'INSERT INTO dummy (dummy_key, dummy_value) VALUES (%s, %s)' + data = [ + ('foo', 'this is foo'), + ('bar', 'this is bar'), + ] + cursor.executemany(stmt, data) + query = 'SELECT dummy_key, dummy_value FROM dummy ORDER BY dummy_key' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 2 + assert rows[0][0] == 'bar' + assert rows[0][1] == 'this is bar' + assert rows[1][0] == 'foo' + assert rows[1][1] == 'this is foo' + + spans = writer.pop() + assert len(spans) == 2 + span = spans[1] + assert span.get_tag('sql.query') is None + cursor.execute('drop table if exists dummy') + + def test_query_many_fetchall(self): + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + # tests that the executemany method is correctly wrapped. 
+ conn, tracer = self._get_conn_tracer() + writer = tracer.writer + tracer.enabled = False + cursor = conn.cursor() + + cursor.execute(""" + create table if not exists dummy ( + dummy_key VARCHAR(32) PRIMARY KEY, + dummy_value TEXT NOT NULL)""") + tracer.enabled = True + + stmt = 'INSERT INTO dummy (dummy_key, dummy_value) VALUES (%s, %s)' + data = [ + ('foo', 'this is foo'), + ('bar', 'this is bar'), + ] + cursor.executemany(stmt, data) + query = 'SELECT dummy_key, dummy_value FROM dummy ORDER BY dummy_key' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 2 + assert rows[0][0] == 'bar' + assert rows[0][1] == 'this is bar' + assert rows[1][0] == 'foo' + assert rows[1][1] == 'this is foo' + + spans = writer.pop() + assert len(spans) == 3 + span = spans[1] + assert span.get_tag('sql.query') is None + cursor.execute('drop table if exists dummy') + fetch_span = spans[2] + assert fetch_span.name == 'mysql.query.fetchall' + + def test_query_proc(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + + # create a procedure + tracer.enabled = False + cursor = conn.cursor() + cursor.execute('DROP PROCEDURE IF EXISTS sp_sum') + cursor.execute(""" + CREATE PROCEDURE sp_sum (IN p1 INTEGER, IN p2 INTEGER, OUT p3 INTEGER) + BEGIN + SET p3 := p1 + p2; + END;""") + + tracer.enabled = True + proc = 'sp_sum' + data = (40, 2, None) + output = cursor.callproc(proc, data) + assert len(output) == 3 + # the resulting p3 isn't stored in output[2]; we need to fetch it with a SELECT + # http://mysqlclient.readthedocs.io/user_guide.html#cursor-objects + cursor.execute('SELECT @_sp_sum_2;') + assert cursor.fetchone()[0] == 42 + + spans = writer.pop() + assert spans, spans + + # The number of spans depends on MySQL implementation details (typically + # internal calls to execute), but at a minimum we can expect the + # next-to-last closed span to be our procedure call.
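+ # (Editorial aside, not in the original test) MySQLdb materializes OUT + # parameters as server-side user variables named @_<procname>_<n>: + # + # cursor.callproc('sp_sum', (40, 2, None)) + # cursor.execute('SELECT @_sp_sum_2;') # index 2 -> the OUT parameter p3 + # assert cursor.fetchone()[0] == 42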
+ span = spans[-2] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == 3306 + assert_dict_issuperset(span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + assert span.get_tag('sql.query') is None + + def test_simple_query_ot(self): + """OpenTracing version of test_simple_query.""" + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + ot_tracer = init_tracer('mysql_svc', tracer) + with ot_tracer.start_active_span('mysql_op'): + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + + spans = writer.pop() + assert len(spans) == 2 + ot_span, dd_span = spans + + # confirm parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.service == 'mysql_svc' + assert ot_span.name == 'mysql_op' + + assert dd_span.service == self.TEST_SERVICE + assert dd_span.name == 'mysql.query' + assert dd_span.span_type == 'sql' + assert dd_span.error == 0 + assert dd_span.get_metric('out.port') == 3306 + assert_dict_issuperset(dd_span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + + def test_simple_query_ot_fetchall(self): + """OpenTracing version of test_simple_query.""" + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + ot_tracer = init_tracer('mysql_svc', tracer) + with ot_tracer.start_active_span('mysql_op'): + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + + spans = writer.pop() + assert len(spans) == 3 + ot_span, dd_span, fetch_span = spans + + # confirm parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.service == 'mysql_svc' + assert ot_span.name == 'mysql_op' + + assert dd_span.service == self.TEST_SERVICE + assert dd_span.name == 'mysql.query' + assert dd_span.span_type == 'sql' + assert dd_span.error == 0 + assert dd_span.get_metric('out.port') == 3306 + assert_dict_issuperset(dd_span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + + assert fetch_span.name == 'mysql.query.fetchall' + + def test_commit(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + conn.commit() + spans = writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'MySQLdb.connection.commit' + + def test_rollback(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + conn.rollback() + spans = writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'MySQLdb.connection.rollback' + + def test_analytics_default(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert 
len(rows) == 1 + spans = writer.pop() + + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True) + ): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) + + +class TestMysqlPatch(MySQLCore, BaseTracerTestCase): + """Ensures MysqlDB is properly patched""" + + def _connect_with_kwargs(self): + return MySQLdb.Connect(**{ + 'host': MYSQL_CONFIG['host'], + 'user': MYSQL_CONFIG['user'], + 'passwd': MYSQL_CONFIG['password'], + 'db': MYSQL_CONFIG['database'], + 'port': MYSQL_CONFIG['port'], + }) + + def _get_conn_tracer(self): + if not self.conn: + self.conn = self._connect_with_kwargs() + self.conn.ping() + # Ensure that the default pin is there, with its default value + pin = Pin.get_from(self.conn) + assert pin + assert pin.service == 'mysql' + # Customize the service + # we have to apply it on the existing one since new one won't inherit `app` + pin.clone(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.conn) + + return self.conn, self.tracer + + def _get_conn_tracer_with_positional_args(self): + if not self.conn: + self.conn = MySQLdb.Connect( + MYSQL_CONFIG['host'], + MYSQL_CONFIG['user'], + MYSQL_CONFIG['password'], + MYSQL_CONFIG['database'], + MYSQL_CONFIG['port'], + ) + self.conn.ping() + # Ensure that the default pin is there, with its default value + pin = Pin.get_from(self.conn) + assert pin + assert pin.service == 'mysql' + # Customize the service + # we have to apply it on the existing one since new one won't inherit `app` + pin.clone(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.conn) + + return self.conn, self.tracer + + def test_patch_unpatch(self): + unpatch() + # assert we start unpatched + conn = self._connect_with_kwargs() + assert not Pin.get_from(conn) + conn.close() + + patch() + try: + writer = self.tracer.writer + conn = self._connect_with_kwargs() + pin = Pin.get_from(conn) + assert pin + pin.clone(service=self.TEST_SERVICE, tracer=self.tracer).onto(conn) + conn.ping() + + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'mysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == 3306 + assert_dict_issuperset(span.meta, { + 'out.host': u'127.0.0.1', + 'db.name': u'test', + 'db.user': u'test', + }) + assert span.get_tag('sql.query') is None + + finally: + unpatch() + + # assert we finish unpatched + conn = self._connect_with_kwargs() + assert not Pin.get_from(conn) + conn.close() + + patch() diff --git a/tests/contrib/patch.py b/tests/contrib/patch.py new file mode 100644 index 0000000000..e9d7361778 --- /dev/null +++ b/tests/contrib/patch.py @@ -0,0 +1,645 @@ +import functools +import importlib +import sys +import unittest + +from ddtrace.vendor import wrapt + +from tests.subprocesstest import SubprocessTestCase, run_in_subprocess + + +class PatchMixin(unittest.TestCase): + """ + TestCase for testing the patch logic of an integration. 
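+ + (Editorial note) Supplies wrapt-proxy helpers such as assert_wrapped and + assert_not_double_wrapped; PatchTestCase.Base below mixes this in.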
+ """ + def module_imported(self, modname): + """ + Returns whether a module is imported or not. + """ + return modname in sys.modules + + def assert_module_imported(self, modname): + """ + Asserts that the module, given its name is imported. + """ + assert self.module_imported(modname), '{} module not imported'.format(modname) + + def assert_not_module_imported(self, modname): + """ + Asserts that the module, given its name is not imported. + """ + assert not self.module_imported(modname), '{} module is imported'.format(modname) + + def is_wrapped(self, obj): + return isinstance(obj, wrapt.ObjectProxy) + + def assert_wrapped(self, obj): + """ + Helper to assert that a given object is properly wrapped by wrapt. + """ + self.assertTrue(self.is_wrapped(obj), '{} is not wrapped'.format(obj)) + + def assert_not_wrapped(self, obj): + """ + Helper to assert that a given object is not wrapped by wrapt. + """ + self.assertFalse(self.is_wrapped(obj), '{} is wrapped'.format(obj)) + + def assert_not_double_wrapped(self, obj): + """ + Helper to assert that a given already wrapped object is not wrapped twice. + + This is useful for asserting idempotence. + """ + self.assert_wrapped(obj) + self.assert_not_wrapped(obj.__wrapped__) + + +def raise_if_no_attrs(f): + """ + A helper for PatchTestCase test methods that will check if there are any + modules to use else raise a NotImplementedError. + + :param f: method to wrap with a check + """ + required_attrs = [ + '__module_name__', + '__integration_name__', + '__unpatch_func__', + ] + + @functools.wraps(f) + def checked_method(self, *args, **kwargs): + for attr in required_attrs: + if not getattr(self, attr): + raise NotImplementedError(f.__doc__) + return f(self, *args, **kwargs) + return checked_method + + +class PatchTestCase(object): + """ + unittest or other test runners will pick up the base test case as a testcase + since it inherits from unittest.TestCase unless we wrap it with this empty + parent class. + """ + @run_in_subprocess + class Base(SubprocessTestCase, PatchMixin): + """Provides default test methods to be used for testing common integration patching logic. + Each test method provides a default implementation which will use the + provided attributes (described below). If the attributes are not + provided a NotImplementedError will be raised for each method that is + not overridden. + + Attributes: + __integration_name__ the name of the integration. + __module_name__ module which the integration patches. + __unpatch_func__ unpatch function from the integration. + + Example: + A simple implementation inheriting this TestCase looks like:: + + from ddtrace.contrib.redis import unpatch + + class RedisPatchTestCase(PatchTestCase.Base): + __integration_name__ = 'redis' + __module_name__ 'redis' + __unpatch_func__ = unpatch + + def assert_module_patched(self, redis): + # assert patching logic + # self.assert_wrapped(...) + + def assert_not_module_patched(self, redis): + # assert patching logic + # self.assert_not_wrapped(...) + + def assert_not_module_double_patched(self, redis): + # assert patching logic + # self.assert_not_double_wrapped(...) + + # override this particular test case + def test_patch_import(self): + # custom patch before import check + + # optionally override other test methods... + """ + __integration_name__ = None + __module_name__ = None + __unpatch_func__ = None + + def __init__(self, *args, **kwargs): + # DEV: Python will wrap a function when assigning to a class as an + # attribute. 
+ # So we need to unwrap the underlying function and then re-wrap it in a + # zero-argument closure that calls it for us. + if self.__unpatch_func__: + unpatch_func = self.__unpatch_func__.__func__ + + def unpatch(): + unpatch_func() + self.__unpatch_func__ = unpatch + super(PatchTestCase.Base, self).__init__(*args, **kwargs) + + def patch(self, *args, **kwargs): + from ddtrace import patch + return patch(*args, **kwargs) + + def _gen_test_attrs(self, ops): + """ + A helper to generate test method names for every ordering of the + given operations. + :return: a list of test method names covering each permutation of ``ops`` + """ + from itertools import permutations + return [ + 'test_{}'.format('_'.join(c)) for c in permutations(ops, len(ops)) + ] + + def test_verify_test_coverage(self): + """ + This TestCase should cover a variety of combinations of importing, + patching and unpatching. + """ + tests = [] + tests += self._gen_test_attrs(['import', 'patch']) + tests += self._gen_test_attrs(['import', 'patch', 'patch']) + tests += self._gen_test_attrs(['import', 'patch', 'unpatch']) + tests += self._gen_test_attrs(['import', 'patch', 'unpatch', 'unpatch']) + + # TODO: it may be possible to generate test cases dynamically. For + # now focus on the important ones. + test_ignore = set([ + 'test_unpatch_import_patch', + 'test_import_unpatch_patch_unpatch', + 'test_import_unpatch_unpatch_patch', + 'test_patch_import_unpatch_unpatch', + 'test_unpatch_import_patch_unpatch', + 'test_unpatch_import_unpatch_patch', + 'test_unpatch_patch_import_unpatch', + 'test_unpatch_patch_unpatch_import', + 'test_unpatch_unpatch_import_patch', + 'test_unpatch_unpatch_patch_import', + ]) + + for test_attr in tests: + if test_attr in test_ignore: + continue + assert hasattr(self, test_attr), '{} not found in expected test attrs'.format(test_attr) + + def assert_module_patched(self, module): + """ + Asserts that the given module is patched. + + For example, the redis integration patches the following methods: + - redis.StrictRedis.execute_command + - redis.StrictRedis.pipeline + - redis.Redis.pipeline + - redis.client.BasePipeline.execute + - redis.client.BasePipeline.immediate_execute_command + + So an appropriate assert_module_patched would look like:: + + def assert_module_patched(self, redis): + self.assert_wrapped(redis.StrictRedis.execute_command) + self.assert_wrapped(redis.StrictRedis.pipeline) + self.assert_wrapped(redis.Redis.pipeline) + self.assert_wrapped(redis.client.BasePipeline.execute) + self.assert_wrapped(redis.client.BasePipeline.immediate_execute_command) + + :param module: module to check + :return: None + """ + raise NotImplementedError(self.assert_module_patched.__doc__) + + def assert_not_module_patched(self, module): + """ + Asserts that the given module is not patched.
+ + For example, the redis integration patches the following methods: + - redis.StrictRedis.execute_command + - redis.StrictRedis.pipeline + - redis.Redis.pipeline + - redis.client.BasePipeline.execute + - redis.client.BasePipeline.immediate_execute_command + + So an appropriate assert_not_module_patched would look like:: + + def assert_not_module_patched(self, redis): + self.assert_not_wrapped(redis.StrictRedis.execute_command) + self.assert_not_wrapped(redis.StrictRedis.pipeline) + self.assert_not_wrapped(redis.Redis.pipeline) + self.assert_not_wrapped(redis.client.BasePipeline.execute) + self.assert_not_wrapped(redis.client.BasePipeline.immediate_execute_command) + + :param module: + :return: None + """ + raise NotImplementedError(self.assert_not_module_patched.__doc__) + + def assert_not_module_double_patched(self, module): + """ + Asserts that the given module is not patched twice. + + For example, the redis integration patches the following methods: + - redis.StrictRedis.execute_command + - redis.StrictRedis.pipeline + - redis.Redis.pipeline + - redis.client.BasePipeline.execute + - redis.client.BasePipeline.immediate_execute_command + + So an appropriate assert_not_module_double_patched would look like:: + + def assert_not_module_double_patched(self, redis): + self.assert_not_double_wrapped(redis.StrictRedis.execute_command) + self.assert_not_double_wrapped(redis.StrictRedis.pipeline) + self.assert_not_double_wrapped(redis.Redis.pipeline) + self.assert_not_double_wrapped(redis.client.BasePipeline.execute) + self.assert_not_double_wrapped(redis.client.BasePipeline.immediate_execute_command) + + :param module: module to check + :return: None + """ + raise NotImplementedError(self.assert_not_module_double_patched.__doc__) + + @raise_if_no_attrs + def test_import_patch(self): + """ + The integration should test that each class, method or function that + is to be patched is in fact done so when ddtrace.patch() is called + before the module is imported. + + For example: + + an appropriate ``test_patch_import`` would be:: + + import redis + ddtrace.patch(redis=True) + self.assert_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + module = importlib.import_module(self.__module_name__) + self.assert_not_module_patched(module) + self.patch(**{self.__integration_name__: True}) + self.assert_module_patched(module) + + @raise_if_no_attrs + def test_patch_import(self): + """ + The integration should test that each class, method or function that + is to be patched is in fact done so when ddtrace.patch() is called + after the module is imported. + + an appropriate ``test_patch_import`` would be:: + + import redis + ddtrace.patch(redis=True) + self.assert_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + module = importlib.import_module(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + self.assert_module_patched(module) + + @raise_if_no_attrs + def test_import_patch_patch(self): + """ + Proper testing should be done to ensure that multiple calls to the + integration.patch() method are idempotent. That is, that the + integration does not patch its library more than once. 
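+ + (Editorial note: ddtrace integrations typically guard patch() with a + module flag, so a repeated call must detect the existing wrappers rather + than wrap them a second time.)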
+ + An example for what this might look like for the redis integration:: + + import redis + ddtrace.patch(redis=True) + self.assert_module_patched(redis) + ddtrace.patch(redis=True) + self.assert_not_module_double_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + module = importlib.import_module(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + self.assert_module_patched(module) + self.patch(**{self.__integration_name__: True}) + self.assert_not_module_double_patched(module) + + @raise_if_no_attrs + def test_patch_import_patch(self): + """ + Proper testing should be done to ensure that multiple calls to the + integration.patch() method are idempotent. That is, that the + integration does not patch its library more than once. + + An example for what this might look like for the redis integration:: + + ddtrace.patch(redis=True) + import redis + self.assert_module_patched(redis) + ddtrace.patch(redis=True) + self.assert_not_module_double_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + module = importlib.import_module(self.__module_name__) + self.assert_module_patched(module) + self.patch(**{self.__integration_name__: True}) + self.assert_not_module_double_patched(module) + + @raise_if_no_attrs + def test_patch_patch_import(self): + """ + Proper testing should be done to ensure that multiple calls to the + integration.patch() method are idempotent. That is, that the + integration does not patch its library more than once. + + An example for what this might look like for the redis integration:: + + ddtrace.patch(redis=True) + ddtrace.patch(redis=True) + import redis + self.assert_not_double_wrapped(redis.StrictRedis.execute_command) + """ + self.assert_not_module_imported(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + self.patch(**{self.__integration_name__: True}) + module = importlib.import_module(self.__module_name__) + self.assert_module_patched(module) + self.assert_not_module_double_patched(module) + + @raise_if_no_attrs + def test_import_patch_unpatch_patch(self): + """ + To ensure that we can thoroughly test the installation/patching of + an integration we must be able to unpatch it and then subsequently + patch it again. + + For example:: + + import redis + from ddtrace.contrib.redis import unpatch + + ddtrace.patch(redis=True) + unpatch() + ddtrace.patch(redis=True) + self.assert_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + module = importlib.import_module(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + self.assert_module_patched(module) + self.__unpatch_func__() + self.assert_not_module_patched(module) + self.patch(**{self.__integration_name__: True}) + self.assert_module_patched(module) + + @raise_if_no_attrs + def test_patch_import_unpatch_patch(self): + """ + To ensure that we can thoroughly test the installation/patching of + an integration we must be able to unpatch it and then subsequently + patch it again.
+ + For example:: + + from ddtrace.contrib.redis import unpatch + + ddtrace.patch(redis=True) + import redis + unpatch() + ddtrace.patch(redis=True) + self.assert_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + module = importlib.import_module(self.__module_name__) + self.assert_module_patched(module) + self.__unpatch_func__() + self.assert_not_module_patched(module) + self.patch(**{self.__integration_name__: True}) + self.assert_module_patched(module) + + @raise_if_no_attrs + def test_patch_unpatch_import_patch(self): + """ + To ensure that we can thoroughly test the installation/patching of + an integration we must be able to unpatch it and then subsequently + patch it again. + + For example:: + + from ddtrace.contrib.redis import unpatch + + ddtrace.patch(redis=True) + unpatch() + import redis + ddtrace.patch(redis=True) + self.assert_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + self.__unpatch_func__() + module = importlib.import_module(self.__module_name__) + self.assert_not_module_patched(module) + self.patch(**{self.__integration_name__: True}) + self.assert_module_patched(module) + + @raise_if_no_attrs + def test_patch_unpatch_patch_import(self): + """ + To ensure that we can thoroughly test the installation/patching of + an integration we must be able to unpatch it and then subsequently + patch it again. + + For example:: + + from ddtrace.contrib.redis import unpatch + + ddtrace.patch(redis=True) + unpatch() + ddtrace.patch(redis=True) + import redis + self.assert_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + self.__unpatch_func__() + self.patch(**{self.__integration_name__: True}) + module = importlib.import_module(self.__module_name__) + self.assert_module_patched(module) + + @raise_if_no_attrs + def test_unpatch_patch_import(self): + """ + Make sure unpatching before patch does not break patching. + + For example:: + + from ddtrace.contrib.redis import unpatch + unpatch() + ddtrace.patch(redis=True) + import redis + self.assert_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + self.__unpatch_func__() + self.patch(**{self.__integration_name__: True}) + module = importlib.import_module(self.__module_name__) + self.assert_module_patched(module) + + @raise_if_no_attrs + def test_patch_unpatch_import(self): + """ + To ensure that we can thoroughly test the installation/patching of + an integration we must be able to unpatch it before importing the + library. + + For example:: + + ddtrace.patch(redis=True) + from ddtrace.contrib.redis import unpatch + unpatch() + import redis + self.assert_not_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + self.__unpatch_func__() + module = importlib.import_module(self.__module_name__) + self.assert_not_module_patched(module) + + @raise_if_no_attrs + def test_import_unpatch_patch(self): + """ + To ensure that we can thoroughly test the installation/patching of + an integration we must be able to unpatch it before patching.
+ + For example:: + + import redis + from ddtrace.contrib.redis import unpatch + unpatch() + ddtrace.patch(redis=True) + self.assert_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + module = importlib.import_module(self.__module_name__) + self.__unpatch_func__() + self.assert_not_module_patched(module) + self.patch(**{self.__integration_name__: True}) + self.assert_module_patched(module) + + @raise_if_no_attrs + def test_import_patch_unpatch(self): + """ + To ensure that we can thoroughly test the installation/patching of + an integration we must be able to unpatch it after patching. + + For example:: + + import redis + from ddtrace.contrib.redis import unpatch + ddtrace.patch(redis=True) + unpatch() + self.assert_not_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + module = importlib.import_module(self.__module_name__) + self.assert_not_module_patched(module) + self.patch(**{self.__integration_name__: True}) + self.assert_module_patched(module) + self.__unpatch_func__() + self.assert_not_module_patched(module) + + @raise_if_no_attrs + def test_patch_import_unpatch(self): + """ + To ensure that we can thoroughly test the installation/patching of + an integration we must be able to unpatch it after patching. + + For example:: + + from ddtrace.contrib.redis import unpatch + ddtrace.patch(redis=True) + import redis + unpatch() + self.assert_not_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + module = importlib.import_module(self.__module_name__) + self.assert_module_patched(module) + self.__unpatch_func__() + self.assert_not_module_patched(module) + + @raise_if_no_attrs + def test_import_patch_unpatch_unpatch(self): + """ + Unpatching twice should be a no-op. + + For example:: + + import redis + from ddtrace.contrib.redis import unpatch + + ddtrace.patch(redis=True) + self.assert_module_patched(redis) + unpatch() + self.assert_not_module_patched(redis) + unpatch() + self.assert_not_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + module = importlib.import_module(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + self.assert_module_patched(module) + self.__unpatch_func__() + self.assert_not_module_patched(module) + self.__unpatch_func__() + self.assert_not_module_patched(module) + + @raise_if_no_attrs + def test_patch_unpatch_import_unpatch(self): + """ + Unpatching twice should be a no-op. + + For example:: + + from ddtrace.contrib.redis import unpatch + + ddtrace.patch(redis=True) + unpatch() + import redis + self.assert_not_module_patched(redis) + unpatch() + self.assert_not_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + self.__unpatch_func__() + module = importlib.import_module(self.__module_name__) + self.assert_not_module_patched(module) + self.__unpatch_func__() + self.assert_not_module_patched(module) + + @raise_if_no_attrs + def test_patch_unpatch_unpatch_import(self): + """ + Unpatching twice should be a no-op.
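+ + (Editorial note: unpatch() implementations typically consult the same + guard flag, so a second call falls through without touching the + already-restored attributes.)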
+ + For example:: + + from ddtrace.contrib.redis import unpatch + + ddtrace.patch(redis=True) + unpatch() + unpatch() + import redis + self.assert_not_module_patched(redis) + """ + self.assert_not_module_imported(self.__module_name__) + self.patch(**{self.__integration_name__: True}) + self.__unpatch_func__() + self.__unpatch_func__() + module = importlib.import_module(self.__module_name__) + self.assert_not_module_patched(module) diff --git a/tests/contrib/psycopg/__init__.py b/tests/contrib/psycopg/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/psycopg/test_psycopg.py b/tests/contrib/psycopg/test_psycopg.py new file mode 100644 index 0000000000..2955b0ec36 --- /dev/null +++ b/tests/contrib/psycopg/test_psycopg.py @@ -0,0 +1,343 @@ +# stdlib +import time + +# 3p +import psycopg2 +from psycopg2 import extensions +from psycopg2 import extras + +from unittest import skipIf + +# project +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.psycopg import connection_factory +from ddtrace.contrib.psycopg.patch import patch, unpatch, PSYCOPG2_VERSION +from ddtrace import Pin + +# testing +from tests.opentracer.utils import init_tracer +from tests.contrib.config import POSTGRES_CONFIG +from ...base import BaseTracerTestCase +from ...utils.tracer import DummyTracer + + +if PSYCOPG2_VERSION >= (2, 7): + from psycopg2.sql import SQL + +TEST_PORT = POSTGRES_CONFIG['port'] + + +class PsycopgCore(BaseTracerTestCase): + + # default service + TEST_SERVICE = 'postgres' + + def setUp(self): + super(PsycopgCore, self).setUp() + + patch() + + def tearDown(self): + super(PsycopgCore, self).tearDown() + + unpatch() + + def _get_conn(self, service=None): + conn = psycopg2.connect(**POSTGRES_CONFIG) + pin = Pin.get_from(conn) + if pin: + pin.clone(service=service, tracer=self.tracer).onto(conn) + + return conn + + def test_patch_unpatch(self): + # Test patch idempotence + patch() + patch() + + service = 'fo' + + conn = self._get_conn(service=service) + conn.cursor().execute("""select 'blah'""") + self.assert_structure(dict(name='postgres.query', service=service)) + self.reset() + + # Test unpatch + unpatch() + + conn = self._get_conn() + conn.cursor().execute("""select 'blah'""") + self.assert_has_no_spans() + + # Test patch again + patch() + + conn = self._get_conn(service=service) + conn.cursor().execute("""select 'blah'""") + self.assert_structure(dict(name='postgres.query', service=service)) + + def assert_conn_is_traced(self, db, service): + + # ensure the traced psycopg client doesn't add non-standard + # methods + try: + db.execute("""select 'foobar'""") + except AttributeError: + pass + + # Ensure we can run a query and it's correctly traced + q = """select 'foobarblah'""" + + start = time.time() + cursor = db.cursor() + res = cursor.execute(q) + self.assertIsNone(res) + rows = cursor.fetchall() + end = time.time() + + self.assertEquals(rows, [('foobarblah',)]) + + self.assert_structure( + dict(name='postgres.query', resource=q, service=service, error=0, span_type='sql'), + ) + root = self.get_root_span() + self.assertIsNone(root.get_tag('sql.query')) + assert start <= root.start <= end + assert root.duration <= end - start + # confirm analytics disabled by default + self.reset() + + # run a query with an error and ensure all is well + q = """select * from some_non_existent_table""" + cur = db.cursor() + try: + cur.execute(q) + except Exception: + pass + else: + assert 0, 'should have an error' + + self.assert_structure( + dict(
name='postgres.query', + resource=q, + service=service, + error=1, + span_type='sql', + meta={ + 'out.host': '127.0.0.1', + }, + metrics={ + 'out.port': TEST_PORT, + }, + ), + ) + root = self.get_root_span() + self.assertIsNone(root.get_tag('sql.query')) + self.reset() + + def test_opentracing_propagation(self): + # ensure OpenTracing plays well with our integration + query = """SELECT 'tracing'""" + + db = self._get_conn() + ot_tracer = init_tracer('psycopg-svc', self.tracer) + + with ot_tracer.start_active_span('db.access'): + cursor = db.cursor() + cursor.execute(query) + rows = cursor.fetchall() + + self.assertEquals(rows, [('tracing',)]) + + self.assert_structure( + dict(name='db.access', service='psycopg-svc'), + ( + dict(name='postgres.query', resource=query, service='postgres', error=0, span_type='sql'), + ), + ) + self.reset() + + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + db = self._get_conn() + ot_tracer = init_tracer('psycopg-svc', self.tracer) + + with ot_tracer.start_active_span('db.access'): + cursor = db.cursor() + cursor.execute(query) + rows = cursor.fetchall() + + self.assertEquals(rows, [('tracing',)]) + + self.assert_structure( + dict(name='db.access', service='psycopg-svc'), + ( + dict(name='postgres.query', resource=query, service='postgres', error=0, span_type='sql'), + dict(name='postgres.query.fetchall', resource=query, service='postgres', error=0, span_type='sql'), + ), + ) + + @skipIf(PSYCOPG2_VERSION < (2, 5), 'context manager not available in psycopg2==2.4') + def test_cursor_ctx_manager(self): + # ensure cursors work with context managers + # https://github.com/DataDog/dd-trace-py/issues/228 + conn = self._get_conn() + t = type(conn.cursor()) + with conn.cursor() as cur: + assert t == type(cur), '{} != {}'.format(t, type(cur)) + cur.execute(query="""select 'blah'""") + rows = cur.fetchall() + assert len(rows) == 1, rows + assert rows[0][0] == 'blah' + + self.assert_structure( + dict(name='postgres.query'), + ) + + def test_disabled_execute(self): + conn = self._get_conn() + self.tracer.enabled = False + # these calls were crashing with a previous version of the code. + conn.cursor().execute(query="""select 'blah'""") + conn.cursor().execute("""select 'blah'""") + self.assert_has_no_spans() + + @skipIf(PSYCOPG2_VERSION < (2, 5), '_json is not available in psycopg2==2.4') + def test_manual_wrap_extension_types(self): + conn = self._get_conn() + # NOTE: this will crash if it doesn't work. + # _ext.register_type(_ext.UUID, conn_or_curs) + # TypeError: argument 2 must be a connection, cursor or None + extras.register_uuid(conn_or_curs=conn) + + # NOTE: this will crash if it doesn't work. + # _ext.register_default_json(conn) + # TypeError: argument 2 must be a connection, cursor or None + extras.register_default_json(conn) + + def test_manual_wrap_extension_adapt(self): + conn = self._get_conn() + # NOTE: this will crash if it doesn't work. + # items = _ext.adapt([1, 2, 3]) + # items.prepare(conn) + # TypeError: argument 2 must be a connection, cursor or None + items = extensions.adapt([1, 2, 3]) + items.prepare(conn) + + # NOTE: this will crash if it doesn't work. 
+ # binary = _ext.adapt(b'12345') + # binary.prepare(conn) + # TypeError: argument 2 must be a connection, cursor or None + binary = extensions.adapt(b'12345') + binary.prepare(conn) + + @skipIf(PSYCOPG2_VERSION < (2, 7), 'quote_ident not available in psycopg2<2.7') + def test_manual_wrap_extension_quote_ident(self): + from ddtrace import patch_all + patch_all() + from psycopg2.extensions import quote_ident + + # NOTE: this will crash if it doesn't work. + # TypeError: argument 2 must be a connection or a cursor + conn = psycopg2.connect(**POSTGRES_CONFIG) + quote_ident('foo', conn) + + def test_connect_factory(self): + services = ['db', 'another'] + for service in services: + conn = self._get_conn(service=service) + self.assert_conn_is_traced(conn, service) + + # ensure we have the service types + service_meta = self.tracer.writer.pop_services() + expected = {} + self.assertEquals(service_meta, expected) + + def test_commit(self): + conn = self._get_conn() + conn.commit() + + self.assert_structure( + dict(name='postgres.connection.commit', service=self.TEST_SERVICE) + ) + + def test_rollback(self): + conn = self._get_conn() + conn.rollback() + + self.assert_structure( + dict(name='postgres.connection.rollback', service=self.TEST_SERVICE) + ) + + @skipIf(PSYCOPG2_VERSION < (2, 7), 'SQL string composition not available in psycopg2<2.7') + def test_composed_query(self): + """ Checks whether execution of composed SQL string is traced """ + query = SQL(' union all ').join( + [SQL("""select 'one' as x"""), + SQL("""select 'two' as x""")]) + db = self._get_conn() + + with db.cursor() as cur: + cur.execute(query=query) + rows = cur.fetchall() + assert len(rows) == 2, rows + assert rows[0][0] == 'one' + assert rows[1][0] == 'two' + + self.assert_structure( + dict(name='postgres.query', resource=query.as_string(db)), + ) + + def test_analytics_default(self): + conn = self._get_conn() + conn.cursor().execute("""select 'blah'""") + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + conn = self._get_conn() + conn.cursor().execute("""select 'blah'""") + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True) + ): + conn = self._get_conn() + conn.cursor().execute("""select 'blah'""") + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) + + +def test_backwards_compatibility_v3(): + tracer = DummyTracer() + factory = connection_factory(tracer, service='my-postgres-db') + conn = psycopg2.connect(connection_factory=factory, **POSTGRES_CONFIG) + conn.cursor().execute("""select 'blah'""") + + +@skipIf(PSYCOPG2_VERSION < (2, 7), 'quote_ident not available in psycopg2<2.7') +def test_manual_wrap_extension_quote_ident_standalone(): + from ddtrace import patch_all + patch_all() + from psycopg2.extensions import quote_ident + + # NOTE: this will crash if it doesn't work.
+ # TypeError: argument 2 must be a connection or a cursor + conn = psycopg2.connect(**POSTGRES_CONFIG) + quote_ident('foo', conn) diff --git a/tests/contrib/pylibmc/__init__.py b/tests/contrib/pylibmc/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/pylibmc/test.py b/tests/contrib/pylibmc/test.py new file mode 100644 index 0000000000..b9444f0e42 --- /dev/null +++ b/tests/contrib/pylibmc/test.py @@ -0,0 +1,320 @@ +# stdlib +import time +from unittest.case import SkipTest + +# 3p +import pylibmc + +# project +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.pylibmc import TracedClient +from ddtrace.contrib.pylibmc.patch import patch, unpatch +from ddtrace.ext import memcached + +# testing +from ...opentracer.utils import init_tracer +from ...contrib.config import MEMCACHED_CONFIG as cfg +from ...base import BaseTracerTestCase + + +class PylibmcCore(object): + """Core of the test suite for pylibmc + + Shared tests between the patch and TracedClient interface. + Will be merged back into a single class once the TracedClient is deprecated. + """ + + TEST_SERVICE = memcached.SERVICE + + def get_client(self): + # Implement me + pass + + def test_upgrade(self): + raise SkipTest('upgrade memcached') + # add tests for touch, cas, gets etc + + def test_append_prepend(self): + client, tracer = self.get_client() + # test + start = time.time() + client.set('a', 'crow') + client.prepend('a', 'holy ') + client.append('a', '!') + + # FIXME[matt] there is a bug in pylibmc & python 3 (perhaps with just + # some versions of libmemcached?) where append/prepend are replaced + # with get. our traced versions do the right thing, so skipping this + # test. + try: + assert client.get('a') == 'holy crow!' + except AssertionError: + pass + + end = time.time() + # verify spans + spans = tracer.writer.pop() + for s in spans: + self._verify_cache_span(s, start, end) + expected_resources = sorted(['append', 'prepend', 'get', 'set']) + resources = sorted(s.resource for s in spans) + assert expected_resources == resources + + def test_incr_decr(self): + client, tracer = self.get_client() + # test + start = time.time() + client.set('a', 1) + client.incr('a', 2) + client.decr('a', 1) + v = client.get('a') + assert v == 2 + end = time.time() + # verify spans + spans = tracer.writer.pop() + for s in spans: + self._verify_cache_span(s, start, end) + expected_resources = sorted(['get', 'set', 'incr', 'decr']) + resources = sorted(s.resource for s in spans) + assert expected_resources == resources + + def test_incr_decr_ot(self): + """OpenTracing version of test_incr_decr.""" + client, tracer = self.get_client() + ot_tracer = init_tracer('memcached', tracer) + + start = time.time() + with ot_tracer.start_active_span('mc_ops'): + client.set('a', 1) + client.incr('a', 2) + client.decr('a', 1) + v = client.get('a') + assert v == 2 + end = time.time() + + # verify spans + spans = tracer.writer.pop() + ot_span = spans[0] + + assert ot_span.name == 'mc_ops' + + for s in spans[1:]: + assert s.parent_id == ot_span.span_id + self._verify_cache_span(s, start, end) + expected_resources = sorted(['get', 'set', 'incr', 'decr']) + resources = sorted(s.resource for s in spans[1:]) + assert expected_resources == resources + + def test_clone(self): + # ensure cloned connections are traced as well.
+ client, tracer = self.get_client() + cloned = client.clone() + start = time.time() + cloned.get('a') + end = time.time() + spans = tracer.writer.pop() + for s in spans: + self._verify_cache_span(s, start, end) + expected_resources = ['get'] + resources = sorted(s.resource for s in spans) + assert expected_resources == resources + + def test_get_set_multi(self): + client, tracer = self.get_client() + # test + start = time.time() + client.set_multi({'a': 1, 'b': 2}) + out = client.get_multi(['a', 'c']) + assert out == {'a': 1} + client.delete_multi(['a', 'c']) + end = time.time() + # verify + spans = tracer.writer.pop() + for s in spans: + self._verify_cache_span(s, start, end) + expected_resources = sorted(['get_multi', 'set_multi', 'delete_multi']) + resources = sorted(s.resource for s in spans) + assert expected_resources == resources + + def test_get_set_multi_prefix(self): + client, tracer = self.get_client() + # test + start = time.time() + client.set_multi({'a': 1, 'b': 2}, key_prefix='foo') + out = client.get_multi(['a', 'c'], key_prefix='foo') + assert out == {'a': 1} + client.delete_multi(['a', 'c'], key_prefix='foo') + end = time.time() + # verify + spans = tracer.writer.pop() + for s in spans: + self._verify_cache_span(s, start, end) + assert s.get_tag('memcached.query') == '%s foo' % s.resource + expected_resources = sorted(['get_multi', 'set_multi', 'delete_multi']) + resources = sorted(s.resource for s in spans) + assert expected_resources == resources + + def test_get_set_delete(self): + client, tracer = self.get_client() + # test + k = u'cafe' + v = 'val-foo' + start = time.time() + client.delete(k) # just in case + out = client.get(k) + assert out is None, out + client.set(k, v) + out = client.get(k) + assert out == v + end = time.time() + # verify + spans = tracer.writer.pop() + for s in spans: + self._verify_cache_span(s, start, end) + assert s.get_tag('memcached.query') == '%s %s' % (s.resource, k) + expected_resources = sorted(['get', 'get', 'delete', 'set']) + resources = sorted(s.resource for s in spans) + assert expected_resources == resources + + def _verify_cache_span(self, s, start, end): + assert s.start > start + assert s.start + s.duration < end + assert s.service == self.TEST_SERVICE + assert s.span_type == 'cache' + assert s.name == 'memcached.cmd' + assert s.get_tag('out.host') == cfg['host'] + assert s.get_metric('out.port') == cfg['port'] + + def test_analytics_default(self): + client, tracer = self.get_client() + client.set('a', 'crow') + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with self.override_config( + 'pylibmc', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + client, tracer = self.get_client() + client.set('a', 'crow') + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with self.override_config( + 'pylibmc', + dict(analytics_enabled=True) + ): + client, tracer = self.get_client() + client.set('a', 'crow') + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) + + def test_disabled(self): + """ + Ensure client works when the tracer is disabled + """ + client, tracer = self.get_client() + try: + tracer.enabled = False + + client.set('a', 'crow') + + spans = self.get_spans() + assert len(spans) == 0 + 
finally: + tracer.enabled = True + + +class TestPylibmcLegacy(BaseTracerTestCase, PylibmcCore): + """Test suite for the tracing of pylibmc with the legacy TracedClient interface""" + + TEST_SERVICE = 'mc-legacy' + + def get_client(self): + url = '%s:%s' % (cfg['host'], cfg['port']) + raw_client = pylibmc.Client([url]) + raw_client.flush_all() + + client = TracedClient(raw_client, tracer=self.tracer, service=self.TEST_SERVICE) + return client, self.tracer + + +class TestPylibmcPatchDefault(BaseTracerTestCase, PylibmcCore): + """Test suite for the tracing of pylibmc with the default lib patching""" + + def setUp(self): + super(TestPylibmcPatchDefault, self).setUp() + patch() + + def tearDown(self): + unpatch() + super(TestPylibmcPatchDefault, self).tearDown() + + def get_client(self): + url = '%s:%s' % (cfg['host'], cfg['port']) + client = pylibmc.Client([url]) + client.flush_all() + + Pin.get_from(client).clone(tracer=self.tracer).onto(client) + + return client, self.tracer + + +class TestPylibmcPatch(TestPylibmcPatchDefault): + """Test suite for the tracing of pylibmc with a configured lib patching""" + + TEST_SERVICE = 'mc-custom-patch' + + def get_client(self): + client, tracer = TestPylibmcPatchDefault.get_client(self) + + Pin.get_from(client).clone(service=self.TEST_SERVICE).onto(client) + + return client, tracer + + def test_patch_unpatch(self): + url = '%s:%s' % (cfg['host'], cfg['port']) + + # Test patch idempotence + patch() + patch() + + client = pylibmc.Client([url]) + Pin.get_from(client).clone( + service=self.TEST_SERVICE, + tracer=self.tracer).onto(client) + + client.set('a', 1) + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + + # Test unpatch + unpatch() + + client = pylibmc.Client([url]) + client.set('a', 1) + + spans = self.tracer.writer.pop() + assert not spans, spans + + # Test patch again + patch() + + client = pylibmc.Client([url]) + Pin(service=self.TEST_SERVICE, tracer=self.tracer).onto(client) + client.set('a', 1) + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 diff --git a/tests/contrib/pylons/__init__.py b/tests/contrib/pylons/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/pylons/app/__init__.py b/tests/contrib/pylons/app/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/pylons/app/controllers/__init__.py b/tests/contrib/pylons/app/controllers/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/pylons/app/controllers/root.py b/tests/contrib/pylons/app/controllers/root.py new file mode 100644 index 0000000000..f0fb71c24f --- /dev/null +++ b/tests/contrib/pylons/app/controllers/root.py @@ -0,0 +1,44 @@ +from pylons.controllers import WSGIController + +from ..lib.helpers import ExceptionWithCodeMethod, get_render_fn + + +class BaseController(WSGIController): + + def __call__(self, environ, start_response): + """Invoke the Controller""" + # WSGIController.__call__ dispatches to the Controller method + # the request is routed to. 
This routing information is + # available in environ['pylons.routes_dict'] + return WSGIController.__call__(self, environ, start_response) + + +class RootController(BaseController): + """Controller used for most tests""" + + def index(self): + return 'Hello World' + + def raise_exception(self): + raise Exception('Ouch!') + + def raise_wrong_code(self): + e = Exception('Ouch!') + e.code = 'wrong formatted code' + raise e + + def raise_code_method(self): + raise ExceptionWithCodeMethod('Ouch!') + + def raise_custom_code(self): + e = Exception('Ouch!') + e.code = '512' + raise e + + def render(self): + render = get_render_fn() + return render('/template.mako') + + def render_exception(self): + render = get_render_fn() + return render('/exception.mako') diff --git a/tests/contrib/pylons/app/lib/__init__.py b/tests/contrib/pylons/app/lib/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/pylons/app/lib/base.py b/tests/contrib/pylons/app/lib/base.py new file mode 100644 index 0000000000..0e9d09cfd0 --- /dev/null +++ b/tests/contrib/pylons/app/lib/base.py @@ -0,0 +1 @@ +# this file is required when Pylons calls the `legacy` module diff --git a/tests/contrib/pylons/app/lib/helpers.py b/tests/contrib/pylons/app/lib/helpers.py new file mode 100644 index 0000000000..769c2be942 --- /dev/null +++ b/tests/contrib/pylons/app/lib/helpers.py @@ -0,0 +1,29 @@ +from webhelpers import * # noqa + + +class ExceptionWithCodeMethod(Exception): + """Use case where the status code is defined by + the `code()` method. + """ + def __init__(self, message): + super(ExceptionWithCodeMethod, self).__init__(message) + + def code(): + pass + + +class AppGlobals(object): + """Object used to store application globals.""" + pass + + +def get_render_fn(): + """Re-import the function every time so that double-patching + is correctly tested. + """ + try: + from pylons.templating import render_mako as render + except ImportError: + from pylons.templating import render + + return render diff --git a/tests/contrib/pylons/app/middleware.py b/tests/contrib/pylons/app/middleware.py new file mode 100644 index 0000000000..13fd8c6fbb --- /dev/null +++ b/tests/contrib/pylons/app/middleware.py @@ -0,0 +1,43 @@ +from webob import Request, Response + + +class ExceptionMiddleware(object): + """A middleware which raises an exception.""" + def __init__(self, app): + self.app = app + + def __call__(self, environ, start_response): + raise Exception('Middleware exception') + + +class ExceptionToSuccessMiddleware(object): + """A middleware which catches any exceptions that occur in a later + middleware and returns a successful request.
+ """ + def __init__(self, app): + self.app = app + + def __call__(self, environ, start_response): + req = Request(environ) + try: + response = req.get_response(self.app) + except Exception: + response = Response() + response.status_int = 200 + response.body = 'An error has been handled appropriately' + return response(environ, start_response) + + +class ExceptionToClientErrorMiddleware(object): + def __init__(self, app): + self.app = app + + def __call__(self, environ, start_response): + req = Request(environ) + try: + response = req.get_response(self.app) + except Exception: + response = Response() + response.status_int = 404 + response.body = 'An error has occured with proper client error handling' + return response(environ, start_response) diff --git a/tests/contrib/pylons/app/router.py b/tests/contrib/pylons/app/router.py new file mode 100644 index 0000000000..54ebd7e9ed --- /dev/null +++ b/tests/contrib/pylons/app/router.py @@ -0,0 +1,20 @@ +import os + +from routes import Mapper + + +def create_routes(): + """Change this function if you need to add more routes + to your Pylons test app. + """ + app_dir = os.path.dirname(os.path.abspath(__file__)) + controller_dir = os.path.join(app_dir, 'controllers') + routes = Mapper(directory=controller_dir) + routes.connect('/', controller='root', action='index') + routes.connect('/raise_exception', controller='root', action='raise_exception') + routes.connect('/raise_wrong_code', controller='root', action='raise_wrong_code') + routes.connect('/raise_custom_code', controller='root', action='raise_custom_code') + routes.connect('/raise_code_method', controller='root', action='raise_code_method') + routes.connect('/render', controller='root', action='render') + routes.connect('/render_exception', controller='root', action='render_exception') + return routes diff --git a/tests/contrib/pylons/app/templates/exception.mako b/tests/contrib/pylons/app/templates/exception.mako new file mode 100644 index 0000000000..370df1da38 --- /dev/null +++ b/tests/contrib/pylons/app/templates/exception.mako @@ -0,0 +1 @@ +${1/0} diff --git a/tests/contrib/pylons/app/templates/template.mako b/tests/contrib/pylons/app/templates/template.mako new file mode 100644 index 0000000000..cd0875583a --- /dev/null +++ b/tests/contrib/pylons/app/templates/template.mako @@ -0,0 +1 @@ +Hello world! 
diff --git a/tests/contrib/pylons/app/web.py b/tests/contrib/pylons/app/web.py new file mode 100644 index 0000000000..5e98f10ffd --- /dev/null +++ b/tests/contrib/pylons/app/web.py @@ -0,0 +1,41 @@ +import os + +from mako.lookup import TemplateLookup + +from pylons import config +from pylons.wsgiapp import PylonsApp + +from routes.middleware import RoutesMiddleware +from beaker.middleware import SessionMiddleware, CacheMiddleware + +from paste.registry import RegistryManager + +from .router import create_routes +from .lib.helpers import AppGlobals + + +def make_app(global_conf, full_stack=True, **app_conf): + # load Pylons environment + root = os.path.dirname(os.path.abspath(__file__)) + paths = dict( + templates=[os.path.join(root, 'templates')], + ) + config.init_app(global_conf, app_conf, paths=paths) + config['pylons.package'] = 'tests.contrib.pylons.app' + config['pylons.app_globals'] = AppGlobals() + + # set Pylons routes + config['routes.map'] = create_routes() + + # Create the Mako TemplateLookup, with the default auto-escaping + config['pylons.app_globals'].mako_lookup = TemplateLookup( + directories=paths['templates'], + ) + + # define a default middleware stack + app = PylonsApp() + app = RoutesMiddleware(app, config['routes.map']) + app = SessionMiddleware(app, config) + app = CacheMiddleware(app, config) + app = RegistryManager(app) + return app diff --git a/tests/contrib/pylons/test.ini b/tests/contrib/pylons/test.ini new file mode 100644 index 0000000000..ea5a165ce5 --- /dev/null +++ b/tests/contrib/pylons/test.ini @@ -0,0 +1,9 @@ +[DEFAULT] +debug = false + +[app:main] +use = call:tests.contrib.pylons.app.web:make_app +full_stack = true +cache_dir = %(here)s/.cache +beaker.session.key = helloworld +beaker.session.secret = somesecret diff --git a/tests/contrib/pylons/test_pylons.py b/tests/contrib/pylons/test_pylons.py new file mode 100644 index 0000000000..3bf903e01a --- /dev/null +++ b/tests/contrib/pylons/test_pylons.py @@ -0,0 +1,429 @@ +import os + +from routes import url_for +from paste import fixture +from paste.deploy import loadapp +import pytest + +from ddtrace import config +from ddtrace.ext import http, errors +from ddtrace.constants import SAMPLING_PRIORITY_KEY, ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.pylons import PylonsTraceMiddleware + +from tests.opentracer.utils import init_tracer +from ...base import BaseTracerTestCase +from ...utils import assert_span_http_status_code + + +class PylonsTestCase(BaseTracerTestCase): + """Pylons Test Controller that is used to test specific + cases defined in the Pylons controller. To test a new behavior, + add a new action in the `app.controllers.root` module. + """ + conf_dir = os.path.dirname(os.path.abspath(__file__)) + + def setUp(self): + super(PylonsTestCase, self).setUp() + # initialize a real traced Pylons app + wsgiapp = loadapp('config:test.ini', relative_to=PylonsTestCase.conf_dir) + self._wsgiapp = wsgiapp + app = PylonsTraceMiddleware(wsgiapp, self.tracer, service='web') + self.app = fixture.TestApp(app) + + def test_controller_exception(self): + """Ensure exceptions thrown in controllers can be handled. + + No error tags should be set in the span. 
+ """ + from .app.middleware import ExceptionToSuccessMiddleware + wsgiapp = ExceptionToSuccessMiddleware(self._wsgiapp) + app = PylonsTraceMiddleware(wsgiapp, self.tracer, service='web') + + app = fixture.TestApp(app) + app.get(url_for(controller='root', action='raise_exception')) + + spans = self.tracer.writer.pop() + + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.service == 'web' + assert span.resource == 'root.raise_exception' + assert span.error == 0 + assert span.get_tag(http.URL) == 'http://localhost:80/raise_exception' + assert_span_http_status_code(span, 200) + assert http.QUERY_STRING not in span.meta + assert span.get_tag(errors.ERROR_MSG) is None + assert span.get_tag(errors.ERROR_TYPE) is None + assert span.get_tag(errors.ERROR_STACK) is None + assert span.span_type == 'web' + + def test_mw_exc_success(self): + """Ensure exceptions can be properly handled by other middleware. + + No error should be reported in the span. + """ + from .app.middleware import ExceptionMiddleware, ExceptionToSuccessMiddleware + wsgiapp = ExceptionMiddleware(self._wsgiapp) + wsgiapp = ExceptionToSuccessMiddleware(wsgiapp) + app = PylonsTraceMiddleware(wsgiapp, self.tracer, service='web') + app = fixture.TestApp(app) + + app.get(url_for(controller='root', action='index')) + + spans = self.tracer.writer.pop() + + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.service == 'web' + assert span.resource == 'None.None' + assert span.error == 0 + assert span.get_tag(http.URL) == 'http://localhost:80/' + assert_span_http_status_code(span, 200) + assert span.get_tag(errors.ERROR_MSG) is None + assert span.get_tag(errors.ERROR_TYPE) is None + assert span.get_tag(errors.ERROR_STACK) is None + + def test_middleware_exception(self): + """Ensure exceptions raised in middleware are properly handled. + + Uncaught exceptions should result in error tagged spans. 
+ """ + from .app.middleware import ExceptionMiddleware + wsgiapp = ExceptionMiddleware(self._wsgiapp) + app = PylonsTraceMiddleware(wsgiapp, self.tracer, service='web') + app = fixture.TestApp(app) + + with pytest.raises(Exception): + app.get(url_for(controller='root', action='index')) + + spans = self.tracer.writer.pop() + + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.service == 'web' + assert span.resource == 'None.None' + assert span.error == 1 + assert span.get_tag(http.URL) == 'http://localhost:80/' + assert_span_http_status_code(span, 500) + assert span.get_tag(errors.ERROR_MSG) == 'Middleware exception' + assert span.get_tag(errors.ERROR_TYPE) == 'exceptions.Exception' + assert span.get_tag(errors.ERROR_STACK) + + def test_exc_success(self): + from .app.middleware import ExceptionToSuccessMiddleware + wsgiapp = ExceptionToSuccessMiddleware(self._wsgiapp) + app = PylonsTraceMiddleware(wsgiapp, self.tracer, service='web') + app = fixture.TestApp(app) + + app.get(url_for(controller='root', action='raise_exception')) + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.service == 'web' + assert span.resource == 'root.raise_exception' + assert span.error == 0 + assert span.get_tag(http.URL) == 'http://localhost:80/raise_exception' + assert_span_http_status_code(span, 200) + assert span.get_tag(errors.ERROR_MSG) is None + assert span.get_tag(errors.ERROR_TYPE) is None + assert span.get_tag(errors.ERROR_STACK) is None + + def test_exc_client_failure(self): + from .app.middleware import ExceptionToClientErrorMiddleware + wsgiapp = ExceptionToClientErrorMiddleware(self._wsgiapp) + app = PylonsTraceMiddleware(wsgiapp, self.tracer, service='web') + app = fixture.TestApp(app) + + app.get(url_for(controller='root', action='raise_exception'), status=404) + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.service == 'web' + assert span.resource == 'root.raise_exception' + assert span.error == 0 + assert span.get_tag(http.URL) == 'http://localhost:80/raise_exception' + assert_span_http_status_code(span, 404) + assert span.get_tag(errors.ERROR_MSG) is None + assert span.get_tag(errors.ERROR_TYPE) is None + assert span.get_tag(errors.ERROR_STACK) is None + + def test_success_200(self, query_string=''): + if query_string: + fqs = '?' 
+ query_string + else: + fqs = '' + res = self.app.get(url_for(controller='root', action='index') + fqs) + assert res.status == 200 + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.service == 'web' + assert span.resource == 'root.index' + assert_span_http_status_code(span, 200) + if config.pylons.trace_query_string: + assert span.meta.get(http.QUERY_STRING) == query_string + else: + assert http.QUERY_STRING not in span.meta + assert span.error == 0 + + def test_query_string(self): + return self.test_success_200('foo=bar') + + def test_multi_query_string(self): + return self.test_success_200('foo=bar&foo=baz&x=y') + + def test_query_string_trace(self): + with self.override_http_config('pylons', dict(trace_query_string=True)): + return self.test_success_200('foo=bar') + + def test_multi_query_string_trace(self): + with self.override_http_config('pylons', dict(trace_query_string=True)): + return self.test_success_200('foo=bar&foo=baz&x=y') + + def test_analytics_global_on_integration_default(self): + """ + When making a request + When an integration's trace search sample rate is not set and trace search is enabled globally + We expect the root span to have the appropriate tag + """ + with self.override_global_config(dict(analytics_enabled=True)): + res = self.app.get(url_for(controller='root', action='index')) + self.assertEqual(res.status, 200) + + self.assert_structure( + dict(name='pylons.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 1.0}) + ) + + def test_analytics_global_on_integration_on(self): + """ + When making a request + When an integration's trace search is enabled with a sample rate set and trace search is enabled globally + We expect the root span to have the appropriate tag + """ + with self.override_global_config(dict(analytics_enabled=True)): + with self.override_config('pylons', dict(analytics_enabled=True, analytics_sample_rate=0.5)): + res = self.app.get(url_for(controller='root', action='index')) + self.assertEqual(res.status, 200) + + self.assert_structure( + dict(name='pylons.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}) + ) + + def test_analytics_global_off_integration_default(self): + """ + When making a request + When an integration's trace search sample rate is not set and trace search is disabled globally + We expect the root span to not include the tag + """ + with self.override_global_config(dict(analytics_enabled=False)): + res = self.app.get(url_for(controller='root', action='index')) + self.assertEqual(res.status, 200) + + root = self.get_root_span() + self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_global_off_integration_on(self): + """ + When making a request + When an integration's trace search is enabled with a sample rate set and trace search is disabled globally + We expect the root span to have the appropriate tag + """ + with self.override_global_config(dict(analytics_enabled=False)): + with self.override_config('pylons', dict(analytics_enabled=True, analytics_sample_rate=0.5)): + res = self.app.get(url_for(controller='root', action='index')) + self.assertEqual(res.status, 200) + + self.assert_structure( + dict(name='pylons.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}) + ) + + def test_template_render(self): + res = self.app.get(url_for(controller='root', action='render')) + assert res.status == 200 + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 2 + request = spans[0] + template = spans[1]
+ + assert request.service == 'web' + assert request.resource == 'root.render' + assert_span_http_status_code(request, 200) + assert request.error == 0 + + assert template.service == 'web' + assert template.resource == 'pylons.render' + assert template.meta.get('template.name') == '/template.mako' + assert template.error == 0 + + def test_template_render_exception(self): + with pytest.raises(Exception): + self.app.get(url_for(controller='root', action='render_exception')) + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 2 + request = spans[0] + template = spans[1] + + assert request.service == 'web' + assert request.resource == 'root.render_exception' + assert_span_http_status_code(request, 500) + assert request.error == 1 + + assert template.service == 'web' + assert template.resource == 'pylons.render' + assert template.meta.get('template.name') == '/exception.mako' + assert template.error == 1 + assert template.get_tag('error.msg') == 'integer division or modulo by zero' + assert 'ZeroDivisionError: integer division or modulo by zero' in template.get_tag('error.stack') + + def test_failure_500(self): + with pytest.raises(Exception): + self.app.get(url_for(controller='root', action='raise_exception')) + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.service == 'web' + assert span.resource == 'root.raise_exception' + assert span.error == 1 + assert_span_http_status_code(span, 500) + assert span.get_tag('error.msg') == 'Ouch!' + assert span.get_tag(http.URL) == 'http://localhost:80/raise_exception' + assert 'Exception: Ouch!' in span.get_tag('error.stack') + + def test_failure_500_with_wrong_code(self): + with pytest.raises(Exception): + self.app.get(url_for(controller='root', action='raise_wrong_code')) + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.service == 'web' + assert span.resource == 'root.raise_wrong_code' + assert span.error == 1 + assert_span_http_status_code(span, 500) + assert span.meta.get(http.URL) == 'http://localhost:80/raise_wrong_code' + assert span.get_tag('error.msg') == 'Ouch!' + assert 'Exception: Ouch!' in span.get_tag('error.stack') + + def test_failure_500_with_custom_code(self): + with pytest.raises(Exception): + self.app.get(url_for(controller='root', action='raise_custom_code')) + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.service == 'web' + assert span.resource == 'root.raise_custom_code' + assert span.error == 1 + assert_span_http_status_code(span, 512) + assert span.meta.get(http.URL) == 'http://localhost:80/raise_custom_code' + assert span.get_tag('error.msg') == 'Ouch!' + assert 'Exception: Ouch!' in span.get_tag('error.stack') + + def test_failure_500_with_code_method(self): + with pytest.raises(Exception): + self.app.get(url_for(controller='root', action='raise_code_method')) + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.service == 'web' + assert span.resource == 'root.raise_code_method' + assert span.error == 1 + assert_span_http_status_code(span, 500) + assert span.meta.get(http.URL) == 'http://localhost:80/raise_code_method' + assert span.get_tag('error.msg') == 'Ouch!' 
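+    # The distributed tracing tests below exercise the Datadog HTTP propagation
+    # headers. As a minimal sketch, an upstream client would forward its trace
+    # context like so (the `parent` span and the `requests` call are hypothetical,
+    # shown only to illustrate the header contract asserted below):
+    #
+    #   headers = {
+    #       'x-datadog-trace-id': str(parent.trace_id),
+    #       'x-datadog-parent-id': str(parent.span_id),
+    #       'x-datadog-sampling-priority': '2',
+    #   }
+    #   requests.get('http://localhost:80/', headers=headers)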
+ + def test_distributed_tracing_default(self): + # ensure that distributed tracing is enabled by default + headers = { + 'x-datadog-trace-id': '100', + 'x-datadog-parent-id': '42', + 'x-datadog-sampling-priority': '2', + } + res = self.app.get(url_for(controller='root', action='index'), headers=headers) + assert res.status == 200 + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.trace_id == 100 + assert span.parent_id == 42 + assert span.get_metric(SAMPLING_PRIORITY_KEY) == 2 + + def test_distributed_tracing_disabled(self): + # ensure the distributed tracing propagator can be disabled + middleware = self.app.app + middleware._distributed_tracing = False + headers = { + 'x-datadog-trace-id': '100', + 'x-datadog-parent-id': '42', + 'x-datadog-sampling-priority': '2', + } + + res = self.app.get(url_for(controller='root', action='index'), headers=headers) + assert res.status == 200 + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 1 + span = spans[0] + + assert span.trace_id != 100 + assert span.parent_id != 42 + assert span.get_metric(SAMPLING_PRIORITY_KEY) != 2 + + def test_success_200_ot(self): + """OpenTracing version of test_success_200.""" + ot_tracer = init_tracer('pylons_svc', self.tracer) + + with ot_tracer.start_active_span('pylons_get'): + res = self.app.get(url_for(controller='root', action='index')) + assert res.status == 200 + + spans = self.tracer.writer.pop() + assert spans, spans + assert len(spans) == 2 + ot_span, dd_span = spans + + # confirm the parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.name == 'pylons_get' + assert ot_span.service == 'pylons_svc' + + assert dd_span.service == 'web' + assert dd_span.resource == 'root.index' + assert_span_http_status_code(dd_span, 200) + assert dd_span.meta.get(http.URL) == 'http://localhost:80/' + assert dd_span.error == 0 diff --git a/tests/contrib/pymemcache/__init__.py b/tests/contrib/pymemcache/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/pymemcache/autopatch/__init__.py b/tests/contrib/pymemcache/autopatch/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/pymemcache/autopatch/test.py b/tests/contrib/pymemcache/autopatch/test.py new file mode 100644 index 0000000000..68f55f3069 --- /dev/null +++ b/tests/contrib/pymemcache/autopatch/test.py @@ -0,0 +1,23 @@ +import pymemcache +import unittest +from ddtrace.vendor import wrapt + + +class AutoPatchTestCase(unittest.TestCase): + """Test ensuring that ddtrace-run patches pymemcache. + + This ensures that things like the patch functions are properly exported + from the module and used to patch the library. + + Note: you may get cryptic errors due to ddtrace-run failing, such as + + Traceback (most recent call last): + File ".../dev/dd-trace-py/tests/contrib/pymemcache/test_autopatch.py", line 8, in test_patch + assert issubclass(pymemcache.client.base.Client, wrapt.ObjectProxy) + AttributeError: 'module' object has no attribute 'client' + + this is indicative of the patch function not being exported by the module.
+ """ + + def test_patch(self): + assert issubclass(pymemcache.client.base.Client, wrapt.ObjectProxy) diff --git a/tests/contrib/pymemcache/test_client.py b/tests/contrib/pymemcache/test_client.py new file mode 100644 index 0000000000..72467e4743 --- /dev/null +++ b/tests/contrib/pymemcache/test_client.py @@ -0,0 +1,321 @@ +# 3p +import pymemcache +from pymemcache.exceptions import ( + MemcacheClientError, + MemcacheServerError, + MemcacheUnknownCommandError, + MemcacheUnknownError, + MemcacheIllegalInputError, +) +import pytest +import unittest +from ddtrace.vendor import wrapt + +# project +from ddtrace import Pin +from ddtrace.contrib.pymemcache.patch import patch, unpatch +from .utils import MockSocket, _str +from .test_client_mixin import PymemcacheClientTestCaseMixin, TEST_HOST, TEST_PORT + +from tests.test_tracer import get_dummy_tracer + + +_Client = pymemcache.client.base.Client + + +class PymemcacheClientTestCase(PymemcacheClientTestCaseMixin): + """ Tests for a patched pymemcache.client.base.Client. """ + + def test_patch(self): + assert issubclass(pymemcache.client.base.Client, wrapt.ObjectProxy) + client = self.make_client([]) + self.assertIsInstance(client, wrapt.ObjectProxy) + + def test_unpatch(self): + unpatch() + from pymemcache.client.base import Client + + self.assertEqual(Client, _Client) + + def test_set_get(self): + client = self.make_client([b'STORED\r\n', b'VALUE key 0 5\r\nvalue\r\nEND\r\n']) + client.set(b'key', b'value', noreply=False) + result = client.get(b'key') + assert _str(result) == 'value' + + self.check_spans(2, ['set', 'get'], ['set key', 'get key']) + + def test_append_stored(self): + client = self.make_client([b'STORED\r\n']) + result = client.append(b'key', b'value', noreply=False) + assert result is True + + self.check_spans(1, ['append'], ['append key']) + + def test_prepend_stored(self): + client = self.make_client([b'STORED\r\n']) + result = client.prepend(b'key', b'value', noreply=False) + assert result is True + + self.check_spans(1, ['prepend'], ['prepend key']) + + def test_cas_stored(self): + client = self.make_client([b'STORED\r\n']) + result = client.cas(b'key', b'value', b'cas', noreply=False) + assert result is True + + self.check_spans(1, ['cas'], ['cas key']) + + def test_cas_exists(self): + client = self.make_client([b'EXISTS\r\n']) + result = client.cas(b'key', b'value', b'cas', noreply=False) + assert result is False + + self.check_spans(1, ['cas'], ['cas key']) + + def test_cas_not_found(self): + client = self.make_client([b'NOT_FOUND\r\n']) + result = client.cas(b'key', b'value', b'cas', noreply=False) + assert result is None + + self.check_spans(1, ['cas'], ['cas key']) + + def test_delete_exception(self): + client = self.make_client([Exception('fail')]) + + def _delete(): + client.delete(b'key', noreply=False) + + pytest.raises(Exception, _delete) + + spans = self.check_spans(1, ['delete'], ['delete key']) + self.assertEqual(spans[0].error, 1) + + def test_flush_all(self): + client = self.make_client([b'OK\r\n']) + result = client.flush_all(noreply=False) + assert result is True + + self.check_spans(1, ['flush_all'], ['flush_all']) + + def test_incr_exception(self): + client = self.make_client([Exception('fail')]) + + def _incr(): + client.incr(b'key', 1) + + pytest.raises(Exception, _incr) + + spans = self.check_spans(1, ['incr'], ['incr key']) + self.assertEqual(spans[0].error, 1) + + def test_get_error(self): + client = self.make_client([b'ERROR\r\n']) + + def _get(): + client.get(b'key') + + 
pytest.raises(MemcacheUnknownCommandError, _get) + + spans = self.check_spans(1, ['get'], ['get key']) + self.assertEqual(spans[0].error, 1) + + def test_get_unknown_error(self): + client = self.make_client([b'foobarbaz\r\n']) + + def _get(): + client.get(b'key') + + pytest.raises(MemcacheUnknownError, _get) + + self.check_spans(1, ['get'], ['get key']) + + def test_gets_found(self): + client = self.make_client([b'VALUE key 0 5 10\r\nvalue\r\nEND\r\n']) + result = client.gets(b'key') + assert result == (b'value', b'10') + + self.check_spans(1, ['gets'], ['gets key']) + + def test_touch_not_found(self): + client = self.make_client([b'NOT_FOUND\r\n']) + result = client.touch(b'key', noreply=False) + assert result is False + + self.check_spans(1, ['touch'], ['touch key']) + + def test_set_client_error(self): + client = self.make_client([b'CLIENT_ERROR some message\r\n']) + + def _set(): + client.set('key', 'value', noreply=False) + + pytest.raises(MemcacheClientError, _set) + + spans = self.check_spans(1, ['set'], ['set key']) + self.assertEqual(spans[0].error, 1) + + def test_set_server_error(self): + client = self.make_client([b'SERVER_ERROR some message\r\n']) + + def _set(): + client.set(b'key', b'value', noreply=False) + + pytest.raises(MemcacheServerError, _set) + + spans = self.check_spans(1, ['set'], ['set key']) + self.assertEqual(spans[0].error, 1) + + def test_set_key_with_space(self): + client = self.make_client([b'']) + + def _set(): + client.set(b'key has space', b'value', noreply=False) + + pytest.raises(MemcacheIllegalInputError, _set) + + spans = self.check_spans(1, ['set'], ['set key has space']) + self.assertEqual(spans[0].error, 1) + + def test_quit(self): + client = self.make_client([]) + result = client.quit() + assert result is None + + self.check_spans(1, ['quit'], ['quit']) + + def test_replace_not_stored(self): + client = self.make_client([b'NOT_STORED\r\n']) + result = client.replace(b'key', b'value', noreply=False) + assert result is False + + self.check_spans(1, ['replace'], ['replace key']) + + def test_version_success(self): + client = self.make_client([b'VERSION 1.2.3\r\n'], default_noreply=False) + result = client.version() + assert result == b'1.2.3' + + self.check_spans(1, ['version'], ['version']) + + def test_stats(self): + client = self.make_client([b'STAT fake_stats 1\r\n', b'END\r\n']) + result = client.stats() + assert client.sock.send_bufs == [b'stats \r\n'] + assert result == {b'fake_stats': 1} + + self.check_spans(1, ['stats'], ['stats']) + + def test_service_name_override(self): + client = self.make_client([b'STORED\r\n', b'VALUE key 0 5\r\nvalue\r\nEND\r\n']) + Pin.override(client, service='testsvcname') + client.set(b'key', b'value', noreply=False) + result = client.get(b'key') + assert _str(result) == 'value' + + spans = self.get_spans() + self.assertEqual(spans[0].service, 'testsvcname') + self.assertEqual(spans[1].service, 'testsvcname') + + +class PymemcacheHashClientTestCase(PymemcacheClientTestCaseMixin): + """ Tests for a patched pymemcache.client.hash.HashClient. 
""" + + def get_spans(self): + spans = [] + for _, client in self.client.clients.items(): + pin = Pin.get_from(client) + tracer = pin.tracer + spans.extend(tracer.writer.pop()) + return spans + + def make_client_pool(self, hostname, mock_socket_values, serializer=None, **kwargs): + mock_client = pymemcache.client.base.Client( + hostname, serializer=serializer, **kwargs + ) + tracer = get_dummy_tracer() + Pin.override(mock_client, tracer=tracer) + + mock_client.sock = MockSocket(mock_socket_values) + client = pymemcache.client.base.PooledClient(hostname, serializer=serializer) + client.client_pool = pymemcache.pool.ObjectPool(lambda: mock_client) + return mock_client + + def make_client(self, *mock_socket_values, **kwargs): + current_port = TEST_PORT + from pymemcache.client.hash import HashClient + + self.client = HashClient([], **kwargs) + ip = TEST_HOST + + for vals in mock_socket_values: + s = '{}:{}'.format(ip, current_port) + c = self.make_client_pool((ip, current_port), vals, **kwargs) + self.client.clients[s] = c + self.client.hasher.add_node(s) + current_port += 1 + return self.client + + def test_delete_many_found(self): + """ + delete_many internally calls client.delete so we should expect to get + delete for our span resource. + + for base.Clients self.delete() is called which by-passes our tracing + on delete() + """ + client = self.make_client([b'STORED\r', b'\n', b'DELETED\r\n']) + result = client.add(b'key', b'value', noreply=False) + result = client.delete_many([b'key'], noreply=False) + assert result is True + + self.check_spans(2, ['add', 'delete'], ['add key', 'delete key']) + + +class PymemcacheClientConfiguration(unittest.TestCase): + """Ensure that pymemache can be configured properly.""" + + def setUp(self): + patch() + + def tearDown(self): + unpatch() + + def make_client(self, mock_socket_values, **kwargs): + tracer = get_dummy_tracer() + Pin.override(pymemcache, tracer=tracer) + self.client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT), **kwargs) + self.client.sock = MockSocket(list(mock_socket_values)) + return self.client + + def test_same_tracer(self): + """Ensure same tracer reference is used by the pin on pymemache and + Clients. 
+ """ + client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT)) + self.assertEqual(Pin.get_from(client).tracer, Pin.get_from(pymemcache).tracer) + + def test_override_parent_pin(self): + """Test that the service set on `pymemcache` is used for Clients.""" + Pin.override(pymemcache, service='mysvc') + client = self.make_client([b'STORED\r\n', b'VALUE key 0 5\r\nvalue\r\nEND\r\n']) + client.set(b'key', b'value', noreply=False) + + pin = Pin.get_from(pymemcache) + tracer = pin.tracer + spans = tracer.writer.pop() + + self.assertEqual(spans[0].service, 'mysvc') + + def test_override_client_pin(self): + """Test that the service set on `pymemcache` is used for Clients.""" + client = self.make_client([b'STORED\r\n', b'VALUE key 0 5\r\nvalue\r\nEND\r\n']) + Pin.override(client, service='mysvc2') + + client.set(b'key', b'value', noreply=False) + + pin = Pin.get_from(pymemcache) + tracer = pin.tracer + spans = tracer.writer.pop() + + self.assertEqual(spans[0].service, 'mysvc2') diff --git a/tests/contrib/pymemcache/test_client_mixin.py b/tests/contrib/pymemcache/test_client_mixin.py new file mode 100644 index 0000000000..edd9fe292a --- /dev/null +++ b/tests/contrib/pymemcache/test_client_mixin.py @@ -0,0 +1,175 @@ +# 3p +import unittest +import pymemcache + +# project +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.pymemcache.patch import patch, unpatch +from ddtrace.ext import memcached as memcachedx, net +from .utils import MockSocket + +from tests.test_tracer import get_dummy_tracer +from ...base import override_config + + +_Client = pymemcache.client.base.Client + +TEST_HOST = 'localhost' +TEST_PORT = 117711 + + +class PymemcacheClientTestCaseMixin(unittest.TestCase): + """ Tests for a patched pymemcache.client.base.Client. 
""" + + def get_spans(self): + pin = Pin.get_from(self.client) + tracer = pin.tracer + spans = tracer.writer.pop() + return spans + + def check_spans(self, num_expected, resources_expected, queries_expected): + """A helper for validating basic span information.""" + spans = self.get_spans() + self.assertEqual(num_expected, len(spans)) + + for span, resource, query in zip(spans, resources_expected, queries_expected): + self.assertEqual(span.get_tag(net.TARGET_HOST), TEST_HOST) + self.assertEqual(span.get_metric(net.TARGET_PORT), TEST_PORT) + self.assertEqual(span.name, memcachedx.CMD) + self.assertEqual(span.span_type, 'cache') + self.assertEqual(span.service, memcachedx.SERVICE) + self.assertEqual(span.get_tag(memcachedx.QUERY), query) + self.assertEqual(span.resource, resource) + + return spans + + def setUp(self): + patch() + + def tearDown(self): + unpatch() + + def make_client(self, mock_socket_values, **kwargs): + tracer = get_dummy_tracer() + Pin.override(pymemcache, tracer=tracer) + self.client = pymemcache.client.base.Client((TEST_HOST, TEST_PORT), **kwargs) + self.client.sock = MockSocket(list(mock_socket_values)) + return self.client + + def test_set_success(self): + client = self.make_client([b'STORED\r\n']) + result = client.set(b'key', b'value', noreply=False) + assert result is True + + self.check_spans(1, ['set'], ['set key']) + + def test_get_many_none_found(self): + client = self.make_client([b'END\r\n']) + result = client.get_many([b'key1', b'key2']) + assert result == {} + + self.check_spans(1, ['get_many'], ['get_many key1 key2']) + + def test_get_multi_none_found(self): + client = self.make_client([b'END\r\n']) + result = client.get_multi([b'key1', b'key2']) + assert result == {} + + self.check_spans(1, ['get_many'], ['get_many key1 key2']) + + def test_delete_not_found(self): + client = self.make_client([b'NOT_FOUND\r\n']) + result = client.delete(b'key', noreply=False) + assert result is False + + self.check_spans(1, ['delete'], ['delete key']) + + def test_incr_found(self): + client = self.make_client([b'STORED\r\n', b'1\r\n']) + client.set(b'key', 0, noreply=False) + result = client.incr(b'key', 1, noreply=False) + assert result == 1 + + self.check_spans(2, ['set', 'incr'], ['set key', 'incr key']) + + def test_get_found(self): + client = self.make_client([b'STORED\r\n', b'VALUE key 0 5\r\nvalue\r\nEND\r\n']) + result = client.set(b'key', b'value', noreply=False) + result = client.get(b'key') + assert result == b'value' + + self.check_spans(2, ['set', 'get'], ['set key', 'get key']) + + def test_decr_found(self): + client = self.make_client([b'STORED\r\n', b'1\r\n']) + client.set(b'key', 2, noreply=False) + result = client.decr(b'key', 1, noreply=False) + assert result == 1 + + self.check_spans(2, ['set', 'decr'], ['set key', 'decr key']) + + def test_add_stored(self): + client = self.make_client([b'STORED\r', b'\n']) + result = client.add(b'key', b'value', noreply=False) + assert result is True + + self.check_spans(1, ['add'], ['add key']) + + def test_delete_many_found(self): + client = self.make_client([b'STORED\r', b'\n', b'DELETED\r\n']) + result = client.add(b'key', b'value', noreply=False) + result = client.delete_many([b'key'], noreply=False) + assert result is True + + self.check_spans(2, ['add', 'delete_many'], ['add key', 'delete_many key']) + + def test_set_many_success(self): + client = self.make_client([b'STORED\r\n']) + result = client.set_many({b'key': b'value'}, noreply=False) + assert result is True + + self.check_spans(1, ['set_many'], ['set_many 
key']) + + def test_set_multi_success(self): + # Should just map to set_many + client = self.make_client([b'STORED\r\n']) + result = client.set_multi({b'key': b'value'}, noreply=False) + assert result is True + + self.check_spans(1, ['set_many'], ['set_many key']) + + def test_analytics_default(self): + client = self.make_client([b'STORED\r\n']) + result = client.set(b'key', b'value', noreply=False) + assert result is True + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with override_config( + 'pymemcache', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + client = self.make_client([b'STORED\r\n']) + result = client.set(b'key', b'value', noreply=False) + assert result is True + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with override_config( + 'pymemcache', + dict(analytics_enabled=True) + ): + client = self.make_client([b'STORED\r\n']) + result = client.set(b'key', b'value', noreply=False) + assert result is True + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) diff --git a/tests/contrib/pymemcache/utils.py b/tests/contrib/pymemcache/utils.py new file mode 100644 index 0000000000..c748ba2189 --- /dev/null +++ b/tests/contrib/pymemcache/utils.py @@ -0,0 +1,60 @@ +import collections +import socket + + +class MockSocket(object): + def __init__(self, recv_bufs, connect_failure=None): + self.recv_bufs = collections.deque(recv_bufs) + self.send_bufs = [] + self.closed = False + self.timeouts = [] + self.connect_failure = connect_failure + self.connections = [] + self.socket_options = [] + + def sendall(self, value): + self.send_bufs.append(value) + + def close(self): + self.closed = True + + def recv(self, size): + value = self.recv_bufs.popleft() + if isinstance(value, Exception): + raise value + return value + + def settimeout(self, timeout): + self.timeouts.append(timeout) + + def connect(self, server): + if isinstance(self.connect_failure, Exception): + raise self.connect_failure + self.connections.append(server) + + def setsockopt(self, level, option, value): + self.socket_options.append((level, option, value)) + + +class MockSocketModule(object): + def __init__(self, connect_failure=None): + self.connect_failure = connect_failure + self.sockets = [] + + def socket(self, family, type): # noqa: A002 + socket = MockSocket([], connect_failure=self.connect_failure) + self.sockets.append(socket) + return socket + + def __getattr__(self, name): + return getattr(socket, name) + + +# Compatibility to get a string back from a request +def _str(s): + if type(s) is str: + return s + elif type(s) is bytes: + return s.decode() + else: + return str(s) diff --git a/tests/contrib/pymongo/__init__.py b/tests/contrib/pymongo/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/pymongo/test.py b/tests/contrib/pymongo/test.py new file mode 100644 index 0000000000..1e69ad1228 --- /dev/null +++ b/tests/contrib/pymongo/test.py @@ -0,0 +1,445 @@ +# stdlib +import time +import unittest + +# 3p +import pymongo + +# project +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.ext import mongo as mongox +from ddtrace.contrib.pymongo.client import trace_mongo_client, normalize_filter 
+from ddtrace.contrib.pymongo.patch import patch, unpatch
+
+# testing
+from tests.opentracer.utils import init_tracer
+from ..config import MONGO_CONFIG
+from ...base import override_config
+from ...test_tracer import get_dummy_tracer
+
+
+def test_normalize_filter():
+    # ensure we can properly normalize queries FIXME[matt] move to the agent
+    cases = [
+        (None, {}),
+        (
+            {'team': 'leafs'},
+            {'team': '?'},
+        ),
+        (
+            {'age': {'$gt': 20}},
+            {'age': {'$gt': '?'}},
+        ),
+        (
+            {'age': {'$gt': 20}},
+            {'age': {'$gt': '?'}},
+        ),
+        (
+            {'_id': {'$in': [1, 2, 3]}},
+            {'_id': {'$in': '?'}},
+        ),
+        (
+            {'_id': {'$nin': [1, 2, 3]}},
+            {'_id': {'$nin': '?'}},
+        ),
+
+        (
+            20,
+            {},
+        ),
+        (
+            {
+                'status': 'A',
+                '$or': [{'age': {'$lt': 30}}, {'type': 1}],
+            },
+            {
+                'status': '?',
+                '$or': [{'age': {'$lt': '?'}}, {'type': '?'}],
+            },
+        ),
+    ]
+    for i, expected in cases:
+        out = normalize_filter(i)
+        assert expected == out
+
+
+class PymongoCore(object):
+    """Test suite for pymongo
+
+    Independent of the way it got instrumented.
+    TODO: merge to a single class when patching is the only way.
+    """
+
+    TEST_SERVICE = 'test-mongo'
+
+    def get_tracer_and_client(self):
+        # implement me
+        pass
+
+    def test_update(self):
+        # ensure we trace updates
+        tracer, client = self.get_tracer_and_client()
+        writer = tracer.writer
+        db = client['testdb']
+        db.drop_collection('songs')
+        input_songs = [
+            {'name': 'Powderfinger', 'artist': 'Neil'},
+            {'name': 'Harvest', 'artist': 'Neil'},
+            {'name': 'Suzanne', 'artist': 'Leonard'},
+            {'name': 'Partisan', 'artist': 'Leonard'},
+        ]
+        db.songs.insert_many(input_songs)
+
+        result = db.songs.update_many(
+            {'artist': 'Neil'},
+            {'$set': {'artist': 'Shakey'}},
+        )
+
+        assert result.matched_count == 2
+        assert result.modified_count == 2
+
+        # ensure all is traced.
+        spans = writer.pop()
+        assert spans, spans
+        for span in spans:
+            # ensure all of the common metadata is set
+            assert span.service == self.TEST_SERVICE
+            assert span.span_type == 'mongodb'
+            assert span.meta.get('mongodb.collection') == 'songs'
+            assert span.meta.get('mongodb.db') == 'testdb'
+            assert span.meta.get('out.host')
+            assert span.metrics.get('out.port')
+
+        expected_resources = set([
+            'drop songs',
+            'update songs {"artist": "?"}',
+            'insert songs',
+        ])
+
+        assert expected_resources == {s.resource for s in spans}
+
+    def test_delete(self):
+        # ensure we trace deletes
+        tracer, client = self.get_tracer_and_client()
+        writer = tracer.writer
+        db = client['testdb']
+        collection_name = 'here.are.songs'
+        db.drop_collection(collection_name)
+        input_songs = [
+            {'name': 'Powderfinger', 'artist': 'Neil'},
+            {'name': 'Harvest', 'artist': 'Neil'},
+            {'name': 'Suzanne', 'artist': 'Leonard'},
+            {'name': 'Partisan', 'artist': 'Leonard'},
+        ]
+
+        songs = db[collection_name]
+        songs.insert_many(input_songs)
+
+        # test delete one
+        af = {'artist': 'Neil'}
+        assert songs.count(af) == 2
+        songs.delete_one(af)
+        assert songs.count(af) == 1
+
+        # test delete many
+        af = {'artist': 'Leonard'}
+        assert songs.count(af) == 2
+        songs.delete_many(af)
+        assert songs.count(af) == 0
+
+        # ensure all is traced.
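+        # (each command above - the drop, the insert, the four counts and the
+        # two deletes - is expected to produce exactly one span)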
+        spans = writer.pop()
+        assert spans, spans
+        for span in spans:
+            # ensure all of the common metadata is set
+            assert span.service == self.TEST_SERVICE
+            assert span.span_type == 'mongodb'
+            assert span.meta.get('mongodb.collection') == collection_name
+            assert span.meta.get('mongodb.db') == 'testdb'
+            assert span.meta.get('out.host')
+            assert span.metrics.get('out.port')
+
+        expected_resources = [
+            'drop here.are.songs',
+            'count here.are.songs',
+            'count here.are.songs',
+            'count here.are.songs',
+            'count here.are.songs',
+            'delete here.are.songs {"artist": "?"}',
+            'delete here.are.songs {"artist": "?"}',
+            'insert here.are.songs',
+        ]
+
+        assert sorted(expected_resources) == sorted(s.resource for s in spans)
+
+    def test_insert_find(self):
+        tracer, client = self.get_tracer_and_client()
+        writer = tracer.writer
+
+        start = time.time()
+        db = client.testdb
+        db.drop_collection('teams')
+        teams = [
+            {
+                'name': 'Toronto Maple Leafs',
+                'established': 1917,
+            },
+            {
+                'name': 'Montreal Canadiens',
+                'established': 1910,
+            },
+            {
+                'name': 'New York Rangers',
+                'established': 1926,
+            }
+        ]
+
+        # create some data (exercising both ways of inserting)
+
+        db.teams.insert_one(teams[0])
+        db.teams.insert_many(teams[1:])
+
+        # wildcard query (using the [] syntax)
+        cursor = db['teams'].find()
+        count = 0
+        for row in cursor:
+            count += 1
+        assert count == len(teams)
+
+        # scoped query (using the getattr syntax)
+        q = {'name': 'Toronto Maple Leafs'}
+        queried = list(db.teams.find(q))
+        end = time.time()
+        assert len(queried) == 1
+        assert queried[0]['name'] == 'Toronto Maple Leafs'
+        assert queried[0]['established'] == 1917
+
+        spans = writer.pop()
+        for span in spans:
+            # ensure all of the common metadata is set
+            assert span.service == self.TEST_SERVICE
+            assert span.span_type == 'mongodb'
+            assert span.meta.get('mongodb.collection') == 'teams'
+            assert span.meta.get('mongodb.db') == 'testdb'
+            assert span.meta.get('out.host'), span.pprint()
+            assert span.metrics.get('out.port'), span.pprint()
+            assert span.start > start
+            assert span.duration < end - start
+
+        expected_resources = [
+            'drop teams',
+            'insert teams',
+            'insert teams',
+        ]
+
+        # query names should be used in pymongo >= 3.1
+        name = 'find' if pymongo.version_tuple >= (3, 1, 0) else 'query'
+
+        expected_resources.extend([
+            '{} teams'.format(name),
+            '{} teams {{"name": "?"}}'.format(name),
+        ])
+
+        assert expected_resources == list(s.resource for s in spans)
+
+        # confirm query tag for find all
+        assert spans[-2].get_tag('mongodb.query') is None
+
+        # confirm the query tag for a find with query criteria on name
+        assert spans[-1].get_tag('mongodb.query') == '{\'name\': \'?\'}'
+
+    def test_update_ot(self):
+        """OpenTracing version of test_update."""
+        tracer, client = self.get_tracer_and_client()
+        ot_tracer = init_tracer('mongo_svc', tracer)
+
+        writer = tracer.writer
+        with ot_tracer.start_active_span('mongo_op'):
+            db = client['testdb']
+            db.drop_collection('songs')
+            input_songs = [
+                {'name': 'Powderfinger', 'artist': 'Neil'},
+                {'name': 'Harvest', 'artist': 'Neil'},
+                {'name': 'Suzanne', 'artist': 'Leonard'},
+                {'name': 'Partisan', 'artist': 'Leonard'},
+            ]
+            db.songs.insert_many(input_songs)
+            result = db.songs.update_many(
+                {'artist': 'Neil'},
+                {'$set': {'artist': 'Shakey'}},
+            )
+
+        assert result.matched_count == 2
+        assert result.modified_count == 2
+
+        # ensure all is traced.
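+        # (expect 4 spans: the OpenTracing 'mongo_op' parent plus one pymongo
+        # child span each for the drop, insert and update commands)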
+        spans = writer.pop()
+        assert spans, spans
+        assert len(spans) == 4
+
+        ot_span = spans[0]
+        assert ot_span.parent_id is None
+        assert ot_span.name == 'mongo_op'
+        assert ot_span.service == 'mongo_svc'
+
+        for span in spans[1:]:
+            # ensure the parenting
+            assert span.parent_id == ot_span.span_id
+            # ensure all of the common metadata is set
+            assert span.service == self.TEST_SERVICE
+            assert span.span_type == 'mongodb'
+            assert span.meta.get('mongodb.collection') == 'songs'
+            assert span.meta.get('mongodb.db') == 'testdb'
+            assert span.meta.get('out.host')
+            assert span.metrics.get('out.port')
+
+        expected_resources = set([
+            'drop songs',
+            'update songs {"artist": "?"}',
+            'insert songs',
+        ])
+
+        assert expected_resources == {s.resource for s in spans[1:]}
+
+    def test_analytics_default(self):
+        tracer, client = self.get_tracer_and_client()
+        db = client['testdb']
+        db.drop_collection('songs')
+
+        spans = tracer.writer.pop()
+        assert len(spans) == 1
+        assert spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None
+
+    def test_analytics_with_rate(self):
+        with override_config(
+            'pymongo',
+            dict(analytics_enabled=True, analytics_sample_rate=0.5)
+        ):
+            tracer, client = self.get_tracer_and_client()
+            db = client['testdb']
+            db.drop_collection('songs')
+
+            spans = tracer.writer.pop()
+            assert len(spans) == 1
+            assert spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.5
+
+    def test_analytics_without_rate(self):
+        with override_config(
+            'pymongo',
+            dict(analytics_enabled=True)
+        ):
+            tracer, client = self.get_tracer_and_client()
+            db = client['testdb']
+            db.drop_collection('songs')
+
+            spans = tracer.writer.pop()
+            assert len(spans) == 1
+            assert spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0
+
+
+class TestPymongoTraceClient(unittest.TestCase, PymongoCore):
+    """Test suite for pymongo with the legacy trace interface"""
+
+    TEST_SERVICE = 'test-mongo-trace-client'
+
+    def get_tracer_and_client(self):
+        tracer = get_dummy_tracer()
+        original_client = pymongo.MongoClient(port=MONGO_CONFIG['port'])
+        client = trace_mongo_client(original_client, tracer, service=self.TEST_SERVICE)
+        return tracer, client
+
+
+class TestPymongoPatchDefault(unittest.TestCase, PymongoCore):
+    """Test suite for pymongo with the default patched library"""
+
+    TEST_SERVICE = mongox.SERVICE
+
+    def setUp(self):
+        patch()
+
+    def tearDown(self):
+        unpatch()
+
+    def get_tracer_and_client(self):
+        tracer = get_dummy_tracer()
+        client = pymongo.MongoClient(port=MONGO_CONFIG['port'])
+        Pin.get_from(client).clone(tracer=tracer).onto(client)
+        return tracer, client
+
+    def test_service(self):
+        tracer, client = self.get_tracer_and_client()
+        writer = tracer.writer
+        db = client['testdb']
+        db.drop_collection('songs')
+
+        services = writer.pop_services()
+        assert services == {}
+
+    def test_host_kwarg(self):
+        # simulate what celery and django do when instantiating a new client
+        conf = {
+            'host': 'localhost',
+        }
+        client = pymongo.MongoClient(**conf)
+
+        conf = {
+            'host': None,
+        }
+        client = pymongo.MongoClient(**conf)
+
+        assert client
+
+
+class TestPymongoPatchConfigured(unittest.TestCase, PymongoCore):
+    """Test suite for pymongo with a configured patched library"""
+
+    TEST_SERVICE = 'test-mongo-trace-client'
+
+    def setUp(self):
+        patch()
+
+    def tearDown(self):
+        unpatch()
+
+    def get_tracer_and_client(self):
+        tracer = get_dummy_tracer()
+        client = pymongo.MongoClient(port=MONGO_CONFIG['port'])
+        Pin(service=self.TEST_SERVICE, tracer=tracer).onto(client)
+        return tracer, client
+
+    def 
test_patch_unpatch(self): + tracer = get_dummy_tracer() + writer = tracer.writer + + # Test patch idempotence + patch() + patch() + + client = pymongo.MongoClient(port=MONGO_CONFIG['port']) + Pin.get_from(client).clone(tracer=tracer).onto(client) + client['testdb'].drop_collection('whatever') + + spans = writer.pop() + assert spans, spans + assert len(spans) == 1 + + # Test unpatch + unpatch() + + client = pymongo.MongoClient(port=MONGO_CONFIG['port']) + client['testdb'].drop_collection('whatever') + + spans = writer.pop() + assert not spans, spans + + # Test patch again + patch() + + client = pymongo.MongoClient(port=MONGO_CONFIG['port']) + Pin.get_from(client).clone(tracer=tracer).onto(client) + client['testdb'].drop_collection('whatever') + + spans = writer.pop() + assert spans, spans + assert len(spans) == 1 diff --git a/tests/contrib/pymongo/test_spec.py b/tests/contrib/pymongo/test_spec.py new file mode 100644 index 0000000000..0709a64b5d --- /dev/null +++ b/tests/contrib/pymongo/test_spec.py @@ -0,0 +1,52 @@ +""" +tests for parsing specs. +""" + +from bson.son import SON + +from ddtrace.contrib.pymongo.parse import parse_spec + + +def test_empty(): + cmd = parse_spec(SON([])) + assert cmd is None + + +def test_create(): + cmd = parse_spec(SON([('create', 'foo')])) + assert cmd.name == 'create' + assert cmd.coll == 'foo' + assert cmd.tags == {} + assert cmd.metrics == {} + + +def test_insert(): + spec = SON([ + ('insert', 'bla'), + ('ordered', True), + ('documents', ['a', 'b']), + ]) + cmd = parse_spec(spec) + assert cmd.name == 'insert' + assert cmd.coll == 'bla' + assert cmd.tags == {'mongodb.ordered': True} + assert cmd.metrics == {'mongodb.documents': 2} + + +def test_update(): + spec = SON([ + ('update', u'songs'), + ('ordered', True), + ('updates', [ + SON([ + ('q', {'artist': 'Neil'}), + ('u', {'$set': {'artist': 'Shakey'}}), + ('multi', True), + ('upsert', False) + ]) + ]) + ]) + cmd = parse_spec(spec) + assert cmd.name == 'update' + assert cmd.coll == 'songs' + assert cmd.query == {'artist': 'Neil'} diff --git a/tests/contrib/pymysql/__init__.py b/tests/contrib/pymysql/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/pymysql/test_backwards_compatibility.py b/tests/contrib/pymysql/test_backwards_compatibility.py new file mode 100644 index 0000000000..46cc4cc293 --- /dev/null +++ b/tests/contrib/pymysql/test_backwards_compatibility.py @@ -0,0 +1,12 @@ +from ddtrace.contrib.pymysql import get_traced_pymysql_connection +from tests.test_tracer import get_dummy_tracer +from tests.contrib import config + + +def test_pre_v4(): + tracer = get_dummy_tracer() + MySQL = get_traced_pymysql_connection(tracer, service='my-mysql-server') + conn = MySQL(**config.MYSQL_CONFIG) + cursor = conn.cursor() + cursor.execute('SELECT 1') + assert cursor.fetchone()[0] == 1 diff --git a/tests/contrib/pymysql/test_pymysql.py b/tests/contrib/pymysql/test_pymysql.py new file mode 100644 index 0000000000..5951c33ce3 --- /dev/null +++ b/tests/contrib/pymysql/test_pymysql.py @@ -0,0 +1,428 @@ +# 3p +import pymysql + +# project +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.compat import PY2 +from ddtrace.compat import stringify +from ddtrace.contrib.pymysql.patch import patch, unpatch + +# testing +from tests.opentracer.utils import init_tracer +from ...base import BaseTracerTestCase +from ...util import assert_dict_issuperset +from ...contrib.config import MYSQL_CONFIG + + +class PyMySQLCore(object): + """PyMySQL test 
case reuses the connection across tests""" + conn = None + TEST_SERVICE = 'test-pymysql' + + DB_INFO = { + 'out.host': MYSQL_CONFIG.get('host'), + } + if PY2: + DB_INFO.update({ + 'db.user': MYSQL_CONFIG.get('user'), + 'db.name': MYSQL_CONFIG.get('database') + }) + else: + DB_INFO.update({ + 'db.user': stringify(bytes(MYSQL_CONFIG.get('user'), encoding='utf-8')), + 'db.name': stringify(bytes(MYSQL_CONFIG.get('database'), encoding='utf-8')) + }) + + def setUp(self): + super(PyMySQLCore, self).setUp() + patch() + + def tearDown(self): + super(PyMySQLCore, self).tearDown() + if self.conn and not self.conn._closed: + self.conn.close() + unpatch() + + def _get_conn_tracer(self): + # implement me + pass + + def test_simple_query(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + + # PyMySQL returns back the rowcount instead of a cursor + rowcount = cursor.execute('SELECT 1') + assert rowcount == 1 + + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'pymysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == MYSQL_CONFIG.get('port') + meta = {} + meta.update(self.DB_INFO) + assert_dict_issuperset(span.meta, meta) + + def test_simple_query_fetchall(self): + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + assert len(spans) == 2 + + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'pymysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == MYSQL_CONFIG.get('port') + meta = {} + meta.update(self.DB_INFO) + assert_dict_issuperset(span.meta, meta) + + fetch_span = spans[1] + assert fetch_span.name == 'pymysql.query.fetchall' + + def test_query_with_several_rows(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + query = 'SELECT n FROM (SELECT 42 n UNION SELECT 421 UNION SELECT 4210) m' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 3 + spans = writer.pop() + assert len(spans) == 1 + self.assertEqual(spans[0].name, 'pymysql.query') + + def test_query_with_several_rows_fetchall(self): + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + query = 'SELECT n FROM (SELECT 42 n UNION SELECT 421 UNION SELECT 4210) m' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 3 + spans = writer.pop() + assert len(spans) == 2 + + fetch_span = spans[1] + assert fetch_span.name == 'pymysql.query.fetchall' + + def test_query_many(self): + # tests that the executemany method is correctly wrapped. 
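+        # (the table setup below runs with the tracer disabled, so only the
+        # executemany() INSERT and the follow-up SELECT produce spans)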
+ conn, tracer = self._get_conn_tracer() + writer = tracer.writer + tracer.enabled = False + cursor = conn.cursor() + + cursor.execute(""" + create table if not exists dummy ( + dummy_key VARCHAR(32) PRIMARY KEY, + dummy_value TEXT NOT NULL)""") + tracer.enabled = True + + stmt = 'INSERT INTO dummy (dummy_key, dummy_value) VALUES (%s, %s)' + data = [('foo', 'this is foo'), + ('bar', 'this is bar')] + + # PyMySQL `executemany()` returns the rowcount + rowcount = cursor.executemany(stmt, data) + assert rowcount == 2 + + query = 'SELECT dummy_key, dummy_value FROM dummy ORDER BY dummy_key' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 2 + assert rows[0][0] == 'bar' + assert rows[0][1] == 'this is bar' + assert rows[1][0] == 'foo' + assert rows[1][1] == 'this is foo' + + spans = writer.pop() + assert len(spans) == 2 + cursor.execute('drop table if exists dummy') + + def test_query_many_fetchall(self): + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + # tests that the executemany method is correctly wrapped. + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + tracer.enabled = False + cursor = conn.cursor() + + cursor.execute(""" + create table if not exists dummy ( + dummy_key VARCHAR(32) PRIMARY KEY, + dummy_value TEXT NOT NULL)""") + tracer.enabled = True + + stmt = 'INSERT INTO dummy (dummy_key, dummy_value) VALUES (%s, %s)' + data = [('foo', 'this is foo'), + ('bar', 'this is bar')] + cursor.executemany(stmt, data) + query = 'SELECT dummy_key, dummy_value FROM dummy ORDER BY dummy_key' + cursor.execute(query) + rows = cursor.fetchall() + assert len(rows) == 2 + assert rows[0][0] == 'bar' + assert rows[0][1] == 'this is bar' + assert rows[1][0] == 'foo' + assert rows[1][1] == 'this is foo' + + spans = writer.pop() + assert len(spans) == 3 + cursor.execute('drop table if exists dummy') + + fetch_span = spans[2] + assert fetch_span.name == 'pymysql.query.fetchall' + + def test_query_proc(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + + # create a procedure + tracer.enabled = False + cursor = conn.cursor() + cursor.execute('DROP PROCEDURE IF EXISTS sp_sum') + cursor.execute(""" + CREATE PROCEDURE sp_sum (IN p1 INTEGER, IN p2 INTEGER, OUT p3 INTEGER) + BEGIN + SET p3 := p1 + p2; + END;""") + + tracer.enabled = True + proc = 'sp_sum' + data = (40, 2, None) + + # spans[len(spans) - 2] + cursor.callproc(proc, data) + + # spans[len(spans) - 1] + cursor.execute(""" + SELECT @_sp_sum_0, @_sp_sum_1, @_sp_sum_2 + """) + output = cursor.fetchone() + assert len(output) == 3 + assert output[2] == 42 + + spans = writer.pop() + assert spans, spans + + # number of spans depends on PyMySQL implementation details, + # typically, internal calls to execute, but at least we + # can expect the last closed span to be our proc. 
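+        # (spans[-1] is the SELECT just above; the proc call is spans[-2])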
+ span = spans[len(spans) - 2] + assert span.service == self.TEST_SERVICE + assert span.name == 'pymysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == MYSQL_CONFIG.get('port') + meta = {} + meta.update(self.DB_INFO) + assert_dict_issuperset(span.meta, meta) + + def test_simple_query_ot(self): + """OpenTracing version of test_simple_query.""" + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + ot_tracer = init_tracer('mysql_svc', tracer) + with ot_tracer.start_active_span('mysql_op'): + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + + spans = writer.pop() + assert len(spans) == 2 + ot_span, dd_span = spans + + # confirm parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.service == 'mysql_svc' + assert ot_span.name == 'mysql_op' + + assert dd_span.service == self.TEST_SERVICE + assert dd_span.name == 'pymysql.query' + assert dd_span.span_type == 'sql' + assert dd_span.error == 0 + assert dd_span.get_metric('out.port') == MYSQL_CONFIG.get('port') + meta = {} + meta.update(self.DB_INFO) + assert_dict_issuperset(dd_span.meta, meta) + + def test_simple_query_ot_fetchall(self): + """OpenTracing version of test_simple_query.""" + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + ot_tracer = init_tracer('mysql_svc', tracer) + with ot_tracer.start_active_span('mysql_op'): + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + + spans = writer.pop() + assert len(spans) == 3 + ot_span, dd_span, fetch_span = spans + + # confirm parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.service == 'mysql_svc' + assert ot_span.name == 'mysql_op' + + assert dd_span.service == self.TEST_SERVICE + assert dd_span.name == 'pymysql.query' + assert dd_span.span_type == 'sql' + assert dd_span.error == 0 + assert dd_span.get_metric('out.port') == MYSQL_CONFIG.get('port') + meta = {} + meta.update(self.DB_INFO) + assert_dict_issuperset(dd_span.meta, meta) + + assert fetch_span.name == 'pymysql.query.fetchall' + + def test_commit(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + conn.commit() + spans = writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'pymysql.connection.commit' + + def test_rollback(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + conn.rollback() + spans = writer.pop() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'pymysql.connection.rollback' + + def test_analytics_default(self): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + + 
self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True) + ): + conn, tracer = self._get_conn_tracer() + writer = tracer.writer + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) + + +class TestPyMysqlPatch(PyMySQLCore, BaseTracerTestCase): + def _get_conn_tracer(self): + if not self.conn: + self.conn = pymysql.connect(**MYSQL_CONFIG) + assert not self.conn._closed + # Ensure that the default pin is there, with its default value + pin = Pin.get_from(self.conn) + assert pin + assert pin.service == 'pymysql' + # Customize the service + # we have to apply it on the existing one since new one won't inherit `app` + pin.clone(service=self.TEST_SERVICE, tracer=self.tracer).onto(self.conn) + + return self.conn, self.tracer + + def test_patch_unpatch(self): + unpatch() + # assert we start unpatched + conn = pymysql.connect(**MYSQL_CONFIG) + assert not Pin.get_from(conn) + conn.close() + + patch() + try: + writer = self.tracer.writer + conn = pymysql.connect(**MYSQL_CONFIG) + pin = Pin.get_from(conn) + assert pin + pin.clone(service=self.TEST_SERVICE, tracer=self.tracer).onto(conn) + assert not conn._closed + + cursor = conn.cursor() + cursor.execute('SELECT 1') + rows = cursor.fetchall() + assert len(rows) == 1 + spans = writer.pop() + assert len(spans) == 1 + + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'pymysql.query' + assert span.span_type == 'sql' + assert span.error == 0 + assert span.get_metric('out.port') == MYSQL_CONFIG.get('port') + + meta = {} + meta.update(self.DB_INFO) + assert_dict_issuperset(span.meta, meta) + finally: + unpatch() + + # assert we finish unpatched + conn = pymysql.connect(**MYSQL_CONFIG) + assert not Pin.get_from(conn) + conn.close() + + patch() diff --git a/tests/contrib/pyramid/__init__.py b/tests/contrib/pyramid/__init__.py new file mode 100644 index 0000000000..0c810a0b71 --- /dev/null +++ b/tests/contrib/pyramid/__init__.py @@ -0,0 +1,3 @@ +from .test_pyramid_autopatch import _include_me + +__all__ = ['_include_me'] diff --git a/tests/contrib/pyramid/app/__init__.py b/tests/contrib/pyramid/app/__init__.py new file mode 100644 index 0000000000..65cf7f8c0d --- /dev/null +++ b/tests/contrib/pyramid/app/__init__.py @@ -0,0 +1 @@ +from .web import create_app # noqa diff --git a/tests/contrib/pyramid/app/web.py b/tests/contrib/pyramid/app/web.py new file mode 100644 index 0000000000..849e57c636 --- /dev/null +++ b/tests/contrib/pyramid/app/web.py @@ -0,0 +1,70 @@ +from ddtrace.contrib.pyramid import trace_pyramid + +from pyramid.response import Response +from pyramid.config import Configurator +from pyramid.renderers import render_to_response +from pyramid.httpexceptions import ( + HTTPInternalServerError, + HTTPFound, + HTTPNotFound, + HTTPNoContent, +) + + +def create_app(settings, instrument): + """Return a pyramid wsgi app""" + + def index(request): + return Response('idx') + + def error(request): + raise HTTPInternalServerError('oh no') + + def exception(request): + 1 / 0 + + def json(request): + return {'a': 1} + + def renderer(request): + return render_to_response('template.pt', {'foo': 'bar'}, request=request) + + def raise_redirect(request): + raise 
HTTPFound()
+
+    def raise_no_content(request):
+        raise HTTPNoContent()
+
+    def custom_exception_view(context, request):
+        """Custom view that forces an HTTPException when no views
+        are found to handle a given request
+        """
+        if 'raise_exception' in request.url:
+            raise HTTPNotFound()
+        else:
+            return HTTPNotFound()
+
+    config = Configurator(settings=settings)
+    config.add_route('index', '/')
+    config.add_route('error', '/error')
+    config.add_route('exception', '/exception')
+    config.add_route('json', '/json')
+    config.add_route('renderer', '/renderer')
+    config.add_route('raise_redirect', '/redirect')
+    config.add_route('raise_no_content', '/nocontent')
+    config.add_view(index, route_name='index')
+    config.add_view(error, route_name='error')
+    config.add_view(exception, route_name='exception')
+    config.add_view(json, route_name='json', renderer='json')
+    config.add_view(renderer, route_name='renderer', renderer='template.pt')
+    config.add_view(raise_redirect, route_name='raise_redirect')
+    config.add_view(raise_no_content, route_name='raise_no_content')
+    # required to reproduce a regression test
+    config.add_notfound_view(custom_exception_view)
+    # required for rendering tests
+    renderer = config.testing_add_renderer('template.pt')
+
+    if instrument:
+        trace_pyramid(config)
+
+    return config.make_wsgi_app(), renderer
diff --git a/tests/contrib/pyramid/test_pyramid.py b/tests/contrib/pyramid/test_pyramid.py
new file mode 100644
index 0000000000..b2429d8a90
--- /dev/null
+++ b/tests/contrib/pyramid/test_pyramid.py
@@ -0,0 +1,74 @@
+from ddtrace.constants import SAMPLING_PRIORITY_KEY, ORIGIN_KEY
+
+from .utils import PyramidTestCase, PyramidBase
+
+
+def includeme(config):
+    pass
+
+
+class TestPyramid(PyramidTestCase):
+    instrument = True
+
+    def test_tween_overridden(self):
+        # in case our tween is overridden by the user config we should
+        # not trace rendering
+        self.override_settings({'pyramid.tweens': 'pyramid.tweens.excview_tween_factory'})
+        self.app.get('/json', status=200)
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 0
+
+
+class TestPyramidDistributedTracingDefault(PyramidBase):
+    instrument = True
+
+    def get_settings(self):
+        return {}
+
+    def test_distributed_tracing(self):
+        # ensure the Context is properly created
+        # if distributed tracing is enabled
+        headers = {
+            'x-datadog-trace-id': '100',
+            'x-datadog-parent-id': '42',
+            'x-datadog-sampling-priority': '2',
+            'x-datadog-origin': 'synthetics',
+        }
+        self.app.get('/', headers=headers, status=200)
+        writer = self.tracer.writer
+        spans = writer.pop()
+        assert len(spans) == 1
+        # check the propagated Context
+        span = spans[0]
+        assert span.trace_id == 100
+        assert span.parent_id == 42
+        assert span.get_metric(SAMPLING_PRIORITY_KEY) == 2
+        assert span.get_tag(ORIGIN_KEY) == 'synthetics'
+
+
+class TestPyramidDistributedTracingDisabled(PyramidBase):
+    instrument = True
+
+    def get_settings(self):
+        return {
+            'datadog_distributed_tracing': False,
+        }
+
+    def test_distributed_tracing_disabled(self):
+        # we do not inherit context if distributed tracing is disabled
+        headers = {
+            'x-datadog-trace-id': '100',
+            'x-datadog-parent-id': '42',
+            'x-datadog-sampling-priority': '2',
+            'x-datadog-origin': 'synthetics',
+        }
+        self.app.get('/', headers=headers, status=200)
+        writer = self.tracer.writer
+        spans = writer.pop()
+        assert len(spans) == 1
+        # check the propagated Context
+        span = spans[0]
+        assert span.trace_id != 100
+        assert span.parent_id != 42
+        assert span.get_metric(SAMPLING_PRIORITY_KEY) != 2
+        assert 
span.get_tag(ORIGIN_KEY) != 'synthetics' diff --git a/tests/contrib/pyramid/test_pyramid_autopatch.py b/tests/contrib/pyramid/test_pyramid_autopatch.py new file mode 100644 index 0000000000..0e4c8c39bd --- /dev/null +++ b/tests/contrib/pyramid/test_pyramid_autopatch.py @@ -0,0 +1,49 @@ +from pyramid.config import Configurator + +from .test_pyramid import PyramidTestCase, PyramidBase + + +class TestPyramidAutopatch(PyramidTestCase): + instrument = False + + +class TestPyramidExplicitTweens(PyramidTestCase): + instrument = False + + def get_settings(self): + return { + 'pyramid.tweens': 'pyramid.tweens.excview_tween_factory\n', + } + + +class TestPyramidDistributedTracing(PyramidBase): + instrument = False + + def test_distributed_tracing(self): + # ensure the Context is properly created + # if distributed tracing is enabled + headers = { + 'x-datadog-trace-id': '100', + 'x-datadog-parent-id': '42', + 'x-datadog-sampling-priority': '2', + 'x-datadog-origin': 'synthetics', + } + self.app.get('/', headers=headers, status=200) + writer = self.tracer.writer + spans = writer.pop() + assert len(spans) == 1 + # check the propagated Context + span = spans[0] + assert span.trace_id == 100 + assert span.parent_id == 42 + assert span.get_metric('_sampling_priority_v1') == 2 + + +def _include_me(config): + pass + + +def test_config_include(): + """Makes sure that relative imports still work when the application is run with ddtrace-run.""" + config = Configurator() + config.include('tests.contrib.pyramid._include_me') diff --git a/tests/contrib/pyramid/utils.py b/tests/contrib/pyramid/utils.py new file mode 100644 index 0000000000..3e9ee9dc81 --- /dev/null +++ b/tests/contrib/pyramid/utils.py @@ -0,0 +1,357 @@ +import json + +from pyramid.httpexceptions import HTTPException +import pytest +import webtest + +from ddtrace import compat +from ddtrace import config +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.pyramid.patch import insert_tween_if_needed +from ddtrace.ext import http + +from .app import create_app + +from ...opentracer.utils import init_tracer +from ...base import BaseTracerTestCase +from ...utils import assert_span_http_status_code + + +class PyramidBase(BaseTracerTestCase): + """Base Pyramid test application""" + + def setUp(self): + super(PyramidBase, self).setUp() + self.create_app() + + def create_app(self, settings=None): + # get default settings or use what is provided + settings = settings or self.get_settings() + # always set the dummy tracer as a default tracer + settings.update({"datadog_tracer": self.tracer}) + + app, renderer = create_app(settings, self.instrument) + self.app = webtest.TestApp(app) + self.renderer = renderer + + def get_settings(self): + return {} + + def override_settings(self, settings): + self.create_app(settings) + + +class PyramidTestCase(PyramidBase): + """Pyramid TestCase that includes tests for automatic instrumentation""" + + instrument = True + + def get_settings(self): + return { + "datadog_trace_service": "foobar", + } + + def test_200(self, query_string=""): + if query_string: + fqs = "?" 
+ query_string
+        else:
+            fqs = ""
+        res = self.app.get("/" + fqs, status=200)
+        assert b"idx" in res.body
+
+        writer = self.tracer.writer
+        spans = writer.pop()
+        assert len(spans) == 1
+        s = spans[0]
+        assert s.service == "foobar"
+        assert s.resource == "GET index"
+        assert s.error == 0
+        assert s.span_type == "web"
+        assert s.meta.get("http.method") == "GET"
+        assert_span_http_status_code(s, 200)
+        assert s.meta.get(http.URL) == "http://localhost/"
+        if config.pyramid.trace_query_string:
+            assert s.meta.get(http.QUERY_STRING) == query_string
+        else:
+            assert http.QUERY_STRING not in s.meta
+        assert s.meta.get("pyramid.route.name") == "index"
+
+        # ensure services are set correctly
+        services = writer.pop_services()
+        expected = {}
+        assert services == expected
+
+    def test_200_query_string(self):
+        return self.test_200("foo=bar")
+
+    def test_200_query_string_trace(self):
+        with self.override_http_config("pyramid", dict(trace_query_string=True)):
+            return self.test_200("foo=bar")
+
+    def test_analytics_global_on_integration_default(self):
+        """
+        When making a request
+        When an integration trace search is not enabled and no sample rate is set and globally trace search is enabled
+        We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=True)):
+            res = self.app.get("/", status=200)
+            assert b"idx" in res.body
+
+        self.assert_structure(dict(name="pyramid.request", metrics={ANALYTICS_SAMPLE_RATE_KEY: 1.0}),)
+
+    def test_analytics_global_on_integration_on(self):
+        """
+        When making a request
+        When an integration trace search is enabled and sample rate is set and globally trace search is enabled
+        We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=True)):
+            self.override_settings(dict(datadog_analytics_enabled=True, datadog_analytics_sample_rate=0.5))
+            res = self.app.get("/", status=200)
+            assert b"idx" in res.body
+
+        self.assert_structure(dict(name="pyramid.request", metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}),)
+
+    def test_analytics_global_off_integration_default(self):
+        """
+        When making a request
+        When an integration trace search is not enabled and no sample rate is set and globally trace search is disabled
+        We expect the root span to not include the tag
+        """
+        with self.override_global_config(dict(analytics_enabled=False)):
+            res = self.app.get("/", status=200)
+            assert b"idx" in res.body
+
+        root = self.get_root_span()
+        self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    def test_analytics_global_off_integration_on(self):
+        """
+        When making a request
+        When an integration trace search is enabled and sample rate is set and globally trace search is disabled
+        We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=False)):
+            self.override_settings(dict(datadog_analytics_enabled=True, datadog_analytics_sample_rate=0.5))
+            res = self.app.get("/", status=200)
+            assert b"idx" in res.body
+
+        self.assert_structure(dict(name="pyramid.request", metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}),)
+
+    def test_404(self):
+        self.app.get("/404", status=404)
+
+        writer = self.tracer.writer
+        spans = writer.pop()
+        assert len(spans) == 1
+        s = spans[0]
+        assert s.service == "foobar"
+        assert s.resource == "404"
+        assert s.error == 0
+        assert s.span_type == "web"
+        assert s.meta.get("http.method") == "GET"
+        assert_span_http_status_code(s, 404)
+        assert s.meta.get(http.URL) == "http://localhost/404"
+
+    def 
test_302(self): + self.app.get("/redirect", status=302) + + writer = self.tracer.writer + spans = writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == "foobar" + assert s.resource == "GET raise_redirect" + assert s.error == 0 + assert s.span_type == "web" + assert s.meta.get("http.method") == "GET" + assert_span_http_status_code(s, 302) + assert s.meta.get(http.URL) == "http://localhost/redirect" + + def test_204(self): + self.app.get("/nocontent", status=204) + + writer = self.tracer.writer + spans = writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == "foobar" + assert s.resource == "GET raise_no_content" + assert s.error == 0 + assert s.span_type == "web" + assert s.meta.get("http.method") == "GET" + assert_span_http_status_code(s, 204) + assert s.meta.get(http.URL) == "http://localhost/nocontent" + + def test_exception(self): + try: + self.app.get("/exception", status=500) + except ZeroDivisionError: + pass + + writer = self.tracer.writer + spans = writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == "foobar" + assert s.resource == "GET exception" + assert s.error == 1 + assert s.span_type == "web" + assert s.meta.get("http.method") == "GET" + assert_span_http_status_code(s, 500) + assert s.meta.get(http.URL) == "http://localhost/exception" + assert s.meta.get("pyramid.route.name") == "exception" + + def test_500(self): + self.app.get("/error", status=500) + + writer = self.tracer.writer + spans = writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == "foobar" + assert s.resource == "GET error" + assert s.error == 1 + assert s.span_type == "web" + assert s.meta.get("http.method") == "GET" + assert_span_http_status_code(s, 500) + assert s.meta.get(http.URL) == "http://localhost/error" + assert s.meta.get("pyramid.route.name") == "error" + assert type(s.error) == int + + def test_json(self): + res = self.app.get("/json", status=200) + parsed = json.loads(compat.to_unicode(res.body)) + assert parsed == {"a": 1} + + writer = self.tracer.writer + spans = writer.pop() + assert len(spans) == 2 + spans_by_name = {s.name: s for s in spans} + s = spans_by_name["pyramid.request"] + assert s.service == "foobar" + assert s.resource == "GET json" + assert s.error == 0 + assert s.span_type == "web" + assert s.meta.get("http.method") == "GET" + assert_span_http_status_code(s, 200) + assert s.meta.get(http.URL) == "http://localhost/json" + assert s.meta.get("pyramid.route.name") == "json" + + s = spans_by_name["pyramid.render"] + assert s.service == "foobar" + assert s.error == 0 + assert s.span_type == "template" + + def test_renderer(self): + self.app.get("/renderer", status=200) + assert self.renderer._received["request"] is not None + + self.renderer.assert_(foo="bar") + writer = self.tracer.writer + spans = writer.pop() + assert len(spans) == 2 + spans_by_name = {s.name: s for s in spans} + s = spans_by_name["pyramid.request"] + assert s.service == "foobar" + assert s.resource == "GET renderer" + assert s.error == 0 + assert s.span_type == "web" + assert s.meta.get("http.method") == "GET" + assert_span_http_status_code(s, 200) + assert s.meta.get(http.URL) == "http://localhost/renderer" + assert s.meta.get("pyramid.route.name") == "renderer" + + s = spans_by_name["pyramid.render"] + assert s.service == "foobar" + assert s.error == 0 + assert s.span_type == "template" + + def test_http_exception_response(self): + with pytest.raises(HTTPException): + self.app.get("/404/raise_exception", status=404) + + writer = 
self.tracer.writer + spans = writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.service == "foobar" + assert s.resource == "404" + assert s.error == 1 + assert s.span_type == "web" + assert s.meta.get("http.method") == "GET" + assert_span_http_status_code(s, 404) + assert s.meta.get(http.URL) == "http://localhost/404/raise_exception" + + def test_insert_tween_if_needed_already_set(self): + settings = {"pyramid.tweens": "ddtrace.contrib.pyramid:trace_tween_factory"} + insert_tween_if_needed(settings) + assert settings["pyramid.tweens"] == "ddtrace.contrib.pyramid:trace_tween_factory" + + def test_insert_tween_if_needed_none(self): + settings = {"pyramid.tweens": ""} + insert_tween_if_needed(settings) + assert settings["pyramid.tweens"] == "" + + def test_insert_tween_if_needed_excview(self): + settings = {"pyramid.tweens": "pyramid.tweens.excview_tween_factory"} + insert_tween_if_needed(settings) + assert ( + settings["pyramid.tweens"] + == "ddtrace.contrib.pyramid:trace_tween_factory\npyramid.tweens.excview_tween_factory" + ) + + def test_insert_tween_if_needed_excview_and_other(self): + settings = {"pyramid.tweens": "a.first.tween\npyramid.tweens.excview_tween_factory\na.last.tween\n"} + insert_tween_if_needed(settings) + assert ( + settings["pyramid.tweens"] == "a.first.tween\n" + "ddtrace.contrib.pyramid:trace_tween_factory\n" + "pyramid.tweens.excview_tween_factory\n" + "a.last.tween\n" + ) + + def test_insert_tween_if_needed_others(self): + settings = {"pyramid.tweens": "a.random.tween\nand.another.one"} + insert_tween_if_needed(settings) + assert ( + settings["pyramid.tweens"] == "a.random.tween\nand.another.one\nddtrace.contrib.pyramid:trace_tween_factory" + ) + + def test_include_conflicts(self): + # test that includes do not create conflicts + self.override_settings({"pyramid.includes": "tests.contrib.pyramid.test_pyramid"}) + self.app.get("/404", status=404) + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + def test_200_ot(self): + """OpenTracing version of test_200.""" + ot_tracer = init_tracer("pyramid_svc", self.tracer) + + with ot_tracer.start_active_span("pyramid_get"): + res = self.app.get("/", status=200) + assert b"idx" in res.body + + writer = self.tracer.writer + spans = writer.pop() + assert len(spans) == 2 + + ot_span, dd_span = spans + + # confirm the parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.name == "pyramid_get" + assert ot_span.service == "pyramid_svc" + + assert dd_span.service == "foobar" + assert dd_span.resource == "GET index" + assert dd_span.error == 0 + assert dd_span.span_type == "web" + assert dd_span.meta.get("http.method") == "GET" + assert_span_http_status_code(dd_span, 200) + assert dd_span.meta.get(http.URL) == "http://localhost/" + assert dd_span.meta.get("pyramid.route.name") == "index" diff --git a/tests/contrib/redis/__init__.py b/tests/contrib/redis/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/redis/test.py b/tests/contrib/redis/test.py new file mode 100644 index 0000000000..bf746cd118 --- /dev/null +++ b/tests/contrib/redis/test.py @@ -0,0 +1,224 @@ +# -*- coding: utf-8 -*- +import redis + +from ddtrace import Pin, compat +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.redis import get_traced_redis +from ddtrace.contrib.redis.patch import patch, unpatch + +from tests.opentracer.utils import init_tracer +from ..config import REDIS_CONFIG +from ...test_tracer import 
get_dummy_tracer +from ...base import BaseTracerTestCase + + +def test_redis_legacy(): + # ensure the old interface isn't broken, but doesn't trace + tracer = get_dummy_tracer() + TracedRedisCache = get_traced_redis(tracer, 'foo') + r = TracedRedisCache(port=REDIS_CONFIG['port']) + r.set('a', 'b') + got = r.get('a') + assert compat.to_unicode(got) == 'b' + assert not tracer.writer.pop() + + +class TestRedisPatch(BaseTracerTestCase): + + TEST_SERVICE = 'redis-patch' + TEST_PORT = REDIS_CONFIG['port'] + + def setUp(self): + super(TestRedisPatch, self).setUp() + patch() + r = redis.Redis(port=self.TEST_PORT) + r.flushall() + Pin.override(r, service=self.TEST_SERVICE, tracer=self.tracer) + self.r = r + + def tearDown(self): + unpatch() + super(TestRedisPatch, self).tearDown() + + def test_long_command(self): + self.r.mget(*range(1000)) + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'redis.command' + assert span.span_type == 'redis' + assert span.error == 0 + meta = { + 'out.host': u'localhost', + } + metrics = { + 'out.port': self.TEST_PORT, + 'out.redis_db': 0, + } + for k, v in meta.items(): + assert span.get_tag(k) == v + for k, v in metrics.items(): + assert span.get_metric(k) == v + + assert span.get_tag('redis.raw_command').startswith(u'MGET 0 1 2 3') + assert span.get_tag('redis.raw_command').endswith(u'...') + + def test_basics(self): + us = self.r.get('cheese') + assert us is None + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'redis.command' + assert span.span_type == 'redis' + assert span.error == 0 + assert span.get_metric('out.redis_db') == 0 + assert span.get_tag('out.host') == 'localhost' + assert span.get_tag('redis.raw_command') == u'GET cheese' + assert span.get_metric('redis.args_length') == 2 + assert span.resource == 'GET cheese' + assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None + + def test_analytics_without_rate(self): + with self.override_config( + 'redis', + dict(analytics_enabled=True) + ): + us = self.r.get('cheese') + assert us is None + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 1.0 + + def test_analytics_with_rate(self): + with self.override_config( + 'redis', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + us = self.r.get('cheese') + assert us is None + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) == 0.5 + + def test_pipeline_traced(self): + with self.r.pipeline(transaction=False) as p: + p.set('blah', 32) + p.rpush('foo', u'éé') + p.hgetall('xxx') + p.execute() + + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'redis.command' + assert span.resource == u'SET blah 32\nRPUSH foo éé\nHGETALL xxx' + assert span.span_type == 'redis' + assert span.error == 0 + assert span.get_metric('out.redis_db') == 0 + assert span.get_tag('out.host') == 'localhost' + assert span.get_tag('redis.raw_command') == u'SET blah 32\nRPUSH foo éé\nHGETALL xxx' + assert span.get_metric('redis.pipeline_length') == 3 + assert span.get_metric('redis.pipeline_length') == 3 + assert span.get_metric(ANALYTICS_SAMPLE_RATE_KEY) is None + + def test_pipeline_immediate(self): + with self.r.pipeline() as p: + p.set('a', 1) + p.immediate_execute_command('SET', 'a', 
1) + p.execute() + + spans = self.get_spans() + assert len(spans) == 2 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert span.name == 'redis.command' + assert span.resource == u'SET a 1' + assert span.span_type == 'redis' + assert span.error == 0 + assert span.get_metric('out.redis_db') == 0 + assert span.get_tag('out.host') == 'localhost' + + def test_meta_override(self): + r = self.r + pin = Pin.get_from(r) + if pin: + pin.clone(tags={'cheese': 'camembert'}).onto(r) + + r.get('cheese') + spans = self.get_spans() + assert len(spans) == 1 + span = spans[0] + assert span.service == self.TEST_SERVICE + assert 'cheese' in span.meta and span.meta['cheese'] == 'camembert' + + def test_patch_unpatch(self): + tracer = get_dummy_tracer() + writer = tracer.writer + + # Test patch idempotence + patch() + patch() + + r = redis.Redis(port=REDIS_CONFIG['port']) + Pin.get_from(r).clone(tracer=tracer).onto(r) + r.get('key') + + spans = writer.pop() + assert spans, spans + assert len(spans) == 1 + + # Test unpatch + unpatch() + + r = redis.Redis(port=REDIS_CONFIG['port']) + r.get('key') + + spans = writer.pop() + assert not spans, spans + + # Test patch again + patch() + + r = redis.Redis(port=REDIS_CONFIG['port']) + Pin.get_from(r).clone(tracer=tracer).onto(r) + r.get('key') + + spans = writer.pop() + assert spans, spans + assert len(spans) == 1 + + def test_opentracing(self): + """Ensure OpenTracing works with redis.""" + ot_tracer = init_tracer('redis_svc', self.tracer) + + with ot_tracer.start_active_span('redis_get'): + us = self.r.get('cheese') + assert us is None + + spans = self.get_spans() + assert len(spans) == 2 + ot_span, dd_span = spans + + # confirm the parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.name == 'redis_get' + assert ot_span.service == 'redis_svc' + + assert dd_span.service == self.TEST_SERVICE + assert dd_span.name == 'redis.command' + assert dd_span.span_type == 'redis' + assert dd_span.error == 0 + assert dd_span.get_metric('out.redis_db') == 0 + assert dd_span.get_tag('out.host') == 'localhost' + assert dd_span.get_tag('redis.raw_command') == u'GET cheese' + assert dd_span.get_metric('redis.args_length') == 2 + assert dd_span.resource == 'GET cheese' diff --git a/tests/contrib/rediscluster/__init__.py b/tests/contrib/rediscluster/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/rediscluster/test.py b/tests/contrib/rediscluster/test.py new file mode 100644 index 0000000000..f2224cbc04 --- /dev/null +++ b/tests/contrib/rediscluster/test.py @@ -0,0 +1,102 @@ +# -*- coding: utf-8 -*- +import rediscluster + +from ddtrace import Pin +from ddtrace.contrib.rediscluster.patch import patch, unpatch +from ..config import REDISCLUSTER_CONFIG +from ...test_tracer import get_dummy_tracer +from ...base import BaseTracerTestCase + + +class TestRedisPatch(BaseTracerTestCase): + + TEST_SERVICE = 'rediscluster-patch' + TEST_HOST = REDISCLUSTER_CONFIG['host'] + TEST_PORTS = REDISCLUSTER_CONFIG['ports'] + + def _get_test_client(self): + startup_nodes = [ + {'host': self.TEST_HOST, 'port': int(port)} + for port in self.TEST_PORTS.split(',') + ] + return rediscluster.StrictRedisCluster(startup_nodes=startup_nodes) + + def setUp(self): + super(TestRedisPatch, self).setUp() + patch() + r = self._get_test_client() + r.flushall() + Pin.override(r, service=self.TEST_SERVICE, tracer=self.tracer) + self.r = r + + def tearDown(self): + unpatch() + super(TestRedisPatch, self).tearDown() 
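+
+    # The tests below exercise the client patched in setUp(). As a sketch of
+    # the pattern (names here are illustrative, not part of this suite):
+    #
+    #     r = rediscluster.StrictRedisCluster(startup_nodes=nodes)
+    #     Pin.override(r, service='my-redis', tracer=my_tracer)
+    #     r.get('key')  # the resulting span is reported to my_tracer
+    #
+    # Pin.override in setUp routes spans to the dummy tracer behind self.get_spans().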
+
+    def test_basics(self):
+        us = self.r.get('cheese')
+        assert us is None
+        spans = self.get_spans()
+        assert len(spans) == 1
+        span = spans[0]
+        assert span.service == self.TEST_SERVICE
+        assert span.name == 'redis.command'
+        assert span.span_type == 'redis'
+        assert span.error == 0
+        assert span.get_tag('redis.raw_command') == u'GET cheese'
+        assert span.get_metric('redis.args_length') == 2
+        assert span.resource == 'GET cheese'
+
+    def test_pipeline(self):
+        with self.r.pipeline(transaction=False) as p:
+            p.set('blah', 32)
+            p.rpush('foo', u'éé')
+            p.hgetall('xxx')
+            p.execute()
+
+        spans = self.get_spans()
+        assert len(spans) == 1
+        span = spans[0]
+        assert span.service == self.TEST_SERVICE
+        assert span.name == 'redis.command'
+        assert span.resource == u'SET blah 32\nRPUSH foo éé\nHGETALL xxx'
+        assert span.span_type == 'redis'
+        assert span.error == 0
+        assert span.get_tag('redis.raw_command') == u'SET blah 32\nRPUSH foo éé\nHGETALL xxx'
+        assert span.get_metric('redis.pipeline_length') == 3
+
+    def test_patch_unpatch(self):
+        tracer = get_dummy_tracer()
+        writer = tracer.writer
+
+        # Test patch idempotence
+        patch()
+        patch()
+
+        r = self._get_test_client()
+        Pin.get_from(r).clone(tracer=tracer).onto(r)
+        r.get('key')
+
+        spans = writer.pop()
+        assert spans, spans
+        assert len(spans) == 1
+
+        # Test unpatch
+        unpatch()
+
+        r = self._get_test_client()
+        r.get('key')
+
+        spans = writer.pop()
+        assert not spans, spans
+
+        # Test patch again
+        patch()
+
+        r = self._get_test_client()
+        Pin.get_from(r).clone(tracer=tracer).onto(r)
+        r.get('key')
+
+        spans = writer.pop()
+        assert spans, spans
+        assert len(spans) == 1
diff --git a/tests/contrib/requests/__init__.py b/tests/contrib/requests/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/contrib/requests/test_requests.py b/tests/contrib/requests/test_requests.py
new file mode 100644
index 0000000000..97faa06d2f
--- /dev/null
+++ b/tests/contrib/requests/test_requests.py
@@ -0,0 +1,479 @@
+import pytest
+import requests
+from requests import Session
+from requests.exceptions import MissingSchema
+
+from ddtrace import config
+from ddtrace import Pin
+from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
+from ddtrace.contrib.requests import patch, unpatch
+from ddtrace.ext import errors, http
+
+from tests.opentracer.utils import init_tracer
+
+from ...base import BaseTracerTestCase
+from ...util import override_global_tracer
+from ...utils import assert_span_http_status_code
+
+# socket name comes from https://english.stackexchange.com/a/44048
+SOCKET = 'httpbin.org'
+URL_200 = 'http://{}/status/200'.format(SOCKET)
+URL_500 = 'http://{}/status/500'.format(SOCKET)
+
+
+class BaseRequestTestCase(object):
+    """Create a traced Session, patching during the setUp and
+    unpatching after the tearDown
+    """
+    def setUp(self):
+        super(BaseRequestTestCase, self).setUp()
+
+        patch()
+        self.session = Session()
+        setattr(self.session, 'datadog_tracer', self.tracer)
+
+    def tearDown(self):
+        unpatch()
+
+        super(BaseRequestTestCase, self).tearDown()
+
+
+class TestRequests(BaseRequestTestCase, BaseTracerTestCase):
+    def test_resource_path(self):
+        out = self.session.get(URL_200)
+        assert out.status_code == 200
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        s = spans[0]
+        assert s.get_tag('http.url') == URL_200
+
+    def test_tracer_disabled(self):
+        # ensure no spans are created when the tracer is disabled
+        self.tracer.enabled = False
+        out = self.session.get(URL_200)
+        assert out.status_code == 200
+        spans = 
self.tracer.writer.pop() + assert len(spans) == 0 + + def test_args_kwargs(self): + # ensure all valid combinations of args / kwargs work + url = URL_200 + method = 'GET' + inputs = [ + ([], {'method': method, 'url': url}), + ([method], {'url': url}), + ([method, url], {}), + ] + + for args, kwargs in inputs: + # ensure a traced request works with these args + out = self.session.request(*args, **kwargs) + assert out.status_code == 200 + # validation + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.get_tag(http.METHOD) == 'GET' + assert_span_http_status_code(s, 200) + + def test_untraced_request(self): + # ensure the unpatch removes tracing + unpatch() + untraced = Session() + + out = untraced.get(URL_200) + assert out.status_code == 200 + # validation + spans = self.tracer.writer.pop() + assert len(spans) == 0 + + def test_double_patch(self): + # ensure that double patch doesn't duplicate instrumentation + patch() + session = Session() + setattr(session, 'datadog_tracer', self.tracer) + + out = session.get(URL_200) + assert out.status_code == 200 + spans = self.tracer.writer.pop() + assert len(spans) == 1 + + def test_200(self): + out = self.session.get(URL_200) + assert out.status_code == 200 + # validation + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.get_tag(http.METHOD) == 'GET' + assert_span_http_status_code(s, 200) + assert s.error == 0 + assert s.span_type == 'http' + assert http.QUERY_STRING not in s.meta + + def test_200_send(self): + # when calling send directly + req = requests.Request(url=URL_200, method='GET') + req = self.session.prepare_request(req) + + out = self.session.send(req) + assert out.status_code == 200 + # validation + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.get_tag(http.METHOD) == 'GET' + assert_span_http_status_code(s, 200) + assert s.error == 0 + assert s.span_type == 'http' + + def test_200_query_string(self): + # ensure query string is removed before adding url to metadata + query_string = 'key=value&key2=value2' + with self.override_http_config('requests', dict(trace_query_string=True)): + out = self.session.get(URL_200 + '?' 
+ query_string) + assert out.status_code == 200 + # validation + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.get_tag(http.METHOD) == 'GET' + assert_span_http_status_code(s, 200) + assert s.get_tag(http.URL) == URL_200 + assert s.error == 0 + assert s.span_type == 'http' + assert s.get_tag(http.QUERY_STRING) == query_string + + def test_requests_module_200(self): + # ensure the requests API is instrumented even without + # using a `Session` directly + with override_global_tracer(self.tracer): + out = requests.get(URL_200) + assert out.status_code == 200 + # validation + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.get_tag(http.METHOD) == 'GET' + assert_span_http_status_code(s, 200) + assert s.error == 0 + assert s.span_type == 'http' + + def test_post_500(self): + out = self.session.post(URL_500) + # validation + assert out.status_code == 500 + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.get_tag(http.METHOD) == 'POST' + assert_span_http_status_code(s, 500) + assert s.error == 1 + + def test_non_existant_url(self): + try: + self.session.get('http://doesnotexist.google.com') + except Exception: + pass + else: + assert 0, 'expected error' + + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.get_tag(http.METHOD) == 'GET' + assert s.error == 1 + assert 'Failed to establish a new connection' in s.get_tag(errors.MSG) + assert 'Failed to establish a new connection' in s.get_tag(errors.STACK) + assert 'Traceback (most recent call last)' in s.get_tag(errors.STACK) + assert 'requests.exception' in s.get_tag(errors.TYPE) + + def test_500(self): + out = self.session.get(URL_500) + assert out.status_code == 500 + + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + assert s.get_tag(http.METHOD) == 'GET' + assert_span_http_status_code(s, 500) + assert s.error == 1 + + def test_default_service_name(self): + # ensure a default service name is set + out = self.session.get(URL_200) + assert out.status_code == 200 + + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + + assert s.service == 'requests' + + def test_user_set_service_name(self): + # ensure a service name set by the user has precedence + cfg = config.get_from(self.session) + cfg['service_name'] = 'clients' + out = self.session.get(URL_200) + assert out.status_code == 200 + + spans = self.tracer.writer.pop() + assert len(spans) == 1 + s = spans[0] + + assert s.service == 'clients' + + def test_parent_service_name_precedence(self): + # ensure the parent service name has precedence if the value + # is not set by the user + with self.tracer.trace('parent.span', service='web'): + out = self.session.get(URL_200) + assert out.status_code == 200 + + spans = self.tracer.writer.pop() + assert len(spans) == 2 + s = spans[1] + + assert s.name == 'requests.request' + assert s.service == 'web' + + def test_parent_without_service_name(self): + # ensure the default value is used if the parent + # doesn't have a service + with self.tracer.trace('parent.span'): + out = self.session.get(URL_200) + assert out.status_code == 200 + + spans = self.tracer.writer.pop() + assert len(spans) == 2 + s = spans[1] + + assert s.name == 'requests.request' + assert s.service == 'requests' + + def test_user_service_name_precedence(self): + # ensure the user service name takes precedence over + # the parent Span + cfg = config.get_from(self.session) + cfg['service_name'] = 'clients' + 
+
+    def test_split_by_domain(self):
+        # ensure the service name is generated from the domain name
+        # of the outgoing call
+        cfg = config.get_from(self.session)
+        cfg['split_by_domain'] = True
+        out = self.session.get(URL_200)
+        assert out.status_code == 200
+
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        s = spans[0]
+
+        assert s.service == 'httpbin.org'
+
+    def test_split_by_domain_precedence(self):
+        # ensure split by domain always takes precedence
+        cfg = config.get_from(self.session)
+        cfg['split_by_domain'] = True
+        cfg['service_name'] = 'intake'
+        out = self.session.get(URL_200)
+        assert out.status_code == 200
+
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        s = spans[0]
+
+        assert s.service == 'httpbin.org'
+
+    def test_split_by_domain_wrong(self):
+        # ensure split by domain doesn't crash in case of a malformed URL;
+        # in that case, no spans are created
+        cfg = config.get_from(self.session)
+        cfg['split_by_domain'] = True
+        with pytest.raises(MissingSchema):
+            self.session.get('http:/some>thing')
+
+        # We are wrapping `requests.Session.send` and this error gets thrown before that function
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 0
+
+    def test_split_by_domain_remove_auth_in_url(self):
+        # ensure that auth details are stripped from the URL
+        cfg = config.get_from(self.session)
+        cfg['split_by_domain'] = True
+        out = self.session.get('http://user:pass@httpbin.org')
+        assert out.status_code == 200
+
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        s = spans[0]
+
+        assert s.service == 'httpbin.org'
+
+    def test_split_by_domain_includes_port(self):
+        # ensure that the port is included if present in the URL
+        cfg = config.get_from(self.session)
+        cfg['split_by_domain'] = True
+        out = self.session.get('http://httpbin.org:80')
+        assert out.status_code == 200
+
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        s = spans[0]
+
+        assert s.service == 'httpbin.org:80'
+
+    def test_split_by_domain_includes_port_path(self):
+        # ensure that the port is included if present in the URL, but not the path
+        cfg = config.get_from(self.session)
+        cfg['split_by_domain'] = True
+        out = self.session.get('http://httpbin.org:80/anything/v1/foo')
+        assert out.status_code == 200
+
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        s = spans[0]
+
+        assert s.service == 'httpbin.org:80'
+
+    def test_200_ot(self):
+        """OpenTracing version of test_200."""
+
+        ot_tracer = init_tracer('requests_svc', self.tracer)
+
+        with ot_tracer.start_active_span('requests_get'):
+            out = self.session.get(URL_200)
+            assert out.status_code == 200
+
+        # validation
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 2
+
+        ot_span, dd_span = spans
+
+        # confirm the parenting
+        assert ot_span.parent_id is None
+        assert dd_span.parent_id == ot_span.span_id
+
+        assert ot_span.name == 'requests_get'
+        assert ot_span.service == 'requests_svc'
+
+        assert dd_span.get_tag(http.METHOD) == 'GET'
+        assert_span_http_status_code(dd_span, 200)
+        assert dd_span.error == 0
+        assert dd_span.span_type == 'http'
+
+    def test_request_and_response_headers(self):
+        # Disabled when not configured
+        self.session.get(URL_200, headers={'my-header': 'my_value'})
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        s = spans[0]
+        assert s.get_tag('http.request.headers.my-header') is None
+        assert s.get_tag('http.response.headers.access-control-allow-origin') is None
+
+        # Enabled when explicitly configured
+        with self.override_config('requests', {}):
+            config.requests.http.trace_headers(['my-header', 'access-control-allow-origin'])
+            self.session.get(URL_200, headers={'my-header': 'my_value'})
+        spans = self.tracer.writer.pop()
+        assert len(spans) == 1
+        s = spans[0]
+        assert s.get_tag('http.request.headers.my-header') == 'my_value'
+        assert s.get_tag('http.response.headers.access-control-allow-origin') == '*'
+
+    def test_analytics_integration_default(self):
+        """
+        When making a request
+        When an integration trace search is not configured
+        We expect the root span to not have the analytics sample rate tag
+        """
+        self.session.get(URL_200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        s = spans[0]
+        self.assertIsNone(s.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    def test_analytics_integration_disabled(self):
+        """
+        When making a request
+        When an integration trace search is disabled and the sample rate is set
+        We expect the root span to not have the analytics sample rate tag
+        """
+        with self.override_config('requests', dict(analytics_enabled=False, analytics_sample_rate=0.5)):
+            self.session.get(URL_200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        s = spans[0]
+        self.assertIsNone(s.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+    def test_analytics_integration_on(self):
+        """
+        When making a request
+        When an integration trace search is enabled and the sample rate is set
+        We expect the root span to have the appropriate tag
+        """
+        with self.override_config('requests', dict(analytics_enabled=True, analytics_sample_rate=0.5)):
+            self.session.get(URL_200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        s = spans[0]
+        self.assertEqual(s.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
+
+    def test_analytics_integration_on_using_pin(self):
+        """
+        When making a request
+        When an integration trace search is enabled by a Pin and the sample rate is set
+        We expect the root span to have the appropriate tag
+        """
+        pin = Pin(service=__name__,
+                  app='requests',
+                  _config={
+                      'service_name': __name__,
+                      'distributed_tracing': False,
+                      'split_by_domain': False,
+                      'analytics_enabled': True,
+                      'analytics_sample_rate': 0.5,
+                  })
+        pin.onto(self.session)
+        self.session.get(URL_200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        s = spans[0]
+        self.assertEqual(s.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5)
+
+    def test_analytics_integration_on_using_pin_default(self):
+        """
+        When making a request
+        When an integration trace search is enabled by a Pin and the sample rate is not set
+        We expect the root span to have the default sample rate of 1.0
+        """
+        pin = Pin(service=__name__,
+                  app='requests',
+                  _config={
+                      'service_name': __name__,
+                      'distributed_tracing': False,
+                      'split_by_domain': False,
+                      'analytics_enabled': True,
+                  })
+        pin.onto(self.session)
+        self.session.get(URL_200)
+
+        spans = self.tracer.writer.pop()
+        self.assertEqual(len(spans), 1)
+        s = spans[0]
+        self.assertEqual(s.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0)
diff --git a/tests/contrib/requests/test_requests_distributed.py b/tests/contrib/requests/test_requests_distributed.py
new file mode 100644
index 0000000000..6b4ea5280d
--- /dev/null
+++ b/tests/contrib/requests/test_requests_distributed.py
@@ -0,0 +1,163 @@
+from requests_mock import Adapter
+
+from ddtrace import config
+
+from ...base import BaseTracerTestCase
+from .test_requests import BaseRequestTestCase
+
+
+class TestRequestsDistributed(BaseRequestTestCase, BaseTracerTestCase):
+    def headers_here(self, tracer, request, root_span):
+        # Use an additional matcher to query the request headers.
+        # This is because the parent_id can only be known within such a callback,
+        # as it's defined on the requests span, which is not available when calling register_uri
+        headers = request.headers
+        assert 'x-datadog-trace-id' in headers
+        assert 'x-datadog-parent-id' in headers
+        assert str(root_span.trace_id) == headers['x-datadog-trace-id']
+        req_span = root_span.context.get_current_span()
+        assert 'requests.request' == req_span.name
+        assert str(req_span.span_id) == headers['x-datadog-parent-id']
+        return True
+
+    def headers_not_here(self, tracer, request):
+        headers = request.headers
+        assert 'x-datadog-trace-id' not in headers
+        assert 'x-datadog-parent-id' not in headers
+        return True
+
+    def test_propagation_default(self):
+        # ensure that, by default, distributed tracing is enabled
+        adapter = Adapter()
+        self.session.mount('mock', adapter)
+
+        with self.tracer.trace('root') as root:
+            def matcher(request):
+                return self.headers_here(self.tracer, request, root)
+            adapter.register_uri('GET', 'mock://datadog/foo', additional_matcher=matcher, text='bar')
+            resp = self.session.get('mock://datadog/foo')
+            assert 200 == resp.status_code
+            assert 'bar' == resp.text
+
+    def test_propagation_true_global(self):
+        # distributed tracing can be enabled globally
+        adapter = Adapter()
+        self.session.mount('mock', adapter)
+
+        with self.override_config('requests', dict(distributed_tracing=True)):
+            with self.tracer.trace('root') as root:
+                def matcher(request):
+                    return self.headers_here(self.tracer, request, root)
+                adapter.register_uri('GET', 'mock://datadog/foo', additional_matcher=matcher, text='bar')
+                resp = self.session.get('mock://datadog/foo')
+                assert 200 == resp.status_code
+                assert 'bar' == resp.text
+
+    def test_propagation_false_global(self):
+        # distributed tracing can be disabled globally
+        adapter = Adapter()
+        self.session.mount('mock', adapter)
+
+        with self.override_config('requests', dict(distributed_tracing=False)):
+            with self.tracer.trace('root'):
+                def matcher(request):
+                    return self.headers_not_here(self.tracer, request)
+                adapter.register_uri('GET', 'mock://datadog/foo', additional_matcher=matcher, text='bar')
+                resp = self.session.get('mock://datadog/foo')
+                assert 200 == resp.status_code
+                assert 'bar' == resp.text
+
+    def test_propagation_true(self):
+        # ensure distributed tracing can be enabled manually
+        cfg = config.get_from(self.session)
+        cfg['distributed_tracing'] = True
+        adapter = Adapter()
+        self.session.mount('mock', adapter)
+
+        with self.tracer.trace('root') as root:
+            def matcher(request):
+                return self.headers_here(self.tracer, request, root)
+            adapter.register_uri('GET', 'mock://datadog/foo', additional_matcher=matcher, text='bar')
+            resp = self.session.get('mock://datadog/foo')
+            assert 200 == resp.status_code
+            assert 'bar' == resp.text
+
+        spans = self.tracer.writer.spans
+        root, req = spans
+        assert 'root' == root.name
+        assert 'requests.request' == req.name
+        assert root.trace_id == req.trace_id
+        assert root.span_id == req.parent_id
+
+    def test_propagation_false(self):
+        # ensure distributed tracing can be disabled manually
+        cfg = config.get_from(self.session)
+        cfg['distributed_tracing'] = False
+        adapter = Adapter()
+        self.session.mount('mock', adapter)
+
+        with self.tracer.trace('root'):
+            def matcher(request):
+                return self.headers_not_here(self.tracer, request)
+            adapter.register_uri('GET', 'mock://datadog/foo', additional_matcher=matcher, text='bar')
+            resp = self.session.get('mock://datadog/foo')
+            assert 200 == resp.status_code
+            assert 'bar' == resp.text
+
+    def test_propagation_true_legacy_default(self):
+        # [Backward compatibility]: ensure distributed tracing is enabled by
+        # default even when the legacy `Session` attribute is not set
+        adapter = Adapter()
+        self.session.mount('mock', adapter)
+
+        with self.tracer.trace('root') as root:
+            def matcher(request):
+                return self.headers_here(self.tracer, request, root)
+            adapter.register_uri('GET', 'mock://datadog/foo', additional_matcher=matcher, text='bar')
+            resp = self.session.get('mock://datadog/foo')
+            assert 200 == resp.status_code
+            assert 'bar' == resp.text
+
+        spans = self.tracer.writer.spans
+        root, req = spans
+        assert 'root' == root.name
+        assert 'requests.request' == req.name
+        assert root.trace_id == req.trace_id
+        assert root.span_id == req.parent_id
+
+    def test_propagation_true_legacy(self):
+        # [Backward compatibility]: ensure users can switch the distributed
+        # tracing flag using the `Session` attribute
+        adapter = Adapter()
+        self.session.mount('mock', adapter)
+        self.session.distributed_tracing = True
+
+        with self.tracer.trace('root') as root:
+            def matcher(request):
+                return self.headers_here(self.tracer, request, root)
+            adapter.register_uri('GET', 'mock://datadog/foo', additional_matcher=matcher, text='bar')
+            resp = self.session.get('mock://datadog/foo')
+            assert 200 == resp.status_code
+            assert 'bar' == resp.text
+
+        spans = self.tracer.writer.spans
+        root, req = spans
+        assert 'root' == root.name
+        assert 'requests.request' == req.name
+        assert root.trace_id == req.trace_id
+        assert root.span_id == req.parent_id
+
+    def test_propagation_false_legacy(self):
+        # [Backward compatibility]: ensure users can switch the distributed
+        # tracing flag using the `Session` attribute
+        adapter = Adapter()
+        self.session.mount('mock', adapter)
+        self.session.distributed_tracing = False
+
+        with self.tracer.trace('root'):
+            def matcher(request):
+                return self.headers_not_here(self.tracer, request)
+            adapter.register_uri('GET', 'mock://datadog/foo', additional_matcher=matcher, text='bar')
+            resp = self.session.get('mock://datadog/foo')
+            assert 200 == resp.status_code
+            assert 'bar' == resp.text
diff --git a/tests/contrib/requests_gevent/__init__.py b/tests/contrib/requests_gevent/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/tests/contrib/requests_gevent/test_requests_gevent.py b/tests/contrib/requests_gevent/test_requests_gevent.py
new file mode 100644
index 0000000000..aa576b7c29
--- /dev/null
+++ b/tests/contrib/requests_gevent/test_requests_gevent.py
@@ -0,0 +1,47 @@
+import sys
+import unittest
+
+
+class TestRequestsGevent(unittest.TestCase):
+    def test_patch(self):
+        """
+        Patching `requests` before `gevent` monkeypatching
+
+        This is a regression test for https://github.com/DataDog/dd-trace-py/issues/506
+
+        When using `ddtrace-run` along with `requests` and `gevent`, our patching causes
+        `requests` and `urllib3` to get loaded before `gevent` has a chance to monkey patch.
+
+        This causes `gevent` to show a warning and, under certain versions, causes
+        a maximum recursion exception to be raised.
+        """
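A minimal sketch (not part of this diff) of the import ordering that avoids the failure mode described in this docstring: let gevent monkey-patch the stdlib before anything pulls in `requests` or `urllib3`. The URL is illustrative.

    from gevent import monkey
    monkey.patch_all()  # must run before requests/urllib3 are loaded

    from ddtrace import patch
    patch(requests=True)

    import requests  # safe now: ssl/socket were patched by gevent first

    # HTTPS is the code path that triggered the recursion in the regression
    requests.get('https://httpbin.org/get')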
+ """ + # Assert none of our modules have been imported yet + # DEV: This regression test depends on being able to control import order of these modules + # DEV: This is not entirely necessary but is a nice safe guard + self.assertNotIn('ddtrace', sys.modules) + self.assertNotIn('gevent', sys.modules) + self.assertNotIn('requests', sys.modules) + self.assertNotIn('urllib3', sys.modules) + + try: + # Import ddtrace and patch only `requests` + # DEV: We do not need to patch `gevent` for the exception to occur + from ddtrace import patch + patch(requests=True) + + # Import gevent and monkeypatch + from gevent import monkey + monkey.patch_all() + + # This is typically what will fail if `requests` (or `urllib3`) + # gets loaded before running `monkey.patch_all()` + # DEV: We are testing that no exception gets raised + import requests + + # DEV: We **MUST** use an HTTPS request, that is what causes the issue + requests.get('https://httpbin.org/get') + + finally: + # Ensure we always unpatch `requests` when we are done + from ddtrace.contrib.requests import unpatch + unpatch() diff --git a/tests/contrib/sqlalchemy/__init__.py b/tests/contrib/sqlalchemy/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/sqlalchemy/mixins.py b/tests/contrib/sqlalchemy/mixins.py new file mode 100644 index 0000000000..a73a400717 --- /dev/null +++ b/tests/contrib/sqlalchemy/mixins.py @@ -0,0 +1,236 @@ +# stdlib +import contextlib + +# 3rd party +from sqlalchemy.ext.declarative import declarative_base +from sqlalchemy.orm import sessionmaker +from sqlalchemy import ( + create_engine, + Column, + Integer, + String, +) + +# project +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.sqlalchemy import trace_engine + +# testing +from tests.opentracer.utils import init_tracer + + +Base = declarative_base() + + +class Player(Base): + """Player entity used to test SQLAlchemy ORM""" + __tablename__ = 'players' + + id = Column(Integer, primary_key=True) + name = Column(String(20)) + + +class SQLAlchemyTestMixin(object): + """SQLAlchemy test mixin that includes a complete set of tests + that must be executed for different engine. When a new test (or + a regression test) should be added to SQLAlchemy test suite, a new + entry must be appended here so that it will be executed for all + available and supported engines. If the test is specific to only + one engine, that test must be added to the specific `TestCase` + implementation. + + To support a new engine, create a new `TestCase` that inherits from + `SQLAlchemyTestMixin` and `TestCase`. Then you must define the following + static class variables: + * VENDOR: the database vendor name + * SQL_DB: the `sql.db` tag that we expect (it's the name of the database available in the `.env` file) + * SERVICE: the service that we expect by default + * ENGINE_ARGS: all arguments required to create the engine + + To check specific tags in each test, you must implement the + `check_meta(self, span)` method. 
+ """ + VENDOR = None + SQL_DB = None + SERVICE = None + ENGINE_ARGS = None + + def create_engine(self, engine_args): + # create a SQLAlchemy engine + config = dict(engine_args) + url = config.pop('url') + return create_engine(url, **config) + + @contextlib.contextmanager + def connection(self): + # context manager that provides a connection + # to the underlying database + try: + conn = self.engine.connect() + yield conn + finally: + conn.close() + + def check_meta(self, span): + # function that can be implemented according to the + # specific engine implementation + return + + def setUp(self): + super(SQLAlchemyTestMixin, self).setUp() + + # create an engine with the given arguments + self.engine = self.create_engine(self.ENGINE_ARGS) + + # create the database / entities and prepare a session for the test + Base.metadata.drop_all(bind=self.engine) + Base.metadata.create_all(self.engine, checkfirst=False) + Session = sessionmaker(bind=self.engine) + self.session = Session() + + # trace the engine + trace_engine(self.engine, self.tracer) + + def tearDown(self): + # clear the database and dispose the engine + self.session.close() + Base.metadata.drop_all(bind=self.engine) + self.engine.dispose() + super(SQLAlchemyTestMixin, self).tearDown() + + def test_orm_insert(self): + # ensures that the ORM session is traced + wayne = Player(id=1, name='wayne') + self.session.add(wayne) + self.session.commit() + + traces = self.tracer.writer.pop_traces() + # trace composition + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + # span fields + assert span.name == '{}.query'.format(self.VENDOR) + assert span.service == self.SERVICE + assert 'INSERT INTO players' in span.resource + assert span.get_tag('sql.db') == self.SQL_DB + assert span.get_metric('sql.rows') == 1 + self.check_meta(span) + assert span.span_type == 'sql' + assert span.error == 0 + assert span.duration > 0 + + def test_session_query(self): + # ensures that the Session queries are traced + out = list(self.session.query(Player).filter_by(name='wayne')) + assert len(out) == 0 + + traces = self.tracer.writer.pop_traces() + # trace composition + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + # span fields + assert span.name == '{}.query'.format(self.VENDOR) + assert span.service == self.SERVICE + assert 'SELECT players.id AS players_id, players.name AS players_name \nFROM players \nWHERE players.name' \ + in span.resource + assert span.get_tag('sql.db') == self.SQL_DB + self.check_meta(span) + assert span.span_type == 'sql' + assert span.error == 0 + assert span.duration > 0 + + def test_engine_connect_execute(self): + # ensures that engine.connect() is properly traced + with self.connection() as conn: + rows = conn.execute('SELECT * FROM players').fetchall() + assert len(rows) == 0 + + traces = self.tracer.writer.pop_traces() + # trace composition + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + # span fields + assert span.name == '{}.query'.format(self.VENDOR) + assert span.service == self.SERVICE + assert span.resource == 'SELECT * FROM players' + assert span.get_tag('sql.db') == self.SQL_DB + self.check_meta(span) + assert span.span_type == 'sql' + assert span.error == 0 + assert span.duration > 0 + + def test_traced_service(self): + # ensures that the service is set as expected + services = self.tracer.writer.pop_services() + expected = {} + assert services == expected + + def test_opentracing(self): + """Ensure that sqlalchemy works with the 
opentracer.""" + ot_tracer = init_tracer('sqlalch_svc', self.tracer) + + with ot_tracer.start_active_span('sqlalch_op'): + with self.connection() as conn: + rows = conn.execute('SELECT * FROM players').fetchall() + assert len(rows) == 0 + + traces = self.tracer.writer.pop_traces() + # trace composition + assert len(traces) == 1 + assert len(traces[0]) == 2 + ot_span, dd_span = traces[0] + + # confirm the parenting + assert ot_span.parent_id is None + assert dd_span.parent_id == ot_span.span_id + + assert ot_span.name == 'sqlalch_op' + assert ot_span.service == 'sqlalch_svc' + + # span fields + assert dd_span.name == '{}.query'.format(self.VENDOR) + assert dd_span.service == self.SERVICE + assert dd_span.resource == 'SELECT * FROM players' + assert dd_span.get_tag('sql.db') == self.SQL_DB + assert dd_span.span_type == 'sql' + assert dd_span.error == 0 + assert dd_span.duration > 0 + + def test_analytics_default(self): + # ensures that the ORM session is traced + wayne = Player(id=1, name='wayne') + self.session.add(wayne) + self.session.commit() + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with self.override_config( + 'sqlalchemy', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + wayne = Player(id=1, name='wayne') + self.session.add(wayne) + self.session.commit() + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with self.override_config( + 'sqlalchemy', + dict(analytics_enabled=True) + ): + wayne = Player(id=1, name='wayne') + self.session.add(wayne) + self.session.commit() + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) diff --git a/tests/contrib/sqlalchemy/test_mysql.py b/tests/contrib/sqlalchemy/test_mysql.py new file mode 100644 index 0000000000..2e2037c100 --- /dev/null +++ b/tests/contrib/sqlalchemy/test_mysql.py @@ -0,0 +1,51 @@ +from sqlalchemy.exc import ProgrammingError +import pytest + +from .mixins import SQLAlchemyTestMixin +from ..config import MYSQL_CONFIG +from ...base import BaseTracerTestCase + + +class MysqlConnectorTestCase(SQLAlchemyTestMixin, BaseTracerTestCase): + """TestCase for mysql-connector engine""" + VENDOR = 'mysql' + SQL_DB = 'test' + SERVICE = 'mysql' + ENGINE_ARGS = {'url': 'mysql+mysqlconnector://%(user)s:%(password)s@%(host)s:%(port)s/%(database)s' % MYSQL_CONFIG} + + def setUp(self): + super(MysqlConnectorTestCase, self).setUp() + + def tearDown(self): + super(MysqlConnectorTestCase, self).tearDown() + + def check_meta(self, span): + # check database connection tags + self.assertEqual(span.get_tag('out.host'), MYSQL_CONFIG['host']) + self.assertEqual(span.get_metric('out.port'), MYSQL_CONFIG['port']) + + def test_engine_execute_errors(self): + # ensures that SQL errors are reported + with pytest.raises(ProgrammingError): + with self.connection() as conn: + conn.execute('SELECT * FROM a_wrong_table').fetchall() + + traces = self.tracer.writer.pop_traces() + # trace composition + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 1) + span = traces[0][0] + # span fields + self.assertEqual(span.name, '{}.query'.format(self.VENDOR)) + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.resource, 'SELECT * FROM a_wrong_table') + self.assertEqual(span.get_tag('sql.db'), 
self.SQL_DB) + self.assertIsNone(span.get_tag('sql.rows') or span.get_metric('sql.rows')) + self.check_meta(span) + self.assertEqual(span.span_type, 'sql') + self.assertTrue(span.duration > 0) + # check the error + self.assertEqual(span.error, 1) + self.assertEqual(span.get_tag('error.type'), 'mysql.connector.errors.ProgrammingError') + self.assertTrue("Table 'test.a_wrong_table' doesn't exist" in span.get_tag('error.msg')) + self.assertTrue("Table 'test.a_wrong_table' doesn't exist" in span.get_tag('error.stack')) diff --git a/tests/contrib/sqlalchemy/test_patch.py b/tests/contrib/sqlalchemy/test_patch.py new file mode 100644 index 0000000000..05a9a0e27c --- /dev/null +++ b/tests/contrib/sqlalchemy/test_patch.py @@ -0,0 +1,105 @@ +import sqlalchemy + +from ddtrace import Pin +from ddtrace.contrib.sqlalchemy import patch, unpatch +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY + +from ..config import POSTGRES_CONFIG +from ...base import BaseTracerTestCase + + +class SQLAlchemyPatchTestCase(BaseTracerTestCase): + """TestCase that checks if the engine is properly traced + when the `patch()` method is used. + """ + def setUp(self): + super(SQLAlchemyPatchTestCase, self).setUp() + + # create a traced engine with the given arguments + # and configure the current PIN instance + patch() + dsn = 'postgresql://%(user)s:%(password)s@%(host)s:%(port)s/%(dbname)s' % POSTGRES_CONFIG + self.engine = sqlalchemy.create_engine(dsn) + Pin.override(self.engine, tracer=self.tracer) + + # prepare a connection + self.conn = self.engine.connect() + + def tearDown(self): + super(SQLAlchemyPatchTestCase, self).tearDown() + + # clear the database and dispose the engine + self.conn.close() + self.engine.dispose() + unpatch() + + def test_engine_traced(self): + # ensures that the engine is traced + rows = self.conn.execute('SELECT 1').fetchall() + assert len(rows) == 1 + + traces = self.tracer.writer.pop_traces() + # trace composition + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + # check subset of span fields + assert span.name == 'postgres.query' + assert span.service == 'postgres' + assert span.error == 0 + assert span.duration > 0 + + def test_engine_pin_service(self): + # ensures that the engine service is updated with the PIN object + Pin.override(self.engine, service='replica-db') + rows = self.conn.execute('SELECT 1').fetchall() + assert len(rows) == 1 + + traces = self.tracer.writer.pop_traces() + # trace composition + assert len(traces) == 1 + assert len(traces[0]) == 1 + span = traces[0][0] + # check subset of span fields + assert span.name == 'postgres.query' + assert span.service == 'replica-db' + assert span.error == 0 + assert span.duration > 0 + + def test_analytics_sample_rate(self): + # [ , ] + matrix = [ + # Default, not enabled, not set + [dict(), None], + + # Not enabled, but sample rate set + [dict(analytics_sample_rate=0.5), None], + + # Enabled and rate set + [dict(analytics_enabled=True, analytics_sample_rate=0.5), 0.5], + [dict(analytics_enabled=True, analytics_sample_rate=1), 1.0], + [dict(analytics_enabled=True, analytics_sample_rate=0), 0], + [dict(analytics_enabled=True, analytics_sample_rate=True), 1.0], + [dict(analytics_enabled=True, analytics_sample_rate=False), 0], + + # Disabled and rate set + [dict(analytics_enabled=False, analytics_sample_rate=0.5), None], + + # Enabled and rate not set + [dict(analytics_enabled=True), 1.0], + ] + for config, metric_value in matrix: + with self.override_config('sqlalchemy', config): + 
self.conn.execute('SELECT 1').fetchall() + + root = self.get_root_span() + root.assert_matches(name='postgres.query') + + # If the value is None assert it was not set, otherwise assert the expected value + # DEV: root.assert_metrics(metrics, exact=True) won't work here since we have another sample + # rate keys getting added + if metric_value is None: + assert ANALYTICS_SAMPLE_RATE_KEY not in root.metrics + else: + assert root.metrics[ANALYTICS_SAMPLE_RATE_KEY] == metric_value + self.reset() diff --git a/tests/contrib/sqlalchemy/test_postgres.py b/tests/contrib/sqlalchemy/test_postgres.py new file mode 100644 index 0000000000..7832fa4d19 --- /dev/null +++ b/tests/contrib/sqlalchemy/test_postgres.py @@ -0,0 +1,64 @@ +import psycopg2 + +from sqlalchemy.exc import ProgrammingError + +import pytest + +from .mixins import SQLAlchemyTestMixin +from ..config import POSTGRES_CONFIG +from ...base import BaseTracerTestCase + + +class PostgresTestCase(SQLAlchemyTestMixin, BaseTracerTestCase): + """TestCase for Postgres Engine""" + VENDOR = 'postgres' + SQL_DB = 'postgres' + SERVICE = 'postgres' + ENGINE_ARGS = {'url': 'postgresql://%(user)s:%(password)s@%(host)s:%(port)s/%(dbname)s' % POSTGRES_CONFIG} + + def setUp(self): + super(PostgresTestCase, self).setUp() + + def tearDown(self): + super(PostgresTestCase, self).tearDown() + + def check_meta(self, span): + # check database connection tags + self.assertEqual(span.get_tag('out.host'), POSTGRES_CONFIG['host']) + self.assertEqual(span.get_metric('out.port'), POSTGRES_CONFIG['port']) + + def test_engine_execute_errors(self): + # ensures that SQL errors are reported + with pytest.raises(ProgrammingError): + with self.connection() as conn: + conn.execute('SELECT * FROM a_wrong_table').fetchall() + + traces = self.tracer.writer.pop_traces() + # trace composition + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 1) + span = traces[0][0] + # span fields + self.assertEqual(span.name, '{}.query'.format(self.VENDOR)) + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.resource, 'SELECT * FROM a_wrong_table') + self.assertEqual(span.get_tag('sql.db'), self.SQL_DB) + self.assertIsNone(span.get_tag('sql.rows') or span.get_metric('sql.rows')) + self.check_meta(span) + self.assertEqual(span.span_type, 'sql') + self.assertTrue(span.duration > 0) + # check the error + self.assertEqual(span.error, 1) + self.assertTrue('relation "a_wrong_table" does not exist' in span.get_tag('error.msg')) + assert 'psycopg2.errors.UndefinedTable' in span.get_tag('error.type') + assert 'UndefinedTable: relation "a_wrong_table" does not exist' in span.get_tag('error.stack') + + +class PostgresCreatorTestCase(PostgresTestCase): + """TestCase for Postgres Engine that includes the same tests set + of `PostgresTestCase`, but it uses a specific `creator` function. 
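For reference (not part of the diff), SQLAlchemy's `creator` hook works as sketched below: the URL only selects the dialect, while the callable supplies raw DBAPI connections. The connection parameters are illustrative.

    import psycopg2
    from sqlalchemy import create_engine


    def _connect():
        # hand SQLAlchemy a ready-made DBAPI connection
        return psycopg2.connect(host='127.0.0.1', port=5432, user='postgres',
                                password='postgres', dbname='postgres')


    engine = create_engine('postgresql://', creator=_connect)
    with engine.connect() as conn:
        conn.execute('SELECT 1')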
+ """ + VENDOR = 'postgres' + SQL_DB = 'postgres' + SERVICE = 'postgres' + ENGINE_ARGS = {'url': 'postgresql://', 'creator': lambda: psycopg2.connect(**POSTGRES_CONFIG)} diff --git a/tests/contrib/sqlalchemy/test_sqlite.py b/tests/contrib/sqlalchemy/test_sqlite.py new file mode 100644 index 0000000000..b3a78b4e90 --- /dev/null +++ b/tests/contrib/sqlalchemy/test_sqlite.py @@ -0,0 +1,45 @@ +import pytest + +from sqlalchemy.exc import OperationalError + +from .mixins import SQLAlchemyTestMixin +from ...base import BaseTracerTestCase + + +class SQLiteTestCase(SQLAlchemyTestMixin, BaseTracerTestCase): + """TestCase for the SQLite engine""" + VENDOR = 'sqlite' + SQL_DB = ':memory:' + SERVICE = 'sqlite' + ENGINE_ARGS = {'url': 'sqlite:///:memory:'} + + def setUp(self): + super(SQLiteTestCase, self).setUp() + + def tearDown(self): + super(SQLiteTestCase, self).tearDown() + + def test_engine_execute_errors(self): + # ensures that SQL errors are reported + with pytest.raises(OperationalError): + with self.connection() as conn: + conn.execute('SELECT * FROM a_wrong_table').fetchall() + + traces = self.tracer.writer.pop_traces() + # trace composition + self.assertEqual(len(traces), 1) + self.assertEqual(len(traces[0]), 1) + span = traces[0][0] + # span fields + self.assertEqual(span.name, '{}.query'.format(self.VENDOR)) + self.assertEqual(span.service, self.SERVICE) + self.assertEqual(span.resource, 'SELECT * FROM a_wrong_table') + self.assertEqual(span.get_tag('sql.db'), self.SQL_DB) + self.assertIsNone(span.get_tag('sql.rows') or span.get_metric('sql.rows')) + self.assertEqual(span.span_type, 'sql') + self.assertTrue(span.duration > 0) + # check the error + self.assertEqual(span.error, 1) + self.assertEqual(span.get_tag('error.msg'), 'no such table: a_wrong_table') + self.assertTrue('OperationalError' in span.get_tag('error.type')) + self.assertTrue('OperationalError: no such table: a_wrong_table' in span.get_tag('error.stack')) diff --git a/tests/contrib/sqlite3/__init__.py b/tests/contrib/sqlite3/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/sqlite3/test_sqlite3.py b/tests/contrib/sqlite3/test_sqlite3.py new file mode 100644 index 0000000000..e1294bd053 --- /dev/null +++ b/tests/contrib/sqlite3/test_sqlite3.py @@ -0,0 +1,324 @@ +# stdlib +import sqlite3 +import time + +# project +import ddtrace +from ddtrace import Pin +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.sqlite3 import connection_factory +from ddtrace.contrib.sqlite3.patch import patch, unpatch, TracedSQLiteCursor +from ddtrace.ext import errors + +# testing +from tests.opentracer.utils import init_tracer +from ...base import BaseTracerTestCase + + +class TestSQLite(BaseTracerTestCase): + def setUp(self): + super(TestSQLite, self).setUp() + patch() + + def tearDown(self): + unpatch() + super(TestSQLite, self).tearDown() + + def test_backwards_compat(self): + # a small test to ensure that if the previous interface is used + # things still work + factory = connection_factory(self.tracer, service='my_db_service') + conn = sqlite3.connect(':memory:', factory=factory) + q = 'select * from sqlite_master' + cursor = conn.execute(q) + self.assertIsInstance(cursor, TracedSQLiteCursor) + assert not cursor.fetchall() + assert not self.spans + + def test_service_info(self): + backup_tracer = ddtrace.tracer + ddtrace.tracer = self.tracer + + sqlite3.connect(':memory:') + + services = self.tracer.writer.pop_services() + self.assertEqual(services, {}) + + ddtrace.tracer = 
backup_tracer + + def test_sqlite(self): + # ensure we can trace multiple services without stomping + services = ['db', 'another'] + for service in services: + db = sqlite3.connect(':memory:') + pin = Pin.get_from(db) + assert pin + pin.clone( + service=service, + tracer=self.tracer).onto(db) + + # Ensure we can run a query and it's correctly traced + q = 'select * from sqlite_master' + start = time.time() + cursor = db.execute(q) + self.assertIsInstance(cursor, TracedSQLiteCursor) + rows = cursor.fetchall() + end = time.time() + assert not rows + self.assert_structure( + dict(name='sqlite.query', span_type='sql', resource=q, service=service, error=0), + ) + root = self.get_root_span() + self.assertIsNone(root.get_tag('sql.query')) + assert start <= root.start <= end + assert root.duration <= end - start + self.reset() + + # run a query with an error and ensure all is well + q = 'select * from some_non_existant_table' + try: + db.execute(q) + except Exception: + pass + else: + assert 0, 'should have an error' + + self.assert_structure( + dict(name='sqlite.query', span_type='sql', resource=q, service=service, error=1), + ) + root = self.get_root_span() + self.assertIsNone(root.get_tag('sql.query')) + self.assertIsNotNone(root.get_tag(errors.ERROR_STACK)) + self.assertIn('OperationalError', root.get_tag(errors.ERROR_TYPE)) + self.assertIn('no such table', root.get_tag(errors.ERROR_MSG)) + self.reset() + + def test_sqlite_fetchall_is_traced(self): + q = 'select * from sqlite_master' + + # Not traced by default + connection = self._given_a_traced_connection(self.tracer) + cursor = connection.execute(q) + cursor.fetchall() + self.assert_structure(dict(name='sqlite.query', resource=q)) + self.reset() + + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + connection = self._given_a_traced_connection(self.tracer) + cursor = connection.execute(q) + cursor.fetchall() + + # We have two spans side by side + query_span, fetchall_span = self.get_root_spans() + + # Assert query + query_span.assert_structure(dict(name='sqlite.query', resource=q)) + + # Assert fetchall + fetchall_span.assert_structure(dict(name='sqlite.query.fetchall', resource=q, span_type='sql', error=0)) + self.assertIsNone(fetchall_span.get_tag('sql.query')) + + def test_sqlite_fetchone_is_traced(self): + q = 'select * from sqlite_master' + + # Not traced by default + connection = self._given_a_traced_connection(self.tracer) + cursor = connection.execute(q) + cursor.fetchone() + self.assert_structure(dict(name='sqlite.query', resource=q)) + self.reset() + + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + connection = self._given_a_traced_connection(self.tracer) + cursor = connection.execute(q) + cursor.fetchone() + + # We have two spans side by side + query_span, fetchone_span = self.get_root_spans() + + # Assert query + query_span.assert_structure(dict(name='sqlite.query', resource=q)) + + # Assert fetchone + fetchone_span.assert_structure( + dict( + name='sqlite.query.fetchone', + resource=q, + span_type='sql', + error=0, + ), + ) + self.assertIsNone(fetchone_span.get_tag('sql.query')) + + def test_sqlite_fetchmany_is_traced(self): + q = 'select * from sqlite_master' + + # Not traced by default + connection = self._given_a_traced_connection(self.tracer) + cursor = connection.execute(q) + cursor.fetchmany(123) + self.assert_structure(dict(name='sqlite.query', resource=q)) + self.reset() + + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + connection = 
self._given_a_traced_connection(self.tracer) + cursor = connection.execute(q) + cursor.fetchmany(123) + + # We have two spans side by side + query_span, fetchmany_span = self.get_root_spans() + + # Assert query + query_span.assert_structure(dict(name='sqlite.query', resource=q)) + + # Assert fetchmany + fetchmany_span.assert_structure( + dict( + name='sqlite.query.fetchmany', + resource=q, + span_type='sql', + error=0, + metrics={'db.fetch.size': 123}, + ), + ) + self.assertIsNone(fetchmany_span.get_tag('sql.query')) + + def test_sqlite_ot(self): + """Ensure sqlite works with the opentracer.""" + ot_tracer = init_tracer('sqlite_svc', self.tracer) + + # Ensure we can run a query and it's correctly traced + q = 'select * from sqlite_master' + with ot_tracer.start_active_span('sqlite_op'): + db = sqlite3.connect(':memory:') + pin = Pin.get_from(db) + assert pin + pin.clone(tracer=self.tracer).onto(db) + cursor = db.execute(q) + rows = cursor.fetchall() + assert not rows + + self.assert_structure( + dict(name='sqlite_op', service='sqlite_svc'), + ( + dict(name='sqlite.query', span_type='sql', resource=q, error=0), + ) + ) + self.reset() + + with self.override_config('dbapi2', dict(trace_fetch_methods=True)): + with ot_tracer.start_active_span('sqlite_op'): + db = sqlite3.connect(':memory:') + pin = Pin.get_from(db) + assert pin + pin.clone(tracer=self.tracer).onto(db) + cursor = db.execute(q) + rows = cursor.fetchall() + assert not rows + + self.assert_structure( + dict(name='sqlite_op', service='sqlite_svc'), + ( + dict(name='sqlite.query', span_type='sql', resource=q, error=0), + dict(name='sqlite.query.fetchall', span_type='sql', resource=q, error=0), + ), + ) + + def test_commit(self): + connection = self._given_a_traced_connection(self.tracer) + connection.commit() + self.assertEqual(len(self.spans), 1) + span = self.spans[0] + self.assertEqual(span.service, 'sqlite') + self.assertEqual(span.name, 'sqlite.connection.commit') + + def test_rollback(self): + connection = self._given_a_traced_connection(self.tracer) + connection.rollback() + self.assert_structure( + dict(name='sqlite.connection.rollback', service='sqlite'), + ) + + def test_patch_unpatch(self): + # Test patch idempotence + patch() + patch() + + db = sqlite3.connect(':memory:') + pin = Pin.get_from(db) + assert pin + pin.clone(tracer=self.tracer).onto(db) + db.cursor().execute('select \'blah\'').fetchall() + + self.assert_structure( + dict(name='sqlite.query'), + ) + self.reset() + + # Test unpatch + unpatch() + + db = sqlite3.connect(':memory:') + db.cursor().execute('select \'blah\'').fetchall() + + self.assert_has_no_spans() + + # Test patch again + patch() + + db = sqlite3.connect(':memory:') + pin = Pin.get_from(db) + assert pin + pin.clone(tracer=self.tracer).onto(db) + db.cursor().execute('select \'blah\'').fetchall() + + self.assert_structure( + dict(name='sqlite.query'), + ) + + def _given_a_traced_connection(self, tracer): + db = sqlite3.connect(':memory:') + Pin.get_from(db).clone(tracer=tracer).onto(db) + return db + + def test_analytics_default(self): + q = 'select * from sqlite_master' + connection = self._given_a_traced_connection(self.tracer) + cursor = connection.execute(q) + cursor.fetchall() + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertIsNone(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + q = 'select * from 
sqlite_master' + connection = self._given_a_traced_connection(self.tracer) + cursor = connection.execute(q) + cursor.fetchall() + + spans = self.get_spans() + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with self.override_config( + 'dbapi2', + dict(analytics_enabled=True) + ): + q = 'select * from sqlite_master' + connection = self._given_a_traced_connection(self.tracer) + cursor = connection.execute(q) + cursor.fetchall() + + spans = self.get_spans() + + self.assertEqual(len(spans), 1) + span = spans[0] + self.assertEqual(span.get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) diff --git a/tests/contrib/test_utils.py b/tests/contrib/test_utils.py new file mode 100644 index 0000000000..cdbb53af79 --- /dev/null +++ b/tests/contrib/test_utils.py @@ -0,0 +1,67 @@ +from functools import partial +from ddtrace.utils.importlib import func_name + + +class SomethingCallable(object): + """ + A dummy class that implements __call__(). + """ + value = 42 + + def __call__(self): + return 'something' + + def me(self): + return self + + @staticmethod + def add(a, b): + return a + b + + @classmethod + def answer(cls): + return cls.value + + +def some_function(): + """ + A function doing nothing. + """ + return 'nothing' + + +def minus(a, b): + return a - b + + +minus_two = partial(minus, b=2) # partial funcs need special handling (no module) + +# disabling flake8 test below, yes, declaring a func like this is bad, we know +plus_three = lambda x: x + 3 # noqa: E731 + + +class TestContrib(object): + """ + Ensure that contrib utility functions handles corner cases + """ + def test_func_name(self): + # check that func_name works on anything callable, not only funcs. + assert 'nothing' == some_function() + assert 'tests.contrib.test_utils.some_function' == func_name(some_function) + + f = SomethingCallable() + assert 'something' == f() + assert 'tests.contrib.test_utils.SomethingCallable' == func_name(f) + + assert f == f.me() + assert 'tests.contrib.test_utils.me' == func_name(f.me) + assert 3 == f.add(1, 2) + assert 'tests.contrib.test_utils.add' == func_name(f.add) + assert 42 == f.answer() + assert 'tests.contrib.test_utils.answer' == func_name(f.answer) + + assert 'tests.contrib.test_utils.minus' == func_name(minus) + assert 5 == minus_two(7) + assert 'partial' == func_name(minus_two) + assert 10 == plus_three(7) + assert 'tests.contrib.test_utils.' == func_name(plus_three) diff --git a/tests/contrib/tornado/__init__.py b/tests/contrib/tornado/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/tornado/test_config.py b/tests/contrib/tornado/test_config.py new file mode 100644 index 0000000000..96b03cbbb8 --- /dev/null +++ b/tests/contrib/tornado/test_config.py @@ -0,0 +1,37 @@ +from ddtrace.filters import FilterRequestsOnUrl + +from .utils import TornadoTestCase + + +class TestTornadoSettings(TornadoTestCase): + """ + Ensure that Tornado web Application configures properly + the given tracer. 
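For context (not part of the diff), the `datadog_trace` settings block tested below is passed through the regular Tornado `Application` settings. A minimal, hypothetical wiring might look like this, assuming the tornado integration is patched first; handler and values are illustrative.

    import tornado.web

    from ddtrace import patch
    patch(tornado=True)  # patch before the Application is instantiated


    class MainHandler(tornado.web.RequestHandler):
        def get(self):
            self.write('OK')


    app = tornado.web.Application(
        [(r'/', MainHandler)],
        datadog_trace={
            'default_service': 'my-tornado-app',  # illustrative values
            'tags': {'env': 'staging'},
            'agent_hostname': 'localhost',
            'agent_port': 8126,
        },
    )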
+ """ + def get_settings(self): + # update tracer settings + return { + 'datadog_trace': { + 'default_service': 'custom-tornado', + 'tags': {'env': 'production', 'debug': 'false'}, + 'enabled': False, + 'agent_hostname': 'dd-agent.service.consul', + 'agent_port': 8126, + 'settings': { + 'FILTERS': [ + FilterRequestsOnUrl(r'http://test\.example\.com'), + ], + }, + }, + } + + def test_tracer_is_properly_configured(self): + # the tracer must be properly configured + assert self.tracer.tags == {'env': 'production', 'debug': 'false'} + assert self.tracer.enabled is False + assert self.tracer.writer.api.hostname == 'dd-agent.service.consul' + assert self.tracer.writer.api.port == 8126 + # settings are properly passed + assert self.tracer.writer._filters is not None + assert len(self.tracer.writer._filters) == 1 + assert isinstance(self.tracer.writer._filters[0], FilterRequestsOnUrl) diff --git a/tests/contrib/tornado/test_executor_decorator.py b/tests/contrib/tornado/test_executor_decorator.py new file mode 100644 index 0000000000..f46e3849e4 --- /dev/null +++ b/tests/contrib/tornado/test_executor_decorator.py @@ -0,0 +1,181 @@ +import unittest + +from ddtrace.contrib.tornado.compat import futures_available +from ddtrace.ext import http + +from tornado import version_info + +from .utils import TornadoTestCase +from ...utils import assert_span_http_status_code + + +class TestTornadoExecutor(TornadoTestCase): + """ + Ensure that Tornado web handlers are properly traced even if + ``@run_on_executor`` decorator is used. + """ + def test_on_executor_handler(self): + # it should trace a handler that uses @run_on_executor + response = self.fetch('/executor_handler/') + assert 200 == response.code + + traces = self.tracer.writer.pop_traces() + assert 2 == len(traces) + assert 1 == len(traces[0]) + assert 1 == len(traces[1]) + + # this trace yields the execution of the thread + request_span = traces[1][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.ExecutorHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 200) + assert self.get_url('/executor_handler/') == request_span.get_tag(http.URL) + assert 0 == request_span.error + assert request_span.duration >= 0.05 + + # this trace is executed in a different thread + executor_span = traces[0][0] + assert 'tornado-web' == executor_span.service + assert 'tornado.executor.with' == executor_span.name + assert executor_span.parent_id == request_span.span_id + assert 0 == executor_span.error + assert executor_span.duration >= 0.05 + + @unittest.skipUnless(futures_available, 'Futures must be available to test direct submit') + def test_on_executor_submit(self): + # it should propagate the context when a handler uses directly the `executor.submit()` + response = self.fetch('/executor_submit_handler/') + assert 200 == response.code + + traces = self.tracer.writer.pop_traces() + assert 2 == len(traces) + assert 1 == len(traces[0]) + assert 1 == len(traces[1]) + + # this trace yields the execution of the thread + request_span = traces[1][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.ExecutorSubmitHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 200) 
+
+    def test_on_executor_exception_handler(self):
+        # it should trace a handler that uses @run_on_executor
+        response = self.fetch('/executor_exception/')
+        assert 500 == response.code
+
+        traces = self.tracer.writer.pop_traces()
+        assert 2 == len(traces)
+        assert 1 == len(traces[0])
+        assert 1 == len(traces[1])
+
+        # this trace yields to the execution of the thread
+        request_span = traces[1][0]
+        assert 'tornado-web' == request_span.service
+        assert 'tornado.request' == request_span.name
+        assert 'web' == request_span.span_type
+        assert 'tests.contrib.tornado.web.app.ExecutorExceptionHandler' == request_span.resource
+        assert 'GET' == request_span.get_tag('http.method')
+        assert_span_http_status_code(request_span, 500)
+        assert self.get_url('/executor_exception/') == request_span.get_tag(http.URL)
+        assert 1 == request_span.error
+        assert 'Ouch!' == request_span.get_tag('error.msg')
+        assert 'Exception: Ouch!' in request_span.get_tag('error.stack')
+
+        # this trace is executed in a different thread
+        executor_span = traces[0][0]
+        assert 'tornado-web' == executor_span.service
+        assert 'tornado.executor.with' == executor_span.name
+        assert executor_span.parent_id == request_span.span_id
+        assert 1 == executor_span.error
+        assert 'Ouch!' == executor_span.get_tag('error.msg')
+        assert 'Exception: Ouch!' in executor_span.get_tag('error.stack')
+
+    @unittest.skipIf(
+        (version_info[0], version_info[1]) in [(4, 0), (4, 1)],
+        reason='Custom kwargs are available only for Tornado 4.2+',
+    )
+    def test_on_executor_custom_kwarg(self):
+        # it should trace a handler that uses @run_on_executor
+        # with the `executor` kwarg
+        response = self.fetch('/executor_custom_handler/')
+        assert 200 == response.code
+
+        traces = self.tracer.writer.pop_traces()
+        assert 2 == len(traces)
+        assert 1 == len(traces[0])
+        assert 1 == len(traces[1])
+
+        # this trace yields to the execution of the thread
+        request_span = traces[1][0]
+        assert 'tornado-web' == request_span.service
+        assert 'tornado.request' == request_span.name
+        assert 'web' == request_span.span_type
+        assert 'tests.contrib.tornado.web.app.ExecutorCustomHandler' == request_span.resource
+        assert 'GET' == request_span.get_tag('http.method')
+        assert_span_http_status_code(request_span, 200)
+        assert self.get_url('/executor_custom_handler/') == request_span.get_tag(http.URL)
+        assert 0 == request_span.error
+        assert request_span.duration >= 0.05
+
+        # this trace is executed in a different thread
+        executor_span = traces[0][0]
+        assert 'tornado-web' == executor_span.service
+        assert 'tornado.executor.with' == executor_span.name
+        assert executor_span.parent_id == request_span.span_id
+        assert 0 == executor_span.error
+        assert executor_span.duration >= 0.05
+
+    @unittest.skipIf(
+        (version_info[0], version_info[1]) in [(4, 0), (4, 1)],
+        reason='Custom kwargs are available only for Tornado 4.2+',
+    )
+    def test_on_executor_custom_args_kwarg(self):
+        # it should raise an exception if the decorator is used improperly
+        response = self.fetch('/executor_custom_args_handler/')
+        assert 500 == response.code
+
+        traces = self.tracer.writer.pop_traces()
+        assert 1 == len(traces)
+        assert 1 == len(traces[0])
+
+        # this trace yields to the execution of the thread
+        request_span = traces[0][0]
+        assert 'tornado-web' == request_span.service
+        assert 'tornado.request' == request_span.name
+        assert 'web' == request_span.span_type
+        assert 'tests.contrib.tornado.web.app.ExecutorCustomArgsHandler' == request_span.resource
+        assert 'GET' == request_span.get_tag('http.method')
+        assert_span_http_status_code(request_span, 500)
+        assert self.get_url('/executor_custom_args_handler/') == request_span.get_tag(http.URL)
+        assert 1 == request_span.error
+        assert 'cannot combine positional and keyword args' == request_span.get_tag('error.msg')
+        assert 'ValueError' in request_span.get_tag('error.stack')
+
+    @unittest.skipUnless(futures_available, 'Futures must be available to test direct submit')
+    def test_futures_double_instrumentation(self):
+        # it should not double wrap the `ThreadPoolExecutor.submit` method if
+        # `futures` is already instrumented
+        from ddtrace import patch
+        patch(futures=True)
+        from concurrent.futures import ThreadPoolExecutor
+        from ddtrace.vendor.wrapt import BoundFunctionWrapper
+
+        fn_wrapper = getattr(ThreadPoolExecutor.submit, '__wrapped__', None)
+        assert not isinstance(fn_wrapper, BoundFunctionWrapper)
diff --git a/tests/contrib/tornado/test_safety.py b/tests/contrib/tornado/test_safety.py
new file mode 100644
index 0000000000..7ebcfbab1d
--- /dev/null
+++ b/tests/contrib/tornado/test_safety.py
@@ -0,0 +1,158 @@
+import threading
+
+from tornado import httpclient
+from tornado.testing import gen_test
+
+from ddtrace.contrib.tornado import patch, unpatch
+from ddtrace.ext import http
+
+from . import web
+from .web.app import CustomDefaultHandler
+from .utils import TornadoTestCase
+
+
+class TestAsyncConcurrency(TornadoTestCase):
+    """
+    Ensure that application instrumentation doesn't break asynchronous concurrency.
+    """
+    @gen_test
+    def test_concurrent_requests(self):
+        REQUESTS_NUMBER = 25
+        responses = []
+
+        # the application must handle concurrent calls
+        def make_requests():
+            # use a blocking HTTP client (we're in another thread)
+            http_client = httpclient.HTTPClient()
+            url = self.get_url('/nested/')
+            response = http_client.fetch(url)
+            responses.append(response)
+            assert 200 == response.code
+            assert 'OK' == response.body.decode('utf-8')
+            # freeing file descriptors
+            http_client.close()
+
+        # blocking calls executed in different threads
+        threads = [threading.Thread(target=make_requests) for _ in range(REQUESTS_NUMBER)]
+        for t in threads:
+            t.start()
+
+        while len(responses) < REQUESTS_NUMBER:
+            yield web.compat.sleep(0.001)
+
+        for t in threads:
+            t.join()
+
+        # the traces are created
+        traces = self.tracer.writer.pop_traces()
+        assert REQUESTS_NUMBER == len(traces)
+        assert 2 == len(traces[0])
+
+
+class TestAppSafety(TornadoTestCase):
+    """
+    Ensure that the application patch has the proper safety guards.
+    """
+
+    def test_trace_unpatch(self):
+        # the application must not be traced if unpatch() is called
+        patch()
+        unpatch()
+
+        response = self.fetch('/success/')
+        assert 200 == response.code
+
+        traces = self.tracer.writer.pop_traces()
+        assert 0 == len(traces)
+
+    def test_trace_unpatch_not_traced(self):
+        # unpatching must be safe if the app is not traced
+        unpatch()
+        unpatch()
+
+        response = self.fetch('/success/')
+        assert 200 == response.code
+
+        traces = self.tracer.writer.pop_traces()
+        assert 0 == len(traces)
+
+    def test_trace_app_twice(self):
+        # the application must not be traced multiple times
+        patch()
+        patch()
+
+        response = self.fetch('/success/')
+        assert 200 == response.code
+
+        traces = self.tracer.writer.pop_traces()
+        assert 1 == len(traces)
+        assert 1 == len(traces[0])
+
+    def test_arbitrary_resource_querystring(self):
+        # user inputs should not determine the `span.resource` field
+        response = self.fetch('/success/?magic_number=42')
+        assert 200 == response.code
+
+        traces = self.tracer.writer.pop_traces()
+        assert 1 == len(traces)
+        assert 1 == len(traces[0])
+
+        request_span = traces[0][0]
+        assert 'tests.contrib.tornado.web.app.SuccessHandler' == request_span.resource
+        assert self.get_url('/success/') == request_span.get_tag(http.URL)
+
+    def test_arbitrary_resource_404(self):
+        # user inputs should not determine the `span.resource` field
+        response = self.fetch('/does_not_exist/')
+        assert 404 == response.code
+
+        traces = self.tracer.writer.pop_traces()
+        assert 1 == len(traces)
+        assert 1 == len(traces[0])
+
+        request_span = traces[0][0]
+        assert 'tornado.web.ErrorHandler' == request_span.resource
+        assert self.get_url('/does_not_exist/') == request_span.get_tag(http.URL)
+
+    @gen_test
+    def test_futures_without_context(self):
+        # ensures that if futures propagation is available, an empty
+        # context doesn't crash the system
+        from .web.compat import ThreadPoolExecutor
+
+        def job():
+            with self.tracer.trace('job'):
+                return 42
+
+        executor = ThreadPoolExecutor(max_workers=3)
+        yield executor.submit(job)
+
+        traces = self.tracer.writer.pop_traces()
+        assert 1 == len(traces)
+        assert 1 == len(traces[0])
+
+        # this trace comes from the execution of the thread
+        span = traces[0][0]
+        assert 'job' == span.name
+
+
+class TestCustomAppSafety(TornadoTestCase):
+    """
+    Ensure that the application patch has the proper safety guards,
+    even for custom default handlers.
+    """
+    def get_settings(self):
+        return {
+            'default_handler_class': CustomDefaultHandler,
+            'default_handler_args': dict(status_code=400),
+        }
+
+    def test_trace_unpatch(self):
+        # the application must not be traced if unpatch() is called
+        unpatch()
+
+        response = self.fetch('/custom_handler/')
+        assert 400 == response.code
+
+        traces = self.tracer.writer.pop_traces()
+        assert 0 == len(traces)
diff --git a/tests/contrib/tornado/test_stack_context.py b/tests/contrib/tornado/test_stack_context.py
new file mode 100644
index 0000000000..a3727dfe82
--- /dev/null
+++ b/tests/contrib/tornado/test_stack_context.py
@@ -0,0 +1,54 @@
+import pytest
+import tornado
+
+from ddtrace.context import Context
+from ddtrace.contrib.tornado import TracerStackContext
+
+from .utils import TornadoTestCase
+from .web.compat import sleep
+
+
+class TestStackContext(TornadoTestCase):
+    @pytest.mark.skipif(tornado.version_info >= (5, 0),
+                        reason='tornado.stack_context deprecated in Tornado 5.0 and removed in Tornado 6.0')
+    def test_without_stack_context(self):
+        # without a TracerStackContext, propagation is not available
+        ctx = self.tracer.context_provider.active()
+        assert ctx is None
+
+    def test_stack_context(self):
+        # a TracerStackContext should automatically propagate a tracing context
+        with TracerStackContext():
+            ctx = self.tracer.context_provider.active()
+
+        assert ctx is not None
+
+    def test_propagation_with_new_context(self):
+        # inside a TracerStackContext it should be possible to set
+        # a new Context for distributed tracing
+        with TracerStackContext():
+            ctx = Context(trace_id=100, span_id=101)
+            self.tracer.context_provider.activate(ctx)
+            with self.tracer.trace('tornado'):
+                sleep(0.01)
+
+        traces = self.tracer.writer.pop_traces()
+        assert len(traces) == 1
+        assert len(traces[0]) == 1
+        assert traces[0][0].trace_id == 100
+        assert traces[0][0].parent_id == 101
+
+    @pytest.mark.skipif(tornado.version_info >= (5, 0),
+                        reason='tornado.stack_context deprecated in Tornado 5.0 and removed in Tornado 6.0')
+    def test_propagation_without_stack_context(self):
+        # a Context is discarded if not set inside a TracerStackContext
+        ctx = Context(trace_id=100, span_id=101)
+        self.tracer.context_provider.activate(ctx)
+        with self.tracer.trace('tornado'):
+            sleep(0.01)
+
+        traces = self.tracer.writer.pop_traces()
+        assert len(traces) == 1
+        assert len(traces[0]) == 1
+        assert traces[0][0].trace_id != 100
+        assert traces[0][0].parent_id != 101
diff --git a/tests/contrib/tornado/test_tornado_template.py b/tests/contrib/tornado/test_tornado_template.py
new file mode 100644
index 0000000000..a496c9145f
--- /dev/null
+++ b/tests/contrib/tornado/test_tornado_template.py
@@ -0,0 +1,169 @@
+from tornado import template
+
+import pytest
+
+from .utils import TornadoTestCase
+from ...utils import assert_span_http_status_code
+
+from ddtrace.ext import http
+
+
+class TestTornadoTemplate(TornadoTestCase):
+    """
+    Ensure that Tornado templates are properly traced inside and
+    outside web handlers.
+    """
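As a small illustration (not part of the diff) of the direct-rendering path this class covers: once the integration is patched, generating a template outside any handler is enough to produce a template span with the 'render_string' resource, as asserted below.

    from tornado import template

    t = template.Template('Hello {{ name }}!')
    assert t.generate(name='world') == b'Hello world!'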
+ """ + def test_template_handler(self): + # it should trace the template rendering + response = self.fetch('/template/') + assert 200 == response.code + assert 'This is a rendered page called "home"\n' == response.body.decode('utf-8') + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.TemplateHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 200) + assert self.get_url('/template/') == request_span.get_tag(http.URL) + assert 0 == request_span.error + + template_span = traces[0][1] + assert 'tornado-web' == template_span.service + assert 'tornado.template' == template_span.name + assert 'template' == template_span.span_type + assert 'templates/page.html' == template_span.resource + assert 'templates/page.html' == template_span.get_tag('tornado.template_name') + assert template_span.parent_id == request_span.span_id + assert 0 == template_span.error + + def test_template_renderer(self): + # it should trace the Template generation even outside web handlers + t = template.Template('Hello {{ name }}!') + value = t.generate(name='world') + assert value == b'Hello world!' + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + template_span = traces[0][0] + assert 'tornado-web' == template_span.service + assert 'tornado.template' == template_span.name + assert 'template' == template_span.span_type + assert 'render_string' == template_span.resource + assert 'render_string' == template_span.get_tag('tornado.template_name') + assert 0 == template_span.error + + def test_template_partials(self): + # it should trace the template rendering when partials are used + response = self.fetch('/template_partial/') + assert 200 == response.code + assert 'This is a list:\n\n* python\n\n\n* go\n\n\n* ruby\n\n\n' == response.body.decode('utf-8') + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 5 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.TemplatePartialHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 200) + assert self.get_url('/template_partial/') == request_span.get_tag(http.URL) + assert 0 == request_span.error + + template_root = traces[0][1] + assert 'tornado-web' == template_root.service + assert 'tornado.template' == template_root.name + assert 'template' == template_root.span_type + assert 'templates/list.html' == template_root.resource + assert 'templates/list.html' == template_root.get_tag('tornado.template_name') + assert template_root.parent_id == request_span.span_id + assert 0 == template_root.error + + template_span = traces[0][2] + assert 'tornado-web' == template_span.service + assert 'tornado.template' == template_span.name + assert 'template' == template_span.span_type + assert 'templates/item.html' == template_span.resource + assert 'templates/item.html' == template_span.get_tag('tornado.template_name') + assert template_span.parent_id == template_root.span_id + assert 0 == template_span.error + + template_span = 
traces[0][3] + assert 'tornado-web' == template_span.service + assert 'tornado.template' == template_span.name + assert 'template' == template_span.span_type + assert 'templates/item.html' == template_span.resource + assert 'templates/item.html' == template_span.get_tag('tornado.template_name') + assert template_span.parent_id == template_root.span_id + assert 0 == template_span.error + + template_span = traces[0][4] + assert 'tornado-web' == template_span.service + assert 'tornado.template' == template_span.name + assert 'template' == template_span.span_type + assert 'templates/item.html' == template_span.resource + assert 'templates/item.html' == template_span.get_tag('tornado.template_name') + assert template_span.parent_id == template_root.span_id + assert 0 == template_span.error + + def test_template_exception_handler(self): + # it should trace template rendering exceptions + response = self.fetch('/template_exception/') + assert 500 == response.code + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.TemplateExceptionHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 500) + assert self.get_url('/template_exception/') == request_span.get_tag(http.URL) + assert 1 == request_span.error + assert 'ModuleThatDoesNotExist' in request_span.get_tag('error.msg') + assert 'AttributeError' in request_span.get_tag('error.stack') + + template_span = traces[0][1] + assert 'tornado-web' == template_span.service + assert 'tornado.template' == template_span.name + assert 'template' == template_span.span_type + assert 'templates/exception.html' == template_span.resource + assert 'templates/exception.html' == template_span.get_tag('tornado.template_name') + assert template_span.parent_id == request_span.span_id + assert 1 == template_span.error + assert 'ModuleThatDoesNotExist' in template_span.get_tag('error.msg') + assert 'AttributeError' in template_span.get_tag('error.stack') + + def test_template_renderer_exception(self): + # it should trace the Template exceptions generation even outside web handlers + t = template.Template('{% module ModuleThatDoesNotExist() %}') + with pytest.raises(NameError): + t.generate() + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + template_span = traces[0][0] + assert 'tornado-web' == template_span.service + assert 'tornado.template' == template_span.name + assert 'template' == template_span.span_type + assert 'render_string' == template_span.resource + assert 'render_string' == template_span.get_tag('tornado.template_name') + assert 1 == template_span.error + assert 'is not defined' in template_span.get_tag('error.msg') + assert 'NameError' in template_span.get_tag('error.stack') diff --git a/tests/contrib/tornado/test_tornado_web.py b/tests/contrib/tornado/test_tornado_web.py new file mode 100644 index 0000000000..e710da7103 --- /dev/null +++ b/tests/contrib/tornado/test_tornado_web.py @@ -0,0 +1,491 @@ +from .web.app import CustomDefaultHandler +from .utils import TornadoTestCase + +from ddtrace import config +from ddtrace.constants import SAMPLING_PRIORITY_KEY, ORIGIN_KEY, ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.ext import http +import pytest +import tornado + +from 
tests.opentracer.utils import init_tracer +from ...utils import assert_span_http_status_code + + +class TestTornadoWeb(TornadoTestCase): + """ + Ensure that Tornado web handlers are properly traced. + """ + def test_success_handler(self, query_string=''): + # it should trace a handler that returns 200 + if query_string: + fqs = '?' + query_string + else: + fqs = '' + response = self.fetch('/success/' + fqs) + assert 200 == response.code + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.SuccessHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 200) + assert self.get_url('/success/') == request_span.get_tag(http.URL) + if config.tornado.trace_query_string: + assert query_string == request_span.get_tag(http.QUERY_STRING) + else: + assert http.QUERY_STRING not in request_span.meta + assert 0 == request_span.error + + def test_success_handler_query_string(self): + self.test_success_handler('foo=bar') + + def test_success_handler_query_string_trace(self): + with self.override_http_config('tornado', dict(trace_query_string=True)): + self.test_success_handler('foo=bar') + + def test_nested_handler(self): + # it should trace a handler that calls the tracer.trace() method + # using the automatic Context retrieval + response = self.fetch('/nested/') + assert 200 == response.code + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + # check request span + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.NestedHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 200) + assert self.get_url('/nested/') == request_span.get_tag(http.URL) + assert 0 == request_span.error + # check nested span + nested_span = traces[0][1] + assert 'tornado-web' == nested_span.service + assert 'tornado.sleep' == nested_span.name + assert 0 == nested_span.error + # check durations because of the yield sleep + assert request_span.duration >= 0.05 + assert nested_span.duration >= 0.05 + + def test_exception_handler(self): + # it should trace a handler that raises an exception + response = self.fetch('/exception/') + assert 500 == response.code + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.ExceptionHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 500) + assert self.get_url('/exception/') == request_span.get_tag(http.URL) + assert 1 == request_span.error + assert 'Ouch!' == request_span.get_tag('error.msg') + assert 'Exception: Ouch!' 
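in request_span.get_tag('error.stack')
+        # i.e. an unhandled exception marks the request span with error=1,
+        # stores str(exc) in `error.msg` and the formatted traceback in
+        # `error.stack`; restated as one more check:
+        assert 'Exception: Ouch!' 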
in request_span.get_tag('error.stack') + + def test_http_exception_handler(self): + # it should trace a handler that raises a Tornado HTTPError + response = self.fetch('/http_exception/') + assert 501 == response.code + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.HTTPExceptionHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 501) + assert self.get_url('/http_exception/') == request_span.get_tag(http.URL) + assert 1 == request_span.error + assert 'HTTP 501: Not Implemented (unavailable)' == request_span.get_tag('error.msg') + assert 'HTTP 501: Not Implemented (unavailable)' in request_span.get_tag('error.stack') + + def test_http_exception_500_handler(self): + # it should trace a handler that raises a Tornado HTTPError + response = self.fetch('/http_exception_500/') + assert 500 == response.code + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.HTTPException500Handler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 500) + assert self.get_url('/http_exception_500/') == request_span.get_tag(http.URL) + assert 1 == request_span.error + assert 'HTTP 500: Server Error (server error)' == request_span.get_tag('error.msg') + assert 'HTTP 500: Server Error (server error)' in request_span.get_tag('error.stack') + + def test_sync_success_handler(self): + # it should trace a synchronous handler that returns 200 + response = self.fetch('/sync_success/') + assert 200 == response.code + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.SyncSuccessHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 200) + assert self.get_url('/sync_success/') == request_span.get_tag(http.URL) + assert 0 == request_span.error + + def test_sync_exception_handler(self): + # it should trace a handler that raises an exception + response = self.fetch('/sync_exception/') + assert 500 == response.code + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.SyncExceptionHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 500) + assert self.get_url('/sync_exception/') == request_span.get_tag(http.URL) + assert 1 == request_span.error + assert 'Ouch!' == request_span.get_tag('error.msg') + assert 'Exception: Ouch!' 
in request_span.get_tag('error.stack') + + def test_404_handler(self): + # it should trace 404 + response = self.fetch('/does_not_exist/') + assert 404 == response.code + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tornado.web.ErrorHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 404) + assert self.get_url('/does_not_exist/') == request_span.get_tag(http.URL) + assert 0 == request_span.error + + def test_redirect_handler(self): + # it should trace the built-in RedirectHandler + response = self.fetch('/redirect/') + assert 200 == response.code + + # we trace two different calls: the RedirectHandler and the SuccessHandler + traces = self.tracer.writer.pop_traces() + assert 2 == len(traces) + assert 1 == len(traces[0]) + assert 1 == len(traces[1]) + + redirect_span = traces[0][0] + assert 'tornado-web' == redirect_span.service + assert 'tornado.request' == redirect_span.name + assert 'web' == redirect_span.span_type + assert 'tornado.web.RedirectHandler' == redirect_span.resource + assert 'GET' == redirect_span.get_tag('http.method') + assert_span_http_status_code(redirect_span, 301) + assert self.get_url('/redirect/') == redirect_span.get_tag(http.URL) + assert 0 == redirect_span.error + + success_span = traces[1][0] + assert 'tornado-web' == success_span.service + assert 'tornado.request' == success_span.name + assert 'web' == success_span.span_type + assert 'tests.contrib.tornado.web.app.SuccessHandler' == success_span.resource + assert 'GET' == success_span.get_tag('http.method') + assert_span_http_status_code(success_span, 200) + assert self.get_url('/success/') == success_span.get_tag(http.URL) + assert 0 == success_span.error + + def test_static_handler(self): + # it should trace the access to static files + response = self.fetch('/statics/empty.txt') + assert 200 == response.code + assert 'Static file\n' == response.body.decode('utf-8') + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tornado.web.StaticFileHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 200) + assert self.get_url('/statics/empty.txt') == request_span.get_tag(http.URL) + assert 0 == request_span.error + + def test_propagation(self): + # it should trace a handler that returns 200 with a propagated context + headers = { + 'x-datadog-trace-id': '1234', + 'x-datadog-parent-id': '4567', + 'x-datadog-sampling-priority': '2' + } + response = self.fetch('/success/', headers=headers) + assert 200 == response.code + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + request_span = traces[0][0] + + # simple sanity check on the span + assert 'tornado.request' == request_span.name + assert_span_http_status_code(request_span, 200) + assert self.get_url('/success/') == request_span.get_tag(http.URL) + assert 0 == request_span.error + + # check propagation + assert 1234 == request_span.trace_id + assert 4567 == request_span.parent_id + assert 2 == 
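request_span.get_metric(SAMPLING_PRIORITY_KEY)
+
+        # Restating the propagation mapping exercised above:
+        #   x-datadog-trace-id          -> span.trace_id
+        #   x-datadog-parent-id         -> span.parent_id
+        #   x-datadog-sampling-priority -> sampling priority metric
+        assert 2 == 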
request_span.get_metric(SAMPLING_PRIORITY_KEY)
+
+    # OpenTracing support on Tornado >= 5 depends on the new AsyncioScopeManager
+    # See: https://github.com/opentracing/opentracing-python/pull/118
+    @pytest.mark.skipif(tornado.version_info >= (5, 0),
+                        reason='OpenTracing ScopeManager not available for Tornado >= 5')
+    def test_success_handler_ot(self):
+        """OpenTracing version of test_success_handler."""
+        from opentracing.scope_managers.tornado import TornadoScopeManager
+        ot_tracer = init_tracer('tornado_svc', self.tracer, scope_manager=TornadoScopeManager())
+
+        with ot_tracer.start_active_span('tornado_op'):
+            response = self.fetch('/success/')
+            assert 200 == response.code
+
+        traces = self.tracer.writer.pop_traces()
+        assert 1 == len(traces)
+        assert 2 == len(traces[0])
+        # dd_span will start and stop before the ot_span finishes
+        ot_span, dd_span = traces[0]
+
+        # confirm the parenting
+        assert ot_span.parent_id is None
+        assert dd_span.parent_id == ot_span.span_id
+
+        assert ot_span.name == 'tornado_op'
+        assert ot_span.service == 'tornado_svc'
+
+        assert 'tornado-web' == dd_span.service
+        assert 'tornado.request' == dd_span.name
+        assert 'web' == dd_span.span_type
+        assert 'tests.contrib.tornado.web.app.SuccessHandler' == dd_span.resource
+        assert 'GET' == dd_span.get_tag('http.method')
+        assert_span_http_status_code(dd_span, 200)
+        assert self.get_url('/success/') == dd_span.get_tag(http.URL)
+        assert 0 == dd_span.error
+
+
+class TestTornadoWebAnalyticsDefault(TornadoTestCase):
+    """
+    Ensure that Tornado web handlers generate APM events with the default analytics settings
+    """
+    def test_analytics_global_on_integration_default(self):
+        """
+        When making a request
+        When integration trace search and the event sample rate are not set and trace search is enabled globally
+        We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=True)):
+            # it should trace a handler that returns 200
+            response = self.fetch('/success/')
+            self.assertEqual(200, response.code)
+
+            self.assert_structure(
+                dict(name='tornado.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 1.0}),
+            )
+
+    def test_analytics_global_off_integration_default(self):
+        """
+        When making a request
+        When integration trace search and the event sample rate are not set and trace search is disabled globally
+        We expect the root span to not include the tag
+        """
+        with self.override_global_config(dict(analytics_enabled=False)):
+            # it should trace a handler that returns 200
+            response = self.fetch('/success/')
+            self.assertEqual(200, response.code)
+
+            root = self.get_root_span()
+            self.assertIsNone(root.get_metric(ANALYTICS_SAMPLE_RATE_KEY))
+
+
+class TestTornadoWebAnalyticsOn(TornadoTestCase):
+    """
+    Ensure that Tornado web handlers generate APM events when trace search
+    is enabled with an explicit sample rate
+    """
+    def get_settings(self):
+        # trace search needs to be enabled manually for the integration
+        return {
+            'datadog_trace': {
+                'analytics_enabled': True,
+                'analytics_sample_rate': 0.5,
+            },
+        }
+
+    def test_analytics_global_on_integration_on(self):
+        """
+        When making a request
+        When integration trace search is enabled with an explicit sample rate and trace search is enabled globally
+        We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=True)):
+            # it should trace a handler that returns 200
+            response = self.fetch('/success/')
+            self.assertEqual(200, response.code)
+
+            self.assert_structure(
+                dict(name='tornado.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}),
+            )
+
+    def test_analytics_global_off_integration_on(self):
+        """
+        When making a request
+        When integration trace search is enabled with an explicit sample rate and trace search is disabled globally
+        We expect the root span to have the appropriate tag
+        """
+        with self.override_global_config(dict(analytics_enabled=False)):
+            # it should trace a handler that returns 200
+            response = self.fetch('/success/')
+            self.assertEqual(200, response.code)
+
+            self.assert_structure(
+                dict(name='tornado.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 0.5}),
+            )
+
+
+class TestTornadoWebAnalyticsNoRate(TornadoTestCase):
+    """
+    Ensure that Tornado web handlers generate APM events when trace search
+    is enabled without an explicit sample rate
+    """
+    def get_settings(self):
+        # trace search needs to be enabled manually for the integration
+        return {
+            'datadog_trace': {
+                'analytics_enabled': True,
+            },
+        }
+
+    def test_analytics_global_on_integration_on(self):
+        """
+        When making a request
+        When integration trace search is enabled without an explicit sample rate and trace search is enabled globally
+        We expect the root span to have the appropriate tag with the default rate of 1.0
+        """
+        with self.override_global_config(dict(analytics_enabled=True)):
+            # it should trace a handler that returns 200
+            response = self.fetch('/success/')
+            self.assertEqual(200, response.code)
+
+            self.assert_structure(
+                dict(name='tornado.request', metrics={ANALYTICS_SAMPLE_RATE_KEY: 1.0}),
+            )
+
+
+class TestNoPropagationTornadoWeb(TornadoTestCase):
+    """
+    Ensure that Tornado web handlers are properly traced and ignore
+    propagated HTTP headers when distributed tracing is disabled.
+    """
+    def get_settings(self):
+        # distributed_tracing needs to be disabled manually
+        return {
+            'datadog_trace': {
+                'distributed_tracing': False,
+            },
+        }
+
+    def test_no_propagation(self):
+        # it should not propagate the HTTP context
+        headers = {
+            'x-datadog-trace-id': '1234',
+            'x-datadog-parent-id': '4567',
+            'x-datadog-sampling-priority': '2',
+            'x-datadog-origin': 'synthetics',
+        }
+        response = self.fetch('/success/', headers=headers)
+        assert 200 == response.code
+
+        traces = self.tracer.writer.pop_traces()
+        assert 1 == len(traces)
+        assert 1 == len(traces[0])
+
+        request_span = traces[0][0]
+
+        # simple sanity check on the span
+        assert 'tornado.request' == request_span.name
+        assert_span_http_status_code(request_span, 200)
+        assert self.get_url('/success/') == request_span.get_tag(http.URL)
+        assert 0 == request_span.error
+
+        # check non-propagation
+        assert request_span.trace_id != 1234
+        assert request_span.parent_id != 4567
+        assert request_span.get_metric(SAMPLING_PRIORITY_KEY) != 2
+        assert request_span.get_tag(ORIGIN_KEY) != 'synthetics'
+
+
+class TestCustomTornadoWeb(TornadoTestCase):
+    """
+    Ensure that Tornado web handlers are properly traced when using
+    a custom default handler.
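+    Tornado routes unmatched requests to the `default_handler_class` set in
+    the application settings, so the 400 response below still goes through
+    the instrumentation like any regular handler.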
+ """ + def get_settings(self): + return { + 'default_handler_class': CustomDefaultHandler, + 'default_handler_args': dict(status_code=400), + } + + def test_custom_default_handler(self): + # it should trace any call that uses a custom default handler + response = self.fetch('/custom_handler/') + assert 400 == response.code + + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 1 == len(traces[0]) + + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.CustomDefaultHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 400) + assert self.get_url('/custom_handler/') == request_span.get_tag(http.URL) + assert 0 == request_span.error diff --git a/tests/contrib/tornado/test_wrap_decorator.py b/tests/contrib/tornado/test_wrap_decorator.py new file mode 100644 index 0000000000..1c038dfe87 --- /dev/null +++ b/tests/contrib/tornado/test_wrap_decorator.py @@ -0,0 +1,178 @@ +from ddtrace.ext import http + +from .utils import TornadoTestCase +from ...utils import assert_span_http_status_code + + +class TestTornadoWebWrapper(TornadoTestCase): + """ + Ensure that Tracer.wrap() works with Tornado web handlers. + """ + def test_nested_wrap_handler(self): + # it should trace a handler that calls a coroutine + response = self.fetch('/nested_wrap/') + assert 200 == response.code + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + # check request span + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.NestedWrapHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 200) + assert self.get_url('/nested_wrap/') == request_span.get_tag(http.URL) + assert 0 == request_span.error + # check nested span + nested_span = traces[0][1] + assert 'tornado-web' == nested_span.service + assert 'tornado.coro' == nested_span.name + assert 0 == nested_span.error + # check durations because of the yield sleep + assert request_span.duration >= 0.05 + assert nested_span.duration >= 0.05 + + def test_nested_exception_wrap_handler(self): + # it should trace a handler that calls a coroutine that raises an exception + response = self.fetch('/nested_exception_wrap/') + assert 500 == response.code + traces = self.tracer.writer.pop_traces() + assert 1 == len(traces) + assert 2 == len(traces[0]) + # check request span + request_span = traces[0][0] + assert 'tornado-web' == request_span.service + assert 'tornado.request' == request_span.name + assert 'web' == request_span.span_type + assert 'tests.contrib.tornado.web.app.NestedExceptionWrapHandler' == request_span.resource + assert 'GET' == request_span.get_tag('http.method') + assert_span_http_status_code(request_span, 500) + assert self.get_url('/nested_exception_wrap/') == request_span.get_tag(http.URL) + assert 1 == request_span.error + assert 'Ouch!' == request_span.get_tag('error.msg') + assert 'Exception: Ouch!' in request_span.get_tag('error.stack') + # check nested span + nested_span = traces[0][1] + assert 'tornado-web' == nested_span.service + assert 'tornado.coro' == nested_span.name + assert 1 == nested_span.error + assert 'Ouch!' 
== nested_span.get_tag('error.msg')
+        assert 'Exception: Ouch!' in nested_span.get_tag('error.stack')
+        # check durations because of the yield sleep
+        assert request_span.duration >= 0.05
+        assert nested_span.duration >= 0.05
+
+    def test_sync_nested_wrap_handler(self):
+        # it should trace a handler that calls a wrapped function
+        response = self.fetch('/sync_nested_wrap/')
+        assert 200 == response.code
+        traces = self.tracer.writer.pop_traces()
+        assert 1 == len(traces)
+        assert 2 == len(traces[0])
+        # check request span
+        request_span = traces[0][0]
+        assert 'tornado-web' == request_span.service
+        assert 'tornado.request' == request_span.name
+        assert 'web' == request_span.span_type
+        assert 'tests.contrib.tornado.web.app.SyncNestedWrapHandler' == request_span.resource
+        assert 'GET' == request_span.get_tag('http.method')
+        assert_span_http_status_code(request_span, 200)
+        assert self.get_url('/sync_nested_wrap/') == request_span.get_tag(http.URL)
+        assert 0 == request_span.error
+        # check nested span
+        nested_span = traces[0][1]
+        assert 'tornado-web' == nested_span.service
+        assert 'tornado.func' == nested_span.name
+        assert 0 == nested_span.error
+        # check durations because of the time.sleep
+        assert request_span.duration >= 0.05
+        assert nested_span.duration >= 0.05
+
+    def test_sync_nested_exception_wrap_handler(self):
+        # it should trace a handler that calls a wrapped function that raises an exception
+        response = self.fetch('/sync_nested_exception_wrap/')
+        assert 500 == response.code
+        traces = self.tracer.writer.pop_traces()
+        assert 1 == len(traces)
+        assert 2 == len(traces[0])
+        # check request span
+        request_span = traces[0][0]
+        assert 'tornado-web' == request_span.service
+        assert 'tornado.request' == request_span.name
+        assert 'web' == request_span.span_type
+        assert 'tests.contrib.tornado.web.app.SyncNestedExceptionWrapHandler' == request_span.resource
+        assert 'GET' == request_span.get_tag('http.method')
+        assert_span_http_status_code(request_span, 500)
+        assert self.get_url('/sync_nested_exception_wrap/') == request_span.get_tag(http.URL)
+        assert 1 == request_span.error
+        assert 'Ouch!' == request_span.get_tag('error.msg')
+        assert 'Exception: Ouch!' in request_span.get_tag('error.stack')
+        # check nested span
+        nested_span = traces[0][1]
+        assert 'tornado-web' == nested_span.service
+        assert 'tornado.func' == nested_span.name
+        assert 1 == nested_span.error
+        assert 'Ouch!' == nested_span.get_tag('error.msg')
+        assert 'Exception: Ouch!' in nested_span.get_tag('error.stack')
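+        # i.e. `tracer.wrap()` records the exception on the wrapped span and
+        # re-raises it, so the request span is flagged with the same error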
+        # check durations because of the time.sleep
+        assert request_span.duration >= 0.05
+        assert nested_span.duration >= 0.05
+
+    def test_nested_wrap_executor_handler(self):
+        # it should trace a handler that calls a blocking function in a different executor
+        response = self.fetch('/executor_wrap_handler/')
+        assert 200 == response.code
+        traces = self.tracer.writer.pop_traces()
+        assert 1 == len(traces)
+        assert 2 == len(traces[0])
+        # check request span
+        request_span = traces[0][0]
+        assert 'tornado-web' == request_span.service
+        assert 'tornado.request' == request_span.name
+        assert 'web' == request_span.span_type
+        assert 'tests.contrib.tornado.web.app.ExecutorWrapHandler' == request_span.resource
+        assert 'GET' == request_span.get_tag('http.method')
+        assert_span_http_status_code(request_span, 200)
+        assert self.get_url('/executor_wrap_handler/') == request_span.get_tag(http.URL)
+        assert 0 == request_span.error
+        # check nested span in the executor
+        nested_span = traces[0][1]
+        assert 'tornado-web' == nested_span.service
+        assert 'tornado.executor.wrap' == nested_span.name
+        assert 0 == nested_span.error
+        # check durations because of the time.sleep in the executor
+        assert request_span.duration >= 0.05
+        assert nested_span.duration >= 0.05
+
+    def test_nested_exception_wrap_executor_handler(self):
+        # it should trace a handler that calls a blocking function in a different
+        # executor that raises an exception
+        response = self.fetch('/executor_wrap_exception/')
+        assert 500 == response.code
+        traces = self.tracer.writer.pop_traces()
+        assert 1 == len(traces)
+        assert 2 == len(traces[0])
+        # check request span
+        request_span = traces[0][0]
+        assert 'tornado-web' == request_span.service
+        assert 'tornado.request' == request_span.name
+        assert 'web' == request_span.span_type
+        assert 'tests.contrib.tornado.web.app.ExecutorExceptionWrapHandler' == request_span.resource
+        assert 'GET' == request_span.get_tag('http.method')
+        assert_span_http_status_code(request_span, 500)
+        assert self.get_url('/executor_wrap_exception/') == request_span.get_tag(http.URL)
+        assert 1 == request_span.error
+        assert 'Ouch!' == request_span.get_tag('error.msg')
+        assert 'Exception: Ouch!' in request_span.get_tag('error.stack')
+        # check nested span
+        nested_span = traces[0][1]
+        assert 'tornado-web' == nested_span.service
+        assert 'tornado.executor.wrap' == nested_span.name
+        assert 1 == nested_span.error
+        assert 'Ouch!' == nested_span.get_tag('error.msg')
+        assert 'Exception: Ouch!' in nested_span.get_tag('error.stack')
+        # check durations because of the time.sleep in the executor
+        assert request_span.duration >= 0.05
+        assert nested_span.duration >= 0.05
diff --git a/tests/contrib/tornado/utils.py b/tests/contrib/tornado/utils.py
new file mode 100644
index 0000000000..45803fface
--- /dev/null
+++ b/tests/contrib/tornado/utils.py
@@ -0,0 +1,38 @@
+from tornado.testing import AsyncHTTPTestCase
+
+from ddtrace.contrib.tornado import patch, unpatch
+from ddtrace.compat import reload_module
+
+from .web import app, compat
+from ...base import BaseTracerTestCase
+
+
+class TornadoTestCase(BaseTracerTestCase, AsyncHTTPTestCase):
+    """
+    Generic TornadoTestCase where the framework is globally patched
+    and unpatched before/after each test. A dummy tracer is provided
+    in the `self.tracer` attribute.
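+
+    A minimal sketch of a concrete test case (`MyTestCase` is hypothetical):
+
+        class MyTestCase(TornadoTestCase):
+            def get_settings(self):
+                # settings merged into the Tornado application settings
+                return {'datadog_trace': {'distributed_tracing': False}}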
+ """ + def get_app(self): + # patch Tornado and reload module app + patch() + reload_module(compat) + reload_module(app) + + settings = self.get_settings() + trace_settings = settings.get('datadog_trace', {}) + settings['datadog_trace'] = trace_settings + trace_settings['tracer'] = self.tracer + self.app = app.make_app(settings=settings) + return self.app + + def get_settings(self): + # override settings in your TestCase + return {} + + def tearDown(self): + super(TornadoTestCase, self).tearDown() + # unpatch Tornado + unpatch() + reload_module(compat) + reload_module(app) diff --git a/tests/contrib/tornado/web/__init__.py b/tests/contrib/tornado/web/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/tornado/web/app.py b/tests/contrib/tornado/web/app.py new file mode 100644 index 0000000000..bb7114d959 --- /dev/null +++ b/tests/contrib/tornado/web/app.py @@ -0,0 +1,330 @@ +import os +import time + +import tornado.web +import tornado.concurrent + +from . import uimodules +from .compat import sleep, ThreadPoolExecutor + + +BASE_DIR = os.path.dirname(os.path.realpath(__file__)) +STATIC_DIR = os.path.join(BASE_DIR, 'statics') + + +class SuccessHandler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + self.write('OK') + + +class NestedHandler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + tracer = self.settings['datadog_trace']['tracer'] + with tracer.trace('tornado.sleep'): + yield sleep(0.05) + self.write('OK') + + +class NestedWrapHandler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + tracer = self.settings['datadog_trace']['tracer'] + + # define a wrapped coroutine: having an inner coroutine + # is only for easy testing + @tracer.wrap('tornado.coro') + @tornado.gen.coroutine + def coro(): + yield sleep(0.05) + + yield coro() + self.write('OK') + + +class NestedExceptionWrapHandler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + tracer = self.settings['datadog_trace']['tracer'] + + # define a wrapped coroutine: having an inner coroutine + # is only for easy testing + @tracer.wrap('tornado.coro') + @tornado.gen.coroutine + def coro(): + yield sleep(0.05) + raise Exception('Ouch!') + + yield coro() + self.write('OK') + + +class ExceptionHandler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + raise Exception('Ouch!') + + +class HTTPExceptionHandler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + raise tornado.web.HTTPError(status_code=501, log_message='unavailable', reason='Not Implemented') + + +class HTTPException500Handler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + raise tornado.web.HTTPError(status_code=500, log_message='server error', reason='Server Error') + + +class TemplateHandler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + self.render('templates/page.html', name='home') + + +class TemplatePartialHandler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + self.render('templates/list.html', items=['python', 'go', 'ruby']) + + +class TemplateExceptionHandler(tornado.web.RequestHandler): + @tornado.gen.coroutine + def get(self): + self.render('templates/exception.html') + + +class SyncSuccessHandler(tornado.web.RequestHandler): + def get(self): + self.write('OK') + + +class SyncExceptionHandler(tornado.web.RequestHandler): + def get(self): + raise Exception('Ouch!') + + +class 
SyncNestedWrapHandler(tornado.web.RequestHandler):
+    def get(self):
+        tracer = self.settings['datadog_trace']['tracer']
+
+        # define a wrapped function: having an inner function
+        # is only for easy testing
+        @tracer.wrap('tornado.func')
+        def func():
+            time.sleep(0.05)
+
+        func()
+        self.write('OK')
+
+
+class SyncNestedExceptionWrapHandler(tornado.web.RequestHandler):
+    def get(self):
+        tracer = self.settings['datadog_trace']['tracer']
+
+        # define a wrapped function: having an inner function
+        # is only for easy testing
+        @tracer.wrap('tornado.func')
+        def func():
+            time.sleep(0.05)
+            raise Exception('Ouch!')
+
+        func()
+        self.write('OK')
+
+
+class CustomDefaultHandler(tornado.web.ErrorHandler):
+    """
+    Default handler that is used in case of 404 error; in our tests
+    it's used only if defined in the get_app() function.
+    """
+    pass
+
+
+class ExecutorHandler(tornado.web.RequestHandler):
+    # used automatically by the @run_on_executor decorator
+    executor = ThreadPoolExecutor(max_workers=3)
+
+    @tornado.concurrent.run_on_executor
+    def outer_executor(self):
+        tracer = self.settings['datadog_trace']['tracer']
+        with tracer.trace('tornado.executor.with'):
+            time.sleep(0.05)
+
+    @tornado.gen.coroutine
+    def get(self):
+        yield self.outer_executor()
+        self.write('OK')
+
+
+class ExecutorSubmitHandler(tornado.web.RequestHandler):
+    executor = ThreadPoolExecutor(max_workers=3)
+
+    def query(self):
+        tracer = self.settings['datadog_trace']['tracer']
+        with tracer.trace('tornado.executor.query'):
+            time.sleep(0.05)
+
+    @tornado.gen.coroutine
+    def get(self):
+        # run the query in another Executor, without using
+        # Tornado decorators
+        yield self.executor.submit(self.query)
+        self.write('OK')
+
+
+class ExecutorDelayedHandler(tornado.web.RequestHandler):
+    # used automatically by the @run_on_executor decorator
+    executor = ThreadPoolExecutor(max_workers=3)
+
+    @tornado.concurrent.run_on_executor
+    def outer_executor(self):
+        # waiting here means expecting that the `get()` flushes
+        # the request trace
+        time.sleep(0.01)
+        tracer = self.settings['datadog_trace']['tracer']
+        with tracer.trace('tornado.executor.with'):
+            time.sleep(0.05)
+
+    @tornado.gen.coroutine
+    def get(self):
+        # we don't yield here but we expect that the outer_executor
+        # has the right parent; tests that use this handler must
+        # yield sleep() to wait for the thread execution
+        self.outer_executor()
+        self.write('OK')
+
+
+try:
+    class ExecutorCustomHandler(tornado.web.RequestHandler):
+        # not used automatically, a kwarg is required
+        custom_thread_pool = ThreadPoolExecutor(max_workers=3)
+
+        @tornado.concurrent.run_on_executor(executor='custom_thread_pool')
+        def outer_executor(self):
+            # create the trace inside the custom executor; the
+            # `tornado.executor.with` span must still have the
+            # request span as its parent
+            tracer = self.settings['datadog_trace']['tracer']
+            with tracer.trace('tornado.executor.with'):
+                time.sleep(0.05)
+
+        @tornado.gen.coroutine
+        def get(self):
+            yield self.outer_executor()
+            self.write('OK')
+except TypeError:
+    # the class definition fails because Tornado 4.0 and 4.1 don't support
+    # `run_on_executor` with params. Since it's only this one case, we use
+    # a try-except block; if another case comes up, we may move these
+    # endpoints outside the module and use a compatibility system
+    class ExecutorCustomHandler(tornado.web.RequestHandler):
+        pass
+
+
+class ExecutorCustomArgsHandler(tornado.web.RequestHandler):
+    @tornado.gen.coroutine
+    def get(self):
+        # this is not a legit use of the decorator so a failure is expected
+        @tornado.concurrent.run_on_executor(object(), executor='_pool')
+        def outer_executor(self):
+            pass
+
+        yield outer_executor(self)
+        self.write('OK')
+
+
+class ExecutorExceptionHandler(tornado.web.RequestHandler):
+    # used automatically by the @run_on_executor decorator
+    executor = ThreadPoolExecutor(max_workers=3)
+
+    @tornado.concurrent.run_on_executor
+    def outer_executor(self):
+        # wait before creating a trace so that we're sure
+        # the `tornado.executor.with` span has the right
+        # parent
+        time.sleep(0.05)
+        tracer = self.settings['datadog_trace']['tracer']
+        with tracer.trace('tornado.executor.with'):
+            raise Exception('Ouch!')
+
+    @tornado.gen.coroutine
+    def get(self):
+        yield self.outer_executor()
+        self.write('OK')
+
+
+class ExecutorWrapHandler(tornado.web.RequestHandler):
+    # used automatically by the @run_on_executor decorator
+    executor = ThreadPoolExecutor(max_workers=3)
+
+    @tornado.gen.coroutine
+    def get(self):
+        tracer = self.settings['datadog_trace']['tracer']
+
+        @tracer.wrap('tornado.executor.wrap')
+        @tornado.concurrent.run_on_executor
+        def outer_executor(self):
+            time.sleep(0.05)
+
+        yield outer_executor(self)
+        self.write('OK')
+
+
+class ExecutorExceptionWrapHandler(tornado.web.RequestHandler):
+    # used automatically by the @run_on_executor decorator
+    executor = ThreadPoolExecutor(max_workers=3)
+
+    @tornado.gen.coroutine
+    def get(self):
+        tracer = self.settings['datadog_trace']['tracer']
+
+        @tracer.wrap('tornado.executor.wrap')
+        @tornado.concurrent.run_on_executor
+        def outer_executor(self):
+            time.sleep(0.05)
+            raise Exception('Ouch!')
+
+        yield outer_executor(self)
+        self.write('OK')
+
+
+def make_app(settings=None):
+    """
+    Create a Tornado web application, useful to test
+    different behaviors.
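+
+    Usage, as in TornadoTestCase.get_app() (`settings` is merged into the
+    Application settings):
+
+        app = make_app(settings={'datadog_trace': {'tracer': tracer}})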
+ """ + settings['ui_modules'] = uimodules + + return tornado.web.Application([ + # custom handlers + (r'/success/', SuccessHandler), + (r'/nested/', NestedHandler), + (r'/nested_wrap/', NestedWrapHandler), + (r'/nested_exception_wrap/', NestedExceptionWrapHandler), + (r'/exception/', ExceptionHandler), + (r'/http_exception/', HTTPExceptionHandler), + (r'/http_exception_500/', HTTPException500Handler), + (r'/template/', TemplateHandler), + (r'/template_partial/', TemplatePartialHandler), + (r'/template_exception/', TemplateExceptionHandler), + # handlers that spawn new threads + (r'/executor_handler/', ExecutorHandler), + (r'/executor_submit_handler/', ExecutorSubmitHandler), + (r'/executor_delayed_handler/', ExecutorDelayedHandler), + (r'/executor_custom_handler/', ExecutorCustomHandler), + (r'/executor_custom_args_handler/', ExecutorCustomArgsHandler), + (r'/executor_exception/', ExecutorExceptionHandler), + (r'/executor_wrap_handler/', ExecutorWrapHandler), + (r'/executor_wrap_exception/', ExecutorExceptionWrapHandler), + # built-in handlers + (r'/redirect/', tornado.web.RedirectHandler, {'url': '/success/'}), + (r'/statics/(.*)', tornado.web.StaticFileHandler, {'path': STATIC_DIR}), + # synchronous handlers + (r'/sync_success/', SyncSuccessHandler), + (r'/sync_exception/', SyncExceptionHandler), + (r'/sync_nested_wrap/', SyncNestedWrapHandler), + (r'/sync_nested_exception_wrap/', SyncNestedExceptionWrapHandler), + ], **settings) diff --git a/tests/contrib/tornado/web/compat.py b/tests/contrib/tornado/web/compat.py new file mode 100644 index 0000000000..87ad3bad56 --- /dev/null +++ b/tests/contrib/tornado/web/compat.py @@ -0,0 +1,35 @@ +from tornado.concurrent import Future +import tornado.gen +from tornado.ioloop import IOLoop + + +try: + from concurrent.futures import ThreadPoolExecutor +except ImportError: + from tornado.concurrent import DummyExecutor + + class ThreadPoolExecutor(DummyExecutor): + """ + Fake executor class used to test our tracer when Python 2 is used + without the `futures` backport. This is not a real use case, but + it's required to be defensive when we have different `Executor` + implementations. + """ + def __init__(self, *args, **kwargs): + # we accept any kind of interface + super(ThreadPoolExecutor, self).__init__() + + +if hasattr(tornado.gen, 'sleep'): + sleep = tornado.gen.sleep +else: + # Tornado <= 4.0 + def sleep(duration): + """ + Compatibility helper that return a Future() that can be yielded. + This is used because Tornado 4.0 doesn't have a ``gen.sleep()`` + function, that we require to test the ``TracerStackContext``. 
+ """ + f = Future() + IOLoop.current().call_later(duration, lambda: f.set_result(None)) + return f diff --git a/tests/contrib/tornado/web/statics/empty.txt b/tests/contrib/tornado/web/statics/empty.txt new file mode 100644 index 0000000000..3083bfa69c --- /dev/null +++ b/tests/contrib/tornado/web/statics/empty.txt @@ -0,0 +1 @@ +Static file diff --git a/tests/contrib/tornado/web/templates/exception.html b/tests/contrib/tornado/web/templates/exception.html new file mode 100644 index 0000000000..8315c9aba6 --- /dev/null +++ b/tests/contrib/tornado/web/templates/exception.html @@ -0,0 +1 @@ +{% module ModuleThatDoesNotExist() %} diff --git a/tests/contrib/tornado/web/templates/item.html b/tests/contrib/tornado/web/templates/item.html new file mode 100644 index 0000000000..43a1ec3580 --- /dev/null +++ b/tests/contrib/tornado/web/templates/item.html @@ -0,0 +1 @@ +* {{ item }} diff --git a/tests/contrib/tornado/web/templates/list.html b/tests/contrib/tornado/web/templates/list.html new file mode 100644 index 0000000000..4a0d2ed64e --- /dev/null +++ b/tests/contrib/tornado/web/templates/list.html @@ -0,0 +1,4 @@ +This is a list: +{% for item in items %} + {% module Item(item) %} +{% end %} diff --git a/tests/contrib/tornado/web/templates/page.html b/tests/contrib/tornado/web/templates/page.html new file mode 100644 index 0000000000..7a857c3126 --- /dev/null +++ b/tests/contrib/tornado/web/templates/page.html @@ -0,0 +1 @@ +This is a rendered page called "{{ name }}" diff --git a/tests/contrib/tornado/web/uimodules.py b/tests/contrib/tornado/web/uimodules.py new file mode 100644 index 0000000000..e09770dfc9 --- /dev/null +++ b/tests/contrib/tornado/web/uimodules.py @@ -0,0 +1,6 @@ +import tornado + + +class Item(tornado.web.UIModule): + def render(self, item): + return self.render_string('templates/item.html', item=item) diff --git a/tests/contrib/vertica/__init__.py b/tests/contrib/vertica/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/contrib/vertica/test_vertica.py b/tests/contrib/vertica/test_vertica.py new file mode 100644 index 0000000000..753915cb73 --- /dev/null +++ b/tests/contrib/vertica/test_vertica.py @@ -0,0 +1,430 @@ +# 3p +import pytest +from ddtrace.vendor import wrapt + +# project +import ddtrace +from ddtrace import Pin, config +from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY +from ddtrace.contrib.vertica.patch import patch, unpatch +from ddtrace.ext import errors +from ddtrace.utils.merge import deepmerge + +# testing +from tests.base import BaseTracerTestCase +from tests.contrib.config import VERTICA_CONFIG +from tests.opentracer.utils import init_tracer +from tests.test_tracer import get_dummy_tracer + +TEST_TABLE = 'test_table' + + +@pytest.fixture(scope='function') +def test_tracer(request): + request.cls.test_tracer = get_dummy_tracer() + return request.cls.test_tracer + + +@pytest.fixture(scope='function') +def test_conn(request, test_tracer): + ddtrace.tracer = test_tracer + patch() + + import vertica_python # must happen AFTER installing with patch() + + conn = vertica_python.connect(**VERTICA_CONFIG) + + cur = conn.cursor() + cur.execute('DROP TABLE IF EXISTS {}'.format(TEST_TABLE)) + cur.execute( + """CREATE TABLE {} ( + a INT, + b VARCHAR(32) + ) + """.format( + TEST_TABLE + ) + ) + test_tracer.writer.pop() + + request.cls.test_conn = (conn, cur) + return conn, cur + + +class TestVerticaPatching(BaseTracerTestCase): + def tearDown(self): + super(TestVerticaPatching, self).tearDown() + unpatch() + + def 
test_not_patched(self): + """Ensure that vertica is not patched somewhere before our tests.""" + import vertica_python + + assert not isinstance(vertica_python.Connection.cursor, wrapt.ObjectProxy) + assert not isinstance( + vertica_python.vertica.cursor.Cursor.execute, wrapt.ObjectProxy + ) + + def test_patch_after_import(self): + """Patching _after_ the import will not work because we hook into + the module import system. + + Vertica uses a local reference to `Cursor` which won't get patched + if we call `patch` after the module has already been imported. + """ + import vertica_python + + assert not isinstance(vertica_python.vertica.connection.Connection.cursor, wrapt.ObjectProxy) + assert not isinstance( + vertica_python.vertica.cursor.Cursor.execute, wrapt.ObjectProxy + ) + + patch() + + conn = vertica_python.connect(**VERTICA_CONFIG) + cursor = conn.cursor() + assert not isinstance(cursor, wrapt.ObjectProxy) + + def test_patch_before_import(self): + patch() + import vertica_python + + # use a patched method from each class as indicators + assert isinstance(vertica_python.Connection.cursor, wrapt.ObjectProxy) + assert isinstance( + vertica_python.vertica.cursor.Cursor.execute, wrapt.ObjectProxy + ) + + def test_idempotent_patch(self): + patch() + patch() + import vertica_python + + assert not isinstance( + vertica_python.Connection.cursor.__wrapped__, wrapt.ObjectProxy + ) + assert not isinstance( + vertica_python.vertica.cursor.Cursor.execute.__wrapped__, wrapt.ObjectProxy + ) + assert isinstance(vertica_python.Connection.cursor, wrapt.ObjectProxy) + assert isinstance( + vertica_python.vertica.cursor.Cursor.execute, wrapt.ObjectProxy + ) + + def test_unpatch_before_import(self): + patch() + unpatch() + import vertica_python + + assert not isinstance(vertica_python.Connection.cursor, wrapt.ObjectProxy) + assert not isinstance( + vertica_python.vertica.cursor.Cursor.execute, wrapt.ObjectProxy + ) + + def test_unpatch_after_import(self): + patch() + import vertica_python + + unpatch() + assert not isinstance(vertica_python.Connection.cursor, wrapt.ObjectProxy) + assert not isinstance( + vertica_python.vertica.cursor.Cursor.execute, wrapt.ObjectProxy + ) + + +@pytest.mark.usefixtures('test_tracer', 'test_conn') +class TestVertica(BaseTracerTestCase): + def tearDown(self): + super(TestVertica, self).tearDown() + + unpatch() + + def test_configuration_service_name(self): + """Ensure that the integration can be configured.""" + with self.override_config('vertica', dict(service_name='test_svc_name')): + patch() + import vertica_python + + test_tracer = get_dummy_tracer() + + conn = vertica_python.connect(**VERTICA_CONFIG) + cur = conn.cursor() + Pin.override(cur, tracer=test_tracer) + with conn: + cur.execute('DROP TABLE IF EXISTS {}'.format(TEST_TABLE)) + spans = test_tracer.writer.pop() + assert len(spans) == 1 + assert spans[0].service == 'test_svc_name' + + def test_configuration_routine(self): + """Ensure that the integration routines can be configured.""" + routine_config = dict( + patch={ + 'vertica_python.vertica.connection.Connection': dict( + routines=dict( + cursor=dict( + operation_name='get_cursor', + trace_enabled=True, + ), + ), + ), + }, + ) + + # Make a copy of the vertica config first before we merge our settings over + # DEV: First argument gets merged into the second + copy = deepmerge(config.vertica, dict()) + overrides = deepmerge(routine_config, copy) + with self.override_config('vertica', overrides): + patch() + import vertica_python + + test_tracer = 
get_dummy_tracer() + + conn = vertica_python.connect(**VERTICA_CONFIG) + Pin.override(conn, service='mycustomservice', tracer=test_tracer) + conn.cursor() # should be traced now + conn.close() + spans = test_tracer.writer.pop() + assert len(spans) == 1 + assert spans[0].name == 'get_cursor' + assert spans[0].service == 'mycustomservice' + + def test_execute_metadata(self): + """Metadata related to an `execute` call should be captured.""" + conn, cur = self.test_conn + + Pin.override(cur, tracer=self.test_tracer) + + with conn: + cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE)) + cur.execute('SELECT * FROM {};'.format(TEST_TABLE)) + + spans = self.test_tracer.writer.pop() + assert len(spans) == 2 + + # check all the metadata + assert spans[0].service == 'vertica' + assert spans[0].span_type == 'sql' + assert spans[0].name == 'vertica.query' + assert spans[0].get_metric('db.rowcount') == -1 + query = "INSERT INTO test_table (a, b) VALUES (1, 'aa');" + assert spans[0].resource == query + assert spans[0].get_tag('out.host') == '127.0.0.1' + assert spans[0].get_metric('out.port') == 5433 + assert spans[0].get_tag('db.name') == 'docker' + assert spans[0].get_tag('db.user') == 'dbadmin' + + assert spans[1].resource == 'SELECT * FROM test_table;' + + def test_cursor_override(self): + """Test overriding the tracer with our own.""" + conn, cur = self.test_conn + + Pin.override(cur, tracer=self.test_tracer) + + with conn: + cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE)) + cur.execute('SELECT * FROM {};'.format(TEST_TABLE)) + + spans = self.test_tracer.writer.pop() + assert len(spans) == 2 + + # check all the metadata + assert spans[0].service == 'vertica' + assert spans[0].span_type == 'sql' + assert spans[0].name == 'vertica.query' + assert spans[0].get_metric('db.rowcount') == -1 + query = "INSERT INTO test_table (a, b) VALUES (1, 'aa');" + assert spans[0].resource == query + assert spans[0].get_tag('out.host') == '127.0.0.1' + assert spans[0].get_metric('out.port') == 5433 + + assert spans[1].resource == 'SELECT * FROM test_table;' + + def test_execute_exception(self): + """Exceptions should result in appropriate span tagging.""" + from vertica_python.errors import VerticaSyntaxError + + conn, cur = self.test_conn + + with conn, pytest.raises(VerticaSyntaxError): + cur.execute('INVALID QUERY') + + spans = self.test_tracer.writer.pop() + assert len(spans) == 2 + + # check all the metadata + assert spans[0].service == 'vertica' + assert spans[0].error == 1 + assert 'INVALID QUERY' in spans[0].get_tag(errors.ERROR_MSG) + error_type = 'vertica_python.errors.VerticaSyntaxError' + assert spans[0].get_tag(errors.ERROR_TYPE) == error_type + assert spans[0].get_tag(errors.ERROR_STACK) + + assert spans[1].resource == 'COMMIT;' + + def test_rowcount_oddity(self): + """Vertica treats rowcount specially. Ensure we handle it. 
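+
+        In short (restating the linked docs): ``rowcount`` is -1 right after
+        ``execute()`` and only reflects rows as they are fetched, e.g.:
+
+            cur.execute('SELECT * FROM test_table;')
+            assert cur.rowcount == -1
+            cur.fetchone()
+            assert cur.rowcount == 1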
+
+        See https://github.com/vertica/vertica-python/tree/029a65a862da893e7bd641a68f772019fd9ecc99#rowcount-oddities
+        """
+        conn, cur = self.test_conn
+
+        with conn:
+            cur.execute(
+                """
+                INSERT INTO {} (a, b)
+                SELECT 1, 'a'
+                UNION ALL
+                SELECT 2, 'b'
+                UNION ALL
+                SELECT 3, 'c'
+                UNION ALL
+                SELECT 4, 'd'
+                UNION ALL
+                SELECT 5, 'e'
+                """.format(
+                    TEST_TABLE
+                )
+            )
+            assert cur.rowcount == -1
+
+            cur.execute('SELECT * FROM {};'.format(TEST_TABLE))
+            cur.fetchone()
+            assert cur.rowcount == 1
+            cur.fetchone()
+            assert cur.rowcount == 2
+            # fetchall just calls fetchone for each remaining row
+            cur.fetchall()
+            assert cur.rowcount == 5
+
+        spans = self.test_tracer.writer.pop()
+        assert len(spans) == 9
+
+        # check all the rowcounts
+        assert spans[0].name == 'vertica.query'
+        assert spans[0].get_metric('db.rowcount') == -1
+        assert spans[1].name == 'vertica.query'
+        assert spans[1].get_metric('db.rowcount') == -1
+        assert spans[2].name == 'vertica.fetchone'
+        assert spans[2].get_tag('out.host') == '127.0.0.1'
+        assert spans[2].get_metric('out.port') == 5433
+        assert spans[2].get_metric('db.rowcount') == 1
+        assert spans[3].name == 'vertica.fetchone'
+        assert spans[3].get_metric('db.rowcount') == 2
+        assert spans[4].name == 'vertica.fetchall'
+        assert spans[4].get_metric('db.rowcount') == 5
+
+    def test_nextset(self):
+        """cursor.nextset() should be traced."""
+        conn, cur = self.test_conn
+
+        with conn:
+            cur.execute('SELECT * FROM {0}; SELECT * FROM {0}'.format(TEST_TABLE))
+            cur.nextset()
+
+        spans = self.test_tracer.writer.pop()
+        assert len(spans) == 3
+
+        # check all the rowcounts
+        assert spans[0].name == 'vertica.query'
+        assert spans[0].get_metric('db.rowcount') == -1
+        assert spans[1].name == 'vertica.nextset'
+        assert spans[1].get_metric('db.rowcount') == -1
+        assert spans[2].name == 'vertica.query'
+        assert spans[2].resource == 'COMMIT;'
+
+    def test_copy(self):
+        """cursor.copy() should be traced."""
+        conn, cur = self.test_conn
+
+        with conn:
+            cur.copy(
+                "COPY {0} (a, b) FROM STDIN DELIMITER ','".format(TEST_TABLE),
+                '1,foo\n2,bar',
+            )
+
+        spans = self.test_tracer.writer.pop()
+        assert len(spans) == 2
+
+        # check the spans
+        assert spans[0].name == 'vertica.copy'
+        query = "COPY test_table (a, b) FROM STDIN DELIMITER ','"
+        assert spans[0].resource == query
+        assert spans[1].name == 'vertica.query'
+        assert spans[1].resource == 'COMMIT;'
+
+    def test_opentracing(self):
+        """Ensure OpenTracing works with vertica."""
+        conn, cur = self.test_conn
+
+        ot_tracer = init_tracer('vertica_svc', self.test_tracer)
+
+        with ot_tracer.start_active_span('vertica_execute'):
+            cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE))
+            conn.close()
+
+        spans = self.test_tracer.writer.pop()
+        assert len(spans) == 2
+        ot_span, dd_span = spans
+
+        # confirm the parenting
+        assert ot_span.parent_id is None
+        assert dd_span.parent_id == ot_span.span_id
+
+        assert dd_span.service == 'vertica'
+        assert dd_span.span_type == 'sql'
+        assert dd_span.name == 'vertica.query'
+        assert dd_span.get_metric('db.rowcount') == -1
+        query = "INSERT INTO test_table (a, b) VALUES (1, 'aa');"
+        assert dd_span.resource == query
+        assert dd_span.get_tag('out.host') == '127.0.0.1'
+        assert dd_span.get_metric('out.port') == 5433
+
+    def test_analytics_default(self):
+        conn, cur = self.test_conn
+
+        Pin.override(cur, tracer=self.test_tracer)
+
+        with conn:
+            cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE))
+            cur.execute('SELECT * FROM {};'.format(TEST_TABLE))
+
+        spans = 
self.test_tracer.writer.pop() + self.assertEqual(len(spans), 2) + self.assertIsNone(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY)) + + def test_analytics_with_rate(self): + with self.override_config( + 'vertica', + dict(analytics_enabled=True, analytics_sample_rate=0.5) + ): + conn, cur = self.test_conn + + Pin.override(cur, tracer=self.test_tracer) + + with conn: + cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE)) + cur.execute('SELECT * FROM {};'.format(TEST_TABLE)) + + spans = self.test_tracer.writer.pop() + self.assertEqual(len(spans), 2) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 0.5) + + def test_analytics_without_rate(self): + with self.override_config( + 'vertica', + dict(analytics_enabled=True) + ): + conn, cur = self.test_conn + + Pin.override(cur, tracer=self.test_tracer) + + with conn: + cur.execute("INSERT INTO {} (a, b) VALUES (1, 'aa');".format(TEST_TABLE)) + cur.execute('SELECT * FROM {};'.format(TEST_TABLE)) + + spans = self.test_tracer.writer.pop() + self.assertEqual(len(spans), 2) + self.assertEqual(spans[0].get_metric(ANALYTICS_SAMPLE_RATE_KEY), 1.0) diff --git a/tests/ddtrace_run.py b/tests/ddtrace_run.py new file mode 100644 index 0000000000..89d9cbb8c4 --- /dev/null +++ b/tests/ddtrace_run.py @@ -0,0 +1,9 @@ +import os +import sys + +# DEV: We must append to sys path before importing ddtrace_run +sys.path.append('.') +from ddtrace.commands import ddtrace_run # noqa + +os.environ['PYTHONPATH'] = '{}:{}'.format(os.getenv('PYTHONPATH'), os.path.abspath('.')) +ddtrace_run.main() diff --git a/tests/internal/__init__.py b/tests/internal/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/internal/runtime/__init__.py b/tests/internal/runtime/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/internal/runtime/test_container.py b/tests/internal/runtime/test_container.py new file mode 100644 index 0000000000..2dce42098a --- /dev/null +++ b/tests/internal/runtime/test_container.py @@ -0,0 +1,302 @@ +import mock + +import pytest + +from ddtrace.compat import PY2 +from ddtrace.internal.runtime.container import CGroupInfo, get_container_info + +from .utils import cgroup_line_valid_test_cases + +# Map expected Py2 exception to Py3 name +if PY2: + FileNotFoundError = IOError # noqa: A001 + + +def get_mock_open(read_data=None): + mock_open = mock.mock_open(read_data=read_data) + return mock.patch('ddtrace.internal.runtime.container.open', mock_open) + + +def test_cgroup_info_init(): + # Assert default all attributes to `None` + info = CGroupInfo() + for attr in ('id', 'groups', 'path', 'container_id', 'controllers', 'pod_id'): + assert getattr(info, attr) is None + + # Assert init with property sets property + info = CGroupInfo(container_id='test-container-id') + assert info.container_id == 'test-container-id' + + +@pytest.mark.parametrize( + 'line,expected_info', + + # Valid generated cases + one off cases + cgroup_line_valid_test_cases() + [ + # Valid, extra spaces + ( + ' 13:name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 ', + CGroupInfo( + id='13', + groups='name=systemd', + controllers=['name=systemd'], + path='/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + container_id='3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + pod_id=None, + ), + ), + # Valid, bookended newlines + ( + '\r\n13:name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860\r\n', + 
CGroupInfo( + id='13', + groups='name=systemd', + controllers=['name=systemd'], + path='/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + container_id='3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + pod_id=None, + ), + ), + + # Invalid container_ids + ( + # One character too short + '13:name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f86986', + CGroupInfo( + id='13', + groups='name=systemd', + controllers=['name=systemd'], + path='/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f86986', + container_id=None, + pod_id=None, + ), + ), + ( + # One character too long + '13:name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f8698600', + CGroupInfo( + id='13', + groups='name=systemd', + controllers=['name=systemd'], + path='/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f8698600', + container_id=None, + pod_id=None, + ), + ), + ( + # Non-hex + '13:name=systemd:/docker/3726184226f5d3147c25fzyxw5b60097e378e8a720503a5e19ecfdf29f869860', + CGroupInfo( + id='13', + groups='name=systemd', + controllers=['name=systemd'], + path='/docker/3726184226f5d3147c25fzyxw5b60097e378e8a720503a5e19ecfdf29f869860', + container_id=None, + pod_id=None, + ), + ), + + # Invalid id + ( + # non-digit + 'a:name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + None, + ), + ( + # missing + ':name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + None, + ), + + # Missing group + ( + # empty + '13::/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + CGroupInfo( + id='13', + groups='', + controllers=[], + path='/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + container_id='3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + pod_id=None, + ), + ), + ( + # missing + '13:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + None, + ), + + + # Empty line + ( + '', + None, + ), + ], +) +def test_cgroup_info_from_line(line, expected_info): + info = CGroupInfo.from_line(line) + + if expected_info is None: + assert info is None, line + else: + for attr in ('id', 'groups', 'path', 'container_id', 'controllers', 'pod_id'): + assert getattr(info, attr) == getattr(expected_info, attr), line + + +@pytest.mark.parametrize( + 'file_contents,container_id', + ( + # Docker file + ( + """ +13:name=systemd:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 +12:pids:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 +11:hugetlb:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 +10:net_prio:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 +9:perf_event:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 +8:net_cls:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 +7:freezer:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 +6:devices:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 +5:memory:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 +4:blkio:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 +3:cpuacct:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 +2:cpu:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 
+1:cpuset:/docker/3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860 + """, + '3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + ), + + # k8s file + ( + """ +11:perf_event:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1 +10:pids:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1 +9:memory:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1 +8:cpu,cpuacct:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1 +7:blkio:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1 +6:cpuset:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1 +5:devices:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1 +4:freezer:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1 +3:net_cls,net_prio:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1 +2:hugetlb:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1 +1:name=systemd:/kubepods/test/pod3d274242-8ee0-11e9-a8a6-1e68d864ef1a/3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1 + """, + '3e74d3fd9db4c9dd921ae05c2502fb984d0cde1b36e581b13f79c639da4518a1', + ), + + # ECS file + ( + """ +9:perf_event:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce +8:memory:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce +7:hugetlb:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce +6:freezer:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce +5:devices:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce +4:cpuset:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce +3:cpuacct:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce +2:cpu:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce +1:blkio:/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce + """, + '38fac3e99302b3622be089dd41e7ccf38aff368a86cc339972075136ee2710ce', + ), + + # Fargate file + ( + """ +11:hugetlb:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da +10:pids:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da +9:cpuset:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da +8:net_cls,net_prio:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da 
+7:cpu,cpuacct:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da +6:perf_event:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da +5:freezer:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da +4:devices:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da +3:blkio:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da +2:memory:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da +1:name=systemd:/ecs/55091c13-b8cf-4801-b527-f4601742204d/432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da + """, + '432624d2150b349fe35ba397284dea788c2bf66b885d14dfc1569b01890ca7da', + ), + + # Linux non-containerized file + ( + """ +11:blkio:/user.slice/user-0.slice/session-14.scope +10:memory:/user.slice/user-0.slice/session-14.scope +9:hugetlb:/ +8:cpuset:/ +7:pids:/user.slice/user-0.slice/session-14.scope +6:freezer:/ +5:net_cls,net_prio:/ +4:perf_event:/ +3:cpu,cpuacct:/user.slice/user-0.slice/session-14.scope +2:devices:/user.slice/user-0.slice/session-14.scope +1:name=systemd:/user.slice/user-0.slice/session-14.scope + """, + None, + ), + + # Empty file + ( + '', + None, + ), + + # Missing file + ( + None, + None, + ) + ) +) +def test_get_container_info(file_contents, container_id): + with get_mock_open(read_data=file_contents) as mock_open: + # simulate the file not being found + if file_contents is None: + mock_open.side_effect = FileNotFoundError + + info = get_container_info() + + if container_id is None: + assert info is None + else: + assert info.container_id == container_id + + mock_open.assert_called_once_with('/proc/self/cgroup', mode='r') + + +@pytest.mark.parametrize( + 'pid,file_name', + ( + ('13', '/proc/13/cgroup'), + (13, '/proc/13/cgroup'), + ('self', '/proc/self/cgroup'), + ) +) +def test_get_container_info_with_pid(pid, file_name): + # DEV: We need at least 1 line for the loop to call `CGroupInfo.from_line` + with get_mock_open(read_data='\r\n') as mock_open: + assert get_container_info(pid=pid) is None + + mock_open.assert_called_once_with(file_name, mode='r') + + +@mock.patch('ddtrace.internal.runtime.container.CGroupInfo.from_line') +@mock.patch('ddtrace.internal.runtime.container.log') +def test_get_container_info_exception(mock_log, mock_from_line): + exception = Exception() + mock_from_line.side_effect = exception + + # DEV: We need at least 1 line for the loop to call `CGroupInfo.from_line` + with get_mock_open(read_data='\r\n') as mock_open: + # Assert calling `get_container_info()` does not bubble up the exception + assert get_container_info() is None + + # Assert we called everything we expected + mock_from_line.assert_called_once_with('\r\n') + mock_open.assert_called_once_with('/proc/self/cgroup', mode='r') + + # Ensure we logged the exception + mock_log.debug.assert_called_once_with('Failed to parse cgroup file for pid %r', 'self', exc_info=True) diff --git a/tests/internal/runtime/test_metric_collectors.py b/tests/internal/runtime/test_metric_collectors.py new file mode 100644 index 0000000000..1fd3e705a7 --- /dev/null +++ b/tests/internal/runtime/test_metric_collectors.py @@ -0,0 +1,63 @@ +from ddtrace.internal.runtime.metric_collectors import ( + RuntimeMetricCollector, + GCRuntimeMetricCollector, + PSUtilRuntimeMetricCollector, +) + 
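+# DEV: These constants are the sets of metric keys each collector can emit;
+#      as test_metrics.py below demonstrates, `collect(keys)` returns only the
+#      requested subset of keys.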
+from ddtrace.internal.runtime.constants import (
+    GC_COUNT_GEN0,
+    GC_RUNTIME_METRICS,
+    PSUTIL_RUNTIME_METRICS,
+)
+from ...base import BaseTestCase
+
+
+class TestRuntimeMetricCollector(BaseTestCase):
+    def test_failed_module_load_collect(self):
+        """Collecting from a collector that failed to load its required
+        modules should gracefully return no metrics.
+        """
+        class A(RuntimeMetricCollector):
+            required_modules = ['moduleshouldnotexist']
+
+            def collect_fn(self, keys):
+                return {'k': 'v'}
+
+        self.assertIsNotNone(A().collect(), 'collect should not return None')
+
+
+class TestPSUtilRuntimeMetricCollector(BaseTestCase):
+    def test_metrics(self):
+        collector = PSUtilRuntimeMetricCollector()
+        for (key, value) in collector.collect(PSUTIL_RUNTIME_METRICS):
+            self.assertIsNotNone(value)
+
+
+class TestGCRuntimeMetricCollector(BaseTestCase):
+    def test_metrics(self):
+        collector = GCRuntimeMetricCollector()
+        for (key, value) in collector.collect(GC_RUNTIME_METRICS):
+            self.assertIsNotNone(value)
+
+    def test_gen1_changes(self):
+        # disable gc
+        import gc
+        gc.disable()
+
+        # start collector and get current gc counts
+        collector = GCRuntimeMetricCollector()
+        gc.collect()
+        start = gc.get_count()
+
+        # create reference
+        a = []
+        collected = collector.collect([GC_COUNT_GEN0])
+        self.assertGreater(collected[0][1], start[0])
+
+        # delete reference and collect
+        del a
+        gc.collect()
+        collected_after = collector.collect([GC_COUNT_GEN0])
+        assert len(collected_after) == 1
+        assert collected_after[0][0] == 'runtime.python.gc.count.gen0'
+        assert isinstance(collected_after[0][1], int)
+        # re-enable gc so later tests are unaffected
+        gc.enable()
diff --git a/tests/internal/runtime/test_metrics.py b/tests/internal/runtime/test_metrics.py new file mode 100644 index 0000000000..0b6dd5a4cb --- /dev/null +++ b/tests/internal/runtime/test_metrics.py @@ -0,0 +1,118 @@
+import mock
+from ddtrace.internal.runtime.collector import ValueCollector
+
+from ...base import BaseTestCase
+
+
+def mocked_collector(mock_collect, **kwargs):
+    collector = ValueCollector(**kwargs)
+    collector.collect_fn = mock_collect
+    return collector
+
+
+class TestValueCollector(BaseTestCase):
+
+    def test_default_usage(self):
+        mock_collect = mock.MagicMock()
+        mock_collect.side_effect = lambda k: [
+            ('key1', 'value1'),
+            ('key2', 'value2'),
+        ]
+
+        vc = mocked_collector(mock_collect)
+
+        self.assertEqual(vc.collect(keys=set(['key1'])), [
+            ('key1', 'value1'),
+        ])
+        mock_collect.assert_called_once()
+        mock_collect.assert_called_with(set(['key1']))
+
+        self.assertEqual(mock_collect.call_count, 1,
+                         'Collector is not periodic by default')
+
+    def test_enabled(self):
+        collect = mock.MagicMock()
+        vc = mocked_collector(collect, enabled=False)
+        collect.assert_not_called()
+        vc.collect()
+        collect.assert_not_called()
+
+    def test_periodic(self):
+        collect = mock.MagicMock()
+        vc = mocked_collector(collect, periodic=True)
+        vc.collect()
+        self.assertEqual(collect.call_count, 1)
+        vc.collect()
+        self.assertEqual(collect.call_count, 2)
+
+    def test_not_periodic(self):
+        collect = mock.MagicMock()
+        vc = mocked_collector(collect)
+        collect.assert_not_called()
+        vc.collect()
+        self.assertEqual(collect.call_count, 1)
+        vc.collect()
+        self.assertEqual(collect.call_count, 1)
+        vc.collect()
+        self.assertEqual(collect.call_count, 1)
+
+    def test_required_module(self):
+        mock_module = mock.MagicMock()
+        mock_module.fn.side_effect = lambda: 'test'
+        with self.override_sys_modules(dict(A=mock_module)):
+            class AVC(ValueCollector):
+                required_modules = ['A']
+
+                def collect_fn(self, keys):
+                    a = self.modules.get('A')
+                    a.fn()
+
+            vc = AVC()
+            vc.collect()
+            mock_module.fn.assert_called_once()
+
+    def test_required_module_not_installed(self):
+        collect = mock.MagicMock()
+        with mock.patch('ddtrace.internal.runtime.collector.log') as log_mock:
+            # Should log a warning (tested below)
+            vc = mocked_collector(collect, required_modules=['moduleshouldnotexist'])
+
+            # Collect should not be called as the collector should be disabled.
+            collect.assert_not_called()
+            vc.collect()
+            collect.assert_not_called()
+
+            calls = [
+                mock.call(
+                    'Could not import module "%s" for %s. Disabling collector.',
+                    'moduleshouldnotexist', vc,
+                )
+            ]
+            log_mock.warning.assert_has_calls(calls)
+
+    def test_collected_values(self):
+        class V(ValueCollector):
+            i = 0
+
+            def collect_fn(self, keys):
+                self.i += 1
+                return [('i', self.i)]
+
+        vc = V()
+        self.assertEqual(vc.collect(), [('i', 1)])
+        self.assertEqual(vc.collect(), [('i', 1)])
+        self.assertEqual(vc.collect(), [('i', 1)])
+
+    def test_collected_values_periodic(self):
+        class V(ValueCollector):
+            periodic = True
+            i = 0
+
+            def collect_fn(self, keys):
+                self.i += 1
+                return [('i', self.i)]
+
+        vc = V()
+        self.assertEqual(vc.collect(), [('i', 1)])
+        self.assertEqual(vc.collect(), [('i', 2)])
+        self.assertEqual(vc.collect(), [('i', 3)])
diff --git a/tests/internal/runtime/test_runtime_metrics.py b/tests/internal/runtime/test_runtime_metrics.py new file mode 100644 index 0000000000..e95a7fb0fd --- /dev/null +++ b/tests/internal/runtime/test_runtime_metrics.py @@ -0,0 +1,119 @@
+import time
+
+import mock
+
+from ddtrace.internal.runtime.runtime_metrics import (
+    RuntimeTags,
+    RuntimeMetrics,
+)
+from ddtrace.internal.runtime.constants import (
+    DEFAULT_RUNTIME_METRICS,
+    GC_COUNT_GEN0,
+    SERVICE,
+    ENV
+)
+
+from ...base import (
+    BaseTestCase,
+    BaseTracerTestCase,
+)
+
+
+class TestRuntimeTags(BaseTracerTestCase):
+    def test_all_tags(self):
+        with self.override_global_tracer():
+            with self.trace('test', service='test'):
+                tags = set([k for (k, v) in RuntimeTags()])
+                assert SERVICE in tags
+                # no env set by default
+                assert ENV not in tags
+
+    def test_one_tag(self):
+        with self.override_global_tracer():
+            with self.trace('test', service='test'):
+                tags = [k for (k, v) in RuntimeTags(enabled=[SERVICE])]
+                self.assertEqual(tags, [SERVICE])
+
+    def test_env_tag(self):
+        def filter_only_env_tags(tags):
+            return [
+                (k, v)
+                for (k, v) in tags
+                if k == 'env'
+            ]
+
+        with self.override_global_tracer():
+            # first without env tag set in tracer
+            with self.trace('first-test', service='test'):
+                tags = filter_only_env_tags(RuntimeTags())
+                assert tags == []
+
+            # then with an env tag set
+            self.tracer.set_tags({'env': 'tests.dog'})
+            with self.trace('second-test', service='test'):
+                tags = filter_only_env_tags(RuntimeTags())
+                assert tags == [('env', 'tests.dog')]
+
+            # check whether updating env works
+            self.tracer.set_tags({'env': 'staging.dog'})
+            with self.trace('third-test', service='test'):
+                tags = filter_only_env_tags(RuntimeTags())
+                assert tags == [('env', 'staging.dog')]
+
+
+class TestRuntimeMetrics(BaseTestCase):
+    def test_all_metrics(self):
+        metrics = set([k for (k, v) in RuntimeMetrics()])
+        self.assertSetEqual(metrics, DEFAULT_RUNTIME_METRICS)
+
+    def test_one_metric(self):
+        metrics = [k for (k, v) in RuntimeMetrics(enabled=[GC_COUNT_GEN0])]
+        self.assertEqual(metrics, [GC_COUNT_GEN0])
+
+
+class TestRuntimeWorker(BaseTracerTestCase):
+    def test_tracer_metrics(self):
+        # Mock socket.socket to hijack the dogstatsd socket
+
with mock.patch('socket.socket'): + # configure tracer for runtime metrics + self.tracer._RUNTIME_METRICS_INTERVAL = 1. / 4 + self.tracer.configure(collect_metrics=True) + self.tracer.set_tags({'env': 'tests.dog'}) + + with self.override_global_tracer(self.tracer): + root = self.start_span('parent', service='parent') + context = root.context + self.start_span('child', service='child', child_of=context) + + time.sleep(self.tracer._RUNTIME_METRICS_INTERVAL * 2) + + # Get the socket before it disappears + statsd_socket = self.tracer._dogstatsd_client.socket + # now stop collection + self.tracer.configure(collect_metrics=False) + + received = [ + s.args[0].decode('utf-8') for s in statsd_socket.send.mock_calls + ] + + # we expect more than one flush since it is also called on shutdown + assert len(received) > 1 + + # expect all metrics in default set are received + # DEV: dogstatsd gauges in form "{metric_name}:{metric_value}|g#t{tag_name}:{tag_value},..." + self.assertSetEqual( + set([gauge.split(':')[0] + for packet in received + for gauge in packet.split('\n')]), + DEFAULT_RUNTIME_METRICS + ) + + # check to last set of metrics returned to confirm tags were set + for gauge in received[-len(DEFAULT_RUNTIME_METRICS):]: + self.assertRegexpMatches(gauge, 'service:parent') + self.assertRegexpMatches(gauge, 'service:child') + self.assertRegexpMatches(gauge, 'env:tests.dog') + self.assertRegexpMatches(gauge, 'lang_interpreter:') + self.assertRegexpMatches(gauge, 'lang_version:') + self.assertRegexpMatches(gauge, 'lang:') + self.assertRegexpMatches(gauge, 'tracer_version:') diff --git a/tests/internal/runtime/test_tag_collectors.py b/tests/internal/runtime/test_tag_collectors.py new file mode 100644 index 0000000000..2f6ab33d82 --- /dev/null +++ b/tests/internal/runtime/test_tag_collectors.py @@ -0,0 +1,8 @@ +from ddtrace.internal.runtime import constants +from ddtrace.internal.runtime import tag_collectors + + +def test_values(): + ptc = tag_collectors.PlatformTagCollector() + values = dict(ptc.collect()) + assert constants.PLATFORM_TAGS == set(values.keys()) diff --git a/tests/internal/runtime/utils.py b/tests/internal/runtime/utils.py new file mode 100644 index 0000000000..9f70e6c05e --- /dev/null +++ b/tests/internal/runtime/utils.py @@ -0,0 +1,72 @@ +import itertools + +from ddtrace.internal.runtime.container import CGroupInfo + + +def cgroup_line_valid_test_cases(): + controllers = [ + ['name=systemd'], + ['pids'], + ['cpu', 'cpuacct'], + ['perf_event'], + ['net_cls', 'net_prio'], + ] + + ids = [str(i) for i in range(10)] + + container_ids = [ + '3726184226f5d3147c25fdeab5b60097e378e8a720503a5e19ecfdf29f869860', + '37261842-26f5-d314-7c25-fdeab5b60097', + '37261842_26f5_d314_7c25_fdeab5b60097', + ] + + pod_ids = [ + '3d274242-8ee0-11e9-a8a6-1e68d864ef1a', + '3d274242_8ee0_11e9_a8a6_1e68d864ef1a', + ] + + paths = [ + # Docker + '/docker/{0}', + '/docker/{0}.scope', + + # k8s + '/kubepods/test/pod{1}/{0}', + '/kubepods/test/pod{1}.slice/{0}', + '/kubepods/test/pod{1}/{0}.scope', + '/kubepods/test/pod{1}.slice/{0}.scope', + + # ECS + '/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/{0}', + '/ecs/test-ecs-classic/5a0d5ceddf6c44c1928d367a815d890f/{0}.scope', + + # Fargate + '/ecs/55091c13-b8cf-4801-b527-f4601742204d/{0}', + '/ecs/55091c13-b8cf-4801-b527-f4601742204d/{0}.scope', + + # Linux non-containerized + '/user.slice/user-0.slice/session-83.scope', + ] + + valid_test_cases = dict( + ( + ':'.join([id, ','.join(groups), path.format(container_id, pod_id)]), + CGroupInfo( + id=id, + 
groups=','.join(groups),
+                path=path.format(container_id, pod_id),
+                controllers=groups,
+                container_id=container_id if '{0}' in path else None,
+                pod_id=pod_id if '{1}' in path else None,
+            )
+        )
+        for path, id, groups, container_id, pod_id
+        in itertools.product(paths, ids, controllers, container_ids, pod_ids)
+    )
+    # Dedupe test cases
+    valid_test_cases = list(valid_test_cases.items())
+
+    # Assert here to ensure we are always testing the number of cases we expect
+    assert len(valid_test_cases) == 2150
+
+    return valid_test_cases
diff --git a/tests/internal/test_context_manager.py b/tests/internal/test_context_manager.py new file mode 100644 index 0000000000..01f5c1305f --- /dev/null +++ b/tests/internal/test_context_manager.py @@ -0,0 +1,60 @@
+import threading
+
+from ddtrace.context import Context
+from ddtrace.internal.context_manager import DefaultContextManager
+from ddtrace.span import Span
+
+from ..base import BaseTestCase
+
+
+class TestDefaultContextManager(BaseTestCase):
+    """
+    Ensures that a ``ContextManager`` makes the Context
+    local to each thread or task.
+    """
+    def test_get_or_create(self):
+        # asking the Context multiple times should
+        # always return the same instance
+        ctxm = DefaultContextManager()
+        assert ctxm.get() == ctxm.get()
+
+    def test_set_context(self):
+        # the Context can be set in the current Thread
+        ctx = Context()
+        ctxm = DefaultContextManager()
+        assert ctxm.get() is not ctx
+
+        ctxm.set(ctx)
+        assert ctxm.get() is ctx
+
+    def test_multiple_threads_multiple_context(self):
+        # each thread should have its own Context
+        ctxm = DefaultContextManager()
+
+        def _fill_ctx():
+            ctx = ctxm.get()
+            span = Span(tracer=None, name='fake_span')
+            ctx.add_span(span)
+            assert 1 == len(ctx._trace)
+
+        threads = [threading.Thread(target=_fill_ctx) for _ in range(100)]
+
+        for t in threads:
+            t.daemon = True
+            t.start()
+
+        for t in threads:
+            t.join()
+
+        # the main instance should have an empty Context
+        # because it has not been used in this thread
+        ctx = ctxm.get()
+        assert 0 == len(ctx._trace)
+
+    def test_reset_context_manager(self):
+        ctxm = DefaultContextManager()
+        ctx = ctxm.get()
+
+        # new context manager should not share same context
+        ctxm = DefaultContextManager()
+        assert ctxm.get() is not ctx
diff --git a/tests/internal/test_hostname.py b/tests/internal/test_hostname.py new file mode 100644 index 0000000000..6ef048e1f4 --- /dev/null +++ b/tests/internal/test_hostname.py @@ -0,0 +1,14 @@
+import mock
+
+from ddtrace.internal.hostname import get_hostname
+
+
+@mock.patch('socket.gethostname')
+def test_get_hostname(socket_gethostname):
+    # Test that `get_hostname()` just returns `socket.gethostname`
+    socket_gethostname.return_value = 'test-hostname'
+    assert get_hostname() == 'test-hostname'
+
+    # Change the value returned by `socket.gethostname` to test the cache
+    socket_gethostname.return_value = 'new-hostname'
+    assert get_hostname() == 'test-hostname'
diff --git a/tests/internal/test_logger.py b/tests/internal/test_logger.py new file mode 100644 index 0000000000..206b7bb76d --- /dev/null +++ b/tests/internal/test_logger.py @@ -0,0 +1,335 @@
+import logging
+import mock
+
+from ddtrace.internal.logger import DDLogger, get_logger
+
+from ..base import BaseTestCase
+
+ALL_LEVEL_NAMES = ('debug', 'info', 'warn', 'warning', 'error', 'exception', 'critical', 'fatal')
+
+
+class DDLoggerTestCase(BaseTestCase):
+    def setUp(self):
+        super(DDLoggerTestCase, self).setUp()
+
+        self.root = logging.root
+        self.manager = self.root.manager
+
+    def 
tearDown(self): + # Weeee, forget all existing loggers + logging.Logger.manager.loggerDict.clear() + self.assertEqual(logging.Logger.manager.loggerDict, dict()) + + self.root = None + self.manager = None + + super(DDLoggerTestCase, self).tearDown() + + def _make_record( + self, logger, msg='test', args=(), level=logging.INFO, + fn='module.py', lno=5, exc_info=(None, None, None), func=None, extra=None + ): + return logger.makeRecord(logger.name, level, fn, lno, msg, args, exc_info, func, extra) + + @mock.patch('ddtrace.internal.logger.DDLogger.handle') + def assert_log_records(self, log, expected_levels, handle): + for name in ALL_LEVEL_NAMES: + method = getattr(log, name) + method('test') + + records = [args[0][0] for args in handle.call_args_list] + for record in records: + self.assertIsInstance(record, logging.LogRecord) + self.assertEqual(record.name, 'test.logger') + self.assertEqual(record.msg, 'test') + + levels = [r.levelname for r in records] + self.assertEqual(levels, expected_levels) + + def test_get_logger(self): + """ + When using `get_logger` to get a logger + When the logger does not exist + We create a new DDLogger + When the logger exists + We return the expected logger + When a different logger is requested + We return a new DDLogger + """ + # Assert the logger doesn't already exist + self.assertNotIn('test.logger', self.manager.loggerDict) + + # Fetch a new logger + log = get_logger('test.logger') + self.assertEqual(log.name, 'test.logger') + self.assertEqual(log.level, logging.NOTSET) + + # Ensure it is a DDLogger + self.assertIsInstance(log, DDLogger) + # Make sure it is stored in all the places we expect + self.assertEqual(self.manager.getLogger('test.logger'), log) + self.assertEqual(self.manager.loggerDict['test.logger'], log) + + # Fetch the same logger + same_log = get_logger('test.logger') + # Assert we got the same logger + self.assertEqual(log, same_log) + + # Fetch a different logger + new_log = get_logger('new.test.logger') + # Make sure we didn't get the same one + self.assertNotEqual(log, new_log) + + def test_get_logger_parents(self): + """ + When using `get_logger` to get a logger + We appropriately assign parent loggers + + DEV: This test case is to ensure we are calling `manager._fixupParents(logger)` + """ + # Fetch a new logger + test_log = get_logger('test') + self.assertEqual(test_log.parent, self.root) + + # Fetch a new child log + # Auto-associate with parent `test` logger + child_log = get_logger('test.child') + self.assertEqual(child_log.parent, test_log) + + # Deep child + deep_log = get_logger('test.child.logger.from.test.case') + self.assertEqual(deep_log.parent, child_log) + + def test_logger_init(self): + """ + When creating a new DDLogger + Has the same interface as logging.Logger + Configures a defaultdict for buckets + Properly configures the rate limit + """ + # Create a logger + log = DDLogger('test.logger') + + # Ensure we set the name and use default log level + self.assertEqual(log.name, 'test.logger') + self.assertEqual(log.level, logging.NOTSET) + + # Assert DDLogger default properties + self.assertIsInstance(log.buckets, dict) + self.assertEqual(log.rate_limit, 60) + + # Assert manager and parent + # DEV: Parent is `None` because `manager._findParents()` doesn't get called + # unless we use `get_logger` (this is the same behavior as `logging.getLogger` and `Logger('name')`) + self.assertEqual(log.manager, self.manager) + self.assertIsNone(log.parent) + + # Override rate limit from environment variable + with 
self.override_env(dict(DD_LOGGING_RATE_LIMIT='10')):
+            log = DDLogger('test.logger')
+            self.assertEqual(log.rate_limit, 10)
+
+        # Set specific log level
+        log = DDLogger('test.logger', level=logging.DEBUG)
+        self.assertEqual(log.level, logging.DEBUG)
+
+    def test_logger_log(self):
+        """
+        When calling `DDLogger` log methods
+            We call `DDLogger.handle` with the expected log record
+        """
+        log = get_logger('test.logger')
+
+        # -- NOTSET
+        # By default no level is set so we only get warn, error, and critical messages
+        self.assertEqual(log.level, logging.NOTSET)
+        # `log.warn`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
+        self.assert_log_records(log, ['WARNING', 'WARNING', 'ERROR', 'ERROR', 'CRITICAL', 'CRITICAL'])
+
+        # -- CRITICAL
+        log.setLevel(logging.CRITICAL)
+        # `log.critical`, `log.fatal`
+        self.assert_log_records(log, ['CRITICAL', 'CRITICAL'])
+
+        # -- ERROR
+        log.setLevel(logging.ERROR)
+        # `log.error`, `log.exception`, `log.critical`, `log.fatal`
+        self.assert_log_records(log, ['ERROR', 'ERROR', 'CRITICAL', 'CRITICAL'])
+
+        # -- WARN
+        log.setLevel(logging.WARN)
+        # `log.warn`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
+        self.assert_log_records(log, ['WARNING', 'WARNING', 'ERROR', 'ERROR', 'CRITICAL', 'CRITICAL'])
+
+        # -- INFO
+        log.setLevel(logging.INFO)
+        # `log.info`, `log.warn`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
+        self.assert_log_records(log, ['INFO', 'WARNING', 'WARNING', 'ERROR', 'ERROR', 'CRITICAL', 'CRITICAL'])
+
+        # -- DEBUG
+        log.setLevel(logging.DEBUG)
+        # `log.debug`, `log.info`, `log.warn`, `log.warning`, `log.error`, `log.exception`, `log.critical`, `log.fatal`
+        self.assert_log_records(log, ['DEBUG', 'INFO', 'WARNING', 'WARNING', 'ERROR', 'ERROR', 'CRITICAL', 'CRITICAL'])
+
+    @mock.patch('logging.Logger.handle')
+    def test_logger_handle_no_limit(self, base_handle):
+        """
+        Calling `DDLogger.handle`
+            When no rate limit is set
+                Always calls the base `Logger.handle`
+        """
+        # Configure an INFO logger with no rate limit
+        log = get_logger('test.logger')
+        log.setLevel(logging.INFO)
+        log.rate_limit = 0
+
+        # Log a bunch of times very quickly (this is fast)
+        for _ in range(1000):
+            log.info('test')
+
+        # Assert that we did not perform any rate limiting
+        self.assertEqual(base_handle.call_count, 1000)
+
+        # Our buckets are empty
+        self.assertEqual(log.buckets, dict())
+
+    @mock.patch('logging.Logger.handle')
+    def test_logger_handle_bucket(self, base_handle):
+        """
+        When calling `DDLogger.handle`
+            With a record
+                We pass it to the base `Logger.handle`
+                We create a bucket for tracking
+        """
+        log = get_logger('test.logger')
+
+        # Create log record and handle it
+        record = self._make_record(log)
+        log.handle(record)
+
+        # We passed to base Logger.handle
+        base_handle.assert_called_once_with(record)
+
+        # We added a bucket entry for this record
+        key = (record.name, record.levelno, record.pathname, record.lineno)
+        logging_bucket = log.buckets.get(key)
+        self.assertIsInstance(logging_bucket, DDLogger.LoggingBucket)
+
+        # The bucket entry is correct
+        expected_bucket = int(record.created / log.rate_limit)
+        self.assertEqual(logging_bucket.bucket, expected_bucket)
+        self.assertEqual(logging_bucket.skipped, 0)
+
+    @mock.patch('logging.Logger.handle')
+    def test_logger_handle_bucket_limited(self, base_handle):
+        """
+        When calling `DDLogger.handle`
+            With multiple records in a single time frame
+                We pass only the first to the base `Logger.handle`
+                We keep track of the number skipped
+        """
+        log = get_logger('test.logger')
+
+        # Create log record and handle it
+        first_record = self._make_record(log, msg='first')
+        log.handle(first_record)
+
+        for _ in range(100):
+            record = self._make_record(log)
+            # DEV: Use the same timestamp as `first_record` to ensure we are in the same bucket
+            record.created = first_record.created
+            log.handle(record)
+
+        # We passed to base Logger.handle
+        base_handle.assert_called_once_with(first_record)
+
+        # We added a bucket entry for these records
+        key = (record.name, record.levelno, record.pathname, record.lineno)
+        logging_bucket = log.buckets.get(key)
+
+        # The bucket entry is correct
+        expected_bucket = int(first_record.created / log.rate_limit)
+        self.assertEqual(logging_bucket.bucket, expected_bucket)
+        self.assertEqual(logging_bucket.skipped, 100)
+
+    @mock.patch('logging.Logger.handle')
+    def test_logger_handle_bucket_skipped_msg(self, base_handle):
+        """
+        When calling `DDLogger.handle`
+            When a bucket exists for a previous time frame
+                We pass only the record to the base `Logger.handle`
+                We update the record message to include the number of skipped messages
+        """
+        log = get_logger('test.logger')
+
+        # Create log record to handle
+        original_msg = 'hello %s'
+        original_args = (1, )
+        record = self._make_record(log, msg=original_msg, args=(1, ))
+
+        # Create a bucket entry for this record
+        key = (record.name, record.levelno, record.pathname, record.lineno)
+        bucket = int(record.created / log.rate_limit)
+        # We want the time bucket to be for an older bucket
+        log.buckets[key] = DDLogger.LoggingBucket(bucket=bucket - 1, skipped=20)
+
+        # Handle our record
+        log.handle(record)
+
+        # We passed to base Logger.handle
+        base_handle.assert_called_once_with(record)
+
+        self.assertEqual(record.msg, original_msg + ', %s additional messages skipped')
+        self.assertEqual(record.args, original_args + (20, ))
+        self.assertEqual(record.getMessage(), 'hello 1, 20 additional messages skipped')
+
+    def test_logger_handle_bucket_key(self):
+        """
+        When calling `DDLogger.handle`
+            With different log messages
+                We use different buckets to limit them
+        """
+        log = get_logger('test.logger')
+
+        # DEV: This function is inlined in `logger.py`
+        def get_key(record):
+            return (record.name, record.levelno, record.pathname, record.lineno)
+
+        # Same record signature but different message
+        # DEV: These count against the same bucket
+        record1 = self._make_record(log, msg='record 1')
+        record2 = self._make_record(log, msg='record 2')
+
+        # Different line number (default is `5`)
+        record3 = self._make_record(log, lno=10)
+
+        # Different pathnames (default is `module.py`)
+        record4 = self._make_record(log, fn='log.py')
+
+        # Different level (default is `logging.INFO`)
+        record5 = self._make_record(log, level=logging.WARN)
+
+        # Different logger name
+        record6 = self._make_record(log)
+        record6.name = 'test.logger2'
+
+        # Log all of our records
+        all_records = (record1, record2, record3, record4, record5, record6)
+        for record in all_records:
+            log.handle(record)
+
+        buckets = log.buckets
+        # We have 6 records but only end up with 5 buckets
+        self.assertEqual(len(buckets), 5)
+
+        # Assert bucket created for record1 and record2
+        bucket1 = buckets[get_key(record1)]
+        self.assertEqual(bucket1.skipped, 1)
+
+        bucket2 = buckets[get_key(record2)]
+        self.assertEqual(bucket1, bucket2)
+
+        # Assert bucket for the remaining records
+        # None of these other messages should have been grouped together
+        for record in (record3, record4, record5, record6):
+            bucket = buckets[get_key(record)]
+            self.assertEqual(bucket.skipped, 0)
diff --git a/tests/internal/test_rate_limiter.py b/tests/internal/test_rate_limiter.py new file mode 100644 index 0000000000..3479025bd0 --- /dev/null +++ b/tests/internal/test_rate_limiter.py @@ -0,0 +1,191 @@
+from __future__ import division
+import mock
+
+import pytest
+
+from ddtrace.internal.rate_limiter import RateLimiter
+from ddtrace.vendor import monotonic
+
+
+def test_rate_limiter_init():
+    limiter = RateLimiter(rate_limit=100)
+    assert limiter.rate_limit == 100
+    assert limiter.tokens == 100
+    assert limiter.max_tokens == 100
+    assert limiter.last_update <= monotonic.monotonic()
+
+
+def test_rate_limiter_rate_limit_0():
+    limiter = RateLimiter(rate_limit=0)
+    assert limiter.rate_limit == 0
+    assert limiter.tokens == 0
+    assert limiter.max_tokens == 0
+
+    now = monotonic.monotonic()
+    with mock.patch('ddtrace.vendor.monotonic.monotonic') as mock_time:
+        for i in range(10000):
+            # Make sure the time is different for every check
+            mock_time.return_value = now + i
+            assert limiter.is_allowed() is False
+
+
+def test_rate_limiter_rate_limit_negative():
+    limiter = RateLimiter(rate_limit=-1)
+    assert limiter.rate_limit == -1
+    assert limiter.tokens == -1
+    assert limiter.max_tokens == -1
+
+    now = monotonic.monotonic()
+    with mock.patch('ddtrace.vendor.monotonic.monotonic') as mock_time:
+        for i in range(10000):
+            # Make sure the time is different for every check
+            mock_time.return_value = now + i
+            assert limiter.is_allowed() is True
+
+
+@pytest.mark.parametrize('rate_limit', [1, 10, 50, 100, 500, 1000])
+def test_rate_limiter_is_allowed(rate_limit):
+    limiter = RateLimiter(rate_limit=rate_limit)
+
+    def check_limit():
+        # Up to the allowed limit is allowed
+        for _ in range(rate_limit):
+            assert limiter.is_allowed() is True
+
+        # Any over the limit is disallowed
+        for _ in range(1000):
+            assert limiter.is_allowed() is False
+
+    # Start time
+    now = monotonic.monotonic()
+
+    # Check the limit for 5 time frames
+    for i in range(5):
+        with mock.patch('ddtrace.vendor.monotonic.monotonic') as mock_time:
+            # Keep the same timeframe
+            mock_time.return_value = now + i
+
+            check_limit()
+
+
+def test_rate_limiter_is_allowed_large_gap():
+    limiter = RateLimiter(rate_limit=100)
+
+    # Start time
+    now = monotonic.monotonic()
+    with mock.patch('ddtrace.vendor.monotonic.monotonic') as mock_time:
+        # Keep the same timeframe
+        mock_time.return_value = now
+
+        for _ in range(100):
+            assert limiter.is_allowed() is True
+
+    # Large gap before next call to `is_allowed()`
+    with mock.patch('ddtrace.vendor.monotonic.monotonic') as mock_time:
+        mock_time.return_value = now + 100
+
+        for _ in range(100):
+            assert limiter.is_allowed() is True
+
+
+def test_rate_limiter_is_allowed_small_gaps():
+    limiter = RateLimiter(rate_limit=100)
+
+    # Start time
+    now = monotonic.monotonic()
+    gap = 1.0 / 100.0
+    # Keep incrementing by a gap to keep us at our rate limit
+    with mock.patch('ddtrace.vendor.monotonic.monotonic') as mock_time:
+        for i in range(10000):
+            # Keep the same timeframe
+            mock_time.return_value = now + (gap * i)
+
+            assert limiter.is_allowed() is True
+
+
+def test_rate_limiter_effective_rate_rates():
+    limiter = RateLimiter(rate_limit=100)
+
+    # Static rate limit window
+    starting_window = monotonic.monotonic()
+    with mock.patch('ddtrace.vendor.monotonic.monotonic') as mock_time:
+        mock_time.return_value = starting_window
+
+        for _ in range(100):
+            assert limiter.is_allowed() is True
+            assert limiter.effective_rate == 1.0
+            assert limiter.current_window == starting_window
+
+        for i in range(1, 101):
+            assert limiter.is_allowed() is False
+            rate = 100 / (100 + i)
+            assert limiter.effective_rate == rate
+            assert limiter.current_window == starting_window
+
+    # The first window allowed 100 of 200 calls, so it carries over as the previous rate
+    prev_rate = 0.5
+    with mock.patch('ddtrace.vendor.monotonic.monotonic') as mock_time:
+        window = starting_window + 1.0
+        mock_time.return_value = window
+
+        for i in range(100):
+            assert limiter.is_allowed() is True
+            assert limiter.effective_rate == 0.75
+            assert limiter.current_window == window
+
+        for i in range(1, 101):
+            assert limiter.is_allowed() is False
+            rate = 100 / (100 + i)
+            assert limiter.effective_rate == (rate + prev_rate) / 2
+            assert limiter.current_window == window
+
+
+def test_rate_limiter_effective_rate_starting_rate():
+    limiter = RateLimiter(rate_limit=1)
+
+    now = monotonic.monotonic()
+    with mock.patch('ddtrace.vendor.monotonic.monotonic') as mock_time:
+        mock_time.return_value = now
+
+        # Default values
+        assert limiter.current_window == 0
+        assert limiter.prev_window_rate is None
+
+        # Accessing the effective rate doesn't change anything
+        assert limiter.effective_rate == 1.0
+        assert limiter.current_window == 0
+        assert limiter.prev_window_rate is None
+
+        # Calling `.is_allowed()` updates the values
+        assert limiter.is_allowed() is True
+        assert limiter.effective_rate == 1.0
+        assert limiter.current_window == now
+        assert limiter.prev_window_rate is None
+
+        # Gap of 0.9999 seconds, same window
+        mock_time.return_value = now + 0.9999
+        assert limiter.is_allowed() is False
+        # DEV: We have rate_limit=1 set
+        assert limiter.effective_rate == 0.5
+        assert limiter.current_window == now
+        assert limiter.prev_window_rate is None
+
+        # Gap of 1.0 seconds, new window
+        mock_time.return_value = now + 1.0
+        assert limiter.is_allowed() is True
+        assert limiter.effective_rate == 0.75
+        assert limiter.current_window == (now + 1.0)
+        assert limiter.prev_window_rate == 0.5
+
+        # Gap of 1.9999 seconds, same window
+        mock_time.return_value = now + 1.9999
+        assert limiter.is_allowed() is False
+        assert limiter.effective_rate == 0.5
+        assert limiter.current_window == (now + 1.0)  # Same as old window
+        assert limiter.prev_window_rate == 0.5
+
+        # Large gap of 100 seconds, new window
+        mock_time.return_value = now + 100.0
+        assert limiter.is_allowed() is True
+        assert limiter.effective_rate == 0.75
+        assert limiter.current_window == (now + 100.0)
+        assert limiter.prev_window_rate == 0.5
diff --git a/tests/internal/test_writer.py b/tests/internal/test_writer.py new file mode 100644 index 0000000000..4ee0087dfc --- /dev/null +++ b/tests/internal/test_writer.py @@ -0,0 +1,219 @@
+import time
+
+import pytest
+
+import mock
+
+from ddtrace.span import Span
+from ddtrace.api import API
+from ddtrace.internal.writer import AgentWriter, Q, Empty
+from ..base import BaseTestCase
+
+
+class RemoveAllFilter:
+    def __init__(self):
+        self.filtered_traces = 0
+
+    def process_trace(self, trace):
+        self.filtered_traces += 1
+        return None
+
+
+class KeepAllFilter:
+    def __init__(self):
+        self.filtered_traces = 0
+
+    def process_trace(self, trace):
+        self.filtered_traces += 1
+        return trace
+
+
+class AddTagFilter:
+    def __init__(self, tag_name):
+        self.tag_name = tag_name
+        self.filtered_traces = 0
+
+    def process_trace(self, trace):
+        self.filtered_traces += 1
+        for span in trace:
+            span.set_tag(self.tag_name, "A value")
+        return trace
+
+
+class DummyAPI(API):
+    def __init__(self):
+        # Call API.__init__ to set up required properties
+        super(DummyAPI, 
self).__init__(hostname="localhost", port=8126) + + self.traces = [] + + def send_traces(self, traces): + responses = [] + for trace in traces: + self.traces.append(trace) + response = mock.Mock() + response.status = 200 + responses.append(response) + return responses + + +class FailingAPI(object): + @staticmethod + def send_traces(traces): + return [Exception("oops")] + + +class AgentWriterTests(BaseTestCase): + N_TRACES = 11 + + def create_worker(self, filters=None, api_class=DummyAPI, enable_stats=False): + with self.override_global_config(dict(health_metrics_enabled=enable_stats)): + self.dogstatsd = mock.Mock() + worker = AgentWriter(dogstatsd=self.dogstatsd, filters=filters) + worker._STATS_EVERY_INTERVAL = 1 + self.api = api_class() + worker.api = self.api + for i in range(self.N_TRACES): + worker.write( + [Span(tracer=None, name="name", trace_id=i, span_id=j, parent_id=j - 1 or None) for j in range(7)] + ) + worker.stop() + worker.join() + return worker + + def test_send_stats(self): + dogstatsd = mock.Mock() + worker = AgentWriter(dogstatsd=dogstatsd) + assert worker._send_stats is False + with self.override_global_config(dict(health_metrics_enabled=True)): + assert worker._send_stats is True + + worker = AgentWriter(dogstatsd=None) + assert worker._send_stats is False + with self.override_global_config(dict(health_metrics_enabled=True)): + assert worker._send_stats is False + + def test_filters_keep_all(self): + filtr = KeepAllFilter() + self.create_worker([filtr]) + self.assertEqual(len(self.api.traces), self.N_TRACES) + self.assertEqual(filtr.filtered_traces, self.N_TRACES) + + def test_filters_remove_all(self): + filtr = RemoveAllFilter() + self.create_worker([filtr]) + self.assertEqual(len(self.api.traces), 0) + self.assertEqual(filtr.filtered_traces, self.N_TRACES) + + def test_filters_add_tag(self): + tag_name = "Tag" + filtr = AddTagFilter(tag_name) + self.create_worker([filtr]) + self.assertEqual(len(self.api.traces), self.N_TRACES) + self.assertEqual(filtr.filtered_traces, self.N_TRACES) + for trace in self.api.traces: + for span in trace: + self.assertIsNotNone(span.get_tag(tag_name)) + + def test_filters_short_circuit(self): + filtr = KeepAllFilter() + filters = [RemoveAllFilter(), filtr] + self.create_worker(filters) + self.assertEqual(len(self.api.traces), 0) + self.assertEqual(filtr.filtered_traces, 0) + + def test_no_dogstats(self): + worker = self.create_worker() + assert worker._send_stats is False + assert [] == self.dogstatsd.gauge.mock_calls + + def test_dogstatsd(self): + self.create_worker(enable_stats=True) + assert [ + mock.call("datadog.tracer.heartbeat", 1), + mock.call("datadog.tracer.queue.max_length", 1000), + ] == self.dogstatsd.gauge.mock_calls + + assert [ + mock.call("datadog.tracer.flushes"), + mock.call("datadog.tracer.flush.traces.total", 11, tags=None), + mock.call("datadog.tracer.flush.spans.total", 77, tags=None), + mock.call("datadog.tracer.flush.traces_filtered.total", 0, tags=None), + mock.call("datadog.tracer.api.requests.total", 11, tags=None), + mock.call("datadog.tracer.api.errors.total", 0, tags=None), + mock.call("datadog.tracer.api.responses.total", 11, tags=["status:200"]), + mock.call("datadog.tracer.queue.dropped.traces", 0), + mock.call("datadog.tracer.queue.enqueued.traces", 11), + mock.call("datadog.tracer.queue.enqueued.spans", 77), + mock.call("datadog.tracer.shutdown"), + ] == self.dogstatsd.increment.mock_calls + + histogram_calls = [ + mock.call("datadog.tracer.flush.traces", 11, tags=None), + 
mock.call("datadog.tracer.flush.spans", 77, tags=None), + mock.call("datadog.tracer.flush.traces_filtered", 0, tags=None), + mock.call("datadog.tracer.api.requests", 11, tags=None), + mock.call("datadog.tracer.api.errors", 0, tags=None), + mock.call("datadog.tracer.api.responses", 11, tags=["status:200"]), + ] + if hasattr(time, "thread_time"): + histogram_calls.append(mock.call("datadog.tracer.writer.cpu_time", mock.ANY)) + + assert histogram_calls == self.dogstatsd.histogram.mock_calls + + def test_dogstatsd_failing_api(self): + self.create_worker(api_class=FailingAPI, enable_stats=True) + assert [ + mock.call("datadog.tracer.heartbeat", 1), + mock.call("datadog.tracer.queue.max_length", 1000), + ] == self.dogstatsd.gauge.mock_calls + + assert [ + mock.call("datadog.tracer.flushes"), + mock.call("datadog.tracer.flush.traces.total", 11, tags=None), + mock.call("datadog.tracer.flush.spans.total", 77, tags=None), + mock.call("datadog.tracer.flush.traces_filtered.total", 0, tags=None), + mock.call("datadog.tracer.api.requests.total", 1, tags=None), + mock.call("datadog.tracer.api.errors.total", 1, tags=None), + mock.call("datadog.tracer.queue.dropped.traces", 0), + mock.call("datadog.tracer.queue.enqueued.traces", 11), + mock.call("datadog.tracer.queue.enqueued.spans", 77), + mock.call("datadog.tracer.shutdown"), + ] == self.dogstatsd.increment.mock_calls + + histogram_calls = [ + mock.call("datadog.tracer.flush.traces", 11, tags=None), + mock.call("datadog.tracer.flush.spans", 77, tags=None), + mock.call("datadog.tracer.flush.traces_filtered", 0, tags=None), + mock.call("datadog.tracer.api.requests", 1, tags=None), + mock.call("datadog.tracer.api.errors", 1, tags=None), + ] + if hasattr(time, "thread_time"): + histogram_calls.append(mock.call("datadog.tracer.writer.cpu_time", mock.ANY)) + + assert histogram_calls == self.dogstatsd.histogram.mock_calls + + +def test_queue_full(): + q = Q(maxsize=3) + q.put([1]) + q.put(2) + q.put([3]) + q.put([4, 4]) + assert list(q.queue) == [[1], 2, [4, 4]] or list(q.queue) == [[1], [4, 4], [3]] or list(q.queue) == [[4, 4], 2, [3]] + assert q.dropped == 1 + assert q.accepted == 4 + assert q.accepted_lengths == 5 + dropped, accepted, accepted_lengths = q.reset_stats() + assert dropped == 1 + assert accepted == 4 + assert accepted_lengths == 5 + + +def test_queue_get(): + q = Q(maxsize=3) + q.put(1) + q.put(2) + assert list(q.get()) == [1, 2] + with pytest.raises(Empty): + q.get(block=False) diff --git a/tests/memory.py b/tests/memory.py new file mode 100644 index 0000000000..6ec70e3820 --- /dev/null +++ b/tests/memory.py @@ -0,0 +1,78 @@ +""" +a script which uses our integratiosn and prints memory statistics. +a very coarsely grained way of seeing how things are used. 
+""" + + +# stdlib +import itertools +import logging +import time +import sys + +# 3p +import pylibmc +import pympler.tracker +import psycopg2 +import redis + + +# project +import ddtrace +from tests.contrib import config + + +# verbosity +logging.basicConfig(stream=sys.stderr, level=logging.INFO) + +ddtrace.patch_all() +ddtrace.tracer.writer = None + + +class KitchenSink(object): + + def __init__(self): + self._redis = redis.Redis(**config.REDIS_CONFIG) + self._pg = psycopg2.connect(**config.POSTGRES_CONFIG) + + url = '%s:%s' % ( + config.MEMCACHED_CONFIG['host'], + config.MEMCACHED_CONFIG['port']) + self._pylibmc = pylibmc.Client([url]) + + def ping(self, i): + self._ping_redis(i) + self._ping_pg(i) + self._ping_pylibmc(i) + + def _ping_redis(self, i): + with self._redis.pipeline() as p: + p.get('a') + self._redis.set('a', 'b') + self._redis.get('a') + + def _ping_pg(self, i): + cur = self._pg.cursor() + try: + cur.execute("select 'asdf'") + cur.fetchall() + finally: + cur.close() + + def _ping_pylibmc(self, i): + self._pylibmc.set('a', 1) + self._pylibmc.incr('a', 2) + self._pylibmc.decr('a', 1) + + +if __name__ == '__main__': + k = KitchenSink() + t = pympler.tracker.SummaryTracker() + for i in itertools.count(): + # do the work + k.ping(i) + + # periodically print stats + if i % 500 == 0: + t.print_diff() + time.sleep(0.0001) diff --git a/tests/opentracer/__init__.py b/tests/opentracer/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/opentracer/conftest.py b/tests/opentracer/conftest.py new file mode 100644 index 0000000000..f264d17aa6 --- /dev/null +++ b/tests/opentracer/conftest.py @@ -0,0 +1,61 @@ +""" +pytest local plugin used to automatically make the following fixtures +available for all tests in this directory + +https://docs.pytest.org/en/latest/writing_plugins.html#testing-plugins +""" +import pytest + +from ddtrace.opentracer import Tracer, set_global_tracer + +from tests.test_tracer import get_dummy_tracer + + +@pytest.fixture() +def ot_tracer_factory(): + """Fixture which returns an opentracer ready to use for testing.""" + + def make_ot_tracer( + service_name='my_svc', config=None, scope_manager=None, context_provider=None + ): + config = config or {} + tracer = Tracer( + service_name=service_name, config=config, scope_manager=scope_manager + ) + + # similar to how we test the ddtracer, use a dummy tracer + dd_tracer = get_dummy_tracer() + if context_provider: + dd_tracer.configure(context_provider=context_provider) + + # attach the dummy tracer to the opentracer + tracer._dd_tracer = dd_tracer + return tracer + + return make_ot_tracer + + +@pytest.fixture() +def ot_tracer(ot_tracer_factory): + """Fixture for a default opentracer.""" + return ot_tracer_factory() + + +@pytest.fixture() +def global_tracer(ot_tracer): + """A function similar to one OpenTracing users would write to initialize + their OpenTracing tracer. 
+ """ + set_global_tracer(ot_tracer) + + return ot_tracer + + +@pytest.fixture() +def writer(ot_tracer): + return ot_tracer._dd_tracer.writer + + +@pytest.fixture() +def dd_tracer(ot_tracer): + return ot_tracer._dd_tracer diff --git a/tests/opentracer/test_dd_compatibility.py b/tests/opentracer/test_dd_compatibility.py new file mode 100644 index 0000000000..2bb4c090a3 --- /dev/null +++ b/tests/opentracer/test_dd_compatibility.py @@ -0,0 +1,188 @@ +import ddtrace +import opentracing +from opentracing import Format + +from ddtrace.opentracer.span_context import SpanContext + + +class TestTracerCompatibility(object): + """Ensure that our opentracer produces results in the underlying ddtracer.""" + + def test_ottracer_uses_global_ddtracer(self): + """Ensure that the opentracer will by default use the global ddtracer + as its underlying Datadog tracer. + """ + tracer = ddtrace.opentracer.Tracer() + assert tracer._dd_tracer is ddtrace.tracer + + def test_custom_ddtracer(self): + """A user should be able to specify their own Datadog tracer instance if + they wish. + """ + custom_dd_tracer = ddtrace.Tracer() + tracer = ddtrace.opentracer.Tracer(dd_tracer=custom_dd_tracer) + assert tracer._dd_tracer is custom_dd_tracer + + def test_ot_dd_global_tracers(self, global_tracer): + """Ensure our test function opentracer_init() prep""" + ot_tracer = global_tracer + dd_tracer = global_tracer._dd_tracer + + # check all the global references + assert ot_tracer is opentracing.tracer + assert ot_tracer._dd_tracer is dd_tracer + assert dd_tracer is ddtrace.tracer + + def test_ot_dd_nested_trace(self, ot_tracer, dd_tracer, writer): + """Ensure intertwined usage of the opentracer and ddtracer.""" + + with ot_tracer.start_span('my_ot_span') as ot_span: + with dd_tracer.trace('my_dd_span') as dd_span: + pass + spans = writer.pop() + assert len(spans) == 2 + + # confirm the ordering + assert spans[0] is ot_span._dd_span + assert spans[1] is dd_span + + # check the parenting + assert spans[0].parent_id is None + assert spans[1].parent_id == spans[0].span_id + + def test_dd_ot_nested_trace(self, ot_tracer, dd_tracer, writer): + """Ensure intertwined usage of the opentracer and ddtracer.""" + with dd_tracer.trace('my_dd_span') as dd_span: + with ot_tracer.start_span('my_ot_span') as ot_span: + pass + spans = writer.pop() + assert len(spans) == 2 + + # confirm the ordering + assert spans[0] is dd_span + assert spans[1] is ot_span._dd_span + + # check the parenting + assert spans[0].parent_id is None + assert spans[1].parent_id is spans[0].span_id + + def test_ot_dd_ot_dd_nested_trace(self, ot_tracer, dd_tracer, writer): + """Ensure intertwined usage of the opentracer and ddtracer.""" + with ot_tracer.start_span('my_ot_span') as ot_span: + with dd_tracer.trace('my_dd_span') as dd_span: + with ot_tracer.start_span('my_ot_span') as ot_span2: + with dd_tracer.trace('my_dd_span') as dd_span2: + pass + + spans = writer.pop() + assert len(spans) == 4 + + # confirm the ordering + assert spans[0] is ot_span._dd_span + assert spans[1] is dd_span + assert spans[2] is ot_span2._dd_span + assert spans[3] is dd_span2 + + # check the parenting + assert spans[0].parent_id is None + assert spans[1].parent_id is spans[0].span_id + assert spans[2].parent_id is spans[1].span_id + assert spans[3].parent_id is spans[2].span_id + + def test_ot_ot_dd_ot_dd_nested_trace_active(self, ot_tracer, dd_tracer, writer): + """Ensure intertwined usage of the opentracer and ddtracer.""" + with ot_tracer.start_active_span('my_ot_span') as ot_scope: + with 
ot_tracer.start_active_span('my_ot_span') as ot_scope2: + with dd_tracer.trace('my_dd_span') as dd_span: + with ot_tracer.start_active_span('my_ot_span') as ot_scope3: + with dd_tracer.trace('my_dd_span') as dd_span2: + pass + + spans = writer.pop() + assert len(spans) == 5 + + # confirm the ordering + assert spans[0] is ot_scope.span._dd_span + assert spans[1] is ot_scope2.span._dd_span + assert spans[2] is dd_span + assert spans[3] is ot_scope3.span._dd_span + assert spans[4] is dd_span2 + + # check the parenting + assert spans[0].parent_id is None + assert spans[1].parent_id == spans[0].span_id + assert spans[2].parent_id == spans[1].span_id + assert spans[3].parent_id == spans[2].span_id + assert spans[4].parent_id == spans[3].span_id + + def test_consecutive_trace(self, ot_tracer, dd_tracer, writer): + """Ensure consecutive usage of the opentracer and ddtracer.""" + with ot_tracer.start_active_span('my_ot_span') as ot_scope: + pass + + with dd_tracer.trace('my_dd_span') as dd_span: + pass + + with ot_tracer.start_active_span('my_ot_span') as ot_scope2: + pass + + with dd_tracer.trace('my_dd_span') as dd_span2: + pass + + spans = writer.pop() + assert len(spans) == 4 + + # confirm the ordering + assert spans[0] is ot_scope.span._dd_span + assert spans[1] is dd_span + assert spans[2] is ot_scope2.span._dd_span + assert spans[3] is dd_span2 + + # check the parenting + assert spans[0].parent_id is None + assert spans[1].parent_id is None + assert spans[2].parent_id is None + assert spans[3].parent_id is None + + def test_ddtrace_wrapped_fn(self, ot_tracer, dd_tracer, writer): + """Ensure ddtrace wrapped functions work with the opentracer""" + + @dd_tracer.wrap() + def fn(): + with ot_tracer.start_span('ot_span_inner'): + pass + + with ot_tracer.start_active_span('ot_span_outer'): + fn() + + spans = writer.pop() + assert len(spans) == 3 + + # confirm the ordering + assert spans[0].name == 'ot_span_outer' + assert spans[1].name == 'tests.opentracer.test_dd_compatibility.fn' + assert spans[2].name == 'ot_span_inner' + + # check the parenting + assert spans[0].parent_id is None + assert spans[1].parent_id is spans[0].span_id + assert spans[2].parent_id is spans[1].span_id + + def test_distributed_trace_propagation(self, ot_tracer, dd_tracer, writer): + """Ensure that a propagated span context is properly activated.""" + span_ctx = SpanContext(trace_id=123, span_id=456) + carrier = {} + ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier) + + # extract should activate the span so that a subsequent start_span + # will inherit from the propagated span context + ot_tracer.extract(Format.HTTP_HEADERS, carrier) + + with dd_tracer.trace('test') as span: + pass + + assert span.parent_id == 456 + assert span.trace_id == 123 + + spans = writer.pop() + assert len(spans) == 1 diff --git a/tests/opentracer/test_span.py b/tests/opentracer/test_span.py new file mode 100644 index 0000000000..4428213067 --- /dev/null +++ b/tests/opentracer/test_span.py @@ -0,0 +1,153 @@ +import pytest +from ddtrace.opentracer.span import Span +from ..test_tracer import get_dummy_tracer + + +@pytest.fixture +def nop_tracer(): + from ddtrace.opentracer import Tracer + tracer = Tracer(service_name='mysvc', config={}) + # use the same test tracer used by the primary tests + tracer._tracer = get_dummy_tracer() + return tracer + + +@pytest.fixture +def nop_span_ctx(): + from ddtrace.ext.priority import AUTO_KEEP + from ddtrace.opentracer.span_context import SpanContext + return SpanContext(sampling_priority=AUTO_KEEP) + + 
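+
+# A rough sketch of what these fixtures set up (attribute names as used
+# throughout this suite): an OpenTracing span wraps a Datadog span, exposed
+# as `_dd_span`, so tests reach through the public OT API into it, e.g.:
+#
+#     span = Span(nop_tracer, nop_span_ctx, 'my_op_name')
+#     span.set_tag('key', 'value')     # forwarded to span._dd_span
+#     span._dd_span.get_tag('key')     # -> 'value'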
+@pytest.fixture +def nop_span(nop_tracer, nop_span_ctx): + return Span(nop_tracer, nop_span_ctx, 'my_op_name') + + +class TestSpan(object): + """Test the Datadog OpenTracing Span implementation.""" + + def test_init(self, nop_tracer, nop_span_ctx): + """Very basic test for skeleton code""" + span = Span(nop_tracer, nop_span_ctx, 'my_op_name') + assert not span.finished + + def test_tags(self, nop_span): + """Set a tag and get it back.""" + nop_span.set_tag('test', 23) + assert nop_span._get_metric('test') == 23 + + def test_set_baggage(self, nop_span): + """Test setting baggage.""" + r = nop_span.set_baggage_item('test', 23) + assert r is nop_span + + r = nop_span.set_baggage_item('1', 1).set_baggage_item('2', 2) + assert r is nop_span + + def test_get_baggage(self, nop_span): + """Test setting and getting baggage.""" + # test a single item + nop_span.set_baggage_item('test', 23) + assert int(nop_span.get_baggage_item('test')) == 23 + + # test multiple items + nop_span.set_baggage_item('1', '1').set_baggage_item('2', 2) + assert int(nop_span.get_baggage_item('test')) == 23 + assert nop_span.get_baggage_item('1') == '1' + assert int(nop_span.get_baggage_item('2')) == 2 + + def test_log_kv(self, nop_span): + """Ensure logging values doesn't break anything.""" + # just log a bunch of values + nop_span.log_kv({'myval': 2}) + nop_span.log_kv({'myval2': 3}) + nop_span.log_kv({'myval3': 5}) + nop_span.log_kv({'myval': 2}) + + def test_log_dd_kv(self, nop_span): + """Ensure keys that can be handled by our impl. are indeed handled. """ + import traceback + from ddtrace.ext import errors + + stack_trace = str(traceback.format_stack()) + nop_span.log_kv({ + 'event': 'error', + 'error': 3, + 'message': 'my error message', + 'stack': stack_trace, + }) + + # Ensure error flag is set... + assert nop_span._dd_span.error + # ...and that error tags are set with the correct key + assert nop_span._get_tag(errors.ERROR_STACK) == stack_trace + assert nop_span._get_tag(errors.ERROR_MSG) == 'my error message' + assert nop_span._get_metric(errors.ERROR_TYPE) == 3 + + def test_operation_name(self, nop_span): + """Sanity check for setting the operation name.""" + # just try setting the operation name + nop_span.set_operation_name('new_op_name') + assert nop_span._dd_span.name == 'new_op_name' + + def test_context_manager(self, nop_span): + """Test the span context manager.""" + import time + + assert not nop_span.finished + # run the context manager but since the span has not been added + # to the span context, we will not get any traces + with nop_span: + time.sleep(0.005) + + # span should be finished when the context manager exits + assert nop_span.finished + + # there should be no traces (see above comment) + spans = nop_span.tracer._tracer.writer.pop() + assert len(spans) == 0 + + def test_immutable_span_context(self, nop_span): + """Ensure span contexts are immutable.""" + before_ctx = nop_span._context + nop_span.set_baggage_item('key', 'value') + after_ctx = nop_span._context + # should be different contexts + assert before_ctx is not after_ctx + + +class TestSpanCompatibility(object): + """Ensure our opentracer spans features correspond to datadog span features. 
+ """ + def test_set_tag(self, nop_span): + nop_span.set_tag('test', 2) + assert nop_span._get_metric('test') == 2 + + def test_tag_resource_name(self, nop_span): + nop_span.set_tag('resource.name', 'myresource') + assert nop_span._dd_span.resource == 'myresource' + + def test_tag_span_type(self, nop_span): + nop_span.set_tag('span.type', 'db') + assert nop_span._dd_span.span_type == 'db' + + def test_tag_service_name(self, nop_span): + nop_span.set_tag('service.name', 'mysvc234') + assert nop_span._dd_span.service == 'mysvc234' + + def test_tag_db_statement(self, nop_span): + nop_span.set_tag('db.statement', 'SELECT * FROM USERS') + assert nop_span._dd_span.resource == 'SELECT * FROM USERS' + + def test_tag_peer_hostname(self, nop_span): + nop_span.set_tag('peer.hostname', 'peername') + assert nop_span._dd_span.get_tag('out.host') == 'peername' + + def test_tag_peer_port(self, nop_span): + nop_span.set_tag('peer.port', 55555) + assert nop_span._get_metric('out.port') == 55555 + + def test_tag_sampling_priority(self, nop_span): + nop_span.set_tag('sampling.priority', '2') + assert nop_span._dd_span.context._sampling_priority == '2' diff --git a/tests/opentracer/test_span_context.py b/tests/opentracer/test_span_context.py new file mode 100644 index 0000000000..a8d1b2f539 --- /dev/null +++ b/tests/opentracer/test_span_context.py @@ -0,0 +1,39 @@ +from ddtrace.opentracer.span_context import SpanContext + + +class TestSpanContext(object): + + def test_init(self): + """Make sure span context creation is fine.""" + span_ctx = SpanContext() + assert span_ctx + + def test_baggage(self): + """Ensure baggage passed is the resulting baggage of the span context.""" + baggage = { + 'some': 'stuff', + } + + span_ctx = SpanContext(baggage=baggage) + + assert span_ctx.baggage == baggage + + def test_with_baggage_item(self): + """Should allow immutable extension of new span contexts.""" + baggage = { + '1': 1, + } + + first_ctx = SpanContext(baggage=baggage) + + second_ctx = first_ctx.with_baggage_item('2', 2) + + assert '2' not in first_ctx.baggage + assert second_ctx.baggage is not first_ctx.baggage + + def test_span_context_immutable_baggage(self): + """Ensure that two different span contexts do not share baggage.""" + ctx1 = SpanContext() + ctx1.set_baggage_item('test', 3) + ctx2 = SpanContext() + assert 'test' not in ctx2._baggage diff --git a/tests/opentracer/test_tracer.py b/tests/opentracer/test_tracer.py new file mode 100644 index 0000000000..7888e5973f --- /dev/null +++ b/tests/opentracer/test_tracer.py @@ -0,0 +1,568 @@ +import time + +import opentracing +from opentracing import ( + child_of, + Format, + InvalidCarrierException, + UnsupportedFormatException, + SpanContextCorruptedException, +) + +import ddtrace +from ddtrace.ext.priority import AUTO_KEEP +from ddtrace.opentracer import Tracer, set_global_tracer +from ddtrace.opentracer.span_context import SpanContext +from ddtrace.propagation.http import HTTP_HEADER_TRACE_ID +from ddtrace.settings import ConfigException + +import mock +import pytest + + +class TestTracerConfig(object): + def test_config(self): + """Test the configuration of the tracer""" + config = {"enabled": True} + tracer = Tracer(service_name="myservice", config=config) + + assert tracer._service_name == "myservice" + assert tracer._enabled is True + + def test_no_service_name(self): + """A service_name should be generated if one is not provided.""" + tracer = Tracer() + assert tracer._service_name == "pytest" + + def test_multiple_tracer_configs(self): + """Ensure that a 
tracer config is a copy of the passed config."""
+        config = {"enabled": True}
+
+        tracer1 = Tracer(service_name="serv1", config=config)
+        assert tracer1._service_name == "serv1"
+
+        config["enabled"] = False
+        tracer2 = Tracer(service_name="serv2", config=config)
+
+        # Ensure tracer1's config was not mutated
+        assert tracer1._service_name == "serv1"
+        assert tracer1._enabled is True
+
+        assert tracer2._service_name == "serv2"
+        assert tracer2._enabled is False
+
+    def test_invalid_config_key(self):
+        """A config with an invalid key should raise a ConfigException."""
+
+        config = {"enabeld": False}
+
+        # Without the debug flag set, no error should be raised
+        tracer = Tracer(service_name="mysvc", config=config)
+
+        # With the debug flag set, an error should be raised
+        config["debug"] = True
+        with pytest.raises(ConfigException) as ce_info:
+            tracer = Tracer(config=config)
+        assert "enabeld" in str(ce_info)
+        assert tracer is not None
+
+        # Test with multiple incorrect keys
+        config["setttings"] = {}
+        with pytest.raises(ConfigException) as ce_info:
+            tracer = Tracer(service_name="mysvc", config=config)
+        assert "enabeld" in str(ce_info) and "setttings" in str(ce_info)
+        assert tracer is not None
+
+    def test_global_tags(self):
+        """Global tags should be passed from the opentracer to the tracer."""
+        config = {
+            "global_tags": {"tag1": "value1", "tag2": 2},
+        }
+
+        tracer = Tracer(service_name="mysvc", config=config)
+        with tracer.start_span("myop") as span:
+            # global tags should be attached to all generated datadog spans
+            assert span._dd_span.get_tag("tag1") == "value1"
+            assert span._dd_span.get_metric("tag2") == 2
+
+            with tracer.start_span("myop2") as span2:
+                assert span2._dd_span.get_tag("tag1") == "value1"
+                assert span2._dd_span.get_metric("tag2") == 2
+
+
+class TestTracer(object):
+    def test_start_span(self, ot_tracer, writer):
+        """Start and finish a span."""
+        with ot_tracer.start_span("myop") as span:
+            pass
+
+        # span should be finished when the context manager exits
+        assert span.finished
+
+        spans = writer.pop()
+        assert len(spans) == 1
+
+    def test_start_span_references(self, ot_tracer, writer):
+        """Start a span using references."""
+
+        with ot_tracer.start_span("one", references=[child_of()]):
+            pass
+
+        spans = writer.pop()
+        assert spans[0].parent_id is None
+
+        root = ot_tracer.start_active_span("root")
+        # create a child using a parent reference that is not the context parent
+        with ot_tracer.start_active_span("one"):
+            with ot_tracer.start_active_span("two", references=[child_of(root.span)]):
+                pass
+        root.close()
+
+        spans = writer.pop()
+        assert spans[2].parent_id == spans[0].span_id
+
+    def test_start_span_custom_start_time(self, ot_tracer):
+        """Start a span with a custom start time."""
+        t = 100
+        with mock.patch("ddtrace.span.time_ns") as time:
+            time.return_value = 102 * 1e9
+            with ot_tracer.start_span("myop", start_time=t) as span:
+                pass
+
+        assert span._dd_span.start == t
+        assert span._dd_span.duration == 2
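+
+    # Note on the test above: ddtrace computes duration as finish - start,
+    # so with time_ns patched to return 102s and start_time=100 the span
+    # reports a 2s duration.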
+ """ + with ot_tracer.start_span("myop") as span: + with ot_tracer.start_span("myop", child_of=span.context) as span2: + pass + + # span should be finished when the context manager exits + assert span.finished + assert span2.finished + + spans = writer.pop() + assert len(spans) == 2 + + # ensure proper parenting + assert spans[1].parent_id is spans[0].span_id + + def test_start_span_with_tags(self, ot_tracer): + """Create a span with initial tags.""" + tags = {"key": "value", "key2": "value2"} + with ot_tracer.start_span("myop", tags=tags) as span: + pass + + assert span._dd_span.get_tag("key") == "value" + assert span._dd_span.get_tag("key2") == "value2" + + def test_start_span_with_resource_name_tag(self, ot_tracer): + """Create a span with the tag to set the resource name""" + tags = {"resource.name": "value", "key2": "value2"} + with ot_tracer.start_span("myop", tags=tags) as span: + pass + + # Span resource name should be set to tag value, and should not get set as + # a tag on the underlying span. + assert span._dd_span.resource == "value" + assert span._dd_span.get_tag("resource.name") is None + + # Other tags are set as normal + assert span._dd_span.get_tag("key2") == "value2" + + def test_start_active_span_multi_child(self, ot_tracer, writer): + """Start and finish multiple child spans. + This should ensure that child spans can be created 2 levels deep. + """ + with ot_tracer.start_active_span("myfirstop") as scope1: + time.sleep(0.009) + with ot_tracer.start_active_span("mysecondop") as scope2: + time.sleep(0.007) + with ot_tracer.start_active_span("mythirdop") as scope3: + time.sleep(0.005) + + # spans should be finished when the context manager exits + assert scope1.span.finished + assert scope2.span.finished + assert scope3.span.finished + + spans = writer.pop() + + # check spans are captured in the trace + assert scope1.span._dd_span is spans[0] + assert scope2.span._dd_span is spans[1] + assert scope3.span._dd_span is spans[2] + + # ensure proper parenting + assert spans[1].parent_id is spans[0].span_id + assert spans[2].parent_id is spans[1].span_id + + # sanity check a lower bound on the durations + assert spans[0].duration >= 0.009 + 0.007 + 0.005 + assert spans[1].duration >= 0.007 + 0.005 + assert spans[2].duration >= 0.005 + + def test_start_active_span_multi_child_siblings(self, ot_tracer, writer): + """Start and finish multiple span at the same level. + This should test to ensure a parent can have multiple child spans at the + same level. + """ + with ot_tracer.start_active_span("myfirstop") as scope1: + time.sleep(0.009) + with ot_tracer.start_active_span("mysecondop") as scope2: + time.sleep(0.007) + with ot_tracer.start_active_span("mythirdop") as scope3: + time.sleep(0.005) + + # spans should be finished when the context manager exits + assert scope1.span.finished + assert scope2.span.finished + assert scope3.span.finished + + spans = writer.pop() + + # check spans are captured in the trace + assert scope1.span._dd_span is spans[0] + assert scope2.span._dd_span is spans[1] + assert scope3.span._dd_span is spans[2] + + # ensure proper parenting + assert spans[1].parent_id is spans[0].span_id + assert spans[2].parent_id is spans[0].span_id + + # sanity check a lower bound on the durations + assert spans[0].duration >= 0.009 + 0.007 + 0.005 + assert spans[1].duration >= 0.007 + assert spans[2].duration >= 0.005 + + def test_start_span_manual_child_of(self, ot_tracer, writer): + """Start spans without using a scope manager. 
+
+    def test_start_span_manual_child_of(self, ot_tracer, writer):
+        """Start spans without using a scope manager.
+        Each span is parented explicitly via child_of, so all children should
+        attach to the manually supplied root rather than the active span.
+        """
+        root = ot_tracer.start_span("zero")
+
+        with ot_tracer.start_span("one", child_of=root):
+            with ot_tracer.start_span("two", child_of=root):
+                with ot_tracer.start_span("three", child_of=root):
+                    pass
+        root.finish()
+
+        spans = writer.pop()
+
+        assert spans[0].parent_id is None
+        # ensure each child span is a child of root
+        assert spans[1].parent_id == root._dd_span.span_id
+        assert spans[2].parent_id == root._dd_span.span_id
+        assert spans[3].parent_id == root._dd_span.span_id
+        assert spans[0].trace_id == spans[1].trace_id and spans[1].trace_id == spans[2].trace_id
+
+    def test_start_span_no_active_span(self, ot_tracer, writer):
+        """Start spans without using a scope manager.
+        Spans should be created without parents since there will be no call
+        for the active span.
+        """
+        with ot_tracer.start_span("one", ignore_active_span=True):
+            with ot_tracer.start_span("two", ignore_active_span=True):
+                pass
+            with ot_tracer.start_span("three", ignore_active_span=True):
+                pass
+
+        spans = writer.pop()
+
+        # ensure each span does not have a parent
+        assert spans[0].parent_id is None
+        assert spans[1].parent_id is None
+        assert spans[2].parent_id is None
+        # and that each span is a new trace
+        assert (
+            spans[0].trace_id != spans[1].trace_id
+            and spans[1].trace_id != spans[2].trace_id
+            and spans[0].trace_id != spans[2].trace_id
+        )
+
+    def test_start_active_span_child_finish_after_parent(self, ot_tracer, writer):
+        """Start a child span and finish it after its parent."""
+        span1 = ot_tracer.start_active_span("one").span
+        span2 = ot_tracer.start_active_span("two").span
+        span1.finish()
+        time.sleep(0.005)
+        span2.finish()
+
+        spans = writer.pop()
+        assert len(spans) == 2
+        assert spans[0].parent_id is None
+        assert spans[1].parent_id == span1._dd_span.span_id
+        assert spans[1].duration > spans[0].duration
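+
+    # As the test above shows, a child may outlive its parent: finishing a
+    # span only records its duration, and the trace is written out once
+    # every span in the context has finished.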
+ """ + import threading + + # synchronize threads with a threading event object + event = threading.Event() + + def trace_one(): + id = 11 # noqa: A001 + with ot_tracer.start_active_span(str(id)): + id += 1 + with ot_tracer.start_active_span(str(id)): + id += 1 + with ot_tracer.start_active_span(str(id)): + event.set() + + def trace_two(): + id = 21 # noqa: A001 + event.wait() + with ot_tracer.start_active_span(str(id)): + id += 1 + with ot_tracer.start_active_span(str(id)): + id += 1 + with ot_tracer.start_active_span(str(id)): + pass + + # the ordering should be + # t1.span1/t2.span1, t2.span2, t1.span2, t1.span3, t2.span3 + t1 = threading.Thread(target=trace_one) + t2 = threading.Thread(target=trace_two) + + t1.start() + t2.start() + # wait for threads to finish + t1.join() + t2.join() + + spans = writer.pop() + + # trace_one will finish before trace_two so its spans should be written + # before the spans from trace_two, let's confirm this + assert spans[0].name == "11" + assert spans[1].name == "12" + assert spans[2].name == "13" + assert spans[3].name == "21" + assert spans[4].name == "22" + assert spans[5].name == "23" + + # next let's ensure that each span has the correct parent: + # trace_one + assert spans[0].parent_id is None + assert spans[1].parent_id is spans[0].span_id + assert spans[2].parent_id is spans[1].span_id + # trace_two + assert spans[3].parent_id is None + assert spans[4].parent_id is spans[3].span_id + assert spans[5].parent_id is spans[3].span_id + + # finally we should ensure that the trace_ids are reasonable + # trace_one + assert spans[0].trace_id == spans[1].trace_id and spans[1].trace_id == spans[2].trace_id + # traces should be independent + assert spans[2].trace_id != spans[3].trace_id + # trace_two + assert spans[3].trace_id == spans[4].trace_id and spans[4].trace_id == spans[5].trace_id + + def test_start_active_span(self, ot_tracer, writer): + with ot_tracer.start_active_span("one") as scope: + pass + + assert scope.span._dd_span.name == "one" + assert scope.span.finished + spans = writer.pop() + assert spans + + def test_start_active_span_finish_on_close(self, ot_tracer, writer): + with ot_tracer.start_active_span("one", finish_on_close=False) as scope: + pass + + assert scope.span._dd_span.name == "one" + assert not scope.span.finished + spans = writer.pop() + assert not spans + + def test_start_active_span_nested(self, ot_tracer): + """Test the active span of multiple nested calls of start_active_span.""" + with ot_tracer.start_active_span("one") as outer_scope: + assert ot_tracer.active_span == outer_scope.span + with ot_tracer.start_active_span("two") as inner_scope: + assert ot_tracer.active_span == inner_scope.span + with ot_tracer.start_active_span("three") as innest_scope: # why isn't it innest? 
+
+    def test_start_active_span_nested(self, ot_tracer):
+        """Test the active span of multiple nested calls of start_active_span."""
+        with ot_tracer.start_active_span("one") as outer_scope:
+            assert ot_tracer.active_span == outer_scope.span
+            with ot_tracer.start_active_span("two") as inner_scope:
+                assert ot_tracer.active_span == inner_scope.span
+                with ot_tracer.start_active_span("three") as innermost_scope:
+                    assert ot_tracer.active_span == innermost_scope.span
+            with ot_tracer.start_active_span("two") as inner_scope:
+                assert ot_tracer.active_span == inner_scope.span
+            assert ot_tracer.active_span == outer_scope.span
+        assert ot_tracer.active_span is None
+
+    def test_start_active_span_trace(self, ot_tracer, writer):
+        """Test the parenting of multiple nested calls of start_active_span."""
+        with ot_tracer.start_active_span("one") as outer_scope:
+            outer_scope.span.set_tag("outer", 2)
+            with ot_tracer.start_active_span("two") as inner_scope:
+                inner_scope.span.set_tag("inner", 3)
+            with ot_tracer.start_active_span("two") as inner_scope:
+                inner_scope.span.set_tag("inner", 3)
+                with ot_tracer.start_active_span("three") as innermost_scope:
+                    innermost_scope.span.set_tag("innermost", 4)
+
+        spans = writer.pop()
+
+        assert spans[0].parent_id is None
+        assert spans[1].parent_id == spans[0].span_id
+        assert spans[2].parent_id == spans[0].span_id
+        assert spans[3].parent_id == spans[2].span_id
+
+
+@pytest.fixture
+def nop_span_ctx():
+    return SpanContext(sampling_priority=AUTO_KEEP)
+
+
+class TestTracerSpanContextPropagation(object):
+    """Test the injection and extraction of a span context from a tracer."""
+
+    def test_invalid_format(self, ot_tracer, nop_span_ctx):
+        """An invalid format should raise an UnsupportedFormatException."""
+        # test inject
+        with pytest.raises(UnsupportedFormatException):
+            ot_tracer.inject(nop_span_ctx, None, {})
+
+        # test extract
+        with pytest.raises(UnsupportedFormatException):
+            ot_tracer.extract(None, {})
+
+    def test_inject_invalid_carrier(self, ot_tracer, nop_span_ctx):
+        """Only dicts should be supported as a carrier."""
+        with pytest.raises(InvalidCarrierException):
+            ot_tracer.inject(nop_span_ctx, Format.HTTP_HEADERS, None)
+
+    def test_extract_invalid_carrier(self, ot_tracer):
+        """Only dicts should be supported as a carrier."""
+        with pytest.raises(InvalidCarrierException):
+            ot_tracer.extract(Format.HTTP_HEADERS, None)
+
+    def test_http_headers_base(self, ot_tracer):
+        """extract should undo inject for http headers."""
+
+        span_ctx = SpanContext(trace_id=123, span_id=456)
+        carrier = {}
+
+        ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
+        assert len(carrier.keys()) > 0
+
+        ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
+        assert ext_span_ctx._dd_context.trace_id == 123
+        assert ext_span_ctx._dd_context.span_id == 456
+
+    def test_http_headers_baggage(self, ot_tracer):
+        """extract should undo inject for http headers."""
+        span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"})
+        carrier = {}
+
+        ot_tracer.inject(span_ctx, Format.HTTP_HEADERS, carrier)
+        assert len(carrier.keys()) > 0
+
+        ext_span_ctx = ot_tracer.extract(Format.HTTP_HEADERS, carrier)
+        assert ext_span_ctx._dd_context.trace_id == 123
+        assert ext_span_ctx._dd_context.span_id == 456
+        assert ext_span_ctx.baggage == span_ctx.baggage
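+
+    # For reference, inject() fills the carrier dict with the Datadog
+    # propagation headers (x-datadog-trace-id, x-datadog-parent-id, ...; see
+    # tests/propagation/test_http.py) and extract() parses them back into a
+    # SpanContext.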
+ """ + carrier = {} + with pytest.raises(SpanContextCorruptedException): + ot_tracer.extract(Format.HTTP_HEADERS, carrier) + + def test_text(self, ot_tracer): + """extract should undo inject for http headers""" + span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"}) + carrier = {} + + ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier) + assert len(carrier.keys()) > 0 + + ext_span_ctx = ot_tracer.extract(Format.TEXT_MAP, carrier) + assert ext_span_ctx._dd_context.trace_id == 123 + assert ext_span_ctx._dd_context.span_id == 456 + assert ext_span_ctx.baggage == span_ctx.baggage + + def test_corrupted_propagated_context(self, ot_tracer): + """Corrupted context should raise a SpanContextCorruptedException.""" + span_ctx = SpanContext(trace_id=123, span_id=456, baggage={"test": 4, "test2": "string"}) + carrier = {} + + ot_tracer.inject(span_ctx, Format.TEXT_MAP, carrier) + assert len(carrier.keys()) > 0 + + # manually alter a key in the carrier baggage + del carrier[HTTP_HEADER_TRACE_ID] + corrupted_key = HTTP_HEADER_TRACE_ID[2:] + carrier[corrupted_key] = 123 + + with pytest.raises(SpanContextCorruptedException): + ot_tracer.extract(Format.TEXT_MAP, carrier) + + def test_immutable_span_context(self, ot_tracer): + """Span contexts should be immutable.""" + with ot_tracer.start_span("root") as root: + ctx_before = root.context + root.set_baggage_item("test", 2) + assert ctx_before is not root.context + with ot_tracer.start_span("child") as level1: + with ot_tracer.start_span("child") as level2: + pass + assert root.context is not level1.context + assert level2.context is not level1.context + assert level2.context is not root.context + + def test_inherited_baggage(self, ot_tracer): + """Baggage should be inherited by child spans.""" + with ot_tracer.start_active_span("root") as root: + # this should be passed down to the child + root.span.set_baggage_item("root", 1) + root.span.set_baggage_item("root2", 1) + with ot_tracer.start_active_span("child") as level1: + level1.span.set_baggage_item("level1", 1) + with ot_tracer.start_active_span("child") as level2: + level2.span.set_baggage_item("level2", 1) + # ensure immutability + assert level1.span.context is not root.span.context + assert level2.span.context is not level1.span.context + + # level1 should have inherited the baggage of root + assert level1.span.get_baggage_item("root") + assert level1.span.get_baggage_item("root2") + + # level2 should have inherited the baggage of both level1 and level2 + assert level2.span.get_baggage_item("root") + assert level2.span.get_baggage_item("root2") + assert level2.span.get_baggage_item("level1") + assert level2.span.get_baggage_item("level2") + + +class TestTracerCompatibility(object): + """Ensure that our opentracer produces results in the underlying datadog tracer.""" + + def test_required_dd_fields(self): + """Ensure required fields needed for successful tracing are possessed + by the underlying datadog tracer. 
+ """ + # a service name is required + tracer = Tracer("service") + with tracer.start_span("my_span") as span: + assert span._dd_span.service + + +def test_set_global_tracer(): + """Sanity check for set_global_tracer""" + my_tracer = Tracer("service") + set_global_tracer(my_tracer) + + assert opentracing.tracer is my_tracer + assert ddtrace.tracer is my_tracer._dd_tracer diff --git a/tests/opentracer/test_tracer_asyncio.py b/tests/opentracer/test_tracer_asyncio.py new file mode 100644 index 0000000000..3e1e6c0e48 --- /dev/null +++ b/tests/opentracer/test_tracer_asyncio.py @@ -0,0 +1,192 @@ +import asyncio +import pytest +from opentracing.scope_managers.asyncio import AsyncioScopeManager + +import ddtrace +from ddtrace.opentracer.utils import get_context_provider_for_scope_manager + +from tests.contrib.asyncio.utils import AsyncioTestCase, mark_asyncio +from .conftest import ot_tracer_factory # noqa: F401 + + +@pytest.fixture() +def ot_tracer(request, ot_tracer_factory): # noqa: F811 + # use the dummy asyncio ot tracer + request.instance.ot_tracer = ot_tracer_factory( + 'asyncio_svc', + config={}, + scope_manager=AsyncioScopeManager(), + context_provider=ddtrace.contrib.asyncio.context_provider, + ) + request.instance.ot_writer = request.instance.ot_tracer._dd_tracer.writer + request.instance.dd_tracer = request.instance.ot_tracer._dd_tracer + + +@pytest.mark.usefixtures('ot_tracer') +class TestTracerAsyncio(AsyncioTestCase): + + def reset(self): + self.ot_writer.pop_traces() + + @mark_asyncio + def test_trace_coroutine(self): + # it should use the task context when invoked in a coroutine + with self.ot_tracer.start_span('coroutine'): + pass + + traces = self.ot_writer.pop_traces() + + assert len(traces) == 1 + assert len(traces[0]) == 1 + assert traces[0][0].name == 'coroutine' + + @mark_asyncio + def test_trace_multiple_coroutines(self): + # if multiple coroutines have nested tracing, they must belong + # to the same trace + @asyncio.coroutine + def coro(): + # another traced coroutine + with self.ot_tracer.start_active_span('coroutine_2'): + return 42 + + with self.ot_tracer.start_active_span('coroutine_1'): + value = yield from coro() + + # the coroutine has been called correctly + assert value == 42 + # a single trace has been properly reported + traces = self.ot_writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 2 + assert traces[0][0].name == 'coroutine_1' + assert traces[0][1].name == 'coroutine_2' + # the parenting is correct + assert traces[0][0] == traces[0][1]._parent + assert traces[0][0].trace_id == traces[0][1].trace_id + + @mark_asyncio + def test_exception(self): + @asyncio.coroutine + def f1(): + with self.ot_tracer.start_span('f1'): + raise Exception('f1 error') + + with pytest.raises(Exception): + yield from f1() + + traces = self.ot_writer.pop_traces() + assert len(traces) == 1 + spans = traces[0] + assert len(spans) == 1 + span = spans[0] + assert span.error == 1 + assert span.get_tag('error.msg') == 'f1 error' + assert 'Exception: f1 error' in span.get_tag('error.stack') + + @mark_asyncio + def test_trace_multiple_calls(self): + # create multiple futures so that we expect multiple + # traces instead of a single one (helper not used) + @asyncio.coroutine + def coro(): + # another traced coroutine + with self.ot_tracer.start_span('coroutine'): + yield from asyncio.sleep(0.01) + + futures = [asyncio.ensure_future(coro()) for x in range(10)] + for future in futures: + yield from future + + traces = self.ot_writer.pop_traces() + + assert len(traces) 
== 10 + assert len(traces[0]) == 1 + assert traces[0][0].name == 'coroutine' + + +@pytest.mark.usefixtures('ot_tracer') +class TestTracerAsyncioCompatibility(AsyncioTestCase): + """Ensure the opentracer works in tandem with the ddtracer and asyncio.""" + + @mark_asyncio + def test_trace_multiple_coroutines_ot_dd(self): + """ + Ensure we can trace from opentracer to ddtracer across asyncio + context switches. + """ + # if multiple coroutines have nested tracing, they must belong + # to the same trace + @asyncio.coroutine + def coro(): + # another traced coroutine + with self.dd_tracer.trace('coroutine_2'): + return 42 + + with self.ot_tracer.start_active_span('coroutine_1'): + value = yield from coro() + + # the coroutine has been called correctly + assert value == 42 + # a single trace has been properly reported + traces = self.ot_tracer._dd_tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 2 + assert traces[0][0].name == 'coroutine_1' + assert traces[0][1].name == 'coroutine_2' + # the parenting is correct + assert traces[0][0] == traces[0][1]._parent + assert traces[0][0].trace_id == traces[0][1].trace_id + + @mark_asyncio + def test_trace_multiple_coroutines_dd_ot(self): + """ + Ensure we can trace from ddtracer to opentracer across asyncio + context switches. + """ + # if multiple coroutines have nested tracing, they must belong + # to the same trace + @asyncio.coroutine + def coro(): + # another traced coroutine + with self.ot_tracer.start_span('coroutine_2'): + return 42 + + with self.dd_tracer.trace('coroutine_1'): + value = yield from coro() + + # the coroutine has been called correctly + assert value == 42 + # a single trace has been properly reported + traces = self.ot_tracer._dd_tracer.writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 2 + assert traces[0][0].name == 'coroutine_1' + assert traces[0][1].name == 'coroutine_2' + # the parenting is correct + assert traces[0][0] == traces[0][1]._parent + assert traces[0][0].trace_id == traces[0][1].trace_id + + +@pytest.mark.skipif( + ddtrace.internal.context_manager.CONTEXTVARS_IS_AVAILABLE, + reason='only applicable to legacy asyncio provider' +) +class TestUtilsAsyncio(object): + """Test the util routines of the opentracer with asyncio specific + configuration. 
+ """ + + def test_get_context_provider_for_scope_manager_asyncio(self): + scope_manager = AsyncioScopeManager() + ctx_prov = get_context_provider_for_scope_manager(scope_manager) + assert isinstance( + ctx_prov, ddtrace.contrib.asyncio.provider.AsyncioContextProvider + ) + + def test_tracer_context_provider_config(self): + tracer = ddtrace.opentracer.Tracer('mysvc', scope_manager=AsyncioScopeManager()) + assert isinstance( + tracer._dd_tracer.context_provider, + ddtrace.contrib.asyncio.provider.AsyncioContextProvider, + ) diff --git a/tests/opentracer/test_tracer_gevent.py b/tests/opentracer/test_tracer_gevent.py new file mode 100644 index 0000000000..65f0491e0f --- /dev/null +++ b/tests/opentracer/test_tracer_gevent.py @@ -0,0 +1,217 @@ +import gevent +import pytest +from opentracing.scope_managers.gevent import GeventScopeManager + +import ddtrace +from ddtrace.contrib.gevent import patch, unpatch +from ddtrace.opentracer.utils import get_context_provider_for_scope_manager + + +@pytest.fixture() +def ot_tracer(ot_tracer_factory): + """Fixture providing an opentracer configured for gevent usage.""" + # patch gevent + patch() + yield ot_tracer_factory( + 'gevent_svc', {}, GeventScopeManager(), ddtrace.contrib.gevent.context_provider + ) + # unpatch gevent + unpatch() + + +class TestTracerGevent(object): + """Converted Gevent tests for the regular tracer. + + Ensures that greenlets are properly traced when using + the opentracer. + """ + + def test_no_threading(self, ot_tracer): + with ot_tracer.start_span('span') as span: + span.set_tag('tag', 'value') + + assert span.finished + + def test_greenlets(self, ot_tracer, writer): + def f(): + with ot_tracer.start_span('f') as span: + gevent.sleep(0.04) + span.set_tag('f', 'yes') + + def g(): + with ot_tracer.start_span('g') as span: + gevent.sleep(0.03) + span.set_tag('g', 'yes') + + with ot_tracer.start_span('root'): + gevent.joinall([gevent.spawn(f), gevent.spawn(g)]) + + traces = writer.pop_traces() + assert len(traces) == 3 + + def test_trace_greenlet(self, ot_tracer, writer): + # a greenlet can be traced using the trace API + def greenlet(): + with ot_tracer.start_span('greenlet'): + pass + + gevent.spawn(greenlet).join() + traces = writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 1 + assert traces[0][0].name == 'greenlet' + + def test_trace_later_greenlet(self, ot_tracer, writer): + # a greenlet can be traced using the trace API + def greenlet(): + with ot_tracer.start_span('greenlet'): + pass + + gevent.spawn_later(0.01, greenlet).join() + traces = writer.pop_traces() + + assert len(traces) == 1 + assert len(traces[0]) == 1 + assert traces[0][0].name == 'greenlet' + + def test_trace_concurrent_calls(self, ot_tracer, writer): + # create multiple futures so that we expect multiple + # traces instead of a single one + def greenlet(): + with ot_tracer.start_span('greenlet'): + gevent.sleep(0.01) + + jobs = [gevent.spawn(greenlet) for x in range(100)] + gevent.joinall(jobs) + + traces = writer.pop_traces() + + assert len(traces) == 100 + assert len(traces[0]) == 1 + assert traces[0][0].name == 'greenlet' + + def test_trace_concurrent_spawn_later_calls(self, ot_tracer, writer): + # create multiple futures so that we expect multiple + # traces instead of a single one, even if greenlets + # are delayed + def greenlet(): + with ot_tracer.start_span('greenlet'): + gevent.sleep(0.01) + + jobs = [gevent.spawn_later(0.01, greenlet) for x in range(100)] + gevent.joinall(jobs) + + traces = writer.pop_traces() + assert 
len(traces) == 100
+        assert len(traces[0]) == 1
+        assert traces[0][0].name == 'greenlet'
+
+
+class TestTracerGeventCompatibility(object):
+    """Ensure the opentracer works in tandem with the ddtracer and gevent."""
+
+    def test_trace_spawn_multiple_greenlets_multiple_traces_ot_parent(
+        self, ot_tracer, dd_tracer, writer
+    ):
+        """
+        Copy of gevent test with the same name but testing with mixed usage of
+        the opentracer and datadog tracers.
+
+        Uses an opentracer span as the parent span.
+        """
+        # multiple greenlets must be part of the same trace
+        def entrypoint():
+            with ot_tracer.start_active_span('greenlet.main'):
+                jobs = [gevent.spawn(green_1), gevent.spawn(green_2)]
+                gevent.joinall(jobs)
+
+        def green_1():
+            with dd_tracer.trace('greenlet.worker') as span:
+                span.set_tag('worker_id', '1')
+                gevent.sleep(0.01)
+
+        def green_2():
+            with ot_tracer.start_span('greenlet.worker') as span:
+                span.set_tag('worker_id', '2')
+                gevent.sleep(0.01)
+
+        gevent.spawn(entrypoint).join()
+        traces = writer.pop_traces()
+        assert len(traces) == 3
+        assert len(traces[0]) == 1
+        parent_span = traces[2][0]
+        worker_1 = traces[0][0]
+        worker_2 = traces[1][0]
+        # check spans data and hierarchy
+        assert parent_span.name == 'greenlet.main'
+        assert worker_1.get_tag('worker_id') == '1'
+        assert worker_1.name == 'greenlet.worker'
+        assert worker_1.resource == 'greenlet.worker'
+        assert worker_1.parent_id == parent_span.span_id
+        assert worker_2.get_tag('worker_id') == '2'
+        assert worker_2.name == 'greenlet.worker'
+        assert worker_2.resource == 'greenlet.worker'
+        assert worker_2.parent_id == parent_span.span_id
+
+    def test_trace_spawn_multiple_greenlets_multiple_traces_dd_parent(
+        self, ot_tracer, dd_tracer, writer
+    ):
+        """
+        Copy of gevent test with the same name but testing with mixed usage of
+        the opentracer and datadog tracers.
+
+        Uses a datadog tracer span as the parent span.
+        """
+        # multiple greenlets must be part of the same trace
+        def entrypoint():
+            with dd_tracer.trace('greenlet.main'):
+                jobs = [gevent.spawn(green_1), gevent.spawn(green_2)]
+                gevent.joinall(jobs)
+
+        def green_1():
+            with ot_tracer.start_span('greenlet.worker') as span:
+                span.set_tag('worker_id', '1')
+                gevent.sleep(0.01)
+
+        def green_2():
+            with dd_tracer.trace('greenlet.worker') as span:
+                span.set_tag('worker_id', '2')
+                gevent.sleep(0.01)
+
+        gevent.spawn(entrypoint).join()
+        traces = writer.pop_traces()
+        assert len(traces) == 3
+        assert len(traces[0]) == 1
+        parent_span = traces[2][0]
+        worker_1 = traces[0][0]
+        worker_2 = traces[1][0]
+        # check spans data and hierarchy
+        assert parent_span.name == 'greenlet.main'
+        assert worker_1.get_tag('worker_id') == '1'
+        assert worker_1.name == 'greenlet.worker'
+        assert worker_1.resource == 'greenlet.worker'
+        assert worker_1.parent_id == parent_span.span_id
+        assert worker_2.get_tag('worker_id') == '2'
+        assert worker_2.name == 'greenlet.worker'
+        assert worker_2.resource == 'greenlet.worker'
+        assert worker_2.parent_id == parent_span.span_id
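+
+    # In both tests above the worker greenlets finish before the entrypoint
+    # span closes, so the two worker traces are flushed first and the parent
+    # trace arrives last; hence parent_span is read from traces[2].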
+ """ + + def test_get_context_provider_for_scope_manager_asyncio(self): + scope_manager = GeventScopeManager() + ctx_prov = get_context_provider_for_scope_manager(scope_manager) + assert isinstance( + ctx_prov, ddtrace.contrib.gevent.provider.GeventContextProvider + ) + + def test_tracer_context_provider_config(self): + tracer = ddtrace.opentracer.Tracer('mysvc', scope_manager=GeventScopeManager()) + assert isinstance( + tracer._dd_tracer.context_provider, + ddtrace.contrib.gevent.provider.GeventContextProvider, + ) diff --git a/tests/opentracer/test_tracer_tornado.py b/tests/opentracer/test_tracer_tornado.py new file mode 100644 index 0000000000..86f59bac3f --- /dev/null +++ b/tests/opentracer/test_tracer_tornado.py @@ -0,0 +1,30 @@ +import pytest +from opentracing.scope_managers.tornado import TornadoScopeManager + + +@pytest.fixture() +def ot_tracer(ot_tracer_factory): + """Fixture providing an opentracer configured for tornado usage.""" + yield ot_tracer_factory('tornado_svc', {}, TornadoScopeManager()) + + +class TestTracerTornado(object): + """ + Since the ScopeManager is provided by OpenTracing we should simply test + whether it exists and works for a very simple use-case. + """ + + def test_sanity(self, ot_tracer, writer): + with ot_tracer.start_active_span('one'): + with ot_tracer.start_active_span('two'): + pass + + traces = writer.pop_traces() + assert len(traces) == 1 + assert len(traces[0]) == 2 + assert traces[0][0].name == 'one' + assert traces[0][1].name == 'two' + + # the parenting is correct + assert traces[0][0] == traces[0][1]._parent + assert traces[0][0].trace_id == traces[0][1].trace_id diff --git a/tests/opentracer/test_utils.py b/tests/opentracer/test_utils.py new file mode 100644 index 0000000000..d38c0e55cb --- /dev/null +++ b/tests/opentracer/test_utils.py @@ -0,0 +1,13 @@ +from opentracing.scope_managers import ThreadLocalScopeManager + +import ddtrace +from ddtrace.opentracer.utils import ( + get_context_provider_for_scope_manager, +) + + +class TestOpentracerUtils(object): + def test_get_context_provider_for_scope_manager_thread(self): + scope_manager = ThreadLocalScopeManager() + ctx_prov = get_context_provider_for_scope_manager(scope_manager) + assert isinstance(ctx_prov, ddtrace.provider.DefaultContextProvider) diff --git a/tests/opentracer/utils.py b/tests/opentracer/utils.py new file mode 100644 index 0000000000..884f240666 --- /dev/null +++ b/tests/opentracer/utils.py @@ -0,0 +1,14 @@ +from ddtrace.opentracer import Tracer + + +def init_tracer(service_name, dd_tracer, scope_manager=None): + """A method that emulates what a user of OpenTracing would call to + initialize a Datadog opentracer. + + It accepts a Datadog tracer that should be the same one used for testing. 
+ """ + writer = dd_tracer.writer + ot_tracer = Tracer(service_name, dd_tracer=dd_tracer, scope_manager=scope_manager) + dd_tracer.writer = writer + ot_tracer._dd_tracer = dd_tracer + return ot_tracer diff --git a/tests/propagation/__init__.py b/tests/propagation/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/propagation/test_http.py b/tests/propagation/test_http.py new file mode 100644 index 0000000000..267954fd08 --- /dev/null +++ b/tests/propagation/test_http.py @@ -0,0 +1,73 @@ +from unittest import TestCase +from tests.test_tracer import get_dummy_tracer + +from ddtrace.propagation.http import ( + HTTPPropagator, + HTTP_HEADER_TRACE_ID, + HTTP_HEADER_PARENT_ID, + HTTP_HEADER_SAMPLING_PRIORITY, + HTTP_HEADER_ORIGIN, +) + + +class TestHttpPropagation(TestCase): + """ + Tests related to the ``Context`` class that hosts the trace for the + current execution flow. + """ + + def test_inject(self): + tracer = get_dummy_tracer() + + with tracer.trace("global_root_span") as span: + span.context.sampling_priority = 2 + span.context._dd_origin = "synthetics" + headers = {} + propagator = HTTPPropagator() + propagator.inject(span.context, headers) + + assert int(headers[HTTP_HEADER_TRACE_ID]) == span.trace_id + assert int(headers[HTTP_HEADER_PARENT_ID]) == span.span_id + assert int(headers[HTTP_HEADER_SAMPLING_PRIORITY]) == span.context.sampling_priority + assert headers[HTTP_HEADER_ORIGIN] == span.context._dd_origin + + def test_extract(self): + tracer = get_dummy_tracer() + + headers = { + "x-datadog-trace-id": "1234", + "x-datadog-parent-id": "5678", + "x-datadog-sampling-priority": "1", + "x-datadog-origin": "synthetics", + } + + propagator = HTTPPropagator() + context = propagator.extract(headers) + tracer.context_provider.activate(context) + + with tracer.trace("local_root_span") as span: + assert span.trace_id == 1234 + assert span.parent_id == 5678 + assert span.context.sampling_priority == 1 + assert span.context._dd_origin == "synthetics" + + def test_WSGI_extract(self): + """Ensure we support the WSGI formatted headers as well.""" + tracer = get_dummy_tracer() + + headers = { + "HTTP_X_DATADOG_TRACE_ID": "1234", + "HTTP_X_DATADOG_PARENT_ID": "5678", + "HTTP_X_DATADOG_SAMPLING_PRIORITY": "1", + "HTTP_X_DATADOG_ORIGIN": "synthetics", + } + + propagator = HTTPPropagator() + context = propagator.extract(headers) + tracer.context_provider.activate(context) + + with tracer.trace("local_root_span") as span: + assert span.trace_id == 1234 + assert span.parent_id == 5678 + assert span.context.sampling_priority == 1 + assert span.context._dd_origin == "synthetics" diff --git a/tests/propagation/test_utils.py b/tests/propagation/test_utils.py new file mode 100644 index 0000000000..8b80e5a5d6 --- /dev/null +++ b/tests/propagation/test_utils.py @@ -0,0 +1,6 @@ +from ddtrace.propagation.utils import get_wsgi_header + + +class TestPropagationUtils(object): + def test_get_wsgi_header(self): + assert get_wsgi_header('x-datadog-trace-id') == 'HTTP_X_DATADOG_TRACE_ID' diff --git a/tests/subprocesstest.py b/tests/subprocesstest.py new file mode 100644 index 0000000000..7093a5dcbd --- /dev/null +++ b/tests/subprocesstest.py @@ -0,0 +1,131 @@ +""" +subprocesstest enables unittest test cases and suites to be run in separate +python interpreter instances. + +A base class SubprocessTestCase is provided that, when extended, will run test +cases marked with @run_in_subprocess in a separate python interpreter. 
+""" +import os +import subprocess +import sys +import unittest + + +SUBPROC_TEST_ATTR = '_subproc_test' +SUBPROC_ENV_VAR = 'SUBPROCESS_TEST' + + +def run_in_subprocess(obj): + """ + Marks a test case that is to be run in its own 'clean' interpreter instance. + + When applied to a TestCase class, each method will be run in a separate + interpreter instance. + + Usage on a class:: + + from tests.subprocesstest import SubprocessTestCase, run_in_subprocess + + @run_in_subprocess + class PatchTests(SubprocessTestCase): + # will be run in new interpreter + def test_patch_before_import(self): + patch() + import module + + # will be run in new interpreter as well + def test_patch_after_import(self): + import module + patch() + + + Usage on a test method:: + + class OtherTests(SubprocessTestCase): + @run_in_subprocess + def test_case(self): + pass + + + :param obj: method or class to run in a separate python interpreter. + :return: + """ + setattr(obj, SUBPROC_TEST_ATTR, True) + return obj + + +class SubprocessTestCase(unittest.TestCase): + def _full_method_name(self): + test = getattr(self, self._testMethodName) + # DEV: we have to use the internal self reference of the bound test + # method to pull out the class and module since using a mix of `self` + # and the test attributes will result in inconsistencies when the test + # method is defined on another class. + # A concrete case of this is a parent and child TestCase where the child + # doesn't override a parent test method. The full_method_name we want + # is that of the child test method (even though it exists on the parent) + modpath = test.__self__.__class__.__module__ + clsname = test.__self__.__class__.__name__ + testname = test.__name__ + testcase_name = '{}.{}.{}'.format(modpath, clsname, testname) + return testcase_name + + def _run_test_in_subprocess(self, result): + full_testcase_name = self._full_method_name() + + # copy the environment and include the special subprocess environment + # variable for the subprocess to detect + sp_test_env = os.environ.copy() + sp_test_env[SUBPROC_ENV_VAR] = 'True' + sp_test_cmd = ['python', '-m', 'unittest', full_testcase_name] + sp = subprocess.Popen( + sp_test_cmd, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + env=sp_test_env, + ) + stdout, stderr = sp.communicate() + + if sp.returncode: + try: + cmdf = ' '.join(sp_test_cmd) + raise Exception('Subprocess Test "{}" Failed'.format(cmdf)) + except Exception: + exc_info = sys.exc_info() + + # DEV: stderr, stdout are byte sequences so to print them nicely + # back out they should be decoded. + sys.stderr.write(stderr.decode()) + sys.stdout.write(stdout.decode()) + result.addFailure(self, exc_info) + else: + result.addSuccess(self) + + def _in_subprocess(self): + """Determines if the test is being run in a subprocess. + + This is done by checking for an environment variable that we call the + subprocess test with. 
+ + :return: whether the test is a subprocess test + """ + return os.getenv(SUBPROC_ENV_VAR, None) is not None + + def _is_subprocess_test(self): + if hasattr(self, SUBPROC_TEST_ATTR): + return True + + test = getattr(self, self._testMethodName) + if hasattr(test, SUBPROC_TEST_ATTR): + return True + + return False + + def run(self, result=None): + if not self._is_subprocess_test(): + return super(SubprocessTestCase, self).run(result=result) + + if self._in_subprocess(): + return super(SubprocessTestCase, self).run(result=result) + else: + self._run_test_in_subprocess(result) diff --git a/tests/test_api.py b/tests/test_api.py new file mode 100644 index 0000000000..51d3a1b570 --- /dev/null +++ b/tests/test_api.py @@ -0,0 +1,286 @@ +import mock +import re +import socket +import threading +import time +import warnings + +from unittest import TestCase + +import pytest + +from ddtrace.api import API, Response +from ddtrace.compat import iteritems, httplib, PY3 +from ddtrace.internal.runtime.container import CGroupInfo +from ddtrace.vendor.six.moves import BaseHTTPServer, socketserver + + +class _BaseHTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): + error_message_format = '%(message)s\n' + error_content_type = 'text/plain' + + @staticmethod + def log_message(format, *args): # noqa: A002 + pass + + +class _APIEndpointRequestHandlerTest(_BaseHTTPRequestHandler): + + def do_PUT(self): + self.send_error(200, 'OK') + + +class _TimeoutAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler): + def do_PUT(self): + # This server sleeps longer than our timeout + time.sleep(5) + + +class _ResetAPIEndpointRequestHandlerTest(_BaseHTTPRequestHandler): + + def do_PUT(self): + return + + +_HOST = '0.0.0.0' +_TIMEOUT_PORT = 8743 +_RESET_PORT = _TIMEOUT_PORT + 1 + + +class UDSHTTPServer(socketserver.UnixStreamServer, BaseHTTPServer.HTTPServer): + def server_bind(self): + BaseHTTPServer.HTTPServer.server_bind(self) + + +def _make_uds_server(path, request_handler): + server = UDSHTTPServer(path, request_handler) + t = threading.Thread(target=server.serve_forever) + # Set daemon just in case something fails + t.daemon = True + t.start() + return server, t + + +@pytest.fixture +def endpoint_uds_server(tmp_path): + server, thread = _make_uds_server(str(tmp_path / 'uds_server_socket'), _APIEndpointRequestHandlerTest) + try: + yield server + finally: + server.shutdown() + thread.join() + + +def _make_server(port, request_handler): + server = BaseHTTPServer.HTTPServer((_HOST, port), request_handler) + t = threading.Thread(target=server.serve_forever) + # Set daemon just in case something fails + t.daemon = True + t.start() + return server, t + + +@pytest.fixture(scope='module') +def endpoint_test_timeout_server(): + server, thread = _make_server(_TIMEOUT_PORT, _TimeoutAPIEndpointRequestHandlerTest) + try: + yield thread + finally: + server.shutdown() + thread.join() + + +@pytest.fixture(scope='module') +def endpoint_test_reset_server(): + server, thread = _make_server(_RESET_PORT, _ResetAPIEndpointRequestHandlerTest) + try: + yield thread + finally: + server.shutdown() + thread.join() + + +class ResponseMock: + def __init__(self, content, status=200): + self.status = status + self.content = content + + def read(self): + return self.content + + +def test_api_str(): + api = API('localhost', 8126, https=True) + assert str(api) == 'https://localhost:8126' + api = API('localhost', 8126, '/path/to/uds') + assert str(api) == 'unix:///path/to/uds' + + +class APITests(TestCase): + + def setUp(self): + # DEV: Mock here 
instead of in tests, before we have patched `httplib.HTTPConnection` + self.conn = mock.MagicMock(spec=httplib.HTTPConnection) + self.api = API('localhost', 8126) + + def tearDown(self): + del self.api + del self.conn + + def test_typecast_port(self): + api = API('localhost', u'8126') + self.assertEqual(api.port, 8126) + + @mock.patch('logging.Logger.debug') + def test_parse_response_json(self, log): + test_cases = { + 'OK': dict( + js=None, + log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date', + ), + 'OK\n': dict( + js=None, + log='Cannot parse Datadog Agent response, please make sure your Datadog Agent is up to date', + ), + 'error:unsupported-endpoint': dict( + js=None, + log='Unable to parse Datadog Agent JSON response: \'error:unsupported-endpoint\'', + ), + 42: dict( # int as key to trigger TypeError + js=None, + log='Unable to parse Datadog Agent JSON response: 42', + ), + '{}': dict(js={}), + '[]': dict(js=[]), + + # Priority sampling "rate_by_service" response + ('{"rate_by_service": ' + '{"service:,env:":0.5, "service:mcnulty,env:test":0.9, "service:postgres,env:test":0.6}}'): dict( + js=dict( + rate_by_service={ + 'service:,env:': 0.5, + 'service:mcnulty,env:test': 0.9, + 'service:postgres,env:test': 0.6, + }, + ), + ), + ' [4,2,1] ': dict(js=[4, 2, 1]), + } + + for k, v in iteritems(test_cases): + log.reset_mock() + + r = Response.from_http_response(ResponseMock(k)) + js = r.get_json() + assert v['js'] == js + if 'log' in v: + log.assert_called_once() + msg = log.call_args[0][0] % log.call_args[0][1:] + assert re.match(v['log'], msg), msg + + @mock.patch('ddtrace.compat.httplib.HTTPConnection') + def test_put_connection_close(self, HTTPConnection): + """ + When calling API._put + we close the HTTPConnection we create + """ + HTTPConnection.return_value = self.conn + + with warnings.catch_warnings(record=True) as w: + self.api._put('/test', '', 1) + + self.assertEqual(len(w), 0, 'Test raised unexpected warnings: {0!r}'.format(w)) + + self.conn.request.assert_called_once() + self.conn.close.assert_called_once() + + @mock.patch('ddtrace.compat.httplib.HTTPConnection') + def test_put_connection_close_exception(self, HTTPConnection): + """ + When calling API._put raises an exception + we close the HTTPConnection we create + """ + HTTPConnection.return_value = self.conn + # Ensure calling `request` raises an exception + self.conn.request.side_effect = Exception + + with warnings.catch_warnings(record=True) as w: + with self.assertRaises(Exception): + self.api._put('/test', '', 1) + + self.assertEqual(len(w), 0, 'Test raised unexpected warnings: {0!r}'.format(w)) + + self.conn.request.assert_called_once() + self.conn.close.assert_called_once() + + +def test_https(): + conn = mock.MagicMock(spec=httplib.HTTPSConnection) + api = API('localhost', 8126, https=True) + with mock.patch('ddtrace.compat.httplib.HTTPSConnection') as HTTPSConnection: + HTTPSConnection.return_value = conn + api._put('/test', '', 1) + conn.request.assert_called_once() + conn.close.assert_called_once() + + +def test_flush_connection_timeout_connect(): + payload = mock.Mock() + payload.get_payload.return_value = 'foobar' + payload.length = 12 + api = API(_HOST, 2019) + response = api._flush(payload) + if PY3: + assert isinstance(response, (OSError, ConnectionRefusedError)) # noqa: F821 + else: + assert isinstance(response, socket.error) + assert response.errno in (99, 111) + + +def test_flush_connection_timeout(endpoint_test_timeout_server): + payload = mock.Mock() + 
payload.get_payload.return_value = 'foobar' + payload.length = 12 + api = API(_HOST, _TIMEOUT_PORT) + response = api._flush(payload) + assert isinstance(response, socket.timeout) + + +def test_flush_connection_reset(endpoint_test_reset_server): + payload = mock.Mock() + payload.get_payload.return_value = 'foobar' + payload.length = 12 + api = API(_HOST, _RESET_PORT) + response = api._flush(payload) + if PY3: + assert isinstance(response, (httplib.BadStatusLine, ConnectionResetError)) # noqa: F821 + else: + assert isinstance(response, httplib.BadStatusLine) + + +def test_flush_connection_uds(endpoint_uds_server): + payload = mock.Mock() + payload.get_payload.return_value = 'foobar' + payload.length = 12 + api = API(_HOST, 2019, uds_path=endpoint_uds_server.server_address) + response = api._flush(payload) + assert response.status == 200 + + +@mock.patch('ddtrace.internal.runtime.container.get_container_info') +def test_api_container_info(get_container_info): + # When we have container information + # DEV: `get_container_info` will return a `CGroupInfo` with a `container_id` or `None` + info = CGroupInfo(container_id='test-container-id') + get_container_info.return_value = info + + api = API(_HOST, 8126) + assert api._container_info is info + assert api._headers['Datadog-Container-Id'] == 'test-container-id' + + # When we do not have container information + get_container_info.return_value = None + + api = API(_HOST, 8126) + assert api._container_info is None + assert 'Datadog-Container-Id' not in api._headers diff --git a/tests/test_compat.py b/tests/test_compat.py new file mode 100644 index 0000000000..1bd3ec5fad --- /dev/null +++ b/tests/test_compat.py @@ -0,0 +1,116 @@ +# -*- coding: utf-8 -*- +# Define source file encoding to support raw unicode characters in Python 2 +import sys + +# Third party +import pytest + +# Project +from ddtrace.compat import to_unicode, PY3, reraise, get_connection_response, is_integer + + +if PY3: + unicode = str + + +class TestCompat(object): + + def test_to_unicode_string(self): + # Calling `compat.to_unicode` on a non-unicode string + res = to_unicode(b'test') + assert type(res) == unicode + assert res == 'test' + + def test_to_unicode_unicode_encoded(self): + # Calling `compat.to_unicode` on a unicode encoded string + res = to_unicode(b'\xc3\xbf') + assert type(res) == unicode + assert res == u'ÿ' + + def test_to_unicode_unicode_double_decode(self): + # Calling `compat.to_unicode` on a unicode decoded string + # This represents the double-decode issue, which can cause a `UnicodeEncodeError` + # `'\xc3\xbf'.decode('utf-8').decode('utf-8')` + res = to_unicode(b'\xc3\xbf'.decode('utf-8')) + assert type(res) == unicode + assert res == u'ÿ' + + def test_to_unicode_unicode_string(self): + # Calling `compat.to_unicode` on a unicode string + res = to_unicode(u'ÿ') + assert type(res) == unicode + assert res == u'ÿ' + + def test_to_unicode_bytearray(self): + # Calling `compat.to_unicode` with a `bytearray` containing unicode + res = to_unicode(bytearray(b'\xc3\xbf')) + assert type(res) == unicode + assert res == u'ÿ' + + def test_to_unicode_bytearray_double_decode(self): + # Calling `compat.to_unicode` with an already decoded `bytearray` + # This represents the double-decode issue, which can cause a `UnicodeEncodeError` + # `bytearray('\xc3\xbf').decode('utf-8').decode('utf-8')` + res = to_unicode(bytearray(b'\xc3\xbf').decode('utf-8')) + assert type(res) == unicode + assert res == u'ÿ' + + def test_to_unicode_non_string(self): + # Calling `compat.to_unicode` on 
non-string types + assert to_unicode(1) == u'1' + assert to_unicode(True) == u'True' + assert to_unicode(None) == u'None' + assert to_unicode(dict(key='value')) == u'{\'key\': \'value\'}' + + def test_get_connection_response(self): + """Ensure that buffering is in kwargs.""" + + class MockConn(object): + def getresponse(self, *args, **kwargs): + if PY3: + assert 'buffering' not in kwargs + else: + assert 'buffering' in kwargs + + mock = MockConn() + get_connection_response(mock) + + +class TestPy2Py3Compat(object): + """Common tests to ensure functions are both Python 2 and + Python 3 compatible. + """ + def test_reraise(self): + # ensure the `raise` function is Python 2/3 compatible + with pytest.raises(Exception) as ex: + try: + raise Exception('Ouch!') + except Exception: + # original exception we want to re-raise + (typ, val, tb) = sys.exc_info() + try: + # this exception doesn't allow a re-raise, and we need + # to use the previous one collected via `exc_info()` + raise Exception('Obfuscate!') + except Exception: + pass + # this call must be Python 2 and 3 compatible + raise reraise(typ, val, tb) + assert ex.value.args[0] == 'Ouch!' + + +@pytest.mark.parametrize('obj,expected', [ + (1, True), + (-1, True), + (0, True), + (1.0, False), + (-1.0, False), + (True, False), + (False, False), + (dict(), False), + ([], False), + (tuple(), False), + (object(), False), +]) +def test_is_integer(obj, expected): + assert is_integer(obj) is expected diff --git a/tests/test_context.py b/tests/test_context.py new file mode 100644 index 0000000000..3664d440bd --- /dev/null +++ b/tests/test_context.py @@ -0,0 +1,449 @@ +import contextlib +import logging +import mock +import threading + +from .base import BaseTestCase +from tests.test_tracer import get_dummy_tracer + +import pytest + +from ddtrace.span import Span +from ddtrace.context import Context +from ddtrace.constants import HOSTNAME_KEY +from ddtrace.ext.priority import USER_REJECT, AUTO_REJECT, AUTO_KEEP, USER_KEEP + + +@pytest.fixture +def tracer_with_debug_logging(): + # All the tracers, dummy or not, share the same logging object. + tracer = get_dummy_tracer() + level = tracer.log.level + tracer.log.setLevel(logging.DEBUG) + try: + yield tracer + finally: + tracer.log.setLevel(level) + + +@mock.patch('logging.Logger.debug') +def test_log_unfinished_spans(log, tracer_with_debug_logging): + # when the root parent is finished, notify if there are spans still pending + tracer = tracer_with_debug_logging + ctx = Context() + # manually create a root-child trace + root = Span(tracer=tracer, name='root') + child_1 = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id) + child_2 = Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id) + child_1._parent = root + child_2._parent = root + ctx.add_span(root) + ctx.add_span(child_1) + ctx.add_span(child_2) + # close only the parent + root.finish() + unfinished_spans_log = log.call_args_list[-3][0][2] + child_1_log = log.call_args_list[-2][0][1] + child_2_log = log.call_args_list[-1][0][1] + assert 2 == unfinished_spans_log + assert 'name child_1' in child_1_log + assert 'name child_2' in child_2_log + assert 'duration 0.000000s' in child_1_log + assert 'duration 0.000000s' in child_2_log + + +class TestTracingContext(BaseTestCase): + """ + Tests related to the ``Context`` class that hosts the trace for the + current execution flow. 
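+ + A typical lifecycle, as exercised by the tests below:: + + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + span.finish() + trace, sampled = ctx.get()  # returns the finished trace exactly once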
+ """ + @contextlib.contextmanager + def override_partial_flush(self, ctx, enabled, min_spans): + original_enabled = ctx._partial_flush_enabled + original_min_spans = ctx._partial_flush_min_spans + + ctx._partial_flush_enabled = enabled + ctx._partial_flush_min_spans = min_spans + + try: + yield + finally: + ctx._partial_flush_enabled = original_enabled + ctx._partial_flush_min_spans = original_min_spans + + def test_add_span(self): + # it should add multiple spans + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + assert 1 == len(ctx._trace) + assert 'fake_span' == ctx._trace[0].name + assert ctx == span.context + + def test_context_sampled(self): + # a context is sampled if the spans are sampled + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + span.finish() + trace, sampled = ctx.get() + assert sampled is True + assert ctx.sampling_priority is None + + def test_context_priority(self): + # a context is sampled if the spans are sampled + ctx = Context() + for priority in [USER_REJECT, AUTO_REJECT, AUTO_KEEP, USER_KEEP, None, 999]: + ctx.sampling_priority = priority + span = Span(tracer=None, name=('fake_span_%s' % repr(priority))) + ctx.add_span(span) + span.finish() + # It's "normal" to have sampled be true even when priority sampling is + # set to 0 or -1. It would stay false even even with priority set to 2. + # The only criteria to send (or not) the spans to the agent should be + # this "sampled" attribute, as it's tightly related to the trace weight. + assert priority == ctx.sampling_priority + trace, sampled = ctx.get() + assert sampled is True, 'priority has no impact on sampled status' + + def test_current_span(self): + # it should return the current active span + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + assert span == ctx.get_current_span() + + def test_current_root_span_none(self): + # it should return none when there is no root span + ctx = Context() + assert ctx.get_current_root_span() is None + + def test_current_root_span(self): + # it should return the current active root span + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + assert span == ctx.get_current_root_span() + + def test_close_span(self): + # it should keep track of closed spans, moving + # the current active to it's parent + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + ctx.close_span(span) + assert ctx.get_current_span() is None + + def test_get_trace(self): + # it should return the internal trace structure + # if the context is finished + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + span.finish() + trace, sampled = ctx.get() + assert [span] == trace + assert sampled is True + # the context should be empty + assert 0 == len(ctx._trace) + assert ctx._current_span is None + + def test_get_trace_empty(self): + # it should return None if the Context is not finished + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + trace, sampled = ctx.get() + assert trace is None + assert sampled is None + + @mock.patch('ddtrace.internal.hostname.get_hostname') + def test_get_report_hostname_enabled(self, get_hostname): + get_hostname.return_value = 'test-hostname' + + with self.override_global_config(dict(report_hostname=True)): + # Create a context and add a span and finish it + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + 
span.finish() + + # Assert that we have not added the tag to the span yet + assert span.get_tag(HOSTNAME_KEY) is None + + # Assert that retrieving the trace sets the tag + trace, _ = ctx.get() + assert trace[0].get_tag(HOSTNAME_KEY) == 'test-hostname' + assert span.get_tag(HOSTNAME_KEY) == 'test-hostname' + + @mock.patch('ddtrace.internal.hostname.get_hostname') + def test_get_report_hostname_disabled(self, get_hostname): + get_hostname.return_value = 'test-hostname' + + with self.override_global_config(dict(report_hostname=False)): + # Create a context and add a span and finish it + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + span.finish() + + # Assert that we have not added the tag to the span yet + assert span.get_tag(HOSTNAME_KEY) is None + + # Assert that retrieving the trace does not set the tag + trace, _ = ctx.get() + assert trace[0].get_tag(HOSTNAME_KEY) is None + assert span.get_tag(HOSTNAME_KEY) is None + + @mock.patch('ddtrace.internal.hostname.get_hostname') + def test_get_report_hostname_default(self, get_hostname): + get_hostname.return_value = 'test-hostname' + + # Create a context and add a span and finish it + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + span.finish() + + # Assert that we have not added the tag to the span yet + assert span.get_tag(HOSTNAME_KEY) is None + + # Assert that retrieving the trace does not set the tag + trace, _ = ctx.get() + assert trace[0].get_tag(HOSTNAME_KEY) is None + assert span.get_tag(HOSTNAME_KEY) is None + + def test_partial_flush(self): + """ + When calling `Context.get` + When partial flushing is enabled + When we have just enough finished spans to flush + We return the finished spans + """ + tracer = get_dummy_tracer() + ctx = Context() + + # Create a root span with 5 children, all of the children are finished, the root is not + root = Span(tracer=tracer, name='root') + ctx.add_span(root) + for i in range(5): + child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id) + child._parent = root + child.finished = True + ctx.add_span(child) + ctx.close_span(child) + + with self.override_partial_flush(ctx, enabled=True, min_spans=5): + trace, sampled = ctx.get() + + self.assertIsNotNone(trace) + self.assertIsNotNone(sampled) + + self.assertEqual(len(trace), 5) + self.assertEqual( + set(['child_0', 'child_1', 'child_2', 'child_3', 'child_4']), + set([span.name for span in trace]) + ) + + # Ensure we clear/reset internal stats as expected + self.assertEqual(ctx._trace, [root]) + with self.override_partial_flush(ctx, enabled=True, min_spans=5): + trace, sampled = ctx.get() + self.assertIsNone(trace) + self.assertIsNone(sampled) + + def test_partial_flush_too_many(self): + """ + When calling `Context.get` + When partial flushing is enabled + When we have more than the minimum number of spans needed to flush + We return the finished spans + """ + tracer = get_dummy_tracer() + ctx = Context() + + # Create a root span with 5 children, all of the children are finished, the root is not + root = Span(tracer=tracer, name='root') + ctx.add_span(root) + for i in range(5): + child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id) + child._parent = root + child.finished = True + ctx.add_span(child) + ctx.close_span(child) + + with self.override_partial_flush(ctx, enabled=True, min_spans=1): + trace, sampled = ctx.get() + + self.assertIsNotNone(trace) + self.assertIsNotNone(sampled) + 
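+ # DEV: min_spans=1 is well below the 5 finished children, so all of + # them are flushed while the still-open root is kept back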
+ self.assertEqual(len(trace), 5) + self.assertEqual( + set(['child_0', 'child_1', 'child_2', 'child_3', 'child_4']), + set([span.name for span in trace]) + ) + + # Ensure we clear/reset internal stats as expected + self.assertEqual(ctx._trace, [root]) + with self.override_partial_flush(ctx, enabled=True, min_spans=5): + trace, sampled = ctx.get() + self.assertIsNone(trace) + self.assertIsNone(sampled) + + def test_partial_flush_too_few(self): + """ + When calling `Context.get` + When partial flushing is enabled + When we do not have enough finished spans to flush + We return no spans + """ + tracer = get_dummy_tracer() + ctx = Context() + + # Create a root span with 5 children, all of the children are finished, the root is not + root = Span(tracer=tracer, name='root') + ctx.add_span(root) + for i in range(5): + child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id) + child._parent = root + child.finished = True + ctx.add_span(child) + ctx.close_span(child) + + # Test with having 1 too few spans for partial flush + with self.override_partial_flush(ctx, enabled=True, min_spans=6): + trace, sampled = ctx.get() + + self.assertIsNone(trace) + self.assertIsNone(sampled) + + self.assertEqual(len(ctx._trace), 6) + self.assertEqual( + set(['root', 'child_0', 'child_1', 'child_2', 'child_3', 'child_4']), + set([span.name for span in ctx._trace]) + ) + + def test_partial_flush_remaining(self): + """ + When calling `Context.get` + When partial flushing is enabled + When we have some unfinished spans + We keep the unfinished spans around + """ + tracer = get_dummy_tracer() + ctx = Context() + + # Create a root span with 10 children; only the first 5 are finished, the root is not + root = Span(tracer=tracer, name='root') + ctx.add_span(root) + for i in range(10): + child = Span(tracer=tracer, name='child_{}'.format(i), trace_id=root.trace_id, parent_id=root.span_id) + child._parent = root + ctx.add_span(child) + + # Close the first 5 only + if i < 5: + child.finished = True + ctx.close_span(child) + + with self.override_partial_flush(ctx, enabled=True, min_spans=5): + trace, sampled = ctx.get() + + # Assert partially flushed spans + self.assertEqual(len(trace), 5) + self.assertIsNotNone(sampled) + self.assertEqual( + set(['child_0', 'child_1', 'child_2', 'child_3', 'child_4']), + set([span.name for span in trace]) + ) + + # Assert remaining unclosed spans + self.assertEqual(len(ctx._trace), 6) + self.assertEqual( + set(['root', 'child_5', 'child_6', 'child_7', 'child_8', 'child_9']), + set([span.name for span in ctx._trace]), + ) + + def test_finished(self): + # a Context is finished if all spans inside are finished + ctx = Context() + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + ctx.close_span(span) + + @mock.patch('logging.Logger.debug') + def test_log_unfinished_spans_disabled(self, log): + # the trace finished status logging is disabled + tracer = get_dummy_tracer() + ctx = Context() + # manually create a root-child trace + root = Span(tracer=tracer, name='root') + child_1 = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id) + child_2 = Span(tracer=tracer, name='child_2', trace_id=root.trace_id, parent_id=root.span_id) + child_1._parent = root + child_2._parent = root + ctx.add_span(root) + ctx.add_span(child_1) + ctx.add_span(child_2) + # close only the parent + root.finish() + # the logger has never been invoked to print unfinished spans + for call, _ in log.call_args_list: + msg = call[0] + 
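# DEV: call_args_list stores the raw format string, before + # %-interpolation, which is what the assertion below checks + 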
assert 'the trace has %d unfinished spans' not in msg + + @mock.patch('logging.Logger.debug') + def test_log_unfinished_spans_when_ok(self, log): + # if the unfinished spans logging is enabled but the trace is finished, don't log anything + tracer = get_dummy_tracer() + ctx = Context() + # manually create a root-child trace + root = Span(tracer=tracer, name='root') + child = Span(tracer=tracer, name='child_1', trace_id=root.trace_id, parent_id=root.span_id) + child._parent = root + ctx.add_span(root) + ctx.add_span(child) + # close the trace + child.finish() + root.finish() + # the logger has never been invoked to print unfinished spans + for call, _ in log.call_args_list: + msg = call[0] + assert 'the trace has %d unfinished spans' not in msg + + def test_thread_safe(self): + # the Context must be thread-safe + ctx = Context() + + def _fill_ctx(): + span = Span(tracer=None, name='fake_span') + ctx.add_span(span) + + threads = [threading.Thread(target=_fill_ctx) for _ in range(100)] + + for t in threads: + t.daemon = True + t.start() + + for t in threads: + t.join() + + assert 100 == len(ctx._trace) + + def test_clone(self): + ctx = Context() + ctx.sampling_priority = 2 + # manually create a root-child trace + root = Span(tracer=None, name='root') + child = Span(tracer=None, name='child_1', trace_id=root.trace_id, parent_id=root.span_id) + child._parent = root + ctx.add_span(root) + ctx.add_span(child) + cloned_ctx = ctx.clone() + assert cloned_ctx._parent_trace_id == ctx._parent_trace_id + assert cloned_ctx._parent_span_id == ctx._parent_span_id + assert cloned_ctx._sampling_priority == ctx._sampling_priority + assert cloned_ctx._dd_origin == ctx._dd_origin + assert cloned_ctx._current_span == ctx._current_span + assert cloned_ctx._trace == [] diff --git a/tests/test_encoders.py b/tests/test_encoders.py new file mode 100644 index 0000000000..682a550731 --- /dev/null +++ b/tests/test_encoders.py @@ -0,0 +1,136 @@ +import json + +from unittest import TestCase + +from ddtrace.span import Span +from ddtrace.compat import msgpack_type, string_type +from ddtrace.encoding import JSONEncoder, MsgpackEncoder +from ddtrace.vendor import msgpack + + +class TestEncoders(TestCase): + """ + Ensures that Encoders serialize the payload as expected. 
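+ + Each test round-trips the encoded payload (via ``json.loads`` or + ``msgpack.unpackb``) and verifies the decoded traces match the input.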
+ """ + def test_encode_traces_json(self): + # test encoding for JSON format + traces = [] + traces.append([ + Span(name='client.testing', tracer=None), + Span(name='client.testing', tracer=None), + ]) + traces.append([ + Span(name='client.testing', tracer=None), + Span(name='client.testing', tracer=None), + ]) + + encoder = JSONEncoder() + spans = encoder.encode_traces(traces) + items = json.loads(spans) + + # test the encoded output that should be a string + # and the output must be flatten + assert isinstance(spans, string_type) + assert len(items) == 2 + assert len(items[0]) == 2 + assert len(items[1]) == 2 + for i in range(2): + for j in range(2): + assert 'client.testing' == items[i][j]['name'] + + def test_join_encoded_json(self): + # test encoding for JSON format + traces = [] + traces.append([ + Span(name='client.testing', tracer=None), + Span(name='client.testing', tracer=None), + ]) + traces.append([ + Span(name='client.testing', tracer=None), + Span(name='client.testing', tracer=None), + ]) + + encoder = JSONEncoder() + + # Encode each trace on it's own + encoded_traces = [ + encoder.encode_trace(trace) + for trace in traces + ] + + # Join the encoded traces together + data = encoder.join_encoded(encoded_traces) + + # Parse the resulting data + items = json.loads(data) + + # test the encoded output that should be a string + # and the output must be flatten + assert isinstance(data, string_type) + assert len(items) == 2 + assert len(items[0]) == 2 + assert len(items[1]) == 2 + for i in range(2): + for j in range(2): + assert 'client.testing' == items[i][j]['name'] + + def test_encode_traces_msgpack(self): + # test encoding for MsgPack format + traces = [] + traces.append([ + Span(name='client.testing', tracer=None), + Span(name='client.testing', tracer=None), + ]) + traces.append([ + Span(name='client.testing', tracer=None), + Span(name='client.testing', tracer=None), + ]) + + encoder = MsgpackEncoder() + spans = encoder.encode_traces(traces) + items = msgpack.unpackb(spans) + + # test the encoded output that should be a string + # and the output must be flatten + assert isinstance(spans, msgpack_type) + assert len(items) == 2 + assert len(items[0]) == 2 + assert len(items[1]) == 2 + for i in range(2): + for j in range(2): + assert b'client.testing' == items[i][j][b'name'] + + def test_join_encoded_msgpack(self): + # test encoding for MsgPack format + traces = [] + traces.append([ + Span(name='client.testing', tracer=None), + Span(name='client.testing', tracer=None), + ]) + traces.append([ + Span(name='client.testing', tracer=None), + Span(name='client.testing', tracer=None), + ]) + + encoder = MsgpackEncoder() + + # Encode each individual trace on it's own + encoded_traces = [ + encoder.encode_trace(trace) + for trace in traces + ] + # Join the encoded traces together + data = encoder.join_encoded(encoded_traces) + + # Parse the encoded data + items = msgpack.unpackb(data) + + # test the encoded output that should be a string + # and the output must be flatten + assert isinstance(data, msgpack_type) + assert len(items) == 2 + assert len(items[0]) == 2 + assert len(items[1]) == 2 + for i in range(2): + for j in range(2): + assert b'client.testing' == items[i][j][b'name'] diff --git a/tests/test_filters.py b/tests/test_filters.py new file mode 100644 index 0000000000..d4baacc9b7 --- /dev/null +++ b/tests/test_filters.py @@ -0,0 +1,35 @@ +from unittest import TestCase + +from ddtrace.filters import FilterRequestsOnUrl +from ddtrace.span import Span +from ddtrace.ext.http import 
URL + + +class FilterRequestOnUrlTests(TestCase): + def test_is_match(self): + span = Span(name='Name', tracer=None) + span.set_tag(URL, r'http://example.com') + filtr = FilterRequestsOnUrl('http://examp.*.com') + trace = filtr.process_trace([span]) + self.assertIsNone(trace) + + def test_is_not_match(self): + span = Span(name='Name', tracer=None) + span.set_tag(URL, r'http://anotherexample.com') + filtr = FilterRequestsOnUrl('http://examp.*.com') + trace = filtr.process_trace([span]) + self.assertIsNotNone(trace) + + def test_list_match(self): + span = Span(name='Name', tracer=None) + span.set_tag(URL, r'http://anotherdomain.example.com') + filtr = FilterRequestsOnUrl([r'http://domain\.example\.com', r'http://anotherdomain\.example\.com']) + trace = filtr.process_trace([span]) + self.assertIsNone(trace) + + def test_list_no_match(self): + span = Span(name='Name', tracer=None) + span.set_tag(URL, r'http://cooldomain.example.com') + filtr = FilterRequestsOnUrl([r'http://domain\.example\.com', r'http://anotherdomain\.example\.com']) + trace = filtr.process_trace([span]) + self.assertIsNotNone(trace) diff --git a/tests/test_global_config.py b/tests/test_global_config.py new file mode 100644 index 0000000000..1b577bc107 --- /dev/null +++ b/tests/test_global_config.py @@ -0,0 +1,243 @@ +import mock +from unittest import TestCase + +import pytest + +from ddtrace import config as global_config +from ddtrace.settings import Config + +from .test_tracer import get_dummy_tracer + + +class GlobalConfigTestCase(TestCase): + """Test the `Configuration` class that stores integration settings""" + def setUp(self): + self.config = Config() + self.tracer = get_dummy_tracer() + + def test_registration(self): + # ensure an integration can register a new list of settings + settings = { + 'distributed_tracing': True, + } + self.config._add('requests', settings) + assert self.config.requests['distributed_tracing'] is True + + def test_settings_copy(self): + # ensure that once an integration is registered, a copy + # of the settings is stored to avoid side-effects + experimental = { + 'request_enqueuing': True, + } + settings = { + 'distributed_tracing': True, + 'experimental': experimental, + } + self.config._add('requests', settings) + + settings['distributed_tracing'] = False + experimental['request_enqueuing'] = False + assert self.config.requests['distributed_tracing'] is True + assert self.config.requests['experimental']['request_enqueuing'] is True + + def test_missing_integration_key(self): + # ensure a meaningful exception is raised when an integration + # that is not available is retrieved in the configuration + # object + with pytest.raises(KeyError) as e: + self.config.new_integration['some_key'] + + assert isinstance(e.value, KeyError) + + def test_global_configuration(self): + # ensure a global configuration is available in the `ddtrace` module + assert isinstance(global_config, Config) + + def test_settings_merge(self): + """ + When calling `config._add()` + when existing settings exist + we do not overwrite the existing settings + """ + self.config.requests['split_by_domain'] = True + self.config._add('requests', dict(split_by_domain=False)) + assert self.config.requests['split_by_domain'] is True + + def test_settings_overwrite(self): + """ + When calling `config._add(..., merge=False)` + when existing settings exist + we overwrite the existing settings + """ + self.config.requests['split_by_domain'] = True + self.config._add('requests', dict(split_by_domain=False), merge=False) + assert 
self.config.requests['split_by_domain'] is False + + def test_settings_merge_deep(self): + """ + When calling `config._add()` + when existing "deep" settings exist + we do not overwrite the existing settings + """ + self.config.requests['a'] = dict( + b=dict( + c=True, + ), + ) + self.config._add('requests', dict( + a=dict( + b=dict( + c=False, + d=True, + ), + ), + )) + assert self.config.requests['a']['b']['c'] is True + assert self.config.requests['a']['b']['d'] is True + + def test_settings_hook(self): + """ + When calling `Hooks._emit()` + When there is a hook registered + we call the hook as expected + """ + # Setup our hook + @self.config.web.hooks.on('request') + def on_web_request(span): + span.set_tag('web.request', '/') + + # Create our span + span = self.tracer.start_span('web.request') + assert 'web.request' not in span.meta + + # Emit the span + self.config.web.hooks._emit('request', span) + + # Assert we updated the span as expected + assert span.get_tag('web.request') == '/' + + def test_settings_hook_args(self): + """ + When calling `Hooks._emit()` with arguments + When there is a hook registered + we call the hook as expected + """ + # Setup our hook + @self.config.web.hooks.on('request') + def on_web_request(span, request, response): + span.set_tag('web.request', request) + span.set_tag('web.response', response) + + # Create our span + span = self.tracer.start_span('web.request') + assert 'web.request' not in span.meta + + # Emit the span + # DEV: The actual values don't matter, we just want to test args + kwargs usage + self.config.web.hooks._emit('request', span, 'request', response='response') + + # Assert we updated the span as expected + assert span.get_tag('web.request') == 'request' + assert span.get_tag('web.response') == 'response' + + def test_settings_hook_args_failure(self): + """ + When calling `Hooks._emit()` with arguments + When there is a hook registered that is missing parameters + we do not raise an exception + """ + # Setup our hook + # DEV: We are missing the required "response" argument + @self.config.web.hooks.on('request') + def on_web_request(span, request): + span.set_tag('web.request', request) + + # Create our span + span = self.tracer.start_span('web.request') + assert 'web.request' not in span.meta + + # Emit the span + # DEV: This also asserts that no exception was raised + self.config.web.hooks._emit('request', span, 'request', response='response') + + # Assert we did not update the span + assert 'web.request' not in span.meta + + def test_settings_multiple_hooks(self): + """ + When calling `Hooks._emit()` + When there are multiple hooks registered + we do not raise an exception + """ + # Setup our hooks + @self.config.web.hooks.on('request') + def on_web_request(span): + span.set_tag('web.request', '/') + + @self.config.web.hooks.on('request') + def on_web_request2(span): + span.set_tag('web.status', 200) + + @self.config.web.hooks.on('request') + def on_web_request3(span): + span.set_tag('web.method', 'GET') + + # Create our span + span = self.tracer.start_span('web.request') + assert 'web.request' not in span.meta + assert 'web.status' not in span.metrics + assert 'web.method' not in span.meta + + # Emit the span + self.config.web.hooks._emit('request', span) + + # Assert we updated the span as expected + assert span.get_tag('web.request') == '/' + assert span.get_metric('web.status') == 200 + assert span.get_tag('web.method') == 'GET' + + def test_settings_hook_failure(self): + """ + When calling `Hooks._emit()` + When the hook raises 
an exception + we do not raise an exception + """ + # Setup our failing hook + on_web_request = mock.Mock(side_effect=Exception) + self.config.web.hooks.register('request')(on_web_request) + + # Create our span + span = self.tracer.start_span('web.request') + + # Emit the span + # DEV: This is the test, to ensure no exceptions are raised + self.config.web.hooks._emit('request', span) + on_web_request.assert_called() + + def test_settings_no_hook(self): + """ + When calling `Hooks._emit()` + When no hook is registered + we do not raise an exception + """ + # Create our span + span = self.tracer.start_span('web.request') + + # Emit the span + # DEV: This is the test, to ensure no exceptions are raised + self.config.web.hooks._emit('request', span) + + def test_settings_no_span(self): + """ + When calling `Hooks._emit()` + When no span is provided + we do not raise an exception + """ + # Setup our hooks + @self.config.web.hooks.on('request') + def on_web_request(span): + span.set_tag('web.request', '/') + + # Emit the span + # DEV: This is the test, to ensure no exceptions are raised + self.config.web.hooks._emit('request', None) diff --git a/tests/test_helpers.py b/tests/test_helpers.py new file mode 100644 index 0000000000..a09b055c01 --- /dev/null +++ b/tests/test_helpers.py @@ -0,0 +1,48 @@ +import mock + +from ddtrace import helpers + +from .base import BaseTracerTestCase +from .util import override_global_tracer + + +class HelpersTestCase(BaseTracerTestCase): + """Test suite for ``ddtrace`` helpers""" + def test_correlation_identifiers(self): + # ensures the right correlation identifiers are + # returned when a Trace is active + with override_global_tracer(self.tracer): + span = self.tracer.trace('MockSpan') + active_trace_id, active_span_id = span.trace_id, span.span_id + trace_id, span_id = helpers.get_correlation_ids() + + self.assertEqual(trace_id, active_trace_id) + self.assertEqual(span_id, active_span_id) + + def test_correlation_identifiers_without_trace(self): + # ensures `None` is returned if no Traces are active + with override_global_tracer(self.tracer): + trace_id, span_id = helpers.get_correlation_ids() + + self.assertIsNone(trace_id) + self.assertIsNone(span_id) + + def test_correlation_identifiers_with_disabled_trace(self): + # ensures `None` is returned if tracer is disabled + with override_global_tracer(self.tracer): + self.tracer.enabled = False + self.tracer.trace('MockSpan') + trace_id, span_id = helpers.get_correlation_ids() + + self.assertIsNone(trace_id) + self.assertIsNone(span_id) + + def test_correlation_identifiers_missing_context(self): + # ensures we return `None` if there is no current context + self.tracer.get_call_context = mock.MagicMock(return_value=None) + + with override_global_tracer(self.tracer): + trace_id, span_id = helpers.get_correlation_ids() + + self.assertIsNone(trace_id) + self.assertIsNone(span_id) diff --git a/tests/test_hook.py b/tests/test_hook.py new file mode 100644 index 0000000000..817916a5bc --- /dev/null +++ b/tests/test_hook.py @@ -0,0 +1,180 @@ +import mock + +from ddtrace.compat import reload_module +from ddtrace.utils.hook import ( + register_post_import_hook, + deregister_post_import_hook, +) + +from tests.subprocesstest import SubprocessTestCase, run_in_subprocess + + +@run_in_subprocess +class TestHook(SubprocessTestCase): + def test_register_post_import_hook_before_import(self): + """ + Test that a hook is fired after registering. 
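+ + The hook must not fire at registration time, only once the module is + actually imported.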
+ """ + test_hook = mock.MagicMock() + register_post_import_hook('tests.utils.test_module', test_hook) + import tests.utils.test_module # noqa + test_hook.assert_called_once() + + def test_register_post_import_hook_after_import(self): + """ + Test that a hook is fired when the module is imported with an + appropriate log debug message. + """ + test_hook = mock.MagicMock() + with mock.patch('ddtrace.utils.hook.log') as log_mock: + import tests.utils.test_module # noqa + register_post_import_hook('tests.utils.test_module', test_hook) + test_hook.assert_called_once() + calls = [ + mock.call('module "%s" already imported, firing hook', "tests.utils.test_module") + ] + log_mock.debug.assert_has_calls(calls) + + def test_register_post_import_hook_reimport(self): + """ + Test that a hook is fired when the module is reimported. + """ + test_hook = mock.MagicMock() + register_post_import_hook('tests.utils.test_module', test_hook) + import tests.utils.test_module + reload_module(tests.utils.test_module) + self.assertEqual(test_hook.call_count, 2) + + def test_register_post_import_hook_multiple(self): + """ + Test that multiple hooks are fired after registering. + """ + test_hook = mock.MagicMock() + test_hook2 = mock.MagicMock() + register_post_import_hook('tests.utils.test_module', test_hook) + register_post_import_hook('tests.utils.test_module', test_hook2) + import tests.utils.test_module # noqa + test_hook.assert_called_once() + test_hook2.assert_called_once() + + def test_register_post_import_hook_different_modules(self): + """ + Test that multiple hooks hooked on different modules are fired after registering. + """ + test_hook = mock.MagicMock() + test_hook_redis = mock.MagicMock() + register_post_import_hook('tests.utils.test_module', test_hook) + register_post_import_hook('ddtrace.contrib.redis', test_hook_redis) + import tests.utils.test_module # noqa + import ddtrace.contrib.redis # noqa + test_hook.assert_called_once() + test_hook_redis.assert_called_once() + + def test_register_post_import_hook_duplicate_register(self): + """ + Test that a function can be registered as a hook twice. + """ + test_hook = mock.MagicMock() + with mock.patch('ddtrace.utils.hook.log') as log_mock: + register_post_import_hook('tests.utils.test_module', test_hook) + register_post_import_hook('tests.utils.test_module', test_hook) + import tests.utils.test_module # noqa + + self.assertEqual(log_mock.debug.mock_calls, [ + mock.call('hook "%s" already exists on module "%s"', test_hook, 'tests.utils.test_module'), + ]) + + def test_deregister_post_import_hook_no_register(self): + """ + Test that deregistering import hooks that do not exist is a no-op. + """ + def hook(): + return + + outcome = deregister_post_import_hook('tests.utils.test_module', hook) + self.assertFalse(outcome) + import tests.utils.test_module # noqa + + def test_deregister_post_import_hook_after_register(self): + """ + Test that import hooks can be deregistered after being registered. + """ + test_hook = mock.MagicMock() + register_post_import_hook('tests.utils.test_module', test_hook) + outcome = deregister_post_import_hook('tests.utils.test_module', test_hook) + self.assertTrue(outcome) + import tests.utils.test_module # noqa + self.assertEqual(test_hook.call_count, 0, 'hook has been deregistered and should have been removed') + + def test_deregister_post_import_hook_after_register_multiple_all(self): + """ + Test that multiple import hooks can be deregistered. 
+ """ + test_hook = mock.MagicMock() + test_hook2 = mock.MagicMock() + register_post_import_hook('tests.utils.test_module', test_hook) + register_post_import_hook('tests.utils.test_module', test_hook2) + + outcome = deregister_post_import_hook('tests.utils.test_module', test_hook) + self.assertTrue(outcome) + outcome = deregister_post_import_hook('tests.utils.test_module', test_hook2) + self.assertTrue(outcome) + import tests.utils.test_module # noqa + self.assertEqual(test_hook.call_count, 0, 'hook has been deregistered and should be removed') + self.assertEqual(test_hook2.call_count, 0, 'hook has been deregistered and should be removed') + + def test_deregister_post_import_hook_after_register_multiple(self): + """ + Test that only the specified import hook can be deregistered after being registered. + """ + # Enforce a spec so that hasattr doesn't vacuously return True. + test_hook = mock.MagicMock(spec=[]) + test_hook2 = mock.MagicMock(spec=[]) + register_post_import_hook('tests.utils.test_module', test_hook) + register_post_import_hook('tests.utils.test_module', test_hook2) + + outcome = deregister_post_import_hook('tests.utils.test_module', test_hook) + self.assertTrue(outcome) + import tests.utils.test_module # noqa + self.assertEqual(test_hook.call_count, 0, 'hook has been deregistered and should be removed') + self.assertEqual(test_hook2.call_count, 1, 'hook should have been called') + + def test_deregister_post_import_hook_after_import(self): + """ + Test that import hooks can be deregistered after being registered. + """ + test_hook = mock.MagicMock() + register_post_import_hook('tests.utils.test_module', test_hook) + + import tests.utils.test_module + test_hook.assert_called_once() + outcome = deregister_post_import_hook('tests.utils.test_module', test_hook) + self.assertTrue(outcome) + reload_module(tests.utils.test_module) + self.assertEqual(test_hook.call_count, 1, 'hook should only be called once') + + def test_hook_exception(self): + """ + Test that when a hook throws an exception that it is caught and logged + as a warning. + """ + def test_hook(module): + raise Exception('test_hook_failed') + register_post_import_hook('tests.utils.test_module', test_hook) + + with mock.patch('ddtrace.utils.hook.log') as log_mock: + import tests.utils.test_module # noqa + calls = [ + mock.call('hook "%s" for module "%s" failed', + test_hook, 'tests.utils.test_module', exc_info=True) + ] + log_mock.warning.assert_has_calls(calls) + + def test_hook_called_with_module(self): + """ + Test that a hook is called with the module that it is hooked on. + """ + def test_hook(module): + self.assertTrue(hasattr(module, 'A')) + register_post_import_hook('tests.utils.test_module', test_hook) + import tests.utils.test_module # noqa diff --git a/tests/test_instance_config.py b/tests/test_instance_config.py new file mode 100644 index 0000000000..871906f1b3 --- /dev/null +++ b/tests/test_instance_config.py @@ -0,0 +1,130 @@ +from unittest import TestCase + +from ddtrace import config +from ddtrace.pin import Pin +from ddtrace.settings import IntegrationConfig + + +class InstanceConfigTestCase(TestCase): + """TestCase for the Configuration API that is used to define + global settings and for each `Pin` instance. 
+ """ + def setUp(self): + class Klass(object): + """Helper class where a Pin is always attached""" + pass + + # define the Class and attach a Pin to it + self.Klass = Klass + Pin(service='metrics').onto(Klass) + + def test_configuration_get_from(self): + # ensure a dictionary is returned + cfg = config.get_from(self.Klass) + assert isinstance(cfg, dict) + + def test_configuration_get_from_twice(self): + # ensure the configuration is the same if `get_from` is used + # in the same instance + instance = self.Klass() + cfg1 = config.get_from(instance) + cfg2 = config.get_from(instance) + assert cfg1 is cfg2 + + def test_configuration_set(self): + # ensure the configuration can be updated in the Pin + instance = self.Klass() + cfg = config.get_from(instance) + cfg['distributed_tracing'] = True + assert config.get_from(instance)['distributed_tracing'] is True + + def test_global_configuration_inheritance(self): + # ensure global configuration is inherited when it's set + cfg = config.get_from(self.Klass) + cfg['distributed_tracing'] = True + instance = self.Klass() + assert config.get_from(instance)['distributed_tracing'] is True + + def test_configuration_override_instance(self): + # ensure instance configuration doesn't override global settings + global_cfg = config.get_from(self.Klass) + global_cfg['distributed_tracing'] = True + instance = self.Klass() + cfg = config.get_from(instance) + cfg['distributed_tracing'] = False + assert config.get_from(self.Klass)['distributed_tracing'] is True + assert config.get_from(instance)['distributed_tracing'] is False + + def test_service_name_for_pin(self): + # ensure for backward compatibility that changing the service + # name via the Pin object also updates integration config + Pin(service='intake').onto(self.Klass) + instance = self.Klass() + cfg = config.get_from(instance) + assert cfg['service_name'] == 'intake' + + def test_service_attribute_priority(self): + # ensure the `service` arg has highest priority over configuration + # for backward compatibility + global_config = { + 'service_name': 'primary_service', + } + Pin(service='service', _config=global_config).onto(self.Klass) + instance = self.Klass() + cfg = config.get_from(instance) + assert cfg['service_name'] == 'service' + + def test_configuration_copy(self): + # ensure when a Pin is used, the given configuration is copied + global_config = { + 'service_name': 'service', + } + Pin(service='service', _config=global_config).onto(self.Klass) + instance = self.Klass() + cfg = config.get_from(instance) + cfg['service_name'] = 'metrics' + assert global_config['service_name'] == 'service' + + def test_configuration_copy_upside_down(self): + # ensure when a Pin is created, it does not copy the given configuration + # until it's used for at least once + global_config = { + 'service_name': 'service', + } + Pin(service='service', _config=global_config).onto(self.Klass) + # override the global config: users do that before using the integration + global_config['service_name'] = 'metrics' + # use the Pin via `get_from` + instance = self.Klass() + cfg = config.get_from(instance) + # it should have users updated value + assert cfg['service_name'] == 'metrics' + + def test_config_attr_and_key(self): + """ + This is a regression test for when mixing attr attribute and key + access we would set the value of the attribute but not the key + """ + integration_config = IntegrationConfig(config, 'test') + + # Our key and attribute do not exist + self.assertFalse(hasattr(integration_config, 
'distributed_tracing')) + self.assertNotIn('distributed_tracing', integration_config) + + # Initially set and access + integration_config['distributed_tracing'] = True + self.assertTrue(integration_config['distributed_tracing']) + self.assertTrue(integration_config.get('distributed_tracing')) + self.assertTrue(integration_config.distributed_tracing) + + # Override by key and access + integration_config['distributed_tracing'] = False + self.assertFalse(integration_config['distributed_tracing']) + self.assertFalse(integration_config.get('distributed_tracing')) + self.assertFalse(integration_config.distributed_tracing) + + # Override by attr and access + integration_config.distributed_tracing = None + self.assertIsNone(integration_config['distributed_tracing']) + self.assertIsNone(integration_config.get('distributed_tracing')) + self.assertIsNone(integration_config.distributed_tracing) diff --git a/tests/test_integration.py b/tests/test_integration.py new file mode 100644 index 0000000000..8752413342 --- /dev/null +++ b/tests/test_integration.py @@ -0,0 +1,459 @@ +import os +import json +import logging +import mock +import ddtrace + +from unittest import TestCase, skip, skipUnless + +from ddtrace.api import API, Response +from ddtrace.ext import http +from ddtrace.filters import FilterRequestsOnUrl +from ddtrace.constants import FILTERS_KEY +from ddtrace.tracer import Tracer +from ddtrace.encoding import JSONEncoder, MsgpackEncoder, get_encoder +from ddtrace.compat import httplib, PYTHON_INTERPRETER, PYTHON_VERSION +from ddtrace.internal.runtime.container import CGroupInfo +from ddtrace.vendor import monotonic +from ddtrace.vendor import msgpack +from tests.test_tracer import get_dummy_tracer + + +class MockedLogHandler(logging.Handler): + """Record log messages to verify error logging logic""" + + def __init__(self, *args, **kwargs): + self.messages = {'debug': [], 'info': [], 'warning': [], 'error': [], 'critical': []} + super(MockedLogHandler, self).__init__(*args, **kwargs) + + def emit(self, record): + self.acquire() + try: + self.messages[record.levelname.lower()].append(record.getMessage()) + finally: + self.release() + + +class FlawedAPI(API): + """ + Deliberately report data with an incorrect method to trigger a 4xx response + """ + def _put(self, endpoint, data, count=0): + conn = httplib.HTTPConnection(self.hostname, self.port) + conn.request('HEAD', endpoint, data, self._headers) + return Response.from_http_response(conn.getresponse()) + + +@skipUnless( + os.environ.get('TEST_DATADOG_INTEGRATION', False), + 'You should have a running trace agent and set TEST_DATADOG_INTEGRATION=1 env variable' +) +class TestWorkers(TestCase): + """ + Ensures that workers interact correctly with the main thread. These are part + of integration tests so real calls are triggered. + """ + def _decode(self, payload): + """ + Helper function that decodes data based on the given Encoder. + """ + if isinstance(self.api._encoder, JSONEncoder): + return json.loads(payload) + elif isinstance(self.api._encoder, MsgpackEncoder): + return msgpack.unpackb(payload, encoding='utf-8') + + def setUp(self): + """ + Create a tracer with running workers, while spying the ``_put()`` method to + keep track of triggered API calls. 
+ """ + # create a new tracer + self.tracer = Tracer() + # spy the send() method + self.api = self.tracer.writer.api + self.api._put = mock.Mock(self.api._put, wraps=self.api._put) + + def tearDown(self): + """ + Stop running worker + """ + self._wait_thread_flush() + + def _wait_thread_flush(self): + """ + Helper that waits for the thread flush + """ + self.tracer.writer.stop() + self.tracer.writer.join(None) + + def _get_endpoint_payload(self, calls, endpoint): + """ + Helper to retrieve the endpoint call from a concurrent + trace or service call. + """ + for call, _ in calls: + if endpoint in call[0]: + return call[0], self._decode(call[1]) + + return None, None + + @skipUnless( + os.environ.get('TEST_DATADOG_INTEGRATION_UDS', False), + 'You should have a running trace agent on a socket and set TEST_DATADOG_INTEGRATION_UDS=1 env variable' + ) + def test_worker_single_trace_uds(self): + self.tracer.configure(uds_path='/tmp/ddagent/trace.sock') + # Write a first trace so we get a _worker + self.tracer.trace('client.testing').finish() + worker = self.tracer.writer + worker._log_error_status = mock.Mock( + worker._log_error_status, wraps=worker._log_error_status, + ) + self.tracer.trace('client.testing').finish() + + # one send is expected + self._wait_thread_flush() + # Check that no error was logged + assert worker._log_error_status.call_count == 0 + + def test_worker_single_trace_uds_wrong_socket_path(self): + self.tracer.configure(uds_path='/tmp/ddagent/nosockethere') + # Write a first trace so we get a _worker + self.tracer.trace('client.testing').finish() + worker = self.tracer.writer + worker._log_error_status = mock.Mock( + worker._log_error_status, wraps=worker._log_error_status, + ) + self.tracer.trace('client.testing').finish() + + # one send is expected + self._wait_thread_flush() + # Check that no error was logged + assert worker._log_error_status.call_count == 1 + + def test_worker_single_trace(self): + # create a trace block and send it using the transport system + tracer = self.tracer + tracer.trace('client.testing').finish() + + # one send is expected + self._wait_thread_flush() + assert self.api._put.call_count == 1 + # check and retrieve the right call + endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.4/traces') + assert endpoint == '/v0.4/traces' + assert len(payload) == 1 + assert len(payload[0]) == 1 + assert payload[0][0]['name'] == 'client.testing' + + # DEV: If we can make the writer flushing deterministic for the case of tests, then we can re-enable this + @skip('Writer flush intervals are impossible to time correctly to make this test not flaky') + def test_worker_multiple_traces(self): + # make a single send() if multiple traces are created before the flush interval + tracer = self.tracer + tracer.trace('client.testing').finish() + tracer.trace('client.testing').finish() + + # one send is expected + self._wait_thread_flush() + assert self.api._put.call_count == 1 + # check and retrieve the right call + endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.4/traces') + assert endpoint == '/v0.4/traces' + assert len(payload) == 2 + assert len(payload[0]) == 1 + assert len(payload[1]) == 1 + assert payload[0][0]['name'] == 'client.testing' + assert payload[1][0]['name'] == 'client.testing' + + def test_worker_single_trace_multiple_spans(self): + # make a single send() if a single trace with multiple spans is created before the flush + tracer = self.tracer + parent = tracer.trace('client.testing') + 
tracer.trace('client.testing').finish() + parent.finish() + + # one send is expected + self._wait_thread_flush() + assert self.api._put.call_count == 1 + # check and retrieve the right call + endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.4/traces') + assert endpoint == '/v0.4/traces' + assert len(payload) == 1 + assert len(payload[0]) == 2 + assert payload[0][0]['name'] == 'client.testing' + assert payload[0][1]['name'] == 'client.testing' + + def test_worker_http_error_logging(self): + # Tests the HTTP error logging logic + tracer = self.tracer + self.tracer.writer.api = FlawedAPI(Tracer.DEFAULT_HOSTNAME, Tracer.DEFAULT_PORT) + tracer.trace('client.testing').finish() + + log = logging.getLogger('ddtrace.internal.writer') + log_handler = MockedLogHandler(level='DEBUG') + log.addHandler(log_handler) + + self._wait_thread_flush() + assert tracer.writer._last_error_ts < monotonic.monotonic() + + logged_errors = log_handler.messages['error'] + assert len(logged_errors) == 1 + assert 'Failed to send traces to Datadog Agent at http://localhost:8126: ' \ 'HTTP error status 400, reason Bad Request, message Content-Type:' \ in logged_errors[0] + + def test_worker_filter_request(self): + self.tracer.configure(settings={FILTERS_KEY: [FilterRequestsOnUrl(r'http://example\.com/health')]}) + # spy the send() method + self.api = self.tracer.writer.api + self.api._put = mock.Mock(self.api._put, wraps=self.api._put) + + span = self.tracer.trace('testing.filteredurl') + span.set_tag(http.URL, 'http://example.com/health') + span.finish() + span = self.tracer.trace('testing.nonfilteredurl') + span.set_tag(http.URL, 'http://example.com/api/resource') + span.finish() + self._wait_thread_flush() + + # Only the second trace should have been sent + assert self.api._put.call_count == 1 + # check and retrieve the right call + endpoint, payload = self._get_endpoint_payload(self.api._put.call_args_list, '/v0.4/traces') + assert endpoint == '/v0.4/traces' + assert len(payload) == 1 + assert payload[0][0]['name'] == 'testing.nonfilteredurl' + + +@skipUnless( + os.environ.get('TEST_DATADOG_INTEGRATION', False), + 'You should have a running trace agent and set TEST_DATADOG_INTEGRATION=1 env variable' +) +class TestAPITransport(TestCase): + """ + Ensures that traces are properly sent to a local agent. These are part + of integration tests, so real calls are triggered and you must run + a real trace agent for them to pass. 
+ """ + @mock.patch('ddtrace.internal.runtime.container.get_container_info') + def setUp(self, get_container_info): + """ + Create a tracer without workers, while spying the ``send()`` method + """ + # Mock the container id we use for making requests + get_container_info.return_value = CGroupInfo(container_id='test-container-id') + + # create a new API object to test the transport using synchronous calls + self.tracer = get_dummy_tracer() + self.api_json = API('localhost', 8126, encoder=JSONEncoder()) + self.api_msgpack = API('localhost', 8126, encoder=MsgpackEncoder()) + + @mock.patch('ddtrace.api.httplib.HTTPConnection') + def test_send_presampler_headers(self, mocked_http): + # register a single trace with a span and send them to the trace agent + self.tracer.trace('client.testing').finish() + trace = self.tracer.writer.pop() + traces = [trace] + + # make a call and retrieve the `conn` Mock object + self.api_msgpack.send_traces(traces) + request_call = mocked_http.return_value.request + assert request_call.call_count == 1 + + # retrieve the headers from the mocked request call + expected_headers = { + 'Datadog-Container-Id': 'test-container-id', # mocked in setUp() + 'Datadog-Meta-Lang': 'python', + 'Datadog-Meta-Lang-Interpreter': PYTHON_INTERPRETER, + 'Datadog-Meta-Lang-Version': PYTHON_VERSION, + 'Datadog-Meta-Tracer-Version': ddtrace.__version__, + 'X-Datadog-Trace-Count': '1', + 'Content-Type': 'application/msgpack', + } + params, _ = request_call.call_args_list[0] + headers = params[3] + assert len(expected_headers) == len(headers) + for k, v in expected_headers.items(): + assert v == headers[k] + + def _send_traces_and_check(self, traces, nresponses=1): + # test JSON encoder + responses = self.api_json.send_traces(traces) + assert len(responses) == nresponses + for response in responses: + assert response.status == 200 + + # test Msgpack encoder + responses = self.api_msgpack.send_traces(traces) + assert len(responses) == nresponses + for response in responses: + assert response.status == 200 + + def test_send_single_trace(self): + # register a single trace with a span and send them to the trace agent + self.tracer.trace('client.testing').finish() + trace = self.tracer.writer.pop() + traces = [trace] + + self._send_traces_and_check(traces) + + def test_send_many_traces(self): + # register a single trace with a span and send them to the trace agent + self.tracer.trace('client.testing').finish() + trace = self.tracer.writer.pop() + # 30k is a right number to have both json and msgpack send 2 payload :) + traces = [trace] * 30000 + + self._send_traces_and_check(traces, 2) + + def test_send_single_with_wrong_errors(self): + # if the error field is set to True, it must be cast as int so + # that the agent decoder handles that properly without providing + # a decoding error + span = self.tracer.trace('client.testing') + span.error = True + span.finish() + trace = self.tracer.writer.pop() + traces = [trace] + + self._send_traces_and_check(traces) + + def test_send_multiple_traces(self): + # register some traces and send them to the trace agent + self.tracer.trace('client.testing').finish() + trace_1 = self.tracer.writer.pop() + self.tracer.trace('client.testing').finish() + trace_2 = self.tracer.writer.pop() + traces = [trace_1, trace_2] + + self._send_traces_and_check(traces) + + def test_send_single_trace_multiple_spans(self): + # register some traces and send them to the trace agent + with self.tracer.trace('client.testing'): + self.tracer.trace('client.testing').finish() + trace = 
self.tracer.writer.pop() + traces = [trace] + + self._send_traces_and_check(traces) + + def test_send_multiple_traces_multiple_spans(self): + # register some traces and send them to the trace agent + with self.tracer.trace('client.testing'): + self.tracer.trace('client.testing').finish() + trace_1 = self.tracer.writer.pop() + + with self.tracer.trace('client.testing'): + self.tracer.trace('client.testing').finish() + trace_2 = self.tracer.writer.pop() + + traces = [trace_1, trace_2] + + self._send_traces_and_check(traces) + + +@skipUnless( + os.environ.get('TEST_DATADOG_INTEGRATION', False), + 'You should have a running trace agent and set TEST_DATADOG_INTEGRATION=1 env variable' +) +class TestAPIDowngrade(TestCase): + """ + Ensures that if the tracing client finds an older trace agent, + it downgrades the current connection to a stable API version + """ + @skip('msgpack package split breaks this test; it works for newer versions of msgpack') + def test_get_encoder_default(self): + # get_encoder should return MsgpackEncoder instance if + # msgpack and the CPP implementation are available + encoder = get_encoder() + assert isinstance(encoder, MsgpackEncoder) + + @mock.patch('ddtrace.encoding.MSGPACK_ENCODING', False) + def test_get_encoder_fallback(self): + # get_encoder should return JSONEncoder instance if + # msgpack or the CPP implementation is not available + encoder = get_encoder() + assert isinstance(encoder, JSONEncoder) + + @skip('msgpack package split breaks this test; it works for newer versions of msgpack') + def test_downgrade_api(self): + # make a call to a non-existent endpoint; this downgrades + # the current API to a stable one + tracer = get_dummy_tracer() + tracer.trace('client.testing').finish() + trace = tracer.writer.pop() + + # the encoder is right but we're targeting an API + # endpoint that is not available + api = API('localhost', 8126) + api._traces = '/v0.0/traces' + assert isinstance(api._encoder, MsgpackEncoder) + + # after the call, we downgrade to a working endpoint + response = api.send_traces([trace]) + assert response + assert response.status == 200 + assert isinstance(api._encoder, JSONEncoder) + + +@skipUnless( + os.environ.get('TEST_DATADOG_INTEGRATION', False), + 'You should have a running trace agent and set TEST_DATADOG_INTEGRATION=1 env variable' +) +class TestRateByService(TestCase): + """ + Check we get feedback from the agent and we're able to process it. 
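+ + With priority sampling enabled, the agent responds with a JSON body + like ``{"rate_by_service": {"service:,env:": 1}}``.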
+ """ + def setUp(self): + """ + Create a tracer without workers, while spying the ``send()`` method + """ + # create a new API object to test the transport using synchronous calls + self.tracer = get_dummy_tracer() + self.api_json = API('localhost', 8126, encoder=JSONEncoder(), priority_sampling=True) + self.api_msgpack = API('localhost', 8126, encoder=MsgpackEncoder(), priority_sampling=True) + + def test_send_single_trace(self): + # register a single trace with a span and send them to the trace agent + self.tracer.trace('client.testing').finish() + trace = self.tracer.writer.pop() + traces = [trace] + + # [TODO:christian] when CI has an agent that is able to process the v0.4 + # endpoint, add a check to: + # - make sure the output is a valid JSON + # - make sure the priority sampler (if enabled) is updated + + # test JSON encoder + responses = self.api_json.send_traces(traces) + assert len(responses) == 1 + assert responses[0].status == 200 + assert responses[0].get_json() == dict(rate_by_service={'service:,env:': 1}) + + # test Msgpack encoder + responses = self.api_msgpack.send_traces(traces) + assert len(responses) == 1 + assert responses[0].status == 200 + assert responses[0].get_json() == dict(rate_by_service={'service:,env:': 1}) + + +@skipUnless( + os.environ.get('TEST_DATADOG_INTEGRATION', False), + 'You should have a running trace agent and set TEST_DATADOG_INTEGRATION=1 env variable' +) +class TestConfigure(TestCase): + """ + Ensures that when calling configure without specifying hostname and port, + previous overrides have been kept. + """ + def test_configure_keeps_api_hostname_and_port(self): + tracer = Tracer() # use real tracer with real api + assert 'localhost' == tracer.writer.api.hostname + assert 8126 == tracer.writer.api.port + tracer.configure(hostname='127.0.0.1', port=8127) + assert '127.0.0.1' == tracer.writer.api.hostname + assert 8127 == tracer.writer.api.port + tracer.configure(priority_sampling=True) + assert '127.0.0.1' == tracer.writer.api.hostname + assert 8127 == tracer.writer.api.port diff --git a/tests/test_payload.py b/tests/test_payload.py new file mode 100644 index 0000000000..fc2cd25e6b --- /dev/null +++ b/tests/test_payload.py @@ -0,0 +1,111 @@ +import math + +from ddtrace.encoding import get_encoder, JSONEncoder +from ddtrace.payload import Payload, PayloadFull +from ddtrace.span import Span + +from .base import BaseTracerTestCase + +import pytest + + +class PayloadTestCase(BaseTracerTestCase): + def test_init(self): + """ + When calling `Payload.init` + With an encoder + We use that encoder + With no encoder + We use the default encoder + """ + default_encoder_type = type(get_encoder()) + + payload = Payload() + self.assertIsInstance(payload.encoder, default_encoder_type) + + json_encoder = JSONEncoder() + payload = Payload(encoder=json_encoder) + self.assertEqual(payload.encoder, json_encoder) + + def test_add_trace(self): + """ + When calling `Payload.add_trace` + With a falsey value + Nothing is added to the payload + With a trace + We encode and add the trace to the payload + We increment the payload size by the expected amount + """ + payload = Payload() + + # Add falsey traces + for val in (False, None, 0, '', [], dict()): + payload.add_trace(val) + self.assertEqual(payload.length, 0) + self.assertTrue(payload.empty) + + # Add a single trace to the payload + trace = [Span(self.tracer, name='root.span'), Span(self.tracer, name='child.span')] + payload.add_trace(trace) + + self.assertEqual(payload.length, 1) + self.assertFalse(payload.empty) 
+
+    def test_get_payload(self):
+        """
+        When calling `Payload.get_payload`
+            With no traces
+                We return the appropriate data
+            With traces
+                We return the appropriate data
+        """
+        payload = Payload()
+
+        # No traces
+        self.assertTrue(payload.empty)
+        encoded_data = payload.get_payload()
+        decoded_data = payload.encoder.decode(encoded_data)
+        self.assertEqual(decoded_data, [])
+
+        # Add traces to the payload
+        for _ in range(5):
+            trace = [Span(self.tracer, name='root.span'), Span(self.tracer, name='child.span')]
+            payload.add_trace(trace)
+
+        self.assertEqual(payload.length, 5)
+        self.assertFalse(payload.empty)
+
+        # Assert the payload generated from Payload
+        encoded_data = payload.get_payload()
+        decoded_data = payload.encoder.decode(encoded_data)
+        self.assertEqual(len(decoded_data), 5)
+        for trace in decoded_data:
+            self.assertEqual(len(trace), 2)
+            self.assertEqual(trace[0][b'name'], b'root.span')
+            self.assertEqual(trace[1][b'name'], b'child.span')
+
+    def test_full(self):
+        payload = Payload()
+
+        # Empty
+        self.assertTrue(payload.empty)
+
+        # Trace and its size in bytes
+        trace = [Span(self.tracer, 'root.span'), Span(self.tracer, 'child.span')]
+        trace_size = len(payload.encoder.encode_trace(trace))
+
+        # Number of traces before we hit the max size limit and are considered full
+        num_traces = int(math.floor(payload.max_payload_size / trace_size))
+
+        # Add the traces
+        for _ in range(num_traces):
+            payload.add_trace(trace)
+
+        # Confirm every trace was accepted
+        self.assertEqual(payload.length, num_traces)
+
+        # One more trace must raise PayloadFull
+        with pytest.raises(PayloadFull):
+            payload.add_trace(trace)
+
+        # Confirm the rejected trace was not added
+        self.assertEqual(payload.length, num_traces)
diff --git a/tests/test_pin.py b/tests/test_pin.py
new file mode 100644
index 0000000000..b8ccdc0eb7
--- /dev/null
+++ b/tests/test_pin.py
@@ -0,0 +1,192 @@
+from unittest import TestCase
+
+import pytest
+
+from ddtrace import Pin
+
+
+class PinTestCase(TestCase):
+    """TestCase for the `Pin` object that is used when an object is wrapped
+    with our tracing functionality.
+ """ + def setUp(self): + # define a simple class object + class Obj(object): + pass + + self.Obj = Obj + + def test_pin(self): + # ensure a Pin can be attached to an instance + obj = self.Obj() + pin = Pin(service='metrics') + pin.onto(obj) + + got = Pin.get_from(obj) + assert got.service == pin.service + assert got is pin + + def test_pin_find(self): + # ensure Pin will find the first available pin + + # Override service + obj_a = self.Obj() + pin = Pin(service='service-a') + pin.onto(obj_a) + + # Override service + obj_b = self.Obj() + pin = Pin(service='service-b') + pin.onto(obj_b) + + # No Pin set + obj_c = self.Obj() + + # We find the first pin (obj_b) + pin = Pin._find(obj_c, obj_b, obj_a) + assert pin is not None + assert pin.service == 'service-b' + + # We find the first pin (obj_a) + pin = Pin._find(obj_a, obj_b, obj_c) + assert pin is not None + assert pin.service == 'service-a' + + # We don't find a pin if none is there + pin = Pin._find(obj_c, obj_c, obj_c) + assert pin is None + + def test_cant_pin_with_slots(self): + # ensure a Pin can't be attached if the __slots__ is defined + class Obj(object): + __slots__ = ['value'] + + obj = Obj() + obj.value = 1 + + Pin(service='metrics').onto(obj) + got = Pin.get_from(obj) + assert got is None + + def test_cant_modify(self): + # ensure a Pin is immutable once initialized + pin = Pin(service='metrics') + with pytest.raises(AttributeError): + pin.service = 'intake' + + def test_copy(self): + # ensure a Pin is copied when using the clone methods + p1 = Pin(service='metrics', app='flask', tags={'key': 'value'}) + p2 = p1.clone(service='intake') + # values are the same + assert p1.service == 'metrics' + assert p2.service == 'intake' + assert p1.app == 'flask' + assert p2.app == 'flask' + # but it's a copy + assert p1.tags is not p2.tags + assert p1._config is not p2._config + # of almost everything + assert p1.tracer is p2.tracer + + def test_none(self): + # ensure get_from returns None if a Pin is not available + assert Pin.get_from(None) is None + + def test_repr(self): + # ensure the service name is in the string representation of the Pin + pin = Pin(service='metrics') + assert 'metrics' in str(pin) + + def test_override(self): + # ensure Override works for an instance object + class A(object): + pass + + Pin(service='metrics', app='flask').onto(A) + a = A() + Pin.override(a, app='django') + assert Pin.get_from(a).app == 'django' + assert Pin.get_from(a).service == 'metrics' + + b = A() + assert Pin.get_from(b).app == 'flask' + assert Pin.get_from(b).service == 'metrics' + + def test_override_missing(self): + # ensure overriding an instance doesn't override the Class + class A(object): + pass + + a = A() + assert Pin.get_from(a) is None + Pin.override(a, service='metrics') + assert Pin.get_from(a).service == 'metrics' + + b = A() + assert Pin.get_from(b) is None + + def test_pin_config(self): + # ensure `Pin` has a configuration object that can be modified + obj = self.Obj() + Pin.override(obj, service='metrics') + pin = Pin.get_from(obj) + assert pin._config is not None + pin._config['distributed_tracing'] = True + assert pin._config['distributed_tracing'] is True + + def test_pin_config_is_a_copy(self): + # ensure that when a `Pin` is cloned, the config is a copy + obj = self.Obj() + Pin.override(obj, service='metrics') + p1 = Pin.get_from(obj) + assert p1._config is not None + p1._config['distributed_tracing'] = True + + Pin.override(obj, service='intake') + p2 = Pin.get_from(obj) + assert p2._config is not None + 
p2._config['distributed_tracing'] = False + + assert p1._config['distributed_tracing'] is True + assert p2._config['distributed_tracing'] is False + + def test_pin_does_not_override_global(self): + # ensure that when a `Pin` is created from a class, the specific + # instance doesn't override the global one + class A(object): + pass + + Pin.override(A, service='metrics') + global_pin = Pin.get_from(A) + global_pin._config['distributed_tracing'] = True + + a = A() + pin = Pin.get_from(a) + assert pin is not None + assert pin._config['distributed_tracing'] is True + pin._config['distributed_tracing'] = False + + assert global_pin._config['distributed_tracing'] is True + assert pin._config['distributed_tracing'] is False + + def test_pin_does_not_override_global_with_new_instance(self): + # ensure that when a `Pin` is created from a class, the specific + # instance doesn't override the global one, even if only the + # `onto()` API has been used + class A(object): + pass + + pin = Pin(service='metrics') + pin.onto(A) + global_pin = Pin.get_from(A) + global_pin._config['distributed_tracing'] = True + + a = A() + pin = Pin.get_from(a) + assert pin is not None + assert pin._config['distributed_tracing'] is True + pin._config['distributed_tracing'] = False + + assert global_pin._config['distributed_tracing'] is True + assert pin._config['distributed_tracing'] is False diff --git a/tests/test_sampler.py b/tests/test_sampler.py new file mode 100644 index 0000000000..ab8e42629a --- /dev/null +++ b/tests/test_sampler.py @@ -0,0 +1,879 @@ +from __future__ import division +import contextlib +import mock +import re +import unittest + +import pytest + +from ddtrace.compat import iteritems +from ddtrace.constants import SAMPLING_PRIORITY_KEY, SAMPLE_RATE_METRIC_KEY +from ddtrace.constants import SAMPLING_AGENT_DECISION, SAMPLING_RULE_DECISION, SAMPLING_LIMIT_DECISION +from ddtrace.ext.priority import AUTO_KEEP, AUTO_REJECT +from ddtrace.internal.rate_limiter import RateLimiter +from ddtrace.sampler import DatadogSampler, SamplingRule +from ddtrace.sampler import RateSampler, AllSampler, RateByServiceSampler +from ddtrace.span import Span + +from .utils import override_env +from .test_tracer import get_dummy_tracer + + +@pytest.fixture +def dummy_tracer(): + return get_dummy_tracer() + + +def assert_sampling_decision_tags(span, agent=None, limit=None, rule=None): + assert span.get_metric(SAMPLING_AGENT_DECISION) == agent + assert span.get_metric(SAMPLING_LIMIT_DECISION) == limit + assert span.get_metric(SAMPLING_RULE_DECISION) == rule + + +def create_span(tracer=None, name='test.span', meta=None, *args, **kwargs): + tracer = tracer or get_dummy_tracer() + if 'context' not in kwargs: + kwargs['context'] = tracer.get_call_context() + span = Span(tracer=tracer, name=name, *args, **kwargs) + if meta: + span.set_tags(meta) + return span + + +class RateSamplerTest(unittest.TestCase): + + def test_set_sample_rate(self): + sampler = RateSampler() + assert sampler.sample_rate == 1.0 + + for rate in [0.001, 0.01, 0.1, 0.25, 0.5, 0.75, 0.99999999, 1.0, 1]: + sampler.set_sample_rate(rate) + assert sampler.sample_rate == float(rate) + + sampler.set_sample_rate(str(rate)) + assert sampler.sample_rate == float(rate) + + def test_set_sample_rate_str(self): + sampler = RateSampler() + sampler.set_sample_rate('0.5') + assert sampler.sample_rate == 0.5 + + def test_sample_rate_deviation(self): + for sample_rate in [0.1, 0.25, 0.5, 1]: + tracer = get_dummy_tracer() + writer = tracer.writer + + tracer.sampler = 
RateSampler(sample_rate)
+
+            iterations = int(1e4 / sample_rate)
+
+            for i in range(iterations):
+                span = tracer.trace(i)
+                span.finish()
+
+            samples = writer.pop()
+
+            # We must have at least 1 sample, check that it has its sample rate properly assigned
+            assert samples[0].get_metric(SAMPLE_RATE_METRIC_KEY) == sample_rate
+
+            # Less than 5% deviation when 'enough' iterations (arbitrary, just check if it converges)
+            deviation = abs(len(samples) - (iterations * sample_rate)) / (iterations * sample_rate)
+            assert deviation < 0.05, 'Deviation too high %f with sample_rate %f' % (deviation, sample_rate)
+
+    def test_deterministic_behavior(self):
+        """ Test that for a given trace ID, the result is always the same """
+        tracer = get_dummy_tracer()
+        writer = tracer.writer
+
+        tracer.sampler = RateSampler(0.5)
+
+        for i in range(10):
+            span = tracer.trace(i)
+            span.finish()
+
+            samples = writer.pop()
+            assert len(samples) <= 1, 'there should be 0 or 1 spans'
+            sampled = (1 == len(samples))
+            for j in range(10):
+                other_span = Span(tracer, i, trace_id=span.trace_id)
+                assert (
+                    sampled == tracer.sampler.sample(other_span)
+                ), 'sampling should give the same result for a given trace_id'
+
+
+class RateByServiceSamplerTest(unittest.TestCase):
+    def test_default_key(self):
+        assert (
+            'service:,env:' == RateByServiceSampler._default_key
+        ), 'default key should correspond to no service and no env'
+
+    def test_key(self):
+        assert RateByServiceSampler._default_key == RateByServiceSampler._key()
+        assert 'service:mcnulty,env:' == RateByServiceSampler._key(service='mcnulty')
+        assert 'service:,env:test' == RateByServiceSampler._key(env='test')
+        assert 'service:mcnulty,env:test' == RateByServiceSampler._key(service='mcnulty', env='test')
+        assert 'service:mcnulty,env:test' == RateByServiceSampler._key('mcnulty', 'test')
+
+    def test_sample_rate_deviation(self):
+        for sample_rate in [0.1, 0.25, 0.5, 1]:
+            tracer = get_dummy_tracer()
+            writer = tracer.writer
+            tracer.configure(sampler=AllSampler())
+            # We need to restore the dummy writer because tracer.configure() replaces it:
+            # with priority sampling enabled, the new writer must be priority-sampling
+            # aware and hold a reference to the priority sampler so it can apply the
+            # feedback it gets from the agent
+            assert writer != tracer.writer, 'writer should have been updated by configure'
+            tracer.writer = writer
+            tracer.priority_sampler.set_sample_rate(sample_rate)
+
+            iterations = int(1e4 / sample_rate)
+
+            for i in range(iterations):
+                span = tracer.trace(i)
+                span.finish()
+
+            samples = writer.pop()
+            samples_with_high_priority = 0
+            for sample in samples:
+                if sample.get_metric(SAMPLING_PRIORITY_KEY) is not None:
+                    if sample.get_metric(SAMPLING_PRIORITY_KEY) > 0:
+                        samples_with_high_priority += 1
+                    else:
+                        assert (
+                            0 == sample.get_metric(SAMPLING_PRIORITY_KEY)
+                        ), 'when priority sampling is on, priority should be 0 when trace is to be dropped'
+                assert_sampling_decision_tags(sample, agent=sample_rate)
+            # We must have at least 1 sample, check that it has its sample rate properly assigned
+            assert samples[0].get_metric(SAMPLE_RATE_METRIC_KEY) is None
+
+            # Less than 5% deviation when 'enough' iterations (arbitrary, just check if it converges)
+            deviation = abs(samples_with_high_priority - (iterations * sample_rate)) / (iterations * sample_rate)
+            assert deviation < 0.05, 'Deviation too high %f with sample_rate %f' % (deviation, sample_rate)
+
+    def test_update_rate_by_service_sample_rates(self):
+        cases = [
+            {
+                'service:,env:': 1,
+            },
+            {
+                'service:,env:': 1,
+                'service:mcnulty,env:dev': 0.33,
+                'service:postgres,env:dev': 0.7,
+            },
+            {
+                'service:,env:': 1,
+                'service:mcnulty,env:dev': 0.25,
+                'service:postgres,env:dev': 0.5,
+                'service:redis,env:prod': 0.75,
+            },
+        ]
+
+        tracer = get_dummy_tracer()
+        tracer.configure(sampler=AllSampler())
+        priority_sampler = tracer.priority_sampler
+        for case in cases:
+            priority_sampler.update_rate_by_service_sample_rates(case)
+            rates = {}
+            for k, v in iteritems(priority_sampler._by_service_samplers):
+                rates[k] = v.sample_rate
+            assert case == rates, '%s != %s' % (case, rates)
+        # It's important to also run the cases in reverse order: we want to make sure
+        # key deletion works as well as key insertion (doing it both ways triggers both)
+        cases.reverse()
+        for case in cases:
+            priority_sampler.update_rate_by_service_sample_rates(case)
+            rates = {}
+            for k, v in iteritems(priority_sampler._by_service_samplers):
+                rates[k] = v.sample_rate
+            assert case == rates, '%s != %s' % (case, rates)
+
+
+@pytest.mark.parametrize(
+    'sample_rate,allowed',
+    [
+        # Min/max allowed values
+        (0.0, True),
+        (1.0, True),
+
+        # Accepted boundaries
+        (0.000001, True),
+        (0.999999, True),
+
+        # Outside the bounds
+        (-0.000000001, False),
+        (1.0000000001, False),
+    ] + [
+        # Try a bunch of decimal values between 0 and 1
+        (1 / i, True) for i in range(1, 50)
+    ] + [
+        # Try a bunch of decimal values less than 0
+        (-(1 / i), False) for i in range(1, 50)
+    ] + [
+        # Try a bunch of decimal values greater than 1
+        (1 + (1 / i), False) for i in range(1, 50)
+    ]
+)
+def test_sampling_rule_init_sample_rate(sample_rate, allowed):
+    if allowed:
+        rule = SamplingRule(sample_rate=sample_rate)
+        assert rule.sample_rate == sample_rate
+    else:
+        with pytest.raises(ValueError):
+            SamplingRule(sample_rate=sample_rate)
+
+
+def test_sampling_rule_init_defaults():
+    rule = SamplingRule(sample_rate=1.0)
+    assert rule.sample_rate == 1.0
+    assert rule.service == SamplingRule.NO_RULE
+    assert rule.name == SamplingRule.NO_RULE
+
+
+def test_sampling_rule_init():
+    name_regex = re.compile(r'\.request$')
+
+    def resource_check(resource):
+        return 'healthcheck' in resource
+
+    rule = SamplingRule(
+        sample_rate=0.0,
+        # Value
+        service='my-service',
+        # Regex
+        name=name_regex,
+    )
+
+    assert rule.sample_rate == 0.0
+    assert rule.service == 'my-service'
+    assert rule.name == name_regex
+
+
+@pytest.mark.parametrize(
+    'span,rule,expected',
+    [
+        # DEV: Use sample_rate=1 to ensure SamplingRule._sample always returns True
+        (create_span(name=name), SamplingRule(sample_rate=1, name=pattern), expected)
+        for name, pattern, expected in [
+            ('test.span', SamplingRule.NO_RULE, True),
+            # DEV: `span.name` cannot be `None`
+            ('test.span', None, False),
+            ('test.span', 'test.span', True),
+            ('test.span', 'test_span', False),
+            ('test.span', re.compile(r'^test\.span$'), True),
+            ('test_span', re.compile(r'^test.span$'), True),
+            ('test.span', re.compile(r'^test_span$'), False),
+            ('test.span', re.compile(r'test'), True),
+            ('test.span', re.compile(r'test\.span|another\.span'), True),
+            ('another.span', re.compile(r'test\.span|another\.span'), True),
+            ('test.span', lambda name: 'span' in name, True),
+            ('test.span', lambda name: 'span' not in name, False),
+            ('test.span', lambda name: 1 / 0, False),
+        ]
+    ]
+)
+def test_sampling_rule_matches_name(span, rule, expected):
+    assert rule.matches(span) is expected, '{} -> {} -> {}'.format(rule, span, expected)
+
+
+@pytest.mark.parametrize(
+    'span,rule,expected',
+    [
+        # DEV: Use
sample_rate=1 to ensure SamplingRule._sample always returns True + (create_span(service=service), SamplingRule(sample_rate=1, service=pattern), expected) + for service, pattern, expected in [ + ('my-service', SamplingRule.NO_RULE, True), + ('my-service', None, False), + (None, None, True), + (None, 'my-service', False), + (None, re.compile(r'my-service'), False), + (None, lambda service: 'service' in service, False), + ('my-service', 'my-service', True), + ('my-service', 'my_service', False), + ('my-service', re.compile(r'^my-'), True), + ('my_service', re.compile(r'^my[_-]'), True), + ('my-service', re.compile(r'^my_'), False), + ('my-service', re.compile(r'my-service'), True), + ('my-service', re.compile(r'my'), True), + ('my-service', re.compile(r'my-service|another-service'), True), + ('another-service', re.compile(r'my-service|another-service'), True), + ('my-service', lambda service: 'service' in service, True), + ('my-service', lambda service: 'service' not in service, False), + ('my-service', lambda service: 1 / 0, False), + ] + ] +) +def test_sampling_rule_matches_service(span, rule, expected): + assert rule.matches(span) is expected, '{} -> {} -> {}'.format(rule, span, expected) + + +@pytest.mark.parametrize( + 'span,rule,expected', + [ + # All match + ( + create_span( + name='test.span', + service='my-service', + ), + SamplingRule( + sample_rate=1, + name='test.span', + service=re.compile(r'^my-'), + ), + True, + ), + + # All match, but sample rate of 0% + # DEV: We are checking if it is a match, not computing sampling rate, sample_rate=0 is not considered + ( + create_span( + name='test.span', + service='my-service', + ), + SamplingRule( + sample_rate=0, + name='test.span', + service=re.compile(r'^my-'), + ), + True, + ), + + # Name doesn't match + ( + create_span( + name='test.span', + service='my-service', + ), + SamplingRule( + sample_rate=1, + name='test_span', + service=re.compile(r'^my-'), + ), + False, + ), + + # Service doesn't match + ( + create_span( + name='test.span', + service='my-service', + ), + SamplingRule( + sample_rate=1, + name='test.span', + service=re.compile(r'^service-'), + ), + False, + ), + ], +) +def test_sampling_rule_matches(span, rule, expected): + assert rule.matches(span) is expected, '{} -> {} -> {}'.format(rule, span, expected) + + +def test_sampling_rule_matches_exception(): + e = Exception('an error occurred') + + def pattern(prop): + raise e + + rule = SamplingRule(sample_rate=1.0, name=pattern) + span = create_span(name='test.span') + + with mock.patch('ddtrace.sampler.log') as mock_log: + assert rule.matches(span) is False + mock_log.warning.assert_called_once_with( + '%r pattern %r failed with %r', + rule, + pattern, + 'test.span', + exc_info=True, + ) + + +@pytest.mark.parametrize('sample_rate', [0.01, 0.1, 0.15, 0.25, 0.5, 0.75, 0.85, 0.9, 0.95, 0.991]) +def test_sampling_rule_sample(sample_rate): + tracer = get_dummy_tracer() + rule = SamplingRule(sample_rate=sample_rate) + + iterations = int(1e4 / sample_rate) + sampled = sum( + rule.sample(Span(tracer=tracer, name=i)) + for i in range(iterations) + ) + + # Less than 5% deviation when 'enough' iterations (arbitrary, just check if it converges) + deviation = abs(sampled - (iterations * sample_rate)) / (iterations * sample_rate) + assert deviation < 0.05, ( + 'Deviation {!r} too high with sample_rate {!r} for {} sampled'.format(deviation, sample_rate, sampled) + ) + + +def test_sampling_rule_sample_rate_1(): + tracer = get_dummy_tracer() + rule = SamplingRule(sample_rate=1) + + 
iterations = int(1e4) + assert all( + rule.sample(Span(tracer=tracer, name=i)) + for i in range(iterations) + ) + + +def test_sampling_rule_sample_rate_0(): + tracer = get_dummy_tracer() + rule = SamplingRule(sample_rate=0) + + iterations = int(1e4) + assert sum( + rule.sample(Span(tracer=tracer, name=i)) + for i in range(iterations) + ) == 0 + + +def test_datadog_sampler_init(): + # No args + sampler = DatadogSampler() + assert sampler.rules == [] + assert isinstance(sampler.limiter, RateLimiter) + assert sampler.limiter.rate_limit == DatadogSampler.DEFAULT_RATE_LIMIT + assert isinstance(sampler.default_sampler, RateByServiceSampler) + + # With rules + rule = SamplingRule(sample_rate=1) + sampler = DatadogSampler(rules=[rule]) + assert sampler.rules == [rule] + assert sampler.limiter.rate_limit == DatadogSampler.DEFAULT_RATE_LIMIT + assert isinstance(sampler.default_sampler, RateByServiceSampler) + + # With rate limit + sampler = DatadogSampler(rate_limit=10) + assert sampler.limiter.rate_limit == 10 + assert isinstance(sampler.default_sampler, RateByServiceSampler) + + # With default_sample_rate + sampler = DatadogSampler(default_sample_rate=0.5) + assert sampler.limiter.rate_limit == DatadogSampler.DEFAULT_RATE_LIMIT + assert isinstance(sampler.default_sampler, SamplingRule) + assert sampler.default_sampler.sample_rate == 0.5 + + # From env variables + with override_env(dict(DD_TRACE_SAMPLE_RATE='0.5', DD_TRACE_RATE_LIMIT='10')): + sampler = DatadogSampler() + assert sampler.limiter.rate_limit == 10 + assert isinstance(sampler.default_sampler, SamplingRule) + assert sampler.default_sampler.sample_rate == 0.5 + + # Invalid rules + for val in (None, True, False, object(), 1, Exception()): + with pytest.raises(TypeError): + DatadogSampler(rules=[val]) + + # Ensure rule order + rule_1 = SamplingRule(sample_rate=1) + rule_2 = SamplingRule(sample_rate=0.5, service='test') + rule_3 = SamplingRule(sample_rate=0.25, name='flask.request') + sampler = DatadogSampler(rules=[rule_1, rule_2, rule_3]) + assert sampler.rules == [rule_1, rule_2, rule_3] + + +@mock.patch('ddtrace.sampler.RateByServiceSampler.sample') +def test_datadog_sampler_sample_no_rules(mock_sample, dummy_tracer): + sampler = DatadogSampler() + span = create_span(tracer=dummy_tracer) + + # Default RateByServiceSampler() is applied + # No rules configured + # No global rate limit + # No rate limit configured + # RateByServiceSampler.sample(span) returns True + mock_sample.return_value = True + assert sampler.sample(span) is True + assert span._context.sampling_priority is AUTO_KEEP + assert span.sampled is True + + span = create_span(tracer=dummy_tracer) + + # Default RateByServiceSampler() is applied + # No rules configured + # No global rate limit + # No rate limit configured + # RateByServiceSampler.sample(span) returns False + mock_sample.return_value = False + assert sampler.sample(span) is False + assert span._context.sampling_priority is AUTO_REJECT + assert span.sampled is False + + +@mock.patch('ddtrace.internal.rate_limiter.RateLimiter.is_allowed') +def test_datadog_sampler_sample_rules(mock_is_allowed, dummy_tracer): + # Do not let the limiter get in the way of our test + mock_is_allowed.return_value = True + + rules = [ + mock.Mock(spec=SamplingRule), + mock.Mock(spec=SamplingRule), + mock.Mock(spec=SamplingRule), + ] + sampler = DatadogSampler(rules=rules) + + # Reset all of our mocks + @contextlib.contextmanager + def reset_mocks(): + def reset(): + mock_is_allowed.reset_mock() + for rule in rules: + rule.reset_mock() + 
rule.sample_rate = 0.5 + + default_rule = SamplingRule(sample_rate=1.0) + sampler.default_sampler = mock.Mock(spec=SamplingRule, wraps=default_rule) + # Mock has lots of problems with mocking/wrapping over class properties + sampler.default_sampler.sample_rate = default_rule.sample_rate + + reset() # Reset before, just in case + try: + yield + finally: + reset() # Must reset after + + # No rules want to sample + # It is allowed because of default rate sampler + # All rules SamplingRule.matches are called + # No calls to SamplingRule.sample happen + with reset_mocks(): + span = create_span(tracer=dummy_tracer) + for rule in rules: + rule.matches.return_value = False + + assert sampler.sample(span) is True + assert span._context.sampling_priority is AUTO_KEEP + assert span.sampled is True + mock_is_allowed.assert_called_once_with() + for rule in rules: + rule.matches.assert_called_once_with(span) + rule.sample.assert_not_called() + sampler.default_sampler.matches.assert_not_called() + sampler.default_sampler.sample.assert_called_once_with(span) + assert_sampling_decision_tags(span, rule=1.0, limit=1.0) + + # One rule thinks it should be sampled + # All following rule's SamplingRule.matches are not called + # It goes through limiter + # It is allowed + with reset_mocks(): + span = create_span(tracer=dummy_tracer) + + rules[1].matches.return_value = True + rules[1].sample.return_value = True + + assert sampler.sample(span) is True + assert span._context.sampling_priority is AUTO_KEEP + assert span.sampled is True + mock_is_allowed.assert_called_once_with() + sampler.default_sampler.sample.assert_not_called() + assert_sampling_decision_tags(span, rule=0.5, limit=1.0) + + rules[0].matches.assert_called_once_with(span) + rules[0].sample.assert_not_called() + + rules[1].matches.assert_called_once_with(span) + rules[1].sample.assert_called_once_with(span) + + rules[2].matches.assert_not_called() + rules[2].sample.assert_not_called() + + # All rules think it should be sampled + # The first rule's SamplingRule.matches is called + # It goes through limiter + # It is allowed + with reset_mocks(): + span = create_span(tracer=dummy_tracer) + + for rule in rules: + rule.matches.return_value = True + rules[0].sample.return_value = True + + assert sampler.sample(span) is True + assert span._context.sampling_priority is AUTO_KEEP + assert span.sampled is True + mock_is_allowed.assert_called_once_with() + sampler.default_sampler.sample.assert_not_called() + assert_sampling_decision_tags(span, rule=0.5, limit=1.0) + + rules[0].matches.assert_called_once_with(span) + rules[0].sample.assert_called_once_with(span) + for rule in rules[1:]: + rule.matches.assert_not_called() + rule.sample.assert_not_called() + + # Rule matches but does not think it should be sampled + # The rule's SamplingRule.matches is called + # The rule's SamplingRule.sample is called + # Rate limiter is not called + # The span is rejected + with reset_mocks(): + span = create_span(tracer=dummy_tracer) + + rules[0].matches.return_value = False + rules[2].matches.return_value = False + + rules[1].matches.return_value = True + rules[1].sample.return_value = False + + assert sampler.sample(span) is False + assert span._context.sampling_priority is AUTO_REJECT + assert span.sampled is False + mock_is_allowed.assert_not_called() + sampler.default_sampler.sample.assert_not_called() + assert_sampling_decision_tags(span, rule=0.5) + + rules[0].matches.assert_called_once_with(span) + rules[0].sample.assert_not_called() + + 
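# rules[1] matched and rejected the span, so evaluation stopped before rules[2]
+        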
rules[1].matches.assert_called_once_with(span) + rules[1].sample.assert_called_once_with(span) + + rules[2].matches.assert_not_called() + rules[2].sample.assert_not_called() + + # No rules match and RateByServiceSampler is used + # All rules SamplingRule.matches are called + # Priority sampler's `sample` method is called + # Result of priority sampler is returned + # Rate limiter is not called + with reset_mocks(): + span = create_span(tracer=dummy_tracer) + + # Configure mock priority sampler + priority_sampler = RateByServiceSampler() + sampler.default_sampler = mock.Mock(spec=RateByServiceSampler, wraps=priority_sampler) + + for rule in rules: + rule.matches.return_value = False + rule.sample.return_value = False + + assert sampler.sample(span) is True + assert span._context.sampling_priority is AUTO_KEEP + assert span.sampled is True + mock_is_allowed.assert_not_called() + sampler.default_sampler.sample.assert_called_once_with(span) + assert_sampling_decision_tags(span, agent=1) + + [r.matches.assert_called_once_with(span) for r in rules] + [r.sample.assert_not_called() for r in rules] + + # No rules match and priority sampler is defined + # All rules SamplingRule.matches are called + # Priority sampler's `sample` method is called + # Result of priority sampler is returned + # Rate limiter is not called + with reset_mocks(): + span = create_span(tracer=dummy_tracer) + + # Configure mock priority sampler + priority_sampler = RateByServiceSampler() + for rate_sampler in priority_sampler._by_service_samplers.values(): + rate_sampler.set_sample_rate(0) + + sampler.default_sampler = mock.Mock(spec=RateByServiceSampler, wraps=priority_sampler) + + for rule in rules: + rule.matches.return_value = False + rule.sample.return_value = False + + assert sampler.sample(span) is False + assert span._context.sampling_priority is AUTO_REJECT + assert span.sampled is False + mock_is_allowed.assert_not_called() + sampler.default_sampler.sample.assert_called_once_with(span) + assert_sampling_decision_tags(span, agent=0) + + [r.matches.assert_called_once_with(span) for r in rules] + [r.sample.assert_not_called() for r in rules] + + +def test_datadog_sampler_tracer(dummy_tracer): + rule = SamplingRule(sample_rate=1.0, name='test.span') + rule_spy = mock.Mock(spec=rule, wraps=rule) + rule_spy.sample_rate = rule.sample_rate + + sampler = DatadogSampler(rules=[rule_spy]) + limiter_spy = mock.Mock(spec=sampler.limiter, wraps=sampler.limiter) + sampler.limiter = limiter_spy + sampler_spy = mock.Mock(spec=sampler, wraps=sampler) + + dummy_tracer.configure(sampler=sampler_spy) + + assert dummy_tracer.sampler is sampler_spy + + with dummy_tracer.trace('test.span') as span: + # Assert all of our expected functions were called + sampler_spy.sample.assert_called_once_with(span) + rule_spy.matches.assert_called_once_with(span) + rule_spy.sample.assert_called_once_with(span) + limiter_spy.is_allowed.assert_called_once_with() + + # It must always mark it as sampled + assert span.sampled is True + # We know it was sampled because we have a sample rate of 1.0 + assert span._context.sampling_priority is AUTO_KEEP + assert_sampling_decision_tags(span, rule=1.0) + + +def test_datadog_sampler_tracer_rate_limited(dummy_tracer): + rule = SamplingRule(sample_rate=1.0, name='test.span') + rule_spy = mock.Mock(spec=rule, wraps=rule) + rule_spy.sample_rate = rule.sample_rate + + sampler = DatadogSampler(rules=[rule_spy]) + limiter_spy = mock.Mock(spec=sampler.limiter, wraps=sampler.limiter) + limiter_spy.is_allowed.return_value = 
False  # Have the limiter deny the span
+    sampler.limiter = limiter_spy
+    sampler_spy = mock.Mock(spec=sampler, wraps=sampler)
+
+    dummy_tracer.configure(sampler=sampler_spy)
+
+    assert dummy_tracer.sampler is sampler_spy
+
+    with dummy_tracer.trace('test.span') as span:
+        # Assert all of our expected functions were called
+        sampler_spy.sample.assert_called_once_with(span)
+        rule_spy.matches.assert_called_once_with(span)
+        rule_spy.sample.assert_called_once_with(span)
+        limiter_spy.is_allowed.assert_called_once_with()
+
+        # We must always mark the span as sampled
+        assert span.sampled is True
+        assert span._context.sampling_priority is AUTO_REJECT
+        assert_sampling_decision_tags(span, rule=1.0, limit=None)
+
+
+def test_datadog_sampler_tracer_rate_0(dummy_tracer):
+    rule = SamplingRule(sample_rate=0, name='test.span')  # Sample rate of 0 means never sample
+    rule_spy = mock.Mock(spec=rule, wraps=rule)
+    rule_spy.sample_rate = rule.sample_rate
+
+    sampler = DatadogSampler(rules=[rule_spy])
+    limiter_spy = mock.Mock(spec=sampler.limiter, wraps=sampler.limiter)
+    sampler.limiter = limiter_spy
+    sampler_spy = mock.Mock(spec=sampler, wraps=sampler)
+
+    dummy_tracer.configure(sampler=sampler_spy)
+
+    assert dummy_tracer.sampler is sampler_spy
+
+    with dummy_tracer.trace('test.span') as span:
+        # Assert all of our expected functions were called
+        sampler_spy.sample.assert_called_once_with(span)
+        rule_spy.matches.assert_called_once_with(span)
+        rule_spy.sample.assert_called_once_with(span)
+        limiter_spy.is_allowed.assert_not_called()
+
+        # It must always mark it as sampled
+        assert span.sampled is True
+        # We know it was not sampled because we have a sample rate of 0.0
+        assert span._context.sampling_priority is AUTO_REJECT
+        assert_sampling_decision_tags(span, rule=0)
+
+
+def test_datadog_sampler_tracer_child(dummy_tracer):
+    rule = SamplingRule(sample_rate=1.0)  # No name/service constraints means the rule matches every span
+    rule_spy = mock.Mock(spec=rule, wraps=rule)
+    rule_spy.sample_rate = rule.sample_rate
+
+    sampler = DatadogSampler(rules=[rule_spy])
+    limiter_spy = mock.Mock(spec=sampler.limiter, wraps=sampler.limiter)
+    sampler.limiter = limiter_spy
+    sampler_spy = mock.Mock(spec=sampler, wraps=sampler)
+
+    dummy_tracer.configure(sampler=sampler_spy)
+
+    assert dummy_tracer.sampler is sampler_spy
+
+    with dummy_tracer.trace('parent.span') as parent:
+        with dummy_tracer.trace('child.span') as child:
+            # Assert all of our expected functions were called
+            # DEV: `assert_called_once_with` ensures we didn't also call with the child span
+            sampler_spy.sample.assert_called_once_with(parent)
+            rule_spy.matches.assert_called_once_with(parent)
+            rule_spy.sample.assert_called_once_with(parent)
+            limiter_spy.is_allowed.assert_called_once_with()
+
+            # We know it was sampled because we have a sample rate of 1.0
+            assert parent.sampled is True
+            assert parent._context.sampling_priority is AUTO_KEEP
+            assert_sampling_decision_tags(parent, rule=1.0)
+
+            assert child.sampled is True
+            assert child._parent is parent
+            assert child._context.sampling_priority is AUTO_KEEP
+
+
+def test_datadog_sampler_tracer_start_span(dummy_tracer):
+    rule = SamplingRule(sample_rate=1.0)  # No name/service constraints means the rule matches every span
+    rule_spy = mock.Mock(spec=rule, wraps=rule)
+    rule_spy.sample_rate = rule.sample_rate
+
+    sampler = DatadogSampler(rules=[rule_spy])
+    limiter_spy = mock.Mock(spec=sampler.limiter, wraps=sampler.limiter)
+    sampler.limiter = limiter_spy
+    sampler_spy = mock.Mock(spec=sampler, wraps=sampler)
+
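+    # Hand the spied sampler to the tracer so the calls below can be asserted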
+    dummy_tracer.configure(sampler=sampler_spy)
+
+    assert dummy_tracer.sampler is sampler_spy
+
+    span = dummy_tracer.start_span('test.span')
+
+    # Assert all of our expected functions were called
+    sampler_spy.sample.assert_called_once_with(span)
+    rule_spy.matches.assert_called_once_with(span)
+    rule_spy.sample.assert_called_once_with(span)
+    limiter_spy.is_allowed.assert_called_once_with()
+
+    # It must always mark it as sampled
+    assert span.sampled is True
+    # We know it was sampled because we have a sample rate of 1.0
+    assert span._context.sampling_priority is AUTO_KEEP
+    assert_sampling_decision_tags(span, rule=1.0)
+
+
+def test_datadog_sampler_update_rate_by_service_sample_rates(dummy_tracer):
+    cases = [
+        {
+            'service:,env:': 1,
+        },
+        {
+            'service:,env:': 1,
+            'service:mcnulty,env:dev': 0.33,
+            'service:postgres,env:dev': 0.7,
+        },
+        {
+            'service:,env:': 1,
+            'service:mcnulty,env:dev': 0.25,
+            'service:postgres,env:dev': 0.5,
+            'service:redis,env:prod': 0.75,
+        },
+    ]
+
+    # By default the sampler sets its default sampler to RateByServiceSampler
+    sampler = DatadogSampler()
+    for case in cases:
+        sampler.update_rate_by_service_sample_rates(case)
+        rates = {}
+        for k, v in iteritems(sampler.default_sampler._by_service_samplers):
+            rates[k] = v.sample_rate
+        assert case == rates, '%s != %s' % (case, rates)
+
+    # It's important to also run the cases in reverse order: we want to make sure
+    # key deletion works as well as key insertion (doing it both ways triggers both)
+    cases.reverse()
+    for case in cases:
+        sampler.update_rate_by_service_sample_rates(case)
+        rates = {}
+        for k, v in iteritems(sampler.default_sampler._by_service_samplers):
+            rates[k] = v.sample_rate
+        assert case == rates, '%s != %s' % (case, rates)
diff --git a/tests/test_span.py b/tests/test_span.py
new file mode 100644
index 0000000000..426ab8341f
--- /dev/null
+++ b/tests/test_span.py
@@ -0,0 +1,421 @@
+import mock
+import time
+
+from unittest.case import SkipTest
+
+from ddtrace.context import Context
+from ddtrace.constants import ANALYTICS_SAMPLE_RATE_KEY
+from ddtrace.span import Span
+from ddtrace.ext import SpanTypes, errors, priority
+from .base import BaseTracerTestCase
+
+
+class SpanTestCase(BaseTracerTestCase):
+    def test_ids(self):
+        s = Span(tracer=None, name='span.test')
+        assert s.trace_id
+        assert s.span_id
+        assert not s.parent_id
+
+        s2 = Span(tracer=None, name='t', trace_id=1, span_id=2, parent_id=1)
+        assert s2.trace_id == 1
+        assert s2.span_id == 2
+        assert s2.parent_id == 1
+
+    def test_tags(self):
+        s = Span(tracer=None, name='test.span')
+        s.set_tag('a', 'a')
+        s.set_tag('b', 1)
+        s.set_tag('c', '1')
+        d = s.to_dict()
+        assert d['meta'] == dict(a='a', c='1')
+        assert d['metrics'] == dict(b=1)
+
+    def test_numeric_tags(self):
+        s = Span(tracer=None, name='test.span')
+        s.set_tag('negative', -1)
+        s.set_tag('zero', 0)
+        s.set_tag('positive', 1)
+        s.set_tag('large_int', 2**53)
+        s.set_tag('really_large_int', (2**53) + 1)
+        s.set_tag('large_negative_int', -(2**53))
+        s.set_tag('really_large_negative_int', -((2**53) + 1))
+        s.set_tag('float', 12.3456789)
+        s.set_tag('negative_float', -12.3456789)
+        s.set_tag('large_float', 2.0**53)
+        s.set_tag('really_large_float', (2.0**53) + 1)
+
+        d = s.to_dict()
+        assert d['meta'] == dict(
+            really_large_int=str(((2**53) + 1)),
+            really_large_negative_int=str(-((2**53) + 1)),
+        )
+        assert d['metrics'] == {
+            'negative': -1,
+            'zero': 0,
+            'positive': 1,
+            'large_int': 2**53,
+            'large_negative_int': -(2**53),
+            'float': 12.3456789,
+            'negative_float': -12.3456789,
+            'large_float': 2.0**53,
+            'really_large_float': (2.0**53) + 1,
+        }
+
+    def test_set_tag_bool(self):
+        s = Span(tracer=None, name='test.span')
+        s.set_tag('true', True)
+        s.set_tag('false', False)
+
+        d = s.to_dict()
+        assert d['meta'] == dict(true='True', false='False')
+        assert 'metrics' not in d
+
+    def test_set_tag_metric(self):
+        s = Span(tracer=None, name='test.span')
+
+        s.set_tag('test', 'value')
+        assert s.meta == dict(test='value')
+        assert s.metrics == dict()
+
+        s.set_tag('test', 1)
+        assert s.meta == dict()
+        assert s.metrics == dict(test=1)
+
+    def test_set_valid_metrics(self):
+        s = Span(tracer=None, name='test.span')
+        s.set_metric('a', 0)
+        s.set_metric('b', -12)
+        s.set_metric('c', 12.134)
+        s.set_metric('d', 1231543543265475686787869123)
+        s.set_metric('e', '12.34')
+        d = s.to_dict()
+        expected = {
+            'a': 0,
+            'b': -12,
+            'c': 12.134,
+            'd': 1231543543265475686787869123,
+            'e': 12.34,
+        }
+        assert d['metrics'] == expected
+
+    def test_set_invalid_metric(self):
+        s = Span(tracer=None, name='test.span')
+
+        invalid_metrics = [
+            None,
+            {},
+            [],
+            s,
+            'quarante-douze',
+            float('nan'),
+            float('inf'),
+            1j
+        ]
+
+        for i, m in enumerate(invalid_metrics):
+            k = str(i)
+            s.set_metric(k, m)
+            assert s.get_metric(k) is None
+
+    def test_set_numpy_metric(self):
+        try:
+            import numpy as np
+        except ImportError:
+            raise SkipTest('numpy not installed')
+        s = Span(tracer=None, name='test.span')
+        s.set_metric('a', np.int64(1))
+        assert s.get_metric('a') == 1
+        assert type(s.get_metric('a')) == float
+
+    def test_tags_not_string(self):
+        # ensure a tag whose string conversion fails does not raise
+        class Foo(object):
+            def __repr__(self):
+                1 / 0
+
+        s = Span(tracer=None, name='test.span')
+        s.set_tag('a', Foo())
+
+    def test_finish(self):
+        # ensure finish will record a span
+        ctx = Context()
+        s = Span(self.tracer, 'test.span', context=ctx)
+        ctx.add_span(s)
+        assert s.duration is None
+
+        sleep = 0.05
+        with s as s1:
+            assert s is s1
+            time.sleep(sleep)
+        assert s.duration >= sleep, '%s < %s' % (s.duration, sleep)
+        self.assert_span_count(1)
+
+    def test_finish_no_tracer(self):
+        # ensure finish works with no tracer without raising exceptions
+        s = Span(tracer=None, name='test.span')
+        s.finish()
+
+    def test_finish_called_multiple_times(self):
+        # we should only record a span the first time finish is called on it
+        ctx = Context()
+        s = Span(self.tracer, 'bar', context=ctx)
+        ctx.add_span(s)
+        s.finish()
+        s.finish()
+        self.assert_span_count(1)
+
+    def test_finish_set_span_duration(self):
+        # If a duration is set on the span, it should be recorded with that
+        # duration
+        s = Span(tracer=None, name='test.span')
+        s.duration = 1337.0
+        s.finish()
+        assert s.duration == 1337.0
+
+    def test_traceback_with_error(self):
+        s = Span(None, 'test.span')
+        try:
+            1 / 0
+        except ZeroDivisionError:
+            s.set_traceback()
+        else:
+            assert 0, 'should have failed'
+
+        assert s.error
+        assert 'by zero' in s.get_tag(errors.ERROR_MSG)
+        assert 'ZeroDivisionError' in s.get_tag(errors.ERROR_TYPE)
+
+    def test_traceback_without_error(self):
+        s = Span(None, 'test.span')
+        s.set_traceback()
+        assert not s.error
+        assert not s.get_tag(errors.ERROR_MSG)
+        assert not s.get_tag(errors.ERROR_TYPE)
+        assert 'in test_traceback_without_error' in s.get_tag(errors.ERROR_STACK)
+
+    def test_ctx_mgr(self):
+        s = Span(self.tracer, 'bar')
+        assert not s.duration
+        assert not s.error
+
+        e = Exception('boo')
+        try:
+            with s:
+                time.sleep(0.01)
+                raise e
+        except Exception as out:
+            assert out == e
+            assert
s.duration > 0, s.duration + assert s.error + assert s.get_tag(errors.ERROR_MSG) == 'boo' + assert 'Exception' in s.get_tag(errors.ERROR_TYPE) + assert s.get_tag(errors.ERROR_STACK) + + else: + assert 0, 'should have failed' + + def test_span_type(self): + s = Span(tracer=None, name='test.span', service='s', resource='r', span_type=SpanTypes.WEB) + s.set_tag('a', '1') + s.set_meta('b', '2') + s.finish() + + d = s.to_dict() + assert d + assert d['span_id'] == s.span_id + assert d['trace_id'] == s.trace_id + assert d['parent_id'] == s.parent_id + assert d['meta'] == {'a': '1', 'b': '2'} + assert d['type'] == 'web' + assert d['error'] == 0 + assert type(d['error']) == int + + def test_span_to_dict(self): + s = Span(tracer=None, name='test.span', service='s', resource='r') + s.span_type = 'foo' + s.set_tag('a', '1') + s.set_meta('b', '2') + s.finish() + + d = s.to_dict() + assert d + assert d['span_id'] == s.span_id + assert d['trace_id'] == s.trace_id + assert d['parent_id'] == s.parent_id + assert d['meta'] == {'a': '1', 'b': '2'} + assert d['type'] == 'foo' + assert d['error'] == 0 + assert type(d['error']) == int + + def test_span_to_dict_sub(self): + parent = Span(tracer=None, name='test.span', service='s', resource='r') + s = Span(tracer=None, name='test.span', service='s', resource='r') + s._parent = parent + s.span_type = 'foo' + s.set_tag('a', '1') + s.set_meta('b', '2') + s.finish() + + d = s.to_dict() + assert d + assert d['span_id'] == s.span_id + assert d['trace_id'] == s.trace_id + assert d['parent_id'] == s.parent_id + assert d['meta'] == {'a': '1', 'b': '2'} + assert d['type'] == 'foo' + assert d['error'] == 0 + assert type(d['error']) == int + + def test_span_boolean_err(self): + s = Span(tracer=None, name='foo.bar', service='s', resource='r') + s.error = True + s.finish() + + d = s.to_dict() + assert d + assert d['error'] == 1 + assert type(d['error']) == int + + @mock.patch('ddtrace.span.log') + def test_numeric_tags_none(self, span_log): + s = Span(tracer=None, name='test.span') + s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, None) + d = s.to_dict() + assert d + assert 'metrics' not in d + + # Ensure we log a debug message + span_log.debug.assert_called_once_with( + 'ignoring not number metric %s:%s', + ANALYTICS_SAMPLE_RATE_KEY, + None, + ) + + def test_numeric_tags_true(self): + s = Span(tracer=None, name='test.span') + s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, True) + d = s.to_dict() + assert d + expected = { + ANALYTICS_SAMPLE_RATE_KEY: 1.0 + } + assert d['metrics'] == expected + + def test_numeric_tags_value(self): + s = Span(tracer=None, name='test.span') + s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, 0.5) + d = s.to_dict() + assert d + expected = { + ANALYTICS_SAMPLE_RATE_KEY: 0.5 + } + assert d['metrics'] == expected + + def test_numeric_tags_bad_value(self): + s = Span(tracer=None, name='test.span') + s.set_tag(ANALYTICS_SAMPLE_RATE_KEY, 'Hello') + d = s.to_dict() + assert d + assert 'metrics' not in d + + def test_set_tag_manual_keep(self): + ctx = Context() + s = Span(tracer=None, name='root.span', service='s', resource='r', context=ctx) + + assert s.context == ctx + assert ctx.sampling_priority != priority.USER_KEEP + assert s.context.sampling_priority != priority.USER_KEEP + assert s.meta == dict() + + s.set_tag('manual.keep') + assert ctx.sampling_priority == priority.USER_KEEP + assert s.context.sampling_priority == priority.USER_KEEP + assert s.meta == dict() + + ctx.sampling_priority = priority.AUTO_REJECT + assert ctx.sampling_priority == priority.AUTO_REJECT + assert 
s.context.sampling_priority == priority.AUTO_REJECT + assert s.meta == dict() + + s.set_tag('manual.keep') + assert ctx.sampling_priority == priority.USER_KEEP + assert s.context.sampling_priority == priority.USER_KEEP + assert s.meta == dict() + + def test_set_tag_manual_drop(self): + ctx = Context() + s = Span(tracer=None, name='root.span', service='s', resource='r', context=ctx) + + assert s.context == ctx + assert ctx.sampling_priority != priority.USER_REJECT + assert s.context.sampling_priority != priority.USER_REJECT + assert s.meta == dict() + + s.set_tag('manual.drop') + assert ctx.sampling_priority == priority.USER_REJECT + assert s.context.sampling_priority == priority.USER_REJECT + assert s.meta == dict() + + ctx.sampling_priority = priority.AUTO_REJECT + assert ctx.sampling_priority == priority.AUTO_REJECT + assert s.context.sampling_priority == priority.AUTO_REJECT + assert s.meta == dict() + + s.set_tag('manual.drop') + assert ctx.sampling_priority == priority.USER_REJECT + assert s.context.sampling_priority == priority.USER_REJECT + assert s.meta == dict() + + def test_set_tag_none(self): + s = Span(tracer=None, name='root.span', service='s', resource='r') + assert s.meta == dict() + + s.set_tag('custom.key', '100') + + assert s.meta == {'custom.key': '100'} + + s.set_tag('custom.key', None) + + assert s.meta == {'custom.key': 'None'} + + def test_duration_zero(self): + s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123) + s.finish(finish_time=123) + assert s.duration_ns == 0 + assert s.duration == 0 + + def test_start_int(self): + s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123) + assert s.start == 123 + assert s.start_ns == 123000000000 + + s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123.123) + assert s.start == 123.123 + assert s.start_ns == 123123000000 + + s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123.123) + s.start = 234567890.0 + assert s.start == 234567890 + assert s.start_ns == 234567890000000000 + + def test_duration_int(self): + s = Span(tracer=None, name='foo.bar', service='s', resource='r') + s.finish() + assert isinstance(s.duration_ns, int) + assert isinstance(s.duration, float) + + s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123) + s.finish(finish_time=123.2) + assert s.duration_ns == 200000000 + assert s.duration == 0.2 + + s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=123.1) + s.finish(finish_time=123.2) + assert s.duration_ns == 100000000 + assert s.duration == 0.1 + + s = Span(tracer=None, name='foo.bar', service='s', resource='r', start=122) + s.finish(finish_time=123) + assert s.duration_ns == 1000000000 + assert s.duration == 1 diff --git a/tests/test_tracer.py b/tests/test_tracer.py new file mode 100644 index 0000000000..2ce840a210 --- /dev/null +++ b/tests/test_tracer.py @@ -0,0 +1,667 @@ +""" +tests for Tracer and utilities. 
+""" +import contextlib +import multiprocessing +from os import getpid +import sys +import warnings + +from unittest.case import SkipTest + +import mock +import pytest + +import ddtrace +from ddtrace.ext import system +from ddtrace.context import Context + +from .base import BaseTracerTestCase +from .util import override_global_tracer +from .utils.tracer import DummyTracer +from .utils.tracer import DummyWriter # noqa + + +def get_dummy_tracer(): + return DummyTracer() + + +class TracerTestCase(BaseTracerTestCase): + def test_tracer_vars(self): + span = self.trace('a', service='s', resource='r', span_type='t') + span.assert_matches(name='a', service='s', resource='r', span_type='t') + # DEV: Finish to ensure we don't leak `service` between spans + span.finish() + + span = self.trace('a') + span.assert_matches(name='a', service=None, resource='a', span_type=None) + + def test_tracer(self): + def _mix(): + with self.trace('cake.mix'): + pass + + def _bake(): + with self.trace('cake.bake'): + pass + + def _make_cake(): + with self.trace('cake.make') as span: + span.service = 'baker' + span.resource = 'cake' + _mix() + _bake() + + # let's run it and make sure all is well. + self.assert_has_no_spans() + _make_cake() + + # Capture root's trace id to assert later + root_trace_id = self.get_root_span().trace_id + + # Assert structure of this trace + self.assert_structure( + # Root span with 2 children + dict(name='cake.make', resource='cake', service='baker', parent_id=None), + ( + # Span with no children + dict(name='cake.mix', resource='cake.mix', service='baker'), + # Span with no children + dict(name='cake.bake', resource='cake.bake', service='baker'), + ), + ) + + # do it again and make sure it has new trace ids + self.reset() + _make_cake() + self.assert_span_count(3) + for s in self.spans: + assert s.trace_id != root_trace_id + + def test_tracer_wrap(self): + @self.tracer.wrap('decorated_function', service='s', resource='r', span_type='t') + def f(tag_name, tag_value): + # make sure we can still set tags + span = self.tracer.current_span() + span.set_tag(tag_name, tag_value) + + f('a', 'b') + + self.assert_span_count(1) + span = self.get_root_span() + span.assert_matches( + name='decorated_function', service='s', resource='r', span_type='t', meta=dict(a='b'), + ) + + def test_tracer_pid(self): + with self.trace('root') as root_span: + with self.trace('child') as child_span: + pass + + # Root span should contain the pid of the current process + root_span.assert_metrics({system.PID: getpid()}, exact=False) + + # Child span should not contain a pid tag + child_span.assert_metrics(dict(), exact=True) + + def test_tracer_wrap_default_name(self): + @self.tracer.wrap() + def f(): + pass + + f() + + self.assert_structure(dict(name='tests.test_tracer.f')) + + def test_tracer_wrap_exception(self): + @self.tracer.wrap() + def f(): + raise Exception('bim') + + with self.assertRaises(Exception) as ex: + f() + + self.assert_structure( + dict( + name='tests.test_tracer.f', + error=1, + meta={ + 'error.msg': ex.message, + 'error.type': ex.__class__.__name__, + }, + ), + ) + + def test_tracer_wrap_multiple_calls(self): + @self.tracer.wrap() + def f(): + pass + + f() + f() + + self.assert_span_count(2) + assert self.spans[0].span_id != self.spans[1].span_id + + def test_tracer_wrap_span_nesting_current_root_span(self): + @self.tracer.wrap('inner') + def inner(): + root_span = self.tracer.current_root_span() + self.assertEqual(root_span.name, 'outer') + + @self.tracer.wrap('outer') + def outer(): + root_span 
+            root_span = self.tracer.current_root_span()
+            self.assertEqual(root_span.name, 'outer')
+
+        @self.tracer.wrap('outer')
+        def outer():
+            root_span = self.tracer.current_root_span()
+            self.assertEqual(root_span.name, 'outer')
+
+            with self.trace('mid'):
+                root_span = self.tracer.current_root_span()
+                self.assertEqual(root_span.name, 'outer')
+
+                inner()
+
+        outer()
+
+    def test_tracer_wrap_span_nesting(self):
+        @self.tracer.wrap('inner')
+        def inner():
+            pass
+
+        @self.tracer.wrap('outer')
+        def outer():
+            with self.trace('mid'):
+                inner()
+
+        outer()
+
+        self.assert_span_count(3)
+        self.assert_structure(
+            dict(name='outer'),
+            (
+                (
+                    dict(name='mid'),
+                    (
+                        dict(name='inner'),
+                    )
+                ),
+            ),
+        )
+
+    def test_tracer_wrap_class(self):
+        class Foo(object):
+
+            @staticmethod
+            @self.tracer.wrap()
+            def s():
+                return 1
+
+            @classmethod
+            @self.tracer.wrap()
+            def c(cls):
+                return 2
+
+            @self.tracer.wrap()
+            def i(cls):
+                return 3
+
+        f = Foo()
+        self.assertEqual(f.s(), 1)
+        self.assertEqual(f.c(), 2)
+        self.assertEqual(f.i(), 3)
+
+        self.assert_span_count(3)
+        self.spans[0].assert_matches(name='tests.test_tracer.s')
+        self.spans[1].assert_matches(name='tests.test_tracer.c')
+        self.spans[2].assert_matches(name='tests.test_tracer.i')
+
+    def test_tracer_wrap_factory(self):
+        def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
+            with tracer.trace('wrap.overwrite') as span:
+                span.set_tag('args', args)
+                span.set_tag('kwargs', kwargs)
+                return fn(*args, **kwargs)
+
+        @self.tracer.wrap()
+        def wrapped_function(param, kw_param=None):
+            self.assertEqual(42, param)
+            self.assertEqual(42, kw_param)
+
+        # set the custom wrap factory after the wrapper has been called
+        self.tracer.configure(wrap_executor=wrap_executor)
+
+        # call the function expecting that the custom tracing wrapper is used
+        wrapped_function(42, kw_param=42)
+
+        self.assert_span_count(1)
+        self.spans[0].assert_matches(
+            name='wrap.overwrite',
+            meta=dict(args='(42,)', kwargs='{\'kw_param\': 42}'),
+        )
+
+    def test_tracer_wrap_factory_nested(self):
+        def wrap_executor(tracer, fn, args, kwargs, span_name=None, service=None, resource=None, span_type=None):
+            with tracer.trace('wrap.overwrite') as span:
+                span.set_tag('args', args)
+                span.set_tag('kwargs', kwargs)
+                return fn(*args, **kwargs)
+
+        @self.tracer.wrap()
+        def wrapped_function(param, kw_param=None):
+            self.assertEqual(42, param)
+            self.assertEqual(42, kw_param)
+
+        # set the custom wrap factory after the wrapper has been called
+        self.tracer.configure(wrap_executor=wrap_executor)
+
+        # call the function expecting that the custom tracing wrapper is used
+        with self.trace('wrap.parent', service='webserver'):
+            wrapped_function(42, kw_param=42)
+
+        self.assert_structure(
+            dict(name='wrap.parent', service='webserver'),
+            (
+                dict(
+                    name='wrap.overwrite',
+                    service='webserver',
+                    meta=dict(args='(42,)', kwargs='{\'kw_param\': 42}')
+                ),
+            ),
+        )
+
+    def test_tracer_disabled(self):
+        self.tracer.enabled = True
+        with self.trace('foo') as s:
+            s.set_tag('a', 'b')
+
+        self.assert_has_spans()
+        self.reset()
+
+        self.tracer.enabled = False
+        with self.trace('foo') as s:
+            s.set_tag('a', 'b')
+        self.assert_has_no_spans()
+
+    def test_unserializable_span_with_finish(self):
+        try:
+            import numpy as np
+        except ImportError:
+            raise SkipTest('numpy not installed')
+
+        # a weird case where manually calling finish with an unserializable
+        # span was causing a loop of serialization.
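+        # (the span below is finished twice: once explicitly, once by the context manager)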
+ with self.trace('parent') as span: + span.metrics['as'] = np.int64(1) # circumvent the data checks + span.finish() + + def test_tracer_disabled_mem_leak(self): + # ensure that if the tracer is disabled, we still remove things from the + # span buffer upon finishing. + self.tracer.enabled = False + s1 = self.trace('foo') + s1.finish() + + p1 = self.tracer.current_span() + s2 = self.trace('bar') + + self.assertIsNone(s2._parent) + s2.finish() + self.assertIsNone(p1) + + def test_tracer_global_tags(self): + s1 = self.trace('brie') + s1.finish() + self.assertIsNone(s1.get_tag('env')) + self.assertIsNone(s1.get_tag('other')) + + self.tracer.set_tags({'env': 'prod'}) + s2 = self.trace('camembert') + s2.finish() + self.assertEqual(s2.get_tag('env'), 'prod') + self.assertIsNone(s2.get_tag('other')) + + self.tracer.set_tags({'env': 'staging', 'other': 'tag'}) + s3 = self.trace('gruyere') + s3.finish() + self.assertEqual(s3.get_tag('env'), 'staging') + self.assertEqual(s3.get_tag('other'), 'tag') + + def test_global_context(self): + # the tracer uses a global thread-local Context + span = self.trace('fake_span') + ctx = self.tracer.get_call_context() + self.assertEqual(len(ctx._trace), 1) + self.assertEqual(ctx._trace[0], span) + + def test_tracer_current_span(self): + # the current span is in the local Context() + span = self.trace('fake_span') + self.assertEqual(self.tracer.current_span(), span) + + def test_tracer_current_span_missing_context(self): + self.assertIsNone(self.tracer.current_span()) + + def test_tracer_current_root_span_missing_context(self): + self.assertIsNone(self.tracer.current_root_span()) + + def test_default_provider_get(self): + # Tracer Context Provider must return a Context object + # even if empty + ctx = self.tracer.context_provider.active() + self.assertTrue(isinstance(ctx, Context)) + self.assertEqual(len(ctx._trace), 0) + + def test_default_provider_set(self): + # The Context Provider can set the current active Context; + # this could happen in distributed tracing + ctx = Context(trace_id=42, span_id=100) + self.tracer.context_provider.activate(ctx) + span = self.trace('web.request') + span.assert_matches(name='web.request', trace_id=42, parent_id=100) + + def test_default_provider_trace(self): + # Context handled by a default provider must be used + # when creating a trace + span = self.trace('web.request') + ctx = self.tracer.context_provider.active() + self.assertEqual(len(ctx._trace), 1) + self.assertEqual(span._context, ctx) + + def test_start_span(self): + # it should create a root Span + span = self.start_span('web.request') + span.assert_matches( + name='web.request', + tracer=self.tracer, + _parent=None, + parent_id=None, + ) + self.assertIsNotNone(span._context) + self.assertEqual(span._context._current_span, span) + + def test_start_span_optional(self): + # it should create a root Span with arguments + span = self.start_span('web.request', service='web', resource='/', span_type='http') + span.assert_matches( + name='web.request', + service='web', + resource='/', + span_type='http', + ) + + def test_start_child_span(self): + # it should create a child Span for the given parent + parent = self.start_span('web.request') + child = self.start_span('web.worker', child_of=parent) + + parent.assert_matches( + name='web.request', + parent_id=None, + _context=child._context, + _parent=None, + tracer=self.tracer, + ) + child.assert_matches( + name='web.worker', + parent_id=parent.span_id, + _context=parent._context, + _parent=parent, + tracer=self.tracer, + ) + + 
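# DEV: starting a child span also makes it the context's current span
+        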
self.assertEqual(child._context._current_span, child) + + def test_start_child_span_attributes(self): + # it should create a child Span with parent's attributes + parent = self.start_span('web.request', service='web', resource='/', span_type='http') + child = self.start_span('web.worker', child_of=parent) + child.assert_matches(name='web.worker', service='web') + + def test_start_child_from_context(self): + # it should create a child span with a populated Context + root = self.start_span('web.request') + context = root.context + child = self.start_span('web.worker', child_of=context) + + child.assert_matches( + name='web.worker', + parent_id=root.span_id, + trace_id=root.trace_id, + _context=root._context, + _parent=root, + tracer=self.tracer, + ) + self.assertEqual(child._context._current_span, child) + + def test_adding_services(self): + self.assertEqual(self.tracer._services, set()) + root = self.start_span('root', service='one') + context = root.context + self.assertSetEqual(self.tracer._services, set(['one'])) + self.start_span('child', service='two', child_of=context) + self.assertSetEqual(self.tracer._services, set(['one', 'two'])) + + def test_configure_runtime_worker(self): + # by default runtime worker not started though runtime id is set + self.assertIsNone(self.tracer._runtime_worker) + + # configure tracer with runtime metrics collection + self.tracer.configure(collect_metrics=True) + self.assertIsNotNone(self.tracer._runtime_worker) + + def test_configure_dogstatsd_host(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + self.tracer.configure(dogstatsd_host='foo') + assert self.tracer._dogstatsd_client.host == 'foo' + assert self.tracer._dogstatsd_client.port == 8125 + # verify warnings triggered + assert len(w) == 1 + assert issubclass(w[-1].category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning) + assert 'Use `dogstatsd_url`' in str(w[-1].message) + + def test_configure_dogstatsd_host_port(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + self.tracer.configure(dogstatsd_host='foo', dogstatsd_port='1234') + assert self.tracer._dogstatsd_client.host == 'foo' + assert self.tracer._dogstatsd_client.port == 1234 + # verify warnings triggered + assert len(w) == 2 + assert issubclass(w[0].category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning) + assert 'Use `dogstatsd_url`' in str(w[0].message) + assert issubclass(w[1].category, ddtrace.utils.deprecation.RemovedInDDTrace10Warning) + assert 'Use `dogstatsd_url`' in str(w[1].message) + + def test_configure_dogstatsd_url_host_port(self): + self.tracer.configure(dogstatsd_url='foo:1234') + assert self.tracer._dogstatsd_client.host == 'foo' + assert self.tracer._dogstatsd_client.port == 1234 + + def test_configure_dogstatsd_url_socket(self): + self.tracer.configure(dogstatsd_url='unix:///foo.sock') + assert self.tracer._dogstatsd_client.host is None + assert self.tracer._dogstatsd_client.port is None + assert self.tracer._dogstatsd_client.socket_path == '/foo.sock' + + def test_span_no_runtime_tags(self): + self.tracer.configure(collect_metrics=False) + + root = self.start_span('root') + context = root.context + child = self.start_span('child', child_of=context) + + self.assertIsNone(root.get_tag('language')) + + self.assertIsNone(child.get_tag('language')) + + def test_only_root_span_runtime_internal_span_types(self): + self.tracer.configure(collect_metrics=True) + + for span_type in ("custom", "template", "web", "worker"): + root = 
self.start_span('root', span_type=span_type) + context = root.context + child = self.start_span('child', child_of=context) + + self.assertEqual(root.get_tag('language'), 'python') + + self.assertIsNone(child.get_tag('language')) + + def test_only_root_span_runtime_external_span_types(self): + self.tracer.configure(collect_metrics=True) + + for span_type in ("algoliasearch.search", "boto", "cache", "cassandra", "elasticsearch", + "grpc", "kombu", "http", "memcached", "redis", "sql", "vertica"): + root = self.start_span('root', span_type=span_type) + context = root.context + child = self.start_span('child', child_of=context) + + self.assertIsNone(root.get_tag('language')) + + self.assertIsNone(child.get_tag('language')) + + +def test_installed_excepthook(): + ddtrace.install_excepthook() + assert sys.excepthook is ddtrace._excepthook + ddtrace.uninstall_excepthook() + assert sys.excepthook is not ddtrace._excepthook + ddtrace.install_excepthook() + assert sys.excepthook is ddtrace._excepthook + + +def test_excepthook(): + ddtrace.install_excepthook() + + class Foobar(Exception): + pass + + called = {} + + def original(tp, value, traceback): + called['yes'] = True + + sys.excepthook = original + ddtrace.install_excepthook() + + e = Foobar() + + tracer = ddtrace.Tracer() + tracer._dogstatsd_client = mock.Mock() + with override_global_tracer(tracer): + sys.excepthook(e.__class__, e, None) + + tracer._dogstatsd_client.increment.assert_has_calls(( + mock.call('datadog.tracer.uncaught_exceptions', 1, tags=['class:Foobar']), + )) + assert called + + +def test_tracer_url(): + t = ddtrace.Tracer() + assert t.writer.api.hostname == 'localhost' + assert t.writer.api.port == 8126 + + t = ddtrace.Tracer(url='http://foobar:12') + assert t.writer.api.hostname == 'foobar' + assert t.writer.api.port == 12 + + t = ddtrace.Tracer(url='unix:///foobar') + assert t.writer.api.uds_path == '/foobar' + + t = ddtrace.Tracer(url='http://localhost') + assert t.writer.api.hostname == 'localhost' + assert t.writer.api.port == 80 + assert not t.writer.api.https + + t = ddtrace.Tracer(url='https://localhost') + assert t.writer.api.hostname == 'localhost' + assert t.writer.api.port == 443 + assert t.writer.api.https + + with pytest.raises(ValueError) as e: + t = ddtrace.Tracer(url='foo://foobar:12') + assert str(e) == 'Unknown scheme `foo` for agent URL' + + +def test_tracer_dogstatsd_url(): + t = ddtrace.Tracer() + assert t._dogstatsd_client.host == 'localhost' + assert t._dogstatsd_client.port == 8125 + + t = ddtrace.Tracer(dogstatsd_url='foobar:12') + assert t._dogstatsd_client.host == 'foobar' + assert t._dogstatsd_client.port == 12 + + t = ddtrace.Tracer(dogstatsd_url='udp://foobar:12') + assert t._dogstatsd_client.host == 'foobar' + assert t._dogstatsd_client.port == 12 + + t = ddtrace.Tracer(dogstatsd_url='/var/run/statsd.sock') + assert t._dogstatsd_client.socket_path == '/var/run/statsd.sock' + + t = ddtrace.Tracer(dogstatsd_url='unix:///var/run/statsd.sock') + assert t._dogstatsd_client.socket_path == '/var/run/statsd.sock' + + with pytest.raises(ValueError) as e: + t = ddtrace.Tracer(dogstatsd_url='foo://foobar:12') + assert str(e) == 'Unknown url format for `foo://foobar:12`' + + +def test_tracer_fork(): + t = ddtrace.Tracer() + original_pid = t._pid + original_writer = t.writer + + @contextlib.contextmanager + def capture_failures(errors): + try: + yield + except AssertionError as e: + errors.put(e) + + def task(t, errors): + # Start a new span to trigger process checking + with t.trace('test',
service='test') as span: + + # Assert we recreated the writer and have a new queue + with capture_failures(errors): + assert t._pid != original_pid + assert t.writer != original_writer + assert t.writer._trace_queue != original_writer._trace_queue + + # Stop the background worker so we don't accidentally flush the + # queue before we can assert on it + t.writer.stop() + t.writer.join() + + # Assert the trace got written into the correct queue + assert original_writer._trace_queue.qsize() == 0 + assert t.writer._trace_queue.qsize() == 1 + assert [[span]] == list(t.writer._trace_queue.get()) + + # Assert tracer in a new process correctly recreates the writer + errors = multiprocessing.Queue() + p = multiprocessing.Process(target=task, args=(t, errors)) + try: + p.start() + finally: + p.join(timeout=2) + + while errors.qsize() > 0: + raise errors.get() + + # Ensure writing into the tracer in this process still works as expected + with t.trace('test', service='test') as span: + assert t._pid == original_pid + assert t.writer == original_writer + assert t.writer._trace_queue == original_writer._trace_queue + + # Stop the background worker so we don't accidentally flush the + # queue before we can assert on it + t.writer.stop() + t.writer.join() + + # Assert the trace got written into the correct queue + assert original_writer._trace_queue.qsize() == 1 + assert t.writer._trace_queue.qsize() == 1 + assert [[span]] == list(t.writer._trace_queue.get()) diff --git a/tests/test_utils.py b/tests/test_utils.py new file mode 100644 index 0000000000..959c8acc7d --- /dev/null +++ b/tests/test_utils.py @@ -0,0 +1,98 @@ +import os +import unittest +import warnings + +from ddtrace.utils.deprecation import deprecation, deprecated, format_message +from ddtrace.utils.formats import asbool, get_env, flatten_dict + + +class TestUtils(unittest.TestCase): + def test_asbool(self): + # ensure the value is properly cast + self.assertTrue(asbool('True')) + self.assertTrue(asbool('true')) + self.assertTrue(asbool('1')) + self.assertFalse(asbool('False')) + self.assertFalse(asbool('false')) + self.assertFalse(asbool(None)) + self.assertFalse(asbool('')) + self.assertTrue(asbool(True)) + self.assertFalse(asbool(False)) + + def test_get_env(self): + # ensure `get_env` returns a default value if environment variables + # are not set + value = get_env('django', 'distributed_tracing') + self.assertIsNone(value) + value = get_env('django', 'distributed_tracing', False) + self.assertFalse(value) + + def test_get_env_found(self): + # ensure `get_env` returns a value if the environment variable is set + os.environ['DD_REQUESTS_DISTRIBUTED_TRACING'] = '1' + value = get_env('requests', 'distributed_tracing') + self.assertEqual(value, '1') + + def test_get_env_found_legacy(self): + # ensure `get_env` returns a value if legacy environment variables + # are used, raising a DeprecationWarning + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + os.environ['DATADOG_REQUESTS_DISTRIBUTED_TRACING'] = '1' + value = get_env('requests', 'distributed_tracing') + self.assertEqual(value, '1') + self.assertEqual(len(w), 1) + self.assertTrue(issubclass(w[-1].category, DeprecationWarning)) + self.assertTrue('Use `DD_` prefix instead' in str(w[-1].message)) + + def test_get_env_key_priority(self): + # ensure `get_env` uses `DD_` with highest priority + os.environ['DD_REQUESTS_DISTRIBUTED_TRACING'] = 'highest' + os.environ['DATADOG_REQUESTS_DISTRIBUTED_TRACING'] = 'lowest' + value = get_env('requests',
'distributed_tracing') + self.assertEqual(value, 'highest') + + def test_deprecation_formatter(self): + # ensure the formatter returns the proper message + msg = format_message( + 'deprecated_function', + 'use something else instead', + '1.0.0', + ) + expected = ( + '\'deprecated_function\' is deprecated and will be remove in future versions (1.0.0). ' + 'use something else instead' + ) + self.assertEqual(msg, expected) + + def test_deprecation(self): + # ensure `deprecation` properly raises a DeprecationWarning + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + deprecation( + name='fn', + message='message', + version='1.0.0' + ) + self.assertEqual(len(w), 1) + self.assertTrue(issubclass(w[-1].category, DeprecationWarning)) + self.assertIn('message', str(w[-1].message)) + + def test_deprecated_decorator(self): + # ensure the `deprecated` decorator properly raises a DeprecationWarning + @deprecated('decorator', version='1.0.0') + def fxn(): + pass + + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + fxn() + self.assertEqual(len(w), 1) + self.assertTrue(issubclass(w[-1].category, DeprecationWarning)) + self.assertIn('decorator', str(w[-1].message)) + + def test_flatten_dict(self): + """ ensure that flattening of a nested dict results in a normalized, 1-level dict """ + d = dict(A=1, B=2, C=dict(A=3, B=4, C=dict(A=5, B=6))) + e = dict(A=1, B=2, C_A=3, C_B=4, C_C_A=5, C_C_B=6) + self.assertEqual(flatten_dict(d, sep='_'), e) diff --git a/tests/test_worker.py b/tests/test_worker.py new file mode 100644 index 0000000000..a08d2a2af7 --- /dev/null +++ b/tests/test_worker.py @@ -0,0 +1,58 @@ +import pytest + +from ddtrace import _worker + + +def test_start(): + w = _worker.PeriodicWorkerThread() + w.start() + assert w.is_alive() + w.stop() + w.join() + assert not w.is_alive() + + +def test_periodic(): + results = [] + + class MyWorker(_worker.PeriodicWorkerThread): + @staticmethod + def run_periodic(): + results.append(object()) + + w = MyWorker(interval=0, daemon=False) + w.start() + # results should be filled really quickly, but just in case the thread is a snail, wait + while not results: + pass + w.stop() + w.join() + assert results + + +def test_on_shutdown(): + results = [] + + class MyWorker(_worker.PeriodicWorkerThread): + @staticmethod + def on_shutdown(): + results.append(object()) + + w = MyWorker() + w.start() + assert not results + w.stop() + w.join() + assert results + + +def test_restart(): + w = _worker.PeriodicWorkerThread() + w.start() + assert w.is_alive() + w.stop() + w.join() + assert not w.is_alive() + + with pytest.raises(RuntimeError): + w.start() diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/http/__init__.py b/tests/unit/http/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/http/test_headers.py b/tests/unit/http/test_headers.py new file mode 100644 index 0000000000..d0e6b692e9 --- /dev/null +++ b/tests/unit/http/test_headers.py @@ -0,0 +1,170 @@ +import pytest + +from ddtrace import tracer, Span +from ddtrace.http import store_request_headers, store_response_headers +from ddtrace.settings import Config, IntegrationConfig + + +class TestHeaders(object): + + @pytest.fixture() + def span(self): + yield Span(tracer, 'some_span') + + @pytest.fixture() + def config(self): + yield Config() + + @pytest.fixture() + def integration_config(self, config): + yield IntegrationConfig(config, 'test') +
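+ # NOTE: the tests below pin down the whitelisting and tag-naming behavior of + # store_request_headers/store_response_headers. As a rough illustrative sketch + # only (assumed behavior, not the actual ddtrace implementation), the helpers + # are expected to act roughly like: + # + # for name, value in (headers or {}).items(): + # if integration_config.http.header_is_traced(name): + # span.set_tag('http.request.headers.' + normalize_header_name(name), value) +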
+ def test_it_does_not_break_if_no_headers(self, span, integration_config): + store_request_headers(None, span, integration_config) + store_response_headers(None, span, integration_config) + + def test_it_does_not_break_if_headers_are_not_a_dict(self, span, integration_config): + store_request_headers(list(), span, integration_config) + store_response_headers(list(), span, integration_config) + + def test_it_accept_headers_as_list_of_tuples(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + integration_config.http.trace_headers(['Content-Type', 'Max-Age']) + store_request_headers([('Content-Type', 'some;value;content-type')], span, integration_config) + assert span.get_tag('http.request.headers.content-type') == 'some;value;content-type' + assert None is span.get_tag('http.request.headers.other') + + def test_store_multiple_request_headers_as_dict(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + integration_config.http.trace_headers(['Content-Type', 'Max-Age']) + store_request_headers({ + 'Content-Type': 'some;value;content-type', + 'Max-Age': 'some;value;max_age', + 'Other': 'some;value;other', + }, span, integration_config) + assert span.get_tag('http.request.headers.content-type') == 'some;value;content-type' + assert span.get_tag('http.request.headers.max-age') == 'some;value;max_age' + assert None is span.get_tag('http.request.headers.other') + + def test_store_multiple_response_headers_as_dict(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + integration_config.http.trace_headers(['Content-Type', 'Max-Age']) + store_response_headers({ + 'Content-Type': 'some;value;content-type', + 'Max-Age': 'some;value;max_age', + 'Other': 'some;value;other', + }, span, integration_config) + assert span.get_tag('http.response.headers.content-type') == 'some;value;content-type' + assert span.get_tag('http.response.headers.max-age') == 'some;value;max_age' + assert None is span.get_tag('http.response.headers.other') + + def test_numbers_in_headers_names_are_allowed(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + integration_config.http.trace_headers('Content-Type123') + store_response_headers({ + 'Content-Type123': 'some;value', + }, span, integration_config) + assert span.get_tag('http.response.headers.content-type123') == 'some;value' + + def test_allowed_chars_not_replaced_in_tag_name(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + # See: https://docs.datadoghq.com/tagging/#defining-tags + integration_config.http.trace_headers('C0n_t:e/nt-Type') + store_response_headers({ + 'C0n_t:e/nt-Type': 'some;value', + }, span, integration_config) + assert span.get_tag('http.response.headers.c0n_t:e/nt-type') == 'some;value' + + def test_period_is_replaced_by_underscore(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + # Deviation from https://docs.datadoghq.com/tagging/#defining-tags in order to allow + # consistent representation of headers having the period in the name. 
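+ # e.g. an 'api.token' header is stored under the 'http.response.headers.api_token' tag, as asserted below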
+ integration_config.http.trace_headers('api.token') + store_response_headers({ + 'api.token': 'some;value', + }, span, integration_config) + assert span.get_tag('http.response.headers.api_token') == 'some;value' + + def test_non_allowed_chars_replaced(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + # See: https://docs.datadoghq.com/tagging/#defining-tags + integration_config.http.trace_headers('C!#ontent-Type') + store_response_headers({ + 'C!#ontent-Type': 'some;value', + }, span, integration_config) + assert span.get_tag('http.response.headers.c__ontent-type') == 'some;value' + + def test_key_trim_leading_trailing_spaced(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + integration_config.http.trace_headers('Content-Type') + store_response_headers({ + ' Content-Type ': 'some;value', + }, span, integration_config) + assert span.get_tag('http.response.headers.content-type') == 'some;value' + + def test_value_not_trim_leading_trailing_spaced(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + integration_config.http.trace_headers('Content-Type') + store_response_headers({ + 'Content-Type': ' some;value ', + }, span, integration_config) + assert span.get_tag('http.response.headers.content-type') == ' some;value ' + + def test_no_whitelist(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + store_response_headers({ + 'Content-Type': 'some;value', + }, span, integration_config) + assert span.get_tag('http.response.headers.content-type') is None + + def test_whitelist_exact(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + integration_config.http.trace_headers('content-type') + store_response_headers({ + 'Content-Type': 'some;value', + }, span, integration_config) + assert span.get_tag('http.response.headers.content-type') == 'some;value' + + def test_whitelist_case_insensitive(self, span, integration_config): + """ + :type span: Span + :type integration_config: IntegrationConfig + """ + integration_config.http.trace_headers('CoNtEnT-tYpE') + store_response_headers({ + 'cOnTeNt-TyPe': 'some;value', + }, span, integration_config) + assert span.get_tag('http.response.headers.content-type') == 'some;value' diff --git a/tests/unit/test_settings.py b/tests/unit/test_settings.py new file mode 100644 index 0000000000..e224322755 --- /dev/null +++ b/tests/unit/test_settings.py @@ -0,0 +1,197 @@ +from ddtrace.settings import Config, HttpConfig, IntegrationConfig + +from ..base import BaseTestCase + + +class TestConfig(BaseTestCase): + def test_environment_analytics_enabled(self): + with self.override_env(dict(DD_ANALYTICS_ENABLED='True')): + config = Config() + self.assertTrue(config.analytics_enabled) + + with self.override_env(dict(DD_ANALYTICS_ENABLED='False')): + config = Config() + self.assertFalse(config.analytics_enabled) + + with self.override_env(dict(DD_TRACE_ANALYTICS_ENABLED='True')): + config = Config() + self.assertTrue(config.analytics_enabled) + + with self.override_env(dict(DD_TRACE_ANALYTICS_ENABLED='False')): + config = Config() + self.assertFalse(config.analytics_enabled) + + def test_environment_analytics_overrides(self): + with self.override_env(dict(DD_ANALYTICS_ENABLED='False', DD_TRACE_ANALYTICS_ENABLED='True')): + config = Config() + self.assertTrue(config.analytics_enabled) 
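+ # NOTE: DD_TRACE_ANALYTICS_ENABLED takes precedence over DD_ANALYTICS_ENABLED in all four combinations exercised by this test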
+ + with self.override_env(dict(DD_ANALYTICS_ENABLED='False', DD_TRACE_ANALYTICS_ENABLED='False')): + config = Config() + self.assertFalse(config.analytics_enabled) + + with self.override_env(dict(DD_ANALYTICS_ENABLED='True', DD_TRACE_ANALYTICS_ENABLED='True')): + config = Config() + self.assertTrue(config.analytics_enabled) + + with self.override_env(dict(DD_ANALYTICS_ENABLED='True', DD_TRACE_ANALYTICS_ENABLED='False')): + config = Config() + self.assertFalse(config.analytics_enabled) + + +class TestHttpConfig(BaseTestCase): + + def test_trace_headers(self): + http_config = HttpConfig() + http_config.trace_headers('some_header') + assert http_config.header_is_traced('some_header') + assert not http_config.header_is_traced('some_other_header') + + def test_trace_headers_whitelist_case_insensitive(self): + http_config = HttpConfig() + http_config.trace_headers('some_header') + assert http_config.header_is_traced('sOmE_hEaDeR') + assert not http_config.header_is_traced('some_other_header') + + def test_trace_multiple_headers(self): + http_config = HttpConfig() + http_config.trace_headers(['some_header_1', 'some_header_2']) + assert http_config.header_is_traced('some_header_1') + assert http_config.header_is_traced('some_header_2') + assert not http_config.header_is_traced('some_header_3') + + def test_empty_entry_do_not_raise_exception(self): + http_config = HttpConfig() + http_config.trace_headers('') + + assert not http_config.header_is_traced('some_header_1') + + def test_none_entry_do_not_raise_exception(self): + http_config = HttpConfig() + http_config.trace_headers(None) + assert not http_config.header_is_traced('some_header_1') + + def test_is_header_tracing_configured(self): + http_config = HttpConfig() + assert not http_config.is_header_tracing_configured + http_config.trace_headers('some_header') + assert http_config.is_header_tracing_configured + + def test_header_is_traced_case_insensitive(self): + http_config = HttpConfig() + http_config.trace_headers('sOmE_hEaDeR') + assert http_config.header_is_traced('SoMe_HeAdEr') + assert not http_config.header_is_traced('some_other_header') + + def test_header_is_traced_false_for_empty_header(self): + http_config = HttpConfig() + http_config.trace_headers('some_header') + assert not http_config.header_is_traced('') + + def test_header_is_traced_false_for_none_header(self): + http_config = HttpConfig() + http_config.trace_headers('some_header') + assert not http_config.header_is_traced(None) + + +class TestIntegrationConfig(BaseTestCase): + def setUp(self): + self.config = Config() + self.integration_config = IntegrationConfig(self.config, 'test') + + def test_is_a_dict(self): + assert isinstance(self.integration_config, dict) + + def test_allow_item_access(self): + self.integration_config['setting'] = 'value' + + # Can be accessed both as item and attr accessor + assert self.integration_config.setting == 'value' + assert self.integration_config['setting'] == 'value' + + def test_allow_attr_access(self): + self.integration_config.setting = 'value' + + # Can be accessed both as item and attr accessor + assert self.integration_config.setting == 'value' + assert self.integration_config['setting'] == 'value' + + def test_allow_both_access(self): + self.integration_config.setting = 'value' + assert self.integration_config['setting'] == 'value' + assert self.integration_config.setting == 'value' + + self.integration_config['setting'] = 'new-value' + assert self.integration_config.setting == 'new-value' + assert self.integration_config['setting'] 
== 'new-value' + + def test_allow_configuring_http(self): + self.integration_config.http.trace_headers('integration_header') + assert self.integration_config.http.header_is_traced('integration_header') + assert not self.integration_config.http.header_is_traced('other_header') + + def test_allow_exist_both_global_and_integration_config(self): + self.config.trace_headers('global_header') + assert self.integration_config.header_is_traced('global_header') + + self.integration_config.http.trace_headers('integration_header') + assert self.integration_config.header_is_traced('integration_header') + assert not self.integration_config.header_is_traced('global_header') + assert not self.config.header_is_traced('integration_header') + + def test_environment_analytics_enabled(self): + # default + self.assertFalse(self.config.analytics_enabled) + self.assertIsNone(self.config.foo.analytics_enabled) + + with self.override_env(dict(DD_ANALYTICS_ENABLED='True')): + config = Config() + self.assertTrue(config.analytics_enabled) + self.assertIsNone(config.foo.analytics_enabled) + + with self.override_env(dict(DD_FOO_ANALYTICS_ENABLED='True')): + config = Config() + self.assertTrue(config.foo.analytics_enabled) + self.assertEqual(config.foo.analytics_sample_rate, 1.0) + + with self.override_env(dict(DD_FOO_ANALYTICS_ENABLED='False')): + config = Config() + self.assertFalse(config.foo.analytics_enabled) + + with self.override_env(dict(DD_FOO_ANALYTICS_ENABLED='True', DD_FOO_ANALYTICS_SAMPLE_RATE='0.5')): + config = Config() + self.assertTrue(config.foo.analytics_enabled) + self.assertEqual(config.foo.analytics_sample_rate, 0.5) + + def test_analytics_enabled_attribute(self): + """ Confirm environment variables and kwargs are handled properly """ + ic = IntegrationConfig(self.config, 'foo', analytics_enabled=True) + self.assertTrue(ic.analytics_enabled) + + ic = IntegrationConfig(self.config, 'foo', analytics_enabled=False) + self.assertFalse(ic.analytics_enabled) + + with self.override_env(dict(DD_FOO_ANALYTICS_ENABLED='True')): + ic = IntegrationConfig(self.config, 'foo', analytics_enabled=False) + self.assertFalse(ic.analytics_enabled) + + def test_get_analytics_sample_rate(self): + """ Check method for accessing sample rate based on configuration """ + ic = IntegrationConfig(self.config, 'foo', analytics_enabled=True, analytics_sample_rate=0.5) + self.assertEqual(ic.get_analytics_sample_rate(), 0.5) + + ic = IntegrationConfig(self.config, 'foo', analytics_enabled=True) + self.assertEqual(ic.get_analytics_sample_rate(), 1.0) + + ic = IntegrationConfig(self.config, 'foo', analytics_enabled=False) + self.assertIsNone(ic.get_analytics_sample_rate()) + + with self.override_env(dict(DD_ANALYTICS_ENABLED='True')): + config = Config() + ic = IntegrationConfig(config, 'foo') + self.assertEqual(ic.get_analytics_sample_rate(use_global_config=True), 1.0) + + with self.override_env(dict(DD_ANALYTICS_ENABLED='False')): + config = Config() + ic = IntegrationConfig(config, 'foo') + self.assertIsNone(ic.get_analytics_sample_rate(use_global_config=True)) diff --git a/tests/unit/utils/__init__.py b/tests/unit/utils/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/unit/utils/test_http.py b/tests/unit/utils/test_http.py new file mode 100644 index 0000000000..4a4409cb67 --- /dev/null +++ b/tests/unit/utils/test_http.py @@ -0,0 +1,16 @@ +from ddtrace.utils.http import normalize_header_name + + +class TestHeaderNameNormalization(object): + + def test_name_is_trimmed(self): + assert
normalize_header_name(' content-type ') == 'content-type' + + def test_name_is_lowered(self): + assert normalize_header_name('Content-Type') == 'content-type' + + def test_none_does_not_raise_exception(self): + assert normalize_header_name(None) is None + + def test_empty_does_not_raise_exception(self): + assert normalize_header_name('') == '' diff --git a/tests/unit/utils/test_time.py b/tests/unit/utils/test_time.py new file mode 100644 index 0000000000..a340f5bd0b --- /dev/null +++ b/tests/unit/utils/test_time.py @@ -0,0 +1,44 @@ +import pytest + +from ddtrace.utils import time + + +def test_no_states(): + watch = time.StopWatch() + with pytest.raises(RuntimeError): + watch.stop() + + +def test_start_stop(): + watch = time.StopWatch() + watch.start() + watch.stop() + + +def test_start_stop_elapsed(): + watch = time.StopWatch() + watch.start() + watch.stop() + e = watch.elapsed() + assert e > 0 + watch.start() + assert watch.elapsed() != e + + +def test_no_elapsed(): + watch = time.StopWatch() + with pytest.raises(RuntimeError): + watch.elapsed() + + +def test_elapsed(): + watch = time.StopWatch() + watch.start() + watch.stop() + assert watch.elapsed() > 0 + + +def test_context_manager(): + with time.StopWatch() as watch: + pass + assert watch.elapsed() > 0 diff --git a/tests/util.py b/tests/util.py new file mode 100644 index 0000000000..6d1204a34a --- /dev/null +++ b/tests/util.py @@ -0,0 +1,20 @@ +import ddtrace +from contextlib import contextmanager + + +def assert_dict_issuperset(a, b): + assert set(a.items()).issuperset(set(b.items())), \ + '{a} is not a superset of {b}'.format(a=a, b=b) + + +@contextmanager +def override_global_tracer(tracer): + """Helper function that overrides the global tracer available in the + `ddtrace` package. This is required because in some `httplib` tests we + can't easily get the PIN object attached to the `HTTPConnection` to + replace the used tracer with a dummy tracer.
+ """ + original_tracer = ddtrace.tracer + ddtrace.tracer = tracer + yield + ddtrace.tracer = original_tracer diff --git a/tests/utils/__init__.py b/tests/utils/__init__.py new file mode 100644 index 0000000000..526a52a3c0 --- /dev/null +++ b/tests/utils/__init__.py @@ -0,0 +1,32 @@ +import contextlib +import os + +from ddtrace.ext import http + + +def assert_span_http_status_code(span, code): + """Assert on the span's 'http.status_code' tag""" + tag = span.get_tag(http.STATUS_CODE) + code = str(code) + assert tag == code, "%r != %r" % (tag, code) + + +@contextlib.contextmanager +def override_env(env): + """ + Temporarily override ``os.environ`` with provided values:: + + >>> with self.override_env(dict(DATADOG_TRACE_DEBUG=True)): + # Your test + """ + # Copy the full original environment + original = dict(os.environ) + + # Update based on the passed in arguments + os.environ.update(env) + try: + yield + finally: + # Full clear the environment out and reset back to the original + os.environ.clear() + os.environ.update(original) diff --git a/tests/utils/span.py b/tests/utils/span.py new file mode 100644 index 0000000000..ab4cb97aee --- /dev/null +++ b/tests/utils/span.py @@ -0,0 +1,464 @@ +from ddtrace.span import Span + +NO_CHILDREN = object() + + +class TestSpan(Span): + """ + Test wrapper for a :class:`ddtrace.span.Span` that provides additional functions and assertions + + Example:: + + span = tracer.trace('my.span') + span = TestSpan(span) + + if span.matches(name='my.span'): + print('matches') + + # Raises an AssertionError + span.assert_matches(name='not.my.span', meta={'system.pid': getpid()}) + """ + def __init__(self, span): + """ + Constructor for TestSpan + + :param span: The :class:`ddtrace.span.Span` to wrap + :type span: :class:`ddtrace.span.Span` + """ + if isinstance(span, TestSpan): + span = span._span + + # DEV: Use `object.__setattr__` to by-pass this class's `__setattr__` + object.__setattr__(self, '_span', span) + + def __getattr__(self, key): + """ + First look for property on the base :class:`ddtrace.span.Span` otherwise return this object's attribute + """ + if hasattr(self._span, key): + return getattr(self._span, key) + + return self.__getattribute__(key) + + def __setattr__(self, key, value): + """Pass through all assignment to the base :class:`ddtrace.span.Span`""" + return setattr(self._span, key, value) + + def __eq__(self, other): + """ + Custom equality code to ensure we are using the base :class:`ddtrace.span.Span.__eq__` + + :param other: The object to check equality with + :type other: object + :returns: True if equal, False otherwise + :rtype: bool + """ + if isinstance(other, TestSpan): + return other._span == self._span + elif isinstance(other, Span): + return other == self._span + return other == self + + def matches(self, **kwargs): + """ + Helper function to check if this span's properties matches the expected. 
+ + Example:: + + span = TestSpan(span) + span.matches(name='my.span', resource='GET /') + + :param kwargs: Property/Value pairs to evaluate on this span + :type kwargs: dict + :returns: True if the arguments passed match, False otherwise + :rtype: bool + """ + for name, value in kwargs.items(): + # Special case for `meta` + if name == 'meta' and not self.meta_matches(value): + return False + + # Ensure it has the property first + if not hasattr(self, name): + return False + + # Ensure the values match + if getattr(self, name) != value: + return False + + return True + + def meta_matches(self, meta, exact=False): + """ + Helper function to check if this span's meta matches the expected + + Example:: + + span = TestSpan(span) + span.meta_matches({'system.pid': getpid()}) + + :param meta: Property/Value pairs to evaluate on this span + :type meta: dict + :param exact: Whether to do an exact match on the meta values or not, default: False + :type exact: bool + :returns: True if the arguments passed match, False otherwise + :rtype: bool + """ + if exact: + return self.meta == meta + + for key, value in meta.items(): + if key not in self.meta: + return False + if self.meta[key] != value: + return False + return True + + def assert_matches(self, **kwargs): + """ + Assertion method to ensure this span's properties match as expected + + Example:: + + span = TestSpan(span) + span.assert_matches(name='my.span') + + :param kwargs: Property/Value pairs to evaluate on this span + :type kwargs: dict + :raises: AssertionError + """ + for name, value in kwargs.items(): + # Special case for `meta` + if name == 'meta': + self.assert_meta(value) + elif name == 'metrics': + self.assert_metrics(value) + else: + assert hasattr(self, name), '{0!r} does not have property {1!r}'.format(self, name) + assert getattr(self, name) == value, ( + '{0!r} property {1}: {2!r} != {3!r}' + .format(self, name, getattr(self, name), value) + ) + + def assert_meta(self, meta, exact=False): + """ + Assertion method to ensure this span's meta match as expected + + Example:: + + span = TestSpan(span) + span.assert_meta({'system.pid': getpid()}) + + :param meta: Property/Value pairs to evaluate on this span + :type meta: dict + :param exact: Whether to do an exact match on the meta values or not, default: False + :type exact: bool + :raises: AssertionError + """ + if exact: + assert self.meta == meta + else: + for key, value in meta.items(): + assert key in self.meta, '{0} meta does not have property {1!r}'.format(self, key) + assert self.meta[key] == value, ( + '{0} meta property {1!r}: {2!r} != {3!r}' + .format(self, key, self.meta[key], value) + ) + + def assert_metrics(self, metrics, exact=False): + """ + Assertion method to ensure this span's metrics match as expected + + Example:: + + span = TestSpan(span) + span.assert_metrics({'_dd1.sr.eausr': 1}) + + :param metrics: Property/Value pairs to evaluate on this span + :type metrics: dict + :param exact: Whether to do an exact match on the metrics values or not, default: False + :type exact: bool + :raises: AssertionError + """ + if exact: + assert self.metrics == metrics + else: + for key, value in metrics.items(): + assert key in self.metrics, '{0} metrics does not have property {1!r}'.format(self, key) + assert self.metrics[key] == value, ( + '{0} metrics property {1!r}: {2!r} != {3!r}' + .format(self, key, self.metrics[key], value) + ) + + +class TestSpanContainer(object): + """ + Helper class for a container of Spans. 
+ + Subclasses of this class must implement a `get_spans` method:: + + def get_spans(self): + return [] + + This class provides methods and assertions over a list of spans:: + + class TestCases(BaseTracerTestCase): + def test_spans(self): + # TODO: Create spans + + self.assert_has_spans() + self.assert_span_count(3) + self.assert_structure( ... ) + + # Grab only the `requests.request` spans + spans = self.filter_spans(name='requests.request') + """ + def _ensure_test_spans(self, spans): + """ + internal helper to ensure all spans in the list are :class:`tests.utils.span.TestSpan` + + :param spans: List of :class:`ddtrace.span.Span` or :class:`tests.utils.span.TestSpan` + :type spans: list + :returns: A list of :class:`tests.utils.span.TestSpan` + :rtype: list + """ + return [ + span if isinstance(span, TestSpan) else TestSpan(span) for span in spans + ] + + @property + def spans(self): + return self._ensure_test_spans(self.get_spans()) + + def get_spans(self): + """subclass required property""" + raise NotImplementedError + + def _build_tree(self, root): + """helper to build a tree structure for the provided root span""" + children = [] + for span in self.spans: + if span.parent_id == root.span_id: + children.append(self._build_tree(span)) + + return TestSpanNode(root, children) + + def get_root_span(self): + """ + Helper to get the root span from the list of spans in this container + + :returns: The root span if one was found, None if not; an AssertionError is raised if multiple roots are found + :rtype: :class:`tests.utils.span.TestSpanNode`, None + :raises: AssertionError + """ + root = None + for span in self.spans: + if span.parent_id is None: + if root is not None: + raise AssertionError('Multiple root spans found {0!r} {1!r}'.format(root, span)) + root = span + + assert root, 'No root span found in {0!r}'.format(self.spans) + + return self._build_tree(root) + + def get_root_spans(self): + """ + Helper to get all root spans from the list of spans in this container + + :returns: The root spans if any were found, None if not + :rtype: list of :class:`tests.utils.span.TestSpanNode`, None + """ + roots = [] + for span in self.spans: + if span.parent_id is None: + roots.append(self._build_tree(span)) + + return sorted(roots, key=lambda s: s.start) + + def assert_trace_count(self, count): + """Assert the number of unique trace ids this container has""" + trace_count = len(self.get_root_spans()) + assert trace_count == count, 'Trace count {0} != {1}'.format(trace_count, count) + + def assert_span_count(self, count): + """Assert this container has the expected number of spans""" + assert len(self.spans) == count, 'Span count {0} != {1}'.format(len(self.spans), count) + + def assert_has_spans(self): + """Assert this container has spans""" + assert len(self.spans), 'No spans found' + + def assert_has_no_spans(self): + """Assert this container does not have any spans""" + assert len(self.spans) == 0, 'Span count {0}'.format(len(self.spans)) + + def filter_spans(self, *args, **kwargs): + """ + Helper to filter current spans by provided parameters. + + This function will yield all spans whose `TestSpan.matches` function returns `True`.
+ + :param args: Positional arguments to pass to :meth:`tests.utils.span.TestSpan.matches` + :type args: list + :param kwargs: Keyword arguments to pass to :meth:`tests.utils.span.TestSpan.matches` + :type kwargs: dict + :returns: generator for the matched :class:`tests.utils.span.TestSpan` + :rtype: generator + """ + for span in self.spans: + # ensure we have a TestSpan + if not isinstance(span, TestSpan): + span = TestSpan(span) + + if span.matches(*args, **kwargs): + yield span + + def find_span(self, *args, **kwargs): + """ + Find a single span that matches the provided filter parameters. + + This function will find the first span whose `TestSpan.matches` function returns `True`. + + :param args: Positional arguments to pass to :meth:`tests.utils.span.TestSpan.matches` + :type args: list + :param kwargs: Keyword arguments to pass to :meth:`tests.utils.span.TestSpan.matches` + :type kwargs: dict + :returns: The first matching span + :rtype: :class:`tests.utils.span.TestSpan` + """ + span = next(self.filter_spans(*args, **kwargs), None) + assert span is not None, ( + 'No span found for filter {0!r} {1!r}, have {2} spans' + .format(args, kwargs, len(self.spans)) + ) + return span + + +class TracerSpanContainer(TestSpanContainer): + """ + A class to wrap a :class:`tests.utils.tracer.DummyTracer` with a + :class:`tests.utils.span.TestSpanContainer` to use in tests + """ + def __init__(self, tracer): + self.tracer = tracer + super(TracerSpanContainer, self).__init__() + + def get_spans(self): + """ + Overridden method to return all spans attached to this tracer + + :returns: List of spans attached to this tracer + :rtype: list + """ + return self.tracer.writer.spans + + def reset(self): + """Helper to reset the existing list of spans created""" + self.tracer.writer.pop() + + +class TestSpanNode(TestSpan, TestSpanContainer): + """ + A :class:`tests.utils.span.TestSpan` which is used as part of a span tree. + + Each :class:`tests.utils.span.TestSpanNode` represents the current :class:`ddtrace.span.Span` + along with any children who have that span as its parent. + + This class can be used to assert on the parent/child relationships between spans. + + Example:: + + class TestCase(BaseTestCase): + def test_case(self): + # TODO: Create spans + + self.assert_structure( ... ) + + tree = self.get_root_span() + + # Find the first child of the root span with the matching name + request = tree.find_span(name='requests.request') + + # Assert the parent/child relationship of this `request` span + request.assert_structure( ... ) + """ + def __init__(self, root, children=None): + super(TestSpanNode, self).__init__(root) + object.__setattr__(self, '_children', children or []) + + def get_spans(self): + """required subclass property, returns this span's children""" + return self._children + + def assert_structure(self, root, children=NO_CHILDREN): + """ + Assert on the structure of this node and its children. + + This assertion takes a dictionary of properties to assert for this node + along with a list of assertions to make for its children.
+ + Example:: + + def test_case(self): + # Assert the following structure + # + # One root_span, with two child_spans, one with a requests.request span + # + # | root_span | + # | child_span | | child_span | + # | requests.request | + self.assert_structure( + # Root span with two child_span spans + dict(name='root_span'), + + ( + # Child span with one child of its own + ( + dict(name='child_span'), + + # One requests.request span with no children + ( + dict(name='requests.request'), + ), + ), + + # Child span with no children + dict(name='child_span'), + ), + ) + + :param root: Properties to assert for this root span, these are passed to + :meth:`tests.utils.span.TestSpan.assert_matches` + :type root: dict + :param children: List of child assertions to make; if children is None then do not make any + assertions about this node's children. Each list element must be a list with 2 items: + the first is a ``dict`` of property assertions on that child, and the second is a ``list`` + of child assertions to make. + :type children: list, None + :raises: AssertionError + """ + self.assert_matches(**root) + + # Give them a way to ignore asserting on children + if children is None: + return + elif children is NO_CHILDREN: + children = () + + spans = self.spans + self.assert_span_count(len(children)) + for i, child in enumerate(children): + if not isinstance(child, (list, tuple)): + child = (child, NO_CHILDREN) + + root, _children = child + spans[i].assert_matches(parent_id=self.span_id, trace_id=self.trace_id, _parent=self) + spans[i].assert_structure(root, _children) + + def pprint(self): + parts = [super(TestSpanNode, self).pprint()] + for child in self._children: + parts.append('-' * 20) + parts.append(child.pprint()) + return '\r\n'.join(parts) diff --git a/tests/utils/test_module/__init__.py b/tests/utils/test_module/__init__.py new file mode 100644 index 0000000000..9ae1f430dd --- /dev/null +++ b/tests/utils/test_module/__init__.py @@ -0,0 +1,3 @@ +class A(): + def fn(self): + return 1 diff --git a/tests/utils/tracer.py b/tests/utils/tracer.py new file mode 100644 index 0000000000..1b15f4aaf0 --- /dev/null +++ b/tests/utils/tracer.py @@ -0,0 +1,77 @@ +from ddtrace.encoding import JSONEncoder, MsgpackEncoder +from ddtrace.internal.writer import AgentWriter +from ddtrace.tracer import Tracer + + +class DummyWriter(AgentWriter): + """DummyWriter is a small fake writer used for tests.
not thread-safe.""" + + def __init__(self, *args, **kwargs): + # original call + super(DummyWriter, self).__init__(*args, **kwargs) + + # dummy components + self.spans = [] + self.traces = [] + self.services = {} + self.json_encoder = JSONEncoder() + self.msgpack_encoder = MsgpackEncoder() + + def write(self, spans=None, services=None): + if spans: + # the traces encoding expect a list of traces so we + # put spans in a list like we do in the real execution path + # with both encoders + trace = [spans] + self.json_encoder.encode_traces(trace) + self.msgpack_encoder.encode_traces(trace) + self.spans += spans + self.traces += trace + + if services: + self.json_encoder.encode_services(services) + self.msgpack_encoder.encode_services(services) + self.services.update(services) + + def pop(self): + # dummy method + s = self.spans + self.spans = [] + return s + + def pop_traces(self): + # dummy method + traces = self.traces + self.traces = [] + return traces + + def pop_services(self): + # dummy method + + # Setting service info has been deprecated, we want to make sure nothing ever gets written here + assert self.services == {} + s = self.services + self.services = {} + return s + + +class DummyTracer(Tracer): + """ + DummyTracer is a tracer which uses the DummyWriter by default + """ + def __init__(self): + super(DummyTracer, self).__init__() + self._update_writer() + + def _update_writer(self): + self.writer = DummyWriter( + hostname=self.writer.api.hostname, + port=self.writer.api.port, + filters=self.writer._filters, + priority_sampler=self.writer._priority_sampler, + ) + + def configure(self, *args, **kwargs): + super(DummyTracer, self).configure(*args, **kwargs) + # `.configure()` may reset the writer + self._update_writer() diff --git a/tests/vendor/__init__.py b/tests/vendor/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/vendor/test_dogstatsd.py b/tests/vendor/test_dogstatsd.py new file mode 100644 index 0000000000..c9b1005049 --- /dev/null +++ b/tests/vendor/test_dogstatsd.py @@ -0,0 +1,7 @@ +from ddtrace.internal.logger import DDLogger +from ddtrace.vendor.dogstatsd.base import log + + +def test_dogstatsd_logger(): + """Ensure dogstatsd logger is initialized as a rate limited logger""" + assert isinstance(log, DDLogger) diff --git a/tests/wait-for-services.py b/tests/wait-for-services.py new file mode 100644 index 0000000000..2c30f89cb8 --- /dev/null +++ b/tests/wait-for-services.py @@ -0,0 +1,114 @@ +import sys +import time + +import mysql.connector +from psycopg2 import connect, OperationalError +from cassandra.cluster import Cluster, NoHostAvailable +import rediscluster +import vertica_python +import kombu + +from contrib.config import ( + POSTGRES_CONFIG, + CASSANDRA_CONFIG, + MYSQL_CONFIG, + REDISCLUSTER_CONFIG, + VERTICA_CONFIG, + RABBITMQ_CONFIG +) + + +def try_until_timeout(exception): + """Utility decorator that tries to call a check until there is a + timeout. The default timeout is about 20 seconds. 
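+ + Example (mirroring the checks defined below):: + + @try_until_timeout(OperationalError) + def check_postgres(): + ...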
+ + """ + def wrap(fn): + def wrapper(*args, **kwargs): + err = None + + for i in range(100): + try: + fn() + except exception as e: + err = e + time.sleep(0.2) + else: + break + else: + if err: + raise err + return wrapper + return wrap + + +@try_until_timeout(OperationalError) +def check_postgres(): + conn = connect(**POSTGRES_CONFIG) + try: + conn.cursor().execute('SELECT 1;') + finally: + conn.close() + + +@try_until_timeout(NoHostAvailable) +def check_cassandra(): + with Cluster(**CASSANDRA_CONFIG).connect() as conn: + conn.execute('SELECT now() FROM system.local') + + +@try_until_timeout(Exception) +def check_mysql(): + conn = mysql.connector.connect(**MYSQL_CONFIG) + try: + conn.cursor().execute('SELECT 1;') + finally: + conn.close() + + +@try_until_timeout(Exception) +def check_rediscluster(): + test_host = REDISCLUSTER_CONFIG['host'] + test_ports = REDISCLUSTER_CONFIG['ports'] + startup_nodes = [ + {'host': test_host, 'port': int(port)} + for port in test_ports.split(',') + ] + r = rediscluster.StrictRedisCluster(startup_nodes=startup_nodes) + r.flushall() + + +@try_until_timeout(Exception) +def check_vertica(): + conn = vertica_python.connect(**VERTICA_CONFIG) + try: + conn.cursor().execute('SELECT 1;') + finally: + conn.close() + + +@try_until_timeout(Exception) +def check_rabbitmq(): + url = 'amqp://{user}:{password}@{host}:{port}//'.format(**RABBITMQ_CONFIG) + conn = kombu.Connection(url) + try: + conn.connect() + finally: + conn.release() + + +if __name__ == '__main__': + check_functions = { + 'cassandra': check_cassandra, + 'postgres': check_postgres, + 'mysql': check_mysql, + 'rediscluster': check_rediscluster, + 'vertica': check_vertica, + 'rabbitmq': check_rabbitmq, + } + if len(sys.argv) >= 2: + for service in sys.argv[1:]: + check_functions[service]() + else: + print('usage: python {} SERVICE_NAME'.format(sys.argv[0])) + sys.exit(1) diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000000..5e40dd2a22 --- /dev/null +++ b/tox.ini @@ -0,0 +1,857 @@ +# the tox file specifies a way of running our test suite +# against different combinations of libraries and python +# versions. + +[tox] +# Our various test environments. The py*-all tasks will run the core +# library tests and all contrib tests with the latest library versions. +# The others will test specific versions of libraries. +# +# FIXME[gabin]: +# If the env name is longer than 128 characters (linux kernel limit specified +# in "master/include/linux/binfmts.h"), we'll get a "bad interpreter: No such file or directory" error. 
+# +#See linux kernel limitation: +# - https://github.com/torvalds/linux/blob/master/include/linux/binfmts.h#L12 +# +#See related github topic: +# - https://github.com/pypa/virtualenv/issues/596 +envlist = + flake8 + black + wait + {py27,py34,py35,py36,py37}-tracer + {py27,py34,py35,py36,py37}-internal + {py27,py34,py35,py36,py37}-integration + {py27,py34,py35,py36,py37}-ddtracerun + {py27,py34,py35,py36,py37}-test_utils + {py27,py34,py35,py36,py37}-test_logging +# Integrations environments + aiobotocore_contrib-py34-aiobotocore{02,03,04} + aiobotocore_contrib-{py35,py36}-aiobotocore{02,03,04,05,07,08,09,010} + # aiobotocore 0.2 and 0.4 do not work because they use async as a reserved keyword + aiobotocore_contrib-py37-aiobotocore{03,05,07,08,09,010} + # Python 3.7 needs at least aiohttp 2.3 + aiohttp_contrib-{py34,py35,py36}-aiohttp{12,13,20,21,22}-aiohttp_jinja{012,013}-yarl + aiohttp_contrib-{py34,py35,py36,py37}-aiohttp23-aiohttp_jinja{015}-yarl10 + aiohttp_contrib-{py35,py36,py37}-aiohttp{30,31,32,33,34,35}-aiohttp_jinja{015}-yarl10 + aiopg_contrib-{py34,py35,py36}-aiopg{012,015} + aiopg_contrib-py37-aiopg015 + algoliasearch_contrib-{py27,py34,py35,py36,py37}-algoliasearch{1,2} + asyncio_contrib-{py34,py35,py36,py37} +# boto needs moto<1 and moto<1 does not support Python >= 3.7 + boto_contrib-{py27,py34,py35,py36}-boto + botocore_contrib-{py27,py34,py35,py36,py37}-botocore + bottle_contrib{,_autopatch}-{py27,py34,py35,py36,py37}-bottle{11,12}-webtest + cassandra_contrib-{py27,py34,py35,py36,py37}-cassandra{35,36,37,38,315} +# Non-4.x celery should be able to use the older redis lib, since it locks to an older kombu + celery_contrib-{py27,py34,py35,py36}-celery{31}-redis{210} +# 4.x celery bumps kombu to 4.4+, which requires redis 3.2 or later, this tests against +# older redis with an older kombu, and newer kombu/newer redis. 
+# https://github.com/celery/kombu/blob/3e60e6503a77b9b1a987cf7954659929abac9bac/Changelog#L35 + celery_contrib-{py27,py34,py35,py36}-celery{40,41}-{redis210-kombu43,redis320-kombu44} +# Celery 4.2 is now limited to Kombu 4.3 +# https://github.com/celery/celery/commit/1571d414461f01ae55be63a03e2adaa94dbcb15d + celery_contrib-{py27,py34,py35,py36}-celery42-redis210-kombu43 +# Celery 4.3 wants Kombu >= 4.4 and Redis >= 3.2 +# Python 3.7 needs Celery 4.3 + celery_contrib-{py27,py34,py35,py36,py37}-celery43-redis320-kombu44 + consul_contrib-py{27,34,35,36,37}-consul{07,10,11} + dbapi_contrib-{py27,py34,py35,py36} + django_contrib{,_autopatch}-{py27,py34,py35,py36}-django{18,111}-djangopylibmc06-djangoredis45-pylibmc-redis{210}-memcached + django_contrib{,_autopatch}-{py34,py35,py36}-django{200}-djangopylibmc06-djangoredis45-pylibmc-redis{210}-memcached + django_drf_contrib-{py27,py34,py35,py36}-django{111}-djangorestframework{34,37,38} + django_drf_contrib-{py34,py35,py36}-django{200}-djangorestframework{37,38} + dogpile_contrib-{py27,py35,py36,py37}-dogpilecache{06,07,08,latest} + elasticsearch_contrib-{py27,py34,py35,py36}-elasticsearch{16,17,18,23,24,51,52,53,54,63,64} + elasticsearch_contrib-{py27,py34,py35,py36}-elasticsearch1{100} + elasticsearch_contrib-{py27,py34,py35,py36}-elasticsearch2{50} + elasticsearch_contrib-{py27,py34,py35,py36}-elasticsearch5{50} + elasticsearch_contrib-{py27,py34,py35,py36}-elasticsearch6{40} + falcon_contrib{,_autopatch}-{py27,py34,py35,py36}-falcon{10,11,12,13,14} + flask_contrib{,_autopatch}-{py27,py34,py35,py36}-flask{010,011,012,10}-blinker +# Flask <=0.9 does not support Python 3 + flask_contrib{,_autopatch}-{py27}-flask{09}-blinker + flask_cache_contrib{,_autopatch}-{py27,py34,py35,py36,py37}-flask{010,011,012}-flaskcache{013}-memcached-redis{210}-blinker + flask_cache_contrib{,_autopatch}-{py27}-flask{010,011}-flaskcache{012}-memcached-redis{210}-blinker + futures_contrib-{py27}-futures{30,31,32} + futures_contrib-{py34,py35,py36,py37} + gevent_contrib-{py27,py34,py35,py36}-gevent{11,12,13} + gevent_contrib-py37-gevent{13,14} +# gevent 1.0 is not python 3 compatible + gevent_contrib-{py27}-gevent{10} + grpc_contrib-{py27,py34,py35,py36,py37}-grpc{112,113,114,115,116,117,118,119,120,121,122} + httplib_contrib-{py27,py34,py35,py36,py37} + jinja2_contrib-{py27,py34,py35,py36,py37}-jinja{27,28,29,210} + mako_contrib-{py27,py34,py35,py36,py37}-mako{010,100} + molten_contrib-py{36,37}-molten{070,072} + mongoengine_contrib-{py27,py34,py35,py36,py37}-mongoengine{015,016,017,018,latest}-pymongo{latest} + mysql_contrib-{py27,py34,py35,py36,py37}-mysqlconnector + mysqldb_contrib-{py27}-mysqldb{12} + mysqldb_contrib-{py27,py34,py35,py36,py37}-mysqlclient{13} + psycopg_contrib-{py27,py34,py35,py36}-psycopg2{24,25,26,27,28} + psycopg_contrib-py37-psycopg2{27,28} + pylibmc_contrib-{py27,py34,py35,py36,py37}-pylibmc{140,150} + pylons_contrib-{py27}-pylons{096,097,010,10} + pymemcache_contrib{,_autopatch}-{py27,py34,py35,py36,py37}-pymemcache{130,140} + pymongo_contrib-{py27,py34,py35,py36,py37}-pymongo{30,31,32,33,34,35,36,37,38,39,latest}-mongoengine{latest} + pymysql_contrib-{py27,py34,py35,py36,py37}-pymysql{07,08,09} + pyramid_contrib{,_autopatch}-{py27,py34,py35,py36,py37}-pyramid{17,18,19}-webtest + redis_contrib-{py27,py34,py35,py36,py37}-redis{26,27,28,29,210,300} + rediscluster_contrib-{py27,py34,py35,py36,py37}-rediscluster{135,136}-redis210 + requests_contrib{,_autopatch}-{py27,py34,py35,py36,py37}-requests{208,209,210,211,212,213,219} + 
kombu_contrib-{py27,py34,py35,py36}-kombu{40,41,42} + # Python 3.7 needs Kombu >= 4.2 + kombu_contrib-py37-kombu42 +# python 3.6 requests + gevent regression test +# DEV: This is a known issue for gevent 1.1, suggestion is to upgrade to gevent > 1.2 +# https://github.com/gevent/gevent/issues/903 + requests_gevent_contrib-{py36}-requests{208,209,210,211,212,213,219}-gevent{12,13} + requests_gevent_contrib-py37-requests{208,209,210,211,212,213,219}-gevent13 + sqlalchemy_contrib-{py27,py34,py35,py36,py37}-sqlalchemy{10,11,12}-psycopg228-mysqlconnector + sqlite3_contrib-{py27,py34,py35,py36,py37}-sqlite3 + tornado_contrib-{py27,py34,py35,py36,py37}-tornado{40,41,42,43,44,45} + tornado_contrib-{py37}-tornado{50,51,60} + tornado_contrib-{py27}-tornado{40,41,42,43,44,45}-futures{30,31,32} + vertica_contrib-{py27,py34,py35,py36,py37}-vertica{060,070} +# Opentracer + {py27,py34,py35,py36,py37}-opentracer + {py34,py35,py36,py37}-opentracer_asyncio + {py34,py35,py36,py37}-opentracer_tornado-tornado{40,41,42,43,44} + {py27}-opentracer_gevent-gevent{10} + {py27,py34,py35,py36}-opentracer_gevent-gevent{11,12} + py37-opentracer_gevent-gevent{13,14} +# Unit tests: pytest based test suite that do not require any additional dependency + unit_tests-{py27,py34,py35,py36,py37} + benchmarks-{py27,py34,py35,py36,py37} + +[testenv] +# Always re-run `setup.py develop` to ensure the proper C-extension .so files are created +# DEV: If we don't do this sometimes CircleCI gets messed up and only has the py27 .so +# meaning running on py3.x will fail +# https://stackoverflow.com/questions/57459123/why-do-i-need-to-run-tox-twice-to-test-a-python-package-with-c-extension +commands_pre={envpython} {toxinidir}/setup.py develop +usedevelop = True +basepython = + py27: python2.7 + py34: python3.4 + py35: python3.5 + py36: python3.6 + py37: python3.7 + +deps = + pdbpp + pytest>=3 + pytest-benchmark + pytest-cov + pytest-django + pytest-mock + opentracing +# test dependencies installed in all envs + mock +# force the downgrade as a workaround +# https://github.com/aio-libs/aiohttp/issues/2662 + yarl: yarl==0.18.0 + yarl10: yarl>=1.0,<1.1 +# backports + py27: enum34 +# integrations + aiobotocore010: aiobotocore>=0.10,<0.11 + aiobotocore09: aiobotocore>=0.9,<0.10 + aiobotocore08: aiobotocore>=0.8,<0.9 + aiobotocore07: aiobotocore>=0.7,<0.8 + # aiobotocore06 does not work + aiobotocore05: aiobotocore>=0.5,<0.6 + aiobotocore04: aiobotocore>=0.4,<0.5 + aiobotocore03: aiobotocore>=0.3,<0.4 + aiobotocore02: aiobotocore>=0.2,<0.3 + aiobotocore02: multidict==4.5.2 + aiobotocore{02,03,04}-{py34}: typing + aiopg012: aiopg>=0.12,<0.13 + aiopg015: aiopg>=0.15,<0.16 + aiopg: sqlalchemy + aiohttp12: aiohttp>=1.2,<1.3 + aiohttp13: aiohttp>=1.3,<1.4 + aiohttp20: aiohttp>=2.0,<2.1 + aiohttp21: aiohttp>=2.1,<2.2 + aiohttp22: aiohttp>=2.2,<2.3 + aiohttp23: aiohttp>=2.3,<2.4 + aiohttp30: aiohttp>=3.0,<3.1 + aiohttp31: aiohttp>=3.1,<3.2 + aiohttp32: aiohttp>=3.2,<3.3 + aiohttp33: aiohttp>=3.3,<3.4 + aiohttp34: aiohttp>=3.4,<3.5 + aiohttp35: aiohttp>=3.5,<3.6 + aiohttp_jinja012: aiohttp_jinja2>=0.12,<0.13 + aiohttp_jinja013: aiohttp_jinja2>=0.13,<0.14 + aiohttp_jinja015: aiohttp_jinja2>=0.15,<0.16 + algoliasearch1: algoliasearch>=1.2,<2 + algoliasearch2: algoliasearch>=2,<3 + blinker: blinker + boto: boto + boto: moto<1.0 + botocore: botocore + py34-botocore: PyYAML<5.3 + py34-botocore: jsonpatch<1.25 + botocore: moto>=1.0,<2 + bottle11: bottle>=0.11,<0.12 + bottle12: bottle>=0.12,<0.13 + cassandra35: cassandra-driver>=3.5,<3.6 + cassandra36: 
+
+deps =
+    pdbpp
+    pytest>=3
+    pytest-benchmark
+    pytest-cov
+    pytest-django
+    pytest-mock
+    opentracing
+# test dependencies installed in all envs
+    mock
+# force the downgrade as a workaround
+# https://github.com/aio-libs/aiohttp/issues/2662
+    yarl: yarl==0.18.0
+    yarl10: yarl>=1.0,<1.1
+# backports
+    py27: enum34
+# integrations
+    aiobotocore010: aiobotocore>=0.10,<0.11
+    aiobotocore09: aiobotocore>=0.9,<0.10
+    aiobotocore08: aiobotocore>=0.8,<0.9
+    aiobotocore07: aiobotocore>=0.7,<0.8
+    # aiobotocore06 does not work
+    aiobotocore05: aiobotocore>=0.5,<0.6
+    aiobotocore04: aiobotocore>=0.4,<0.5
+    aiobotocore03: aiobotocore>=0.3,<0.4
+    aiobotocore02: aiobotocore>=0.2,<0.3
+    aiobotocore02: multidict==4.5.2
+    aiobotocore{02,03,04}-{py34}: typing
+    aiopg012: aiopg>=0.12,<0.13
+    aiopg015: aiopg>=0.15,<0.16
+    aiopg: sqlalchemy
+    aiohttp12: aiohttp>=1.2,<1.3
+    aiohttp13: aiohttp>=1.3,<1.4
+    aiohttp20: aiohttp>=2.0,<2.1
+    aiohttp21: aiohttp>=2.1,<2.2
+    aiohttp22: aiohttp>=2.2,<2.3
+    aiohttp23: aiohttp>=2.3,<2.4
+    aiohttp30: aiohttp>=3.0,<3.1
+    aiohttp31: aiohttp>=3.1,<3.2
+    aiohttp32: aiohttp>=3.2,<3.3
+    aiohttp33: aiohttp>=3.3,<3.4
+    aiohttp34: aiohttp>=3.4,<3.5
+    aiohttp35: aiohttp>=3.5,<3.6
+    aiohttp_jinja012: aiohttp_jinja2>=0.12,<0.13
+    aiohttp_jinja013: aiohttp_jinja2>=0.13,<0.14
+    aiohttp_jinja015: aiohttp_jinja2>=0.15,<0.16
+    algoliasearch1: algoliasearch>=1.2,<2
+    algoliasearch2: algoliasearch>=2,<3
+    blinker: blinker
+    boto: boto
+    boto: moto<1.0
+    botocore: botocore
+    py34-botocore: PyYAML<5.3
+    py34-botocore: jsonpatch<1.25
+    botocore: moto>=1.0,<2
+    bottle11: bottle>=0.11,<0.12
+    bottle12: bottle>=0.12,<0.13
+    cassandra35: cassandra-driver>=3.5,<3.6
+    cassandra36: cassandra-driver>=3.6,<3.7
+    cassandra37: cassandra-driver>=3.7,<3.8
+    cassandra38: cassandra-driver>=3.8,<3.9
+    cassandra315: cassandra-driver>=3.15,<3.16
+    celery31: celery>=3.1,<3.2
+    celery40: celery>=4.0,<4.1
+    celery41: celery>=4.1,<4.2
+    celery42: celery>=4.2,<4.3
+    celery43: celery>=4.3,<4.4
+    consul07: python-consul>=0.7,<1.0
+    consul10: python-consul>=1.0,<1.1
+    consul11: python-consul>=1.1,<1.2
+    ddtracerun: redis
+    django18: django>=1.8,<1.9
+    django111: django>=1.11,<1.12
+    django200: django>=2.0,<2.1
+    djangopylibmc06: django-pylibmc>=0.6,<0.7
+    djangoredis45: django-redis>=4.5,<4.6
+    djangorestframework34: djangorestframework>=3.4,<3.5
+    djangorestframework37: djangorestframework>=3.7,<3.8
+    djangorestframework38: djangorestframework>=3.8,<3.9
+    dogpilecache06: dogpile.cache==0.6.*
+    dogpilecache07: dogpile.cache==0.7.*
+    dogpilecache08: dogpile.cache==0.8.*
+    dogpilecachelatest: dogpile.cache
+    elasticsearch16: elasticsearch>=1.6,<1.7
+    elasticsearch17: elasticsearch>=1.7,<1.8
+    elasticsearch18: elasticsearch>=1.8,<1.9
+    elasticsearch23: elasticsearch>=2.3,<2.4
+    elasticsearch24: elasticsearch>=2.4,<2.5
+    elasticsearch51: elasticsearch>=5.1,<5.2
+    elasticsearch52: elasticsearch>=5.2,<5.3
+    elasticsearch53: elasticsearch>=5.3,<5.4
+    elasticsearch54: elasticsearch>=5.4,<5.5
+    elasticsearch63: elasticsearch>=6.3,<6.4
+    elasticsearch64: elasticsearch>=6.4,<6.5
+    # elasticsearch1 package
+    elasticsearch1100: elasticsearch1>=1.10.0,<1.11.0
+    # elasticsearch2 package
+    elasticsearch250: elasticsearch2>=2.5.0,<2.6.0
+    # elasticsearch5 package
+    elasticsearch550: elasticsearch5>=5.5.0,<5.6.0
+    # elasticsearch6 package
+    elasticsearch640: elasticsearch6>=6.4.0,<6.5.0
+    falcon10: falcon>=1.0,<1.1
+    falcon11: falcon>=1.1,<1.2
+    falcon12: falcon>=1.2,<1.3
+    falcon13: falcon>=1.3,<1.4
+    falcon14: falcon>=1.4,<1.5
+    flask09: flask>=0.9,<0.10
+    flask010: flask>=0.10,<0.11
+    flask011: flask>=0.11,<0.12
+    flask012: flask>=0.12,<0.13
+    flask10: flask>=1.0,<1.1
+    flaskcache012: flask_cache>=0.12,<0.13
+    flaskcache013: flask_cache>=0.13,<0.14
+    futures: futures
+    futures30: futures>=3.0,<3.1
+    futures31: futures>=3.1,<3.2
+    futures32: futures>=3.2,<3.3
+    gevent10: gevent>=1.0,<1.1
+    gevent11: gevent>=1.1,<1.2
+    gevent12: gevent>=1.2,<1.3
+    gevent13: gevent>=1.3,<1.4
+    gevent14: gevent>=1.4,<1.5
+    grpc112: grpcio>=1.12.0,<1.13.0
+    grpc113: grpcio>=1.13.0,<1.14.0
+    grpc114: grpcio>=1.14.0,<1.15.0
+    grpc115: grpcio>=1.15.0,<1.16.0
+    grpc116: grpcio>=1.16.0,<1.17.0
+    grpc117: grpcio>=1.17.0,<1.18.0
+    grpc118: grpcio>=1.18.0,<1.19.0
+    grpc119: grpcio>=1.19.0,<1.20.0
+    grpc120: grpcio>=1.20.0,<1.21.0
+    grpc121: grpcio>=1.21.0,<1.22.0
+    grpc122: grpcio>=1.22.0,<1.23.0
+    grpc112: googleapis-common-protos
+    grpc113: googleapis-common-protos
+    grpc114: googleapis-common-protos
+    grpc115: googleapis-common-protos
+    grpc116: googleapis-common-protos
+    grpc117: googleapis-common-protos
+    grpc118: googleapis-common-protos
+    grpc119: googleapis-common-protos
+    grpc120: googleapis-common-protos
+    grpc121: googleapis-common-protos
+    grpc122: googleapis-common-protos
+    jinja27: jinja2>=2.7,<2.8
+    jinja28: jinja2>=2.8,<2.9
+    jinja29: jinja2>=2.9,<2.10
+    jinja210: jinja2>=2.10,<2.11
+    mako100: mako>=1.0.0,<1.1.0
+    mako010: mako>=0.1.0,<1.0.0
+    memcached: python-memcached
+    molten070: molten>=0.7.0,<0.7.2
+    molten072: molten>=0.7.2,<0.8.0
+    mongoengine015: mongoengine>=0.15,<0.16
+    mongoengine016: mongoengine>=0.16,<0.17
+    mongoengine017: mongoengine>=0.17,<0.18
+    mongoengine018: mongoengine>=0.18,<0.19
+    mongoenginelatest: mongoengine>=0.18
+    mysqlconnector: mysql-connector-python!=8.0.18
+    mysqldb12: mysql-python>=1.2,<1.3
+    mysqlclient13: mysqlclient>=1.3,<1.4
+# webob is required for Pylons < 1.0
+    pylons096: pylons>=0.9.6,<0.9.7
+    pylons096: webob<1.1
+    pylons097: pylons>=0.9.7,<0.9.8
+    pylons097: webob<1.1
+    pylons010: pylons>=0.10,<0.11
+    pylons010: webob<1.1
+    pylons10: pylons>=1.0,<1.1
+    pylibmc: pylibmc
+    pylibmc140: pylibmc>=1.4.0,<1.5.0
+    pylibmc150: pylibmc>=1.5.0,<1.6.0
+    pymemcache130: pymemcache>=1.3.0,<1.4.0
+    pymemcache140: pymemcache>=1.4.0,<1.5.0
+    pymongo30: pymongo>=3.0,<3.1
+    pymongo31: pymongo>=3.1,<3.2
+    pymongo32: pymongo>=3.2,<3.3
+    pymongo33: pymongo>=3.3,<3.4
+    pymongo34: pymongo>=3.4,<3.5
+    pymongo35: pymongo>=3.5,<3.6
+    pymongo36: pymongo>=3.6,<3.7
+    pymongo37: pymongo>=3.7,<3.8
+    pymongo38: pymongo>=3.8,<3.9
+    pymongo39: pymongo>=3.9,<3.10
+    pymongolatest: pymongo>=3.9
+    pymysql07: pymysql>=0.7,<0.8
+    pymysql08: pymysql>=0.8,<0.9
+    pymysql09: pymysql>=0.9,<0.10
+    pyramid17: pyramid>=1.7,<1.8
+    pyramid18: pyramid>=1.8,<1.9
+    pyramid19: pyramid>=1.9,<1.10
+    psycopg224: psycopg2>=2.4,<2.5
+    psycopg225: psycopg2>=2.5,<2.6
+    psycopg226: psycopg2>=2.6,<2.7
+    psycopg227: psycopg2>=2.7,<2.8
+    psycopg228: psycopg2>=2.8,<2.9
+    redis26: redis>=2.6,<2.7
+    redis27: redis>=2.7,<2.8
+    redis28: redis>=2.8,<2.9
+    redis29: redis>=2.9,<2.10
+    redis210: redis>=2.10,<2.11
+    redis300: redis>=3.0.0,<3.1.0
+    redis320: redis>=3.2.0,<3.3.0
+    rediscluster135: redis-py-cluster>=1.3.5,<1.3.6
+    rediscluster136: redis-py-cluster>=1.3.6,<1.3.7
+    kombu44: kombu>=4.4,<4.5
+    kombu43: kombu>=4.3,<4.4
+    kombu42: kombu>=4.2,<4.3
+    kombu41: kombu>=4.1,<4.2
+    kombu40: kombu>=4.0,<4.1
+    requests_contrib: requests-mock>=1.4
+    requests200: requests>=2.0,<2.1
+    requests208: requests>=2.8,<2.9
+    requests209: requests>=2.9,<2.10
+    requests210: requests>=2.10,<2.11
+    requests211: requests>=2.11,<2.12
+    requests212: requests>=2.12,<2.13
+    requests213: requests>=2.13,<2.14
+    requests218: requests>=2.18,<2.19
+    requests219: requests>=2.19,<2.20
+    sqlalchemy10: sqlalchemy>=1.0,<1.1
+    sqlalchemy11: sqlalchemy>=1.1,<1.2
+    sqlalchemy12: sqlalchemy>=1.2,<1.3
+    tornado40: tornado>=4.0,<4.1
+    tornado41: tornado>=4.1,<4.2
+    tornado42: tornado>=4.2,<4.3
+    tornado43: tornado>=4.3,<4.4
+    tornado44: tornado>=4.4,<4.5
+    tornado45: tornado>=4.5,<4.6
+    tornado50: tornado>=5.0,<5.1
+    tornado51: tornado>=5.1,<5.2
+    tornado60: tornado>=6.0,<6.1
+    vertica060: vertica-python>=0.6.0,<0.7.0
+    vertica070: vertica-python>=0.7.0,<0.8.0
+    webtest: WebTest
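+# DEV: Like `basepython`, every `factor: package` line above applies only to
+# envs whose name contains that factor. As an illustration, the env
+# `celery_contrib-py36-celery43-redis320-kombu44` matches the factors
+# celery43, redis320 and kombu44 and therefore installs
+#   celery>=4.3,<4.4  redis>=3.2.0,<3.3.0  kombu>=4.4,<4.5
+# on top of the unconditional test dependencies (pytest, mock, ...).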
+
+# pass along test env variables
+passenv=TEST_*
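+# DEV: `passenv=TEST_*` forwards any variable named TEST_* from the calling
+# shell (or CI job) into the test environment; for example (illustrative
+# variable name):
+#   TEST_EXAMPLE_FLAG=1 tox -e unit_tests-py36
+# would make TEST_EXAMPLE_FLAG visible to the tests.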
+
+commands =
+# run only essential tests related to the tracing client
+    tracer: pytest {posargs} --ignore="tests/contrib" --ignore="tests/test_integration.py" --ignore="tests/commands" --ignore="tests/opentracer" --ignore="tests/unit" --ignore="tests/internal" tests
+# run only the `ddtrace.internal` tests
+    internal: pytest {posargs} tests/internal
+# run only the opentracer tests
+    opentracer: pytest {posargs} tests/opentracer/test_tracer.py tests/opentracer/test_span.py tests/opentracer/test_span_context.py tests/opentracer/test_dd_compatibility.py tests/opentracer/test_utils.py
+    opentracer_asyncio: pytest {posargs} tests/opentracer/test_tracer_asyncio.py
+    opentracer_tornado-tornado{40,41,42,43,44}: pytest {posargs} tests/opentracer/test_tracer_tornado.py
+    opentracer_gevent: pytest {posargs} tests/opentracer/test_tracer_gevent.py
+# integration tests
+    integration: pytest {posargs} tests/test_integration.py
+# Contribs
+    aiobotocore_contrib-{py34,py35,py36,py37}: pytest {posargs} tests/contrib/aiobotocore
+    aiopg_contrib-{py34,py35,py36,py37}: pytest {posargs} tests/contrib/aiopg
+    aiohttp_contrib: pytest {posargs} tests/contrib/aiohttp
+    algoliasearch_contrib: pytest {posargs} tests/contrib/algoliasearch
+    asyncio_contrib: pytest {posargs} tests/contrib/asyncio
+    boto_contrib: pytest {posargs} tests/contrib/boto
+    botocore_contrib: pytest {posargs} tests/contrib/botocore
+    bottle_contrib: pytest {posargs} --ignore="tests/contrib/bottle/test_autopatch.py" tests/contrib/bottle/
+    bottle_contrib_autopatch: python tests/ddtrace_run.py pytest {posargs} tests/contrib/bottle/test_autopatch.py
+    cassandra_contrib: pytest {posargs} tests/contrib/cassandra
+    celery_contrib: pytest {posargs} tests/contrib/celery
+    consul_contrib: pytest {posargs} tests/contrib/consul
+    dbapi_contrib: pytest {posargs} tests/contrib/dbapi
+    django_contrib: pytest {posargs} tests/contrib/django
+    django_contrib_autopatch: python tests/ddtrace_run.py pytest {posargs} tests/contrib/django
+    django_drf_contrib: pytest {posargs} tests/contrib/djangorestframework
+    dogpile_contrib: pytest {posargs} tests/contrib/dogpile_cache
+    elasticsearch_contrib: pytest {posargs} tests/contrib/elasticsearch
+    falcon_contrib: pytest {posargs} tests/contrib/falcon/test_middleware.py tests/contrib/falcon/test_distributed_tracing.py
+    falcon_contrib_autopatch: python tests/ddtrace_run.py pytest {posargs} tests/contrib/falcon/test_autopatch.py
+    flask_contrib: pytest {posargs} tests/contrib/flask
+    flask_contrib_autopatch: python tests/ddtrace_run.py pytest {posargs} tests/contrib/flask_autopatch
+    flask_cache_contrib: pytest {posargs} tests/contrib/flask_cache
+    futures_contrib: pytest {posargs} tests/contrib/futures
+    gevent_contrib: pytest {posargs} tests/contrib/gevent
+    grpc_contrib: pytest {posargs} tests/contrib/grpc
+    httplib_contrib: pytest {posargs} tests/contrib/httplib
+    jinja2_contrib: pytest {posargs} tests/contrib/jinja2
+    mako_contrib: pytest {posargs} tests/contrib/mako
+    molten_contrib: pytest {posargs} tests/contrib/molten
+    mongoengine_contrib: pytest {posargs} tests/contrib/mongoengine
+    mysql_contrib: pytest {posargs} tests/contrib/mysql
+    mysqldb_contrib: pytest {posargs} tests/contrib/mysqldb
+    psycopg_contrib: pytest {posargs} tests/contrib/psycopg
+    pylibmc_contrib: pytest {posargs} tests/contrib/pylibmc
+    pylons_contrib: pytest {posargs} tests/contrib/pylons
+    pymemcache_contrib: pytest {posargs} --ignore="tests/contrib/pymemcache/autopatch" tests/contrib/pymemcache/
+    pymemcache_contrib_autopatch: python tests/ddtrace_run.py pytest {posargs} tests/contrib/pymemcache/autopatch/
+    pymongo_contrib: pytest {posargs} tests/contrib/pymongo
+    pymysql_contrib: pytest {posargs} tests/contrib/pymysql
+    pyramid_contrib: pytest {posargs} tests/contrib/pyramid/test_pyramid.py
+    pyramid_contrib_autopatch: python tests/ddtrace_run.py pytest {posargs} tests/contrib/pyramid/test_pyramid_autopatch.py
+    redis_contrib: pytest {posargs} tests/contrib/redis
+    rediscluster_contrib: pytest {posargs} tests/contrib/rediscluster
+    requests_contrib: pytest {posargs} tests/contrib/requests
+    requests_gevent_contrib: pytest {posargs} tests/contrib/requests_gevent
+    kombu_contrib: pytest {posargs} tests/contrib/kombu
+    sqlalchemy_contrib: pytest {posargs} tests/contrib/sqlalchemy
+    sqlite3_contrib: pytest {posargs} tests/contrib/sqlite3
+    tornado_contrib: pytest {posargs} tests/contrib/tornado
+    vertica_contrib: pytest {posargs} tests/contrib/vertica/
+# run subsets of the tests for particular library versions
+    ddtracerun: pytest {posargs} tests/commands/test_runner.py
+    test_utils: pytest {posargs} tests/contrib/test_utils.py
+    test_logging: pytest {posargs} tests/contrib/logging/
+# Unit tests: pytest-based test suites that do not require any additional dependencies.
+    unit_tests: pytest {posargs} tests/unit
+    benchmarks: pytest --benchmark-only {posargs} tests/benchmark.py
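+# DEV: `{posargs}` is replaced by whatever follows `--` on the tox command
+# line, so extra pytest options can be passed straight through, e.g.
+#   tox -e unit_tests-py36 -- -x --pdb
+# runs only the unit tests, stopping at the first failure and dropping into
+# the debugger (pdbpp is installed in every env above).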
+
+[testenv:wait]
+commands=python tests/wait-for-services.py {posargs}
+basepython=python
+deps=
+    cassandra-driver
+    psycopg2
+    mysql-connector-python!=8.0.18
+    redis-py-cluster>=1.3.6,<1.4.0
+    vertica-python>=0.6.0,<0.7.0
+    kombu>=4.2.0,<4.3.0
+
+
+# this check is somewhat flaky (it can fail while the services are actually up), so try the tests anyway
+ignore_outcome=true
+
+[testenv:black]
+deps=black
+commands=black --check .
+basepython=python3.7
+
+[testenv:flake8]
+deps=
+    flake8>=3.7,<=3.8
+    flake8-blind-except
+    flake8-builtins
+    flake8-docstrings
+    flake8-logging-format
+    flake8-rst-docstrings
+    # needed for some features from flake8-rst-docstrings
+    pygments
+commands=flake8 .
+basepython=python3.7
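+# DEV: The lint envs above pin their own interpreter and carry no pyXY
+# factor, so they are run by plain env name, e.g.
+#   tox -e black,flake8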
+
+# do not use develop mode with celery, as running multiple python versions within the
+# same job will cause problems for tests that use ddtrace-run
+[celery_contrib]
+usedevelop = False
+[testenv:celery_contrib-py27-celery31-redis210]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py34-celery31-redis210]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py35-celery31-redis210]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py36-celery31-redis210]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py27-celery40-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py27-celery40-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py27-celery41-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py27-celery41-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py34-celery40-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py34-celery40-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py34-celery41-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py34-celery41-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py35-celery40-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py35-celery40-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py35-celery41-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py35-celery41-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py36-celery40-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py36-celery40-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py36-celery41-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py36-celery41-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py27-celery42-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py34-celery42-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py35-celery42-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py36-celery42-redis210-kombu43]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py27-celery43-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py34-celery43-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py35-celery43-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py36-celery43-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
+[testenv:celery_contrib-py37-celery43-redis320-kombu44]
+usedevelop = {[celery_contrib]usedevelop}
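+# DEV: `{[celery_contrib]usedevelop}` is tox's cross-section value
+# substitution: every override above reads its value from the single
+# `[celery_contrib]` block, so flipping that one `usedevelop = False`
+# updates all of the celery envs at once.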
+
+[falcon_autopatch]
+setenv =
+    DATADOG_SERVICE_NAME=my-falcon
+[testenv:falcon_contrib_autopatch-py27-falcon10]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py27-falcon11]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py27-falcon12]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py27-falcon13]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py27-falcon14]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py34-falcon10]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py34-falcon11]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py34-falcon12]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py34-falcon13]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py34-falcon14]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py35-falcon10]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py35-falcon11]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py35-falcon12]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py35-falcon13]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py35-falcon14]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py36-falcon10]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py36-falcon11]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py36-falcon12]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py36-falcon13]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py36-falcon14]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py37-falcon10]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py37-falcon11]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py37-falcon12]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py37-falcon13]
+setenv =
+    {[falcon_autopatch]setenv}
+[testenv:falcon_contrib_autopatch-py37-falcon14]
+setenv =
+    {[falcon_autopatch]setenv}
+
+
+[pyramid_autopatch]
+setenv =
+    DATADOG_SERVICE_NAME = foobar
+    DATADOG_PYRAMID_DISTRIBUTED_TRACING = True
+[testenv:pyramid_contrib_autopatch-py27-pyramid17-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py27-pyramid18-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py27-pyramid19-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py34-pyramid17-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py34-pyramid18-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py34-pyramid19-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py35-pyramid17-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py35-pyramid18-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py35-pyramid19-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py36-pyramid17-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py36-pyramid18-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py36-pyramid19-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py37-pyramid17-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py37-pyramid18-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
+[testenv:pyramid_contrib_autopatch-py37-pyramid19-webtest]
+setenv =
+    {[pyramid_autopatch]setenv}
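+# DEV: The env vars above are read at startup by the ddtrace-run wrapper used
+# in the `*_autopatch` commands; DATADOG_SERVICE_NAME, for instance, sets the
+# service name the autopatch tests assert against (an assumption based on how
+# the autopatch test commands are invoked above).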
+
+
+[flask_autopatch]
+setenv =
+    DATADOG_SERVICE_NAME = test.flask.service
+    DATADOG_PATCH_MODULES = jinja2:false
+[testenv:flask_contrib_autopatch-py27-flask010-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py27-flask011-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py27-flask012-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py27-flask10-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py34-flask010-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py34-flask011-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py34-flask012-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py34-flask10-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py35-flask010-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py35-flask011-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py35-flask012-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py35-flask10-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py36-flask010-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py36-flask011-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py36-flask012-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py36-flask10-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py37-flask010-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py37-flask011-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py37-flask012-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py37-flask10-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py27-flask010-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py27-flask011-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py27-flask012-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py34-flask010-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py34-flask011-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py34-flask012-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py35-flask010-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py35-flask011-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py35-flask012-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py36-flask010-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py36-flask011-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py36-flask012-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py37-flask010-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py37-flask011-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py37-flask012-flaskcache013-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py27-flask010-flaskcache012-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+[testenv:flask_contrib_autopatch-py27-flask011-flaskcache012-memcached-redis210-blinker]
+setenv =
+    {[flask_autopatch]setenv}
+
+
+[bottle_autopatch]
+setenv =
+    DATADOG_SERVICE_NAME = bottle-app
+[testenv:bottle_contrib_autopatch-py27-bottle11-webtest]
+setenv =
+    {[bottle_autopatch]setenv}
+[testenv:bottle_contrib_autopatch-py34-bottle11-webtest]
+setenv =
+    {[bottle_autopatch]setenv}
+[testenv:bottle_contrib_autopatch-py35-bottle11-webtest]
+setenv =
+    {[bottle_autopatch]setenv}
+[testenv:bottle_contrib_autopatch-py36-bottle11-webtest]
+setenv =
+    {[bottle_autopatch]setenv}
+[testenv:bottle_contrib_autopatch-py37-bottle11-webtest]
+setenv =
+    {[bottle_autopatch]setenv}
+[testenv:bottle_contrib_autopatch-py27-bottle12-webtest]
+setenv =
+    {[bottle_autopatch]setenv}
+[testenv:bottle_contrib_autopatch-py34-bottle12-webtest]
+setenv =
+    {[bottle_autopatch]setenv}
+[testenv:bottle_contrib_autopatch-py35-bottle12-webtest]
+setenv =
+    {[bottle_autopatch]setenv}
+[testenv:bottle_contrib_autopatch-py36-bottle12-webtest]
+setenv =
+    {[bottle_autopatch]setenv}
+[testenv:bottle_contrib_autopatch-py37-bottle12-webtest]
+setenv =
+    {[bottle_autopatch]setenv}
+
+
+# DEV: We use `conftest.py` as a local pytest plugin to configure hooks for collection
+[pytest]
+# Common directories to ignore
+addopts = --ignore "tests/utils" --ignore "tests/base"
+# DEV: The default is `test_*.py`, which would miss plain `test.py` files
+python_files = test*.py
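+# DEV: pytest reads this `[pytest]` section from tox.ini, so these options
+# apply to every pytest invocation in `commands` above. For example,
+# `python_files = test*.py` matches both `test.py` and `test_tracer.py`
+# (the latter filename is just an illustration).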
+
+[flake8]
+max-line-length=120
+exclude=
+    .ddtox,.tox,
+    .git,__pycache__,
+    .eggs,*.egg,
+    build,
+    # We shouldn't lint our vendored dependencies
+    ddtrace/vendor/
+# Ignore:
+# A003: XXX is a python builtin, consider renaming the class attribute
+# G201: Logging: .exception(...) should be used instead of .error(..., exc_info=True)
+# E231,W503: not respected by black
+# We ignore most of the D errors because there are too many; the goal is to fix them eventually
+ignore = W503,E231,A003,G201,D100,D101,D102,D103,D104,D105,D106,D107,D200,D202,D204,D205,D208,D210,D300,D400,D401,D403,D413
+enable-extensions=G
+rst-roles = class,meth,obj,ref
+rst-directives = py:data
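+# DEV: `enable-extensions=G` opts in to flake8-logging-format's `G` checks
+# (e.g. the G201 ignored above), and `rst-roles`/`rst-directives` tell
+# flake8-rst-docstrings which Sphinx roles/directives in docstrings are
+# legitimate so they are not flagged as RST errors.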