From aec82080fc3eb232df29b38ddb90ac18305dcc6c Mon Sep 17 00:00:00 2001
From: nelsonic
Date: Fri, 9 Sep 2022 22:01:02 +0100
Subject: [PATCH] mix phx.new app

---
 .formatter.exs                              |   5 +
 _build/dev/lib/app/.mix/compile.lock        |   0
 assets/css/app.css                          | 120 +
 assets/css/phoenix.css                      | 101 +
 assets/js/app.js                            |  45 +
 assets/vendor/topbar.js                     | 157 +
 buildit.md                                  |  27 +
 config/config.exs                           |  40 +
 config/dev.exs                              |  74 +
 config/prod.exs                             |  49 +
 config/runtime.exs                          |  65 +
 config/test.exs                             |  27 +
 deps/* (vendored Hex packages: castore, connection, cowboy, cowboy_telemetry, cowlib, db_connection, decimal, ecto, ecto_sql, esbuild, file_system, floki, html_entities, jason, mime, phoenix, phoenix_ecto, phoenix_html, phoenix_live_reload, phoenix_live_view, phoenix_pubsub, phoenix_view, plug, plug_cowboy, plug_crypto, postgrex, ranch, telemetry, telemetry_metrics, telemetry_poller; per-file stats omitted)
 lib/app.ex                                  |   9 +
 lib/app/application.ex                      |  36 +
 lib/app/repo.ex                             |   5 +
 lib/app_web.ex                              | 107 +
 lib/app_web/controllers/page_controller.ex  |   7 +
 lib/app_web/endpoint.ex                     |  46 +
 lib/app_web/router.ex                       |  27 +
 lib/app_web/telemetry.ex                    |  71 +
 lib/app_web/templates/layout/app.html.heex  |   5 +
 lib/app_web/templates/layout/live.html.heex |  11 +
 lib/app_web/templates/layout/root.html.heex |  28 +
 lib/app_web/templates/page/index.html.heex  |  41 +
 lib/app_web/views/error_helpers.ex          |  30 +
 lib/app_web/views/error_view.ex             |  16 +
 lib/app_web/views/layout_view.ex            |   7 +
 lib/app_web/views/page_view.ex              |   3 +
 mix.exs                                     |  67 +
 mix.lock                                    |  32 +
 priv/repo/migrations/.formatter.exs         |   4 +
 priv/repo/seeds.exs                         |  11 +
 priv/static/favicon.ico                     | Bin 0 -> 1258 bytes
 priv/static/images/phoenix.png              | Bin 0 -> 13900 bytes
 priv/static/robots.txt                      |   5 +
 .../controllers/page_controller_test.exs    |   8 +
 test/app_web/views/error_view_test.exs      |  14 +
 test/app_web/views/layout_view_test.exs     |   8 +
 test/app_web/views/page_view_test.exs       |   3 +
 test/support/conn_case.ex                   |  38 +
 test/support/data_case.ex                   |  58 +
 test/test_helper.exs                        |   2 +
 920 files changed, 235138 insertions(+)
 create mode 100644 .formatter.exs
 create mode 100644 _build/dev/lib/app/.mix/compile.lock
 create mode 100644 assets/css/app.css
 create mode 100644 assets/css/phoenix.css
 create mode 100644 assets/js/app.js
 create mode 100644 assets/vendor/topbar.js
 create mode 100644 config/config.exs
 create mode 100644 config/dev.exs
 create mode 100644 config/prod.exs
 create mode 100644 config/runtime.exs
 create mode 100644 config/test.exs
 create mode 100644 deps/* (vendored Hex package files; full per-file listing omitted)
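For reference, a minimal sketch of the commands that presumably produced this commit, assuming Elixir, Hex and the phx_new generator were already installed; the exact flags used are not recorded in the patch:

    mix phx.new app      # generate a new Phoenix project named "app" (the commit subject)
    cd app
    mix deps.get         # fetch the Hex packages that appear under deps/ above
    git init
    git add --all        # note: deps/ and _build/ are normally excluded by the generated
                         # .gitignore, but in this commit they were checked in as well
    git commit -m "mix phx.new app"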
deps/phoenix/priv/templates/phx.gen.auth/settings_controller_test.exs create mode 100644 deps/phoenix/priv/templates/phx.gen.auth/settings_edit.html.heex create mode 100644 deps/phoenix/priv/templates/phx.gen.auth/settings_view.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.auth/test_cases.exs create mode 100644 deps/phoenix/priv/templates/phx.gen.channel/channel.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.channel/channel_case.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.channel/channel_test.exs create mode 100644 deps/phoenix/priv/templates/phx.gen.context/access_no_schema.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.context/context.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.context/context_test.exs create mode 100644 deps/phoenix/priv/templates/phx.gen.context/fixtures.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.context/fixtures_module.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.context/schema_access.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.context/test_cases.exs create mode 100644 deps/phoenix/priv/templates/phx.gen.embedded/embedded_schema.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.html/controller.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.html/controller_test.exs create mode 100644 deps/phoenix/priv/templates/phx.gen.html/edit.html.heex create mode 100644 deps/phoenix/priv/templates/phx.gen.html/form.html.heex create mode 100644 deps/phoenix/priv/templates/phx.gen.html/index.html.heex create mode 100644 deps/phoenix/priv/templates/phx.gen.html/new.html.heex create mode 100644 deps/phoenix/priv/templates/phx.gen.html/show.html.heex create mode 100644 deps/phoenix/priv/templates/phx.gen.html/view.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.json/changeset_view.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.json/controller.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.json/controller_test.exs create mode 100644 deps/phoenix/priv/templates/phx.gen.json/fallback_controller.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.json/view.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.live/form_component.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.live/form_component.html.heex create mode 100644 deps/phoenix/priv/templates/phx.gen.live/index.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.live/index.html.heex create mode 100644 deps/phoenix/priv/templates/phx.gen.live/live_helpers.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.live/live_test.exs create mode 100644 deps/phoenix/priv/templates/phx.gen.live/show.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.live/show.html.heex create mode 100644 deps/phoenix/priv/templates/phx.gen.notifier/notifier.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.notifier/notifier_test.exs create mode 100644 deps/phoenix/priv/templates/phx.gen.presence/presence.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.release/Dockerfile.eex create mode 100644 deps/phoenix/priv/templates/phx.gen.release/dockerignore.eex create mode 100644 deps/phoenix/priv/templates/phx.gen.release/rel/migrate.bat.eex create mode 100644 deps/phoenix/priv/templates/phx.gen.release/rel/migrate.sh.eex create mode 100644 deps/phoenix/priv/templates/phx.gen.release/rel/server.bat.eex create mode 100644 deps/phoenix/priv/templates/phx.gen.release/rel/server.sh.eex create mode 100644 deps/phoenix/priv/templates/phx.gen.release/release.ex 
create mode 100644 deps/phoenix/priv/templates/phx.gen.schema/migration.exs create mode 100644 deps/phoenix/priv/templates/phx.gen.schema/schema.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.socket/socket.ex create mode 100644 deps/phoenix/priv/templates/phx.gen.socket/socket.js create mode 100644 deps/phoenix_ecto/.fetch create mode 100644 deps/phoenix_ecto/.formatter.exs create mode 100644 deps/phoenix_ecto/.hex create mode 100644 deps/phoenix_ecto/CHANGELOG.md create mode 100644 deps/phoenix_ecto/LICENSE create mode 100644 deps/phoenix_ecto/README.md create mode 100644 deps/phoenix_ecto/hex_metadata.config create mode 100644 deps/phoenix_ecto/lib/phoenix_ecto.ex create mode 100644 deps/phoenix_ecto/lib/phoenix_ecto/check_repo_status.ex create mode 100644 deps/phoenix_ecto/lib/phoenix_ecto/exceptions.ex create mode 100644 deps/phoenix_ecto/lib/phoenix_ecto/html.ex create mode 100644 deps/phoenix_ecto/lib/phoenix_ecto/plug.ex create mode 100644 deps/phoenix_ecto/lib/phoenix_ecto/sql/sandbox.ex create mode 100644 deps/phoenix_ecto/lib/phoenix_ecto/sql/sandbox_session.ex create mode 100644 deps/phoenix_ecto/mix.exs create mode 100644 deps/phoenix_html/.fetch create mode 100644 deps/phoenix_html/.hex create mode 100644 deps/phoenix_html/CHANGELOG.md create mode 100644 deps/phoenix_html/LICENSE create mode 100644 deps/phoenix_html/README.md create mode 100644 deps/phoenix_html/hex_metadata.config create mode 100644 deps/phoenix_html/lib/phoenix_html.ex create mode 100644 deps/phoenix_html/lib/phoenix_html/engine.ex create mode 100644 deps/phoenix_html/lib/phoenix_html/form.ex create mode 100644 deps/phoenix_html/lib/phoenix_html/form_data.ex create mode 100644 deps/phoenix_html/lib/phoenix_html/format.ex create mode 100644 deps/phoenix_html/lib/phoenix_html/link.ex create mode 100644 deps/phoenix_html/lib/phoenix_html/safe.ex create mode 100644 deps/phoenix_html/lib/phoenix_html/tag.ex create mode 100644 deps/phoenix_html/mix.exs create mode 100644 deps/phoenix_html/package.json create mode 100644 deps/phoenix_html/priv/static/phoenix_html.js create mode 100644 deps/phoenix_live_reload/.fetch create mode 100644 deps/phoenix_live_reload/.formatter.exs create mode 100644 deps/phoenix_live_reload/.hex create mode 100644 deps/phoenix_live_reload/CHANGELOG.md create mode 100644 deps/phoenix_live_reload/README.md create mode 100644 deps/phoenix_live_reload/hex_metadata.config create mode 100644 deps/phoenix_live_reload/lib/phoenix_live_reload/application.ex create mode 100644 deps/phoenix_live_reload/lib/phoenix_live_reload/channel.ex create mode 100755 deps/phoenix_live_reload/lib/phoenix_live_reload/live_reloader.ex create mode 100644 deps/phoenix_live_reload/lib/phoenix_live_reload/socket.ex create mode 100644 deps/phoenix_live_reload/mix.exs create mode 100644 deps/phoenix_live_reload/priv/static/phoenix_live_reload.js create mode 100644 deps/phoenix_live_view/.fetch create mode 100644 deps/phoenix_live_view/.hex create mode 100644 deps/phoenix_live_view/CHANGELOG.md create mode 100644 deps/phoenix_live_view/LICENSE.md create mode 100644 deps/phoenix_live_view/README.md create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/browser.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/constants.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/dom.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/dom_patch.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/dom_post_morph_restorer.js 
create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/entry_uploader.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/hooks.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/index.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/js.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/live_socket.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/live_uploader.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/rendered.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/upload_entry.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/utils.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/view.js create mode 100644 deps/phoenix_live_view/assets/js/phoenix_live_view/view_hook.js create mode 100644 deps/phoenix_live_view/hex_metadata.config create mode 100644 deps/phoenix_live_view/lib/phoenix_component.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_component.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/application.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/channel.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/controller.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/diff.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/engine.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/helpers.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/html_algebra.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/html_engine.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/html_formatter.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/html_tokenizer.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/js.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/lifecycle.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/plug.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/renderer.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/route.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/router.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/session.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/socket.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/static.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/test/client_proxy.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/test/dom.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/test/live_view_test.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/test/structs.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/test/upload_client.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/upload.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/upload_channel.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/upload_config.ex create mode 100644 deps/phoenix_live_view/lib/phoenix_live_view/utils.ex create mode 100644 deps/phoenix_live_view/mix.exs create mode 100644 deps/phoenix_live_view/package.json create mode 100644 deps/phoenix_live_view/priv/static/phoenix_live_view.cjs.js create mode 100644 
deps/phoenix_live_view/priv/static/phoenix_live_view.cjs.js.map create mode 100644 deps/phoenix_live_view/priv/static/phoenix_live_view.esm.js create mode 100644 deps/phoenix_live_view/priv/static/phoenix_live_view.esm.js.map create mode 100644 deps/phoenix_live_view/priv/static/phoenix_live_view.js create mode 100644 deps/phoenix_live_view/priv/static/phoenix_live_view.js.map create mode 100644 deps/phoenix_live_view/priv/static/phoenix_live_view.min.js create mode 100644 deps/phoenix_pubsub/.fetch create mode 100644 deps/phoenix_pubsub/.hex create mode 100644 deps/phoenix_pubsub/CHANGELOG.md create mode 100644 deps/phoenix_pubsub/LICENSE.md create mode 100644 deps/phoenix_pubsub/README.md create mode 100644 deps/phoenix_pubsub/hex_metadata.config create mode 100644 deps/phoenix_pubsub/lib/phoenix/pubsub.ex create mode 100644 deps/phoenix_pubsub/lib/phoenix/pubsub/adapter.ex create mode 100644 deps/phoenix_pubsub/lib/phoenix/pubsub/application.ex create mode 100644 deps/phoenix_pubsub/lib/phoenix/pubsub/pg2.ex create mode 100644 deps/phoenix_pubsub/lib/phoenix/pubsub/supervisor.ex create mode 100644 deps/phoenix_pubsub/lib/phoenix/tracker.ex create mode 100644 deps/phoenix_pubsub/lib/phoenix/tracker/clock.ex create mode 100644 deps/phoenix_pubsub/lib/phoenix/tracker/delta_generation.ex create mode 100644 deps/phoenix_pubsub/lib/phoenix/tracker/replica.ex create mode 100644 deps/phoenix_pubsub/lib/phoenix/tracker/shard.ex create mode 100644 deps/phoenix_pubsub/lib/phoenix/tracker/state.ex create mode 100644 deps/phoenix_pubsub/mix.exs create mode 100644 deps/phoenix_pubsub/test/shared/pubsub_test.exs create mode 100644 deps/phoenix_view/.fetch create mode 100644 deps/phoenix_view/.formatter.exs create mode 100644 deps/phoenix_view/.hex create mode 100644 deps/phoenix_view/CHANGELOG.md create mode 100644 deps/phoenix_view/LICENSE.md create mode 100644 deps/phoenix_view/README.md create mode 100644 deps/phoenix_view/hex_metadata.config create mode 100644 deps/phoenix_view/lib/phoenix/template.ex create mode 100644 deps/phoenix_view/lib/phoenix/template/eex_engine.ex create mode 100644 deps/phoenix_view/lib/phoenix/template/engine.ex create mode 100644 deps/phoenix_view/lib/phoenix/template/exs_engine.ex create mode 100644 deps/phoenix_view/lib/phoenix/view.ex create mode 100644 deps/phoenix_view/mix.exs create mode 100644 deps/plug/.fetch create mode 100644 deps/plug/.formatter.exs create mode 100644 deps/plug/.hex create mode 100644 deps/plug/CHANGELOG.md create mode 100644 deps/plug/LICENSE create mode 100644 deps/plug/README.md create mode 100644 deps/plug/hex_metadata.config create mode 100644 deps/plug/lib/plug.ex create mode 100644 deps/plug/lib/plug/adapters/cowboy.ex create mode 100644 deps/plug/lib/plug/adapters/cowboy2.ex create mode 100644 deps/plug/lib/plug/adapters/test/conn.ex create mode 100644 deps/plug/lib/plug/application.ex create mode 100644 deps/plug/lib/plug/basic_auth.ex create mode 100644 deps/plug/lib/plug/builder.ex create mode 100644 deps/plug/lib/plug/conn.ex create mode 100644 deps/plug/lib/plug/conn/adapter.ex create mode 100644 deps/plug/lib/plug/conn/cookies.ex create mode 100644 deps/plug/lib/plug/conn/query.ex create mode 100644 deps/plug/lib/plug/conn/status.ex create mode 100644 deps/plug/lib/plug/conn/unfetched.ex create mode 100644 deps/plug/lib/plug/conn/utils.ex create mode 100644 deps/plug/lib/plug/conn/wrapper_error.ex create mode 100644 deps/plug/lib/plug/csrf_protection.ex create mode 100644 deps/plug/lib/plug/debugger.ex create mode 100644 
deps/plug/lib/plug/error_handler.ex create mode 100644 deps/plug/lib/plug/exceptions.ex create mode 100644 deps/plug/lib/plug/head.ex create mode 100644 deps/plug/lib/plug/html.ex create mode 100644 deps/plug/lib/plug/logger.ex create mode 100644 deps/plug/lib/plug/method_override.ex create mode 100644 deps/plug/lib/plug/mime.ex create mode 100644 deps/plug/lib/plug/parsers.ex create mode 100644 deps/plug/lib/plug/parsers/json.ex create mode 100644 deps/plug/lib/plug/parsers/multipart.ex create mode 100644 deps/plug/lib/plug/parsers/urlencoded.ex create mode 100644 deps/plug/lib/plug/request_id.ex create mode 100644 deps/plug/lib/plug/rewrite_on.ex create mode 100644 deps/plug/lib/plug/router.ex create mode 100644 deps/plug/lib/plug/router/utils.ex create mode 100644 deps/plug/lib/plug/session.ex create mode 100644 deps/plug/lib/plug/session/cookie.ex create mode 100644 deps/plug/lib/plug/session/ets.ex create mode 100644 deps/plug/lib/plug/session/store.ex create mode 100644 deps/plug/lib/plug/ssl.ex create mode 100644 deps/plug/lib/plug/static.ex create mode 100644 deps/plug/lib/plug/telemetry.ex create mode 100644 deps/plug/lib/plug/templates/debugger.html.eex create mode 100644 deps/plug/lib/plug/templates/debugger.md.eex create mode 100644 deps/plug/lib/plug/test.ex create mode 100644 deps/plug/lib/plug/upload.ex create mode 100644 deps/plug/mix.exs create mode 100644 deps/plug/src/plug_multipart.erl create mode 100644 deps/plug_cowboy/.fetch create mode 100644 deps/plug_cowboy/.formatter.exs create mode 100644 deps/plug_cowboy/.hex create mode 100644 deps/plug_cowboy/CHANGELOG.md create mode 100644 deps/plug_cowboy/LICENSE create mode 100644 deps/plug_cowboy/README.md create mode 100644 deps/plug_cowboy/hex_metadata.config create mode 100644 deps/plug_cowboy/lib/plug/cowboy.ex create mode 100644 deps/plug_cowboy/lib/plug/cowboy/conn.ex create mode 100644 deps/plug_cowboy/lib/plug/cowboy/drainer.ex create mode 100644 deps/plug_cowboy/lib/plug/cowboy/handler.ex create mode 100644 deps/plug_cowboy/lib/plug/cowboy/translator.ex create mode 100644 deps/plug_cowboy/mix.exs create mode 100644 deps/plug_crypto/.fetch create mode 100644 deps/plug_crypto/.hex create mode 100644 deps/plug_crypto/CHANGELOG.md create mode 100644 deps/plug_crypto/LICENSE create mode 100644 deps/plug_crypto/README.md create mode 100644 deps/plug_crypto/hex_metadata.config create mode 100644 deps/plug_crypto/lib/plug/crypto.ex create mode 100644 deps/plug_crypto/lib/plug/crypto/application.ex create mode 100644 deps/plug_crypto/lib/plug/crypto/key_generator.ex create mode 100644 deps/plug_crypto/lib/plug/crypto/message_encryptor.ex create mode 100644 deps/plug_crypto/lib/plug/crypto/message_verifier.ex create mode 100644 deps/plug_crypto/mix.exs create mode 100644 deps/postgrex/.fetch create mode 100644 deps/postgrex/.formatter.exs create mode 100644 deps/postgrex/.hex create mode 100644 deps/postgrex/CHANGELOG.md create mode 100644 deps/postgrex/README.md create mode 100644 deps/postgrex/hex_metadata.config create mode 100644 deps/postgrex/lib/postgrex.ex create mode 100644 deps/postgrex/lib/postgrex/app.ex create mode 100644 deps/postgrex/lib/postgrex/binary_extension.ex create mode 100644 deps/postgrex/lib/postgrex/binary_utils.ex create mode 100644 deps/postgrex/lib/postgrex/builtins.ex create mode 100644 deps/postgrex/lib/postgrex/default_types.ex create mode 100644 deps/postgrex/lib/postgrex/errcodes.txt create mode 100644 deps/postgrex/lib/postgrex/error.ex create mode 100644 
deps/postgrex/lib/postgrex/error_code.ex create mode 100644 deps/postgrex/lib/postgrex/extension.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/array.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/bit_string.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/bool.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/box.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/circle.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/date.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/float4.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/float8.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/hstore.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/inet.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/int2.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/int4.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/int8.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/interval.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/json.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/jsonb.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/line.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/line_segment.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/macaddr.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/name.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/numeric.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/oid.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/path.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/point.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/polygon.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/range.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/raw.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/record.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/tid.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/time.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/timestamp.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/timestamptz.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/timetz.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/tsvector.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/uuid.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/void_binary.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/void_text.ex create mode 100644 deps/postgrex/lib/postgrex/extensions/xid8.ex create mode 100644 deps/postgrex/lib/postgrex/messages.ex create mode 100644 deps/postgrex/lib/postgrex/notifications.ex create mode 100644 deps/postgrex/lib/postgrex/parameters.ex create mode 100644 deps/postgrex/lib/postgrex/protocol.ex create mode 100644 deps/postgrex/lib/postgrex/query.ex create mode 100644 deps/postgrex/lib/postgrex/replication_connection.ex create mode 100644 deps/postgrex/lib/postgrex/result.ex create mode 100644 deps/postgrex/lib/postgrex/scram.ex create mode 100644 deps/postgrex/lib/postgrex/simple_connection.ex create mode 100644 deps/postgrex/lib/postgrex/stream.ex create mode 100644 deps/postgrex/lib/postgrex/super_extension.ex create mode 100644 deps/postgrex/lib/postgrex/type_info.ex create mode 100644 deps/postgrex/lib/postgrex/type_module.ex create mode 100644 deps/postgrex/lib/postgrex/type_server.ex create mode 100644 deps/postgrex/lib/postgrex/type_supervisor.ex create 
mode 100644 deps/postgrex/lib/postgrex/types.ex create mode 100644 deps/postgrex/lib/postgrex/utils.ex create mode 100644 deps/postgrex/mix.exs create mode 100644 deps/ranch/.fetch create mode 100644 deps/ranch/.hex create mode 100644 deps/ranch/LICENSE create mode 100644 deps/ranch/Makefile create mode 100644 deps/ranch/README.asciidoc create mode 100644 deps/ranch/ebin/ranch.app create mode 100644 deps/ranch/erlang.mk create mode 100644 deps/ranch/hex_metadata.config create mode 100644 deps/ranch/src/ranch.erl create mode 100644 deps/ranch/src/ranch_acceptor.erl create mode 100644 deps/ranch/src/ranch_acceptors_sup.erl create mode 100644 deps/ranch/src/ranch_app.erl create mode 100644 deps/ranch/src/ranch_conns_sup.erl create mode 100644 deps/ranch/src/ranch_crc32c.erl create mode 100644 deps/ranch/src/ranch_listener_sup.erl create mode 100644 deps/ranch/src/ranch_protocol.erl create mode 100644 deps/ranch/src/ranch_proxy_header.erl create mode 100644 deps/ranch/src/ranch_server.erl create mode 100644 deps/ranch/src/ranch_ssl.erl create mode 100644 deps/ranch/src/ranch_sup.erl create mode 100644 deps/ranch/src/ranch_tcp.erl create mode 100644 deps/ranch/src/ranch_transport.erl create mode 100644 deps/telemetry/.fetch create mode 100644 deps/telemetry/.hex create mode 100644 deps/telemetry/CHANGELOG.md create mode 100644 deps/telemetry/LICENSE create mode 100644 deps/telemetry/NOTICE create mode 100644 deps/telemetry/README.md create mode 100644 deps/telemetry/hex_metadata.config create mode 100644 deps/telemetry/mix.exs create mode 100644 deps/telemetry/rebar.config create mode 100644 deps/telemetry/rebar.lock create mode 100644 deps/telemetry/src/telemetry.app.src create mode 100644 deps/telemetry/src/telemetry.erl create mode 100644 deps/telemetry/src/telemetry.hrl create mode 100644 deps/telemetry/src/telemetry_app.erl create mode 100644 deps/telemetry/src/telemetry_handler_table.erl create mode 100644 deps/telemetry/src/telemetry_sup.erl create mode 100644 deps/telemetry_metrics/.fetch create mode 100644 deps/telemetry_metrics/.formatter.exs create mode 100644 deps/telemetry_metrics/.hex create mode 100644 deps/telemetry_metrics/CHANGELOG.md create mode 100644 deps/telemetry_metrics/LICENSE create mode 100644 deps/telemetry_metrics/README.md create mode 100644 deps/telemetry_metrics/hex_metadata.config create mode 100644 deps/telemetry_metrics/lib/telemetry_metrics.ex create mode 100644 deps/telemetry_metrics/lib/telemetry_metrics/console_reporter.ex create mode 100644 deps/telemetry_metrics/lib/telemetry_metrics/counter.ex create mode 100644 deps/telemetry_metrics/lib/telemetry_metrics/distribution.ex create mode 100644 deps/telemetry_metrics/lib/telemetry_metrics/last_value.ex create mode 100644 deps/telemetry_metrics/lib/telemetry_metrics/sum.ex create mode 100644 deps/telemetry_metrics/lib/telemetry_metrics/summary.ex create mode 100644 deps/telemetry_metrics/mix.exs create mode 100644 deps/telemetry_poller/.fetch create mode 100644 deps/telemetry_poller/.hex create mode 100644 deps/telemetry_poller/CHANGELOG.md create mode 100644 deps/telemetry_poller/LICENSE create mode 100644 deps/telemetry_poller/NOTICE create mode 100644 deps/telemetry_poller/README.md create mode 100644 deps/telemetry_poller/hex_metadata.config create mode 100644 deps/telemetry_poller/rebar.config create mode 100644 deps/telemetry_poller/rebar.lock create mode 100644 deps/telemetry_poller/src/telemetry_poller.app.src create mode 100644 deps/telemetry_poller/src/telemetry_poller.erl create mode 100644 
deps/telemetry_poller/src/telemetry_poller_app.erl create mode 100644 deps/telemetry_poller/src/telemetry_poller_builtin.erl create mode 100644 deps/telemetry_poller/src/telemetry_poller_sup.erl create mode 100644 lib/app.ex create mode 100644 lib/app/application.ex create mode 100644 lib/app/repo.ex create mode 100644 lib/app_web.ex create mode 100644 lib/app_web/controllers/page_controller.ex create mode 100644 lib/app_web/endpoint.ex create mode 100644 lib/app_web/router.ex create mode 100644 lib/app_web/telemetry.ex create mode 100644 lib/app_web/templates/layout/app.html.heex create mode 100644 lib/app_web/templates/layout/live.html.heex create mode 100644 lib/app_web/templates/layout/root.html.heex create mode 100644 lib/app_web/templates/page/index.html.heex create mode 100644 lib/app_web/views/error_helpers.ex create mode 100644 lib/app_web/views/error_view.ex create mode 100644 lib/app_web/views/layout_view.ex create mode 100644 lib/app_web/views/page_view.ex create mode 100644 mix.exs create mode 100644 mix.lock create mode 100644 priv/repo/migrations/.formatter.exs create mode 100644 priv/repo/seeds.exs create mode 100644 priv/static/favicon.ico create mode 100644 priv/static/images/phoenix.png create mode 100644 priv/static/robots.txt create mode 100644 test/app_web/controllers/page_controller_test.exs create mode 100644 test/app_web/views/error_view_test.exs create mode 100644 test/app_web/views/layout_view_test.exs create mode 100644 test/app_web/views/page_view_test.exs create mode 100644 test/support/conn_case.ex create mode 100644 test/support/data_case.ex create mode 100644 test/test_helper.exs diff --git a/.formatter.exs b/.formatter.exs new file mode 100644 index 0000000..8a6391c --- /dev/null +++ b/.formatter.exs @@ -0,0 +1,5 @@ +[ + import_deps: [:ecto, :phoenix], + inputs: ["*.{ex,exs}", "priv/*/seeds.exs", "{config,lib,test}/**/*.{ex,exs}"], + subdirectories: ["priv/*/migrations"] +] diff --git a/_build/dev/lib/app/.mix/compile.lock b/_build/dev/lib/app/.mix/compile.lock new file mode 100644 index 0000000..e69de29 diff --git a/assets/css/app.css b/assets/css/app.css new file mode 100644 index 0000000..19c2e51 --- /dev/null +++ b/assets/css/app.css @@ -0,0 +1,120 @@ +/* This file is for your main application CSS */ +@import "./phoenix.css"; + +/* Alerts and form errors used by phx.new */ +.alert { + padding: 15px; + margin-bottom: 20px; + border: 1px solid transparent; + border-radius: 4px; +} +.alert-info { + color: #31708f; + background-color: #d9edf7; + border-color: #bce8f1; +} +.alert-warning { + color: #8a6d3b; + background-color: #fcf8e3; + border-color: #faebcc; +} +.alert-danger { + color: #a94442; + background-color: #f2dede; + border-color: #ebccd1; +} +.alert p { + margin-bottom: 0; +} +.alert:empty { + display: none; +} +.invalid-feedback { + color: #a94442; + display: block; + margin: -1rem 0 2rem; +} + +/* LiveView specific classes for your customization */ +.phx-no-feedback.invalid-feedback, +.phx-no-feedback .invalid-feedback { + display: none; +} + +.phx-click-loading { + opacity: 0.5; + transition: opacity 1s ease-out; +} + +.phx-loading{ + cursor: wait; +} + +.phx-modal { + opacity: 1!important; + position: fixed; + z-index: 1; + left: 0; + top: 0; + width: 100%; + height: 100%; + overflow: auto; + background-color: rgba(0,0,0,0.4); +} + +.phx-modal-content { + background-color: #fefefe; + margin: 15vh auto; + padding: 20px; + border: 1px solid #888; + width: 80%; +} + +.phx-modal-close { + color: #aaa; + float: right; + font-size: 28px; + 
font-weight: bold; +} + +.phx-modal-close:hover, +.phx-modal-close:focus { + color: black; + text-decoration: none; + cursor: pointer; +} + +.fade-in-scale { + animation: 0.2s ease-in 0s normal forwards 1 fade-in-scale-keys; +} + +.fade-out-scale { + animation: 0.2s ease-out 0s normal forwards 1 fade-out-scale-keys; +} + +.fade-in { + animation: 0.2s ease-out 0s normal forwards 1 fade-in-keys; +} +.fade-out { + animation: 0.2s ease-out 0s normal forwards 1 fade-out-keys; +} + +@keyframes fade-in-scale-keys{ + 0% { scale: 0.95; opacity: 0; } + 100% { scale: 1.0; opacity: 1; } +} + +@keyframes fade-out-scale-keys{ + 0% { scale: 1.0; opacity: 1; } + 100% { scale: 0.95; opacity: 0; } +} + +@keyframes fade-in-keys{ + 0% { opacity: 0; } + 100% { opacity: 1; } +} + +@keyframes fade-out-keys{ + 0% { opacity: 1; } + 100% { opacity: 0; } +} diff --git a/assets/css/phoenix.css b/assets/css/phoenix.css new file mode 100644 index 0000000..0d59050 --- /dev/null +++ b/assets/css/phoenix.css @@ -0,0 +1,101 @@ +/* Includes some default style for the starter application. + * This can be safely deleted to start fresh. + */ + +/* Milligram v1.4.1 https://milligram.github.io + * Copyright (c) 2020 CJ Patoilo Licensed under the MIT license + */ + +*,*:after,*:before{box-sizing:inherit}html{box-sizing:border-box;font-size:62.5%}body{color:#000000;font-family:'Helvetica Neue', 'Helvetica', 'Arial', sans-serif;font-size:1.6em;font-weight:300;letter-spacing:.01em;line-height:1.6}blockquote{border-left:0.3rem solid #d1d1d1;margin-left:0;margin-right:0;padding:1rem 1.5rem}blockquote *:last-child{margin-bottom:0}.button,button,input[type='button'],input[type='reset'],input[type='submit']{background-color:#0069d9;border:0.1rem solid #0069d9;border-radius:.4rem;color:#fff;cursor:pointer;display:inline-block;font-size:1.1rem;font-weight:700;height:3.8rem;letter-spacing:.1rem;line-height:3.8rem;padding:0 
3.0rem;text-align:center;text-decoration:none;text-transform:uppercase;white-space:nowrap}.button:focus,.button:hover,button:focus,button:hover,input[type='button']:focus,input[type='button']:hover,input[type='reset']:focus,input[type='reset']:hover,input[type='submit']:focus,input[type='submit']:hover{background-color:#606c76;border-color:#606c76;color:#fff;outline:0}.button[disabled],button[disabled],input[type='button'][disabled],input[type='reset'][disabled],input[type='submit'][disabled]{cursor:default;opacity:.5}.button[disabled]:focus,.button[disabled]:hover,button[disabled]:focus,button[disabled]:hover,input[type='button'][disabled]:focus,input[type='button'][disabled]:hover,input[type='reset'][disabled]:focus,input[type='reset'][disabled]:hover,input[type='submit'][disabled]:focus,input[type='submit'][disabled]:hover{background-color:#0069d9;border-color:#0069d9}.button.button-outline,button.button-outline,input[type='button'].button-outline,input[type='reset'].button-outline,input[type='submit'].button-outline{background-color:transparent;color:#0069d9}.button.button-outline:focus,.button.button-outline:hover,button.button-outline:focus,button.button-outline:hover,input[type='button'].button-outline:focus,input[type='button'].button-outline:hover,input[type='reset'].button-outline:focus,input[type='reset'].button-outline:hover,input[type='submit'].button-outline:focus,input[type='submit'].button-outline:hover{background-color:transparent;border-color:#606c76;color:#606c76}.button.button-outline[disabled]:focus,.button.button-outline[disabled]:hover,button.button-outline[disabled]:focus,button.button-outline[disabled]:hover,input[type='button'].button-outline[disabled]:focus,input[type='button'].button-outline[disabled]:hover,input[type='reset'].button-outline[disabled]:focus,input[type='reset'].button-outline[disabled]:hover,input[type='submit'].button-outline[disabled]:focus,input[type='submit'].button-outline[disabled]:hover{border-color:inherit;color:#0069d9}.button.button-clear,button.button-clear,input[type='button'].button-clear,input[type='reset'].button-clear,input[type='submit'].button-clear{background-color:transparent;border-color:transparent;color:#0069d9}.button.button-clear:focus,.button.button-clear:hover,button.button-clear:focus,button.button-clear:hover,input[type='button'].button-clear:focus,input[type='button'].button-clear:hover,input[type='reset'].button-clear:focus,input[type='reset'].button-clear:hover,input[type='submit'].button-clear:focus,input[type='submit'].button-clear:hover{background-color:transparent;border-color:transparent;color:#606c76}.button.button-clear[disabled]:focus,.button.button-clear[disabled]:hover,button.button-clear[disabled]:focus,button.button-clear[disabled]:hover,input[type='button'].button-clear[disabled]:focus,input[type='button'].button-clear[disabled]:hover,input[type='reset'].button-clear[disabled]:focus,input[type='reset'].button-clear[disabled]:hover,input[type='submit'].button-clear[disabled]:focus,input[type='submit'].button-clear[disabled]:hover{color:#0069d9}code{background:#f4f5f6;border-radius:.4rem;font-size:86%;margin:0 .2rem;padding:.2rem .5rem;white-space:nowrap}pre{background:#f4f5f6;border-left:0.3rem solid #0069d9;overflow-y:hidden}pre>code{border-radius:0;display:block;padding:1rem 1.5rem;white-space:pre}hr{border:0;border-top:0.1rem solid #f4f5f6;margin:3.0rem 
0}input[type='color'],input[type='date'],input[type='datetime'],input[type='datetime-local'],input[type='email'],input[type='month'],input[type='number'],input[type='password'],input[type='search'],input[type='tel'],input[type='text'],input[type='url'],input[type='week'],input:not([type]),textarea,select{-webkit-appearance:none;background-color:transparent;border:0.1rem solid #d1d1d1;border-radius:.4rem;box-shadow:none;box-sizing:inherit;height:3.8rem;padding:.6rem 1.0rem .7rem;width:100%}input[type='color']:focus,input[type='date']:focus,input[type='datetime']:focus,input[type='datetime-local']:focus,input[type='email']:focus,input[type='month']:focus,input[type='number']:focus,input[type='password']:focus,input[type='search']:focus,input[type='tel']:focus,input[type='text']:focus,input[type='url']:focus,input[type='week']:focus,input:not([type]):focus,textarea:focus,select:focus{border-color:#0069d9;outline:0}select{background:url('data:image/svg+xml;utf8,') center right no-repeat;padding-right:3.0rem}select:focus{background-image:url('data:image/svg+xml;utf8,')}select[multiple]{background:none;height:auto}textarea{min-height:6.5rem}label,legend{display:block;font-size:1.6rem;font-weight:700;margin-bottom:.5rem}fieldset{border-width:0;padding:0}input[type='checkbox'],input[type='radio']{display:inline}.label-inline{display:inline-block;font-weight:normal;margin-left:.5rem}.container{margin:0 auto;max-width:112.0rem;padding:0 2.0rem;position:relative;width:100%}.row{display:flex;flex-direction:column;padding:0;width:100%}.row.row-no-padding{padding:0}.row.row-no-padding>.column{padding:0}.row.row-wrap{flex-wrap:wrap}.row.row-top{align-items:flex-start}.row.row-bottom{align-items:flex-end}.row.row-center{align-items:center}.row.row-stretch{align-items:stretch}.row.row-baseline{align-items:baseline}.row .column{display:block;flex:1 1 auto;margin-left:0;max-width:100%;width:100%}.row .column.column-offset-10{margin-left:10%}.row .column.column-offset-20{margin-left:20%}.row .column.column-offset-25{margin-left:25%}.row .column.column-offset-33,.row .column.column-offset-34{margin-left:33.3333%}.row .column.column-offset-40{margin-left:40%}.row .column.column-offset-50{margin-left:50%}.row .column.column-offset-60{margin-left:60%}.row .column.column-offset-66,.row .column.column-offset-67{margin-left:66.6666%}.row .column.column-offset-75{margin-left:75%}.row .column.column-offset-80{margin-left:80%}.row .column.column-offset-90{margin-left:90%}.row .column.column-10{flex:0 0 10%;max-width:10%}.row .column.column-20{flex:0 0 20%;max-width:20%}.row .column.column-25{flex:0 0 25%;max-width:25%}.row .column.column-33,.row .column.column-34{flex:0 0 33.3333%;max-width:33.3333%}.row .column.column-40{flex:0 0 40%;max-width:40%}.row .column.column-50{flex:0 0 50%;max-width:50%}.row .column.column-60{flex:0 0 60%;max-width:60%}.row .column.column-66,.row .column.column-67{flex:0 0 66.6666%;max-width:66.6666%}.row .column.column-75{flex:0 0 75%;max-width:75%}.row .column.column-80{flex:0 0 80%;max-width:80%}.row .column.column-90{flex:0 0 90%;max-width:90%}.row .column .column-top{align-self:flex-start}.row .column .column-bottom{align-self:flex-end}.row .column .column-center{align-self:center}@media (min-width: 40rem){.row{flex-direction:row;margin-left:-1.0rem;width:calc(100% + 2.0rem)}.row .column{margin-bottom:inherit;padding:0 1.0rem}}a{color:#0069d9;text-decoration:none}a:focus,a:hover{color:#606c76}dl,ol,ul{list-style:none;margin-top:0;padding-left:0}dl dl,dl ol,dl ul,ol dl,ol ol,ol ul,ul 
dl,ul ol,ul ul{font-size:90%;margin:1.5rem 0 1.5rem 3.0rem}ol{list-style:decimal inside}ul{list-style:circle inside}.button,button,dd,dt,li{margin-bottom:1.0rem}fieldset,input,select,textarea{margin-bottom:1.5rem}blockquote,dl,figure,form,ol,p,pre,table,ul{margin-bottom:2.5rem}table{border-spacing:0;display:block;overflow-x:auto;text-align:left;width:100%}td,th{border-bottom:0.1rem solid #e1e1e1;padding:1.2rem 1.5rem}td:first-child,th:first-child{padding-left:0}td:last-child,th:last-child{padding-right:0}@media (min-width: 40rem){table{display:table;overflow-x:initial}}b,strong{font-weight:bold}p{margin-top:0}h1,h2,h3,h4,h5,h6{font-weight:300;letter-spacing:-.1rem;margin-bottom:2.0rem;margin-top:0}h1{font-size:4.6rem;line-height:1.2}h2{font-size:3.6rem;line-height:1.25}h3{font-size:2.8rem;line-height:1.3}h4{font-size:2.2rem;letter-spacing:-.08rem;line-height:1.35}h5{font-size:1.8rem;letter-spacing:-.05rem;line-height:1.5}h6{font-size:1.6rem;letter-spacing:0;line-height:1.4}img{max-width:100%}.clearfix:after{clear:both;content:' ';display:table}.float-left{float:left}.float-right{float:right} + +/* General style */ +h1{font-size: 3.6rem; line-height: 1.25} +h2{font-size: 2.8rem; line-height: 1.3} +h3{font-size: 2.2rem; letter-spacing: -.08rem; line-height: 1.35} +h4{font-size: 1.8rem; letter-spacing: -.05rem; line-height: 1.5} +h5{font-size: 1.6rem; letter-spacing: 0; line-height: 1.4} +h6{font-size: 1.4rem; letter-spacing: 0; line-height: 1.2} +pre{padding: 1em;} + +.container{ + margin: 0 auto; + max-width: 80.0rem; + padding: 0 2.0rem; + position: relative; + width: 100% +} +select { + width: auto; +} + +/* Phoenix promo and logo */ +.phx-hero { + text-align: center; + border-bottom: 1px solid #e3e3e3; + background: #eee; + border-radius: 6px; + padding: 3em 3em 1em; + margin-bottom: 3rem; + font-weight: 200; + font-size: 120%; +} +.phx-hero input { + background: #ffffff; +} +.phx-logo { + min-width: 300px; + margin: 1rem; + display: block; +} +.phx-logo img { + width: auto; + display: block; +} + +/* Headers */ +header { + width: 100%; + background: #fdfdfd; + border-bottom: 1px solid #eaeaea; + margin-bottom: 2rem; +} +header section { + align-items: center; + display: flex; + flex-direction: column; + justify-content: space-between; +} +header section :first-child { + order: 2; +} +header section :last-child { + order: 1; +} +header nav ul, +header nav li { + margin: 0; + padding: 0; + display: block; + text-align: right; + white-space: nowrap; +} +header nav ul { + margin: 1rem; + margin-top: 0; +} +header nav a { + display: block; +} + +@media (min-width: 40.0rem) { /* Small devices (landscape phones, 576px and up) */ + header section { + flex-direction: row; + } + header nav ul { + margin: 1rem; + } + .phx-logo { + flex-basis: 527px; + margin: 2rem 1rem; + } +} diff --git a/assets/js/app.js b/assets/js/app.js new file mode 100644 index 0000000..2ca06a5 --- /dev/null +++ b/assets/js/app.js @@ -0,0 +1,45 @@ +// We import the CSS which is extracted to its own file by esbuild. +// Remove this line if you add a your own CSS build pipeline (e.g postcss). +import "../css/app.css" + +// If you want to use Phoenix channels, run `mix help phx.gen.channel` +// to get started and then uncomment the line below. +// import "./user_socket.js" + +// You can include dependencies in two ways. 
+// +// The simplest option is to put them in assets/vendor and +// import them using relative paths: +// +// import "../vendor/some-package.js" +// +// Alternatively, you can `npm install some-package --prefix assets` and import +// them using a path starting with the package name: +// +// import "some-package" +// + +// Include phoenix_html to handle method=PUT/DELETE in forms and buttons. +import "phoenix_html" +// Establish Phoenix Socket and LiveView configuration. +import {Socket} from "phoenix" +import {LiveSocket} from "phoenix_live_view" +import topbar from "../vendor/topbar" + +let csrfToken = document.querySelector("meta[name='csrf-token']").getAttribute("content") +let liveSocket = new LiveSocket("/live", Socket, {params: {_csrf_token: csrfToken}}) + +// Show progress bar on live navigation and form submits +topbar.config({barColors: {0: "#29d"}, shadowColor: "rgba(0, 0, 0, .3)"}) +window.addEventListener("phx:page-loading-start", info => topbar.show()) +window.addEventListener("phx:page-loading-stop", info => topbar.hide()) + +// connect if there are any LiveViews on the page +liveSocket.connect() + +// expose liveSocket on window for web console debug logs and latency simulation: +// >> liveSocket.enableDebug() +// >> liveSocket.enableLatencySim(1000) // enabled for duration of browser session +// >> liveSocket.disableLatencySim() +window.liveSocket = liveSocket + diff --git a/assets/vendor/topbar.js b/assets/vendor/topbar.js new file mode 100644 index 0000000..1f62209 --- /dev/null +++ b/assets/vendor/topbar.js @@ -0,0 +1,157 @@ +/** + * @license MIT + * topbar 1.0.0, 2021-01-06 + * https://buunguyen.github.io/topbar + * Copyright (c) 2021 Buu Nguyen + */ +(function (window, document) { + "use strict"; + + // https://gist.github.com/paulirish/1579671 + (function () { + var lastTime = 0; + var vendors = ["ms", "moz", "webkit", "o"]; + for (var x = 0; x < vendors.length && !window.requestAnimationFrame; ++x) { + window.requestAnimationFrame = + window[vendors[x] + "RequestAnimationFrame"]; + window.cancelAnimationFrame = + window[vendors[x] + "CancelAnimationFrame"] || + window[vendors[x] + "CancelRequestAnimationFrame"]; + } + if (!window.requestAnimationFrame) + window.requestAnimationFrame = function (callback, element) { + var currTime = new Date().getTime(); + var timeToCall = Math.max(0, 16 - (currTime - lastTime)); + var id = window.setTimeout(function () { + callback(currTime + timeToCall); + }, timeToCall); + lastTime = currTime + timeToCall; + return id; + }; + if (!window.cancelAnimationFrame) + window.cancelAnimationFrame = function (id) { + clearTimeout(id); + }; + })(); + + var canvas, + progressTimerId, + fadeTimerId, + currentProgress, + showing, + addEvent = function (elem, type, handler) { + if (elem.addEventListener) elem.addEventListener(type, handler, false); + else if (elem.attachEvent) elem.attachEvent("on" + type, handler); + else elem["on" + type] = handler; + }, + options = { + autoRun: true, + barThickness: 3, + barColors: { + 0: "rgba(26, 188, 156, .9)", + ".25": "rgba(52, 152, 219, .9)", + ".50": "rgba(241, 196, 15, .9)", + ".75": "rgba(230, 126, 34, .9)", + "1.0": "rgba(211, 84, 0, .9)", + }, + shadowBlur: 10, + shadowColor: "rgba(0, 0, 0, .6)", + className: null, + }, + repaint = function () { + canvas.width = window.innerWidth; + canvas.height = options.barThickness * 5; // need space for shadow + + var ctx = canvas.getContext("2d"); + ctx.shadowBlur = options.shadowBlur; + ctx.shadowColor = options.shadowColor; + + var lineGradient = 
ctx.createLinearGradient(0, 0, canvas.width, 0); + for (var stop in options.barColors) + lineGradient.addColorStop(stop, options.barColors[stop]); + ctx.lineWidth = options.barThickness; + ctx.beginPath(); + ctx.moveTo(0, options.barThickness / 2); + ctx.lineTo( + Math.ceil(currentProgress * canvas.width), + options.barThickness / 2 + ); + ctx.strokeStyle = lineGradient; + ctx.stroke(); + }, + createCanvas = function () { + canvas = document.createElement("canvas"); + var style = canvas.style; + style.position = "fixed"; + style.top = style.left = style.right = style.margin = style.padding = 0; + style.zIndex = 100001; + style.display = "none"; + if (options.className) canvas.classList.add(options.className); + document.body.appendChild(canvas); + addEvent(window, "resize", repaint); + }, + topbar = { + config: function (opts) { + for (var key in opts) + if (options.hasOwnProperty(key)) options[key] = opts[key]; + }, + show: function () { + if (showing) return; + showing = true; + if (fadeTimerId !== null) window.cancelAnimationFrame(fadeTimerId); + if (!canvas) createCanvas(); + canvas.style.opacity = 1; + canvas.style.display = "block"; + topbar.progress(0); + if (options.autoRun) { + (function loop() { + progressTimerId = window.requestAnimationFrame(loop); + topbar.progress( + "+" + 0.05 * Math.pow(1 - Math.sqrt(currentProgress), 2) + ); + })(); + } + }, + progress: function (to) { + if (typeof to === "undefined") return currentProgress; + if (typeof to === "string") { + to = + (to.indexOf("+") >= 0 || to.indexOf("-") >= 0 + ? currentProgress + : 0) + parseFloat(to); + } + currentProgress = to > 1 ? 1 : to; + repaint(); + return currentProgress; + }, + hide: function () { + if (!showing) return; + showing = false; + if (progressTimerId != null) { + window.cancelAnimationFrame(progressTimerId); + progressTimerId = null; + } + (function loop() { + if (topbar.progress("+.1") >= 1) { + canvas.style.opacity -= 0.05; + if (canvas.style.opacity <= 0.05) { + canvas.style.display = "none"; + fadeTimerId = null; + return; + } + } + fadeTimerId = window.requestAnimationFrame(loop); + })(); + }, + }; + + if (typeof module === "object" && typeof module.exports === "object") { + module.exports = topbar; + } else if (typeof define === "function" && define.amd) { + define(function () { + return topbar; + }); + } else { + this.topbar = topbar; + } +}.call(this, window, document)); diff --git a/buildit.md b/buildit.md index e69de29..65a9fd1 100644 --- a/buildit.md +++ b/buildit.md @@ -0,0 +1,27 @@ +# build it! + +This document is a log +of our progress in building +**`linky`**. +You can follow along +to understand every step along the way. +It serves as **_living_ documentation** +and a rapid entry point +for anyone wanting to understand +how the app works +so that you can easily +deploy, maintain and _contribute_ to it! + +# 1. Create a `new` `Phoenix` App + +```sh +mix phx.new app --no-mailer --no-dashboard --no-gettext +``` + + + +To start your Phoenix server: + + * Install dependencies with `mix deps.get` + * Create and migrate your database with `mix ecto.setup` + * Start Phoenix endpoint with `mix phx.server` or inside IEx with `iex -S mix phx.server` \ No newline at end of file diff --git a/config/config.exs b/config/config.exs new file mode 100644 index 0000000..ad47d5e --- /dev/null +++ b/config/config.exs @@ -0,0 +1,40 @@ +# This file is responsible for configuring your application +# and its dependencies with the aid of the Config module. 
+# +# This configuration file is loaded before any dependency and +# is restricted to this project. + +# General application configuration +import Config + +config :app, + ecto_repos: [App.Repo] + +# Configures the endpoint +config :app, AppWeb.Endpoint, + url: [host: "localhost"], + render_errors: [view: AppWeb.ErrorView, accepts: ~w(html json), layout: false], + pubsub_server: App.PubSub, + live_view: [signing_salt: "N3x6kvyq"] + +# Configure esbuild (the version is required) +config :esbuild, + version: "0.14.29", + default: [ + args: + ~w(js/app.js --bundle --target=es2017 --outdir=../priv/static/assets --external:/fonts/* --external:/images/*), + cd: Path.expand("../assets", __DIR__), + env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)} + ] + +# Configures Elixir's Logger +config :logger, :console, + format: "$time $metadata[$level] $message\n", + metadata: [:request_id] + +# Use Jason for JSON parsing in Phoenix +config :phoenix, :json_library, Jason + +# Import environment specific config. This must remain at the bottom +# of this file so it overrides the configuration defined above. +import_config "#{config_env()}.exs" diff --git a/config/dev.exs b/config/dev.exs new file mode 100644 index 0000000..b67832d --- /dev/null +++ b/config/dev.exs @@ -0,0 +1,74 @@ +import Config + +# Configure your database +config :app, App.Repo, + username: "postgres", + password: "postgres", + hostname: "localhost", + database: "app_dev", + stacktrace: true, + show_sensitive_data_on_connection_error: true, + pool_size: 10 + +# For development, we disable any cache and enable +# debugging and code reloading. +# +# The watchers configuration can be used to run external +# watchers to your application. For example, we use it +# with esbuild to bundle .js and .css sources. +config :app, AppWeb.Endpoint, + # Binding to loopback ipv4 address prevents access from other machines. + # Change to `ip: {0, 0, 0, 0}` to allow access from other machines. + http: [ip: {127, 0, 0, 1}, port: 4000], + check_origin: false, + code_reloader: true, + debug_errors: true, + secret_key_base: "z+Xu1e5d01/6LnBh5jgmyuS6/kRsclruM4s3B5qfaxdBNYxOPyVJ1Y4MepTVu5Zg", + watchers: [ + # Start the esbuild watcher by calling Esbuild.install_and_run(:default, args) + esbuild: {Esbuild, :install_and_run, [:default, ~w(--sourcemap=inline --watch)]} + ] + +# ## SSL Support +# +# In order to use HTTPS in development, a self-signed +# certificate can be generated by running the following +# Mix task: +# +# mix phx.gen.cert +# +# Note that this task requires Erlang/OTP 20 or later. +# Run `mix help phx.gen.cert` for more information. +# +# The `http:` config above can be replaced with: +# +# https: [ +# port: 4001, +# cipher_suite: :strong, +# keyfile: "priv/cert/selfsigned_key.pem", +# certfile: "priv/cert/selfsigned.pem" +# ], +# +# If desired, both `http:` and `https:` keys can be +# configured to run both http and https servers on +# different ports. + +# Watch static and templates for browser reloading. +config :app, AppWeb.Endpoint, + live_reload: [ + patterns: [ + ~r"priv/static/.*(js|css|png|jpeg|jpg|gif|svg)$", + ~r"lib/app_web/(live|views)/.*(ex)$", + ~r"lib/app_web/templates/.*(eex)$" + ] + ] + +# Do not include metadata nor timestamps in development logs +config :logger, :console, format: "[$level] $message\n" + +# Set a higher stacktrace during development. Avoid configuring such +# in production as building large stacktraces may be expensive. 
+config :phoenix, :stacktrace_depth, 20 + +# Initialize plugs at runtime for faster development compilation +config :phoenix, :plug_init_mode, :runtime diff --git a/config/prod.exs b/config/prod.exs new file mode 100644 index 0000000..d5f74e9 --- /dev/null +++ b/config/prod.exs @@ -0,0 +1,49 @@ +import Config + +# For production, don't forget to configure the url host +# to something meaningful, Phoenix uses this information +# when generating URLs. +# +# Note we also include the path to a cache manifest +# containing the digested version of static files. This +# manifest is generated by the `mix phx.digest` task, +# which you should run after static files are built and +# before starting your production server. +config :app, AppWeb.Endpoint, cache_static_manifest: "priv/static/cache_manifest.json" + +# Do not print debug messages in production +config :logger, level: :info + +# ## SSL Support +# +# To get SSL working, you will need to add the `https` key +# to the previous section and set your `:url` port to 443: +# +# config :app, AppWeb.Endpoint, +# ..., +# url: [host: "example.com", port: 443], +# https: [ +# ..., +# port: 443, +# cipher_suite: :strong, +# keyfile: System.get_env("SOME_APP_SSL_KEY_PATH"), +# certfile: System.get_env("SOME_APP_SSL_CERT_PATH") +# ] +# +# The `cipher_suite` is set to `:strong` to support only the +# latest and more secure SSL ciphers. This means old browsers +# and clients may not be supported. You can set it to +# `:compatible` for wider support. +# +# `:keyfile` and `:certfile` expect an absolute path to the key +# and cert in disk or a relative path inside priv, for example +# "priv/ssl/server.key". For all supported SSL configuration +# options, see https://hexdocs.pm/plug/Plug.SSL.html#configure/1 +# +# We also recommend setting `force_ssl` in your endpoint, ensuring +# no data is ever sent via http, always redirecting to https: +# +# config :app, AppWeb.Endpoint, +# force_ssl: [hsts: true] +# +# Check `Plug.SSL` for all available options in `force_ssl`. diff --git a/config/runtime.exs b/config/runtime.exs new file mode 100644 index 0000000..f930961 --- /dev/null +++ b/config/runtime.exs @@ -0,0 +1,65 @@ +import Config + +# config/runtime.exs is executed for all environments, including +# during releases. It is executed after compilation and before the +# system starts, so it is typically used to load production configuration +# and secrets from environment variables or elsewhere. Do not define +# any compile-time configuration in here, as it won't be applied. +# The block below contains prod specific runtime configuration. + +# ## Using releases +# +# If you use `mix release`, you need to explicitly enable the server +# by passing the PHX_SERVER=true when you start it: +# +# PHX_SERVER=true bin/app start +# +# Alternatively, you can use `mix phx.gen.release` to generate a `bin/server` +# script that automatically sets the env var above. +if System.get_env("PHX_SERVER") do + config :app, AppWeb.Endpoint, server: true +end + +if config_env() == :prod do + database_url = + System.get_env("DATABASE_URL") || + raise """ + environment variable DATABASE_URL is missing. + For example: ecto://USER:PASS@HOST/DATABASE + """ + + maybe_ipv6 = if System.get_env("ECTO_IPV6"), do: [:inet6], else: [] + + config :app, App.Repo, + # ssl: true, + url: database_url, + pool_size: String.to_integer(System.get_env("POOL_SIZE") || "10"), + socket_options: maybe_ipv6 + + # The secret key base is used to sign/encrypt cookies and other secrets. 
+ # A default value is used in config/dev.exs and config/test.exs but you + # want to use a different value for prod and you most likely don't want + # to check this value into version control, so we use an environment + # variable instead. + secret_key_base = + System.get_env("SECRET_KEY_BASE") || + raise """ + environment variable SECRET_KEY_BASE is missing. + You can generate one by calling: mix phx.gen.secret + """ + + host = System.get_env("PHX_HOST") || "example.com" + port = String.to_integer(System.get_env("PORT") || "4000") + + config :app, AppWeb.Endpoint, + url: [host: host, port: 443, scheme: "https"], + http: [ + # Enable IPv6 and bind on all interfaces. + # Set it to {0, 0, 0, 0, 0, 0, 0, 1} for local network only access. + # See the documentation on https://hexdocs.pm/plug_cowboy/Plug.Cowboy.html + # for details about using IPv6 vs IPv4 and loopback vs public addresses. + ip: {0, 0, 0, 0, 0, 0, 0, 0}, + port: port + ], + secret_key_base: secret_key_base +end diff --git a/config/test.exs b/config/test.exs new file mode 100644 index 0000000..7a78cd3 --- /dev/null +++ b/config/test.exs @@ -0,0 +1,27 @@ +import Config + +# Configure your database +# +# The MIX_TEST_PARTITION environment variable can be used +# to provide built-in test partitioning in CI environment. +# Run `mix help test` for more information. +config :app, App.Repo, + username: "postgres", + password: "postgres", + hostname: "localhost", + database: "app_test#{System.get_env("MIX_TEST_PARTITION")}", + pool: Ecto.Adapters.SQL.Sandbox, + pool_size: 10 + +# We don't run a server during test. If one is required, +# you can enable the server option below. +config :app, AppWeb.Endpoint, + http: [ip: {127, 0, 0, 1}, port: 4002], + secret_key_base: "fmXvsbiTUn/EzMg3uCwbZiKAjQgbkH5t56GZF3INCtW0YIOl7M3q2V4Mrgji0LVK", + server: false + +# Print only warnings and errors during test +config :logger, level: :warn + +# Initialize plugs at runtime for faster test compilation +config :phoenix, :plug_init_mode, :runtime diff --git a/deps/castore/.fetch b/deps/castore/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/castore/.hex b/deps/castore/.hex new file mode 100644 index 0000000000000000000000000000000000000000..0f341998a2f8b137a15b4b8dbf631d408becdd9c GIT binary patch literal 272 zcmZ9{!BWF83Bx30J2(D>k!*>i;G~nJOz-?S&XwyP_U---hv8VvagYZc z$ZW=#Q!#JjG|BRj2gzUSz1Eh#7vaD?F}MyOpk>q$A*q*$2%3G!a*)(&|J)GrHMXeS-)G~Q$y?cxEAv~QI(xIXIi&;k>=0$?Z4kdCdDo|j>(B8VDEF` zuC1`7oE50IiMk*xQKDCCJrqPMhy>W6MQ-yBy|lNtTFm*UjTz@@&rWc$7n}Y7DYZ)? literal 0 HcmV?d00001 diff --git a/deps/castore/README.md b/deps/castore/README.md new file mode 100644 index 0000000..5e72e2c --- /dev/null +++ b/deps/castore/README.md @@ -0,0 +1,48 @@ +# CAStore + +Up-to-date CA certificate store. + +## Installation + +In your `mix.exs`: + +```elixir +def deps do + [ + {:castore, "~> 0.1.0"} + ] +end +``` + +Then, run `$ mix deps.get`. + +## Usage + +This is a micro-library whose only job is storing an up-to-date CA certificate store. The only provided function is `CAStore.file_path/0`, which returns the path of the CA certificate store file. + +```elixir +CAStore.file_path() +#=> /Users/me/castore/_build/dev/lib/castore/priv/cacerts.pem" +``` + +See [the documentation](https://hexdocs.pm/castore). + +## Contributing + +If you want to locally update the CA certificate store file bundled with this library, run the `mix certdata` from the root of this library. 
+ +## License + +Copyright 2018 Eric Meadows-Jönsson and Andrea Leopardi + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/deps/castore/hex_metadata.config b/deps/castore/hex_metadata.config new file mode 100644 index 0000000..9b6b12a --- /dev/null +++ b/deps/castore/hex_metadata.config @@ -0,0 +1,12 @@ +{<<"app">>,<<"castore">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>,<<"Up-to-date CA certificate store.">>}. +{<<"elixir">>,<<"~> 1.0">>}. +{<<"files">>, + [<<"lib/castore.ex">>,<<"priv">>,<<"priv/cacerts.pem">>,<<"mix.exs">>, + <<"README.md">>]}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"links">>,[{<<"GitHub">>,<<"https://github.com/elixir-mint/castore">>}]}. +{<<"name">>,<<"castore">>}. +{<<"requirements">>,[]}. +{<<"version">>,<<"0.1.18">>}. diff --git a/deps/castore/lib/castore.ex b/deps/castore/lib/castore.ex new file mode 100644 index 0000000..853035c --- /dev/null +++ b/deps/castore/lib/castore.ex @@ -0,0 +1,24 @@ +defmodule CAStore do + @moduledoc """ + Functionality to retrieve the up-to-date CA certificate store. + + The only purpose of this library is to keep an up-to-date CA certificate store file. + This is why this module only provides one function, `file_path/0`, to access the path of + the CA certificate store file. You can then read this file and use its contents for your + own purposes. + """ + + @doc """ + Returns the path to the CA certificate store PEM file. + + ## Examples + + CAStore.file_path() + #=> /Users/me/castore/_build/dev/lib/castore/priv/cacerts.pem" + + """ + @spec file_path() :: Path.t() + def file_path() do + Application.app_dir(:castore, "priv/cacerts.pem") + end +end diff --git a/deps/castore/mix.exs b/deps/castore/mix.exs new file mode 100644 index 0000000..7ef4f62 --- /dev/null +++ b/deps/castore/mix.exs @@ -0,0 +1,48 @@ +defmodule CAStore.MixProject do + use Mix.Project + + @version "0.1.18" + @repo_url "https://github.com/elixir-mint/castore" + + def project do + [ + app: :castore, + version: @version, + elixir: "~> 1.0", + start_permanent: Mix.env() == :prod, + deps: deps(), + xref: [exclude: [:public_key]], + + # Hex + package: package(), + description: "Up-to-date CA certificate store.", + + # Docs + name: "CAStore", + docs: [ + source_ref: "v#{@version}", + source_url: @repo_url + ] + ] + end + + def application do + [ + extra_applications: [:logger] + ] + end + + defp deps do + [ + {:ex_doc, "~> 0.22", only: :dev} + ] + end + + defp package do + [ + files: ["lib/castore.ex", "priv", "mix.exs", "README.md"], + licenses: ["Apache-2.0"], + links: %{"GitHub" => @repo_url} + ] + end +end diff --git a/deps/castore/priv/cacerts.pem b/deps/castore/priv/cacerts.pem new file mode 100644 index 0000000..e7e44f4 --- /dev/null +++ b/deps/castore/priv/cacerts.pem @@ -0,0 +1,3460 @@ +## +## Bundle of CA Root Certificates +## +## Certificate data from Mozilla as of: Tue Jul 19 03:28:20 2022 GMT +## +## This is a bundle of X.509 certificates of public Certificate Authorities +## (CA).
These were automatically extracted from Mozilla's root certificates +## file (certdata.txt). This file can be found in the mozilla source tree: +## https://hg.mozilla.org/releases/mozilla-release/raw-file/default/security/nss/lib/ckfw/builtins/certdata.txt +## +## It contains the certificates in PEM format and therefore +## can be directly used with curl / libcurl / php_curl, or with +## an Apache+mod_ssl webserver for SSL client authentication. +## Just configure this file as the SSLCACertificateFile. +## +## Conversion done with mk-ca-bundle.pl version 1.29. +## SHA256: 9bf3799611fb58197f61d45e71ce3dc19f30e7dd73731915872ce5108a7bb066 +## + + +GlobalSign Root CA +================== +-----BEGIN CERTIFICATE----- +MIIDdTCCAl2gAwIBAgILBAAAAAABFUtaw5QwDQYJKoZIhvcNAQEFBQAwVzELMAkGA1UEBhMCQkUx +GTAXBgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExEDAOBgNVBAsTB1Jvb3QgQ0ExGzAZBgNVBAMTEkds +b2JhbFNpZ24gUm9vdCBDQTAeFw05ODA5MDExMjAwMDBaFw0yODAxMjgxMjAwMDBaMFcxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRAwDgYDVQQLEwdSb290IENBMRswGQYD +VQQDExJHbG9iYWxTaWduIFJvb3QgQ0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDa +DuaZjc6j40+Kfvvxi4Mla+pIH/EqsLmVEQS98GPR4mdmzxzdzxtIK+6NiY6arymAZavpxy0Sy6sc +THAHoT0KMM0VjU/43dSMUBUc71DuxC73/OlS8pF94G3VNTCOXkNz8kHp1Wrjsok6Vjk4bwY8iGlb +Kk3Fp1S4bInMm/k8yuX9ifUSPJJ4ltbcdG6TRGHRjcdGsnUOhugZitVtbNV4FpWi6cgKOOvyJBNP +c1STE4U6G7weNLWLBYy5d4ux2x8gkasJU26Qzns3dLlwR5EiUWMWea6xrkEmCMgZK9FGqkjWZCrX +gzT/LCrBbBlDSgeF59N89iFo7+ryUp9/k5DPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV +HRMBAf8EBTADAQH/MB0GA1UdDgQWBBRge2YaRQ2XyolQL30EzTSo//z9SzANBgkqhkiG9w0BAQUF +AAOCAQEA1nPnfE920I2/7LqivjTFKDK1fPxsnCwrvQmeU79rXqoRSLblCKOzyj1hTdNGCbM+w6Dj +Y1Ub8rrvrTnhQ7k4o+YviiY776BQVvnGCv04zcQLcFGUl5gE38NflNUVyRRBnMRddWQVDf9VMOyG +j/8N7yy5Y0b2qvzfvGn9LhJIZJrglfCm7ymPAbEVtQwdpf5pLGkkeB6zpxxxYu7KyJesF12KwvhH +hm4qxFYxldBniYUr+WymXUadDKqC5JlR3XC321Y9YeRq4VzW9v493kHMB65jUr9TU/Qr6cf9tveC +X4XSQRjbgbMEHMUfpIBvFSDJ3gyICh3WZlXi/EjJKSZp4A== +-----END CERTIFICATE----- + +Entrust.net Premium 2048 Secure Server CA +========================================= +-----BEGIN CERTIFICATE----- +MIIEKjCCAxKgAwIBAgIEOGPe+DANBgkqhkiG9w0BAQUFADCBtDEUMBIGA1UEChMLRW50cnVzdC5u +ZXQxQDA+BgNVBAsUN3d3dy5lbnRydXN0Lm5ldC9DUFNfMjA0OCBpbmNvcnAuIGJ5IHJlZi4gKGxp +bWl0cyBsaWFiLikxJTAjBgNVBAsTHChjKSAxOTk5IEVudHJ1c3QubmV0IExpbWl0ZWQxMzAxBgNV +BAMTKkVudHJ1c3QubmV0IENlcnRpZmljYXRpb24gQXV0aG9yaXR5ICgyMDQ4KTAeFw05OTEyMjQx +NzUwNTFaFw0yOTA3MjQxNDE1MTJaMIG0MRQwEgYDVQQKEwtFbnRydXN0Lm5ldDFAMD4GA1UECxQ3 +d3d3LmVudHJ1c3QubmV0L0NQU18yMDQ4IGluY29ycC4gYnkgcmVmLiAobGltaXRzIGxpYWIuKTEl +MCMGA1UECxMcKGMpIDE5OTkgRW50cnVzdC5uZXQgTGltaXRlZDEzMDEGA1UEAxMqRW50cnVzdC5u +ZXQgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgKDIwNDgpMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEArU1LqRKGsuqjIAcVFmQqK0vRvwtKTY7tgHalZ7d4QMBzQshowNtTK91euHaYNZOL +Gp18EzoOH1u3Hs/lJBQesYGpjX24zGtLA/ECDNyrpUAkAH90lKGdCCmziAv1h3edVc3kw37XamSr +hRSGlVuXMlBvPci6Zgzj/L24ScF2iUkZ/cCovYmjZy/Gn7xxGWC4LeksyZB2ZnuU4q941mVTXTzW +nLLPKQP5L6RQstRIzgUyVYr9smRMDuSYB3Xbf9+5CFVghTAp+XtIpGmG4zU/HoZdenoVve8AjhUi +VBcAkCaTvA5JaJG/+EfTnZVCwQ5N328mz8MYIWJmQ3DW1cAH4QIDAQABo0IwQDAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVeSB0RGAvtiJuQijMfmhJAkWuXAwDQYJ +KoZIhvcNAQEFBQADggEBADubj1abMOdTmXx6eadNl9cZlZD7Bh/KM3xGY4+WZiT6QBshJ8rmcnPy +T/4xmf3IDExoU8aAghOY+rat2l098c5u9hURlIIM7j+VrxGrD9cv3h8Dj1csHsm7mhpElesYT6Yf +zX1XEC+bBAlahLVu2B064dae0Wx5XnkcFMXj0EyTO2U87d89vqbllRrDtRnDvV5bu/8j72gZyxKT +J1wDLW8w0B62GqzeWvfRqqgnpv55gcR5mTNXuhKwqeBCbJPKVt7+bYQLCIt+jerXmCHG8+c8eS9e +nNFMFY3h7CI3zJpDC5fcgJCNs2ebb0gIFVbPv/ErfF6adulZkMV8gzURZVE= 
+-----END CERTIFICATE----- + +Baltimore CyberTrust Root +========================= +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIEAgAAuTANBgkqhkiG9w0BAQUFADBaMQswCQYDVQQGEwJJRTESMBAGA1UE +ChMJQmFsdGltb3JlMRMwEQYDVQQLEwpDeWJlclRydXN0MSIwIAYDVQQDExlCYWx0aW1vcmUgQ3li +ZXJUcnVzdCBSb290MB4XDTAwMDUxMjE4NDYwMFoXDTI1MDUxMjIzNTkwMFowWjELMAkGA1UEBhMC +SUUxEjAQBgNVBAoTCUJhbHRpbW9yZTETMBEGA1UECxMKQ3liZXJUcnVzdDEiMCAGA1UEAxMZQmFs +dGltb3JlIEN5YmVyVHJ1c3QgUm9vdDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKME +uyKrmD1X6CZymrV51Cni4eiVgLGw41uOKymaZN+hXe2wCQVt2yguzmKiYv60iNoS6zjrIZ3AQSsB +UnuId9Mcj8e6uYi1agnnc+gRQKfRzMpijS3ljwumUNKoUMMo6vWrJYeKmpYcqWe4PwzV9/lSEy/C +G9VwcPCPwBLKBsua4dnKM3p31vjsufFoREJIE9LAwqSuXmD+tqYF/LTdB1kC1FkYmGP1pWPgkAx9 +XbIGevOF6uvUA65ehD5f/xXtabz5OTZydc93Uk3zyZAsuT3lySNTPx8kmCFcB5kpvcY67Oduhjpr +l3RjM71oGDHweI12v/yejl0qhqdNkNwnGjkCAwEAAaNFMEMwHQYDVR0OBBYEFOWdWTCCR1jMrPoI +VDaGezq1BE3wMBIGA1UdEwEB/wQIMAYBAf8CAQMwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEB +BQUAA4IBAQCFDF2O5G9RaEIFoN27TyclhAO992T9Ldcw46QQF+vaKSm2eT929hkTI7gQCvlYpNRh +cL0EYWoSihfVCr3FvDB81ukMJY2GQE/szKN+OMY3EU/t3WgxjkzSswF07r51XgdIGn9w/xZchMB5 +hbgF/X++ZRGjD8ACtPhSNzkE1akxehi/oCr0Epn3o0WC4zxe9Z2etciefC7IpJ5OCBRLbf1wbWsa +Y71k5h+3zvDyny67G7fyUIhzksLi4xaNmjICq44Y3ekQEe5+NauQrz4wlHrQMz2nZQ/1/I6eYs9H +RCwBXbsdtTLSR9I4LtD+gdwyah617jzV/OeBHRnDJELqYzmp +-----END CERTIFICATE----- + +Entrust Root Certification Authority +==================================== +-----BEGIN CERTIFICATE----- +MIIEkTCCA3mgAwIBAgIERWtQVDANBgkqhkiG9w0BAQUFADCBsDELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xOTA3BgNVBAsTMHd3dy5lbnRydXN0Lm5ldC9DUFMgaXMgaW5jb3Jw +b3JhdGVkIGJ5IHJlZmVyZW5jZTEfMB0GA1UECxMWKGMpIDIwMDYgRW50cnVzdCwgSW5jLjEtMCsG +A1UEAxMkRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTA2MTEyNzIwMjM0 +MloXDTI2MTEyNzIwNTM0MlowgbAxCzAJBgNVBAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMu +MTkwNwYDVQQLEzB3d3cuZW50cnVzdC5uZXQvQ1BTIGlzIGluY29ycG9yYXRlZCBieSByZWZlcmVu +Y2UxHzAdBgNVBAsTFihjKSAyMDA2IEVudHJ1c3QsIEluYy4xLTArBgNVBAMTJEVudHJ1c3QgUm9v +dCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ALaVtkNC+sZtKm9I35RMOVcF7sN5EUFoNu3s/poBj6E4KPz3EEZmLk0eGrEaTsbRwJWIsMn/MYsz +A9u3g3s+IIRe7bJWKKf44LlAcTfFy0cOlypowCKVYhXbR9n10Cv/gkvJrT7eTNuQgFA/CYqEAOww +Cj0Yzfv9KlmaI5UXLEWeH25DeW0MXJj+SKfFI0dcXv1u5x609mhF0YaDW6KKjbHjKYD+JXGIrb68 +j6xSlkuqUY3kEzEZ6E5Nn9uss2rVvDlUccp6en+Q3X0dgNmBu1kmwhH+5pPi94DkZfs0Nw4pgHBN +rziGLp5/V6+eF67rHMsoIV+2HNjnogQi+dPa2MsCAwEAAaOBsDCBrTAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zArBgNVHRAEJDAigA8yMDA2MTEyNzIwMjM0MlqBDzIwMjYxMTI3MjA1 +MzQyWjAfBgNVHSMEGDAWgBRokORnpKZTgMeGZqTx90tD+4S9bTAdBgNVHQ4EFgQUaJDkZ6SmU4DH +hmak8fdLQ/uEvW0wHQYJKoZIhvZ9B0EABBAwDhsIVjcuMTo0LjADAgSQMA0GCSqGSIb3DQEBBQUA +A4IBAQCT1DCw1wMgKtD5Y+iRDAUgqV8ZyntyTtSx29CW+1RaGSwMCPeyvIWonX9tO1KzKtvn1ISM +Y/YPyyYBkVBs9F8U4pN0wBOeMDpQ47RgxRzwIkSNcUesyBrJ6ZuaAGAT/3B+XxFNSRuzFVJ7yVTa +v52Vr2ua2J7p8eRDjeIRRDq/r72DQnNSi6q7pynP9WQcCk3RvKqsnyrQ/39/2n3qse0wJcGE2jTS +W3iDVuycNsMm4hH2Z0kdkquM++v/eu6FSqdQgPCnXEqULl8FmTxSQeDNtGPPAUO6nIPcj2A781q0 +tHuu2guQOHXvgR1m0vdXcDazv/wor3ElhVsT/h5/WrQ8 +-----END CERTIFICATE----- + +Comodo AAA Services root +======================== +-----BEGIN CERTIFICATE----- +MIIEMjCCAxqgAwIBAgIBATANBgkqhkiG9w0BAQUFADB7MQswCQYDVQQGEwJHQjEbMBkGA1UECAwS +R3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHDAdTYWxmb3JkMRowGAYDVQQKDBFDb21vZG8gQ0Eg +TGltaXRlZDEhMB8GA1UEAwwYQUFBIENlcnRpZmljYXRlIFNlcnZpY2VzMB4XDTA0MDEwMTAwMDAw +MFoXDTI4MTIzMTIzNTk1OVowezELMAkGA1UEBhMCR0IxGzAZBgNVBAgMEkdyZWF0ZXIgTWFuY2hl 
+c3RlcjEQMA4GA1UEBwwHU2FsZm9yZDEaMBgGA1UECgwRQ29tb2RvIENBIExpbWl0ZWQxITAfBgNV +BAMMGEFBQSBDZXJ0aWZpY2F0ZSBTZXJ2aWNlczCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBAL5AnfRu4ep2hxxNRUSOvkbIgwadwSr+GB+O5AL686tdUIoWMQuaBtDFcCLNSS1UY8y2bmhG +C1Pqy0wkwLxyTurxFa70VJoSCsN6sjNg4tqJVfMiWPPe3M/vg4aijJRPn2jymJBGhCfHdr/jzDUs +i14HZGWCwEiwqJH5YZ92IFCokcdmtet4YgNW8IoaE+oxox6gmf049vYnMlhvB/VruPsUK6+3qszW +Y19zjNoFmag4qMsXeDZRrOme9Hg6jc8P2ULimAyrL58OAd7vn5lJ8S3frHRNG5i1R8XlKdH5kBjH +Ypy+g8cmez6KJcfA3Z3mNWgQIJ2P2N7Sw4ScDV7oL8kCAwEAAaOBwDCBvTAdBgNVHQ4EFgQUoBEK +Iz6W8Qfs4q8p74Klf9AwpLQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wewYDVR0f +BHQwcjA4oDagNIYyaHR0cDovL2NybC5jb21vZG9jYS5jb20vQUFBQ2VydGlmaWNhdGVTZXJ2aWNl +cy5jcmwwNqA0oDKGMGh0dHA6Ly9jcmwuY29tb2RvLm5ldC9BQUFDZXJ0aWZpY2F0ZVNlcnZpY2Vz +LmNybDANBgkqhkiG9w0BAQUFAAOCAQEACFb8AvCb6P+k+tZ7xkSAzk/ExfYAWMymtrwUSWgEdujm +7l3sAg9g1o1QGE8mTgHj5rCl7r+8dFRBv/38ErjHT1r0iWAFf2C3BUrz9vHCv8S5dIa2LX1rzNLz +Rt0vxuBqw8M0Ayx9lt1awg6nCpnBBYurDC/zXDrPbDdVCYfeU0BsWO/8tqtlbgT2G9w84FoVxp7Z +8VlIMCFlA2zs6SFz7JsDoeA3raAVGI/6ugLOpyypEBMs1OUIJqsil2D4kF501KKaU73yqWjgom7C +12yxow+ev+to51byrvLjKzg6CYG1a4XXvi3tPxq3smPi9WIsgtRqAEFQ8TmDn5XpNpaYbg== +-----END CERTIFICATE----- + +QuoVadis Root CA 2 +================== +-----BEGIN CERTIFICATE----- +MIIFtzCCA5+gAwIBAgICBQkwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMjAeFw0wNjExMjQx +ODI3MDBaFw0zMTExMjQxODIzMzNaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDIwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQCaGMpLlA0ALa8DKYrwD4HIrkwZhR0In6spRIXzL4GtMh6QRr+jhiYaHv5+HBg6 +XJxgFyo6dIMzMH1hVBHL7avg5tKifvVrbxi3Cgst/ek+7wrGsxDp3MJGF/hd/aTa/55JWpzmM+Yk +lvc/ulsrHHo1wtZn/qtmUIttKGAr79dgw8eTvI02kfN/+NsRE8Scd3bBrrcCaoF6qUWD4gXmuVbB +lDePSHFjIuwXZQeVikvfj8ZaCuWw419eaxGrDPmF60Tp+ARz8un+XJiM9XOva7R+zdRcAitMOeGy +lZUtQofX1bOQQ7dsE/He3fbE+Ik/0XX1ksOR1YqI0JDs3G3eicJlcZaLDQP9nL9bFqyS2+r+eXyt +66/3FsvbzSUr5R/7mp/iUcw6UwxI5g69ybR2BlLmEROFcmMDBOAENisgGQLodKcftslWZvB1Jdxn +wQ5hYIizPtGo/KPaHbDRsSNU30R2be1B2MGyIrZTHN81Hdyhdyox5C315eXbyOD/5YDXC2Og/zOh +D7osFRXql7PSorW+8oyWHhqPHWykYTe5hnMz15eWniN9gqRMgeKh0bpnX5UHoycR7hYQe7xFSkyy +BNKr79X9DFHOUGoIMfmR2gyPZFwDwzqLID9ujWc9Otb+fVuIyV77zGHcizN300QyNQliBJIWENie +J0f7OyHj+OsdWwIDAQABo4GwMIGtMA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1Ud +DgQWBBQahGK8SEwzJQTU7tD2A8QZRtGUazBuBgNVHSMEZzBlgBQahGK8SEwzJQTU7tD2A8QZRtGU +a6FJpEcwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMT +ElF1b1ZhZGlzIFJvb3QgQ0EgMoICBQkwDQYJKoZIhvcNAQEFBQADggIBAD4KFk2fBluornFdLwUv +Z+YTRYPENvbzwCYMDbVHZF34tHLJRqUDGCdViXh9duqWNIAXINzng/iN/Ae42l9NLmeyhP3ZRPx3 +UIHmfLTJDQtyU/h2BwdBR5YM++CCJpNVjP4iH2BlfF/nJrP3MpCYUNQ3cVX2kiF495V5+vgtJodm +VjB3pjd4M1IQWK4/YY7yarHvGH5KWWPKjaJW1acvvFYfzznB4vsKqBUsfU16Y8Zsl0Q80m/DShcK ++JDSV6IZUaUtl0HaB0+pUNqQjZRG4T7wlP0QADj1O+hA4bRuVhogzG9Yje0uRY/W6ZM/57Es3zrW +IozchLsib9D45MY56QSIPMO661V6bYCZJPVsAfv4l7CUW+v90m/xd2gNNWQjrLhVoQPRTUIZ3Ph1 +WVaj+ahJefivDrkRoHy3au000LYmYjgahwz46P0u05B/B5EqHdZ+XIWDmbA4CD/pXvk1B+TJYm5X +f6dQlfe6yJvmjqIBxdZmv3lh8zwc4bmCXF2gw+nYSL0ZohEUGW6yhhtoPkg3Goi3XZZenMfvJ2II +4pEZXNLxId26F0KCl3GBUzGpn/Z9Yr9y4aOTHcyKJloJONDO1w2AFrR4pTqHTI2KpdVGl/IsELm8 +VCLAAVBpQ570su9t+Oza8eOx79+Rj1QqCyXBJhnEUhAFZdWCEOrCMc0u +-----END CERTIFICATE----- + +QuoVadis Root CA 3 +================== +-----BEGIN CERTIFICATE----- +MIIGnTCCBIWgAwIBAgICBcYwDQYJKoZIhvcNAQEFBQAwRTELMAkGA1UEBhMCQk0xGTAXBgNVBAoT +EFF1b1ZhZGlzIExpbWl0ZWQxGzAZBgNVBAMTElF1b1ZhZGlzIFJvb3QgQ0EgMzAeFw0wNjExMjQx 
+OTExMjNaFw0zMTExMjQxOTA2NDRaMEUxCzAJBgNVBAYTAkJNMRkwFwYDVQQKExBRdW9WYWRpcyBM +aW1pdGVkMRswGQYDVQQDExJRdW9WYWRpcyBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDMV0IWVJzmmNPTTe7+7cefQzlKZbPoFog02w1ZkXTPkrgEQK0CSzGrvI2RaNgg +DhoB4hp7Thdd4oq3P5kazethq8Jlph+3t723j/z9cI8LoGe+AaJZz3HmDyl2/7FWeUUrH556VOij +KTVopAFPD6QuN+8bv+OPEKhyq1hX51SGyMnzW9os2l2ObjyjPtr7guXd8lyyBTNvijbO0BNO/79K +DDRMpsMhvVAEVeuxu537RR5kFd5VAYwCdrXLoT9CabwvvWhDFlaJKjdhkf2mrk7AyxRllDdLkgbv +BNDInIjbC3uBr7E9KsRlOni27tyAsdLTmZw67mtaa7ONt9XOnMK+pUsvFrGeaDsGb659n/je7Mwp +p5ijJUMv7/FfJuGITfhebtfZFG4ZM2mnO4SJk8RTVROhUXhA+LjJou57ulJCg54U7QVSWllWp5f8 +nT8KKdjcT5EOE7zelaTfi5m+rJsziO+1ga8bxiJTyPbH7pcUsMV8eFLI8M5ud2CEpukqdiDtWAEX +MJPpGovgc2PZapKUSU60rUqFxKMiMPwJ7Wgic6aIDFUhWMXhOp8q3crhkODZc6tsgLjoC2SToJyM +Gf+z0gzskSaHirOi4XCPLArlzW1oUevaPwV/izLmE1xr/l9A4iLItLRkT9a6fUg+qGkM17uGcclz +uD87nSVL2v9A6wIDAQABo4IBlTCCAZEwDwYDVR0TAQH/BAUwAwEB/zCB4QYDVR0gBIHZMIHWMIHT +BgkrBgEEAb5YAAMwgcUwgZMGCCsGAQUFBwICMIGGGoGDQW55IHVzZSBvZiB0aGlzIENlcnRpZmlj +YXRlIGNvbnN0aXR1dGVzIGFjY2VwdGFuY2Ugb2YgdGhlIFF1b1ZhZGlzIFJvb3QgQ0EgMyBDZXJ0 +aWZpY2F0ZSBQb2xpY3kgLyBDZXJ0aWZpY2F0aW9uIFByYWN0aWNlIFN0YXRlbWVudC4wLQYIKwYB +BQUHAgEWIWh0dHA6Ly93d3cucXVvdmFkaXNnbG9iYWwuY29tL2NwczALBgNVHQ8EBAMCAQYwHQYD +VR0OBBYEFPLAE+CCQz777i9nMpY1XNu4ywLQMG4GA1UdIwRnMGWAFPLAE+CCQz777i9nMpY1XNu4 +ywLQoUmkRzBFMQswCQYDVQQGEwJCTTEZMBcGA1UEChMQUXVvVmFkaXMgTGltaXRlZDEbMBkGA1UE +AxMSUXVvVmFkaXMgUm9vdCBDQSAzggIFxjANBgkqhkiG9w0BAQUFAAOCAgEAT62gLEz6wPJv92ZV +qyM07ucp2sNbtrCD2dDQ4iH782CnO11gUyeim/YIIirnv6By5ZwkajGxkHon24QRiSemd1o417+s +hvzuXYO8BsbRd2sPbSQvS3pspweWyuOEn62Iix2rFo1bZhfZFvSLgNLd+LJ2w/w4E6oM3kJpK27z +POuAJ9v1pkQNn1pVWQvVDVJIxa6f8i+AxeoyUDUSly7B4f/xI4hROJ/yZlZ25w9Rl6VSDE1JUZU2 +Pb+iSwwQHYaZTKrzchGT5Or2m9qoXadNt54CrnMAyNojA+j56hl0YgCUyyIgvpSnWbWCar6ZeXqp +8kokUvd0/bpO5qgdAm6xDYBEwa7TIzdfu4V8K5Iu6H6li92Z4b8nby1dqnuH/grdS/yO9SbkbnBC +bjPsMZ57k8HkyWkaPcBrTiJt7qtYTcbQQcEr6k8Sh17rRdhs9ZgC06DYVYoGmRmioHfRMJ6szHXu +g/WwYjnPbFfiTNKRCw51KBuav/0aQ/HKd/s7j2G4aSgWQgRecCocIdiP4b0jWy10QJLZYxkNc91p +vGJHvOB0K7Lrfb5BG7XARsWhIstfTsEokt4YutUqKLsRixeTmJlglFwjz1onl14LBQaTNx47aTbr +qZ5hHY8y2o4M1nQ+ewkk2gF3R8Q7zTSMmfXK4SVhM7JZG+Ju1zdXtg2pEto= +-----END CERTIFICATE----- + +Security Communication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIDWjCCAkKgAwIBAgIBADANBgkqhkiG9w0BAQUFADBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +HhcNMDMwOTMwMDQyMDQ5WhcNMjMwOTMwMDQyMDQ5WjBQMQswCQYDVQQGEwJKUDEYMBYGA1UEChMP +U0VDT00gVHJ1c3QubmV0MScwJQYDVQQLEx5TZWN1cml0eSBDb21tdW5pY2F0aW9uIFJvb3RDQTEw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCzs/5/022x7xZ8V6UMbXaKL0u/ZPtM7orw +8yl89f/uKuDp6bpbZCKamm8sOiZpUQWZJtzVHGpxxpp9Hp3dfGzGjGdnSj74cbAZJ6kJDKaVv0uM +DPpVmDvY6CKhS3E4eayXkmmziX7qIWgGmBSWh9JhNrxtJ1aeV+7AwFb9Ms+k2Y7CI9eNqPPYJayX +5HA49LY6tJ07lyZDo6G8SVlyTCMwhwFY9k6+HGhWZq/NQV3Is00qVUarH9oe4kA92819uZKAnDfd +DJZkndwi92SL32HeFZRSFaB9UslLqCHJxrHty8OVYNEP8Ktw+N/LTX7s1vqr2b1/VPKl6Xn62dZ2 +JChzAgMBAAGjPzA9MB0GA1UdDgQWBBSgc0mZaNyFW2XjmygvV5+9M7wHSDALBgNVHQ8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaECpqLvkT115swW1F7NgE+vGkl3g +0dNq/vu+m22/xwVtWSDEHPC32oRYAmP6SBbvT6UL90qY8j+eG61Ha2POCEfrUj94nK9NrvjVT8+a +mCoQQTlSxN3Zmw7vkwGusi7KaEIkQmywszo+zenaSMQVy+n5Bw+SUEmK3TGXX8npN6o7WWWXlDLJ +s58+OmJYxUmtYg5xpTKqL8aJdkNAExNnPaJUJRDL8Try2frbSVa7pv6nQTXD4IhhyYjH3zYQIphZ +6rBK+1YWc26sTfcioU+tHXotRSflMMFe8toTyyVCUZVHA4xsIcx0Qu1T/zOLjw9XARYvz6buyXAi +FL39vmwLAw== +-----END CERTIFICATE----- + +XRamp Global CA Root +==================== 
+-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIQUJRs7Bjq1ZxN1ZfvdY+grTANBgkqhkiG9w0BAQUFADCBgjELMAkGA1UE +BhMCVVMxHjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2Vj +dXJpdHkgU2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDQxMTAxMTcxNDA0WhcNMzUwMTAxMDUzNzE5WjCBgjELMAkGA1UEBhMCVVMx +HjAcBgNVBAsTFXd3dy54cmFtcHNlY3VyaXR5LmNvbTEkMCIGA1UEChMbWFJhbXAgU2VjdXJpdHkg +U2VydmljZXMgSW5jMS0wKwYDVQQDEyRYUmFtcCBHbG9iYWwgQ2VydGlmaWNhdGlvbiBBdXRob3Jp +dHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCYJB69FbS638eMpSe2OAtp87ZOqCwu +IR1cRN8hXX4jdP5efrRKt6atH67gBhbim1vZZ3RrXYCPKZ2GG9mcDZhtdhAoWORlsH9KmHmf4MMx +foArtYzAQDsRhtDLooY2YKTVMIJt2W7QDxIEM5dfT2Fa8OT5kavnHTu86M/0ay00fOJIYRyO82FE +zG+gSqmUsE3a56k0enI4qEHMPJQRfevIpoy3hsvKMzvZPTeL+3o+hiznc9cKV6xkmxnr9A8ECIqs +AxcZZPRaJSKNNCyy9mgdEm3Tih4U2sSPpuIjhdV6Db1q4Ons7Be7QhtnqiXtRYMh/MHJfNViPvry +xS3T/dRlAgMBAAGjgZ8wgZwwEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFMZPoj0GY4QJnM5i5ASsjVy16bYbMDYGA1UdHwQvMC0wK6Ap +oCeGJWh0dHA6Ly9jcmwueHJhbXBzZWN1cml0eS5jb20vWEdDQS5jcmwwEAYJKwYBBAGCNxUBBAMC +AQEwDQYJKoZIhvcNAQEFBQADggEBAJEVOQMBG2f7Shz5CmBbodpNl2L5JFMn14JkTpAuw0kbK5rc +/Kh4ZzXxHfARvbdI4xD2Dd8/0sm2qlWkSLoC295ZLhVbO50WfUfXN+pfTXYSNrsf16GBBEYgoyxt +qZ4Bfj8pzgCT3/3JknOJiWSe5yvkHJEs0rnOfc5vMZnT5r7SHpDwCRR5XCOrTdLaIR9NmXmd4c8n +nxCbHIgNsIpkQTG4DmyQJKSbXHGPurt+HBvbaoAPIbzp26a3QPSyi6mx5O+aGtA9aZnuqCij4Tyz +8LIRnM98QObd50N9otg6tamN8jSZxNQQ4Qb9CYQQO+7ETPTsJ3xCwnR8gooJybQDJbw= +-----END CERTIFICATE----- + +Go Daddy Class 2 CA +=================== +-----BEGIN CERTIFICATE----- +MIIEADCCAuigAwIBAgIBADANBgkqhkiG9w0BAQUFADBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMY +VGhlIEdvIERhZGR5IEdyb3VwLCBJbmMuMTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRp +ZmljYXRpb24gQXV0aG9yaXR5MB4XDTA0MDYyOTE3MDYyMFoXDTM0MDYyOTE3MDYyMFowYzELMAkG +A1UEBhMCVVMxITAfBgNVBAoTGFRoZSBHbyBEYWRkeSBHcm91cCwgSW5jLjExMC8GA1UECxMoR28g +RGFkZHkgQ2xhc3MgMiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTCCASAwDQYJKoZIhvcNAQEBBQAD +ggENADCCAQgCggEBAN6d1+pXGEmhW+vXX0iG6r7d/+TvZxz0ZWizV3GgXne77ZtJ6XCAPVYYYwhv +2vLM0D9/AlQiVBDYsoHUwHU9S3/Hd8M+eKsaA7Ugay9qK7HFiH7Eux6wwdhFJ2+qN1j3hybX2C32 +qRe3H3I2TqYXP2WYktsqbl2i/ojgC95/5Y0V4evLOtXiEqITLdiOr18SPaAIBQi2XKVlOARFmR6j +YGB0xUGlcmIbYsUfb18aQr4CUWWoriMYavx4A6lNf4DD+qta/KFApMoZFv6yyO9ecw3ud72a9nmY +vLEHZ6IVDd2gWMZEewo+YihfukEHU1jPEX44dMX4/7VpkI+EdOqXG68CAQOjgcAwgb0wHQYDVR0O +BBYEFNLEsNKR1EwRcbNhyz2h/t2oatTjMIGNBgNVHSMEgYUwgYKAFNLEsNKR1EwRcbNhyz2h/t2o +atTjoWekZTBjMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYVGhlIEdvIERhZGR5IEdyb3VwLCBJbmMu +MTEwLwYDVQQLEyhHbyBEYWRkeSBDbGFzcyAyIENlcnRpZmljYXRpb24gQXV0aG9yaXR5ggEAMAwG +A1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBADJL87LKPpH8EsahB4yOd6AzBhRckB4Y9wim +PQoZ+YeAEW5p5JYXMP80kWNyOO7MHAGjHZQopDH2esRU1/blMVgDoszOYtuURXO1v0XJJLXVggKt +I3lpjbi2Tc7PTMozI+gciKqdi0FuFskg5YmezTvacPd+mSYgFFQlq25zheabIZ0KbIIOqPjCDPoQ +HmyW74cNxA9hi63ugyuV+I6ShHI56yDqg+2DzZduCLzrTia2cyvk0/ZM/iZx4mERdEr/VxqHD3VI +Ls9RaRegAhJhldXRQLIQTO7ErBBDpqWeCtWVYpoNz4iCxTIM5CufReYNnyicsbkqWletNw+vHX/b +vZ8= +-----END CERTIFICATE----- + +Starfield Class 2 CA +==================== +-----BEGIN CERTIFICATE----- +MIIEDzCCAvegAwIBAgIBADANBgkqhkiG9w0BAQUFADBoMQswCQYDVQQGEwJVUzElMCMGA1UEChMc +U3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAGA1UECxMpU3RhcmZpZWxkIENsYXNzIDIg +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkwHhcNMDQwNjI5MTczOTE2WhcNMzQwNjI5MTczOTE2WjBo +MQswCQYDVQQGEwJVUzElMCMGA1UEChMcU3RhcmZpZWxkIFRlY2hub2xvZ2llcywgSW5jLjEyMDAG +A1UECxMpU3RhcmZpZWxkIENsYXNzIDIgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEgMA0GCSqG 
+SIb3DQEBAQUAA4IBDQAwggEIAoIBAQC3Msj+6XGmBIWtDBFk385N78gDGIc/oav7PKaf8MOh2tTY +bitTkPskpD6E8J7oX+zlJ0T1KKY/e97gKvDIr1MvnsoFAZMej2YcOadN+lq2cwQlZut3f+dZxkqZ +JRRU6ybH838Z1TBwj6+wRir/resp7defqgSHo9T5iaU0X9tDkYI22WY8sbi5gv2cOj4QyDvvBmVm +epsZGD3/cVE8MC5fvj13c7JdBmzDI1aaK4UmkhynArPkPw2vCHmCuDY96pzTNbO8acr1zJ3o/WSN +F4Azbl5KXZnJHoe0nRrA1W4TNSNe35tfPe/W93bC6j67eA0cQmdrBNj41tpvi/JEoAGrAgEDo4HF +MIHCMB0GA1UdDgQWBBS/X7fRzt0fhvRbVazc1xDCDqmI5zCBkgYDVR0jBIGKMIGHgBS/X7fRzt0f +hvRbVazc1xDCDqmI56FspGowaDELMAkGA1UEBhMCVVMxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNo +bm9sb2dpZXMsIEluYy4xMjAwBgNVBAsTKVN0YXJmaWVsZCBDbGFzcyAyIENlcnRpZmljYXRpb24g +QXV0aG9yaXR5ggEAMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAAWdP4id0ckaVaGs +afPzWdqbAYcaT1epoXkJKtv3L7IezMdeatiDh6GX70k1PncGQVhiv45YuApnP+yz3SFmH8lU+nLM +PUxA2IGvd56Deruix/U0F47ZEUD0/CwqTRV/p2JdLiXTAAsgGh1o+Re49L2L7ShZ3U0WixeDyLJl +xy16paq8U4Zt3VekyvggQQto8PT7dL5WXXp59fkdheMtlb71cZBDzI0fmgAKhynpVSJYACPq4xJD +KVtHCN2MQWplBqjlIapBtJUhlbl90TSrE9atvNziPTnNvT51cKEYWQPJIrSPnNVeKtelttQKbfi3 +QBFGmh95DmK/D5fs4C8fF5Q= +-----END CERTIFICATE----- + +DigiCert Assured ID Root CA +=========================== +-----BEGIN CERTIFICATE----- +MIIDtzCCAp+gAwIBAgIQDOfg5RfYRv6P5WD8G/AwOTANBgkqhkiG9w0BAQUFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw +IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0EwHhcNMDYxMTEwMDAwMDAwWhcNMzEx +MTEwMDAwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgQ0Ew +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCtDhXO5EOAXLGH87dg+XESpa7cJpSIqvTO +9SA5KFhgDPiA2qkVlTJhPLWxKISKityfCgyDF3qPkKyK53lTXDGEKvYPmDI2dsze3Tyoou9q+yHy +UmHfnyDXH+Kx2f4YZNISW1/5WBg1vEfNoTb5a3/UsDg+wRvDjDPZ2C8Y/igPs6eD1sNuRMBhNZYW +/lmci3Zt1/GiSw0r/wty2p5g0I6QNcZ4VYcgoc/lbQrISXwxmDNsIumH0DJaoroTghHtORedmTpy +oeb6pNnVFzF1roV9Iq4/AUaG9ih5yLHa5FcXxH4cDrC0kqZWs72yl+2qp/C3xag/lRbQ/6GW6whf +GHdPAgMBAAGjYzBhMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRF +66Kv9JLLgjEtUYunpyGd823IDzAfBgNVHSMEGDAWgBRF66Kv9JLLgjEtUYunpyGd823IDzANBgkq +hkiG9w0BAQUFAAOCAQEAog683+Lt8ONyc3pklL/3cmbYMuRCdWKuh+vy1dneVrOfzM4UKLkNl2Bc +EkxY5NM9g0lFWJc1aRqoR+pWxnmrEthngYTffwk8lOa4JiwgvT2zKIn3X/8i4peEH+ll74fg38Fn +SbNd67IJKusm7Xi+fT8r87cmNW1fiQG2SVufAQWbqz0lwcy2f8Lxb4bG+mRo64EtlOtCt/qMHt1i +8b5QZ7dsvfPxH2sMNgcWfzd8qVttevESRmCD1ycEvkvOl77DZypoEd+A5wwzZr8TDRRu838fYxAe ++o0bJW1sj6W3YQGx0qMmoRBxna3iw/nDmVG3KwcIzi7mULKn+gpFL6Lw8g== +-----END CERTIFICATE----- + +DigiCert Global Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIDrzCCApegAwIBAgIQCDvgVpBCRrGhdWrJWZHHSjANBgkqhkiG9w0BAQUFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBDQTAeFw0wNjExMTAwMDAwMDBaFw0zMTExMTAw +MDAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IENBMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA4jvhEXLeqKTTo1eqUKKPC3eQyaKl7hLOllsBCSDMAZOn +TjC3U/dDxGkAV53ijSLdhwZAAIEJzs4bg7/fzTtxRuLWZscFs3YnFo97nh6Vfe63SKMI2tavegw5 +BmV/Sl0fvBf4q77uKNd0f3p4mVmFaG5cIzJLv07A6Fpt43C/dxC//AH2hdmoRBBYMql1GNXRor5H +4idq9Joz+EkIYIvUX7Q6hL+hqkpMfT7PT19sdl6gSzeRntwi5m3OFBqOasv+zbMUZBfHWymeMr/y +7vrTC0LUq7dBMtoM1O/4gdW7jVg/tRvoSSiicNoxBN33shbyTApOB6jtSj1etX+jkMOvJwIDAQAB +o2MwYTAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA95QNVbRTLtm +8KPiGxvDl7I90VUwHwYDVR0jBBgwFoAUA95QNVbRTLtm8KPiGxvDl7I90VUwDQYJKoZIhvcNAQEF 
+BQADggEBAMucN6pIExIK+t1EnE9SsPTfrgT1eXkIoyQY/EsrhMAtudXH/vTBH1jLuG2cenTnmCmr +EbXjcKChzUyImZOMkXDiqw8cvpOp/2PV5Adg06O/nVsJ8dWO41P0jmP6P6fbtGbfYmbW0W5BjfIt +tep3Sp+dWOIrWcBAI+0tKIJFPnlUkiaY4IBIqDfv8NZ5YBberOgOzW6sRBc4L0na4UU+Krk2U886 +UAb3LujEV0lsYSEY1QSteDwsOoBrp+uvFRTp2InBuThs4pFsiv9kuXclVzDAGySj4dzp30d8tbQk +CAUw7C29C79Fv1C5qfPrmAESrciIxpg0X40KPMbp1ZWVbd4= +-----END CERTIFICATE----- + +DigiCert High Assurance EV Root CA +================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIQAqxcJmoLQJuPC3nyrkYldzANBgkqhkiG9w0BAQUFADBsMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSsw +KQYDVQQDEyJEaWdpQ2VydCBIaWdoIEFzc3VyYW5jZSBFViBSb290IENBMB4XDTA2MTExMDAwMDAw +MFoXDTMxMTExMDAwMDAwMFowbDELMAkGA1UEBhMCVVMxFTATBgNVBAoTDERpZ2lDZXJ0IEluYzEZ +MBcGA1UECxMQd3d3LmRpZ2ljZXJ0LmNvbTErMCkGA1UEAxMiRGlnaUNlcnQgSGlnaCBBc3N1cmFu +Y2UgRVYgUm9vdCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMbM5XPm+9S75S0t +Mqbf5YE/yc0lSbZxKsPVlDRnogocsF9ppkCxxLeyj9CYpKlBWTrT3JTWPNt0OKRKzE0lgvdKpVMS +OO7zSW1xkX5jtqumX8OkhPhPYlG++MXs2ziS4wblCJEMxChBVfvLWokVfnHoNb9Ncgk9vjo4UFt3 +MRuNs8ckRZqnrG0AFFoEt7oT61EKmEFBIk5lYYeBQVCmeVyJ3hlKV9Uu5l0cUyx+mM0aBhakaHPQ +NAQTXKFx01p8VdteZOE3hzBWBOURtCmAEvF5OYiiAhF8J2a3iLd48soKqDirCmTCv2ZdlYTBoSUe +h10aUAsgEsxBu24LUTi4S8sCAwEAAaNjMGEwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMB +Af8wHQYDVR0OBBYEFLE+w2kD+L9HAdSYJhoIAu9jZCvDMB8GA1UdIwQYMBaAFLE+w2kD+L9HAdSY +JhoIAu9jZCvDMA0GCSqGSIb3DQEBBQUAA4IBAQAcGgaX3NecnzyIZgYIVyHbIUf4KmeqvxgydkAQ +V8GK83rZEWWONfqe/EW1ntlMMUu4kehDLI6zeM7b41N5cdblIZQB2lWHmiRk9opmzN6cN82oNLFp +myPInngiK3BD41VHMWEZ71jFhS9OMPagMRYjyOfiZRYzy78aG6A9+MpeizGLYAiJLQwGXFK3xPkK +mNEVX58Svnw2Yzi9RKR/5CYrCsSXaQ3pjOLAEFe4yHYSkVXySGnYvCoCWw9E1CAx2/S6cCZdkGCe +vEsXCS+0yx5DaMkHJ8HSXPfqIbloEpw8nL+e/IBcm2PN7EeqJSdnoDfzAIJ9VNep+OkuE6N36B9K +-----END CERTIFICATE----- + +SwissSign Gold CA - G2 +====================== +-----BEGIN CERTIFICATE----- +MIIFujCCA6KgAwIBAgIJALtAHEP1Xk+wMA0GCSqGSIb3DQEBBQUAMEUxCzAJBgNVBAYTAkNIMRUw +EwYDVQQKEwxTd2lzc1NpZ24gQUcxHzAdBgNVBAMTFlN3aXNzU2lnbiBHb2xkIENBIC0gRzIwHhcN +MDYxMDI1MDgzMDM1WhcNMzYxMDI1MDgzMDM1WjBFMQswCQYDVQQGEwJDSDEVMBMGA1UEChMMU3dp +c3NTaWduIEFHMR8wHQYDVQQDExZTd2lzc1NpZ24gR29sZCBDQSAtIEcyMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEAr+TufoskDhJuqVAtFkQ7kpJcyrhdhJJCEyq8ZVeCQD5XJM1QiyUq +t2/876LQwB8CJEoTlo8jE+YoWACjR8cGp4QjK7u9lit/VcyLwVcfDmJlD909Vopz2q5+bbqBHH5C +jCA12UNNhPqE21Is8w4ndwtrvxEvcnifLtg+5hg3Wipy+dpikJKVyh+c6bM8K8vzARO/Ws/BtQpg +vd21mWRTuKCWs2/iJneRjOBiEAKfNA+k1ZIzUd6+jbqEemA8atufK+ze3gE/bk3lUIbLtK/tREDF +ylqM2tIrfKjuvqblCqoOpd8FUrdVxyJdMmqXl2MT28nbeTZ7hTpKxVKJ+STnnXepgv9VHKVxaSvR +AiTysybUa9oEVeXBCsdtMDeQKuSeFDNeFhdVxVu1yzSJkvGdJo+hB9TGsnhQ2wwMC3wLjEHXuend +jIj3o02yMszYF9rNt85mndT9Xv+9lz4pded+p2JYryU0pUHHPbwNUMoDAw8IWh+Vc3hiv69yFGkO +peUDDniOJihC8AcLYiAQZzlG+qkDzAQ4embvIIO1jEpWjpEA/I5cgt6IoMPiaG59je883WX0XaxR +7ySArqpWl2/5rX3aYT+YdzylkbYcjCbaZaIJbcHiVOO5ykxMgI93e2CaHt+28kgeDrpOVG2Y4OGi +GqJ3UM/EY5LsRxmd6+ZrzsECAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUw +AwEB/zAdBgNVHQ4EFgQUWyV7lqRlUX64OfPAeGZe6Drn8O4wHwYDVR0jBBgwFoAUWyV7lqRlUX64 +OfPAeGZe6Drn8O4wRgYDVR0gBD8wPTA7BglghXQBWQECAQEwLjAsBggrBgEFBQcCARYgaHR0cDov +L3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBACe645R88a7A3hfm +5djV9VSwg/S7zV4Fe0+fdWavPOhWfvxyeDgD2StiGwC5+OlgzczOUYrHUDFu4Up+GC9pWbY9ZIEr +44OE5iKHjn3g7gKZYbge9LgriBIWhMIxkziWMaa5O1M/wySTVltpkuzFwbs4AOPsF6m43Md8AYOf +Mke6UiI0HTJ6CVanfCU2qT1L2sCCbwq7EsiHSycR+R4tx5M/nttfJmtS2S6K8RTGRI0Vqbe/vd6m 
+Gu6uLftIdxf+u+yvGPUqUfA5hJeVbG4bwyvEdGB5JbAKJ9/fXtI5z0V9QkvfsywexcZdylU6oJxp +mo/a77KwPJ+HbBIrZXAVUjEaJM9vMSNQH4xPjyPDdEFjHFWoFN0+4FFQz/EbMFYOkrCChdiDyyJk +vC24JdVUorgG6q2SpCSgwYa1ShNqR88uC1aVVMvOmttqtKay20EIhid392qgQmwLOM7XdVAyksLf +KzAiSNDVQTglXaTpXZ/GlHXQRf0wl0OPkKsKx4ZzYEppLd6leNcG2mqeSz53OiATIgHQv2ieY2Br +NU0LbbqhPcCT4H8js1WtciVORvnSFu+wZMEBnunKoGqYDs/YYPIvSbjkQuE4NRb0yG5P94FW6Lqj +viOvrv1vA+ACOzB2+httQc8Bsem4yWb02ybzOqR08kkkW8mw0FfB+j564ZfJ +-----END CERTIFICATE----- + +SwissSign Silver CA - G2 +======================== +-----BEGIN CERTIFICATE----- +MIIFvTCCA6WgAwIBAgIITxvUL1S7L0swDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCQ0gxFTAT +BgNVBAoTDFN3aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMB4X +DTA2MTAyNTA4MzI0NloXDTM2MTAyNTA4MzI0NlowRzELMAkGA1UEBhMCQ0gxFTATBgNVBAoTDFN3 +aXNzU2lnbiBBRzEhMB8GA1UEAxMYU3dpc3NTaWduIFNpbHZlciBDQSAtIEcyMIICIjANBgkqhkiG +9w0BAQEFAAOCAg8AMIICCgKCAgEAxPGHf9N4Mfc4yfjDmUO8x/e8N+dOcbpLj6VzHVxumK4DV644 +N0MvFz0fyM5oEMF4rhkDKxD6LHmD9ui5aLlV8gREpzn5/ASLHvGiTSf5YXu6t+WiE7brYT7QbNHm ++/pe7R20nqA1W6GSy/BJkv6FCgU+5tkL4k+73JU3/JHpMjUi0R86TieFnbAVlDLaYQ1HTWBCrpJH +6INaUFjpiou5XaHc3ZlKHzZnu0jkg7Y360g6rw9njxcH6ATK72oxh9TAtvmUcXtnZLi2kUpCe2Uu +MGoM9ZDulebyzYLs2aFK7PayS+VFheZteJMELpyCbTapxDFkH4aDCyr0NQp4yVXPQbBH6TCfmb5h +qAaEuSh6XzjZG6k4sIN/c8HDO0gqgg8hm7jMqDXDhBuDsz6+pJVpATqJAHgE2cn0mRmrVn5bi4Y5 +FZGkECwJMoBgs5PAKrYYC51+jUnyEEp/+dVGLxmSo5mnJqy7jDzmDrxHB9xzUfFwZC8I+bRHHTBs +ROopN4WSaGa8gzj+ezku01DwH/teYLappvonQfGbGHLy9YR0SslnxFSuSGTfjNFusB3hB48IHpmc +celM2KX3RxIfdNFRnobzwqIjQAtz20um53MGjMGg6cFZrEb65i/4z3GcRm25xBWNOHkDRUjvxF3X +CO6HOSKGsg0PWEP3calILv3q1h8CAwEAAaOBrDCBqTAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUF6DNweRBtjpbO8tFnb0cwpj6hlgwHwYDVR0jBBgwFoAUF6DNweRB +tjpbO8tFnb0cwpj6hlgwRgYDVR0gBD8wPTA7BglghXQBWQEDAQEwLjAsBggrBgEFBQcCARYgaHR0 +cDovL3JlcG9zaXRvcnkuc3dpc3NzaWduLmNvbS8wDQYJKoZIhvcNAQEFBQADggIBAHPGgeAn0i0P +4JUw4ppBf1AsX19iYamGamkYDHRJ1l2E6kFSGG9YrVBWIGrGvShpWJHckRE1qTodvBqlYJ7YH39F +kWnZfrt4csEGDyrOj4VwYaygzQu4OSlWhDJOhrs9xCrZ1x9y7v5RoSJBsXECYxqCsGKrXlcSH9/L +3XWgwF15kIwb4FDm3jH+mHtwX6WQ2K34ArZv02DdQEsixT2tOnqfGhpHkXkzuoLcMmkDlm4fS/Bx +/uNncqCxv1yL5PqZIseEuRuNI5c/7SXgz2W79WEE790eslpBIlqhn10s6FvJbakMDHiqYMZWjwFa +DGi8aRl5xB9+lwW/xekkUV7U1UtT7dkjWjYDZaPBA61BMPNGG4WQr2W11bHkFlt4dR2Xem1ZqSqP +e97Dh4kQmUlzeMg9vVE1dCrV8X5pGyq7O70luJpaPXJhkGaH7gzWTdQRdAtq/gsD/KNVV4n+Ssuu +WxcFyPKNIzFTONItaj+CuY0IavdeQXRuwxF+B6wpYJE/OMpXEA29MC/HpeZBoNquBYeaoKRlbEwJ +DIm6uNO5wJOKMPqN5ZprFQFOZ6raYlY+hAhm0sQ2fac+EPyI4NSA5QC9qvNOBqN6avlicuMJT+ub +DgEj8Z+7fNzcbBGXJbLytGMU0gYqZ4yD9c7qB9iaah7s5Aq7KkzrCWA5zspi2C5u +-----END CERTIFICATE----- + +SecureTrust CA +============== +-----BEGIN CERTIFICATE----- +MIIDuDCCAqCgAwIBAgIQDPCOXAgWpa1Cf/DrJxhZ0DANBgkqhkiG9w0BAQUFADBIMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xFzAVBgNVBAMTDlNlY3VyZVRy +dXN0IENBMB4XDTA2MTEwNzE5MzExOFoXDTI5MTIzMTE5NDA1NVowSDELMAkGA1UEBhMCVVMxIDAe +BgNVBAoTF1NlY3VyZVRydXN0IENvcnBvcmF0aW9uMRcwFQYDVQQDEw5TZWN1cmVUcnVzdCBDQTCC +ASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAKukgeWVzfX2FI7CT8rU4niVWJxB4Q2ZQCQX +OZEzZum+4YOvYlyJ0fwkW2Gz4BERQRwdbvC4u/jep4G6pkjGnx29vo6pQT64lO0pGtSO0gMdA+9t +DWccV9cGrcrI9f4Or2YlSASWC12juhbDCE/RRvgUXPLIXgGZbf2IzIaowW8xQmxSPmjL8xk037uH +GFaAJsTQ3MBv396gwpEWoGQRS0S8Hvbn+mPeZqx2pHGj7DaUaHp3pLHnDi+BeuK1cobvomuL8A/b +01k/unK8RCSc43Oz969XL0Imnal0ugBS8kvNU3xHCzaFDmapCJcWNFfBZveA4+1wVMeT4C4oFVmH +ursCAwEAAaOBnTCBmjATBgkrBgEEAYI3FAIEBh4EAEMAQTALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/ +BAUwAwEB/zAdBgNVHQ4EFgQUQjK2FvoE/f5dS3rD/fdMQB1aQ68wNAYDVR0fBC0wKzApoCegJYYj 
+aHR0cDovL2NybC5zZWN1cmV0cnVzdC5jb20vU1RDQS5jcmwwEAYJKwYBBAGCNxUBBAMCAQAwDQYJ +KoZIhvcNAQEFBQADggEBADDtT0rhWDpSclu1pqNlGKa7UTt36Z3q059c4EVlew3KW+JwULKUBRSu +SceNQQcSc5R+DCMh/bwQf2AQWnL1mA6s7Ll/3XpvXdMc9P+IBWlCqQVxyLesJugutIxq/3HcuLHf +mbx8IVQr5Fiiu1cprp6poxkmD5kuCLDv/WnPmRoJjeOnnyvJNjR7JLN4TJUXpAYmHrZkUjZfYGfZ +nMUFdAvnZyPSCPyI6a6Lf+Ew9Dd+/cYy2i2eRDAwbO4H3tI0/NL/QPZL9GZGBlSm8jIKYyYwa5vR +3ItHuuG51WLQoqD0ZwV4KWMabwTW+MZMo5qxN7SN5ShLHZ4swrhovO0C7jE= +-----END CERTIFICATE----- + +Secure Global CA +================ +-----BEGIN CERTIFICATE----- +MIIDvDCCAqSgAwIBAgIQB1YipOjUiolN9BPI8PjqpTANBgkqhkiG9w0BAQUFADBKMQswCQYDVQQG +EwJVUzEgMB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBH +bG9iYWwgQ0EwHhcNMDYxMTA3MTk0MjI4WhcNMjkxMjMxMTk1MjA2WjBKMQswCQYDVQQGEwJVUzEg +MB4GA1UEChMXU2VjdXJlVHJ1c3QgQ29ycG9yYXRpb24xGTAXBgNVBAMTEFNlY3VyZSBHbG9iYWwg +Q0EwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCvNS7YrGxVaQZx5RNoJLNP2MwhR/jx +YDiJiQPpvepeRlMJ3Fz1Wuj3RSoC6zFh1ykzTM7HfAo3fg+6MpjhHZevj8fcyTiW89sa/FHtaMbQ +bqR8JNGuQsiWUGMu4P51/pinX0kuleM5M2SOHqRfkNJnPLLZ/kG5VacJjnIFHovdRIWCQtBJwB1g +8NEXLJXr9qXBkqPFwqcIYA1gBBCWeZ4WNOaptvolRTnIHmX5k/Wq8VLcmZg9pYYaDDUz+kulBAYV +HDGA76oYa8J719rO+TMg1fW9ajMtgQT7sFzUnKPiXB3jqUJ1XnvUd+85VLrJChgbEplJL4hL/VBi +0XPnj3pDAgMBAAGjgZ0wgZowEwYJKwYBBAGCNxQCBAYeBABDAEEwCwYDVR0PBAQDAgGGMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFK9EBMJBfkiD2045AuzshHrmzsmkMDQGA1UdHwQtMCswKaAn +oCWGI2h0dHA6Ly9jcmwuc2VjdXJldHJ1c3QuY29tL1NHQ0EuY3JsMBAGCSsGAQQBgjcVAQQDAgEA +MA0GCSqGSIb3DQEBBQUAA4IBAQBjGghAfaReUw132HquHw0LURYD7xh8yOOvaliTFGCRsoTciE6+ +OYo68+aCiV0BN7OrJKQVDpI1WkpEXk5X+nXOH0jOZvQ8QCaSmGwb7iRGDBezUqXbpZGRzzfTb+cn +CDpOGR86p1hcF895P4vkp9MmI50mD1hp/Ed+stCNi5O/KU9DaXR2Z0vPB4zmAve14bRDtUstFJ/5 +3CYNv6ZHdAbYiNE6KTCEztI5gGIbqMdXSbxqVVFnFUq+NQfk1XWYN3kwFNspnWzFacxHVaIw98xc +f8LDmBxrThaA63p4ZUWiABqvDA1VZDRIuJK58bRQKfJPIx/abKwfROHdI3hRW8cW +-----END CERTIFICATE----- + +COMODO Certification Authority +============================== +-----BEGIN CERTIFICATE----- +MIIEHTCCAwWgAwIBAgIQToEtioJl4AsC7j41AkblPTANBgkqhkiG9w0BAQUFADCBgTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxJzAlBgNVBAMTHkNPTU9ETyBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eTAeFw0wNjEyMDEwMDAwMDBaFw0yOTEyMzEyMzU5NTlaMIGBMQswCQYDVQQGEwJHQjEb +MBkGA1UECBMSR3JlYXRlciBNYW5jaGVzdGVyMRAwDgYDVQQHEwdTYWxmb3JkMRowGAYDVQQKExFD +T01PRE8gQ0EgTGltaXRlZDEnMCUGA1UEAxMeQ09NT0RPIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0ECLi3LjkRv3UcEbVASY06m/weaKXTuH ++7uIzg3jLz8GlvCiKVCZrts7oVewdFFxze1CkU1B/qnI2GqGd0S7WWaXUF601CxwRM/aN5VCaTww +xHGzUvAhTaHYujl8HJ6jJJ3ygxaYqhZ8Q5sVW7euNJH+1GImGEaaP+vB+fGQV+useg2L23IwambV +4EajcNxo2f8ESIl33rXp+2dtQem8Ob0y2WIC8bGoPW43nOIv4tOiJovGuFVDiOEjPqXSJDlqR6sA +1KGzqSX+DT+nHbrTUcELpNqsOO9VUCQFZUaTNE8tja3G1CEZ0o7KBWFxB3NH5YoZEr0ETc5OnKVI +rLsm9wIDAQABo4GOMIGLMB0GA1UdDgQWBBQLWOWLxkwVN6RAqTCpIb5HNlpW/zAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zBJBgNVHR8EQjBAMD6gPKA6hjhodHRwOi8vY3JsLmNvbW9k +b2NhLmNvbS9DT01PRE9DZXJ0aWZpY2F0aW9uQXV0aG9yaXR5LmNybDANBgkqhkiG9w0BAQUFAAOC +AQEAPpiem/Yb6dc5t3iuHXIYSdOH5EOC6z/JqvWote9VfCFSZfnVDeFs9D6Mk3ORLgLETgdxb8CP +OGEIqB6BCsAvIC9Bi5HcSEW88cbeunZrM8gALTFGTO3nnc+IlP8zwFboJIYmuNg4ON8qa90SzMc/ +RxdMosIGlgnW2/4/PEZB31jiVg88O8EckzXZOFKs7sjsLjBOlDW0JB9LeGna8gI4zJVSk/BwJVmc +IGfE7vmLV2H0knZ9P4SNVbfo5azV8fUZVqZa+5Acr5Pr5RzUZ5ddBA6+C4OmF4O5MBKgxTMVBbkN ++8cFduPYSo38NBejxiEovjBFMR7HeL5YYTisO+IBZQ== +-----END CERTIFICATE----- + +Network Solutions Certificate Authority +======================================= +-----BEGIN 
CERTIFICATE----- +MIID5jCCAs6gAwIBAgIQV8szb8JcFuZHFhfjkDFo4DANBgkqhkiG9w0BAQUFADBiMQswCQYDVQQG +EwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMuMTAwLgYDVQQDEydOZXR3b3Jr +IFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwHhcNMDYxMjAxMDAwMDAwWhcNMjkxMjMx +MjM1OTU5WjBiMQswCQYDVQQGEwJVUzEhMB8GA1UEChMYTmV0d29yayBTb2x1dGlvbnMgTC5MLkMu +MTAwLgYDVQQDEydOZXR3b3JrIFNvbHV0aW9ucyBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkwggEiMA0G +CSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDkvH6SMG3G2I4rC7xGzuAnlt7e+foS0zwzc7MEL7xx +jOWftiJgPl9dzgn/ggwbmlFQGiaJ3dVhXRncEg8tCqJDXRfQNJIg6nPPOCwGJgl6cvf6UDL4wpPT +aaIjzkGxzOTVHzbRijr4jGPiFFlp7Q3Tf2vouAPlT2rlmGNpSAW+Lv8ztumXWWn4Zxmuk2GWRBXT +crA/vGp97Eh/jcOrqnErU2lBUzS1sLnFBgrEsEX1QV1uiUV7PTsmjHTC5dLRfbIR1PtYMiKagMnc +/Qzpf14Dl847ABSHJ3A4qY5usyd2mFHgBeMhqxrVhSI8KbWaFsWAqPS7azCPL0YCorEMIuDTAgMB +AAGjgZcwgZQwHQYDVR0OBBYEFCEwyfsA106Y2oeqKtCnLrFAMadMMA4GA1UdDwEB/wQEAwIBBjAP +BgNVHRMBAf8EBTADAQH/MFIGA1UdHwRLMEkwR6BFoEOGQWh0dHA6Ly9jcmwubmV0c29sc3NsLmNv +bS9OZXR3b3JrU29sdXRpb25zQ2VydGlmaWNhdGVBdXRob3JpdHkuY3JsMA0GCSqGSIb3DQEBBQUA +A4IBAQC7rkvnt1frf6ott3NHhWrB5KUd5Oc86fRZZXe1eltajSU24HqXLjjAV2CDmAaDn7l2em5Q +4LqILPxFzBiwmZVRDuwduIj/h1AcgsLj4DKAv6ALR8jDMe+ZZzKATxcheQxpXN5eNK4CtSbqUN9/ +GGUsyfJj4akH/nxxH2szJGoeBfcFaMBqEssuXmHLrijTfsK0ZpEmXzwuJF/LWA/rKOyvEZbz3Htv +wKeI8lN3s2Berq4o2jUsbzRF0ybh3uxbTydrFny9RAQYgrOJeRcQcT16ohZO9QHNpGxlaKFJdlxD +ydi8NmdspZS11My5vWo1ViHe2MPr+8ukYEywVaCge1ey +-----END CERTIFICATE----- + +COMODO ECC Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIICiTCCAg+gAwIBAgIQH0evqmIAcFBUTAGem2OZKjAKBggqhkjOPQQDAzCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwHhcNMDgwMzA2MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMCR0Ix +GzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UEChMR +Q09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBFQ0MgQ2VydGlmaWNhdGlvbiBBdXRo +b3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQDR3svdcmCFYX7deSRFtSrYpn1PlILBs5BAH+X +4QokPB0BBO490o0JlwzgdeT6+3eKKvUDYEs2ixYjFq0JcfRK9ChQtP6IHG4/bC8vCVlbpVsLM5ni +wz2J+Wos77LTBumjQjBAMB0GA1UdDgQWBBR1cacZSBm8nZ3qQUfflMRId5nTeTAOBgNVHQ8BAf8E +BAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjEA7wNbeqy3eApyt4jf/7VG +FAkK+qDmfQjGGoe9GKhzvSbKYAydzpmfz1wPMOG+FDHqAjAU9JM8SaczepBGR7NjfRObTrdvGDeA +U/7dIOA1mjbRxwG55tzd8/8dLDoWV9mSOdY= +-----END CERTIFICATE----- + +Certigna +======== +-----BEGIN CERTIFICATE----- +MIIDqDCCApCgAwIBAgIJAP7c4wEPyUj/MA0GCSqGSIb3DQEBBQUAMDQxCzAJBgNVBAYTAkZSMRIw +EAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hMB4XDTA3MDYyOTE1MTMwNVoXDTI3 +MDYyOTE1MTMwNVowNDELMAkGA1UEBhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczERMA8GA1UEAwwI +Q2VydGlnbmEwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDIaPHJ1tazNHUmgh7stL7q +XOEm7RFHYeGifBZ4QCHkYJ5ayGPhxLGWkv8YbWkj4Sti993iNi+RB7lIzw7sebYs5zRLcAglozyH +GxnygQcPOJAZ0xH+hrTy0V4eHpbNgGzOOzGTtvKg0KmVEn2lmsxryIRWijOp5yIVUxbwzBfsV1/p +ogqYCd7jX5xv3EjjhQsVWqa6n6xI4wmy9/Qy3l40vhx4XUJbzg4ij02Q130yGLMLLGq/jj8UEYkg +DncUtT2UCIf3JR7VsmAA7G8qKCVuKj4YYxclPz5EIBb2JsglrgVKtOdjLPOMFlN+XPsRGgjBRmKf +Irjxwo1p3Po6WAbfAgMBAAGjgbwwgbkwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUGu3+QTmQ +tCRZvgHyUtVF9lo53BEwZAYDVR0jBF0wW4AUGu3+QTmQtCRZvgHyUtVF9lo53BGhOKQ2MDQxCzAJ +BgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxETAPBgNVBAMMCENlcnRpZ25hggkA/tzjAQ/J +SP8wDgYDVR0PAQH/BAQDAgEGMBEGCWCGSAGG+EIBAQQEAwIABzANBgkqhkiG9w0BAQUFAAOCAQEA +hQMeknH2Qq/ho2Ge6/PAD/Kl1NqV5ta+aDY9fm4fTIrv0Q8hbV6lUmPOEvjvKtpv6zf+EwLHyzs+ 
+ImvaYS5/1HI93TDhHkxAGYwP15zRgzB7mFncfca5DClMoTOi62c6ZYTTluLtdkVwj7Ur3vkj1klu +PBS1xp81HlDQwY9qcEQCYsuuHWhBp6pX6FOqB9IG9tUUBguRA3UsbHK1YZWaDYu5Def131TN3ubY +1gkIl2PlwS6wt0QmwCbAr1UwnjvVNioZBPRcHv/PLLf/0P2HQBHVESO7SMAhqaQoLf0V+LBOK/Qw +WyH8EZE0vkHve52Xdf+XlcCWWC/qu0bXu+TZLg== +-----END CERTIFICATE----- + +ePKI Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIFsDCCA5igAwIBAgIQFci9ZUdcr7iXAF7kBtK8nTANBgkqhkiG9w0BAQUFADBeMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xKjAoBgNVBAsMIWVQS0kg +Um9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0wNDEyMjAwMjMxMjdaFw0zNDEyMjAwMjMx +MjdaMF4xCzAJBgNVBAYTAlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEq +MCgGA1UECwwhZVBLSSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA4SUP7o3biDN1Z82tH306Tm2d0y8U82N0ywEhajfqhFAHSyZbCUNs +IZ5qyNUD9WBpj8zwIuQf5/dqIjG3LBXy4P4AakP/h2XGtRrBp0xtInAhijHyl3SJCRImHJ7K2RKi +lTza6We/CKBk49ZCt0Xvl/T29de1ShUCWH2YWEtgvM3XDZoTM1PRYfl61dd4s5oz9wCGzh1NlDiv +qOx4UXCKXBCDUSH3ET00hl7lSM2XgYI1TBnsZfZrxQWh7kcT1rMhJ5QQCtkkO7q+RBNGMD+XPNjX +12ruOzjjK9SXDrkb5wdJfzcq+Xd4z1TtW0ado4AOkUPB1ltfFLqfpo0kR0BZv3I4sjZsN/+Z0V0O +WQqraffAsgRFelQArr5T9rXn4fg8ozHSqf4hUmTFpmfwdQcGlBSBVcYn5AGPF8Fqcde+S/uUWH1+ +ETOxQvdibBjWzwloPn9s9h6PYq2lY9sJpx8iQkEeb5mKPtf5P0B6ebClAZLSnT0IFaUQAS2zMnao +lQ2zepr7BxB4EW/hj8e6DyUadCrlHJhBmd8hh+iVBmoKs2pHdmX2Os+PYhcZewoozRrSgx4hxyy/ +vv9haLdnG7t4TY3OZ+XkwY63I2binZB1NJipNiuKmpS5nezMirH4JYlcWrYvjB9teSSnUmjDhDXi +Zo1jDiVN1Rmy5nk3pyKdVDECAwEAAaNqMGgwHQYDVR0OBBYEFB4M97Zn8uGSJglFwFU5Lnc/Qkqi +MAwGA1UdEwQFMAMBAf8wOQYEZyoHAAQxMC8wLQIBADAJBgUrDgMCGgUAMAcGBWcqAwAABBRFsMLH +ClZ87lt4DJX5GFPBphzYEDANBgkqhkiG9w0BAQUFAAOCAgEACbODU1kBPpVJufGBuvl2ICO1J2B0 +1GqZNF5sAFPZn/KmsSQHRGoqxqWOeBLoR9lYGxMqXnmbnwoqZ6YlPwZpVnPDimZI+ymBV3QGypzq +KOg4ZyYr8dW1P2WT+DZdjo2NQCCHGervJ8A9tDkPJXtoUHRVnAxZfVo9QZQlUgjgRywVMRnVvwdV +xrsStZf0X4OFunHB2WyBEXYKCrC/gpf36j36+uwtqSiUO1bd0lEursC9CBWMd1I0ltabrNMdjmEP +NXubrjlpC2JgQCA2j6/7Nu4tCEoduL+bXPjqpRugc6bY+G7gMwRfaKonh+3ZwZCc7b3jajWvY9+r +GNm65ulK6lCKD2GTHuItGeIwlDWSXQ62B68ZgI9HkFFLLk3dheLSClIKF5r8GrBQAuUBo2M3IUxE +xJtRmREOc5wGj1QupyheRDmHVi03vYVElOEMSyycw5KFNGHLD7ibSkNS/jQ6fbjpKdx2qcgw+BRx +gMYeNkh0IkFch4LoGHGLQYlE535YW6i4jRPpp2zDR+2zGp1iro2C6pSe3VkQw63d4k3jMdXH7Ojy +sP6SHhYKGvzZ8/gntsm+HbRsZJB/9OTEW9c3rkIO3aQab3yIVMUWbuF6aC74Or8NpDyJO3inTmOD +BCEIZ43ygknQW/2xzQ+DhNQ+IIX3Sj0rnP0qCglN6oH4EZw= +-----END CERTIFICATE----- + +certSIGN ROOT CA +================ +-----BEGIN CERTIFICATE----- +MIIDODCCAiCgAwIBAgIGIAYFFnACMA0GCSqGSIb3DQEBBQUAMDsxCzAJBgNVBAYTAlJPMREwDwYD +VQQKEwhjZXJ0U0lHTjEZMBcGA1UECxMQY2VydFNJR04gUk9PVCBDQTAeFw0wNjA3MDQxNzIwMDRa +Fw0zMTA3MDQxNzIwMDRaMDsxCzAJBgNVBAYTAlJPMREwDwYDVQQKEwhjZXJ0U0lHTjEZMBcGA1UE +CxMQY2VydFNJR04gUk9PVCBDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALczuX7I +JUqOtdu0KBuqV5Do0SLTZLrTk+jUrIZhQGpgV2hUhE28alQCBf/fm5oqrl0Hj0rDKH/v+yv6efHH +rfAQUySQi2bJqIirr1qjAOm+ukbuW3N7LBeCgV5iLKECZbO9xSsAfsT8AzNXDe3i+s5dRdY4zTW2 +ssHQnIFKquSyAVwdj1+ZxLGt24gh65AIgoDzMKND5pCCrlUoSe1b16kQOA7+j0xbm0bqQfWwCHTD +0IgztnzXdN/chNFDDnU5oSVAKOp4yw4sLjmdjItuFhwvJoIQ4uNllAoEwF73XVv4EOLQunpL+943 +AAAaWyjj0pxzPjKHmKHJUS/X3qwzs08CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8B +Af8EBAMCAcYwHQYDVR0OBBYEFOCMm9slSbPxfIbWskKHC9BroNnkMA0GCSqGSIb3DQEBBQUAA4IB +AQA+0hyJLjX8+HXd5n9liPRyTMks1zJO890ZeUe9jjtbkw9QSSQTaxQGcu8J06Gh40CEyecYMnQ8 +SG4Pn0vU9x7Tk4ZkVJdjclDVVc/6IJMCopvDI5NOFlV2oHB5bc0hH88vLbwZ44gx+FkagQnIl6Z0 +x2DEW8xXjrJ1/RsCCdtZb3KTafcxQdaIOL+Hsr0Wefmq5L6IJd1hJyMctTEHBDa0GpC9oHRxUIlt 
+vBTjD4au8as+x6AJzKNI0eDbZOeStc+vckNwi/nDhDwTqn6Sm1dTk/pwwpEOMfmbZ13pljheX7Nz +TogVZ96edhBiIL5VaZVDADlN9u6wWk5JRFRYX0KD +-----END CERTIFICATE----- + +NetLock Arany (Class Gold) Fล‘tanรบsรญtvรกny +======================================== +-----BEGIN CERTIFICATE----- +MIIEFTCCAv2gAwIBAgIGSUEs5AAQMA0GCSqGSIb3DQEBCwUAMIGnMQswCQYDVQQGEwJIVTERMA8G +A1UEBwwIQnVkYXBlc3QxFTATBgNVBAoMDE5ldExvY2sgS2Z0LjE3MDUGA1UECwwuVGFuw7pzw610 +dsOhbnlraWFkw7NrIChDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzKTE1MDMGA1UEAwwsTmV0TG9jayBB +cmFueSAoQ2xhc3MgR29sZCkgRsWRdGFuw7pzw610dsOhbnkwHhcNMDgxMjExMTUwODIxWhcNMjgx +MjA2MTUwODIxWjCBpzELMAkGA1UEBhMCSFUxETAPBgNVBAcMCEJ1ZGFwZXN0MRUwEwYDVQQKDAxO +ZXRMb2NrIEtmdC4xNzA1BgNVBAsMLlRhbsO6c8OtdHbDoW55a2lhZMOzayAoQ2VydGlmaWNhdGlv +biBTZXJ2aWNlcykxNTAzBgNVBAMMLE5ldExvY2sgQXJhbnkgKENsYXNzIEdvbGQpIEbFkXRhbsO6 +c8OtdHbDoW55MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxCRec75LbRTDofTjl5Bu +0jBFHjzuZ9lk4BqKf8owyoPjIMHj9DrTlF8afFttvzBPhCf2nx9JvMaZCpDyD/V/Q4Q3Y1GLeqVw +/HpYzY6b7cNGbIRwXdrzAZAj/E4wqX7hJ2Pn7WQ8oLjJM2P+FpD/sLj916jAwJRDC7bVWaaeVtAk +H3B5r9s5VA1lddkVQZQBr17s9o3x/61k/iCa11zr/qYfCGSji3ZVrR47KGAuhyXoqq8fxmRGILdw +fzzeSNuWU7c5d+Qa4scWhHaXWy+7GRWF+GmF9ZmnqfI0p6m2pgP8b4Y9VHx2BJtr+UBdADTHLpl1 +neWIA6pN+APSQnbAGwIDAKiLo0UwQzASBgNVHRMBAf8ECDAGAQH/AgEEMA4GA1UdDwEB/wQEAwIB +BjAdBgNVHQ4EFgQUzPpnk/C2uNClwB7zU/2MU9+D15YwDQYJKoZIhvcNAQELBQADggEBAKt/7hwW +qZw8UQCgwBEIBaeZ5m8BiFRhbvG5GK1Krf6BQCOUL/t1fC8oS2IkgYIL9WHxHG64YTjrgfpioTta +YtOUZcTh5m2C+C8lcLIhJsFyUR+MLMOEkMNaj7rP9KdlpeuY0fsFskZ1FSNqb4VjMIDw1Z4fKRzC +bLBQWV2QWzuoDTDPv31/zvGdg73JRm4gpvlhUbohL3u+pRVjodSVh/GeufOJ8z2FuLjbvrW5Kfna +NwUASZQDhETnv0Mxz3WLJdH0pmT1kvarBes96aULNmLazAZfNou2XjG4Kvte9nHfRCaexOYNkbQu +dZWAUWpLMKawYqGT8ZvYzsRjdT9ZR7E= +-----END CERTIFICATE----- + +Hongkong Post Root CA 1 +======================= +-----BEGIN CERTIFICATE----- +MIIDMDCCAhigAwIBAgICA+gwDQYJKoZIhvcNAQEFBQAwRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoT +DUhvbmdrb25nIFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMB4XDTAzMDUx +NTA1MTMxNFoXDTIzMDUxNTA0NTIyOVowRzELMAkGA1UEBhMCSEsxFjAUBgNVBAoTDUhvbmdrb25n +IFBvc3QxIDAeBgNVBAMTF0hvbmdrb25nIFBvc3QgUm9vdCBDQSAxMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEArP84tulmAknjorThkPlAj3n54r15/gK97iSSHSL22oVyaf7XPwnU3ZG1 +ApzQjVrhVcNQhrkpJsLj2aDxaQMoIIBFIi1WpztUlVYiWR8o3x8gPW2iNr4joLFutbEnPzlTCeqr +auh0ssJlXI6/fMN4hM2eFvz1Lk8gKgifd/PFHsSaUmYeSF7jEAaPIpjhZY4bXSNmO7ilMlHIhqqh +qZ5/dpTCpmy3QfDVyAY45tQM4vM7TG1QjMSDJ8EThFk9nnV0ttgCXjqQesBCNnLsak3c78QA3xMY +V18meMjWCnl3v/evt3a5pQuEF10Q6m/hq5URX208o1xNg1vysxmKgIsLhwIDAQABoyYwJDASBgNV +HRMBAf8ECDAGAQH/AgEDMA4GA1UdDwEB/wQEAwIBxjANBgkqhkiG9w0BAQUFAAOCAQEADkbVPK7i +h9legYsCmEEIjEy82tvuJxuC52pF7BaLT4Wg87JwvVqWuspube5Gi27nKi6Wsxkz67SfqLI37pio +l7Yutmcn1KZJ/RyTZXaeQi/cImyaT/JaFTmxcdcrUehtHJjA2Sr0oYJ71clBoiMBdDhViw+5Lmei +IAQ32pwL0xch4I+XeTRvhEgCIDMb5jREn5Fw9IBehEPCKdJsEhTkYY2sEJCehFC78JZvRZ+K88ps +T/oROhUVRsPNH4NbLUES7VBnQRM9IauUiqpOfMGx+6fWtScvl6tu4B3i0RwsH0Ti/L6RoZz71ilT +c4afU9hDDl3WY4JxHYB0yvbiAmvZWg== +-----END CERTIFICATE----- + +SecureSign RootCA11 +=================== +-----BEGIN CERTIFICATE----- +MIIDbTCCAlWgAwIBAgIBATANBgkqhkiG9w0BAQUFADBYMQswCQYDVQQGEwJKUDErMCkGA1UEChMi +SmFwYW4gQ2VydGlmaWNhdGlvbiBTZXJ2aWNlcywgSW5jLjEcMBoGA1UEAxMTU2VjdXJlU2lnbiBS +b290Q0ExMTAeFw0wOTA0MDgwNDU2NDdaFw0yOTA0MDgwNDU2NDdaMFgxCzAJBgNVBAYTAkpQMSsw +KQYDVQQKEyJKYXBhbiBDZXJ0aWZpY2F0aW9uIFNlcnZpY2VzLCBJbmMuMRwwGgYDVQQDExNTZWN1 +cmVTaWduIFJvb3RDQTExMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA/XeqpRyQBTvL +TJszi1oURaTnkBbR31fSIRCkF/3frNYfp+TbfPfs37gD2pRY/V1yfIw/XwFndBWW4wI8h9uuywGO 
+wvNmxoVF9ALGOrVisq/6nL+k5tSAMJjzDbaTj6nU2DbysPyKyiyhFTOVMdrAG/LuYpmGYz+/3ZMq +g6h2uRMft85OQoWPIucuGvKVCbIFtUROd6EgvanyTgp9UK31BQ1FT0Zx/Sg+U/sE2C3XZR1KG/rP +O7AxmjVuyIsG0wCR8pQIZUyxNAYAeoni8McDWc/V1uinMrPmmECGxc0nEovMe863ETxiYAcjPitA +bpSACW22s293bzUIUPsCh8U+iQIDAQABo0IwQDAdBgNVHQ4EFgQUW/hNT7KlhtQ60vFjmqC+CfZX +t94wDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAKCh +OBZmLqdWHyGcBvod7bkixTgm2E5P7KN/ed5GIaGHd48HCJqypMWvDzKYC3xmKbabfSVSSUOrTC4r +bnpwrxYO4wJs+0LmGJ1F2FXI6Dvd5+H0LgscNFxsWEr7jIhQX5Ucv+2rIrVls4W6ng+4reV6G4pQ +Oh29Dbx7VFALuUKvVaAYga1lme++5Jy/xIWrQbJUb9wlze144o4MjQlJ3WN7WmmWAiGovVJZ6X01 +y8hSyn+B/tlr0/cR7SXf+Of5pPpyl4RTDaXQMhhRdlkUbA/r7F+AjHVDg8OFmP9Mni0N5HeDk061 +lgeLKBObjBmNQSdJQO7e5iNEOdyhIta6A/I= +-----END CERTIFICATE----- + +Microsec e-Szigno Root CA 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIECjCCAvKgAwIBAgIJAMJ+QwRORz8ZMA0GCSqGSIb3DQEBCwUAMIGCMQswCQYDVQQGEwJIVTER +MA8GA1UEBwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jv +c2VjIGUtU3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5o +dTAeFw0wOTA2MTYxMTMwMThaFw0yOTEyMzAxMTMwMThaMIGCMQswCQYDVQQGEwJIVTERMA8GA1UE +BwwIQnVkYXBlc3QxFjAUBgNVBAoMDU1pY3Jvc2VjIEx0ZC4xJzAlBgNVBAMMHk1pY3Jvc2VjIGUt +U3ppZ25vIFJvb3QgQ0EgMjAwOTEfMB0GCSqGSIb3DQEJARYQaW5mb0BlLXN6aWduby5odTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOn4j/NjrdqG2KfgQvvPkd6mJviZpWNwrZuuyjNA +fW2WbqEORO7hE52UQlKavXWFdCyoDh2Tthi3jCyoz/tccbna7P7ofo/kLx2yqHWH2Leh5TvPmUpG +0IMZfcChEhyVbUr02MelTTMuhTlAdX4UfIASmFDHQWe4oIBhVKZsTh/gnQ4H6cm6M+f+wFUoLAKA +pxn1ntxVUwOXewdI/5n7N4okxFnMUBBjjqqpGrCEGob5X7uxUG6k0QrM1XF+H6cbfPVTbiJfyyvm +1HxdrtbCxkzlBQHZ7Vf8wSN5/PrIJIOV87VqUQHQd9bpEqH5GoP7ghu5sJf0dgYzQ0mg/wu1+rUC +AwEAAaOBgDB+MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBTLD8bf +QkPMPcu1SCOhGnqmKrs0aDAfBgNVHSMEGDAWgBTLD8bfQkPMPcu1SCOhGnqmKrs0aDAbBgNVHREE +FDASgRBpbmZvQGUtc3ppZ25vLmh1MA0GCSqGSIb3DQEBCwUAA4IBAQDJ0Q5eLtXMs3w+y/w9/w0o +lZMEyL/azXm4Q5DwpL7v8u8hmLzU1F0G9u5C7DBsoKqpyvGvivo/C3NqPuouQH4frlRheesuCDfX +I/OMn74dseGkddug4lQUsbocKaQY9hK6ohQU4zE1yED/t+AFdlfBHFny+L/k7SViXITwfn4fs775 +tyERzAMBVnCnEJIeGzSBHq2cGsMEPO0CYdYeBvNfOofyK/FFh+U9rNHHV4S9a67c2Pm2G2JwCz02 +yULyMtd6YebS2z3PyKnJm9zbWETXbzivf3jTo60adbocwTZ8jx5tHMN1Rq41Bab2XD0h7lbwyYIi +LXpUq3DDfSJlgnCW +-----END CERTIFICATE----- + +GlobalSign Root CA - R3 +======================= +-----BEGIN CERTIFICATE----- +MIIDXzCCAkegAwIBAgILBAAAAAABIVhTCKIwDQYJKoZIhvcNAQELBQAwTDEgMB4GA1UECxMXR2xv +YmFsU2lnbiBSb290IENBIC0gUjMxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkdsb2Jh +bFNpZ24wHhcNMDkwMzE4MTAwMDAwWhcNMjkwMzE4MTAwMDAwWjBMMSAwHgYDVQQLExdHbG9iYWxT +aWduIFJvb3QgQ0EgLSBSMzETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFsU2ln +bjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMwldpB5BngiFvXAg7aEyiie/QV2EcWt +iHL8RgJDx7KKnQRfJMsuS+FggkbhUqsMgUdwbN1k0ev1LKMPgj0MK66X17YUhhB5uzsTgHeMCOFJ +0mpiLx9e+pZo34knlTifBtc+ycsmWQ1z3rDI6SYOgxXG71uL0gRgykmmKPZpO/bLyCiR5Z2KYVc3 +rHQU3HTgOu5yLy6c+9C7v/U9AOEGM+iCK65TpjoWc4zdQQ4gOsC0p6Hpsk+QLjJg6VfLuQSSaGjl +OCZgdbKfd/+RFO+uIEn8rUAVSNECMWEZXriX7613t2Saer9fwRPvm2L7DWzgVGkWqQPabumDk3F2 +xmmFghcCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FI/wS3+oLkUkrk1Q+mOai97i3Ru8MA0GCSqGSIb3DQEBCwUAA4IBAQBLQNvAUKr+yAzv95ZURUm7 +lgAJQayzE4aGKAczymvmdLm6AC2upArT9fHxD4q/c2dKg8dEe3jgr25sbwMpjjM5RcOO5LlXbKr8 +EpbsU8Yt5CRsuZRj+9xTaGdWPoO4zzUhw8lo/s7awlOqzJCK6fBdRoyV3XpYKBovHd7NADdBj+1E +bddTKJd+82cEHhXXipa0095MJ6RMG3NzdvQXmcIfeg7jLQitChws/zyrVQ4PkX4268NXSb7hLi18 
+YIvDQVETI53O9zJrlAGomecsMx86OyXShkDOOyyGeMlhLxS67ttVb9+E7gUJTb0o2HLO02JQZR7r +kpeDMdmztcpHWD9f +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIU+w77vuySF8wDQYJKoZIhvcNAQEFBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0wOTA1MjAwODM4MTVaFw0zMDEyMzEwODM4MTVaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMBIGA1Ud +EwEB/wQIMAYBAf8CAQEwDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBRlzeurNR4APn7VdMActHNH +DhpkLzCBpgYDVR0gBIGeMIGbMIGYBgRVHSAAMIGPMC8GCCsGAQUFBwIBFiNodHRwOi8vd3d3LmZp +cm1hcHJvZmVzaW9uYWwuY29tL2NwczBcBggrBgEFBQcCAjBQHk4AUABhAHMAZQBvACAAZABlACAA +bABhACAAQgBvAG4AYQBuAG8AdgBhACAANAA3ACAAQgBhAHIAYwBlAGwAbwBuAGEAIAAwADgAMAAx +ADcwDQYJKoZIhvcNAQEFBQADggIBABd9oPm03cXF661LJLWhAqvdpYhKsg9VSytXjDvlMd3+xDLx +51tkljYyGOylMnfX40S2wBEqgLk9am58m9Ot/MPWo+ZkKXzR4Tgegiv/J2Wv+xYVxC5xhOW1//qk +R71kMrv2JYSiJ0L1ILDCExARzRAVukKQKtJE4ZYm6zFIEv0q2skGz3QeqUvVhyj5eTSSPi5E6PaP +T481PyWzOdxjKpBrIF/EUhJOlywqrJ2X3kjyo2bbwtKDlaZmp54lD+kLM5FlClrD2VQS3a/DTg4f +Jl4N3LON7NWBcN7STyQF82xO9UxJZo3R/9ILJUFI/lGExkKvgATP0H5kSeTy36LssUzAKh3ntLFl +osS88Zj0qnAHY7S42jtM+kAiMFsRpvAFDsYCA0irhpuF3dvd6qJ2gHN99ZwExEWN57kci57q13XR +crHedUTnQn3iV2t93Jm8PYMo6oCTjcVMZcFwgbg4/EMxsvYDNEeyrPsiBsse3RdHHF9mudMaotoR +saS8I8nkvof/uZS2+F0gStRf571oe2XyFR7SOqkt6dhrJKyXWERHrVkY8SFlcN7ONGCoQPHzPKTD +KCOM/iczQ0CgFzzr6juwcqajuUpLXhZI9LK8yIySxZ2frHI2vDSANGupi5LAuBft7HZT9SQBjLMi +6Et8Vcad+qMUu2WFbm5PEn4KPJ2V +-----END CERTIFICATE----- + +Izenpe.com +========== +-----BEGIN CERTIFICATE----- +MIIF8TCCA9mgAwIBAgIQALC3WhZIX7/hy/WL1xnmfTANBgkqhkiG9w0BAQsFADA4MQswCQYDVQQG +EwJFUzEUMBIGA1UECgwLSVpFTlBFIFMuQS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wHhcNMDcxMjEz +MTMwODI4WhcNMzcxMjEzMDgyNzI1WjA4MQswCQYDVQQGEwJFUzEUMBIGA1UECgwLSVpFTlBFIFMu +QS4xEzARBgNVBAMMCkl6ZW5wZS5jb20wggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDJ +03rKDx6sp4boFmVqscIbRTJxldn+EFvMr+eleQGPicPK8lVx93e+d5TzcqQsRNiekpsUOqHnJJAK +ClaOxdgmlOHZSOEtPtoKct2jmRXagaKH9HtuJneJWK3W6wyyQXpzbm3benhB6QiIEn6HLmYRY2xU ++zydcsC8Lv/Ct90NduM61/e0aL6i9eOBbsFGb12N4E3GVFWJGjMxCrFXuaOKmMPsOzTFlUFpfnXC +PCDFYbpRR6AgkJOhkEvzTnyFRVSa0QUmQbC1TR0zvsQDyCV8wXDbO/QJLVQnSKwv4cSsPsjLkkxT +OTcj7NMB+eAJRE1NZMDhDVqHIrytG6P+JrUV86f8hBnp7KGItERphIPzidF0BqnMC9bC3ieFUCbK +F7jJeodWLBoBHmy+E60QrLUk9TiRodZL2vG70t5HtfG8gfZZa88ZU+mNFctKy6lvROUbQc/hhqfK +0GqfvEyNBjNaooXlkDWgYlwWTvDjovoDGrQscbNYLN57C9saD+veIR8GdwYDsMnvmfzAuU8Lhij+ +0rnq49qlw0dpEuDb8PYZi+17cNcC1u2HGCgsBCRMd+RIihrGO5rUD8r6ddIBQFqNeb+Lz0vPqhbB +leStTIo+F5HUsWLlguWABKQDfo2/2n+iD5dPDNMN+9fR5XJ+HMh3/1uaD7euBUbl8agW7EekFwID 
+AQABo4H2MIHzMIGwBgNVHREEgagwgaWBD2luZm9AaXplbnBlLmNvbaSBkTCBjjFHMEUGA1UECgw+ +SVpFTlBFIFMuQS4gLSBDSUYgQTAxMzM3MjYwLVJNZXJjLlZpdG9yaWEtR2FzdGVpeiBUMTA1NSBG +NjIgUzgxQzBBBgNVBAkMOkF2ZGEgZGVsIE1lZGl0ZXJyYW5lbyBFdG9yYmlkZWEgMTQgLSAwMTAx +MCBWaXRvcmlhLUdhc3RlaXowDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFB0cZQ6o8iV7tJHP5LGx5r1VdGwFMA0GCSqGSIb3DQEBCwUAA4ICAQB4pgwWSp9MiDrAyw6l +Fn2fuUhfGI8NYjb2zRlrrKvV9pF9rnHzP7MOeIWblaQnIUdCSnxIOvVFfLMMjlF4rJUT3sb9fbga +kEyrkgPH7UIBzg/YsfqikuFgba56awmqxinuaElnMIAkejEWOVt+8Rwu3WwJrfIxwYJOubv5vr8q +hT/AQKM6WfxZSzwoJNu0FXWuDYi6LnPAvViH5ULy617uHjAimcs30cQhbIHsvm0m5hzkQiCeR7Cs +g1lwLDXWrzY0tM07+DKo7+N4ifuNRSzanLh+QBxh5z6ikixL8s36mLYp//Pye6kfLqCTVyvehQP5 +aTfLnnhqBbTFMXiJ7HqnheG5ezzevh55hM6fcA5ZwjUukCox2eRFekGkLhObNA5me0mrZJfQRsN5 +nXJQY6aYWwa9SG3YOYNw6DXwBdGqvOPbyALqfP2C2sJbUjWumDqtujWTI6cfSN01RpiyEGjkpTHC +ClguGYEQyVB1/OpaFs4R1+7vUIgtYf8/QnMFlEPVjjxOAToZpR9GTnfQXeWBIiGH/pR9hNiTrdZo +Q0iy2+tzJOeRf1SktoA+naM8THLCV8Sg1Mw4J87VBp6iSNnpn86CcDaTmjvfliHjWbcM2pE38P1Z +WrOZyGlsQyYBNWNgVYkDOnXYukrZVP/u3oDYLdE41V4tC5h9Pmzb/CaIxw== +-----END CERTIFICATE----- + +Go Daddy Root Certificate Authority - G2 +======================================== +-----BEGIN CERTIFICATE----- +MIIDxTCCAq2gAwIBAgIBADANBgkqhkiG9w0BAQsFADCBgzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxGjAYBgNVBAoTEUdvRGFkZHkuY29tLCBJbmMu +MTEwLwYDVQQDEyhHbyBEYWRkeSBSb290IENlcnRpZmljYXRlIEF1dGhvcml0eSAtIEcyMB4XDTA5 +MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgYMxCzAJBgNVBAYTAlVTMRAwDgYDVQQIEwdBcml6 +b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMRowGAYDVQQKExFHb0RhZGR5LmNvbSwgSW5jLjExMC8G +A1UEAxMoR28gRGFkZHkgUm9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZI +hvcNAQEBBQADggEPADCCAQoCggEBAL9xYgjx+lk09xvJGKP3gElY6SKDE6bFIEMBO4Tx5oVJnyfq +9oQbTqC023CYxzIBsQU+B07u9PpPL1kwIuerGVZr4oAH/PMWdYA5UXvl+TW2dE6pjYIT5LY/qQOD ++qK+ihVqf94Lw7YZFAXK6sOoBJQ7RnwyDfMAZiLIjWltNowRGLfTshxgtDj6AozO091GB94KPutd +fMh8+7ArU6SSYmlRJQVhGkSBjCypQ5Yj36w6gZoOKcUcqeldHraenjAKOc7xiID7S13MMuyFYkMl +NAJWJwGRtDtwKj9useiciAF9n9T521NtYJ2/LOdYq7hfRvzOxBsDPAnrSTFcaUaz4EcCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFDqahQcQZyi27/a9 +BUFuIMGU2g/eMA0GCSqGSIb3DQEBCwUAA4IBAQCZ21151fmXWWcDYfF+OwYxdS2hII5PZYe096ac +vNjpL9DbWu7PdIxztDhC2gV7+AJ1uP2lsdeu9tfeE8tTEH6KRtGX+rcuKxGrkLAngPnon1rpN5+r +5N9ss4UXnT3ZJE95kTXWXwTrgIOrmgIttRD02JDHBHNA7XIloKmf7J6raBKZV8aPEjoJpL1E/QYV +N8Gb5DKj7Tjo2GTzLH4U/ALqn83/B2gX2yKQOC16jdFU8WnjXzPKej17CuPKf1855eJ1usV2GDPO +LPAvTK33sefOT6jEm0pUBsV/fdUID+Ic/n4XuKxe9tQWskMJDE32p2u0mYRlynqI4uJEvlz36hz1 +-----END CERTIFICATE----- + +Starfield Root Certificate Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIID3TCCAsWgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBjzELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xMjAwBgNVBAMTKVN0YXJmaWVsZCBSb290IENlcnRpZmljYXRlIEF1dGhvcml0 +eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgY8xCzAJBgNVBAYTAlVTMRAw +DgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxTdGFyZmllbGQg +VGVjaG5vbG9naWVzLCBJbmMuMTIwMAYDVQQDEylTdGFyZmllbGQgUm9vdCBDZXJ0aWZpY2F0ZSBB +dXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAL3twQP89o/8ArFv +W59I2Z154qK3A2FWGMNHttfKPTUuiUP3oWmb3ooa/RMgnLRJdzIpVv257IzdIvpy3Cdhl+72WoTs +bhm5iSzchFvVdPtrX8WJpRBSiUZV9Lh1HOZ/5FSuS/hVclcCGfgXcVnrHigHdMWdSL5stPSksPNk +N3mSwOxGXn/hbVNMYq/NHwtjuzqd+/x5AJhhdM8mgkBj87JyahkNmcrUDnXMN/uLicFZ8WJ/X7Nf +ZTD4p7dNdloedl40wOiWVpmKs/B/pM293DIxfJHP4F8R+GuqSVzRmZTRouNjWwl2tVZi4Ut0HZbU 
+JtQIBFnQmA4O5t78w+wfkPECAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwHQYDVR0OBBYEFHwMMh+n2TB/xH1oo2Kooc6rB1snMA0GCSqGSIb3DQEBCwUAA4IBAQARWfol +TwNvlJk7mh+ChTnUdgWUXuEok21iXQnCoKjUsHU48TRqneSfioYmUeYs0cYtbpUgSpIB7LiKZ3sx +4mcujJUDJi5DnUox9g61DLu34jd/IroAow57UvtruzvE03lRTs2Q9GcHGcg8RnoNAX3FWOdt5oUw +F5okxBDgBPfg8n/Uqgr/Qh037ZTlZFkSIHc40zI+OIF1lnP6aI+xy84fxez6nH7PfrHxBy22/L/K +pL/QlwVKvOoYKAKQvVR4CSFx09F9HdkWsKlhPdAKACL8x3vLCWRFCztAgfd9fDL1mMpYjn0q7pBZ +c2T5NnReJaH1ZgUufzkVqSr7UIuOhWn0 +-----END CERTIFICATE----- + +Starfield Services Root Certificate Authority - G2 +================================================== +-----BEGIN CERTIFICATE----- +MIID7zCCAtegAwIBAgIBADANBgkqhkiG9w0BAQsFADCBmDELMAkGA1UEBhMCVVMxEDAOBgNVBAgT +B0FyaXpvbmExEzARBgNVBAcTClNjb3R0c2RhbGUxJTAjBgNVBAoTHFN0YXJmaWVsZCBUZWNobm9s +b2dpZXMsIEluYy4xOzA5BgNVBAMTMlN0YXJmaWVsZCBTZXJ2aWNlcyBSb290IENlcnRpZmljYXRl +IEF1dGhvcml0eSAtIEcyMB4XDTA5MDkwMTAwMDAwMFoXDTM3MTIzMTIzNTk1OVowgZgxCzAJBgNV +BAYTAlVTMRAwDgYDVQQIEwdBcml6b25hMRMwEQYDVQQHEwpTY290dHNkYWxlMSUwIwYDVQQKExxT +dGFyZmllbGQgVGVjaG5vbG9naWVzLCBJbmMuMTswOQYDVQQDEzJTdGFyZmllbGQgU2VydmljZXMg +Um9vdCBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkgLSBHMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCC +AQoCggEBANUMOsQq+U7i9b4Zl1+OiFOxHz/Lz58gE20pOsgPfTz3a3Y4Y9k2YKibXlwAgLIvWX/2 +h/klQ4bnaRtSmpDhcePYLQ1Ob/bISdm28xpWriu2dBTrz/sm4xq6HZYuajtYlIlHVv8loJNwU4Pa +hHQUw2eeBGg6345AWh1KTs9DkTvnVtYAcMtS7nt9rjrnvDH5RfbCYM8TWQIrgMw0R9+53pBlbQLP +LJGmpufehRhJfGZOozptqbXuNC66DQO4M99H67FrjSXZm86B0UVGMpZwh94CDklDhbZsc7tk6mFB +rMnUVN+HL8cisibMn1lUaJ/8viovxFUcdUBgF4UCVTmLfwUCAwEAAaNCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJxfAN+qAdcwKziIorhtSpzyEZGDMA0GCSqG +SIb3DQEBCwUAA4IBAQBLNqaEd2ndOxmfZyMIbw5hyf2E3F/YNoHN2BtBLZ9g3ccaaNnRbobhiCPP +E95Dz+I0swSdHynVv/heyNXBve6SbzJ08pGCL72CQnqtKrcgfU28elUSwhXqvfdqlS5sdJ/PHLTy +xQGjhdByPq1zqwubdQxtRbeOlKyWN7Wg0I8VRw7j6IPdj/3vQQF3zCepYoUz8jcI73HPdwbeyBkd +iEDPfUYd/x7H4c7/I9vG+o1VTqkC50cRRj70/b17KSa7qWFiNyi2LSr2EIZkyXCn0q23KXB56jza +YyWf/Wi3MOxw+3WKt21gZ7IeyLnp2KhvAotnDU0mV3HaIPzBSlCNsSi6 +-----END CERTIFICATE----- + +AffirmTrust Commercial +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIId3cGJyapsXwwDQYJKoZIhvcNAQELBQAwRDELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMB4XDTEw +MDEyOTE0MDYwNloXDTMwMTIzMTE0MDYwNlowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBDb21tZXJjaWFsMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEA9htPZwcroRX1BiLLHwGy43NFBkRJLLtJJRTWzsO3qyxPxkEylFf6Eqdb +DuKPHx6GGaeqtS25Xw2Kwq+FNXkyLbscYjfysVtKPcrNcV/pQr6U6Mje+SJIZMblq8Yrba0F8PrV +C8+a5fBQpIs7R6UjW3p6+DM/uO+Zl+MgwdYoic+U+7lF7eNAFxHUdPALMeIrJmqbTFeurCA+ukV6 +BfO9m2kVrn1OIGPENXY6BwLJN/3HR+7o8XYdcxXyl6S1yHp52UKqK39c/s4mT6NmgTWvRLpUHhww +MmWd5jyTXlBOeuM61G7MGvv50jeuJCqrVwMiKA1JdX+3KNp1v47j3A55MQIDAQABo0IwQDAdBgNV +HQ4EFgQUnZPGU4teyq8/nx4P5ZmVvCT2lI8wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggEBAFis9AQOzcAN/wr91LoWXym9e2iZWEnStB03TX8nfUYGXUPG +hi4+c7ImfU+TqbbEKpqrIZcUsd6M06uJFdhrJNTxFq7YpFzUf1GO7RgBsZNjvbz4YYCanrHOQnDi +qX0GJX0nof5v7LMeJNrjS1UaADs1tDvZ110w/YETifLCBivtZ8SOyUOyXGsViQK8YvxO8rUzqrJv +0wqiUOP2O+guRMLbZjipM1ZI8W0bM40NjD9gN53Tym1+NH4Nn3J2ixufcv1SNUFFApYvHLKac0kh +sUlHRUe072o0EclNmsxZt9YCnlpOZbWUrhvfKbAW8b8Angc6F2S1BLUjIZkKlTuXfO8= +-----END CERTIFICATE----- + +AffirmTrust Networking +====================== +-----BEGIN CERTIFICATE----- +MIIDTDCCAjSgAwIBAgIIfE8EORzUmS0wDQYJKoZIhvcNAQEFBQAwRDELMAkGA1UEBhMCVVMxFDAS 
+BgNVBAoMC0FmZmlybVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMB4XDTEw +MDEyOTE0MDgyNFoXDTMwMTIzMTE0MDgyNFowRDELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmly +bVRydXN0MR8wHQYDVQQDDBZBZmZpcm1UcnVzdCBOZXR3b3JraW5nMIIBIjANBgkqhkiG9w0BAQEF +AAOCAQ8AMIIBCgKCAQEAtITMMxcua5Rsa2FSoOujz3mUTOWUgJnLVWREZY9nZOIG41w3SfYvm4SE +Hi3yYJ0wTsyEheIszx6e/jarM3c1RNg1lho9Nuh6DtjVR6FqaYvZ/Ls6rnla1fTWcbuakCNrmreI +dIcMHl+5ni36q1Mr3Lt2PpNMCAiMHqIjHNRqrSK6mQEubWXLviRmVSRLQESxG9fhwoXA3hA/Pe24 +/PHxI1Pcv2WXb9n5QHGNfb2V1M6+oF4nI979ptAmDgAp6zxG8D1gvz9Q0twmQVGeFDdCBKNwV6gb +h+0t+nvujArjqWaJGctB+d1ENmHP4ndGyH329JKBNv3bNPFyfvMMFr20FQIDAQABo0IwQDAdBgNV +HQ4EFgQUBx/S55zawm6iQLSwelAQUHTEyL0wDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQEFBQADggEBAIlXshZ6qML91tmbmzTCnLQyFE2npN/svqe++EPbkTfOtDIu +UFUaNU52Q3Eg75N3ThVwLofDwR1t3Mu1J9QsVtFSUzpE0nPIxBsFZVpikpzuQY0x2+c06lkh1QF6 +12S4ZDnNye2v7UsDSKegmQGA3GWjNq5lWUhPgkvIZfFXHeVZLgo/bNjR9eUJtGxUAArgFU2HdW23 +WJZa3W3SAKD0m0i+wzekujbgfIeFlxoVot4uolu9rxj5kFDNcFn4J2dHy8egBzp90SxdbBk6ZrV9 +/ZFvgrG+CJPbFEfxojfHRZ48x3evZKiT3/Zpg4Jg8klCNO1aAFSFHBY2kgxc+qatv9s= +-----END CERTIFICATE----- + +AffirmTrust Premium +=================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIIbYwURrGmCu4wDQYJKoZIhvcNAQEMBQAwQTELMAkGA1UEBhMCVVMxFDAS +BgNVBAoMC0FmZmlybVRydXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMB4XDTEwMDEy +OTE0MTAzNloXDTQwMTIzMTE0MTAzNlowQTELMAkGA1UEBhMCVVMxFDASBgNVBAoMC0FmZmlybVRy +dXN0MRwwGgYDVQQDDBNBZmZpcm1UcnVzdCBQcmVtaXVtMIICIjANBgkqhkiG9w0BAQEFAAOCAg8A +MIICCgKCAgEAxBLfqV/+Qd3d9Z+K4/as4Tx4mrzY8H96oDMq3I0gW64tb+eT2TZwamjPjlGjhVtn +BKAQJG9dKILBl1fYSCkTtuG+kU3fhQxTGJoeJKJPj/CihQvL9Cl/0qRY7iZNyaqoe5rZ+jjeRFcV +5fiMyNlI4g0WJx0eyIOFJbe6qlVBzAMiSy2RjYvmia9mx+n/K+k8rNrSs8PhaJyJ+HoAVt70VZVs ++7pk3WKL3wt3MutizCaam7uqYoNMtAZ6MMgpv+0GTZe5HMQxK9VfvFMSF5yZVylmd2EhMQcuJUmd +GPLu8ytxjLW6OQdJd/zvLpKQBY0tL3d770O/Nbua2Plzpyzy0FfuKE4mX4+QaAkvuPjcBukumj5R +p9EixAqnOEhss/n/fauGV+O61oV4d7pD6kh/9ti+I20ev9E2bFhc8e6kGVQa9QPSdubhjL08s9NI +S+LI+H+SqHZGnEJlPqQewQcDWkYtuJfzt9WyVSHvutxMAJf7FJUnM7/oQ0dG0giZFmA7mn7S5u04 +6uwBHjxIVkkJx0w3AJ6IDsBz4W9m6XJHMD4Q5QsDyZpCAGzFlH5hxIrff4IaC1nEWTJ3s7xgaVY5 +/bQGeyzWZDbZvUjthB9+pSKPKrhC9IK31FOQeE4tGv2Bb0TXOwF0lkLgAOIua+rF7nKsu7/+6qqo ++Nz2snmKtmcCAwEAAaNCMEAwHQYDVR0OBBYEFJ3AZ6YMItkm9UWrpmVSESfYRaxjMA8GA1UdEwEB +/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBDAUAA4ICAQCzV00QYk465KzquByv +MiPIs0laUZx2KI15qldGF9X1Uva3ROgIRL8YhNILgM3FEv0AVQVhh0HctSSePMTYyPtwni94loMg +Nt58D2kTiKV1NpgIpsbfrM7jWNa3Pt668+s0QNiigfV4Py/VpfzZotReBA4Xrf5B8OWycvpEgjNC +6C1Y91aMYj+6QrCcDFx+LmUmXFNPALJ4fqENmS2NuB2OosSw/WDQMKSOyARiqcTtNd56l+0OOF6S +L5Nwpamcb6d9Ex1+xghIsV5n61EIJenmJWtSKZGc0jlzCFfemQa0W50QBuHCAKi4HEoCChTQwUHK ++4w1IX2COPKpVJEZNZOUbWo6xbLQu4mGk+ibyQ86p3q4ofB4Rvr8Ny/lioTz3/4E2aFooC8k4gmV +BtWVyuEklut89pMFu+1z6S3RdTnX5yTb2E5fQ4+e0BQ5v1VwSJlXMbSc7kqYA5YwH2AG7hsj/oFg +IxpHYoWlzBk0gG+zrBrjn/B7SK3VAdlntqlyk+otZrWyuOQ9PLLvTIzq6we/qzWaVYa8GKa1qF60 +g2xraUDTn9zxw2lrueFtCfTxqlB2Cnp9ehehVZZCmTEJ3WARjQUwfuaORtGdFNrHF+QFlozEJLUb +zxQHskD4o55BhrwE0GuWyCqANP2/7waj3VjFhT0+j/6eKeC2uAloGRwYQw== +-----END CERTIFICATE----- + +AffirmTrust Premium ECC +======================= +-----BEGIN CERTIFICATE----- +MIIB/jCCAYWgAwIBAgIIdJclisc/elQwCgYIKoZIzj0EAwMwRTELMAkGA1UEBhMCVVMxFDASBgNV +BAoMC0FmZmlybVRydXN0MSAwHgYDVQQDDBdBZmZpcm1UcnVzdCBQcmVtaXVtIEVDQzAeFw0xMDAx +MjkxNDIwMjRaFw00MDEyMzExNDIwMjRaMEUxCzAJBgNVBAYTAlVTMRQwEgYDVQQKDAtBZmZpcm1U +cnVzdDEgMB4GA1UEAwwXQWZmaXJtVHJ1c3QgUHJlbWl1bSBFQ0MwdjAQBgcqhkjOPQIBBgUrgQQA +IgNiAAQNMF4bFZ0D0KF5Nbc6PJJ6yhUczWLznCZcBz3lVPqj1swS6vQUX+iOGasvLkjmrBhDeKzQ 
+N8O9ss0s5kfiGuZjuD0uL3jET9v0D6RoTFVya5UdThhClXjMNzyR4ptlKymjQjBAMB0GA1UdDgQW +BBSaryl6wBE1NSZRMADDav5A1a7WPDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAK +BggqhkjOPQQDAwNnADBkAjAXCfOHiFBar8jAQr9HX/VsaobgxCd05DhT1wV/GzTjxi+zygk8N53X +57hG8f2h4nECMEJZh0PUUd+60wkyWs6Iflc9nF9Ca/UHLbXwgpP5WW+uZPpY5Yse42O+tYHNbwKM +eQ== +-----END CERTIFICATE----- + +Certum Trusted Network CA +========================= +-----BEGIN CERTIFICATE----- +MIIDuzCCAqOgAwIBAgIDBETAMA0GCSqGSIb3DQEBBQUAMH4xCzAJBgNVBAYTAlBMMSIwIAYDVQQK +ExlVbml6ZXRvIFRlY2hub2xvZ2llcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkxIjAgBgNVBAMTGUNlcnR1bSBUcnVzdGVkIE5ldHdvcmsgQ0EwHhcNMDgxMDIy +MTIwNzM3WhcNMjkxMjMxMTIwNzM3WjB+MQswCQYDVQQGEwJQTDEiMCAGA1UEChMZVW5pemV0byBU +ZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +MSIwIAYDVQQDExlDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENBMIIBIjANBgkqhkiG9w0BAQEFAAOC +AQ8AMIIBCgKCAQEA4/t9o3K6wvDJFIf1awFO4W5AB7ptJ11/91sts1rHUV+rpDKmYYe2bg+G0jAC +l/jXaVehGDldamR5xgFZrDwxSjh80gTSSyjoIF87B6LMTXPb865Px1bVWqeWifrzq2jUI4ZZJ88J +J7ysbnKDHDBy3+Ci6dLhdHUZvSqeexVUBBvXQzmtVSjF4hq79MDkrjhJM8x2hZ85RdKknvISjFH4 +fOQtf/WsX+sWn7Et0brMkUJ3TCXJkDhv2/DM+44el1k+1WBO5gUo7Ul5E0u6SNsv+XLTOcr+H9g0 +cvW0QM8xAcPs3hEtF10fuFDRXhmnad4HMyjKUJX5p1TLVIZQRan5SQIDAQABo0IwQDAPBgNVHRMB +Af8EBTADAQH/MB0GA1UdDgQWBBQIds3LB/8k9sXN7buQvOKEN0Z19zAOBgNVHQ8BAf8EBAMCAQYw +DQYJKoZIhvcNAQEFBQADggEBAKaorSLOAT2mo/9i0Eidi15ysHhE49wcrwn9I0j6vSrEuVUEtRCj +jSfeC4Jj0O7eDDd5QVsisrCaQVymcODU0HfLI9MA4GxWL+FpDQ3Zqr8hgVDZBqWo/5U30Kr+4rP1 +mS1FhIrlQgnXdAIv94nYmem8J9RHjboNRhx3zxSkHLmkMcScKHQDNP8zGSal6Q10tz6XxnboJ5aj +Zt3hrvJBW8qYVoNzcOSGGtIxQbovvi0TWnZvTuhOgQ4/WwMioBK+ZlgRSssDxLQqKi2WF+A5VLxI +03YnnZotBqbJ7DnSq9ufmgsnAjUpsUCV5/nonFWIGUbWtzT1fs45mtk48VH3Tyw= +-----END CERTIFICATE----- + +TWCA Root Certification Authority +================================= +-----BEGIN CERTIFICATE----- +MIIDezCCAmOgAwIBAgIBATANBgkqhkiG9w0BAQUFADBfMQswCQYDVQQGEwJUVzESMBAGA1UECgwJ +VEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NBIFJvb3QgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMDgwODI4MDcyNDMzWhcNMzAxMjMxMTU1OTU5WjBfMQswCQYDVQQG +EwJUVzESMBAGA1UECgwJVEFJV0FOLUNBMRAwDgYDVQQLDAdSb290IENBMSowKAYDVQQDDCFUV0NB +IFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEK +AoIBAQCwfnK4pAOU5qfeCTiRShFAh6d8WWQUe7UREN3+v9XAu1bihSX0NXIP+FPQQeFEAcK0HMMx +QhZHhTMidrIKbw/lJVBPhYa+v5guEGcevhEFhgWQxFnQfHgQsIBct+HHK3XLfJ+utdGdIzdjp9xC +oi2SBBtQwXu4PhvJVgSLL1KbralW6cH/ralYhzC2gfeXRfwZVzsrb+RH9JlF/h3x+JejiB03HFyP +4HYlmlD4oFT/RJB2I9IyxsOrBr/8+7/zrX2SYgJbKdM1o5OaQ2RgXbL6Mv87BK9NQGr5x+PvI/1r +y+UPizgN7gr8/g+YnzAx3WxSZfmLgb4i4RxYA7qRG4kHAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIB +BjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqOFsmjd6LWvJPelSDGRjjCDWmujANBgkqhkiG +9w0BAQUFAAOCAQEAPNV3PdrfibqHDAhUaiBQkr6wQT25JmSDCi/oQMCXKCeCMErJk/9q56YAf4lC +mtYR5VPOL8zy2gXE/uJQxDqGfczafhAJO5I1KlOy/usrBdlsXebQ79NqZp4VKIV66IIArB6nCWlW +QtNoURi+VJq/REG6Sb4gumlc7rh3zc5sH62Dlhh9DrUUOYTxKOkto557HnpyWoOzeW/vtPzQCqVY +T0bf+215WfKEIlKuD8z7fDvnaspHYcN6+NOSBB+4IIThNlQWx0DeO4pz3N/GCUzf7Nr/1FNCocny +Yh0igzyXxfkZYiesZSLX0zzG5Y6yU8xJzrww/nsOM5D77dIUkR8Hrw== +-----END CERTIFICATE----- + +Security Communication RootCA2 +============================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIBADANBgkqhkiG9w0BAQsFADBdMQswCQYDVQQGEwJKUDElMCMGA1UEChMc +U0VDT00gVHJ1c3QgU3lzdGVtcyBDTy4sTFRELjEnMCUGA1UECxMeU2VjdXJpdHkgQ29tbXVuaWNh +dGlvbiBSb290Q0EyMB4XDTA5MDUyOTA1MDAzOVoXDTI5MDUyOTA1MDAzOVowXTELMAkGA1UEBhMC +SlAxJTAjBgNVBAoTHFNFQ09NIFRydXN0IFN5c3RlbXMgQ08uLExURC4xJzAlBgNVBAsTHlNlY3Vy 
+aXR5IENvbW11bmljYXRpb24gUm9vdENBMjCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB +ANAVOVKxUrO6xVmCxF1SrjpDZYBLx/KWvNs2l9amZIyoXvDjChz335c9S672XewhtUGrzbl+dp++ ++T42NKA7wfYxEUV0kz1XgMX5iZnK5atq1LXaQZAQwdbWQonCv/Q4EpVMVAX3NuRFg3sUZdbcDE3R +3n4MqzvEFb46VqZab3ZpUql6ucjrappdUtAtCms1FgkQhNBqyjoGADdH5H5XTz+L62e4iKrFvlNV +spHEfbmwhRkGeC7bYRr6hfVKkaHnFtWOojnflLhwHyg/i/xAXmODPIMqGplrz95Zajv8bxbXH/1K +EOtOghY6rCcMU/Gt1SSwawNQwS08Ft1ENCcadfsCAwEAAaNCMEAwHQYDVR0OBBYEFAqFqXdlBZh8 +QIH4D5csOPEK7DzPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEB +CwUAA4IBAQBMOqNErLlFsceTfsgLCkLfZOoc7llsCLqJX2rKSpWeeo8HxdpFcoJxDjrSzG+ntKEj +u/Ykn8sX/oymzsLS28yN/HH8AynBbF0zX2S2ZTuJbxh2ePXcokgfGT+Ok+vx+hfuzU7jBBJV1uXk +3fs+BXziHV7Gp7yXT2g69ekuCkO2r1dcYmh8t/2jioSgrGK+KwmHNPBqAbubKVY8/gA3zyNs8U6q +tnRGEmyR7jTV7JqR50S+kDFy1UkC9gLl9B/rfNmWVan/7Ir5mUf/NVoCqgTLiluHcSmRvaS0eg29 +mvVXIwAHIRc/SjnRBUkLp7Y3gaVdjKozXoEofKd9J+sAro03 +-----END CERTIFICATE----- + +Actalis Authentication Root CA +============================== +-----BEGIN CERTIFICATE----- +MIIFuzCCA6OgAwIBAgIIVwoRl0LE48wwDQYJKoZIhvcNAQELBQAwazELMAkGA1UEBhMCSVQxDjAM +BgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlzIFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UE +AwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290IENBMB4XDTExMDkyMjExMjIwMloXDTMwMDky +MjExMjIwMlowazELMAkGA1UEBhMCSVQxDjAMBgNVBAcMBU1pbGFuMSMwIQYDVQQKDBpBY3RhbGlz +IFMucC5BLi8wMzM1ODUyMDk2NzEnMCUGA1UEAwweQWN0YWxpcyBBdXRoZW50aWNhdGlvbiBSb290 +IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAp8bEpSmkLO/lGMWwUKNvUTufClrJ +wkg4CsIcoBh/kbWHuUA/3R1oHwiD1S0eiKD4j1aPbZkCkpAW1V8IbInX4ay8IMKx4INRimlNAJZa +by/ARH6jDuSRzVju3PvHHkVH3Se5CAGfpiEd9UEtL0z9KK3giq0itFZljoZUj5NDKd45RnijMCO6 +zfB9E1fAXdKDa0hMxKufgFpbOr3JpyI/gCczWw63igxdBzcIy2zSekciRDXFzMwujt0q7bd9Zg1f +YVEiVRvjRuPjPdA1YprbrxTIW6HMiRvhMCb8oJsfgadHHwTrozmSBp+Z07/T6k9QnBn+locePGX2 +oxgkg4YQ51Q+qDp2JE+BIcXjDwL4k5RHILv+1A7TaLndxHqEguNTVHnd25zS8gebLra8Pu2Fbe8l +EfKXGkJh90qX6IuxEAf6ZYGyojnP9zz/GPvG8VqLWeICrHuS0E4UT1lF9gxeKF+w6D9Fz8+vm2/7 +hNN3WpVvrJSEnu68wEqPSpP4RCHiMUVhUE4Q2OM1fEwZtN4Fv6MGn8i1zeQf1xcGDXqVdFUNaBr8 +EBtiZJ1t4JWgw5QHVw0U5r0F+7if5t+L4sbnfpb2U8WANFAoWPASUHEXMLrmeGO89LKtmyuy/uE5 +jF66CyCU3nuDuP/jVo23Eek7jPKxwV2dpAtMK9myGPW1n0sCAwEAAaNjMGEwHQYDVR0OBBYEFFLY +iDrIn3hm7YnzezhwlMkCAjbQMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUUtiIOsifeGbt +ifN7OHCUyQICNtAwDgYDVR0PAQH/BAQDAgEGMA0GCSqGSIb3DQEBCwUAA4ICAQALe3KHwGCmSUyI +WOYdiPcUZEim2FgKDk8TNd81HdTtBjHIgT5q1d07GjLukD0R0i70jsNjLiNmsGe+b7bAEzlgqqI0 +JZN1Ut6nna0Oh4lScWoWPBkdg/iaKWW+9D+a2fDzWochcYBNy+A4mz+7+uAwTc+G02UQGRjRlwKx +K3JCaKygvU5a2hi/a5iB0P2avl4VSM0RFbnAKVy06Ij3Pjaut2L9HmLecHgQHEhb2rykOLpn7VU+ +Xlff1ANATIGk0k9jpwlCCRT8AKnCgHNPLsBA2RF7SOp6AsDT6ygBJlh0wcBzIm2Tlf05fbsq4/aC +4yyXX04fkZT6/iyj2HYauE2yOE+b+h1IYHkm4vP9qdCa6HCPSXrW5b0KDtst842/6+OkfcvHlXHo +2qN8xcL4dJIEG4aspCJTQLas/kx2z/uUMsA1n3Y/buWQbqCmJqK4LL7RK4X9p2jIugErsWx0Hbhz +lefut8cl8ABMALJ+tguLHPPAUJ4lueAI3jZm/zel0btUZCzJJ7VLkn5l/9Mt4blOvH+kQSGQQXem +OR/qnuOf0GZvBeyqdn6/axag67XH/JJULysRJyU3eExRarDzzFhdFPFqSBX/wge2sY0PjlxQRrM9 +vwGYT7JZVEc+NHt4bVaTLnPqZih4zR0Uv6CPLy64Lo7yFIrM6bV8+2ydDKXhlg== +-----END CERTIFICATE----- + +Buypass Class 2 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMiBSb290IENBMB4X +DTEwMTAyNjA4MzgwM1oXDTQwMTAyNjA4MzgwM1owTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDIgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANfHXvfBB9R3+0Mh9PT1aeTuMgHbo4Yf5FkNuud1 
+g1Lr6hxhFUi7HQfKjK6w3Jad6sNgkoaCKHOcVgb/S2TwDCo3SbXlzwx87vFKu3MwZfPVL4O2fuPn +9Z6rYPnT8Z2SdIrkHJasW4DptfQxh6NR/Md+oW+OU3fUl8FVM5I+GC911K2GScuVr1QGbNgGE41b +/+EmGVnAJLqBcXmQRFBoJJRfuLMR8SlBYaNByyM21cHxMlAQTn/0hpPshNOOvEu/XAFOBz3cFIqU +CqTqc/sLUegTBxj6DvEr0VQVfTzh97QZQmdiXnfgolXsttlpF9U6r0TtSsWe5HonfOV116rLJeff +awrbD02TTqigzXsu8lkBarcNuAeBfos4GzjmCleZPe4h6KP1DBbdi+w0jpwqHAAVF41og9JwnxgI +zRFo1clrUs3ERo/ctfPYV3Me6ZQ5BL/T3jjetFPsaRyifsSP5BtwrfKi+fv3FmRmaZ9JUaLiFRhn +Bkp/1Wy1TbMz4GHrXb7pmA8y1x1LPC5aAVKRCfLf6o3YBkBjqhHk/sM3nhRSP/TizPJhk9H9Z2vX +Uq6/aKtAQ6BXNVN48FP4YUIHZMbXb5tMOA1jrGKvNouicwoN9SG9dKpN6nIDSdvHXx1iY8f93ZHs +M+71bbRuMGjeyNYmsHVee7QHIJihdjK4TWxPAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFMmAd+BikoL1RpzzuvdMw964o605MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAU18h9bqwOlI5LJKwbADJ784g7wbylp7ppHR/ehb8t/W2+xUbP6umwHJdELFx7rxP462s +A20ucS6vxOOto70MEae0/0qyexAQH6dXQbLArvQsWdZHEIjzIVEpMMpghq9Gqx3tOluwlN5E40EI +osHsHdb9T7bWR9AUC8rmyrV7d35BH16Dx7aMOZawP5aBQW9gkOLo+fsicdl9sz1Gv7SEr5AcD48S +aq/v7h56rgJKihcrdv6sVIkkLE8/trKnToyokZf7KcZ7XC25y2a2t6hbElGFtQl+Ynhw/qlqYLYd +DnkM/crqJIByw5c/8nerQyIKx+u2DISCLIBrQYoIwOula9+ZEsuK1V6ADJHgJgg2SMX6OBE1/yWD +LfJ6v9r9jv6ly0UsH8SIU653DtmadsWOLB2jutXsMq7Aqqz30XpN69QH4kj3Io6wpJ9qzo6ysmD0 +oyLQI+uUWnpp3Q+/QFesa1lQ2aOZ4W7+jQF5JyMV3pKdewlNWudLSDBaGOYKbeaP4NK75t98biGC +wWg5TbSYWGZizEqQXsP6JwSxeRV0mcy+rSDeJmAc61ZRpqPq5KM/p/9h3PFaTWwyI0PurKju7koS +CTxdccK+efrCh2gdC/1cacwG0Jp9VJkqyTkaGa9LKkPzY11aWOIv4x3kqdbQCtCev9eBCfHJxyYN +rJgWVqA= +-----END CERTIFICATE----- + +Buypass Class 3 Root CA +======================= +-----BEGIN CERTIFICATE----- +MIIFWTCCA0GgAwIBAgIBAjANBgkqhkiG9w0BAQsFADBOMQswCQYDVQQGEwJOTzEdMBsGA1UECgwU +QnV5cGFzcyBBUy05ODMxNjMzMjcxIDAeBgNVBAMMF0J1eXBhc3MgQ2xhc3MgMyBSb290IENBMB4X +DTEwMTAyNjA4Mjg1OFoXDTQwMTAyNjA4Mjg1OFowTjELMAkGA1UEBhMCTk8xHTAbBgNVBAoMFEJ1 +eXBhc3MgQVMtOTgzMTYzMzI3MSAwHgYDVQQDDBdCdXlwYXNzIENsYXNzIDMgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAKXaCpUWUOOV8l6ddjEGMnqb8RB2uACatVI2zSRH +sJ8YZLya9vrVediQYkwiL944PdbgqOkcLNt4EemOaFEVcsfzM4fkoF0LXOBXByow9c3EN3coTRiR +5r/VUv1xLXA+58bEiuPwKAv0dpihi4dVsjoT/Lc+JzeOIuOoTyrvYLs9tznDDgFHmV0ST9tD+leh +7fmdvhFHJlsTmKtdFoqwNxxXnUX/iJY2v7vKB3tvh2PX0DJq1l1sDPGzbjniazEuOQAnFN44wOwZ +ZoYS6J1yFhNkUsepNxz9gjDthBgd9K5c/3ATAOux9TN6S9ZV+AWNS2mw9bMoNlwUxFFzTWsL8TQH +2xc519woe2v1n/MuwU8XKhDzzMro6/1rqy6any2CbgTUUgGTLT2G/H783+9CHaZr77kgxve9oKeV +/afmiSTYzIw0bOIjL9kSGiG5VZFvC5F5GQytQIgLcOJ60g7YaEi7ghM5EFjp2CoHxhLbWNvSO1UQ +RwUVZ2J+GGOmRj8JDlQyXr8NYnon74Do29lLBlo3WiXQCBJ31G8JUJc9yB3D34xFMFbG02SrZvPA +Xpacw8Tvw3xrizp5f7NJzz3iiZ+gMEuFuZyUJHmPfWupRWgPK9Dx2hzLabjKSWJtyNBjYt1gD1iq +j6G8BaVmos8bdrKEZLFMOVLAMLrwjEsCsLa3AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFEe4zf/lb+74suwvTg75JbCOPGvDMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsF +AAOCAgEAACAjQTUEkMJAYmDv4jVM1z+s4jSQuKFvdvoWFqRINyzpkMLyPPgKn9iB5btb2iUspKdV +cSQy9sgL8rxq+JOssgfCX5/bzMiKqr5qb+FJEMwx14C7u8jYog5kV+qi9cKpMRXSIGrs/CIBKM+G +uIAeqcwRpTzyFrNHnfzSgCHEy9BHcEGhyoMZCCxt8l13nIoUE9Q2HJLw5QY33KbmkJs4j1xrG0aG +Q0JfPgEHU1RdZX33inOhmlRaHylDFCfChQ+1iHsaO5S3HWCntZznKWlXWpuTekMwGwPXYshApqr8 +ZORK15FTAaggiG6cX0S5y2CBNOxv033aSF/rtJC8LakcC6wc1aJoIIAE1vyxjy+7SjENSoYc6+I2 +KSb12tjE8nVhz36udmNKekBlk4f4HoCMhuWG1o8O/FMsYOgWYRqiPkN7zTlgVGr18okmAWiDSKIz +6MkEkbIRNBE+6tBDGR8Dk5AM/1E9V/RBbuHLoL7ryWPNbczk+DaqaJ3tvV2XcEQNtg413OEMXbug +UZTLfhbrES+jkkXITHHZvMmZUldGL1DPvTVp9D0VzgalLA8+9oG6lLvDu79leNKGef9JOxqDDPDe +eOzI8k1MGt6CKfjBWtrt7uYnXuhF0J0cUahoq0Tj0Itq4/g7u9xN12TyUb7mqqta6THuBrxzvxNi +Cp/HuZc= +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 3 
+============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwHhcNMDgx +MDAxMTAyOTU2WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDMwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQC9dZPwYiJvJK7genasfb3ZJNW4t/zN8ELg63iIVl6bmlQdTQyK +9tPPcPRStdiTBONGhnFBSivwKixVA9ZIw+A5OO3yXDw/RLyTPWGrTs0NvvAgJ1gORH8EGoel15YU +NpDQSXuhdfsaa3Ox+M6pCSzyU9XDFES4hqX2iys52qMzVNn6chr3IhUciJFrf2blw2qAsCTz34ZF +iP0Zf3WHHx+xGwpzJFu5ZeAsVMhg02YXP+HMVDNzkQI6pn97djmiH5a2OK61yJN0HZ65tOVgnS9W +0eDrXltMEnAMbEQgqxHY9Bn20pxSN+f6tsIxO0rUFJmtxxr1XV/6B7h8DR/Wgx6zAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS1A/d2O2GCahKqGFPr +AyGUv/7OyjANBgkqhkiG9w0BAQsFAAOCAQEAVj3vlNW92nOyWL6ukK2YJ5f+AbGwUgC4TeQbIXQb +fsDuXmkqJa9c1h3a0nnJ85cp4IaH3gRZD/FZ1GSFS5mvJQQeyUapl96Cshtwn5z2r3Ex3XsFpSzT +ucpH9sry9uetuUg/vBa3wW306gmv7PO15wWeph6KU1HWk4HMdJP2udqmJQV0eVp+QD6CSyYRMG7h +P0HHRwA11fXT91Q+gT3aSWqas+8QPebrb9HIIkfLzM8BMZLZGOMivgkeGj5asuRrDFR6fUNOuIml +e9eiPZaGzPImNC1qkp2aGtAw4l1OBLBfiyB+d8E9lYLRRpo7PHi4b6HQDWSieB4pTpPDpFQUWw== +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 2009 +============================== +-----BEGIN CERTIFICATE----- +MIIEMzCCAxugAwIBAgIDCYPzMA0GCSqGSIb3DQEBCwUAME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTAe +Fw0wOTExMDUwODM1NThaFw0yOTExMDUwODM1NThaME0xCzAJBgNVBAYTAkRFMRUwEwYDVQQKDAxE +LVRydXN0IEdtYkgxJzAlBgNVBAMMHkQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgMjAwOTCCASIw +DQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBANOySs96R+91myP6Oi/WUEWJNTrGa9v+2wBoqOAD +ER03UAifTUpolDWzU9GUY6cgVq/eUXjsKj3zSEhQPgrfRlWLJ23DEE0NkVJD2IfgXU42tSHKXzlA +BF9bfsyjxiupQB7ZNoTWSPOSHjRGICTBpFGOShrvUD9pXRl/RcPHAY9RySPocq60vFYJfxLLHLGv +KZAKyVXMD9O0Gu1HNVpK7ZxzBCHQqr0ME7UAyiZsxGsMlFqVlNpQmvH/pStmMaTJOKDfHR+4CS7z +p+hnUquVH+BGPtikw8paxTGA6Eian5Rp/hnd2HN8gcqW3o7tszIFZYQ05ub9VxC1X3a/L7AQDcUC +AwEAAaOCARowggEWMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFP3aFMSfMN4hvR5COfyrYyNJ +4PGEMA4GA1UdDwEB/wQEAwIBBjCB0wYDVR0fBIHLMIHIMIGAoH6gfIZ6bGRhcDovL2RpcmVjdG9y +eS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwUm9vdCUyMENsYXNzJTIwMyUyMENBJTIwMiUyMDIw +MDksTz1ELVRydXN0JTIwR21iSCxDPURFP2NlcnRpZmljYXRlcmV2b2NhdGlvbmxpc3QwQ6BBoD+G +PWh0dHA6Ly93d3cuZC10cnVzdC5uZXQvY3JsL2QtdHJ1c3Rfcm9vdF9jbGFzc18zX2NhXzJfMjAw +OS5jcmwwDQYJKoZIhvcNAQELBQADggEBAH+X2zDI36ScfSF6gHDOFBJpiBSVYEQBrLLpME+bUMJm +2H6NMLVwMeniacfzcNsgFYbQDfC+rAF1hM5+n02/t2A7nPPKHeJeaNijnZflQGDSNiH+0LS4F9p0 +o3/U37CYAqxva2ssJSRyoWXuJVrl5jLn8t+rSfrzkGkj2wTZ51xY/GXUl77M/C4KzCUqNQT4YJEV +dT1B/yMfGchs64JTBKbkTCJNjYy6zltz7GRUUG3RnFX7acM2w4y8PIWmawomDeCTmGCufsYkl4ph +X5GOZpIJhzbNi5stPvZR1FDUWSi9g/LMKHtThm3YJohw1+qRzT65ysCQblrGXnRl11z+o+I= +-----END CERTIFICATE----- + +D-TRUST Root Class 3 CA 2 EV 2009 +================================= +-----BEGIN CERTIFICATE----- +MIIEQzCCAyugAwIBAgIDCYP0MA0GCSqGSIb3DQEBCwUAMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTAeFw0wOTExMDUwODUwNDZaFw0yOTExMDUwODUwNDZaMFAxCzAJBgNVBAYTAkRFMRUwEwYDVQQK +DAxELVRydXN0IEdtYkgxKjAoBgNVBAMMIUQtVFJVU1QgUm9vdCBDbGFzcyAzIENBIDIgRVYgMjAw +OTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAJnxhDRwui+3MKCOvXwEz75ivJn9gpfS 
+egpnljgJ9hBOlSJzmY3aFS3nBfwZcyK3jpgAvDw9rKFs+9Z5JUut8Mxk2og+KbgPCdM03TP1YtHh +zRnp7hhPTFiu4h7WDFsVWtg6uMQYZB7jM7K1iXdODL/ZlGsTl28So/6ZqQTMFexgaDbtCHu39b+T +7WYxg4zGcTSHThfqr4uRjRxWQa4iN1438h3Z0S0NL2lRp75mpoo6Kr3HGrHhFPC+Oh25z1uxav60 +sUYgovseO3Dvk5h9jHOW8sXvhXCtKSb8HgQ+HKDYD8tSg2J87otTlZCpV6LqYQXY+U3EJ/pure35 +11H3a6UCAwEAAaOCASQwggEgMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFNOUikxiEyoZLsyv +cop9NteaHNxnMA4GA1UdDwEB/wQEAwIBBjCB3QYDVR0fBIHVMIHSMIGHoIGEoIGBhn9sZGFwOi8v +ZGlyZWN0b3J5LmQtdHJ1c3QubmV0L0NOPUQtVFJVU1QlMjBSb290JTIwQ2xhc3MlMjAzJTIwQ0El +MjAyJTIwRVYlMjAyMDA5LE89RC1UcnVzdCUyMEdtYkgsQz1ERT9jZXJ0aWZpY2F0ZXJldm9jYXRp +b25saXN0MEagRKBChkBodHRwOi8vd3d3LmQtdHJ1c3QubmV0L2NybC9kLXRydXN0X3Jvb3RfY2xh +c3NfM19jYV8yX2V2XzIwMDkuY3JsMA0GCSqGSIb3DQEBCwUAA4IBAQA07XtaPKSUiO8aEXUHL7P+ +PPoeUSbrh/Yp3uDx1MYkCenBz1UbtDDZzhr+BlGmFaQt77JLvyAoJUnRpjZ3NOhk31KxEcdzes05 +nsKtjHEh8lprr988TlWvsoRlFIm5d8sqMb7Po23Pb0iUMkZv53GMoKaEGTcH8gNFCSuGdXzfX2lX +ANtu2KZyIktQ1HWYVt+3GP9DQ1CuekR78HlR10M9p9OB0/DJT7naxpeG0ILD5EJt/rDiZE4OJudA +NCa1CInXCGNjOCd1HjPqbqjdn5lPdE2BiYBL3ZqXKVwvvoFBuYz/6n1gBp7N1z3TLqMVvKjmJuVv +w9y4AyHqnxbxLFS1 +-----END CERTIFICATE----- + +CA Disig Root R2 +================ +-----BEGIN CERTIFICATE----- +MIIFaTCCA1GgAwIBAgIJAJK4iNuwisFjMA0GCSqGSIb3DQEBCwUAMFIxCzAJBgNVBAYTAlNLMRMw +EQYDVQQHEwpCcmF0aXNsYXZhMRMwEQYDVQQKEwpEaXNpZyBhLnMuMRkwFwYDVQQDExBDQSBEaXNp +ZyBSb290IFIyMB4XDTEyMDcxOTA5MTUzMFoXDTQyMDcxOTA5MTUzMFowUjELMAkGA1UEBhMCU0sx +EzARBgNVBAcTCkJyYXRpc2xhdmExEzARBgNVBAoTCkRpc2lnIGEucy4xGTAXBgNVBAMTEENBIERp +c2lnIFJvb3QgUjIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCio8QACdaFXS1tFPbC +w3OeNcJxVX6B+6tGUODBfEl45qt5WDza/3wcn9iXAng+a0EE6UG9vgMsRfYvZNSrXaNHPWSb6Wia +xswbP7q+sos0Ai6YVRn8jG+qX9pMzk0DIaPY0jSTVpbLTAwAFjxfGs3Ix2ymrdMxp7zo5eFm1tL7 +A7RBZckQrg4FY8aAamkw/dLukO8NJ9+flXP04SXabBbeQTg06ov80egEFGEtQX6sx3dOy1FU+16S +GBsEWmjGycT6txOgmLcRK7fWV8x8nhfRyyX+hk4kLlYMeE2eARKmK6cBZW58Yh2EhN/qwGu1pSqV +g8NTEQxzHQuyRpDRQjrOQG6Vrf/GlK1ul4SOfW+eioANSW1z4nuSHsPzwfPrLgVv2RvPN3YEyLRa +5Beny912H9AZdugsBbPWnDTYltxhh5EF5EQIM8HauQhl1K6yNg3ruji6DOWbnuuNZt2Zz9aJQfYE +koopKW1rOhzndX0CcQ7zwOe9yxndnWCywmZgtrEE7snmhrmaZkCo5xHtgUUDi/ZnWejBBhG93c+A +Ak9lQHhcR1DIm+YfgXvkRKhbhZri3lrVx/k6RGZL5DJUfORsnLMOPReisjQS1n6yqEm70XooQL6i +Fh/f5DcfEXP7kAplQ6INfPgGAVUzfbANuPT1rqVCV3w2EYx7XsQDnYx5nQIDAQABo0IwQDAPBgNV +HRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUtZn4r7CU9eMg1gqtzk5WpC5u +Qu0wDQYJKoZIhvcNAQELBQADggIBACYGXnDnZTPIgm7ZnBc6G3pmsgH2eDtpXi/q/075KMOYKmFM +tCQSin1tERT3nLXK5ryeJ45MGcipvXrA1zYObYVybqjGom32+nNjf7xueQgcnYqfGopTpti72TVV +sRHFqQOzVju5hJMiXn7B9hJSi+osZ7z+Nkz1uM/Rs0mSO9MpDpkblvdhuDvEK7Z4bLQjb/D907Je +dR+Zlais9trhxTF7+9FGs9K8Z7RiVLoJ92Owk6Ka+elSLotgEqv89WBW7xBci8QaQtyDW2QOy7W8 +1k/BfDxujRNt+3vrMNDcTa/F1balTFtxyegxvug4BkihGuLq0t4SOVga/4AOgnXmt8kHbA7v/zjx +mHHEt38OFdAlab0inSvtBfZGR6ztwPDUO+Ls7pZbkBNOHlY667DvlruWIxG68kOGdGSVyCh13x01 +utI3gzhTODY7z2zp+WsO0PsE6E9312UBeIYMej4hYvF/Y3EMyZ9E26gnonW+boE+18DrG5gPcFw0 +sorMwIUY6256s/daoQe/qUKS82Ail+QUoQebTnbAjn39pCXHR+3/H3OszMOl6W8KjptlwlCFtaOg +UxLMVYdh84GuEEZhvUQhuMI9dM9+JDX6HAcOmz0iyu8xL4ysEr3vQCj8KWefshNPZiTEUxnpHikV +7+ZtsH8tZ/3zbBt1RqPlShfppNcL +-----END CERTIFICATE----- + +ACCVRAIZ1 +========= +-----BEGIN CERTIFICATE----- +MIIH0zCCBbugAwIBAgIIXsO3pkN/pOAwDQYJKoZIhvcNAQEFBQAwQjESMBAGA1UEAwwJQUNDVlJB +SVoxMRAwDgYDVQQLDAdQS0lBQ0NWMQ0wCwYDVQQKDARBQ0NWMQswCQYDVQQGEwJFUzAeFw0xMTA1 +MDUwOTM3MzdaFw0zMDEyMzEwOTM3MzdaMEIxEjAQBgNVBAMMCUFDQ1ZSQUlaMTEQMA4GA1UECwwH +UEtJQUNDVjENMAsGA1UECgwEQUNDVjELMAkGA1UEBhMCRVMwggIiMA0GCSqGSIb3DQEBAQUAA4IC 
+DwAwggIKAoICAQCbqau/YUqXry+XZpp0X9DZlv3P4uRm7x8fRzPCRKPfmt4ftVTdFXxpNRFvu8gM +jmoYHtiP2Ra8EEg2XPBjs5BaXCQ316PWywlxufEBcoSwfdtNgM3802/J+Nq2DoLSRYWoG2ioPej0 +RGy9ocLLA76MPhMAhN9KSMDjIgro6TenGEyxCQ0jVn8ETdkXhBilyNpAlHPrzg5XPAOBOp0KoVdD +aaxXbXmQeOW1tDvYvEyNKKGno6e6Ak4l0Squ7a4DIrhrIA8wKFSVf+DuzgpmndFALW4ir50awQUZ +0m/A8p/4e7MCQvtQqR0tkw8jq8bBD5L/0KIV9VMJcRz/RROE5iZe+OCIHAr8Fraocwa48GOEAqDG +WuzndN9wrqODJerWx5eHk6fGioozl2A3ED6XPm4pFdahD9GILBKfb6qkxkLrQaLjlUPTAYVtjrs7 +8yM2x/474KElB0iryYl0/wiPgL/AlmXz7uxLaL2diMMxs0Dx6M/2OLuc5NF/1OVYm3z61PMOm3WR +5LpSLhl+0fXNWhn8ugb2+1KoS5kE3fj5tItQo05iifCHJPqDQsGH+tUtKSpacXpkatcnYGMN285J +9Y0fkIkyF/hzQ7jSWpOGYdbhdQrqeWZ2iE9x6wQl1gpaepPluUsXQA+xtrn13k/c4LOsOxFwYIRK +Q26ZIMApcQrAZQIDAQABo4ICyzCCAscwfQYIKwYBBQUHAQEEcTBvMEwGCCsGAQUFBzAChkBodHRw +Oi8vd3d3LmFjY3YuZXMvZmlsZWFkbWluL0FyY2hpdm9zL2NlcnRpZmljYWRvcy9yYWl6YWNjdjEu +Y3J0MB8GCCsGAQUFBzABhhNodHRwOi8vb2NzcC5hY2N2LmVzMB0GA1UdDgQWBBTSh7Tj3zcnk1X2 +VuqB5TbMjB4/vTAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFNKHtOPfNyeTVfZW6oHlNsyM +Hj+9MIIBcwYDVR0gBIIBajCCAWYwggFiBgRVHSAAMIIBWDCCASIGCCsGAQUFBwICMIIBFB6CARAA +QQB1AHQAbwByAGkAZABhAGQAIABkAGUAIABDAGUAcgB0AGkAZgBpAGMAYQBjAGkA8wBuACAAUgBh +AO0AegAgAGQAZQAgAGwAYQAgAEEAQwBDAFYAIAAoAEEAZwBlAG4AYwBpAGEAIABkAGUAIABUAGUA +YwBuAG8AbABvAGcA7QBhACAAeQAgAEMAZQByAHQAaQBmAGkAYwBhAGMAaQDzAG4AIABFAGwAZQBj +AHQAcgDzAG4AaQBjAGEALAAgAEMASQBGACAAUQA0ADYAMAAxADEANQA2AEUAKQAuACAAQwBQAFMA +IABlAG4AIABoAHQAdABwADoALwAvAHcAdwB3AC4AYQBjAGMAdgAuAGUAczAwBggrBgEFBQcCARYk +aHR0cDovL3d3dy5hY2N2LmVzL2xlZ2lzbGFjaW9uX2MuaHRtMFUGA1UdHwROMEwwSqBIoEaGRGh0 +dHA6Ly93d3cuYWNjdi5lcy9maWxlYWRtaW4vQXJjaGl2b3MvY2VydGlmaWNhZG9zL3JhaXphY2N2 +MV9kZXIuY3JsMA4GA1UdDwEB/wQEAwIBBjAXBgNVHREEEDAOgQxhY2N2QGFjY3YuZXMwDQYJKoZI +hvcNAQEFBQADggIBAJcxAp/n/UNnSEQU5CmH7UwoZtCPNdpNYbdKl02125DgBS4OxnnQ8pdpD70E +R9m+27Up2pvZrqmZ1dM8MJP1jaGo/AaNRPTKFpV8M9xii6g3+CfYCS0b78gUJyCpZET/LtZ1qmxN +YEAZSUNUY9rizLpm5U9EelvZaoErQNV/+QEnWCzI7UiRfD+mAM/EKXMRNt6GGT6d7hmKG9Ww7Y49 +nCrADdg9ZuM8Db3VlFzi4qc1GwQA9j9ajepDvV+JHanBsMyZ4k0ACtrJJ1vnE5Bc5PUzolVt3OAJ +TS+xJlsndQAJxGJ3KQhfnlmstn6tn1QwIgPBHnFk/vk4CpYY3QIUrCPLBhwepH2NDd4nQeit2hW3 +sCPdK6jT2iWH7ehVRE2I9DZ+hJp4rPcOVkkO1jMl1oRQQmwgEh0q1b688nCBpHBgvgW1m54ERL5h +I6zppSSMEYCUWqKiuUnSwdzRp+0xESyeGabu4VXhwOrPDYTkF7eifKXeVSUG7szAh1xA2syVP1Xg +Nce4hL60Xc16gwFy7ofmXx2utYXGJt/mwZrpHgJHnyqobalbz+xFd3+YJ5oyXSrjhO7FmGYvliAd +3djDJ9ew+f7Zfc3Qn48LFFhRny+Lwzgt3uiP1o2HpPVWQxaZLPSkVrQ0uGE3ycJYgBugl6H8WY3p +EfbRD0tVNEYqi4Y7 +-----END CERTIFICATE----- + +TWCA Global Root CA +=================== +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgICDL4wDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCVFcxEjAQBgNVBAoT +CVRBSVdBTi1DQTEQMA4GA1UECxMHUm9vdCBDQTEcMBoGA1UEAxMTVFdDQSBHbG9iYWwgUm9vdCBD +QTAeFw0xMjA2MjcwNjI4MzNaFw0zMDEyMzExNTU5NTlaMFExCzAJBgNVBAYTAlRXMRIwEAYDVQQK +EwlUQUlXQU4tQ0ExEDAOBgNVBAsTB1Jvb3QgQ0ExHDAaBgNVBAMTE1RXQ0EgR2xvYmFsIFJvb3Qg +Q0EwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCwBdvI64zEbooh745NnHEKH1Jw7W2C +nJfF10xORUnLQEK1EjRsGcJ0pDFfhQKX7EMzClPSnIyOt7h52yvVavKOZsTuKwEHktSz0ALfUPZV +r2YOy+BHYC8rMjk1Ujoog/h7FsYYuGLWRyWRzvAZEk2tY/XTP3VfKfChMBwqoJimFb3u/Rk28OKR +Q4/6ytYQJ0lM793B8YVwm8rqqFpD/G2Gb3PpN0Wp8DbHzIh1HrtsBv+baz4X7GGqcXzGHaL3SekV +tTzWoWH1EfcFbx39Eb7QMAfCKbAJTibc46KokWofwpFFiFzlmLhxpRUZyXx1EcxwdE8tmx2RRP1W +KKD+u4ZqyPpcC1jcxkt2yKsi2XMPpfRaAok/T54igu6idFMqPVMnaR1sjjIsZAAmY2E2TqNGtz99 +sy2sbZCilaLOz9qC5wc0GZbpuCGqKX6mOL6OKUohZnkfs8O1CWfe1tQHRvMq2uYiN2DLgbYPoA/p +yJV/v1WRBXrPPRXAb94JlAGD1zQbzECl8LibZ9WYkTunhHiVJqRaCPgrdLQABDzfuBSO6N+pjWxn +kjMdwLfS7JLIvgm/LCkFbwJrnu+8vyq8W8BQj0FwcYeyTbcEqYSjMq+u7msXi7Kx/mzhkIyIqJdI 
+zshNy/MGz19qCkKxHh53L46g5pIOBvwFItIm4TFRfTLcDwIDAQABoyMwITAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAXzSBdu+WHdXltdkCY4QWwa6g +cFGn90xHNcgL1yg9iXHZqjNB6hQbbCEAwGxCGX6faVsgQt+i0trEfJdLjbDorMjupWkEmQqSpqsn +LhpNgb+E1HAerUf+/UqdM+DyucRFCCEK2mlpc3INvjT+lIutwx4116KD7+U4x6WFH6vPNOw/KP4M +8VeGTslV9xzU2KV9Bnpv1d8Q34FOIWWxtuEXeZVFBs5fzNxGiWNoRI2T9GRwoD2dKAXDOXC4Ynsg +/eTb6QihuJ49CcdP+yz4k3ZB3lLg4VfSnQO8d57+nile98FRYB/e2guyLXW3Q0iT5/Z5xoRdgFlg +lPx4mI88k1HtQJAH32RjJMtOcQWh15QaiDLxInQirqWm2BJpTGCjAu4r7NRjkgtevi92a6O2JryP +A9gK8kxkRr05YuWW6zRjESjMlfGt7+/cgFhI6Uu46mWs6fyAtbXIRfmswZ/ZuepiiI7E8UuDEq3m +i4TWnsLrgxifarsbJGAzcMzs9zLzXNl5fe+epP7JI8Mk7hWSsT2RTyaGvWZzJBPqpK5jwa19hAM8 +EHiGG3njxPPyBJUgriOCxLM6AGK/5jYk4Ve6xx6QddVfP5VhK8E7zeWzaGHQRiapIVJpLesux+t3 +zqY6tQMzT3bR51xUAV3LePTJDL/PEo4XLSNolOer/qmyKwbQBM0= +-----END CERTIFICATE----- + +TeliaSonera Root CA v1 +====================== +-----BEGIN CERTIFICATE----- +MIIFODCCAyCgAwIBAgIRAJW+FqD3LkbxezmCcvqLzZYwDQYJKoZIhvcNAQEFBQAwNzEUMBIGA1UE +CgwLVGVsaWFTb25lcmExHzAdBgNVBAMMFlRlbGlhU29uZXJhIFJvb3QgQ0EgdjEwHhcNMDcxMDE4 +MTIwMDUwWhcNMzIxMDE4MTIwMDUwWjA3MRQwEgYDVQQKDAtUZWxpYVNvbmVyYTEfMB0GA1UEAwwW +VGVsaWFTb25lcmEgUm9vdCBDQSB2MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMK+ +6yfwIaPzaSZVfp3FVRaRXP3vIb9TgHot0pGMYzHw7CTww6XScnwQbfQ3t+XmfHnqjLWCi65ItqwA +3GV17CpNX8GH9SBlK4GoRz6JI5UwFpB/6FcHSOcZrr9FZ7E3GwYq/t75rH2D+1665I+XZ75Ljo1k +B1c4VWk0Nj0TSO9P4tNmHqTPGrdeNjPUtAa9GAH9d4RQAEX1jF3oI7x+/jXh7VB7qTCNGdMJjmhn +Xb88lxhTuylixcpecsHHltTbLaC0H2kD7OriUPEMPPCs81Mt8Bz17Ww5OXOAFshSsCPN4D7c3TxH +oLs1iuKYaIu+5b9y7tL6pe0S7fyYGKkmdtwoSxAgHNN/Fnct7W+A90m7UwW7XWjH1Mh1Fj+JWov3 +F0fUTPHSiXk+TT2YqGHeOh7S+F4D4MHJHIzTjU3TlTazN19jY5szFPAtJmtTfImMMsJu7D0hADnJ +oWjiUIMusDor8zagrC/kb2HCUQk5PotTubtn2txTuXZZNp1D5SDgPTJghSJRt8czu90VL6R4pgd7 +gUY2BIbdeTXHlSw7sKMXNeVzH7RcWe/a6hBle3rQf5+ztCo3O3CLm1u5K7fsslESl1MpWtTwEhDc +TwK7EpIvYtQ/aUN8Ddb8WHUBiJ1YFkveupD/RwGJBmr2X7KQarMCpgKIv7NHfirZ1fpoeDVNAgMB +AAGjPzA9MA8GA1UdEwEB/wQFMAMBAf8wCwYDVR0PBAQDAgEGMB0GA1UdDgQWBBTwj1k4ALP1j5qW +DNXr+nuqF+gTEjANBgkqhkiG9w0BAQUFAAOCAgEAvuRcYk4k9AwI//DTDGjkk0kiP0Qnb7tt3oNm +zqjMDfz1mgbldxSR651Be5kqhOX//CHBXfDkH1e3damhXwIm/9fH907eT/j3HEbAek9ALCI18Bmx +0GtnLLCo4MBANzX2hFxc469CeP6nyQ1Q6g2EdvZR74NTxnr/DlZJLo961gzmJ1TjTQpgcmLNkQfW +pb/ImWvtxBnmq0wROMVvMeJuScg/doAmAyYp4Db29iBT4xdwNBedY2gea+zDTYa4EzAvXUYNR0PV +G6pZDrlcjQZIrXSHX8f8MVRBE+LHIQ6e4B4N4cB7Q4WQxYpYxmUKeFfyxiMPAdkgS94P+5KFdSpc +c41teyWRyu5FrgZLAMzTsVlQ2jqIOylDRl6XK1TOU2+NSueW+r9xDkKLfP0ooNBIytrEgUy7onOT +JsjrDNYmiLbAJM+7vVvrdX3pCI6GMyx5dwlppYn8s3CQh3aP0yK7Qs69cwsgJirQmz1wHiRszYd2 +qReWt88NkvuOGKmYSdGe/mBEciG5Ge3C9THxOUiIkCR1VBatzvT4aRRkOfujuLpwQMcnHL/EVlP6 +Y2XQ8xwOFvVrhlhNGNTkDY6lnVuR3HYkUD/GKvvZt5y11ubQ2egZixVxSK236thZiNSQvxaz2ems +WWFUyBy6ysHK4bkgTI86k4mloMy/0/Z1pHWWbVY= +-----END CERTIFICATE----- + +E-Tugra Certification Authority +=============================== +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIIamg+nFGby1MwDQYJKoZIhvcNAQELBQAwgbIxCzAJBgNVBAYTAlRSMQ8w +DQYDVQQHDAZBbmthcmExQDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamls +ZXJpIHZlIEhpem1ldGxlcmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBN +ZXJrZXppMSgwJgYDVQQDDB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MB4XDTEzMDMw +NTEyMDk0OFoXDTIzMDMwMzEyMDk0OFowgbIxCzAJBgNVBAYTAlRSMQ8wDQYDVQQHDAZBbmthcmEx +QDA+BgNVBAoMN0UtVHXEn3JhIEVCRyBCaWxpxZ9pbSBUZWtub2xvamlsZXJpIHZlIEhpem1ldGxl +cmkgQS7Fni4xJjAkBgNVBAsMHUUtVHVncmEgU2VydGlmaWthc3lvbiBNZXJrZXppMSgwJgYDVQQD +DB9FLVR1Z3JhIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A 
+MIICCgKCAgEA4vU/kwVRHoViVF56C/UYB4Oufq9899SKa6VjQzm5S/fDxmSJPZQuVIBSOTkHS0vd +hQd2h8y/L5VMzH2nPbxHD5hw+IyFHnSOkm0bQNGZDbt1bsipa5rAhDGvykPL6ys06I+XawGb1Q5K +CKpbknSFQ9OArqGIW66z6l7LFpp3RMih9lRozt6Plyu6W0ACDGQXwLWTzeHxE2bODHnv0ZEoq1+g +ElIwcxmOj+GMB6LDu0rw6h8VqO4lzKRG+Bsi77MOQ7osJLjFLFzUHPhdZL3Dk14opz8n8Y4e0ypQ +BaNV2cvnOVPAmJ6MVGKLJrD3fY185MaeZkJVgkfnsliNZvcHfC425lAcP9tDJMW/hkd5s3kc91r0 +E+xs+D/iWR+V7kI+ua2oMoVJl0b+SzGPWsutdEcf6ZG33ygEIqDUD13ieU/qbIWGvaimzuT6w+Gz +rt48Ue7LE3wBf4QOXVGUnhMMti6lTPk5cDZvlsouDERVxcr6XQKj39ZkjFqzAQqptQpHF//vkUAq +jqFGOjGY5RH8zLtJVor8udBhmm9lbObDyz51Sf6Pp+KJxWfXnUYTTjF2OySznhFlhqt/7x3U+Lzn +rFpct1pHXFXOVbQicVtbC/DP3KBhZOqp12gKY6fgDT+gr9Oq0n7vUaDmUStVkhUXU8u3Zg5mTPj5 +dUyQ5xJwx0UCAwEAAaNjMGEwHQYDVR0OBBYEFC7j27JJ0JxUeVz6Jyr+zE7S6E5UMA8GA1UdEwEB +/wQFMAMBAf8wHwYDVR0jBBgwFoAULuPbsknQnFR5XPonKv7MTtLoTlQwDgYDVR0PAQH/BAQDAgEG +MA0GCSqGSIb3DQEBCwUAA4ICAQAFNzr0TbdF4kV1JI+2d1LoHNgQk2Xz8lkGpD4eKexd0dCrfOAK +kEh47U6YA5n+KGCRHTAduGN8qOY1tfrTYXbm1gdLymmasoR6d5NFFxWfJNCYExL/u6Au/U5Mh/jO +XKqYGwXgAEZKgoClM4so3O0409/lPun++1ndYYRP0lSWE2ETPo+Aab6TR7U1Q9Jauz1c77NCR807 +VRMGsAnb/WP2OogKmW9+4c4bU2pEZiNRCHu8W1Ki/QY3OEBhj0qWuJA3+GbHeJAAFS6LrVE1Uweo +a2iu+U48BybNCAVwzDk/dr2l02cmAYamU9JgO3xDf1WKvJUawSg5TB9D0pH0clmKuVb8P7Sd2nCc +dlqMQ1DujjByTd//SffGqWfZbawCEeI6FiWnWAjLb1NBnEg4R2gz0dfHj9R0IdTDBZB6/86WiLEV +KV0jq9BgoRJP3vQXzTLlyb/IQ639Lo7xr+L0mPoSHyDYwKcMhcWQ9DstliaxLL5Mq+ux0orJ23gT +Dx4JnW2PAJ8C2sH6H3p6CcRK5ogql5+Ji/03X186zjhZhkuvcQu02PJwT58yE+Owp1fl2tpDy4Q0 +8ijE6m30Ku/Ba3ba+367hTzSU8JNvnHhRdH9I2cNE3X7z2VnIp2usAnRCf8dNL/+I5c30jn6PQ0G +C7TbO6Orb1wdtn7os4I07QZcJA== +-----END CERTIFICATE----- + +T-TeleSec GlobalRoot Class 2 +============================ +-----BEGIN CERTIFICATE----- +MIIDwzCCAqugAwIBAgIBATANBgkqhkiG9w0BAQsFADCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoM +IlQtU3lzdGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBU +cnVzdCBDZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwHhcNMDgx +MDAxMTA0MDE0WhcNMzMxMDAxMjM1OTU5WjCBgjELMAkGA1UEBhMCREUxKzApBgNVBAoMIlQtU3lz +dGVtcyBFbnRlcnByaXNlIFNlcnZpY2VzIEdtYkgxHzAdBgNVBAsMFlQtU3lzdGVtcyBUcnVzdCBD +ZW50ZXIxJTAjBgNVBAMMHFQtVGVsZVNlYyBHbG9iYWxSb290IENsYXNzIDIwggEiMA0GCSqGSIb3 +DQEBAQUAA4IBDwAwggEKAoIBAQCqX9obX+hzkeXaXPSi5kfl82hVYAUdAqSzm1nzHoqvNK38DcLZ +SBnuaY/JIPwhqgcZ7bBcrGXHX+0CfHt8LRvWurmAwhiCFoT6ZrAIxlQjgeTNuUk/9k9uN0goOA/F +vudocP05l03Sx5iRUKrERLMjfTlH6VJi1hKTXrcxlkIF+3anHqP1wvzpesVsqXFP6st4vGCvx970 +2cu+fjOlbpSD8DT6IavqjnKgP6TeMFvvhk1qlVtDRKgQFRzlAVfFmPHmBiiRqiDFt1MmUUOyCxGV +WOHAD3bZwI18gfNycJ5v/hqO2V81xrJvNHy+SE/iWjnX2J14np+GPgNeGYtEotXHAgMBAAGjQjBA +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBS/WSA2AHmgoCJrjNXy +YdK4LMuCSjANBgkqhkiG9w0BAQsFAAOCAQEAMQOiYQsfdOhyNsZt+U2e+iKo4YFWz827n+qrkRk4 +r6p8FU3ztqONpfSO9kSpp+ghla0+AGIWiPACuvxhI+YzmzB6azZie60EI4RYZeLbK4rnJVM3YlNf +vNoBYimipidx5joifsFvHZVwIEoHNN/q/xWA5brXethbdXwFeilHfkCoMRN3zUA7tFFHei4R40cR +3p1m0IvVVGb6g1XqfMIpiRvpb7PO4gWEyS8+eIVibslfwXhjdFjASBgMmTnrpMwatXlajRWc2BQN +9noHV8cigwUtPJslJj0Ys6lDfMjIq2SPDqO/nBudMNva0Bkuqjzx+zOAduTNrRlPBSeOE6Fuwg== +-----END CERTIFICATE----- + +Atos TrustedRoot 2011 +===================== +-----BEGIN CERTIFICATE----- +MIIDdzCCAl+gAwIBAgIIXDPLYixfszIwDQYJKoZIhvcNAQELBQAwPDEeMBwGA1UEAwwVQXRvcyBU +cnVzdGVkUm9vdCAyMDExMQ0wCwYDVQQKDARBdG9zMQswCQYDVQQGEwJERTAeFw0xMTA3MDcxNDU4 +MzBaFw0zMDEyMzEyMzU5NTlaMDwxHjAcBgNVBAMMFUF0b3MgVHJ1c3RlZFJvb3QgMjAxMTENMAsG +A1UECgwEQXRvczELMAkGA1UEBhMCREUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCV +hTuXbyo7LjvPpvMpNb7PGKw+qtn4TaA+Gke5vJrf8v7MPkfoepbCJI419KkM/IL9bcFyYie96mvr 
+54rMVD6QUM+A1JX76LWC1BTFtqlVJVfbsVD2sGBkWXppzwO3bw2+yj5vdHLqqjAqc2K+SZFhyBH+ +DgMq92og3AIVDV4VavzjgsG1xZ1kCWyjWZgHJ8cblithdHFsQ/H3NYkQ4J7sVaE3IqKHBAUsR320 +HLliKWYoyrfhk/WklAOZuXCFteZI6o1Q/NnezG8HDt0Lcp2AMBYHlT8oDv3FdU9T1nSatCQujgKR +z3bFmx5VdJx4IbHwLfELn8LVlhgf8FQieowHAgMBAAGjfTB7MB0GA1UdDgQWBBSnpQaxLKYJYO7R +l+lwrrw7GWzbITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFKelBrEspglg7tGX6XCuvDsZ +bNshMBgGA1UdIAQRMA8wDQYLKwYBBAGwLQMEAQEwDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEB +CwUAA4IBAQAmdzTblEiGKkGdLD4GkGDEjKwLVLgfuXvTBznk+j57sj1O7Z8jvZfza1zv7v1Apt+h +k6EKhqzvINB5Ab149xnYJDE0BAGmuhWawyfc2E8PzBhj/5kPDpFrdRbhIfzYJsdHt6bPWHJxfrrh +TZVHO8mvbaG0weyJ9rQPOLXiZNwlz6bb65pcmaHFCN795trV1lpFDMS3wrUU77QR/w4VtfX128a9 +61qn8FYiqTxlVMYVqL2Gns2Dlmh6cYGJ4Qvh6hEbaAjMaZ7snkGeRDImeuKHCnE96+RapNLbxc3G +3mB/ufNPRJLvKrcYPqcZ2Qt9sTdBQrC6YB3y/gkRsPCHe6ed +-----END CERTIFICATE----- + +QuoVadis Root CA 1 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIUeFhfLq0sGUvjNwc1NBMotZbUZZMwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMSBHMzAeFw0xMjAxMTIxNzI3NDRaFw00MjAxMTIxNzI3NDRaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDEg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCgvlAQjunybEC0BJyFuTHK3C3kEakE +PBtVwedYMB0ktMPvhd6MLOHBPd+C5k+tR4ds7FtJwUrVu4/sh6x/gpqG7D0DmVIB0jWerNrwU8lm +PNSsAgHaJNM7qAJGr6Qc4/hzWHa39g6QDbXwz8z6+cZM5cOGMAqNF34168Xfuw6cwI2H44g4hWf6 +Pser4BOcBRiYz5P1sZK0/CPTz9XEJ0ngnjybCKOLXSoh4Pw5qlPafX7PGglTvF0FBM+hSo+LdoIN +ofjSxxR3W5A2B4GbPgb6Ul5jxaYA/qXpUhtStZI5cgMJYr2wYBZupt0lwgNm3fME0UDiTouG9G/l +g6AnhF4EwfWQvTA9xO+oabw4m6SkltFi2mnAAZauy8RRNOoMqv8hjlmPSlzkYZqn0ukqeI1RPToV +7qJZjqlc3sX5kCLliEVx3ZGZbHqfPT2YfF72vhZooF6uCyP8Wg+qInYtyaEQHeTTRCOQiJ/GKubX +9ZqzWB4vMIkIG1SitZgj7Ah3HJVdYdHLiZxfokqRmu8hqkkWCKi9YSgxyXSthfbZxbGL0eUQMk1f +iyA6PEkfM4VZDdvLCXVDaXP7a3F98N/ETH3Goy7IlXnLc6KOTk0k+17kBL5yG6YnLUlamXrXXAkg +t3+UuU/xDRxeiEIbEbfnkduebPRq34wGmAOtzCjvpUfzUwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUo5fW816iEOGrRZ88F2Q87gFwnMwwDQYJKoZI +hvcNAQELBQADggIBABj6W3X8PnrHX3fHyt/PX8MSxEBd1DKquGrX1RUVRpgjpeaQWxiZTOOtQqOC +MTaIzen7xASWSIsBx40Bz1szBpZGZnQdT+3Btrm0DWHMY37XLneMlhwqI2hrhVd2cDMT/uFPpiN3 +GPoajOi9ZcnPP/TJF9zrx7zABC4tRi9pZsMbj/7sPtPKlL92CiUNqXsCHKnQO18LwIE6PWThv6ct +Tr1NxNgpxiIY0MWscgKCP6o6ojoilzHdCGPDdRS5YCgtW2jgFqlmgiNR9etT2DGbe+m3nUvriBbP ++V04ikkwj+3x6xn0dxoxGE1nVGwvb2X52z3sIexe9PSLymBlVNFxZPT5pqOBMzYzcfCkeF9OrYMh +3jRJjehZrJ3ydlo28hP0r+AJx2EqbPfgna67hkooby7utHnNkDPDs3b69fBsnQGQ+p6Q9pxyz0fa +wx/kNSBT8lTR32GDpgLiJTjehTItXnOQUl1CxM49S+H5GYQd1aJQzEH7QRTDvdbJWqNjZgKAvQU6 +O0ec7AAmTPWIUb+oI38YB7AL7YsmoWTTYUrrXJ/es69nA7Mf3W1daWhpq1467HxpvMc7hU6eFbm0 +FU/DlXpY18ls6Wy58yljXrQs8C097Vpl4KlbQMJImYFtnh8GKjwStIsPm6Ik8KaN1nrgS7ZklmOV +hMJKzRwuJIczYOXD +-----END CERTIFICATE----- + +QuoVadis Root CA 2 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIURFc0JFuBiZs18s64KztbpybwdSgwDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMiBHMzAeFw0xMjAxMTIxODU5MzJaFw00MjAxMTIxODU5MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDIg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQChriWyARjcV4g/Ruv5r+LrI3HimtFh +ZiFfqq8nUeVuGxbULX1QsFN3vXg6YOJkApt8hpvWGo6t/x8Vf9WVHhLL5hSEBMHfNrMWn4rjyduY +NM7YMxcoRvynyfDStNVNCXJJ+fKH46nafaF9a7I6JaltUkSs+L5u+9ymc5GQYaYDFCDy54ejiK2t +oIz/pgslUiXnFgHVy7g1gQyjO/Dh4fxaXc6AcW34Sas+O7q414AB+6XrW7PFXmAqMaCvN+ggOp+o 
+MiwMzAkd056OXbxMmO7FGmh77FOm6RQ1o9/NgJ8MSPsc9PG/Srj61YxxSscfrf5BmrODXfKEVu+l +V0POKa2Mq1W/xPtbAd0jIaFYAI7D0GoT7RPjEiuA3GfmlbLNHiJuKvhB1PLKFAeNilUSxmn1uIZo +L1NesNKqIcGY5jDjZ1XHm26sGahVpkUG0CM62+tlXSoREfA7T8pt9DTEceT/AFr2XK4jYIVz8eQQ +sSWu1ZK7E8EM4DnatDlXtas1qnIhO4M15zHfeiFuuDIIfR0ykRVKYnLP43ehvNURG3YBZwjgQQvD +6xVu+KQZ2aKrr+InUlYrAoosFCT5v0ICvybIxo/gbjh9Uy3l7ZizlWNof/k19N+IxWA1ksB8aRxh +lRbQ694Lrz4EEEVlWFA4r0jyWbYW8jwNkALGcC4BrTwV1wIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQU7edvdlq/YOxJW8ald7tyFnGbxD0wDQYJKoZI +hvcNAQELBQADggIBAJHfgD9DCX5xwvfrs4iP4VGyvD11+ShdyLyZm3tdquXK4Qr36LLTn91nMX66 +AarHakE7kNQIXLJgapDwyM4DYvmL7ftuKtwGTTwpD4kWilhMSA/ohGHqPHKmd+RCroijQ1h5fq7K +pVMNqT1wvSAZYaRsOPxDMuHBR//47PERIjKWnML2W2mWeyAMQ0GaW/ZZGYjeVYg3UQt4XAoeo0L9 +x52ID8DyeAIkVJOviYeIyUqAHerQbj5hLja7NQ4nlv1mNDthcnPxFlxHBlRJAHpYErAK74X9sbgz +dWqTHBLmYF5vHX/JHyPLhGGfHoJE+V+tYlUkmlKY7VHnoX6XOuYvHxHaU4AshZ6rNRDbIl9qxV6X +U/IyAgkwo1jwDQHVcsaxfGl7w/U2Rcxhbl5MlMVerugOXou/983g7aEOGzPuVBj+D77vfoRrQ+Nw +mNtddbINWQeFFSM51vHfqSYP1kjHs6Yi9TM3WpVHn3u6GBVv/9YUZINJ0gpnIdsPNWNgKCLjsZWD +zYWm3S8P52dSbrsvhXz1SnPnxT7AvSESBT/8twNJAlvIJebiVDj1eYeMHVOyToV7BjjHLPj4sHKN +JeV3UvQDHEimUF+IIDBu8oJDqz2XhOdT+yHBTw8imoa4WSr2Rz0ZiC3oheGe7IUIarFsNMkd7Egr +O3jtZsSOeWmD3n+M +-----END CERTIFICATE----- + +QuoVadis Root CA 3 G3 +===================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIULvWbAiin23r/1aOp7r0DoM8Sah0wDQYJKoZIhvcNAQELBQAwSDELMAkG +A1UEBhMCQk0xGTAXBgNVBAoTEFF1b1ZhZGlzIExpbWl0ZWQxHjAcBgNVBAMTFVF1b1ZhZGlzIFJv +b3QgQ0EgMyBHMzAeFw0xMjAxMTIyMDI2MzJaFw00MjAxMTIyMDI2MzJaMEgxCzAJBgNVBAYTAkJN +MRkwFwYDVQQKExBRdW9WYWRpcyBMaW1pdGVkMR4wHAYDVQQDExVRdW9WYWRpcyBSb290IENBIDMg +RzMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCzyw4QZ47qFJenMioKVjZ/aEzHs286 +IxSR/xl/pcqs7rN2nXrpixurazHb+gtTTK/FpRp5PIpM/6zfJd5O2YIyC0TeytuMrKNuFoM7pmRL +Mon7FhY4futD4tN0SsJiCnMK3UmzV9KwCoWdcTzeo8vAMvMBOSBDGzXRU7Ox7sWTaYI+FrUoRqHe +6okJ7UO4BUaKhvVZR74bbwEhELn9qdIoyhA5CcoTNs+cra1AdHkrAj80//ogaX3T7mH1urPnMNA3 +I4ZyYUUpSFlob3emLoG+B01vr87ERRORFHAGjx+f+IdpsQ7vw4kZ6+ocYfx6bIrc1gMLnia6Et3U +VDmrJqMz6nWB2i3ND0/kA9HvFZcba5DFApCTZgIhsUfei5pKgLlVj7WiL8DWM2fafsSntARE60f7 +5li59wzweyuxwHApw0BiLTtIadwjPEjrewl5qW3aqDCYz4ByA4imW0aucnl8CAMhZa634RylsSqi +Md5mBPfAdOhx3v89WcyWJhKLhZVXGqtrdQtEPREoPHtht+KPZ0/l7DxMYIBpVzgeAVuNVejH38DM +dyM0SXV89pgR6y3e7UEuFAUCf+D+IOs15xGsIs5XPd7JMG0QA4XN8f+MFrXBsj6IbGB/kE+V9/Yt +rQE5BwT6dYB9v0lQ7e/JxHwc64B+27bQ3RP+ydOc17KXqQIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUxhfQvKjqAkPyGwaZXSuQILnXnOQwDQYJKoZI +hvcNAQELBQADggIBADRh2Va1EodVTd2jNTFGu6QHcrxfYWLopfsLN7E8trP6KZ1/AvWkyaiTt3px +KGmPc+FSkNrVvjrlt3ZqVoAh313m6Tqe5T72omnHKgqwGEfcIHB9UqM+WXzBusnIFUBhynLWcKzS +t/Ac5IYp8M7vaGPQtSCKFWGafoaYtMnCdvvMujAWzKNhxnQT5WvvoxXqA/4Ti2Tk08HS6IT7SdEQ +TXlm66r99I0xHnAUrdzeZxNMgRVhvLfZkXdxGYFgu/BYpbWcC/ePIlUnwEsBbTuZDdQdm2NnL9Du +DcpmvJRPpq3t/O5jrFc/ZSXPsoaP0Aj/uHYUbt7lJ+yreLVTubY/6CD50qi+YUbKh4yE8/nxoGib +Ih6BJpsQBJFxwAYf3KDTuVan45gtf4Od34wrnDKOMpTwATwiKp9Dwi7DmDkHOHv8XgBCH/MyJnmD +hPbl8MFREsALHgQjDFSlTC9JxUrRtm5gDWv8a4uFJGS3iQ6rJUdbPM9+Sb3H6QrG2vd+DhcI00iX +0HGS8A85PjRqHH3Y8iKuu2n0M7SmSFXRDw4m6Oy2Cy2nhTXN/VnIn9HNPlopNLk9hM6xZdRZkZFW +dSHBd575euFgndOtBBj0fOtek49TSiIp+EgrPk2GrFt/ywaZWWDYWGWVjUTR939+J399roD1B0y2 +PpxxVJkES/1Y+Zj0 +-----END CERTIFICATE----- + +DigiCert Assured ID Root G2 +=========================== +-----BEGIN CERTIFICATE----- +MIIDljCCAn6gAwIBAgIQC5McOtY5Z+pnI7/Dr5r0SzANBgkqhkiG9w0BAQsFADBlMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQw 
+IgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIwHhcNMTMwODAxMTIwMDAwWhcNMzgw +MTE1MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQL +ExB3d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzIw +ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDZ5ygvUj82ckmIkzTz+GoeMVSAn61UQbVH +35ao1K+ALbkKz3X9iaV9JPrjIgwrvJUXCzO/GU1BBpAAvQxNEP4HteccbiJVMWWXvdMX0h5i89vq +bFCMP4QMls+3ywPgym2hFEwbid3tALBSfK+RbLE4E9HpEgjAALAcKxHad3A2m67OeYfcgnDmCXRw +VWmvo2ifv922ebPynXApVfSr/5Vh88lAbx3RvpO704gqu52/clpWcTs/1PPRCv4o76Pu2ZmvA9OP +YLfykqGxvYmJHzDNw6YuYjOuFgJ3RFrngQo8p0Quebg/BLxcoIfhG69Rjs3sLPr4/m3wOnyqi+Rn +lTGNAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTO +w0q5mVXyuNtgv6l+vVa1lzan1jANBgkqhkiG9w0BAQsFAAOCAQEAyqVVjOPIQW5pJ6d1Ee88hjZv +0p3GeDgdaZaikmkuOGybfQTUiaWxMTeKySHMq2zNixya1r9I0jJmwYrA8y8678Dj1JGG0VDjA9tz +d29KOVPt3ibHtX2vK0LRdWLjSisCx1BL4GnilmwORGYQRI+tBev4eaymG+g3NJ1TyWGqolKvSnAW +hsI6yLETcDbYz+70CjTVW0z9B5yiutkBclzzTcHdDrEcDcRjvq30FPuJ7KJBDkzMyFdA0G4Dqs0M +jomZmWzwPDCvON9vvKO+KSAnq3T/EyJ43pdSVR6DtVQgA+6uwE9W3jfMw3+qBCe703e4YtsXfJwo +IhNzbM8m9Yop5w== +-----END CERTIFICATE----- + +DigiCert Assured ID Root G3 +=========================== +-----BEGIN CERTIFICATE----- +MIICRjCCAc2gAwIBAgIQC6Fa+h3foLVJRK/NJKBs7DAKBggqhkjOPQQDAzBlMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSQwIgYD +VQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBlMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSQwIgYDVQQDExtEaWdpQ2VydCBBc3N1cmVkIElEIFJvb3QgRzMwdjAQ +BgcqhkjOPQIBBgUrgQQAIgNiAAQZ57ysRGXtzbg/WPuNsVepRC0FFfLvC/8QdJ+1YlJfZn4f5dwb +RXkLzMZTCp2NXQLZqVneAlr2lSoOjThKiknGvMYDOAdfVdp+CW7if17QRSAPWXYQ1qAk8C3eNvJs +KTmjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBTL0L2p4ZgF +UaFNN6KDec6NHSrkhDAKBggqhkjOPQQDAwNnADBkAjAlpIFFAmsSS3V0T8gj43DydXLefInwz5Fy +YZ5eEJJZVrmDxxDnOOlYJjZ91eQ0hjkCMHw2U/Aw5WJjOpnitqM7mzT6HtoQknFekROn3aRukswy +1vUhZscv6pZjamVFkpUBtA== +-----END CERTIFICATE----- + +DigiCert Global Root G2 +======================= +-----BEGIN CERTIFICATE----- +MIIDjjCCAnagAwIBAgIQAzrx5qcRqaC7KGSxHQn65TANBgkqhkiG9w0BAQsFADBhMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAw +HgYDVQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMjAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUx +MjAwMDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3 +dy5kaWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEcyMIIBIjANBgkq +hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAuzfNNNx7a8myaJCtSnX/RrohCgiN9RlUyfuI2/Ou8jqJ +kTx65qsGGmvPrC3oXgkkRLpimn7Wo6h+4FR1IAWsULecYxpsMNzaHxmx1x7e/dfgy5SDN67sH0NO +3Xss0r0upS/kqbitOtSZpLYl6ZtrAGCSYP9PIUkY92eQq2EGnI/yuum06ZIya7XzV+hdG82MHauV +BJVJ8zUtluNJbd134/tJS7SsVQepj5WztCO7TG1F8PapspUwtP1MVYwnSlcUfIKdzXOS0xZKBgyM +UNGPHgm+F6HmIcr9g+UQvIOlCsRnKPZzFBQ9RnbDhxSJITRNrw9FDKZJobq7nMWxM4MphQIDAQAB +o0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUTiJUIBiV5uNu +5g/6+rkS7QYXjzkwDQYJKoZIhvcNAQELBQADggEBAGBnKJRvDkhj6zHd6mcY1Yl9PMWLSn/pvtsr +F9+wX3N3KjITOYFnQoQj8kVnNeyIv/iPsGEMNKSuIEyExtv4NeF22d+mQrvHRAiGfzZ0JFrabA0U +WTW98kndth/Jsw1HKj2ZL7tcu7XUIOGZX1NGFdtom/DzMNU+MeKNhJ7jitralj41E6Vf8PlwUHBH +QRFXGU7Aj64GxJUTFy8bJZ918rGOmaFvE7FBcf6IKshPECBV1/MUReXgRPTqh5Uykw7+U0b6LJ3/ +iyK5S9kJRaTepLiaWN0bfVKfjllDiIGknibVb63dDcY3fe0Dkhvld1927jyNxF1WW6LZZm6zNTfl +MrY= +-----END CERTIFICATE----- + +DigiCert Global Root G3 +======================= +-----BEGIN CERTIFICATE----- 
+MIICPzCCAcWgAwIBAgIQBVVWvPJepDU1w6QP1atFcjAKBggqhkjOPQQDAzBhMQswCQYDVQQGEwJV +UzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSAwHgYD +VQQDExdEaWdpQ2VydCBHbG9iYWwgUm9vdCBHMzAeFw0xMzA4MDExMjAwMDBaFw0zODAxMTUxMjAw +MDBaMGExCzAJBgNVBAYTAlVTMRUwEwYDVQQKEwxEaWdpQ2VydCBJbmMxGTAXBgNVBAsTEHd3dy5k +aWdpY2VydC5jb20xIDAeBgNVBAMTF0RpZ2lDZXJ0IEdsb2JhbCBSb290IEczMHYwEAYHKoZIzj0C +AQYFK4EEACIDYgAE3afZu4q4C/sLfyHS8L6+c/MzXRq8NOrexpu80JX28MzQC7phW1FGfp4tn+6O +YwwX7Adw9c+ELkCDnOg/QW07rdOkFFk2eJ0DQ+4QE2xy3q6Ip6FrtUPOZ9wj/wMco+I+o0IwQDAP +BgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjAdBgNVHQ4EFgQUs9tIpPmhxdiuNkHMEWNp +Yim8S8YwCgYIKoZIzj0EAwMDaAAwZQIxAK288mw/EkrRLTnDCgmXc/SINoyIJ7vmiI1Qhadj+Z4y +3maTD/HMsQmP3Wyr+mt/oAIwOWZbwmSNuJ5Q3KjVSaLtx9zRSX8XAbjIho9OjIgrqJqpisXRAL34 +VOKa5Vt8sycX +-----END CERTIFICATE----- + +DigiCert Trusted Root G4 +======================== +-----BEGIN CERTIFICATE----- +MIIFkDCCA3igAwIBAgIQBZsbV56OITLiOQe9p3d1XDANBgkqhkiG9w0BAQwFADBiMQswCQYDVQQG +EwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3d3cuZGlnaWNlcnQuY29tMSEw +HwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwHhcNMTMwODAxMTIwMDAwWhcNMzgwMTE1 +MTIwMDAwWjBiMQswCQYDVQQGEwJVUzEVMBMGA1UEChMMRGlnaUNlcnQgSW5jMRkwFwYDVQQLExB3 +d3cuZGlnaWNlcnQuY29tMSEwHwYDVQQDExhEaWdpQ2VydCBUcnVzdGVkIFJvb3QgRzQwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC/5pBzaN675F1KPDAiMGkz7MKnJS7JIT3yithZwuEp +pz1Yq3aaza57G4QNxDAf8xukOBbrVsaXbR2rsnnyyhHS5F/WBTxSD1Ifxp4VpX6+n6lXFllVcq9o +k3DCsrp1mWpzMpTREEQQLt+C8weE5nQ7bXHiLQwb7iDVySAdYyktzuxeTsiT+CFhmzTrBcZe7Fsa +vOvJz82sNEBfsXpm7nfISKhmV1efVFiODCu3T6cw2Vbuyntd463JT17lNecxy9qTXtyOj4DatpGY +QJB5w3jHtrHEtWoYOAMQjdjUN6QuBX2I9YI+EJFwq1WCQTLX2wRzKm6RAXwhTNS8rhsDdV14Ztk6 +MUSaM0C/CNdaSaTC5qmgZ92kJ7yhTzm1EVgX9yRcRo9k98FpiHaYdj1ZXUJ2h4mXaXpI8OCiEhtm +mnTK3kse5w5jrubU75KSOp493ADkRSWJtppEGSt+wJS00mFt6zPZxd9LBADMfRyVw4/3IbKyEbe7 +f/LVjHAsQWCqsWMYRJUadmJ+9oCw++hkpjPRiQfhvbfmQ6QYuKZ3AeEPlAwhHbJUKSWJbOUOUlFH +dL4mrLZBdd56rF+NP8m800ERElvlEFDrMcXKchYiCd98THU/Y+whX8QgUWtvsauGi0/C1kVfnSD8 +oR7FwI+isX4KJpn15GkvmB0t9dmpsh3lGwIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MA4GA1Ud +DwEB/wQEAwIBhjAdBgNVHQ4EFgQU7NfjgtJxXWRM3y5nP+e6mK4cD08wDQYJKoZIhvcNAQEMBQAD +ggIBALth2X2pbL4XxJEbw6GiAI3jZGgPVs93rnD5/ZpKmbnJeFwMDF/k5hQpVgs2SV1EY+CtnJYY +ZhsjDT156W1r1lT40jzBQ0CuHVD1UvyQO7uYmWlrx8GnqGikJ9yd+SeuMIW59mdNOj6PWTkiU0Tr +yF0Dyu1Qen1iIQqAyHNm0aAFYF/opbSnr6j3bTWcfFqK1qI4mfN4i/RN0iAL3gTujJtHgXINwBQy +7zBZLq7gcfJW5GqXb5JQbZaNaHqasjYUegbyJLkJEVDXCLG4iXqEI2FCKeWjzaIgQdfRnGTZ6iah +ixTXTBmyUEFxPT9NcCOGDErcgdLMMpSEDQgJlxxPwO5rIHQw0uA5NBCFIRUBCOhVMt5xSdkoF1BN +5r5N0XWs0Mr7QbhDparTwwVETyw2m+L64kW4I1NsBm9nVX9GtUw/bihaeSbSpKhil9Ie4u1Ki7wb +/UdKDd9nZn6yW0HQO+T0O/QEY+nvwlQAUaCKKsnOeMzV6ocEGLPOr0mIr/OSmbaz5mEP0oUA51Aa +5BuVnRmhuZyxm7EAHu/QD09CbMkKvO5D+jpxpchNJqU1/YldvIViHTLSoCtU7ZpXwdv6EM8Zt4tK +G48BtieVU+i2iW1bvGjUI+iLUaJW+fCmgKDWHrO8Dw9TdSmq6hN35N6MgSGtBxBHEa2HPQfRdbzP +82Z+ +-----END CERTIFICATE----- + +COMODO RSA Certification Authority +================================== +-----BEGIN CERTIFICATE----- +MIIF2DCCA8CgAwIBAgIQTKr5yttjb+Af907YWwOGnTANBgkqhkiG9w0BAQwFADCBhTELMAkGA1UE +BhMCR0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgG +A1UEChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMTE5MDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBhTELMAkGA1UEBhMC +R0IxGzAZBgNVBAgTEkdyZWF0ZXIgTWFuY2hlc3RlcjEQMA4GA1UEBxMHU2FsZm9yZDEaMBgGA1UE +ChMRQ09NT0RPIENBIExpbWl0ZWQxKzApBgNVBAMTIkNPTU9ETyBSU0EgQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCR6FSS0gpWsawNJN3Fz0Rn 
+dJkrN6N9I3AAcbxT38T6KhKPS38QVr2fcHK3YX/JSw8Xpz3jsARh7v8Rl8f0hj4K+j5c+ZPmNHrZ +FGvnnLOFoIJ6dq9xkNfs/Q36nGz637CC9BR++b7Epi9Pf5l/tfxnQ3K9DADWietrLNPtj5gcFKt+ +5eNu/Nio5JIk2kNrYrhV/erBvGy2i/MOjZrkm2xpmfh4SDBF1a3hDTxFYPwyllEnvGfDyi62a+pG +x8cgoLEfZd5ICLqkTqnyg0Y3hOvozIFIQ2dOciqbXL1MGyiKXCJ7tKuY2e7gUYPDCUZObT6Z+pUX +2nwzV0E8jVHtC7ZcryxjGt9XyD+86V3Em69FmeKjWiS0uqlWPc9vqv9JWL7wqP/0uK3pN/u6uPQL +OvnoQ0IeidiEyxPx2bvhiWC4jChWrBQdnArncevPDt09qZahSL0896+1DSJMwBGB7FY79tOi4lu3 +sgQiUpWAk2nojkxl8ZEDLXB0AuqLZxUpaVICu9ffUGpVRr+goyhhf3DQw6KqLCGqR84onAZFdr+C +GCe01a60y1Dma/RMhnEw6abfFobg2P9A3fvQQoh/ozM6LlweQRGBY84YcWsr7KaKtzFcOmpH4MN5 +WdYgGq/yapiqcrxXStJLnbsQ/LBMQeXtHT1eKJ2czL+zUdqnR+WEUwIDAQABo0IwQDAdBgNVHQ4E +FgQUu69+Aj36pvE8hI6t7jiY7NkyMtQwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQEMBQADggIBAArx1UaEt65Ru2yyTUEUAJNMnMvlwFTPoCWOAvn9sKIN9SCYPBMt +rFaisNZ+EZLpLrqeLppysb0ZRGxhNaKatBYSaVqM4dc+pBroLwP0rmEdEBsqpIt6xf4FpuHA1sj+ +nq6PK7o9mfjYcwlYRm6mnPTXJ9OV2jeDchzTc+CiR5kDOF3VSXkAKRzH7JsgHAckaVd4sjn8OoSg +tZx8jb8uk2IntznaFxiuvTwJaP+EmzzV1gsD41eeFPfR60/IvYcjt7ZJQ3mFXLrrkguhxuhoqEwW +sRqZCuhTLJK7oQkYdQxlqHvLI7cawiiFwxv/0Cti76R7CZGYZ4wUAc1oBmpjIXUDgIiKboHGhfKp +pC3n9KUkEEeDys30jXlYsQab5xoq2Z0B15R97QNKyvDb6KkBPvVWmckejkk9u+UJueBPSZI9FoJA +zMxZxuY67RIuaTxslbH9qh17f4a+Hg4yRvv7E491f0yLS0Zj/gA0QHDBw7mh3aZw4gSzQbzpgJHq +ZJx64SIDqZxubw5lT2yHh17zbqD5daWbQOhTsiedSrnAdyGN/4fy3ryM7xfft0kL0fJuMAsaDk52 +7RH89elWsn2/x20Kk4yl0MC2Hb46TpSi125sC8KKfPog88Tk5c0NqMuRkrF8hey1FGlmDoLnzc7I +LaZRfyHBNVOFBkpdn627G190 +-----END CERTIFICATE----- + +USERTrust RSA Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIIF3jCCA8agAwIBAgIQAf1tMPyjylGoG7xkDjUDLTANBgkqhkiG9w0BAQwFADCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK +ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UE +BhMCVVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQK +ExVUaGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBSU0EgQ2VydGlmaWNh +dGlvbiBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCAEmUXNg7D2wiz +0KxXDXbtzSfTTK1Qg2HiqiBNCS1kCdzOiZ/MPans9s/B3PHTsdZ7NygRK0faOca8Ohm0X6a9fZ2j +Y0K2dvKpOyuR+OJv0OwWIJAJPuLodMkYtJHUYmTbf6MG8YgYapAiPLz+E/CHFHv25B+O1ORRxhFn +RghRy4YUVD+8M/5+bJz/Fp0YvVGONaanZshyZ9shZrHUm3gDwFA66Mzw3LyeTP6vBZY1H1dat//O ++T23LLb2VN3I5xI6Ta5MirdcmrS3ID3KfyI0rn47aGYBROcBTkZTmzNg95S+UzeQc0PzMsNT79uq +/nROacdrjGCT3sTHDN/hMq7MkztReJVni+49Vv4M0GkPGw/zJSZrM233bkf6c0Plfg6lZrEpfDKE +Y1WJxA3Bk1QwGROs0303p+tdOmw1XNtB1xLaqUkL39iAigmTYo61Zs8liM2EuLE/pDkP2QKe6xJM +lXzzawWpXhaDzLhn4ugTncxbgtNMs+1b/97lc6wjOy0AvzVVdAlJ2ElYGn+SNuZRkg7zJn0cTRe8 +yexDJtC/QV9AqURE9JnnV4eeUB9XVKg+/XRjL7FQZQnmWEIuQxpMtPAlR1n6BB6T1CZGSlCBst6+ +eLf8ZxXhyVeEHg9j1uliutZfVS7qXMYoCAQlObgOK6nyTJccBz8NUvXt7y+CDwIDAQABo0IwQDAd +BgNVHQ4EFgQUU3m/WqorSs9UgOHYm8Cd8rIDZsswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQF +MAMBAf8wDQYJKoZIhvcNAQEMBQADggIBAFzUfA3P9wF9QZllDHPFUp/L+M+ZBn8b2kMVn54CVVeW +FPFSPCeHlCjtHzoBN6J2/FNQwISbxmtOuowhT6KOVWKR82kV2LyI48SqC/3vqOlLVSoGIG1VeCkZ +7l8wXEskEVX/JJpuXior7gtNn3/3ATiUFJVDBwn7YKnuHKsSjKCaXqeYalltiz8I+8jRRa8YFWSQ +Eg9zKC7F4iRO/Fjs8PRF/iKz6y+O0tlFYQXBl2+odnKPi4w2r78NBc5xjeambx9spnFixdjQg3IM +8WcRiQycE0xyNN+81XHfqnHd4blsjDwSXWXavVcStkNr/+XeTWYRUc+ZruwXtuhxkYzeSf7dNXGi +FSeUHM9h4ya7b6NnJSFd5t0dCy5oGzuCr+yDZ4XUmFF0sbmZgIn/f3gZXHlKYC6SQK5MNyosycdi +yA5d9zZbyuAlJQG03RoHnHcAP9Dc1ew91Pq7P8yF1m9/qS3fuQL39ZeatTXaw2ewh0qpKJ4jjv9c 
+J2vhsE/zB+4ALtRZh8tSQZXq9EfX7mRBVXyNWQKV3WKdwrnuWih0hKWbt5DHDAff9Yk2dDLWKMGw +sAvgnEzDHNb842m1R0aBL6KCq9NjRHDEjf8tM7qtj3u1cIiuPhnPQCjY/MiQu12ZIvVS5ljFH4gx +Q+6IHdfGjjxDah2nGN59PRbxYvnKkKj9 +-----END CERTIFICATE----- + +USERTrust ECC Certification Authority +===================================== +-----BEGIN CERTIFICATE----- +MIICjzCCAhWgAwIBAgIQXIuZxVqUxdJxVt7NiYDMJjAKBggqhkjOPQQDAzCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwHhcNMTAwMjAxMDAwMDAwWhcNMzgwMTE4MjM1OTU5WjCBiDELMAkGA1UEBhMC +VVMxEzARBgNVBAgTCk5ldyBKZXJzZXkxFDASBgNVBAcTC0plcnNleSBDaXR5MR4wHAYDVQQKExVU +aGUgVVNFUlRSVVNUIE5ldHdvcmsxLjAsBgNVBAMTJVVTRVJUcnVzdCBFQ0MgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQarFRaqfloI+d61SRvU8Za2EurxtW2 +0eZzca7dnNYMYf3boIkDuAUU7FfO7l0/4iGzzvfUinngo4N+LZfQYcTxmdwlkWOrfzCjtHDix6Ez +nPO/LlxTsV+zfTJ/ijTjeXmjQjBAMB0GA1UdDgQWBBQ64QmG1M8ZwpZ2dEl23OA1xmNjmjAOBgNV +HQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNoADBlAjA2Z6EWCNzklwBB +HU6+4WMBzzuqQhFkoJ2UOQIReVx7Hfpkue4WQrO/isIJxOzksU0CMQDpKmFHjFJKS04YcPbWRNZu +9YO6bVi9JNlWSOrvxKJGgYhqOkbRqZtNyWHa0V1Xahg= +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R5 +=========================== +-----BEGIN CERTIFICATE----- +MIICHjCCAaSgAwIBAgIRYFlJ4CYuu1X5CneKcflK2GwwCgYIKoZIzj0EAwMwUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMB4XDTEyMTExMzAwMDAwMFoXDTM4MDExOTAzMTQwN1owUDEkMCIGA1UECxMb +R2xvYmFsU2lnbiBFQ0MgUm9vdCBDQSAtIFI1MRMwEQYDVQQKEwpHbG9iYWxTaWduMRMwEQYDVQQD +EwpHbG9iYWxTaWduMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAER0UOlvt9Xb/pOdEh+J8LttV7HpI6 +SFkc8GIxLcB6KP4ap1yztsyX50XUWPrRd21DosCHZTQKH3rd6zwzocWdTaRvQZU4f8kehOvRnkmS +h5SHDDqFSmafnVmTTZdhBoZKo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQUPeYpSJvqB8ohREom3m7e0oPQn1kwCgYIKoZIzj0EAwMDaAAwZQIxAOVpEslu28Yx +uglB4Zf4+/2a4n0Sye18ZNPLBSWLVtmg515dTguDnFt2KaAJJiFqYgIwcdK1j1zqO+F4CYWodZI7 +yFz9SO8NdCKoCOJuxUnOxwy8p2Fp8fc74SrL+SvzZpA3 +-----END CERTIFICATE----- + +Staat der Nederlanden EV Root CA +================================ +-----BEGIN CERTIFICATE----- +MIIFcDCCA1igAwIBAgIEAJiWjTANBgkqhkiG9w0BAQsFADBYMQswCQYDVQQGEwJOTDEeMBwGA1UE +CgwVU3RhYXQgZGVyIE5lZGVybGFuZGVuMSkwJwYDVQQDDCBTdGFhdCBkZXIgTmVkZXJsYW5kZW4g +RVYgUm9vdCBDQTAeFw0xMDEyMDgxMTE5MjlaFw0yMjEyMDgxMTEwMjhaMFgxCzAJBgNVBAYTAk5M +MR4wHAYDVQQKDBVTdGFhdCBkZXIgTmVkZXJsYW5kZW4xKTAnBgNVBAMMIFN0YWF0IGRlciBOZWRl +cmxhbmRlbiBFViBSb290IENBMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA48d+ifkk +SzrSM4M1LGns3Amk41GoJSt5uAg94JG6hIXGhaTK5skuU6TJJB79VWZxXSzFYGgEt9nCUiY4iKTW +O0Cmws0/zZiTs1QUWJZV1VD+hq2kY39ch/aO5ieSZxeSAgMs3NZmdO3dZ//BYY1jTw+bbRcwJu+r +0h8QoPnFfxZpgQNH7R5ojXKhTbImxrpsX23Wr9GxE46prfNeaXUmGD5BKyF/7otdBwadQ8QpCiv8 +Kj6GyzyDOvnJDdrFmeK8eEEzduG/L13lpJhQDBXd4Pqcfzho0LKmeqfRMb1+ilgnQ7O6M5HTp5gV +XJrm0w912fxBmJc+qiXbj5IusHsMX/FjqTf5m3VpTCgmJdrV8hJwRVXj33NeN/UhbJCONVrJ0yPr +08C+eKxCKFhmpUZtcALXEPlLVPxdhkqHz3/KRawRWrUgUY0viEeXOcDPusBCAUCZSCELa6fS/ZbV +0b5GnUngC6agIk440ME8MLxwjyx1zNDFjFE7PZQIZCZhfbnDZY8UnCHQqv0XcgOPvZuM5l5Tnrmd +74K74bzickFbIZTTRTeU0d8JOV3nI6qaHcptqAqGhYqCvkIH1vI4gnPah1vlPNOePqc7nvQDs/nx +fRN0Av+7oeX6AHkcpmZBiFxgV6YuCcS6/ZrPpx9Aw7vMWgpVSzs4dlG4Y4uElBbmVvMCAwEAAaNC +MEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFP6rAJCYniT8qcwa +ivsnuL8wbqg7MA0GCSqGSIb3DQEBCwUAA4ICAQDPdyxuVr5Os7aEAJSrR8kN0nbHhp8dB9O2tLsI +eK9p0gtJ3jPFrK3CiAJ9Brc1AsFgyb/E6JTe1NOpEyVa/m6irn0F3H3zbPB+po3u2dfOWBfoqSmu 
+c0iH55vKbimhZF8ZE/euBhD/UcabTVUlT5OZEAFTdfETzsemQUHSv4ilf0X8rLiltTMMgsT7B/Zq +5SWEXwbKwYY5EdtYzXc7LMJMD16a4/CrPmEbUCTCwPTxGfARKbalGAKb12NMcIxHowNDXLldRqAN +b/9Zjr7dn3LDWyvfjFvO5QxGbJKyCqNMVEIYFRIYvdr8unRu/8G2oGTYqV9Vrp9canaW2HNnh/tN +f1zuacpzEPuKqf2evTY4SUmH9A4U8OmHuD+nT3pajnnUk+S7aFKErGzp85hwVXIy+TSrK0m1zSBi +5Dp6Z2Orltxtrpfs/J92VoguZs9btsmksNcFuuEnL5O7Jiqik7Ab846+HUCjuTaPPoIaGl6I6lD4 +WeKDRikL40Rc4ZW2aZCaFG+XroHPaO+Zmr615+F/+PoTRxZMzG0IQOeLeG9QgkRQP2YGiqtDhFZK +DyAthg710tvSeopLzaXoTvFeJiUBWSOgftL2fiFX1ye8FVdMpEbB4IMeDExNH08GGeL5qPQ6gqGy +eUN51q1veieQA6TqJIc/2b3Z6fJfUEkc7uzXLg== +-----END CERTIFICATE----- + +IdenTrust Commercial Root CA 1 +============================== +-----BEGIN CERTIFICATE----- +MIIFYDCCA0igAwIBAgIQCgFCgAAAAUUjyES1AAAAAjANBgkqhkiG9w0BAQsFADBKMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBS +b290IENBIDEwHhcNMTQwMTE2MTgxMjIzWhcNMzQwMTE2MTgxMjIzWjBKMQswCQYDVQQGEwJVUzES +MBAGA1UEChMJSWRlblRydXN0MScwJQYDVQQDEx5JZGVuVHJ1c3QgQ29tbWVyY2lhbCBSb290IENB +IDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnUBneP5k91DNG8W9RYYKyqU+PZ4ld +hNlT3Qwo2dfw/66VQ3KZ+bVdfIrBQuExUHTRgQ18zZshq0PirK1ehm7zCYofWjK9ouuU+ehcCuz/ +mNKvcbO0U59Oh++SvL3sTzIwiEsXXlfEU8L2ApeN2WIrvyQfYo3fw7gpS0l4PJNgiCL8mdo2yMKi +1CxUAGc1bnO/AljwpN3lsKImesrgNqUZFvX9t++uP0D1bVoE/c40yiTcdCMbXTMTEl3EASX2MN0C +XZ/g1Ue9tOsbobtJSdifWwLziuQkkORiT0/Br4sOdBeo0XKIanoBScy0RnnGF7HamB4HWfp1IYVl +3ZBWzvurpWCdxJ35UrCLvYf5jysjCiN2O/cz4ckA82n5S6LgTrx+kzmEB/dEcH7+B1rlsazRGMzy +NeVJSQjKVsk9+w8YfYs7wRPCTY/JTw436R+hDmrfYi7LNQZReSzIJTj0+kuniVyc0uMNOYZKdHzV +WYfCP04MXFL0PfdSgvHqo6z9STQaKPNBiDoT7uje/5kdX7rL6B7yuVBgwDHTc+XvvqDtMwt0viAg +xGds8AgDelWAf0ZOlqf0Hj7h9tgJ4TNkK2PXMl6f+cB7D3hvl7yTmvmcEpB4eoCHFddydJxVdHix +uuFucAS6T6C6aMN7/zHwcz09lCqxC0EOoP5NiGVreTO01wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU7UQZwNPwBovupHu+QucmVMiONnYwDQYJKoZI +hvcNAQELBQADggIBAA2ukDL2pkt8RHYZYR4nKM1eVO8lvOMIkPkp165oCOGUAFjvLi5+U1KMtlwH +6oi6mYtQlNeCgN9hCQCTrQ0U5s7B8jeUeLBfnLOic7iPBZM4zY0+sLj7wM+x8uwtLRvM7Kqas6pg +ghstO8OEPVeKlh6cdbjTMM1gCIOQ045U8U1mwF10A0Cj7oV+wh93nAbowacYXVKV7cndJZ5t+qnt +ozo00Fl72u1Q8zW/7esUTTHHYPTa8Yec4kjixsU3+wYQ+nVZZjFHKdp2mhzpgq7vmrlR94gjmmmV +YjzlVYA211QC//G5Xc7UI2/YRYRKW2XviQzdFKcgyxilJbQN+QHwotL0AMh0jqEqSI5l2xPE4iUX +feu+h1sXIFRRk0pTAwvsXcoz7WL9RccvW9xYoIA55vrX/hMUpu09lEpCdNTDd1lzzY9GvlU47/ro +kTLql1gEIt44w8y8bckzOmoKaT+gyOpyj4xjhiO9bTyWnpXgSUyqorkqG5w2gXjtw+hG4iZZRHUe +2XWJUc0QhJ1hYMtd+ZciTY6Y5uN/9lu7rs3KSoFrXgvzUeF0K+l+J6fZmUlO+KWA2yUPHGNiiskz +Z2s8EIPGrd6ozRaOjfAHN3Gf8qv8QfXBi+wAN10J5U6A7/qxXDgGpRtK4dw4LTzcqx+QGtVKnO7R +cGzM7vRX+Bi6hG6H +-----END CERTIFICATE----- + +IdenTrust Public Sector Root CA 1 +================================= +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCgFCgAAAAUUjz0Z8AAAAAjANBgkqhkiG9w0BAQsFADBNMQswCQYDVQQG +EwJVUzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3Rv +ciBSb290IENBIDEwHhcNMTQwMTE2MTc1MzMyWhcNMzQwMTE2MTc1MzMyWjBNMQswCQYDVQQGEwJV +UzESMBAGA1UEChMJSWRlblRydXN0MSowKAYDVQQDEyFJZGVuVHJ1c3QgUHVibGljIFNlY3RvciBS +b290IENBIDEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2IpT8pEiv6EdrCvsnduTy +P4o7ekosMSqMjbCpwzFrqHd2hCa2rIFCDQjrVVi7evi8ZX3yoG2LqEfpYnYeEe4IFNGyRBb06tD6 +Hi9e28tzQa68ALBKK0CyrOE7S8ItneShm+waOh7wCLPQ5CQ1B5+ctMlSbdsHyo+1W/CD80/HLaXI +rcuVIKQxKFdYWuSNG5qrng0M8gozOSI5Cpcu81N3uURF/YTLNiCBWS2ab21ISGHKTN9T0a9SvESf +qy9rg3LvdYDaBjMbXcjaY8ZNzaxmMc3R3j6HEDbhuaR672BQssvKplbgN6+rNBM5Jeg5ZuSYeqoS +mJxZZoY+rfGwyj4GD3vwEUs3oERte8uojHH01bWRNszwFcYr3lEXsZdMUD2xlVl8BX0tIdUAvwFn 
+ol57plzy9yLxkA2T26pEUWbMfXYD62qoKjgZl3YNa4ph+bz27nb9cCvdKTz4Ch5bQhyLVi9VGxyh +LrXHFub4qjySjmm2AcG1hp2JDws4lFTo6tyePSW8Uybt1as5qsVATFSrsrTZ2fjXctscvG29ZV/v +iDUqZi/u9rNl8DONfJhBaUYPQxxp+pu10GFqzcpL2UyQRqsVWaFHVCkugyhfHMKiq3IXAAaOReyL +4jM9f9oZRORicsPfIsbyVtTdX5Vy7W1f90gDW/3FKqD2cyOEEBsB5wIDAQABo0IwQDAOBgNVHQ8B +Af8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU43HgntinQtnbcZFrlJPrw6PRFKMw +DQYJKoZIhvcNAQELBQADggIBAEf63QqwEZE4rU1d9+UOl1QZgkiHVIyqZJnYWv6IAcVYpZmxI1Qj +t2odIFflAWJBF9MJ23XLblSQdf4an4EKwt3X9wnQW3IV5B4Jaj0z8yGa5hV+rVHVDRDtfULAj+7A +mgjVQdZcDiFpboBhDhXAuM/FSRJSzL46zNQuOAXeNf0fb7iAaJg9TaDKQGXSc3z1i9kKlT/YPyNt +GtEqJBnZhbMX73huqVjRI9PHE+1yJX9dsXNw0H8GlwmEKYBhHfpe/3OsoOOJuBxxFcbeMX8S3OFt +m6/n6J91eEyrRjuazr8FGF1NFTwWmhlQBJqymm9li1JfPFgEKCXAZmExfrngdbkaqIHWchezxQMx +NRF4eKLg6TCMf4DfWN88uieW4oA0beOY02QnrEh+KHdcxiVhJfiFDGX6xDIvpZgF5PgLZxYWxoK4 +Mhn5+bl53B/N66+rDt0b20XkeucC4pVd/GnwU2lhlXV5C15V5jgclKlZM57IcXR5f1GJtshquDDI +ajjDbp7hNxbqBWJMWxJH7ae0s1hWx0nzfxJoCTFx8G34Tkf71oXuxVhAGaQdp/lLQzfcaFpPz+vC +ZHTetBXZ9FRUGi8c15dxVJCO2SCdUyt/q4/i6jC8UDfv8Ue1fXwsBOxonbRJRBD0ckscZOf85muQ +3Wl9af0AVqW3rLatt8o+Ae+c +-----END CERTIFICATE----- + +Entrust Root Certification Authority - G2 +========================================= +-----BEGIN CERTIFICATE----- +MIIEPjCCAyagAwIBAgIESlOMKDANBgkqhkiG9w0BAQsFADCBvjELMAkGA1UEBhMCVVMxFjAUBgNV +BAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVy +bXMxOTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ug +b25seTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIw +HhcNMDkwNzA3MTcyNTU0WhcNMzAxMjA3MTc1NTU0WjCBvjELMAkGA1UEBhMCVVMxFjAUBgNVBAoT +DUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVnYWwtdGVybXMx +OTA3BgNVBAsTMChjKSAyMDA5IEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXplZCB1c2Ugb25s +eTEyMDAGA1UEAxMpRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IC0gRzIwggEi +MA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC6hLZy254Ma+KZ6TABp3bqMriVQRrJ2mFOWHLP +/vaCeb9zYQYKpSfYs1/TRU4cctZOMvJyig/3gxnQaoCAAEUesMfnmr8SVycco2gvCoe9amsOXmXz +HHfV1IWNcCG0szLni6LVhjkCsbjSR87kyUnEO6fe+1R9V77w6G7CebI6C1XiUJgWMhNcL3hWwcKU +s/Ja5CeanyTXxuzQmyWC48zCxEXFjJd6BmsqEZ+pCm5IO2/b1BEZQvePB7/1U1+cPvQXLOZprE4y +TGJ36rfo5bs0vBmLrpxR57d+tVOxMyLlbc9wPBr64ptntoP0jaWvYkxN4FisZDQSA/i2jZRjJKRx +AgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBRqciZ6 +0B7vfec7aVHUbI2fkBJmqzANBgkqhkiG9w0BAQsFAAOCAQEAeZ8dlsa2eT8ijYfThwMEYGprmi5Z +iXMRrEPR9RP/jTkrwPK9T3CMqS/qF8QLVJ7UG5aYMzyorWKiAHarWWluBh1+xLlEjZivEtRh2woZ +Rkfz6/djwUAFQKXSt/S1mja/qYh2iARVBCuch38aNzx+LaUa2NSJXsq9rD1s2G2v1fN2D807iDgi +nWyTmsQ9v4IbZT+mD12q/OWyFcq1rca8PdCE6OoGcrBNOTJ4vz4RnAuknZoh8/CbCzB428Hch0P+ +vGOaysXCHMnHjf87ElgI5rY97HosTvuDls4MPGmHVHOkc8KT/1EQrBVUAdj8BbGJoX90g5pJ19xO +e4pIb4tF9g== +-----END CERTIFICATE----- + +Entrust Root Certification Authority - EC1 +========================================== +-----BEGIN CERTIFICATE----- +MIIC+TCCAoCgAwIBAgINAKaLeSkAAAAAUNCR+TAKBggqhkjOPQQDAzCBvzELMAkGA1UEBhMCVVMx +FjAUBgNVBAoTDUVudHJ1c3QsIEluYy4xKDAmBgNVBAsTH1NlZSB3d3cuZW50cnVzdC5uZXQvbGVn +YWwtdGVybXMxOTA3BgNVBAsTMChjKSAyMDEyIEVudHJ1c3QsIEluYy4gLSBmb3IgYXV0aG9yaXpl +ZCB1c2Ugb25seTEzMDEGA1UEAxMqRW50cnVzdCBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5 +IC0gRUMxMB4XDTEyMTIxODE1MjUzNloXDTM3MTIxODE1NTUzNlowgb8xCzAJBgNVBAYTAlVTMRYw +FAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0L2xlZ2Fs +LXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxMiBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhvcml6ZWQg +dXNlIG9ubHkxMzAxBgNVBAMTKkVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSAt 
+IEVDMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABIQTydC6bUF74mzQ61VfZgIaJPRbiWlH47jCffHy +AsWfoPZb1YsGGYZPUxBtByQnoaD41UcZYUx9ypMn6nQM72+WCf5j7HBdNq1nd67JnXxVRDqiY1Ef +9eNi1KlHBz7MIKNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE +FLdj5xrdjekIplWDpOBqUEFlEUJJMAoGCCqGSM49BAMDA2cAMGQCMGF52OVCR98crlOZF7ZvHH3h +vxGU0QOIdeSNiaSKd0bebWHvAvX7td/M/k7//qnmpwIwW5nXhTcGtXsI/esni0qU+eH6p44mCOh8 +kmhtc9hvJqwhAriZtyZBWyVgrtBIGu4G +-----END CERTIFICATE----- + +CFCA EV ROOT +============ +-----BEGIN CERTIFICATE----- +MIIFjTCCA3WgAwIBAgIEGErM1jANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJDTjEwMC4GA1UE +CgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQDDAxDRkNB +IEVWIFJPT1QwHhcNMTIwODA4MDMwNzAxWhcNMjkxMjMxMDMwNzAxWjBWMQswCQYDVQQGEwJDTjEw +MC4GA1UECgwnQ2hpbmEgRmluYW5jaWFsIENlcnRpZmljYXRpb24gQXV0aG9yaXR5MRUwEwYDVQQD +DAxDRkNBIEVWIFJPT1QwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDXXWvNED8fBVnV +BU03sQ7smCuOFR36k0sXgiFxEFLXUWRwFsJVaU2OFW2fvwwbwuCjZ9YMrM8irq93VCpLTIpTUnrD +7i7es3ElweldPe6hL6P3KjzJIx1qqx2hp/Hz7KDVRM8Vz3IvHWOX6Jn5/ZOkVIBMUtRSqy5J35DN +uF++P96hyk0g1CXohClTt7GIH//62pCfCqktQT+x8Rgp7hZZLDRJGqgG16iI0gNyejLi6mhNbiyW +ZXvKWfry4t3uMCz7zEasxGPrb382KzRzEpR/38wmnvFyXVBlWY9ps4deMm/DGIq1lY+wejfeWkU7 +xzbh72fROdOXW3NiGUgthxwG+3SYIElz8AXSG7Ggo7cbcNOIabla1jj0Ytwli3i/+Oh+uFzJlU9f +py25IGvPa931DfSCt/SyZi4QKPaXWnuWFo8BGS1sbn85WAZkgwGDg8NNkt0yxoekN+kWzqotaK8K +gWU6cMGbrU1tVMoqLUuFG7OA5nBFDWteNfB/O7ic5ARwiRIlk9oKmSJgamNgTnYGmE69g60dWIol +hdLHZR4tjsbftsbhf4oEIRUpdPA+nJCdDC7xij5aqgwJHsfVPKPtl8MeNPo4+QgO48BdK4PRVmrJ +tqhUUy54Mmc9gn900PvhtgVguXDbjgv5E1hvcWAQUhC5wUEJ73IfZzF4/5YFjQIDAQABo2MwYTAf +BgNVHSMEGDAWgBTj/i39KNALtbq2osS/BqoFjJP7LzAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB +/wQEAwIBBjAdBgNVHQ4EFgQU4/4t/SjQC7W6tqLEvwaqBYyT+y8wDQYJKoZIhvcNAQELBQADggIB +ACXGumvrh8vegjmWPfBEp2uEcwPenStPuiB/vHiyz5ewG5zz13ku9Ui20vsXiObTej/tUxPQ4i9q +ecsAIyjmHjdXNYmEwnZPNDatZ8POQQaIxffu2Bq41gt/UP+TqhdLjOztUmCypAbqTuv0axn96/Ua +4CUqmtzHQTb3yHQFhDmVOdYLO6Qn+gjYXB74BGBSESgoA//vU2YApUo0FmZ8/Qmkrp5nGm9BC2sG +E5uPhnEFtC+NiWYzKXZUmhH4J/qyP5Hgzg0b8zAarb8iXRvTvyUFTeGSGn+ZnzxEk8rUQElsgIfX +BDrDMlI1Dlb4pd19xIsNER9Tyx6yF7Zod1rg1MvIB671Oi6ON7fQAUtDKXeMOZePglr4UeWJoBjn +aH9dCi77o0cOPaYjesYBx4/IXr9tgFa+iiS6M+qf4TIRnvHST4D2G0CvOJ4RUHlzEhLN5mydLIhy +PDCBBpEi6lmt2hkuIsKNuYyH4Ga8cyNfIWRjgEj1oDwYPZTISEEdQLpe/v5WOaHIz16eGWRGENoX +kbcFgKyLmZJ956LYBws2J+dIeWCKw9cTXPhyQN9Ky8+ZAAoACxGV2lZFA4gKn2fQ1XmxqI1AbQ3C +ekD6819kR5LLU7m7Wc5P/dAVUwHY3+vZ5nbv0CO7O6l5s9UCKc2Jo5YPSjXnTkLAdc0Hz+Ys63su +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GB CA +=============================== +-----BEGIN CERTIFICATE----- +MIIDtTCCAp2gAwIBAgIQdrEgUnTwhYdGs/gjGvbCwDANBgkqhkiG9w0BAQsFADBtMQswCQYDVQQG +EwJDSDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNl +ZDEoMCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQiBDQTAeFw0xNDEyMDExNTAw +MzJaFw0zOTEyMDExNTEwMzFaMG0xCzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYD +VQQLExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEds +b2JhbCBSb290IEdCIENBMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA2Be3HEokKtaX +scriHvt9OO+Y9bI5mE4nuBFde9IllIiCFSZqGzG7qFshISvYD06fWvGxWuR51jIjK+FTzJlFXHtP +rby/h0oLS5daqPZI7H17Dc0hBt+eFf1Biki3IPShehtX1F1Q/7pn2COZH8g/497/b1t3sWtuuMlk +9+HKQUYOKXHQuSP8yYFfTvdv37+ErXNku7dCjmn21HYdfp2nuFeKUWdy19SouJVUQHMD9ur06/4o +Qnc/nSMbsrY9gBQHTC5P99UKFg29ZkM3fiNDecNAhvVMKdqOmq0NpQSHiB6F4+lT1ZvIiwNjeOvg +GUpuuy9rM2RYk61pv48b74JIxwIDAQABo1EwTzALBgNVHQ8EBAMCAYYwDwYDVR0TAQH/BAUwAwEB +/zAdBgNVHQ4EFgQUNQ/INmNe4qPs+TtmFc5RUuORmj0wEAYJKwYBBAGCNxUBBAMCAQAwDQYJKoZI 
+hvcNAQELBQADggEBAEBM+4eymYGQfp3FsLAmzYh7KzKNbrghcViXfa43FK8+5/ea4n32cZiZBKpD +dHij40lhPnOMTZTg+XHEthYOU3gf1qKHLwI5gSk8rxWYITD+KJAAjNHhy/peyP34EEY7onhCkRd0 +VQreUGdNZtGn//3ZwLWoo4rOZvUPQ82nK1d7Y0Zqqi5S2PTt4W2tKZB4SLrhI6qjiey1q5bAtEui +HZeeevJuQHHfaPFlTc58Bd9TZaml8LGXBHAVRgOY1NK/VLSgWH1Sb9pWJmLU2NuJMW8c8CLC02Ic +Nc1MaRVUGpCY3useX8p3x8uOPUNpnJpY0CQ73xtAln41rYHHTnG6iBM= +-----END CERTIFICATE----- + +SZAFIR ROOT CA2 +=============== +-----BEGIN CERTIFICATE----- +MIIDcjCCAlqgAwIBAgIUPopdB+xV0jLVt+O2XwHrLdzk1uQwDQYJKoZIhvcNAQELBQAwUTELMAkG +A1UEBhMCUEwxKDAmBgNVBAoMH0tyYWpvd2EgSXpiYSBSb3psaWN6ZW5pb3dhIFMuQS4xGDAWBgNV +BAMMD1NaQUZJUiBST09UIENBMjAeFw0xNTEwMTkwNzQzMzBaFw0zNTEwMTkwNzQzMzBaMFExCzAJ +BgNVBAYTAlBMMSgwJgYDVQQKDB9LcmFqb3dhIEl6YmEgUm96bGljemVuaW93YSBTLkEuMRgwFgYD +VQQDDA9TWkFGSVIgUk9PVCBDQTIwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQC3vD5Q +qEvNQLXOYeeWyrSh2gwisPq1e3YAd4wLz32ohswmUeQgPYUM1ljj5/QqGJ3a0a4m7utT3PSQ1hNK +DJA8w/Ta0o4NkjrcsbH/ON7Dui1fgLkCvUqdGw+0w8LBZwPd3BucPbOw3gAeqDRHu5rr/gsUvTaE +2g0gv/pby6kWIK05YO4vdbbnl5z5Pv1+TW9NL++IDWr63fE9biCloBK0TXC5ztdyO4mTp4CEHCdJ +ckm1/zuVnsHMyAHs6A6KCpbns6aH5db5BSsNl0BwPLqsdVqc1U2dAgrSS5tmS0YHF2Wtn2yIANwi +ieDhZNRnvDF5YTy7ykHNXGoAyDw4jlivAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0P +AQH/BAQDAgEGMB0GA1UdDgQWBBQuFqlKGLXLzPVvUPMjX/hd56zwyDANBgkqhkiG9w0BAQsFAAOC +AQEAtXP4A9xZWx126aMqe5Aosk3AM0+qmrHUuOQn/6mWmc5G4G18TKI4pAZw8PRBEew/R40/cof5 +O/2kbytTAOD/OblqBw7rHRz2onKQy4I9EYKL0rufKq8h5mOGnXkZ7/e7DDWQw4rtTw/1zBLZpD67 +oPwglV9PJi8RI4NOdQcPv5vRtB3pEAT+ymCPoky4rc/hkA/NrgrHXXu3UNLUYfrVFdvXn4dRVOul +4+vJhaAlIDf7js4MNIThPIGyd05DpYhfhmehPea0XGG2Ptv+tyjFogeutcrKjSoS75ftwjCkySp6 ++/NNIxuZMzSgLvWpCz/UXeHPhJ/iGcJfitYgHuNztw== +-----END CERTIFICATE----- + +Certum Trusted Network CA 2 +=========================== +-----BEGIN CERTIFICATE----- +MIIF0jCCA7qgAwIBAgIQIdbQSk8lD8kyN/yqXhKN6TANBgkqhkiG9w0BAQ0FADCBgDELMAkGA1UE +BhMCUEwxIjAgBgNVBAoTGVVuaXpldG8gVGVjaG5vbG9naWVzIFMuQS4xJzAlBgNVBAsTHkNlcnR1 +bSBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTEkMCIGA1UEAxMbQ2VydHVtIFRydXN0ZWQgTmV0d29y +ayBDQSAyMCIYDzIwMTExMDA2MDgzOTU2WhgPMjA0NjEwMDYwODM5NTZaMIGAMQswCQYDVQQGEwJQ +TDEiMCAGA1UEChMZVW5pemV0byBUZWNobm9sb2dpZXMgUy5BLjEnMCUGA1UECxMeQ2VydHVtIENl +cnRpZmljYXRpb24gQXV0aG9yaXR5MSQwIgYDVQQDExtDZXJ0dW0gVHJ1c3RlZCBOZXR3b3JrIENB +IDIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC9+Xj45tWADGSdhhuWZGc/IjoedQF9 +7/tcZ4zJzFxrqZHmuULlIEub2pt7uZld2ZuAS9eEQCsn0+i6MLs+CRqnSZXvK0AkwpfHp+6bJe+o +CgCXhVqqndwpyeI1B+twTUrWwbNWuKFBOJvR+zF/j+Bf4bE/D44WSWDXBo0Y+aomEKsq09DRZ40b +Rr5HMNUuctHFY9rnY3lEfktjJImGLjQ/KUxSiyqnwOKRKIm5wFv5HdnnJ63/mgKXwcZQkpsCLL2p +uTRZCr+ESv/f/rOf69me4Jgj7KZrdxYq28ytOxykh9xGc14ZYmhFV+SQgkK7QtbwYeDBoz1mo130 +GO6IyY0XRSmZMnUCMe4pJshrAua1YkV/NxVaI2iJ1D7eTiew8EAMvE0Xy02isx7QBlrd9pPPV3WZ +9fqGGmd4s7+W/jTcvedSVuWz5XV710GRBdxdaeOVDUO5/IOWOZV7bIBaTxNyxtd9KXpEulKkKtVB +Rgkg/iKgtlswjbyJDNXXcPiHUv3a76xRLgezTv7QCdpw75j6VuZt27VXS9zlLCUVyJ4ueE742pye +hizKV/Ma5ciSixqClnrDvFASadgOWkaLOusm+iPJtrCBvkIApPjW/jAux9JG9uWOdf3yzLnQh1vM +BhBgu4M1t15n3kfsmUjxpKEV/q2MYo45VU85FrmxY53/twIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MB0GA1UdDgQWBBS2oVQ5AsOgP46KvPrU+Bym0ToO/TAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZI +hvcNAQENBQADggIBAHGlDs7k6b8/ONWJWsQCYftMxRQXLYtPU2sQF/xlhMcQSZDe28cmk4gmb3DW +Al45oPePq5a1pRNcgRRtDoGCERuKTsZPpd1iHkTfCVn0W3cLN+mLIMb4Ck4uWBzrM9DPhmDJ2vuA +L55MYIR4PSFk1vtBHxgP58l1cb29XN40hz5BsA72udY/CROWFC/emh1auVbONTqwX3BNXuMp8SMo +clm2q8KMZiYcdywmdjWLKKdpoPk79SPdhRB0yZADVpHnr7pH1BKXESLjokmUbOe3lEu6LaTaM4tM +pkT/WjzGHWTYtTHkpjx6qFcL2+1hGsvxznN3Y6SHb0xRONbkX8eftoEq5IVIeVheO/jbAoJnwTnb 
+w3RLPTYe+SmTiGhbqEQZIfCn6IENLOiTNrQ3ssqwGyZ6miUfmpqAnksqP/ujmv5zMnHCnsZy4Ypo +J/HkD7TETKVhk/iXEAcqMCWpuchxuO9ozC1+9eB+D4Kob7a6bINDd82Kkhehnlt4Fj1F4jNy3eFm +ypnTycUm/Q1oBEauttmbjL4ZvrHG8hnjXALKLNhvSgfZyTXaQHXyxKcZb55CEJh15pWLYLztxRLX +is7VmFxWlgPF7ncGNf/P5O4/E2Hu29othfDNrp2yGAlFw5Khchf8R7agCyzxxN5DaAhqXzvwdmP7 +zAYspsbiDrW5viSP +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions RootCA 2015 +======================================================= +-----BEGIN CERTIFICATE----- +MIIGCzCCA/OgAwIBAgIBADANBgkqhkiG9w0BAQsFADCBpjELMAkGA1UEBhMCR1IxDzANBgNVBAcT +BkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0 +aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNVBAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNl +YXJjaCBJbnN0aXR1dGlvbnMgUm9vdENBIDIwMTUwHhcNMTUwNzA3MTAxMTIxWhcNNDAwNjMwMTAx +MTIxWjCBpjELMAkGA1UEBhMCR1IxDzANBgNVBAcTBkF0aGVuczFEMEIGA1UEChM7SGVsbGVuaWMg +QWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBDZXJ0LiBBdXRob3JpdHkxQDA+BgNV +BAMTN0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgUm9vdENBIDIw +MTUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDC+Kk/G4n8PDwEXT2QNrCROnk8Zlrv +bTkBSRq0t89/TSNTt5AA4xMqKKYx8ZEA4yjsriFBzh/a/X0SWwGDD7mwX5nh8hKDgE0GPt+sr+eh +iGsxr/CL0BgzuNtFajT0AoAkKAoCFZVedioNmToUW/bLy1O8E00BiDeUJRtCvCLYjqOWXjrZMts+ +6PAQZe104S+nfK8nNLspfZu2zwnI5dMK/IhlZXQK3HMcXM1AsRzUtoSMTFDPaI6oWa7CJ06CojXd +FPQf/7J31Ycvqm59JCfnxssm5uX+Zwdj2EUN3TpZZTlYepKZcj2chF6IIbjV9Cz82XBST3i4vTwr +i5WY9bPRaM8gFH5MXF/ni+X1NYEZN9cRCLdmvtNKzoNXADrDgfgXy5I2XdGj2HUb4Ysn6npIQf1F +GQatJ5lOwXBH3bWfgVMS5bGMSF0xQxfjjMZ6Y5ZLKTBOhE5iGV48zpeQpX8B653g+IuJ3SWYPZK2 +fu/Z8VFRfS0myGlZYeCsargqNhEEelC9MoS+L9xy1dcdFkfkR2YgP/SWxa+OAXqlD3pk9Q0Yh9mu +iNX6hME6wGkoLfINaFGq46V3xqSQDqE3izEjR8EJCOtu93ib14L8hCCZSRm2Ekax+0VVFqmjZayc +Bw/qa9wfLgZy7IaIEuQt218FL+TwA9MmM+eAws1CoRc0CwIDAQABo0IwQDAPBgNVHRMBAf8EBTAD +AQH/MA4GA1UdDwEB/wQEAwIBBjAdBgNVHQ4EFgQUcRVnyMjJvXVdctA4GGqd83EkVAswDQYJKoZI +hvcNAQELBQADggIBAHW7bVRLqhBYRjTyYtcWNl0IXtVsyIe9tC5G8jH4fOpCtZMWVdyhDBKg2mF+ +D1hYc2Ryx+hFjtyp8iY/xnmMsVMIM4GwVhO+5lFc2JsKT0ucVlMC6U/2DWDqTUJV6HwbISHTGzrM +d/K4kPFox/la/vot9L/J9UUbzjgQKjeKeaO04wlshYaT/4mWJ3iBj2fjRnRUjtkNaeJK9E10A/+y +d+2VZ5fkscWrv2oj6NSU4kQoYsRL4vDY4ilrGnB+JGGTe08DMiUNRSQrlrRGar9KC/eaj8GsGsVn +82800vpzY4zvFrCopEYq+OsS7HK07/grfoxSwIuEVPkvPuNVqNxmsdnhX9izjFk0WaSrT2y7Hxjb +davYy5LNlDhhDgcGH0tGEPEVvo2FXDtKK4F5D7Rpn0lQl033DlZdwJVqwjbDG2jJ9SrcR5q+ss7F +Jej6A7na+RZukYT1HCjI/CbM1xyQVqdfbzoEvM14iQuODy+jqk+iGxI9FghAD/FGTNeqewjBCvVt +J94Cj8rDtSvK6evIIVM4pcw72Hc3MKJP2W/R8kCtQXoXxdZKNYm3QdV8hn9VTYNKpXMgwDqvkPGa +JI7ZjnHKe7iG2rKPmT4dEw0SEe7Uq/DpFXYC5ODfqiAeW2GFZECpkJcNrVPSWh2HagCXZWK0vm9q +p/UsQu0yrbYhnr68 +-----END CERTIFICATE----- + +Hellenic Academic and Research Institutions ECC RootCA 2015 +=========================================================== +-----BEGIN CERTIFICATE----- +MIICwzCCAkqgAwIBAgIBADAKBggqhkjOPQQDAjCBqjELMAkGA1UEBhMCR1IxDzANBgNVBAcTBkF0 +aGVuczFEMEIGA1UEChM7SGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9u +cyBDZXJ0LiBBdXRob3JpdHkxRDBCBgNVBAMTO0hlbGxlbmljIEFjYWRlbWljIGFuZCBSZXNlYXJj +aCBJbnN0aXR1dGlvbnMgRUNDIFJvb3RDQSAyMDE1MB4XDTE1MDcwNzEwMzcxMloXDTQwMDYzMDEw +MzcxMlowgaoxCzAJBgNVBAYTAkdSMQ8wDQYDVQQHEwZBdGhlbnMxRDBCBgNVBAoTO0hlbGxlbmlj +IEFjYWRlbWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ2VydC4gQXV0aG9yaXR5MUQwQgYD +VQQDEztIZWxsZW5pYyBBY2FkZW1pYyBhbmQgUmVzZWFyY2ggSW5zdGl0dXRpb25zIEVDQyBSb290 +Q0EgMjAxNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABJKgQehLgoRc4vgxEZmGZE4JJS+dQS8KrjVP +dJWyUWRrjWvmP3CV8AVER6ZyOFB2lQJajq4onvktTpnvLEhvTCUp6NFxW98dwXU3tNf6e3pCnGoK 
+Vlp8aQuqgAkkbH7BRqNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0O +BBYEFLQiC4KZJAEOnLvkDv2/+5cgk5kqMAoGCCqGSM49BAMCA2cAMGQCMGfOFmI4oqxiRaeplSTA +GiecMjvAwNW6qef4BENThe5SId6d9SWDPp5YSy/XZxMOIQIwBeF1Ad5o7SofTUwJCA3sS61kFyjn +dc5FZXIhF8siQQ6ME5g4mlRtm8rifOoCWCKR +-----END CERTIFICATE----- + +ISRG Root X1 +============ +-----BEGIN CERTIFICATE----- +MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAwTzELMAkGA1UE +BhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2VhcmNoIEdyb3VwMRUwEwYDVQQD +EwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQG +EwJVUzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMT +DElTUkcgUm9vdCBYMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54r +Vygch77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+0TM8ukj1 +3Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6UA5/TR5d8mUgjU+g4rk8K +b4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sWT8KOEUt+zwvo/7V3LvSye0rgTBIlDHCN +Aymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyHB5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ +4Q7e2RCOFvu396j3x+UCB5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf +1b0SHzUvKBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWnOlFu +hjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTnjh8BCNAw1FtxNrQH +usEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbwqHyGO0aoSCqI3Haadr8faqU9GY/r +OPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CIrU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4G +A1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY +9umbbjANBgkqhkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL +ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ3BebYhtF8GaV +0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KKNFtY2PwByVS5uCbMiogziUwt +hDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJw +TdwJx4nLCgdNbOhdjsnvzqvHu7UrTkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nx +e5AW0wdeRlN8NwdCjNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZA +JzVcoyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq4RgqsahD +YVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPAmRGunUHBcnWEvgJBQl9n +JEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57demyPxgcYxn/eR44/KJ4EBs+lVDR3veyJ +m+kXQ99b21/+jh5Xos1AnX5iItreGCc= +-----END CERTIFICATE----- + +AC RAIZ FNMT-RCM +================ +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIPXZONMGc2yAYdGsdUhGkHMA0GCSqGSIb3DQEBCwUAMDsxCzAJBgNVBAYT +AkVTMREwDwYDVQQKDAhGTk1ULVJDTTEZMBcGA1UECwwQQUMgUkFJWiBGTk1ULVJDTTAeFw0wODEw +MjkxNTU5NTZaFw0zMDAxMDEwMDAwMDBaMDsxCzAJBgNVBAYTAkVTMREwDwYDVQQKDAhGTk1ULVJD +TTEZMBcGA1UECwwQQUMgUkFJWiBGTk1ULVJDTTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC +ggIBALpxgHpMhm5/yBNtwMZ9HACXjywMI7sQmkCpGreHiPibVmr75nuOi5KOpyVdWRHbNi63URcf +qQgfBBckWKo3Shjf5TnUV/3XwSyRAZHiItQDwFj8d0fsjz50Q7qsNI1NOHZnjrDIbzAzWHFctPVr +btQBULgTfmxKo0nRIBnuvMApGGWn3v7v3QqQIecaZ5JCEJhfTzC8PhxFtBDXaEAUwED653cXeuYL +j2VbPNmaUtu1vZ5Gzz3rkQUCwJaydkxNEJY7kvqcfw+Z374jNUUeAlz+taibmSXaXvMiwzn15Cou +08YfxGyqxRxqAQVKL9LFwag0Jl1mpdICIfkYtwb1TplvqKtMUejPUBjFd8g5CSxJkjKZqLsXF3mw +WsXmo8RZZUc1g16p6DULmbvkzSDGm0oGObVo/CK67lWMK07q87Hj/LaZmtVC+nFNCM+HHmpxffnT +tOmlcYF7wk5HlqX2doWjKI/pgG6BU6VtX7hI+cL5NqYuSf+4lsKMB7ObiFj86xsc3i1w4peSMKGJ +47xVqCfWS+2QrYv6YyVZLag13cqXM7zlzced0ezvXg5KkAYmY6252TUtB7p2ZSysV4999AeU14EC +ll2jB0nVetBX+RvnU0Z1qrB5QstocQjpYL05ac70r8NWQMetUqIJ5G+GR4of6ygnXYMgrwTJbFaa +i0b1AgMBAAGjgYMwgYAwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYE +FPd9xf3E6Jobd2Sn9R2gzL+HYJptMD4GA1UdIAQ3MDUwMwYEVR0gADArMCkGCCsGAQUFBwIBFh1o 
+dHRwOi8vd3d3LmNlcnQuZm5tdC5lcy9kcGNzLzANBgkqhkiG9w0BAQsFAAOCAgEAB5BK3/MjTvDD +nFFlm5wioooMhfNzKWtN/gHiqQxjAb8EZ6WdmF/9ARP67Jpi6Yb+tmLSbkyU+8B1RXxlDPiyN8+s +D8+Nb/kZ94/sHvJwnvDKuO+3/3Y3dlv2bojzr2IyIpMNOmqOFGYMLVN0V2Ue1bLdI4E7pWYjJ2cJ +j+F3qkPNZVEI7VFY/uY5+ctHhKQV8Xa7pO6kO8Rf77IzlhEYt8llvhjho6Tc+hj507wTmzl6NLrT +Qfv6MooqtyuGC2mDOL7Nii4LcK2NJpLuHvUBKwrZ1pebbuCoGRw6IYsMHkCtA+fdZn71uSANA+iW ++YJF1DngoABd15jmfZ5nc8OaKveri6E6FO80vFIOiZiaBECEHX5FaZNXzuvO+FB8TxxuBEOb+dY7 +Ixjp6o7RTUaN8Tvkasq6+yO3m/qZASlaWFot4/nUbQ4mrcFuNLwy+AwF+mWj2zs3gyLp1txyM/1d +8iC9djwj2ij3+RvrWWTV3F9yfiD8zYm1kGdNYno/Tq0dwzn+evQoFt9B9kiABdcPUXmsEKvU7ANm +5mqwujGSQkBqvjrTcuFqN1W8rB2Vt2lh8kORdOag0wokRqEIr9baRRmW1FMdW4R58MD3R++Lj8UG +rp1MYp3/RgT408m2ECVAdf4WqslKYIYvuu8wd+RU4riEmViAqhOLUTpPSPaLtrM= +-----END CERTIFICATE----- + +Amazon Root CA 1 +================ +-----BEGIN CERTIFICATE----- +MIIDQTCCAimgAwIBAgITBmyfz5m/jAo54vB4ikPmljZbyjANBgkqhkiG9w0BAQsFADA5MQswCQYD +VQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAxMB4XDTE1 +MDUyNjAwMDAwMFoXDTM4MDExNzAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpv +bjEZMBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoC +ggEBALJ4gHHKeNXjca9HgFB0fW7Y14h29Jlo91ghYPl0hAEvrAIthtOgQ3pOsqTQNroBvo3bSMgH +FzZM9O6II8c+6zf1tRn4SWiw3te5djgdYZ6k/oI2peVKVuRF4fn9tBb6dNqcmzU5L/qwIFAGbHrQ +gLKm+a/sRxmPUDgH3KKHOVj4utWp+UhnMJbulHheb4mjUcAwhmahRWa6VOujw5H5SNz/0egwLX0t +dHA114gk957EWW67c4cX8jJGKLhD+rcdqsq08p8kDi1L93FcXmn/6pUCyziKrlA4b9v7LWIbxcce +VOF34GfID5yHI9Y/QCB/IIDEgEw+OyQmjgSubJrIqg0CAwEAAaNCMEAwDwYDVR0TAQH/BAUwAwEB +/zAOBgNVHQ8BAf8EBAMCAYYwHQYDVR0OBBYEFIQYzIU07LwMlJQuCFmcx7IQTgoIMA0GCSqGSIb3 +DQEBCwUAA4IBAQCY8jdaQZChGsV2USggNiMOruYou6r4lK5IpDB/G/wkjUu0yKGX9rbxenDIU5PM +CCjjmCXPI6T53iHTfIUJrU6adTrCC2qJeHZERxhlbI1Bjjt/msv0tadQ1wUsN+gDS63pYaACbvXy +8MWy7Vu33PqUXHeeE6V/Uq2V8viTO96LXFvKWlJbYK8U90vvo/ufQJVtMVT8QtPHRh8jrdkPSHCa +2XV4cdFyQzR1bldZwgJcJmApzyMZFo6IQ6XU5MsI+yMRQ+hDKXJioaldXgjUkK642M4UwtBV8ob2 +xJNDd2ZhwLnoQdeXeGADbkpyrqXRfboQnoZsG4q5WTP468SQvvG5 +-----END CERTIFICATE----- + +Amazon Root CA 2 +================ +-----BEGIN CERTIFICATE----- +MIIFQTCCAymgAwIBAgITBmyf0pY1hp8KD+WGePhbJruKNzANBgkqhkiG9w0BAQwFADA5MQswCQYD +VQQGEwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAyMB4XDTE1 +MDUyNjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpv +bjEZMBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoC +ggIBAK2Wny2cSkxKgXlRmeyKy2tgURO8TW0G/LAIjd0ZEGrHJgw12MBvIITplLGbhQPDW9tK6Mj4 +kHbZW0/jTOgGNk3Mmqw9DJArktQGGWCsN0R5hYGCrVo34A3MnaZMUnbqQ523BNFQ9lXg1dKmSYXp +N+nKfq5clU1Imj+uIFptiJXZNLhSGkOQsL9sBbm2eLfq0OQ6PBJTYv9K8nu+NQWpEjTj82R0Yiw9 +AElaKP4yRLuH3WUnAnE72kr3H9rN9yFVkE8P7K6C4Z9r2UXTu/Bfh+08LDmG2j/e7HJV63mjrdvd +fLC6HM783k81ds8P+HgfajZRRidhW+mez/CiVX18JYpvL7TFz4QuK/0NURBs+18bvBt+xa47mAEx +kv8LV/SasrlX6avvDXbR8O70zoan4G7ptGmh32n2M8ZpLpcTnqWHsFcQgTfJU7O7f/aS0ZzQGPSS +btqDT6ZjmUyl+17vIWR6IF9sZIUVyzfpYgwLKhbcAS4y2j5L9Z469hdAlO+ekQiG+r5jqFoz7Mt0 +Q5X5bGlSNscpb/xVA1wf+5+9R+vnSUeVC06JIglJ4PVhHvG/LopyboBZ/1c6+XUyo05f7O0oYtlN +c/LMgRdg7c3r3NunysV+Ar3yVAhU/bQtCSwXVEqY0VThUWcI0u1ufm8/0i2BWSlmy5A5lREedCf+ +3euvAgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSw +DPBMMPQFWAJI/TPlUq9LhONmUjANBgkqhkiG9w0BAQwFAAOCAgEAqqiAjw54o+Ci1M3m9Zh6O+oA +A7CXDpO8Wqj2LIxyh6mx/H9z/WNxeKWHWc8w4Q0QshNabYL1auaAn6AFC2jkR2vHat+2/XcycuUY ++gn0oJMsXdKMdYV2ZZAMA3m3MSNjrXiDCYZohMr/+c8mmpJ5581LxedhpxfL86kSk5Nrp+gvU5LE +YFiwzAJRGFuFjWJZY7attN6a+yb3ACfAXVU3dJnJUH/jWS5E4ywl7uxMMne0nxrpS10gxdr9HIcW 
+xkPo1LsmmkVwXqkLN1PiRnsn/eBG8om3zEK2yygmbtmlyTrIQRNg91CMFa6ybRoVGld45pIq2WWQ +gj9sAq+uEjonljYE1x2igGOpm/HlurR8FLBOybEfdF849lHqm/osohHUqS0nGkWxr7JOcQ3AWEbW +aQbLU8uz/mtBzUF+fUwPfHJ5elnNXkoOrJupmHN5fLT0zLm4BwyydFy4x2+IoZCn9Kr5v2c69BoV +Yh63n749sSmvZ6ES8lgQGVMDMBu4Gon2nL2XA46jCfMdiyHxtN/kHNGfZQIG6lzWE7OE76KlXIx3 +KadowGuuQNKotOrN8I1LOJwZmhsoVLiJkO/KdYE+HvJkJMcYr07/R54H9jVlpNMKVv/1F2Rs76gi +JUmTtt8AF9pYfl3uxRuw0dFfIRDH+fO6AgonB8Xx1sfT4PsJYGw= +-----END CERTIFICATE----- + +Amazon Root CA 3 +================ +-----BEGIN CERTIFICATE----- +MIIBtjCCAVugAwIBAgITBmyf1XSXNmY/Owua2eiedgPySjAKBggqhkjOPQQDAjA5MQswCQYDVQQG +EwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSAzMB4XDTE1MDUy +NjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZ +MBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgMzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABCmXp8ZB +f8ANm+gBG1bG8lKlui2yEujSLtf6ycXYqm0fc4E7O5hrOXwzpcVOho6AF2hiRVd9RFgdszflZwjr +Zt6jQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMB0GA1UdDgQWBBSrttvXBp43 +rDCGB5Fwx5zEGbF4wDAKBggqhkjOPQQDAgNJADBGAiEA4IWSoxe3jfkrBqWTrBqYaGFy+uGh0Psc +eGCmQ5nFuMQCIQCcAu/xlJyzlvnrxir4tiz+OpAUFteMYyRIHN8wfdVoOw== +-----END CERTIFICATE----- + +Amazon Root CA 4 +================ +-----BEGIN CERTIFICATE----- +MIIB8jCCAXigAwIBAgITBmyf18G7EEwpQ+Vxe3ssyBrBDjAKBggqhkjOPQQDAzA5MQswCQYDVQQG +EwJVUzEPMA0GA1UEChMGQW1hem9uMRkwFwYDVQQDExBBbWF6b24gUm9vdCBDQSA0MB4XDTE1MDUy +NjAwMDAwMFoXDTQwMDUyNjAwMDAwMFowOTELMAkGA1UEBhMCVVMxDzANBgNVBAoTBkFtYXpvbjEZ +MBcGA1UEAxMQQW1hem9uIFJvb3QgQ0EgNDB2MBAGByqGSM49AgEGBSuBBAAiA2IABNKrijdPo1MN +/sGKe0uoe0ZLY7Bi9i0b2whxIdIA6GO9mif78DluXeo9pcmBqqNbIJhFXRbb/egQbeOc4OO9X4Ri +83BkM6DLJC9wuoihKqB1+IGuYgbEgds5bimwHvouXKNCMEAwDwYDVR0TAQH/BAUwAwEB/zAOBgNV +HQ8BAf8EBAMCAYYwHQYDVR0OBBYEFNPsxzplbszh2naaVvuc84ZtV+WBMAoGCCqGSM49BAMDA2gA +MGUCMDqLIfG9fhGt0O9Yli/W651+kI0rz2ZVwyzjKKlwCkcO8DdZEv8tmZQoTipPNU0zWgIxAOp1 +AE47xDqUEpHJWEadIRNyp4iciuRMStuW1KyLa2tJElMzrdfkviT8tQp21KW8EA== +-----END CERTIFICATE----- + +TUBITAK Kamu SM SSL Kok Sertifikasi - Surum 1 +============================================= +-----BEGIN CERTIFICATE----- +MIIEYzCCA0ugAwIBAgIBATANBgkqhkiG9w0BAQsFADCB0jELMAkGA1UEBhMCVFIxGDAWBgNVBAcT +D0dlYnplIC0gS29jYWVsaTFCMEAGA1UEChM5VHVya2l5ZSBCaWxpbXNlbCB2ZSBUZWtub2xvamlr +IEFyYXN0aXJtYSBLdXJ1bXUgLSBUVUJJVEFLMS0wKwYDVQQLEyRLYW11IFNlcnRpZmlrYXN5b24g +TWVya2V6aSAtIEthbXUgU00xNjA0BgNVBAMTLVRVQklUQUsgS2FtdSBTTSBTU0wgS29rIFNlcnRp +ZmlrYXNpIC0gU3VydW0gMTAeFw0xMzExMjUwODI1NTVaFw00MzEwMjUwODI1NTVaMIHSMQswCQYD +VQQGEwJUUjEYMBYGA1UEBxMPR2ViemUgLSBLb2NhZWxpMUIwQAYDVQQKEzlUdXJraXllIEJpbGlt +c2VsIHZlIFRla25vbG9qaWsgQXJhc3Rpcm1hIEt1cnVtdSAtIFRVQklUQUsxLTArBgNVBAsTJEth +bXUgU2VydGlmaWthc3lvbiBNZXJrZXppIC0gS2FtdSBTTTE2MDQGA1UEAxMtVFVCSVRBSyBLYW11 +IFNNIFNTTCBLb2sgU2VydGlmaWthc2kgLSBTdXJ1bSAxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A +MIIBCgKCAQEAr3UwM6q7a9OZLBI3hNmNe5eA027n/5tQlT6QlVZC1xl8JoSNkvoBHToP4mQ4t4y8 +6Ij5iySrLqP1N+RAjhgleYN1Hzv/bKjFxlb4tO2KRKOrbEz8HdDc72i9z+SqzvBV96I01INrN3wc +wv61A+xXzry0tcXtAA9TNypN9E8Mg/uGz8v+jE69h/mniyFXnHrfA2eJLJ2XYacQuFWQfw4tJzh0 +3+f92k4S400VIgLI4OD8D62K18lUUMw7D8oWgITQUVbDjlZ/iSIzL+aFCr2lqBs23tPcLG07xxO9 +WSMs5uWk99gL7eqQQESolbuT1dCANLZGeA4fAJNG4e7p+exPFwIDAQABo0IwQDAdBgNVHQ4EFgQU +ZT/HiobGPN08VFw1+DrtUgxHV8gwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJ +KoZIhvcNAQELBQADggEBACo/4fEyjq7hmFxLXs9rHmoJ0iKpEsdeV31zVmSAhHqT5Am5EM2fKifh +AHe+SMg1qIGf5LgsyX8OsNJLN13qudULXjS99HMpw+0mFZx+CFOKWI3QSyjfwbPfIPP54+M638yc +lNhOT8NrF7f3cuitZjO1JVOr4PhMqZ398g26rrnZqsZr+ZO7rqu4lzwDGrpDxpa5RXI4s6ehlj2R 
+e37AIVNMh+3yC1SVUZPVIqUNivGTDj5UDrDYyU7c8jEyVupk+eq1nRZmQnLzf9OxMUP8pI4X8W0j +q5Rm+K37DwhuJi1/FwcJsoz7UMCflo3Ptv0AnVoUmr8CRPXBwp8iXqIPoeM= +-----END CERTIFICATE----- + +GDCA TrustAUTH R5 ROOT +====================== +-----BEGIN CERTIFICATE----- +MIIFiDCCA3CgAwIBAgIIfQmX/vBH6nowDQYJKoZIhvcNAQELBQAwYjELMAkGA1UEBhMCQ04xMjAw +BgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZIENPLixMVEQuMR8wHQYDVQQD +DBZHRENBIFRydXN0QVVUSCBSNSBST09UMB4XDTE0MTEyNjA1MTMxNVoXDTQwMTIzMTE1NTk1OVow +YjELMAkGA1UEBhMCQ04xMjAwBgNVBAoMKUdVQU5HIERPTkcgQ0VSVElGSUNBVEUgQVVUSE9SSVRZ +IENPLixMVEQuMR8wHQYDVQQDDBZHRENBIFRydXN0QVVUSCBSNSBST09UMIICIjANBgkqhkiG9w0B +AQEFAAOCAg8AMIICCgKCAgEA2aMW8Mh0dHeb7zMNOwZ+Vfy1YI92hhJCfVZmPoiC7XJjDp6L3TQs +AlFRwxn9WVSEyfFrs0yw6ehGXTjGoqcuEVe6ghWinI9tsJlKCvLriXBjTnnEt1u9ol2x8kECK62p +OqPseQrsXzrj/e+APK00mxqriCZ7VqKChh/rNYmDf1+uKU49tm7srsHwJ5uu4/Ts765/94Y9cnrr +pftZTqfrlYwiOXnhLQiPzLyRuEH3FMEjqcOtmkVEs7LXLM3GKeJQEK5cy4KOFxg2fZfmiJqwTTQJ +9Cy5WmYqsBebnh52nUpmMUHfP/vFBu8btn4aRjb3ZGM74zkYI+dndRTVdVeSN72+ahsmUPI2JgaQ +xXABZG12ZuGR224HwGGALrIuL4xwp9E7PLOR5G62xDtw8mySlwnNR30YwPO7ng/Wi64HtloPzgsM +R6flPri9fcebNaBhlzpBdRfMK5Z3KpIhHtmVdiBnaM8Nvd/WHwlqmuLMc3GkL30SgLdTMEZeS1SZ +D2fJpcjyIMGC7J0R38IC+xo70e0gmu9lZJIQDSri3nDxGGeCjGHeuLzRL5z7D9Ar7Rt2ueQ5Vfj4 +oR24qoAATILnsn8JuLwwoC8N9VKejveSswoAHQBUlwbgsQfZxw9cZX08bVlX5O2ljelAU58VS6Bx +9hoh49pwBiFYFIeFd3mqgnkCAwEAAaNCMEAwHQYDVR0OBBYEFOLJQJ9NzuiaoXzPDj9lxSmIahlR +MA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQDRSVfg +p8xoWLoBDysZzY2wYUWsEe1jUGn4H3++Fo/9nesLqjJHdtJnJO29fDMylyrHBYZmDRd9FBUb1Ov9 +H5r2XpdptxolpAqzkT9fNqyL7FeoPueBihhXOYV0GkLH6VsTX4/5COmSdI31R9KrO9b7eGZONn35 +6ZLpBN79SWP8bfsUcZNnL0dKt7n/HipzcEYwv1ryL3ml4Y0M2fmyYzeMN2WFcGpcWwlyua1jPLHd ++PwyvzeG5LuOmCd+uh8W4XAR8gPfJWIyJyYYMoSf/wA6E7qaTfRPuBRwIrHKK5DOKcFw9C+df/KQ +HtZa37dG/OaG+svgIHZ6uqbL9XzeYqWxi+7egmaKTjowHz+Ay60nugxe19CxVsp3cbK1daFQqUBD +F8Io2c9Si1vIY9RCPqAzekYu9wogRlR+ak8x8YF+QnQ4ZXMn7sZ8uI7XpTrXmKGcjBBV09tL7ECQ +8s1uV9JiDnxXk7Gnbc2dg7sq5+W2O3FYrf3RRbxake5TFW/TRQl1brqQXR4EzzffHqhmsYzmIGrv +/EhOdJhCrylvLmrH+33RZjEizIYAfmaDDEL0vTSSwxrqT8p+ck0LcIymSLumoRT2+1hEmRSuqguT +aaApJUqlyyvdimYHFngVV3Eb7PVHhPOeMTd61X8kreS8/f3MboPoDKi3QWwH3b08hpcv0g== +-----END CERTIFICATE----- + +TrustCor RootCert CA-1 +====================== +-----BEGIN CERTIFICATE----- +MIIEMDCCAxigAwIBAgIJANqb7HHzA7AZMA0GCSqGSIb3DQEBCwUAMIGkMQswCQYDVQQGEwJQQTEP +MA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEkMCIGA1UECgwbVHJ1c3RDb3Ig +U3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkxHzAdBgNVBAMMFlRydXN0Q29yIFJvb3RDZXJ0IENBLTEwHhcNMTYwMjA0MTIzMjE2WhcNMjkx +MjMxMTcyMzE2WjCBpDELMAkGA1UEBhMCUEExDzANBgNVBAgMBlBhbmFtYTEUMBIGA1UEBwwLUGFu +YW1hIENpdHkxJDAiBgNVBAoMG1RydXN0Q29yIFN5c3RlbXMgUy4gZGUgUi5MLjEnMCUGA1UECwwe +VHJ1c3RDb3IgQ2VydGlmaWNhdGUgQXV0aG9yaXR5MR8wHQYDVQQDDBZUcnVzdENvciBSb290Q2Vy +dCBDQS0xMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAv463leLCJhJrMxnHQFgKq1mq +jQCj/IDHUHuO1CAmujIS2CNUSSUQIpidRtLByZ5OGy4sDjjzGiVoHKZaBeYei0i/mJZ0PmnK6bV4 +pQa81QBeCQryJ3pS/C3Vseq0iWEk8xoT26nPUu0MJLq5nux+AHT6k61sKZKuUbS701e/s/OojZz0 +JEsq1pme9J7+wH5COucLlVPat2gOkEz7cD+PSiyU8ybdY2mplNgQTsVHCJCZGxdNuWxu72CVEY4h +gLW9oHPY0LJ3xEXqWib7ZnZ2+AYfYW0PVcWDtxBWcgYHpfOxGgMFZA6dWorWhnAbJN7+KIor0Gqw +/Hqi3LJ5DotlDwIDAQABo2MwYTAdBgNVHQ4EFgQU7mtJPHo/DeOxCbeKyKsZn3MzUOcwHwYDVR0j +BBgwFoAU7mtJPHo/DeOxCbeKyKsZn3MzUOcwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMC +AYYwDQYJKoZIhvcNAQELBQADggEBACUY1JGPE+6PHh0RU9otRCkZoB5rMZ5NDp6tPVxBb5UrJKF5 +mDo4Nvu7Zp5I/5CQ7z3UuJu0h3U/IJvOcs+hVcFNZKIZBqEHMwwLKeXx6quj7LUKdJDHfXLy11yf 
+ke+Ri7fc7Waiz45mO7yfOgLgJ90WmMCV1Aqk5IGadZQ1nJBfiDcGrVmVCrDRZ9MZyonnMlo2HD6C +qFqTvsbQZJG2z9m2GM/bftJlo6bEjhcxwft+dtvTheNYsnd6djtsL1Ac59v2Z3kf9YKVmgenFK+P +3CghZwnS1k1aHBkcjndcw5QkPTJrS37UeJSDvjdNzl/HHk484IkzlQsPpTLWPFp5LBk= +-----END CERTIFICATE----- + +TrustCor RootCert CA-2 +====================== +-----BEGIN CERTIFICATE----- +MIIGLzCCBBegAwIBAgIIJaHfyjPLWQIwDQYJKoZIhvcNAQELBQAwgaQxCzAJBgNVBAYTAlBBMQ8w +DQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5MSQwIgYDVQQKDBtUcnVzdENvciBT +eXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRydXN0Q29yIENlcnRpZmljYXRlIEF1dGhvcml0 +eTEfMB0GA1UEAwwWVHJ1c3RDb3IgUm9vdENlcnQgQ0EtMjAeFw0xNjAyMDQxMjMyMjNaFw0zNDEy +MzExNzI2MzlaMIGkMQswCQYDVQQGEwJQQTEPMA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5h +bWEgQ2l0eTEkMCIGA1UECgwbVHJ1c3RDb3IgU3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5U +cnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3JpdHkxHzAdBgNVBAMMFlRydXN0Q29yIFJvb3RDZXJ0 +IENBLTIwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCnIG7CKqJiJJWQdsg4foDSq8Gb +ZQWU9MEKENUCrO2fk8eHyLAnK0IMPQo+QVqedd2NyuCb7GgypGmSaIwLgQ5WoD4a3SwlFIIvl9Nk +RvRUqdw6VC0xK5mC8tkq1+9xALgxpL56JAfDQiDyitSSBBtlVkxs1Pu2YVpHI7TYabS3OtB0PAx1 +oYxOdqHp2yqlO/rOsP9+aij9JxzIsekp8VduZLTQwRVtDr4uDkbIXvRR/u8OYzo7cbrPb1nKDOOb +XUm4TOJXsZiKQlecdu/vvdFoqNL0Cbt3Nb4lggjEFixEIFapRBF37120Hapeaz6LMvYHL1cEksr1 +/p3C6eizjkxLAjHZ5DxIgif3GIJ2SDpxsROhOdUuxTTCHWKF3wP+TfSvPd9cW436cOGlfifHhi5q +jxLGhF5DUVCcGZt45vz27Ud+ez1m7xMTiF88oWP7+ayHNZ/zgp6kPwqcMWmLmaSISo5uZk3vFsQP +eSghYA2FFn3XVDjxklb9tTNMg9zXEJ9L/cb4Qr26fHMC4P99zVvh1Kxhe1fVSntb1IVYJ12/+Ctg +rKAmrhQhJ8Z3mjOAPF5GP/fDsaOGM8boXg25NSyqRsGFAnWAoOsk+xWq5Gd/bnc/9ASKL3x74xdh +8N0JqSDIvgmk0H5Ew7IwSjiqqewYmgeCK9u4nBit2uBGF6zPXQIDAQABo2MwYTAdBgNVHQ4EFgQU +2f4hQG6UnrybPZx9mCAZ5YwwYrIwHwYDVR0jBBgwFoAU2f4hQG6UnrybPZx9mCAZ5YwwYrIwDwYD +VR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQELBQADggIBAJ5Fngw7tu/h +Osh80QA9z+LqBrWyOrsGS2h60COXdKcs8AjYeVrXWoSK2BKaG9l9XE1wxaX5q+WjiYndAfrs3fnp +kpfbsEZC89NiqpX+MWcUaViQCqoL7jcjx1BRtPV+nuN79+TMQjItSQzL/0kMmx40/W5ulop5A7Zv +2wnL/V9lFDfhOPXzYRZY5LVtDQsEGz9QLX+zx3oaFoBg+Iof6Rsqxvm6ARppv9JYx1RXCI/hOWB3 +S6xZhBqI8d3LT3jX5+EzLfzuQfogsL7L9ziUwOHQhQ+77Sxzq+3+knYaZH9bDTMJBzN7Bj8RpFxw +PIXAz+OQqIN3+tvmxYxoZxBnpVIt8MSZj3+/0WvitUfW2dCFmU2Umw9Lje4AWkcdEQOsQRivh7dv +DDqPys/cA8GiCcjl/YBeyGBCARsaU1q7N6a3vLqE6R5sGtRk2tRD/pOLS/IseRYQ1JMLiI+h2IYU +RpFHmygk71dSTlxCnKr3Sewn6EAes6aJInKc9Q0ztFijMDvd1GpUk74aTfOTlPf8hAs/hCBcNANE +xdqtvArBAs8e5ZTZ845b2EzwnexhF7sUMlQMAimTHpKG9n/v55IFDlndmQguLvqcAFLTxWYp5KeX +RKQOKIETNcX2b2TmQcTVL8w0RSXPQQCWPUouwpaYT05KnJe32x+SMsj/D1Fu1uwJ +-----END CERTIFICATE----- + +TrustCor ECA-1 +============== +-----BEGIN CERTIFICATE----- +MIIEIDCCAwigAwIBAgIJAISCLF8cYtBAMA0GCSqGSIb3DQEBCwUAMIGcMQswCQYDVQQGEwJQQTEP +MA0GA1UECAwGUGFuYW1hMRQwEgYDVQQHDAtQYW5hbWEgQ2l0eTEkMCIGA1UECgwbVHJ1c3RDb3Ig +U3lzdGVtcyBTLiBkZSBSLkwuMScwJQYDVQQLDB5UcnVzdENvciBDZXJ0aWZpY2F0ZSBBdXRob3Jp +dHkxFzAVBgNVBAMMDlRydXN0Q29yIEVDQS0xMB4XDTE2MDIwNDEyMzIzM1oXDTI5MTIzMTE3Mjgw +N1owgZwxCzAJBgNVBAYTAlBBMQ8wDQYDVQQIDAZQYW5hbWExFDASBgNVBAcMC1BhbmFtYSBDaXR5 +MSQwIgYDVQQKDBtUcnVzdENvciBTeXN0ZW1zIFMuIGRlIFIuTC4xJzAlBgNVBAsMHlRydXN0Q29y +IENlcnRpZmljYXRlIEF1dGhvcml0eTEXMBUGA1UEAwwOVHJ1c3RDb3IgRUNBLTEwggEiMA0GCSqG +SIb3DQEBAQUAA4IBDwAwggEKAoIBAQDPj+ARtZ+odnbb3w9U73NjKYKtR8aja+3+XzP4Q1HpGjOR +MRegdMTUpwHmspI+ap3tDvl0mEDTPwOABoJA6LHip1GnHYMma6ve+heRK9jGrB6xnhkB1Zem6g23 +xFUfJ3zSCNV2HykVh0A53ThFEXXQmqc04L/NyFIduUd+Dbi7xgz2c1cWWn5DkR9VOsZtRASqnKmc +p0yJF4OuowReUoCLHhIlERnXDH19MURB6tuvsBzvgdAsxZohmz3tQjtQJvLsznFhBmIhVE5/wZ0+ +fyCMgMsq2JdiyIMzkX2woloPV+g7zPIlstR8L+xNxqE6FXrntl019fZISjZFZtS6mFjBAgMBAAGj 
+YzBhMB0GA1UdDgQWBBREnkj1zG1I1KBLf/5ZJC+Dl5mahjAfBgNVHSMEGDAWgBREnkj1zG1I1KBL +f/5ZJC+Dl5mahjAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBhjANBgkqhkiG9w0BAQsF +AAOCAQEABT41XBVwm8nHc2FvcivUwo/yQ10CzsSUuZQRg2dd4mdsdXa/uwyqNsatR5Nj3B5+1t4u +/ukZMjgDfxT2AHMsWbEhBuH7rBiVDKP/mZb3Kyeb1STMHd3BOuCYRLDE5D53sXOpZCz2HAF8P11F +hcCF5yWPldwX8zyfGm6wyuMdKulMY/okYWLW2n62HGz1Ah3UKt1VkOsqEUc8Ll50soIipX1TH0Xs +J5F95yIW6MBoNtjG8U+ARDL54dHRHareqKucBK+tIA5kmE2la8BIWJZpTdwHjFGTot+fDz2LYLSC +jaoITmJF4PkL0uDgPFveXHEnJcLmA4GLEFPjx1WitJ/X5g== +-----END CERTIFICATE----- + +SSL.com Root Certification Authority RSA +======================================== +-----BEGIN CERTIFICATE----- +MIIF3TCCA8WgAwIBAgIIeyyb0xaAMpkwDQYJKoZIhvcNAQELBQAwfDELMAkGA1UEBhMCVVMxDjAM +BgNVBAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9TU0wgQ29ycG9yYXRpb24x +MTAvBgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBSU0EwHhcNMTYw +MjEyMTczOTM5WhcNNDEwMjEyMTczOTM5WjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMx +EDAOBgNVBAcMB0hvdXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NM +LmNvbSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IFJTQTCCAiIwDQYJKoZIhvcNAQEBBQAD +ggIPADCCAgoCggIBAPkP3aMrfcvQKv7sZ4Wm5y4bunfh4/WvpOz6Sl2RxFdHaxh3a3by/ZPkPQ/C +Fp4LZsNWlJ4Xg4XOVu/yFv0AYvUiCVToZRdOQbngT0aXqhvIuG5iXmmxX9sqAn78bMrzQdjt0Oj8 +P2FI7bADFB0QDksZ4LtO7IZl/zbzXmcCC52GVWH9ejjt/uIZALdvoVBidXQ8oPrIJZK0bnoix/ge +oeOy3ZExqysdBP+lSgQ36YWkMyv94tZVNHwZpEpox7Ko07fKoZOI68GXvIz5HdkihCR0xwQ9aqkp +k8zruFvh/l8lqjRYyMEjVJ0bmBHDOJx+PYZspQ9AhnwC9FwCTyjLrnGfDzrIM/4RJTXq/LrFYD3Z +fBjVsqnTdXgDciLKOsMf7yzlLqn6niy2UUb9rwPW6mBo6oUWNmuF6R7As93EJNyAKoFBbZQ+yODJ +gUEAnl6/f8UImKIYLEJAs/lvOCdLToD0PYFH4Ih86hzOtXVcUS4cK38acijnALXRdMbX5J+tB5O2 +UzU1/Dfkw/ZdFr4hc96SCvigY2q8lpJqPvi8ZVWb3vUNiSYE/CUapiVpy8JtynziWV+XrOvvLsi8 +1xtZPCvM8hnIk2snYxnP/Okm+Mpxm3+T/jRnhE6Z6/yzeAkzcLpmpnbtG3PrGqUNxCITIJRWCk4s +bE6x/c+cCbqiM+2HAgMBAAGjYzBhMB0GA1UdDgQWBBTdBAkHovV6fVJTEpKV7jiAJQ2mWTAPBgNV +HRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFN0ECQei9Xp9UlMSkpXuOIAlDaZZMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAIBgRlCn7Jp0cHh5wYfGVcpNxJK1ok1iOMq8bs3AD/CUr +dIWQPXhq9LmLpZc7tRiRux6n+UBbkflVma8eEdBcHadm47GUBwwyOabqG7B52B2ccETjit3E+ZUf +ijhDPwGFpUenPUayvOUiaPd7nNgsPgohyC0zrL/FgZkxdMF1ccW+sfAjRfSda/wZY52jvATGGAsl +u1OJD7OAUN5F7kR/q5R4ZJjT9ijdh9hwZXT7DrkT66cPYakylszeu+1jTBi7qUD3oFRuIIhxdRjq +erQ0cuAjJ3dctpDqhiVAq+8zD8ufgr6iIPv2tS0a5sKFsXQP+8hlAqRSAUfdSSLBv9jra6x+3uxj +MxW3IwiPxg+NQVrdjsW5j+VFP3jbutIbQLH+cU0/4IGiul607BXgk90IH37hVZkLId6Tngr75qNJ +vTYw/ud3sqB1l7UtgYgXZSD32pAAn8lSzDLKNXz1PQ/YK9f1JmzJBjSWFupwWRoyeXkLtoh/D1JI +Pb9s2KJELtFOt3JY04kTlf5Eq/jXixtunLwsoFvVagCvXzfh1foQC5ichucmj87w7G6KVwuA406y +wKBjYZC6VWg3dGq2ktufoYYitmUnDuy2n0Jg5GfCtdpBC8TTi2EbvPofkSvXRAdeuims2cXp71NI +WuuA8ShYIc2wBlX7Jz9TkHCpBB5XJ7k= +-----END CERTIFICATE----- + +SSL.com Root Certification Authority ECC +======================================== +-----BEGIN CERTIFICATE----- +MIICjTCCAhSgAwIBAgIIdebfy8FoW6gwCgYIKoZIzj0EAwIwfDELMAkGA1UEBhMCVVMxDjAMBgNV +BAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9TU0wgQ29ycG9yYXRpb24xMTAv +BgNVBAMMKFNTTC5jb20gUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYwMjEy +MTgxNDAzWhcNNDEwMjEyMTgxNDAzWjB8MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMxEDAO +BgNVBAcMB0hvdXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjExMC8GA1UEAwwoU1NMLmNv +bSBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuBBAAiA2IA +BEVuqVDEpiM2nl8ojRfLliJkP9x6jh3MCLOicSS6jkm5BBtHllirLZXI7Z4INcgn64mMU1jrYor+ +8FsPazFSY0E7ic3s7LaNGdM0B9y7xgZ/wkWV7Mt/qCPgCemB+vNH06NjMGEwHQYDVR0OBBYEFILR 
+hXMw5zUE044CkvvlpNHEIejNMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUgtGFczDnNQTT +jgKS++Wk0cQh6M0wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2cAMGQCMG/n61kRpGDPYbCW +e+0F+S8Tkdzt5fxQaxFGRrMcIQBiu77D5+jNB5n5DQtdcj7EqgIwH7y6C+IwJPt8bYBVCpk+gA0z +5Wajs6O7pdWLjwkspl1+4vAHCGht0nxpbl/f5Wpl +-----END CERTIFICATE----- + +SSL.com EV Root Certification Authority RSA R2 +============================================== +-----BEGIN CERTIFICATE----- +MIIF6zCCA9OgAwIBAgIIVrYpzTS8ePYwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNVBAYTAlVTMQ4w +DAYDVQQIDAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9u +MTcwNQYDVQQDDC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIy +MB4XDTE3MDUzMTE4MTQzN1oXDTQyMDUzMDE4MTQzN1owgYIxCzAJBgNVBAYTAlVTMQ4wDAYDVQQI +DAVUZXhhczEQMA4GA1UEBwwHSG91c3RvbjEYMBYGA1UECgwPU1NMIENvcnBvcmF0aW9uMTcwNQYD +VQQDDC5TU0wuY29tIEVWIFJvb3QgQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkgUlNBIFIyMIICIjAN +BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAjzZlQOHWTcDXtOlG2mvqM0fNTPl9fb69LT3w23jh +hqXZuglXaO1XPqDQCEGD5yhBJB/jchXQARr7XnAjssufOePPxU7Gkm0mxnu7s9onnQqG6YE3Bf7w +cXHswxzpY6IXFJ3vG2fThVUCAtZJycxa4bH3bzKfydQ7iEGonL3Lq9ttewkfokxykNorCPzPPFTO +Zw+oz12WGQvE43LrrdF9HSfvkusQv1vrO6/PgN3B0pYEW3p+pKk8OHakYo6gOV7qd89dAFmPZiw+ +B6KjBSYRaZfqhbcPlgtLyEDhULouisv3D5oi53+aNxPN8k0TayHRwMwi8qFG9kRpnMphNQcAb9Zh +CBHqurj26bNg5U257J8UZslXWNvNh2n4ioYSA0e/ZhN2rHd9NCSFg83XqpyQGp8hLH94t2S42Oim +9HizVcuE0jLEeK6jj2HdzghTreyI/BXkmg3mnxp3zkyPuBQVPWKchjgGAGYS5Fl2WlPAApiiECto +RHuOec4zSnaqW4EWG7WK2NAAe15itAnWhmMOpgWVSbooi4iTsjQc2KRVbrcc0N6ZVTsj9CLg+Slm +JuwgUHfbSguPvuUCYHBBXtSuUDkiFCbLsjtzdFVHB3mBOagwE0TlBIqulhMlQg+5U8Sb/M3kHN48 ++qvWBkofZ6aYMBzdLNvcGJVXZsb/XItW9XcCAwEAAaNjMGEwDwYDVR0TAQH/BAUwAwEB/zAfBgNV +HSMEGDAWgBT5YLvU49U09rj1BoAlp3PbRmmonjAdBgNVHQ4EFgQU+WC71OPVNPa49QaAJadz20Zp +qJ4wDgYDVR0PAQH/BAQDAgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBWs47LCp1Jjr+kxJG7ZhcFUZh1 +++VQLHqe8RT6q9OKPv+RKY9ji9i0qVQBDb6Thi/5Sm3HXvVX+cpVHBK+Rw82xd9qt9t1wkclf7nx +Y/hoLVUE0fKNsKTPvDxeH3jnpaAgcLAExbf3cqfeIg29MyVGjGSSJuM+LmOW2puMPfgYCdcDzH2G +guDKBAdRUNf/ktUM79qGn5nX67evaOI5JpS6aLe/g9Pqemc9YmeuJeVy6OLk7K4S9ksrPJ/psEDz +OFSz/bdoyNrGj1E8svuR3Bznm53htw1yj+KkxKl4+esUrMZDBcJlOSgYAsOCsp0FvmXtll9ldDz7 +CTUue5wT/RsPXcdtgTpWD8w74a8CLyKsRspGPKAcTNZEtF4uXBVmCeEmKf7GUmG6sXP/wwyc5Wxq +lD8UykAWlYTzWamsX0xhk23RO8yilQwipmdnRC652dKKQbNmC1r7fSOl8hqw/96bg5Qu0T/fkreR +rwU7ZcegbLHNYhLDkBvjJc40vG93drEQw/cFGsDWr3RiSBd3kmmQYRzelYB0VI8YHMPzA9C/pEN1 +hlMYegouCRw2n5H9gooiS9EOUCXdywMMF8mDAAhONU2Ki+3wApRmLER/y5UnlhetCTCstnEXbosX +9hwJ1C07mKVx01QT2WDz9UtmT/rx7iASjbSsV7FFY6GsdqnC+w== +-----END CERTIFICATE----- + +SSL.com EV Root Certification Authority ECC +=========================================== +-----BEGIN CERTIFICATE----- +MIIClDCCAhqgAwIBAgIILCmcWxbtBZUwCgYIKoZIzj0EAwIwfzELMAkGA1UEBhMCVVMxDjAMBgNV +BAgMBVRleGFzMRAwDgYDVQQHDAdIb3VzdG9uMRgwFgYDVQQKDA9TU0wgQ29ycG9yYXRpb24xNDAy +BgNVBAMMK1NTTC5jb20gRVYgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eSBFQ0MwHhcNMTYw +MjEyMTgxNTIzWhcNNDEwMjEyMTgxNTIzWjB/MQswCQYDVQQGEwJVUzEOMAwGA1UECAwFVGV4YXMx +EDAOBgNVBAcMB0hvdXN0b24xGDAWBgNVBAoMD1NTTCBDb3Jwb3JhdGlvbjE0MDIGA1UEAwwrU1NM +LmNvbSBFViBSb290IENlcnRpZmljYXRpb24gQXV0aG9yaXR5IEVDQzB2MBAGByqGSM49AgEGBSuB +BAAiA2IABKoSR5CYG/vvw0AHgyBO8TCCogbR8pKGYfL2IWjKAMTH6kMAVIbc/R/fALhBYlzccBYy +3h+Z1MzFB8gIH2EWB1E9fVwHU+M1OIzfzZ/ZLg1KthkuWnBaBu2+8KGwytAJKaNjMGEwHQYDVR0O +BBYEFFvKXuXe0oGqzagtZFG22XKbl+ZPMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAUW8pe +5d7SgarNqC1kUbbZcpuX5k8wDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMCA2gAMGUCMQCK5kCJ +N+vp1RPZytRrJPOwPYdGWBrssd9v+1a6cGvHOMzosYxPD/fxZ3YOg9AeUY8CMD32IygmTMZgh5Mm 
+m7I1HrrW9zzRHM76JTymGoEVW/MSD2zuZYrJh6j5B+BimoxcSg== +-----END CERTIFICATE----- + +GlobalSign Root CA - R6 +======================= +-----BEGIN CERTIFICATE----- +MIIFgzCCA2ugAwIBAgIORea7A4Mzw4VlSOb/RVEwDQYJKoZIhvcNAQEMBQAwTDEgMB4GA1UECxMX +R2xvYmFsU2lnbiBSb290IENBIC0gUjYxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkds +b2JhbFNpZ24wHhcNMTQxMjEwMDAwMDAwWhcNMzQxMjEwMDAwMDAwWjBMMSAwHgYDVQQLExdHbG9i +YWxTaWduIFJvb3QgQ0EgLSBSNjETMBEGA1UEChMKR2xvYmFsU2lnbjETMBEGA1UEAxMKR2xvYmFs +U2lnbjCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJUH6HPKZvnsFMp7PPcNCPG0RQss +grRIxutbPK6DuEGSMxSkb3/pKszGsIhrxbaJ0cay/xTOURQh7ErdG1rG1ofuTToVBu1kZguSgMpE +3nOUTvOniX9PeGMIyBJQbUJmL025eShNUhqKGoC3GYEOfsSKvGRMIRxDaNc9PIrFsmbVkJq3MQbF +vuJtMgamHvm566qjuL++gmNQ0PAYid/kD3n16qIfKtJwLnvnvJO7bVPiSHyMEAc4/2ayd2F+4OqM +PKq0pPbzlUoSB239jLKJz9CgYXfIWHSw1CM69106yqLbnQneXUQtkPGBzVeS+n68UARjNN9rkxi+ +azayOeSsJDa38O+2HBNXk7besvjihbdzorg1qkXy4J02oW9UivFyVm4uiMVRQkQVlO6jxTiWm05O +WgtH8wY2SXcwvHE35absIQh1/OZhFj931dmRl4QKbNQCTXTAFO39OfuD8l4UoQSwC+n+7o/hbguy +CLNhZglqsQY6ZZZZwPA1/cnaKI0aEYdwgQqomnUdnjqGBQCe24DWJfncBZ4nWUx2OVvq+aWh2IMP +0f/fMBH5hc8zSPXKbWQULHpYT9NLCEnFlWQaYw55PfWzjMpYrZxCRXluDocZXFSxZba/jJvcE+kN +b7gu3GduyYsRtYQUigAZcIN5kZeR1BonvzceMgfYFGM8KEyvAgMBAAGjYzBhMA4GA1UdDwEB/wQE +AwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSubAWjkxPioufi1xzWx/B/yGdToDAfBgNV +HSMEGDAWgBSubAWjkxPioufi1xzWx/B/yGdToDANBgkqhkiG9w0BAQwFAAOCAgEAgyXt6NH9lVLN +nsAEoJFp5lzQhN7craJP6Ed41mWYqVuoPId8AorRbrcWc+ZfwFSY1XS+wc3iEZGtIxg93eFyRJa0 +lV7Ae46ZeBZDE1ZXs6KzO7V33EByrKPrmzU+sQghoefEQzd5Mr6155wsTLxDKZmOMNOsIeDjHfrY +BzN2VAAiKrlNIC5waNrlU/yDXNOd8v9EDERm8tLjvUYAGm0CuiVdjaExUd1URhxN25mW7xocBFym +Fe944Hn+Xds+qkxV/ZoVqW/hpvvfcDDpw+5CRu3CkwWJ+n1jez/QcYF8AOiYrg54NMMl+68KnyBr +3TsTjxKM4kEaSHpzoHdpx7Zcf4LIHv5YGygrqGytXm3ABdJ7t+uA/iU3/gKbaKxCXcPu9czc8FB1 +0jZpnOZ7BN9uBmm23goJSFmH63sUYHpkqmlD75HHTOwY3WzvUy2MmeFe8nI+z1TIvWfspA9MRf/T +uTAjB0yPEL+GltmZWrSZVxykzLsViVO6LAUP5MSeGbEYNNVMnbrt9x+vJJUEeKgDu+6B5dpffItK +oZB0JaezPkvILFa9x8jvOOJckvB595yEunQtYQEgfn7R8k8HWV+LLUNS60YMlOH1Zkd5d9VUWx+t +JDfLRVpOoERIyNiwmcUVhAn21klJwGW45hpxbqCo8YLoRT5s1gLXCmeDBVrJpBA= +-----END CERTIFICATE----- + +OISTE WISeKey Global Root GC CA +=============================== +-----BEGIN CERTIFICATE----- +MIICaTCCAe+gAwIBAgIQISpWDK7aDKtARb8roi066jAKBggqhkjOPQQDAzBtMQswCQYDVQQGEwJD +SDEQMA4GA1UEChMHV0lTZUtleTEiMCAGA1UECxMZT0lTVEUgRm91bmRhdGlvbiBFbmRvcnNlZDEo +MCYGA1UEAxMfT0lTVEUgV0lTZUtleSBHbG9iYWwgUm9vdCBHQyBDQTAeFw0xNzA1MDkwOTQ4MzRa +Fw00MjA1MDkwOTU4MzNaMG0xCzAJBgNVBAYTAkNIMRAwDgYDVQQKEwdXSVNlS2V5MSIwIAYDVQQL +ExlPSVNURSBGb3VuZGF0aW9uIEVuZG9yc2VkMSgwJgYDVQQDEx9PSVNURSBXSVNlS2V5IEdsb2Jh +bCBSb290IEdDIENBMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAETOlQwMYPchi82PG6s4nieUqjFqdr +VCTbUf/q9Akkwwsin8tqJ4KBDdLArzHkdIJuyiXZjHWd8dvQmqJLIX4Wp2OQ0jnUsYd4XxiWD1Ab +NTcPasbc2RNNpI6QN+a9WzGRo1QwUjAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAd +BgNVHQ4EFgQUSIcUrOPDnpBgOtfKie7TrYy0UGYwEAYJKwYBBAGCNxUBBAMCAQAwCgYIKoZIzj0E +AwMDaAAwZQIwJsdpW9zV57LnyAyMjMPdeYwbY9XJUpROTYJKcx6ygISpJcBMWm1JKWB4E+J+SOtk +AjEA2zQgMgj/mkkCtojeFK9dbJlxjRo/i9fgojaGHAeCOnZT/cKi7e97sIBPWA9LUzm9 +-----END CERTIFICATE----- + +UCA Global G2 Root +================== +-----BEGIN CERTIFICATE----- +MIIFRjCCAy6gAwIBAgIQXd+x2lqj7V2+WmUgZQOQ7zANBgkqhkiG9w0BAQsFADA9MQswCQYDVQQG +EwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxGzAZBgNVBAMMElVDQSBHbG9iYWwgRzIgUm9vdDAeFw0x +NjAzMTEwMDAwMDBaFw00MDEyMzEwMDAwMDBaMD0xCzAJBgNVBAYTAkNOMREwDwYDVQQKDAhVbmlU +cnVzdDEbMBkGA1UEAwwSVUNBIEdsb2JhbCBHMiBSb290MIICIjANBgkqhkiG9w0BAQEFAAOCAg8A 
+MIICCgKCAgEAxeYrb3zvJgUno4Ek2m/LAfmZmqkywiKHYUGRO8vDaBsGxUypK8FnFyIdK+35KYmT +oni9kmugow2ifsqTs6bRjDXVdfkX9s9FxeV67HeToI8jrg4aA3++1NDtLnurRiNb/yzmVHqUwCoV +8MmNsHo7JOHXaOIxPAYzRrZUEaalLyJUKlgNAQLx+hVRZ2zA+te2G3/RVogvGjqNO7uCEeBHANBS +h6v7hn4PJGtAnTRnvI3HLYZveT6OqTwXS3+wmeOwcWDcC/Vkw85DvG1xudLeJ1uK6NjGruFZfc8o +LTW4lVYa8bJYS7cSN8h8s+1LgOGN+jIjtm+3SJUIsUROhYw6AlQgL9+/V087OpAh18EmNVQg7Mc/ +R+zvWr9LesGtOxdQXGLYD0tK3Cv6brxzks3sx1DoQZbXqX5t2Okdj4q1uViSukqSKwxW/YDrCPBe +KW4bHAyvj5OJrdu9o54hyokZ7N+1wxrrFv54NkzWbtA+FxyQF2smuvt6L78RHBgOLXMDj6DlNaBa +4kx1HXHhOThTeEDMg5PXCp6dW4+K5OXgSORIskfNTip1KnvyIvbJvgmRlld6iIis7nCs+dwp4wwc +OxJORNanTrAmyPPZGpeRaOrvjUYG0lZFWJo8DA+DuAUlwznPO6Q0ibd5Ei9Hxeepl2n8pndntd97 +8XplFeRhVmUCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0O +BBYEFIHEjMz15DD/pQwIX4wVZyF0Ad/fMA0GCSqGSIb3DQEBCwUAA4ICAQATZSL1jiutROTL/7lo +5sOASD0Ee/ojL3rtNtqyzm325p7lX1iPyzcyochltq44PTUbPrw7tgTQvPlJ9Zv3hcU2tsu8+Mg5 +1eRfB70VVJd0ysrtT7q6ZHafgbiERUlMjW+i67HM0cOU2kTC5uLqGOiiHycFutfl1qnN3e92mI0A +Ds0b+gO3joBYDic/UvuUospeZcnWhNq5NXHzJsBPd+aBJ9J3O5oUb3n09tDh05S60FdRvScFDcH9 +yBIw7m+NESsIndTUv4BFFJqIRNow6rSn4+7vW4LVPtateJLbXDzz2K36uGt/xDYotgIVilQsnLAX +c47QN6MUPJiVAAwpBVueSUmxX8fjy88nZY41F7dXyDDZQVu5FLbowg+UMaeUmMxq67XhJ/UQqAHo +jhJi6IjMtX9Gl8CbEGY4GjZGXyJoPd/JxhMnq1MGrKI8hgZlb7F+sSlEmqO6SWkoaY/X5V+tBIZk +bxqgDMUIYs6Ao9Dz7GjevjPHF1t/gMRMTLGmhIrDO7gJzRSBuhjjVFc2/tsvfEehOjPI+Vg7RE+x +ygKJBJYoaMVLuCaJu9YzL1DV/pqJuhgyklTGW+Cd+V7lDSKb9triyCGyYiGqhkCyLmTTX8jjfhFn +RR8F/uOi77Oos/N9j/gMHyIfLXC0uAE0djAA5SN4p1bXUB+K+wb1whnw0A== +-----END CERTIFICATE----- + +UCA Extended Validation Root +============================ +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgIQT9Irj/VkyDOeTzRYZiNwYDANBgkqhkiG9w0BAQsFADBHMQswCQYDVQQG +EwJDTjERMA8GA1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9u +IFJvb3QwHhcNMTUwMzEzMDAwMDAwWhcNMzgxMjMxMDAwMDAwWjBHMQswCQYDVQQGEwJDTjERMA8G +A1UECgwIVW5pVHJ1c3QxJTAjBgNVBAMMHFVDQSBFeHRlbmRlZCBWYWxpZGF0aW9uIFJvb3QwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCpCQcoEwKwmeBkqh5DFnpzsZGgdT6o+uM4AHrs +iWogD4vFsJszA1qGxliG1cGFu0/GnEBNyr7uaZa4rYEwmnySBesFK5pI0Lh2PpbIILvSsPGP2KxF +Rv+qZ2C0d35qHzwaUnoEPQc8hQ2E0B92CvdqFN9y4zR8V05WAT558aopO2z6+I9tTcg1367r3CTu +eUWnhbYFiN6IXSV8l2RnCdm/WhUFhvMJHuxYMjMR83dksHYf5BA1FxvyDrFspCqjc/wJHx4yGVMR +59mzLC52LqGj3n5qiAno8geK+LLNEOfic0CTuwjRP+H8C5SzJe98ptfRr5//lpr1kXuYC3fUfugH +0mK1lTnj8/FtDw5lhIpjVMWAtuCeS31HJqcBCF3RiJ7XwzJE+oJKCmhUfzhTA8ykADNkUVkLo4KR +el7sFsLzKuZi2irbWWIQJUoqgQtHB0MGcIfS+pMRKXpITeuUx3BNr2fVUbGAIAEBtHoIppB/TuDv +B0GHr2qlXov7z1CymlSvw4m6WC31MJixNnI5fkkE/SmnTHnkBVfblLkWU41Gsx2VYVdWf6/wFlth +WG82UBEL2KwrlRYaDh8IzTY0ZRBiZtWAXxQgXy0MoHgKaNYs1+lvK9JKBZP8nm9rZ/+I8U6laUpS +NwXqxhaN0sSZ0YIrO7o1dfdRUVjzyAfd5LQDfwIDAQABo0IwQDAdBgNVHQ4EFgQU2XQ65DA9DfcS +3H5aBZ8eNJr34RQwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAYYwDQYJKoZIhvcNAQEL +BQADggIBADaNl8xCFWQpN5smLNb7rhVpLGsaGvdftvkHTFnq88nIua7Mui563MD1sC3AO6+fcAUR +ap8lTwEpcOPlDOHqWnzcSbvBHiqB9RZLcpHIojG5qtr8nR/zXUACE/xOHAbKsxSQVBcZEhrxH9cM +aVr2cXj0lH2RC47skFSOvG+hTKv8dGT9cZr4QQehzZHkPJrgmzI5c6sq1WnIeJEmMX3ixzDx/BR4 +dxIOE/TdFpS/S2d7cFOFyrC78zhNLJA5wA3CXWvp4uXViI3WLL+rG761KIcSF3Ru/H38j9CHJrAb ++7lsq+KePRXBOy5nAliRn+/4Qh8st2j1da3Ptfb/EX3C8CSlrdP6oDyp+l3cpaDvRKS+1ujl5BOW +F3sGPjLtx7dCvHaj2GU4Kzg1USEODm8uNBNA4StnDG1KQTAYI1oyVZnJF+A83vbsea0rWBmirSwi +GpWOvpaQXUJXxPkUAzUrHC1RVwinOt4/5Mi0A3PCwSaAuwtCH60NryZy2sy+s6ODWA2CxR9GUeOc +GMyNm43sSet1UNWMKFnKdDTajAshqx7qG+XH/RU+wBeq+yNuJkbL+vmxcmtpzyKEC2IPrNkZAJSi +djzULZrtBJ4tBmIQN1IchXIbJ+XMxjHsN+xjWZsLHXbMfjKaiJUINlK73nZfdklJrX+9ZSCyycEr +dhh2n1ax +-----END 
CERTIFICATE----- + +Certigna Root CA +================ +-----BEGIN CERTIFICATE----- +MIIGWzCCBEOgAwIBAgIRAMrpG4nxVQMNo+ZBbcTjpuEwDQYJKoZIhvcNAQELBQAwWjELMAkGA1UE +BhMCRlIxEjAQBgNVBAoMCURoaW15b3RpczEcMBoGA1UECwwTMDAwMiA0ODE0NjMwODEwMDAzNjEZ +MBcGA1UEAwwQQ2VydGlnbmEgUm9vdCBDQTAeFw0xMzEwMDEwODMyMjdaFw0zMzEwMDEwODMyMjda +MFoxCzAJBgNVBAYTAkZSMRIwEAYDVQQKDAlEaGlteW90aXMxHDAaBgNVBAsMEzAwMDIgNDgxNDYz +MDgxMDAwMzYxGTAXBgNVBAMMEENlcnRpZ25hIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEBAQUAA4IC +DwAwggIKAoICAQDNGDllGlmx6mQWDoyUJJV8g9PFOSbcDO8WV43X2KyjQn+Cyu3NW9sOty3tRQgX +stmzy9YXUnIo245Onoq2C/mehJpNdt4iKVzSs9IGPjA5qXSjklYcoW9MCiBtnyN6tMbaLOQdLNyz +KNAT8kxOAkmhVECe5uUFoC2EyP+YbNDrihqECB63aCPuI9Vwzm1RaRDuoXrC0SIxwoKF0vJVdlB8 +JXrJhFwLrN1CTivngqIkicuQstDuI7pmTLtipPlTWmR7fJj6o0ieD5Wupxj0auwuA0Wv8HT4Ks16 +XdG+RCYyKfHx9WzMfgIhC59vpD++nVPiz32pLHxYGpfhPTc3GGYo0kDFUYqMwy3OU4gkWGQwFsWq +4NYKpkDfePb1BHxpE4S80dGnBs8B92jAqFe7OmGtBIyT46388NtEbVncSVmurJqZNjBBe3YzIoej +wpKGbvlw7q6Hh5UbxHq9MfPU0uWZ/75I7HX1eBYdpnDBfzwboZL7z8g81sWTCo/1VTp2lc5ZmIoJ +lXcymoO6LAQ6l73UL77XbJuiyn1tJslV1c/DeVIICZkHJC1kJWumIWmbat10TWuXekG9qxf5kBdI +jzb5LdXF2+6qhUVB+s06RbFo5jZMm5BX7CO5hwjCxAnxl4YqKE3idMDaxIzb3+KhF1nOJFl0Mdp/ +/TBt2dzhauH8XwIDAQABo4IBGjCCARYwDwYDVR0TAQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYw +HQYDVR0OBBYEFBiHVuBud+4kNTxOc5of1uHieX4rMB8GA1UdIwQYMBaAFBiHVuBud+4kNTxOc5of +1uHieX4rMEQGA1UdIAQ9MDswOQYEVR0gADAxMC8GCCsGAQUFBwIBFiNodHRwczovL3d3d3cuY2Vy +dGlnbmEuZnIvYXV0b3JpdGVzLzBtBgNVHR8EZjBkMC+gLaArhilodHRwOi8vY3JsLmNlcnRpZ25h +LmZyL2NlcnRpZ25hcm9vdGNhLmNybDAxoC+gLYYraHR0cDovL2NybC5kaGlteW90aXMuY29tL2Nl +cnRpZ25hcm9vdGNhLmNybDANBgkqhkiG9w0BAQsFAAOCAgEAlLieT/DjlQgi581oQfccVdV8AOIt +OoldaDgvUSILSo3L6btdPrtcPbEo/uRTVRPPoZAbAh1fZkYJMyjhDSSXcNMQH+pkV5a7XdrnxIxP +TGRGHVyH41neQtGbqH6mid2PHMkwgu07nM3A6RngatgCdTer9zQoKJHyBApPNeNgJgH60BGM+RFq +7q89w1DTj18zeTyGqHNFkIwgtnJzFyO+B2XleJINugHA64wcZr+shncBlA2c5uk5jR+mUYyZDDl3 +4bSb+hxnV29qao6pK0xXeXpXIs/NX2NGjVxZOob4Mkdio2cNGJHc+6Zr9UhhcyNZjgKnvETq9Emd +8VRY+WCv2hikLyhF3HqgiIZd8zvn/yk1gPxkQ5Tm4xxvvq0OKmOZK8l+hfZx6AYDlf7ej0gcWtSS +6Cvu5zHbugRqh5jnxV/vfaci9wHYTfmJ0A6aBVmknpjZbyvKcL5kwlWj9Omvw5Ip3IgWJJk8jSaY +tlu3zM63Nwf9JtmYhST/WSMDmu2dnajkXjjO11INb9I/bbEFa0nOipFGc/T2L/Coc3cOZayhjWZS +aX5LaAzHHjcng6WMxwLkFM1JAbBzs/3GkDpv0mztO+7skb6iQ12LAEpmJURw3kAP+HwV96LOPNde +E4yBFxgX0b3xdxA61GU5wSesVywlVP+i2k+KYTlerj1KjL0= +-----END CERTIFICATE----- + +emSign Root CA - G1 +=================== +-----BEGIN CERTIFICATE----- +MIIDlDCCAnygAwIBAgIKMfXkYgxsWO3W2DANBgkqhkiG9w0BAQsFADBnMQswCQYDVQQGEwJJTjET +MBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9sb2dpZXMgTGltaXRl +ZDEcMBoGA1UEAxMTZW1TaWduIFJvb3QgQ0EgLSBHMTAeFw0xODAyMTgxODMwMDBaFw00MzAyMTgx +ODMwMDBaMGcxCzAJBgNVBAYTAklOMRMwEQYDVQQLEwplbVNpZ24gUEtJMSUwIwYDVQQKExxlTXVk +aHJhIFRlY2hub2xvZ2llcyBMaW1pdGVkMRwwGgYDVQQDExNlbVNpZ24gUm9vdCBDQSAtIEcxMIIB +IjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAk0u76WaK7p1b1TST0Bsew+eeuGQzf2N4aLTN +LnF115sgxk0pvLZoYIr3IZpWNVrzdr3YzZr/k1ZLpVkGoZM0Kd0WNHVO8oG0x5ZOrRkVUkr+PHB1 +cM2vK6sVmjM8qrOLqs1D/fXqcP/tzxE7lM5OMhbTI0Aqd7OvPAEsbO2ZLIvZTmmYsvePQbAyeGHW +DV/D+qJAkh1cF+ZwPjXnorfCYuKrpDhMtTk1b+oDafo6VGiFbdbyL0NVHpENDtjVaqSW0RM8LHhQ +6DqS0hdW5TUaQBw+jSztOd9C4INBdN+jzcKGYEho42kLVACL5HZpIQ15TjQIXhTCzLG3rdd8cIrH +hQIDAQABo0IwQDAdBgNVHQ4EFgQU++8Nhp6w492pufEhF38+/PB3KxowDgYDVR0PAQH/BAQDAgEG +MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAFn/8oz1h31xPaOfG1vR2vjTnGs2 +vZupYeveFix0PZ7mddrXuqe8QhfnPZHr5X3dPpzxz5KsbEjMwiI/aTvFthUvozXGaCocV685743Q +NcMYDHsAVhzNixl03r4PEuDQqqE/AjSxcM6dGNYIAwlG7mDgfrbESQRRfXBgvKqy/3lyeqYdPV8q 
++Mri/Tm3R7nrft8EI6/6nAYH6ftjk4BAtcZsCjEozgyfz7MjNYBBjWzEN3uBL4ChQEKF6dk4jeih +U80Bv2noWgbyRQuQ+q7hv53yrlc8pa6yVvSLZUDp/TGBLPQ5Cdjua6e0ph0VpZj3AYHYhX3zUVxx +iN66zB+Afko= +-----END CERTIFICATE----- + +emSign ECC Root CA - G3 +======================= +-----BEGIN CERTIFICATE----- +MIICTjCCAdOgAwIBAgIKPPYHqWhwDtqLhDAKBggqhkjOPQQDAzBrMQswCQYDVQQGEwJJTjETMBEG +A1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11ZGhyYSBUZWNobm9sb2dpZXMgTGltaXRlZDEg +MB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0gRzMwHhcNMTgwMjE4MTgzMDAwWhcNNDMwMjE4 +MTgzMDAwWjBrMQswCQYDVQQGEwJJTjETMBEGA1UECxMKZW1TaWduIFBLSTElMCMGA1UEChMcZU11 +ZGhyYSBUZWNobm9sb2dpZXMgTGltaXRlZDEgMB4GA1UEAxMXZW1TaWduIEVDQyBSb290IENBIC0g +RzMwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAAQjpQy4LRL1KPOxst3iAhKAnjlfSU2fySU0WXTsuwYc +58Byr+iuL+FBVIcUqEqy6HyC5ltqtdyzdc6LBtCGI79G1Y4PPwT01xySfvalY8L1X44uT6EYGQIr +MgqCZH0Wk9GjQjBAMB0GA1UdDgQWBBR8XQKEE9TMipuBzhccLikenEhjQjAOBgNVHQ8BAf8EBAMC +AQYwDwYDVR0TAQH/BAUwAwEB/zAKBggqhkjOPQQDAwNpADBmAjEAvvNhzwIQHWSVB7gYboiFBS+D +CBeQyh+KTOgNG3qxrdWBCUfvO6wIBHxcmbHtRwfSAjEAnbpV/KlK6O3t5nYBQnvI+GDZjVGLVTv7 +jHvrZQnD+JbNR6iC8hZVdyR+EhCVBCyj +-----END CERTIFICATE----- + +emSign Root CA - C1 +=================== +-----BEGIN CERTIFICATE----- +MIIDczCCAlugAwIBAgILAK7PALrEzzL4Q7IwDQYJKoZIhvcNAQELBQAwVjELMAkGA1UEBhMCVVMx +EzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQDExNlbVNp +Z24gUm9vdCBDQSAtIEMxMB4XDTE4MDIxODE4MzAwMFoXDTQzMDIxODE4MzAwMFowVjELMAkGA1UE +BhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMRwwGgYDVQQD +ExNlbVNpZ24gUm9vdCBDQSAtIEMxMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAz+up +ufGZBczYKCFK83M0UYRWEPWgTywS4/oTmifQz/l5GnRfHXk5/Fv4cI7gklL35CX5VIPZHdPIWoU/ +Xse2B+4+wM6ar6xWQio5JXDWv7V7Nq2s9nPczdcdioOl+yuQFTdrHCZH3DspVpNqs8FqOp099cGX +OFgFixwR4+S0uF2FHYP+eF8LRWgYSKVGczQ7/g/IdrvHGPMF0Ybzhe3nudkyrVWIzqa2kbBPrH4V +I5b2P/AgNBbeCsbEBEV5f6f9vtKppa+cxSMq9zwhbL2vj07FOrLzNBL834AaSaTUqZX3noleooms +lMuoaJuvimUnzYnu3Yy1aylwQ6BpC+S5DwIDAQABo0IwQDAdBgNVHQ4EFgQU/qHgcB4qAzlSWkK+ +XJGFehiqTbUwDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQAD +ggEBAMJKVvoVIXsoounlHfv4LcQ5lkFMOycsxGwYFYDGrK9HWS8mC+M2sO87/kOXSTKZEhVb3xEp +/6tT+LvBeA+snFOvV71ojD1pM/CjoCNjO2RnIkSt1XHLVip4kqNPEjE2NuLe/gDEo2APJ62gsIq1 +NnpSob0n9CAnYuhNlCQT5AoE6TyrLshDCUrGYQTlSTR+08TI9Q/Aqum6VF7zYytPT1DU/rl7mYw9 +wC68AivTxEDkigcxHpvOJpkT+xHqmiIMERnHXhuBUDDIlhJu58tBf5E7oke3VIAb3ADMmpDqw8NQ +BmIMMMAVSKeoWXzhriKi4gp6D/piq1JM4fHfyr6DDUI= +-----END CERTIFICATE----- + +emSign ECC Root CA - C3 +======================= +-----BEGIN CERTIFICATE----- +MIICKzCCAbGgAwIBAgIKe3G2gla4EnycqDAKBggqhkjOPQQDAzBaMQswCQYDVQQGEwJVUzETMBEG +A1UECxMKZW1TaWduIFBLSTEUMBIGA1UEChMLZU11ZGhyYSBJbmMxIDAeBgNVBAMTF2VtU2lnbiBF +Q0MgUm9vdCBDQSAtIEMzMB4XDTE4MDIxODE4MzAwMFoXDTQzMDIxODE4MzAwMFowWjELMAkGA1UE +BhMCVVMxEzARBgNVBAsTCmVtU2lnbiBQS0kxFDASBgNVBAoTC2VNdWRocmEgSW5jMSAwHgYDVQQD +ExdlbVNpZ24gRUNDIFJvb3QgQ0EgLSBDMzB2MBAGByqGSM49AgEGBSuBBAAiA2IABP2lYa57JhAd +6bciMK4G9IGzsUJxlTm801Ljr6/58pc1kjZGDoeVjbk5Wum739D+yAdBPLtVb4OjavtisIGJAnB9 +SMVK4+kiVCJNk7tCDK93nCOmfddhEc5lx/h//vXyqaNCMEAwHQYDVR0OBBYEFPtaSNCAIEDyqOkA +B2kZd6fmw/TPMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMDA2gA +MGUCMQC02C8Cif22TGK6Q04ThHK1rt0c3ta13FaPWEBaLd4gTCKDypOofu4SQMfWh0/434UCMBwU +ZOR8loMRnLDRWmFLpg9J0wD8ofzkpf9/rdcw0Md3f76BB1UwUCAU9Vc4CqgxUQ== +-----END CERTIFICATE----- + +Hongkong Post Root CA 3 +======================= +-----BEGIN CERTIFICATE----- +MIIFzzCCA7egAwIBAgIUCBZfikyl7ADJk0DfxMauI7gcWqQwDQYJKoZIhvcNAQELBQAwbzELMAkG +A1UEBhMCSEsxEjAQBgNVBAgTCUhvbmcgS29uZzESMBAGA1UEBxMJSG9uZyBLb25nMRYwFAYDVQQK 
+Ew1Ib25na29uZyBQb3N0MSAwHgYDVQQDExdIb25na29uZyBQb3N0IFJvb3QgQ0EgMzAeFw0xNzA2 +MDMwMjI5NDZaFw00MjA2MDMwMjI5NDZaMG8xCzAJBgNVBAYTAkhLMRIwEAYDVQQIEwlIb25nIEtv +bmcxEjAQBgNVBAcTCUhvbmcgS29uZzEWMBQGA1UEChMNSG9uZ2tvbmcgUG9zdDEgMB4GA1UEAxMX +SG9uZ2tvbmcgUG9zdCBSb290IENBIDMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz +iNfqzg8gTr7m1gNt7ln8wlffKWihgw4+aMdoWJwcYEuJQwy51BWy7sFOdem1p+/l6TWZ5Mwc50tf +jTMwIDNT2aa71T4Tjukfh0mtUC1Qyhi+AViiE3CWu4mIVoBc+L0sPOFMV4i707mV78vH9toxdCim +5lSJ9UExyuUmGs2C4HDaOym71QP1mbpV9WTRYA6ziUm4ii8F0oRFKHyPaFASePwLtVPLwpgchKOe +sL4jpNrcyCse2m5FHomY2vkALgbpDDtw1VAliJnLzXNg99X/NWfFobxeq81KuEXryGgeDQ0URhLj +0mRiikKYvLTGCAj4/ahMZJx2Ab0vqWwzD9g/KLg8aQFChn5pwckGyuV6RmXpwtZQQS4/t+TtbNe/ +JgERohYpSms0BpDsE9K2+2p20jzt8NYt3eEV7KObLyzJPivkaTv/ciWxNoZbx39ri1UbSsUgYT2u +y1DhCDq+sI9jQVMwCFk8mB13umOResoQUGC/8Ne8lYePl8X+l2oBlKN8W4UdKjk60FSh0Tlxnf0h ++bV78OLgAo9uliQlLKAeLKjEiafv7ZkGL7YKTE/bosw3Gq9HhS2KX8Q0NEwA/RiTZxPRN+ZItIsG +xVd7GYYKecsAyVKvQv83j+GjHno9UKtjBucVtT+2RTeUN7F+8kjDf8V1/peNRY8apxpyKBpADwID +AQABo2MwYTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjAfBgNVHSMEGDAWgBQXnc0e +i9Y5K3DTXNSguB+wAPzFYTAdBgNVHQ4EFgQUF53NHovWOStw01zUoLgfsAD8xWEwDQYJKoZIhvcN +AQELBQADggIBAFbVe27mIgHSQpsY1Q7XZiNc4/6gx5LS6ZStS6LG7BJ8dNVI0lkUmcDrudHr9Egw +W62nV3OZqdPlt9EuWSRY3GguLmLYauRwCy0gUCCkMpXRAJi70/33MvJJrsZ64Ee+bs7Lo3I6LWld +y8joRTnU+kLBEUx3XZL7av9YROXrgZ6voJmtvqkBZss4HTzfQx/0TW60uhdG/H39h4F5ag0zD/ov ++BS5gLNdTaqX4fnkGMX41TiMJjz98iji7lpJiCzfeT2OnpA8vUFKOt1b9pq0zj8lMH8yfaIDlNDc +eqFS3m6TjRgm/VWsvY+b0s+v54Ysyx8Jb6NvqYTUc79NoXQbTiNg8swOqn+knEwlqLJmOzj/2ZQw +9nKEvmhVEA/GcywWaZMH/rFF7buiVWqw2rVKAiUnhde3t4ZEFolsgCs+l6mc1X5VTMbeRRAc6uk7 +nwNT7u56AQIWeNTowr5GdogTPyK7SBIdUgC0An4hGh6cJfTzPV4e0hz5sy229zdcxsshTrD3mUcY +hcErulWuBurQB7Lcq9CClnXO0lD+mefPL5/ndtFhKvshuzHQqp9HpLIiyhY6UFfEW0NnxWViA0kB +60PZ2Pierc+xYw5F9KBaLJstxabArahH9CdMOA0uG0k7UvToiIMrVCjU8jVStDKDYmlkDJGcn5fq +dBb9HxEGmpv0 +-----END CERTIFICATE----- + +Entrust Root Certification Authority - G4 +========================================= +-----BEGIN CERTIFICATE----- +MIIGSzCCBDOgAwIBAgIRANm1Q3+vqTkPAAAAAFVlrVgwDQYJKoZIhvcNAQELBQAwgb4xCzAJBgNV +BAYTAlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3Qu +bmV0L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1 +dGhvcml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1 +dGhvcml0eSAtIEc0MB4XDTE1MDUyNzExMTExNloXDTM3MTIyNzExNDExNlowgb4xCzAJBgNVBAYT +AlVTMRYwFAYDVQQKEw1FbnRydXN0LCBJbmMuMSgwJgYDVQQLEx9TZWUgd3d3LmVudHJ1c3QubmV0 +L2xlZ2FsLXRlcm1zMTkwNwYDVQQLEzAoYykgMjAxNSBFbnRydXN0LCBJbmMuIC0gZm9yIGF1dGhv +cml6ZWQgdXNlIG9ubHkxMjAwBgNVBAMTKUVudHJ1c3QgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhv +cml0eSAtIEc0MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsewsQu7i0TD/pZJH4i3D +umSXbcr3DbVZwbPLqGgZ2K+EbTBwXX7zLtJTmeH+H17ZSK9dE43b/2MzTdMAArzE+NEGCJR5WIoV +3imz/f3ET+iq4qA7ec2/a0My3dl0ELn39GjUu9CH1apLiipvKgS1sqbHoHrmSKvS0VnM1n4j5pds +8ELl3FFLFUHtSUrJ3hCX1nbB76W1NhSXNdh4IjVS70O92yfbYVaCNNzLiGAMC1rlLAHGVK/XqsEQ +e9IFWrhAnoanw5CGAlZSCXqc0ieCU0plUmr1POeo8pyvi73TDtTUXm6Hnmo9RR3RXRv06QqsYJn7 +ibT/mCzPfB3pAqoEmh643IhuJbNsZvc8kPNXwbMv9W3y+8qh+CmdRouzavbmZwe+LGcKKh9asj5X +xNMhIWNlUpEbsZmOeX7m640A2Vqq6nPopIICR5b+W45UYaPrL0swsIsjdXJ8ITzI9vF01Bx7owVV +7rtNOzK+mndmnqxpkCIHH2E6lr7lmk/MBTwoWdPBDFSoWWG9yHJM6Nyfh3+9nEg2XpWjDrk4JFX8 +dWbrAuMINClKxuMrLzOg2qOGpRKX/YAr2hRC45K9PvJdXmd0LhyIRyk0X+IyqJwlN4y6mACXi0mW +Hv0liqzc2thddG5msP9E36EYxr5ILzeUePiVSj9/E15dWf10hkNjc0kCAwEAAaNCMEAwDwYDVR0T +AQH/BAUwAwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFJ84xFYjwznooHFs6FRM5Og6sb9n 
+MA0GCSqGSIb3DQEBCwUAA4ICAQAS5UKme4sPDORGpbZgQIeMJX6tuGguW8ZAdjwD+MlZ9POrYs4Q +jbRaZIxowLByQzTSGwv2LFPSypBLhmb8qoMi9IsabyZIrHZ3CL/FmFz0Jomee8O5ZDIBf9PD3Vht +7LGrhFV0d4QEJ1JrhkzO3bll/9bGXp+aEJlLdWr+aumXIOTkdnrG0CSqkM0gkLpHZPt/B7NTeLUK +YvJzQ85BK4FqLoUWlFPUa19yIqtRLULVAJyZv967lDtX/Zr1hstWO1uIAeV8KEsD+UmDfLJ/fOPt +jqF/YFOOVZ1QNBIPt5d7bIdKROf1beyAN/BYGW5KaHbwH5Lk6rWS02FREAutp9lfx1/cH6NcjKF+ +m7ee01ZvZl4HliDtC3T7Zk6LERXpgUl+b7DUUH8i119lAg2m9IUe2K4GS0qn0jFmwvjO5QimpAKW +RGhXxNUzzxkvFMSUHHuk2fCfDrGA4tGeEWSpiBE6doLlYsKA2KSD7ZPvfC+QsDJMlhVoSFLUmQjA +JOgc47OlIQ6SwJAfzyBfyjs4x7dtOvPmRLgOMWuIjnDrnBdSqEGULoe256YSxXXfW8AKbnuk5F6G ++TaU33fD6Q3AOfF5u0aOq0NZJ7cguyPpVkAh7DE9ZapD8j3fcEThuk0mEDuYn/PIjhs4ViFqUZPT +kcpG2om3PVODLAgfi49T3f+sHw== +-----END CERTIFICATE----- + +Microsoft ECC Root Certificate Authority 2017 +============================================= +-----BEGIN CERTIFICATE----- +MIICWTCCAd+gAwIBAgIQZvI9r4fei7FK6gxXMQHC7DAKBggqhkjOPQQDAzBlMQswCQYDVQQGEwJV +UzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNyb3NvZnQgRUND +IFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwHhcNMTkxMjE4MjMwNjQ1WhcNNDIwNzE4 +MjMxNjA0WjBlMQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYw +NAYDVQQDEy1NaWNyb3NvZnQgRUNDIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwdjAQ +BgcqhkjOPQIBBgUrgQQAIgNiAATUvD0CQnVBEyPNgASGAlEvaqiBYgtlzPbKnR5vSmZRogPZnZH6 +thaxjG7efM3beaYvzrvOcS/lpaso7GMEZpn4+vKTEAXhgShC48Zo9OYbhGBKia/teQ87zvH2RPUB +eMCjVDBSMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTIy5lycFIM ++Oa+sgRXKSrPQhDtNTAQBgkrBgEEAYI3FQEEAwIBADAKBggqhkjOPQQDAwNoADBlAjBY8k3qDPlf +Xu5gKcs68tvWMoQZP3zVL8KxzJOuULsJMsbG7X7JNpQS5GiFBqIb0C8CMQCZ6Ra0DvpWSNSkMBaR +eNtUjGUBiudQZsIxtzm6uBoiB078a1QWIP8rtedMDE2mT3M= +-----END CERTIFICATE----- + +Microsoft RSA Root Certificate Authority 2017 +============================================= +-----BEGIN CERTIFICATE----- +MIIFqDCCA5CgAwIBAgIQHtOXCV/YtLNHcB6qvn9FszANBgkqhkiG9w0BAQwFADBlMQswCQYDVQQG +EwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9uMTYwNAYDVQQDEy1NaWNyb3NvZnQg +UlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcwHhcNMTkxMjE4MjI1MTIyWhcNNDIw +NzE4MjMwMDIzWjBlMQswCQYDVQQGEwJVUzEeMBwGA1UEChMVTWljcm9zb2Z0IENvcnBvcmF0aW9u +MTYwNAYDVQQDEy1NaWNyb3NvZnQgUlNBIFJvb3QgQ2VydGlmaWNhdGUgQXV0aG9yaXR5IDIwMTcw +ggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKW76UM4wplZEWCpW9R2LBifOZNt9GkMml +7Xhqb0eRaPgnZ1AzHaGm++DlQ6OEAlcBXZxIQIJTELy/xztokLaCLeX0ZdDMbRnMlfl7rEqUrQ7e +S0MdhweSE5CAg2Q1OQT85elss7YfUJQ4ZVBcF0a5toW1HLUX6NZFndiyJrDKxHBKrmCk3bPZ7Pw7 +1VdyvD/IybLeS2v4I2wDwAW9lcfNcztmgGTjGqwu+UcF8ga2m3P1eDNbx6H7JyqhtJqRjJHTOoI+ +dkC0zVJhUXAoP8XFWvLJjEm7FFtNyP9nTUwSlq31/niol4fX/V4ggNyhSyL71Imtus5Hl0dVe49F +yGcohJUcaDDv70ngNXtk55iwlNpNhTs+VcQor1fznhPbRiefHqJeRIOkpcrVE7NLP8TjwuaGYaRS +MLl6IE9vDzhTyzMMEyuP1pq9KsgtsRx9S1HKR9FIJ3Jdh+vVReZIZZ2vUpC6W6IYZVcSn2i51BVr +lMRpIpj0M+Dt+VGOQVDJNE92kKz8OMHY4Xu54+OU4UZpyw4KUGsTuqwPN1q3ErWQgR5WrlcihtnJ +0tHXUeOrO8ZV/R4O03QK0dqq6mm4lyiPSMQH+FJDOvTKVTUssKZqwJz58oHhEmrARdlns87/I6KJ +ClTUFLkqqNfs+avNJVgyeY+QW5g5xAgGwax/Dj0ApQIDAQABo1QwUjAOBgNVHQ8BAf8EBAMCAYYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUCctZf4aycI8awznjwNnpv7tNsiMwEAYJKwYBBAGC +NxUBBAMCAQAwDQYJKoZIhvcNAQEMBQADggIBAKyvPl3CEZaJjqPnktaXFbgToqZCLgLNFgVZJ8og +6Lq46BrsTaiXVq5lQ7GPAJtSzVXNUzltYkyLDVt8LkS/gxCP81OCgMNPOsduET/m4xaRhPtthH80 +dK2Jp86519efhGSSvpWhrQlTM93uCupKUY5vVau6tZRGrox/2KJQJWVggEbbMwSubLWYdFQl3JPk ++ONVFT24bcMKpBLBaYVu32TxU5nhSnUgnZUP5NbcA/FZGOhHibJXWpS2qdgXKxdJ5XbLwVaZOjex +/2kskZGT4d9Mozd2TaGf+G0eHdP67Pv0RR0Tbc/3WeUiJ3IrhvNXuzDtJE3cfVa7o7P4NHmJweDy 
+AmH3pvwPuxwXC65B2Xy9J6P9LjrRk5Sxcx0ki69bIImtt2dmefU6xqaWM/5TkshGsRGRxpl/j8nW +ZjEgQRCHLQzWwa80mMpkg/sTV9HB8Dx6jKXB/ZUhoHHBk2dxEuqPiAppGWSZI1b7rCoucL5mxAyE +7+WL85MB+GqQk2dLsmijtWKP6T+MejteD+eMuMZ87zf9dOLITzNy4ZQ5bb0Sr74MTnB8G2+NszKT +c0QWbej09+CVgI+WXTik9KveCjCHk9hNAHFiRSdLOkKEW39lt2c0Ui2cFmuqqNh7o0JMcccMyj6D +5KbvtwEwXlGjefVwaaZBRA+GsCyRxj3qrg+E +-----END CERTIFICATE----- + +e-Szigno Root CA 2017 +===================== +-----BEGIN CERTIFICATE----- +MIICQDCCAeWgAwIBAgIMAVRI7yH9l1kN9QQKMAoGCCqGSM49BAMCMHExCzAJBgNVBAYTAkhVMREw +DwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UECgwNTWljcm9zZWMgTHRkLjEXMBUGA1UEYQwOVkFUSFUt +MjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3ppZ25vIFJvb3QgQ0EgMjAxNzAeFw0xNzA4MjIxMjA3MDZa +Fw00MjA4MjIxMjA3MDZaMHExCzAJBgNVBAYTAkhVMREwDwYDVQQHDAhCdWRhcGVzdDEWMBQGA1UE +CgwNTWljcm9zZWMgTHRkLjEXMBUGA1UEYQwOVkFUSFUtMjM1ODQ0OTcxHjAcBgNVBAMMFWUtU3pp +Z25vIFJvb3QgQ0EgMjAxNzBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABJbcPYrYsHtvxie+RJCx +s1YVe45DJH0ahFnuY2iyxl6H0BVIHqiQrb1TotreOpCmYF9oMrWGQd+HWyx7xf58etqjYzBhMA8G +A1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1UdDgQWBBSHERUI0arBeAyxr87GyZDv +vzAEwDAfBgNVHSMEGDAWgBSHERUI0arBeAyxr87GyZDvvzAEwDAKBggqhkjOPQQDAgNJADBGAiEA +tVfd14pVCzbhhkT61NlojbjcI4qKDdQvfepz7L9NbKgCIQDLpbQS+ue16M9+k/zzNY9vTlp8tLxO +svxyqltZ+efcMQ== +-----END CERTIFICATE----- + +certSIGN Root CA G2 +=================== +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIJEQA0tk7GNi02MA0GCSqGSIb3DQEBCwUAMEExCzAJBgNVBAYTAlJPMRQw +EgYDVQQKEwtDRVJUU0lHTiBTQTEcMBoGA1UECxMTY2VydFNJR04gUk9PVCBDQSBHMjAeFw0xNzAy +MDYwOTI3MzVaFw00MjAyMDYwOTI3MzVaMEExCzAJBgNVBAYTAlJPMRQwEgYDVQQKEwtDRVJUU0lH +TiBTQTEcMBoGA1UECxMTY2VydFNJR04gUk9PVCBDQSBHMjCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBAMDFdRmRfUR0dIf+DjuW3NgBFszuY5HnC2/OOwppGnzC46+CjobXXo9X69MhWf05 +N0IwvlDqtg+piNguLWkh59E3GE59kdUWX2tbAMI5Qw02hVK5U2UPHULlj88F0+7cDBrZuIt4Imfk +abBoxTzkbFpG583H+u/E7Eu9aqSs/cwoUe+StCmrqzWaTOTECMYmzPhpn+Sc8CnTXPnGFiWeI8Mg +wT0PPzhAsP6CRDiqWhqKa2NYOLQV07YRaXseVO6MGiKscpc/I1mbySKEwQdPzH/iV8oScLumZfNp +dWO9lfsbl83kqK/20U6o2YpxJM02PbyWxPFsqa7lzw1uKA2wDrXKUXt4FMMgL3/7FFXhEZn91Qqh +ngLjYl/rNUssuHLoPj1PrCy7Lobio3aP5ZMqz6WryFyNSwb/EkaseMsUBzXgqd+L6a8VTxaJW732 +jcZZroiFDsGJ6x9nxUWO/203Nit4ZoORUSs9/1F3dmKh7Gc+PoGD4FapUB8fepmrY7+EF3fxDTvf +95xhszWYijqy7DwaNz9+j5LP2RIUZNoQAhVB/0/E6xyjyfqZ90bp4RjZsbgyLcsUDFDYg2WD7rlc +z8sFWkz6GZdr1l0T08JcVLwyc6B49fFtHsufpaafItzRUZ6CeWRgKRM+o/1Pcmqr4tTluCRVLERL +iohEnMqE0yo7AgMBAAGjQjBAMA8GA1UdEwEB/wQFMAMBAf8wDgYDVR0PAQH/BAQDAgEGMB0GA1Ud +DgQWBBSCIS1mxteg4BXrzkwJd8RgnlRuAzANBgkqhkiG9w0BAQsFAAOCAgEAYN4auOfyYILVAzOB +ywaK8SJJ6ejqkX/GM15oGQOGO0MBzwdw5AgeZYWR5hEit/UCI46uuR59H35s5r0l1ZUa8gWmr4UC +b6741jH/JclKyMeKqdmfS0mbEVeZkkMR3rYzpMzXjWR91M08KCy0mpbqTfXERMQlqiCA2ClV9+BB +/AYm/7k29UMUA2Z44RGx2iBfRgB4ACGlHgAoYXhvqAEBj500mv/0OJD7uNGzcgbJceaBxXntC6Z5 +8hMLnPddDnskk7RI24Zf3lCGeOdA5jGokHZwYa+cNywRtYK3qq4kNFtyDGkNzVmf9nGvnAvRCjj5 +BiKDUyUM/FHE5r7iOZULJK2v0ZXkltd0ZGtxTgI8qoXzIKNDOXZbbFD+mpwUHmUUihW9o4JFWklW +atKcsWMy5WHgUyIOpwpJ6st+H6jiYoD2EEVSmAYY3qXNL3+q1Ok+CHLsIwMCPKaq2LxndD0UF/tU +Sxfj03k9bWtJySgOLnRQvwzZRjoQhsmnP+mg7H/rpXdYaXHmgwo38oZJar55CJD2AhZkPuXaTH4M +NMn5X7azKFGnpyuqSfqNZSlO42sTp5SjLVFteAxEy9/eCG/Oo2Sr05WE1LlSVHJ7liXMvGnjSG4N +0MedJ5qq+BOS3R7fY581qRY27Iy4g/Q9iY/NtBde17MXQRBdJ3NghVdJIgc= +-----END CERTIFICATE----- + +Trustwave Global Certification Authority +======================================== +-----BEGIN CERTIFICATE----- +MIIF2jCCA8KgAwIBAgIMBfcOhtpJ80Y1LrqyMA0GCSqGSIb3DQEBCwUAMIGIMQswCQYDVQQGEwJV +UzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2 
+ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTAeFw0xNzA4MjMxOTM0MTJaFw00MjA4MjMxOTM0MTJaMIGIMQswCQYDVQQGEwJV +UzERMA8GA1UECAwISWxsaW5vaXMxEDAOBgNVBAcMB0NoaWNhZ28xITAfBgNVBAoMGFRydXN0d2F2 +ZSBIb2xkaW5ncywgSW5jLjExMC8GA1UEAwwoVHJ1c3R3YXZlIEdsb2JhbCBDZXJ0aWZpY2F0aW9u +IEF1dGhvcml0eTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALldUShLPDeS0YLOvR29 +zd24q88KPuFd5dyqCblXAj7mY2Hf8g+CY66j96xz0XznswuvCAAJWX/NKSqIk4cXGIDtiLK0thAf +LdZfVaITXdHG6wZWiYj+rDKd/VzDBcdu7oaJuogDnXIhhpCujwOl3J+IKMujkkkP7NAP4m1ET4Bq +stTnoApTAbqOl5F2brz81Ws25kCI1nsvXwXoLG0R8+eyvpJETNKXpP7ScoFDB5zpET71ixpZfR9o +WN0EACyW80OzfpgZdNmcc9kYvkHHNHnZ9GLCQ7mzJ7Aiy/k9UscwR7PJPrhq4ufogXBeQotPJqX+ +OsIgbrv4Fo7NDKm0G2x2EOFYeUY+VM6AqFcJNykbmROPDMjWLBz7BegIlT1lRtzuzWniTY+HKE40 +Cz7PFNm73bZQmq131BnW2hqIyE4bJ3XYsgjxroMwuREOzYfwhI0Vcnyh78zyiGG69Gm7DIwLdVcE +uE4qFC49DxweMqZiNu5m4iK4BUBjECLzMx10coos9TkpoNPnG4CELcU9402x/RpvumUHO1jsQkUm ++9jaJXLE9gCxInm943xZYkqcBW89zubWR2OZxiRvchLIrH+QtAuRcOi35hYQcRfO3gZPSEF9NUqj +ifLJS3tBEW1ntwiYTOURGa5CgNz7kAXU+FDKvuStx8KU1xad5hePrzb7AgMBAAGjQjBAMA8GA1Ud +EwEB/wQFMAMBAf8wHQYDVR0OBBYEFJngGWcNYtt2s9o9uFvo/ULSMQ6HMA4GA1UdDwEB/wQEAwIB +BjANBgkqhkiG9w0BAQsFAAOCAgEAmHNw4rDT7TnsTGDZqRKGFx6W0OhUKDtkLSGm+J1WE2pIPU/H +PinbbViDVD2HfSMF1OQc3Og4ZYbFdada2zUFvXfeuyk3QAUHw5RSn8pk3fEbK9xGChACMf1KaA0H +ZJDmHvUqoai7PF35owgLEQzxPy0QlG/+4jSHg9bP5Rs1bdID4bANqKCqRieCNqcVtgimQlRXtpla +4gt5kNdXElE1GYhBaCXUNxeEFfsBctyV3lImIJgm4nb1J2/6ADtKYdkNy1GTKv0WBpanI5ojSP5R +vbbEsLFUzt5sQa0WZ37b/TjNuThOssFgy50X31ieemKyJo90lZvkWx3SD92YHJtZuSPTMaCm/zjd +zyBP6VhWOmfD0faZmZ26NraAL4hHT4a/RDqA5Dccprrql5gR0IRiR2Qequ5AvzSxnI9O4fKSTx+O +856X3vOmeWqJcU9LJxdI/uz0UA9PSX3MReO9ekDFQdxhVicGaeVyQYHTtgGJoC86cnn+OjC/QezH +Yj6RS8fZMXZC+fc8Y+wmjHMMfRod6qh8h6jCJ3zhM0EPz8/8AKAigJ5Kp28AsEFFtyLKaEjFQqKu +3R3y4G5OBVixwJAWKqQ9EEC+j2Jjg6mcgn0tAumDMHzLJ8n9HmYAsC7TIS+OMxZsmO0QqAfWzJPP +29FpHOTKyeC2nOnOcXHebD8WpHk= +-----END CERTIFICATE----- + +Trustwave Global ECC P256 Certification Authority +================================================= +-----BEGIN CERTIFICATE----- +MIICYDCCAgegAwIBAgIMDWpfCD8oXD5Rld9dMAoGCCqGSM49BAMCMIGRMQswCQYDVQQGEwJVUzER +MA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDI1NiBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMxOTM1MTBaFw00MjA4MjMxOTM1MTBaMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRy +dXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDI1 +NiBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTBZMBMGByqGSM49AgEGCCqGSM49AwEHA0IABH77bOYj +43MyCMpg5lOcunSNGLB4kFKA3TjASh3RqMyTpJcGOMoNFWLGjgEqZZ2q3zSRLoHB5DOSMcT9CTqm +P62jQzBBMA8GA1UdEwEB/wQFMAMBAf8wDwYDVR0PAQH/BAUDAwcGADAdBgNVHQ4EFgQUo0EGrJBt +0UrrdaVKEJmzsaGLSvcwCgYIKoZIzj0EAwIDRwAwRAIgB+ZU2g6gWrKuEZ+Hxbb/ad4lvvigtwjz +RM4q3wghDDcCIC0mA6AFvWvR9lz4ZcyGbbOcNEhjhAnFjXca4syc4XR7 +-----END CERTIFICATE----- + +Trustwave Global ECC P384 Certification Authority +================================================= +-----BEGIN CERTIFICATE----- +MIICnTCCAiSgAwIBAgIMCL2Fl2yZJ6SAaEc7MAoGCCqGSM49BAMDMIGRMQswCQYDVQQGEwJVUzER +MA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRydXN0d2F2ZSBI +b2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDM4NCBDZXJ0aWZp +Y2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MjMxOTM2NDNaFw00MjA4MjMxOTM2NDNaMIGRMQswCQYD +VQQGEwJVUzERMA8GA1UECBMISWxsaW5vaXMxEDAOBgNVBAcTB0NoaWNhZ28xITAfBgNVBAoTGFRy +dXN0d2F2ZSBIb2xkaW5ncywgSW5jLjE6MDgGA1UEAxMxVHJ1c3R3YXZlIEdsb2JhbCBFQ0MgUDM4 
+NCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTB2MBAGByqGSM49AgEGBSuBBAAiA2IABGvaDXU1CDFH +Ba5FmVXxERMuSvgQMSOjfoPTfygIOiYaOs+Xgh+AtycJj9GOMMQKmw6sWASr9zZ9lCOkmwqKi6vr +/TklZvFe/oyujUF5nQlgziip04pt89ZF1PKYhDhloKNDMEEwDwYDVR0TAQH/BAUwAwEB/zAPBgNV +HQ8BAf8EBQMDBwYAMB0GA1UdDgQWBBRVqYSJ0sEyvRjLbKYHTsjnnb6CkDAKBggqhkjOPQQDAwNn +ADBkAjA3AZKXRRJ+oPM+rRk6ct30UJMDEr5E0k9BpIycnR+j9sKS50gU/k6bpZFXrsY3crsCMGcl +CrEMXu6pY5Jv5ZAL/mYiykf9ijH3g/56vxC+GCsej/YpHpRZ744hN8tRmKVuSw== +-----END CERTIFICATE----- + +NAVER Global Root Certification Authority +========================================= +-----BEGIN CERTIFICATE----- +MIIFojCCA4qgAwIBAgIUAZQwHqIL3fXFMyqxQ0Rx+NZQTQ0wDQYJKoZIhvcNAQEMBQAwaTELMAkG +A1UEBhMCS1IxJjAkBgNVBAoMHU5BVkVSIEJVU0lORVNTIFBMQVRGT1JNIENvcnAuMTIwMAYDVQQD +DClOQVZFUiBHbG9iYWwgUm9vdCBDZXJ0aWZpY2F0aW9uIEF1dGhvcml0eTAeFw0xNzA4MTgwODU4 +NDJaFw0zNzA4MTgyMzU5NTlaMGkxCzAJBgNVBAYTAktSMSYwJAYDVQQKDB1OQVZFUiBCVVNJTkVT +UyBQTEFURk9STSBDb3JwLjEyMDAGA1UEAwwpTkFWRVIgR2xvYmFsIFJvb3QgQ2VydGlmaWNhdGlv +biBBdXRob3JpdHkwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC21PGTXLVAiQqrDZBb +UGOukJR0F0Vy1ntlWilLp1agS7gvQnXp2XskWjFlqxcX0TM62RHcQDaH38dq6SZeWYp34+hInDEW ++j6RscrJo+KfziFTowI2MMtSAuXaMl3Dxeb57hHHi8lEHoSTGEq0n+USZGnQJoViAbbJAh2+g1G7 +XNr4rRVqmfeSVPc0W+m/6imBEtRTkZazkVrd/pBzKPswRrXKCAfHcXLJZtM0l/aM9BhK4dA9WkW2 +aacp+yPOiNgSnABIqKYPszuSjXEOdMWLyEz59JuOuDxp7W87UC9Y7cSw0BwbagzivESq2M0UXZR4 +Yb8ObtoqvC8MC3GmsxY/nOb5zJ9TNeIDoKAYv7vxvvTWjIcNQvcGufFt7QSUqP620wbGQGHfnZ3z +VHbOUzoBppJB7ASjjw2i1QnK1sua8e9DXcCrpUHPXFNwcMmIpi3Ua2FzUCaGYQ5fG8Ir4ozVu53B +A0K6lNpfqbDKzE0K70dpAy8i+/Eozr9dUGWokG2zdLAIx6yo0es+nPxdGoMuK8u180SdOqcXYZai +cdNwlhVNt0xz7hlcxVs+Qf6sdWA7G2POAN3aCJBitOUt7kinaxeZVL6HSuOpXgRM6xBtVNbv8ejy +YhbLgGvtPe31HzClrkvJE+2KAQHJuFFYwGY6sWZLxNUxAmLpdIQM201GLQIDAQABo0IwQDAdBgNV +HQ4EFgQU0p+I36HNLL3s9TsBAZMzJ7LrYEswDgYDVR0PAQH/BAQDAgEGMA8GA1UdEwEB/wQFMAMB +Af8wDQYJKoZIhvcNAQEMBQADggIBADLKgLOdPVQG3dLSLvCkASELZ0jKbY7gyKoNqo0hV4/GPnrK +21HUUrPUloSlWGB/5QuOH/XcChWB5Tu2tyIvCZwTFrFsDDUIbatjcu3cvuzHV+YwIHHW1xDBE1UB +jCpD5EHxzzp6U5LOogMFDTjfArsQLtk70pt6wKGm+LUx5vR1yblTmXVHIloUFcd4G7ad6Qz4G3bx +hYTeodoS76TiEJd6eN4MUZeoIUCLhr0N8F5OSza7OyAfikJW4Qsav3vQIkMsRIz75Sq0bBwcupTg +E34h5prCy8VCZLQelHsIJchxzIdFV4XTnyliIoNRlwAYl3dqmJLJfGBs32x9SuRwTMKeuB330DTH +D8z7p/8Dvq1wkNoL3chtl1+afwkyQf3NosxabUzyqkn+Zvjp2DXrDige7kgvOtB5CTh8piKCk5XQ +A76+AqAF3SAi428diDRgxuYKuQl1C/AH6GmWNcf7I4GOODm4RStDeKLRLBT/DShycpWbXgnbiUSY +qqFJu3FS8r/2/yehNq+4tneI3TqkbZs0kNwUXTC/t+sX5Ie3cdCh13cV1ELX8vMxmV2b3RZtP+oG +I/hGoiLtk/bdmuYqh7GYVPEi92tF4+KOdh2ajcQGjTa3FPOdVGm3jjzVpG2Tgbet9r1ke8LJaDmg +kpzNNIaRkPpkUZ3+/uul9XXeifdy +-----END CERTIFICATE----- + +AC RAIZ FNMT-RCM SERVIDORES SEGUROS +=================================== +-----BEGIN CERTIFICATE----- +MIICbjCCAfOgAwIBAgIQYvYybOXE42hcG2LdnC6dlTAKBggqhkjOPQQDAzB4MQswCQYDVQQGEwJF +UzERMA8GA1UECgwIRk5NVC1SQ00xDjAMBgNVBAsMBUNlcmVzMRgwFgYDVQRhDA9WQVRFUy1RMjgy +NjAwNEoxLDAqBgNVBAMMI0FDIFJBSVogRk5NVC1SQ00gU0VSVklET1JFUyBTRUdVUk9TMB4XDTE4 +MTIyMDA5MzczM1oXDTQzMTIyMDA5MzczM1oweDELMAkGA1UEBhMCRVMxETAPBgNVBAoMCEZOTVQt +UkNNMQ4wDAYDVQQLDAVDZXJlczEYMBYGA1UEYQwPVkFURVMtUTI4MjYwMDRKMSwwKgYDVQQDDCNB +QyBSQUlaIEZOTVQtUkNNIFNFUlZJRE9SRVMgU0VHVVJPUzB2MBAGByqGSM49AgEGBSuBBAAiA2IA +BPa6V1PIyqvfNkpSIeSX0oNnnvBlUdBeh8dHsVnyV0ebAAKTRBdp20LHsbI6GA60XYyzZl2hNPk2 +LEnb80b8s0RpRBNm/dfF/a82Tc4DTQdxz69qBdKiQ1oKUm8BA06Oi6NCMEAwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFAG5L++/EYZg8k/QQW6rcx/n0m5JMAoGCCqG +SM49BAMDA2kAMGYCMQCuSuMrQMN0EfKVrRYj3k4MGuZdpSRea0R7/DjiT8ucRRcRTBQnJlU5dUoD 
+zBOQn5ICMQD6SmxgiHPz7riYYqnOK8LZiqZwMR2vsJRM60/G49HzYqc8/5MuB1xJAWdpEgJyv+c= +-----END CERTIFICATE----- + +GlobalSign Root R46 +=================== +-----BEGIN CERTIFICATE----- +MIIFWjCCA0KgAwIBAgISEdK7udcjGJ5AXwqdLdDfJWfRMA0GCSqGSIb3DQEBDAUAMEYxCzAJBgNV +BAYTAkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQDExNHbG9iYWxTaWduIFJv +b3QgUjQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAX +BgNVBAoTEEdsb2JhbFNpZ24gbnYtc2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBSNDYwggIi +MA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCsrHQy6LNl5brtQyYdpokNRbopiLKkHWPd08Es +CVeJOaFV6Wc0dwxu5FUdUiXSE2te4R2pt32JMl8Nnp8semNgQB+msLZ4j5lUlghYruQGvGIFAha/ +r6gjA7aUD7xubMLL1aa7DOn2wQL7Id5m3RerdELv8HQvJfTqa1VbkNud316HCkD7rRlr+/fKYIje +2sGP1q7Vf9Q8g+7XFkyDRTNrJ9CG0Bwta/OrffGFqfUo0q3v84RLHIf8E6M6cqJaESvWJ3En7YEt +bWaBkoe0G1h6zD8K+kZPTXhc+CtI4wSEy132tGqzZfxCnlEmIyDLPRT5ge1lFgBPGmSXZgjPjHvj +K8Cd+RTyG/FWaha/LIWFzXg4mutCagI0GIMXTpRW+LaCtfOW3T3zvn8gdz57GSNrLNRyc0NXfeD4 +12lPFzYE+cCQYDdF3uYM2HSNrpyibXRdQr4G9dlkbgIQrImwTDsHTUB+JMWKmIJ5jqSngiCNI/on +ccnfxkF0oE32kRbcRoxfKWMxWXEM2G/CtjJ9++ZdU6Z+Ffy7dXxd7Pj2Fxzsx2sZy/N78CsHpdls +eVR2bJ0cpm4O6XkMqCNqo98bMDGfsVR7/mrLZqrcZdCinkqaByFrgY/bxFn63iLABJzjqls2k+g9 +vXqhnQt2sQvHnf3PmKgGwvgqo6GDoLclcqUC4wIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUA1yrc4GHqMywptWU4jaWSf8FmSwwDQYJKoZIhvcNAQEM +BQADggIBAHx47PYCLLtbfpIrXTncvtgdokIzTfnvpCo7RGkerNlFo048p9gkUbJUHJNOxO97k4Vg +JuoJSOD1u8fpaNK7ajFxzHmuEajwmf3lH7wvqMxX63bEIaZHU1VNaL8FpO7XJqti2kM3S+LGteWy +gxk6x9PbTZ4IevPuzz5i+6zoYMzRx6Fcg0XERczzF2sUyQQCPtIkpnnpHs6i58FZFZ8d4kuaPp92 +CC1r2LpXFNqD6v6MVenQTqnMdzGxRBF6XLE+0xRFFRhiJBPSy03OXIPBNvIQtQ6IbbjhVp+J3pZm +OUdkLG5NrmJ7v2B0GbhWrJKsFjLtrWhV/pi60zTe9Mlhww6G9kuEYO4Ne7UyWHmRVSyBQ7N0H3qq +JZ4d16GLuc1CLgSkZoNNiTW2bKg2SnkheCLQQrzRQDGQob4Ez8pn7fXwgNNgyYMqIgXQBztSvwye +qiv5u+YfjyW6hY0XHgL+XVAEV8/+LbzvXMAaq7afJMbfc2hIkCwU9D9SGuTSyxTDYWnP4vkYxboz +nxSjBF25cfe1lNj2M8FawTSLfJvdkzrnE6JwYZ+vj+vYxXX4M2bUdGc6N3ec592kD3ZDZopD8p/7 +DEJ4Y9HiD2971KE9dJeFt0g5QdYg/NA6s/rob8SKunE3vouXsXgxT7PntgMTzlSdriVZzH81Xwj3 +QEUxeCp6 +-----END CERTIFICATE----- + +GlobalSign Root E46 +=================== +-----BEGIN CERTIFICATE----- +MIICCzCCAZGgAwIBAgISEdK7ujNu1LzmJGjFDYQdmOhDMAoGCCqGSM49BAMDMEYxCzAJBgNVBAYT +AkJFMRkwFwYDVQQKExBHbG9iYWxTaWduIG52LXNhMRwwGgYDVQQDExNHbG9iYWxTaWduIFJvb3Qg +RTQ2MB4XDTE5MDMyMDAwMDAwMFoXDTQ2MDMyMDAwMDAwMFowRjELMAkGA1UEBhMCQkUxGTAXBgNV +BAoTEEdsb2JhbFNpZ24gbnYtc2ExHDAaBgNVBAMTE0dsb2JhbFNpZ24gUm9vdCBFNDYwdjAQBgcq +hkjOPQIBBgUrgQQAIgNiAAScDrHPt+ieUnd1NPqlRqetMhkytAepJ8qUuwzSChDH2omwlwxwEwkB +jtjqR+q+soArzfwoDdusvKSGN+1wCAB16pMLey5SnCNoIwZD7JIvU4Tb+0cUB+hflGddyXqBPCCj +QjBAMA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBQxCpCPtsad0kRL +gLWi5h+xEk8blTAKBggqhkjOPQQDAwNoADBlAjEA31SQ7Zvvi5QCkxeCmb6zniz2C5GMn0oUsfZk +vLtoURMMA/cVi4RguYv/Uo7njLwcAjA8+RHUjE7AwWHCFUyqqx0LMV87HOIAl0Qx5v5zli/altP+ +CAezNIm8BZ/3Hobui3A= +-----END CERTIFICATE----- + +GLOBALTRUST 2020 +================ +-----BEGIN CERTIFICATE----- +MIIFgjCCA2qgAwIBAgILWku9WvtPilv6ZeUwDQYJKoZIhvcNAQELBQAwTTELMAkGA1UEBhMCQVQx +IzAhBgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVT +VCAyMDIwMB4XDTIwMDIxMDAwMDAwMFoXDTQwMDYxMDAwMDAwMFowTTELMAkGA1UEBhMCQVQxIzAh +BgNVBAoTGmUtY29tbWVyY2UgbW9uaXRvcmluZyBHbWJIMRkwFwYDVQQDExBHTE9CQUxUUlVTVCAy +MDIwMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAri5WrRsc7/aVj6B3GyvTY4+ETUWi +D59bRatZe1E0+eyLinjF3WuvvcTfk0Uev5E4C64OFudBc/jbu9G4UeDLgztzOG53ig9ZYybNpyrO +VPu44sB8R85gfD+yc/LAGbaKkoc1DZAoouQVBGM+uq/ufF7MpotQsjj3QWPKzv9pj2gOlTblzLmM 
+CcpL3TGQlsjMH/1WljTbjhzqLL6FLmPdqqmV0/0plRPwyJiT2S0WR5ARg6I6IqIoV6Lr/sCMKKCm +fecqQjuCgGOlYx8ZzHyyZqjC0203b+J+BlHZRYQfEs4kUmSFC0iAToexIiIwquuuvuAC4EDosEKA +A1GqtH6qRNdDYfOiaxaJSaSjpCuKAsR49GiKweR6NrFvG5Ybd0mN1MkGco/PU+PcF4UgStyYJ9OR +JitHHmkHr96i5OTUawuzXnzUJIBHKWk7buis/UDr2O1xcSvy6Fgd60GXIsUf1DnQJ4+H4xj04KlG +DfV0OoIu0G4skaMxXDtG6nsEEFZegB31pWXogvziB4xiRfUg3kZwhqG8k9MedKZssCz3AwyIDMvU +clOGvGBG85hqwvG/Q/lwIHfKN0F5VVJjjVsSn8VoxIidrPIwq7ejMZdnrY8XD2zHc+0klGvIg5rQ +mjdJBKuxFshsSUktq6HQjJLyQUp5ISXbY9e2nKd+Qmn7OmMCAwEAAaNjMGEwDwYDVR0TAQH/BAUw +AwEB/zAOBgNVHQ8BAf8EBAMCAQYwHQYDVR0OBBYEFNwuH9FhN3nkq9XVsxJxaD1qaJwiMB8GA1Ud +IwQYMBaAFNwuH9FhN3nkq9XVsxJxaD1qaJwiMA0GCSqGSIb3DQEBCwUAA4ICAQCR8EICaEDuw2jA +VC/f7GLDw56KoDEoqoOOpFaWEhCGVrqXctJUMHytGdUdaG/7FELYjQ7ztdGl4wJCXtzoRlgHNQIw +4Lx0SsFDKv/bGtCwr2zD/cuz9X9tAy5ZVp0tLTWMstZDFyySCstd6IwPS3BD0IL/qMy/pJTAvoe9 +iuOTe8aPmxadJ2W8esVCgmxcB9CpwYhgROmYhRZf+I/KARDOJcP5YBugxZfD0yyIMaK9MOzQ0MAS +8cE54+X1+NZK3TTN+2/BT+MAi1bikvcoskJ3ciNnxz8RFbLEAwW+uxF7Cr+obuf/WEPPm2eggAe2 +HcqtbepBEX4tdJP7wry+UUTF72glJ4DjyKDUEuzZpTcdN3y0kcra1LGWge9oXHYQSa9+pTeAsRxS +vTOBTI/53WXZFM2KJVj04sWDpQmQ1GwUY7VA3+vA/MRYfg0UFodUJ25W5HCEuGwyEn6CMUO+1918 +oa2u1qsgEu8KwxCMSZY13At1XrFP1U80DhEgB3VDRemjEdqso5nCtnkn4rnvyOL2NSl6dPrFf4IF +YqYK6miyeUcGbvJXqBUzxvd4Sj1Ce2t+/vdG6tHrju+IaFvowdlxfv1k7/9nR4hYJS8+hge9+6jl +gqispdNpQ80xiEmEU5LAsTkbOYMBMMTyqfrQA71yN2BWHzZ8vTmR9W0Nv3vXkg== +-----END CERTIFICATE----- + +ANF Secure Server Root CA +========================= +-----BEGIN CERTIFICATE----- +MIIF7zCCA9egAwIBAgIIDdPjvGz5a7EwDQYJKoZIhvcNAQELBQAwgYQxEjAQBgNVBAUTCUc2MzI4 +NzUxMDELMAkGA1UEBhMCRVMxJzAlBgNVBAoTHkFORiBBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lv +bjEUMBIGA1UECxMLQU5GIENBIFJhaXoxIjAgBgNVBAMTGUFORiBTZWN1cmUgU2VydmVyIFJvb3Qg +Q0EwHhcNMTkwOTA0MTAwMDM4WhcNMzkwODMwMTAwMDM4WjCBhDESMBAGA1UEBRMJRzYzMjg3NTEw +MQswCQYDVQQGEwJFUzEnMCUGA1UEChMeQU5GIEF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uMRQw +EgYDVQQLEwtBTkYgQ0EgUmFpejEiMCAGA1UEAxMZQU5GIFNlY3VyZSBTZXJ2ZXIgUm9vdCBDQTCC +AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBANvrayvmZFSVgpCjcqQZAZ2cC4Ffc0m6p6zz +BE57lgvsEeBbphzOG9INgxwruJ4dfkUyYA8H6XdYfp9qyGFOtibBTI3/TO80sh9l2Ll49a2pcbnv +T1gdpd50IJeh7WhM3pIXS7yr/2WanvtH2Vdy8wmhrnZEE26cLUQ5vPnHO6RYPUG9tMJJo8gN0pcv +B2VSAKduyK9o7PQUlrZXH1bDOZ8rbeTzPvY1ZNoMHKGESy9LS+IsJJ1tk0DrtSOOMspvRdOoiXse +zx76W0OLzc2oD2rKDF65nkeP8Nm2CgtYZRczuSPkdxl9y0oukntPLxB3sY0vaJxizOBQ+OyRp1RM +VwnVdmPF6GUe7m1qzwmd+nxPrWAI/VaZDxUse6mAq4xhj0oHdkLePfTdsiQzW7i1o0TJrH93PB0j +7IKppuLIBkwC/qxcmZkLLxCKpvR/1Yd0DVlJRfbwcVw5Kda/SiOL9V8BY9KHcyi1Swr1+KuCLH5z +JTIdC2MKF4EA/7Z2Xue0sUDKIbvVgFHlSFJnLNJhiQcND85Cd8BEc5xEUKDbEAotlRyBr+Qc5RQe +8TZBAQIvfXOn3kLMTOmJDVb3n5HUA8ZsyY/b2BzgQJhdZpmYgG4t/wHFzstGH6wCxkPmrqKEPMVO +Hj1tyRRM4y5Bu8o5vzY8KhmqQYdOpc5LMnndkEl/AgMBAAGjYzBhMB8GA1UdIwQYMBaAFJxf0Gxj +o1+TypOYCK2Mh6UsXME3MB0GA1UdDgQWBBScX9BsY6Nfk8qTmAitjIelLFzBNzAOBgNVHQ8BAf8E +BAMCAYYwDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEATh65isagmD9uw2nAalxJ +UqzLK114OMHVVISfk/CHGT0sZonrDUL8zPB1hT+L9IBdeeUXZ701guLyPI59WzbLWoAAKfLOKyzx +j6ptBZNscsdW699QIyjlRRA96Gejrw5VD5AJYu9LWaL2U/HANeQvwSS9eS9OICI7/RogsKQOLHDt +dD+4E5UGUcjohybKpFtqFiGS3XNgnhAY3jyB6ugYw3yJ8otQPr0R4hUDqDZ9MwFsSBXXiJCZBMXM +5gf0vPSQ7RPi6ovDj6MzD8EpTBNO2hVWcXNyglD2mjN8orGoGjR0ZVzO0eurU+AagNjqOknkJjCb +5RyKqKkVMoaZkgoQI1YS4PbOTOK7vtuNknMBZi9iPrJyJ0U27U1W45eZ/zo1PqVUSlJZS2Db7v54 +EX9K3BR5YLZrZAPbFYPhor72I5dQ8AkzNqdxliXzuUJ92zg/LFis6ELhDtjTO0wugumDLmsx2d1H +hk9tl5EuT+IocTUW0fJz/iUrB0ckYyfI+PbZa/wSMVYIwFNCr5zQM378BvAxRAMU8Vjq8moNqRGy +g77FGr8H6lnco4g175x2MjxNBiLOFeXdntiP2t7SxDnlF4HPOEfrf4htWRvfn0IUrn7PqLBmZdo3 
+r5+qPeoott7VMVgWglvquxl1AnMaykgaIZOQCo6ThKd9OyMYkomgjaw= +-----END CERTIFICATE----- + +Certum EC-384 CA +================ +-----BEGIN CERTIFICATE----- +MIICZTCCAeugAwIBAgIQeI8nXIESUiClBNAt3bpz9DAKBggqhkjOPQQDAzB0MQswCQYDVQQGEwJQ +TDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2Vy +dGlmaWNhdGlvbiBBdXRob3JpdHkxGTAXBgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwHhcNMTgwMzI2 +MDcyNDU0WhcNNDMwMzI2MDcyNDU0WjB0MQswCQYDVQQGEwJQTDEhMB8GA1UEChMYQXNzZWNvIERh +dGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBBdXRob3JpdHkx +GTAXBgNVBAMTEENlcnR1bSBFQy0zODQgQ0EwdjAQBgcqhkjOPQIBBgUrgQQAIgNiAATEKI6rGFtq +vm5kN2PkzeyrOvfMobgOgknXhimfoZTy42B4mIF4Bk3y7JoOV2CDn7TmFy8as10CW4kjPMIRBSqn +iBMY81CE1700LCeJVf/OTOffph8oxPBUw7l8t1Ot68KjQjBAMA8GA1UdEwEB/wQFMAMBAf8wHQYD +VR0OBBYEFI0GZnQkdjrzife81r1HfS+8EF9LMA4GA1UdDwEB/wQEAwIBBjAKBggqhkjOPQQDAwNo +ADBlAjADVS2m5hjEfO/JUG7BJw+ch69u1RsIGL2SKcHvlJF40jocVYli5RsJHrpka/F2tNQCMQC0 +QoSZ/6vnnvuRlydd3LBbMHHOXjgaatkl5+r3YZJW+OraNsKHZZYuciUvf9/DE8k= +-----END CERTIFICATE----- + +Certum Trusted Root CA +====================== +-----BEGIN CERTIFICATE----- +MIIFwDCCA6igAwIBAgIQHr9ZULjJgDdMBvfrVU+17TANBgkqhkiG9w0BAQ0FADB6MQswCQYDVQQG +EwJQTDEhMB8GA1UEChMYQXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0g +Q2VydGlmaWNhdGlvbiBBdXRob3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0Ew +HhcNMTgwMzE2MTIxMDEzWhcNNDMwMzE2MTIxMDEzWjB6MQswCQYDVQQGEwJQTDEhMB8GA1UEChMY +QXNzZWNvIERhdGEgU3lzdGVtcyBTLkEuMScwJQYDVQQLEx5DZXJ0dW0gQ2VydGlmaWNhdGlvbiBB +dXRob3JpdHkxHzAdBgNVBAMTFkNlcnR1bSBUcnVzdGVkIFJvb3QgQ0EwggIiMA0GCSqGSIb3DQEB +AQUAA4ICDwAwggIKAoICAQDRLY67tzbqbTeRn06TpwXkKQMlzhyC93yZn0EGze2jusDbCSzBfN8p +fktlL5On1AFrAygYo9idBcEq2EXxkd7fO9CAAozPOA/qp1x4EaTByIVcJdPTsuclzxFUl6s1wB52 +HO8AU5853BSlLCIls3Jy/I2z5T4IHhQqNwuIPMqw9MjCoa68wb4pZ1Xi/K1ZXP69VyywkI3C7Te2 +fJmItdUDmj0VDT06qKhF8JVOJVkdzZhpu9PMMsmN74H+rX2Ju7pgE8pllWeg8xn2A1bUatMn4qGt +g/BKEiJ3HAVz4hlxQsDsdUaakFjgao4rpUYwBI4Zshfjvqm6f1bxJAPXsiEodg42MEx51UGamqi4 +NboMOvJEGyCI98Ul1z3G4z5D3Yf+xOr1Uz5MZf87Sst4WmsXXw3Hw09Omiqi7VdNIuJGmj8PkTQk +fVXjjJU30xrwCSss0smNtA0Aq2cpKNgB9RkEth2+dv5yXMSFytKAQd8FqKPVhJBPC/PgP5sZ0jeJ +P/J7UhyM9uH3PAeXjA6iWYEMspA90+NZRu0PqafegGtaqge2Gcu8V/OXIXoMsSt0Puvap2ctTMSY +njYJdmZm/Bo/6khUHL4wvYBQv3y1zgD2DGHZ5yQD4OMBgQ692IU0iL2yNqh7XAjlRICMb/gv1SHK +HRzQ+8S1h9E6Tsd2tTVItQIDAQABo0IwQDAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSM+xx1 +vALTn04uSNn5YFSqxLNP+jAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQENBQADggIBAEii1QAL +LtA/vBzVtVRJHlpr9OTy4EA34MwUe7nJ+jW1dReTagVphZzNTxl4WxmB82M+w85bj/UvXgF2Ez8s +ALnNllI5SW0ETsXpD4YN4fqzX4IS8TrOZgYkNCvozMrnadyHncI013nR03e4qllY/p0m+jiGPp2K +h2RX5Rc64vmNueMzeMGQ2Ljdt4NR5MTMI9UGfOZR0800McD2RrsLrfw9EAUqO0qRJe6M1ISHgCq8 +CYyqOhNf6DR5UMEQGfnTKB7U0VEwKbOukGfWHwpjscWpxkIxYxeU72nLL/qMFH3EQxiJ2fAyQOaA +4kZf5ePBAFmo+eggvIksDkc0C+pXwlM2/KfUrzHN/gLldfq5Jwn58/U7yn2fqSLLiMmq0Uc9Nneo +WWRrJ8/vJ8HjJLWG965+Mk2weWjROeiQWMODvA8s1pfrzgzhIMfatz7DP78v3DSk+yshzWePS/Tj +6tQ/50+6uaWTRRxmHyH6ZF5v4HaUMst19W7l9o/HuKTMqJZ9ZPskWkoDbGs4xugDQ5r3V7mzKWmT +OPQD8rv7gmsHINFSH5pkAnuYZttcTVoP0ISVoDwUQwbKytu4QTbaakRnh6+v40URFWkIsr4WOZck +bxJF0WddCajJFdr60qZfE2Efv4WstK2tBZQIgx51F9NxO5NQI1mg7TyRVJ12AMXDuDjb +-----END CERTIFICATE----- + +TunTrust Root CA +================ +-----BEGIN CERTIFICATE----- +MIIFszCCA5ugAwIBAgIUEwLV4kBMkkaGFmddtLu7sms+/BMwDQYJKoZIhvcNAQELBQAwYTELMAkG +A1UEBhMCVE4xNzA1BgNVBAoMLkFnZW5jZSBOYXRpb25hbGUgZGUgQ2VydGlmaWNhdGlvbiBFbGVj +dHJvbmlxdWUxGTAXBgNVBAMMEFR1blRydXN0IFJvb3QgQ0EwHhcNMTkwNDI2MDg1NzU2WhcNNDQw +NDI2MDg1NzU2WjBhMQswCQYDVQQGEwJUTjE3MDUGA1UECgwuQWdlbmNlIE5hdGlvbmFsZSBkZSBD 
+ZXJ0aWZpY2F0aW9uIEVsZWN0cm9uaXF1ZTEZMBcGA1UEAwwQVHVuVHJ1c3QgUm9vdCBDQTCCAiIw +DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMPN0/y9BFPdDCA61YguBUtB9YOCfvdZn56eY+hz +2vYGqU8ftPkLHzmMmiDQfgbU7DTZhrx1W4eI8NLZ1KMKsmwb60ksPqxd2JQDoOw05TDENX37Jk0b +bjBU2PWARZw5rZzJJQRNmpA+TkBuimvNKWfGzC3gdOgFVwpIUPp6Q9p+7FuaDmJ2/uqdHYVy7BG7 +NegfJ7/Boce7SBbdVtfMTqDhuazb1YMZGoXRlJfXyqNlC/M4+QKu3fZnz8k/9YosRxqZbwUN/dAd +gjH8KcwAWJeRTIAAHDOFli/LQcKLEITDCSSJH7UP2dl3RxiSlGBcx5kDPP73lad9UKGAwqmDrViW +VSHbhlnUr8a83YFuB9tgYv7sEG7aaAH0gxupPqJbI9dkxt/con3YS7qC0lH4Zr8GRuR5KiY2eY8f +Tpkdso8MDhz/yV3A/ZAQprE38806JG60hZC/gLkMjNWb1sjxVj8agIl6qeIbMlEsPvLfe/ZdeikZ +juXIvTZxi11Mwh0/rViizz1wTaZQmCXcI/m4WEEIcb9PuISgjwBUFfyRbVinljvrS5YnzWuioYas +DXxU5mZMZl+QviGaAkYt5IPCgLnPSz7ofzwB7I9ezX/SKEIBlYrilz0QIX32nRzFNKHsLA4KUiwS +VXAkPcvCFDVDXSdOvsC9qnyW5/yeYa1E0wCXAgMBAAGjYzBhMB0GA1UdDgQWBBQGmpsfU33x9aTI +04Y+oXNZtPdEITAPBgNVHRMBAf8EBTADAQH/MB8GA1UdIwQYMBaAFAaamx9TffH1pMjThj6hc1m0 +90QhMA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOCAgEAqgVutt0Vyb+zxiD2BkewhpMl +0425yAA/l/VSJ4hxyXT968pk21vvHl26v9Hr7lxpuhbI87mP0zYuQEkHDVneixCwSQXi/5E/S7fd +Ao74gShczNxtr18UnH1YeA32gAm56Q6XKRm4t+v4FstVEuTGfbvE7Pi1HE4+Z7/FXxttbUcoqgRY +YdZ2vyJ/0Adqp2RT8JeNnYA/u8EH22Wv5psymsNUk8QcCMNE+3tjEUPRahphanltkE8pjkcFwRJp +adbGNjHh/PqAulxPxOu3Mqz4dWEX1xAZufHSCe96Qp1bWgvUxpVOKs7/B9dPfhgGiPEZtdmYu65x +xBzndFlY7wyJz4sfdZMaBBSSSFCp61cpABbjNhzI+L/wM9VBD8TMPN3pM0MBkRArHtG5Xc0yGYuP +jCB31yLEQtyEFpslbei0VXF/sHyz03FJuc9SpAQ/3D2gu68zngowYI7bnV2UqL1g52KAdoGDDIzM +MEZJ4gzSqK/rYXHv5yJiqfdcZGyfFoxnNidF9Ql7v/YQCvGwjVRDjAS6oz/v4jXH+XTgbzRB0L9z +ZVcg+ZtnemZoJE6AZb0QmQZZ8mWvuMZHu/2QeItBcy6vVR/cO5JyboTT0GFMDcx2V+IthSIVNg3r +AZ3r2OvEhJn7wAzMMujjd9qDRIueVSjAi1jTkD5OGwDxFa2DK5o= +-----END CERTIFICATE----- + +HARICA TLS RSA Root CA 2021 +=========================== +-----BEGIN CERTIFICATE----- +MIIFpDCCA4ygAwIBAgIQOcqTHO9D88aOk8f0ZIk4fjANBgkqhkiG9w0BAQsFADBsMQswCQYDVQQG +EwJHUjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9u +cyBDQTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBSU0EgUm9vdCBDQSAyMDIxMB4XDTIxMDIxOTEwNTUz +OFoXDTQ1MDIxMzEwNTUzN1owbDELMAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRl +bWljIGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgUlNB +IFJvb3QgQ0EgMjAyMTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAIvC569lmwVnlskN +JLnQDmT8zuIkGCyEf3dRywQRNrhe7Wlxp57kJQmXZ8FHws+RFjZiPTgE4VGC/6zStGndLuwRo0Xu +a2s7TL+MjaQenRG56Tj5eg4MmOIjHdFOY9TnuEFE+2uva9of08WRiFukiZLRgeaMOVig1mlDqa2Y +Ulhu2wr7a89o+uOkXjpFc5gH6l8Cct4MpbOfrqkdtx2z/IpZ525yZa31MJQjB/OCFks1mJxTuy/K +5FrZx40d/JiZ+yykgmvwKh+OC19xXFyuQnspiYHLA6OZyoieC0AJQTPb5lh6/a6ZcMBaD9YThnEv +dmn8kN3bLW7R8pv1GmuebxWMevBLKKAiOIAkbDakO/IwkfN4E8/BPzWr8R0RI7VDIp4BkrcYAuUR +0YLbFQDMYTfBKnya4dC6s1BG7oKsnTH4+yPiAwBIcKMJJnkVU2DzOFytOOqBAGMUuTNe3QvboEUH +GjMJ+E20pwKmafTCWQWIZYVWrkvL4N48fS0ayOn7H6NhStYqE613TBoYm5EPWNgGVMWX+Ko/IIqm +haZ39qb8HOLubpQzKoNQhArlT4b4UEV4AIHrW2jjJo3Me1xR9BQsQL4aYB16cmEdH2MtiKrOokWQ +CPxrvrNQKlr9qEgYRtaQQJKQCoReaDH46+0N0x3GfZkYVVYnZS6NRcUk7M7jAgMBAAGjQjBAMA8G +A1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFApII6ZgpJIKM+qTW8VX6iVNvRLuMA4GA1UdDwEB/wQE +AwIBhjANBgkqhkiG9w0BAQsFAAOCAgEAPpBIqm5iFSVmewzVjIuJndftTgfvnNAUX15QvWiWkKQU +EapobQk1OUAJ2vQJLDSle1mESSmXdMgHHkdt8s4cUCbjnj1AUz/3f5Z2EMVGpdAgS1D0NTsY9FVq +QRtHBmg8uwkIYtlfVUKqrFOFrJVWNlar5AWMxajaH6NpvVMPxP/cyuN+8kyIhkdGGvMA9YCRotxD +QpSbIPDRzbLrLFPCU3hKTwSUQZqPJzLB5UkZv/HywouoCjkxKLR9YjYsTewfM7Z+d21+UPCfDtcR +j88YxeMn/ibvBZ3PzzfF0HvaO7AWhAw6k9a+F9sPPg4ZeAnHqQJyIkv3N3a6dcSFA1pj1bF1BcK5 +vZStjBWZp5N99sXzqnTPBIWUmAD04vnKJGW/4GKvyMX6ssmeVkjaef2WdhW+o45WxLM0/L5H9MG0 
+qPzVMIho7suuyWPEdr6sOBjhXlzPrjoiUevRi7PzKzMHVIf6tLITe7pTBGIBnfHAT+7hOtSLIBD6 +Alfm78ELt5BGnBkpjNxvoEppaZS3JGWg/6w/zgH7IS79aPib8qXPMThcFarmlwDB31qlpzmq6YR/ +PFGoOtmUW4y/Twhx5duoXNTSpv4Ao8YWxw/ogM4cKGR0GQjTQuPOAF1/sdwTsOEFy9EgqoZ0njnn +kf3/W9b3raYvAwtt41dU63ZTGI0RmLo= +-----END CERTIFICATE----- + +HARICA TLS ECC Root CA 2021 +=========================== +-----BEGIN CERTIFICATE----- +MIICVDCCAdugAwIBAgIQZ3SdjXfYO2rbIvT/WeK/zjAKBggqhkjOPQQDAzBsMQswCQYDVQQGEwJH +UjE3MDUGA1UECgwuSGVsbGVuaWMgQWNhZGVtaWMgYW5kIFJlc2VhcmNoIEluc3RpdHV0aW9ucyBD +QTEkMCIGA1UEAwwbSEFSSUNBIFRMUyBFQ0MgUm9vdCBDQSAyMDIxMB4XDTIxMDIxOTExMDExMFoX +DTQ1MDIxMzExMDEwOVowbDELMAkGA1UEBhMCR1IxNzA1BgNVBAoMLkhlbGxlbmljIEFjYWRlbWlj +IGFuZCBSZXNlYXJjaCBJbnN0aXR1dGlvbnMgQ0ExJDAiBgNVBAMMG0hBUklDQSBUTFMgRUNDIFJv +b3QgQ0EgMjAyMTB2MBAGByqGSM49AgEGBSuBBAAiA2IABDgI/rGgltJ6rK9JOtDA4MM7KKrxcm1l +AEeIhPyaJmuqS7psBAqIXhfyVYf8MLA04jRYVxqEU+kw2anylnTDUR9YSTHMmE5gEYd103KUkE+b +ECUqqHgtvpBBWJAVcqeht6NCMEAwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUyRtTgRL+BNUW +0aq8mm+3oJUZbsowDgYDVR0PAQH/BAQDAgGGMAoGCCqGSM49BAMDA2cAMGQCMBHervjcToiwqfAi +rcJRQO9gcS3ujwLEXQNwSaSS6sUUiHCm0w2wqsosQJz76YJumgIwK0eaB8bRwoF8yguWGEEbo/Qw +CZ61IygNnxS2PFOiTAZpffpskcYqSUXm7LcT4Tps +-----END CERTIFICATE----- + +Autoridad de Certificacion Firmaprofesional CIF A62634068 +========================================================= +-----BEGIN CERTIFICATE----- +MIIGFDCCA/ygAwIBAgIIG3Dp0v+ubHEwDQYJKoZIhvcNAQELBQAwUTELMAkGA1UEBhMCRVMxQjBA +BgNVBAMMOUF1dG9yaWRhZCBkZSBDZXJ0aWZpY2FjaW9uIEZpcm1hcHJvZmVzaW9uYWwgQ0lGIEE2 +MjYzNDA2ODAeFw0xNDA5MjMxNTIyMDdaFw0zNjA1MDUxNTIyMDdaMFExCzAJBgNVBAYTAkVTMUIw +QAYDVQQDDDlBdXRvcmlkYWQgZGUgQ2VydGlmaWNhY2lvbiBGaXJtYXByb2Zlc2lvbmFsIENJRiBB +NjI2MzQwNjgwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDKlmuO6vj78aI14H9M2uDD +Utd9thDIAl6zQyrET2qyyhxdKJp4ERppWVevtSBC5IsP5t9bpgOSL/UR5GLXMnE42QQMcas9UX4P +B99jBVzpv5RvwSmCwLTaUbDBPLutN0pcyvFLNg4kq7/DhHf9qFD0sefGL9ItWY16Ck6WaVICqjaY +7Pz6FIMMNx/Jkjd/14Et5cS54D40/mf0PmbR0/RAz15iNA9wBj4gGFrO93IbJWyTdBSTo3OxDqqH +ECNZXyAFGUftaI6SEspd/NYrspI8IM/hX68gvqB2f3bl7BqGYTM+53u0P6APjqK5am+5hyZvQWyI +plD9amML9ZMWGxmPsu2bm8mQ9QEM3xk9Dz44I8kvjwzRAv4bVdZO0I08r0+k8/6vKtMFnXkIoctX +MbScyJCyZ/QYFpM6/EfY0XiWMR+6KwxfXZmtY4laJCB22N/9q06mIqqdXuYnin1oKaPnirjaEbsX +LZmdEyRG98Xi2J+Of8ePdG1asuhy9azuJBCtLxTa/y2aRnFHvkLfuwHb9H/TKI8xWVvTyQKmtFLK +bpf7Q8UIJm+K9Lv9nyiqDdVF8xM6HdjAeI9BZzwelGSuewvF6NkBiDkal4ZkQdU7hwxu+g/GvUgU +vzlN1J5Bto+WHWOWk9mVBngxaJ43BjuAiUVhOSPHG0SjFeUc+JIwuwIDAQABo4HvMIHsMB0GA1Ud +DgQWBBRlzeurNR4APn7VdMActHNHDhpkLzASBgNVHRMBAf8ECDAGAQH/AgEBMIGmBgNVHSAEgZ4w +gZswgZgGBFUdIAAwgY8wLwYIKwYBBQUHAgEWI2h0dHA6Ly93d3cuZmlybWFwcm9mZXNpb25hbC5j +b20vY3BzMFwGCCsGAQUFBwICMFAeTgBQAGEAcwBlAG8AIABkAGUAIABsAGEAIABCAG8AbgBhAG4A +bwB2AGEAIAA0ADcAIABCAGEAcgBjAGUAbABvAG4AYQAgADAAOAAwADEANzAOBgNVHQ8BAf8EBAMC +AQYwDQYJKoZIhvcNAQELBQADggIBAHSHKAIrdx9miWTtj3QuRhy7qPj4Cx2Dtjqn6EWKB7fgPiDL +4QjbEwj4KKE1soCzC1HA01aajTNFSa9J8OA9B3pFE1r/yJfY0xgsfZb43aJlQ3CTkBW6kN/oGbDb +LIpgD7dvlAceHabJhfa9NPhAeGIQcDq+fUs5gakQ1JZBu/hfHAsdCPKxsIl68veg4MSPi3i1O1il +I45PVf42O+AMt8oqMEEgtIDNrvx2ZnOorm7hfNoD6JQg5iKj0B+QXSBTFCZX2lSX3xZEEAEeiGaP +cjiT3SC3NL7X8e5jjkd5KAb881lFJWAiMxujX6i6KtoaPc1A6ozuBRWV1aUsIC+nmCjuRfzxuIgA +LI9C2lHVnOUTaHFFQ4ueCyE8S1wF3BqfmI7avSKecs2tCsvMo2ebKHTEm9caPARYpoKdrcd7b/+A +lun4jWq9GJAd/0kakFI3ky88Al2CdgtR5xbHV/g4+afNmyJU72OwFW1TZQNKXkqgsqeOSQBZONXH +9IBk9W6VULgRfhVwOEqwf9DEMnDAGf/JOC0ULGb0QkTmVXYbgBVX/8Cnp6o5qtjTcNAuuuuUavpf +NIbnYrX9ivAwhZTJryQCL2/W3Wf+47BVTwSYT6RBVuKT0Gro1vP7ZeDOdcQxWQzugsgMYDNKGbqE +ZycPvEJdvSRUDewdcAZfpLz6IHxV +-----END CERTIFICATE----- + +vTrus 
ECC Root CA +================= +-----BEGIN CERTIFICATE----- +MIICDzCCAZWgAwIBAgIUbmq8WapTvpg5Z6LSa6Q75m0c1towCgYIKoZIzj0EAwMwRzELMAkGA1UE +BhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBS +b290IENBMB4XDTE4MDczMTA3MjY0NFoXDTQzMDczMTA3MjY0NFowRzELMAkGA1UEBhMCQ04xHDAa +BgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xGjAYBgNVBAMTEXZUcnVzIEVDQyBSb290IENBMHYw +EAYHKoZIzj0CAQYFK4EEACIDYgAEZVBKrox5lkqqHAjDo6LN/llWQXf9JpRCux3NCNtzslt188+c +ToL0v/hhJoVs1oVbcnDS/dtitN9Ti72xRFhiQgnH+n9bEOf+QP3A2MMrMudwpremIFUde4BdS49n +TPEQo0IwQDAdBgNVHQ4EFgQUmDnNvtiyjPeyq+GtJK97fKHbH88wDwYDVR0TAQH/BAUwAwEB/zAO +BgNVHQ8BAf8EBAMCAQYwCgYIKoZIzj0EAwMDaAAwZQIwV53dVvHH4+m4SVBrm2nDb+zDfSXkV5UT +QJtS0zvzQBm8JsctBp61ezaf9SXUY2sAAjEA6dPGnlaaKsyh2j/IZivTWJwghfqrkYpwcBE4YGQL +YgmRWAD5Tfs0aNoJrSEGGJTO +-----END CERTIFICATE----- + +vTrus Root CA +============= +-----BEGIN CERTIFICATE----- +MIIFVjCCAz6gAwIBAgIUQ+NxE9izWRRdt86M/TX9b7wFjUUwDQYJKoZIhvcNAQELBQAwQzELMAkG +A1UEBhMCQ04xHDAaBgNVBAoTE2lUcnVzQ2hpbmEgQ28uLEx0ZC4xFjAUBgNVBAMTDXZUcnVzIFJv +b3QgQ0EwHhcNMTgwNzMxMDcyNDA1WhcNNDMwNzMxMDcyNDA1WjBDMQswCQYDVQQGEwJDTjEcMBoG +A1UEChMTaVRydXNDaGluYSBDby4sTHRkLjEWMBQGA1UEAxMNdlRydXMgUm9vdCBDQTCCAiIwDQYJ +KoZIhvcNAQEBBQADggIPADCCAgoCggIBAL1VfGHTuB0EYgWgrmy3cLRB6ksDXhA/kFocizuwZots +SKYcIrrVQJLuM7IjWcmOvFjai57QGfIvWcaMY1q6n6MLsLOaXLoRuBLpDLvPbmyAhykUAyyNJJrI +ZIO1aqwTLDPxn9wsYTwaP3BVm60AUn/PBLn+NvqcwBauYv6WTEN+VRS+GrPSbcKvdmaVayqwlHeF +XgQPYh1jdfdr58tbmnDsPmcF8P4HCIDPKNsFxhQnL4Z98Cfe/+Z+M0jnCx5Y0ScrUw5XSmXX+6KA +YPxMvDVTAWqXcoKv8R1w6Jz1717CbMdHflqUhSZNO7rrTOiwCcJlwp2dCZtOtZcFrPUGoPc2BX70 +kLJrxLT5ZOrpGgrIDajtJ8nU57O5q4IikCc9Kuh8kO+8T/3iCiSn3mUkpF3qwHYw03dQ+A0Em5Q2 +AXPKBlim0zvc+gRGE1WKyURHuFE5Gi7oNOJ5y1lKCn+8pu8fA2dqWSslYpPZUxlmPCdiKYZNpGvu +/9ROutW04o5IWgAZCfEF2c6Rsffr6TlP9m8EQ5pV9T4FFL2/s1m02I4zhKOQUqqzApVg+QxMaPnu +1RcN+HFXtSXkKe5lXa/R7jwXC1pDxaWG6iSe4gUH3DRCEpHWOXSuTEGC2/KmSNGzm/MzqvOmwMVO +9fSddmPmAsYiS8GVP1BkLFTltvA8Kc9XAgMBAAGjQjBAMB0GA1UdDgQWBBRUYnBj8XWEQ1iO0RYg +scasGrz2iTAPBgNVHRMBAf8EBTADAQH/MA4GA1UdDwEB/wQEAwIBBjANBgkqhkiG9w0BAQsFAAOC +AgEAKbqSSaet8PFww+SX8J+pJdVrnjT+5hpk9jprUrIQeBqfTNqK2uwcN1LgQkv7bHbKJAs5EhWd +nxEt/Hlk3ODg9d3gV8mlsnZwUKT+twpw1aA08XXXTUm6EdGz2OyC/+sOxL9kLX1jbhd47F18iMjr +jld22VkE+rxSH0Ws8HqA7Oxvdq6R2xCOBNyS36D25q5J08FsEhvMKar5CKXiNxTKsbhm7xqC5PD4 +8acWabfbqWE8n/Uxy+QARsIvdLGx14HuqCaVvIivTDUHKgLKeBRtRytAVunLKmChZwOgzoy8sHJn +xDHO2zTlJQNgJXtxmOTAGytfdELSS8VZCAeHvsXDf+eW2eHcKJfWjwXj9ZtOyh1QRwVTsMo554Wg +icEFOwE30z9J4nfrI8iIZjs9OXYhRvHsXyO466JmdXTBQPfYaJqT4i2pLr0cox7IdMakLXogqzu4 +sEb9b91fUlV1YvCXoHzXOP0l382gmxDPi7g4Xl7FtKYCNqEeXxzP4padKar9mK5S4fNBUvupLnKW +nyfjqnN9+BojZns7q2WwMgFLFT49ok8MKzWixtlnEjUwzXYuFrOZnk1PTi07NEPhmg4NpGaXutIc +SkwsKouLgU9xGqndXHt7CMUADTdA43x7VF8vhV929vensBxXVsFy6K2ir40zSbofitzmdHxghm+H +l3s= +-----END CERTIFICATE----- + +ISRG Root X2 +============ +-----BEGIN CERTIFICATE----- +MIICGzCCAaGgAwIBAgIQQdKd0XLq7qeAwSxs6S+HUjAKBggqhkjOPQQDAzBPMQswCQYDVQQGEwJV +UzEpMCcGA1UEChMgSW50ZXJuZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElT +UkcgUm9vdCBYMjAeFw0yMDA5MDQwMDAwMDBaFw00MDA5MTcxNjAwMDBaME8xCzAJBgNVBAYTAlVT +MSkwJwYDVQQKEyBJbnRlcm5ldCBTZWN1cml0eSBSZXNlYXJjaCBHcm91cDEVMBMGA1UEAxMMSVNS +RyBSb290IFgyMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAEzZvVn4CDCuwJSvMWSj5cz3es3mcFDR0H +ttwW+1qLFNvicWDEukWVEYmO6gbf9yoWHKS5xcUy4APgHoIYOIvXRdgKam7mAHf7AlF9ItgKbppb +d9/w+kHsOdx1ymgHDB/qo0IwQDAOBgNVHQ8BAf8EBAMCAQYwDwYDVR0TAQH/BAUwAwEB/zAdBgNV +HQ4EFgQUfEKWrt5LSDv6kviejM9ti6lyN5UwCgYIKoZIzj0EAwMDaAAwZQIwe3lORlCEwkSHRhtF +cP9Ymd70/aTSVaYgLXTWNLxBo1BfASdWtL4ndQavEi51mI38AjEAi/V3bNTIZargCyzuFJ0nN6T5 
+U6VR5CmD1/iQMVtCnwr1/q4AaOeMSQ+2b1tbFfLn +-----END CERTIFICATE----- + +HiPKI Root CA - G1 +================== +-----BEGIN CERTIFICATE----- +MIIFajCCA1KgAwIBAgIQLd2szmKXlKFD6LDNdmpeYDANBgkqhkiG9w0BAQsFADBPMQswCQYDVQQG +EwJUVzEjMCEGA1UECgwaQ2h1bmdod2EgVGVsZWNvbSBDby4sIEx0ZC4xGzAZBgNVBAMMEkhpUEtJ +IFJvb3QgQ0EgLSBHMTAeFw0xOTAyMjIwOTQ2MDRaFw0zNzEyMzExNTU5NTlaME8xCzAJBgNVBAYT +AlRXMSMwIQYDVQQKDBpDaHVuZ2h3YSBUZWxlY29tIENvLiwgTHRkLjEbMBkGA1UEAwwSSGlQS0kg +Um9vdCBDQSAtIEcxMIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEA9B5/UnMyDHPkvRN0 +o9QwqNCuS9i233VHZvR85zkEHmpwINJaR3JnVfSl6J3VHiGh8Ge6zCFovkRTv4354twvVcg3Px+k +wJyz5HdcoEb+d/oaoDjq7Zpy3iu9lFc6uux55199QmQ5eiY29yTw1S+6lZgRZq2XNdZ1AYDgr/SE +YYwNHl98h5ZeQa/rh+r4XfEuiAU+TCK72h8q3VJGZDnzQs7ZngyzsHeXZJzA9KMuH5UHsBffMNsA +GJZMoYFL3QRtU6M9/Aes1MU3guvklQgZKILSQjqj2FPseYlgSGDIcpJQ3AOPgz+yQlda22rpEZfd +hSi8MEyr48KxRURHH+CKFgeW0iEPU8DtqX7UTuybCeyvQqww1r/REEXgphaypcXTT3OUM3ECoWqj +1jOXTyFjHluP2cFeRXF3D4FdXyGarYPM+l7WjSNfGz1BryB1ZlpK9p/7qxj3ccC2HTHsOyDry+K4 +9a6SsvfhhEvyovKTmiKe0xRvNlS9H15ZFblzqMF8b3ti6RZsR1pl8w4Rm0bZ/W3c1pzAtH2lsN0/ +Vm+h+fbkEkj9Bn8SV7apI09bA8PgcSojt/ewsTu8mL3WmKgMa/aOEmem8rJY5AIJEzypuxC00jBF +8ez3ABHfZfjcK0NVvxaXxA/VLGGEqnKG/uY6fsI/fe78LxQ+5oXdUG+3Se0CAwEAAaNCMEAwDwYD +VR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ncX+l6o/vY9cdVouslGDDjYr7AwDgYDVR0PAQH/BAQD +AgGGMA0GCSqGSIb3DQEBCwUAA4ICAQBQUfB13HAE4/+qddRxosuej6ip0691x1TPOhwEmSKsxBHi +7zNKpiMdDg1H2DfHb680f0+BazVP6XKlMeJ45/dOlBhbQH3PayFUhuaVevvGyuqcSE5XCV0vrPSl +tJczWNWseanMX/mF+lLFjfiRFOs6DRfQUsJ748JzjkZ4Bjgs6FzaZsT0pPBWGTMpWmWSBUdGSquE +wx4noR8RkpkndZMPvDY7l1ePJlsMu5wP1G4wB9TcXzZoZjmDlicmisjEOf6aIW/Vcobpf2Lll07Q +JNBAsNB1CI69aO4I1258EHBGG3zgiLKecoaZAeO/n0kZtCW+VmWuF2PlHt/o/0elv+EmBYTksMCv +5wiZqAxeJoBF1PhoL5aPruJKHJwWDBNvOIf2u8g0X5IDUXlwpt/L9ZlNec1OvFefQ05rLisY+Gpz +jLrFNe85akEez3GoorKGB1s6yeHvP2UEgEcyRHCVTjFnanRbEEV16rCf0OY1/k6fi8wrkkVbbiVg +hUbN0aqwdmaTd5a+g744tiROJgvM7XpWGuDpWsZkrUx6AEhEL7lAuxM+vhV4nYWBSipX3tUZQ9rb +yltHhoMLP7YNdnhzeSJesYAfz77RP1YQmCuVh6EfnWQUYDksswBVLuT1sw5XxJFBAJw/6KXf6vb/ +yPCtbVKoF6ubYfwSUTXkJf2vqmqGOQ== +-----END CERTIFICATE----- + +GlobalSign ECC Root CA - R4 +=========================== +-----BEGIN CERTIFICATE----- +MIIB3DCCAYOgAwIBAgINAgPlfvU/k/2lCSGypjAKBggqhkjOPQQDAjBQMSQwIgYDVQQLExtHbG9i +YWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkds +b2JhbFNpZ24wHhcNMTIxMTEzMDAwMDAwWhcNMzgwMTE5MDMxNDA3WjBQMSQwIgYDVQQLExtHbG9i +YWxTaWduIEVDQyBSb290IENBIC0gUjQxEzARBgNVBAoTCkdsb2JhbFNpZ24xEzARBgNVBAMTCkds +b2JhbFNpZ24wWTATBgcqhkjOPQIBBggqhkjOPQMBBwNCAAS4xnnTj2wlDp8uORkcA6SumuU5BwkW +ymOxuYb4ilfBV85C+nOh92VC/x7BALJucw7/xyHlGKSq2XE/qNS5zowdo0IwQDAOBgNVHQ8BAf8E +BAMCAYYwDwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQUVLB7rUW44kB/+wpu+74zyTyjhNUwCgYI +KoZIzj0EAwIDRwAwRAIgIk90crlgr/HmnKAWBVBfw147bmF0774BxL4YSFlhgjICICadVGNA3jdg +UM/I2O2dgq43mLyjj0xMqTQrbO/7lZsm +-----END CERTIFICATE----- + +GTS Root R1 +=========== +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlk28xsBNJiGuiFzANBgkqhkiG9w0BAQwFADBHMQswCQYDVQQGEwJV +UzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3Qg +UjEwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UE +ChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjEwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQC2EQKLHuOhd5s73L+UPreVp0A8of2C+X0yBoJx9vaM +f/vo27xqLpeXo4xL+Sv2sfnOhB2x+cWX3u+58qPpvBKJXqeqUqv4IyfLpLGcY9vXmX7wCl7raKb0 +xlpHDU0QM+NOsROjyBhsS+z8CZDfnWQpJSMHobTSPS5g4M/SCYe7zUjwTcLCeoiKu7rPWRnWr4+w +B7CeMfGCwcDfLqZtbBkOtdh+JhpFAz2weaSUKK0PfyblqAj+lug8aJRT7oM6iCsVlgmy4HqMLnXW 
+nOunVmSPlk9orj2XwoSPwLxAwAtcvfaHszVsrBhQf4TgTM2S0yDpM7xSma8ytSmzJSq0SPly4cpk +9+aCEI3oncKKiPo4Zor8Y/kB+Xj9e1x3+naH+uzfsQ55lVe0vSbv1gHR6xYKu44LtcXFilWr06zq +kUspzBmkMiVOKvFlRNACzqrOSbTqn3yDsEB750Orp2yjj32JgfpMpf/VjsPOS+C12LOORc92wO1A +K/1TD7Cn1TsNsYqiA94xrcx36m97PtbfkSIS5r762DL8EGMUUXLeXdYWk70paDPvOmbsB4om3xPX +V2V4J95eSRQAogB/mqghtqmxlbCluQ0WEdrHbEg8QOB+DVrNVjzRlwW5y0vtOUucxD/SVRNuJLDW +cfr0wbrM7Rv1/oFB2ACYPTrIrnqYNxgFlQIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQU5K8rJnEaK0gnhS9SZizv8IkTcT4wDQYJKoZIhvcNAQEMBQAD +ggIBAJ+qQibbC5u+/x6Wki4+omVKapi6Ist9wTrYggoGxval3sBOh2Z5ofmmWJyq+bXmYOfg6LEe +QkEzCzc9zolwFcq1JKjPa7XSQCGYzyI0zzvFIoTgxQ6KfF2I5DUkzps+GlQebtuyh6f88/qBVRRi +ClmpIgUxPoLW7ttXNLwzldMXG+gnoot7TiYaelpkttGsN/H9oPM47HLwEXWdyzRSjeZ2axfG34ar +J45JK3VmgRAhpuo+9K4l/3wV3s6MJT/KYnAK9y8JZgfIPxz88NtFMN9iiMG1D53Dn0reWVlHxYci +NuaCp+0KueIHoI17eko8cdLiA6EfMgfdG+RCzgwARWGAtQsgWSl4vflVy2PFPEz0tv/bal8xa5me +LMFrUKTX5hgUvYU/Z6tGn6D/Qqc6f1zLXbBwHSs09dR2CQzreExZBfMzQsNhFRAbd03OIozUhfJF +fbdT6u9AWpQKXCBfTkBdYiJ23//OYb2MI3jSNwLgjt7RETeJ9r/tSQdirpLsQBqvFAnZ0E6yove+ +7u7Y/9waLd64NnHi/Hm3lCXRSHNboTXns5lndcEZOitHTtNCjv0xyBZm2tIMPNuzjsmhDYAPexZ3 +FL//2wmUspO8IFgV6dtxQ/PeEMMA3KgqlbbC1j+Qa3bbbP6MvPJwNQzcmRk13NfIRmPVNnGuV/u3 +gm3c +-----END CERTIFICATE----- + +GTS Root R2 +=========== +-----BEGIN CERTIFICATE----- +MIIFVzCCAz+gAwIBAgINAgPlrsWNBCUaqxElqjANBgkqhkiG9w0BAQwFADBHMQswCQYDVQQGEwJV +UzEiMCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3Qg +UjIwHhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UE +ChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjIwggIiMA0G +CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDO3v2m++zsFDQ8BwZabFn3GTXd98GdVarTzTukk3Lv +CvptnfbwhYBboUhSnznFt+4orO/LdmgUud+tAWyZH8QiHZ/+cnfgLFuv5AS/T3KgGjSY6Dlo7JUl +e3ah5mm5hRm9iYz+re026nO8/4Piy33B0s5Ks40FnotJk9/BW9BuXvAuMC6C/Pq8tBcKSOWIm8Wb +a96wyrQD8Nr0kLhlZPdcTK3ofmZemde4wj7I0BOdre7kRXuJVfeKH2JShBKzwkCX44ofR5GmdFrS ++LFjKBC4swm4VndAoiaYecb+3yXuPuWgf9RhD1FLPD+M2uFwdNjCaKH5wQzpoeJ/u1U8dgbuak7M +kogwTZq9TwtImoS1mKPV+3PBV2HdKFZ1E66HjucMUQkQdYhMvI35ezzUIkgfKtzra7tEscszcTJG +r61K8YzodDqs5xoic4DSMPclQsciOzsSrZYuxsN2B6ogtzVJV+mSSeh2FnIxZyuWfoqjx5RWIr9q +S34BIbIjMt/kmkRtWVtd9QCgHJvGeJeNkP+byKq0rxFROV7Z+2et1VsRnTKaG73VululycslaVNV +J1zgyjbLiGH7HrfQy+4W+9OmTN6SpdTi3/UGVN4unUu0kzCqgc7dGtxRcw1PcOnlthYhGXmy5okL +dWTK1au8CcEYof/UVKGFPP0UJAOyh9OktwIDAQABo0IwQDAOBgNVHQ8BAf8EBAMCAYYwDwYDVR0T +AQH/BAUwAwEB/zAdBgNVHQ4EFgQUu//KjiOfT5nK2+JopqUVJxce2Q4wDQYJKoZIhvcNAQEMBQAD +ggIBAB/Kzt3HvqGf2SdMC9wXmBFqiN495nFWcrKeGk6c1SuYJF2ba3uwM4IJvd8lRuqYnrYb/oM8 +0mJhwQTtzuDFycgTE1XnqGOtjHsB/ncw4c5omwX4Eu55MaBBRTUoCnGkJE+M3DyCB19m3H0Q/gxh +swWV7uGugQ+o+MePTagjAiZrHYNSVc61LwDKgEDg4XSsYPWHgJ2uNmSRXbBoGOqKYcl3qJfEycel +/FVL8/B/uWU9J2jQzGv6U53hkRrJXRqWbTKH7QMgyALOWr7Z6v2yTcQvG99fevX4i8buMTolUVVn +jWQye+mew4K6Ki3pHrTgSAai/GevHyICc/sgCq+dVEuhzf9gR7A/Xe8bVr2XIZYtCtFenTgCR2y5 +9PYjJbigapordwj6xLEokCZYCDzifqrXPW+6MYgKBesntaFJ7qBFVHvmJ2WZICGoo7z7GJa7Um8M +7YNRTOlZ4iBgxcJlkoKM8xAfDoqXvneCbT+PHV28SSe9zE8P4c52hgQjxcCMElv924SgJPFI/2R8 +0L5cFtHvma3AH/vLrrw4IgYmZNralw4/KBVEqE8AyvCazM90arQ+POuV7LXTWtiBmelDGDfrs7vR +WGJB82bSj6p4lVQgw1oudCvV0b4YacCs1aTPObpRhANl6WLAYv7YTVWW4tAR+kg0Eeye7QUd5MjW +HYbL +-----END CERTIFICATE----- + +GTS Root R3 +=========== +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPluILrIPglJ209ZjAKBggqhkjOPQQDAzBHMQswCQYDVQQGEwJVUzEi +MCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMw +HhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZ 
+R29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjMwdjAQBgcqhkjO +PQIBBgUrgQQAIgNiAAQfTzOHMymKoYTey8chWEGJ6ladK0uFxh1MJ7x/JlFyb+Kf1qPKzEUURout +736GjOyxfi//qXGdGIRFBEFVbivqJn+7kAHjSxm65FSWRQmx1WyRRK2EE46ajA2ADDL24CejQjBA +MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBTB8Sa6oC2uhYHP0/Eq +Er24Cmf9vDAKBggqhkjOPQQDAwNpADBmAjEA9uEglRR7VKOQFhG/hMjqb2sXnh5GmCCbn9MN2azT +L818+FsuVbu/3ZL3pAzcMeGiAjEA/JdmZuVDFhOD3cffL74UOO0BzrEXGhF16b0DjyZ+hOXJYKaV +11RZt+cRLInUue4X +-----END CERTIFICATE----- + +GTS Root R4 +=========== +-----BEGIN CERTIFICATE----- +MIICCTCCAY6gAwIBAgINAgPlwGjvYxqccpBQUjAKBggqhkjOPQQDAzBHMQswCQYDVQQGEwJVUzEi +MCAGA1UEChMZR29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQw +HhcNMTYwNjIyMDAwMDAwWhcNMzYwNjIyMDAwMDAwWjBHMQswCQYDVQQGEwJVUzEiMCAGA1UEChMZ +R29vZ2xlIFRydXN0IFNlcnZpY2VzIExMQzEUMBIGA1UEAxMLR1RTIFJvb3QgUjQwdjAQBgcqhkjO +PQIBBgUrgQQAIgNiAATzdHOnaItgrkO4NcWBMHtLSZ37wWHO5t5GvWvVYRg1rkDdc/eJkTBa6zzu +hXyiQHY7qca4R9gq55KRanPpsXI5nymfopjTX15YhmUPoYRlBtHci8nHc8iMai/lxKvRHYqjQjBA +MA4GA1UdDwEB/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MB0GA1UdDgQWBBSATNbrdP9JNqPV2Py1 +PsVq8JQdjDAKBggqhkjOPQQDAwNpADBmAjEA6ED/g94D9J+uHXqnLrmvT/aDHQ4thQEd0dlq7A/C +r8deVl5c1RxYIigL9zC2L7F8AjEA8GE8p/SgguMh1YQdc4acLa/KNJvxn7kjNuK8YAOdgLOaVsjh +4rsUecrNIdSUtUlD +-----END CERTIFICATE----- + +Telia Root CA v2 +================ +-----BEGIN CERTIFICATE----- +MIIFdDCCA1ygAwIBAgIPAWdfJ9b+euPkrL4JWwWeMA0GCSqGSIb3DQEBCwUAMEQxCzAJBgNVBAYT +AkZJMRowGAYDVQQKDBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2 +MjAeFw0xODExMjkxMTU1NTRaFw00MzExMjkxMTU1NTRaMEQxCzAJBgNVBAYTAkZJMRowGAYDVQQK +DBFUZWxpYSBGaW5sYW5kIE95ajEZMBcGA1UEAwwQVGVsaWEgUm9vdCBDQSB2MjCCAiIwDQYJKoZI +hvcNAQEBBQADggIPADCCAgoCggIBALLQPwe84nvQa5n44ndp586dpAO8gm2h/oFlH0wnrI4AuhZ7 +6zBqAMCzdGh+sq/H1WKzej9Qyow2RCRj0jbpDIX2Q3bVTKFgcmfiKDOlyzG4OiIjNLh9vVYiQJ3q +9HsDrWj8soFPmNB06o3lfc1jw6P23pLCWBnglrvFxKk9pXSW/q/5iaq9lRdU2HhE8Qx3FZLgmEKn +pNaqIJLNwaCzlrI6hEKNfdWV5Nbb6WLEWLN5xYzTNTODn3WhUidhOPFZPY5Q4L15POdslv5e2QJl +tI5c0BE0312/UqeBAMN/mUWZFdUXyApT7GPzmX3MaRKGwhfwAZ6/hLzRUssbkmbOpFPlob/E2wnW +5olWK8jjfN7j/4nlNW4o6GwLI1GpJQXrSPjdscr6bAhR77cYbETKJuFzxokGgeWKrLDiKca5JLNr +RBH0pUPCTEPlcDaMtjNXepUugqD0XBCzYYP2AgWGLnwtbNwDRm41k9V6lS/eINhbfpSQBGq6WT0E +BXWdN6IOLj3rwaRSg/7Qa9RmjtzG6RJOHSpXqhC8fF6CfaamyfItufUXJ63RDolUK5X6wK0dmBR4 +M0KGCqlztft0DbcbMBnEWg4cJ7faGND/isgFuvGqHKI3t+ZIpEYslOqodmJHixBTB0hXbOKSTbau +BcvcwUpej6w9GU7C7WB1K9vBykLVAgMBAAGjYzBhMB8GA1UdIwQYMBaAFHKs5DN5qkWH9v2sHZ7W +xy+G2CQ5MB0GA1UdDgQWBBRyrOQzeapFh/b9rB2e1scvhtgkOTAOBgNVHQ8BAf8EBAMCAQYwDwYD +VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAgEAoDtZpwmUPjaE0n4vOaWWl/oRrfxn83EJ +8rKJhGdEr7nv7ZbsnGTbMjBvZ5qsfl+yqwE2foH65IRe0qw24GtixX1LDoJt0nZi0f6X+J8wfBj5 +tFJ3gh1229MdqfDBmgC9bXXYfef6xzijnHDoRnkDry5023X4blMMA8iZGok1GTzTyVR8qPAs5m4H +eW9q4ebqkYJpCh3DflminmtGFZhb069GHWLIzoBSSRE/yQQSwxN8PzuKlts8oB4KtItUsiRnDe+C +y748fdHif64W1lZYudogsYMVoe+KTTJvQS8TUoKU1xrBeKJR3Stwbbca+few4GeXVtt8YVMJAygC +QMez2P2ccGrGKMOF6eLtGpOg3kuYooQ+BXcBlj37tCAPnHICehIv1aO6UXivKitEZU61/Qrowc15 +h2Er3oBXRb9n8ZuRXqWk7FlIEA04x7D6w0RtBPV4UBySllva9bguulvP5fBqnUsvWHMtTy3EHD70 +sz+rFQ47GUGKpMFXEmZxTPpT41frYpUJnlTd0cI8Vzy9OK2YZLe4A5pTVmBds9hCG1xLEooc6+t9 +xnppxyd/pPiL8uSUZodL6ZQHCRJ5irLrdATczvREWeAWysUsWNc8e89ihmpQfTU2Zqf7N+cox9jQ +raVplI/owd8k+BsHMYeB2F326CjYSlKArBPuUBQemMc= +-----END CERTIFICATE----- + +D-TRUST BR Root CA 1 2020 +========================= +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQfMmPK4TX3+oPyWWa00tNljAKBggqhkjOPQQDAzBIMQswCQYDVQQGEwJE 
+RTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRSVVNUIEJSIFJvb3QgQ0EgMSAy +MDIwMB4XDTIwMDIxMTA5NDUwMFoXDTM1MDIxMTA5NDQ1OVowSDELMAkGA1UEBhMCREUxFTATBgNV +BAoTDEQtVHJ1c3QgR21iSDEiMCAGA1UEAxMZRC1UUlVTVCBCUiBSb290IENBIDEgMjAyMDB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABMbLxyjR+4T1mu9CFCDhQ2tuda38KwOE1HaTJddZO0Flax7mNCq7 +dPYSzuht56vkPE4/RAiLzRZxy7+SmfSk1zxQVFKQhYN4lGdnoxwJGT11NIXe7WB9xwy0QVK5buXu +QqOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFHOREKv/VbNafAkl1bK6CKBrqx9t +MA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6gPKA6hjhodHRwOi8vY3JsLmQtdHJ1c3Qu +bmV0L2NybC9kLXRydXN0X2JyX3Jvb3RfY2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwQlIlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxP +PUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjOPQQD +AwNpADBmAjEAlJAtE/rhY/hhY+ithXhUkZy4kzg+GkHaQBZTQgjKL47xPoFWwKrY7RjEsK70Pvom +AjEA8yjixtsrmfu3Ubgko6SUeho/5jbiA1czijDLgsfWFBHVdWNbFJWcHwHP2NVypw87 +-----END CERTIFICATE----- + +D-TRUST EV Root CA 1 2020 +========================= +-----BEGIN CERTIFICATE----- +MIIC2zCCAmCgAwIBAgIQXwJB13qHfEwDo6yWjfv/0DAKBggqhkjOPQQDAzBIMQswCQYDVQQGEwJE +RTEVMBMGA1UEChMMRC1UcnVzdCBHbWJIMSIwIAYDVQQDExlELVRSVVNUIEVWIFJvb3QgQ0EgMSAy +MDIwMB4XDTIwMDIxMTEwMDAwMFoXDTM1MDIxMTA5NTk1OVowSDELMAkGA1UEBhMCREUxFTATBgNV +BAoTDEQtVHJ1c3QgR21iSDEiMCAGA1UEAxMZRC1UUlVTVCBFViBSb290IENBIDEgMjAyMDB2MBAG +ByqGSM49AgEGBSuBBAAiA2IABPEL3YZDIBnfl4XoIkqbz52Yv7QFJsnL46bSj8WeeHsxiamJrSc8 +ZRCC/N/DnU7wMyPE0jL1HLDfMxddxfCxivnvubcUyilKwg+pf3VlSSowZ/Rk99Yad9rDwpdhQntJ +raOCAQ0wggEJMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYEFH8QARY3OqQo5FD4pPfsazK2/umL +MA4GA1UdDwEB/wQEAwIBBjCBxgYDVR0fBIG+MIG7MD6gPKA6hjhodHRwOi8vY3JsLmQtdHJ1c3Qu +bmV0L2NybC9kLXRydXN0X2V2X3Jvb3RfY2FfMV8yMDIwLmNybDB5oHegdYZzbGRhcDovL2RpcmVj +dG9yeS5kLXRydXN0Lm5ldC9DTj1ELVRSVVNUJTIwRVYlMjBSb290JTIwQ0ElMjAxJTIwMjAyMCxP +PUQtVHJ1c3QlMjBHbWJILEM9REU/Y2VydGlmaWNhdGVyZXZvY2F0aW9ubGlzdDAKBggqhkjOPQQD +AwNpADBmAjEAyjzGKnXCXnViOTYAYFqLwZOZzNnbQTs7h5kXO9XMT8oi96CAy/m0sRtW9XLS/BnR +AjEAkfcwkz8QRitxpNA7RJvAKQIFskF3UfN5Wp6OFKBOQtJbgfM0agPnIjhQW+0ZT0MW +-----END CERTIFICATE----- + +DigiCert TLS ECC P384 Root G5 +============================= +-----BEGIN CERTIFICATE----- +MIICGTCCAZ+gAwIBAgIQCeCTZaz32ci5PhwLBCou8zAKBggqhkjOPQQDAzBOMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJjAkBgNVBAMTHURpZ2lDZXJ0IFRMUyBFQ0MgUDM4 +NCBSb290IEc1MB4XDTIxMDExNTAwMDAwMFoXDTQ2MDExNDIzNTk1OVowTjELMAkGA1UEBhMCVVMx +FzAVBgNVBAoTDkRpZ2lDZXJ0LCBJbmMuMSYwJAYDVQQDEx1EaWdpQ2VydCBUTFMgRUNDIFAzODQg +Um9vdCBHNTB2MBAGByqGSM49AgEGBSuBBAAiA2IABMFEoc8Rl1Ca3iOCNQfN0MsYndLxf3c1Tzvd +lHJS7cI7+Oz6e2tYIOyZrsn8aLN1udsJ7MgT9U7GCh1mMEy7H0cKPGEQQil8pQgO4CLp0zVozptj +n4S1mU1YoI71VOeVyaNCMEAwHQYDVR0OBBYEFMFRRVBZqz7nLFr6ICISB4CIfBFqMA4GA1UdDwEB +/wQEAwIBhjAPBgNVHRMBAf8EBTADAQH/MAoGCCqGSM49BAMDA2gAMGUCMQCJao1H5+z8blUD2Wds +Jk6Dxv3J+ysTvLd6jLRl0mlpYxNjOyZQLgGheQaRnUi/wr4CMEfDFXuxoJGZSZOoPHzoRgaLLPIx +AJSdYsiJvRmEFOml+wG4DXZDjC5Ty3zfDBeWUA== +-----END CERTIFICATE----- + +DigiCert TLS RSA4096 Root G5 +============================ +-----BEGIN CERTIFICATE----- +MIIFZjCCA06gAwIBAgIQCPm0eKj6ftpqMzeJ3nzPijANBgkqhkiG9w0BAQwFADBNMQswCQYDVQQG +EwJVUzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0 +MDk2IFJvb3QgRzUwHhcNMjEwMTE1MDAwMDAwWhcNNDYwMTE0MjM1OTU5WjBNMQswCQYDVQQGEwJV +UzEXMBUGA1UEChMORGlnaUNlcnQsIEluYy4xJTAjBgNVBAMTHERpZ2lDZXJ0IFRMUyBSU0E0MDk2 +IFJvb3QgRzUwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCz0PTJeRGd/fxmgefM1eS8 +7IE+ajWOLrfn3q/5B03PMJ3qCQuZvWxX2hhKuHisOjmopkisLnLlvevxGs3npAOpPxG02C+JFvuU 
+AT27L/gTBaF4HI4o4EXgg/RZG5Wzrn4DReW+wkL+7vI8toUTmDKdFqgpwgscONyfMXdcvyej/Ces +tyu9dJsXLfKB2l2w4SMXPohKEiPQ6s+d3gMXsUJKoBZMpG2T6T867jp8nVid9E6P/DsjyG244gXa +zOvswzH016cpVIDPRFtMbzCe88zdH5RDnU1/cHAN1DrRN/BsnZvAFJNY781BOHW8EwOVfH/jXOnV +DdXifBBiqmvwPXbzP6PosMH976pXTayGpxi0KcEsDr9kvimM2AItzVwv8n/vFfQMFawKsPHTDU9q +TXeXAaDxZre3zu/O7Oyldcqs4+Fj97ihBMi8ez9dLRYiVu1ISf6nL3kwJZu6ay0/nTvEF+cdLvvy +z6b84xQslpghjLSR6Rlgg/IwKwZzUNWYOwbpx4oMYIwo+FKbbuH2TbsGJJvXKyY//SovcfXWJL5/ +MZ4PbeiPT02jP/816t9JXkGPhvnxd3lLG7SjXi/7RgLQZhNeXoVPzthwiHvOAbWWl9fNff2C+MIk +wcoBOU+NosEUQB+cZtUMCUbW8tDRSHZWOkPLtgoRObqME2wGtZ7P6wIDAQABo0IwQDAdBgNVHQ4E +FgQUUTMc7TZArxfTJc1paPKvTiM+s0EwDgYDVR0PAQH/BAQDAgGGMA8GA1UdEwEB/wQFMAMBAf8w +DQYJKoZIhvcNAQEMBQADggIBAGCmr1tfV9qJ20tQqcQjNSH/0GEwhJG3PxDPJY7Jv0Y02cEhJhxw +GXIeo8mH/qlDZJY6yFMECrZBu8RHANmfGBg7sg7zNOok992vIGCukihfNudd5N7HPNtQOa27PShN +lnx2xlv0wdsUpasZYgcYQF+Xkdycx6u1UQ3maVNVzDl92sURVXLFO4uJ+DQtpBflF+aZfTCIITfN +MBc9uPK8qHWgQ9w+iUuQrm0D4ByjoJYJu32jtyoQREtGBzRj7TG5BO6jm5qu5jF49OokYTurWGT/ +u4cnYiWB39yhL/btp/96j1EuMPikAdKFOV8BmZZvWltwGUb+hmA+rYAQCd05JS9Yf7vSdPD3Rh9G +OUrYU9DzLjtxpdRv/PNn5AeP3SYZ4Y1b+qOTEZvpyDrDVWiakuFSdjjo4bq9+0/V77PnSIMx8IIh +47a+p6tv75/fTM8BuGJqIz3nCU2AG3swpMPdB380vqQmsvZB6Akd4yCYqjdP//fx4ilwMUc/dNAU +FvohigLVigmUdy7yWSiLfFCSCmZ4OIN1xLVaqBHG5cGdZlXPU8Sv13WFqUITVuwhd4GTWgzqltlJ +yqEI8pc7bZsEGCREjnwB8twl2F6GmrE52/WRMmrRpnCKovfepEWFJqgejF0pW8hL2JpqA15w8oVP +bEtoL8pU9ozaMv7Da4M/OMZ+ +-----END CERTIFICATE----- + +Certainly Root R1 +================= +-----BEGIN CERTIFICATE----- +MIIFRzCCAy+gAwIBAgIRAI4P+UuQcWhlM1T01EQ5t+AwDQYJKoZIhvcNAQELBQAwPTELMAkGA1UE +BhMCVVMxEjAQBgNVBAoTCUNlcnRhaW5seTEaMBgGA1UEAxMRQ2VydGFpbmx5IFJvb3QgUjEwHhcN +MjEwNDAxMDAwMDAwWhcNNDYwNDAxMDAwMDAwWjA9MQswCQYDVQQGEwJVUzESMBAGA1UEChMJQ2Vy +dGFpbmx5MRowGAYDVQQDExFDZXJ0YWlubHkgUm9vdCBSMTCCAiIwDQYJKoZIhvcNAQEBBQADggIP +ADCCAgoCggIBANA21B/q3avk0bbm+yLA3RMNansiExyXPGhjZjKcA7WNpIGD2ngwEc/csiu+kr+O +5MQTvqRoTNoCaBZ0vrLdBORrKt03H2As2/X3oXyVtwxwhi7xOu9S98zTm/mLvg7fMbedaFySpvXl +8wo0tf97ouSHocavFwDvA5HtqRxOcT3Si2yJ9HiG5mpJoM610rCrm/b01C7jcvk2xusVtyWMOvwl +DbMicyF0yEqWYZL1LwsYpfSt4u5BvQF5+paMjRcCMLT5r3gajLQ2EBAHBXDQ9DGQilHFhiZ5shGI +XsXwClTNSaa/ApzSRKft43jvRl5tcdF5cBxGX1HpyTfcX35pe0HfNEXgO4T0oYoKNp43zGJS4YkN +KPl6I7ENPT2a/Z2B7yyQwHtETrtJ4A5KVpK8y7XdeReJkd5hiXSSqOMyhb5OhaRLWcsrxXiOcVTQ +AjeZjOVJ6uBUcqQRBi8LjMFbvrWhsFNunLhgkR9Za/kt9JQKl7XsxXYDVBtlUrpMklZRNaBA2Cnb +rlJ2Oy0wQJuK0EJWtLeIAaSHO1OWzaMWj/Nmqhexx2DgwUMFDO6bW2BvBlyHWyf5QBGenDPBt+U1 +VwV/J84XIIwc/PH72jEpSe31C4SnT8H2TsIonPru4K8H+zMReiFPCyEQtkA6qyI6BJyLm4SGcprS +p6XEtHWRqSsjAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNVHRMBAf8EBTADAQH/MB0GA1Ud +DgQWBBTgqj8ljZ9EXME66C6ud0yEPmcM9DANBgkqhkiG9w0BAQsFAAOCAgEAuVevuBLaV4OPaAsz +HQNTVfSVcOQrPbA56/qJYv331hgELyE03fFo8NWWWt7CgKPBjcZq91l3rhVkz1t5BXdm6ozTaw3d +8VkswTOlMIAVRQdFGjEitpIAq5lNOo93r6kiyi9jyhXWx8bwPWz8HA2YEGGeEaIi1wrykXprOQ4v +MMM2SZ/g6Q8CRFA3lFV96p/2O7qUpUzpvD5RtOjKkjZUbVwlKNrdrRT90+7iIgXr0PK3aBLXWopB +GsaSpVo7Y0VPv+E6dyIvXL9G+VoDhRNCX8reU9ditaY1BMJH/5n9hN9czulegChB8n3nHpDYT3Y+ +gjwN/KUD+nsa2UUeYNrEjvn8K8l7lcUq/6qJ34IxD3L/DCfXCh5WAFAeDJDBlrXYFIW7pw0WwfgH +JBu6haEaBQmAupVjyTrsJZ9/nbqkRxWbRHDxakvWOF5D8xh+UG7pWijmZeZ3Gzr9Hb4DJqPb1OG7 +fpYnKx3upPvaJVQTA945xsMfTZDsjxtK0hzthZU4UHlG1sGQUDGpXJpuHfUzVounmdLyyCwzk5Iw +x06MZTMQZBf9JBeW0Y3COmor6xOLRPIh80oat3df1+2IpHLlOR+Vnb5nwXARPbv0+Em34yaXOp/S +X3z7wJl8OSngex2/DaeP0ik0biQVy96QXr8axGbqwua6OV+KmalBWQewLK8= +-----END CERTIFICATE----- + +Certainly Root E1 +================= +-----BEGIN CERTIFICATE----- 
+MIIB9zCCAX2gAwIBAgIQBiUzsUcDMydc+Y2aub/M+DAKBggqhkjOPQQDAzA9MQswCQYDVQQGEwJV +UzESMBAGA1UEChMJQ2VydGFpbmx5MRowGAYDVQQDExFDZXJ0YWlubHkgUm9vdCBFMTAeFw0yMTA0 +MDEwMDAwMDBaFw00NjA0MDEwMDAwMDBaMD0xCzAJBgNVBAYTAlVTMRIwEAYDVQQKEwlDZXJ0YWlu +bHkxGjAYBgNVBAMTEUNlcnRhaW5seSBSb290IEUxMHYwEAYHKoZIzj0CAQYFK4EEACIDYgAE3m/4 +fxzf7flHh4axpMCK+IKXgOqPyEpeKn2IaKcBYhSRJHpcnqMXfYqGITQYUBsQ3tA3SybHGWCA6TS9 +YBk2QNYphwk8kXr2vBMj3VlOBF7PyAIcGFPBMdjaIOlEjeR2o0IwQDAOBgNVHQ8BAf8EBAMCAQYw +DwYDVR0TAQH/BAUwAwEB/zAdBgNVHQ4EFgQU8ygYy2R17ikq6+2uI1g4hevIIgcwCgYIKoZIzj0E +AwMDaAAwZQIxALGOWiDDshliTd6wT99u0nCK8Z9+aozmut6Dacpps6kFtZaSF4fC0urQe87YQVt8 +rgIwRt7qy12a7DLCZRawTDBcMPPaTnOGBtjOiQRINzf43TNRnXCve1XYAS59BWQOhriR +-----END CERTIFICATE----- + +E-Tugra Global Root CA RSA v3 +============================= +-----BEGIN CERTIFICATE----- +MIIF8zCCA9ugAwIBAgIUDU3FzRYilZYIfrgLfxUGNPt5EDQwDQYJKoZIhvcNAQELBQAwgYAxCzAJ +BgNVBAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVncmEgRUJHIEEuUy4xHTAb +BgNVBAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290 +IENBIFJTQSB2MzAeFw0yMDAzMTgwOTA3MTdaFw00NTAzMTIwOTA3MTdaMIGAMQswCQYDVQQGEwJU +UjEPMA0GA1UEBxMGQW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRF +LVR1Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBSU0Eg +djMwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQCiZvCJt3J77gnJY9LTQ91ew6aEOErx +jYG7FL1H6EAX8z3DeEVypi6Q3po61CBxyryfHUuXCscxuj7X/iWpKo429NEvx7epXTPcMHD4QGxL +sqYxYdE0PD0xesevxKenhOGXpOhL9hd87jwH7eKKV9y2+/hDJVDqJ4GohryPUkqWOmAalrv9c/SF +/YP9f4RtNGx/ardLAQO/rWm31zLZ9Vdq6YaCPqVmMbMWPcLzJmAy01IesGykNz709a/r4d+ABs8q +QedmCeFLl+d3vSFtKbZnwy1+7dZ5ZdHPOrbRsV5WYVB6Ws5OUDGAA5hH5+QYfERaxqSzO8bGwzrw +bMOLyKSRBfP12baqBqG3q+Sx6iEUXIOk/P+2UNOMEiaZdnDpwA+mdPy70Bt4znKS4iicvObpCdg6 +04nmvi533wEKb5b25Y08TVJ2Glbhc34XrD2tbKNSEhhw5oBOM/J+JjKsBY04pOZ2PJ8QaQ5tndLB +eSBrW88zjdGUdjXnXVXHt6woq0bM5zshtQoK5EpZ3IE1S0SVEgpnpaH/WwAH0sDM+T/8nzPyAPiM +bIedBi3x7+PmBvrFZhNb/FAHnnGGstpvdDDPk1Po3CLW3iAfYY2jLqN4MpBs3KwytQXk9TwzDdbg +h3cXTJ2w2AmoDVf3RIXwyAS+XF1a4xeOVGNpf0l0ZAWMowIDAQABo2MwYTAPBgNVHRMBAf8EBTAD +AQH/MB8GA1UdIwQYMBaAFLK0ruYt9ybVqnUtdkvAG1Mh0EjvMB0GA1UdDgQWBBSytK7mLfcm1ap1 +LXZLwBtTIdBI7zAOBgNVHQ8BAf8EBAMCAQYwDQYJKoZIhvcNAQELBQADggIBAImocn+M684uGMQQ +gC0QDP/7FM0E4BQ8Tpr7nym/Ip5XuYJzEmMmtcyQ6dIqKe6cLcwsmb5FJ+Sxce3kOJUxQfJ9emN4 +38o2Fi+CiJ+8EUdPdk3ILY7r3y18Tjvarvbj2l0Upq7ohUSdBm6O++96SmotKygY/r+QLHUWnw/q +ln0F7psTpURs+APQ3SPh/QMSEgj0GDSz4DcLdxEBSL9htLX4GdnLTeqjjO/98Aa1bZL0SmFQhO3s +SdPkvmjmLuMxC1QLGpLWgti2omU8ZgT5Vdps+9u1FGZNlIM7zR6mK7L+d0CGq+ffCsn99t2HVhjY +sCxVYJb6CH5SkPVLpi6HfMsg2wY+oF0Dd32iPBMbKaITVaA9FCKvb7jQmhty3QUBjYZgv6Rn7rWl +DdF/5horYmbDB7rnoEgcOMPpRfunf/ztAmgayncSd6YAVSgU7NbHEqIbZULpkejLPoeJVF3Zr52X +nGnnCv8PWniLYypMfUeUP95L6VPQMPHF9p5J3zugkaOj/s1YzOrfr28oO6Bpm4/srK4rVJ2bBLFH +IK+WEj5jlB0E5y67hscMmoi/dkfv97ALl2bSRM9gUgfh1SxKOidhd8rXj+eHDjD/DLsE4mHDosiX +YY60MGo8bcIHX0pzLz/5FooBZu+6kcpSV3uu1OYP3Qt6f4ueJiDPO++BcYNZ +-----END CERTIFICATE----- + +E-Tugra Global Root CA ECC v3 +============================= +-----BEGIN CERTIFICATE----- +MIICpTCCAiqgAwIBAgIUJkYZdzHhT28oNt45UYbm1JeIIsEwCgYIKoZIzj0EAwMwgYAxCzAJBgNV +BAYTAlRSMQ8wDQYDVQQHEwZBbmthcmExGTAXBgNVBAoTEEUtVHVncmEgRUJHIEEuUy4xHTAbBgNV +BAsTFEUtVHVncmEgVHJ1c3QgQ2VudGVyMSYwJAYDVQQDEx1FLVR1Z3JhIEdsb2JhbCBSb290IENB +IEVDQyB2MzAeFw0yMDAzMTgwOTQ2NThaFw00NTAzMTIwOTQ2NThaMIGAMQswCQYDVQQGEwJUUjEP +MA0GA1UEBxMGQW5rYXJhMRkwFwYDVQQKExBFLVR1Z3JhIEVCRyBBLlMuMR0wGwYDVQQLExRFLVR1 +Z3JhIFRydXN0IENlbnRlcjEmMCQGA1UEAxMdRS1UdWdyYSBHbG9iYWwgUm9vdCBDQSBFQ0MgdjMw 
+djAQBgcqhkjOPQIBBgUrgQQAIgNiAASOmCm/xxAeJ9urA8woLNheSBkQKczLWYHMjLiSF4mDKpL2 +w6QdTGLVn9agRtwcvHbB40fQWxPa56WzZkjnIZpKT4YKfWzqTTKACrJ6CZtpS5iB4i7sAnCWH/31 +Rs7K3IKjYzBhMA8GA1UdEwEB/wQFMAMBAf8wHwYDVR0jBBgwFoAU/4Ixcj75xGZsrTie0bBRiKWQ +zPUwHQYDVR0OBBYEFP+CMXI++cRmbK04ntGwUYilkMz1MA4GA1UdDwEB/wQEAwIBBjAKBggqhkjO +PQQDAwNpADBmAjEA5gVYaWHlLcoNy/EZCL3W/VGSGn5jVASQkZo1kTmZ+gepZpO6yGjUij/67W4W +Aie3AjEA3VoXK3YdZUKWpqxdinlW2Iob35reX8dQj7FbcQwm32pAAOwzkSFxvmjkI6TZraE3 +-----END CERTIFICATE----- diff --git a/deps/connection/.fetch b/deps/connection/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/connection/.hex b/deps/connection/.hex new file mode 100644 index 0000000000000000000000000000000000000000..157b77a0f219a9115ab0626c06ad760dc7391ca0 GIT binary patch literal 274 zcmZ9{O;W=!3>,<<"connection">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>,<<"Connection behaviour for connection processes">>}. +{<<"elixir">>,<<"~> 1.7">>}. +{<<"files">>, + [<<"lib">>,<<"lib/connection.ex">>,<<"mix.exs">>,<<"README.md">>, + <<"LICENSE">>]}. +{<<"licenses">>,[<<"Apache 2.0">>]}. +{<<"links">>,[{<<"Github">>,<<"https://github.com/elixir-ecto/connection">>}]}. +{<<"name">>,<<"connection">>}. +{<<"requirements">>,[]}. +{<<"version">>,<<"1.1.0">>}. diff --git a/deps/connection/lib/connection.ex b/deps/connection/lib/connection.ex new file mode 100644 index 0000000..fcfc038 --- /dev/null +++ b/deps/connection/lib/connection.ex @@ -0,0 +1,829 @@ +defmodule Connection do + @moduledoc """ + A behaviour module for implementing connection processes. + + The `Connection` behaviour is a superset of the `GenServer` behaviour. The + additional return values and callbacks are designed to aid building a + connection process that can handle a peer being (temporarily) unavailable. 
+ + An example `Connection` process: + + defmodule TCPConnection do + + use Connection + + def start_link(host, port, opts, timeout \\\\ 5000) do + Connection.start_link(__MODULE__, {host, port, opts, timeout}) + end + + def send(conn, data), do: Connection.call(conn, {:send, data}) + + def recv(conn, bytes, timeout \\\\ 3000) do + Connection.call(conn, {:recv, bytes, timeout}) + end + + def close(conn), do: Connection.call(conn, :close) + + def init({host, port, opts, timeout}) do + s = %{host: host, port: port, opts: opts, timeout: timeout, sock: nil} + {:connect, :init, s} + end + + def connect(_, %{sock: nil, host: host, port: port, opts: opts, + timeout: timeout} = s) do + case :gen_tcp.connect(host, port, [active: false] ++ opts, timeout) do + {:ok, sock} -> + {:ok, %{s | sock: sock}} + {:error, _} -> + {:backoff, 1000, s} + end + end + + def disconnect(info, %{sock: sock} = s) do + :ok = :gen_tcp.close(sock) + case info do + {:close, from} -> + Connection.reply(from, :ok) + {:error, :closed} -> + :error_logger.format("Connection closed~n", []) + {:error, reason} -> + reason = :inet.format_error(reason) + :error_logger.format("Connection error: ~s~n", [reason]) + end + {:connect, :reconnect, %{s | sock: nil}} + end + + def handle_call(_, _, %{sock: nil} = s) do + {:reply, {:error, :closed}, s} + end + + def handle_call({:send, data}, _, %{sock: sock} = s) do + case :gen_tcp.send(sock, data) do + :ok -> + {:reply, :ok, s} + {:error, _} = error -> + {:disconnect, error, error, s} + end + end + def handle_call({:recv, bytes, timeout}, _, %{sock: sock} = s) do + case :gen_tcp.recv(sock, bytes, timeout) do + {:ok, _} = ok -> + {:reply, ok, s} + {:error, :timeout} = timeout -> + {:reply, timeout, s} + {:error, _} = error -> + {:disconnect, error, error, s} + end + end + def handle_call(:close, from, s) do + {:disconnect, {:close, from}, s} + end + end + + The example above follows a common pattern. Try to connect immediately, if + that fails backoff and retry after a delay. If a retry fails make another + attempt after another delay. If the process disconnects a reconnection attempt + is made immediately, if that fails backoff begins. + + Importantly when backing off requests will still be received by the process, + which will need to be handled. In the above example the process replies with + `{:error, :closed}` when it is disconnected. + """ + + @behaviour :gen_server + + @doc """ + Called when the connection process is first started. `start_link/3` will block + until it returns. + + Returning `{:ok, state}` will cause `start_link/3` to return + `{:ok, pid}` and the process to enter its loop with state `state` without + calling `connect/2`. + + This return value is useful when the process connects inside `init/1` so that + `start_link/3` blocks until a connection is established. + + Returning `{:ok, state, timeout}` is similar to `{:ok, state}` + except `handle_info(:timeout, state)` will be called after `timeout` if no + message arrives. + + Returning `{:ok, state, :hibernate}` is similar to + `{:ok, state}` except the process is hibernated awaiting a message. + + Returning `{:connect, info, state}` will cause `start_link/3` to return + `{:ok, pid}` and `connect(info, state)` will be called immediately - even if + messages are in the processes message queue. `state` contains the state of the + process and `info` should contain any information not contained in the state + that is needed to connect. 
+ + This return value is very useful because connecting in `connect/2` will not + block the parent process and a connection attempt is guaranteed to occur + before any messages are handled, which is not possible when using a + `GenServer`. + + Returning `{:backoff, timeout, state}` will cause `start_link/3` to return + `{:ok, pid}` and the process to enter its normal loop with state `state`. + `connect(:backoff, state)` is called after `timeout` if `connect/2` or + `disconnect/2` is not called within the timeout. + + This return value can be used to delay or stagger the initial connection + attempt. + + Returning `{:backoff, timeout, state, timeout2}` is similar to + `{:backoff, timeout, state}` except `handle_info(:timeout, state)` will be + called after `timeout2` if no message arrives. + + Returning `{:backoff, timeout, state, :hibernate}` is similar to + `{:backoff, timeout, state}` except the process hibernates. + + Returning `:ignore` will cause `start_link/3` to return `:ignore` and the + process will exit normally without entering the loop or calling + `terminate/2`. + + Returning `{:stop, reason}` will cause `start_link/3` to return + `{:error, reason}` and the process to exit with reason `reason` without + entering the loop or calling `terminate/2`. + """ + @callback init(any) :: + {:ok, any} | {:ok, any, timeout | :hibernate} | + {:connect, any, any} | + {:backoff, timeout, any} | {:backoff, timeout, any, timeout | :hibernate} | + :ignore | {:stop, any} + + + @doc """ + Called when the process should try to connect. The first argument will either + be the `info` term from `{:connect, info, state}` or + `{:connect, info, reply, state}`, or `:backoff` if the connection attempt is + triggered by backing off. + + It might be beneficial to do handshaking in this callback if connecting is + successful. + + Returning `{:ok, state}` or `{:ok, state, timeout | :hibernate}` will cause + the process to continue its loop. This should be returned when the connection + attempt was successful. In the later case `handle_info(:timeout, state)` is + called after `timeout` if no message has been received, if the third element + is a timeout. Otherwise if the third element is `:hibernate` the process + hibernates. + + Returning `{:backoff, timeout, state}` will cause the process to continue + its loop but `connect(:backoff, state)` will be called after `timeout` if + `connect/2` or `disconnect/2` is not called before that point. + + This return value is used when a connection attempt fails but another attempt + should be made after a delay. It might be beneficial to increase the delay + up to a maximum if successive attempts fail to prevent unnecessary work. If + several connection processes are connecting to the same peer it may also be + beneficial to add some jitter (randomness) to the delays. This spreads out the + connection attempts and helps prevent many attempts occurring at the same time. + + Returning `{:backoff, timeout, state, timeout2 | :hibernate}` is similar to + `{:backoff, timeout, state}` except `handle_info(:timeout, state)` is called + after `timeout2` if no message has been received, or if `:hibernate`, the + process hibernates. + + Returning `{:stop, reason, state}` will terminate the loop and call + `terminate(reason, state)` before the process exits with reason `reason`. 
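+
+  For example, a `connect/2` clause that fails to connect might grow its retry
+  delay up to a cap and add jitter before returning `{:backoff, timeout, state}`.
+  This is a minimal sketch: the `:retries` key in the state and the
+  `try_connect/1` helper are illustrative and not part of this module.
+
+      def connect(_info, %{retries: retries} = state) do
+        case try_connect(state) do
+          {:ok, state} ->
+            {:ok, %{state | retries: 0}}
+          {:error, _reason} ->
+            # Double the delay on each failed attempt, cap it at 30 seconds,
+            # and add up to 1 second of jitter to spread out reconnections.
+            delay = min(1_000 * trunc(:math.pow(2, retries)), 30_000)
+            {:backoff, delay + :rand.uniform(1_000), %{state | retries: retries + 1}}
+        end
+      end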
+ """ + @callback connect(any, any) :: + {:ok, any} | {:ok, any, timeout | :hibernate} | + {:backoff, timeout, any} | {:backoff, timeout, any, timeout | :hibernate} | + {:stop, any, any} + + @doc """ + Called when the process should disconnect. The first argument will either + be the `info` term from `{:disconnect, info, state}` or + `{:disconnect, info, reply, state}`. This callback should do any cleaning up + required to disconnect the connection and update the state of the process. + + Returning `{:connect, info, state}` will call `connect(info, state)` + immediately - even if there are messages in the message queue. + + Returning `{:backoff, timeout, state}` or + `{:backoff, timeout, state, timeout2 | :hibernate}` starts a backoff timer and + behaves the same as when returned from `connect/2`. See the `connect/2` + callback for more information. + + Returning `{:noconnect, state}` or `{:noconnect, state, timeout | :hibernate}` + will cause the process to continue is loop (and NOT call `connect/2` to + try to reconnect). In the later case a timeout is started or the process + hibernates. + + Returning `{:stop, reason, state}` will terminate the loop and call + `terminate(reason, state)` before the process exits with reason `reason`. + """ + @callback disconnect(any, any) :: + {:connect, any, any} | + {:backoff, timeout, any} | {:backoff, timeout, any, timeout | :hibernate} | + {:noconnect, any} | {:noconnect, any, timeout | :hibernate} | + {:stop, any, any} + + @doc """ + Called when the process receives a call message sent by `call/3`. This + callback has the same arguments as the `GenServer` equivalent and the + `:reply`, `:noreply` and `:stop` return tuples behave the same. However + there are a few additional return values: + + Returning `{:connect, info, reply, state}` will reply to the call with `reply` + and immediately call `connect(info, state)`. Similarly for + `{:disconnect, info, reply, state}`, except `disconnect/2` is called. + + Returning `{:connect, info, state}` or `{:disconnect, info, state}` will + call the relevant callback immediately without replying to the call. This + might be useful when the call should block until the process has connected, + failed to connect or disconnected. The second argument passed to this callback + can be included in the `info` or `state` terms and a reply sent in the next + or a later callback using `reply/2`. + """ + @callback handle_call(any, {pid, any}, any) :: + {:reply, any, any} | {:reply, any, any, timeout | :hibernate} | + {:noreply, any} | {:noreply, any, timeout | :hibernate} | + {:disconnect | :connect, any, any, any} | + {:disconnect | :connect, any, any} | + {:stop, any, any} | {:stop, any, any, any} + + @doc """ + Called when the process receives a cast message sent by `cast/3`. This + callback has the same arguments as the `GenServer` equivalent and the + `:noreply` and `:stop` return tuples behave the same. However + there are two additional return values: + + Returning `{:connect, info, state}` will immediately call + `connect(info, state)`. Similarly for `{:disconnect, info, state}`, + except `disconnect/2` is called. + """ + @callback handle_cast(any, any) :: + {:noreply, any} | {:noreply, any, timeout | :hibernate} | + {:disconnect | :connect, any, any} | + {:stop, any, any} + + @doc """ + Called when the process receives a message that is not a call or cast. This + callback has the same arguments as the `GenServer` equivalent and the `:noreply` + and `:stop` return tuples behave the same. 
However there are two additional + return values: + + Returning `{:connect, info, state}` will immediately call + `connect(info, state)`. Similarly for `{:disconnect, info, state}`, + except `disconnect/2` is called. + """ + @callback handle_info(any, any) :: + {:noreply, any} | {:noreply, any, timeout | :hibernate} | + {:disconnect | :connect, any, any} | + {:stop, any, any} + + @doc """ + This callback is the same as the `GenServer` equivalent and is used to change + the state when loading a different version of the callback module. + """ + @callback code_change(any, any, any) :: {:ok, any} + + @doc """ + This callback is the same as the `GenServer` equivalent and is called when the + process terminates. The first argument is the reason the process is about + to exit with. + """ + @callback terminate(any, any) :: any + + defmacro __using__(_) do + quote location: :keep do + @behaviour Connection + + # The default implementations of init/1, handle_call/3, handle_info/2, + # handle_cast/2, terminate/2 and code_change/3 have been taken verbatim + # from Elixir's GenServer default implementation. + + @doc false + def init(args) do + {:ok, args} + end + + @doc false + def handle_call(msg, _from, state) do + # We do this to trick dialyzer to not complain about non-local returns. + reason = {:bad_call, msg} + case :erlang.phash2(1, 1) do + 0 -> exit(reason) + 1 -> {:stop, reason, state} + end + end + + @doc false + def handle_info(_msg, state) do + {:noreply, state} + end + + @doc false + def handle_cast(msg, state) do + # We do this to trick dialyzer to not complain about non-local returns. + reason = {:bad_cast, msg} + case :erlang.phash2(1, 1) do + 0 -> exit(reason) + 1 -> {:stop, reason, state} + end + end + + @doc false + def terminate(_reason, _state) do + :ok + end + + @doc false + def code_change(_old, state, _extra) do + {:ok, state} + end + + @doc false + def connect(info, state) do + reason = {:bad_connect, info} + case :erlang.phash2(1, 1) do + 0 -> exit(reason) + 1 -> {:stop, reason, state} + end + end + + @doc false + def disconnect(info, state) do + reason = {:bad_disconnect, info} + case :erlang.phash2(1, 1) do + 0 -> exit(reason) + 1 -> {:stop, reason, state} + end + end + + defoverridable [init: 1, handle_call: 3, handle_info: 2, + handle_cast: 2, terminate: 2, code_change: 3, + connect: 2, disconnect: 2] + end + end + + @doc """ + Starts a `Connection` process linked to the current process. + + This function is used to start a `Connection` process in a supervision tree. + The process will be started by calling `init/1` in the callback module with + the given argument. + + This function will return after `init/1` has returned in the spawned process. + The return values are controlled by the `init/1` callback. + + See `GenServer.start_link/3` for more information. + """ + @spec start_link(module, any, GenServer.options) :: GenServer.on_start + def start_link(mod, args, opts \\ []) do + start(mod, args, opts, :link) + end + + @doc """ + Starts a `Connection` process without links (outside of a supervision tree). + + See `start_link/3` for more information. + """ + @spec start(module, any, GenServer.options) :: GenServer.on_start + def start(mod, args, opts \\ []) do + start(mod, args, opts, :nolink) + end + + @doc """ + Sends a synchronous call to the `Connection` process and waits for a reply. + + See `GenServer.call/2` for more information. 
+ """ + defdelegate call(conn, req), to: :gen_server + + @doc """ + Sends a synchronous request to the `Connection` process and waits for a reply. + + See `GenServer.call/3` for more information. + """ + defdelegate call(conn, req, timeout), to: :gen_server + + @doc """ + Sends a asynchronous request to the `Connection` process. + + See `GenServer.cast/2` for more information. + """ + defdelegate cast(conn, req), to: GenServer + + @doc """ + Sends a reply to a request sent by `call/3`. + + See `GenServer.reply/2` for more information. + """ + defdelegate reply(from, response), to: :gen_server + + defstruct [:mod, :backoff, :raise, :mod_state] + + ## :gen callback + + @doc false + def init_it(starter, _, name, mod, args, opts) do + Process.put(:"$initial_call", {mod, :init, 1}) + try do + apply(mod, :init, [args]) + catch + :exit, reason -> + init_stop(starter, name, reason) + :error, reason -> + init_stop(starter, name, {reason, __STACKTRACE__}) + :throw, value -> + reason = {{:nocatch, value}, __STACKTRACE__} + init_stop(starter, name, reason) + else + {:ok, mod_state} -> + :proc_lib.init_ack(starter, {:ok, self()}) + enter_loop(mod, nil, mod_state, name, opts, :infinity) + {:ok, mod_state, timeout} -> + :proc_lib.init_ack(starter, {:ok, self()}) + enter_loop(mod, nil, mod_state, name, opts, timeout) + {:connect, info, mod_state} -> + :proc_lib.init_ack(starter, {:ok, self()}) + enter_connect(mod, info, mod_state, name, opts) + {:backoff, backoff_timeout, mod_state} -> + backoff = start_backoff(backoff_timeout) + :proc_lib.init_ack(starter, {:ok, self()}) + enter_loop(mod, backoff, mod_state, name, opts, :infinity) + {:backoff, backoff_timeout, mod_state, timeout} -> + backoff = start_backoff(backoff_timeout) + :proc_lib.init_ack(starter, {:ok, self()}) + enter_loop(mod, backoff, mod_state, name, opts, timeout) + :ignore -> + _ = unregister(name) + :proc_lib.init_ack(starter, :ignore) + exit(:normal) + {:stop, reason} -> + init_stop(starter, name, reason) + other -> + init_stop(starter, name, {:bad_return_value, other}) + end + end + ## :proc_lib callback + + @doc false + def enter_loop(mod, backoff, mod_state, name, opts, :hibernate) do + args = [mod, backoff, mod_state, name, opts, :infinity] + :proc_lib.hibernate(__MODULE__, :enter_loop, args) + end + def enter_loop(mod, backoff, mod_state, name, opts, timeout) + when name === self() do + s = %Connection{mod: mod, backoff: backoff, mod_state: mod_state, + raise: nil} + :gen_server.enter_loop(__MODULE__, opts, s, timeout) + end + def enter_loop(mod, backoff, mod_state, name, opts, timeout) do + s = %Connection{mod: mod, backoff: backoff, mod_state: mod_state, + raise: nil} + :gen_server.enter_loop(__MODULE__, opts, s, name, timeout) + end + + @doc false + def init(_) do + {:stop, __MODULE__} + end + + @doc false + def handle_call(request, from, %{mod: mod, mod_state: mod_state} = s) do + try do + apply(mod, :handle_call, [request, from, mod_state]) + catch + :throw, value -> + :erlang.raise(:error, {:nocatch, value}, __STACKTRACE__) + else + {:noreply, mod_state} = noreply -> + put_elem(noreply, 1, %{s | mod_state: mod_state}) + {:noreply, mod_state, _} = noreply -> + put_elem(noreply, 1, %{s | mod_state: mod_state}) + {:reply, _, mod_state} = reply -> + put_elem(reply, 2, %{s | mod_state: mod_state}) + {:reply, _, mod_state, _} = reply -> + put_elem(reply, 2, %{s | mod_state: mod_state}) + {:connect, info, mod_state} -> + connect(info, mod_state, s) + {:connect, info, reply, mod_state} -> + reply(from, reply) + connect(info, mod_state, 
s) + {:disconnect, info, mod_state} -> + disconnect(info, mod_state, s) + {:disconnect, info, reply, mod_state} -> + reply(from, reply) + disconnect(info, mod_state, s) + {:stop, _, mod_state} = stop -> + put_elem(stop, 2, %{s | mod_state: mod_state}) + {:stop, _, _, mod_state} = stop -> + put_elem(stop, 3, %{s | mod_state: mod_state}) + other -> + {:stop, {:bad_return_value, other}, %{s | mod_state: mod_state}} + end + end + + @doc false + def handle_cast(request, s) do + handle_async(:handle_cast, request, s) + end + + @doc false + def handle_info({:timeout, backoff, __MODULE__}, + %{backoff: backoff, mod_state: mod_state} = s) do + connect(:backoff, mod_state, %{s | backoff: nil}) + end + def handle_info(msg, s) do + handle_async(:handle_info, msg, s) + end + + @doc false + def code_change(old_vsn, %{mod: mod, mod_state: mod_state} = s, extra) do + try do + apply(mod, :code_change, [old_vsn, mod_state, extra]) + catch + :throw, value -> + exit({{:nocatch, value}, __STACKTRACE__}) + else + {:ok, mod_state} -> + {:ok, %{s | mod_state: mod_state}} + end + end + + @doc false + def format_status(:normal, [pdict, %{mod: mod, mod_state: mod_state}]) do + try do + apply(mod, :format_status, [:normal, [pdict, mod_state]]) + catch + _, _ -> + [{:data, [{'State', mod_state}]}] + else + mod_status -> + mod_status + end + end + def format_status(:terminate, [pdict, %{mod: mod, mod_state: mod_state}]) do + try do + apply(mod, :format_status, [:terminate, [pdict, mod_state]]) + catch + _, _ -> + mod_state + else + mod_state -> + mod_state + end + end + + @doc false + def terminate(reason, %{mod: mod, mod_state: mod_state, raise: nil}) do + apply(mod, :terminate, [reason, mod_state]) + end + def terminate(stop, %{raise: {class, reason, stack}} = s) do + %{mod: mod, mod_state: mod_state} = s + try do + apply(mod, :terminate, [stop, mod_state]) + catch + :throw, value -> + :erlang.raise(:error, {:nocatch, value}, __STACKTRACE__) + else + _ when stop in [:normal, :shutdown] -> + :ok + _ when tuple_size(stop) == 2 and elem(stop, 0) == :shutdown -> + :ok + _ -> + :erlang.raise(class, reason, stack) + end + end + + # start helpers + + defp start(mod, args, options, link) do + case Keyword.pop(options, :name) do + {nil, opts} -> + :gen.start(__MODULE__, link, mod, args, opts) + {atom, opts} when is_atom(atom) -> + :gen.start(__MODULE__, link, {:local, atom}, mod, args, opts) + {{:global, _} = name, opts} -> + :gen.start(__MODULE__, link, name, mod, args, opts) + {{:via, _, _} = name, opts} -> + :gen.start(__MODULE__, link, name, mod, args, opts) + end + end + + # init helpers + + defp init_stop(starter, name, reason) do + _ = unregister(name) + :proc_lib.init_ack(starter, {:error, reason}) + exit(reason) + end + + defp unregister(name) when name === self(), do: :ok + defp unregister({:local, name}), do: Process.unregister(name) + defp unregister({:global, name}), do: :global.unregister_name(name) + defp unregister({:via, mod, name}), do: apply(mod, :unregister_name, [name]) + + defp enter_connect(mod, info, mod_state, name, opts) do + try do + apply(mod, :connect, [info, mod_state]) + catch + :exit, reason -> + report_reason = {:EXIT, {reason, __STACKTRACE__}} + enter_terminate(mod, mod_state, name, reason, report_reason) + :error, reason -> + reason = {reason, __STACKTRACE__} + enter_terminate(mod, mod_state, name, reason, {:EXIT, reason}) + :throw, value -> + reason = {{:nocatch, value}, __STACKTRACE__} + enter_terminate(mod, mod_state, name, reason, {:EXIT, reason}) + else + {:ok, mod_state} -> + 
enter_loop(mod, nil, mod_state, name, opts, :infinity) + {:ok, mod_state, timeout} -> + enter_loop(mod, nil, mod_state, name, opts, timeout) + {:backoff, backoff_timeout, mod_state} -> + backoff = start_backoff(backoff_timeout) + enter_loop(mod, backoff, mod_state, name, opts, :infinity) + {:backoff, backoff_timeout, mod_state, timeout} -> + backoff = start_backoff(backoff_timeout) + enter_loop(mod, backoff, mod_state, name, opts, timeout) + {:stop, reason, mod_state} -> + enter_terminate(mod, mod_state, name, reason, {:stop, reason}) + other -> + reason = {:bad_return_value, other} + enter_terminate(mod, mod_state, name, reason, {:stop, reason}) + end + end + + defp enter_terminate(mod, mod_state, name, reason, report_reason) do + try do + apply(mod, :terminate, [reason, mod_state]) + catch + :exit, reason -> + report_reason = {:EXIT, {reason, __STACKTRACE__}} + enter_stop(mod, mod_state, name, reason, report_reason) + :error, reason -> + reason = {reason, __STACKTRACE__} + enter_stop(mod, mod_state, name, reason, {:EXIT, reason}) + :throw, value -> + reason = {{:nocatch, value}, __STACKTRACE__} + enter_stop(mod, mod_state, name, reason, {:EXIT, reason}) + else + _ -> + enter_stop(mod, mod_state, name, reason, report_reason) + end + end + + defp enter_stop(_, _, _, :normal, {:stop, :normal}), do: exit(:normal) + defp enter_stop(_, _, _, :shutdown, {:stop, :shutdown}), do: exit(:shutdown) + defp enter_stop(_, _, _, {:shutdown, reason} = shutdown, + {:stop, {:shutdown, reason}}) do + exit(shutdown) + end + defp enter_stop(mod, mod_state, name, reason, {_, reason2}) do + s = %{mod: mod, backoff: nil, mod_state: mod_state} + mod_state = format_status(:terminate, [Process.get(), s]) + format = '** Generic server ~p terminating \n' ++ + '** Last message in was ~p~n' ++ ## No last message + '** When Server state == ~p~n' ++ + '** Reason for termination == ~n** ~p~n' + args = [report_name(name), nil, mod_state, report_reason(reason2)] + :error_logger.format(format, args) + exit(reason) + end + + defp report_name(name) when name === self(), do: name + defp report_name({:local, name}), do: name + defp report_name({:global, name}), do: name + defp report_name({:via, _, name}), do: name + + defp report_reason({:undef, [{mod, fun, args, _} | _] = stack} = reason) do + cond do + :code.is_loaded(mod) === false -> + {:"module could not be loaded", stack} + not function_exported?(mod, fun, length(args)) -> + {:"function not exported", stack} + true -> + reason + end + end + defp report_reason(reason) do + reason + end + + ## backoff helpers + + defp start_backoff(:infinity), do: nil + defp start_backoff(timeout) do + :erlang.start_timer(timeout, self(), __MODULE__) + end + + defp cancel_backoff(%{backoff: nil} = s), do: s + defp cancel_backoff(%{backoff: backoff} = s) do + case :erlang.cancel_timer(backoff) do + false -> + flush_backoff(backoff) + _ -> + :ok + end + %{s | backoff: nil} + end + + defp flush_backoff(backoff) do + receive do + {:timeout, ^backoff, __MODULE__} -> + :ok + after + 0 -> + :ok + end + end + + ## GenServer helpers + + defp connect(info, mod_state, %{mod: mod} = s) do + s = cancel_backoff(s) + try do + apply(mod, :connect, [info, mod_state]) + catch + class, reason -> + stack = __STACKTRACE__ + callback_stop(class, reason, stack, %{s | mod_state: mod_state}) + else + {:ok, mod_state} -> + {:noreply, %{s | mod_state: mod_state}} + {:ok, mod_state, timeout} -> + {:noreply, %{s | mod_state: mod_state}, timeout} + {:backoff, backoff_timeout, mod_state} -> + backoff = 
start_backoff(backoff_timeout) + {:noreply, %{s | backoff: backoff, mod_state: mod_state}} + {:backoff, backoff_timeout, mod_state, timeout} -> + backoff = start_backoff(backoff_timeout) + {:noreply, %{s | backoff: backoff, mod_state: mod_state}, timeout} + {:stop, _, mod_state} = stop -> + put_elem(stop, 2, %{s | mod_state: mod_state}) + other -> + {:stop, {:bad_return_value, other}, %{s | mod_state: mod_state}} + end + end + + defp disconnect(info, mod_state, %{mod: mod} = s) do + s = cancel_backoff(s) + try do + apply(mod, :disconnect, [info, mod_state]) + catch + class, reason -> + stack = __STACKTRACE__ + callback_stop(class, reason, stack, %{s | mod_state: mod_state}) + else + {:connect, info, mod_state} -> + connect(info, mod_state, s) + {:noconnect, mod_state} -> + {:noreply, %{s | mod_state: mod_state}} + {:noconnect, mod_state, timeout} -> + {:noreply, %{s | mod_state: mod_state}, timeout} + {:backoff, backoff_timeout, mod_state} -> + backoff = start_backoff(backoff_timeout) + {:noreply, %{s | backoff: backoff, mod_state: mod_state}} + {:backoff, backoff_timeout, mod_state, timeout} -> + backoff = start_backoff(backoff_timeout) + {:noreply, %{s | backoff: backoff, mod_state: mod_state}, timeout} + {:stop, _, mod_state} = stop -> + put_elem(stop, 2, %{s | mod_state: mod_state}) + other -> + {:stop, {:bad_return_value, other}, %{s | mod_state: mod_state}} + end + end + + # In order to have new mod_state in terminate/2 must return the exit reason. + # However to get the correct GenServer report (exit with stacktrace), + # include stacktrace in reason and re-raise after calling mod.terminate/2 if + # it does not raise. + + defp callback_stop(:throw, value, stack, s) do + callback_stop(:error, {:nocatch, value}, stack, s) + end + defp callback_stop(class, reason, stack, s) do + raise = {class, reason, stack} + {:stop, stop_reason(class, reason, stack), %{s | raise: raise}} + end + + defp stop_reason(:error, reason, stack), do: {reason, stack} + defp stop_reason(:exit, reason, _), do: reason + + defp handle_async(fun, msg, %{mod: mod, mod_state: mod_state} = s) do + try do + apply(mod, fun, [msg, mod_state]) + catch + :throw, value -> + :erlang.raise(:error, {:nocatch, value}, __STACKTRACE__) + else + {:noreply, mod_state} = noreply -> + put_elem(noreply, 1, %{s | mod_state: mod_state}) + {:noreply, mod_state, _} = noreply -> + put_elem(noreply, 1, %{s | mod_state: mod_state}) + {:connect, info, mod_state} -> + connect(info, mod_state, s) + {:disconnect, info, mod_state} -> + disconnect(info, mod_state, s) + {:stop, _, mod_state} = stop -> + put_elem(stop, 2, %{s | mod_state: mod_state}) + other -> + {:stop, {:bad_return_value, other}, %{s | mod_state: mod_state}} + end + end +end diff --git a/deps/connection/mix.exs b/deps/connection/mix.exs new file mode 100644 index 0000000..ffb0367 --- /dev/null +++ b/deps/connection/mix.exs @@ -0,0 +1,52 @@ +defmodule Connection.Mixfile do + use Mix.Project + + @version "1.1.0" + + def project do + [ + app: :connection, + version: @version, + elixir: "~> 1.7", + build_embedded: Mix.env == :prod, + start_permanent: Mix.env == :prod, + description: description(), + package: package(), + docs: docs(), + deps: deps() + ] + end + + def application do + [ + applications: [] + ] + end + + defp deps() do + [ + {:ex_doc, "~> 0.22", only: :dev} + ] + end + + defp docs do + [ + source_url: "https://github.com/elixir-ecto/connection", + source_ref: "v#{@version}", + main: Connection + ] + end + + defp description do + """ + Connection behaviour for connection 
processes + """ + end + + defp package do + %{ + licenses: ["Apache 2.0"], + links: %{"Github" => "https://github.com/elixir-ecto/connection"} + } + end +end diff --git a/deps/cowboy/.fetch b/deps/cowboy/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/cowboy/.hex b/deps/cowboy/.hex new file mode 100644 index 0000000000000000000000000000000000000000..bc8f9ec836fbbedecef509adbdad2fcca96f054f GIT binary patch literal 280 zcmZ9HOHuswwppDwpr2Yn6B5{2ZP^sG!SQbPb%lA|^fG^#XKcEu&kC4xfFfYDKq zGLx}D;-zB4cU~_a>VZ-u@Wu<6iy( DHbzY8 literal 0 HcmV?d00001 diff --git a/deps/cowboy/LICENSE b/deps/cowboy/LICENSE new file mode 100644 index 0000000..9d28158 --- /dev/null +++ b/deps/cowboy/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2011-2017, Loรฏc Hoguin + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/cowboy/Makefile b/deps/cowboy/Makefile new file mode 100644 index 0000000..2930a49 --- /dev/null +++ b/deps/cowboy/Makefile @@ -0,0 +1,124 @@ +# See LICENSE for licensing information. + +PROJECT = cowboy +PROJECT_DESCRIPTION = Small, fast, modern HTTP server. +PROJECT_VERSION = 2.9.0 +PROJECT_REGISTERED = cowboy_clock + +# Options. + +PLT_APPS = public_key ssl +CT_OPTS += -ct_hooks cowboy_ct_hook [] # -boot start_sasl + +# Dependencies. + +LOCAL_DEPS = crypto + +DEPS = cowlib ranch +dep_cowlib = git https://github.com/ninenines/cowlib 2.11.0 +dep_ranch = git https://github.com/ninenines/ranch 1.8.0 + +DOC_DEPS = asciideck + +TEST_DEPS = $(if $(CI_ERLANG_MK),ci.erlang.mk) ct_helper gun +dep_ct_helper = git https://github.com/extend/ct_helper master +dep_gun = git https://github.com/ninenines/gun master + +# CI configuration. + +dep_ci.erlang.mk = git https://github.com/ninenines/ci.erlang.mk master +DEP_EARLY_PLUGINS = ci.erlang.mk + +AUTO_CI_OTP ?= OTP-LATEST-22+ +AUTO_CI_HIPE ?= OTP-LATEST +# AUTO_CI_ERLLVM ?= OTP-LATEST +AUTO_CI_WINDOWS ?= OTP-LATEST-22+ + +# Hex configuration. + +define HEX_TARBALL_EXTRA_METADATA +#{ + licenses => [<<"ISC">>], + links => #{ + <<"User guide">> => <<"https://ninenines.eu/docs/en/cowboy/2.9/guide/">>, + <<"Function reference">> => <<"https://ninenines.eu/docs/en/cowboy/2.9/manual/">>, + <<"GitHub">> => <<"https://github.com/ninenines/cowboy">>, + <<"Sponsor">> => <<"https://github.com/sponsors/essen">> + } +} +endef + +# Standard targets. + +include erlang.mk + +# Don't run the examples test suite by default. + +ifndef FULL +CT_SUITES := $(filter-out examples ws_autobahn,$(CT_SUITES)) +endif + +# Compile options. + +ERLC_OPTS += +warn_missing_spec +warn_untyped_record # +bin_opt_info +TEST_ERLC_OPTS += +'{parse_transform, eunit_autoexport}' + +# Generate rebar.config on build. + +app:: rebar.config + +# Dialyze the tests. + +DIALYZER_OPTS += --src -r test + +# h2spec setup. 
+ +GOPATH := $(ERLANG_MK_TMP)/gopath +export GOPATH + +H2SPEC := $(GOPATH)/src/github.com/summerwind/h2spec/h2spec +export H2SPEC + +# @todo It would be better to allow these dependencies to be specified +# on a per-target basis instead of for all targets. +test-build:: $(H2SPEC) + +$(H2SPEC): + $(gen_verbose) mkdir -p $(GOPATH)/src/github.com/summerwind + $(verbose) git clone --depth 1 https://github.com/summerwind/h2spec $(dir $(H2SPEC)) || true + $(verbose) $(MAKE) -C $(dir $(H2SPEC)) build MAKEFLAGS= || true + +# Use erl_make_certs from the tested release during CI +# and ensure that ct_helper is always recompiled. + +ci-setup:: clean deps test-deps + $(gen_verbose) cp ~/.kerl/builds/$(CI_OTP_RELEASE)/otp_src_git/lib/ssl/test/erl_make_certs.erl deps/ct_helper/src/ || true + $(gen_verbose) $(MAKE) -C $(DEPS_DIR)/ct_helper clean app + +# Prepare for the release. + +prepare_tag: + $(verbose) $(warning Hex metadata: $(HEX_TARBALL_EXTRA_METADATA)) + $(verbose) echo + $(verbose) echo -n "Most recent tag: " + $(verbose) git tag --sort taggerdate | tail -n1 + $(verbose) git verify-tag `git tag --sort taggerdate | tail -n1` + $(verbose) echo -n "MAKEFILE: " + $(verbose) grep -m1 PROJECT_VERSION Makefile + $(verbose) echo -n "APP: " + $(verbose) grep -m1 vsn ebin/$(PROJECT).app | sed 's/ //g' + $(verbose) echo -n "GUIDE: " + $(verbose) grep -h dep_$(PROJECT)_commit doc/src/guide/*.asciidoc || true + $(verbose) echo + $(verbose) echo "Titles in most recent CHANGELOG:" + $(verbose) for f in `ls -r doc/src/guide/migrating_from_*.asciidoc | head -n1`; do \ + echo $$f:; \ + grep == $$f; \ + done + $(verbose) echo + $(verbose) echo "Dependencies:" + $(verbose) grep ^DEPS Makefile || echo "DEPS =" + $(verbose) grep ^dep_ Makefile || true + $(verbose) echo + $(verbose) echo "rebar.config:" + $(verbose) cat rebar.config || true diff --git a/deps/cowboy/README.asciidoc b/deps/cowboy/README.asciidoc new file mode 100644 index 0000000..1fa6d3f --- /dev/null +++ b/deps/cowboy/README.asciidoc @@ -0,0 +1,38 @@ += Cowboy + +Cowboy is a small, fast and modern HTTP server for Erlang/OTP. + +== Goals + +Cowboy aims to provide a *complete* HTTP stack in a *small* code base. +It is optimized for *low latency* and *low memory usage*, in part +because it uses *binary strings*. + +Cowboy provides *routing* capabilities, selectively dispatching requests +to handlers written in Erlang. + +Because it uses Ranch for managing connections, Cowboy can easily be +*embedded* in any other application. + +Cowboy is *clean* and *well tested* Erlang code. + +== Online documentation + +* https://ninenines.eu/docs/en/cowboy/2.6/guide[User guide] +* https://ninenines.eu/docs/en/cowboy/2.6/manual[Function reference] + +== Offline documentation + +* While still online, run `make docs` +* User guide available in `doc/` in PDF and HTML formats +* Function reference man pages available in `doc/man3/` and `doc/man7/` +* Run `make install-docs` to install man pages on your system +* Full documentation in Asciidoc available in `doc/src/` +* Examples available in `examples/` + +== Getting help + +* Official IRC Channel: #ninenines on irc.freenode.net +* https://github.com/ninenines/cowboy/issues[Issues tracker] +* https://ninenines.eu/services[Commercial Support] +* https://github.com/sponsors/essen[Sponsor me!] 
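+
+== Embedding example
+
+As a minimal sketch of the routing and embedding described above, a listener
+can be started directly from the host application. This example is written in
+Elixir; the `HelloHandler` module, the `:hello_listener` name and port 8080
+are illustrative only and not part of Cowboy.
+
+[source,elixir]
+----
+defmodule HelloHandler do
+  # A Cowboy 2.x handler: init/2 receives the request and returns {:ok, req, state}.
+  def init(req0, state) do
+    req = :cowboy_req.reply(200, %{"content-type" => "text/plain"}, "Hello!", req0)
+    {:ok, req, state}
+  end
+end
+
+# Compile the routes and start an HTTP listener on port 8080.
+dispatch = :cowboy_router.compile([{:_, [{"/", HelloHandler, []}]}])
+{:ok, _} = :cowboy.start_clear(:hello_listener, [port: 8080], %{env: %{dispatch: dispatch}})
+----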
diff --git a/deps/cowboy/ebin/cowboy.app b/deps/cowboy/ebin/cowboy.app new file mode 100644 index 0000000..6c67d14 --- /dev/null +++ b/deps/cowboy/ebin/cowboy.app @@ -0,0 +1,9 @@ +{application, 'cowboy', [ + {description, "Small, fast, modern HTTP server."}, + {vsn, "2.9.0"}, + {modules, ['cowboy','cowboy_app','cowboy_bstr','cowboy_children','cowboy_clear','cowboy_clock','cowboy_compress_h','cowboy_constraints','cowboy_handler','cowboy_http','cowboy_http2','cowboy_loop','cowboy_metrics_h','cowboy_middleware','cowboy_req','cowboy_rest','cowboy_router','cowboy_static','cowboy_stream','cowboy_stream_h','cowboy_sub_protocol','cowboy_sup','cowboy_tls','cowboy_tracer_h','cowboy_websocket']}, + {registered, [cowboy_sup,cowboy_clock]}, + {applications, [kernel,stdlib,crypto,cowlib,ranch]}, + {mod, {cowboy_app, []}}, + {env, []} +]}. \ No newline at end of file diff --git a/deps/cowboy/erlang.mk b/deps/cowboy/erlang.mk new file mode 100644 index 0000000..f152c37 --- /dev/null +++ b/deps/cowboy/erlang.mk @@ -0,0 +1,8156 @@ +# Copyright (c) 2013-2016, Loรฏc Hoguin +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk + +ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST))) +export ERLANG_MK_FILENAME + +ERLANG_MK_VERSION = d80984c +ERLANG_MK_WITHOUT = + +# Make 3.81 and 3.82 are deprecated. + +ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81) +$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html) +endif + +ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82) +$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html) +endif + +# Core configuration. + +PROJECT ?= $(notdir $(CURDIR)) +PROJECT := $(strip $(PROJECT)) + +PROJECT_VERSION ?= rolling +PROJECT_MOD ?= $(PROJECT)_app +PROJECT_ENV ?= [] + +# Verbosity. + +V ?= 0 + +verbose_0 = @ +verbose_2 = set -x; +verbose = $(verbose_$(V)) + +ifeq ($(V),3) +SHELL := $(SHELL) -x +endif + +gen_verbose_0 = @echo " GEN " $@; +gen_verbose_2 = set -x; +gen_verbose = $(gen_verbose_$(V)) + +gen_verbose_esc_0 = @echo " GEN " $$@; +gen_verbose_esc_2 = set -x; +gen_verbose_esc = $(gen_verbose_esc_$(V)) + +# Temporary files directory. + +ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk +export ERLANG_MK_TMP + +# "erl" command. + +ERL = erl +A1 -noinput -boot no_dot_erlang + +# Platform detection. 
+ +ifeq ($(PLATFORM),) +UNAME_S := $(shell uname -s) + +ifeq ($(UNAME_S),Linux) +PLATFORM = linux +else ifeq ($(UNAME_S),Darwin) +PLATFORM = darwin +else ifeq ($(UNAME_S),SunOS) +PLATFORM = solaris +else ifeq ($(UNAME_S),GNU) +PLATFORM = gnu +else ifeq ($(UNAME_S),FreeBSD) +PLATFORM = freebsd +else ifeq ($(UNAME_S),NetBSD) +PLATFORM = netbsd +else ifeq ($(UNAME_S),OpenBSD) +PLATFORM = openbsd +else ifeq ($(UNAME_S),DragonFly) +PLATFORM = dragonfly +else ifeq ($(shell uname -o),Msys) +PLATFORM = msys2 +else +$(error Unable to detect platform. Please open a ticket with the output of uname -a.) +endif + +export PLATFORM +endif + +# Core targets. + +all:: deps app rel + +# Noop to avoid a Make warning when there's nothing to do. +rel:: + $(verbose) : + +relup:: deps app + +check:: tests + +clean:: clean-crashdump + +clean-crashdump: +ifneq ($(wildcard erl_crash.dump),) + $(gen_verbose) rm -f erl_crash.dump +endif + +distclean:: clean distclean-tmp + +$(ERLANG_MK_TMP): + $(verbose) mkdir -p $(ERLANG_MK_TMP) + +distclean-tmp: + $(gen_verbose) rm -rf $(ERLANG_MK_TMP) + +help:: + $(verbose) printf "%s\n" \ + "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." \ + "Copyright (c) 2013-2016 Loรฏc Hoguin " \ + "" \ + "Usage: [V=1] $(MAKE) [target]..." \ + "" \ + "Core targets:" \ + " all Run deps, app and rel targets in that order" \ + " app Compile the project" \ + " deps Fetch dependencies (if needed) and compile them" \ + " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \ + " list-deps List dependencies recursively on stdout" \ + " search q=... Search for a package in the built-in index" \ + " rel Build a release for this project, if applicable" \ + " docs Build the documentation for this project" \ + " install-docs Install the man pages for this project" \ + " check Compile and run all tests and analysis for this project" \ + " tests Run the tests for this project" \ + " clean Delete temporary and output files from most targets" \ + " distclean Delete all temporary and output files" \ + " help Display this help and exit" \ + " erlang-mk Update erlang.mk to the latest version" + +# Core functions. + +empty := +space := $(empty) $(empty) +tab := $(empty) $(empty) +comma := , + +define newline + + +endef + +define comma_list +$(subst $(space),$(comma),$(strip $(1))) +endef + +define escape_dquotes +$(subst ",\",$1) +endef + +# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy. +define erlang +$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk +endef + +ifeq ($(PLATFORM),msys2) +core_native_path = $(shell cygpath -m $1) +else +core_native_path = $1 +endif + +core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2 + +core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1))) + +# We skip files that contain spaces because they end up causing issues. 
+core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " ")) + +core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1))))))))))))))))))))))))))) + +core_ls = $(filter-out $(1),$(shell echo $(1))) + +# @todo Use a solution that does not require using perl. +core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2) + +define core_render + printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2) +endef + +# Automated update. + +ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk +ERLANG_MK_COMMIT ?= +ERLANG_MK_BUILD_CONFIG ?= build.config +ERLANG_MK_BUILD_DIR ?= .erlang.mk.build + +erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT) +erlang-mk: +ifdef ERLANG_MK_COMMIT + $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR) + $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT) +else + $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR) +endif + $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi + $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1 + $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk + $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR) + $(verbose) rm -rf $(ERLANG_MK_TMP) + +# The erlang.mk package index is bundled in the default erlang.mk build. +# Search for the string "copyright" to skip to the rest of the code. + +# Copyright (c) 2015-2017, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: distclean-kerl + +KERL_INSTALL_DIR ?= $(HOME)/erlang + +ifeq ($(strip $(KERL)),) +KERL := $(ERLANG_MK_TMP)/kerl/kerl +endif + +KERL_DIR = $(ERLANG_MK_TMP)/kerl + +export KERL + +KERL_GIT ?= https://github.com/kerl/kerl +KERL_COMMIT ?= master + +KERL_MAKEFLAGS ?= + +OTP_GIT ?= https://github.com/erlang/otp + +define kerl_otp_target +$(KERL_INSTALL_DIR)/$(1): $(KERL) + $(verbose) if [ ! -d $$@ ]; then \ + MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \ + $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \ + fi +endef + +define kerl_hipe_target +$(KERL_INSTALL_DIR)/$1-native: $(KERL) + $(verbose) if [ ! -d $$@ ]; then \ + KERL_CONFIGURE_OPTIONS=--enable-native-libs \ + MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \ + $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \ + fi +endef + +$(KERL): $(KERL_DIR) + +$(KERL_DIR): | $(ERLANG_MK_TMP) + $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl + $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT) + $(verbose) chmod +x $(KERL) + +distclean:: distclean-kerl + +distclean-kerl: + $(gen_verbose) rm -rf $(KERL_DIR) + +# Allow users to select which version of Erlang/OTP to use for a project. + +ifneq ($(strip $(LATEST_ERLANG_OTP)),) +# In some environments it is necessary to filter out master. 
+ERLANG_OTP := $(notdir $(lastword $(sort\ + $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\ + $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native])))))) +endif + +ERLANG_OTP ?= +ERLANG_HIPE ?= + +# Use kerl to enforce a specific Erlang/OTP version for a project. +ifneq ($(strip $(ERLANG_OTP)),) +export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH) +SHELL := env PATH=$(PATH) $(SHELL) +$(eval $(call kerl_otp_target,$(ERLANG_OTP))) + +# Build Erlang/OTP only if it doesn't already exist. +ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),) +$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...) +$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2) +endif + +else +# Same for a HiPE enabled VM. +ifneq ($(strip $(ERLANG_HIPE)),) +export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH) +SHELL := env PATH=$(PATH) $(SHELL) +$(eval $(call kerl_hipe_target,$(ERLANG_HIPE))) + +# Build Erlang/OTP only if it doesn't already exist. +ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),) +$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...) +$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2) +endif + +endif +endif + +PACKAGES += aberth +pkg_aberth_name = aberth +pkg_aberth_description = Generic BERT-RPC server in Erlang +pkg_aberth_homepage = https://github.com/a13x/aberth +pkg_aberth_fetch = git +pkg_aberth_repo = https://github.com/a13x/aberth +pkg_aberth_commit = master + +PACKAGES += active +pkg_active_name = active +pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running +pkg_active_homepage = https://github.com/proger/active +pkg_active_fetch = git +pkg_active_repo = https://github.com/proger/active +pkg_active_commit = master + +PACKAGES += actordb_core +pkg_actordb_core_name = actordb_core +pkg_actordb_core_description = ActorDB main source +pkg_actordb_core_homepage = http://www.actordb.com/ +pkg_actordb_core_fetch = git +pkg_actordb_core_repo = https://github.com/biokoda/actordb_core +pkg_actordb_core_commit = master + +PACKAGES += actordb_thrift +pkg_actordb_thrift_name = actordb_thrift +pkg_actordb_thrift_description = Thrift API for ActorDB +pkg_actordb_thrift_homepage = http://www.actordb.com/ +pkg_actordb_thrift_fetch = git +pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift +pkg_actordb_thrift_commit = master + +PACKAGES += aleppo +pkg_aleppo_name = aleppo +pkg_aleppo_description = Alternative Erlang Pre-Processor +pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo +pkg_aleppo_fetch = git +pkg_aleppo_repo = https://github.com/ErlyORM/aleppo +pkg_aleppo_commit = master + +PACKAGES += alog +pkg_alog_name = alog +pkg_alog_description = Simply the best logging framework for Erlang +pkg_alog_homepage = https://github.com/siberian-fast-food/alogger +pkg_alog_fetch = git +pkg_alog_repo = https://github.com/siberian-fast-food/alogger +pkg_alog_commit = master + +PACKAGES += amqp_client +pkg_amqp_client_name = amqp_client +pkg_amqp_client_description = RabbitMQ Erlang AMQP client +pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html +pkg_amqp_client_fetch = git +pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git +pkg_amqp_client_commit = master + +PACKAGES += annotations +pkg_annotations_name = annotations 
+pkg_annotations_description = Simple code instrumentation utilities +pkg_annotations_homepage = https://github.com/hyperthunk/annotations +pkg_annotations_fetch = git +pkg_annotations_repo = https://github.com/hyperthunk/annotations +pkg_annotations_commit = master + +PACKAGES += antidote +pkg_antidote_name = antidote +pkg_antidote_description = Large-scale computation without synchronisation +pkg_antidote_homepage = https://syncfree.lip6.fr/ +pkg_antidote_fetch = git +pkg_antidote_repo = https://github.com/SyncFree/antidote +pkg_antidote_commit = master + +PACKAGES += apns +pkg_apns_name = apns +pkg_apns_description = Apple Push Notification Server for Erlang +pkg_apns_homepage = http://inaka.github.com/apns4erl +pkg_apns_fetch = git +pkg_apns_repo = https://github.com/inaka/apns4erl +pkg_apns_commit = master + +PACKAGES += asciideck +pkg_asciideck_name = asciideck +pkg_asciideck_description = Asciidoc for Erlang. +pkg_asciideck_homepage = https://ninenines.eu +pkg_asciideck_fetch = git +pkg_asciideck_repo = https://github.com/ninenines/asciideck +pkg_asciideck_commit = master + +PACKAGES += azdht +pkg_azdht_name = azdht +pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang +pkg_azdht_homepage = https://github.com/arcusfelis/azdht +pkg_azdht_fetch = git +pkg_azdht_repo = https://github.com/arcusfelis/azdht +pkg_azdht_commit = master + +PACKAGES += backoff +pkg_backoff_name = backoff +pkg_backoff_description = Simple exponential backoffs in Erlang +pkg_backoff_homepage = https://github.com/ferd/backoff +pkg_backoff_fetch = git +pkg_backoff_repo = https://github.com/ferd/backoff +pkg_backoff_commit = master + +PACKAGES += barrel_tcp +pkg_barrel_tcp_name = barrel_tcp +pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang. +pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp +pkg_barrel_tcp_fetch = git +pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp +pkg_barrel_tcp_commit = master + +PACKAGES += basho_bench +pkg_basho_bench_name = basho_bench +pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for. 
+pkg_basho_bench_homepage = https://github.com/basho/basho_bench +pkg_basho_bench_fetch = git +pkg_basho_bench_repo = https://github.com/basho/basho_bench +pkg_basho_bench_commit = master + +PACKAGES += bcrypt +pkg_bcrypt_name = bcrypt +pkg_bcrypt_description = Bcrypt Erlang / C library +pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt +pkg_bcrypt_fetch = git +pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git +pkg_bcrypt_commit = master + +PACKAGES += beam +pkg_beam_name = beam +pkg_beam_description = BEAM emulator written in Erlang +pkg_beam_homepage = https://github.com/tonyrog/beam +pkg_beam_fetch = git +pkg_beam_repo = https://github.com/tonyrog/beam +pkg_beam_commit = master + +PACKAGES += beanstalk +pkg_beanstalk_name = beanstalk +pkg_beanstalk_description = An Erlang client for beanstalkd +pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk +pkg_beanstalk_fetch = git +pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk +pkg_beanstalk_commit = master + +PACKAGES += bear +pkg_bear_name = bear +pkg_bear_description = a set of statistics functions for erlang +pkg_bear_homepage = https://github.com/boundary/bear +pkg_bear_fetch = git +pkg_bear_repo = https://github.com/boundary/bear +pkg_bear_commit = master + +PACKAGES += bertconf +pkg_bertconf_name = bertconf +pkg_bertconf_description = Make ETS tables out of statc BERT files that are auto-reloaded +pkg_bertconf_homepage = https://github.com/ferd/bertconf +pkg_bertconf_fetch = git +pkg_bertconf_repo = https://github.com/ferd/bertconf +pkg_bertconf_commit = master + +PACKAGES += bifrost +pkg_bifrost_name = bifrost +pkg_bifrost_description = Erlang FTP Server Framework +pkg_bifrost_homepage = https://github.com/thorstadt/bifrost +pkg_bifrost_fetch = git +pkg_bifrost_repo = https://github.com/thorstadt/bifrost +pkg_bifrost_commit = master + +PACKAGES += binpp +pkg_binpp_name = binpp +pkg_binpp_description = Erlang Binary Pretty Printer +pkg_binpp_homepage = https://github.com/jtendo/binpp +pkg_binpp_fetch = git +pkg_binpp_repo = https://github.com/jtendo/binpp +pkg_binpp_commit = master + +PACKAGES += bisect +pkg_bisect_name = bisect +pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang +pkg_bisect_homepage = https://github.com/knutin/bisect +pkg_bisect_fetch = git +pkg_bisect_repo = https://github.com/knutin/bisect +pkg_bisect_commit = master + +PACKAGES += bitcask +pkg_bitcask_name = bitcask +pkg_bitcask_description = because you need another a key/value storage engine +pkg_bitcask_homepage = https://github.com/basho/bitcask +pkg_bitcask_fetch = git +pkg_bitcask_repo = https://github.com/basho/bitcask +pkg_bitcask_commit = develop + +PACKAGES += bitstore +pkg_bitstore_name = bitstore +pkg_bitstore_description = A document based ontology development environment +pkg_bitstore_homepage = https://github.com/bdionne/bitstore +pkg_bitstore_fetch = git +pkg_bitstore_repo = https://github.com/bdionne/bitstore +pkg_bitstore_commit = master + +PACKAGES += bootstrap +pkg_bootstrap_name = bootstrap +pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application. 
+pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap +pkg_bootstrap_fetch = git +pkg_bootstrap_repo = https://github.com/schlagert/bootstrap +pkg_bootstrap_commit = master + +PACKAGES += boss +pkg_boss_name = boss +pkg_boss_description = Erlang web MVC, now featuring Comet +pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss +pkg_boss_fetch = git +pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss +pkg_boss_commit = master + +PACKAGES += boss_db +pkg_boss_db_name = boss_db +pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang +pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db +pkg_boss_db_fetch = git +pkg_boss_db_repo = https://github.com/ErlyORM/boss_db +pkg_boss_db_commit = master + +PACKAGES += brod +pkg_brod_name = brod +pkg_brod_description = Kafka client in Erlang +pkg_brod_homepage = https://github.com/klarna/brod +pkg_brod_fetch = git +pkg_brod_repo = https://github.com/klarna/brod.git +pkg_brod_commit = master + +PACKAGES += bson +pkg_bson_name = bson +pkg_bson_description = BSON documents in Erlang, see bsonspec.org +pkg_bson_homepage = https://github.com/comtihon/bson-erlang +pkg_bson_fetch = git +pkg_bson_repo = https://github.com/comtihon/bson-erlang +pkg_bson_commit = master + +PACKAGES += bullet +pkg_bullet_name = bullet +pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy. +pkg_bullet_homepage = http://ninenines.eu +pkg_bullet_fetch = git +pkg_bullet_repo = https://github.com/ninenines/bullet +pkg_bullet_commit = master + +PACKAGES += cache +pkg_cache_name = cache +pkg_cache_description = Erlang in-memory cache +pkg_cache_homepage = https://github.com/fogfish/cache +pkg_cache_fetch = git +pkg_cache_repo = https://github.com/fogfish/cache +pkg_cache_commit = master + +PACKAGES += cake +pkg_cake_name = cake +pkg_cake_description = Really simple terminal colorization +pkg_cake_homepage = https://github.com/darach/cake-erl +pkg_cake_fetch = git +pkg_cake_repo = https://github.com/darach/cake-erl +pkg_cake_commit = master + +PACKAGES += carotene +pkg_carotene_name = carotene +pkg_carotene_description = Real-time server +pkg_carotene_homepage = https://github.com/carotene/carotene +pkg_carotene_fetch = git +pkg_carotene_repo = https://github.com/carotene/carotene +pkg_carotene_commit = master + +PACKAGES += cberl +pkg_cberl_name = cberl +pkg_cberl_description = NIF based Erlang bindings for Couchbase +pkg_cberl_homepage = https://github.com/chitika/cberl +pkg_cberl_fetch = git +pkg_cberl_repo = https://github.com/chitika/cberl +pkg_cberl_commit = master + +PACKAGES += cecho +pkg_cecho_name = cecho +pkg_cecho_description = An ncurses library for Erlang +pkg_cecho_homepage = https://github.com/mazenharake/cecho +pkg_cecho_fetch = git +pkg_cecho_repo = https://github.com/mazenharake/cecho +pkg_cecho_commit = master + +PACKAGES += cferl +pkg_cferl_name = cferl +pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client +pkg_cferl_homepage = https://github.com/ddossot/cferl +pkg_cferl_fetch = git +pkg_cferl_repo = https://github.com/ddossot/cferl +pkg_cferl_commit = master + +PACKAGES += chaos_monkey +pkg_chaos_monkey_name = chaos_monkey +pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes. 
+pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey +pkg_chaos_monkey_fetch = git +pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey +pkg_chaos_monkey_commit = master + +PACKAGES += check_node +pkg_check_node_name = check_node +pkg_check_node_description = Nagios Scripts for monitoring Riak +pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios +pkg_check_node_fetch = git +pkg_check_node_repo = https://github.com/basho-labs/riak_nagios +pkg_check_node_commit = master + +PACKAGES += chronos +pkg_chronos_name = chronos +pkg_chronos_description = Timer module for Erlang that makes it easy to abstact time out of the tests. +pkg_chronos_homepage = https://github.com/lehoff/chronos +pkg_chronos_fetch = git +pkg_chronos_repo = https://github.com/lehoff/chronos +pkg_chronos_commit = master + +PACKAGES += chumak +pkg_chumak_name = chumak +pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol. +pkg_chumak_homepage = http://choven.ca +pkg_chumak_fetch = git +pkg_chumak_repo = https://github.com/chovencorp/chumak +pkg_chumak_commit = master + +PACKAGES += cl +pkg_cl_name = cl +pkg_cl_description = OpenCL binding for Erlang +pkg_cl_homepage = https://github.com/tonyrog/cl +pkg_cl_fetch = git +pkg_cl_repo = https://github.com/tonyrog/cl +pkg_cl_commit = master + +PACKAGES += clique +pkg_clique_name = clique +pkg_clique_description = CLI Framework for Erlang +pkg_clique_homepage = https://github.com/basho/clique +pkg_clique_fetch = git +pkg_clique_repo = https://github.com/basho/clique +pkg_clique_commit = develop + +PACKAGES += cloudi_core +pkg_cloudi_core_name = cloudi_core +pkg_cloudi_core_description = CloudI internal service runtime +pkg_cloudi_core_homepage = http://cloudi.org/ +pkg_cloudi_core_fetch = git +pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core +pkg_cloudi_core_commit = master + +PACKAGES += cloudi_service_api_requests +pkg_cloudi_service_api_requests_name = cloudi_service_api_requests +pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support) +pkg_cloudi_service_api_requests_homepage = http://cloudi.org/ +pkg_cloudi_service_api_requests_fetch = git +pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests +pkg_cloudi_service_api_requests_commit = master + +PACKAGES += cloudi_service_db +pkg_cloudi_service_db_name = cloudi_service_db +pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic) +pkg_cloudi_service_db_homepage = http://cloudi.org/ +pkg_cloudi_service_db_fetch = git +pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db +pkg_cloudi_service_db_commit = master + +PACKAGES += cloudi_service_db_cassandra +pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra +pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service +pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/ +pkg_cloudi_service_db_cassandra_fetch = git +pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra +pkg_cloudi_service_db_cassandra_commit = master + +PACKAGES += cloudi_service_db_cassandra_cql +pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql +pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service +pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/ +pkg_cloudi_service_db_cassandra_cql_fetch = git +pkg_cloudi_service_db_cassandra_cql_repo = 
https://github.com/CloudI/cloudi_service_db_cassandra_cql +pkg_cloudi_service_db_cassandra_cql_commit = master + +PACKAGES += cloudi_service_db_couchdb +pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb +pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service +pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/ +pkg_cloudi_service_db_couchdb_fetch = git +pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb +pkg_cloudi_service_db_couchdb_commit = master + +PACKAGES += cloudi_service_db_elasticsearch +pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch +pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service +pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/ +pkg_cloudi_service_db_elasticsearch_fetch = git +pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch +pkg_cloudi_service_db_elasticsearch_commit = master + +PACKAGES += cloudi_service_db_memcached +pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached +pkg_cloudi_service_db_memcached_description = memcached CloudI Service +pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/ +pkg_cloudi_service_db_memcached_fetch = git +pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached +pkg_cloudi_service_db_memcached_commit = master + +PACKAGES += cloudi_service_db_mysql +pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql +pkg_cloudi_service_db_mysql_description = MySQL CloudI Service +pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/ +pkg_cloudi_service_db_mysql_fetch = git +pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql +pkg_cloudi_service_db_mysql_commit = master + +PACKAGES += cloudi_service_db_pgsql +pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql +pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service +pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/ +pkg_cloudi_service_db_pgsql_fetch = git +pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql +pkg_cloudi_service_db_pgsql_commit = master + +PACKAGES += cloudi_service_db_riak +pkg_cloudi_service_db_riak_name = cloudi_service_db_riak +pkg_cloudi_service_db_riak_description = Riak CloudI Service +pkg_cloudi_service_db_riak_homepage = http://cloudi.org/ +pkg_cloudi_service_db_riak_fetch = git +pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak +pkg_cloudi_service_db_riak_commit = master + +PACKAGES += cloudi_service_db_tokyotyrant +pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant +pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service +pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/ +pkg_cloudi_service_db_tokyotyrant_fetch = git +pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant +pkg_cloudi_service_db_tokyotyrant_commit = master + +PACKAGES += cloudi_service_filesystem +pkg_cloudi_service_filesystem_name = cloudi_service_filesystem +pkg_cloudi_service_filesystem_description = Filesystem CloudI Service +pkg_cloudi_service_filesystem_homepage = http://cloudi.org/ +pkg_cloudi_service_filesystem_fetch = git +pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem +pkg_cloudi_service_filesystem_commit = master + +PACKAGES += cloudi_service_http_client 
+pkg_cloudi_service_http_client_name = cloudi_service_http_client +pkg_cloudi_service_http_client_description = HTTP client CloudI Service +pkg_cloudi_service_http_client_homepage = http://cloudi.org/ +pkg_cloudi_service_http_client_fetch = git +pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client +pkg_cloudi_service_http_client_commit = master + +PACKAGES += cloudi_service_http_cowboy +pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy +pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service +pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/ +pkg_cloudi_service_http_cowboy_fetch = git +pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy +pkg_cloudi_service_http_cowboy_commit = master + +PACKAGES += cloudi_service_http_elli +pkg_cloudi_service_http_elli_name = cloudi_service_http_elli +pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service +pkg_cloudi_service_http_elli_homepage = http://cloudi.org/ +pkg_cloudi_service_http_elli_fetch = git +pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli +pkg_cloudi_service_http_elli_commit = master + +PACKAGES += cloudi_service_map_reduce +pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce +pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service +pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/ +pkg_cloudi_service_map_reduce_fetch = git +pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce +pkg_cloudi_service_map_reduce_commit = master + +PACKAGES += cloudi_service_oauth1 +pkg_cloudi_service_oauth1_name = cloudi_service_oauth1 +pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service +pkg_cloudi_service_oauth1_homepage = http://cloudi.org/ +pkg_cloudi_service_oauth1_fetch = git +pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1 +pkg_cloudi_service_oauth1_commit = master + +PACKAGES += cloudi_service_queue +pkg_cloudi_service_queue_name = cloudi_service_queue +pkg_cloudi_service_queue_description = Persistent Queue Service +pkg_cloudi_service_queue_homepage = http://cloudi.org/ +pkg_cloudi_service_queue_fetch = git +pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue +pkg_cloudi_service_queue_commit = master + +PACKAGES += cloudi_service_quorum +pkg_cloudi_service_quorum_name = cloudi_service_quorum +pkg_cloudi_service_quorum_description = CloudI Quorum Service +pkg_cloudi_service_quorum_homepage = http://cloudi.org/ +pkg_cloudi_service_quorum_fetch = git +pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum +pkg_cloudi_service_quorum_commit = master + +PACKAGES += cloudi_service_router +pkg_cloudi_service_router_name = cloudi_service_router +pkg_cloudi_service_router_description = CloudI Router Service +pkg_cloudi_service_router_homepage = http://cloudi.org/ +pkg_cloudi_service_router_fetch = git +pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router +pkg_cloudi_service_router_commit = master + +PACKAGES += cloudi_service_tcp +pkg_cloudi_service_tcp_name = cloudi_service_tcp +pkg_cloudi_service_tcp_description = TCP CloudI Service +pkg_cloudi_service_tcp_homepage = http://cloudi.org/ +pkg_cloudi_service_tcp_fetch = git +pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp +pkg_cloudi_service_tcp_commit = master + +PACKAGES += cloudi_service_timers 
+pkg_cloudi_service_timers_name = cloudi_service_timers +pkg_cloudi_service_timers_description = Timers CloudI Service +pkg_cloudi_service_timers_homepage = http://cloudi.org/ +pkg_cloudi_service_timers_fetch = git +pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers +pkg_cloudi_service_timers_commit = master + +PACKAGES += cloudi_service_udp +pkg_cloudi_service_udp_name = cloudi_service_udp +pkg_cloudi_service_udp_description = UDP CloudI Service +pkg_cloudi_service_udp_homepage = http://cloudi.org/ +pkg_cloudi_service_udp_fetch = git +pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp +pkg_cloudi_service_udp_commit = master + +PACKAGES += cloudi_service_validate +pkg_cloudi_service_validate_name = cloudi_service_validate +pkg_cloudi_service_validate_description = CloudI Validate Service +pkg_cloudi_service_validate_homepage = http://cloudi.org/ +pkg_cloudi_service_validate_fetch = git +pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate +pkg_cloudi_service_validate_commit = master + +PACKAGES += cloudi_service_zeromq +pkg_cloudi_service_zeromq_name = cloudi_service_zeromq +pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service +pkg_cloudi_service_zeromq_homepage = http://cloudi.org/ +pkg_cloudi_service_zeromq_fetch = git +pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq +pkg_cloudi_service_zeromq_commit = master + +PACKAGES += cluster_info +pkg_cluster_info_name = cluster_info +pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app +pkg_cluster_info_homepage = https://github.com/basho/cluster_info +pkg_cluster_info_fetch = git +pkg_cluster_info_repo = https://github.com/basho/cluster_info +pkg_cluster_info_commit = master + +PACKAGES += color +pkg_color_name = color +pkg_color_description = ANSI colors for your Erlang +pkg_color_homepage = https://github.com/julianduque/erlang-color +pkg_color_fetch = git +pkg_color_repo = https://github.com/julianduque/erlang-color +pkg_color_commit = master + +PACKAGES += confetti +pkg_confetti_name = confetti +pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids +pkg_confetti_homepage = https://github.com/jtendo/confetti +pkg_confetti_fetch = git +pkg_confetti_repo = https://github.com/jtendo/confetti +pkg_confetti_commit = master + +PACKAGES += couchbeam +pkg_couchbeam_name = couchbeam +pkg_couchbeam_description = Apache CouchDB client in Erlang +pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam +pkg_couchbeam_fetch = git +pkg_couchbeam_repo = https://github.com/benoitc/couchbeam +pkg_couchbeam_commit = master + +PACKAGES += covertool +pkg_covertool_name = covertool +pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports +pkg_covertool_homepage = https://github.com/idubrov/covertool +pkg_covertool_fetch = git +pkg_covertool_repo = https://github.com/idubrov/covertool +pkg_covertool_commit = master + +PACKAGES += cowboy +pkg_cowboy_name = cowboy +pkg_cowboy_description = Small, fast and modular HTTP server. 
+pkg_cowboy_homepage = http://ninenines.eu +pkg_cowboy_fetch = git +pkg_cowboy_repo = https://github.com/ninenines/cowboy +pkg_cowboy_commit = 1.0.4 + +PACKAGES += cowdb +pkg_cowdb_name = cowdb +pkg_cowdb_description = Pure Key/Value database library for Erlang Applications +pkg_cowdb_homepage = https://github.com/refuge/cowdb +pkg_cowdb_fetch = git +pkg_cowdb_repo = https://github.com/refuge/cowdb +pkg_cowdb_commit = master + +PACKAGES += cowlib +pkg_cowlib_name = cowlib +pkg_cowlib_description = Support library for manipulating Web protocols. +pkg_cowlib_homepage = http://ninenines.eu +pkg_cowlib_fetch = git +pkg_cowlib_repo = https://github.com/ninenines/cowlib +pkg_cowlib_commit = 1.0.2 + +PACKAGES += cpg +pkg_cpg_name = cpg +pkg_cpg_description = CloudI Process Groups +pkg_cpg_homepage = https://github.com/okeuday/cpg +pkg_cpg_fetch = git +pkg_cpg_repo = https://github.com/okeuday/cpg +pkg_cpg_commit = master + +PACKAGES += cqerl +pkg_cqerl_name = cqerl +pkg_cqerl_description = Native Erlang CQL client for Cassandra +pkg_cqerl_homepage = https://matehat.github.io/cqerl/ +pkg_cqerl_fetch = git +pkg_cqerl_repo = https://github.com/matehat/cqerl +pkg_cqerl_commit = master + +PACKAGES += cr +pkg_cr_name = cr +pkg_cr_description = Chain Replication +pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm +pkg_cr_fetch = git +pkg_cr_repo = https://github.com/spawnproc/cr +pkg_cr_commit = master + +PACKAGES += cuttlefish +pkg_cuttlefish_name = cuttlefish +pkg_cuttlefish_description = cuttlefish configuration abstraction +pkg_cuttlefish_homepage = https://github.com/Kyorai/cuttlefish +pkg_cuttlefish_fetch = git +pkg_cuttlefish_repo = https://github.com/Kyorai/cuttlefish +pkg_cuttlefish_commit = master + +PACKAGES += damocles +pkg_damocles_name = damocles +pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box. 
+pkg_damocles_homepage = https://github.com/lostcolony/damocles +pkg_damocles_fetch = git +pkg_damocles_repo = https://github.com/lostcolony/damocles +pkg_damocles_commit = master + +PACKAGES += debbie +pkg_debbie_name = debbie +pkg_debbie_description = .DEB Built In Erlang +pkg_debbie_homepage = https://github.com/crownedgrouse/debbie +pkg_debbie_fetch = git +pkg_debbie_repo = https://github.com/crownedgrouse/debbie +pkg_debbie_commit = master + +PACKAGES += decimal +pkg_decimal_name = decimal +pkg_decimal_description = An Erlang decimal arithmetic library +pkg_decimal_homepage = https://github.com/tim/erlang-decimal +pkg_decimal_fetch = git +pkg_decimal_repo = https://github.com/tim/erlang-decimal +pkg_decimal_commit = master + +PACKAGES += detergent +pkg_detergent_name = detergent +pkg_detergent_description = An emulsifying Erlang SOAP library +pkg_detergent_homepage = https://github.com/devinus/detergent +pkg_detergent_fetch = git +pkg_detergent_repo = https://github.com/devinus/detergent +pkg_detergent_commit = master + +PACKAGES += detest +pkg_detest_name = detest +pkg_detest_description = Tool for running tests on a cluster of erlang nodes +pkg_detest_homepage = https://github.com/biokoda/detest +pkg_detest_fetch = git +pkg_detest_repo = https://github.com/biokoda/detest +pkg_detest_commit = master + +PACKAGES += dh_date +pkg_dh_date_name = dh_date +pkg_dh_date_description = Date formatting / parsing library for erlang +pkg_dh_date_homepage = https://github.com/daleharvey/dh_date +pkg_dh_date_fetch = git +pkg_dh_date_repo = https://github.com/daleharvey/dh_date +pkg_dh_date_commit = master + +PACKAGES += dirbusterl +pkg_dirbusterl_name = dirbusterl +pkg_dirbusterl_description = DirBuster successor in Erlang +pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl +pkg_dirbusterl_fetch = git +pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl +pkg_dirbusterl_commit = master + +PACKAGES += dispcount +pkg_dispcount_name = dispcount +pkg_dispcount_description = Erlang task dispatcher based on ETS counters. 
+pkg_dispcount_homepage = https://github.com/ferd/dispcount +pkg_dispcount_fetch = git +pkg_dispcount_repo = https://github.com/ferd/dispcount +pkg_dispcount_commit = master + +PACKAGES += dlhttpc +pkg_dlhttpc_name = dlhttpc +pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints +pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc +pkg_dlhttpc_fetch = git +pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc +pkg_dlhttpc_commit = master + +PACKAGES += dns +pkg_dns_name = dns +pkg_dns_description = Erlang DNS library +pkg_dns_homepage = https://github.com/aetrion/dns_erlang +pkg_dns_fetch = git +pkg_dns_repo = https://github.com/aetrion/dns_erlang +pkg_dns_commit = master + +PACKAGES += dnssd +pkg_dnssd_name = dnssd +pkg_dnssd_description = Erlang interface to Apple's Bonjour DNS Service Discovery implementation +pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang +pkg_dnssd_fetch = git +pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang +pkg_dnssd_commit = master + +PACKAGES += dynamic_compile +pkg_dynamic_compile_name = dynamic_compile +pkg_dynamic_compile_description = compile and load erlang modules from string input +pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile +pkg_dynamic_compile_fetch = git +pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile +pkg_dynamic_compile_commit = master + +PACKAGES += e2 +pkg_e2_name = e2 +pkg_e2_description = Library to simplify writing correct OTP applications. +pkg_e2_homepage = http://e2project.org +pkg_e2_fetch = git +pkg_e2_repo = https://github.com/gar1t/e2 +pkg_e2_commit = master + +PACKAGES += eamf +pkg_eamf_name = eamf +pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang +pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf +pkg_eamf_fetch = git +pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf +pkg_eamf_commit = master + +PACKAGES += eavro +pkg_eavro_name = eavro +pkg_eavro_description = Apache Avro encoder/decoder +pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro +pkg_eavro_fetch = git +pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro +pkg_eavro_commit = master + +PACKAGES += ecapnp +pkg_ecapnp_name = ecapnp +pkg_ecapnp_description = Cap'n Proto library for Erlang +pkg_ecapnp_homepage = https://github.com/kaos/ecapnp +pkg_ecapnp_fetch = git +pkg_ecapnp_repo = https://github.com/kaos/ecapnp +pkg_ecapnp_commit = master + +PACKAGES += econfig +pkg_econfig_name = econfig +pkg_econfig_description = simple Erlang config handler using INI files +pkg_econfig_homepage = https://github.com/benoitc/econfig +pkg_econfig_fetch = git +pkg_econfig_repo = https://github.com/benoitc/econfig +pkg_econfig_commit = master + +PACKAGES += edate +pkg_edate_name = edate +pkg_edate_description = date manipulation library for erlang +pkg_edate_homepage = https://github.com/dweldon/edate +pkg_edate_fetch = git +pkg_edate_repo = https://github.com/dweldon/edate +pkg_edate_commit = master + +PACKAGES += edgar +pkg_edgar_name = edgar +pkg_edgar_description = Erlang Does GNU AR +pkg_edgar_homepage = https://github.com/crownedgrouse/edgar +pkg_edgar_fetch = git +pkg_edgar_repo = https://github.com/crownedgrouse/edgar +pkg_edgar_commit = master + +PACKAGES += edis +pkg_edis_name = edis +pkg_edis_description = An Erlang implementation of Redis KV Store +pkg_edis_homepage = http://inaka.github.com/edis/ +pkg_edis_fetch = git +pkg_edis_repo = https://github.com/inaka/edis +pkg_edis_commit = master + +PACKAGES += edns 
+pkg_edns_name = edns +pkg_edns_description = Erlang/OTP DNS server +pkg_edns_homepage = https://github.com/hcvst/erlang-dns +pkg_edns_fetch = git +pkg_edns_repo = https://github.com/hcvst/erlang-dns +pkg_edns_commit = master + +PACKAGES += edown +pkg_edown_name = edown +pkg_edown_description = EDoc extension for generating Github-flavored Markdown +pkg_edown_homepage = https://github.com/uwiger/edown +pkg_edown_fetch = git +pkg_edown_repo = https://github.com/uwiger/edown +pkg_edown_commit = master + +PACKAGES += eep +pkg_eep_name = eep +pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy +pkg_eep_homepage = https://github.com/virtan/eep +pkg_eep_fetch = git +pkg_eep_repo = https://github.com/virtan/eep +pkg_eep_commit = master + +PACKAGES += eep_app +pkg_eep_app_name = eep_app +pkg_eep_app_description = Embedded Event Processing +pkg_eep_app_homepage = https://github.com/darach/eep-erl +pkg_eep_app_fetch = git +pkg_eep_app_repo = https://github.com/darach/eep-erl +pkg_eep_app_commit = master + +PACKAGES += efene +pkg_efene_name = efene +pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX +pkg_efene_homepage = https://github.com/efene/efene +pkg_efene_fetch = git +pkg_efene_repo = https://github.com/efene/efene +pkg_efene_commit = master + +PACKAGES += egeoip +pkg_egeoip_name = egeoip +pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database. +pkg_egeoip_homepage = https://github.com/mochi/egeoip +pkg_egeoip_fetch = git +pkg_egeoip_repo = https://github.com/mochi/egeoip +pkg_egeoip_commit = master + +PACKAGES += ehsa +pkg_ehsa_name = ehsa +pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules +pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa +pkg_ehsa_fetch = hg +pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa +pkg_ehsa_commit = default + +PACKAGES += ej +pkg_ej_name = ej +pkg_ej_description = Helper module for working with Erlang terms representing JSON +pkg_ej_homepage = https://github.com/seth/ej +pkg_ej_fetch = git +pkg_ej_repo = https://github.com/seth/ej +pkg_ej_commit = master + +PACKAGES += ejabberd +pkg_ejabberd_name = ejabberd +pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform +pkg_ejabberd_homepage = https://github.com/processone/ejabberd +pkg_ejabberd_fetch = git +pkg_ejabberd_repo = https://github.com/processone/ejabberd +pkg_ejabberd_commit = master + +PACKAGES += ejwt +pkg_ejwt_name = ejwt +pkg_ejwt_description = erlang library for JSON Web Token +pkg_ejwt_homepage = https://github.com/artefactop/ejwt +pkg_ejwt_fetch = git +pkg_ejwt_repo = https://github.com/artefactop/ejwt +pkg_ejwt_commit = master + +PACKAGES += ekaf +pkg_ekaf_name = ekaf +pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang. +pkg_ekaf_homepage = https://github.com/helpshift/ekaf +pkg_ekaf_fetch = git +pkg_ekaf_repo = https://github.com/helpshift/ekaf +pkg_ekaf_commit = master + +PACKAGES += elarm +pkg_elarm_name = elarm +pkg_elarm_description = Alarm Manager for Erlang. 
+pkg_elarm_homepage = https://github.com/esl/elarm +pkg_elarm_fetch = git +pkg_elarm_repo = https://github.com/esl/elarm +pkg_elarm_commit = master + +PACKAGES += eleveldb +pkg_eleveldb_name = eleveldb +pkg_eleveldb_description = Erlang LevelDB API +pkg_eleveldb_homepage = https://github.com/basho/eleveldb +pkg_eleveldb_fetch = git +pkg_eleveldb_repo = https://github.com/basho/eleveldb +pkg_eleveldb_commit = master + +PACKAGES += elixir +pkg_elixir_name = elixir +pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications +pkg_elixir_homepage = https://elixir-lang.org/ +pkg_elixir_fetch = git +pkg_elixir_repo = https://github.com/elixir-lang/elixir +pkg_elixir_commit = master + +PACKAGES += elli +pkg_elli_name = elli +pkg_elli_description = Simple, robust and performant Erlang web server +pkg_elli_homepage = https://github.com/elli-lib/elli +pkg_elli_fetch = git +pkg_elli_repo = https://github.com/elli-lib/elli +pkg_elli_commit = master + +PACKAGES += elvis +pkg_elvis_name = elvis +pkg_elvis_description = Erlang Style Reviewer +pkg_elvis_homepage = https://github.com/inaka/elvis +pkg_elvis_fetch = git +pkg_elvis_repo = https://github.com/inaka/elvis +pkg_elvis_commit = master + +PACKAGES += emagick +pkg_emagick_name = emagick +pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool. +pkg_emagick_homepage = https://github.com/kivra/emagick +pkg_emagick_fetch = git +pkg_emagick_repo = https://github.com/kivra/emagick +pkg_emagick_commit = master + +PACKAGES += emysql +pkg_emysql_name = emysql +pkg_emysql_description = Stable, pure Erlang MySQL driver. +pkg_emysql_homepage = https://github.com/Eonblast/Emysql +pkg_emysql_fetch = git +pkg_emysql_repo = https://github.com/Eonblast/Emysql +pkg_emysql_commit = master + +PACKAGES += enm +pkg_enm_name = enm +pkg_enm_description = Erlang driver for nanomsg +pkg_enm_homepage = https://github.com/basho/enm +pkg_enm_fetch = git +pkg_enm_repo = https://github.com/basho/enm +pkg_enm_commit = master + +PACKAGES += entop +pkg_entop_name = entop +pkg_entop_description = A top-like tool for monitoring an Erlang node +pkg_entop_homepage = https://github.com/mazenharake/entop +pkg_entop_fetch = git +pkg_entop_repo = https://github.com/mazenharake/entop +pkg_entop_commit = master + +PACKAGES += epcap +pkg_epcap_name = epcap +pkg_epcap_description = Erlang packet capture interface using pcap +pkg_epcap_homepage = https://github.com/msantos/epcap +pkg_epcap_fetch = git +pkg_epcap_repo = https://github.com/msantos/epcap +pkg_epcap_commit = master + +PACKAGES += eper +pkg_eper_name = eper +pkg_eper_description = Erlang performance and debugging tools. +pkg_eper_homepage = https://github.com/massemanet/eper +pkg_eper_fetch = git +pkg_eper_repo = https://github.com/massemanet/eper +pkg_eper_commit = master + +PACKAGES += epgsql +pkg_epgsql_name = epgsql +pkg_epgsql_description = Erlang PostgreSQL client library. +pkg_epgsql_homepage = https://github.com/epgsql/epgsql +pkg_epgsql_fetch = git +pkg_epgsql_repo = https://github.com/epgsql/epgsql +pkg_epgsql_commit = master + +PACKAGES += episcina +pkg_episcina_name = episcina +pkg_episcina_description = A simple non intrusive resource pool for connections +pkg_episcina_homepage = https://github.com/erlware/episcina +pkg_episcina_fetch = git +pkg_episcina_repo = https://github.com/erlware/episcina +pkg_episcina_commit = master + +PACKAGES += eplot +pkg_eplot_name = eplot +pkg_eplot_description = A plot engine written in erlang. 
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot +pkg_eplot_fetch = git +pkg_eplot_repo = https://github.com/psyeugenic/eplot +pkg_eplot_commit = master + +PACKAGES += epocxy +pkg_epocxy_name = epocxy +pkg_epocxy_description = Erlang Patterns of Concurrency +pkg_epocxy_homepage = https://github.com/duomark/epocxy +pkg_epocxy_fetch = git +pkg_epocxy_repo = https://github.com/duomark/epocxy +pkg_epocxy_commit = master + +PACKAGES += epubnub +pkg_epubnub_name = epubnub +pkg_epubnub_description = Erlang PubNub API +pkg_epubnub_homepage = https://github.com/tsloughter/epubnub +pkg_epubnub_fetch = git +pkg_epubnub_repo = https://github.com/tsloughter/epubnub +pkg_epubnub_commit = master + +PACKAGES += eqm +pkg_eqm_name = eqm +pkg_eqm_description = Erlang pub sub with supply-demand channels +pkg_eqm_homepage = https://github.com/loucash/eqm +pkg_eqm_fetch = git +pkg_eqm_repo = https://github.com/loucash/eqm +pkg_eqm_commit = master + +PACKAGES += eredis +pkg_eredis_name = eredis +pkg_eredis_description = Erlang Redis client +pkg_eredis_homepage = https://github.com/wooga/eredis +pkg_eredis_fetch = git +pkg_eredis_repo = https://github.com/wooga/eredis +pkg_eredis_commit = master + +PACKAGES += eredis_pool +pkg_eredis_pool_name = eredis_pool +pkg_eredis_pool_description = eredis_pool is Pool of Redis clients, using eredis and poolboy. +pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool +pkg_eredis_pool_fetch = git +pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool +pkg_eredis_pool_commit = master + +PACKAGES += erl_streams +pkg_erl_streams_name = erl_streams +pkg_erl_streams_description = Streams in Erlang +pkg_erl_streams_homepage = https://github.com/epappas/erl_streams +pkg_erl_streams_fetch = git +pkg_erl_streams_repo = https://github.com/epappas/erl_streams +pkg_erl_streams_commit = master + +PACKAGES += erlang_cep +pkg_erlang_cep_name = erlang_cep +pkg_erlang_cep_description = A basic CEP package written in erlang +pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep +pkg_erlang_cep_fetch = git +pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep +pkg_erlang_cep_commit = master + +PACKAGES += erlang_js +pkg_erlang_js_name = erlang_js +pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime. +pkg_erlang_js_homepage = https://github.com/basho/erlang_js +pkg_erlang_js_fetch = git +pkg_erlang_js_repo = https://github.com/basho/erlang_js +pkg_erlang_js_commit = master + +PACKAGES += erlang_localtime +pkg_erlang_localtime_name = erlang_localtime +pkg_erlang_localtime_description = Erlang library for conversion from one local time to another +pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime +pkg_erlang_localtime_fetch = git +pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime +pkg_erlang_localtime_commit = master + +PACKAGES += erlang_smtp +pkg_erlang_smtp_name = erlang_smtp +pkg_erlang_smtp_description = Erlang SMTP and POP3 server code. 
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp +pkg_erlang_smtp_fetch = git +pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp +pkg_erlang_smtp_commit = master + +PACKAGES += erlang_term +pkg_erlang_term_name = erlang_term +pkg_erlang_term_description = Erlang Term Info +pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term +pkg_erlang_term_fetch = git +pkg_erlang_term_repo = https://github.com/okeuday/erlang_term +pkg_erlang_term_commit = master + +PACKAGES += erlastic_search +pkg_erlastic_search_name = erlastic_search +pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface. +pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search +pkg_erlastic_search_fetch = git +pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search +pkg_erlastic_search_commit = master + +PACKAGES += erlasticsearch +pkg_erlasticsearch_name = erlasticsearch +pkg_erlasticsearch_description = Erlang thrift interface to elastic_search +pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch +pkg_erlasticsearch_fetch = git +pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch +pkg_erlasticsearch_commit = master + +PACKAGES += erlbrake +pkg_erlbrake_name = erlbrake +pkg_erlbrake_description = Erlang Airbrake notification client +pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake +pkg_erlbrake_fetch = git +pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake +pkg_erlbrake_commit = master + +PACKAGES += erlcloud +pkg_erlcloud_name = erlcloud +pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB) +pkg_erlcloud_homepage = https://github.com/gleber/erlcloud +pkg_erlcloud_fetch = git +pkg_erlcloud_repo = https://github.com/gleber/erlcloud +pkg_erlcloud_commit = master + +PACKAGES += erlcron +pkg_erlcron_name = erlcron +pkg_erlcron_description = Erlang cronish system +pkg_erlcron_homepage = https://github.com/erlware/erlcron +pkg_erlcron_fetch = git +pkg_erlcron_repo = https://github.com/erlware/erlcron +pkg_erlcron_commit = master + +PACKAGES += erldb +pkg_erldb_name = erldb +pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang +pkg_erldb_homepage = http://erldb.org +pkg_erldb_fetch = git +pkg_erldb_repo = https://github.com/erldb/erldb +pkg_erldb_commit = master + +PACKAGES += erldis +pkg_erldis_name = erldis +pkg_erldis_description = redis erlang client library +pkg_erldis_homepage = https://github.com/cstar/erldis +pkg_erldis_fetch = git +pkg_erldis_repo = https://github.com/cstar/erldis +pkg_erldis_commit = master + +PACKAGES += erldns +pkg_erldns_name = erldns +pkg_erldns_description = DNS server, in erlang. 
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns +pkg_erldns_fetch = git +pkg_erldns_repo = https://github.com/aetrion/erl-dns +pkg_erldns_commit = master + +PACKAGES += erldocker +pkg_erldocker_name = erldocker +pkg_erldocker_description = Docker Remote API client for Erlang +pkg_erldocker_homepage = https://github.com/proger/erldocker +pkg_erldocker_fetch = git +pkg_erldocker_repo = https://github.com/proger/erldocker +pkg_erldocker_commit = master + +PACKAGES += erlfsmon +pkg_erlfsmon_name = erlfsmon +pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX +pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon +pkg_erlfsmon_fetch = git +pkg_erlfsmon_repo = https://github.com/proger/erlfsmon +pkg_erlfsmon_commit = master + +PACKAGES += erlgit +pkg_erlgit_name = erlgit +pkg_erlgit_description = Erlang convenience wrapper around git executable +pkg_erlgit_homepage = https://github.com/gleber/erlgit +pkg_erlgit_fetch = git +pkg_erlgit_repo = https://github.com/gleber/erlgit +pkg_erlgit_commit = master + +PACKAGES += erlguten +pkg_erlguten_name = erlguten +pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang. +pkg_erlguten_homepage = https://github.com/richcarl/erlguten +pkg_erlguten_fetch = git +pkg_erlguten_repo = https://github.com/richcarl/erlguten +pkg_erlguten_commit = master + +PACKAGES += erlmc +pkg_erlmc_name = erlmc +pkg_erlmc_description = Erlang memcached binary protocol client +pkg_erlmc_homepage = https://github.com/jkvor/erlmc +pkg_erlmc_fetch = git +pkg_erlmc_repo = https://github.com/jkvor/erlmc +pkg_erlmc_commit = master + +PACKAGES += erlmongo +pkg_erlmongo_name = erlmongo +pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support +pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo +pkg_erlmongo_fetch = git +pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo +pkg_erlmongo_commit = master + +PACKAGES += erlog +pkg_erlog_name = erlog +pkg_erlog_description = Prolog interpreter in and for Erlang +pkg_erlog_homepage = https://github.com/rvirding/erlog +pkg_erlog_fetch = git +pkg_erlog_repo = https://github.com/rvirding/erlog +pkg_erlog_commit = master + +PACKAGES += erlpass +pkg_erlpass_name = erlpass +pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever. +pkg_erlpass_homepage = https://github.com/ferd/erlpass +pkg_erlpass_fetch = git +pkg_erlpass_repo = https://github.com/ferd/erlpass +pkg_erlpass_commit = master + +PACKAGES += erlport +pkg_erlport_name = erlport +pkg_erlport_description = ErlPort - connect Erlang to other languages +pkg_erlport_homepage = https://github.com/hdima/erlport +pkg_erlport_fetch = git +pkg_erlport_repo = https://github.com/hdima/erlport +pkg_erlport_commit = master + +PACKAGES += erlsh +pkg_erlsh_name = erlsh +pkg_erlsh_description = Erlang shell tools +pkg_erlsh_homepage = https://github.com/proger/erlsh +pkg_erlsh_fetch = git +pkg_erlsh_repo = https://github.com/proger/erlsh +pkg_erlsh_commit = master + +PACKAGES += erlsha2 +pkg_erlsha2_name = erlsha2 +pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs. 
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2 +pkg_erlsha2_fetch = git +pkg_erlsha2_repo = https://github.com/vinoski/erlsha2 +pkg_erlsha2_commit = master + +PACKAGES += erlsom +pkg_erlsom_name = erlsom +pkg_erlsom_description = XML parser for Erlang +pkg_erlsom_homepage = https://github.com/willemdj/erlsom +pkg_erlsom_fetch = git +pkg_erlsom_repo = https://github.com/willemdj/erlsom +pkg_erlsom_commit = master + +PACKAGES += erlubi +pkg_erlubi_name = erlubi +pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer) +pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi +pkg_erlubi_fetch = git +pkg_erlubi_repo = https://github.com/krestenkrab/erlubi +pkg_erlubi_commit = master + +PACKAGES += erlvolt +pkg_erlvolt_name = erlvolt +pkg_erlvolt_description = VoltDB Erlang Client Driver +pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang +pkg_erlvolt_fetch = git +pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang +pkg_erlvolt_commit = master + +PACKAGES += erlware_commons +pkg_erlware_commons_name = erlware_commons +pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components. +pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons +pkg_erlware_commons_fetch = git +pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons +pkg_erlware_commons_commit = master + +PACKAGES += erlydtl +pkg_erlydtl_name = erlydtl +pkg_erlydtl_description = Django Template Language for Erlang. +pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl +pkg_erlydtl_fetch = git +pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl +pkg_erlydtl_commit = master + +PACKAGES += errd +pkg_errd_name = errd +pkg_errd_description = Erlang RRDTool library +pkg_errd_homepage = https://github.com/archaelus/errd +pkg_errd_fetch = git +pkg_errd_repo = https://github.com/archaelus/errd +pkg_errd_commit = master + +PACKAGES += erserve +pkg_erserve_name = erserve +pkg_erserve_description = Erlang/Rserve communication interface +pkg_erserve_homepage = https://github.com/del/erserve +pkg_erserve_fetch = git +pkg_erserve_repo = https://github.com/del/erserve +pkg_erserve_commit = master + +PACKAGES += erwa +pkg_erwa_name = erwa +pkg_erwa_description = A WAMP router and client written in Erlang. 
+pkg_erwa_homepage = https://github.com/bwegh/erwa +pkg_erwa_fetch = git +pkg_erwa_repo = https://github.com/bwegh/erwa +pkg_erwa_commit = master + +PACKAGES += escalus +pkg_escalus_name = escalus +pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers +pkg_escalus_homepage = https://github.com/esl/escalus +pkg_escalus_fetch = git +pkg_escalus_repo = https://github.com/esl/escalus +pkg_escalus_commit = master + +PACKAGES += esh_mk +pkg_esh_mk_name = esh_mk +pkg_esh_mk_description = esh template engine plugin for erlang.mk +pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk +pkg_esh_mk_fetch = git +pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git +pkg_esh_mk_commit = master + +PACKAGES += espec +pkg_espec_name = espec +pkg_espec_description = ESpec: Behaviour driven development framework for Erlang +pkg_espec_homepage = https://github.com/lucaspiller/espec +pkg_espec_fetch = git +pkg_espec_repo = https://github.com/lucaspiller/espec +pkg_espec_commit = master + +PACKAGES += estatsd +pkg_estatsd_name = estatsd +pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite +pkg_estatsd_homepage = https://github.com/RJ/estatsd +pkg_estatsd_fetch = git +pkg_estatsd_repo = https://github.com/RJ/estatsd +pkg_estatsd_commit = master + +PACKAGES += etap +pkg_etap_name = etap +pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output. +pkg_etap_homepage = https://github.com/ngerakines/etap +pkg_etap_fetch = git +pkg_etap_repo = https://github.com/ngerakines/etap +pkg_etap_commit = master + +PACKAGES += etest +pkg_etest_name = etest +pkg_etest_description = A lightweight, convention over configuration test framework for Erlang +pkg_etest_homepage = https://github.com/wooga/etest +pkg_etest_fetch = git +pkg_etest_repo = https://github.com/wooga/etest +pkg_etest_commit = master + +PACKAGES += etest_http +pkg_etest_http_name = etest_http +pkg_etest_http_description = etest Assertions around HTTP (client-side) +pkg_etest_http_homepage = https://github.com/wooga/etest_http +pkg_etest_http_fetch = git +pkg_etest_http_repo = https://github.com/wooga/etest_http +pkg_etest_http_commit = master + +PACKAGES += etoml +pkg_etoml_name = etoml +pkg_etoml_description = TOML language erlang parser +pkg_etoml_homepage = https://github.com/kalta/etoml +pkg_etoml_fetch = git +pkg_etoml_repo = https://github.com/kalta/etoml +pkg_etoml_commit = master + +PACKAGES += eunit +pkg_eunit_name = eunit +pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository. +pkg_eunit_homepage = https://github.com/richcarl/eunit +pkg_eunit_fetch = git +pkg_eunit_repo = https://github.com/richcarl/eunit +pkg_eunit_commit = master + +PACKAGES += eunit_formatters +pkg_eunit_formatters_name = eunit_formatters +pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better. 
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters +pkg_eunit_formatters_fetch = git +pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters +pkg_eunit_formatters_commit = master + +PACKAGES += euthanasia +pkg_euthanasia_name = euthanasia +pkg_euthanasia_description = Merciful killer for your Erlang processes +pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia +pkg_euthanasia_fetch = git +pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia +pkg_euthanasia_commit = master + +PACKAGES += evum +pkg_evum_name = evum +pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM +pkg_evum_homepage = https://github.com/msantos/evum +pkg_evum_fetch = git +pkg_evum_repo = https://github.com/msantos/evum +pkg_evum_commit = master + +PACKAGES += exec +pkg_exec_name = erlexec +pkg_exec_description = Execute and control OS processes from Erlang/OTP. +pkg_exec_homepage = http://saleyn.github.com/erlexec +pkg_exec_fetch = git +pkg_exec_repo = https://github.com/saleyn/erlexec +pkg_exec_commit = master + +PACKAGES += exml +pkg_exml_name = exml +pkg_exml_description = XML parsing library in Erlang +pkg_exml_homepage = https://github.com/paulgray/exml +pkg_exml_fetch = git +pkg_exml_repo = https://github.com/paulgray/exml +pkg_exml_commit = master + +PACKAGES += exometer +pkg_exometer_name = exometer +pkg_exometer_description = Basic measurement objects and probe behavior +pkg_exometer_homepage = https://github.com/Feuerlabs/exometer +pkg_exometer_fetch = git +pkg_exometer_repo = https://github.com/Feuerlabs/exometer +pkg_exometer_commit = master + +PACKAGES += exs1024 +pkg_exs1024_name = exs1024 +pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang. +pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024 +pkg_exs1024_fetch = git +pkg_exs1024_repo = https://github.com/jj1bdx/exs1024 +pkg_exs1024_commit = master + +PACKAGES += exs64 +pkg_exs64_name = exs64 +pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang. +pkg_exs64_homepage = https://github.com/jj1bdx/exs64 +pkg_exs64_fetch = git +pkg_exs64_repo = https://github.com/jj1bdx/exs64 +pkg_exs64_commit = master + +PACKAGES += exsplus116 +pkg_exsplus116_name = exsplus116 +pkg_exsplus116_description = Xorshift116plus for Erlang +pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116 +pkg_exsplus116_fetch = git +pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116 +pkg_exsplus116_commit = master + +PACKAGES += exsplus128 +pkg_exsplus128_name = exsplus128 +pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang. +pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128 +pkg_exsplus128_fetch = git +pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128 +pkg_exsplus128_commit = master + +PACKAGES += ezmq +pkg_ezmq_name = ezmq +pkg_ezmq_description = zMQ implemented in Erlang +pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq +pkg_ezmq_fetch = git +pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq +pkg_ezmq_commit = master + +PACKAGES += ezmtp +pkg_ezmtp_name = ezmtp +pkg_ezmtp_description = ZMTP protocol in pure Erlang. 
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp +pkg_ezmtp_fetch = git +pkg_ezmtp_repo = https://github.com/a13x/ezmtp +pkg_ezmtp_commit = master + +PACKAGES += fast_disk_log +pkg_fast_disk_log_name = fast_disk_log +pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger +pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log +pkg_fast_disk_log_fetch = git +pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log +pkg_fast_disk_log_commit = master + +PACKAGES += feeder +pkg_feeder_name = feeder +pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds. +pkg_feeder_homepage = https://github.com/michaelnisi/feeder +pkg_feeder_fetch = git +pkg_feeder_repo = https://github.com/michaelnisi/feeder +pkg_feeder_commit = master + +PACKAGES += find_crate +pkg_find_crate_name = find_crate +pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory +pkg_find_crate_homepage = https://github.com/goertzenator/find_crate +pkg_find_crate_fetch = git +pkg_find_crate_repo = https://github.com/goertzenator/find_crate +pkg_find_crate_commit = master + +PACKAGES += fix +pkg_fix_name = fix +pkg_fix_description = http://fixprotocol.org/ implementation. +pkg_fix_homepage = https://github.com/maxlapshin/fix +pkg_fix_fetch = git +pkg_fix_repo = https://github.com/maxlapshin/fix +pkg_fix_commit = master + +PACKAGES += flower +pkg_flower_name = flower +pkg_flower_description = FlowER - an Erlang OpenFlow development platform +pkg_flower_homepage = https://github.com/travelping/flower +pkg_flower_fetch = git +pkg_flower_repo = https://github.com/travelping/flower +pkg_flower_commit = master + +PACKAGES += fn +pkg_fn_name = fn +pkg_fn_description = Function utilities for Erlang +pkg_fn_homepage = https://github.com/reiddraper/fn +pkg_fn_fetch = git +pkg_fn_repo = https://github.com/reiddraper/fn +pkg_fn_commit = master + +PACKAGES += folsom +pkg_folsom_name = folsom +pkg_folsom_description = Expose Erlang Events and Metrics +pkg_folsom_homepage = https://github.com/boundary/folsom +pkg_folsom_fetch = git +pkg_folsom_repo = https://github.com/boundary/folsom +pkg_folsom_commit = master + +PACKAGES += folsom_cowboy +pkg_folsom_cowboy_name = folsom_cowboy +pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper. 
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy +pkg_folsom_cowboy_fetch = git +pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy +pkg_folsom_cowboy_commit = master + +PACKAGES += folsomite +pkg_folsomite_name = folsomite +pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics +pkg_folsomite_homepage = https://github.com/campanja/folsomite +pkg_folsomite_fetch = git +pkg_folsomite_repo = https://github.com/campanja/folsomite +pkg_folsomite_commit = master + +PACKAGES += fs +pkg_fs_name = fs +pkg_fs_description = Erlang FileSystem Listener +pkg_fs_homepage = https://github.com/synrc/fs +pkg_fs_fetch = git +pkg_fs_repo = https://github.com/synrc/fs +pkg_fs_commit = master + +PACKAGES += fuse +pkg_fuse_name = fuse +pkg_fuse_description = A Circuit Breaker for Erlang +pkg_fuse_homepage = https://github.com/jlouis/fuse +pkg_fuse_fetch = git +pkg_fuse_repo = https://github.com/jlouis/fuse +pkg_fuse_commit = master + +PACKAGES += gcm +pkg_gcm_name = gcm +pkg_gcm_description = An Erlang application for Google Cloud Messaging +pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang +pkg_gcm_fetch = git +pkg_gcm_repo = https://github.com/pdincau/gcm-erlang +pkg_gcm_commit = master + +PACKAGES += gcprof +pkg_gcprof_name = gcprof +pkg_gcprof_description = Garbage Collection profiler for Erlang +pkg_gcprof_homepage = https://github.com/knutin/gcprof +pkg_gcprof_fetch = git +pkg_gcprof_repo = https://github.com/knutin/gcprof +pkg_gcprof_commit = master + +PACKAGES += geas +pkg_geas_name = geas +pkg_geas_description = Guess Erlang Application Scattering +pkg_geas_homepage = https://github.com/crownedgrouse/geas +pkg_geas_fetch = git +pkg_geas_repo = https://github.com/crownedgrouse/geas +pkg_geas_commit = master + +PACKAGES += geef +pkg_geef_name = geef +pkg_geef_description = Git NEEEEF (Erlang NIF) +pkg_geef_homepage = https://github.com/carlosmn/geef +pkg_geef_fetch = git +pkg_geef_repo = https://github.com/carlosmn/geef +pkg_geef_commit = master + +PACKAGES += gen_coap +pkg_gen_coap_name = gen_coap +pkg_gen_coap_description = Generic Erlang CoAP Client/Server +pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap +pkg_gen_coap_fetch = git +pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap +pkg_gen_coap_commit = master + +PACKAGES += gen_cycle +pkg_gen_cycle_name = gen_cycle +pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks +pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle +pkg_gen_cycle_fetch = git +pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle +pkg_gen_cycle_commit = develop + +PACKAGES += gen_icmp +pkg_gen_icmp_name = gen_icmp +pkg_gen_icmp_description = Erlang interface to ICMP sockets +pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp +pkg_gen_icmp_fetch = git +pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp +pkg_gen_icmp_commit = master + +PACKAGES += gen_leader +pkg_gen_leader_name = gen_leader +pkg_gen_leader_description = leader election behavior +pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival +pkg_gen_leader_fetch = git +pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival +pkg_gen_leader_commit = master + +PACKAGES += gen_nb_server +pkg_gen_nb_server_name = gen_nb_server +pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers +pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server +pkg_gen_nb_server_fetch = git +pkg_gen_nb_server_repo = 
https://github.com/kevsmith/gen_nb_server +pkg_gen_nb_server_commit = master + +PACKAGES += gen_paxos +pkg_gen_paxos_name = gen_paxos +pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol +pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos +pkg_gen_paxos_fetch = git +pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos +pkg_gen_paxos_commit = master + +PACKAGES += gen_rpc +pkg_gen_rpc_name = gen_rpc +pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages +pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git +pkg_gen_rpc_fetch = git +pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git +pkg_gen_rpc_commit = master + +PACKAGES += gen_smtp +pkg_gen_smtp_name = gen_smtp +pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules +pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp +pkg_gen_smtp_fetch = git +pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp +pkg_gen_smtp_commit = master + +PACKAGES += gen_tracker +pkg_gen_tracker_name = gen_tracker +pkg_gen_tracker_description = supervisor with ets handling of children and their metadata +pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker +pkg_gen_tracker_fetch = git +pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker +pkg_gen_tracker_commit = master + +PACKAGES += gen_unix +pkg_gen_unix_name = gen_unix +pkg_gen_unix_description = Erlang Unix socket interface +pkg_gen_unix_homepage = https://github.com/msantos/gen_unix +pkg_gen_unix_fetch = git +pkg_gen_unix_repo = https://github.com/msantos/gen_unix +pkg_gen_unix_commit = master + +PACKAGES += geode +pkg_geode_name = geode +pkg_geode_description = geohash/proximity lookup in pure, uncut erlang. +pkg_geode_homepage = https://github.com/bradfordw/geode +pkg_geode_fetch = git +pkg_geode_repo = https://github.com/bradfordw/geode +pkg_geode_commit = master + +PACKAGES += getopt +pkg_getopt_name = getopt +pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax +pkg_getopt_homepage = https://github.com/jcomellas/getopt +pkg_getopt_fetch = git +pkg_getopt_repo = https://github.com/jcomellas/getopt +pkg_getopt_commit = master + +PACKAGES += gettext +pkg_gettext_name = gettext +pkg_gettext_description = Erlang internationalization library. 
+pkg_gettext_homepage = https://github.com/etnt/gettext +pkg_gettext_fetch = git +pkg_gettext_repo = https://github.com/etnt/gettext +pkg_gettext_commit = master + +PACKAGES += giallo +pkg_giallo_name = giallo +pkg_giallo_description = Small and flexible web framework on top of Cowboy +pkg_giallo_homepage = https://github.com/kivra/giallo +pkg_giallo_fetch = git +pkg_giallo_repo = https://github.com/kivra/giallo +pkg_giallo_commit = master + +PACKAGES += gin +pkg_gin_name = gin +pkg_gin_description = The guards and for Erlang parse_transform +pkg_gin_homepage = https://github.com/mad-cocktail/gin +pkg_gin_fetch = git +pkg_gin_repo = https://github.com/mad-cocktail/gin +pkg_gin_commit = master + +PACKAGES += gitty +pkg_gitty_name = gitty +pkg_gitty_description = Git access in erlang +pkg_gitty_homepage = https://github.com/maxlapshin/gitty +pkg_gitty_fetch = git +pkg_gitty_repo = https://github.com/maxlapshin/gitty +pkg_gitty_commit = master + +PACKAGES += gold_fever +pkg_gold_fever_name = gold_fever +pkg_gold_fever_description = A Treasure Hunt for Erlangers +pkg_gold_fever_homepage = https://github.com/inaka/gold_fever +pkg_gold_fever_fetch = git +pkg_gold_fever_repo = https://github.com/inaka/gold_fever +pkg_gold_fever_commit = master + +PACKAGES += gpb +pkg_gpb_name = gpb +pkg_gpb_description = A Google Protobuf implementation for Erlang +pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb +pkg_gpb_fetch = git +pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb +pkg_gpb_commit = master + +PACKAGES += gproc +pkg_gproc_name = gproc +pkg_gproc_description = Extended process registry for Erlang +pkg_gproc_homepage = https://github.com/uwiger/gproc +pkg_gproc_fetch = git +pkg_gproc_repo = https://github.com/uwiger/gproc +pkg_gproc_commit = master + +PACKAGES += grapherl +pkg_grapherl_name = grapherl +pkg_grapherl_description = Create graphs of Erlang systems and programs +pkg_grapherl_homepage = https://github.com/eproxus/grapherl +pkg_grapherl_fetch = git +pkg_grapherl_repo = https://github.com/eproxus/grapherl +pkg_grapherl_commit = master + +PACKAGES += grpc +pkg_grpc_name = grpc +pkg_grpc_description = gRPC server in Erlang +pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc +pkg_grpc_fetch = git +pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc +pkg_grpc_commit = master + +PACKAGES += grpc_client +pkg_grpc_client_name = grpc_client +pkg_grpc_client_description = gRPC client in Erlang +pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client +pkg_grpc_client_fetch = git +pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client +pkg_grpc_client_commit = master + +PACKAGES += gun +pkg_gun_name = gun +pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang. +pkg_gun_homepage = http://ninenines.eu +pkg_gun_fetch = git +pkg_gun_repo = https://github.com/ninenines/gun +pkg_gun_commit = master + +PACKAGES += gut +pkg_gut_name = gut +pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. 
Like rails generate or yeoman +pkg_gut_homepage = https://github.com/unbalancedparentheses/gut +pkg_gut_fetch = git +pkg_gut_repo = https://github.com/unbalancedparentheses/gut +pkg_gut_commit = master + +PACKAGES += hackney +pkg_hackney_name = hackney +pkg_hackney_description = simple HTTP client in Erlang +pkg_hackney_homepage = https://github.com/benoitc/hackney +pkg_hackney_fetch = git +pkg_hackney_repo = https://github.com/benoitc/hackney +pkg_hackney_commit = master + +PACKAGES += hamcrest +pkg_hamcrest_name = hamcrest +pkg_hamcrest_description = Erlang port of Hamcrest +pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang +pkg_hamcrest_fetch = git +pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang +pkg_hamcrest_commit = master + +PACKAGES += hanoidb +pkg_hanoidb_name = hanoidb +pkg_hanoidb_description = Erlang LSM BTree Storage +pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb +pkg_hanoidb_fetch = git +pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb +pkg_hanoidb_commit = master + +PACKAGES += hottub +pkg_hottub_name = hottub +pkg_hottub_description = Permanent Erlang Worker Pool +pkg_hottub_homepage = https://github.com/bfrog/hottub +pkg_hottub_fetch = git +pkg_hottub_repo = https://github.com/bfrog/hottub +pkg_hottub_commit = master + +PACKAGES += hpack +pkg_hpack_name = hpack +pkg_hpack_description = HPACK Implementation for Erlang +pkg_hpack_homepage = https://github.com/joedevivo/hpack +pkg_hpack_fetch = git +pkg_hpack_repo = https://github.com/joedevivo/hpack +pkg_hpack_commit = master + +PACKAGES += hyper +pkg_hyper_name = hyper +pkg_hyper_description = Erlang implementation of HyperLogLog +pkg_hyper_homepage = https://github.com/GameAnalytics/hyper +pkg_hyper_fetch = git +pkg_hyper_repo = https://github.com/GameAnalytics/hyper +pkg_hyper_commit = master + +PACKAGES += i18n +pkg_i18n_name = i18n +pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e) +pkg_i18n_homepage = https://github.com/erlang-unicode/i18n +pkg_i18n_fetch = git +pkg_i18n_repo = https://github.com/erlang-unicode/i18n +pkg_i18n_commit = master + +PACKAGES += ibrowse +pkg_ibrowse_name = ibrowse +pkg_ibrowse_description = Erlang HTTP client +pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse +pkg_ibrowse_fetch = git +pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse +pkg_ibrowse_commit = master + +PACKAGES += idna +pkg_idna_name = idna +pkg_idna_description = Erlang IDNA lib +pkg_idna_homepage = https://github.com/benoitc/erlang-idna +pkg_idna_fetch = git +pkg_idna_repo = https://github.com/benoitc/erlang-idna +pkg_idna_commit = master + +PACKAGES += ierlang +pkg_ierlang_name = ierlang +pkg_ierlang_description = An Erlang language kernel for IPython. 
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang +pkg_ierlang_fetch = git +pkg_ierlang_repo = https://github.com/robbielynch/ierlang +pkg_ierlang_commit = master + +PACKAGES += iota +pkg_iota_name = iota +pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code +pkg_iota_homepage = https://github.com/jpgneves/iota +pkg_iota_fetch = git +pkg_iota_repo = https://github.com/jpgneves/iota +pkg_iota_commit = master + +PACKAGES += irc_lib +pkg_irc_lib_name = irc_lib +pkg_irc_lib_description = Erlang irc client library +pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib +pkg_irc_lib_fetch = git +pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib +pkg_irc_lib_commit = master + +PACKAGES += ircd +pkg_ircd_name = ircd +pkg_ircd_description = A pluggable IRC daemon application/library for Erlang. +pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd +pkg_ircd_fetch = git +pkg_ircd_repo = https://github.com/tonyg/erlang-ircd +pkg_ircd_commit = master + +PACKAGES += iris +pkg_iris_name = iris +pkg_iris_description = Iris Erlang binding +pkg_iris_homepage = https://github.com/project-iris/iris-erl +pkg_iris_fetch = git +pkg_iris_repo = https://github.com/project-iris/iris-erl +pkg_iris_commit = master + +PACKAGES += iso8601 +pkg_iso8601_name = iso8601 +pkg_iso8601_description = Erlang ISO 8601 date formatter/parser +pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601 +pkg_iso8601_fetch = git +pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601 +pkg_iso8601_commit = master + +PACKAGES += jamdb_sybase +pkg_jamdb_sybase_name = jamdb_sybase +pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE +pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase +pkg_jamdb_sybase_fetch = git +pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase +pkg_jamdb_sybase_commit = master + +PACKAGES += jerg +pkg_jerg_name = jerg +pkg_jerg_description = JSON Schema to Erlang Records Generator +pkg_jerg_homepage = https://github.com/ddossot/jerg +pkg_jerg_fetch = git +pkg_jerg_repo = https://github.com/ddossot/jerg +pkg_jerg_commit = master + +PACKAGES += jesse +pkg_jesse_name = jesse +pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang. +pkg_jesse_homepage = https://github.com/for-GET/jesse +pkg_jesse_fetch = git +pkg_jesse_repo = https://github.com/for-GET/jesse +pkg_jesse_commit = master + +PACKAGES += jiffy +pkg_jiffy_name = jiffy +pkg_jiffy_description = JSON NIFs for Erlang. 
+pkg_jiffy_homepage = https://github.com/davisp/jiffy +pkg_jiffy_fetch = git +pkg_jiffy_repo = https://github.com/davisp/jiffy +pkg_jiffy_commit = master + +PACKAGES += jiffy_v +pkg_jiffy_v_name = jiffy_v +pkg_jiffy_v_description = JSON validation utility +pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v +pkg_jiffy_v_fetch = git +pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v +pkg_jiffy_v_commit = master + +PACKAGES += jobs +pkg_jobs_name = jobs +pkg_jobs_description = a Job scheduler for load regulation +pkg_jobs_homepage = https://github.com/esl/jobs +pkg_jobs_fetch = git +pkg_jobs_repo = https://github.com/esl/jobs +pkg_jobs_commit = master + +PACKAGES += joxa +pkg_joxa_name = joxa +pkg_joxa_description = A Modern Lisp for the Erlang VM +pkg_joxa_homepage = https://github.com/joxa/joxa +pkg_joxa_fetch = git +pkg_joxa_repo = https://github.com/joxa/joxa +pkg_joxa_commit = master + +PACKAGES += json +pkg_json_name = json +pkg_json_description = a high level json library for erlang (17.0+) +pkg_json_homepage = https://github.com/talentdeficit/json +pkg_json_fetch = git +pkg_json_repo = https://github.com/talentdeficit/json +pkg_json_commit = master + +PACKAGES += json_rec +pkg_json_rec_name = json_rec +pkg_json_rec_description = JSON to erlang record +pkg_json_rec_homepage = https://github.com/justinkirby/json_rec +pkg_json_rec_fetch = git +pkg_json_rec_repo = https://github.com/justinkirby/json_rec +pkg_json_rec_commit = master + +PACKAGES += jsone +pkg_jsone_name = jsone +pkg_jsone_description = An Erlang library for encoding, decoding JSON data. +pkg_jsone_homepage = https://github.com/sile/jsone.git +pkg_jsone_fetch = git +pkg_jsone_repo = https://github.com/sile/jsone.git +pkg_jsone_commit = master + +PACKAGES += jsonerl +pkg_jsonerl_name = jsonerl +pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder +pkg_jsonerl_homepage = https://github.com/lambder/jsonerl +pkg_jsonerl_fetch = git +pkg_jsonerl_repo = https://github.com/lambder/jsonerl +pkg_jsonerl_commit = master + +PACKAGES += jsonpath +pkg_jsonpath_name = jsonpath +pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation +pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath +pkg_jsonpath_fetch = git +pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath +pkg_jsonpath_commit = master + +PACKAGES += jsonx +pkg_jsonx_name = jsonx +pkg_jsonx_description = JSONX is an Erlang library for efficient decode and encode JSON, written in C. +pkg_jsonx_homepage = https://github.com/iskra/jsonx +pkg_jsonx_fetch = git +pkg_jsonx_repo = https://github.com/iskra/jsonx +pkg_jsonx_commit = master + +PACKAGES += jsx +pkg_jsx_name = jsx +pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON. 
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx +pkg_jsx_fetch = git +pkg_jsx_repo = https://github.com/talentdeficit/jsx +pkg_jsx_commit = main + +PACKAGES += kafka +pkg_kafka_name = kafka +pkg_kafka_description = Kafka consumer and producer in Erlang +pkg_kafka_homepage = https://github.com/wooga/kafka-erlang +pkg_kafka_fetch = git +pkg_kafka_repo = https://github.com/wooga/kafka-erlang +pkg_kafka_commit = master + +PACKAGES += kafka_protocol +pkg_kafka_protocol_name = kafka_protocol +pkg_kafka_protocol_description = Kafka protocol Erlang library +pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol +pkg_kafka_protocol_fetch = git +pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git +pkg_kafka_protocol_commit = master + +PACKAGES += kai +pkg_kai_name = kai +pkg_kai_description = DHT storage by Takeshi Inoue +pkg_kai_homepage = https://github.com/synrc/kai +pkg_kai_fetch = git +pkg_kai_repo = https://github.com/synrc/kai +pkg_kai_commit = master + +PACKAGES += katja +pkg_katja_name = katja +pkg_katja_description = A simple Riemann client written in Erlang. +pkg_katja_homepage = https://github.com/nifoc/katja +pkg_katja_fetch = git +pkg_katja_repo = https://github.com/nifoc/katja +pkg_katja_commit = master + +PACKAGES += kdht +pkg_kdht_name = kdht +pkg_kdht_description = kdht is an erlang DHT implementation +pkg_kdht_homepage = https://github.com/kevinlynx/kdht +pkg_kdht_fetch = git +pkg_kdht_repo = https://github.com/kevinlynx/kdht +pkg_kdht_commit = master + +PACKAGES += key2value +pkg_key2value_name = key2value +pkg_key2value_description = Erlang 2-way map +pkg_key2value_homepage = https://github.com/okeuday/key2value +pkg_key2value_fetch = git +pkg_key2value_repo = https://github.com/okeuday/key2value +pkg_key2value_commit = master + +PACKAGES += keys1value +pkg_keys1value_name = keys1value +pkg_keys1value_description = Erlang set associative map for key lists +pkg_keys1value_homepage = https://github.com/okeuday/keys1value +pkg_keys1value_fetch = git +pkg_keys1value_repo = https://github.com/okeuday/keys1value +pkg_keys1value_commit = master + +PACKAGES += kinetic +pkg_kinetic_name = kinetic +pkg_kinetic_description = Erlang Kinesis Client +pkg_kinetic_homepage = https://github.com/AdRoll/kinetic +pkg_kinetic_fetch = git +pkg_kinetic_repo = https://github.com/AdRoll/kinetic +pkg_kinetic_commit = master + +PACKAGES += kjell +pkg_kjell_name = kjell +pkg_kjell_description = Erlang Shell +pkg_kjell_homepage = https://github.com/karlll/kjell +pkg_kjell_fetch = git +pkg_kjell_repo = https://github.com/karlll/kjell +pkg_kjell_commit = master + +PACKAGES += kraken +pkg_kraken_name = kraken +pkg_kraken_description = Distributed Pubsub Server for Realtime Apps +pkg_kraken_homepage = https://github.com/Asana/kraken +pkg_kraken_fetch = git +pkg_kraken_repo = https://github.com/Asana/kraken +pkg_kraken_commit = master + +PACKAGES += kucumberl +pkg_kucumberl_name = kucumberl +pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber +pkg_kucumberl_homepage = https://github.com/openshine/kucumberl +pkg_kucumberl_fetch = git +pkg_kucumberl_repo = https://github.com/openshine/kucumberl +pkg_kucumberl_commit = master + +PACKAGES += kvc +pkg_kvc_name = kvc +pkg_kvc_description = KVC - Key Value Coding for Erlang data structures +pkg_kvc_homepage = https://github.com/etrepum/kvc +pkg_kvc_fetch = git +pkg_kvc_repo = https://github.com/etrepum/kvc +pkg_kvc_commit = master + +PACKAGES += kvlists +pkg_kvlists_name = kvlists 
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang +pkg_kvlists_homepage = https://github.com/jcomellas/kvlists +pkg_kvlists_fetch = git +pkg_kvlists_repo = https://github.com/jcomellas/kvlists +pkg_kvlists_commit = master + +PACKAGES += kvs +pkg_kvs_name = kvs +pkg_kvs_description = Container and Iterator +pkg_kvs_homepage = https://github.com/synrc/kvs +pkg_kvs_fetch = git +pkg_kvs_repo = https://github.com/synrc/kvs +pkg_kvs_commit = master + +PACKAGES += lager +pkg_lager_name = lager +pkg_lager_description = A logging framework for Erlang/OTP. +pkg_lager_homepage = https://github.com/erlang-lager/lager +pkg_lager_fetch = git +pkg_lager_repo = https://github.com/erlang-lager/lager +pkg_lager_commit = master + +PACKAGES += lager_amqp_backend +pkg_lager_amqp_backend_name = lager_amqp_backend +pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend +pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend +pkg_lager_amqp_backend_fetch = git +pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend +pkg_lager_amqp_backend_commit = master + +PACKAGES += lager_syslog +pkg_lager_syslog_name = lager_syslog +pkg_lager_syslog_description = Syslog backend for lager +pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog +pkg_lager_syslog_fetch = git +pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog +pkg_lager_syslog_commit = master + +PACKAGES += lambdapad +pkg_lambdapad_name = lambdapad +pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang. +pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad +pkg_lambdapad_fetch = git +pkg_lambdapad_repo = https://github.com/gar1t/lambdapad +pkg_lambdapad_commit = master + +PACKAGES += lasp +pkg_lasp_name = lasp +pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations +pkg_lasp_homepage = http://lasp-lang.org/ +pkg_lasp_fetch = git +pkg_lasp_repo = https://github.com/lasp-lang/lasp +pkg_lasp_commit = master + +PACKAGES += lasse +pkg_lasse_name = lasse +pkg_lasse_description = SSE handler for Cowboy +pkg_lasse_homepage = https://github.com/inaka/lasse +pkg_lasse_fetch = git +pkg_lasse_repo = https://github.com/inaka/lasse +pkg_lasse_commit = master + +PACKAGES += ldap +pkg_ldap_name = ldap +pkg_ldap_description = LDAP server written in Erlang +pkg_ldap_homepage = https://github.com/spawnproc/ldap +pkg_ldap_fetch = git +pkg_ldap_repo = https://github.com/spawnproc/ldap +pkg_ldap_commit = master + +PACKAGES += lethink +pkg_lethink_name = lethink +pkg_lethink_description = erlang driver for rethinkdb +pkg_lethink_homepage = https://github.com/taybin/lethink +pkg_lethink_fetch = git +pkg_lethink_repo = https://github.com/taybin/lethink +pkg_lethink_commit = master + +PACKAGES += lfe +pkg_lfe_name = lfe +pkg_lfe_description = Lisp Flavoured Erlang (LFE) +pkg_lfe_homepage = https://github.com/rvirding/lfe +pkg_lfe_fetch = git +pkg_lfe_repo = https://github.com/rvirding/lfe +pkg_lfe_commit = master + +PACKAGES += ling +pkg_ling_name = ling +pkg_ling_description = Erlang on Xen +pkg_ling_homepage = https://github.com/cloudozer/ling +pkg_ling_fetch = git +pkg_ling_repo = https://github.com/cloudozer/ling +pkg_ling_commit = master + +PACKAGES += live +pkg_live_name = live +pkg_live_description = Automated module and configuration reloader. 
+pkg_live_homepage = http://ninenines.eu +pkg_live_fetch = git +pkg_live_repo = https://github.com/ninenines/live +pkg_live_commit = master + +PACKAGES += lmq +pkg_lmq_name = lmq +pkg_lmq_description = Lightweight Message Queue +pkg_lmq_homepage = https://github.com/iij/lmq +pkg_lmq_fetch = git +pkg_lmq_repo = https://github.com/iij/lmq +pkg_lmq_commit = master + +PACKAGES += locker +pkg_locker_name = locker +pkg_locker_description = Atomic distributed 'check and set' for short-lived keys +pkg_locker_homepage = https://github.com/wooga/locker +pkg_locker_fetch = git +pkg_locker_repo = https://github.com/wooga/locker +pkg_locker_commit = master + +PACKAGES += locks +pkg_locks_name = locks +pkg_locks_description = A scalable, deadlock-resolving resource locker +pkg_locks_homepage = https://github.com/uwiger/locks +pkg_locks_fetch = git +pkg_locks_repo = https://github.com/uwiger/locks +pkg_locks_commit = master + +PACKAGES += log4erl +pkg_log4erl_name = log4erl +pkg_log4erl_description = A logger for erlang in the spirit of Log4J. +pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl +pkg_log4erl_fetch = git +pkg_log4erl_repo = https://github.com/ahmednawras/log4erl +pkg_log4erl_commit = master + +PACKAGES += lol +pkg_lol_name = lol +pkg_lol_description = Lisp on erLang, and programming is fun again +pkg_lol_homepage = https://github.com/b0oh/lol +pkg_lol_fetch = git +pkg_lol_repo = https://github.com/b0oh/lol +pkg_lol_commit = master + +PACKAGES += lucid +pkg_lucid_name = lucid +pkg_lucid_description = HTTP/2 server written in Erlang +pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid +pkg_lucid_fetch = git +pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid +pkg_lucid_commit = master + +PACKAGES += luerl +pkg_luerl_name = luerl +pkg_luerl_description = Lua in Erlang +pkg_luerl_homepage = https://github.com/rvirding/luerl +pkg_luerl_fetch = git +pkg_luerl_repo = https://github.com/rvirding/luerl +pkg_luerl_commit = develop + +PACKAGES += luwak +pkg_luwak_name = luwak +pkg_luwak_description = Large-object storage interface for Riak +pkg_luwak_homepage = https://github.com/basho/luwak +pkg_luwak_fetch = git +pkg_luwak_repo = https://github.com/basho/luwak +pkg_luwak_commit = master + +PACKAGES += lux +pkg_lux_name = lux +pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands +pkg_lux_homepage = https://github.com/hawk/lux +pkg_lux_fetch = git +pkg_lux_repo = https://github.com/hawk/lux +pkg_lux_commit = master + +PACKAGES += machi +pkg_machi_name = machi +pkg_machi_description = Machi file store +pkg_machi_homepage = https://github.com/basho/machi +pkg_machi_fetch = git +pkg_machi_repo = https://github.com/basho/machi +pkg_machi_commit = master + +PACKAGES += mad +pkg_mad_name = mad +pkg_mad_description = Small and Fast Rebar Replacement +pkg_mad_homepage = https://github.com/synrc/mad +pkg_mad_fetch = git +pkg_mad_repo = https://github.com/synrc/mad +pkg_mad_commit = master + +PACKAGES += marina +pkg_marina_name = marina +pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client +pkg_marina_homepage = https://github.com/lpgauth/marina +pkg_marina_fetch = git +pkg_marina_repo = https://github.com/lpgauth/marina +pkg_marina_commit = master + +PACKAGES += mavg +pkg_mavg_name = mavg +pkg_mavg_description = Erlang :: Exponential moving average library +pkg_mavg_homepage = https://github.com/EchoTeam/mavg +pkg_mavg_fetch = git +pkg_mavg_repo = https://github.com/EchoTeam/mavg +pkg_mavg_commit = 
master + +PACKAGES += mc_erl +pkg_mc_erl_name = mc_erl +pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang. +pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl +pkg_mc_erl_fetch = git +pkg_mc_erl_repo = https://github.com/clonejo/mc-erl +pkg_mc_erl_commit = master + +PACKAGES += mcd +pkg_mcd_name = mcd +pkg_mcd_description = Fast memcached protocol client in pure Erlang +pkg_mcd_homepage = https://github.com/EchoTeam/mcd +pkg_mcd_fetch = git +pkg_mcd_repo = https://github.com/EchoTeam/mcd +pkg_mcd_commit = master + +PACKAGES += mcerlang +pkg_mcerlang_name = mcerlang +pkg_mcerlang_description = The McErlang model checker for Erlang +pkg_mcerlang_homepage = https://github.com/fredlund/McErlang +pkg_mcerlang_fetch = git +pkg_mcerlang_repo = https://github.com/fredlund/McErlang +pkg_mcerlang_commit = master + +PACKAGES += meck +pkg_meck_name = meck +pkg_meck_description = A mocking library for Erlang +pkg_meck_homepage = https://github.com/eproxus/meck +pkg_meck_fetch = git +pkg_meck_repo = https://github.com/eproxus/meck +pkg_meck_commit = master + +PACKAGES += mekao +pkg_mekao_name = mekao +pkg_mekao_description = SQL constructor +pkg_mekao_homepage = https://github.com/ddosia/mekao +pkg_mekao_fetch = git +pkg_mekao_repo = https://github.com/ddosia/mekao +pkg_mekao_commit = master + +PACKAGES += memo +pkg_memo_name = memo +pkg_memo_description = Erlang memoization server +pkg_memo_homepage = https://github.com/tuncer/memo +pkg_memo_fetch = git +pkg_memo_repo = https://github.com/tuncer/memo +pkg_memo_commit = master + +PACKAGES += merge_index +pkg_merge_index_name = merge_index +pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop). +pkg_merge_index_homepage = https://github.com/basho/merge_index +pkg_merge_index_fetch = git +pkg_merge_index_repo = https://github.com/basho/merge_index +pkg_merge_index_commit = master + +PACKAGES += merl +pkg_merl_name = merl +pkg_merl_description = Metaprogramming in Erlang +pkg_merl_homepage = https://github.com/richcarl/merl +pkg_merl_fetch = git +pkg_merl_repo = https://github.com/richcarl/merl +pkg_merl_commit = master + +PACKAGES += mimerl +pkg_mimerl_name = mimerl +pkg_mimerl_description = library to handle mimetypes +pkg_mimerl_homepage = https://github.com/benoitc/mimerl +pkg_mimerl_fetch = git +pkg_mimerl_repo = https://github.com/benoitc/mimerl +pkg_mimerl_commit = master + +PACKAGES += mimetypes +pkg_mimetypes_name = mimetypes +pkg_mimetypes_description = Erlang MIME types library +pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes +pkg_mimetypes_fetch = git +pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes +pkg_mimetypes_commit = master + +PACKAGES += mixer +pkg_mixer_name = mixer +pkg_mixer_description = Mix in functions from other modules +pkg_mixer_homepage = https://github.com/chef/mixer +pkg_mixer_fetch = git +pkg_mixer_repo = https://github.com/chef/mixer +pkg_mixer_commit = master + +PACKAGES += mochiweb +pkg_mochiweb_name = mochiweb +pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers. 
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb +pkg_mochiweb_fetch = git +pkg_mochiweb_repo = https://github.com/mochi/mochiweb +pkg_mochiweb_commit = master + +PACKAGES += mochiweb_xpath +pkg_mochiweb_xpath_name = mochiweb_xpath +pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser +pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath +pkg_mochiweb_xpath_fetch = git +pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath +pkg_mochiweb_xpath_commit = master + +PACKAGES += mockgyver +pkg_mockgyver_name = mockgyver +pkg_mockgyver_description = A mocking library for Erlang +pkg_mockgyver_homepage = https://github.com/klajo/mockgyver +pkg_mockgyver_fetch = git +pkg_mockgyver_repo = https://github.com/klajo/mockgyver +pkg_mockgyver_commit = master + +PACKAGES += modlib +pkg_modlib_name = modlib +pkg_modlib_description = Web framework based on Erlang's inets httpd +pkg_modlib_homepage = https://github.com/gar1t/modlib +pkg_modlib_fetch = git +pkg_modlib_repo = https://github.com/gar1t/modlib +pkg_modlib_commit = master + +PACKAGES += mongodb +pkg_mongodb_name = mongodb +pkg_mongodb_description = MongoDB driver for Erlang +pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang +pkg_mongodb_fetch = git +pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang +pkg_mongodb_commit = master + +PACKAGES += mongooseim +pkg_mongooseim_name = mongooseim +pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions +pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform +pkg_mongooseim_fetch = git +pkg_mongooseim_repo = https://github.com/esl/MongooseIM +pkg_mongooseim_commit = master + +PACKAGES += moyo +pkg_moyo_name = moyo +pkg_moyo_description = Erlang utility functions library +pkg_moyo_homepage = https://github.com/dwango/moyo +pkg_moyo_fetch = git +pkg_moyo_repo = https://github.com/dwango/moyo +pkg_moyo_commit = master + +PACKAGES += msgpack +pkg_msgpack_name = msgpack +pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang +pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang +pkg_msgpack_fetch = git +pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang +pkg_msgpack_commit = master + +PACKAGES += mu2 +pkg_mu2_name = mu2 +pkg_mu2_description = Erlang mutation testing tool +pkg_mu2_homepage = https://github.com/ramsay-t/mu2 +pkg_mu2_fetch = git +pkg_mu2_repo = https://github.com/ramsay-t/mu2 +pkg_mu2_commit = master + +PACKAGES += mustache +pkg_mustache_name = mustache +pkg_mustache_description = Mustache template engine for Erlang. 
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl +pkg_mustache_fetch = git +pkg_mustache_repo = https://github.com/mojombo/mustache.erl +pkg_mustache_commit = master + +PACKAGES += myproto +pkg_myproto_name = myproto +pkg_myproto_description = MySQL Server Protocol in Erlang +pkg_myproto_homepage = https://github.com/altenwald/myproto +pkg_myproto_fetch = git +pkg_myproto_repo = https://github.com/altenwald/myproto +pkg_myproto_commit = master + +PACKAGES += mysql +pkg_mysql_name = mysql +pkg_mysql_description = MySQL client library for Erlang/OTP +pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp +pkg_mysql_fetch = git +pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp +pkg_mysql_commit = 1.7.0 + +PACKAGES += n2o +pkg_n2o_name = n2o +pkg_n2o_description = WebSocket Application Server +pkg_n2o_homepage = https://github.com/5HT/n2o +pkg_n2o_fetch = git +pkg_n2o_repo = https://github.com/5HT/n2o +pkg_n2o_commit = master + +PACKAGES += nat_upnp +pkg_nat_upnp_name = nat_upnp +pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD +pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp +pkg_nat_upnp_fetch = git +pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp +pkg_nat_upnp_commit = master + +PACKAGES += neo4j +pkg_neo4j_name = neo4j +pkg_neo4j_description = Erlang client library for Neo4J. +pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang +pkg_neo4j_fetch = git +pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang +pkg_neo4j_commit = master + +PACKAGES += neotoma +pkg_neotoma_name = neotoma +pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars. +pkg_neotoma_homepage = https://github.com/seancribbs/neotoma +pkg_neotoma_fetch = git +pkg_neotoma_repo = https://github.com/seancribbs/neotoma +pkg_neotoma_commit = master + +PACKAGES += newrelic +pkg_newrelic_name = newrelic +pkg_newrelic_description = Erlang library for sending metrics to New Relic +pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang +pkg_newrelic_fetch = git +pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang +pkg_newrelic_commit = master + +PACKAGES += nifty +pkg_nifty_name = nifty +pkg_nifty_description = Erlang NIF wrapper generator +pkg_nifty_homepage = https://github.com/parapluu/nifty +pkg_nifty_fetch = git +pkg_nifty_repo = https://github.com/parapluu/nifty +pkg_nifty_commit = master + +PACKAGES += nitrogen_core +pkg_nitrogen_core_name = nitrogen_core +pkg_nitrogen_core_description = The core Nitrogen library. 
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/ +pkg_nitrogen_core_fetch = git +pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core +pkg_nitrogen_core_commit = master + +PACKAGES += nkbase +pkg_nkbase_name = nkbase +pkg_nkbase_description = NkBASE distributed database +pkg_nkbase_homepage = https://github.com/Nekso/nkbase +pkg_nkbase_fetch = git +pkg_nkbase_repo = https://github.com/Nekso/nkbase +pkg_nkbase_commit = develop + +PACKAGES += nkdocker +pkg_nkdocker_name = nkdocker +pkg_nkdocker_description = Erlang Docker client +pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker +pkg_nkdocker_fetch = git +pkg_nkdocker_repo = https://github.com/Nekso/nkdocker +pkg_nkdocker_commit = master + +PACKAGES += nkpacket +pkg_nkpacket_name = nkpacket +pkg_nkpacket_description = Generic Erlang transport layer +pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket +pkg_nkpacket_fetch = git +pkg_nkpacket_repo = https://github.com/Nekso/nkpacket +pkg_nkpacket_commit = master + +PACKAGES += nksip +pkg_nksip_name = nksip +pkg_nksip_description = Erlang SIP application server +pkg_nksip_homepage = https://github.com/kalta/nksip +pkg_nksip_fetch = git +pkg_nksip_repo = https://github.com/kalta/nksip +pkg_nksip_commit = master + +PACKAGES += nodefinder +pkg_nodefinder_name = nodefinder +pkg_nodefinder_description = automatic node discovery via UDP multicast +pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder +pkg_nodefinder_fetch = git +pkg_nodefinder_repo = https://github.com/okeuday/nodefinder +pkg_nodefinder_commit = master + +PACKAGES += nprocreg +pkg_nprocreg_name = nprocreg +pkg_nprocreg_description = Minimal Distributed Erlang Process Registry +pkg_nprocreg_homepage = http://nitrogenproject.com/ +pkg_nprocreg_fetch = git +pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg +pkg_nprocreg_commit = master + +PACKAGES += oauth +pkg_oauth_name = oauth +pkg_oauth_description = An Erlang OAuth 1.0 implementation +pkg_oauth_homepage = https://github.com/tim/erlang-oauth +pkg_oauth_fetch = git +pkg_oauth_repo = https://github.com/tim/erlang-oauth +pkg_oauth_commit = master + +PACKAGES += oauth2 +pkg_oauth2_name = oauth2 +pkg_oauth2_description = Erlang Oauth2 implementation +pkg_oauth2_homepage = https://github.com/kivra/oauth2 +pkg_oauth2_fetch = git +pkg_oauth2_repo = https://github.com/kivra/oauth2 +pkg_oauth2_commit = master + +PACKAGES += observer_cli +pkg_observer_cli_name = observer_cli +pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line +pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli +pkg_observer_cli_fetch = git +pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli +pkg_observer_cli_commit = master + +PACKAGES += octopus +pkg_octopus_name = octopus +pkg_octopus_description = Small and flexible pool manager written in Erlang +pkg_octopus_homepage = https://github.com/erlangbureau/octopus +pkg_octopus_fetch = git +pkg_octopus_repo = https://github.com/erlangbureau/octopus +pkg_octopus_commit = master + +PACKAGES += of_protocol +pkg_of_protocol_name = of_protocol +pkg_of_protocol_description = OpenFlow Protocol Library for Erlang +pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol +pkg_of_protocol_fetch = git +pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol +pkg_of_protocol_commit = master + +PACKAGES += opencouch +pkg_opencouch_name = couch +pkg_opencouch_description = An embeddable document-oriented database compatible with Apache CouchDB 
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch +pkg_opencouch_fetch = git +pkg_opencouch_repo = https://github.com/benoitc/opencouch +pkg_opencouch_commit = master + +PACKAGES += openflow +pkg_openflow_name = openflow +pkg_openflow_description = An OpenFlow controller written in pure erlang +pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow +pkg_openflow_fetch = git +pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow +pkg_openflow_commit = master + +PACKAGES += openid +pkg_openid_name = openid +pkg_openid_description = Erlang OpenID +pkg_openid_homepage = https://github.com/brendonh/erl_openid +pkg_openid_fetch = git +pkg_openid_repo = https://github.com/brendonh/erl_openid +pkg_openid_commit = master + +PACKAGES += openpoker +pkg_openpoker_name = openpoker +pkg_openpoker_description = Genesis Texas hold'em Game Server +pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker +pkg_openpoker_fetch = git +pkg_openpoker_repo = https://github.com/hpyhacking/openpoker +pkg_openpoker_commit = master + +PACKAGES += otpbp +pkg_otpbp_name = otpbp +pkg_otpbp_description = Parse transformer for use new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19) +pkg_otpbp_homepage = https://github.com/Ledest/otpbp +pkg_otpbp_fetch = git +pkg_otpbp_repo = https://github.com/Ledest/otpbp +pkg_otpbp_commit = master + +PACKAGES += pal +pkg_pal_name = pal +pkg_pal_description = Pragmatic Authentication Library +pkg_pal_homepage = https://github.com/manifest/pal +pkg_pal_fetch = git +pkg_pal_repo = https://github.com/manifest/pal +pkg_pal_commit = master + +PACKAGES += parse_trans +pkg_parse_trans_name = parse_trans +pkg_parse_trans_description = Parse transform utilities for Erlang +pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans +pkg_parse_trans_fetch = git +pkg_parse_trans_repo = https://github.com/uwiger/parse_trans +pkg_parse_trans_commit = master + +PACKAGES += parsexml +pkg_parsexml_name = parsexml +pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API +pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml +pkg_parsexml_fetch = git +pkg_parsexml_repo = https://github.com/maxlapshin/parsexml +pkg_parsexml_commit = master + +PACKAGES += partisan +pkg_partisan_name = partisan +pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir. +pkg_partisan_homepage = http://partisan.cloud +pkg_partisan_fetch = git +pkg_partisan_repo = https://github.com/lasp-lang/partisan +pkg_partisan_commit = master + +PACKAGES += pegjs +pkg_pegjs_name = pegjs +pkg_pegjs_description = An implementation of PEG.js grammar for Erlang. 
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs +pkg_pegjs_fetch = git +pkg_pegjs_repo = https://github.com/dmitriid/pegjs +pkg_pegjs_commit = master + +PACKAGES += percept2 +pkg_percept2_name = percept2 +pkg_percept2_description = Concurrent profiling tool for Erlang +pkg_percept2_homepage = https://github.com/huiqing/percept2 +pkg_percept2_fetch = git +pkg_percept2_repo = https://github.com/huiqing/percept2 +pkg_percept2_commit = master + +PACKAGES += pgo +pkg_pgo_name = pgo +pkg_pgo_description = Erlang Postgres client and connection pool +pkg_pgo_homepage = https://github.com/erleans/pgo.git +pkg_pgo_fetch = git +pkg_pgo_repo = https://github.com/erleans/pgo.git +pkg_pgo_commit = master + +PACKAGES += pgsql +pkg_pgsql_name = pgsql +pkg_pgsql_description = Erlang PostgreSQL driver +pkg_pgsql_homepage = https://github.com/semiocast/pgsql +pkg_pgsql_fetch = git +pkg_pgsql_repo = https://github.com/semiocast/pgsql +pkg_pgsql_commit = master + +PACKAGES += pkgx +pkg_pkgx_name = pkgx +pkg_pkgx_description = Build .deb packages from Erlang releases +pkg_pkgx_homepage = https://github.com/arjan/pkgx +pkg_pkgx_fetch = git +pkg_pkgx_repo = https://github.com/arjan/pkgx +pkg_pkgx_commit = master + +PACKAGES += pkt +pkg_pkt_name = pkt +pkg_pkt_description = Erlang network protocol library +pkg_pkt_homepage = https://github.com/msantos/pkt +pkg_pkt_fetch = git +pkg_pkt_repo = https://github.com/msantos/pkt +pkg_pkt_commit = master + +PACKAGES += plain_fsm +pkg_plain_fsm_name = plain_fsm +pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs. +pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm +pkg_plain_fsm_fetch = git +pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm +pkg_plain_fsm_commit = master + +PACKAGES += plumtree +pkg_plumtree_name = plumtree +pkg_plumtree_description = Epidemic Broadcast Trees +pkg_plumtree_homepage = https://github.com/helium/plumtree +pkg_plumtree_fetch = git +pkg_plumtree_repo = https://github.com/helium/plumtree +pkg_plumtree_commit = master + +PACKAGES += pmod_transform +pkg_pmod_transform_name = pmod_transform +pkg_pmod_transform_description = Parse transform for parameterized modules +pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform +pkg_pmod_transform_fetch = git +pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform +pkg_pmod_transform_commit = master + +PACKAGES += pobox +pkg_pobox_name = pobox +pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang +pkg_pobox_homepage = https://github.com/ferd/pobox +pkg_pobox_fetch = git +pkg_pobox_repo = https://github.com/ferd/pobox +pkg_pobox_commit = master + +PACKAGES += ponos +pkg_ponos_name = ponos +pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang +pkg_ponos_homepage = https://github.com/klarna/ponos +pkg_ponos_fetch = git +pkg_ponos_repo = https://github.com/klarna/ponos +pkg_ponos_commit = master + +PACKAGES += poolboy +pkg_poolboy_name = poolboy +pkg_poolboy_description = A hunky Erlang worker pool factory +pkg_poolboy_homepage = https://github.com/devinus/poolboy +pkg_poolboy_fetch = git +pkg_poolboy_repo = https://github.com/devinus/poolboy +pkg_poolboy_commit = master + +PACKAGES += pooler +pkg_pooler_name = pooler +pkg_pooler_description = An OTP Process Pool Application +pkg_pooler_homepage = https://github.com/seth/pooler +pkg_pooler_fetch = git +pkg_pooler_repo = https://github.com/seth/pooler +pkg_pooler_commit = master + +PACKAGES += 
pqueue +pkg_pqueue_name = pqueue +pkg_pqueue_description = Erlang Priority Queues +pkg_pqueue_homepage = https://github.com/okeuday/pqueue +pkg_pqueue_fetch = git +pkg_pqueue_repo = https://github.com/okeuday/pqueue +pkg_pqueue_commit = master + +PACKAGES += procket +pkg_procket_name = procket +pkg_procket_description = Erlang interface to low level socket operations +pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket +pkg_procket_fetch = git +pkg_procket_repo = https://github.com/msantos/procket +pkg_procket_commit = master + +PACKAGES += prometheus +pkg_prometheus_name = prometheus +pkg_prometheus_description = Prometheus.io client in Erlang +pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl +pkg_prometheus_fetch = git +pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl +pkg_prometheus_commit = master + +PACKAGES += prop +pkg_prop_name = prop +pkg_prop_description = An Erlang code scaffolding and generator system. +pkg_prop_homepage = https://github.com/nuex/prop +pkg_prop_fetch = git +pkg_prop_repo = https://github.com/nuex/prop +pkg_prop_commit = master + +PACKAGES += proper +pkg_proper_name = proper +pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang. +pkg_proper_homepage = http://proper.softlab.ntua.gr +pkg_proper_fetch = git +pkg_proper_repo = https://github.com/manopapad/proper +pkg_proper_commit = master + +PACKAGES += props +pkg_props_name = props +pkg_props_description = Property structure library +pkg_props_homepage = https://github.com/greyarea/props +pkg_props_fetch = git +pkg_props_repo = https://github.com/greyarea/props +pkg_props_commit = master + +PACKAGES += protobuffs +pkg_protobuffs_name = protobuffs +pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs. +pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs +pkg_protobuffs_fetch = git +pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs +pkg_protobuffs_commit = master + +PACKAGES += psycho +pkg_psycho_name = psycho +pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware. +pkg_psycho_homepage = https://github.com/gar1t/psycho +pkg_psycho_fetch = git +pkg_psycho_repo = https://github.com/gar1t/psycho +pkg_psycho_commit = master + +PACKAGES += purity +pkg_purity_name = purity +pkg_purity_description = A side-effect analyzer for Erlang +pkg_purity_homepage = https://github.com/mpitid/purity +pkg_purity_fetch = git +pkg_purity_repo = https://github.com/mpitid/purity +pkg_purity_commit = master + +PACKAGES += push_service +pkg_push_service_name = push_service +pkg_push_service_description = Push service +pkg_push_service_homepage = https://github.com/hairyhum/push_service +pkg_push_service_fetch = git +pkg_push_service_repo = https://github.com/hairyhum/push_service +pkg_push_service_commit = master + +PACKAGES += qdate +pkg_qdate_name = qdate +pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang. 
+pkg_qdate_homepage = https://github.com/choptastic/qdate +pkg_qdate_fetch = git +pkg_qdate_repo = https://github.com/choptastic/qdate +pkg_qdate_commit = master + +PACKAGES += qrcode +pkg_qrcode_name = qrcode +pkg_qrcode_description = QR Code encoder in Erlang +pkg_qrcode_homepage = https://github.com/komone/qrcode +pkg_qrcode_fetch = git +pkg_qrcode_repo = https://github.com/komone/qrcode +pkg_qrcode_commit = master + +PACKAGES += quest +pkg_quest_name = quest +pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang. +pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest +pkg_quest_fetch = git +pkg_quest_repo = https://github.com/eriksoe/ErlangQuest +pkg_quest_commit = master + +PACKAGES += quickrand +pkg_quickrand_name = quickrand +pkg_quickrand_description = Quick Erlang Random Number Generation +pkg_quickrand_homepage = https://github.com/okeuday/quickrand +pkg_quickrand_fetch = git +pkg_quickrand_repo = https://github.com/okeuday/quickrand +pkg_quickrand_commit = master + +PACKAGES += rabbit +pkg_rabbit_name = rabbit +pkg_rabbit_description = RabbitMQ Server +pkg_rabbit_homepage = https://www.rabbitmq.com/ +pkg_rabbit_fetch = git +pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git +pkg_rabbit_commit = master + +PACKAGES += rabbit_exchange_type_riak +pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak +pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak +pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange +pkg_rabbit_exchange_type_riak_fetch = git +pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange +pkg_rabbit_exchange_type_riak_commit = master + +PACKAGES += rack +pkg_rack_name = rack +pkg_rack_description = Rack handler for erlang +pkg_rack_homepage = https://github.com/erlyvideo/rack +pkg_rack_fetch = git +pkg_rack_repo = https://github.com/erlyvideo/rack +pkg_rack_commit = master + +PACKAGES += radierl +pkg_radierl_name = radierl +pkg_radierl_description = RADIUS protocol stack implemented in Erlang. +pkg_radierl_homepage = https://github.com/vances/radierl +pkg_radierl_fetch = git +pkg_radierl_repo = https://github.com/vances/radierl +pkg_radierl_commit = master + +PACKAGES += rafter +pkg_rafter_name = rafter +pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol +pkg_rafter_homepage = https://github.com/andrewjstone/rafter +pkg_rafter_fetch = git +pkg_rafter_repo = https://github.com/andrewjstone/rafter +pkg_rafter_commit = master + +PACKAGES += ranch +pkg_ranch_name = ranch +pkg_ranch_description = Socket acceptor pool for TCP protocols. +pkg_ranch_homepage = http://ninenines.eu +pkg_ranch_fetch = git +pkg_ranch_repo = https://github.com/ninenines/ranch +pkg_ranch_commit = 1.2.1 + +PACKAGES += rbeacon +pkg_rbeacon_name = rbeacon +pkg_rbeacon_description = LAN discovery and presence in Erlang. +pkg_rbeacon_homepage = https://github.com/refuge/rbeacon +pkg_rbeacon_fetch = git +pkg_rbeacon_repo = https://github.com/refuge/rbeacon +pkg_rbeacon_commit = master + +PACKAGES += rebar +pkg_rebar_name = rebar +pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases. 
+pkg_rebar_homepage = http://www.rebar3.org +pkg_rebar_fetch = git +pkg_rebar_repo = https://github.com/rebar/rebar3 +pkg_rebar_commit = master + +PACKAGES += rebus +pkg_rebus_name = rebus +pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang. +pkg_rebus_homepage = https://github.com/olle/rebus +pkg_rebus_fetch = git +pkg_rebus_repo = https://github.com/olle/rebus +pkg_rebus_commit = master + +PACKAGES += rec2json +pkg_rec2json_name = rec2json +pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily. +pkg_rec2json_homepage = https://github.com/lordnull/rec2json +pkg_rec2json_fetch = git +pkg_rec2json_repo = https://github.com/lordnull/rec2json +pkg_rec2json_commit = master + +PACKAGES += recon +pkg_recon_name = recon +pkg_recon_description = Collection of functions and scripts to debug Erlang in production. +pkg_recon_homepage = https://github.com/ferd/recon +pkg_recon_fetch = git +pkg_recon_repo = https://github.com/ferd/recon +pkg_recon_commit = master + +PACKAGES += record_info +pkg_record_info_name = record_info +pkg_record_info_description = Convert between record and proplist +pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info +pkg_record_info_fetch = git +pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info +pkg_record_info_commit = master + +PACKAGES += redgrid +pkg_redgrid_name = redgrid +pkg_redgrid_description = automatic Erlang node discovery via redis +pkg_redgrid_homepage = https://github.com/jkvor/redgrid +pkg_redgrid_fetch = git +pkg_redgrid_repo = https://github.com/jkvor/redgrid +pkg_redgrid_commit = master + +PACKAGES += redo +pkg_redo_name = redo +pkg_redo_description = pipelined erlang redis client +pkg_redo_homepage = https://github.com/jkvor/redo +pkg_redo_fetch = git +pkg_redo_repo = https://github.com/jkvor/redo +pkg_redo_commit = master + +PACKAGES += reload_mk +pkg_reload_mk_name = reload_mk +pkg_reload_mk_description = Live reload plugin for erlang.mk. +pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk +pkg_reload_mk_fetch = git +pkg_reload_mk_repo = https://github.com/bullno1/reload.mk +pkg_reload_mk_commit = master + +PACKAGES += reltool_util +pkg_reltool_util_name = reltool_util +pkg_reltool_util_description = Erlang reltool utility functionality application +pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util +pkg_reltool_util_fetch = git +pkg_reltool_util_repo = https://github.com/okeuday/reltool_util +pkg_reltool_util_commit = master + +PACKAGES += relx +pkg_relx_name = relx +pkg_relx_description = Sane, simple release creation for Erlang +pkg_relx_homepage = https://github.com/erlware/relx +pkg_relx_fetch = git +pkg_relx_repo = https://github.com/erlware/relx +pkg_relx_commit = master + +PACKAGES += resource_discovery +pkg_resource_discovery_name = resource_discovery +pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster. 
+pkg_resource_discovery_homepage = http://erlware.org/ +pkg_resource_discovery_fetch = git +pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery +pkg_resource_discovery_commit = master + +PACKAGES += restc +pkg_restc_name = restc +pkg_restc_description = Erlang Rest Client +pkg_restc_homepage = https://github.com/kivra/restclient +pkg_restc_fetch = git +pkg_restc_repo = https://github.com/kivra/restclient +pkg_restc_commit = master + +PACKAGES += rfc4627_jsonrpc +pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc +pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation. +pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627 +pkg_rfc4627_jsonrpc_fetch = git +pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627 +pkg_rfc4627_jsonrpc_commit = master + +PACKAGES += riak_control +pkg_riak_control_name = riak_control +pkg_riak_control_description = Webmachine-based administration interface for Riak. +pkg_riak_control_homepage = https://github.com/basho/riak_control +pkg_riak_control_fetch = git +pkg_riak_control_repo = https://github.com/basho/riak_control +pkg_riak_control_commit = master + +PACKAGES += riak_core +pkg_riak_core_name = riak_core +pkg_riak_core_description = Distributed systems infrastructure used by Riak. +pkg_riak_core_homepage = https://github.com/basho/riak_core +pkg_riak_core_fetch = git +pkg_riak_core_repo = https://github.com/basho/riak_core +pkg_riak_core_commit = master + +PACKAGES += riak_dt +pkg_riak_dt_name = riak_dt +pkg_riak_dt_description = Convergent replicated datatypes in Erlang +pkg_riak_dt_homepage = https://github.com/basho/riak_dt +pkg_riak_dt_fetch = git +pkg_riak_dt_repo = https://github.com/basho/riak_dt +pkg_riak_dt_commit = master + +PACKAGES += riak_ensemble +pkg_riak_ensemble_name = riak_ensemble +pkg_riak_ensemble_description = Multi-Paxos framework in Erlang +pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble +pkg_riak_ensemble_fetch = git +pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble +pkg_riak_ensemble_commit = master + +PACKAGES += riak_kv +pkg_riak_kv_name = riak_kv +pkg_riak_kv_description = Riak Key/Value Store +pkg_riak_kv_homepage = https://github.com/basho/riak_kv +pkg_riak_kv_fetch = git +pkg_riak_kv_repo = https://github.com/basho/riak_kv +pkg_riak_kv_commit = master + +PACKAGES += riak_pg +pkg_riak_pg_name = riak_pg +pkg_riak_pg_description = Distributed process groups with riak_core. 
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg +pkg_riak_pg_fetch = git +pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg +pkg_riak_pg_commit = master + +PACKAGES += riak_pipe +pkg_riak_pipe_name = riak_pipe +pkg_riak_pipe_description = Riak Pipelines +pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe +pkg_riak_pipe_fetch = git +pkg_riak_pipe_repo = https://github.com/basho/riak_pipe +pkg_riak_pipe_commit = master + +PACKAGES += riak_sysmon +pkg_riak_sysmon_name = riak_sysmon +pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages +pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon +pkg_riak_sysmon_fetch = git +pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon +pkg_riak_sysmon_commit = master + +PACKAGES += riak_test +pkg_riak_test_name = riak_test +pkg_riak_test_description = I'm in your cluster, testing your riaks +pkg_riak_test_homepage = https://github.com/basho/riak_test +pkg_riak_test_fetch = git +pkg_riak_test_repo = https://github.com/basho/riak_test +pkg_riak_test_commit = master + +PACKAGES += riakc +pkg_riakc_name = riakc +pkg_riakc_description = Erlang clients for Riak. +pkg_riakc_homepage = https://github.com/basho/riak-erlang-client +pkg_riakc_fetch = git +pkg_riakc_repo = https://github.com/basho/riak-erlang-client +pkg_riakc_commit = master + +PACKAGES += riakhttpc +pkg_riakhttpc_name = riakhttpc +pkg_riakhttpc_description = Riak Erlang client using the HTTP interface +pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client +pkg_riakhttpc_fetch = git +pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client +pkg_riakhttpc_commit = master + +PACKAGES += riaknostic +pkg_riaknostic_name = riaknostic +pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap +pkg_riaknostic_homepage = https://github.com/basho/riaknostic +pkg_riaknostic_fetch = git +pkg_riaknostic_repo = https://github.com/basho/riaknostic +pkg_riaknostic_commit = master + +PACKAGES += riakpool +pkg_riakpool_name = riakpool +pkg_riakpool_description = erlang riak client pool +pkg_riakpool_homepage = https://github.com/dweldon/riakpool +pkg_riakpool_fetch = git +pkg_riakpool_repo = https://github.com/dweldon/riakpool +pkg_riakpool_commit = master + +PACKAGES += rivus_cep +pkg_rivus_cep_name = rivus_cep +pkg_rivus_cep_description = Complex event processing in Erlang +pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep +pkg_rivus_cep_fetch = git +pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep +pkg_rivus_cep_commit = master + +PACKAGES += rlimit +pkg_rlimit_name = rlimit +pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent +pkg_rlimit_homepage = https://github.com/jlouis/rlimit +pkg_rlimit_fetch = git +pkg_rlimit_repo = https://github.com/jlouis/rlimit +pkg_rlimit_commit = master + +PACKAGES += rust_mk +pkg_rust_mk_name = rust_mk +pkg_rust_mk_description = Build Rust crates in an Erlang application +pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk +pkg_rust_mk_fetch = git +pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk +pkg_rust_mk_commit = master + +PACKAGES += safetyvalve +pkg_safetyvalve_name = safetyvalve +pkg_safetyvalve_description = A safety valve for your erlang node +pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve +pkg_safetyvalve_fetch = git +pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve +pkg_safetyvalve_commit = master + +PACKAGES += 
seestar +pkg_seestar_name = seestar +pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol +pkg_seestar_homepage = https://github.com/iamaleksey/seestar +pkg_seestar_fetch = git +pkg_seestar_repo = https://github.com/iamaleksey/seestar +pkg_seestar_commit = master + +PACKAGES += service +pkg_service_name = service +pkg_service_description = A minimal Erlang behavior for creating CloudI internal services +pkg_service_homepage = http://cloudi.org/ +pkg_service_fetch = git +pkg_service_repo = https://github.com/CloudI/service +pkg_service_commit = master + +PACKAGES += setup +pkg_setup_name = setup +pkg_setup_description = Generic setup utility for Erlang-based systems +pkg_setup_homepage = https://github.com/uwiger/setup +pkg_setup_fetch = git +pkg_setup_repo = https://github.com/uwiger/setup +pkg_setup_commit = master + +PACKAGES += sext +pkg_sext_name = sext +pkg_sext_description = Sortable Erlang Term Serialization +pkg_sext_homepage = https://github.com/uwiger/sext +pkg_sext_fetch = git +pkg_sext_repo = https://github.com/uwiger/sext +pkg_sext_commit = master + +PACKAGES += sfmt +pkg_sfmt_name = sfmt +pkg_sfmt_description = SFMT pseudo random number generator for Erlang. +pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang +pkg_sfmt_fetch = git +pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang +pkg_sfmt_commit = master + +PACKAGES += sgte +pkg_sgte_name = sgte +pkg_sgte_description = A simple Erlang Template Engine +pkg_sgte_homepage = https://github.com/filippo/sgte +pkg_sgte_fetch = git +pkg_sgte_repo = https://github.com/filippo/sgte +pkg_sgte_commit = master + +PACKAGES += sheriff +pkg_sheriff_name = sheriff +pkg_sheriff_description = Parse transform for type based validation. +pkg_sheriff_homepage = http://ninenines.eu +pkg_sheriff_fetch = git +pkg_sheriff_repo = https://github.com/extend/sheriff +pkg_sheriff_commit = master + +PACKAGES += shotgun +pkg_shotgun_name = shotgun +pkg_shotgun_description = better than just a gun +pkg_shotgun_homepage = https://github.com/inaka/shotgun +pkg_shotgun_fetch = git +pkg_shotgun_repo = https://github.com/inaka/shotgun +pkg_shotgun_commit = master + +PACKAGES += sidejob +pkg_sidejob_name = sidejob +pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang +pkg_sidejob_homepage = https://github.com/basho/sidejob +pkg_sidejob_fetch = git +pkg_sidejob_repo = https://github.com/basho/sidejob +pkg_sidejob_commit = master + +PACKAGES += sieve +pkg_sieve_name = sieve +pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang +pkg_sieve_homepage = https://github.com/benoitc/sieve +pkg_sieve_fetch = git +pkg_sieve_repo = https://github.com/benoitc/sieve +pkg_sieve_commit = master + +PACKAGES += sighandler +pkg_sighandler_name = sighandler +pkg_sighandler_description = Handle UNIX signals in Erlang +pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler +pkg_sighandler_fetch = git +pkg_sighandler_repo = https://github.com/jkingsbery/sighandler +pkg_sighandler_commit = master + +PACKAGES += simhash +pkg_simhash_name = simhash +pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data. +pkg_simhash_homepage = https://github.com/ferd/simhash +pkg_simhash_fetch = git +pkg_simhash_repo = https://github.com/ferd/simhash +pkg_simhash_commit = master + +PACKAGES += simple_bridge +pkg_simple_bridge_name = simple_bridge +pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers. 
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge +pkg_simple_bridge_fetch = git +pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge +pkg_simple_bridge_commit = master + +PACKAGES += simple_oauth2 +pkg_simple_oauth2_name = simple_oauth2 +pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured) +pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2 +pkg_simple_oauth2_fetch = git +pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2 +pkg_simple_oauth2_commit = master + +PACKAGES += skel +pkg_skel_name = skel +pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang +pkg_skel_homepage = https://github.com/ParaPhrase/skel +pkg_skel_fetch = git +pkg_skel_repo = https://github.com/ParaPhrase/skel +pkg_skel_commit = master + +PACKAGES += slack +pkg_slack_name = slack +pkg_slack_description = Minimal slack notification OTP library. +pkg_slack_homepage = https://github.com/DonBranson/slack +pkg_slack_fetch = git +pkg_slack_repo = https://github.com/DonBranson/slack.git +pkg_slack_commit = master + +PACKAGES += smother +pkg_smother_name = smother +pkg_smother_description = Extended code coverage metrics for Erlang. +pkg_smother_homepage = https://ramsay-t.github.io/Smother/ +pkg_smother_fetch = git +pkg_smother_repo = https://github.com/ramsay-t/Smother +pkg_smother_commit = master + +PACKAGES += snappyer +pkg_snappyer_name = snappyer +pkg_snappyer_description = Snappy as nif for Erlang +pkg_snappyer_homepage = https://github.com/zmstone/snappyer +pkg_snappyer_fetch = git +pkg_snappyer_repo = https://github.com/zmstone/snappyer.git +pkg_snappyer_commit = master + +PACKAGES += social +pkg_social_name = social +pkg_social_description = Cowboy handler for social login via OAuth2 providers +pkg_social_homepage = https://github.com/dvv/social +pkg_social_fetch = git +pkg_social_repo = https://github.com/dvv/social +pkg_social_commit = master + +PACKAGES += spapi_router +pkg_spapi_router_name = spapi_router +pkg_spapi_router_description = Partially-connected Erlang clustering +pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router +pkg_spapi_router_fetch = git +pkg_spapi_router_repo = https://github.com/spilgames/spapi-router +pkg_spapi_router_commit = master + +PACKAGES += sqerl +pkg_sqerl_name = sqerl +pkg_sqerl_description = An Erlang-flavoured SQL DSL +pkg_sqerl_homepage = https://github.com/hairyhum/sqerl +pkg_sqerl_fetch = git +pkg_sqerl_repo = https://github.com/hairyhum/sqerl +pkg_sqerl_commit = master + +PACKAGES += srly +pkg_srly_name = srly +pkg_srly_description = Native Erlang Unix serial interface +pkg_srly_homepage = https://github.com/msantos/srly +pkg_srly_fetch = git +pkg_srly_repo = https://github.com/msantos/srly +pkg_srly_commit = master + +PACKAGES += sshrpc +pkg_sshrpc_name = sshrpc +pkg_sshrpc_description = Erlang SSH RPC module (experimental) +pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc +pkg_sshrpc_fetch = git +pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc +pkg_sshrpc_commit = master + +PACKAGES += stable +pkg_stable_name = stable +pkg_stable_description = Library of assorted helpers for Cowboy web server. 
+pkg_stable_homepage = https://github.com/dvv/stable +pkg_stable_fetch = git +pkg_stable_repo = https://github.com/dvv/stable +pkg_stable_commit = master + +PACKAGES += statebox +pkg_statebox_name = statebox +pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak. +pkg_statebox_homepage = https://github.com/mochi/statebox +pkg_statebox_fetch = git +pkg_statebox_repo = https://github.com/mochi/statebox +pkg_statebox_commit = master + +PACKAGES += statebox_riak +pkg_statebox_riak_name = statebox_riak +pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media. +pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak +pkg_statebox_riak_fetch = git +pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak +pkg_statebox_riak_commit = master + +PACKAGES += statman +pkg_statman_name = statman +pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM +pkg_statman_homepage = https://github.com/knutin/statman +pkg_statman_fetch = git +pkg_statman_repo = https://github.com/knutin/statman +pkg_statman_commit = master + +PACKAGES += statsderl +pkg_statsderl_name = statsderl +pkg_statsderl_description = StatsD client (erlang) +pkg_statsderl_homepage = https://github.com/lpgauth/statsderl +pkg_statsderl_fetch = git +pkg_statsderl_repo = https://github.com/lpgauth/statsderl +pkg_statsderl_commit = master + +PACKAGES += stdinout_pool +pkg_stdinout_pool_name = stdinout_pool +pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication. +pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool +pkg_stdinout_pool_fetch = git +pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool +pkg_stdinout_pool_commit = master + +PACKAGES += stockdb +pkg_stockdb_name = stockdb +pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang +pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb +pkg_stockdb_fetch = git +pkg_stockdb_repo = https://github.com/maxlapshin/stockdb +pkg_stockdb_commit = master + +PACKAGES += stripe +pkg_stripe_name = stripe +pkg_stripe_description = Erlang interface to the stripe.com API +pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang +pkg_stripe_fetch = git +pkg_stripe_repo = https://github.com/mattsta/stripe-erlang +pkg_stripe_commit = v1 + +PACKAGES += subproc +pkg_subproc_name = subproc +pkg_subproc_description = unix subprocess manager with {active,once|false} modes +pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc +pkg_subproc_fetch = git +pkg_subproc_repo = https://github.com/dozzie/subproc +pkg_subproc_commit = v0.1.0 + +PACKAGES += supervisor3 +pkg_supervisor3_name = supervisor3 +pkg_supervisor3_description = OTP supervisor with additional strategies +pkg_supervisor3_homepage = https://github.com/klarna/supervisor3 +pkg_supervisor3_fetch = git +pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git +pkg_supervisor3_commit = master + +PACKAGES += surrogate +pkg_surrogate_name = surrogate +pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes. 
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate +pkg_surrogate_fetch = git +pkg_surrogate_repo = https://github.com/skruger/Surrogate +pkg_surrogate_commit = master + +PACKAGES += swab +pkg_swab_name = swab +pkg_swab_description = General purpose buffer handling module +pkg_swab_homepage = https://github.com/crownedgrouse/swab +pkg_swab_fetch = git +pkg_swab_repo = https://github.com/crownedgrouse/swab +pkg_swab_commit = master + +PACKAGES += swarm +pkg_swarm_name = swarm +pkg_swarm_description = Fast and simple acceptor pool for Erlang +pkg_swarm_homepage = https://github.com/jeremey/swarm +pkg_swarm_fetch = git +pkg_swarm_repo = https://github.com/jeremey/swarm +pkg_swarm_commit = master + +PACKAGES += switchboard +pkg_switchboard_name = switchboard +pkg_switchboard_description = A framework for processing email using worker plugins. +pkg_switchboard_homepage = https://github.com/thusfresh/switchboard +pkg_switchboard_fetch = git +pkg_switchboard_repo = https://github.com/thusfresh/switchboard +pkg_switchboard_commit = master + +PACKAGES += syn +pkg_syn_name = syn +pkg_syn_description = A global Process Registry and Process Group manager for Erlang. +pkg_syn_homepage = https://github.com/ostinelli/syn +pkg_syn_fetch = git +pkg_syn_repo = https://github.com/ostinelli/syn +pkg_syn_commit = master + +PACKAGES += sync +pkg_sync_name = sync +pkg_sync_description = On-the-fly recompiling and reloading in Erlang. +pkg_sync_homepage = https://github.com/rustyio/sync +pkg_sync_fetch = git +pkg_sync_repo = https://github.com/rustyio/sync +pkg_sync_commit = master + +PACKAGES += syntaxerl +pkg_syntaxerl_name = syntaxerl +pkg_syntaxerl_description = Syntax checker for Erlang +pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl +pkg_syntaxerl_fetch = git +pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl +pkg_syntaxerl_commit = master + +PACKAGES += syslog +pkg_syslog_name = syslog +pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3) +pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog +pkg_syslog_fetch = git +pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog +pkg_syslog_commit = master + +PACKAGES += taskforce +pkg_taskforce_name = taskforce +pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks. +pkg_taskforce_homepage = https://github.com/g-andrade/taskforce +pkg_taskforce_fetch = git +pkg_taskforce_repo = https://github.com/g-andrade/taskforce +pkg_taskforce_commit = master + +PACKAGES += tddreloader +pkg_tddreloader_name = tddreloader +pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes +pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader +pkg_tddreloader_fetch = git +pkg_tddreloader_repo = https://github.com/version2beta/tddreloader +pkg_tddreloader_commit = master + +PACKAGES += tempo +pkg_tempo_name = tempo +pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang. +pkg_tempo_homepage = https://github.com/selectel/tempo +pkg_tempo_fetch = git +pkg_tempo_repo = https://github.com/selectel/tempo +pkg_tempo_commit = master + +PACKAGES += ticktick +pkg_ticktick_name = ticktick +pkg_ticktick_description = Ticktick is an id generator for message service. 
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick +pkg_ticktick_fetch = git +pkg_ticktick_repo = https://github.com/ericliang/ticktick +pkg_ticktick_commit = master + +PACKAGES += tinymq +pkg_tinymq_name = tinymq +pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue +pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq +pkg_tinymq_fetch = git +pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq +pkg_tinymq_commit = master + +PACKAGES += tinymt +pkg_tinymt_name = tinymt +pkg_tinymt_description = TinyMT pseudo random number generator for Erlang. +pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang +pkg_tinymt_fetch = git +pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang +pkg_tinymt_commit = master + +PACKAGES += tirerl +pkg_tirerl_name = tirerl +pkg_tirerl_description = Erlang interface to Elastic Search +pkg_tirerl_homepage = https://github.com/inaka/tirerl +pkg_tirerl_fetch = git +pkg_tirerl_repo = https://github.com/inaka/tirerl +pkg_tirerl_commit = master + +PACKAGES += toml +pkg_toml_name = toml +pkg_toml_description = TOML (0.4.0) config parser +pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML +pkg_toml_fetch = git +pkg_toml_repo = https://github.com/dozzie/toml +pkg_toml_commit = v0.2.0 + +PACKAGES += traffic_tools +pkg_traffic_tools_name = traffic_tools +pkg_traffic_tools_description = Simple traffic limiting library +pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools +pkg_traffic_tools_fetch = git +pkg_traffic_tools_repo = https://github.com/systra/traffic_tools +pkg_traffic_tools_commit = master + +PACKAGES += trails +pkg_trails_name = trails +pkg_trails_description = A couple of improvements over Cowboy Routes +pkg_trails_homepage = http://inaka.github.io/cowboy-trails/ +pkg_trails_fetch = git +pkg_trails_repo = https://github.com/inaka/cowboy-trails +pkg_trails_commit = master + +PACKAGES += trane +pkg_trane_name = trane +pkg_trane_description = SAX style broken HTML parser in Erlang +pkg_trane_homepage = https://github.com/massemanet/trane +pkg_trane_fetch = git +pkg_trane_repo = https://github.com/massemanet/trane +pkg_trane_commit = master + +PACKAGES += transit +pkg_transit_name = transit +pkg_transit_description = transit format for erlang +pkg_transit_homepage = https://github.com/isaiah/transit-erlang +pkg_transit_fetch = git +pkg_transit_repo = https://github.com/isaiah/transit-erlang +pkg_transit_commit = master + +PACKAGES += trie +pkg_trie_name = trie +pkg_trie_description = Erlang Trie Implementation +pkg_trie_homepage = https://github.com/okeuday/trie +pkg_trie_fetch = git +pkg_trie_repo = https://github.com/okeuday/trie +pkg_trie_commit = master + +PACKAGES += triq +pkg_triq_name = triq +pkg_triq_description = Trifork QuickCheck +pkg_triq_homepage = https://triq.gitlab.io +pkg_triq_fetch = git +pkg_triq_repo = https://gitlab.com/triq/triq.git +pkg_triq_commit = master + +PACKAGES += tunctl +pkg_tunctl_name = tunctl +pkg_tunctl_description = Erlang TUN/TAP interface +pkg_tunctl_homepage = https://github.com/msantos/tunctl +pkg_tunctl_fetch = git +pkg_tunctl_repo = https://github.com/msantos/tunctl +pkg_tunctl_commit = master + +PACKAGES += twerl +pkg_twerl_name = twerl +pkg_twerl_description = Erlang client for the Twitter Streaming API +pkg_twerl_homepage = https://github.com/lucaspiller/twerl +pkg_twerl_fetch = git +pkg_twerl_repo = https://github.com/lucaspiller/twerl +pkg_twerl_commit = oauth + +PACKAGES += twitter_erlang +pkg_twitter_erlang_name = twitter_erlang 
+pkg_twitter_erlang_description = An Erlang twitter client +pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter +pkg_twitter_erlang_fetch = git +pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter +pkg_twitter_erlang_commit = master + +PACKAGES += ucol_nif +pkg_ucol_nif_name = ucol_nif +pkg_ucol_nif_description = ICU based collation Erlang module +pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif +pkg_ucol_nif_fetch = git +pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif +pkg_ucol_nif_commit = master + +PACKAGES += unicorn +pkg_unicorn_name = unicorn +pkg_unicorn_description = Generic configuration server +pkg_unicorn_homepage = https://github.com/shizzard/unicorn +pkg_unicorn_fetch = git +pkg_unicorn_repo = https://github.com/shizzard/unicorn +pkg_unicorn_commit = master + +PACKAGES += unsplit +pkg_unsplit_name = unsplit +pkg_unsplit_description = Resolves conflicts in Mnesia after network splits +pkg_unsplit_homepage = https://github.com/uwiger/unsplit +pkg_unsplit_fetch = git +pkg_unsplit_repo = https://github.com/uwiger/unsplit +pkg_unsplit_commit = master + +PACKAGES += uuid +pkg_uuid_name = uuid +pkg_uuid_description = Erlang UUID Implementation +pkg_uuid_homepage = https://github.com/okeuday/uuid +pkg_uuid_fetch = git +pkg_uuid_repo = https://github.com/okeuday/uuid +pkg_uuid_commit = master + +PACKAGES += ux +pkg_ux_name = ux +pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation) +pkg_ux_homepage = https://github.com/erlang-unicode/ux +pkg_ux_fetch = git +pkg_ux_repo = https://github.com/erlang-unicode/ux +pkg_ux_commit = master + +PACKAGES += vert +pkg_vert_name = vert +pkg_vert_description = Erlang binding to libvirt virtualization API +pkg_vert_homepage = https://github.com/msantos/erlang-libvirt +pkg_vert_fetch = git +pkg_vert_repo = https://github.com/msantos/erlang-libvirt +pkg_vert_commit = master + +PACKAGES += verx +pkg_verx_name = verx +pkg_verx_description = Erlang implementation of the libvirtd remote protocol +pkg_verx_homepage = https://github.com/msantos/verx +pkg_verx_fetch = git +pkg_verx_repo = https://github.com/msantos/verx +pkg_verx_commit = master + +PACKAGES += vmq_acl +pkg_vmq_acl_name = vmq_acl +pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_acl_homepage = https://verne.mq/ +pkg_vmq_acl_fetch = git +pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl +pkg_vmq_acl_commit = master + +PACKAGES += vmq_bridge +pkg_vmq_bridge_name = vmq_bridge +pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_bridge_homepage = https://verne.mq/ +pkg_vmq_bridge_fetch = git +pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge +pkg_vmq_bridge_commit = master + +PACKAGES += vmq_graphite +pkg_vmq_graphite_name = vmq_graphite +pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_graphite_homepage = https://verne.mq/ +pkg_vmq_graphite_fetch = git +pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite +pkg_vmq_graphite_commit = master + +PACKAGES += vmq_passwd +pkg_vmq_passwd_name = vmq_passwd +pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_passwd_homepage = https://verne.mq/ +pkg_vmq_passwd_fetch = git +pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd +pkg_vmq_passwd_commit = master + +PACKAGES += vmq_server +pkg_vmq_server_name = vmq_server +pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT 
message broker +pkg_vmq_server_homepage = https://verne.mq/ +pkg_vmq_server_fetch = git +pkg_vmq_server_repo = https://github.com/erlio/vmq_server +pkg_vmq_server_commit = master + +PACKAGES += vmq_snmp +pkg_vmq_snmp_name = vmq_snmp +pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_snmp_homepage = https://verne.mq/ +pkg_vmq_snmp_fetch = git +pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp +pkg_vmq_snmp_commit = master + +PACKAGES += vmq_systree +pkg_vmq_systree_name = vmq_systree +pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_systree_homepage = https://verne.mq/ +pkg_vmq_systree_fetch = git +pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree +pkg_vmq_systree_commit = master + +PACKAGES += vmstats +pkg_vmstats_name = vmstats +pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs. +pkg_vmstats_homepage = https://github.com/ferd/vmstats +pkg_vmstats_fetch = git +pkg_vmstats_repo = https://github.com/ferd/vmstats +pkg_vmstats_commit = master + +PACKAGES += walrus +pkg_walrus_name = walrus +pkg_walrus_description = Walrus - Mustache-like Templating +pkg_walrus_homepage = https://github.com/devinus/walrus +pkg_walrus_fetch = git +pkg_walrus_repo = https://github.com/devinus/walrus +pkg_walrus_commit = master + +PACKAGES += webmachine +pkg_webmachine_name = webmachine +pkg_webmachine_description = A REST-based system for building web applications. +pkg_webmachine_homepage = https://github.com/basho/webmachine +pkg_webmachine_fetch = git +pkg_webmachine_repo = https://github.com/basho/webmachine +pkg_webmachine_commit = master + +PACKAGES += websocket_client +pkg_websocket_client_name = websocket_client +pkg_websocket_client_description = Erlang websocket client (ws and wss supported) +pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client +pkg_websocket_client_fetch = git +pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client +pkg_websocket_client_commit = master + +PACKAGES += worker_pool +pkg_worker_pool_name = worker_pool +pkg_worker_pool_description = a simple erlang worker pool +pkg_worker_pool_homepage = https://github.com/inaka/worker_pool +pkg_worker_pool_fetch = git +pkg_worker_pool_repo = https://github.com/inaka/worker_pool +pkg_worker_pool_commit = master + +PACKAGES += wrangler +pkg_wrangler_name = wrangler +pkg_wrangler_description = Import of the Wrangler svn repository. 
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html +pkg_wrangler_fetch = git +pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler +pkg_wrangler_commit = master + +PACKAGES += wsock +pkg_wsock_name = wsock +pkg_wsock_description = Erlang library to build WebSocket clients and servers +pkg_wsock_homepage = https://github.com/madtrick/wsock +pkg_wsock_fetch = git +pkg_wsock_repo = https://github.com/madtrick/wsock +pkg_wsock_commit = master + +PACKAGES += xhttpc +pkg_xhttpc_name = xhttpc +pkg_xhttpc_description = Extensible HTTP Client for Erlang +pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc +pkg_xhttpc_fetch = git +pkg_xhttpc_repo = https://github.com/seriyps/xhttpc +pkg_xhttpc_commit = master + +PACKAGES += xref_runner +pkg_xref_runner_name = xref_runner +pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref) +pkg_xref_runner_homepage = https://github.com/inaka/xref_runner +pkg_xref_runner_fetch = git +pkg_xref_runner_repo = https://github.com/inaka/xref_runner +pkg_xref_runner_commit = master + +PACKAGES += yamerl +pkg_yamerl_name = yamerl +pkg_yamerl_description = YAML 1.2 parser in pure Erlang +pkg_yamerl_homepage = https://github.com/yakaz/yamerl +pkg_yamerl_fetch = git +pkg_yamerl_repo = https://github.com/yakaz/yamerl +pkg_yamerl_commit = master + +PACKAGES += yamler +pkg_yamler_name = yamler +pkg_yamler_description = libyaml-based yaml loader for Erlang +pkg_yamler_homepage = https://github.com/goertzenator/yamler +pkg_yamler_fetch = git +pkg_yamler_repo = https://github.com/goertzenator/yamler +pkg_yamler_commit = master + +PACKAGES += yaws +pkg_yaws_name = yaws +pkg_yaws_description = Yaws webserver +pkg_yaws_homepage = http://yaws.hyber.org +pkg_yaws_fetch = git +pkg_yaws_repo = https://github.com/klacke/yaws +pkg_yaws_commit = master + +PACKAGES += zab_engine +pkg_zab_engine_name = zab_engine +pkg_zab_engine_description = zab propotocol implement by erlang +pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine +pkg_zab_engine_fetch = git +pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine +pkg_zab_engine_commit = master + +PACKAGES += zabbix_sender +pkg_zabbix_sender_name = zabbix_sender +pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang +pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender +pkg_zabbix_sender_fetch = git +pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git +pkg_zabbix_sender_commit = master + +PACKAGES += zeta +pkg_zeta_name = zeta +pkg_zeta_description = HTTP access log parser in Erlang +pkg_zeta_homepage = https://github.com/s1n4/zeta +pkg_zeta_fetch = git +pkg_zeta_repo = https://github.com/s1n4/zeta +pkg_zeta_commit = master + +PACKAGES += zippers +pkg_zippers_name = zippers +pkg_zippers_description = A library for functional zipper data structures in Erlang. Read more on zippers +pkg_zippers_homepage = https://github.com/ferd/zippers +pkg_zippers_fetch = git +pkg_zippers_repo = https://github.com/ferd/zippers +pkg_zippers_commit = master + +PACKAGES += zlists +pkg_zlists_name = zlists +pkg_zlists_description = Erlang lazy lists library. 
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists +pkg_zlists_fetch = git +pkg_zlists_repo = https://github.com/vjache/erlang-zlists +pkg_zlists_commit = master + +PACKAGES += zraft_lib +pkg_zraft_lib_name = zraft_lib +pkg_zraft_lib_description = Erlang raft consensus protocol implementation +pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib +pkg_zraft_lib_fetch = git +pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib +pkg_zraft_lib_commit = master + +PACKAGES += zucchini +pkg_zucchini_name = zucchini +pkg_zucchini_description = An Erlang INI parser +pkg_zucchini_homepage = https://github.com/devinus/zucchini +pkg_zucchini_fetch = git +pkg_zucchini_repo = https://github.com/devinus/zucchini +pkg_zucchini_commit = master + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: search + +define pkg_print + $(verbose) printf "%s\n" \ + $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \ + "App name: $(pkg_$(1)_name)" \ + "Description: $(pkg_$(1)_description)" \ + "Home page: $(pkg_$(1)_homepage)" \ + "Fetch with: $(pkg_$(1)_fetch)" \ + "Repository: $(pkg_$(1)_repo)" \ + "Commit: $(pkg_$(1)_commit)" \ + "" + +endef + +search: +ifdef q + $(foreach p,$(PACKAGES), \ + $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \ + $(call pkg_print,$(p)))) +else + $(foreach p,$(PACKAGES),$(call pkg_print,$(p))) +endif + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: distclean-deps clean-tmp-deps.log + +# Configuration. + +ifdef OTP_DEPS +$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.) +endif + +IGNORE_DEPS ?= +export IGNORE_DEPS + +APPS_DIR ?= $(CURDIR)/apps +export APPS_DIR + +DEPS_DIR ?= $(CURDIR)/deps +export DEPS_DIR + +REBAR_DEPS_DIR = $(DEPS_DIR) +export REBAR_DEPS_DIR + +REBAR_GIT ?= https://github.com/rebar/rebar +REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01 + +# External "early" plugins (see core/plugins.mk for regular plugins). +# They both use the core_dep_plugin macro. + +define core_dep_plugin +ifeq ($(2),$(PROJECT)) +-include $$(patsubst $(PROJECT)/%,%,$(1)) +else +-include $(DEPS_DIR)/$(1) + +$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ; +endif +endef + +DEP_EARLY_PLUGINS ?= + +$(foreach p,$(DEP_EARLY_PLUGINS),\ + $(eval $(if $(findstring /,$p),\ + $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\ + $(call core_dep_plugin,$p/early-plugins.mk,$p)))) + +# Query functions. 
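# Illustration (not part of erlang.mk; the cowlib name, URL and tag are only an
# example): with a project Makefile containing
#
#   DEPS = cowlib
#   dep_cowlib = git https://github.com/ninenines/cowlib 2.12.1
#
# the helpers defined below resolve to
#
#   $(call query_fetch_method,cowlib)  ->  git
#   $(call query_repo,cowlib)          ->  https://github.com/ninenines/cowlib
#   $(call query_version,cowlib)       ->  2.12.1
#
# and, when no dep_* line is given, fall back to the pkg_*_fetch, pkg_*_repo
# and pkg_*_commit entries of the package index above.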
+ +query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1))) +_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail)) +_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail) + +query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1))) + +query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1))) +_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1))) + +query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo)) +query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1))) +query_repo_git-subfolder = $(call query_repo_git,$(1)) +query_repo_git-submodule = - +query_repo_hg = $(call query_repo_default,$(1)) +query_repo_svn = $(call query_repo_default,$(1)) +query_repo_cp = $(call query_repo_default,$(1)) +query_repo_ln = $(call query_repo_default,$(1)) +query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1)) +query_repo_fail = - +query_repo_legacy = - + +query_version = $(call _qv,$(1),$(call query_fetch_method,$(1))) +_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1))) + +query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit))) +query_version_git = $(call query_version_default,$(1)) +query_version_git-subfolder = $(call query_version_git,$(1)) +query_version_git-submodule = - +query_version_hg = $(call query_version_default,$(1)) +query_version_svn = - +query_version_cp = - +query_version_ln = - +query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit))) +query_version_fail = - +query_version_legacy = - + +query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1))) +_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-) + +query_extra_git = - +query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-) +query_extra_git-submodule = - +query_extra_hg = - +query_extra_svn = - +query_extra_cp = - +query_extra_ln = - +query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-) +query_extra_fail = - +query_extra_legacy = - + +query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1))) + +# Deprecated legacy query functions. +dep_fetch = $(call query_fetch_method,$(1)) +dep_name = $(call query_name,$(1)) +dep_repo = $(call query_repo_git,$(1)) +dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit))) + +LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a))) +ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep)))) + +# When we are calling an app directly we don't want to include it here +# otherwise it'll be treated both as an apps and a top-level project. 
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d))) +ifdef ROOT_DIR +ifndef IS_APP +ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS)) +endif +endif + +ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),) +ifeq ($(ERL_LIBS),) + ERL_LIBS = $(APPS_DIR):$(DEPS_DIR) +else + ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR) +endif +endif +export ERL_LIBS + +export NO_AUTOPATCH + +# Verbosity. + +dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))"; +dep_verbose_2 = set -x; +dep_verbose = $(dep_verbose_$(V)) + +# Optimization: don't recompile deps unless truly necessary. + +ifndef IS_DEP +ifneq ($(MAKELEVEL),0) +$(shell rm -f ebin/dep_built) +endif +endif + +# Core targets. + +ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS)) + +apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP) +# Create ebin directory for all apps to make sure Erlang recognizes them +# as proper OTP applications when using -include_lib. This is a temporary +# fix, a proper fix would be to compile apps/* in the right order. +ifndef IS_APP +ifneq ($(ALL_APPS_DIRS),) + $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \ + mkdir -p $$dep/ebin; \ + done +endif +endif +# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only +# compile that list of apps. Otherwise, compile everything. +# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps. +ifneq ($(ALL_APPS_DIRS_TO_BUILD),) + $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \ + if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \ + :; \ + else \ + echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \ + $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \ + fi \ + done +endif + +clean-tmp-deps.log: +ifeq ($(IS_APP)$(IS_DEP),) + $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log +endif + +# Erlang.mk does not rebuild dependencies after they were compiled +# once. If a developer is working on the top-level project and some +# dependencies at the same time, he may want to change this behavior. +# There are two solutions: +# 1. Set `FULL=1` so that all dependencies are visited and +# recursively recompiled if necessary. +# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that +# should be recompiled (instead of the whole set). + +FORCE_REBUILD ?= + +ifeq ($(origin FULL),undefined) +ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),) +define force_rebuild_dep +echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")" +endef +endif +endif + +ifneq ($(SKIP_DEPS),) +deps:: +else +deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP) +ifneq ($(ALL_DEPS_DIRS),) + $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \ + if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \ + :; \ + else \ + echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \ + if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \ + :; \ + elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \ + $(MAKE) -C $$dep IS_DEP=1; \ + if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \ + else \ + echo "Error: No Makefile to build dependency $$dep." >&2; \ + exit 2; \ + fi \ + fi \ + done +endif +endif + +# Deps related targets. 
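# Illustration (not part of erlang.mk; the dependency names are only examples):
# dependencies that already carry an ebin/dep_built marker are normally skipped
# by the deps loop above. To run them through make again, pass either of
#
#   make FULL=1                           # revisit every dependency
#   make FORCE_REBUILD="cowlib ranch"     # revisit only the listed dependencies
#
# on the command line (FORCE_REBUILD is only honoured while FULL is unset).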
+ +# @todo rename GNUmakefile and makefile into Makefile first, if they exist +# While Makefile file could be GNUmakefile or makefile, +# in practice only Makefile is needed so far. +define dep_autopatch + if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \ + rm -rf $(DEPS_DIR)/$1/ebin/; \ + $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \ + $(call dep_autopatch_erlang_mk,$(1)); \ + elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \ + if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \ + $(call dep_autopatch2,$1); \ + elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \ + $(call dep_autopatch2,$(1)); \ + elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \ + $(call dep_autopatch2,$(1)); \ + elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \ + $(call dep_autopatch2,$(1)); \ + fi \ + else \ + if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \ + $(call dep_autopatch_noop,$(1)); \ + else \ + $(call dep_autopatch2,$(1)); \ + fi \ + fi +endef + +define dep_autopatch2 + ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \ + mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \ + rm -f $(DEPS_DIR)/$1/ebin/$1.app; \ + if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \ + $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \ + fi; \ + $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \ + if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \ + $(call dep_autopatch_fetch_rebar); \ + $(call dep_autopatch_rebar,$(1)); \ + else \ + $(call dep_autopatch_gen,$(1)); \ + fi +endef + +define dep_autopatch_noop + printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile +endef + +# Replace "include erlang.mk" with a line that will load the parent Erlang.mk +# if given. Do it for all 3 possible Makefile file names. +ifeq ($(NO_AUTOPATCH_ERLANG_MK),) +define dep_autopatch_erlang_mk + for f in Makefile makefile GNUmakefile; do \ + if [ -f $(DEPS_DIR)/$1/$$f ]; then \ + sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \ + fi \ + done +endef +else +define dep_autopatch_erlang_mk + : +endef +endif + +define dep_autopatch_gen + printf "%s\n" \ + "ERLC_OPTS = +debug_info" \ + "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile +endef + +# We use flock/lockf when available to avoid concurrency issues. +define dep_autopatch_fetch_rebar + if command -v flock >/dev/null; then \ + flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \ + elif command -v lockf >/dev/null; then \ + lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \ + else \ + $(call dep_autopatch_fetch_rebar2); \ + fi +endef + +define dep_autopatch_fetch_rebar2 + if [ ! 
-d $(ERLANG_MK_TMP)/rebar ]; then \ + git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \ + cd $(ERLANG_MK_TMP)/rebar; \ + git checkout -q $(REBAR_COMMIT); \ + ./bootstrap; \ + cd -; \ + fi +endef + +define dep_autopatch_rebar + if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \ + mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \ + fi; \ + $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \ + rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app +endef + +define dep_autopatch_rebar.erl + application:load(rebar), + application:set_env(rebar, log_level, debug), + rmemo:start(), + Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of + {ok, Conf0} -> Conf0; + _ -> [] + end, + {Conf, OsEnv} = fun() -> + case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of + false -> {Conf1, []}; + true -> + Bindings0 = erl_eval:new_bindings(), + Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0), + Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1), + Before = os:getenv(), + {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings), + {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)} + end + end(), + Write = fun (Text) -> + file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append]) + end, + Escape = fun (Text) -> + re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}]) + end, + Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package " + "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"), + Write("C_SRC_DIR = /path/do/not/exist\n"), + Write("C_SRC_TYPE = rebar\n"), + Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"), + Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]), + ToList = fun + (V) when is_atom(V) -> atom_to_list(V); + (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'" + end, + fun() -> + Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"), + case lists:keyfind(erl_opts, 1, Conf) of + false -> ok; + {_, ErlOpts} -> + lists:foreach(fun + ({d, D}) -> + Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n"); + ({d, DKey, DVal}) -> + Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n"); + ({i, I}) -> + Write(["ERLC_OPTS += -I ", I, "\n"]); + ({platform_define, Regex, D}) -> + case rebar_utils:is_arch(Regex) of + true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n"); + false -> ok + end; + ({parse_transform, PT}) -> + Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n"); + (_) -> ok + end, ErlOpts) + end, + Write("\n") + end(), + GetHexVsn = fun(N, NP) -> + case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of + {ok, Lock} -> + io:format("~p~n", [Lock]), + LockPkgs = case lists:keyfind("1.2.0", 1, Lock) of + {_, LP} -> + LP; + _ -> + case lists:keyfind("1.1.0", 1, Lock) of + {_, LP} -> + LP; + _ -> + false + end + end, + if + is_list(LockPkgs) -> + io:format("~p~n", [LockPkgs]), + case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of + {_, {pkg, _, Vsn}, _} -> + io:format("~p~n", [Vsn]), + {N, {hex, NP, binary_to_list(Vsn)}}; + _ -> + false + end; + true -> + false + end; + _ -> + false + end + end, + SemVsn = fun + ("~>" ++ S0) -> + S = case S0 of + " " ++ S1 -> S1; + _ -> S0 + end, + case length([ok || $$. 
<- S]) of + 0 -> S ++ ".0.0"; + 1 -> S ++ ".0"; + _ -> S + end; + (S) -> S + end, + fun() -> + File = case lists:keyfind(deps, 1, Conf) of + false -> []; + {_, Deps} -> + [begin case case Dep of + N when is_atom(N) -> GetHexVsn(N, N); + {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}}; + {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP); + {N, S, {pkg, NP}} -> {N, {hex, NP, S}}; + {N, S} when is_tuple(S) -> {N, S}; + {N, _, S} -> {N, S}; + {N, _, S, _} -> {N, S}; + _ -> false + end of + false -> ok; + {Name, Source} -> + {Method, Repo, Commit} = case Source of + {hex, NPV, V} -> {hex, V, NPV}; + {git, R} -> {git, R, master}; + {M, R, {branch, C}} -> {M, R, C}; + {M, R, {ref, C}} -> {M, R, C}; + {M, R, {tag, C}} -> {M, R, C}; + {M, R, C} -> {M, R, C} + end, + Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit])) + end end || Dep <- Deps] + end + end(), + fun() -> + case lists:keyfind(erl_first_files, 1, Conf) of + false -> ok; + {_, Files} -> + Names = [[" ", case lists:reverse(F) of + "lre." ++ Elif -> lists:reverse(Elif); + "lrx." ++ Elif -> lists:reverse(Elif); + "lry." ++ Elif -> lists:reverse(Elif); + Elif -> lists:reverse(Elif) + end] || "src/" ++ F <- Files], + Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names])) + end + end(), + Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"), + Write("\npreprocess::\n"), + Write("\npre-deps::\n"), + Write("\npre-app::\n"), + PatchHook = fun(Cmd) -> + Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]), + case Cmd2 of + "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1); + "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1); + "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1); + "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1); + _ -> Escape(Cmd) + end + end, + fun() -> + case lists:keyfind(pre_hooks, 1, Conf) of + false -> ok; + {_, Hooks} -> + [case H of + {'get-deps', Cmd} -> + Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n"); + {compile, Cmd} -> + Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n"); + {Regex, compile, Cmd} -> + case rebar_utils:is_arch(Regex) of + true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n"); + false -> ok + end; + _ -> ok + end || H <- Hooks] + end + end(), + ShellToMk = fun(V0) -> + V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]), + V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]), + re:replace(V, "-Werror\\\\b", "", [{return, list}, global]) + end, + PortSpecs = fun() -> + case lists:keyfind(port_specs, 1, Conf) of + false -> + case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of + false -> []; + true -> + [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"), + proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}] + end; + {_, Specs} -> + lists:flatten([case S of + {Output, Input} -> {ShellToMk(Output), Input, []}; + {Regex, Output, Input} -> + case rebar_utils:is_arch(Regex) of + true -> {ShellToMk(Output), Input, []}; + false -> [] + end; + {Regex, Output, Input, [{env, Env}]} -> + case rebar_utils:is_arch(Regex) of + true -> {ShellToMk(Output), Input, Env}; + false -> [] + end + end || S <- Specs]) + end + end(), + PortSpecWrite = fun (Text) -> + file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append]) + end, + case PortSpecs of + [] -> ok; + _ -> + Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"), + 
PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n", + [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])), + PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n", + [code:lib_dir(erl_interface, lib)])), + [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv], + FilterEnv = fun(Env) -> + lists:flatten([case E of + {_, _} -> E; + {Regex, K, V} -> + case rebar_utils:is_arch(Regex) of + true -> {K, V}; + false -> [] + end + end || E <- Env]) + end, + MergeEnv = fun(Env) -> + lists:foldl(fun ({K, V}, Acc) -> + case lists:keyfind(K, 1, Acc) of + false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc]; + {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc] + end + end, [], Env) + end, + PortEnv = case lists:keyfind(port_env, 1, Conf) of + false -> []; + {_, PortEnv0} -> FilterEnv(PortEnv0) + end, + PortSpec = fun ({Output, Input0, Env}) -> + filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output), + Input = [[" ", I] || I <- Input0], + PortSpecWrite([ + [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))], + case $(PLATFORM) of + darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress"; + _ -> "" + end, + "\n\nall:: ", Output, "\n\t@:\n\n", + "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n", + "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n", + "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n", + "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n", + [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))], + Output, ": $$\(foreach ext,.c .C .cc .cpp,", + "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n", + "\t$$\(CC) -o $$\@ $$\? 
$$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)", + case {filename:extension(Output), $(PLATFORM)} of + {[], _} -> "\n"; + {_, darwin} -> "\n"; + _ -> " -shared\n" + end]) + end, + [PortSpec(S) || S <- PortSpecs] + end, + fun() -> + case lists:keyfind(plugins, 1, Conf) of + false -> ok; + {_, Plugins0} -> + Plugins = [P || P <- Plugins0, is_tuple(P)], + case lists:keyfind('lfe-compile', 1, Plugins) of + false -> ok; + _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n") + end + end + end(), + Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"), + RunPlugin = fun(Plugin, Step) -> + case erlang:function_exported(Plugin, Step, 2) of + false -> ok; + true -> + c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"), + Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(), + dict:store(base_dir, "", dict:new())}, undefined), + io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret]) + end + end, + fun() -> + case lists:keyfind(plugins, 1, Conf) of + false -> ok; + {_, Plugins0} -> + Plugins = [P || P <- Plugins0, is_atom(P)], + [begin + case lists:keyfind(deps, 1, Conf) of + false -> ok; + {_, Deps} -> + case lists:keyfind(P, 1, Deps) of + false -> ok; + _ -> + Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P), + io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]), + io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]), + code:add_patha(Path ++ "/ebin") + end + end + end || P <- Plugins], + [case code:load_file(P) of + {module, P} -> ok; + _ -> + case lists:keyfind(plugin_dir, 1, Conf) of + false -> ok; + {_, PluginsDir} -> + ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl", + {ok, P, Bin} = compile:file(ErlFile, [binary]), + {module, P} = code:load_binary(P, ErlFile, Bin) + end + end || P <- Plugins], + [RunPlugin(P, preprocess) || P <- Plugins], + [RunPlugin(P, pre_compile) || P <- Plugins], + [RunPlugin(P, compile) || P <- Plugins] + end + end(), + halt() +endef + +define dep_autopatch_appsrc_script.erl + AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)", + AppSrcScript = AppSrc ++ ".script", + {ok, Conf0} = file:consult(AppSrc), + Bindings0 = erl_eval:new_bindings(), + Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0), + Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1), + Conf = case file:script(AppSrcScript, Bindings) of + {ok, [C]} -> C; + {ok, C} -> C + end, + ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])), + halt() +endef + +define dep_autopatch_appsrc.erl + AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)", + AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end, + case filelib:is_regular(AppSrcIn) of + false -> ok; + true -> + {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn), + L1 = lists:keystore(modules, 1, L0, {modules, []}), + L2 = case lists:keyfind(vsn, 1, L1) of + {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))}); + {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"}); + _ -> L1 + end, + L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end, + ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])), + case 
AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end + end, + halt() +endef + +define dep_fetch_git + git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \ + cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1)); +endef + +define dep_fetch_git-subfolder + mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \ + git clone -q -n -- $(call dep_repo,$1) \ + $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \ + cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \ + && git checkout -q $(call dep_commit,$1); \ + ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \ + $(DEPS_DIR)/$(call dep_name,$1); +endef + +define dep_fetch_git-submodule + git submodule update --init -- $(DEPS_DIR)/$1; +endef + +define dep_fetch_hg + hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \ + cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1)); +endef + +define dep_fetch_svn + svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); +endef + +define dep_fetch_cp + cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); +endef + +define dep_fetch_ln + ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); +endef + +# Hex only has a package version. No need to look in the Erlang.mk packages. +define dep_fetch_hex + mkdir -p $(ERLANG_MK_TMP)/hex $(DEPS_DIR)/$1; \ + $(call core_http_get,$(ERLANG_MK_TMP)/hex/$1.tar,\ + https://repo.hex.pm/tarballs/$(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1)-$(strip $(word 2,$(dep_$1))).tar); \ + tar -xOf $(ERLANG_MK_TMP)/hex/$1.tar contents.tar.gz | tar -C $(DEPS_DIR)/$1 -xzf -; +endef + +define dep_fetch_fail + echo "Error: Unknown or invalid dependency: $(1)." >&2; \ + exit 78; +endef + +# Kept for compatibility purposes with older Erlang.mk configuration. +define dep_fetch_legacy + $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \ + git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \ + cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master); +endef + +define dep_target +$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP) + $(eval DEP_NAME := $(call dep_name,$1)) + $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))")) + $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \ + echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \ + exit 17; \ + fi + $(verbose) mkdir -p $(DEPS_DIR) + $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1)) + $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \ + && [ ! -f $(DEPS_DIR)/$(1)/configure ]; then \ + echo " AUTO " $(DEP_STR); \ + cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \ + fi + - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \ + echo " CONF " $(DEP_STR); \ + cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \ + fi +ifeq ($(filter $(1),$(NO_AUTOPATCH)),) + $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME) +endif + +.PHONY: autopatch-$(call dep_name,$1) + +autopatch-$(call dep_name,$1):: + $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \ + if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \ + echo " PATCH Downloading rabbitmq-codegen"; \ + git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \ + fi; \ + if [ ! 
-d $(DEPS_DIR)/rabbitmq-server ]; then \ + echo " PATCH Downloading rabbitmq-server"; \ + git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \ + fi; \ + ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \ + elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \ + if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \ + echo " PATCH Downloading rabbitmq-codegen"; \ + git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \ + fi \ + elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \ + ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \ + else \ + $$(call dep_autopatch,$(call dep_name,$1)) \ + fi +endef + +$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep)))) + +ifndef IS_APP +clean:: clean-apps + +clean-apps: + $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \ + $(MAKE) -C $$dep clean IS_APP=1; \ + done + +distclean:: distclean-apps + +distclean-apps: + $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \ + $(MAKE) -C $$dep distclean IS_APP=1; \ + done +endif + +ifndef SKIP_DEPS +distclean:: distclean-deps + +distclean-deps: + $(gen_verbose) rm -rf $(DEPS_DIR) +endif + +# Forward-declare variables used in core/deps-tools.mk. This is required +# in case plugins use them. + +ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log +ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log +ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log +ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log +ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log + +ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log +ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log +ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log +ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log +ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: clean-app + +# Configuration. + +ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \ + +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec +COMPILE_FIRST ?= +COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST))) +ERLC_EXCLUDE ?= +ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE))) + +ERLC_ASN1_OPTS ?= + +ERLC_MIB_OPTS ?= +COMPILE_MIB_FIRST ?= +COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST))) + +# Verbosity. 
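# Illustration (not part of erlang.mk): the *_verbose helpers defined below all
# expand to $(<name>_verbose_$(V)), so the build output is controlled by V:
#
#   make V=0    # "@echo" prefix: only the short "  APP  "/"  ERLC  " labels
#   make V=1    # empty prefix: make echoes the full commands as usual
#   make V=2    # "set -x" prefix: recipes additionally run with shell tracing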
+ +app_verbose_0 = @echo " APP " $(PROJECT); +app_verbose_2 = set -x; +app_verbose = $(app_verbose_$(V)) + +appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src; +appsrc_verbose_2 = set -x; +appsrc_verbose = $(appsrc_verbose_$(V)) + +makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d; +makedep_verbose_2 = set -x; +makedep_verbose = $(makedep_verbose_$(V)) + +erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\ + $(filter %.erl %.core,$(?F))); +erlc_verbose_2 = set -x; +erlc_verbose = $(erlc_verbose_$(V)) + +xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F)); +xyrl_verbose_2 = set -x; +xyrl_verbose = $(xyrl_verbose_$(V)) + +asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F)); +asn1_verbose_2 = set -x; +asn1_verbose = $(asn1_verbose_$(V)) + +mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F)); +mib_verbose_2 = set -x; +mib_verbose = $(mib_verbose_$(V)) + +ifneq ($(wildcard src/),) + +# Targets. + +app:: $(if $(wildcard ebin/test),clean) deps + $(verbose) $(MAKE) --no-print-directory $(PROJECT).d + $(verbose) $(MAKE) --no-print-directory app-build + +ifeq ($(wildcard src/$(PROJECT_MOD).erl),) +define app_file +{application, '$(PROJECT)', [ + {description, "$(PROJECT_DESCRIPTION)"}, + {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP), + {id$(comma)$(space)"$(1)"}$(comma)) + {modules, [$(call comma_list,$(2))]}, + {registered, []}, + {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]}, + {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),) +]}. +endef +else +define app_file +{application, '$(PROJECT)', [ + {description, "$(PROJECT_DESCRIPTION)"}, + {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP), + {id$(comma)$(space)"$(1)"}$(comma)) + {modules, [$(call comma_list,$(2))]}, + {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]}, + {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]}, + {mod, {$(PROJECT_MOD), []}}, + {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),) +]}. +endef +endif + +app-build: ebin/$(PROJECT).app + $(verbose) : + +# Source files. + +ALL_SRC_FILES := $(sort $(call core_find,src/,*)) + +ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES)) +CORE_FILES := $(filter %.core,$(ALL_SRC_FILES)) + +# ASN.1 files. + +ifneq ($(wildcard asn1/),) +ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1)) +ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES)))) + +define compile_asn1 + $(verbose) mkdir -p include/ + $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1) + $(verbose) mv asn1/*.erl src/ + -$(verbose) mv asn1/*.hrl include/ + $(verbose) mv asn1/*.asn1db include/ +endef + +$(PROJECT).d:: $(ASN1_FILES) + $(if $(strip $?),$(call compile_asn1,$?)) +endif + +# SNMP MIB files. + +ifneq ($(wildcard mibs/),) +MIB_FILES = $(sort $(call core_find,mibs/,*.mib)) + +$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES) + $(verbose) mkdir -p include/ priv/mibs/ + $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $? + $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?))) +endif + +# Leex and Yecc files. 
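# Illustration (not part of erlang.mk; the module name is only an example):
# a leex grammar at src/my_scanner.xrl is picked up via ALL_SRC_FILES, turned
# into src/my_scanner.erl by the $(PROJECT).d rule below ("erlc -v -o src/"),
# compiled to ebin/ like any hand-written module through ERL_FILES, and the
# generated .erl is removed again by clean-app via $(XRL_ERL_FILES).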
+ +XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES)) +XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES)))) +ERL_FILES += $(XRL_ERL_FILES) + +YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES)) +YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES)))) +ERL_FILES += $(YRL_ERL_FILES) + +$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES) + $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?) + +# Erlang and Core Erlang files. + +define makedep.erl + E = ets:new(makedep, [bag]), + G = digraph:new([acyclic]), + ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")), + DepsDir = "$(call core_native_path,$(DEPS_DIR))", + AppsDir = "$(call core_native_path,$(APPS_DIR))", + DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))", + DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))", + AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))", + AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))", + DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")), + AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")), + Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles], + Add = fun (Mod, Dep) -> + case lists:keyfind(Dep, 1, Modules) of + false -> ok; + {_, DepFile} -> + {_, ModFile} = lists:keyfind(Mod, 1, Modules), + ets:insert(E, {ModFile, DepFile}), + digraph:add_vertex(G, Mod), + digraph:add_vertex(G, Dep), + digraph:add_edge(G, Mod, Dep) + end + end, + AddHd = fun (F, Mod, DepFile) -> + case file:open(DepFile, [read]) of + {error, enoent} -> + ok; + {ok, Fd} -> + {_, ModFile} = lists:keyfind(Mod, 1, Modules), + case ets:match(E, {ModFile, DepFile}) of + [] -> + ets:insert(E, {ModFile, DepFile}), + F(F, Fd, Mod,0); + _ -> ok + end + end + end, + SearchHrl = fun + F(_Hrl, []) -> {error,enoent}; + F(Hrl, [Dir|Dirs]) -> + HrlF = filename:join([Dir,Hrl]), + case filelib:is_file(HrlF) of + true -> + {ok, HrlF}; + false -> F(Hrl,Dirs) + end + end, + Attr = fun + (_F, Mod, behavior, Dep) -> + Add(Mod, Dep); + (_F, Mod, behaviour, Dep) -> + Add(Mod, Dep); + (_F, Mod, compile, {parse_transform, Dep}) -> + Add(Mod, Dep); + (_F, Mod, compile, Opts) when is_list(Opts) -> + case proplists:get_value(parse_transform, Opts) of + undefined -> ok; + Dep -> Add(Mod, Dep) + end; + (F, Mod, include, Hrl) -> + case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of + {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl); + {error, _} -> false + end; + (F, Mod, include_lib, Hrl) -> + case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of + {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl); + {error, _} -> false + end; + (F, Mod, import, {Imp, _}) -> + IsFile = + case lists:keyfind(Imp, 1, Modules) of + false -> false; + {_, FilePath} -> filelib:is_file(FilePath) + end, + case IsFile of + false -> ok; + true -> Add(Mod, Imp) + end; + (_, _, _, _) -> ok + end, + MakeDepend = fun + (F, Fd, Mod, StartLocation) -> + {ok, Filename} = file:pid2name(Fd), + case io:parse_erl_form(Fd, undefined, StartLocation) of + {ok, AbsData, EndLocation} -> + case AbsData of + {attribute, _, Key, Value} -> + Attr(F, Mod, Key, Value), + F(F, Fd, Mod, EndLocation); + _ -> F(F, Fd, Mod, EndLocation) + end; + {eof, _ } -> file:close(Fd); + {error, ErrorDescription } -> + file:close(Fd); + 
{error, ErrorInfo, ErrorLocation} -> + F(F, Fd, Mod, ErrorLocation) + end, + ok + end, + [begin + Mod = list_to_atom(filename:basename(F, ".erl")), + case file:open(F, [read]) of + {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0); + {error, enoent} -> ok + end + end || F <- ErlFiles], + Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))), + CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)], + TargetPath = fun(Target) -> + case lists:keyfind(Target, 1, Modules) of + false -> ""; + {_, DepFile} -> + DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")), + string:join(DirSubname ++ [atom_to_list(Target)], "/") + end + end, + Output0 = [ + "# Generated by Erlang.mk. Edit at your own risk!\n\n", + [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend], + "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n" + ], + Output = case "รฉ" of + [233] -> unicode:characters_to_binary(Output0); + _ -> Output0 + end, + ok = file:write_file("$(1)", Output), + halt() +endef + +ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),) +$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST) + $(makedep_verbose) $(call erlang,$(call makedep.erl,$@)) +endif + +ifeq ($(IS_APP)$(IS_DEP),) +ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0) +# Rebuild everything when the Makefile changes. +$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP) + $(verbose) if test -f $@; then \ + touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \ + touch -c $(PROJECT).d; \ + fi + $(verbose) touch $@ + +$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change +ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change +endif +endif + +$(PROJECT).d:: + $(verbose) : + +include $(wildcard $(PROJECT).d) + +ebin/$(PROJECT).app:: ebin/ + +ebin/: + $(verbose) mkdir -p ebin/ + +define compile_erl + $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \ + -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1)) +endef + +define validate_app_file + case file:consult("ebin/$(PROJECT).app") of + {ok, _} -> halt(); + _ -> halt(1) + end +endef + +ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src) + $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?)) + $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE))) +# Older git versions do not have the --first-parent flag. Do without in that case. + $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \ + || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true)) + $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \ + $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES))))))) +ifeq ($(wildcard src/$(PROJECT).app.src),) + $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \ + > ebin/$(PROJECT).app + $(verbose) if ! $(call erlang,$(call validate_app_file)); then \ + echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." 
>&2; \ + exit 1; \ + fi +else + $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \ + echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \ + exit 1; \ + fi + $(appsrc_verbose) cat src/$(PROJECT).app.src \ + | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \ + | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \ + > ebin/$(PROJECT).app +endif +ifneq ($(wildcard src/$(PROJECT).appup),) + $(verbose) cp src/$(PROJECT).appup ebin/ +endif + +clean:: clean-app + +clean-app: + $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \ + $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \ + $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \ + $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \ + $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES)))) + +endif + +# Copyright (c) 2016, Loรฏc Hoguin +# Copyright (c) 2015, Viktor Sรถderqvist +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: docs-deps + +# Configuration. + +ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS)) + +# Targets. + +$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep)))) + +ifneq ($(SKIP_DEPS),) +doc-deps: +else +doc-deps: $(ALL_DOC_DEPS_DIRS) + $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done +endif + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: rel-deps + +# Configuration. + +ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS)) + +# Targets. + +$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep)))) + +ifneq ($(SKIP_DEPS),) +rel-deps: +else +rel-deps: $(ALL_REL_DEPS_DIRS) + $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done +endif + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: test-deps test-dir test-build clean-test-dir + +# Configuration. + +TEST_DIR ?= $(CURDIR)/test + +ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS)) + +TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard +TEST_ERLC_OPTS += -DTEST=1 + +# Targets. + +$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep)))) + +ifneq ($(SKIP_DEPS),) +test-deps: +else +test-deps: $(ALL_TEST_DEPS_DIRS) + $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \ + if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \ + :; \ + else \ + $(MAKE) -C $$dep IS_DEP=1; \ + if [ ! 
-L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \ + fi \ + done +endif + +ifneq ($(wildcard $(TEST_DIR)),) +test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build + @: + +test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\ + $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE)))); +test_erlc_verbose_2 = set -x; +test_erlc_verbose = $(test_erlc_verbose_$(V)) + +define compile_test_erl + $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \ + -pa ebin/ -I include/ $(1) +endef + +ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl) +$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST) + $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?)) + $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@) +endif + +test-build:: IS_TEST=1 +test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS) +test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps) +# We already compiled everything when IS_APP=1. +ifndef IS_APP +ifneq ($(wildcard src),) + $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" + $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" + $(gen_verbose) touch ebin/test +endif +ifneq ($(wildcard $(TEST_DIR)),) + $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" +endif +endif + +# Roughly the same as test-build, but when IS_APP=1. +# We only care about compiling the current application. +ifdef IS_APP +test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS) +test-build-app:: deps test-deps +ifneq ($(wildcard src),) + $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" + $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" + $(gen_verbose) touch ebin/test +endif +ifneq ($(wildcard $(TEST_DIR)),) + $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" +endif +endif + +clean:: clean-test-dir + +clean-test-dir: +ifneq ($(wildcard $(TEST_DIR)/*.beam),) + $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build +endif + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: rebar.config + +# We strip out -Werror because we don't want to fail due to +# warnings when used as a dependency. + +compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g') + +define compat_convert_erlc_opts +$(if $(filter-out -Werror,$1),\ + $(if $(findstring +,$1),\ + $(shell echo $1 | cut -b 2-))) +endef + +define compat_erlc_opts_to_list +[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))] +endef + +define compat_rebar_config +{deps, [ +$(call comma_list,$(foreach d,$(DEPS),\ + $(if $(filter hex,$(call dep_fetch,$d)),\ + {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\ + {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}}))) +]}. +{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}. +endef + +rebar.config: + $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config) + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. 
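+# Illustration only: the AsciiDoc plugin below is activated by listing
+# asciideck among the documentation dependencies, typically with a line
+# such as the following in the project Makefile:
+#
+#   DOC_DEPS = asciideck
+#
+# The guide is then expected at doc/src/guide/book.asciidoc and man pages
+# under doc/src/manual/, as checked by the targets that follow.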
+ +ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck) + +.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual + +# Core targets. + +docs:: asciidoc + +distclean:: distclean-asciidoc-guide distclean-asciidoc-manual + +# Plugin-specific targets. + +asciidoc: asciidoc-guide asciidoc-manual + +# User guide. + +ifeq ($(wildcard doc/src/guide/book.asciidoc),) +asciidoc-guide: +else +asciidoc-guide: distclean-asciidoc-guide doc-deps + a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf + a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/ + +distclean-asciidoc-guide: + $(gen_verbose) rm -rf doc/html/ doc/guide.pdf +endif + +# Man pages. + +ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc) + +ifeq ($(ASCIIDOC_MANUAL_FILES),) +asciidoc-manual: +else + +# Configuration. + +MAN_INSTALL_PATH ?= /usr/local/share/man +MAN_SECTIONS ?= 3 7 +MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/') +MAN_VERSION ?= $(PROJECT_VERSION) + +# Plugin-specific targets. + +define asciidoc2man.erl +try + [begin + io:format(" ADOC ~s~n", [F]), + ok = asciideck:to_manpage(asciideck:parse_file(F), #{ + compress => gzip, + outdir => filename:dirname(F), + extra2 => "$(MAN_PROJECT) $(MAN_VERSION)", + extra3 => "$(MAN_PROJECT) Function Reference" + }) + end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]], + halt(0) +catch C:E -> + io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]), + halt(1) +end. +endef + +asciidoc-manual:: doc-deps + +asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES) + $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?)) + $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;) + +install-docs:: install-asciidoc + +install-asciidoc: asciidoc-manual + $(foreach s,$(MAN_SECTIONS),\ + mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \ + install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;) + +distclean-asciidoc-manual: + $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS)) +endif +endif + +# Copyright (c) 2014-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates + +# Core targets. + +help:: + $(verbose) printf "%s\n" "" \ + "Bootstrap targets:" \ + " bootstrap Generate a skeleton of an OTP application" \ + " bootstrap-lib Generate a skeleton of an OTP library" \ + " bootstrap-rel Generate the files needed to build a release" \ + " new-app in=NAME Create a new local OTP application NAME" \ + " new-lib in=NAME Create a new local OTP library NAME" \ + " new t=TPL n=NAME Generate a module NAME based on the template TPL" \ + " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \ + " list-templates List available templates" + +# Bootstrap templates. + +define bs_appsrc +{application, $p, [ + {description, ""}, + {vsn, "0.1.0"}, + {id, "git"}, + {modules, []}, + {registered, []}, + {applications, [ + kernel, + stdlib + ]}, + {mod, {$p_app, []}}, + {env, []} +]}. +endef + +define bs_appsrc_lib +{application, $p, [ + {description, ""}, + {vsn, "0.1.0"}, + {id, "git"}, + {modules, []}, + {registered, []}, + {applications, [ + kernel, + stdlib + ]} +]}. +endef + +# To prevent autocompletion issues with ZSH, we add "include erlang.mk" +# separately during the actual bootstrap. 
+define bs_Makefile +PROJECT = $p +PROJECT_DESCRIPTION = New project +PROJECT_VERSION = 0.1.0 +$(if $(SP), +# Whitespace to be used when creating files from templates. +SP = $(SP) +) +endef + +define bs_apps_Makefile +PROJECT = $p +PROJECT_DESCRIPTION = New project +PROJECT_VERSION = 0.1.0 +$(if $(SP), +# Whitespace to be used when creating files from templates. +SP = $(SP) +) +# Make sure we know where the applications are located. +ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app) +APPS_DIR ?= .. +DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app) + +include $$(ROOT_DIR)/erlang.mk +endef + +define bs_app +-module($p_app). +-behaviour(application). + +-export([start/2]). +-export([stop/1]). + +start(_Type, _Args) -> + $p_sup:start_link(). + +stop(_State) -> + ok. +endef + +define bs_relx_config +{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}. +{extended_start_script, true}. +{sys_config, "config/sys.config"}. +{vm_args, "config/vm.args"}. +endef + +define bs_sys_config +[ +]. +endef + +define bs_vm_args +-name $p@127.0.0.1 +-setcookie $p +-heart +endef + +# Normal templates. + +define tpl_supervisor +-module($(n)). +-behaviour(supervisor). + +-export([start_link/0]). +-export([init/1]). + +start_link() -> + supervisor:start_link({local, ?MODULE}, ?MODULE, []). + +init([]) -> + Procs = [], + {ok, {{one_for_one, 1, 5}, Procs}}. +endef + +define tpl_gen_server +-module($(n)). +-behaviour(gen_server). + +%% API. +-export([start_link/0]). + +%% gen_server. +-export([init/1]). +-export([handle_call/3]). +-export([handle_cast/2]). +-export([handle_info/2]). +-export([terminate/2]). +-export([code_change/3]). + +-record(state, { +}). + +%% API. + +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_server:start_link(?MODULE, [], []). + +%% gen_server. + +init([]) -> + {ok, #state{}}. + +handle_call(_Request, _From, State) -> + {reply, ignored, State}. + +handle_cast(_Msg, State) -> + {noreply, State}. + +handle_info(_Info, State) -> + {noreply, State}. + +terminate(_Reason, _State) -> + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. +endef + +define tpl_module +-module($(n)). +-export([]). +endef + +define tpl_cowboy_http +-module($(n)). +-behaviour(cowboy_http_handler). + +-export([init/3]). +-export([handle/2]). +-export([terminate/3]). + +-record(state, { +}). + +init(_, Req, _Opts) -> + {ok, Req, #state{}}. + +handle(Req, State=#state{}) -> + {ok, Req2} = cowboy_req:reply(200, Req), + {ok, Req2, State}. + +terminate(_Reason, _Req, _State) -> + ok. +endef + +define tpl_gen_fsm +-module($(n)). +-behaviour(gen_fsm). + +%% API. +-export([start_link/0]). + +%% gen_fsm. +-export([init/1]). +-export([state_name/2]). +-export([handle_event/3]). +-export([state_name/3]). +-export([handle_sync_event/4]). +-export([handle_info/3]). +-export([terminate/3]). +-export([code_change/4]). + +-record(state, { +}). + +%% API. + +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_fsm:start_link(?MODULE, [], []). + +%% gen_fsm. + +init([]) -> + {ok, state_name, #state{}}. + +state_name(_Event, StateData) -> + {next_state, state_name, StateData}. + +handle_event(_Event, StateName, StateData) -> + {next_state, StateName, StateData}. + +state_name(_Event, _From, StateData) -> + {reply, ignored, state_name, StateData}. + +handle_sync_event(_Event, _From, StateName, StateData) -> + {reply, ignored, StateName, StateData}. + +handle_info(_Info, StateName, StateData) -> + {next_state, StateName, StateData}. 
+ +terminate(_Reason, _StateName, _StateData) -> + ok. + +code_change(_OldVsn, StateName, StateData, _Extra) -> + {ok, StateName, StateData}. +endef + +define tpl_gen_statem +-module($(n)). +-behaviour(gen_statem). + +%% API. +-export([start_link/0]). + +%% gen_statem. +-export([callback_mode/0]). +-export([init/1]). +-export([state_name/3]). +-export([handle_event/4]). +-export([terminate/3]). +-export([code_change/4]). + +-record(state, { +}). + +%% API. + +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_statem:start_link(?MODULE, [], []). + +%% gen_statem. + +callback_mode() -> + state_functions. + +init([]) -> + {ok, state_name, #state{}}. + +state_name(_EventType, _EventData, StateData) -> + {next_state, state_name, StateData}. + +handle_event(_EventType, _EventData, StateName, StateData) -> + {next_state, StateName, StateData}. + +terminate(_Reason, _StateName, _StateData) -> + ok. + +code_change(_OldVsn, StateName, StateData, _Extra) -> + {ok, StateName, StateData}. +endef + +define tpl_cowboy_loop +-module($(n)). +-behaviour(cowboy_loop_handler). + +-export([init/3]). +-export([info/3]). +-export([terminate/3]). + +-record(state, { +}). + +init(_, Req, _Opts) -> + {loop, Req, #state{}, 5000, hibernate}. + +info(_Info, Req, State) -> + {loop, Req, State, hibernate}. + +terminate(_Reason, _Req, _State) -> + ok. +endef + +define tpl_cowboy_rest +-module($(n)). + +-export([init/3]). +-export([content_types_provided/2]). +-export([get_html/2]). + +init(_, _Req, _Opts) -> + {upgrade, protocol, cowboy_rest}. + +content_types_provided(Req, State) -> + {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}. + +get_html(Req, State) -> + {<<"This is REST!">>, Req, State}. +endef + +define tpl_cowboy_ws +-module($(n)). +-behaviour(cowboy_websocket_handler). + +-export([init/3]). +-export([websocket_init/3]). +-export([websocket_handle/3]). +-export([websocket_info/3]). +-export([websocket_terminate/3]). + +-record(state, { +}). + +init(_, _, _) -> + {upgrade, protocol, cowboy_websocket}. + +websocket_init(_, Req, _Opts) -> + Req2 = cowboy_req:compact(Req), + {ok, Req2, #state{}}. + +websocket_handle({text, Data}, Req, State) -> + {reply, {text, Data}, Req, State}; +websocket_handle({binary, Data}, Req, State) -> + {reply, {binary, Data}, Req, State}; +websocket_handle(_Frame, Req, State) -> + {ok, Req, State}. + +websocket_info(_Info, Req, State) -> + {ok, Req, State}. + +websocket_terminate(_Reason, _Req, _State) -> + ok. +endef + +define tpl_ranch_protocol +-module($(n)). +-behaviour(ranch_protocol). + +-export([start_link/4]). +-export([init/4]). + +-type opts() :: []. +-export_type([opts/0]). + +-record(state, { + socket :: inet:socket(), + transport :: module() +}). + +start_link(Ref, Socket, Transport, Opts) -> + Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]), + {ok, Pid}. + +-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok. +init(Ref, Socket, Transport, _Opts) -> + ok = ranch:accept_ack(Ref), + loop(#state{socket=Socket, transport=Transport}). + +loop(State) -> + loop(State). +endef + +# Plugin-specific targets. 
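+# As a usage sketch for the templates above (module and application names
+# here are hypothetical), modules are generated with the "new" target
+# defined below:
+#
+#   make new t=gen_server n=my_server
+#   make new t=cowboy_ws n=ws_handler in=my_app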
+ +ifndef WS +ifdef SP +WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a)) +else +WS = $(tab) +endif +endif + +bootstrap: +ifneq ($(wildcard src/),) + $(error Error: src/ directory already exists) +endif + $(eval p := $(PROJECT)) + $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\ + $(error Error: Invalid characters in the application name)) + $(eval n := $(PROJECT)_sup) + $(verbose) $(call core_render,bs_Makefile,Makefile) + $(verbose) echo "include erlang.mk" >> Makefile + $(verbose) mkdir src/ +ifdef LEGACY + $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src) +endif + $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl) + $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl) + +bootstrap-lib: +ifneq ($(wildcard src/),) + $(error Error: src/ directory already exists) +endif + $(eval p := $(PROJECT)) + $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\ + $(error Error: Invalid characters in the application name)) + $(verbose) $(call core_render,bs_Makefile,Makefile) + $(verbose) echo "include erlang.mk" >> Makefile + $(verbose) mkdir src/ +ifdef LEGACY + $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src) +endif + +bootstrap-rel: +ifneq ($(wildcard relx.config),) + $(error Error: relx.config already exists) +endif +ifneq ($(wildcard config/),) + $(error Error: config/ directory already exists) +endif + $(eval p := $(PROJECT)) + $(verbose) $(call core_render,bs_relx_config,relx.config) + $(verbose) mkdir config/ + $(verbose) $(call core_render,bs_sys_config,config/sys.config) + $(verbose) $(call core_render,bs_vm_args,config/vm.args) + +new-app: +ifndef in + $(error Usage: $(MAKE) new-app in=APP) +endif +ifneq ($(wildcard $(APPS_DIR)/$in),) + $(error Error: Application $in already exists) +endif + $(eval p := $(in)) + $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\ + $(error Error: Invalid characters in the application name)) + $(eval n := $(in)_sup) + $(verbose) mkdir -p $(APPS_DIR)/$p/src/ + $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile) +ifdef LEGACY + $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src) +endif + $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl) + $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl) + +new-lib: +ifndef in + $(error Usage: $(MAKE) new-lib in=APP) +endif +ifneq ($(wildcard $(APPS_DIR)/$in),) + $(error Error: Application $in already exists) +endif + $(eval p := $(in)) + $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\ + $(error Error: Invalid characters in the application name)) + $(verbose) mkdir -p $(APPS_DIR)/$p/src/ + $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile) +ifdef LEGACY + $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src) +endif + +new: +ifeq ($(wildcard src/)$(in),) + $(error Error: src/ directory does not exist) +endif +ifndef t + $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP]) +endif +ifndef n + $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP]) +endif +ifdef in + $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl) +else + $(verbose) $(call core_render,tpl_$(t),src/$(n).erl) +endif + +list-templates: + $(verbose) @echo Available templates: + $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES)))) + +# Copyright (c) 2014-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. 
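+# Illustration only: a project with sources under c_src/ usually needs no
+# extra configuration; when it does, the variables below are the usual
+# overrides, for example (values are hypothetical):
+#
+#   C_SRC_TYPE = executable
+#   C_SRC_OUTPUT = $(CURDIR)/priv/my_port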
+ +.PHONY: clean-c_src distclean-c_src-env + +# Configuration. + +C_SRC_DIR ?= $(CURDIR)/c_src +C_SRC_ENV ?= $(C_SRC_DIR)/env.mk +C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT) +C_SRC_TYPE ?= shared + +# System type and C compiler/flags. + +ifeq ($(PLATFORM),msys2) + C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe + C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll +else + C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= + C_SRC_OUTPUT_SHARED_EXTENSION ?= .so +endif + +ifeq ($(C_SRC_TYPE),shared) + C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION) +else + C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION) +endif + +ifeq ($(PLATFORM),msys2) +# We hardcode the compiler used on MSYS2. The default CC=cc does +# not produce working code. The "gcc" MSYS2 package also doesn't. + CC = /mingw64/bin/gcc + export CC + CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes + CXXFLAGS ?= -O3 -finline-functions -Wall +else ifeq ($(PLATFORM),darwin) + CC ?= cc + CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes + CXXFLAGS ?= -O3 -arch x86_64 -Wall + LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress +else ifeq ($(PLATFORM),freebsd) + CC ?= cc + CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes + CXXFLAGS ?= -O3 -finline-functions -Wall +else ifeq ($(PLATFORM),linux) + CC ?= gcc + CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes + CXXFLAGS ?= -O3 -finline-functions -Wall +endif + +ifneq ($(PLATFORM),msys2) + CFLAGS += -fPIC + CXXFLAGS += -fPIC +endif + +CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)" +CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)" + +LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei + +# Verbosity. + +c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F)); +c_verbose = $(c_verbose_$(V)) + +cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F)); +cpp_verbose = $(cpp_verbose_$(V)) + +link_verbose_0 = @echo " LD " $(@F); +link_verbose = $(link_verbose_$(V)) + +# Targets. + +ifeq ($(wildcard $(C_SRC_DIR)),) +else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),) +app:: app-c_src + +test-build:: app-c_src + +app-c_src: + $(MAKE) -C $(C_SRC_DIR) + +clean:: + $(MAKE) -C $(C_SRC_DIR) clean + +else + +ifeq ($(SOURCES),) +SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat)))) +endif +OBJECTS = $(addsuffix .o, $(basename $(SOURCES))) + +COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c +COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c + +app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE) + +test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE) + +$(C_SRC_OUTPUT_FILE): $(OBJECTS) + $(verbose) mkdir -p $(dir $@) + $(link_verbose) $(CC) $(OBJECTS) \ + $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \ + -o $(C_SRC_OUTPUT_FILE) + +$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV) + +%.o: %.c + $(COMPILE_C) $(OUTPUT_OPTION) $< + +%.o: %.cc + $(COMPILE_CPP) $(OUTPUT_OPTION) $< + +%.o: %.C + $(COMPILE_CPP) $(OUTPUT_OPTION) $< + +%.o: %.cpp + $(COMPILE_CPP) $(OUTPUT_OPTION) $< + +clean:: clean-c_src + +clean-c_src: + $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS) + +endif + +ifneq ($(wildcard $(C_SRC_DIR)),) +ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().') + +$(C_SRC_ENV): + $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \ + io_lib:format( \ + \"# Generated by Erlang.mk. 
Edit at your own risk!~n~n\" \ + \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \ + \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \ + \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \ + \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \ + [code:root_dir(), erlang:system_info(version), \ + code:lib_dir(erl_interface, include), \ + code:lib_dir(erl_interface, lib)])), \ + halt()." + +distclean:: distclean-c_src-env + +distclean-c_src-env: + $(gen_verbose) rm -f $(C_SRC_ENV) + +-include $(C_SRC_ENV) + +ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR)) +$(shell rm -f $(C_SRC_ENV)) +endif +endif + +# Templates. + +define bs_c_nif +#include "erl_nif.h" + +static int loads = 0; + +static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info) +{ + /* Initialize private data. */ + *priv_data = NULL; + + loads++; + + return 0; +} + +static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info) +{ + /* Convert the private data to the new version. */ + *priv_data = *old_priv_data; + + loads++; + + return 0; +} + +static void unload(ErlNifEnv* env, void* priv_data) +{ + if (loads == 1) { + /* Destroy the private data. */ + } + + loads--; +} + +static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + if (enif_is_atom(env, argv[0])) { + return enif_make_tuple2(env, + enif_make_atom(env, "hello"), + argv[0]); + } + + return enif_make_tuple2(env, + enif_make_atom(env, "error"), + enif_make_atom(env, "badarg")); +} + +static ErlNifFunc nif_funcs[] = { + {"hello", 1, hello} +}; + +ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload) +endef + +define bs_erl_nif +-module($n). + +-export([hello/1]). + +-on_load(on_load/0). +on_load() -> + PrivDir = case code:priv_dir(?MODULE) of + {error, _} -> + AppPath = filename:dirname(filename:dirname(code:which(?MODULE))), + filename:join(AppPath, "priv"); + Path -> + Path + end, + erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0). + +hello(_) -> + erlang:nif_error({not_loaded, ?MODULE}). +endef + +new-nif: +ifneq ($(wildcard $(C_SRC_DIR)/$n.c),) + $(error Error: $(C_SRC_DIR)/$n.c already exists) +endif +ifneq ($(wildcard src/$n.erl),) + $(error Error: src/$n.erl already exists) +endif +ifndef n + $(error Usage: $(MAKE) new-nif n=NAME [in=APP]) +endif +ifdef in + $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in= +else + $(verbose) mkdir -p $(C_SRC_DIR) src/ + $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c) + $(verbose) $(call core_render,bs_erl_nif,src/$n.erl) +endif + +# Copyright (c) 2015-2017, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. 
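+# For illustration (the OTP version strings are only examples): listing the
+# Erlang/OTP builds to test against enables the ci targets defined below,
+# one per version, each built with kerl:
+#
+#   CI_OTP = OTP-23.3.4 OTP-24.3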
+ +.PHONY: ci ci-prepare ci-setup + +CI_OTP ?= +CI_HIPE ?= +CI_ERLLVM ?= + +ifeq ($(CI_VM),native) +ERLC_OPTS += +native +TEST_ERLC_OPTS += +native +else ifeq ($(CI_VM),erllvm) +ERLC_OPTS += +native +'{hipe, [to_llvm]}' +TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}' +endif + +ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),) +ci:: +else + +ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM))) + +ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE))) + +ci-setup:: + $(verbose) : + +ci-extra:: + $(verbose) : + +ci_verbose_0 = @echo " CI " $(1); +ci_verbose = $(ci_verbose_$(V)) + +define ci_target +ci-$1: $(KERL_INSTALL_DIR)/$2 + $(verbose) $(MAKE) --no-print-directory clean + $(ci_verbose) \ + PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \ + CI_OTP_RELEASE="$1" \ + CT_OPTS="-label $1" \ + CI_VM="$3" \ + $(MAKE) ci-setup tests + $(verbose) $(MAKE) --no-print-directory ci-extra +endef + +$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp))) +$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native))) +$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm))) + +$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp)))) +$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLLVM))),$(eval $(call kerl_hipe_target,$(otp)))) + +help:: + $(verbose) printf "%s\n" "" \ + "Continuous Integration targets:" \ + " ci Run '$(MAKE) tests' on all configured Erlang versions." \ + "" \ + "The CI_OTP variable must be defined with the Erlang versions" \ + "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3" + +endif + +# Copyright (c) 2020, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +ifdef CONCUERROR_TESTS + +.PHONY: concuerror distclean-concuerror + +# Configuration + +CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs +CONCUERROR_OPTS ?= + +# Core targets. + +check:: concuerror + +ifndef KEEP_LOGS +distclean:: distclean-concuerror +endif + +# Plugin-specific targets. + +$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP) + $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror + $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror + +$(CONCUERROR_LOGS_DIR): + $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR) + +define concuerror_html_report + + + + +Concuerror HTML report + + +

+<h1>Concuerror HTML report</h1>
+<p>Generated on $(concuerror_date)</p>
+<ul>
+$(foreach t,$(concuerror_targets),<li><a href="$(t).txt">$(t)</a></li>)
+</ul>
+ + +endef + +concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS))) + $(eval concuerror_date := $(shell date)) + $(eval concuerror_targets := $^) + $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html) + +define concuerror_target +.PHONY: concuerror-$1-$2 + +concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR) + $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \ + --pa $(CURDIR)/ebin --pa $(TEST_DIR) \ + -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \ + $$(CONCUERROR_OPTS) -m $1 -t $2 +endef + +$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test)))))) + +distclean-concuerror: + $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR) + +endif + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: ct apps-ct distclean-ct + +# Configuration. + +CT_OPTS ?= + +ifneq ($(wildcard $(TEST_DIR)),) +ifndef CT_SUITES +CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl)))) +endif +endif +CT_SUITES ?= +CT_LOGS_DIR ?= $(CURDIR)/logs + +# Core targets. + +tests:: ct + +ifndef KEEP_LOGS +distclean:: distclean-ct +endif + +help:: + $(verbose) printf "%s\n" "" \ + "Common_test targets:" \ + " ct Run all the common_test suites for this project" \ + "" \ + "All your common_test suites have their associated targets." \ + "A suite named http_SUITE can be ran using the ct-http target." + +# Plugin-specific targets. + +CT_RUN = ct_run \ + -no_auto_compile \ + -noinput \ + -pa $(CURDIR)/ebin $(TEST_DIR) \ + -dir $(TEST_DIR) \ + -logdir $(CT_LOGS_DIR) + +ifeq ($(CT_SUITES),) +ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct) +else +# We do not run tests if we are in an apps/* with no test directory. +ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1) +ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct) + $(verbose) mkdir -p $(CT_LOGS_DIR) + $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS) +endif +endif + +ifneq ($(ALL_APPS_DIRS),) +define ct_app_target +apps-ct-$1: test-build + $$(MAKE) -C $1 ct IS_APP=1 +endef + +$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app)))) + +apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS)) +endif + +ifdef t +ifeq (,$(findstring :,$t)) +CT_EXTRA = -group $t +else +t_words = $(subst :, ,$t) +CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words)) +endif +else +ifdef c +CT_EXTRA = -case $c +else +CT_EXTRA = +endif +endif + +define ct_suite_target +ct-$(1): test-build + $(verbose) mkdir -p $(CT_LOGS_DIR) + $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS) +endef + +$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test)))) + +distclean-ct: + $(gen_verbose) rm -rf $(CT_LOGS_DIR) + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: plt distclean-plt dialyze + +# Configuration. + +DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt +export DIALYZER_PLT + +PLT_APPS ?= +DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS) +DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs +DIALYZER_PLT_OPTS ?= + +# Core targets. 
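+# The usual knobs here are PLT_APPS (extra applications added to the PLT)
+# and DIALYZER_OPTS above; purely as a hypothetical example:
+#
+#   PLT_APPS = ssl crypto public_key
+#   DIALYZER_OPTS = -Werror_handling -Wunmatched_returns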
+ +check:: dialyze + +distclean:: distclean-plt + +help:: + $(verbose) printf "%s\n" "" \ + "Dialyzer targets:" \ + " plt Build a PLT file for this project" \ + " dialyze Analyze the project using Dialyzer" + +# Plugin-specific targets. + +define filter_opts.erl + Opts = init:get_plain_arguments(), + {Filtered, _} = lists:foldl(fun + (O, {Os, true}) -> {[O|Os], false}; + (O = "-D", {Os, _}) -> {[O|Os], true}; + (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false}; + (O = "-I", {Os, _}) -> {[O|Os], true}; + (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false}; + (O = "-pa", {Os, _}) -> {[O|Os], true}; + (_, Acc) -> Acc + end, {[], false}, Opts), + io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]), + halt(). +endef + +# DIALYZER_PLT is a variable understood directly by Dialyzer. +# +# We append the path to erts at the end of the PLT. This works +# because the PLT file is in the external term format and the +# function binary_to_term/1 ignores any trailing data. +$(DIALYZER_PLT): deps app + $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \ + while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log)) + $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \ + erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2 + $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@ + +plt: $(DIALYZER_PLT) + +distclean-plt: + $(gen_verbose) rm -f $(DIALYZER_PLT) + +ifneq ($(wildcard $(DIALYZER_PLT)),) +dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app) + $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \ + grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \ + rm $(DIALYZER_PLT); \ + $(MAKE) plt; \ + fi +else +dialyze: $(DIALYZER_PLT) +endif + $(verbose) dialyzer --no_native `$(ERL) \ + -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \ + -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/) + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: distclean-edoc edoc + +# Configuration. + +EDOC_OPTS ?= +EDOC_SRC_DIRS ?= +EDOC_OUTPUT ?= doc + +define edoc.erl + SrcPaths = lists:foldl(fun(P, Acc) -> + filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc + end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]), + DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}], + edoc:application($(1), ".", [$(2)] ++ DefaultOpts), + halt(0). +endef + +# Core targets. + +ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),) +docs:: edoc +endif + +distclean:: distclean-edoc + +# Plugin-specific targets. + +edoc: distclean-edoc doc-deps + $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS))) + +distclean-edoc: + $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +# Configuration. + +DTL_FULL_PATH ?= +DTL_PATH ?= templates/ +DTL_PREFIX ?= +DTL_SUFFIX ?= _dtl +DTL_OPTS ?= + +# Verbosity. + +dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F)); +dtl_verbose = $(dtl_verbose_$(V)) + +# Core targets. 
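+# Illustrative note: .dtl templates found under DTL_PATH (templates/ by
+# default) are compiled into ebin/ as <DTL_PREFIX><name><DTL_SUFFIX> modules,
+# so a hypothetical templates/home.dtl would produce the home_dtl module.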
+ +DTL_PATH := $(abspath $(DTL_PATH)) +DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl)) + +ifneq ($(DTL_FILES),) + +DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%))) +DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES))) +BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES))) + +ifneq ($(words $(DTL_FILES)),0) +# Rebuild templates when the Makefile changes. +$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP) + $(verbose) if test -f $@; then \ + touch $(DTL_FILES); \ + fi + $(verbose) touch $@ + +ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl +endif + +define erlydtl_compile.erl + [begin + Module0 = case "$(strip $(DTL_FULL_PATH))" of + "" -> + filename:basename(F, ".dtl"); + _ -> + "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"), + re:replace(F2, "/", "_", [{return, list}, global]) + end, + Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"), + case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of + ok -> ok; + {ok, _} -> ok + end + end || F <- string:tokens("$(1)", " ")], + halt(). +endef + +ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/ + $(if $(strip $?),\ + $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\ + -pa ebin/)) + +endif + +# Copyright (c) 2016, Loรฏc Hoguin +# Copyright (c) 2014, Dave Cottlehuber +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: distclean-escript escript escript-zip + +# Configuration. + +ESCRIPT_NAME ?= $(PROJECT) +ESCRIPT_FILE ?= $(ESCRIPT_NAME) + +ESCRIPT_SHEBANG ?= /usr/bin/env escript +ESCRIPT_COMMENT ?= This is an -*- erlang -*- file +ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME) + +ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null) +ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip + +# Core targets. + +distclean:: distclean-escript + +help:: + $(verbose) printf "%s\n" "" \ + "Escript targets:" \ + " escript Build an executable escript archive" \ + +# Plugin-specific targets. + +escript-zip:: FULL=1 +escript-zip:: deps app + $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP)) + $(verbose) rm -f $(ESCRIPT_ZIP_FILE) + $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/* +ifneq ($(DEPS),) + $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \ + $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \ + $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log))))) +endif + +escript:: escript-zip + $(gen_verbose) printf "%s\n" \ + "#!$(ESCRIPT_SHEBANG)" \ + "%% $(ESCRIPT_COMMENT)" \ + "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE) + $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE) + $(verbose) chmod +x $(ESCRIPT_FILE) + +distclean-escript: + $(gen_verbose) rm -f $(ESCRIPT_FILE) + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# Copyright (c) 2014, Enrique Fernandez +# This file is contributed to erlang.mk and subject to the terms of the ISC License. + +.PHONY: eunit apps-eunit + +# Configuration + +EUNIT_OPTS ?= +EUNIT_ERL_OPTS ?= + +# Core targets. + +tests:: eunit + +help:: + $(verbose) printf "%s\n" "" \ + "EUnit targets:" \ + " eunit Run all the EUnit tests for this project" + +# Plugin-specific targets. 
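+# Usage sketch (module and function names are hypothetical); the t variable
+# handled below selects a whole module or a single 0-arity test function:
+#
+#   make eunit                       # run every EUnit test
+#   make eunit t=my_module           # run the tests of one module
+#   make eunit t=my_module:my_test_  # run a single exported test fun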
+ +define eunit.erl + $(call cover.erl) + CoverSetup(), + case eunit:test($1, [$(EUNIT_OPTS)]) of + ok -> ok; + error -> halt(2) + end, + CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"), + halt() +endef + +EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin + +ifdef t +ifeq (,$(findstring :,$(t))) +eunit: test-build cover-data-dir + $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS)) +else +eunit: test-build cover-data-dir + $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS)) +endif +else +EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES))) +EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl))) + +EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \ + $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)') + +eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir +ifneq ($(wildcard src/ $(TEST_DIR)),) + $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS)) +endif + +ifneq ($(ALL_APPS_DIRS),) +apps-eunit: test-build + $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \ + [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \ + exit $$eunit_retcode +endif +endif + +# Copyright (c) 2020, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +HEX_CORE_GIT ?= https://github.com/hexpm/hex_core +HEX_CORE_COMMIT ?= v0.7.0 + +PACKAGES += hex_core +pkg_hex_core_name = hex_core +pkg_hex_core_description = Reference implementation of Hex specifications +pkg_hex_core_homepage = $(HEX_CORE_GIT) +pkg_hex_core_fetch = git +pkg_hex_core_repo = $(HEX_CORE_GIT) +pkg_hex_core_commit = $(HEX_CORE_COMMIT) + +# We automatically depend on hex_core when the project isn't already. +$(if $(filter hex_core,$(DEPS) $(BUILD_DEPS) $(DOC_DEPS) $(REL_DEPS) $(TEST_DEPS)),,\ + $(eval $(call dep_target,hex_core))) + +hex-core: $(DEPS_DIR)/hex_core + $(verbose) if [ ! -e $(DEPS_DIR)/hex_core/ebin/dep_built ]; then \ + $(MAKE) -C $(DEPS_DIR)/hex_core IS_DEP=1; \ + touch $(DEPS_DIR)/hex_core/ebin/dep_built; \ + fi + +# @todo This must also apply to fetching. +HEX_CONFIG ?= + +define hex_config.erl + begin + Config0 = hex_core:default_config(), + Config0$(HEX_CONFIG) + end +endef + +define hex_user_create.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + case hex_api_user:create(Config, <<"$(strip $1)">>, <<"$(strip $2)">>, <<"$(strip $3)">>) of + {ok, {201, _, #{<<"email">> := Email, <<"url">> := URL, <<"username">> := Username}}} -> + io:format("User ~s (~s) created at ~s~n" + "Please check your inbox for a confirmation email.~n" + "You must confirm before you are allowed to publish packages.~n", + [Username, Email, URL]), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(80) + end +endef + +# The $(info ) call inserts a new line after the password prompt. 
+hex-user-create: hex-core + $(if $(HEX_USERNAME),,$(eval HEX_USERNAME := $(shell read -p "Username: " username; echo $$username))) + $(if $(HEX_PASSWORD),,$(eval HEX_PASSWORD := $(shell stty -echo; read -p "Password: " password; stty echo; echo $$password) $(info ))) + $(if $(HEX_EMAIL),,$(eval HEX_EMAIL := $(shell read -p "Email: " email; echo $$email))) + $(gen_verbose) $(call erlang,$(call hex_user_create.erl,$(HEX_USERNAME),$(HEX_PASSWORD),$(HEX_EMAIL))) + +define hex_key_add.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => iolist_to_binary([<<"Basic ">>, base64:encode(<<"$(strip $1):$(strip $2)">>)])}, + Permissions = [ + case string:split(P, <<":">>) of + [D] -> #{domain => D}; + [D, R] -> #{domain => D, resource => R} + end + || P <- string:split(<<"$(strip $4)">>, <<",">>, all)], + case hex_api_key:add(ConfigF, <<"$(strip $3)">>, Permissions) of + {ok, {201, _, #{<<"secret">> := Secret}}} -> + io:format("Key ~s created for user ~s~nSecret: ~s~n" + "Please store the secret in a secure location, such as a password store.~n" + "The secret will be requested for most Hex-related operations.~n", + [<<"$(strip $3)">>, <<"$(strip $1)">>, Secret]), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(81) + end +endef + +hex-key-add: hex-core + $(if $(HEX_USERNAME),,$(eval HEX_USERNAME := $(shell read -p "Username: " username; echo $$username))) + $(if $(HEX_PASSWORD),,$(eval HEX_PASSWORD := $(shell stty -echo; read -p "Password: " password; stty echo; echo $$password) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_key_add.erl,$(HEX_USERNAME),$(HEX_PASSWORD),\ + $(if $(name),$(name),$(shell hostname)-erlang-mk),\ + $(if $(perm),$(perm),api))) + +HEX_TARBALL_EXTRA_METADATA ?= + +# @todo Check that we can += files +HEX_TARBALL_FILES ?= \ + $(wildcard early-plugins.mk) \ + $(wildcard ebin/$(PROJECT).app) \ + $(wildcard ebin/$(PROJECT).appup) \ + $(wildcard $(notdir $(ERLANG_MK_FILENAME))) \ + $(sort $(call core_find,include/,*.hrl)) \ + $(wildcard LICENSE*) \ + $(wildcard Makefile) \ + $(wildcard plugins.mk) \ + $(sort $(call core_find,priv/,*)) \ + $(wildcard README*) \ + $(wildcard rebar.config) \ + $(sort $(call core_find,src/,*)) + +HEX_TARBALL_OUTPUT_FILE ?= $(ERLANG_MK_TMP)/$(PROJECT).tar + +# @todo Need to check for rebar.config and/or the absence of DEPS to know +# whether a project will work with Rebar. +# +# @todo contributors licenses links in HEX_TARBALL_EXTRA_METADATA + +# In order to build the requirements metadata we look into DEPS. +# We do not require that the project use Hex dependencies, however +# Hex.pm does require that the package name and version numbers +# correspond to a real Hex package. 
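+# As a purely illustrative example, a Hex dependency is normally declared
+# in the project Makefile as:
+#
+#   DEPS = cowboy
+#   dep_cowboy = hex 2.9.0
+#
+# and those entries are what the requirements map below is built from.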
+define hex_tarball_create.erl + Files0 = [$(call comma_list,$(patsubst %,"%",$(HEX_TARBALL_FILES)))], + Requirements0 = #{ + $(foreach d,$(DEPS), + <<"$(if $(subst hex,,$(call query_fetch_method,$d)),$d,$(if $(word 3,$(dep_$d)),$(word 3,$(dep_$d)),$d))">> => #{ + <<"app">> => <<"$d">>, + <<"optional">> => false, + <<"requirement">> => <<"$(call query_version,$d)">> + },) + $(if $(DEPS),dummy => dummy) + }, + Requirements = maps:remove(dummy, Requirements0), + Metadata0 = #{ + app => <<"$(strip $(PROJECT))">>, + build_tools => [<<"make">>, <<"rebar3">>], + description => <<"$(strip $(PROJECT_DESCRIPTION))">>, + files => [unicode:characters_to_binary(F) || F <- Files0], + name => <<"$(strip $(PROJECT))">>, + requirements => Requirements, + version => <<"$(strip $(PROJECT_VERSION))">> + }, + Metadata = Metadata0$(HEX_TARBALL_EXTRA_METADATA), + Files = [case file:read_file(F) of + {ok, Bin} -> + {F, Bin}; + {error, Reason} -> + io:format("Error trying to open file ~0p: ~0p~n", [F, Reason]), + halt(82) + end || F <- Files0], + case hex_tarball:create(Metadata, Files) of + {ok, #{tarball := Tarball}} -> + ok = file:write_file("$(strip $(HEX_TARBALL_OUTPUT_FILE))", Tarball), + halt(0); + {error, Reason} -> + io:format("Error ~0p~n", [Reason]), + halt(83) + end +endef + +hex_tar_verbose_0 = @echo " TAR $(notdir $(ERLANG_MK_TMP))/$(@F)"; +hex_tar_verbose_2 = set -x; +hex_tar_verbose = $(hex_tar_verbose_$(V)) + +$(HEX_TARBALL_OUTPUT_FILE): hex-core app + $(hex_tar_verbose) $(call erlang,$(call hex_tarball_create.erl)) + +hex-tarball-create: $(HEX_TARBALL_OUTPUT_FILE) + +define hex_release_publish_summary.erl + {ok, Tarball} = erl_tar:open("$(strip $(HEX_TARBALL_OUTPUT_FILE))", [read]), + ok = erl_tar:extract(Tarball, [{cwd, "$(ERLANG_MK_TMP)"}, {files, ["metadata.config"]}]), + {ok, Metadata} = file:consult("$(ERLANG_MK_TMP)/metadata.config"), + #{ + <<"name">> := Name, + <<"version">> := Version, + <<"files">> := Files, + <<"requirements">> := Deps + } = maps:from_list(Metadata), + io:format("Publishing ~s ~s~n Dependencies:~n", [Name, Version]), + case Deps of + [] -> + io:format(" (none)~n"); + _ -> + [begin + #{<<"app">> := DA, <<"requirement">> := DR} = maps:from_list(D), + io:format(" ~s ~s~n", [DA, DR]) + end || {_, D} <- Deps] + end, + io:format(" Included files:~n"), + [io:format(" ~s~n", [F]) || F <- Files], + io:format("You may also review the contents of the tarball file.~n" + "Please enter your secret key to proceed.~n"), + halt(0) +endef + +define hex_release_publish.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => <<"$(strip $1)">>}, + {ok, Tarball} = file:read_file("$(strip $(HEX_TARBALL_OUTPUT_FILE))"), + case hex_api_release:publish(ConfigF, Tarball, [{replace, $2}]) of + {ok, {200, _, #{}}} -> + io:format("Release replaced~n"), + halt(0); + {ok, {201, _, #{}}} -> + io:format("Release published~n"), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(84) + end +endef + +hex-release-tarball: hex-core $(HEX_TARBALL_OUTPUT_FILE) + $(verbose) $(call erlang,$(call hex_release_publish_summary.erl)) + +hex-release-publish: hex-core hex-release-tarball + $(if $(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_release_publish.erl,$(HEX_SECRET),false)) + +hex-release-replace: hex-core hex-release-tarball + $(if 
$(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_release_publish.erl,$(HEX_SECRET),true)) + +define hex_release_delete.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => <<"$(strip $1)">>}, + case hex_api_release:delete(ConfigF, <<"$(strip $(PROJECT))">>, <<"$(strip $(PROJECT_VERSION))">>) of + {ok, {204, _, _}} -> + io:format("Release $(strip $(PROJECT_VERSION)) deleted~n"), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(85) + end +endef + +hex-release-delete: hex-core + $(if $(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_release_delete.erl,$(HEX_SECRET))) + +define hex_release_retire.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => <<"$(strip $1)">>}, + Params = #{<<"reason">> => <<"$(strip $3)">>, <<"message">> => <<"$(strip $4)">>}, + case hex_api_release:retire(ConfigF, <<"$(strip $(PROJECT))">>, <<"$(strip $2)">>, Params) of + {ok, {204, _, _}} -> + io:format("Release $(strip $2) has been retired~n"), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(86) + end +endef + +hex-release-retire: hex-core + $(if $(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_release_retire.erl,$(HEX_SECRET),\ + $(if $(HEX_VERSION),$(HEX_VERSION),$(PROJECT_VERSION)),\ + $(if $(HEX_REASON),$(HEX_REASON),invalid),\ + $(HEX_MESSAGE))) + +define hex_release_unretire.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => <<"$(strip $1)">>}, + case hex_api_release:unretire(ConfigF, <<"$(strip $(PROJECT))">>, <<"$(strip $2)">>) of + {ok, {204, _, _}} -> + io:format("Release $(strip $2) is not retired anymore~n"), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(87) + end +endef + +hex-release-unretire: hex-core + $(if $(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_release_unretire.erl,$(HEX_SECRET),\ + $(if $(HEX_VERSION),$(HEX_VERSION),$(PROJECT_VERSION)))) + +HEX_DOCS_DOC_DIR ?= doc/ +HEX_DOCS_TARBALL_FILES ?= $(sort $(call core_find,$(HEX_DOCS_DOC_DIR),*)) +HEX_DOCS_TARBALL_OUTPUT_FILE ?= $(ERLANG_MK_TMP)/$(PROJECT)-docs.tar.gz + +$(HEX_DOCS_TARBALL_OUTPUT_FILE): hex-core app docs + $(hex_tar_verbose) tar czf $(HEX_DOCS_TARBALL_OUTPUT_FILE) -C $(HEX_DOCS_DOC_DIR) \ + $(HEX_DOCS_TARBALL_FILES:$(HEX_DOCS_DOC_DIR)%=%) + +hex-docs-tarball-create: $(HEX_DOCS_TARBALL_OUTPUT_FILE) + +define hex_docs_publish.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => <<"$(strip $1)">>}, + {ok, Tarball} = file:read_file("$(strip $(HEX_DOCS_TARBALL_OUTPUT_FILE))"), + case hex_api:post(ConfigF, + ["packages", "$(strip $(PROJECT))", "releases", "$(strip $(PROJECT_VERSION))", "docs"], + 
{"application/octet-stream", Tarball}) of + {ok, {Status, _, _}} when Status >= 200, Status < 300 -> + io:format("Docs published~n"), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(88) + end +endef + +hex-docs-publish: hex-core hex-docs-tarball-create + $(if $(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_docs_publish.erl,$(HEX_SECRET))) + +define hex_docs_delete.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => <<"$(strip $1)">>}, + case hex_api:delete(ConfigF, + ["packages", "$(strip $(PROJECT))", "releases", "$(strip $2)", "docs"]) of + {ok, {Status, _, _}} when Status >= 200, Status < 300 -> + io:format("Docs removed~n"), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(89) + end +endef + +hex-docs-delete: hex-core + $(if $(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_docs_delete.erl,$(HEX_SECRET),\ + $(if $(HEX_VERSION),$(HEX_VERSION),$(PROJECT_VERSION)))) + +# Copyright (c) 2015-2017, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper) +.PHONY: proper + +# Targets. + +tests:: proper + +define proper_check.erl + $(call cover.erl) + code:add_pathsa([ + "$(call core_native_path,$(CURDIR)/ebin)", + "$(call core_native_path,$(DEPS_DIR)/*/ebin)", + "$(call core_native_path,$(TEST_DIR))"]), + Module = fun(M) -> + [true] =:= lists:usort([ + case atom_to_list(F) of + "prop_" ++ _ -> + io:format("Testing ~p:~p/0~n", [M, F]), + proper:quickcheck(M:F(), nocolors); + _ -> + true + end + || {F, 0} <- M:module_info(exports)]) + end, + try begin + CoverSetup(), + Res = case $(1) of + all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]); + module -> Module($(2)); + function -> proper:quickcheck($(2), nocolors) + end, + CoverExport("$(COVER_DATA_DIR)/proper.coverdata"), + Res + end of + true -> halt(0); + _ -> halt(1) + catch error:undef -> + io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]), + halt(0) + end. +endef + +ifdef t +ifeq (,$(findstring :,$(t))) +proper: test-build cover-data-dir + $(verbose) $(call erlang,$(call proper_check.erl,module,$(t))) +else +proper: test-build cover-data-dir + $(verbose) echo Testing $(t)/0 + $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)())) +endif +else +proper: test-build cover-data-dir + $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \ + $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam)))))) + $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES))) +endif +endif + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +# Verbosity. + +proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F)); +proto_verbose = $(proto_verbose_$(V)) + +# Core targets. 
+ +ifneq ($(wildcard src/),) +ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),) +PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES)) +ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES)))) + +ifeq ($(PROTO_FILES),) +$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: + $(verbose) : +else +# Rebuild proto files when the Makefile changes. +# We exclude $(PROJECT).d to avoid a circular dependency. +$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP) + $(verbose) if test -f $@; then \ + touch $(PROTO_FILES); \ + fi + $(verbose) touch $@ + +$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs +endif + +ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),) +define compile_proto.erl + [begin + protobuffs_compile:generate_source(F, [ + {output_include_dir, "./include"}, + {output_src_dir, "./src"}]) + end || F <- string:tokens("$1", " ")], + halt(). +endef +else +define compile_proto.erl + [begin + gpb_compile:file(F, [ + {include_as_lib, true}, + {module_name_suffix, "_pb"}, + {o_hrl, "./include"}, + {o_erl, "./src"}]) + end || F <- string:tokens("$1", " ")], + halt(). +endef +endif + +ifneq ($(PROTO_FILES),) +$(PROJECT).d:: $(PROTO_FILES) + $(verbose) mkdir -p ebin/ include/ + $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?))) +endif +endif +endif + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: relx-rel relx-relup distclean-relx-rel run + +# Configuration. + +RELX ?= $(ERLANG_MK_TMP)/relx +RELX_CONFIG ?= $(CURDIR)/relx.config + +RELX_URL ?= https://erlang.mk/res/relx-v3.27.0 +RELX_OPTS ?= +RELX_OUTPUT_DIR ?= _rel +RELX_REL_EXT ?= +RELX_TAR ?= 1 + +ifdef SFX + RELX_TAR = 1 +endif + +ifeq ($(firstword $(RELX_OPTS)),-o) + RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS)) +else + RELX_OPTS += -o $(RELX_OUTPUT_DIR) +endif + +# Core targets. + +ifeq ($(IS_DEP),) +ifneq ($(wildcard $(RELX_CONFIG)),) +rel:: relx-rel + +relup:: relx-relup +endif +endif + +distclean:: distclean-relx-rel + +# Plugin-specific targets. + +$(RELX): | $(ERLANG_MK_TMP) + $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL)) + $(verbose) chmod +x $(RELX) + +relx-rel: $(RELX) rel-deps app + $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release + $(verbose) $(MAKE) relx-post-rel +ifeq ($(RELX_TAR),1) + $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar +endif + +relx-relup: $(RELX) rel-deps app + $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release + $(MAKE) relx-post-rel + $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar) + +distclean-relx-rel: + $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR) + +# Default hooks. +relx-post-rel:: + $(verbose) : + +# Run target. + +ifeq ($(wildcard $(RELX_CONFIG)),) +run:: +else + +define get_relx_release.erl + {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"), + {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config), + Vsn = case Vsn0 of + {cmd, Cmd} -> os:cmd(Cmd); + semver -> ""; + {semver, _} -> ""; + VsnStr -> Vsn0 + end, + Extended = case lists:keyfind(extended_start_script, 1, Config) of + {_, true} -> "1"; + _ -> "" + end, + io:format("~s ~s ~s", [Name, Vsn, Extended]), + halt(0). 
+endef + +RELX_REL := $(shell $(call erlang,$(get_relx_release.erl))) +RELX_REL_NAME := $(word 1,$(RELX_REL)) +RELX_REL_VSN := $(word 2,$(RELX_REL)) +RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console) + +ifeq ($(PLATFORM),msys2) +RELX_REL_EXT := .cmd +endif + +run:: all + $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD) + +ifdef RELOAD +rel:: + $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping + $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \ + eval "io:format(\"~p~n\", [c:lm()])" +endif + +help:: + $(verbose) printf "%s\n" "" \ + "Relx targets:" \ + " run Compile the project, build the release and run it" + +endif + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# Copyright (c) 2014, M Robert Martin +# This file is contributed to erlang.mk and subject to the terms of the ISC License. + +.PHONY: shell + +# Configuration. + +SHELL_ERL ?= erl +SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR) +SHELL_OPTS ?= + +ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS)) + +# Core targets + +help:: + $(verbose) printf "%s\n" "" \ + "Shell targets:" \ + " shell Run an erlang shell with SHELL_OPTS or reasonable default" + +# Plugin-specific targets. + +$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep)))) + +ifneq ($(SKIP_DEPS),) +build-shell-deps: +else +build-shell-deps: $(ALL_SHELL_DEPS_DIRS) + $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \ + if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \ + :; \ + else \ + $(MAKE) -C $$dep IS_DEP=1; \ + if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \ + fi \ + done +endif + +shell:: build-shell-deps + $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS) + +# Copyright 2017, Stanislaw Klekot +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: distclean-sphinx sphinx + +# Configuration. + +SPHINX_BUILD ?= sphinx-build +SPHINX_SOURCE ?= doc +SPHINX_CONFDIR ?= +SPHINX_FORMATS ?= html +SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees +SPHINX_OPTS ?= + +#sphinx_html_opts = +#sphinx_html_output = html +#sphinx_man_opts = +#sphinx_man_output = man +#sphinx_latex_opts = +#sphinx_latex_output = latex + +# Helpers. + +sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q +sphinx_build_1 = $(SPHINX_BUILD) -N +sphinx_build_2 = set -x; $(SPHINX_BUILD) +sphinx_build = $(sphinx_build_$(V)) + +define sphinx.build +$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1) + +endef + +define sphinx.output +$(if $(sphinx_$1_output),$(sphinx_$1_output),$1) +endef + +# Targets. + +ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),) +docs:: sphinx +distclean:: distclean-sphinx +endif + +help:: + $(verbose) printf "%s\n" "" \ + "Sphinx targets:" \ + " sphinx Generate Sphinx documentation." \ + "" \ + "ReST sources and 'conf.py' file are expected in directory pointed by" \ + "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \ + "'html' format is generated by default); target directory can be specified by" \ + 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \ + "Additional Sphinx options can be set in SPHINX_OPTS." + +# Plugin-specific targets. 
+ +sphinx: + $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F)) + +distclean-sphinx: + $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F))) + +# Copyright (c) 2017, Jean-Sรฉbastien Pรฉdron +# This file is contributed to erlang.mk and subject to the terms of the ISC License. + +.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS + +show-ERL_LIBS: + @echo $(ERL_LIBS) + +show-ERLC_OPTS: + @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";) + +show-TEST_ERLC_OPTS: + @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";) + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq) +.PHONY: triq + +# Targets. + +tests:: triq + +define triq_check.erl + $(call cover.erl) + code:add_pathsa([ + "$(call core_native_path,$(CURDIR)/ebin)", + "$(call core_native_path,$(DEPS_DIR)/*/ebin)", + "$(call core_native_path,$(TEST_DIR))"]), + try begin + CoverSetup(), + Res = case $(1) of + all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]); + module -> triq:check($(2)); + function -> triq:check($(2)) + end, + CoverExport("$(COVER_DATA_DIR)/triq.coverdata"), + Res + end of + true -> halt(0); + _ -> halt(1) + catch error:undef -> + io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]), + halt(0) + end. +endef + +ifdef t +ifeq (,$(findstring :,$(t))) +triq: test-build cover-data-dir + $(verbose) $(call erlang,$(call triq_check.erl,module,$(t))) +else +triq: test-build cover-data-dir + $(verbose) echo Testing $(t)/0 + $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)())) +endif +else +triq: test-build cover-data-dir + $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \ + $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam)))))) + $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES))) +endif +endif + +# Copyright (c) 2016, Loรฏc Hoguin +# Copyright (c) 2015, Erlang Solutions Ltd. +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: xref distclean-xref + +# Configuration. + +ifeq ($(XREF_CONFIG),) + XREFR_ARGS := +else + XREFR_ARGS := -c $(XREF_CONFIG) +endif + +XREFR ?= $(CURDIR)/xrefr +export XREFR + +XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr + +# Core targets. + +help:: + $(verbose) printf '%s\n' '' \ + 'Xref targets:' \ + ' xref Run Xrefr using $$XREF_CONFIG as config file if defined' + +distclean:: distclean-xref + +# Plugin-specific targets. + +$(XREFR): + $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL)) + $(verbose) chmod +x $(XREFR) + +xref: deps app $(XREFR) + $(gen_verbose) $(XREFR) $(XREFR_ARGS) + +distclean-xref: + $(gen_verbose) rm -rf $(XREFR) + +# Copyright (c) 2016, Loรฏc Hoguin +# Copyright (c) 2015, Viktor Sรถderqvist +# This file is part of erlang.mk and subject to the terms of the ISC License. + +COVER_REPORT_DIR ?= cover +COVER_DATA_DIR ?= $(COVER_REPORT_DIR) + +ifdef COVER +COVER_APPS ?= $(notdir $(ALL_APPS_DIRS)) +COVER_DEPS ?= +endif + +# Code coverage for Common Test. + +ifdef COVER +ifdef CT_RUN +ifneq ($(wildcard $(TEST_DIR)),) +test-build:: $(TEST_DIR)/ct.cover.spec + +$(TEST_DIR)/ct.cover.spec: cover-data-dir + $(gen_verbose) printf "%s\n" \ + "{incl_app, '$(PROJECT)', details}." 
\ + "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \ + $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \ + $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \ + '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@ + +CT_RUN += -cover $(TEST_DIR)/ct.cover.spec +endif +endif +endif + +# Code coverage for other tools. + +ifdef COVER +define cover.erl + CoverSetup = fun() -> + Dirs = ["$(call core_native_path,$(CURDIR)/ebin)" + $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)") + $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")], + [begin + case filelib:is_dir(Dir) of + false -> false; + true -> + case cover:compile_beam_directory(Dir) of + {error, _} -> halt(1); + _ -> true + end + end + end || Dir <- Dirs] + end, + CoverExport = fun(Filename) -> cover:export(Filename) end, +endef +else +define cover.erl + CoverSetup = fun() -> ok end, + CoverExport = fun(_) -> ok end, +endef +endif + +# Core targets + +ifdef COVER +ifneq ($(COVER_REPORT_DIR),) +tests:: + $(verbose) $(MAKE) --no-print-directory cover-report +endif + +cover-data-dir: | $(COVER_DATA_DIR) + +$(COVER_DATA_DIR): + $(verbose) mkdir -p $(COVER_DATA_DIR) +else +cover-data-dir: +endif + +clean:: coverdata-clean + +ifneq ($(COVER_REPORT_DIR),) +distclean:: cover-report-clean +endif + +help:: + $(verbose) printf "%s\n" "" \ + "Cover targets:" \ + " cover-report Generate a HTML coverage report from previously collected" \ + " cover data." \ + " all.coverdata Merge all coverdata files into all.coverdata." \ + "" \ + "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \ + "target tests additionally generates a HTML coverage report from the combined" \ + "coverdata files from each of these testing tools. HTML reports can be disabled" \ + "by setting COVER_REPORT_DIR to empty." + +# Plugin specific targets + +COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata)) + +.PHONY: coverdata-clean +coverdata-clean: + $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec + +# Merge all coverdata files into one. +define cover_export.erl + $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),) + cover:export("$(COVER_DATA_DIR)/$@"), halt(0). +endef + +all.coverdata: $(COVERDATA) cover-data-dir + $(gen_verbose) $(call erlang,$(cover_export.erl)) + +# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to +# empty if you want the coverdata files but not the HTML report. +ifneq ($(COVER_REPORT_DIR),) + +.PHONY: cover-report-clean cover-report + +cover-report-clean: + $(gen_verbose) rm -rf $(COVER_REPORT_DIR) +ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR)) + $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR)) +endif + +ifeq ($(COVERDATA),) +cover-report: +else + +# Modules which include eunit.hrl always contain one line without coverage +# because eunit defines test/0 which is never called. We compensate for this. 
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \
+	grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \
+	| sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq))
+
+define cover_report.erl
+	$(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),)
+	Ms = cover:imported_modules(),
+	[cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M)
+		++ ".COVER.html", [html]) || M <- Ms],
+	Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms],
+	EunitHrlMods = [$(EUNIT_HRL_MODS)],
+	Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of
+		true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report],
+	TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]),
+	TotalN = lists:sum([N || {_, {_, N}} <- Report1]),
+	Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end,
+	TotalPerc = Perc(TotalY, TotalN),
+	{ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]),
+	io:format(F, "<!DOCTYPE html><html>~n"
+		"<head><meta charset=\"UTF-8\">~n"
+		"<title>Coverage report</title></head>~n"
+		"<body>~n", []),
+	io:format(F, "<h1>Coverage</h1>~n<p>Total: ~p%</p>~n", [TotalPerc]),
+	io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+	[io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+		"<td>~p%</td></tr>~n",
+		[M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+	How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+	Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+	io:format(F, "</table>~n"
+		"<p>Generated using ~s and erlang.mk on ~s.</p>~n"
+		"</body></html>
~n" + "", [How, Date]), + halt(). +endef + +cover-report: + $(verbose) mkdir -p $(COVER_REPORT_DIR) + $(gen_verbose) $(call erlang,$(cover_report.erl)) + +endif +endif # ifneq ($(COVER_REPORT_DIR),) + +# Copyright (c) 2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: sfx + +ifdef RELX_REL +ifdef SFX + +# Configuration. + +SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz +SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run + +# Core targets. + +rel:: sfx + +# Plugin-specific targets. + +define sfx_stub +#!/bin/sh + +TMPDIR=`mktemp -d` +ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0` +FILENAME=$$(basename $$0) +REL=$${FILENAME%.*} + +tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR + +$$TMPDIR/bin/$$REL console +RET=$$? + +rm -rf $$TMPDIR + +exit $$RET + +__ARCHIVE_BELOW__ +endef + +sfx: + $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE)) + $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE) + $(verbose) chmod +x $(SFX_OUTPUT_FILE) + +endif +endif + +# Copyright (c) 2013-2017, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +# External plugins. + +DEP_PLUGINS ?= + +$(foreach p,$(DEP_PLUGINS),\ + $(eval $(if $(findstring /,$p),\ + $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\ + $(call core_dep_plugin,$p/plugins.mk,$p)))) + +help:: help-plugins + +help-plugins:: + $(verbose) : + +# Copyright (c) 2013-2015, Loรฏc Hoguin +# Copyright (c) 2015-2016, Jean-Sรฉbastien Pรฉdron +# This file is part of erlang.mk and subject to the terms of the ISC License. + +# Fetch dependencies recursively (without building them). + +.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \ + fetch-shell-deps + +.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \ + $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \ + $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \ + $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \ + $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST) + +fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST) +fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) +fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) +fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) +fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST) + +ifneq ($(SKIP_DEPS),) +$(ERLANG_MK_RECURSIVE_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): + $(verbose) :> $@ +else +# By default, we fetch "normal" dependencies. They are also included no +# matter the type of requested dependencies. +# +# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS). + +$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) +$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS) +$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS) +$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS) +$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS) + +# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of +# dependencies with a single target. 
+ifneq ($(filter doc,$(DEP_TYPES)),) +$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS) +endif +ifneq ($(filter rel,$(DEP_TYPES)),) +$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS) +endif +ifneq ($(filter test,$(DEP_TYPES)),) +$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS) +endif +ifneq ($(filter shell,$(DEP_TYPES)),) +$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS) +endif + +ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log) + +$(ERLANG_MK_RECURSIVE_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP) +ifeq ($(IS_APP)$(IS_DEP),) + $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST) +endif + $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST) + $(verbose) set -e; for dep in $^ ; do \ + if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \ + echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \ + if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \ + $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \ + $(MAKE) -C $$dep fetch-deps \ + IS_DEP=1 \ + ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \ + fi \ + fi \ + done +ifeq ($(IS_APP)$(IS_DEP),) + $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \ + uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted + $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \ + || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ + $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted + $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST) +endif +endif # ifneq ($(SKIP_DEPS),) + +# List dependencies recursively. + +.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \ + list-shell-deps + +list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST) +list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) +list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) +list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) +list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST) + +list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps: + $(verbose) cat $^ + +# Query dependencies recursively. 
+ +.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \ + query-shell-deps + +QUERY ?= name fetch_method repo version + +define query_target +$(1): $(2) clean-tmp-query.log +ifeq ($(IS_APP)$(IS_DEP),) + $(verbose) rm -f $(4) +endif + $(verbose) $(foreach dep,$(3),\ + echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;) + $(if $(filter-out query-deps,$(1)),,\ + $(verbose) set -e; for dep in $(3) ; do \ + if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \ + :; \ + else \ + echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \ + $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \ + fi \ + done) +ifeq ($(IS_APP)$(IS_DEP),) + $(verbose) touch $(4) + $(verbose) cat $(4) +endif +endef + +clean-tmp-query.log: +ifeq ($(IS_DEP),) + $(verbose) rm -f $(ERLANG_MK_TMP)/query.log +endif + +$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE))) +$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE))) +$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE))) +$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE))) +$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE))) diff --git a/deps/cowboy/hex_metadata.config b/deps/cowboy/hex_metadata.config new file mode 100644 index 0000000..a612e3c --- /dev/null +++ b/deps/cowboy/hex_metadata.config @@ -0,0 +1,36 @@ +{<<"app">>,<<"cowboy">>}. +{<<"build_tools">>,[<<"make">>,<<"rebar3">>]}. +{<<"description">>,<<"Small, fast, modern HTTP server.">>}. +{<<"files">>, + [<<"ebin/cowboy.app">>,<<"erlang.mk">>,<<"LICENSE">>,<<"Makefile">>, + <<"plugins.mk">>,<<"README.asciidoc">>,<<"rebar.config">>, + <<"src/cowboy.erl">>,<<"src/cowboy_app.erl">>,<<"src/cowboy_bstr.erl">>, + <<"src/cowboy_children.erl">>,<<"src/cowboy_clear.erl">>, + <<"src/cowboy_clock.erl">>,<<"src/cowboy_compress_h.erl">>, + <<"src/cowboy_constraints.erl">>,<<"src/cowboy_handler.erl">>, + <<"src/cowboy_http.erl">>,<<"src/cowboy_http2.erl">>, + <<"src/cowboy_loop.erl">>,<<"src/cowboy_metrics_h.erl">>, + <<"src/cowboy_middleware.erl">>,<<"src/cowboy_req.erl">>, + <<"src/cowboy_rest.erl">>,<<"src/cowboy_router.erl">>, + <<"src/cowboy_static.erl">>,<<"src/cowboy_stream.erl">>, + <<"src/cowboy_stream_h.erl">>,<<"src/cowboy_sub_protocol.erl">>, + <<"src/cowboy_sup.erl">>,<<"src/cowboy_tls.erl">>, + <<"src/cowboy_tracer_h.erl">>,<<"src/cowboy_websocket.erl">>]}. +{<<"licenses">>,[<<"ISC">>]}. +{<<"links">>, + [{<<"Function reference">>, + <<"https://ninenines.eu/docs/en/cowboy/2.9/manual/">>}, + {<<"GitHub">>,<<"https://github.com/ninenines/cowboy">>}, + {<<"Sponsor">>,<<"https://github.com/sponsors/essen">>}, + {<<"User guide">>,<<"https://ninenines.eu/docs/en/cowboy/2.9/guide/">>}]}. +{<<"name">>,<<"cowboy">>}. +{<<"requirements">>, + [{<<"cowlib">>, + [{<<"app">>,<<"cowlib">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"2.11.0">>}]}, + {<<"ranch">>, + [{<<"app">>,<<"ranch">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"1.8.0">>}]}]}. +{<<"version">>,<<"2.9.0">>}. diff --git a/deps/cowboy/plugins.mk b/deps/cowboy/plugins.mk new file mode 100644 index 0000000..3fb2f7e --- /dev/null +++ b/deps/cowboy/plugins.mk @@ -0,0 +1,75 @@ +# See LICENSE for licensing information. 
+ +# Plain HTTP handlers. +define tpl_cowboy.http +-module($(n)). +-behavior(cowboy_handler). + +-export([init/2]). + +init(Req, State) -> + {ok, Req, State}. +endef + +# Loop handlers. +define tpl_cowboy.loop +-module($(n)). +-behavior(cowboy_loop). + +-export([init/2]). +-export([info/3]). + +init(Req, State) -> + {cowboy_loop, Req, State, hibernate}. + +info(_Info, Req, State) -> + {ok, Req, State, hibernate}. +endef + +# REST handlers. +define tpl_cowboy.rest +-module($(n)). +-behavior(cowboy_rest). + +-export([init/2]). +-export([content_types_provided/2]). +-export([to_html/2]). + +init(Req, State) -> + {cowboy_rest, Req, State}. + +content_types_provided(Req, State) -> + {[ + {{<<"text">>, <<"html">>, '*'}, to_html} + ], Req, State}. + +to_html(Req, State) -> + {<<"This is REST!">>, Req, State}. +endef + +# Websocket handlers. +define tpl_cowboy.ws +-module($(n)). +-behavior(cowboy_websocket). + +-export([init/2]). +-export([websocket_init/1]). +-export([websocket_handle/2]). +-export([websocket_info/2]). + +init(Req, State) -> + {cowboy_websocket, Req, State}. + +websocket_init(State) -> + {[], State}. + +websocket_handle({text, Data}, State) -> + {[{text, Data}], State}; +websocket_handle({binary, Data}, State) -> + {[{binary, Data}], State}; +websocket_handle(_Frame, State) -> + {[], State}. + +websocket_info(_Info, State) -> + {[], State}. +endef diff --git a/deps/cowboy/rebar.config b/deps/cowboy/rebar.config new file mode 100644 index 0000000..1532343 --- /dev/null +++ b/deps/cowboy/rebar.config @@ -0,0 +1,4 @@ +{deps, [ +{cowlib,".*",{git,"https://github.com/ninenines/cowlib","2.11.0"}},{ranch,".*",{git,"https://github.com/ninenines/ranch","1.8.0"}} +]}. +{erl_opts, [debug_info,warn_export_vars,warn_shadow_vars,warn_obsolete_guard,warn_missing_spec,warn_untyped_record]}. diff --git a/deps/cowboy/src/cowboy.erl b/deps/cowboy/src/cowboy.erl new file mode 100644 index 0000000..c4be25b --- /dev/null +++ b/deps/cowboy/src/cowboy.erl @@ -0,0 +1,105 @@ +%% Copyright (c) 2011-2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy). + +-export([start_clear/3]). +-export([start_tls/3]). +-export([stop_listener/1]). +-export([set_env/3]). + +%% Internal. +-export([log/2]). +-export([log/4]). + +-type opts() :: cowboy_http:opts() | cowboy_http2:opts(). +-export_type([opts/0]). + +-type fields() :: [atom() + | {atom(), cowboy_constraints:constraint() | [cowboy_constraints:constraint()]} + | {atom(), cowboy_constraints:constraint() | [cowboy_constraints:constraint()], any()}]. +-export_type([fields/0]). + +-type http_headers() :: #{binary() => iodata()}. +-export_type([http_headers/0]). + +-type http_status() :: non_neg_integer() | binary(). +-export_type([http_status/0]). + +-type http_version() :: 'HTTP/2' | 'HTTP/1.1' | 'HTTP/1.0'. +-export_type([http_version/0]). 
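%% Note: the cowboy.http template in plugins.mk above returns the request
%% untouched, so Cowboy replies with 204 No Content by default. In practice
%% init/2 is usually fleshed out with an explicit reply. A minimal sketch,
%% assuming a handler module generated from that template (the status,
%% headers and body shown here are illustrative):
%%
%%   init(Req0, State) ->
%%       Req = cowboy_req:reply(200,
%%           #{<<"content-type">> => <<"text/plain">>},
%%           <<"Hello from Cowboy!">>, Req0),
%%       {ok, Req, State}.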
+ +-spec start_clear(ranch:ref(), ranch:opts(), opts()) + -> {ok, pid()} | {error, any()}. +start_clear(Ref, TransOpts0, ProtoOpts0) -> + TransOpts1 = ranch:normalize_opts(TransOpts0), + {TransOpts, ConnectionType} = ensure_connection_type(TransOpts1), + ProtoOpts = ProtoOpts0#{connection_type => ConnectionType}, + ranch:start_listener(Ref, ranch_tcp, TransOpts, cowboy_clear, ProtoOpts). + +-spec start_tls(ranch:ref(), ranch:opts(), opts()) + -> {ok, pid()} | {error, any()}. +start_tls(Ref, TransOpts0, ProtoOpts0) -> + TransOpts1 = ranch:normalize_opts(TransOpts0), + SocketOpts = maps:get(socket_opts, TransOpts1, []), + TransOpts2 = TransOpts1#{socket_opts => [ + {next_protocols_advertised, [<<"h2">>, <<"http/1.1">>]}, + {alpn_preferred_protocols, [<<"h2">>, <<"http/1.1">>]} + |SocketOpts]}, + {TransOpts, ConnectionType} = ensure_connection_type(TransOpts2), + ProtoOpts = ProtoOpts0#{connection_type => ConnectionType}, + ranch:start_listener(Ref, ranch_ssl, TransOpts, cowboy_tls, ProtoOpts). + +ensure_connection_type(TransOpts=#{connection_type := ConnectionType}) -> + {TransOpts, ConnectionType}; +ensure_connection_type(TransOpts) -> + {TransOpts#{connection_type => supervisor}, supervisor}. + +-spec stop_listener(ranch:ref()) -> ok | {error, not_found}. +stop_listener(Ref) -> + ranch:stop_listener(Ref). + +-spec set_env(ranch:ref(), atom(), any()) -> ok. +set_env(Ref, Name, Value) -> + Opts = ranch:get_protocol_options(Ref), + Env = maps:get(env, Opts, #{}), + Opts2 = maps:put(env, maps:put(Name, Value, Env), Opts), + ok = ranch:set_protocol_options(Ref, Opts2). + +%% Internal. + +-spec log({log, logger:level(), io:format(), list()}, opts()) -> ok. +log({log, Level, Format, Args}, Opts) -> + log(Level, Format, Args, Opts). + +-spec log(logger:level(), io:format(), list(), opts()) -> ok. +log(Level, Format, Args, #{logger := Logger}) + when Logger =/= error_logger -> + _ = Logger:Level(Format, Args), + ok; +%% We use error_logger by default. Because error_logger does +%% not have all the levels we accept we have to do some +%% mapping to error_logger functions. +log(Level, Format, Args, _) -> + Function = case Level of + emergency -> error_msg; + alert -> error_msg; + critical -> error_msg; + error -> error_msg; + warning -> warning_msg; + notice -> warning_msg; + info -> info_msg; + debug -> info_msg + end, + error_logger:Function(Format, Args). diff --git a/deps/cowboy/src/cowboy_app.erl b/deps/cowboy/src/cowboy_app.erl new file mode 100644 index 0000000..74cba41 --- /dev/null +++ b/deps/cowboy/src/cowboy_app.erl @@ -0,0 +1,27 @@ +%% Copyright (c) 2011-2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_app). +-behaviour(application). + +-export([start/2]). +-export([stop/1]). + +-spec start(_, _) -> {ok, pid()}. +start(_, _) -> + cowboy_sup:start_link(). 
+ +-spec stop(_) -> ok. +stop(_) -> + ok. diff --git a/deps/cowboy/src/cowboy_bstr.erl b/deps/cowboy/src/cowboy_bstr.erl new file mode 100644 index 0000000..d8041e4 --- /dev/null +++ b/deps/cowboy/src/cowboy_bstr.erl @@ -0,0 +1,123 @@ +%% Copyright (c) 2011-2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_bstr). + +%% Binary strings. +-export([capitalize_token/1]). +-export([to_lower/1]). +-export([to_upper/1]). + +%% Characters. +-export([char_to_lower/1]). +-export([char_to_upper/1]). + +%% The first letter and all letters after a dash are capitalized. +%% This is the form seen for header names in the HTTP/1.1 RFC and +%% others. Note that using this form isn't required, as header names +%% are case insensitive, and it is only provided for use with eventual +%% badly implemented clients. +-spec capitalize_token(B) -> B when B::binary(). +capitalize_token(B) -> + capitalize_token(B, true, <<>>). +capitalize_token(<<>>, _, Acc) -> + Acc; +capitalize_token(<< $-, Rest/bits >>, _, Acc) -> + capitalize_token(Rest, true, << Acc/binary, $- >>); +capitalize_token(<< C, Rest/bits >>, true, Acc) -> + capitalize_token(Rest, false, << Acc/binary, (char_to_upper(C)) >>); +capitalize_token(<< C, Rest/bits >>, false, Acc) -> + capitalize_token(Rest, false, << Acc/binary, (char_to_lower(C)) >>). + +-spec to_lower(B) -> B when B::binary(). +to_lower(B) -> + << << (char_to_lower(C)) >> || << C >> <= B >>. + +-spec to_upper(B) -> B when B::binary(). +to_upper(B) -> + << << (char_to_upper(C)) >> || << C >> <= B >>. + +-spec char_to_lower(char()) -> char(). +char_to_lower($A) -> $a; +char_to_lower($B) -> $b; +char_to_lower($C) -> $c; +char_to_lower($D) -> $d; +char_to_lower($E) -> $e; +char_to_lower($F) -> $f; +char_to_lower($G) -> $g; +char_to_lower($H) -> $h; +char_to_lower($I) -> $i; +char_to_lower($J) -> $j; +char_to_lower($K) -> $k; +char_to_lower($L) -> $l; +char_to_lower($M) -> $m; +char_to_lower($N) -> $n; +char_to_lower($O) -> $o; +char_to_lower($P) -> $p; +char_to_lower($Q) -> $q; +char_to_lower($R) -> $r; +char_to_lower($S) -> $s; +char_to_lower($T) -> $t; +char_to_lower($U) -> $u; +char_to_lower($V) -> $v; +char_to_lower($W) -> $w; +char_to_lower($X) -> $x; +char_to_lower($Y) -> $y; +char_to_lower($Z) -> $z; +char_to_lower(Ch) -> Ch. + +-spec char_to_upper(char()) -> char(). 
+char_to_upper($a) -> $A; +char_to_upper($b) -> $B; +char_to_upper($c) -> $C; +char_to_upper($d) -> $D; +char_to_upper($e) -> $E; +char_to_upper($f) -> $F; +char_to_upper($g) -> $G; +char_to_upper($h) -> $H; +char_to_upper($i) -> $I; +char_to_upper($j) -> $J; +char_to_upper($k) -> $K; +char_to_upper($l) -> $L; +char_to_upper($m) -> $M; +char_to_upper($n) -> $N; +char_to_upper($o) -> $O; +char_to_upper($p) -> $P; +char_to_upper($q) -> $Q; +char_to_upper($r) -> $R; +char_to_upper($s) -> $S; +char_to_upper($t) -> $T; +char_to_upper($u) -> $U; +char_to_upper($v) -> $V; +char_to_upper($w) -> $W; +char_to_upper($x) -> $X; +char_to_upper($y) -> $Y; +char_to_upper($z) -> $Z; +char_to_upper(Ch) -> Ch. + +%% Tests. + +-ifdef(TEST). +capitalize_token_test_() -> + Tests = [ + {<<"heLLo-woRld">>, <<"Hello-World">>}, + {<<"Sec-Websocket-Version">>, <<"Sec-Websocket-Version">>}, + {<<"Sec-WebSocket-Version">>, <<"Sec-Websocket-Version">>}, + {<<"sec-websocket-version">>, <<"Sec-Websocket-Version">>}, + {<<"SEC-WEBSOCKET-VERSION">>, <<"Sec-Websocket-Version">>}, + {<<"Sec-WebSocket--Version">>, <<"Sec-Websocket--Version">>}, + {<<"Sec-WebSocket---Version">>, <<"Sec-Websocket---Version">>} + ], + [{H, fun() -> R = capitalize_token(H) end} || {H, R} <- Tests]. +-endif. diff --git a/deps/cowboy/src/cowboy_children.erl b/deps/cowboy/src/cowboy_children.erl new file mode 100644 index 0000000..05d39fb --- /dev/null +++ b/deps/cowboy/src/cowboy_children.erl @@ -0,0 +1,192 @@ +%% Copyright (c) 2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_children). + +-export([init/0]). +-export([up/4]). +-export([down/2]). +-export([shutdown/2]). +-export([shutdown_timeout/3]). +-export([terminate/1]). +-export([handle_supervisor_call/4]). + +-record(child, { + pid :: pid(), + streamid :: cowboy_stream:streamid() | undefined, + shutdown :: timeout(), + timer = undefined :: undefined | reference() +}). + +-type children() :: [#child{}]. +-export_type([children/0]). + +-spec init() -> []. +init() -> + []. + +-spec up(Children, pid(), cowboy_stream:streamid(), timeout()) + -> Children when Children::children(). +up(Children, Pid, StreamID, Shutdown) -> + [#child{ + pid=Pid, + streamid=StreamID, + shutdown=Shutdown + }|Children]. + +-spec down(Children, pid()) + -> {ok, cowboy_stream:streamid() | undefined, Children} | error + when Children::children(). +down(Children0, Pid) -> + case lists:keytake(Pid, #child.pid, Children0) of + {value, #child{streamid=StreamID, timer=Ref}, Children} -> + _ = case Ref of + undefined -> ok; + _ -> erlang:cancel_timer(Ref, [{async, true}, {info, false}]) + end, + {ok, StreamID, Children}; + false -> + error + end. + +%% We ask the processes to shutdown first. This gives +%% a chance to processes that are trapping exits to +%% shut down gracefully. Others will exit immediately. 
+%% +%% @todo We currently fire one timer per process being +%% shut down. This is probably not the most efficient. +%% A more efficient solution could be to maintain a +%% single timer and decrease the shutdown time of all +%% processes when it fires. This is however much more +%% complex, and there aren't that many processes that +%% will need to be shutdown through this function, so +%% this is left for later. +-spec shutdown(Children, cowboy_stream:streamid()) + -> Children when Children::children(). +shutdown(Children0, StreamID) -> + [ + case Child of + #child{pid=Pid, streamid=StreamID, shutdown=Shutdown} -> + exit(Pid, shutdown), + Ref = erlang:start_timer(Shutdown, self(), {shutdown, Pid}), + Child#child{streamid=undefined, timer=Ref}; + _ -> + Child + end + || Child <- Children0]. + +-spec shutdown_timeout(children(), reference(), pid()) -> ok. +shutdown_timeout(Children, Ref, Pid) -> + case lists:keyfind(Pid, #child.pid, Children) of + #child{timer=Ref} -> + exit(Pid, kill), + ok; + _ -> + ok + end. + +-spec terminate(children()) -> ok. +terminate(Children) -> + %% For each child, either ask for it to shut down, + %% or cancel its shutdown timer if it already is. + %% + %% We do not need to flush stray timeout messages out because + %% we are either terminating or switching protocols, + %% and in the latter case we flush all messages. + _ = [case TRef of + undefined -> exit(Pid, shutdown); + _ -> erlang:cancel_timer(TRef, [{async, true}, {info, false}]) + end || #child{pid=Pid, timer=TRef} <- Children], + before_terminate_loop(Children). + +before_terminate_loop([]) -> + ok; +before_terminate_loop(Children) -> + %% Find the longest shutdown time. + Time = longest_shutdown_time(Children, 0), + %% We delay the creation of the timer if one of the + %% processes has an infinity shutdown value. + TRef = case Time of + infinity -> undefined; + _ -> erlang:start_timer(Time, self(), terminate) + end, + %% Loop until that time or until all children are dead. + terminate_loop(Children, TRef). + +terminate_loop([], TRef) -> + %% Don't forget to cancel the timer, if any! + case TRef of + undefined -> + ok; + _ -> + _ = erlang:cancel_timer(TRef, [{async, true}, {info, false}]), + ok + end; +terminate_loop(Children, TRef) -> + receive + {'EXIT', Pid, _} when TRef =:= undefined -> + {value, #child{shutdown=Shutdown}, Children1} + = lists:keytake(Pid, #child.pid, Children), + %% We delayed the creation of the timer. If a process with + %% infinity shutdown just ended, we might have to start that timer. + case Shutdown of + infinity -> before_terminate_loop(Children1); + _ -> terminate_loop(Children1, TRef) + end; + {'EXIT', Pid, _} -> + terminate_loop(lists:keydelete(Pid, #child.pid, Children), TRef); + {timeout, TRef, terminate} -> + %% Brutally kill any remaining children. + _ = [exit(Pid, kill) || #child{pid=Pid} <- Children], + ok + end. + +longest_shutdown_time([], Time) -> + Time; +longest_shutdown_time([#child{shutdown=ChildTime}|Tail], Time) when ChildTime > Time -> + longest_shutdown_time(Tail, ChildTime); +longest_shutdown_time([_|Tail], Time) -> + longest_shutdown_time(Tail, Time). + +-spec handle_supervisor_call(any(), {pid(), any()}, children(), module()) -> ok. +handle_supervisor_call(which_children, {From, Tag}, Children, Module) -> + From ! {Tag, which_children(Children, Module)}, + ok; +handle_supervisor_call(count_children, {From, Tag}, Children, _) -> + From ! 
{Tag, count_children(Children)}, + ok; +%% We disable start_child since only incoming requests +%% end up creating a new process. +handle_supervisor_call({start_child, _}, {From, Tag}, _, _) -> + From ! {Tag, {error, start_child_disabled}}, + ok; +%% All other calls refer to children. We act in a similar way +%% to a simple_one_for_one so we never find those. +handle_supervisor_call(_, {From, Tag}, _, _) -> + From ! {Tag, {error, not_found}}, + ok. + +-spec which_children(children(), module()) -> [{module(), pid(), worker, [module()]}]. +which_children(Children, Module) -> + [{Module, Pid, worker, [Module]} || #child{pid=Pid} <- Children]. + +-spec count_children(children()) -> [{atom(), non_neg_integer()}]. +count_children(Children) -> + Count = length(Children), + [ + {specs, 1}, + {active, Count}, + {supervisors, 0}, + {workers, Count} + ]. diff --git a/deps/cowboy/src/cowboy_clear.erl b/deps/cowboy/src/cowboy_clear.erl new file mode 100644 index 0000000..4f3a234 --- /dev/null +++ b/deps/cowboy/src/cowboy_clear.erl @@ -0,0 +1,60 @@ +%% Copyright (c) 2016-2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_clear). +-behavior(ranch_protocol). + +-export([start_link/3]). +-export([start_link/4]). +-export([connection_process/4]). + +%% Ranch 1. +-spec start_link(ranch:ref(), inet:socket(), module(), cowboy:opts()) -> {ok, pid()}. +start_link(Ref, _Socket, Transport, Opts) -> + start_link(Ref, Transport, Opts). + +%% Ranch 2. +-spec start_link(ranch:ref(), module(), cowboy:opts()) -> {ok, pid()}. +start_link(Ref, Transport, Opts) -> + Pid = proc_lib:spawn_link(?MODULE, connection_process, + [self(), Ref, Transport, Opts]), + {ok, Pid}. + +-spec connection_process(pid(), ranch:ref(), module(), cowboy:opts()) -> ok. +connection_process(Parent, Ref, Transport, Opts) -> + ProxyInfo = case maps:get(proxy_header, Opts, false) of + true -> + {ok, ProxyInfo0} = ranch:recv_proxy_header(Ref, 1000), + ProxyInfo0; + false -> + undefined + end, + {ok, Socket} = ranch:handshake(Ref), + %% Use cowboy_http2 directly only when 'http' is missing. + %% Otherwise switch to cowboy_http2 from cowboy_http. + %% + %% @todo Extend this option to cowboy_tls and allow disabling + %% the switch to cowboy_http2 in cowboy_http. Also document it. + Protocol = case maps:get(protocols, Opts, [http2, http]) of + [http2] -> cowboy_http2; + [_|_] -> cowboy_http + end, + init(Parent, Ref, Socket, Transport, ProxyInfo, Opts, Protocol). + +init(Parent, Ref, Socket, Transport, ProxyInfo, Opts, Protocol) -> + _ = case maps:get(connection_type, Opts, supervisor) of + worker -> ok; + supervisor -> process_flag(trap_exit, true) + end, + Protocol:init(Parent, Ref, Socket, Transport, ProxyInfo, Opts). 
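cowboy_clear is the ranch_protocol module that backs cowboy:start_clear/3; the protocols option read above decides whether the connection starts out in cowboy_http or cowboy_http2. A minimal sketch of starting a plaintext listener with a router dispatch (the listener name, port, path and handler module are illustrative, not part of this patch):

    Dispatch = cowboy_router:compile([
        {'_', [{"/", my_handler, []}]}
    ]),
    {ok, _} = cowboy:start_clear(example_http,
        [{port, 8080}],
        #{env => #{dispatch => Dispatch}}).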
diff --git a/deps/cowboy/src/cowboy_clock.erl b/deps/cowboy/src/cowboy_clock.erl new file mode 100644 index 0000000..28f8a1b --- /dev/null +++ b/deps/cowboy/src/cowboy_clock.erl @@ -0,0 +1,221 @@ +%% Copyright (c) 2011-2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +%% While a gen_server process runs in the background to update +%% the cache of formatted dates every second, all API calls are +%% local and directly read from the ETS cache table, providing +%% fast time and date computations. +-module(cowboy_clock). +-behaviour(gen_server). + +%% API. +-export([start_link/0]). +-export([stop/0]). +-export([rfc1123/0]). +-export([rfc1123/1]). + +%% gen_server. +-export([init/1]). +-export([handle_call/3]). +-export([handle_cast/2]). +-export([handle_info/2]). +-export([terminate/2]). +-export([code_change/3]). + +-record(state, { + universaltime = undefined :: undefined | calendar:datetime(), + rfc1123 = <<>> :: binary(), + tref = undefined :: undefined | reference() +}). + +%% API. + +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_server:start_link({local, ?MODULE}, ?MODULE, [], []). + +-spec stop() -> stopped. +stop() -> + gen_server:call(?MODULE, stop). + +%% When the ets table doesn't exist, either because of a bug +%% or because Cowboy is being restarted, we perform in a +%% slightly degraded state and build a new timestamp for +%% every request. +-spec rfc1123() -> binary(). +rfc1123() -> + try + ets:lookup_element(?MODULE, rfc1123, 2) + catch error:badarg -> + rfc1123(erlang:universaltime()) + end. + +-spec rfc1123(calendar:datetime()) -> binary(). +rfc1123(DateTime) -> + update_rfc1123(<<>>, undefined, DateTime). + +%% gen_server. + +-spec init([]) -> {ok, #state{}}. +init([]) -> + ?MODULE = ets:new(?MODULE, [set, protected, + named_table, {read_concurrency, true}]), + T = erlang:universaltime(), + B = update_rfc1123(<<>>, undefined, T), + TRef = erlang:send_after(1000, self(), update), + ets:insert(?MODULE, {rfc1123, B}), + {ok, #state{universaltime=T, rfc1123=B, tref=TRef}}. + +-type from() :: {pid(), term()}. +-spec handle_call + (stop, from(), State) -> {stop, normal, stopped, State} + when State::#state{}. +handle_call(stop, _From, State) -> + {stop, normal, stopped, State}; +handle_call(_Request, _From, State) -> + {reply, ignored, State}. + +-spec handle_cast(_, State) -> {noreply, State} when State::#state{}. +handle_cast(_Msg, State) -> + {noreply, State}. + +-spec handle_info(any(), State) -> {noreply, State} when State::#state{}. +handle_info(update, #state{universaltime=Prev, rfc1123=B1, tref=TRef0}) -> + %% Cancel the timer in case an external process sent an update message. 
+ _ = erlang:cancel_timer(TRef0), + T = erlang:universaltime(), + B2 = update_rfc1123(B1, Prev, T), + ets:insert(?MODULE, {rfc1123, B2}), + TRef = erlang:send_after(1000, self(), update), + {noreply, #state{universaltime=T, rfc1123=B2, tref=TRef}}; +handle_info(_Info, State) -> + {noreply, State}. + +-spec terminate(_, _) -> ok. +terminate(_Reason, _State) -> + ok. + +-spec code_change(_, State, _) -> {ok, State} when State::#state{}. +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%% Internal. + +-spec update_rfc1123(binary(), undefined | calendar:datetime(), + calendar:datetime()) -> binary(). +update_rfc1123(Bin, Now, Now) -> + Bin; +update_rfc1123(<< Keep:23/binary, _/bits >>, + {Date, {H, M, _}}, {Date, {H, M, S}}) -> + << Keep/binary, (pad_int(S))/binary, " GMT" >>; +update_rfc1123(<< Keep:20/binary, _/bits >>, + {Date, {H, _, _}}, {Date, {H, M, S}}) -> + << Keep/binary, (pad_int(M))/binary, $:, (pad_int(S))/binary, " GMT" >>; +update_rfc1123(<< Keep:17/binary, _/bits >>, {Date, _}, {Date, {H, M, S}}) -> + << Keep/binary, (pad_int(H))/binary, $:, (pad_int(M))/binary, + $:, (pad_int(S))/binary, " GMT" >>; +update_rfc1123(<< _:7/binary, Keep:10/binary, _/bits >>, + {{Y, Mo, _}, _}, {Date = {Y, Mo, D}, {H, M, S}}) -> + Wday = calendar:day_of_the_week(Date), + << (weekday(Wday))/binary, ", ", (pad_int(D))/binary, Keep/binary, + (pad_int(H))/binary, $:, (pad_int(M))/binary, + $:, (pad_int(S))/binary, " GMT" >>; +update_rfc1123(<< _:11/binary, Keep:6/binary, _/bits >>, + {{Y, _, _}, _}, {Date = {Y, Mo, D}, {H, M, S}}) -> + Wday = calendar:day_of_the_week(Date), + << (weekday(Wday))/binary, ", ", (pad_int(D))/binary, " ", + (month(Mo))/binary, Keep/binary, + (pad_int(H))/binary, $:, (pad_int(M))/binary, + $:, (pad_int(S))/binary, " GMT" >>; +update_rfc1123(_, _, {Date = {Y, Mo, D}, {H, M, S}}) -> + Wday = calendar:day_of_the_week(Date), + << (weekday(Wday))/binary, ", ", (pad_int(D))/binary, " ", + (month(Mo))/binary, " ", (integer_to_binary(Y))/binary, + " ", (pad_int(H))/binary, $:, (pad_int(M))/binary, + $:, (pad_int(S))/binary, " GMT" >>. + +%% Following suggestion by MononcQc on #erlounge. +-spec pad_int(0..59) -> binary(). +pad_int(X) when X < 10 -> + << $0, ($0 + X) >>; +pad_int(X) -> + integer_to_binary(X). + +-spec weekday(1..7) -> <<_:24>>. +weekday(1) -> <<"Mon">>; +weekday(2) -> <<"Tue">>; +weekday(3) -> <<"Wed">>; +weekday(4) -> <<"Thu">>; +weekday(5) -> <<"Fri">>; +weekday(6) -> <<"Sat">>; +weekday(7) -> <<"Sun">>. + +-spec month(1..12) -> <<_:24>>. +month( 1) -> <<"Jan">>; +month( 2) -> <<"Feb">>; +month( 3) -> <<"Mar">>; +month( 4) -> <<"Apr">>; +month( 5) -> <<"May">>; +month( 6) -> <<"Jun">>; +month( 7) -> <<"Jul">>; +month( 8) -> <<"Aug">>; +month( 9) -> <<"Sep">>; +month(10) -> <<"Oct">>; +month(11) -> <<"Nov">>; +month(12) -> <<"Dec">>. + +%% Tests. + +-ifdef(TEST). 
+update_rfc1123_test_() -> + Tests = [ + {<<"Sat, 14 May 2011 14:25:33 GMT">>, undefined, + {{2011, 5, 14}, {14, 25, 33}}, <<>>}, + {<<"Sat, 14 May 2011 14:25:33 GMT">>, {{2011, 5, 14}, {14, 25, 33}}, + {{2011, 5, 14}, {14, 25, 33}}, <<"Sat, 14 May 2011 14:25:33 GMT">>}, + {<<"Sat, 14 May 2011 14:25:34 GMT">>, {{2011, 5, 14}, {14, 25, 33}}, + {{2011, 5, 14}, {14, 25, 34}}, <<"Sat, 14 May 2011 14:25:33 GMT">>}, + {<<"Sat, 14 May 2011 14:26:00 GMT">>, {{2011, 5, 14}, {14, 25, 59}}, + {{2011, 5, 14}, {14, 26, 0}}, <<"Sat, 14 May 2011 14:25:59 GMT">>}, + {<<"Sat, 14 May 2011 15:00:00 GMT">>, {{2011, 5, 14}, {14, 59, 59}}, + {{2011, 5, 14}, {15, 0, 0}}, <<"Sat, 14 May 2011 14:59:59 GMT">>}, + {<<"Sun, 15 May 2011 00:00:00 GMT">>, {{2011, 5, 14}, {23, 59, 59}}, + {{2011, 5, 15}, { 0, 0, 0}}, <<"Sat, 14 May 2011 23:59:59 GMT">>}, + {<<"Wed, 01 Jun 2011 00:00:00 GMT">>, {{2011, 5, 31}, {23, 59, 59}}, + {{2011, 6, 1}, { 0, 0, 0}}, <<"Tue, 31 May 2011 23:59:59 GMT">>}, + {<<"Sun, 01 Jan 2012 00:00:00 GMT">>, {{2011, 5, 31}, {23, 59, 59}}, + {{2012, 1, 1}, { 0, 0, 0}}, <<"Sat, 31 Dec 2011 23:59:59 GMT">>} + ], + [{R, fun() -> R = update_rfc1123(B, P, N) end} || {R, P, N, B} <- Tests]. + +pad_int_test_() -> + Tests = [ + { 0, <<"00">>}, { 1, <<"01">>}, { 2, <<"02">>}, { 3, <<"03">>}, + { 4, <<"04">>}, { 5, <<"05">>}, { 6, <<"06">>}, { 7, <<"07">>}, + { 8, <<"08">>}, { 9, <<"09">>}, {10, <<"10">>}, {11, <<"11">>}, + {12, <<"12">>}, {13, <<"13">>}, {14, <<"14">>}, {15, <<"15">>}, + {16, <<"16">>}, {17, <<"17">>}, {18, <<"18">>}, {19, <<"19">>}, + {20, <<"20">>}, {21, <<"21">>}, {22, <<"22">>}, {23, <<"23">>}, + {24, <<"24">>}, {25, <<"25">>}, {26, <<"26">>}, {27, <<"27">>}, + {28, <<"28">>}, {29, <<"29">>}, {30, <<"30">>}, {31, <<"31">>}, + {32, <<"32">>}, {33, <<"33">>}, {34, <<"34">>}, {35, <<"35">>}, + {36, <<"36">>}, {37, <<"37">>}, {38, <<"38">>}, {39, <<"39">>}, + {40, <<"40">>}, {41, <<"41">>}, {42, <<"42">>}, {43, <<"43">>}, + {44, <<"44">>}, {45, <<"45">>}, {46, <<"46">>}, {47, <<"47">>}, + {48, <<"48">>}, {49, <<"49">>}, {50, <<"50">>}, {51, <<"51">>}, + {52, <<"52">>}, {53, <<"53">>}, {54, <<"54">>}, {55, <<"55">>}, + {56, <<"56">>}, {57, <<"57">>}, {58, <<"58">>}, {59, <<"59">>} + ], + [{I, fun() -> O = pad_int(I) end} || {I, O} <- Tests]. +-endif. diff --git a/deps/cowboy/src/cowboy_compress_h.erl b/deps/cowboy/src/cowboy_compress_h.erl new file mode 100644 index 0000000..374cb6a --- /dev/null +++ b/deps/cowboy/src/cowboy_compress_h.erl @@ -0,0 +1,249 @@ +%% Copyright (c) 2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_compress_h). +-behavior(cowboy_stream). + +-export([init/3]). +-export([data/4]). +-export([info/3]). +-export([terminate/3]). +-export([early_error/5]). 
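%% This stream handler is enabled by listing it before cowboy_stream_h in
%% the stream_handlers protocol option; compress_threshold and
%% compress_buffering are the two options it reads (see init/3 below).
%% A minimal sketch, with an illustrative listener name and port:
%%
%%   cowboy:start_clear(example_http, [{port, 8080}], #{
%%       env => #{dispatch => Dispatch},
%%       stream_handlers => [cowboy_compress_h, cowboy_stream_h],
%%       compress_threshold => 300,   %% do not gzip bodies of 300 bytes or less
%%       compress_buffering => false  %% flush compressed data as it is produced
%%   }).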
+ +-record(state, { + next :: any(), + threshold :: non_neg_integer() | undefined, + compress = undefined :: undefined | gzip, + deflate = undefined :: undefined | zlib:zstream(), + deflate_flush = sync :: none | sync +}). + +-spec init(cowboy_stream:streamid(), cowboy_req:req(), cowboy:opts()) + -> {cowboy_stream:commands(), #state{}}. +init(StreamID, Req, Opts) -> + State0 = check_req(Req), + CompressThreshold = maps:get(compress_threshold, Opts, 300), + DeflateFlush = buffering_to_zflush(maps:get(compress_buffering, Opts, false)), + {Commands0, Next} = cowboy_stream:init(StreamID, Req, Opts), + fold(Commands0, State0#state{next=Next, + threshold=CompressThreshold, + deflate_flush=DeflateFlush}). + +-spec data(cowboy_stream:streamid(), cowboy_stream:fin(), cowboy_req:resp_body(), State) + -> {cowboy_stream:commands(), State} when State::#state{}. +data(StreamID, IsFin, Data, State0=#state{next=Next0}) -> + {Commands0, Next} = cowboy_stream:data(StreamID, IsFin, Data, Next0), + fold(Commands0, State0#state{next=Next}). + +-spec info(cowboy_stream:streamid(), any(), State) + -> {cowboy_stream:commands(), State} when State::#state{}. +info(StreamID, Info, State0=#state{next=Next0}) -> + {Commands0, Next} = cowboy_stream:info(StreamID, Info, Next0), + fold(Commands0, State0#state{next=Next}). + +-spec terminate(cowboy_stream:streamid(), cowboy_stream:reason(), #state{}) -> any(). +terminate(StreamID, Reason, #state{next=Next, deflate=Z}) -> + %% Clean the zlib:stream() in case something went wrong. + %% In the normal scenario the stream is already closed. + case Z of + undefined -> ok; + _ -> zlib:close(Z) + end, + cowboy_stream:terminate(StreamID, Reason, Next). + +-spec early_error(cowboy_stream:streamid(), cowboy_stream:reason(), + cowboy_stream:partial_req(), Resp, cowboy:opts()) -> Resp + when Resp::cowboy_stream:resp_command(). +early_error(StreamID, Reason, PartialReq, Resp, Opts) -> + cowboy_stream:early_error(StreamID, Reason, PartialReq, Resp, Opts). + +%% Internal. + +%% Check if the client supports decoding of gzip responses. +%% +%% A malformed accept-encoding header is ignored (no compression). +check_req(Req) -> + try cowboy_req:parse_header(<<"accept-encoding">>, Req) of + %% Client doesn't support any compression algorithm. + undefined -> + #state{compress=undefined}; + Encodings -> + %% We only support gzip so look for it specifically. + %% @todo A recipient SHOULD consider "x-gzip" to be + %% equivalent to "gzip". (RFC7230 4.2.3) + case [E || E={<<"gzip">>, Q} <- Encodings, Q =/= 0] of + [] -> + #state{compress=undefined}; + _ -> + #state{compress=gzip} + end + catch + _:_ -> + #state{compress=undefined} + end. + +%% Do not compress responses that contain the content-encoding header. +check_resp_headers(#{<<"content-encoding">> := _}, State) -> + State#state{compress=undefined}; +check_resp_headers(_, State) -> + State. + +fold(Commands, State=#state{compress=undefined}) -> + {Commands, State}; +fold(Commands, State) -> + fold(Commands, State, []). + +fold([], State, Acc) -> + {lists:reverse(Acc), State}; +%% We do not compress full sendfile bodies. +fold([Response={response, _, _, {sendfile, _, _, _}}|Tail], State, Acc) -> + fold(Tail, State, [Response|Acc]); +%% We compress full responses directly, unless they are lower than +%% the configured threshold or we find we are not able to by looking at the headers. 
+fold([Response0={response, _, Headers, Body}|Tail], + State0=#state{threshold=CompressThreshold}, Acc) -> + case check_resp_headers(Headers, State0) of + State=#state{compress=undefined} -> + fold(Tail, State, [Response0|Acc]); + State1 -> + BodyLength = iolist_size(Body), + if + BodyLength =< CompressThreshold -> + fold(Tail, State1, [Response0|Acc]); + true -> + {Response, State} = gzip_response(Response0, State1), + fold(Tail, State, [Response|Acc]) + end + end; +%% Check headers and initiate compression... +fold([Response0={headers, _, Headers}|Tail], State0, Acc) -> + case check_resp_headers(Headers, State0) of + State=#state{compress=undefined} -> + fold(Tail, State, [Response0|Acc]); + State1 -> + {Response, State} = gzip_headers(Response0, State1), + fold(Tail, State, [Response|Acc]) + end; +%% then compress each data commands individually. +fold([Data0={data, _, _}|Tail], State0=#state{compress=gzip}, Acc) -> + {Data, State} = gzip_data(Data0, State0), + fold(Tail, State, [Data|Acc]); +%% When trailers are sent we need to end the compression. +%% This results in an extra data command being sent. +fold([Trailers={trailers, _}|Tail], State0=#state{compress=gzip}, Acc) -> + {{data, fin, Data}, State} = gzip_data({data, fin, <<>>}, State0), + fold(Tail, State, [Trailers, {data, nofin, Data}|Acc]); +%% All the options from this handler can be updated for the current stream. +%% The set_options command must be propagated as-is regardless. +fold([SetOptions={set_options, Opts}|Tail], State=#state{ + threshold=CompressThreshold0, deflate_flush=DeflateFlush0}, Acc) -> + CompressThreshold = maps:get(compress_threshold, Opts, CompressThreshold0), + DeflateFlush = case Opts of + #{compress_buffering := CompressBuffering} -> + buffering_to_zflush(CompressBuffering); + _ -> + DeflateFlush0 + end, + fold(Tail, State#state{threshold=CompressThreshold, deflate_flush=DeflateFlush}, + [SetOptions|Acc]); +%% Otherwise, we have an unrelated command or compression is disabled. +fold([Command|Tail], State, Acc) -> + fold(Tail, State, [Command|Acc]). + +buffering_to_zflush(true) -> none; +buffering_to_zflush(false) -> sync. + +gzip_response({response, Status, Headers, Body}, State) -> + %% We can't call zlib:gzip/1 because it does an + %% iolist_to_binary(GzBody) at the end to return + %% a binary(). Therefore the code here is largely + %% a duplicate of the code of that function. + Z = zlib:open(), + GzBody = try + %% 31 = 16+?MAX_WBITS from zlib.erl + %% @todo It might be good to allow them to be configured? + zlib:deflateInit(Z, default, deflated, 31, 8, default), + Gz = zlib:deflate(Z, Body, finish), + zlib:deflateEnd(Z), + Gz + after + zlib:close(Z) + end, + {{response, Status, vary(Headers#{ + <<"content-length">> => integer_to_binary(iolist_size(GzBody)), + <<"content-encoding">> => <<"gzip">> + }), GzBody}, State}. + +gzip_headers({headers, Status, Headers0}, State) -> + Z = zlib:open(), + %% We use the same arguments as when compressing the body fully. + %% @todo It might be good to allow them to be configured? + zlib:deflateInit(Z, default, deflated, 31, 8, default), + Headers = maps:remove(<<"content-length">>, Headers0), + {{headers, Status, vary(Headers#{ + <<"content-encoding">> => <<"gzip">> + })}, State#state{deflate=Z}}. + +%% We must add content-encoding to vary if it's not already there. 
+vary(Headers=#{<<"vary">> := Vary}) ->
+	try cow_http_hd:parse_vary(iolist_to_binary(Vary)) of
+		'*' -> Headers;
+		List ->
+			case lists:member(<<"accept-encoding">>, List) of
+				true -> Headers;
+				false -> Headers#{<<"vary">> => [Vary, <<", accept-encoding">>]}
+			end
+	catch _:_ ->
+		%% The vary header is invalid. Probably empty. We replace it with ours.
+		Headers#{<<"vary">> => <<"accept-encoding">>}
+	end;
+vary(Headers) ->
+	Headers#{<<"vary">> => <<"accept-encoding">>}.
+
+%% It is not possible to combine zlib and the sendfile
+%% syscall as far as I can tell, because the zlib format
+%% includes a checksum at the end of the stream. We have
+%% to read the file in memory, making this not suitable for
+%% large files.
+gzip_data({data, nofin, Sendfile={sendfile, _, _, _}},
+		State=#state{deflate=Z, deflate_flush=Flush}) ->
+	{ok, Data0} = read_file(Sendfile),
+	Data = zlib:deflate(Z, Data0, Flush),
+	{{data, nofin, Data}, State};
+gzip_data({data, fin, Sendfile={sendfile, _, _, _}}, State=#state{deflate=Z}) ->
+	{ok, Data0} = read_file(Sendfile),
+	Data = zlib:deflate(Z, Data0, finish),
+	zlib:deflateEnd(Z),
+	zlib:close(Z),
+	{{data, fin, Data}, State#state{deflate=undefined}};
+gzip_data({data, nofin, Data0}, State=#state{deflate=Z, deflate_flush=Flush}) ->
+	Data = zlib:deflate(Z, Data0, Flush),
+	{{data, nofin, Data}, State};
+gzip_data({data, fin, Data0}, State=#state{deflate=Z}) ->
+	Data = zlib:deflate(Z, Data0, finish),
+	zlib:deflateEnd(Z),
+	zlib:close(Z),
+	{{data, fin, Data}, State#state{deflate=undefined}}.
+
+read_file({sendfile, Offset, Bytes, Path}) ->
+	{ok, IoDevice} = file:open(Path, [read, raw, binary]),
+	try
+		_ = case Offset of
+			0 -> ok;
+			_ -> file:position(IoDevice, {bof, Offset})
+		end,
+		file:read(IoDevice, Bytes)
+	after
+		file:close(IoDevice)
+	end.
diff --git a/deps/cowboy/src/cowboy_constraints.erl b/deps/cowboy/src/cowboy_constraints.erl
new file mode 100644
index 0000000..6509c4b
--- /dev/null
+++ b/deps/cowboy/src/cowboy_constraints.erl
@@ -0,0 +1,174 @@
+%% Copyright (c) 2014-2017, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cowboy_constraints).
+
+-export([validate/2]).
+-export([reverse/2]).
+-export([format_error/1]).
+
+-type constraint() :: int | nonempty | fun().
+-export_type([constraint/0]).
+
+-type reason() :: {constraint(), any(), any()}.
+-export_type([reason/0]).
+
+-spec validate(binary(), constraint() | [constraint()])
+	-> {ok, any()} | {error, reason()}.
+validate(Value, Constraints) when is_list(Constraints) ->
+	apply_list(forward, Value, Constraints);
+validate(Value, Constraint) ->
+	apply_list(forward, Value, [Constraint]).
+
+-spec reverse(any(), constraint() | [constraint()])
+	-> {ok, binary()} | {error, reason()}.
+reverse(Value, Constraints) when is_list(Constraints) -> + apply_list(reverse, Value, Constraints); +reverse(Value, Constraint) -> + apply_list(reverse, Value, [Constraint]). + +-spec format_error(reason()) -> iodata(). +format_error({Constraint, Reason, Value}) -> + apply_constraint(format_error, {Reason, Value}, Constraint). + +apply_list(_, Value, []) -> + {ok, Value}; +apply_list(Type, Value0, [Constraint|Tail]) -> + case apply_constraint(Type, Value0, Constraint) of + {ok, Value} -> + apply_list(Type, Value, Tail); + {error, Reason} -> + {error, {Constraint, Reason, Value0}} + end. + +%% @todo {int, From, To}, etc. +apply_constraint(Type, Value, int) -> + int(Type, Value); +apply_constraint(Type, Value, nonempty) -> + nonempty(Type, Value); +apply_constraint(Type, Value, F) when is_function(F) -> + F(Type, Value). + +%% Constraint functions. + +int(forward, Value) -> + try + {ok, binary_to_integer(Value)} + catch _:_ -> + {error, not_an_integer} + end; +int(reverse, Value) -> + try + {ok, integer_to_binary(Value)} + catch _:_ -> + {error, not_an_integer} + end; +int(format_error, {not_an_integer, Value}) -> + io_lib:format("The value ~p is not an integer.", [Value]). + +nonempty(Type, <<>>) when Type =/= format_error -> + {error, empty}; +nonempty(Type, Value) when Type =/= format_error, is_binary(Value) -> + {ok, Value}; +nonempty(format_error, {empty, Value}) -> + io_lib:format("The value ~p is empty.", [Value]). + +-ifdef(TEST). + +validate_test() -> + F = fun(_, Value) -> + try + {ok, binary_to_atom(Value, latin1)} + catch _:_ -> + {error, not_a_binary} + end + end, + %% Value, Constraints, Result. + Tests = [ + {<<>>, [], <<>>}, + {<<"123">>, int, 123}, + {<<"123">>, [int], 123}, + {<<"123">>, [nonempty, int], 123}, + {<<"123">>, [int, nonempty], 123}, + {<<>>, nonempty, error}, + {<<>>, [nonempty], error}, + {<<"hello">>, F, hello}, + {<<"hello">>, [F], hello}, + {<<"123">>, [F, int], error}, + {<<"123">>, [int, F], error}, + {<<"hello">>, [nonempty, F], hello}, + {<<"hello">>, [F, nonempty], hello} + ], + [{lists:flatten(io_lib:format("~p, ~p", [V, C])), fun() -> + case R of + error -> {error, _} = validate(V, C); + _ -> {ok, R} = validate(V, C) + end + end} || {V, C, R} <- Tests]. + +reverse_test() -> + F = fun(_, Value) -> + try + {ok, atom_to_binary(Value, latin1)} + catch _:_ -> + {error, not_an_atom} + end + end, + %% Value, Constraints, Result. + Tests = [ + {<<>>, [], <<>>}, + {123, int, <<"123">>}, + {123, [int], <<"123">>}, + {123, [nonempty, int], <<"123">>}, + {123, [int, nonempty], <<"123">>}, + {<<>>, nonempty, error}, + {<<>>, [nonempty], error}, + {hello, F, <<"hello">>}, + {hello, [F], <<"hello">>}, + {123, [F, int], error}, + {123, [int, F], error}, + {hello, [nonempty, F], <<"hello">>}, + {hello, [F, nonempty], <<"hello">>} + ], + [{lists:flatten(io_lib:format("~p, ~p", [V, C])), fun() -> + case R of + error -> {error, _} = reverse(V, C); + _ -> {ok, R} = reverse(V, C) + end + end} || {V, C, R} <- Tests]. + +int_format_error_test() -> + {error, Reason} = validate(<<"string">>, int), + Bin = iolist_to_binary(format_error(Reason)), + true = is_binary(Bin), + ok. + +nonempty_format_error_test() -> + {error, Reason} = validate(<<>>, nonempty), + Bin = iolist_to_binary(format_error(Reason)), + true = is_binary(Bin), + ok. + +fun_format_error_test() -> + F = fun + (format_error, {test, <<"value">>}) -> + formatted; + (_, _) -> + {error, test} + end, + {error, Reason} = validate(<<"value">>, F), + formatted = format_error(Reason), + ok. + +-endif. 
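+
+%% For illustration: constraint lists like the ones exercised in the tests
+%% above are typically attached to route bindings when compiling a
+%% cowboy_router dispatch list, e.g. (with a hypothetical item_h handler):
+%%
+%%   Dispatch = cowboy_router:compile([
+%%       {'_', [
+%%           %% :id must parse as an integer; the bound value becomes an integer().
+%%           {"/items/:id", [{id, int}], item_h, []}
+%%       ]}
+%%   ]).
+%%
+%% With such a constraint list, validate(<<"42">>, [int]) returns {ok, 42}
+%% while validate(<<"abc">>, [int]) returns
+%% {error, {int, not_an_integer, <<"abc">>}}.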
diff --git a/deps/cowboy/src/cowboy_handler.erl b/deps/cowboy/src/cowboy_handler.erl
new file mode 100644
index 0000000..c0f7ff7
--- /dev/null
+++ b/deps/cowboy/src/cowboy_handler.erl
@@ -0,0 +1,57 @@
+%% Copyright (c) 2011-2017, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% Handler middleware.
+%%
+%% Execute the handler given by the handler and handler_opts
+%% environment values. The result of this execution is added to the
+%% environment under the result value.
+-module(cowboy_handler).
+-behaviour(cowboy_middleware).
+
+-export([execute/2]).
+-export([terminate/4]).
+
+-callback init(Req, any())
+	-> {ok | module(), Req, any()}
+	| {module(), Req, any(), any()}
+	when Req::cowboy_req:req().
+
+-callback terminate(any(), map(), any()) -> ok.
+-optional_callbacks([terminate/3]).
+
+-spec execute(Req, Env) -> {ok, Req, Env}
+	when Req::cowboy_req:req(), Env::cowboy_middleware:env().
+execute(Req, Env=#{handler := Handler, handler_opts := HandlerOpts}) ->
+	try Handler:init(Req, HandlerOpts) of
+		{ok, Req2, State} ->
+			Result = terminate(normal, Req2, State, Handler),
+			{ok, Req2, Env#{result => Result}};
+		{Mod, Req2, State} ->
+			Mod:upgrade(Req2, Env, Handler, State);
+		{Mod, Req2, State, Opts} ->
+			Mod:upgrade(Req2, Env, Handler, State, Opts)
+	catch Class:Reason:Stacktrace ->
+		terminate({crash, Class, Reason}, Req, HandlerOpts, Handler),
+		erlang:raise(Class, Reason, Stacktrace)
+	end.
+
+-spec terminate(any(), Req | undefined, any(), module()) -> ok when Req::cowboy_req:req().
+terminate(Reason, Req, State, Handler) ->
+	case erlang:function_exported(Handler, terminate, 3) of
+		true ->
+			Handler:terminate(Reason, Req, State);
+		false ->
+			ok
+	end.
diff --git a/deps/cowboy/src/cowboy_http.erl b/deps/cowboy/src/cowboy_http.erl
new file mode 100644
index 0000000..c9bceed
--- /dev/null
+++ b/deps/cowboy/src/cowboy_http.erl
@@ -0,0 +1,1523 @@
+%% Copyright (c) 2016-2017, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cowboy_http).
+
+-export([init/6]).
+
+-export([system_continue/3]).
+-export([system_terminate/4]).
+-export([system_code_change/4]).
+ +-type opts() :: #{ + active_n => pos_integer(), + chunked => boolean(), + compress_buffering => boolean(), + compress_threshold => non_neg_integer(), + connection_type => worker | supervisor, + env => cowboy_middleware:env(), + http10_keepalive => boolean(), + idle_timeout => timeout(), + inactivity_timeout => timeout(), + initial_stream_flow_size => non_neg_integer(), + linger_timeout => timeout(), + logger => module(), + max_authority_length => non_neg_integer(), + max_empty_lines => non_neg_integer(), + max_header_name_length => non_neg_integer(), + max_header_value_length => non_neg_integer(), + max_headers => non_neg_integer(), + max_keepalive => non_neg_integer(), + max_method_length => non_neg_integer(), + max_request_line_length => non_neg_integer(), + metrics_callback => cowboy_metrics_h:metrics_callback(), + metrics_req_filter => fun((cowboy_req:req()) -> map()), + metrics_resp_headers_filter => fun((cowboy:http_headers()) -> cowboy:http_headers()), + middlewares => [module()], + proxy_header => boolean(), + request_timeout => timeout(), + sendfile => boolean(), + shutdown_timeout => timeout(), + stream_handlers => [module()], + tracer_callback => cowboy_tracer_h:tracer_callback(), + tracer_flags => [atom()], + tracer_match_specs => cowboy_tracer_h:tracer_match_specs(), + %% Open ended because configured stream handlers might add options. + _ => _ +}. +-export_type([opts/0]). + +-record(ps_request_line, { + empty_lines = 0 :: non_neg_integer() +}). + +-record(ps_header, { + method = undefined :: binary(), + authority = undefined :: binary() | undefined, + path = undefined :: binary(), + qs = undefined :: binary(), + version = undefined :: cowboy:http_version(), + headers = undefined :: cowboy:http_headers() | undefined, + name = undefined :: binary() | undefined +}). + +-record(ps_body, { + length :: non_neg_integer() | undefined, + received = 0 :: non_neg_integer(), + transfer_decode_fun :: fun((binary(), cow_http_te:state()) -> cow_http_te:decode_ret()), + transfer_decode_state :: cow_http_te:state() +}). + +-record(stream, { + id = undefined :: cowboy_stream:streamid(), + %% Stream handlers and their state. + state = undefined :: {module(), any()}, + %% Request method. + method = undefined :: binary(), + %% Client HTTP version for this stream. + version = undefined :: cowboy:http_version(), + %% Unparsed te header. Used to know if we can send trailers. + te :: undefined | binary(), + %% Expected body size. + local_expected_size = undefined :: undefined | non_neg_integer(), + %% Sent body size. + local_sent_size = 0 :: non_neg_integer(), + %% Commands queued. + queue = [] :: cowboy_stream:commands() +}). + +-type stream() :: #stream{}. + +-record(state, { + parent :: pid(), + ref :: ranch:ref(), + socket :: inet:socket(), + transport :: module(), + proxy_header :: undefined | ranch_proxy_header:proxy_info(), + opts = #{} :: cowboy:opts(), + buffer = <<>> :: binary(), + + %% Some options may be overriden for the current stream. + overriden_opts = #{} :: cowboy:opts(), + + %% Remote address and port for the connection. + peer = undefined :: {inet:ip_address(), inet:port_number()}, + + %% Local address and port for the connection. + sock = undefined :: {inet:ip_address(), inet:port_number()}, + + %% Client certificate (TLS only). + cert :: undefined | binary(), + + timer = undefined :: undefined | reference(), + + %% Whether we are currently receiving data from the socket. 
+ active = true :: boolean(), + + %% Identifier for the stream currently being read (or waiting to be received). + in_streamid = 1 :: pos_integer(), + + %% Parsing state for the current stream or stream-to-be. + in_state = #ps_request_line{} :: #ps_request_line{} | #ps_header{} | #ps_body{}, + + %% Flow requested for the current stream. + flow = infinity :: non_neg_integer() | infinity, + + %% Identifier for the stream currently being written. + %% Note that out_streamid =< in_streamid. + out_streamid = 1 :: pos_integer(), + + %% Whether we finished writing data for the current stream. + out_state = wait :: wait | chunked | streaming | done, + + %% The connection will be closed after this stream. + last_streamid = undefined :: pos_integer(), + + %% Currently active HTTP/1.1 streams. + streams = [] :: [stream()], + + %% Children processes created by streams. + children = cowboy_children:init() :: cowboy_children:children() +}). + +-include_lib("cowlib/include/cow_inline.hrl"). +-include_lib("cowlib/include/cow_parse.hrl"). + +-spec init(pid(), ranch:ref(), inet:socket(), module(), + ranch_proxy_header:proxy_info(), cowboy:opts()) -> ok. +init(Parent, Ref, Socket, Transport, ProxyHeader, Opts) -> + Peer0 = Transport:peername(Socket), + Sock0 = Transport:sockname(Socket), + Cert1 = case Transport:name() of + ssl -> + case ssl:peercert(Socket) of + {error, no_peercert} -> + {ok, undefined}; + Cert0 -> + Cert0 + end; + _ -> + {ok, undefined} + end, + case {Peer0, Sock0, Cert1} of + {{ok, Peer}, {ok, Sock}, {ok, Cert}} -> + State = #state{ + parent=Parent, ref=Ref, socket=Socket, + transport=Transport, proxy_header=ProxyHeader, opts=Opts, + peer=Peer, sock=Sock, cert=Cert, + last_streamid=maps:get(max_keepalive, Opts, 1000)}, + setopts_active(State), + loop(set_timeout(State, request_timeout)); + {{error, Reason}, _, _} -> + terminate(undefined, {socket_error, Reason, + 'A socket error occurred when retrieving the peer name.'}); + {_, {error, Reason}, _} -> + terminate(undefined, {socket_error, Reason, + 'A socket error occurred when retrieving the sock name.'}); + {_, _, {error, Reason}} -> + terminate(undefined, {socket_error, Reason, + 'A socket error occurred when retrieving the client TLS certificate.'}) + end. + +setopts_active(#state{socket=Socket, transport=Transport, opts=Opts}) -> + N = maps:get(active_n, Opts, 100), + Transport:setopts(Socket, [{active, N}]). + +active(State) -> + setopts_active(State), + State#state{active=true}. + +passive(State=#state{socket=Socket, transport=Transport}) -> + Transport:setopts(Socket, [{active, false}]), + Messages = Transport:messages(), + flush_passive(Socket, Messages), + State#state{active=false}. + +flush_passive(Socket, Messages) -> + receive + {Passive, Socket} when Passive =:= element(4, Messages); + %% Hardcoded for compatibility with Ranch 1.x. + Passive =:= tcp_passive; Passive =:= ssl_passive -> + flush_passive(Socket, Messages) + after 0 -> + ok + end. + +loop(State=#state{parent=Parent, socket=Socket, transport=Transport, opts=Opts, + buffer=Buffer, timer=TimerRef, children=Children, in_streamid=InStreamID, + last_streamid=LastStreamID}) -> + Messages = Transport:messages(), + InactivityTimeout = maps:get(inactivity_timeout, Opts, 300000), + receive + %% Discard data coming in after the last request + %% we want to process was received fully. + {OK, Socket, _} when OK =:= element(1, Messages), InStreamID > LastStreamID -> + loop(State); + %% Socket messages. 
+ {OK, Socket, Data} when OK =:= element(1, Messages) -> + parse(<< Buffer/binary, Data/binary >>, State); + {Closed, Socket} when Closed =:= element(2, Messages) -> + terminate(State, {socket_error, closed, 'The socket has been closed.'}); + {Error, Socket, Reason} when Error =:= element(3, Messages) -> + terminate(State, {socket_error, Reason, 'An error has occurred on the socket.'}); + {Passive, Socket} when Passive =:= element(4, Messages); + %% Hardcoded for compatibility with Ranch 1.x. + Passive =:= tcp_passive; Passive =:= ssl_passive -> + setopts_active(State), + loop(State); + %% Timeouts. + {timeout, Ref, {shutdown, Pid}} -> + cowboy_children:shutdown_timeout(Children, Ref, Pid), + loop(State); + {timeout, TimerRef, Reason} -> + timeout(State, Reason); + {timeout, _, _} -> + loop(State); + %% System messages. + {'EXIT', Parent, shutdown} -> + Reason = {stop, {exit, shutdown}, 'Parent process requested shutdown.'}, + loop(initiate_closing(State, Reason)); + {'EXIT', Parent, Reason} -> + terminate(State, {stop, {exit, Reason}, 'Parent process terminated.'}); + {system, From, Request} -> + sys:handle_system_msg(Request, From, Parent, ?MODULE, [], State); + %% Messages pertaining to a stream. + {{Pid, StreamID}, Msg} when Pid =:= self() -> + loop(info(State, StreamID, Msg)); + %% Exit signal from children. + Msg = {'EXIT', Pid, _} -> + loop(down(State, Pid, Msg)); + %% Calls from supervisor module. + {'$gen_call', From, Call} -> + cowboy_children:handle_supervisor_call(Call, From, Children, ?MODULE), + loop(State); + %% Unknown messages. + Msg -> + cowboy:log(warning, "Received stray message ~p.~n", [Msg], Opts), + loop(State) + after InactivityTimeout -> + terminate(State, {internal_error, timeout, 'No message or data received before timeout.'}) + end. + +%% We do not set request_timeout if there are active streams. +set_timeout(State=#state{streams=[_|_]}, request_timeout) -> + State; +%% We do not set request_timeout if we are skipping a body. +set_timeout(State=#state{in_state=#ps_body{}}, request_timeout) -> + State; +%% We do not set idle_timeout if there are no active streams, +%% unless when we are skipping a body. +set_timeout(State=#state{streams=[], in_state=InState}, idle_timeout) + when element(1, InState) =/= ps_body -> + State; +%% Otherwise we can set the timeout. +set_timeout(State0=#state{opts=Opts, overriden_opts=Override}, Name) -> + State = cancel_timeout(State0), + Default = case Name of + request_timeout -> 5000; + idle_timeout -> 60000 + end, + Timeout = case Override of + %% The timeout may have been overriden for the current stream. + #{Name := Timeout0} -> Timeout0; + _ -> maps:get(Name, Opts, Default) + end, + TimerRef = case Timeout of + infinity -> undefined; + Timeout -> erlang:start_timer(Timeout, self(), Name) + end, + State#state{timer=TimerRef}. + +cancel_timeout(State=#state{timer=TimerRef}) -> + ok = case TimerRef of + undefined -> + ok; + _ -> + %% Do a synchronous cancel and remove the message if any + %% to avoid receiving stray messages. + _ = erlang:cancel_timer(TimerRef), + receive + {timeout, TimerRef, _} -> ok + after 0 -> + ok + end + end, + State#state{timer=undefined}. + +-spec timeout(_, _) -> no_return(). 
+timeout(State=#state{in_state=#ps_request_line{}}, request_timeout) -> + terminate(State, {connection_error, timeout, + 'No request-line received before timeout.'}); +timeout(State=#state{in_state=#ps_header{}}, request_timeout) -> + error_terminate(408, State, {connection_error, timeout, + 'Request headers not received before timeout.'}); +timeout(State, idle_timeout) -> + terminate(State, {connection_error, timeout, + 'Connection idle longer than configuration allows.'}). + +parse(<<>>, State) -> + loop(State#state{buffer= <<>>}); +%% Do not process requests that come in after the last request +%% and discard the buffer if any to save memory. +parse(_, State=#state{in_streamid=InStreamID, in_state=#ps_request_line{}, + last_streamid=LastStreamID}) when InStreamID > LastStreamID -> + loop(State#state{buffer= <<>>}); +parse(Buffer, State=#state{in_state=#ps_request_line{empty_lines=EmptyLines}}) -> + after_parse(parse_request(Buffer, State, EmptyLines)); +parse(Buffer, State=#state{in_state=PS=#ps_header{headers=Headers, name=undefined}}) -> + after_parse(parse_header(Buffer, + State#state{in_state=PS#ps_header{headers=undefined}}, + Headers)); +parse(Buffer, State=#state{in_state=PS=#ps_header{headers=Headers, name=Name}}) -> + after_parse(parse_hd_before_value(Buffer, + State#state{in_state=PS#ps_header{headers=undefined, name=undefined}}, + Headers, Name)); +parse(Buffer, State=#state{in_state=#ps_body{}}) -> + after_parse(parse_body(Buffer, State)). + +after_parse({request, Req=#{streamid := StreamID, method := Method, + headers := Headers, version := Version}, + State0=#state{opts=Opts, buffer=Buffer, streams=Streams0}}) -> + try cowboy_stream:init(StreamID, Req, Opts) of + {Commands, StreamState} -> + Flow = maps:get(initial_stream_flow_size, Opts, 65535), + TE = maps:get(<<"te">>, Headers, undefined), + Streams = [#stream{id=StreamID, state=StreamState, + method=Method, version=Version, te=TE}|Streams0], + State1 = case maybe_req_close(State0, Headers, Version) of + close -> State0#state{streams=Streams, last_streamid=StreamID, flow=Flow}; + keepalive -> State0#state{streams=Streams, flow=Flow} + end, + State = set_timeout(State1, idle_timeout), + parse(Buffer, commands(State, StreamID, Commands)) + catch Class:Exception:Stacktrace -> + cowboy:log(cowboy_stream:make_error_log(init, + [StreamID, Req, Opts], + Class, Exception, Stacktrace), Opts), + early_error(500, State0, {internal_error, {Class, Exception}, + 'Unhandled exception in cowboy_stream:init/3.'}, Req), + parse(Buffer, State0) + end; +%% Streams are sequential so the body is always about the last stream created +%% unless that stream has terminated. +after_parse({data, StreamID, IsFin, Data, State0=#state{opts=Opts, buffer=Buffer, + streams=Streams0=[Stream=#stream{id=StreamID, state=StreamState0}|_]}}) -> + try cowboy_stream:data(StreamID, IsFin, Data, StreamState0) of + {Commands, StreamState} -> + Streams = lists:keyreplace(StreamID, #stream.id, Streams0, + Stream#stream{state=StreamState}), + State1 = set_timeout(State0, case IsFin of + fin -> request_timeout; + nofin -> idle_timeout + end), + State = update_flow(IsFin, Data, State1#state{streams=Streams}), + parse(Buffer, commands(State, StreamID, Commands)) + catch Class:Exception:Stacktrace -> + cowboy:log(cowboy_stream:make_error_log(data, + [StreamID, IsFin, Data, StreamState0], + Class, Exception, Stacktrace), Opts), + %% @todo Should call parse after this. 
+ stream_terminate(State0, StreamID, {internal_error, {Class, Exception}, + 'Unhandled exception in cowboy_stream:data/4.'}) + end; +%% No corresponding stream. We must skip the body of the previous request +%% in order to process the next one. +after_parse({data, _, IsFin, _, State}) -> + loop(set_timeout(State, case IsFin of + fin -> request_timeout; + nofin -> idle_timeout + end)); +after_parse({more, State}) -> + loop(set_timeout(State, idle_timeout)). + +update_flow(fin, _, State) -> + %% This function is only called after parsing, therefore we + %% are expecting to be in active mode already. + State#state{flow=infinity}; +update_flow(nofin, Data, State0=#state{flow=Flow0}) -> + Flow = Flow0 - byte_size(Data), + State = State0#state{flow=Flow}, + if + Flow0 > 0, Flow =< 0 -> + passive(State); + true -> + State + end. + +%% Request-line. + +-spec parse_request(Buffer, State, non_neg_integer()) + -> {request, cowboy_req:req(), State} + | {data, cowboy_stream:streamid(), cowboy_stream:fin(), binary(), State} + | {more, State} + when Buffer::binary(), State::#state{}. +%% Empty lines must be using \r\n. +parse_request(<< $\n, _/bits >>, State, _) -> + error_terminate(400, State, {connection_error, protocol_error, + 'Empty lines between requests must use the CRLF line terminator. (RFC7230 3.5)'}); +parse_request(<< $\s, _/bits >>, State, _) -> + error_terminate(400, State, {connection_error, protocol_error, + 'The request-line must not begin with a space. (RFC7230 3.1.1, RFC7230 3.5)'}); +%% We limit the length of the Request-line to MaxLength to avoid endlessly +%% reading from the socket and eventually crashing. +parse_request(Buffer, State=#state{opts=Opts, in_streamid=InStreamID}, EmptyLines) -> + MaxLength = maps:get(max_request_line_length, Opts, 8000), + MaxEmptyLines = maps:get(max_empty_lines, Opts, 5), + case match_eol(Buffer, 0) of + nomatch when byte_size(Buffer) > MaxLength -> + error_terminate(414, State, {connection_error, limit_reached, + 'The request-line length is larger than configuration allows. (RFC7230 3.1.1)'}); + nomatch -> + {more, State#state{buffer=Buffer, in_state=#ps_request_line{empty_lines=EmptyLines}}}; + 1 when EmptyLines =:= MaxEmptyLines -> + error_terminate(400, State, {connection_error, limit_reached, + 'More empty lines were received than configuration allows. (RFC7230 3.5)'}); + 1 -> + << _:16, Rest/bits >> = Buffer, + parse_request(Rest, State, EmptyLines + 1); + _ -> + case Buffer of + %% @todo * is only for server-wide OPTIONS request (RFC7230 5.3.4); tests + << "OPTIONS * ", Rest/bits >> -> + parse_version(Rest, State, <<"OPTIONS">>, undefined, <<"*">>, <<>>); + <<"CONNECT ", _/bits>> -> + error_terminate(501, State, {connection_error, no_error, + 'The CONNECT method is currently not implemented. (RFC7231 4.3.6)'}); + <<"TRACE ", _/bits>> -> + error_terminate(501, State, {connection_error, no_error, + 'The TRACE method is currently not implemented. (RFC7231 4.3.8)'}); + %% Accept direct HTTP/2 only at the beginning of the connection. + << "PRI * HTTP/2.0\r\n", _/bits >> when InStreamID =:= 1 -> + %% @todo Might be worth throwing to get a clean stacktrace. + http2_upgrade(State, Buffer); + _ -> + parse_method(Buffer, State, <<>>, + maps:get(max_method_length, Opts, 32)) + end + end. + +match_eol(<< $\n, _/bits >>, N) -> + N; +match_eol(<< _, Rest/bits >>, N) -> + match_eol(Rest, N + 1); +match_eol(_, _) -> + nomatch. 
+
+parse_method(_, State, _, 0) ->
+	error_terminate(501, State, {connection_error, limit_reached,
+		'The method name is longer than configuration allows. (RFC7230 3.1.1)'});
+parse_method(<< C, Rest/bits >>, State, SoFar, Remaining) ->
+	case C of
+		$\r -> error_terminate(400, State, {connection_error, protocol_error,
+			'The method name must not be followed with a line break. (RFC7230 3.1.1)'});
+		$\s -> parse_uri(Rest, State, SoFar);
+		_ when ?IS_TOKEN(C) -> parse_method(Rest, State, << SoFar/binary, C >>, Remaining - 1);
+		_ -> error_terminate(400, State, {connection_error, protocol_error,
+			'The method name must contain only valid token characters. (RFC7230 3.1.1)'})
+	end.
+
+parse_uri(<< H, T, T, P, "://", Rest/bits >>, State, Method)
+		when H =:= $h orelse H =:= $H, T =:= $t orelse T =:= $T;
+			P =:= $p orelse P =:= $P ->
+	parse_uri_authority(Rest, State, Method);
+parse_uri(<< H, T, T, P, S, "://", Rest/bits >>, State, Method)
+		when H =:= $h orelse H =:= $H, T =:= $t orelse T =:= $T;
+			P =:= $p orelse P =:= $P; S =:= $s orelse S =:= $S ->
+	parse_uri_authority(Rest, State, Method);
+parse_uri(<< $/, Rest/bits >>, State, Method) ->
+	parse_uri_path(Rest, State, Method, undefined, <<$/>>);
+parse_uri(_, State, _) ->
+	error_terminate(400, State, {connection_error, protocol_error,
+		'Invalid request-line or request-target. (RFC7230 3.1.1, RFC7230 5.3)'}).
+
+%% @todo We probably want to apply max_authority_length also
+%% to the host header and to document this option. It might
+%% also be useful for HTTP/2 requests.
+parse_uri_authority(Rest, State=#state{opts=Opts}, Method) ->
+	parse_uri_authority(Rest, State, Method, <<>>,
+		maps:get(max_authority_length, Opts, 255)).
+
+parse_uri_authority(_, State, _, _, 0) ->
+	error_terminate(414, State, {connection_error, limit_reached,
+		'The authority component of the absolute URI is longer than configuration allows. (RFC7230 2.7.1)'});
+parse_uri_authority(<< C, Rest/bits >>, State, Method, SoFar, Remaining) ->
+	case C of
+		$\r ->
+			error_terminate(400, State, {connection_error, protocol_error,
+				'The request-target must not be followed by a line break. (RFC7230 3.1.1)'});
+		$@ ->
+			error_terminate(400, State, {connection_error, protocol_error,
+				'Absolute URIs must not include a userinfo component. (RFC7230 2.7.1)'});
+		C when SoFar =:= <<>> andalso
+				((C =:= $/) orelse (C =:= $\s) orelse (C =:= $?) orelse (C =:= $#)) ->
+			error_terminate(400, State, {connection_error, protocol_error,
+				'Absolute URIs must include a non-empty host component. (RFC7230 2.7.1)'});
+		$: when SoFar =:= <<>> ->
+			error_terminate(400, State, {connection_error, protocol_error,
+				'Absolute URIs must include a non-empty host component. (RFC7230 2.7.1)'});
+		$/ -> parse_uri_path(Rest, State, Method, SoFar, <<"/">>);
+		$\s -> parse_version(Rest, State, Method, SoFar, <<"/">>, <<>>);
+		$? -> parse_uri_query(Rest, State, Method, SoFar, <<"/">>, <<>>);
+		$# -> skip_uri_fragment(Rest, State, Method, SoFar, <<"/">>, <<>>);
+		C -> parse_uri_authority(Rest, State, Method, << SoFar/binary, C >>, Remaining - 1)
+	end.
+
+parse_uri_path(<< C, Rest/bits >>, State, Method, Authority, SoFar) ->
+	case C of
+		$\r -> error_terminate(400, State, {connection_error, protocol_error,
+			'The request-target must not be followed by a line break. (RFC7230 3.1.1)'});
+		$\s -> parse_version(Rest, State, Method, Authority, SoFar, <<>>);
+		$? -> parse_uri_query(Rest, State, Method, Authority, SoFar, <<>>);
+		$# -> skip_uri_fragment(Rest, State, Method, Authority, SoFar, <<>>);
+		_ -> parse_uri_path(Rest, State, Method, Authority, << SoFar/binary, C >>)
+	end.
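+
+%% For illustration: given a request line such as
+%% <<"GET /items?sort=asc HTTP/1.1\r\n">>, the chain of parse_method/parse_uri
+%% and the helpers above and below yields Method = <<"GET">>,
+%% Authority = undefined, Path = <<"/items">>, Qs = <<"sort=asc">> and
+%% Version = 'HTTP/1.1'. An absolute-form target such as
+%% "http://example.org/items" goes through parse_uri_authority first, and a
+%% "#fragment" part, if present, is skipped entirely.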
+
+parse_uri_query(<< C, Rest/bits >>, State, M, A, P, SoFar) ->
+	case C of
+		$\r -> error_terminate(400, State, {connection_error, protocol_error,
+			'The request-target must not be followed by a line break. (RFC7230 3.1.1)'});
+		$\s -> parse_version(Rest, State, M, A, P, SoFar);
+		$# -> skip_uri_fragment(Rest, State, M, A, P, SoFar);
+		_ -> parse_uri_query(Rest, State, M, A, P, << SoFar/binary, C >>)
+	end.
+
+skip_uri_fragment(<< C, Rest/bits >>, State, M, A, P, Q) ->
+	case C of
+		$\r -> error_terminate(400, State, {connection_error, protocol_error,
+			'The request-target must not be followed by a line break. (RFC7230 3.1.1)'});
+		$\s -> parse_version(Rest, State, M, A, P, Q);
+		_ -> skip_uri_fragment(Rest, State, M, A, P, Q)
+	end.
+
+parse_version(<< "HTTP/1.1\r\n", Rest/bits >>, State, M, A, P, Q) ->
+	before_parse_headers(Rest, State, M, A, P, Q, 'HTTP/1.1');
+parse_version(<< "HTTP/1.0\r\n", Rest/bits >>, State, M, A, P, Q) ->
+	before_parse_headers(Rest, State, M, A, P, Q, 'HTTP/1.0');
+parse_version(<< "HTTP/1.", _, C, _/bits >>, State, _, _, _, _) when C =:= $\s; C =:= $\t ->
+	error_terminate(400, State, {connection_error, protocol_error,
+		'Whitespace is not allowed after the HTTP version. (RFC7230 3.1.1)'});
+parse_version(<< C, _/bits >>, State, _, _, _, _) when C =:= $\s; C =:= $\t ->
+	error_terminate(400, State, {connection_error, protocol_error,
+		'The separator between request target and version must be a single SP. (RFC7230 3.1.1)'});
+parse_version(_, State, _, _, _, _) ->
+	error_terminate(505, State, {connection_error, protocol_error,
+		'Unsupported HTTP version. (RFC7230 2.6)'}).
+
+before_parse_headers(Rest, State, M, A, P, Q, V) ->
+	parse_header(Rest, State#state{in_state=#ps_header{
+		method=M, authority=A, path=P, qs=Q, version=V}}, #{}).
+
+%% Headers.
+
+%% We need two or more bytes in the buffer to continue.
+parse_header(Rest, State=#state{in_state=PS}, Headers) when byte_size(Rest) < 2 ->
+	{more, State#state{buffer=Rest, in_state=PS#ps_header{headers=Headers}}};
+parse_header(<< $\r, $\n, Rest/bits >>, S, Headers) ->
+	request(Rest, S, Headers);
+parse_header(Buffer, State=#state{opts=Opts, in_state=PS}, Headers) ->
+	MaxHeaders = maps:get(max_headers, Opts, 100),
+	NumHeaders = maps:size(Headers),
+	if
+		NumHeaders >= MaxHeaders ->
+			error_terminate(431, State#state{in_state=PS#ps_header{headers=Headers}},
+				{connection_error, limit_reached,
+				'The number of headers is larger than configuration allows. (RFC7230 3.2.5, RFC6585 5)'});
+		true ->
+			parse_header_colon(Buffer, State, Headers)
+	end.
+
+parse_header_colon(Buffer, State=#state{opts=Opts, in_state=PS}, Headers) ->
+	MaxLength = maps:get(max_header_name_length, Opts, 64),
+	case match_colon(Buffer, 0) of
+		nomatch when byte_size(Buffer) > MaxLength ->
+			error_terminate(431, State#state{in_state=PS#ps_header{headers=Headers}},
+				{connection_error, limit_reached,
+				'A header name is larger than configuration allows. (RFC7230 3.2.5, RFC6585 5)'});
+		nomatch ->
+			%% We don't have a colon but we might have an invalid header line,
+			%% so check if we have an LF and abort with an error if we do.
+			case match_eol(Buffer, 0) of
+				nomatch ->
+					{more, State#state{buffer=Buffer, in_state=PS#ps_header{headers=Headers}}};
+				_ ->
+					error_terminate(400, State#state{in_state=PS#ps_header{headers=Headers}},
+						{connection_error, protocol_error,
+						'A header line is missing a colon separator. (RFC7230 3.2.4)'})
+			end;
+		_ ->
+			parse_hd_name(Buffer, State, Headers, <<>>)
+	end.
+ +match_colon(<< $:, _/bits >>, N) -> + N; +match_colon(<< _, Rest/bits >>, N) -> + match_colon(Rest, N + 1); +match_colon(_, _) -> + nomatch. + +parse_hd_name(<< $:, Rest/bits >>, State, H, SoFar) -> + parse_hd_before_value(Rest, State, H, SoFar); +parse_hd_name(<< C, _/bits >>, State=#state{in_state=PS}, H, <<>>) when ?IS_WS(C) -> + error_terminate(400, State#state{in_state=PS#ps_header{headers=H}}, + {connection_error, protocol_error, + 'Whitespace is not allowed before the header name. (RFC7230 3.2)'}); +parse_hd_name(<< C, _/bits >>, State=#state{in_state=PS}, H, _) when ?IS_WS(C) -> + error_terminate(400, State#state{in_state=PS#ps_header{headers=H}}, + {connection_error, protocol_error, + 'Whitespace is not allowed between the header name and the colon. (RFC7230 3.2.4)'}); +parse_hd_name(<< C, Rest/bits >>, State, H, SoFar) -> + ?LOWER(parse_hd_name, Rest, State, H, SoFar). + +parse_hd_before_value(<< $\s, Rest/bits >>, S, H, N) -> + parse_hd_before_value(Rest, S, H, N); +parse_hd_before_value(<< $\t, Rest/bits >>, S, H, N) -> + parse_hd_before_value(Rest, S, H, N); +parse_hd_before_value(Buffer, State=#state{opts=Opts, in_state=PS}, H, N) -> + MaxLength = maps:get(max_header_value_length, Opts, 4096), + case match_eol(Buffer, 0) of + nomatch when byte_size(Buffer) > MaxLength -> + error_terminate(431, State#state{in_state=PS#ps_header{headers=H}}, + {connection_error, limit_reached, + 'A header value is larger than configuration allows. (RFC7230 3.2.5, RFC6585 5)'}); + nomatch -> + {more, State#state{buffer=Buffer, in_state=PS#ps_header{headers=H, name=N}}}; + _ -> + parse_hd_value(Buffer, State, H, N, <<>>) + end. + +parse_hd_value(<< $\r, $\n, Rest/bits >>, S, Headers0, Name, SoFar) -> + Value = clean_value_ws_end(SoFar, byte_size(SoFar) - 1), + Headers = case maps:get(Name, Headers0, undefined) of + undefined -> Headers0#{Name => Value}; + %% The cookie header does not use proper HTTP header lists. + Value0 when Name =:= <<"cookie">> -> Headers0#{Name => << Value0/binary, "; ", Value/binary >>}; + Value0 -> Headers0#{Name => << Value0/binary, ", ", Value/binary >>} + end, + parse_header(Rest, S, Headers); +parse_hd_value(<< C, Rest/bits >>, S, H, N, SoFar) -> + parse_hd_value(Rest, S, H, N, << SoFar/binary, C >>). + +clean_value_ws_end(_, -1) -> + <<>>; +clean_value_ws_end(Value, N) -> + case binary:at(Value, N) of + $\s -> clean_value_ws_end(Value, N - 1); + $\t -> clean_value_ws_end(Value, N - 1); + _ -> + S = N + 1, + << Value2:S/binary, _/bits >> = Value, + Value2 + end. + +-ifdef(TEST). +clean_value_ws_end_test_() -> + Tests = [ + {<<>>, <<>>}, + {<<" ">>, <<>>}, + {<<"text/*;q=0.3, text/html;q=0.7, text/html;level=1, " + "text/html;level=2;q=0.4, */*;q=0.5 \t \t ">>, + <<"text/*;q=0.3, text/html;q=0.7, text/html;level=1, " + "text/html;level=2;q=0.4, */*;q=0.5">>} + ], + [{V, fun() -> R = clean_value_ws_end(V, byte_size(V) - 1) end} || {V, R} <- Tests]. + +horse_clean_value_ws_end() -> + horse:repeat(200000, + clean_value_ws_end( + <<"text/*;q=0.3, text/html;q=0.7, text/html;level=1, " + "text/html;level=2;q=0.4, */*;q=0.5 ">>, + byte_size(<<"text/*;q=0.3, text/html;q=0.7, text/html;level=1, " + "text/html;level=2;q=0.4, */*;q=0.5 ">>) - 1) + ). +-endif. + +request(Buffer, State=#state{transport=Transport, + in_state=PS=#ps_header{authority=Authority, version=Version}}, Headers) -> + case maps:get(<<"host">>, Headers, undefined) of + undefined when Version =:= 'HTTP/1.1' -> + %% @todo Might want to not close the connection on this and next one. 
+ error_terminate(400, State#state{in_state=PS#ps_header{headers=Headers}}, + {stream_error, protocol_error, + 'HTTP/1.1 requests must include a host header. (RFC7230 5.4)'}); + undefined -> + request(Buffer, State, Headers, <<>>, default_port(Transport:secure())); + %% @todo When CONNECT requests come in we need to ignore the RawHost + %% and instead use the Authority as the source of host. + RawHost when Authority =:= undefined; Authority =:= RawHost -> + request_parse_host(Buffer, State, Headers, RawHost); + %% RFC7230 does not explicitly ask us to reject requests + %% that have a different authority component and host header. + %% However it DOES ask clients to set them to the same value, + %% so we enforce that. + _ -> + error_terminate(400, State#state{in_state=PS#ps_header{headers=Headers}}, + {stream_error, protocol_error, + 'The host header is different than the absolute-form authority component. (RFC7230 5.4)'}) + end. + +request_parse_host(Buffer, State=#state{transport=Transport, in_state=PS}, Headers, RawHost) -> + try cow_http_hd:parse_host(RawHost) of + {Host, undefined} -> + request(Buffer, State, Headers, Host, default_port(Transport:secure())); + {Host, Port} when Port > 0, Port =< 65535 -> + request(Buffer, State, Headers, Host, Port); + _ -> + error_terminate(400, State, {stream_error, protocol_error, + 'The port component of the absolute-form is not in the range 0..65535. (RFC7230 2.7.1)'}) + catch _:_ -> + error_terminate(400, State#state{in_state=PS#ps_header{headers=Headers}}, + {stream_error, protocol_error, + 'The host header is invalid. (RFC7230 5.4)'}) + end. + +-spec default_port(boolean()) -> 80 | 443. +default_port(true) -> 443; +default_port(_) -> 80. + +%% End of request parsing. + +request(Buffer, State0=#state{ref=Ref, transport=Transport, peer=Peer, sock=Sock, cert=Cert, + proxy_header=ProxyHeader, in_streamid=StreamID, in_state= + PS=#ps_header{method=Method, path=Path, qs=Qs, version=Version}}, + Headers0, Host, Port) -> + Scheme = case Transport:secure() of + true -> <<"https">>; + false -> <<"http">> + end, + {Headers, HasBody, BodyLength, TDecodeFun, TDecodeState} = case Headers0 of + #{<<"transfer-encoding">> := TransferEncoding0} -> + try cow_http_hd:parse_transfer_encoding(TransferEncoding0) of + [<<"chunked">>] -> + {maps:remove(<<"content-length">>, Headers0), + true, undefined, fun cow_http_te:stream_chunked/2, {0, 0}}; + _ -> + error_terminate(400, State0#state{in_state=PS#ps_header{headers=Headers0}}, + {stream_error, protocol_error, + 'Cowboy only supports transfer-encoding: chunked. (RFC7230 3.3.1)'}) + catch _:_ -> + error_terminate(400, State0#state{in_state=PS#ps_header{headers=Headers0}}, + {stream_error, protocol_error, + 'The transfer-encoding header is invalid. (RFC7230 3.3.1)'}) + end; + #{<<"content-length">> := <<"0">>} -> + {Headers0, false, 0, undefined, undefined}; + #{<<"content-length">> := BinLength} -> + Length = try + cow_http_hd:parse_content_length(BinLength) + catch _:_ -> + error_terminate(400, State0#state{in_state=PS#ps_header{headers=Headers0}}, + {stream_error, protocol_error, + 'The content-length header is invalid. 
(RFC7230 3.3.2)'}) + end, + {Headers0, true, Length, fun cow_http_te:stream_identity/2, {0, Length}}; + _ -> + {Headers0, false, 0, undefined, undefined} + end, + Req0 = #{ + ref => Ref, + pid => self(), + streamid => StreamID, + peer => Peer, + sock => Sock, + cert => Cert, + method => Method, + scheme => Scheme, + host => Host, + port => Port, + path => Path, + qs => Qs, + version => Version, + %% We are transparently taking care of transfer-encodings so + %% the user code has no need to know about it. + headers => maps:remove(<<"transfer-encoding">>, Headers), + has_body => HasBody, + body_length => BodyLength + }, + %% We add the PROXY header information if any. + Req = case ProxyHeader of + undefined -> Req0; + _ -> Req0#{proxy_header => ProxyHeader} + end, + case is_http2_upgrade(Headers, Version) of + false -> + State = case HasBody of + true -> + State0#state{in_state=#ps_body{ + length = BodyLength, + transfer_decode_fun = TDecodeFun, + transfer_decode_state = TDecodeState + }}; + false -> + State0#state{in_streamid=StreamID + 1, in_state=#ps_request_line{}} + end, + {request, Req, State#state{buffer=Buffer}}; + {true, HTTP2Settings} -> + %% We save the headers in case the upgrade will fail + %% and we need to pass them to cowboy_stream:early_error. + http2_upgrade(State0#state{in_state=PS#ps_header{headers=Headers}}, + Buffer, HTTP2Settings, Req) + end. + +%% HTTP/2 upgrade. + +%% @todo We must not upgrade to h2c over a TLS connection. +is_http2_upgrade(#{<<"connection">> := Conn, <<"upgrade">> := Upgrade, + <<"http2-settings">> := HTTP2Settings}, 'HTTP/1.1') -> + Conns = cow_http_hd:parse_connection(Conn), + case {lists:member(<<"upgrade">>, Conns), lists:member(<<"http2-settings">>, Conns)} of + {true, true} -> + Protocols = cow_http_hd:parse_upgrade(Upgrade), + case lists:member(<<"h2c">>, Protocols) of + true -> + {true, HTTP2Settings}; + false -> + false + end; + _ -> + false + end; +is_http2_upgrade(_, _) -> + false. + +%% Prior knowledge upgrade, without an HTTP/1.1 request. +http2_upgrade(State=#state{parent=Parent, ref=Ref, socket=Socket, transport=Transport, + proxy_header=ProxyHeader, opts=Opts, peer=Peer, sock=Sock, cert=Cert}, Buffer) -> + case Transport:secure() of + false -> + _ = cancel_timeout(State), + cowboy_http2:init(Parent, Ref, Socket, Transport, + ProxyHeader, Opts, Peer, Sock, Cert, Buffer); + true -> + error_terminate(400, State, {connection_error, protocol_error, + 'Clients that support HTTP/2 over TLS MUST use ALPN. (RFC7540 3.4)'}) + end. + +%% Upgrade via an HTTP/1.1 request. +http2_upgrade(State=#state{parent=Parent, ref=Ref, socket=Socket, transport=Transport, + proxy_header=ProxyHeader, opts=Opts, peer=Peer, sock=Sock, cert=Cert}, + Buffer, HTTP2Settings, Req) -> + %% @todo + %% However if the client sent a body, we need to read the body in full + %% and if we can't do that, return a 413 response. Some options are in order. + %% Always half-closed stream coming from this side. + try cow_http_hd:parse_http2_settings(HTTP2Settings) of + Settings -> + _ = cancel_timeout(State), + cowboy_http2:init(Parent, Ref, Socket, Transport, + ProxyHeader, Opts, Peer, Sock, Cert, Buffer, Settings, Req) + catch _:_ -> + error_terminate(400, State, {connection_error, protocol_error, + 'The HTTP2-Settings header must contain a base64 SETTINGS payload. (RFC7540 3.2, RFC7540 3.2.1)'}) + end. + +%% Request body parsing. 
+ +parse_body(Buffer, State=#state{in_streamid=StreamID, in_state= + PS=#ps_body{received=Received, transfer_decode_fun=TDecode, + transfer_decode_state=TState0}}) -> + %% @todo Proper trailers. + try TDecode(Buffer, TState0) of + more -> + {more, State#state{buffer=Buffer}}; + {more, Data, TState} -> + {data, StreamID, nofin, Data, State#state{buffer= <<>>, + in_state=PS#ps_body{received=Received + byte_size(Data), + transfer_decode_state=TState}}}; + {more, Data, _Length, TState} when is_integer(_Length) -> + {data, StreamID, nofin, Data, State#state{buffer= <<>>, + in_state=PS#ps_body{received=Received + byte_size(Data), + transfer_decode_state=TState}}}; + {more, Data, Rest, TState} -> + {data, StreamID, nofin, Data, State#state{buffer=Rest, + in_state=PS#ps_body{received=Received + byte_size(Data), + transfer_decode_state=TState}}}; + {done, _HasTrailers, Rest} -> + {data, StreamID, fin, <<>>, + State#state{buffer=Rest, in_streamid=StreamID + 1, in_state=#ps_request_line{}}}; + {done, Data, _HasTrailers, Rest} -> + {data, StreamID, fin, Data, + State#state{buffer=Rest, in_streamid=StreamID + 1, in_state=#ps_request_line{}}} + catch _:_ -> + Reason = {connection_error, protocol_error, + 'Failure to decode the content. (RFC7230 4)'}, + terminate(stream_terminate(State, StreamID, Reason), Reason) + end. + +%% Message handling. + +down(State=#state{opts=Opts, children=Children0}, Pid, Msg) -> + case cowboy_children:down(Children0, Pid) of + %% The stream was terminated already. + {ok, undefined, Children} -> + State#state{children=Children}; + %% The stream is still running. + {ok, StreamID, Children} -> + info(State#state{children=Children}, StreamID, Msg); + %% The process was unknown. + error -> + cowboy:log(warning, "Received EXIT signal ~p for unknown process ~p.~n", + [Msg, Pid], Opts), + State + end. + +info(State=#state{opts=Opts, streams=Streams0}, StreamID, Msg) -> + case lists:keyfind(StreamID, #stream.id, Streams0) of + Stream = #stream{state=StreamState0} -> + try cowboy_stream:info(StreamID, Msg, StreamState0) of + {Commands, StreamState} -> + Streams = lists:keyreplace(StreamID, #stream.id, Streams0, + Stream#stream{state=StreamState}), + commands(State#state{streams=Streams}, StreamID, Commands) + catch Class:Exception:Stacktrace -> + cowboy:log(cowboy_stream:make_error_log(info, + [StreamID, Msg, StreamState0], + Class, Exception, Stacktrace), Opts), + stream_terminate(State, StreamID, {internal_error, {Class, Exception}, + 'Unhandled exception in cowboy_stream:info/3.'}) + end; + false -> + cowboy:log(warning, "Received message ~p for unknown stream ~p.~n", + [Msg, StreamID], Opts), + State + end. + +%% Commands. + +commands(State, _, []) -> + State; +%% Supervise a child process. +commands(State=#state{children=Children}, StreamID, [{spawn, Pid, Shutdown}|Tail]) -> + commands(State#state{children=cowboy_children:up(Children, Pid, StreamID, Shutdown)}, + StreamID, Tail); +%% Error handling. +commands(State, StreamID, [Error = {internal_error, _, _}|Tail]) -> + commands(stream_terminate(State, StreamID, Error), StreamID, Tail); +%% Commands for a stream currently inactive. +commands(State=#state{out_streamid=Current, streams=Streams0}, StreamID, Commands) + when Current =/= StreamID -> + + %% @todo We still want to handle some commands... 
+ + Stream = #stream{queue=Queue} = lists:keyfind(StreamID, #stream.id, Streams0), + Streams = lists:keyreplace(StreamID, #stream.id, Streams0, + Stream#stream{queue=Queue ++ Commands}), + State#state{streams=Streams}; +%% When we have finished reading the request body, do nothing. +commands(State=#state{flow=infinity}, StreamID, [{flow, _}|Tail]) -> + commands(State, StreamID, Tail); +%% Read the request body. +commands(State0=#state{flow=Flow0}, StreamID, [{flow, Size}|Tail]) -> + %% We must read *at least* Size of data otherwise functions + %% like cowboy_req:read_body/1,2 will wait indefinitely. + Flow = if + Flow0 < 0 -> Size; + true -> Flow0 + Size + end, + %% Reenable active mode if necessary. + State = if + Flow0 =< 0, Flow > 0 -> + active(State0); + true -> + State0 + end, + commands(State#state{flow=Flow}, StreamID, Tail); +%% Error responses are sent only if a response wasn't sent already. +commands(State=#state{out_state=wait, out_streamid=StreamID}, StreamID, + [{error_response, Status, Headers0, Body}|Tail]) -> + %% We close the connection when the error response is 408, as it + %% indicates a timeout and the RFC recommends that we stop here. (RFC7231 6.5.7) + Headers = case Status of + 408 -> Headers0#{<<"connection">> => <<"close">>}; + <<"408", _/bits>> -> Headers0#{<<"connection">> => <<"close">>}; + _ -> Headers0 + end, + commands(State, StreamID, [{response, Status, Headers, Body}|Tail]); +commands(State, StreamID, [{error_response, _, _, _}|Tail]) -> + commands(State, StreamID, Tail); +%% Send an informational response. +commands(State=#state{socket=Socket, transport=Transport, out_state=wait, streams=Streams}, + StreamID, [{inform, StatusCode, Headers}|Tail]) -> + %% @todo I'm pretty sure the last stream in the list is the one we want + %% considering all others are queued. + #stream{version=Version} = lists:keyfind(StreamID, #stream.id, Streams), + _ = case Version of + 'HTTP/1.1' -> + Transport:send(Socket, cow_http:response(StatusCode, 'HTTP/1.1', + headers_to_list(Headers))); + %% Do not send informational responses to HTTP/1.0 clients. (RFC7231 6.2) + 'HTTP/1.0' -> + ok + end, + commands(State, StreamID, Tail); +%% Send a full response. +%% +%% @todo Kill the stream if it sent a response when one has already been sent. +%% @todo Keep IsFin in the state. +%% @todo Same two things above apply to DATA, possibly promise too. +commands(State0=#state{socket=Socket, transport=Transport, out_state=wait, streams=Streams}, StreamID, + [{response, StatusCode, Headers0, Body}|Tail]) -> + %% @todo I'm pretty sure the last stream in the list is the one we want + %% considering all others are queued. + #stream{version=Version} = lists:keyfind(StreamID, #stream.id, Streams), + {State1, Headers} = connection(State0, Headers0, StreamID, Version), + State = State1#state{out_state=done}, + %% @todo Ensure content-length is set. 204 must never have content-length set. + Response = cow_http:response(StatusCode, 'HTTP/1.1', headers_to_list(Headers)), + %% @todo 204 and 304 responses must not include a response body. (RFC7230 3.3.1, RFC7230 3.3.2) + case Body of + {sendfile, _, _, _} -> + Transport:send(Socket, Response), + sendfile(State, Body); + _ -> + Transport:send(Socket, [Response, Body]) + end, + commands(State, StreamID, Tail); +%% Send response headers and initiate chunked encoding or streaming. 
+commands(State0=#state{socket=Socket, transport=Transport, + opts=Opts, overriden_opts=Override, streams=Streams0, out_state=OutState}, + StreamID, [{headers, StatusCode, Headers0}|Tail]) -> + %% @todo Same as above (about the last stream in the list). + Stream = #stream{version=Version} = lists:keyfind(StreamID, #stream.id, Streams0), + Status = cow_http:status_to_integer(StatusCode), + ContentLength = maps:get(<<"content-length">>, Headers0, undefined), + %% Chunked transfer-encoding can be disabled on a per-request basis. + Chunked = case Override of + #{chunked := Chunked0} -> Chunked0; + _ -> maps:get(chunked, Opts, true) + end, + {State1, Headers1} = case {Status, ContentLength, Version} of + {204, _, 'HTTP/1.1'} -> + {State0#state{out_state=done}, Headers0}; + {304, _, 'HTTP/1.1'} -> + {State0#state{out_state=done}, Headers0}; + {_, undefined, 'HTTP/1.1'} when Chunked -> + {State0#state{out_state=chunked}, Headers0#{<<"transfer-encoding">> => <<"chunked">>}}; + %% Close the connection after streaming without content-length + %% to all HTTP/1.0 clients and to HTTP/1.1 clients when chunked is disabled. + {_, undefined, _} -> + {State0#state{out_state=streaming, last_streamid=StreamID}, Headers0}; + %% Stream the response body without chunked transfer-encoding. + _ -> + ExpectedSize = cow_http_hd:parse_content_length(ContentLength), + Streams = lists:keyreplace(StreamID, #stream.id, Streams0, + Stream#stream{local_expected_size=ExpectedSize}), + {State0#state{out_state=streaming, streams=Streams}, Headers0} + end, + Headers2 = case stream_te(OutState, Stream) of + trailers -> Headers1; + _ -> maps:remove(<<"trailer">>, Headers1) + end, + {State, Headers} = connection(State1, Headers2, StreamID, Version), + Transport:send(Socket, cow_http:response(StatusCode, 'HTTP/1.1', headers_to_list(Headers))), + commands(State, StreamID, Tail); +%% Send a response body chunk. +%% @todo We need to kill the stream if it tries to send data before headers. +commands(State0=#state{socket=Socket, transport=Transport, streams=Streams0, out_state=OutState}, + StreamID, [{data, IsFin, Data}|Tail]) -> + %% Do not send anything when the user asks to send an empty + %% data frame, as that would break the protocol. + Size = case Data of + {sendfile, _, B, _} -> B; + _ -> iolist_size(Data) + end, + %% Depending on the current state we may need to send nothing, + %% the last chunk, chunked data with/without the last chunk, + %% or just the data as-is. + Stream = case lists:keyfind(StreamID, #stream.id, Streams0) of + Stream0=#stream{method= <<"HEAD">>} -> + Stream0; + Stream0 when Size =:= 0, IsFin =:= fin, OutState =:= chunked -> + Transport:send(Socket, <<"0\r\n\r\n">>), + Stream0; + Stream0 when Size =:= 0 -> + Stream0; + Stream0 when is_tuple(Data), OutState =:= chunked -> + Transport:send(Socket, [integer_to_binary(Size, 16), <<"\r\n">>]), + sendfile(State0, Data), + Transport:send(Socket, + case IsFin of + fin -> <<"\r\n0\r\n\r\n">>; + nofin -> <<"\r\n">> + end), + Stream0; + Stream0 when OutState =:= chunked -> + Transport:send(Socket, [ + integer_to_binary(Size, 16), <<"\r\n">>, Data, + case IsFin of + fin -> <<"\r\n0\r\n\r\n">>; + nofin -> <<"\r\n">> + end + ]), + Stream0; + Stream0 when OutState =:= streaming -> + #stream{local_sent_size=SentSize0, local_expected_size=ExpectedSize} = Stream0, + SentSize = SentSize0 + Size, + if + %% ExpectedSize may be undefined, which is > any integer value. 
+ SentSize > ExpectedSize -> + terminate(State0, response_body_too_large); + is_tuple(Data) -> + sendfile(State0, Data); + true -> + Transport:send(Socket, Data) + end, + Stream0#stream{local_sent_size=SentSize} + end, + State = case IsFin of + fin -> State0#state{out_state=done}; + nofin -> State0 + end, + Streams = lists:keyreplace(StreamID, #stream.id, Streams0, Stream), + commands(State#state{streams=Streams}, StreamID, Tail); +commands(State=#state{socket=Socket, transport=Transport, streams=Streams, out_state=OutState}, + StreamID, [{trailers, Trailers}|Tail]) -> + case stream_te(OutState, lists:keyfind(StreamID, #stream.id, Streams)) of + trailers -> + Transport:send(Socket, [ + <<"0\r\n">>, + cow_http:headers(maps:to_list(Trailers)), + <<"\r\n">> + ]); + no_trailers -> + Transport:send(Socket, <<"0\r\n\r\n">>); + not_chunked -> + ok + end, + commands(State#state{out_state=done}, StreamID, Tail); +%% Protocol takeover. +commands(State0=#state{ref=Ref, parent=Parent, socket=Socket, transport=Transport, + out_state=OutState, opts=Opts, buffer=Buffer, children=Children}, StreamID, + [{switch_protocol, Headers, Protocol, InitialState}|_Tail]) -> + %% @todo If there's streams opened after this one, fail instead of 101. + State1 = cancel_timeout(State0), + %% Before we send the 101 response we need to stop receiving data + %% from the socket, otherwise the data might be receive before the + %% call to flush/0 and we end up inadvertently dropping a packet. + %% + %% @todo Handle cases where the request came with a body. We need + %% to process or skip the body before the upgrade can be completed. + State = passive(State1), + %% Send a 101 response if necessary, then terminate the stream. + #state{streams=Streams} = case OutState of + wait -> info(State, StreamID, {inform, 101, Headers}); + _ -> State + end, + #stream{state=StreamState} = lists:keyfind(StreamID, #stream.id, Streams), + %% @todo We need to shutdown processes here first. + stream_call_terminate(StreamID, switch_protocol, StreamState, State), + %% Terminate children processes and flush any remaining messages from the mailbox. + cowboy_children:terminate(Children), + flush(Parent), + Protocol:takeover(Parent, Ref, Socket, Transport, Opts, Buffer, InitialState); +%% Set options dynamically. +commands(State0=#state{overriden_opts=Opts}, + StreamID, [{set_options, SetOpts}|Tail]) -> + State1 = case SetOpts of + #{idle_timeout := IdleTimeout} -> + set_timeout(State0#state{overriden_opts=Opts#{idle_timeout => IdleTimeout}}, + idle_timeout); + _ -> + State0 + end, + State = case SetOpts of + #{chunked := Chunked} -> + State1#state{overriden_opts=Opts#{chunked => Chunked}}; + _ -> + State1 + end, + commands(State, StreamID, Tail); +%% Stream shutdown. +commands(State, StreamID, [stop|Tail]) -> + %% @todo Do we want to run the commands after a stop? + %% @todo We currently wait for the stop command before we + %% continue with the next request/response. In theory, if + %% the request body was read fully and the response body + %% was sent fully we should be able to start working on + %% the next request concurrently. This can be done as a + %% future optimization. + maybe_terminate(State, StreamID, Tail); +%% Log event. +commands(State=#state{opts=Opts}, StreamID, [Log={log, _, _, _}|Tail]) -> + cowboy:log(Log, Opts), + commands(State, StreamID, Tail); +%% HTTP/1.1 does not support push; ignore. +commands(State, StreamID, [{push, _, _, _, _, _, _, _}|Tail]) -> + commands(State, StreamID, Tail). 
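+
+%% For illustration: commands/3 consumes the command list returned by the
+%% stream handler callbacks. A (hypothetical) handler returning
+%%   {[{response, 200, #{<<"content-length">> => <<"2">>}, <<"ok">>}], State}
+%% from cowboy_stream:init/3 results in one Transport:send/2 call carrying the
+%% status line, headers and body, after which out_state becomes done and
+%% connection/4 (further below) decides whether the connection stays open for
+%% the next pipelined request.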
+ +%% The set-cookie header is special; we can only send one cookie per header. +headers_to_list(Headers0=#{<<"set-cookie">> := SetCookies}) -> + Headers1 = maps:to_list(maps:remove(<<"set-cookie">>, Headers0)), + Headers1 ++ [{<<"set-cookie">>, Value} || Value <- SetCookies]; +headers_to_list(Headers) -> + maps:to_list(Headers). + +%% We wrap the sendfile call into a try/catch because on OTP-20 +%% and earlier a few different crashes could occur for sockets +%% that were closing or closed. For example a badarg in +%% erlang:port_get_data(#Port<...>) or a badmatch like +%% {{badmatch,{error,einval}},[{prim_file,sendfile,8,[]}... +%% +%% OTP-21 uses a NIF instead of a port so the implementation +%% and behavior has dramatically changed and it is unclear +%% whether it will be necessary in the future. +%% +%% This try/catch prevents some noisy logs to be written +%% when these errors occur. +sendfile(State=#state{socket=Socket, transport=Transport, opts=Opts}, + {sendfile, Offset, Bytes, Path}) -> + try + %% When sendfile is disabled we explicitly use the fallback. + _ = case maps:get(sendfile, Opts, true) of + true -> Transport:sendfile(Socket, Path, Offset, Bytes); + false -> ranch_transport:sendfile(Transport, Socket, Path, Offset, Bytes, []) + end, + ok + catch _:_ -> + terminate(State, {socket_error, sendfile_crash, + 'An error occurred when using the sendfile function.'}) + end. + +%% Flush messages specific to cowboy_http before handing over the +%% connection to another protocol. +flush(Parent) -> + receive + {timeout, _, _} -> + flush(Parent); + {{Pid, _}, _} when Pid =:= self() -> + flush(Parent); + {'EXIT', Pid, _} when Pid =/= Parent -> + flush(Parent) + after 0 -> + ok + end. + +%% @todo In these cases I'm not sure if we should continue processing commands. +maybe_terminate(State=#state{last_streamid=StreamID}, StreamID, _Tail) -> + terminate(stream_terminate(State, StreamID, normal), normal); %% @todo Reason ok? +maybe_terminate(State, StreamID, _Tail) -> + stream_terminate(State, StreamID, normal). + +stream_terminate(State0=#state{opts=Opts, in_streamid=InStreamID, in_state=InState, + out_streamid=OutStreamID, out_state=OutState, streams=Streams0, + children=Children0}, StreamID, Reason) -> + #stream{version=Version, local_expected_size=ExpectedSize, local_sent_size=SentSize} + = lists:keyfind(StreamID, #stream.id, Streams0), + %% Send a response or terminate chunks depending on the current output state. + State1 = #state{streams=Streams1} = case OutState of + wait when element(1, Reason) =:= internal_error -> + info(State0, StreamID, {response, 500, #{<<"content-length">> => <<"0">>}, <<>>}); + wait when element(1, Reason) =:= connection_error -> + info(State0, StreamID, {response, 400, #{<<"content-length">> => <<"0">>}, <<>>}); + wait -> + info(State0, StreamID, {response, 204, #{}, <<>>}); + chunked when Version =:= 'HTTP/1.1' -> + info(State0, StreamID, {data, fin, <<>>}); + streaming when SentSize < ExpectedSize -> + terminate(State0, response_body_too_small); + _ -> %% done or Version =:= 'HTTP/1.0' + State0 + end, + %% Stop the stream, shutdown children and reset overriden options. 
+ {value, #stream{state=StreamState}, Streams} + = lists:keytake(StreamID, #stream.id, Streams1), + stream_call_terminate(StreamID, Reason, StreamState, State1), + Children = cowboy_children:shutdown(Children0, StreamID), + State = State1#state{overriden_opts=#{}, streams=Streams, children=Children}, + %% We want to drop the connection if the body was not read fully + %% and we don't know its length or more remains to be read than + %% configuration allows. + MaxSkipBodyLength = maps:get(max_skip_body_length, Opts, 1000000), + case InState of + #ps_body{length=undefined} + when InStreamID =:= OutStreamID -> + terminate(State, skip_body_unknown_length); + #ps_body{length=Len, received=Received} + when InStreamID =:= OutStreamID, Received + MaxSkipBodyLength < Len -> + terminate(State, skip_body_too_large); + #ps_body{} when InStreamID =:= OutStreamID -> + stream_next(State#state{flow=infinity}); + _ -> + stream_next(State) + end. + +stream_next(State0=#state{opts=Opts, active=Active, out_streamid=OutStreamID, streams=Streams}) -> + NextOutStreamID = OutStreamID + 1, + case lists:keyfind(NextOutStreamID, #stream.id, Streams) of + false -> + State0#state{out_streamid=NextOutStreamID, out_state=wait}; + #stream{queue=Commands} -> + State = case Active of + true -> State0; + false -> active(State0) + end, + %% @todo Remove queue from the stream. + %% We set the flow to the initial flow size even though + %% we might have sent some data through already due to pipelining. + Flow = maps:get(initial_stream_flow_size, Opts, 65535), + commands(State#state{flow=Flow, out_streamid=NextOutStreamID, out_state=wait}, + NextOutStreamID, Commands) + end. + +stream_call_terminate(StreamID, Reason, StreamState, #state{opts=Opts}) -> + try + cowboy_stream:terminate(StreamID, Reason, StreamState) + catch Class:Exception:Stacktrace -> + cowboy:log(cowboy_stream:make_error_log(terminate, + [StreamID, Reason, StreamState], + Class, Exception, Stacktrace), Opts) + end. + +maybe_req_close(#state{opts=#{http10_keepalive := false}}, _, 'HTTP/1.0') -> + close; +maybe_req_close(_, #{<<"connection">> := Conn}, 'HTTP/1.0') -> + Conns = cow_http_hd:parse_connection(Conn), + case lists:member(<<"keep-alive">>, Conns) of + true -> keepalive; + false -> close + end; +maybe_req_close(_, _, 'HTTP/1.0') -> + close; +maybe_req_close(_, #{<<"connection">> := Conn}, 'HTTP/1.1') -> + case connection_hd_is_close(Conn) of + true -> close; + false -> keepalive + end; +maybe_req_close(_, _, _) -> + keepalive. + +connection(State=#state{last_streamid=StreamID}, Headers=#{<<"connection">> := Conn}, StreamID, _) -> + case connection_hd_is_close(Conn) of + true -> {State, Headers}; + %% @todo Here we need to remove keep-alive and add close, not just add close. + false -> {State, Headers#{<<"connection">> => [<<"close, ">>, Conn]}} + end; +connection(State=#state{last_streamid=StreamID}, Headers, StreamID, _) -> + {State, Headers#{<<"connection">> => <<"close">>}}; +connection(State, Headers=#{<<"connection">> := Conn}, StreamID, _) -> + case connection_hd_is_close(Conn) of + true -> {State#state{last_streamid=StreamID}, Headers}; + %% @todo Here we need to set keep-alive only if it wasn't set before. + false -> {State, Headers} + end; +connection(State, Headers, _, 'HTTP/1.0') -> + {State, Headers#{<<"connection">> => <<"keep-alive">>}}; +connection(State, Headers, _, _) -> + {State, Headers}. + +connection_hd_is_close(Conn) -> + Conns = cow_http_hd:parse_connection(iolist_to_binary(Conn)), + lists:member(<<"close">>, Conns). 
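Taken together, maybe_req_close/3 and connection/4 above implement the usual keep-alive rules: HTTP/1.0 requests close unless the client sent connection: keep-alive (and http10_keepalive is not disabled), while HTTP/1.1 requests stay open unless the client sent connection: close. A few illustrative calls, assuming State is a #state{} with default options (these functions are internal to this module):

    keepalive = maybe_req_close(State, #{<<"connection">> => <<"keep-alive">>}, 'HTTP/1.0'),
    close     = maybe_req_close(State, #{}, 'HTTP/1.0'),
    close     = maybe_req_close(State, #{<<"connection">> => <<"close">>}, 'HTTP/1.1'),
    keepalive = maybe_req_close(State, #{}, 'HTTP/1.1').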
+ +stream_te(streaming, _) -> + not_chunked; +%% No TE header was sent. +stream_te(_, #stream{te=undefined}) -> + no_trailers; +stream_te(_, #stream{te=TE0}) -> + try cow_http_hd:parse_te(TE0) of + {TE1, _} -> TE1 + catch _:_ -> + %% If we can't parse the TE header, assume we can't send trailers. + no_trailers + end. + +%% This function is only called when an error occurs on a new stream. +-spec error_terminate(cowboy:http_status(), #state{}, _) -> no_return(). +error_terminate(StatusCode, State=#state{ref=Ref, peer=Peer, in_state=StreamState}, Reason) -> + PartialReq = case StreamState of + #ps_request_line{} -> #{ + ref => Ref, + peer => Peer + }; + #ps_header{method=Method, path=Path, qs=Qs, + version=Version, headers=ReqHeaders} -> #{ + ref => Ref, + peer => Peer, + method => Method, + path => Path, + qs => Qs, + version => Version, + headers => case ReqHeaders of + undefined -> #{}; + _ -> ReqHeaders + end + } + end, + early_error(StatusCode, State, Reason, PartialReq, #{<<"connection">> => <<"close">>}), + terminate(State, Reason). + +early_error(StatusCode, State, Reason, PartialReq) -> + early_error(StatusCode, State, Reason, PartialReq, #{}). + +early_error(StatusCode0, #state{socket=Socket, transport=Transport, + opts=Opts, in_streamid=StreamID}, Reason, PartialReq, RespHeaders0) -> + RespHeaders1 = RespHeaders0#{<<"content-length">> => <<"0">>}, + Resp = {response, StatusCode0, RespHeaders1, <<>>}, + try cowboy_stream:early_error(StreamID, Reason, PartialReq, Resp, Opts) of + {response, StatusCode, RespHeaders, RespBody} -> + Transport:send(Socket, [ + cow_http:response(StatusCode, 'HTTP/1.1', maps:to_list(RespHeaders)), + %% @todo We shouldn't send the body when the method is HEAD. + %% @todo Technically we allow the sendfile tuple. + RespBody + ]) + catch Class:Exception:Stacktrace -> + cowboy:log(cowboy_stream:make_error_log(early_error, + [StreamID, Reason, PartialReq, Resp, Opts], + Class, Exception, Stacktrace), Opts), + %% We still need to send an error response, so send what we initially + %% wanted to send. It's better than nothing. + Transport:send(Socket, cow_http:response(StatusCode0, + 'HTTP/1.1', maps:to_list(RespHeaders1))) + end, + ok. + +initiate_closing(State=#state{streams=[]}, Reason) -> + terminate(State, Reason); +initiate_closing(State=#state{streams=[_Stream|Streams], + out_streamid=OutStreamID}, Reason) -> + terminate_all_streams(State, Streams, Reason), + State#state{last_streamid=OutStreamID}. + +-spec terminate(_, _) -> no_return(). +terminate(undefined, Reason) -> + exit({shutdown, Reason}); +terminate(State=#state{streams=Streams, children=Children}, Reason) -> + terminate_all_streams(State, Streams, Reason), + cowboy_children:terminate(Children), + terminate_linger(State), + exit({shutdown, Reason}). + +terminate_all_streams(_, [], _) -> + ok; +terminate_all_streams(State, [#stream{id=StreamID, state=StreamState}|Tail], Reason) -> + stream_call_terminate(StreamID, Reason, StreamState, State), + terminate_all_streams(State, Tail, Reason). + +terminate_linger(State=#state{socket=Socket, transport=Transport, opts=Opts}) -> + case Transport:shutdown(Socket, write) of + ok -> + case maps:get(linger_timeout, Opts, 1000) of + 0 -> + ok; + infinity -> + terminate_linger_before_loop(State, undefined, Transport:messages()); + Timeout -> + TimerRef = erlang:start_timer(Timeout, self(), linger_timeout), + terminate_linger_before_loop(State, TimerRef, Transport:messages()) + end; + {error, _} -> + ok + end. 
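terminate_linger/1 above shuts down the write side of the socket and keeps draining it for up to linger_timeout milliseconds (default 1000) so the final response is not lost on an abrupt close. Both this value and idle_timeout are ordinary protocol options; a hedged sketch of tuning them when starting a listener (listener name, port and route are placeholders):

    Dispatch = cowboy_router:compile([{'_', [{"/", stream_hello_h, []}]}]),
    {ok, _} = cowboy:start_clear(my_http_listener,
        [{port, 8080}],
        #{env => #{dispatch => Dispatch},
          idle_timeout => 30000,     %% milliseconds; default is 60000
          linger_timeout => 5000}).  %% milliseconds; default is 1000, as above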
+
+terminate_linger_before_loop(State, TimerRef, Messages) ->
+	%% We may already be in active mode when we do this
+	%% but it's OK because we are shutting down anyway.
+	case setopts_active(State) of
+		ok ->
+			terminate_linger_loop(State, TimerRef, Messages);
+		{error, _} ->
+			ok
+	end.
+
+terminate_linger_loop(State=#state{socket=Socket}, TimerRef, Messages) ->
+	receive
+		{OK, Socket, _} when OK =:= element(1, Messages) ->
+			terminate_linger_loop(State, TimerRef, Messages);
+		{Closed, Socket} when Closed =:= element(2, Messages) ->
+			ok;
+		{Error, Socket, _} when Error =:= element(3, Messages) ->
+			ok;
+		{Passive, Socket} when Passive =:= tcp_passive; Passive =:= ssl_passive ->
+			terminate_linger_before_loop(State, TimerRef, Messages);
+		{timeout, TimerRef, linger_timeout} ->
+			ok;
+		_ ->
+			terminate_linger_loop(State, TimerRef, Messages)
+	end.
+
+%% System callbacks.
+
+-spec system_continue(_, _, #state{}) -> ok.
+system_continue(_, _, State) ->
+	loop(State).
+
+-spec system_terminate(any(), _, _, #state{}) -> no_return().
+system_terminate(Reason0, _, _, State) ->
+	Reason = {stop, {exit, Reason0}, 'sys:terminate/2,3 was called.'},
+	loop(initiate_closing(State, Reason)).
+
+-spec system_code_change(Misc, _, _, _) -> {ok, Misc} when Misc::{#state{}, binary()}.
+system_code_change(Misc, _, _, _) ->
+	{ok, Misc}.
diff --git a/deps/cowboy/src/cowboy_http2.erl b/deps/cowboy/src/cowboy_http2.erl
new file mode 100644
index 0000000..ed2623c
--- /dev/null
+++ b/deps/cowboy/src/cowboy_http2.erl
@@ -0,0 +1,1220 @@
+%% Copyright (c) 2015-2017, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cowboy_http2).
+
+-export([init/6]).
+-export([init/10]).
+-export([init/12]).
+
+-export([system_continue/3]).
+-export([system_terminate/4]).
+-export([system_code_change/4]).
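This module is reached either through a TLS listener that negotiated h2 via ALPN, through HTTP/2 prior knowledge, or through an HTTP/1.1 Upgrade (the init/12 export). A hedged sketch of an HTTP/2-capable TLS listener follows; cowboy:start_tls/3 is expected to advertise h2 and http/1.1 over ALPN, and the listener name and certificate paths are placeholders. HTTP/2-specific protocol options are those documented in the opts() type below:

    {ok, _} = cowboy:start_tls(my_https_listener,
        [{port, 8443},
         {certfile, "priv/cert/selfsigned.pem"},
         {keyfile, "priv/cert/selfsigned_key.pem"}],
        #{env => #{dispatch => Dispatch},
          max_concurrent_streams => 100,
          idle_timeout => 30000}).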
+ +-type opts() :: #{ + active_n => pos_integer(), + compress_buffering => boolean(), + compress_threshold => non_neg_integer(), + connection_type => worker | supervisor, + connection_window_margin_size => 0..16#7fffffff, + connection_window_update_threshold => 0..16#7fffffff, + enable_connect_protocol => boolean(), + env => cowboy_middleware:env(), + goaway_initial_timeout => timeout(), + goaway_complete_timeout => timeout(), + idle_timeout => timeout(), + inactivity_timeout => timeout(), + initial_connection_window_size => 65535..16#7fffffff, + initial_stream_window_size => 0..16#7fffffff, + linger_timeout => timeout(), + logger => module(), + max_concurrent_streams => non_neg_integer() | infinity, + max_connection_buffer_size => non_neg_integer(), + max_connection_window_size => 0..16#7fffffff, + max_decode_table_size => non_neg_integer(), + max_encode_table_size => non_neg_integer(), + max_frame_size_received => 16384..16777215, + max_frame_size_sent => 16384..16777215 | infinity, + max_received_frame_rate => {pos_integer(), timeout()}, + max_reset_stream_rate => {pos_integer(), timeout()}, + max_stream_buffer_size => non_neg_integer(), + max_stream_window_size => 0..16#7fffffff, + metrics_callback => cowboy_metrics_h:metrics_callback(), + metrics_req_filter => fun((cowboy_req:req()) -> map()), + metrics_resp_headers_filter => fun((cowboy:http_headers()) -> cowboy:http_headers()), + middlewares => [module()], + preface_timeout => timeout(), + proxy_header => boolean(), + sendfile => boolean(), + settings_timeout => timeout(), + shutdown_timeout => timeout(), + stream_handlers => [module()], + stream_window_data_threshold => 0..16#7fffffff, + stream_window_margin_size => 0..16#7fffffff, + stream_window_update_threshold => 0..16#7fffffff, + tracer_callback => cowboy_tracer_h:tracer_callback(), + tracer_flags => [atom()], + tracer_match_specs => cowboy_tracer_h:tracer_match_specs(), + %% Open ended because configured stream handlers might add options. + _ => _ +}. +-export_type([opts/0]). + +-record(stream, { + %% Whether the stream is currently stopping. + status = running :: running | stopping, + + %% Flow requested for this stream. + flow = 0 :: non_neg_integer(), + + %% Stream state. + state :: {module, any()} +}). + +-record(state, { + parent = undefined :: pid(), + ref :: ranch:ref(), + socket = undefined :: inet:socket(), + transport :: module(), + proxy_header :: undefined | ranch_proxy_header:proxy_info(), + opts = #{} :: opts(), + + %% Timer for idle_timeout; also used for goaway timers. + timer = undefined :: undefined | reference(), + + %% Remote address and port for the connection. + peer = undefined :: {inet:ip_address(), inet:port_number()}, + + %% Local address and port for the connection. + sock = undefined :: {inet:ip_address(), inet:port_number()}, + + %% Client certificate (TLS only). + cert :: undefined | binary(), + + %% HTTP/2 state machine. + http2_status :: sequence | settings | upgrade | connected | closing_initiated | closing, + http2_machine :: cow_http2_machine:http2_machine(), + + %% HTTP/2 frame rate flood protection. + frame_rate_num :: undefined | pos_integer(), + frame_rate_time :: undefined | integer(), + + %% HTTP/2 reset stream flood protection. + reset_rate_num :: undefined | pos_integer(), + reset_rate_time :: undefined | integer(), + + %% Flow requested for all streams. + flow = 0 :: non_neg_integer(), + + %% Currently active HTTP/2 streams. Streams may be initiated either + %% by the client or by the server through PUSH_PROMISE frames. 
+ streams = #{} :: #{cow_http2:streamid() => #stream{}}, + + %% Streams can spawn zero or more children which are then managed + %% by this module if operating as a supervisor. + children = cowboy_children:init() :: cowboy_children:children() +}). + +-spec init(pid(), ranch:ref(), inet:socket(), module(), + ranch_proxy_header:proxy_info() | undefined, cowboy:opts()) -> ok. +init(Parent, Ref, Socket, Transport, ProxyHeader, Opts) -> + Peer0 = Transport:peername(Socket), + Sock0 = Transport:sockname(Socket), + Cert1 = case Transport:name() of + ssl -> + case ssl:peercert(Socket) of + {error, no_peercert} -> + {ok, undefined}; + Cert0 -> + Cert0 + end; + _ -> + {ok, undefined} + end, + case {Peer0, Sock0, Cert1} of + {{ok, Peer}, {ok, Sock}, {ok, Cert}} -> + init(Parent, Ref, Socket, Transport, ProxyHeader, Opts, Peer, Sock, Cert, <<>>); + {{error, Reason}, _, _} -> + terminate(undefined, {socket_error, Reason, + 'A socket error occurred when retrieving the peer name.'}); + {_, {error, Reason}, _} -> + terminate(undefined, {socket_error, Reason, + 'A socket error occurred when retrieving the sock name.'}); + {_, _, {error, Reason}} -> + terminate(undefined, {socket_error, Reason, + 'A socket error occurred when retrieving the client TLS certificate.'}) + end. + +-spec init(pid(), ranch:ref(), inet:socket(), module(), + ranch_proxy_header:proxy_info() | undefined, cowboy:opts(), + {inet:ip_address(), inet:port_number()}, {inet:ip_address(), inet:port_number()}, + binary() | undefined, binary()) -> ok. +init(Parent, Ref, Socket, Transport, ProxyHeader, Opts, Peer, Sock, Cert, Buffer) -> + {ok, Preface, HTTP2Machine} = cow_http2_machine:init(server, Opts), + State = set_idle_timeout(init_rate_limiting(#state{parent=Parent, ref=Ref, socket=Socket, + transport=Transport, proxy_header=ProxyHeader, + opts=Opts, peer=Peer, sock=Sock, cert=Cert, + http2_status=sequence, http2_machine=HTTP2Machine})), + Transport:send(Socket, Preface), + setopts_active(State), + case Buffer of + <<>> -> loop(State, Buffer); + _ -> parse(State, Buffer) + end. + +init_rate_limiting(State) -> + CurrentTime = erlang:monotonic_time(millisecond), + init_reset_rate_limiting(init_frame_rate_limiting(State, CurrentTime), CurrentTime). + +init_frame_rate_limiting(State=#state{opts=Opts}, CurrentTime) -> + {FrameRateNum, FrameRatePeriod} = maps:get(max_received_frame_rate, Opts, {10000, 10000}), + State#state{ + frame_rate_num=FrameRateNum, frame_rate_time=add_period(CurrentTime, FrameRatePeriod) + }. + +init_reset_rate_limiting(State=#state{opts=Opts}, CurrentTime) -> + {ResetRateNum, ResetRatePeriod} = maps:get(max_reset_stream_rate, Opts, {10, 10000}), + State#state{ + reset_rate_num=ResetRateNum, reset_rate_time=add_period(CurrentTime, ResetRatePeriod) + }. + +add_period(_, infinity) -> infinity; +add_period(Time, Period) -> Time + Period. + +%% @todo Add an argument for the request body. +-spec init(pid(), ranch:ref(), inet:socket(), module(), + ranch_proxy_header:proxy_info() | undefined, cowboy:opts(), + {inet:ip_address(), inet:port_number()}, {inet:ip_address(), inet:port_number()}, + binary() | undefined, binary(), map() | undefined, cowboy_req:req()) -> ok. 
+init(Parent, Ref, Socket, Transport, ProxyHeader, Opts, Peer, Sock, Cert, Buffer, + _Settings, Req=#{method := Method}) -> + {ok, Preface, HTTP2Machine0} = cow_http2_machine:init(server, Opts), + {ok, StreamID, HTTP2Machine} + = cow_http2_machine:init_upgrade_stream(Method, HTTP2Machine0), + State0 = #state{parent=Parent, ref=Ref, socket=Socket, + transport=Transport, proxy_header=ProxyHeader, + opts=Opts, peer=Peer, sock=Sock, cert=Cert, + http2_status=upgrade, http2_machine=HTTP2Machine}, + State1 = headers_frame(State0#state{ + http2_machine=HTTP2Machine}, StreamID, Req), + %% We assume that the upgrade will be applied. A stream handler + %% must not prevent the normal operations of the server. + State2 = info(State1, 1, {switch_protocol, #{ + <<"connection">> => <<"Upgrade">>, + <<"upgrade">> => <<"h2c">> + }, ?MODULE, undefined}), %% @todo undefined or #{}? + State = set_idle_timeout(init_rate_limiting(State2#state{http2_status=sequence})), + Transport:send(Socket, Preface), + setopts_active(State), + case Buffer of + <<>> -> loop(State, Buffer); + _ -> parse(State, Buffer) + end. + +%% Because HTTP/2 has flow control and Cowboy has other rate limiting +%% mechanisms implemented, a very large active_n value should be fine, +%% as long as the stream handlers do their work in a timely manner. +setopts_active(#state{socket=Socket, transport=Transport, opts=Opts}) -> + N = maps:get(active_n, Opts, 100), + Transport:setopts(Socket, [{active, N}]). + +loop(State=#state{parent=Parent, socket=Socket, transport=Transport, + opts=Opts, timer=TimerRef, children=Children}, Buffer) -> + Messages = Transport:messages(), + InactivityTimeout = maps:get(inactivity_timeout, Opts, 300000), + receive + %% Socket messages. + {OK, Socket, Data} when OK =:= element(1, Messages) -> + parse(set_idle_timeout(State), << Buffer/binary, Data/binary >>); + {Closed, Socket} when Closed =:= element(2, Messages) -> + Reason = case State#state.http2_status of + closing -> {stop, closed, 'The client is going away.'}; + _ -> {socket_error, closed, 'The socket has been closed.'} + end, + terminate(State, Reason); + {Error, Socket, Reason} when Error =:= element(3, Messages) -> + terminate(State, {socket_error, Reason, 'An error has occurred on the socket.'}); + {Passive, Socket} when Passive =:= element(4, Messages); + %% Hardcoded for compatibility with Ranch 1.x. + Passive =:= tcp_passive; Passive =:= ssl_passive -> + setopts_active(State), + loop(State, Buffer); + %% System messages. + {'EXIT', Parent, shutdown} -> + Reason = {stop, {exit, shutdown}, 'Parent process requested shutdown.'}, + loop(initiate_closing(State, Reason), Buffer); + {'EXIT', Parent, Reason} -> + terminate(State, {stop, {exit, Reason}, 'Parent process terminated.'}); + {system, From, Request} -> + sys:handle_system_msg(Request, From, Parent, ?MODULE, [], {State, Buffer}); + %% Timeouts. + {timeout, TimerRef, idle_timeout} -> + terminate(State, {stop, timeout, + 'Connection idle longer than configuration allows.'}); + {timeout, Ref, {shutdown, Pid}} -> + cowboy_children:shutdown_timeout(Children, Ref, Pid), + loop(State, Buffer); + {timeout, TRef, {cow_http2_machine, Name}} -> + loop(timeout(State, Name, TRef), Buffer); + {timeout, TimerRef, {goaway_initial_timeout, Reason}} -> + loop(closing(State, Reason), Buffer); + {timeout, TimerRef, {goaway_complete_timeout, Reason}} -> + terminate(State, {stop, stop_reason(Reason), + 'Graceful shutdown timed out.'}); + %% Messages pertaining to a stream. 
+ {{Pid, StreamID}, Msg} when Pid =:= self() -> + loop(info(State, StreamID, Msg), Buffer); + %% Exit signal from children. + Msg = {'EXIT', Pid, _} -> + loop(down(State, Pid, Msg), Buffer); + %% Calls from supervisor module. + {'$gen_call', From, Call} -> + cowboy_children:handle_supervisor_call(Call, From, Children, ?MODULE), + loop(State, Buffer); + Msg -> + cowboy:log(warning, "Received stray message ~p.", [Msg], Opts), + loop(State, Buffer) + after InactivityTimeout -> + terminate(State, {internal_error, timeout, 'No message or data received before timeout.'}) + end. + +set_idle_timeout(State=#state{http2_status=Status, timer=TimerRef}) + when Status =:= closing_initiated orelse Status =:= closing, + TimerRef =/= undefined -> + State; +set_idle_timeout(State=#state{opts=Opts}) -> + set_timeout(State, maps:get(idle_timeout, Opts, 60000), idle_timeout). + +set_timeout(State=#state{timer=TimerRef0}, Timeout, Message) -> + ok = case TimerRef0 of + undefined -> ok; + _ -> erlang:cancel_timer(TimerRef0, [{async, true}, {info, false}]) + end, + TimerRef = case Timeout of + infinity -> undefined; + Timeout -> erlang:start_timer(Timeout, self(), Message) + end, + State#state{timer=TimerRef}. + +%% HTTP/2 protocol parsing. + +parse(State=#state{http2_status=sequence}, Data) -> + case cow_http2:parse_sequence(Data) of + {ok, Rest} -> + parse(State#state{http2_status=settings}, Rest); + more -> + loop(State, Data); + Error = {connection_error, _, _} -> + terminate(State, Error) + end; +parse(State=#state{http2_status=Status, http2_machine=HTTP2Machine, streams=Streams}, Data) -> + MaxFrameSize = cow_http2_machine:get_local_setting(max_frame_size, HTTP2Machine), + case cow_http2:parse(Data, MaxFrameSize) of + {ok, Frame, Rest} -> + parse(frame_rate(State, Frame), Rest); + {ignore, Rest} -> + parse(frame_rate(State, ignore), Rest); + {stream_error, StreamID, Reason, Human, Rest} -> + parse(reset_stream(State, StreamID, {stream_error, Reason, Human}), Rest); + Error = {connection_error, _, _} -> + terminate(State, Error); + %% Terminate the connection if we are closing and all streams have completed. + more when Status =:= closing, Streams =:= #{} -> + terminate(State, {stop, normal, 'The connection is going away.'}); + more -> + loop(State, Data) + end. + +%% Frame rate flood protection. + +frame_rate(State0=#state{frame_rate_num=Num0, frame_rate_time=Time}, Frame) -> + {Result, State} = case Num0 - 1 of + 0 -> + CurrentTime = erlang:monotonic_time(millisecond), + if + CurrentTime < Time -> + {error, State0}; + true -> + %% When the option has a period of infinity we cannot reach this clause. + {ok, init_frame_rate_limiting(State0, CurrentTime)} + end; + Num -> + {ok, State0#state{frame_rate_num=Num}} + end, + case {Result, Frame} of + {ok, ignore} -> ignored_frame(State); + {ok, _} -> frame(State, Frame); + {error, _} -> terminate(State, {connection_error, enhance_your_calm, + 'Frame rate larger than configuration allows. Flood? (CVE-2019-9512, CVE-2019-9515, CVE-2019-9518)'}) + end. + +%% Frames received. + +%% We do nothing when receiving a lingering DATA frame. +%% We already removed the stream flow from the connection +%% flow and are therefore already accounting for the window +%% being reduced by these frames. 
+frame(State=#state{http2_machine=HTTP2Machine0}, Frame) -> + case cow_http2_machine:frame(Frame, HTTP2Machine0) of + {ok, HTTP2Machine} -> + maybe_ack(State#state{http2_machine=HTTP2Machine}, Frame); + {ok, {data, StreamID, IsFin, Data}, HTTP2Machine} -> + data_frame(State#state{http2_machine=HTTP2Machine}, StreamID, IsFin, Data); + {ok, {headers, StreamID, IsFin, Headers, PseudoHeaders, BodyLen}, HTTP2Machine} -> + headers_frame(State#state{http2_machine=HTTP2Machine}, + StreamID, IsFin, Headers, PseudoHeaders, BodyLen); + {ok, {trailers, _StreamID, _Trailers}, HTTP2Machine} -> + %% @todo Propagate trailers. + State#state{http2_machine=HTTP2Machine}; + {ok, {rst_stream, StreamID, Reason}, HTTP2Machine} -> + rst_stream_frame(State#state{http2_machine=HTTP2Machine}, StreamID, Reason); + {ok, GoAway={goaway, _, _, _}, HTTP2Machine} -> + goaway(State#state{http2_machine=HTTP2Machine}, GoAway); + {send, SendData, HTTP2Machine} -> + %% We may need to send an alarm for each of the streams sending data. + lists:foldl( + fun({StreamID, _, _}, S) -> maybe_send_data_alarm(S, HTTP2Machine0, StreamID) end, + send_data(maybe_ack(State#state{http2_machine=HTTP2Machine}, Frame), SendData, []), + SendData); + {error, {stream_error, StreamID, Reason, Human}, HTTP2Machine} -> + reset_stream(State#state{http2_machine=HTTP2Machine}, + StreamID, {stream_error, Reason, Human}); + {error, Error={connection_error, _, _}, HTTP2Machine} -> + terminate(State#state{http2_machine=HTTP2Machine}, Error) + end. + +%% We use this opportunity to mark the HTTP/2 status as connected +%% if we were still waiting for a SETTINGS frame. +maybe_ack(State=#state{http2_status=settings}, Frame) -> + maybe_ack(State#state{http2_status=connected}, Frame); +maybe_ack(State=#state{socket=Socket, transport=Transport}, Frame) -> + case Frame of + {settings, _} -> Transport:send(Socket, cow_http2:settings_ack()); + {ping, Opaque} -> Transport:send(Socket, cow_http2:ping_ack(Opaque)); + _ -> ok + end, + State. + +data_frame(State0=#state{opts=Opts, flow=Flow, streams=Streams}, StreamID, IsFin, Data) -> + case Streams of + #{StreamID := Stream=#stream{status=running, flow=StreamFlow, state=StreamState0}} -> + try cowboy_stream:data(StreamID, IsFin, Data, StreamState0) of + {Commands, StreamState} -> + %% Remove the amount of data received from the flow. + %% We may receive more data than we requested. We ensure + %% that the flow value doesn't go lower than 0. + Size = byte_size(Data), + State = update_window(State0#state{flow=max(0, Flow - Size), + streams=Streams#{StreamID => Stream#stream{ + flow=max(0, StreamFlow - Size), state=StreamState}}}, + StreamID), + commands(State, StreamID, Commands) + catch Class:Exception:Stacktrace -> + cowboy:log(cowboy_stream:make_error_log(data, + [StreamID, IsFin, Data, StreamState0], + Class, Exception, Stacktrace), Opts), + reset_stream(State0, StreamID, {internal_error, {Class, Exception}, + 'Unhandled exception in cowboy_stream:data/4.'}) + end; + %% We ignore DATA frames for streams that are stopping. + #{} -> + State0 + end. + +headers_frame(State, StreamID, IsFin, Headers, + PseudoHeaders=#{method := <<"CONNECT">>}, _) + when map_size(PseudoHeaders) =:= 2 -> + early_error(State, StreamID, IsFin, Headers, PseudoHeaders, 501, + 'The CONNECT method is currently not implemented. 
(RFC7231 4.3.6)'); +headers_frame(State, StreamID, IsFin, Headers, + PseudoHeaders=#{method := <<"TRACE">>}, _) -> + early_error(State, StreamID, IsFin, Headers, PseudoHeaders, 501, + 'The TRACE method is currently not implemented. (RFC7231 4.3.8)'); +headers_frame(State, StreamID, IsFin, Headers, PseudoHeaders=#{authority := Authority}, BodyLen) -> + headers_frame_parse_host(State, StreamID, IsFin, Headers, PseudoHeaders, BodyLen, Authority); +headers_frame(State, StreamID, IsFin, Headers, PseudoHeaders, BodyLen) -> + case lists:keyfind(<<"host">>, 1, Headers) of + {_, Authority} -> + headers_frame_parse_host(State, StreamID, IsFin, Headers, PseudoHeaders, BodyLen, Authority); + _ -> + reset_stream(State, StreamID, {stream_error, protocol_error, + 'Requests translated from HTTP/1.1 must include a host header. (RFC7540 8.1.2.3, RFC7230 5.4)'}) + end. + +headers_frame_parse_host(State=#state{ref=Ref, peer=Peer, sock=Sock, cert=Cert, proxy_header=ProxyHeader}, + StreamID, IsFin, Headers, PseudoHeaders=#{method := Method, scheme := Scheme, path := PathWithQs}, + BodyLen, Authority) -> + try cow_http_hd:parse_host(Authority) of + {Host, Port0} -> + Port = ensure_port(Scheme, Port0), + try cow_http:parse_fullpath(PathWithQs) of + {<<>>, _} -> + reset_stream(State, StreamID, {stream_error, protocol_error, + 'The path component must not be empty. (RFC7540 8.1.2.3)'}); + {Path, Qs} -> + Req0 = #{ + ref => Ref, + pid => self(), + streamid => StreamID, + peer => Peer, + sock => Sock, + cert => Cert, + method => Method, + scheme => Scheme, + host => Host, + port => Port, + path => Path, + qs => Qs, + version => 'HTTP/2', + headers => headers_to_map(Headers, #{}), + has_body => IsFin =:= nofin, + body_length => BodyLen + }, + %% We add the PROXY header information if any. + Req1 = case ProxyHeader of + undefined -> Req0; + _ -> Req0#{proxy_header => ProxyHeader} + end, + %% We add the protocol information for extended CONNECTs. + Req = case PseudoHeaders of + #{protocol := Protocol} -> Req1#{protocol => Protocol}; + _ -> Req1 + end, + headers_frame(State, StreamID, Req) + catch _:_ -> + reset_stream(State, StreamID, {stream_error, protocol_error, + 'The :path pseudo-header is invalid. (RFC7540 8.1.2.3)'}) + end + catch _:_ -> + reset_stream(State, StreamID, {stream_error, protocol_error, + 'The :authority pseudo-header is invalid. (RFC7540 8.1.2.3)'}) + end. + +ensure_port(<<"http">>, undefined) -> 80; +ensure_port(<<"https">>, undefined) -> 443; +ensure_port(_, Port) -> Port. + +%% This function is necessary to properly handle duplicate headers +%% and the special-case cookie header. +headers_to_map([], Acc) -> + Acc; +headers_to_map([{Name, Value}|Tail], Acc0) -> + Acc = case Acc0 of + %% The cookie header does not use proper HTTP header lists. + #{Name := Value0} when Name =:= <<"cookie">> -> + Acc0#{Name => << Value0/binary, "; ", Value/binary >>}; + #{Name := Value0} -> + Acc0#{Name => << Value0/binary, ", ", Value/binary >>}; + _ -> + Acc0#{Name => Value} + end, + headers_to_map(Tail, Acc). 
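headers_to_map/2 folds the decoded header list into the map carried by the Req object, joining duplicate headers with ", " except for cookie, which is joined with "; " because cookies do not form a comma-separated HTTP list (RFC 6265). A worked example of the fold above:

    headers_to_map([
        {<<"accept">>, <<"text/html">>},
        {<<"accept">>, <<"text/plain">>},
        {<<"cookie">>, <<"a=b">>},
        {<<"cookie">>, <<"c=d">>}
    ], #{}).
    %% => #{<<"accept">> => <<"text/html, text/plain">>,
    %%      <<"cookie">> => <<"a=b; c=d">>}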
+ +headers_frame(State=#state{opts=Opts, streams=Streams}, StreamID, Req) -> + try cowboy_stream:init(StreamID, Req, Opts) of + {Commands, StreamState} -> + commands(State#state{ + streams=Streams#{StreamID => #stream{state=StreamState}}}, + StreamID, Commands) + catch Class:Exception:Stacktrace -> + cowboy:log(cowboy_stream:make_error_log(init, + [StreamID, Req, Opts], + Class, Exception, Stacktrace), Opts), + reset_stream(State, StreamID, {internal_error, {Class, Exception}, + 'Unhandled exception in cowboy_stream:init/3.'}) + end. + +early_error(State0=#state{ref=Ref, opts=Opts, peer=Peer}, + StreamID, _IsFin, Headers, #{method := Method}, + StatusCode0, HumanReadable) -> + %% We automatically terminate the stream but it is not an error + %% per se (at least not in the first implementation). + Reason = {stream_error, no_error, HumanReadable}, + %% The partial Req is minimal for now. We only have one case + %% where it can be called (when a method is completely disabled). + %% @todo Fill in the other elements. + PartialReq = #{ + ref => Ref, + peer => Peer, + method => Method, + headers => headers_to_map(Headers, #{}) + }, + Resp = {response, StatusCode0, RespHeaders0=#{<<"content-length">> => <<"0">>}, <<>>}, + try cowboy_stream:early_error(StreamID, Reason, PartialReq, Resp, Opts) of + {response, StatusCode, RespHeaders, RespBody} -> + send_response(State0, StreamID, StatusCode, RespHeaders, RespBody) + catch Class:Exception:Stacktrace -> + cowboy:log(cowboy_stream:make_error_log(early_error, + [StreamID, Reason, PartialReq, Resp, Opts], + Class, Exception, Stacktrace), Opts), + %% We still need to send an error response, so send what we initially + %% wanted to send. It's better than nothing. + send_headers(State0, StreamID, fin, StatusCode0, RespHeaders0) + end. + +rst_stream_frame(State=#state{streams=Streams0, children=Children0}, StreamID, Reason) -> + case maps:take(StreamID, Streams0) of + {#stream{state=StreamState}, Streams} -> + terminate_stream_handler(State, StreamID, Reason, StreamState), + Children = cowboy_children:shutdown(Children0, StreamID), + State#state{streams=Streams, children=Children}; + error -> + State + end. + +ignored_frame(State=#state{http2_machine=HTTP2Machine0}) -> + case cow_http2_machine:ignored_frame(HTTP2Machine0) of + {ok, HTTP2Machine} -> + State#state{http2_machine=HTTP2Machine}; + {error, Error={connection_error, _, _}, HTTP2Machine} -> + terminate(State#state{http2_machine=HTTP2Machine}, Error) + end. + +%% HTTP/2 timeouts. + +timeout(State=#state{http2_machine=HTTP2Machine0}, Name, TRef) -> + case cow_http2_machine:timeout(Name, TRef, HTTP2Machine0) of + {ok, HTTP2Machine} -> + State#state{http2_machine=HTTP2Machine}; + {error, Error={connection_error, _, _}, HTTP2Machine} -> + terminate(State#state{http2_machine=HTTP2Machine}, Error) + end. + +%% Erlang messages. + +down(State0=#state{opts=Opts, children=Children0}, Pid, Msg) -> + State = case cowboy_children:down(Children0, Pid) of + %% The stream was terminated already. + {ok, undefined, Children} -> + State0#state{children=Children}; + %% The stream is still running. + {ok, StreamID, Children} -> + info(State0#state{children=Children}, StreamID, Msg); + %% The process was unknown. + error -> + cowboy:log(warning, "Received EXIT signal ~p for unknown process ~p.~n", + [Msg, Pid], Opts), + State0 + end, + if + State#state.http2_status =:= closing, State#state.streams =:= #{} -> + terminate(State, {stop, normal, 'The connection is going away.'}); + true -> + State + end. 
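The cowboy_stream:init/3, data/4, info/3, terminate/3 and early_error/5 calls made throughout this module dispatch to the configured stream handler chain. A custom handler is added through the stream_handlers protocol option and usually delegates to the rest of the chain; a minimal pass-through sketch (module name is illustrative, not part of this patch):

    -module(my_passthrough_stream_h).
    -behavior(cowboy_stream).
    -export([init/3, data/4, info/3, terminate/3, early_error/5]).

    %% Each callback simply forwards to the next handler in the chain.
    init(StreamID, Req, Opts) ->
        cowboy_stream:init(StreamID, Req, Opts).
    data(StreamID, IsFin, Data, Next) ->
        cowboy_stream:data(StreamID, IsFin, Data, Next).
    info(StreamID, Info, Next) ->
        cowboy_stream:info(StreamID, Info, Next).
    terminate(StreamID, Reason, Next) ->
        cowboy_stream:terminate(StreamID, Reason, Next).
    early_error(StreamID, Reason, PartialReq, Resp, Opts) ->
        cowboy_stream:early_error(StreamID, Reason, PartialReq, Resp, Opts).

It would be registered with something like stream_handlers => [my_passthrough_stream_h, cowboy_stream_h] in the protocol options, keeping cowboy_stream_h last so requests still reach the regular handlers.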
+ +info(State=#state{opts=Opts, http2_machine=HTTP2Machine, streams=Streams}, StreamID, Msg) -> + case Streams of + #{StreamID := Stream=#stream{state=StreamState0}} -> + try cowboy_stream:info(StreamID, Msg, StreamState0) of + {Commands, StreamState} -> + commands(State#state{streams=Streams#{StreamID => Stream#stream{state=StreamState}}}, + StreamID, Commands) + catch Class:Exception:Stacktrace -> + cowboy:log(cowboy_stream:make_error_log(info, + [StreamID, Msg, StreamState0], + Class, Exception, Stacktrace), Opts), + reset_stream(State, StreamID, {internal_error, {Class, Exception}, + 'Unhandled exception in cowboy_stream:info/3.'}) + end; + _ -> + case cow_http2_machine:is_lingering_stream(StreamID, HTTP2Machine) of + true -> + ok; + false -> + cowboy:log(warning, "Received message ~p for unknown stream ~p.", + [Msg, StreamID], Opts) + end, + State + end. + +%% Stream handler commands. +%% +%% @todo Kill the stream if it tries to send a response, headers, +%% data or push promise when the stream is closed or half-closed. + +commands(State, _, []) -> + State; +%% Error responses are sent only if a response wasn't sent already. +commands(State=#state{http2_machine=HTTP2Machine}, StreamID, + [{error_response, StatusCode, Headers, Body}|Tail]) -> + case cow_http2_machine:get_stream_local_state(StreamID, HTTP2Machine) of + {ok, idle, _} -> + commands(State, StreamID, [{response, StatusCode, Headers, Body}|Tail]); + _ -> + commands(State, StreamID, Tail) + end; +%% Send an informational response. +commands(State0, StreamID, [{inform, StatusCode, Headers}|Tail]) -> + State = send_headers(State0, StreamID, idle, StatusCode, Headers), + commands(State, StreamID, Tail); +%% Send response headers. +commands(State0, StreamID, [{response, StatusCode, Headers, Body}|Tail]) -> + State = send_response(State0, StreamID, StatusCode, Headers, Body), + commands(State, StreamID, Tail); +%% Send response headers. +commands(State0, StreamID, [{headers, StatusCode, Headers}|Tail]) -> + State = send_headers(State0, StreamID, nofin, StatusCode, Headers), + commands(State, StreamID, Tail); +%% Send a response body chunk. +commands(State0, StreamID, [{data, IsFin, Data}|Tail]) -> + State = maybe_send_data(State0, StreamID, IsFin, Data, []), + commands(State, StreamID, Tail); +%% Send trailers. +commands(State0, StreamID, [{trailers, Trailers}|Tail]) -> + State = maybe_send_data(State0, StreamID, fin, {trailers, maps:to_list(Trailers)}, []), + commands(State, StreamID, Tail); +%% Send a push promise. +%% +%% @todo Responses sent as a result of a push_promise request +%% must not send push_promise frames themselves. +%% +%% @todo We should not send push_promise frames when we are +%% in the closing http2_status. +commands(State0=#state{socket=Socket, transport=Transport, http2_machine=HTTP2Machine0}, + StreamID, [{push, Method, Scheme, Host, Port, Path, Qs, Headers0}|Tail]) -> + Authority = case {Scheme, Port} of + {<<"http">>, 80} -> Host; + {<<"https">>, 443} -> Host; + _ -> iolist_to_binary([Host, $:, integer_to_binary(Port)]) + end, + PathWithQs = iolist_to_binary(case Qs of + <<>> -> Path; + _ -> [Path, $?, Qs] + end), + PseudoHeaders = #{ + method => Method, + scheme => Scheme, + authority => Authority, + path => PathWithQs + }, + %% We need to make sure the header value is binary before we can + %% create the Req object, as it expects them to be flat. 
+ Headers = maps:to_list(maps:map(fun(_, V) -> iolist_to_binary(V) end, Headers0)), + State = case cow_http2_machine:prepare_push_promise(StreamID, HTTP2Machine0, + PseudoHeaders, Headers) of + {ok, PromisedStreamID, HeaderBlock, HTTP2Machine} -> + Transport:send(Socket, cow_http2:push_promise( + StreamID, PromisedStreamID, HeaderBlock)), + headers_frame(State0#state{http2_machine=HTTP2Machine}, + PromisedStreamID, fin, Headers, PseudoHeaders, 0); + {error, no_push} -> + State0 + end, + commands(State, StreamID, Tail); +%% Read the request body. +commands(State0=#state{flow=Flow, streams=Streams}, StreamID, [{flow, Size}|Tail]) -> + #{StreamID := Stream=#stream{flow=StreamFlow}} = Streams, + State = update_window(State0#state{flow=Flow + Size, + streams=Streams#{StreamID => Stream#stream{flow=StreamFlow + Size}}}, + StreamID), + commands(State, StreamID, Tail); +%% Supervise a child process. +commands(State=#state{children=Children}, StreamID, [{spawn, Pid, Shutdown}|Tail]) -> + commands(State#state{children=cowboy_children:up(Children, Pid, StreamID, Shutdown)}, + StreamID, Tail); +%% Error handling. +commands(State, StreamID, [Error = {internal_error, _, _}|_Tail]) -> + %% @todo Do we want to run the commands after an internal_error? + %% @todo Do we even allow commands after? + %% @todo Only reset when the stream still exists. + reset_stream(State, StreamID, Error); +%% Upgrade to HTTP/2. This is triggered by cowboy_http2 itself. +commands(State=#state{socket=Socket, transport=Transport, http2_status=upgrade}, + StreamID, [{switch_protocol, Headers, ?MODULE, _}|Tail]) -> + %% @todo This 101 response needs to be passed through stream handlers. + Transport:send(Socket, cow_http:response(101, 'HTTP/1.1', maps:to_list(Headers))), + commands(State, StreamID, Tail); +%% Use a different protocol within the stream (CONNECT :protocol). +%% @todo Make sure we error out when the feature is disabled. +commands(State0, StreamID, [{switch_protocol, Headers, _Mod, _ModState}|Tail]) -> + State = info(State0, StreamID, {headers, 200, Headers}), + commands(State, StreamID, Tail); +%% Set options dynamically. +commands(State, StreamID, [{set_options, _Opts}|Tail]) -> + commands(State, StreamID, Tail); +commands(State, StreamID, [stop|_Tail]) -> + %% @todo Do we want to run the commands after a stop? + %% @todo Do we even allow commands after? + stop_stream(State, StreamID); +%% Log event. +commands(State=#state{opts=Opts}, StreamID, [Log={log, _, _, _}|Tail]) -> + cowboy:log(Log, Opts), + commands(State, StreamID, Tail). + +%% Tentatively update the window after the flow was updated. + +update_window(State=#state{socket=Socket, transport=Transport, + http2_machine=HTTP2Machine0, flow=Flow, streams=Streams}, StreamID) -> + #{StreamID := #stream{flow=StreamFlow}} = Streams, + {Data1, HTTP2Machine2} = case cow_http2_machine:ensure_window(Flow, HTTP2Machine0) of + ok -> {<<>>, HTTP2Machine0}; + {ok, Increment1, HTTP2Machine1} -> {cow_http2:window_update(Increment1), HTTP2Machine1} + end, + {Data2, HTTP2Machine} = case cow_http2_machine:ensure_window(StreamID, StreamFlow, HTTP2Machine2) of + ok -> {<<>>, HTTP2Machine2}; + {ok, Increment2, HTTP2Machine3} -> {cow_http2:window_update(StreamID, Increment2), HTTP2Machine3} + end, + case {Data1, Data2} of + {<<>>, <<>>} -> ok; + _ -> Transport:send(Socket, [Data1, Data2]) + end, + State#state{http2_machine=HTTP2Machine}. + +%% Send the response, trailers or data. 
+ +send_response(State0=#state{http2_machine=HTTP2Machine0}, StreamID, StatusCode, Headers, Body) -> + Size = case Body of + {sendfile, _, Bytes, _} -> Bytes; + _ -> iolist_size(Body) + end, + case Size of + 0 -> + State = send_headers(State0, StreamID, fin, StatusCode, Headers), + maybe_terminate_stream(State, StreamID, fin); + _ -> + %% @todo Add a test for HEAD to make sure we don't send the body when + %% returning {response...} from a stream handler (or {headers...} then {data...}). + {ok, _IsFin, HeaderBlock, HTTP2Machine} + = cow_http2_machine:prepare_headers(StreamID, HTTP2Machine0, nofin, + #{status => cow_http:status_to_integer(StatusCode)}, + headers_to_list(Headers)), + maybe_send_data(State0#state{http2_machine=HTTP2Machine}, StreamID, fin, Body, + [cow_http2:headers(StreamID, nofin, HeaderBlock)]) + end. + +send_headers(State=#state{socket=Socket, transport=Transport, + http2_machine=HTTP2Machine0}, StreamID, IsFin0, StatusCode, Headers) -> + {ok, IsFin, HeaderBlock, HTTP2Machine} + = cow_http2_machine:prepare_headers(StreamID, HTTP2Machine0, IsFin0, + #{status => cow_http:status_to_integer(StatusCode)}, + headers_to_list(Headers)), + Transport:send(Socket, cow_http2:headers(StreamID, IsFin, HeaderBlock)), + State#state{http2_machine=HTTP2Machine}. + +%% The set-cookie header is special; we can only send one cookie per header. +headers_to_list(Headers0=#{<<"set-cookie">> := SetCookies}) -> + Headers = maps:to_list(maps:remove(<<"set-cookie">>, Headers0)), + Headers ++ [{<<"set-cookie">>, Value} || Value <- SetCookies]; +headers_to_list(Headers) -> + maps:to_list(Headers). + +maybe_send_data(State0=#state{socket=Socket, transport=Transport, + http2_machine=HTTP2Machine0}, StreamID, IsFin, Data0, Prefix) -> + Data = case is_tuple(Data0) of + false -> {data, Data0}; + true -> Data0 + end, + case cow_http2_machine:send_or_queue_data(StreamID, HTTP2Machine0, IsFin, Data) of + {ok, HTTP2Machine} -> + %% If we have prefix data (like a HEADERS frame) we need to send it + %% even if we do not send any DATA frames. + case Prefix of + [] -> ok; + _ -> Transport:send(Socket, Prefix) + end, + maybe_send_data_alarm(State0#state{http2_machine=HTTP2Machine}, HTTP2Machine0, StreamID); + {send, SendData, HTTP2Machine} -> + State = #state{http2_status=Status, streams=Streams} + = send_data(State0#state{http2_machine=HTTP2Machine}, SendData, Prefix), + %% Terminate the connection if we are closing and all streams have completed. + if + Status =:= closing, Streams =:= #{} -> + terminate(State, {stop, normal, 'The connection is going away.'}); + true -> + maybe_send_data_alarm(State, HTTP2Machine0, StreamID) + end + end. + +send_data(State0=#state{socket=Socket, transport=Transport, opts=Opts}, SendData, Prefix) -> + {Acc, State} = prepare_data(State0, SendData, [], Prefix), + _ = [case Data of + {sendfile, Offset, Bytes, Path} -> + %% When sendfile is disabled we explicitly use the fallback. + _ = case maps:get(sendfile, Opts, true) of + true -> Transport:sendfile(Socket, Path, Offset, Bytes); + false -> ranch_transport:sendfile(Transport, Socket, Path, Offset, Bytes, []) + end; + _ -> + Transport:send(Socket, Data) + end || Data <- Acc], + State. 
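Response bodies may be given as {sendfile, Offset, Bytes, Path} tuples; send_data/3 above emits the DATA frame header separately and hands the file either to the transport's sendfile or, when the sendfile protocol option is false, to the ranch_transport fallback. A hedged handler-side sketch (the path is a placeholder):

    init(Req0, State) ->
        Path = "priv/static/large.bin",
        Size = filelib:file_size(Path),
        Req = cowboy_req:reply(200,
            #{<<"content-type">> => <<"application/octet-stream">>},
            {sendfile, 0, Size, Path}, Req0),
        {ok, Req, State}.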
+ +prepare_data(State, [], Acc, []) -> + {lists:reverse(Acc), State}; +prepare_data(State, [], Acc, Buffer) -> + {lists:reverse([lists:reverse(Buffer)|Acc]), State}; +prepare_data(State0, [{StreamID, IsFin, SendData}|Tail], Acc0, Buffer0) -> + {Acc, Buffer, State} = prepare_data(State0, StreamID, IsFin, SendData, Acc0, Buffer0), + prepare_data(State, Tail, Acc, Buffer). + +prepare_data(State0, StreamID, IsFin, [], Acc, Buffer) -> + State = maybe_terminate_stream(State0, StreamID, IsFin), + {Acc, Buffer, State}; +prepare_data(State0, StreamID, IsFin, [FrameData|Tail], Acc, Buffer) -> + FrameIsFin = case Tail of + [] -> IsFin; + _ -> nofin + end, + case prepare_data_frame(State0, StreamID, FrameIsFin, FrameData) of + {{MoreData, Sendfile}, State} when is_tuple(Sendfile) -> + case Buffer of + [] -> + prepare_data(State, StreamID, IsFin, Tail, + [Sendfile, MoreData|Acc], []); + _ -> + prepare_data(State, StreamID, IsFin, Tail, + [Sendfile, lists:reverse([MoreData|Buffer])|Acc], []) + end; + {MoreData, State} -> + prepare_data(State, StreamID, IsFin, Tail, + Acc, [MoreData|Buffer]) + end. + +prepare_data_frame(State, StreamID, IsFin, {data, Data}) -> + {cow_http2:data(StreamID, IsFin, Data), + State}; +prepare_data_frame(State, StreamID, IsFin, Sendfile={sendfile, _, Bytes, _}) -> + {{cow_http2:data_header(StreamID, IsFin, Bytes), Sendfile}, + State}; +%% The stream is terminated in cow_http2_machine:prepare_trailers. +prepare_data_frame(State=#state{http2_machine=HTTP2Machine0}, + StreamID, nofin, {trailers, Trailers}) -> + {ok, HeaderBlock, HTTP2Machine} + = cow_http2_machine:prepare_trailers(StreamID, HTTP2Machine0, Trailers), + {cow_http2:headers(StreamID, fin, HeaderBlock), + State#state{http2_machine=HTTP2Machine}}. + +%% After we have sent or queued data we may need to set or clear an alarm. +%% We do this by comparing the HTTP2Machine buffer state before/after for +%% the relevant streams. +maybe_send_data_alarm(State=#state{opts=Opts, http2_machine=HTTP2Machine}, HTTP2Machine0, StreamID) -> + ConnBufferSizeBefore = cow_http2_machine:get_connection_local_buffer_size(HTTP2Machine0), + ConnBufferSizeAfter = cow_http2_machine:get_connection_local_buffer_size(HTTP2Machine), + {ok, StreamBufferSizeBefore} = cow_http2_machine:get_stream_local_buffer_size(StreamID, HTTP2Machine0), + %% When the stream ends up closed after it finished sending data, + %% we do not want to trigger an alarm. We act as if the buffer + %% size did not change. + StreamBufferSizeAfter = case cow_http2_machine:get_stream_local_buffer_size(StreamID, HTTP2Machine) of + {ok, BSA} -> BSA; + {error, closed} -> StreamBufferSizeBefore + end, + MaxConnBufferSize = maps:get(max_connection_buffer_size, Opts, 16000000), + MaxStreamBufferSize = maps:get(max_stream_buffer_size, Opts, 8000000), + %% I do not want to document these internal events yet. I am not yet + %% convinced it should be {alarm, Name, on|off} and not {internal_event, E} + %% or something else entirely. Though alarms are probably right. 
+ if + ConnBufferSizeBefore >= MaxConnBufferSize, ConnBufferSizeAfter < MaxConnBufferSize -> + connection_alarm(State, connection_buffer_full, off); + ConnBufferSizeBefore < MaxConnBufferSize, ConnBufferSizeAfter >= MaxConnBufferSize -> + connection_alarm(State, connection_buffer_full, on); + StreamBufferSizeBefore >= MaxStreamBufferSize, StreamBufferSizeAfter < MaxStreamBufferSize -> + stream_alarm(State, StreamID, stream_buffer_full, off); + StreamBufferSizeBefore < MaxStreamBufferSize, StreamBufferSizeAfter >= MaxStreamBufferSize -> + stream_alarm(State, StreamID, stream_buffer_full, on); + true -> + State + end. + +connection_alarm(State0=#state{streams=Streams}, Name, Value) -> + lists:foldl(fun(StreamID, State) -> + stream_alarm(State, StreamID, Name, Value) + end, State0, maps:keys(Streams)). + +stream_alarm(State, StreamID, Name, Value) -> + info(State, StreamID, {alarm, Name, Value}). + +%% Terminate a stream or the connection. + +%% We may have to cancel streams even if we receive multiple +%% GOAWAY frames as the LastStreamID value may be lower than +%% the one previously received. +goaway(State0=#state{socket=Socket, transport=Transport, http2_machine=HTTP2Machine0, + http2_status=Status, streams=Streams0}, {goaway, LastStreamID, Reason, _}) + when Status =:= connected; Status =:= closing_initiated; Status =:= closing -> + Streams = goaway_streams(State0, maps:to_list(Streams0), LastStreamID, + {stop, {goaway, Reason}, 'The connection is going away.'}, []), + State = State0#state{streams=maps:from_list(Streams)}, + if + Status =:= connected; Status =:= closing_initiated -> + {OurLastStreamID, HTTP2Machine} = + cow_http2_machine:set_last_streamid(HTTP2Machine0), + Transport:send(Socket, cow_http2:goaway( + OurLastStreamID, no_error, <<>>)), + State#state{http2_status=closing, + http2_machine=HTTP2Machine}; + true -> + State + end; +%% We terminate the connection immediately if it hasn't fully been initialized. +goaway(State, {goaway, _, Reason, _}) -> + terminate(State, {stop, {goaway, Reason}, 'The connection is going away.'}). + +%% Cancel client-initiated streams that are above LastStreamID. +goaway_streams(_, [], _, _, Acc) -> + Acc; +goaway_streams(State, [{StreamID, #stream{state=StreamState}}|Tail], LastStreamID, Reason, Acc) + when StreamID > LastStreamID, (StreamID rem 2) =:= 0 -> + terminate_stream_handler(State, StreamID, Reason, StreamState), + goaway_streams(State, Tail, LastStreamID, Reason, Acc); +goaway_streams(State, [Stream|Tail], LastStreamID, Reason, Acc) -> + goaway_streams(State, Tail, LastStreamID, Reason, [Stream|Acc]). + +%% A server that is attempting to gracefully shut down a connection SHOULD send +%% an initial GOAWAY frame with the last stream identifier set to 2^31-1 and a +%% NO_ERROR code. This signals to the client that a shutdown is imminent and +%% that initiating further requests is prohibited. After allowing time for any +%% in-flight stream creation (at least one round-trip time), the server can send +%% another GOAWAY frame with an updated last stream identifier. This ensures +%% that a connection can be cleanly shut down without losing requests. +-spec initiate_closing(#state{}, _) -> #state{}. 
+initiate_closing(State=#state{http2_status=connected, socket=Socket, + transport=Transport, opts=Opts}, Reason) -> + Transport:send(Socket, cow_http2:goaway(16#7fffffff, no_error, <<>>)), + Timeout = maps:get(goaway_initial_timeout, Opts, 1000), + Message = {goaway_initial_timeout, Reason}, + set_timeout(State#state{http2_status=closing_initiated}, Timeout, Message); +initiate_closing(State=#state{http2_status=Status}, _Reason) + when Status =:= closing_initiated; Status =:= closing -> + %% This happens if sys:terminate/2,3 is called twice or if the supervisor + %% tells us to shutdown after sys:terminate/2,3 is called or vice versa. + State; +initiate_closing(State, Reason) -> + terminate(State, {stop, stop_reason(Reason), 'The connection is going away.'}). + +%% Switch to 'closing' state and stop accepting new streams. +-spec closing(#state{}, Reason :: term()) -> #state{}. +closing(State=#state{streams=Streams}, Reason) when Streams =:= #{} -> + terminate(State, Reason); +closing(State=#state{http2_status=closing_initiated, + http2_machine=HTTP2Machine0, socket=Socket, transport=Transport}, + Reason) -> + %% Stop accepting new streams. + {LastStreamID, HTTP2Machine} = + cow_http2_machine:set_last_streamid(HTTP2Machine0), + Transport:send(Socket, cow_http2:goaway(LastStreamID, no_error, <<>>)), + closing(State#state{http2_status=closing, http2_machine=HTTP2Machine}, Reason); +closing(State=#state{http2_status=closing, opts=Opts}, Reason) -> + %% If client sent GOAWAY, we may already be in 'closing' but without the + %% goaway complete timeout set. + Timeout = maps:get(goaway_complete_timeout, Opts, 3000), + Message = {goaway_complete_timeout, Reason}, + set_timeout(State, Timeout, Message). + +stop_reason({stop, Reason, _}) -> Reason; +stop_reason(Reason) -> Reason. + +-spec terminate(#state{}, _) -> no_return(). +terminate(undefined, Reason) -> + exit({shutdown, Reason}); +terminate(State=#state{socket=Socket, transport=Transport, http2_status=Status, + http2_machine=HTTP2Machine, streams=Streams, children=Children}, Reason) + when Status =:= connected; Status =:= closing_initiated; Status =:= closing -> + %% @todo We might want to optionally send the Reason value + %% as debug data in the GOAWAY frame here. Perhaps more. + if + Status =:= connected; Status =:= closing_initiated -> + Transport:send(Socket, cow_http2:goaway( + cow_http2_machine:get_last_streamid(HTTP2Machine), + terminate_reason(Reason), <<>>)); + %% We already sent the GOAWAY frame. + Status =:= closing -> + ok + end, + terminate_all_streams(State, maps:to_list(Streams), Reason), + cowboy_children:terminate(Children), + terminate_linger(State), + exit({shutdown, Reason}); +terminate(#state{socket=Socket, transport=Transport}, Reason) -> + Transport:close(Socket), + exit({shutdown, Reason}). + +terminate_reason({connection_error, Reason, _}) -> Reason; +terminate_reason({stop, _, _}) -> no_error; +terminate_reason({socket_error, _, _}) -> internal_error; +terminate_reason({internal_error, _, _}) -> internal_error. + +terminate_all_streams(_, [], _) -> + ok; +terminate_all_streams(State, [{StreamID, #stream{state=StreamState}}|Tail], Reason) -> + terminate_stream_handler(State, StreamID, Reason, StreamState), + terminate_all_streams(State, Tail, Reason). + +%% This code is copied from cowboy_http. 
+terminate_linger(State=#state{socket=Socket, transport=Transport, opts=Opts}) -> + case Transport:shutdown(Socket, write) of + ok -> + case maps:get(linger_timeout, Opts, 1000) of + 0 -> + ok; + infinity -> + terminate_linger_before_loop(State, undefined, Transport:messages()); + Timeout -> + TimerRef = erlang:start_timer(Timeout, self(), linger_timeout), + terminate_linger_before_loop(State, TimerRef, Transport:messages()) + end; + {error, _} -> + ok + end. + +terminate_linger_before_loop(State, TimerRef, Messages) -> + %% We may already be in active mode when we do this + %% but it's OK because we are shutting down anyway. + case setopts_active(State) of + ok -> + terminate_linger_loop(State, TimerRef, Messages); + {error, _} -> + ok + end. + +terminate_linger_loop(State=#state{socket=Socket}, TimerRef, Messages) -> + receive + {OK, Socket, _} when OK =:= element(1, Messages) -> + terminate_linger_loop(State, TimerRef, Messages); + {Closed, Socket} when Closed =:= element(2, Messages) -> + ok; + {Error, Socket, _} when Error =:= element(3, Messages) -> + ok; + {Passive, Socket} when Passive =:= tcp_passive; Passive =:= ssl_passive -> + terminate_linger_before_loop(State, TimerRef, Messages); + {timeout, TimerRef, linger_timeout} -> + ok; + _ -> + terminate_linger_loop(State, TimerRef, Messages) + end. + +%% @todo Don't send an RST_STREAM if one was already sent. +reset_stream(State0=#state{socket=Socket, transport=Transport, + http2_machine=HTTP2Machine0}, StreamID, Error) -> + Reason = case Error of + {internal_error, _, _} -> internal_error; + {stream_error, Reason0, _} -> Reason0 + end, + Transport:send(Socket, cow_http2:rst_stream(StreamID, Reason)), + State1 = case cow_http2_machine:reset_stream(StreamID, HTTP2Machine0) of + {ok, HTTP2Machine} -> + terminate_stream(State0#state{http2_machine=HTTP2Machine}, StreamID, Error); + {error, not_found} -> + terminate_stream(State0, StreamID, Error) + end, + case reset_rate(State1) of + {ok, State} -> + State; + error -> + terminate(State1, {connection_error, enhance_your_calm, + 'Stream reset rate larger than configuration allows. Flood? (CVE-2019-9514)'}) + end. + +reset_rate(State0=#state{reset_rate_num=Num0, reset_rate_time=Time}) -> + case Num0 - 1 of + 0 -> + CurrentTime = erlang:monotonic_time(millisecond), + if + CurrentTime < Time -> + error; + true -> + %% When the option has a period of infinity we cannot reach this clause. + {ok, init_reset_rate_limiting(State0, CurrentTime)} + end; + Num -> + {ok, State0#state{reset_rate_num=Num}} + end. + +stop_stream(State=#state{http2_machine=HTTP2Machine}, StreamID) -> + case cow_http2_machine:get_stream_local_state(StreamID, HTTP2Machine) of + %% When the stream terminates normally (without sending RST_STREAM) + %% and no response was sent, we need to send a proper response back to the client. + %% We delay the termination of the stream until the response is fully sent. + {ok, idle, _} -> + info(stopping(State, StreamID), StreamID, {response, 204, #{}, <<>>}); + %% When a response was sent but not terminated, we need to close the stream. + %% We delay the termination of the stream until the response is fully sent. + {ok, nofin, fin} -> + stopping(State, StreamID); + %% We only send a final DATA frame if there isn't one queued yet. + {ok, nofin, _} -> + info(stopping(State, StreamID), StreamID, {data, fin, <<>>}); + %% When a response was sent fully we can terminate the stream, + %% regardless of the stream being in half-closed or closed state. 
+ _ -> + terminate_stream(State, StreamID) + end. + +stopping(State=#state{streams=Streams}, StreamID) -> + #{StreamID := Stream} = Streams, + State#state{streams=Streams#{StreamID => Stream#stream{status=stopping}}}. + +%% If we finished sending data and the stream is stopping, terminate it. +maybe_terminate_stream(State=#state{streams=Streams}, StreamID, fin) -> + case Streams of + #{StreamID := #stream{status=stopping}} -> + terminate_stream(State, StreamID); + _ -> + State + end; +maybe_terminate_stream(State, _, _) -> + State. + +%% When the stream stops normally without reading the request +%% body fully we need to tell the client to stop sending it. +%% We do this by sending an RST_STREAM with reason NO_ERROR. (RFC7540 8.1.0) +terminate_stream(State0=#state{socket=Socket, transport=Transport, + http2_machine=HTTP2Machine0}, StreamID) -> + State = case cow_http2_machine:get_stream_local_state(StreamID, HTTP2Machine0) of + {ok, fin, _} -> + Transport:send(Socket, cow_http2:rst_stream(StreamID, no_error)), + {ok, HTTP2Machine} = cow_http2_machine:reset_stream(StreamID, HTTP2Machine0), + State0#state{http2_machine=HTTP2Machine}; + {error, closed} -> + State0 + end, + terminate_stream(State, StreamID, normal). + +%% We remove the stream flow from the connection flow. Any further +%% data received for this stream is therefore fully contained within +%% the extra window we allocated for this stream. +terminate_stream(State=#state{flow=Flow, streams=Streams0, children=Children0}, StreamID, Reason) -> + case maps:take(StreamID, Streams0) of + {#stream{flow=StreamFlow, state=StreamState}, Streams} -> + terminate_stream_handler(State, StreamID, Reason, StreamState), + Children = cowboy_children:shutdown(Children0, StreamID), + State#state{flow=Flow - StreamFlow, streams=Streams, children=Children}; + error -> + State + end. + +terminate_stream_handler(#state{opts=Opts}, StreamID, Reason, StreamState) -> + try + cowboy_stream:terminate(StreamID, Reason, StreamState) + catch Class:Exception:Stacktrace -> + cowboy:log(cowboy_stream:make_error_log(terminate, + [StreamID, Reason, StreamState], + Class, Exception, Stacktrace), Opts) + end. + +%% System callbacks. + +-spec system_continue(_, _, {#state{}, binary()}) -> ok. +system_continue(_, _, {State, Buffer}) -> + loop(State, Buffer). + +-spec system_terminate(any(), _, _, {#state{}, binary()}) -> no_return(). +system_terminate(Reason0, _, _, {State, Buffer}) -> + Reason = {stop, {exit, Reason0}, 'sys:terminate/2,3 was called.'}, + loop(initiate_closing(State, Reason), Buffer). + +-spec system_code_change(Misc, _, _, _) -> {ok, Misc} when Misc::{#state{}, binary()}. +system_code_change(Misc, _, _, _) -> + {ok, Misc}. diff --git a/deps/cowboy/src/cowboy_loop.erl b/deps/cowboy/src/cowboy_loop.erl new file mode 100644 index 0000000..21eb96e --- /dev/null +++ b/deps/cowboy/src/cowboy_loop.erl @@ -0,0 +1,108 @@ +%% Copyright (c) 2011-2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_loop). +-behaviour(cowboy_sub_protocol). + +-export([upgrade/4]). +-export([upgrade/5]). +-export([loop/4]). + +-export([system_continue/3]). +-export([system_terminate/4]). +-export([system_code_change/4]). + +-callback init(Req, any()) + -> {ok | module(), Req, any()} + | {module(), Req, any(), any()} + when Req::cowboy_req:req(). + +-callback info(any(), Req, State) + -> {ok, Req, State} + | {ok, Req, State, hibernate} + | {stop, Req, State} + when Req::cowboy_req:req(), State::any(). + +-callback terminate(any(), cowboy_req:req(), any()) -> ok. +-optional_callbacks([terminate/3]). + +-spec upgrade(Req, Env, module(), any()) + -> {ok, Req, Env} | {suspend, ?MODULE, loop, [any()]} + when Req::cowboy_req:req(), Env::cowboy_middleware:env(). +upgrade(Req, Env, Handler, HandlerState) -> + loop(Req, Env, Handler, HandlerState). + +-spec upgrade(Req, Env, module(), any(), hibernate) + -> {suspend, ?MODULE, loop, [any()]} + when Req::cowboy_req:req(), Env::cowboy_middleware:env(). +upgrade(Req, Env, Handler, HandlerState, hibernate) -> + suspend(Req, Env, Handler, HandlerState). + +-spec loop(Req, Env, module(), any()) + -> {ok, Req, Env} | {suspend, ?MODULE, loop, [any()]} + when Req::cowboy_req:req(), Env::cowboy_middleware:env(). +%% @todo Handle system messages. +loop(Req=#{pid := Parent}, Env, Handler, HandlerState) -> + receive + %% System messages. + {'EXIT', Parent, Reason} -> + terminate(Req, Env, Handler, HandlerState, Reason); + {system, From, Request} -> + sys:handle_system_msg(Request, From, Parent, ?MODULE, [], + {Req, Env, Handler, HandlerState}); + %% Calls from supervisor module. + {'$gen_call', From, Call} -> + cowboy_children:handle_supervisor_call(Call, From, [], ?MODULE), + loop(Req, Env, Handler, HandlerState); + Message -> + call(Req, Env, Handler, HandlerState, Message) + end. + +call(Req0, Env, Handler, HandlerState0, Message) -> + try Handler:info(Message, Req0, HandlerState0) of + {ok, Req, HandlerState} -> + loop(Req, Env, Handler, HandlerState); + {ok, Req, HandlerState, hibernate} -> + suspend(Req, Env, Handler, HandlerState); + {stop, Req, HandlerState} -> + terminate(Req, Env, Handler, HandlerState, stop) + catch Class:Reason:Stacktrace -> + cowboy_handler:terminate({crash, Class, Reason}, Req0, HandlerState0, Handler), + erlang:raise(Class, Reason, Stacktrace) + end. + +suspend(Req, Env, Handler, HandlerState) -> + {suspend, ?MODULE, loop, [Req, Env, Handler, HandlerState]}. + +terminate(Req, Env, Handler, HandlerState, Reason) -> + Result = cowboy_handler:terminate(Reason, Req, HandlerState, Handler), + {ok, Req, Env#{result => Result}}. + +%% System callbacks. + +-spec system_continue(_, _, {Req, Env, module(), any()}) + -> {ok, Req, Env} | {suspend, ?MODULE, loop, [any()]} + when Req::cowboy_req:req(), Env::cowboy_middleware:env(). +system_continue(_, _, {Req, Env, Handler, HandlerState}) -> + loop(Req, Env, Handler, HandlerState). + +-spec system_terminate(any(), _, _, {Req, Env, module(), any()}) + -> {ok, Req, Env} when Req::cowboy_req:req(), Env::cowboy_middleware:env(). +system_terminate(Reason, _, _, {Req, Env, Handler, HandlerState}) -> + terminate(Req, Env, Handler, HandlerState, Reason). 
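The init/2 and info/3 callbacks declared above are all a loop handler has to implement. As a rough sketch (hypothetical module name and message shape, not part of this patch), a handler that parks the request and replies once an Erlang message arrives could look like:

-module(my_loop_handler).   %% illustrative name
-behaviour(cowboy_loop).

-export([init/2]).
-export([info/3]).

init(Req, State) ->
    %% Returning the cowboy_loop module switches to this sub-protocol;
    %% the process then sits in loop/4 above, forwarding messages to info/3.
    {cowboy_loop, Req, State}.

info({reply, Body}, Req0, State) ->
    %% Reply and stop looping.
    Req = cowboy_req:reply(200, #{<<"content-type">> => <<"text/plain">>}, Body, Req0),
    {stop, Req, State};
info(_Other, Req, State) ->
    {ok, Req, State}.

Returning {cowboy_loop, Req, State, hibernate} from init/2 would instead go through the upgrade/5 clause above and hibernate the request process between messages.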
+ +-spec system_code_change(Misc, _, _, _) -> {ok, Misc} + when Misc::{cowboy_req:req(), cowboy_middleware:env(), module(), any()}. +system_code_change(Misc, _, _, _) -> + {ok, Misc}. diff --git a/deps/cowboy/src/cowboy_metrics_h.erl b/deps/cowboy/src/cowboy_metrics_h.erl new file mode 100644 index 0000000..4107aac --- /dev/null +++ b/deps/cowboy/src/cowboy_metrics_h.erl @@ -0,0 +1,331 @@ +%% Copyright (c) 2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_metrics_h). +-behavior(cowboy_stream). + +-export([init/3]). +-export([data/4]). +-export([info/3]). +-export([terminate/3]). +-export([early_error/5]). + +-type proc_metrics() :: #{pid() => #{ + %% Time at which the process spawned. + spawn := integer(), + + %% Time at which the process exited. + exit => integer(), + + %% Reason for the process exit. + reason => any() +}}. + +-type informational_metrics() :: #{ + %% Informational response status. + status := cowboy:http_status(), + + %% Headers sent with the informational response. + headers := cowboy:http_headers(), + + %% Time when the informational response was sent. + time := integer() +}. + +-type metrics() :: #{ + %% The identifier for this listener. + ref := ranch:ref(), + + %% The pid for this connection. + pid := pid(), + + %% The streamid also indicates the total number of requests on + %% this connection (StreamID div 2 + 1). + streamid := cowboy_stream:streamid(), + + %% The terminate reason is always useful. + reason := cowboy_stream:reason(), + + %% A filtered Req object or a partial Req object + %% depending on how far the request got to. + req => cowboy_req:req(), + partial_req => cowboy_stream:partial_req(), + + %% Response status. + resp_status := cowboy:http_status(), + + %% Filtered response headers. + resp_headers := cowboy:http_headers(), + + %% Start/end of the processing of the request. + %% + %% This represents the time from this stream handler's init + %% to terminate. + req_start => integer(), + req_end => integer(), + + %% Start/end of the receiving of the request body. + %% Begins when the first packet has been received. + req_body_start => integer(), + req_body_end => integer(), + + %% Start/end of the sending of the response. + %% Begins when we send the headers and ends on the final + %% packet of the response body. If everything is sent at + %% once these values are identical. + resp_start => integer(), + resp_end => integer(), + + %% For early errors all we get is the time we received it. + early_error_time => integer(), + + %% Start/end of spawned processes. This is where most of + %% the user code lies, excluding stream handlers. On a + %% default Cowboy configuration there should be only one + %% process: the request process. + procs => proc_metrics(), + + %% Informational responses sent before the final response. 
+ informational => [informational_metrics()], + + %% Length of the request and response bodies. This does + %% not include the framing. + req_body_length => non_neg_integer(), + resp_body_length => non_neg_integer(), + + %% Additional metadata set by the user. + user_data => map() +}. +-export_type([metrics/0]). + +-type metrics_callback() :: fun((metrics()) -> any()). +-export_type([metrics_callback/0]). + +-record(state, { + next :: any(), + callback :: fun((metrics()) -> any()), + resp_headers_filter :: undefined | fun((cowboy:http_headers()) -> cowboy:http_headers()), + req :: map(), + resp_status :: undefined | cowboy:http_status(), + resp_headers :: undefined | cowboy:http_headers(), + ref :: ranch:ref(), + req_start :: integer(), + req_end :: undefined | integer(), + req_body_start :: undefined | integer(), + req_body_end :: undefined | integer(), + resp_start :: undefined | integer(), + resp_end :: undefined | integer(), + procs = #{} :: proc_metrics(), + informational = [] :: [informational_metrics()], + req_body_length = 0 :: non_neg_integer(), + resp_body_length = 0 :: non_neg_integer(), + user_data = #{} :: map() +}). + +-spec init(cowboy_stream:streamid(), cowboy_req:req(), cowboy:opts()) + -> {[{spawn, pid(), timeout()}], #state{}}. +init(StreamID, Req=#{ref := Ref}, Opts=#{metrics_callback := Fun}) -> + ReqStart = erlang:monotonic_time(), + {Commands, Next} = cowboy_stream:init(StreamID, Req, Opts), + FilteredReq = case maps:get(metrics_req_filter, Opts, undefined) of + undefined -> Req; + ReqFilter -> ReqFilter(Req) + end, + RespHeadersFilter = maps:get(metrics_resp_headers_filter, Opts, undefined), + {Commands, fold(Commands, #state{ + next=Next, + callback=Fun, + resp_headers_filter=RespHeadersFilter, + req=FilteredReq, + ref=Ref, + req_start=ReqStart + })}. + +-spec data(cowboy_stream:streamid(), cowboy_stream:fin(), cowboy_req:resp_body(), State) + -> {cowboy_stream:commands(), State} when State::#state{}. +data(StreamID, IsFin=fin, Data, State=#state{req_body_start=undefined}) -> + ReqBody = erlang:monotonic_time(), + do_data(StreamID, IsFin, Data, State#state{ + req_body_start=ReqBody, + req_body_end=ReqBody, + req_body_length=byte_size(Data) + }); +data(StreamID, IsFin=fin, Data, State=#state{req_body_length=ReqBodyLen}) -> + ReqBodyEnd = erlang:monotonic_time(), + do_data(StreamID, IsFin, Data, State#state{ + req_body_end=ReqBodyEnd, + req_body_length=ReqBodyLen + byte_size(Data) + }); +data(StreamID, IsFin, Data, State=#state{req_body_start=undefined}) -> + ReqBodyStart = erlang:monotonic_time(), + do_data(StreamID, IsFin, Data, State#state{ + req_body_start=ReqBodyStart, + req_body_length=byte_size(Data) + }); +data(StreamID, IsFin, Data, State=#state{req_body_length=ReqBodyLen}) -> + do_data(StreamID, IsFin, Data, State#state{ + req_body_length=ReqBodyLen + byte_size(Data) + }). + +do_data(StreamID, IsFin, Data, State0=#state{next=Next0}) -> + {Commands, Next} = cowboy_stream:data(StreamID, IsFin, Data, Next0), + {Commands, fold(Commands, State0#state{next=Next})}. + +-spec info(cowboy_stream:streamid(), any(), State) + -> {cowboy_stream:commands(), State} when State::#state{}. +info(StreamID, Info={'EXIT', Pid, Reason}, State0=#state{procs=Procs}) -> + ProcEnd = erlang:monotonic_time(), + P = maps:get(Pid, Procs), + State = State0#state{procs=Procs#{Pid => P#{ + exit => ProcEnd, + reason => Reason + }}}, + do_info(StreamID, Info, State); +info(StreamID, Info, State) -> + do_info(StreamID, Info, State). 
+ +do_info(StreamID, Info, State0=#state{next=Next0}) -> + {Commands, Next} = cowboy_stream:info(StreamID, Info, Next0), + {Commands, fold(Commands, State0#state{next=Next})}. + +fold([], State) -> + State; +fold([{spawn, Pid, _}|Tail], State0=#state{procs=Procs}) -> + ProcStart = erlang:monotonic_time(), + State = State0#state{procs=Procs#{Pid => #{spawn => ProcStart}}}, + fold(Tail, State); +fold([{inform, Status, Headers}|Tail], + State=#state{informational=Infos}) -> + Time = erlang:monotonic_time(), + fold(Tail, State#state{informational=[#{ + status => Status, + headers => Headers, + time => Time + }|Infos]}); +fold([{response, Status, Headers, Body}|Tail], + State=#state{resp_headers_filter=RespHeadersFilter}) -> + Resp = erlang:monotonic_time(), + fold(Tail, State#state{ + resp_status=Status, + resp_headers=case RespHeadersFilter of + undefined -> Headers; + _ -> RespHeadersFilter(Headers) + end, + resp_start=Resp, + resp_end=Resp, + resp_body_length=resp_body_length(Body) + }); +fold([{error_response, Status, Headers, Body}|Tail], + State=#state{resp_status=RespStatus}) -> + %% The error_response command only results in a response + %% if no response was sent before. + case RespStatus of + undefined -> + fold([{response, Status, Headers, Body}|Tail], State); + _ -> + fold(Tail, State) + end; +fold([{headers, Status, Headers}|Tail], + State=#state{resp_headers_filter=RespHeadersFilter}) -> + RespStart = erlang:monotonic_time(), + fold(Tail, State#state{ + resp_status=Status, + resp_headers=case RespHeadersFilter of + undefined -> Headers; + _ -> RespHeadersFilter(Headers) + end, + resp_start=RespStart + }); +%% @todo It might be worthwhile to keep the sendfile information around, +%% especially if these frames ultimately result in a sendfile syscall. +fold([{data, nofin, Data}|Tail], State=#state{resp_body_length=RespBodyLen}) -> + fold(Tail, State#state{ + resp_body_length=RespBodyLen + resp_body_length(Data) + }); +fold([{data, fin, Data}|Tail], State=#state{resp_body_length=RespBodyLen}) -> + RespEnd = erlang:monotonic_time(), + fold(Tail, State#state{ + resp_end=RespEnd, + resp_body_length=RespBodyLen + resp_body_length(Data) + }); +fold([{set_options, SetOpts}|Tail], State0=#state{user_data=OldUserData}) -> + State = case SetOpts of + #{metrics_user_data := NewUserData} -> + State0#state{user_data=maps:merge(OldUserData, NewUserData)}; + _ -> + State0 + end, + fold(Tail, State); +fold([_|Tail], State) -> + fold(Tail, State). + +-spec terminate(cowboy_stream:streamid(), cowboy_stream:reason(), #state{}) -> any(). +terminate(StreamID, Reason, #state{next=Next, callback=Fun, + req=Req, resp_status=RespStatus, resp_headers=RespHeaders, ref=Ref, + req_start=ReqStart, req_body_start=ReqBodyStart, + req_body_end=ReqBodyEnd, resp_start=RespStart, resp_end=RespEnd, + procs=Procs, informational=Infos, user_data=UserData, + req_body_length=ReqBodyLen, resp_body_length=RespBodyLen}) -> + Res = cowboy_stream:terminate(StreamID, Reason, Next), + ReqEnd = erlang:monotonic_time(), + Metrics = #{ + ref => Ref, + pid => self(), + streamid => StreamID, + reason => Reason, + req => Req, + resp_status => RespStatus, + resp_headers => RespHeaders, + req_start => ReqStart, + req_end => ReqEnd, + req_body_start => ReqBodyStart, + req_body_end => ReqBodyEnd, + resp_start => RespStart, + resp_end => RespEnd, + procs => Procs, + informational => lists:reverse(Infos), + req_body_length => ReqBodyLen, + resp_body_length => RespBodyLen, + user_data => UserData + }, + Fun(Metrics), + Res. 
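terminate/3 above only assembles the finished map and hands it to the metrics_callback found in the protocol options, so the consumer is entirely user-defined. A rough sketch of such a callback (module name, listener options and log format are illustrative, not part of this patch):

%% Hypothetical callback: log status and duration for each request.
log_metrics(#{req_start := Start, req_end := End,
        resp_status := Status, reason := Reason}) ->
    DurationMs = erlang:convert_time_unit(End - Start, native, millisecond),
    logger:info("status=~p duration_ms=~p reason=~p", [Status, DurationMs, Reason]);
log_metrics(_Metrics) ->
    %% Early errors carry a smaller map (no req_start/req_end), see below.
    ok.

%% Wiring it up when starting a listener, assuming standard Cowboy 2.x options:
%% cowboy:start_clear(example_http, [{port, 8080}], #{
%%     env => #{dispatch => Dispatch},
%%     stream_handlers => [cowboy_metrics_h, cowboy_stream_h],
%%     metrics_callback => fun example_metrics:log_metrics/1
%% }).

The req_start/req_end values are erlang:monotonic_time/0 readings in native units, hence the convert_time_unit/3 call.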
+ +-spec early_error(cowboy_stream:streamid(), cowboy_stream:reason(), + cowboy_stream:partial_req(), Resp, cowboy:opts()) -> Resp + when Resp::cowboy_stream:resp_command(). +early_error(StreamID, Reason, PartialReq=#{ref := Ref}, Resp0, Opts=#{metrics_callback := Fun}) -> + Time = erlang:monotonic_time(), + Resp = {response, RespStatus, RespHeaders, RespBody} + = cowboy_stream:early_error(StreamID, Reason, PartialReq, Resp0, Opts), + %% As far as metrics go we are limited in what we can provide + %% in this case. + Metrics = #{ + ref => Ref, + pid => self(), + streamid => StreamID, + reason => Reason, + partial_req => PartialReq, + resp_status => RespStatus, + resp_headers => RespHeaders, + early_error_time => Time, + resp_body_length => resp_body_length(RespBody) + }, + Fun(Metrics), + Resp. + +resp_body_length({sendfile, _, Len, _}) -> + Len; +resp_body_length(Data) -> + iolist_size(Data). diff --git a/deps/cowboy/src/cowboy_middleware.erl b/deps/cowboy/src/cowboy_middleware.erl new file mode 100644 index 0000000..9a739f1 --- /dev/null +++ b/deps/cowboy/src/cowboy_middleware.erl @@ -0,0 +1,24 @@ +%% Copyright (c) 2013-2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_middleware). + +-type env() :: #{atom() => any()}. +-export_type([env/0]). + +-callback execute(Req, Env) + -> {ok, Req, Env} + | {suspend, module(), atom(), [any()]} + | {stop, Req} + when Req::cowboy_req:req(), Env::env(). diff --git a/deps/cowboy/src/cowboy_req.erl b/deps/cowboy/src/cowboy_req.erl new file mode 100644 index 0000000..90c5a3a --- /dev/null +++ b/deps/cowboy/src/cowboy_req.erl @@ -0,0 +1,1016 @@ +%% Copyright (c) 2011-2017, Loรฏc Hoguin +%% Copyright (c) 2011, Anthony Ramine +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_req). + +%% Request. +-export([method/1]). +-export([version/1]). +-export([peer/1]). +-export([sock/1]). +-export([cert/1]). +-export([scheme/1]). +-export([host/1]). +-export([host_info/1]). +-export([port/1]). +-export([path/1]). +-export([path_info/1]). +-export([qs/1]). +-export([parse_qs/1]). +-export([match_qs/2]). +-export([uri/1]). +-export([uri/2]). 
+-export([binding/2]). +-export([binding/3]). +-export([bindings/1]). +-export([header/2]). +-export([header/3]). +-export([headers/1]). +-export([parse_header/2]). +-export([parse_header/3]). +-export([filter_cookies/2]). +-export([parse_cookies/1]). +-export([match_cookies/2]). + +%% Request body. +-export([has_body/1]). +-export([body_length/1]). +-export([read_body/1]). +-export([read_body/2]). +-export([read_urlencoded_body/1]). +-export([read_urlencoded_body/2]). +-export([read_and_match_urlencoded_body/2]). +-export([read_and_match_urlencoded_body/3]). + +%% Multipart. +-export([read_part/1]). +-export([read_part/2]). +-export([read_part_body/1]). +-export([read_part_body/2]). + +%% Response. +-export([set_resp_cookie/3]). +-export([set_resp_cookie/4]). +-export([resp_header/2]). +-export([resp_header/3]). +-export([resp_headers/1]). +-export([set_resp_header/3]). +-export([set_resp_headers/2]). +-export([has_resp_header/2]). +-export([delete_resp_header/2]). +-export([set_resp_body/2]). +%% @todo set_resp_body/3 with a ContentType or even Headers argument, to set content headers. +-export([has_resp_body/1]). +-export([inform/2]). +-export([inform/3]). +-export([reply/2]). +-export([reply/3]). +-export([reply/4]). +-export([stream_reply/2]). +-export([stream_reply/3]). +%% @todo stream_body/2 (nofin) +-export([stream_body/3]). +%% @todo stream_events/2 (nofin) +-export([stream_events/3]). +-export([stream_trailers/2]). +-export([push/3]). +-export([push/4]). + +%% Stream handlers. +-export([cast/2]). + +%% Internal. +-export([response_headers/2]). + +-type read_body_opts() :: #{ + length => non_neg_integer() | infinity, + period => non_neg_integer(), + timeout => timeout() +}. +-export_type([read_body_opts/0]). + +%% While sendfile allows a Len of 0 that means "everything past Offset", +%% Cowboy expects the real length as it is used as metadata. +-type resp_body() :: iodata() + | {sendfile, non_neg_integer(), non_neg_integer(), file:name_all()}. +-export_type([resp_body/0]). + +-type push_opts() :: #{ + method => binary(), + scheme => binary(), + host => binary(), + port => inet:port_number(), + qs => binary() +}. +-export_type([push_opts/0]). + +-type req() :: #{ + %% Public interface. + method := binary(), + version := cowboy:http_version() | atom(), + scheme := binary(), + host := binary(), + port := inet:port_number(), + path := binary(), + qs := binary(), + headers := cowboy:http_headers(), + peer := {inet:ip_address(), inet:port_number()}, + sock := {inet:ip_address(), inet:port_number()}, + cert := binary() | undefined, + + %% Private interface. + ref := ranch:ref(), + pid := pid(), + streamid := cowboy_stream:streamid(), + + host_info => cowboy_router:tokens(), + path_info => cowboy_router:tokens(), + bindings => cowboy_router:bindings(), + + has_body := boolean(), + body_length := non_neg_integer() | undefined, + has_read_body => true, + multipart => {binary(), binary()} | done, + + has_sent_resp => headers | true, + resp_cookies => #{iodata() => iodata()}, + resp_headers => #{binary() => iodata()}, + resp_body => resp_body(), + + proxy_header => ranch_proxy_header:proxy_info(), + media_type => {binary(), binary(), [{binary(), binary()}]}, + language => binary() | undefined, + charset => binary() | undefined, + range => {binary(), binary() + | [{non_neg_integer(), non_neg_integer() | infinity} | neg_integer()]}, + websocket_version => 7 | 8 | 13, + + %% The user is encouraged to use the Req to store information + %% when no better solution is available. + _ => _ +}. 
+-export_type([req/0]). + +%% Request. + +-spec method(req()) -> binary(). +method(#{method := Method}) -> + Method. + +-spec version(req()) -> cowboy:http_version(). +version(#{version := Version}) -> + Version. + +-spec peer(req()) -> {inet:ip_address(), inet:port_number()}. +peer(#{peer := Peer}) -> + Peer. + +-spec sock(req()) -> {inet:ip_address(), inet:port_number()}. +sock(#{sock := Sock}) -> + Sock. + +-spec cert(req()) -> binary() | undefined. +cert(#{cert := Cert}) -> + Cert. + +-spec scheme(req()) -> binary(). +scheme(#{scheme := Scheme}) -> + Scheme. + +-spec host(req()) -> binary(). +host(#{host := Host}) -> + Host. + +%% @todo The host_info is undefined if cowboy_router isn't used. Do we want to crash? +-spec host_info(req()) -> cowboy_router:tokens() | undefined. +host_info(#{host_info := HostInfo}) -> + HostInfo. + +-spec port(req()) -> inet:port_number(). +port(#{port := Port}) -> + Port. + +-spec path(req()) -> binary(). +path(#{path := Path}) -> + Path. + +%% @todo The path_info is undefined if cowboy_router isn't used. Do we want to crash? +-spec path_info(req()) -> cowboy_router:tokens() | undefined. +path_info(#{path_info := PathInfo}) -> + PathInfo. + +-spec qs(req()) -> binary(). +qs(#{qs := Qs}) -> + Qs. + +%% @todo Might be useful to limit the number of keys. +-spec parse_qs(req()) -> [{binary(), binary() | true}]. +parse_qs(#{qs := Qs}) -> + try + cow_qs:parse_qs(Qs) + catch _:_:Stacktrace -> + erlang:raise(exit, {request_error, qs, + 'Malformed query string; application/x-www-form-urlencoded expected.' + }, Stacktrace) + end. + +-spec match_qs(cowboy:fields(), req()) -> map(). +match_qs(Fields, Req) -> + case filter(Fields, kvlist_to_map(Fields, parse_qs(Req))) of + {ok, Map} -> + Map; + {error, Errors} -> + exit({request_error, {match_qs, Errors}, + 'Query string validation constraints failed for the reasons provided.'}) + end. + +-spec uri(req()) -> iodata(). +uri(Req) -> + uri(Req, #{}). + +-spec uri(req(), map()) -> iodata(). +uri(#{scheme := Scheme0, host := Host0, port := Port0, + path := Path0, qs := Qs0}, Opts) -> + Scheme = case maps:get(scheme, Opts, Scheme0) of + S = undefined -> S; + S -> iolist_to_binary(S) + end, + Host = maps:get(host, Opts, Host0), + Port = maps:get(port, Opts, Port0), + {Path, Qs} = case maps:get(path, Opts, Path0) of + <<"*">> -> {<<>>, <<>>}; + P -> {P, maps:get(qs, Opts, Qs0)} + end, + Fragment = maps:get(fragment, Opts, undefined), + [uri_host(Scheme, Scheme0, Port, Host), uri_path(Path), uri_qs(Qs), uri_fragment(Fragment)]. + +uri_host(_, _, _, undefined) -> <<>>; +uri_host(Scheme, Scheme0, Port, Host) -> + case iolist_size(Host) of + 0 -> <<>>; + _ -> [uri_scheme(Scheme), <<"//">>, Host, uri_port(Scheme, Scheme0, Port)] + end. + +uri_scheme(undefined) -> <<>>; +uri_scheme(Scheme) -> + case iolist_size(Scheme) of + 0 -> Scheme; + _ -> [Scheme, $:] + end. + +uri_port(_, _, undefined) -> <<>>; +uri_port(undefined, <<"http">>, 80) -> <<>>; +uri_port(undefined, <<"https">>, 443) -> <<>>; +uri_port(<<"http">>, _, 80) -> <<>>; +uri_port(<<"https">>, _, 443) -> <<>>; +uri_port(_, _, Port) -> + [$:, integer_to_binary(Port)]. + +uri_path(undefined) -> <<>>; +uri_path(Path) -> Path. + +uri_qs(undefined) -> <<>>; +uri_qs(Qs) -> + case iolist_size(Qs) of + 0 -> Qs; + _ -> [$?, Qs] + end. + +uri_fragment(undefined) -> <<>>; +uri_fragment(Fragment) -> + case iolist_size(Fragment) of + 0 -> Fragment; + _ -> [$#, Fragment] + end. + +-ifdef(TEST). 
+uri1_test() -> + <<"http://localhost/path">> = iolist_to_binary(uri(#{ + scheme => <<"http">>, host => <<"localhost">>, port => 80, + path => <<"/path">>, qs => <<>>})), + <<"http://localhost:443/path">> = iolist_to_binary(uri(#{ + scheme => <<"http">>, host => <<"localhost">>, port => 443, + path => <<"/path">>, qs => <<>>})), + <<"http://localhost:8080/path">> = iolist_to_binary(uri(#{ + scheme => <<"http">>, host => <<"localhost">>, port => 8080, + path => <<"/path">>, qs => <<>>})), + <<"http://localhost:8080/path?dummy=2785">> = iolist_to_binary(uri(#{ + scheme => <<"http">>, host => <<"localhost">>, port => 8080, + path => <<"/path">>, qs => <<"dummy=2785">>})), + <<"https://localhost/path">> = iolist_to_binary(uri(#{ + scheme => <<"https">>, host => <<"localhost">>, port => 443, + path => <<"/path">>, qs => <<>>})), + <<"https://localhost:8443/path">> = iolist_to_binary(uri(#{ + scheme => <<"https">>, host => <<"localhost">>, port => 8443, + path => <<"/path">>, qs => <<>>})), + <<"https://localhost:8443/path?dummy=2785">> = iolist_to_binary(uri(#{ + scheme => <<"https">>, host => <<"localhost">>, port => 8443, + path => <<"/path">>, qs => <<"dummy=2785">>})), + ok. + +uri2_test() -> + Req = #{ + scheme => <<"http">>, host => <<"localhost">>, port => 8080, + path => <<"/path">>, qs => <<"dummy=2785">> + }, + <<"http://localhost:8080/path?dummy=2785">> = iolist_to_binary(uri(Req, #{})), + %% Disable individual components. + <<"//localhost:8080/path?dummy=2785">> = iolist_to_binary(uri(Req, #{scheme => undefined})), + <<"/path?dummy=2785">> = iolist_to_binary(uri(Req, #{host => undefined})), + <<"http://localhost/path?dummy=2785">> = iolist_to_binary(uri(Req, #{port => undefined})), + <<"http://localhost:8080?dummy=2785">> = iolist_to_binary(uri(Req, #{path => undefined})), + <<"http://localhost:8080/path">> = iolist_to_binary(uri(Req, #{qs => undefined})), + <<"http://localhost:8080/path?dummy=2785">> = iolist_to_binary(uri(Req, #{fragment => undefined})), + <<"http://localhost:8080">> = iolist_to_binary(uri(Req, #{path => undefined, qs => undefined})), + <<>> = iolist_to_binary(uri(Req, #{host => undefined, path => undefined, qs => undefined})), + %% Empty values. + <<"//localhost:8080/path?dummy=2785">> = iolist_to_binary(uri(Req, #{scheme => <<>>})), + <<"//localhost:8080/path?dummy=2785">> = iolist_to_binary(uri(Req, #{scheme => ""})), + <<"//localhost:8080/path?dummy=2785">> = iolist_to_binary(uri(Req, #{scheme => [<<>>]})), + <<"/path?dummy=2785">> = iolist_to_binary(uri(Req, #{host => <<>>})), + <<"/path?dummy=2785">> = iolist_to_binary(uri(Req, #{host => ""})), + <<"/path?dummy=2785">> = iolist_to_binary(uri(Req, #{host => [<<>>]})), + <<"http://localhost:8080?dummy=2785">> = iolist_to_binary(uri(Req, #{path => <<>>})), + <<"http://localhost:8080?dummy=2785">> = iolist_to_binary(uri(Req, #{path => ""})), + <<"http://localhost:8080?dummy=2785">> = iolist_to_binary(uri(Req, #{path => [<<>>]})), + <<"http://localhost:8080/path">> = iolist_to_binary(uri(Req, #{qs => <<>>})), + <<"http://localhost:8080/path">> = iolist_to_binary(uri(Req, #{qs => ""})), + <<"http://localhost:8080/path">> = iolist_to_binary(uri(Req, #{qs => [<<>>]})), + <<"http://localhost:8080/path?dummy=2785">> = iolist_to_binary(uri(Req, #{fragment => <<>>})), + <<"http://localhost:8080/path?dummy=2785">> = iolist_to_binary(uri(Req, #{fragment => ""})), + <<"http://localhost:8080/path?dummy=2785">> = iolist_to_binary(uri(Req, #{fragment => [<<>>]})), + %% Port is integer() | undefined. 
+ {'EXIT', _} = (catch iolist_to_binary(uri(Req, #{port => <<>>}))), + {'EXIT', _} = (catch iolist_to_binary(uri(Req, #{port => ""}))), + {'EXIT', _} = (catch iolist_to_binary(uri(Req, #{port => [<<>>]}))), + %% Update components. + <<"https://localhost:8080/path?dummy=2785">> = iolist_to_binary(uri(Req, #{scheme => "https"})), + <<"http://example.org:8080/path?dummy=2785">> = iolist_to_binary(uri(Req, #{host => "example.org"})), + <<"http://localhost:123/path?dummy=2785">> = iolist_to_binary(uri(Req, #{port => 123})), + <<"http://localhost:8080/custom?dummy=2785">> = iolist_to_binary(uri(Req, #{path => "/custom"})), + <<"http://localhost:8080/path?smart=42">> = iolist_to_binary(uri(Req, #{qs => "smart=42"})), + <<"http://localhost:8080/path?dummy=2785#intro">> = iolist_to_binary(uri(Req, #{fragment => "intro"})), + %% Interesting combinations. + <<"http://localhost/path?dummy=2785">> = iolist_to_binary(uri(Req, #{port => 80})), + <<"https://localhost/path?dummy=2785">> = iolist_to_binary(uri(Req, #{scheme => "https", port => 443})), + ok. +-endif. + +-spec binding(atom(), req()) -> any() | undefined. +binding(Name, Req) -> + binding(Name, Req, undefined). + +-spec binding(atom(), req(), Default) -> any() | Default when Default::any(). +binding(Name, #{bindings := Bindings}, Default) when is_atom(Name) -> + case Bindings of + #{Name := Value} -> Value; + _ -> Default + end; +binding(Name, _, Default) when is_atom(Name) -> + Default. + +-spec bindings(req()) -> cowboy_router:bindings(). +bindings(#{bindings := Bindings}) -> + Bindings; +bindings(_) -> + #{}. + +-spec header(binary(), req()) -> binary() | undefined. +header(Name, Req) -> + header(Name, Req, undefined). + +-spec header(binary(), req(), Default) -> binary() | Default when Default::any(). +header(Name, #{headers := Headers}, Default) -> + maps:get(Name, Headers, Default). + +-spec headers(req()) -> cowboy:http_headers(). +headers(#{headers := Headers}) -> + Headers. + +-spec parse_header(binary(), Req) -> any() when Req::req(). +parse_header(Name = <<"content-length">>, Req) -> + parse_header(Name, Req, 0); +parse_header(Name = <<"cookie">>, Req) -> + parse_header(Name, Req, []); +parse_header(Name, Req) -> + parse_header(Name, Req, undefined). + +-spec parse_header(binary(), Req, any()) -> any() when Req::req(). +parse_header(Name, Req, Default) -> + try + parse_header(Name, Req, Default, parse_header_fun(Name)) + catch _:_:Stacktrace -> + erlang:raise(exit, {request_error, {header, Name}, + 'Malformed header. Please consult the relevant specification.' + }, Stacktrace) + end. 
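To make the parser table below concrete, here is a hypothetical call site (handler code, not part of this patch), assuming the request carried Content-Type: application/json; charset=utf-8:

content_type_example(Req) ->
    %% Parsed headers come back as structured terms rather than raw binaries.
    {<<"application">>, <<"json">>, Params} =
        cowboy_req:parse_header(<<"content-type">>, Req),
    {_, Charset} = lists:keyfind(<<"charset">>, 1, Params),
    %% Names without a parse_header_fun/1 clause below cause an exit;
    %% read such headers raw with cowboy_req:header/2,3 instead.
    ReqId = cowboy_req:header(<<"x-request-id">>, Req, <<"unknown">>),
    {Charset, ReqId}.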
+ +parse_header_fun(<<"accept">>) -> fun cow_http_hd:parse_accept/1; +parse_header_fun(<<"accept-charset">>) -> fun cow_http_hd:parse_accept_charset/1; +parse_header_fun(<<"accept-encoding">>) -> fun cow_http_hd:parse_accept_encoding/1; +parse_header_fun(<<"accept-language">>) -> fun cow_http_hd:parse_accept_language/1; +parse_header_fun(<<"access-control-request-headers">>) -> fun cow_http_hd:parse_access_control_request_headers/1; +parse_header_fun(<<"access-control-request-method">>) -> fun cow_http_hd:parse_access_control_request_method/1; +parse_header_fun(<<"authorization">>) -> fun cow_http_hd:parse_authorization/1; +parse_header_fun(<<"connection">>) -> fun cow_http_hd:parse_connection/1; +parse_header_fun(<<"content-encoding">>) -> fun cow_http_hd:parse_content_encoding/1; +parse_header_fun(<<"content-language">>) -> fun cow_http_hd:parse_content_language/1; +parse_header_fun(<<"content-length">>) -> fun cow_http_hd:parse_content_length/1; +parse_header_fun(<<"content-type">>) -> fun cow_http_hd:parse_content_type/1; +parse_header_fun(<<"cookie">>) -> fun cow_cookie:parse_cookie/1; +parse_header_fun(<<"expect">>) -> fun cow_http_hd:parse_expect/1; +parse_header_fun(<<"if-match">>) -> fun cow_http_hd:parse_if_match/1; +parse_header_fun(<<"if-modified-since">>) -> fun cow_http_hd:parse_if_modified_since/1; +parse_header_fun(<<"if-none-match">>) -> fun cow_http_hd:parse_if_none_match/1; +parse_header_fun(<<"if-range">>) -> fun cow_http_hd:parse_if_range/1; +parse_header_fun(<<"if-unmodified-since">>) -> fun cow_http_hd:parse_if_unmodified_since/1; +parse_header_fun(<<"max-forwards">>) -> fun cow_http_hd:parse_max_forwards/1; +parse_header_fun(<<"origin">>) -> fun cow_http_hd:parse_origin/1; +parse_header_fun(<<"proxy-authorization">>) -> fun cow_http_hd:parse_proxy_authorization/1; +parse_header_fun(<<"range">>) -> fun cow_http_hd:parse_range/1; +parse_header_fun(<<"sec-websocket-extensions">>) -> fun cow_http_hd:parse_sec_websocket_extensions/1; +parse_header_fun(<<"sec-websocket-protocol">>) -> fun cow_http_hd:parse_sec_websocket_protocol_req/1; +parse_header_fun(<<"sec-websocket-version">>) -> fun cow_http_hd:parse_sec_websocket_version_req/1; +parse_header_fun(<<"trailer">>) -> fun cow_http_hd:parse_trailer/1; +parse_header_fun(<<"upgrade">>) -> fun cow_http_hd:parse_upgrade/1; +parse_header_fun(<<"x-forwarded-for">>) -> fun cow_http_hd:parse_x_forwarded_for/1. + +parse_header(Name, Req, Default, ParseFun) -> + case header(Name, Req) of + undefined -> Default; + Value -> ParseFun(Value) + end. + +-spec filter_cookies([atom() | binary()], Req) -> Req when Req::req(). +filter_cookies(Names0, Req=#{headers := Headers}) -> + Names = [if + is_atom(N) -> atom_to_binary(N, utf8); + true -> N + end || N <- Names0], + case header(<<"cookie">>, Req) of + undefined -> Req; + Value0 -> + Cookies0 = binary:split(Value0, <<$;>>), + Cookies = lists:filter(fun(Cookie) -> + lists:member(cookie_name(Cookie), Names) + end, Cookies0), + Value = iolist_to_binary(lists:join($;, Cookies)), + Req#{headers => Headers#{<<"cookie">> => Value}} + end. + +%% This is a specialized function to extract a cookie name +%% regardless of whether the name is valid or not. We skip +%% whitespace at the beginning and take whatever's left to +%% be the cookie name, up to the = sign. +cookie_name(<<$\s, Rest/binary>>) -> cookie_name(Rest); +cookie_name(<<$\t, Rest/binary>>) -> cookie_name(Rest); +cookie_name(Name) -> cookie_name(Name, <<>>). 
+ +cookie_name(<<>>, Name) -> Name; +cookie_name(<<$=, _/bits>>, Name) -> Name; +cookie_name(<<C, Rest/bits>>, Acc) -> cookie_name(Rest, <<Acc/binary, C>>). + +-spec parse_cookies(req()) -> [{binary(), binary()}]. +parse_cookies(Req) -> + parse_header(<<"cookie">>, Req). + +-spec match_cookies(cowboy:fields(), req()) -> map(). +match_cookies(Fields, Req) -> + case filter(Fields, kvlist_to_map(Fields, parse_cookies(Req))) of + {ok, Map} -> + Map; + {error, Errors} -> + exit({request_error, {match_cookies, Errors}, + 'Cookie validation constraints failed for the reasons provided.'}) + end. + +%% Request body. + +-spec has_body(req()) -> boolean(). +has_body(#{has_body := HasBody}) -> + HasBody. + +%% The length may not be known if HTTP/1.1 with a transfer-encoding; +%% or HTTP/2 with no content-length header. The length is always +%% known once the body has been completely read. +-spec body_length(req()) -> undefined | non_neg_integer(). +body_length(#{body_length := Length}) -> + Length. + +-spec read_body(Req) -> {ok, binary(), Req} | {more, binary(), Req} when Req::req(). +read_body(Req) -> + read_body(Req, #{}). + +-spec read_body(Req, read_body_opts()) -> {ok, binary(), Req} | {more, binary(), Req} when Req::req(). +read_body(Req=#{has_body := false}, _) -> + {ok, <<>>, Req}; +read_body(Req=#{has_read_body := true}, _) -> + {ok, <<>>, Req}; +read_body(Req, Opts) -> + Length = maps:get(length, Opts, 8000000), + Period = maps:get(period, Opts, 15000), + Timeout = maps:get(timeout, Opts, Period + 1000), + Ref = make_ref(), + cast({read_body, self(), Ref, Length, Period}, Req), + receive + {request_body, Ref, nofin, Body} -> + {more, Body, Req}; + {request_body, Ref, fin, BodyLength, Body} -> + {ok, Body, set_body_length(Req, BodyLength)} + after Timeout -> + exit(timeout) + end. + +set_body_length(Req=#{headers := Headers}, BodyLength) -> + Req#{ + headers => Headers#{<<"content-length">> => integer_to_binary(BodyLength)}, + body_length => BodyLength, + has_read_body => true + }. + +-spec read_urlencoded_body(Req) -> {ok, [{binary(), binary() | true}], Req} when Req::req(). +read_urlencoded_body(Req) -> + read_urlencoded_body(Req, #{length => 64000, period => 5000}). + +-spec read_urlencoded_body(Req, read_body_opts()) -> {ok, [{binary(), binary() | true}], Req} when Req::req(). +read_urlencoded_body(Req0, Opts) -> + case read_body(Req0, Opts) of + {ok, Body, Req} -> + try + {ok, cow_qs:parse_qs(Body), Req} + catch _:_:Stacktrace -> + erlang:raise(exit, {request_error, urlencoded_body, + 'Malformed body; application/x-www-form-urlencoded expected.' + }, Stacktrace) + end; + {more, Body, _} -> + Length = maps:get(length, Opts, 64000), + if + byte_size(Body) < Length -> + exit({request_error, timeout, + 'The request body was not received within the configured time.'}); + true -> + exit({request_error, payload_too_large, + 'The request body is larger than allowed by configuration.'}) + end + end. + +-spec read_and_match_urlencoded_body(cowboy:fields(), Req) + -> {ok, map(), Req} when Req::req(). +read_and_match_urlencoded_body(Fields, Req) -> + read_and_match_urlencoded_body(Fields, Req, #{length => 64000, period => 5000}). + +-spec read_and_match_urlencoded_body(cowboy:fields(), Req, read_body_opts()) + -> {ok, map(), Req} when Req::req(). 
+read_and_match_urlencoded_body(Fields, Req0, Opts) -> + {ok, Qs, Req} = read_urlencoded_body(Req0, Opts), + case filter(Fields, kvlist_to_map(Fields, Qs)) of + {ok, Map} -> + {ok, Map, Req}; + {error, Errors} -> + exit({request_error, {read_and_match_urlencoded_body, Errors}, + 'Urlencoded request body validation constraints failed for the reasons provided.'}) + end. + +%% Multipart. + +-spec read_part(Req) + -> {ok, cowboy:http_headers(), Req} | {done, Req} + when Req::req(). +read_part(Req) -> + read_part(Req, #{length => 64000, period => 5000}). + +-spec read_part(Req, read_body_opts()) + -> {ok, cowboy:http_headers(), Req} | {done, Req} + when Req::req(). +read_part(Req, Opts) -> + case maps:is_key(multipart, Req) of + true -> + {Data, Req2} = stream_multipart(Req, Opts, headers), + read_part(Data, Opts, Req2); + false -> + read_part(init_multipart(Req), Opts) + end. + +read_part(Buffer, Opts, Req=#{multipart := {Boundary, _}}) -> + try cow_multipart:parse_headers(Buffer, Boundary) of + more -> + {Data, Req2} = stream_multipart(Req, Opts, headers), + read_part(<< Buffer/binary, Data/binary >>, Opts, Req2); + {more, Buffer2} -> + {Data, Req2} = stream_multipart(Req, Opts, headers), + read_part(<< Buffer2/binary, Data/binary >>, Opts, Req2); + {ok, Headers0, Rest} -> + Headers = maps:from_list(Headers0), + %% Reject multipart content containing duplicate headers. + true = map_size(Headers) =:= length(Headers0), + {ok, Headers, Req#{multipart => {Boundary, Rest}}}; + %% Ignore epilogue. + {done, _} -> + {done, Req#{multipart => done}} + catch _:_:Stacktrace -> + erlang:raise(exit, {request_error, {multipart, headers}, + 'Malformed body; multipart expected.' + }, Stacktrace) + end. + +-spec read_part_body(Req) + -> {ok, binary(), Req} | {more, binary(), Req} + when Req::req(). +read_part_body(Req) -> + read_part_body(Req, #{}). + +-spec read_part_body(Req, read_body_opts()) + -> {ok, binary(), Req} | {more, binary(), Req} + when Req::req(). +read_part_body(Req, Opts) -> + case maps:is_key(multipart, Req) of + true -> + read_part_body(<<>>, Opts, Req, <<>>); + false -> + read_part_body(init_multipart(Req), Opts) + end. + +read_part_body(Buffer, Opts, Req=#{multipart := {Boundary, _}}, Acc) -> + Length = maps:get(length, Opts, 8000000), + case byte_size(Acc) > Length of + true -> + {more, Acc, Req#{multipart => {Boundary, Buffer}}}; + false -> + {Data, Req2} = stream_multipart(Req, Opts, body), + case cow_multipart:parse_body(<< Buffer/binary, Data/binary >>, Boundary) of + {ok, Body} -> + read_part_body(<<>>, Opts, Req2, << Acc/binary, Body/binary >>); + {ok, Body, Rest} -> + read_part_body(Rest, Opts, Req2, << Acc/binary, Body/binary >>); + done -> + {ok, Acc, Req2}; + {done, Body} -> + {ok, << Acc/binary, Body/binary >>, Req2}; + {done, Body, Rest} -> + {ok, << Acc/binary, Body/binary >>, + Req2#{multipart => {Boundary, Rest}}} + end + end. + +init_multipart(Req) -> + {<<"multipart">>, _, Params} = parse_header(<<"content-type">>, Req), + case lists:keyfind(<<"boundary">>, 1, Params) of + {_, Boundary} -> + Req#{multipart => {Boundary, <<>>}}; + false -> + exit({request_error, {multipart, boundary}, + 'Missing boundary parameter for multipart media type.'}) + end. + +stream_multipart(Req=#{multipart := done}, _, _) -> + {<<>>, Req}; +stream_multipart(Req=#{multipart := {_, <<>>}}, Opts, Type) -> + case read_body(Req, Opts) of + {more, Data, Req2} -> + {Data, Req2}; + %% We crash when the data ends unexpectedly. 
+ {ok, <<>>, _} -> + exit({request_error, {multipart, Type}, + 'Malformed body; multipart expected.'}); + {ok, Data, Req2} -> + {Data, Req2} + end; +stream_multipart(Req=#{multipart := {Boundary, Buffer}}, _, _) -> + {Buffer, Req#{multipart => {Boundary, <<>>}}}. + +%% Response. + +-spec set_resp_cookie(iodata(), iodata(), Req) + -> Req when Req::req(). +set_resp_cookie(Name, Value, Req) -> + set_resp_cookie(Name, Value, Req, #{}). + +%% The cookie name cannot contain any of the following characters: +%% =,;\s\t\r\n\013\014 +%% +%% The cookie value cannot contain any of the following characters: +%% ,; \t\r\n\013\014 +-spec set_resp_cookie(binary(), iodata(), Req, cow_cookie:cookie_opts()) + -> Req when Req::req(). +set_resp_cookie(Name, Value, Req, Opts) -> + Cookie = cow_cookie:setcookie(Name, Value, Opts), + RespCookies = maps:get(resp_cookies, Req, #{}), + Req#{resp_cookies => RespCookies#{Name => Cookie}}. + +%% @todo We could add has_resp_cookie and delete_resp_cookie now. + +-spec set_resp_header(binary(), iodata(), Req) + -> Req when Req::req(). +set_resp_header(Name, Value, Req=#{resp_headers := RespHeaders}) -> + Req#{resp_headers => RespHeaders#{Name => Value}}; +set_resp_header(Name,Value, Req) -> + Req#{resp_headers => #{Name => Value}}. + +-spec set_resp_headers(cowboy:http_headers(), Req) + -> Req when Req::req(). +set_resp_headers(Headers, Req=#{resp_headers := RespHeaders}) -> + Req#{resp_headers => maps:merge(RespHeaders, Headers)}; +set_resp_headers(Headers, Req) -> + Req#{resp_headers => Headers}. + +-spec resp_header(binary(), req()) -> binary() | undefined. +resp_header(Name, Req) -> + resp_header(Name, Req, undefined). + +-spec resp_header(binary(), req(), Default) + -> binary() | Default when Default::any(). +resp_header(Name, #{resp_headers := Headers}, Default) -> + maps:get(Name, Headers, Default); +resp_header(_, #{}, Default) -> + Default. + +-spec resp_headers(req()) -> cowboy:http_headers(). +resp_headers(#{resp_headers := RespHeaders}) -> + RespHeaders; +resp_headers(#{}) -> + #{}. + +-spec set_resp_body(resp_body(), Req) -> Req when Req::req(). +set_resp_body(Body, Req) -> + Req#{resp_body => Body}. + +-spec has_resp_header(binary(), req()) -> boolean(). +has_resp_header(Name, #{resp_headers := RespHeaders}) -> + maps:is_key(Name, RespHeaders); +has_resp_header(_, _) -> + false. + +-spec has_resp_body(req()) -> boolean(). +has_resp_body(#{resp_body := {sendfile, _, _, _}}) -> + true; +has_resp_body(#{resp_body := RespBody}) -> + iolist_size(RespBody) > 0; +has_resp_body(_) -> + false. + +-spec delete_resp_header(binary(), Req) + -> Req when Req::req(). +delete_resp_header(Name, Req=#{resp_headers := RespHeaders}) -> + Req#{resp_headers => maps:remove(Name, RespHeaders)}; +%% There are no resp headers so we have nothing to delete. +delete_resp_header(_, Req) -> + Req. + +-spec inform(cowboy:http_status(), req()) -> ok. +inform(Status, Req) -> + inform(Status, #{}, Req). + +-spec inform(cowboy:http_status(), cowboy:http_headers(), req()) -> ok. +inform(_, _, #{has_sent_resp := _}) -> + error(function_clause); %% @todo Better error message. +inform(Status, Headers, Req) when is_integer(Status); is_binary(Status) -> + cast({inform, Status, Headers}, Req). + +-spec reply(cowboy:http_status(), Req) -> Req when Req::req(). +reply(Status, Req) -> + reply(Status, #{}, Req). + +-spec reply(cowboy:http_status(), cowboy:http_headers(), Req) + -> Req when Req::req(). 
+reply(Status, Headers, Req=#{resp_body := Body}) -> + reply(Status, Headers, Body, Req); +reply(Status, Headers, Req) -> + reply(Status, Headers, <<>>, Req). + +-spec reply(cowboy:http_status(), cowboy:http_headers(), resp_body(), Req) + -> Req when Req::req(). +reply(_, _, _, #{has_sent_resp := _}) -> + error(function_clause); %% @todo Better error message. +reply(Status, Headers, {sendfile, _, 0, _}, Req) + when is_integer(Status); is_binary(Status) -> + do_reply(Status, Headers#{ + <<"content-length">> => <<"0">> + }, <<>>, Req); +reply(Status, Headers, SendFile = {sendfile, _, Len, _}, Req) + when is_integer(Status); is_binary(Status) -> + do_reply(Status, Headers#{ + <<"content-length">> => integer_to_binary(Len) + }, SendFile, Req); +%% 204 responses must not include content-length. 304 responses may +%% but only when set explicitly. (RFC7230 3.3.1, RFC7230 3.3.2) +%% Neither status code must include a response body. (RFC7230 3.3) +reply(Status, Headers, Body, Req) + when Status =:= 204; Status =:= 304 -> + 0 = iolist_size(Body), + do_reply(Status, Headers, Body, Req); +reply(Status = <<"204",_/bits>>, Headers, Body, Req) -> + 0 = iolist_size(Body), + do_reply(Status, Headers, Body, Req); +reply(Status = <<"304",_/bits>>, Headers, Body, Req) -> + 0 = iolist_size(Body), + do_reply(Status, Headers, Body, Req); +reply(Status, Headers, Body, Req) + when is_integer(Status); is_binary(Status) -> + do_reply(Status, Headers#{ + <<"content-length">> => integer_to_binary(iolist_size(Body)) + }, Body, Req). + +%% Don't send any body for HEAD responses. While the protocol code is +%% supposed to enforce this rule, we prefer to avoid copying too much +%% data around if we can avoid it. +do_reply(Status, Headers, _, Req=#{method := <<"HEAD">>}) -> + cast({response, Status, response_headers(Headers, Req), <<>>}, Req), + done_replying(Req, true); +do_reply(Status, Headers, Body, Req) -> + cast({response, Status, response_headers(Headers, Req), Body}, Req), + done_replying(Req, true). + +done_replying(Req, HasSentResp) -> + maps:without([resp_cookies, resp_headers, resp_body], Req#{has_sent_resp => HasSentResp}). + +-spec stream_reply(cowboy:http_status(), Req) -> Req when Req::req(). +stream_reply(Status, Req) -> + stream_reply(Status, #{}, Req). + +-spec stream_reply(cowboy:http_status(), cowboy:http_headers(), Req) + -> Req when Req::req(). +stream_reply(_, _, #{has_sent_resp := _}) -> + error(function_clause); +%% 204 and 304 responses must NOT send a body. We therefore +%% transform the call to a full response and expect the user +%% to NOT call stream_body/3 afterwards. (RFC7230 3.3) +stream_reply(Status = 204, Headers=#{}, Req) -> + reply(Status, Headers, <<>>, Req); +stream_reply(Status = <<"204",_/bits>>, Headers=#{}, Req) -> + reply(Status, Headers, <<>>, Req); +stream_reply(Status = 304, Headers=#{}, Req) -> + reply(Status, Headers, <<>>, Req); +stream_reply(Status = <<"304",_/bits>>, Headers=#{}, Req) -> + reply(Status, Headers, <<>>, Req); +stream_reply(Status, Headers=#{}, Req) when is_integer(Status); is_binary(Status) -> + cast({headers, Status, response_headers(Headers, Req)}, Req), + done_replying(Req, headers). + +-spec stream_body(resp_body(), fin | nofin, req()) -> ok. +%% Error out if headers were not sent. +%% Don't send any body for HEAD responses. +stream_body(_, _, #{method := <<"HEAD">>, has_sent_resp := headers}) -> + ok; +%% Don't send a message if the data is empty, except for the +%% very last message with IsFin=fin. 
When using sendfile this +%% is converted to a data tuple, however. +stream_body({sendfile, _, 0, _}, nofin, _) -> + ok; +stream_body({sendfile, _, 0, _}, IsFin=fin, Req=#{has_sent_resp := headers}) -> + stream_body({data, self(), IsFin, <<>>}, Req); +stream_body({sendfile, O, B, P}, IsFin, Req=#{has_sent_resp := headers}) + when is_integer(O), O >= 0, is_integer(B), B > 0 -> + stream_body({data, self(), IsFin, {sendfile, O, B, P}}, Req); +stream_body(Data, IsFin=nofin, Req=#{has_sent_resp := headers}) + when not is_tuple(Data) -> + case iolist_size(Data) of + 0 -> ok; + _ -> stream_body({data, self(), IsFin, Data}, Req) + end; +stream_body(Data, IsFin, Req=#{has_sent_resp := headers}) + when not is_tuple(Data) -> + stream_body({data, self(), IsFin, Data}, Req). + +%% @todo Do we need a timeout? +stream_body(Msg, Req=#{pid := Pid}) -> + cast(Msg, Req), + receive {data_ack, Pid} -> ok end. + +-spec stream_events(cow_sse:event() | [cow_sse:event()], fin | nofin, req()) -> ok. +stream_events(Event, IsFin, Req) when is_map(Event) -> + stream_events([Event], IsFin, Req); +stream_events(Events, IsFin, Req=#{has_sent_resp := headers}) -> + stream_body({data, self(), IsFin, cow_sse:events(Events)}, Req). + +-spec stream_trailers(cowboy:http_headers(), req()) -> ok. +stream_trailers(Trailers, Req=#{has_sent_resp := headers}) -> + cast({trailers, Trailers}, Req). + +-spec push(iodata(), cowboy:http_headers(), req()) -> ok. +push(Path, Headers, Req) -> + push(Path, Headers, Req, #{}). + +%% @todo Optimization: don't send anything at all for HTTP/1.0 and HTTP/1.1. +%% @todo Path, Headers, Opts, everything should be in proper binary, +%% or normalized when creating the Req object. +-spec push(iodata(), cowboy:http_headers(), req(), push_opts()) -> ok. +push(Path, Headers, Req=#{scheme := Scheme0, host := Host0, port := Port0}, Opts) -> + Method = maps:get(method, Opts, <<"GET">>), + Scheme = maps:get(scheme, Opts, Scheme0), + Host = maps:get(host, Opts, Host0), + Port = maps:get(port, Opts, Port0), + Qs = maps:get(qs, Opts, <<>>), + cast({push, Method, Scheme, Host, Port, Path, Qs, Headers}, Req). + +%% Stream handlers. + +-spec cast(any(), req()) -> ok. +cast(Msg, #{pid := Pid, streamid := StreamID}) -> + Pid ! {{Pid, StreamID}, Msg}, + ok. + +%% Internal. + +%% @todo What about set-cookie headers set through set_resp_header or reply? +-spec response_headers(Headers, req()) -> Headers when Headers::cowboy:http_headers(). +response_headers(Headers0, Req) -> + RespHeaders = maps:get(resp_headers, Req, #{}), + Headers = maps:merge(#{ + <<"date">> => cowboy_clock:rfc1123(), + <<"server">> => <<"Cowboy">> + }, maps:merge(RespHeaders, Headers0)), + %% The set-cookie header is special; we can only send one cookie per header. + %% We send the list of values for many cookies in one key of the map, + %% and let the protocols deal with it directly. + case maps:get(resp_cookies, Req, undefined) of + undefined -> Headers; + RespCookies -> Headers#{<<"set-cookie">> => maps:values(RespCookies)} + end. + +%% Create map, convert keys to atoms and group duplicate keys into lists. +%% Keys that are not found in the user provided list are entirely skipped. +%% @todo Can probably be done directly while parsing. +kvlist_to_map(Fields, KvList) -> + Keys = [case K of + {Key, _} -> Key; + {Key, _, _} -> Key; + Key -> Key + end || K <- Fields], + kvlist_to_map(Keys, KvList, #{}). 
+ +kvlist_to_map(_, [], Map) -> + Map; +kvlist_to_map(Keys, [{Key, Value}|Tail], Map) -> + try binary_to_existing_atom(Key, utf8) of + Atom -> + case lists:member(Atom, Keys) of + true -> + case maps:find(Atom, Map) of + {ok, MapValue} when is_list(MapValue) -> + kvlist_to_map(Keys, Tail, + Map#{Atom => [Value|MapValue]}); + {ok, MapValue} -> + kvlist_to_map(Keys, Tail, + Map#{Atom => [Value, MapValue]}); + error -> + kvlist_to_map(Keys, Tail, + Map#{Atom => Value}) + end; + false -> + kvlist_to_map(Keys, Tail, Map) + end + catch error:badarg -> + kvlist_to_map(Keys, Tail, Map) + end. + +filter(Fields, Map0) -> + filter(Fields, Map0, #{}). + +%% Loop through fields, if value is missing and no default, +%% record the error; else if value is missing and has a +%% default, set default; otherwise apply constraints. If +%% constraint fails, record the error. +%% +%% When there is an error at the end, crash. +filter([], Map, Errors) -> + case maps:size(Errors) of + 0 -> {ok, Map}; + _ -> {error, Errors} + end; +filter([{Key, Constraints}|Tail], Map, Errors) -> + filter_constraints(Tail, Map, Errors, Key, maps:get(Key, Map), Constraints); +filter([{Key, Constraints, Default}|Tail], Map, Errors) -> + case maps:find(Key, Map) of + {ok, Value} -> + filter_constraints(Tail, Map, Errors, Key, Value, Constraints); + error -> + filter(Tail, Map#{Key => Default}, Errors) + end; +filter([Key|Tail], Map, Errors) -> + case maps:is_key(Key, Map) of + true -> + filter(Tail, Map, Errors); + false -> + filter(Tail, Map, Errors#{Key => required}) + end. + +filter_constraints(Tail, Map, Errors, Key, Value0, Constraints) -> + case cowboy_constraints:validate(Value0, Constraints) of + {ok, Value} -> + filter(Tail, Map#{Key => Value}, Errors); + {error, Reason} -> + filter(Tail, Map, Errors#{Key => Reason}) + end. diff --git a/deps/cowboy/src/cowboy_rest.erl b/deps/cowboy/src/cowboy_rest.erl new file mode 100644 index 0000000..7d0fe80 --- /dev/null +++ b/deps/cowboy/src/cowboy_rest.erl @@ -0,0 +1,1637 @@ +%% Copyright (c) 2011-2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +%% Originally based on the Webmachine Diagram from Alan Dean and +%% Justin Sheehy. +-module(cowboy_rest). +-behaviour(cowboy_sub_protocol). + +-export([upgrade/4]). +-export([upgrade/5]). + +-type switch_handler() :: {switch_handler, module()} + | {switch_handler, module(), any()}. + +%% Common handler callbacks. + +-callback init(Req, any()) + -> {ok | module(), Req, any()} + | {module(), Req, any(), any()} + when Req::cowboy_req:req(). + +-callback terminate(any(), cowboy_req:req(), any()) -> ok. +-optional_callbacks([terminate/3]). + +%% REST handler callbacks. + +-callback allowed_methods(Req, State) + -> {[binary()], Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). 
+-optional_callbacks([allowed_methods/2]). + +-callback allow_missing_post(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([allow_missing_post/2]). + +-callback charsets_provided(Req, State) + -> {[binary()], Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([charsets_provided/2]). + +-callback content_types_accepted(Req, State) + -> {[{'*' | binary() | {binary(), binary(), '*' | [{binary(), binary()}]}, atom()}], Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([content_types_accepted/2]). + +-callback content_types_provided(Req, State) + -> {[{binary() | {binary(), binary(), '*' | [{binary(), binary()}]}, atom()}], Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([content_types_provided/2]). + +-callback delete_completed(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([delete_completed/2]). + +-callback delete_resource(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([delete_resource/2]). + +-callback expires(Req, State) + -> {calendar:datetime() | binary() | undefined, Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([expires/2]). + +-callback forbidden(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([forbidden/2]). + +-callback generate_etag(Req, State) + -> {binary() | {weak | strong, binary()}, Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([generate_etag/2]). + +-callback is_authorized(Req, State) + -> {true | {false, iodata()}, Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([is_authorized/2]). + +-callback is_conflict(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([is_conflict/2]). + +-callback known_methods(Req, State) + -> {[binary()], Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([known_methods/2]). + +-callback languages_provided(Req, State) + -> {[binary()], Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([languages_provided/2]). + +-callback last_modified(Req, State) + -> {calendar:datetime(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([last_modified/2]). + +-callback malformed_request(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([malformed_request/2]). + +-callback moved_permanently(Req, State) + -> {{true, iodata()} | false, Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([moved_permanently/2]). 
+ +-callback moved_temporarily(Req, State) + -> {{true, iodata()} | false, Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([moved_temporarily/2]). + +-callback multiple_choices(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([multiple_choices/2]). + +-callback options(Req, State) + -> {ok, Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([options/2]). + +-callback previously_existed(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([previously_existed/2]). + +-callback range_satisfiable(Req, State) + -> {boolean() | {false, non_neg_integer() | iodata()}, Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([range_satisfiable/2]). + +-callback ranges_provided(Req, State) + -> {[{binary(), atom()}], Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([ranges_provided/2]). + +-callback rate_limited(Req, State) + -> {{true, non_neg_integer() | calendar:datetime()} | false, Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([rate_limited/2]). + +-callback resource_exists(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([resource_exists/2]). + +-callback service_available(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([service_available/2]). + +-callback uri_too_long(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([uri_too_long/2]). + +-callback valid_content_headers(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([valid_content_headers/2]). + +-callback valid_entity_length(Req, State) + -> {boolean(), Req, State} + | {stop, Req, State} + | {switch_handler(), Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([valid_entity_length/2]). + +-callback variances(Req, State) + -> {[binary()], Req, State} + when Req::cowboy_req:req(), State::any(). +-optional_callbacks([variances/2]). + +%% End of REST callbacks. Whew! + +-record(state, { + method = undefined :: binary(), + + %% Handler. + handler :: atom(), + handler_state :: any(), + + %% Allowed methods. Only used for OPTIONS requests. + allowed_methods :: [binary()] | undefined, + + %% Media type. + content_types_p = [] :: + [{binary() | {binary(), binary(), [{binary(), binary()}] | '*'}, + atom()}], + content_type_a :: undefined + | {binary() | {binary(), binary(), [{binary(), binary()}] | '*'}, + atom()}, + + %% Language. + languages_p = [] :: [binary()], + language_a :: undefined | binary(), + + %% Charset. + charsets_p = undefined :: undefined | [binary()], + charset_a :: undefined | binary(), + + %% Range units. 
+ ranges_a = [] :: [{binary(), atom()}], + + %% Whether the resource exists. + exists = false :: boolean(), + + %% Cached resource calls. + etag :: undefined | no_call | {strong | weak, binary()}, + last_modified :: undefined | no_call | calendar:datetime(), + expires :: undefined | no_call | calendar:datetime() | binary() +}). + +-spec upgrade(Req, Env, module(), any()) + -> {ok, Req, Env} when Req::cowboy_req:req(), Env::cowboy_middleware:env(). +upgrade(Req0, Env, Handler, HandlerState0) -> + Method = cowboy_req:method(Req0), + case service_available(Req0, #state{method=Method, + handler=Handler, handler_state=HandlerState0}) of + {ok, Req, Result} -> + {ok, Req, Env#{result => Result}}; + {Mod, Req, HandlerState} -> + Mod:upgrade(Req, Env, Handler, HandlerState); + {Mod, Req, HandlerState, Opts} -> + Mod:upgrade(Req, Env, Handler, HandlerState, Opts) + end. + +-spec upgrade(Req, Env, module(), any(), any()) + -> {ok, Req, Env} when Req::cowboy_req:req(), Env::cowboy_middleware:env(). +%% cowboy_rest takes no options. +upgrade(Req, Env, Handler, HandlerState, _Opts) -> + upgrade(Req, Env, Handler, HandlerState). + +service_available(Req, State) -> + expect(Req, State, service_available, true, fun known_methods/2, 503). + +%% known_methods/2 should return a list of binary methods. +known_methods(Req, State=#state{method=Method}) -> + case call(Req, State, known_methods) of + no_call when Method =:= <<"HEAD">>; Method =:= <<"GET">>; + Method =:= <<"POST">>; Method =:= <<"PUT">>; + Method =:= <<"PATCH">>; Method =:= <<"DELETE">>; + Method =:= <<"OPTIONS">> -> + next(Req, State, fun uri_too_long/2); + no_call -> + next(Req, State, 501); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {List, Req2, State2} -> + case lists:member(Method, List) of + true -> next(Req2, State2, fun uri_too_long/2); + false -> next(Req2, State2, 501) + end + end. + +uri_too_long(Req, State) -> + expect(Req, State, uri_too_long, false, fun allowed_methods/2, 414). + +%% allowed_methods/2 should return a list of binary methods. +allowed_methods(Req, State=#state{method=Method}) -> + case call(Req, State, allowed_methods) of + no_call when Method =:= <<"HEAD">>; Method =:= <<"GET">> -> + next(Req, State, fun malformed_request/2); + no_call when Method =:= <<"OPTIONS">> -> + next(Req, State#state{allowed_methods= + [<<"HEAD">>, <<"GET">>, <<"OPTIONS">>]}, + fun malformed_request/2); + no_call -> + method_not_allowed(Req, State, + [<<"HEAD">>, <<"GET">>, <<"OPTIONS">>]); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {List, Req2, State2} -> + case lists:member(Method, List) of + true when Method =:= <<"OPTIONS">> -> + next(Req2, State2#state{allowed_methods=List}, + fun malformed_request/2); + true -> + next(Req2, State2, fun malformed_request/2); + false -> + method_not_allowed(Req2, State2, List) + end + end. + +method_not_allowed(Req, State, []) -> + Req2 = cowboy_req:set_resp_header(<<"allow">>, <<>>, Req), + respond(Req2, State, 405); +method_not_allowed(Req, State, Methods) -> + << ", ", Allow/binary >> = << << ", ", M/binary >> || M <- Methods >>, + Req2 = cowboy_req:set_resp_header(<<"allow">>, Allow, Req), + respond(Req2, State, 405). + +malformed_request(Req, State) -> + expect(Req, State, malformed_request, false, fun is_authorized/2, 400). 
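The decision flow above calls optional callbacks on the handler module (known_methods/2, uri_too_long/2, allowed_methods/2, malformed_request/2 and so on) and falls back to the defaults shown when a callback is not exported. A minimal cowboy_rest handler, with a hypothetical module name, only needs init/2 plus whichever callbacks it wants to override:

-module(item_handler).   %% hypothetical name
-behaviour(cowboy_rest).
-export([init/2, allowed_methods/2, to_html/2]).

%% Returning the cowboy_rest module from init/2 selects this sub-protocol.
init(Req, State) ->
    {cowboy_rest, Req, State}.

allowed_methods(Req, State) ->
    {[<<"GET">>, <<"HEAD">>, <<"OPTIONS">>], Req, State}.

%% Called through the default content_types_provided/2 entry
%% {{<<"text">>, <<"html">>, '*'}, to_html}.
to_html(Req, State) ->
    {<<"<h1>Hello</h1>">>, Req, State}.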
+ +%% is_authorized/2 should return true or {false, WwwAuthenticateHeader}. +is_authorized(Req, State) -> + case call(Req, State, is_authorized) of + no_call -> + forbidden(Req, State); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {true, Req2, State2} -> + forbidden(Req2, State2); + {{false, AuthHead}, Req2, State2} -> + Req3 = cowboy_req:set_resp_header( + <<"www-authenticate">>, AuthHead, Req2), + respond(Req3, State2, 401) + end. + +forbidden(Req, State) -> + expect(Req, State, forbidden, false, fun rate_limited/2, 403). + +rate_limited(Req, State) -> + case call(Req, State, rate_limited) of + no_call -> + valid_content_headers(Req, State); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {false, Req2, State2} -> + valid_content_headers(Req2, State2); + {{true, RetryAfter0}, Req2, State2} -> + RetryAfter = if + is_integer(RetryAfter0), RetryAfter0 >= 0 -> + integer_to_binary(RetryAfter0); + is_tuple(RetryAfter0) -> + cowboy_clock:rfc1123(RetryAfter0) + end, + Req3 = cowboy_req:set_resp_header(<<"retry-after">>, RetryAfter, Req2), + respond(Req3, State2, 429) + end. + +valid_content_headers(Req, State) -> + expect(Req, State, valid_content_headers, true, + fun valid_entity_length/2, 501). + +valid_entity_length(Req, State) -> + expect(Req, State, valid_entity_length, true, fun options/2, 413). + +%% If you need to add additional headers to the response at this point, +%% you should do it directly in the options/2 call using set_resp_headers. +options(Req, State=#state{allowed_methods=Methods, method= <<"OPTIONS">>}) -> + case call(Req, State, options) of + no_call when Methods =:= [] -> + Req2 = cowboy_req:set_resp_header(<<"allow">>, <<>>, Req), + respond(Req2, State, 200); + no_call -> + << ", ", Allow/binary >> + = << << ", ", M/binary >> || M <- Methods >>, + Req2 = cowboy_req:set_resp_header(<<"allow">>, Allow, Req), + respond(Req2, State, 200); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {ok, Req2, State2} -> + respond(Req2, State2, 200) + end; +options(Req, State) -> + content_types_provided(Req, State). + +%% content_types_provided/2 should return a list of content types and their +%% associated callback function as a tuple: {{Type, SubType, Params}, Fun}. +%% Type and SubType are the media type as binary. Params is a list of +%% Key/Value tuple, with Key and Value a binary. Fun is the name of the +%% callback that will be used to return the content of the response. It is +%% given as an atom. +%% +%% An example of such return value would be: +%% {{<<"text">>, <<"html">>, []}, to_html} +%% +%% Note that it is also possible to return a binary content type that will +%% then be parsed by Cowboy. However note that while this may make your +%% resources a little more readable, this is a lot less efficient. 
+%% +%% An example of such return value would be: +%% {<<"text/html">>, to_html} +content_types_provided(Req, State) -> + case call(Req, State, content_types_provided) of + no_call -> + State2 = State#state{ + content_types_p=[{{<<"text">>, <<"html">>, '*'}, to_html}]}, + try cowboy_req:parse_header(<<"accept">>, Req) of + undefined -> + languages_provided( + Req#{media_type => {<<"text">>, <<"html">>, []}}, + State2#state{content_type_a={{<<"text">>, <<"html">>, []}, to_html}}); + Accept -> + choose_media_type(Req, State2, prioritize_accept(Accept)) + catch _:_ -> + respond(Req, State2, 400) + end; + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {[], Req2, State2} -> + not_acceptable(Req2, State2); + {CTP, Req2, State2} -> + CTP2 = [normalize_content_types(P) || P <- CTP], + State3 = State2#state{content_types_p=CTP2}, + try cowboy_req:parse_header(<<"accept">>, Req2) of + undefined -> + {PMT0, _Fun} = HeadCTP = hd(CTP2), + %% We replace the wildcard by an empty list of parameters. + PMT = case PMT0 of + {Type, SubType, '*'} -> {Type, SubType, []}; + _ -> PMT0 + end, + languages_provided( + Req2#{media_type => PMT}, + State3#state{content_type_a=HeadCTP}); + Accept -> + choose_media_type(Req2, State3, prioritize_accept(Accept)) + catch _:_ -> + respond(Req2, State3, 400) + end + end. + +normalize_content_types({ContentType, Callback}) + when is_binary(ContentType) -> + {cow_http_hd:parse_content_type(ContentType), Callback}; +normalize_content_types(Normalized) -> + Normalized. + +prioritize_accept(Accept) -> + lists:sort( + fun ({MediaTypeA, Quality, _AcceptParamsA}, + {MediaTypeB, Quality, _AcceptParamsB}) -> + %% Same quality, check precedence in more details. + prioritize_mediatype(MediaTypeA, MediaTypeB); + ({_MediaTypeA, QualityA, _AcceptParamsA}, + {_MediaTypeB, QualityB, _AcceptParamsB}) -> + %% Just compare the quality. + QualityA > QualityB + end, Accept). + +%% Media ranges can be overridden by more specific media ranges or +%% specific media types. If more than one media range applies to a given +%% type, the most specific reference has precedence. +%% +%% We always choose B over A when we can't decide between the two. +prioritize_mediatype({TypeA, SubTypeA, ParamsA}, {TypeB, SubTypeB, ParamsB}) -> + case TypeB of + TypeA -> + case SubTypeB of + SubTypeA -> length(ParamsA) > length(ParamsB); + <<"*">> -> true; + _Any -> false + end; + <<"*">> -> true; + _Any -> false + end. + +%% Ignoring the rare AcceptParams. Not sure what should be done about them. +choose_media_type(Req, State, []) -> + not_acceptable(Req, State); +choose_media_type(Req, State=#state{content_types_p=CTP}, + [MediaType|Tail]) -> + match_media_type(Req, State, Tail, CTP, MediaType). + +match_media_type(Req, State, Accept, [], _MediaType) -> + choose_media_type(Req, State, Accept); +match_media_type(Req, State, Accept, CTP, + MediaType = {{<<"*">>, <<"*">>, _Params_A}, _QA, _APA}) -> + match_media_type_params(Req, State, Accept, CTP, MediaType); +match_media_type(Req, State, Accept, + CTP = [{{Type, SubType_P, _PP}, _Fun}|_Tail], + MediaType = {{Type, SubType_A, _PA}, _QA, _APA}) + when SubType_P =:= SubType_A; SubType_A =:= <<"*">> -> + match_media_type_params(Req, State, Accept, CTP, MediaType); +match_media_type(Req, State, Accept, [_Any|Tail], MediaType) -> + match_media_type(Req, State, Accept, Tail, MediaType). 
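As the comment above explains, content_types_provided/2 may return media types either pre-parsed or as binaries that normalize_content_types/1 parses with cow_http_hd:parse_content_type/1. A sketch mixing both forms (the to_json and to_html callback names are placeholders):

content_types_provided(Req, State) ->
    {[
        %% Pre-parsed form; '*' matches any media type parameters.
        {{<<"application">>, <<"json">>, '*'}, to_json},
        %% Binary form; parsed by Cowboy at runtime (less efficient).
        {<<"text/html">>, to_html}
    ], Req, State}.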
+ +match_media_type_params(Req, State, Accept, + [Provided = {{TP, STP, '*'}, _Fun}|Tail], + MediaType = {{TA, _STA, Params_A0}, _QA, _APA}) -> + case lists:keytake(<<"charset">>, 1, Params_A0) of + {value, {_, Charset}, Params_A} when TA =:= <<"text">> -> + %% When we match against a wildcard, the media type is text + %% and has a charset parameter, we call charsets_provided + %% and check that the charset is provided. If the callback + %% is not exported, we accept inconditionally but ignore + %% the given charset so as to not send a wrong value back. + case call(Req, State, charsets_provided) of + no_call -> + languages_provided(Req#{media_type => {TP, STP, Params_A0}}, + State#state{content_type_a=Provided}); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {CP, Req2, State2} -> + State3 = State2#state{charsets_p=CP}, + case lists:member(Charset, CP) of + false -> + match_media_type(Req2, State3, Accept, Tail, MediaType); + true -> + languages_provided(Req2#{media_type => {TP, STP, Params_A}}, + State3#state{content_type_a=Provided, + charset_a=Charset}) + end + end; + _ -> + languages_provided(Req#{media_type => {TP, STP, Params_A0}}, + State#state{content_type_a=Provided}) + end; +match_media_type_params(Req, State, Accept, + [Provided = {PMT = {TP, STP, Params_P0}, Fun}|Tail], + MediaType = {{_TA, _STA, Params_A}, _QA, _APA}) -> + case lists:sort(Params_P0) =:= lists:sort(Params_A) of + true when TP =:= <<"text">> -> + %% When a charset was provided explicitly in both the charset header + %% and the media types provided and the negotiation is successful, + %% we keep the charset and don't call charsets_provided. This only + %% applies to text media types, however. + {Charset, Params_P} = case lists:keytake(<<"charset">>, 1, Params_P0) of + false -> {undefined, Params_P0}; + {value, {_, Charset0}, Params_P1} -> {Charset0, Params_P1} + end, + languages_provided(Req#{media_type => {TP, STP, Params_P}}, + State#state{content_type_a={{TP, STP, Params_P}, Fun}, + charset_a=Charset}); + true -> + languages_provided(Req#{media_type => PMT}, + State#state{content_type_a=Provided}); + false -> + match_media_type(Req, State, Accept, Tail, MediaType) + end. + +%% languages_provided should return a list of binary values indicating +%% which languages are accepted by the resource. +%% +%% @todo I suppose we should also ask the resource if it wants to +%% set a language itself or if it wants it to be automatically chosen. +languages_provided(Req, State) -> + case call(Req, State, languages_provided) of + no_call -> + charsets_provided(Req, State); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {[], Req2, State2} -> + not_acceptable(Req2, State2); + {LP, Req2, State2} -> + State3 = State2#state{languages_p=LP}, + case cowboy_req:parse_header(<<"accept-language">>, Req2) of + undefined -> + set_language(Req2, State3#state{language_a=hd(LP)}); + AcceptLanguage -> + AcceptLanguage2 = prioritize_languages(AcceptLanguage), + choose_language(Req2, State3, AcceptLanguage2) + end + end. + +%% A language-range matches a language-tag if it exactly equals the tag, +%% or if it exactly equals a prefix of the tag such that the first tag +%% character following the prefix is "-". 
The special range "*", if +%% present in the Accept-Language field, matches every tag not matched +%% by any other range present in the Accept-Language field. +%% +%% @todo The last sentence probably means we should always put '*' +%% at the end of the list. +prioritize_languages(AcceptLanguages) -> + lists:sort( + fun ({_TagA, QualityA}, {_TagB, QualityB}) -> + QualityA > QualityB + end, AcceptLanguages). + +choose_language(Req, State, []) -> + not_acceptable(Req, State); +choose_language(Req, State=#state{languages_p=LP}, [Language|Tail]) -> + match_language(Req, State, Tail, LP, Language). + +match_language(Req, State, Accept, [], _Language) -> + choose_language(Req, State, Accept); +match_language(Req, State, _Accept, [Provided|_Tail], {'*', _Quality}) -> + set_language(Req, State#state{language_a=Provided}); +match_language(Req, State, _Accept, [Provided|_Tail], {Provided, _Quality}) -> + set_language(Req, State#state{language_a=Provided}); +match_language(Req, State, Accept, [Provided|Tail], + Language = {Tag, _Quality}) -> + Length = byte_size(Tag), + case Provided of + << Tag:Length/binary, $-, _Any/bits >> -> + set_language(Req, State#state{language_a=Provided}); + _Any -> + match_language(Req, State, Accept, Tail, Language) + end. + +set_language(Req, State=#state{language_a=Language}) -> + Req2 = cowboy_req:set_resp_header(<<"content-language">>, Language, Req), + charsets_provided(Req2#{language => Language}, State). + +%% charsets_provided should return a list of binary values indicating +%% which charsets are accepted by the resource. +%% +%% A charset may have been selected while negotiating the accept header. +%% There's no need to select one again. +charsets_provided(Req, State=#state{charset_a=Charset}) + when Charset =/= undefined -> + set_content_type(Req, State); +%% If charsets_p is defined, use it instead of calling charsets_provided +%% again. We also call this clause during normal execution to avoid +%% duplicating code. +charsets_provided(Req, State=#state{charsets_p=[]}) -> + not_acceptable(Req, State); +charsets_provided(Req, State=#state{charsets_p=CP}) + when CP =/= undefined -> + case cowboy_req:parse_header(<<"accept-charset">>, Req) of + undefined -> + set_content_type(Req, State#state{charset_a=hd(CP)}); + AcceptCharset0 -> + AcceptCharset = prioritize_charsets(AcceptCharset0), + choose_charset(Req, State, AcceptCharset) + end; +charsets_provided(Req, State) -> + case call(Req, State, charsets_provided) of + no_call -> + set_content_type(Req, State); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {CP, Req2, State2} -> + charsets_provided(Req2, State2#state{charsets_p=CP}) + end. + +prioritize_charsets(AcceptCharsets) -> + lists:sort( + fun ({_CharsetA, QualityA}, {_CharsetB, QualityB}) -> + QualityA > QualityB + end, AcceptCharsets). + +choose_charset(Req, State, []) -> + not_acceptable(Req, State); +%% A q-value of 0 means not acceptable. +choose_charset(Req, State, [{_, 0}|Tail]) -> + choose_charset(Req, State, Tail); +choose_charset(Req, State=#state{charsets_p=CP}, [Charset|Tail]) -> + match_charset(Req, State, Tail, CP, Charset). 
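languages_provided/2 and charsets_provided/2 drive the negotiation steps above in the same way: each returns a list of binaries in order of server preference, and the first entry is picked when the client sends no accept-language or accept-charset header. A minimal sketch for a handler:

languages_provided(Req, State) ->
    {[<<"en">>, <<"fr">>], Req, State}.

charsets_provided(Req, State) ->
    {[<<"utf-8">>], Req, State}.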
+ +match_charset(Req, State, Accept, [], _Charset) -> + choose_charset(Req, State, Accept); +match_charset(Req, State, _Accept, [Provided|_], {<<"*">>, _}) -> + set_content_type(Req, State#state{charset_a=Provided}); +match_charset(Req, State, _Accept, [Provided|_], {Provided, _}) -> + set_content_type(Req, State#state{charset_a=Provided}); +match_charset(Req, State, Accept, [_|Tail], Charset) -> + match_charset(Req, State, Accept, Tail, Charset). + +set_content_type(Req, State=#state{ + content_type_a={{Type, SubType, Params}, _Fun}, + charset_a=Charset}) -> + ParamsBin = set_content_type_build_params(Params, []), + ContentType = [Type, <<"/">>, SubType, ParamsBin], + ContentType2 = case {Type, Charset} of + {<<"text">>, Charset} when Charset =/= undefined -> + [ContentType, <<"; charset=">>, Charset]; + _ -> + ContentType + end, + Req2 = cowboy_req:set_resp_header(<<"content-type">>, ContentType2, Req), + encodings_provided(Req2#{charset => Charset}, State). + +set_content_type_build_params('*', []) -> + <<>>; +set_content_type_build_params([], []) -> + <<>>; +set_content_type_build_params([], Acc) -> + lists:reverse(Acc); +set_content_type_build_params([{Attr, Value}|Tail], Acc) -> + set_content_type_build_params(Tail, [[Attr, <<"=">>, Value], <<";">>|Acc]). + +%% @todo Match for identity as we provide nothing else for now. +%% @todo Don't forget to set the Content-Encoding header when we reply a body +%% and the found encoding is something other than identity. +encodings_provided(Req, State) -> + ranges_provided(Req, State). + +not_acceptable(Req, State) -> + respond(Req, State, 406). + +ranges_provided(Req, State) -> + case call(Req, State, ranges_provided) of + no_call -> + variances(Req, State); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {[], Req2, State2} -> + Req3 = cowboy_req:set_resp_header(<<"accept-ranges">>, <<"none">>, Req2), + variances(Req3, State2#state{ranges_a=[]}); + {RP, Req2, State2} -> + <<", ", AcceptRanges/binary>> = <<<<", ", R/binary>> || {R, _} <- RP>>, + Req3 = cowboy_req:set_resp_header(<<"accept-ranges">>, AcceptRanges, Req2), + variances(Req3, State2#state{ranges_a=RP}) + end. + +%% variances/2 should return a list of headers that will be added +%% to the Vary response header. The Accept, Accept-Language, +%% Accept-Charset and Accept-Encoding headers do not need to be +%% specified. +%% +%% @todo Do Accept-Encoding too when we handle it. +%% @todo Does the order matter? +variances(Req, State=#state{content_types_p=CTP, + languages_p=LP, charsets_p=CP}) -> + Variances = case CTP of + [] -> []; + [_] -> []; + [_|_] -> [<<"accept">>] + end, + Variances2 = case LP of + [] -> Variances; + [_] -> Variances; + [_|_] -> [<<"accept-language">>|Variances] + end, + Variances3 = case CP of + undefined -> Variances2; + [] -> Variances2; + [_] -> Variances2; + [_|_] -> [<<"accept-charset">>|Variances2] + end, + try variances(Req, State, Variances3) of + {Variances4, Req2, State2} -> + case [[<<", ">>, V] || V <- Variances4] of + [] -> + resource_exists(Req2, State2); + [[<<", ">>, H]|Variances5] -> + Req3 = cowboy_req:set_resp_header( + <<"vary">>, [H|Variances5], Req2), + resource_exists(Req3, State2) + end + catch Class:Reason:Stacktrace -> + error_terminate(Req, State, Class, Reason, Stacktrace) + end. 
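ranges_provided/2, handled above, advertises the listed units in the accept-ranges header and maps each unit to a callback; the atom auto (for bytes ranges) tells cowboy_rest to call the normal ProvideCallback and split the body itself, as implemented further down in set_ranged_body_auto. A minimal sketch:

ranges_provided(Req, State) ->
    %% Accept bytes ranges and let cowboy_rest split the response body.
    {[{<<"bytes">>, auto}], Req, State}.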
+ +variances(Req, State, Variances) -> + case unsafe_call(Req, State, variances) of + no_call -> + {Variances, Req, State}; + {HandlerVariances, Req2, State2} -> + {Variances ++ HandlerVariances, Req2, State2} + end. + +resource_exists(Req, State) -> + expect(Req, State, resource_exists, true, + fun if_match_exists/2, fun if_match_must_not_exist/2). + +if_match_exists(Req, State) -> + State2 = State#state{exists=true}, + case cowboy_req:parse_header(<<"if-match">>, Req) of + undefined -> + if_unmodified_since_exists(Req, State2); + '*' -> + if_unmodified_since_exists(Req, State2); + ETagsList -> + if_match(Req, State2, ETagsList) + end. + +if_match(Req, State, EtagsList) -> + try generate_etag(Req, State) of + %% Strong Etag comparison: weak Etag never matches. + {{weak, _}, Req2, State2} -> + precondition_failed(Req2, State2); + {Etag, Req2, State2} -> + case lists:member(Etag, EtagsList) of + true -> if_none_match_exists(Req2, State2); + %% Etag may be `undefined' which cannot be a member. + false -> precondition_failed(Req2, State2) + end + catch Class:Reason:Stacktrace -> + error_terminate(Req, State, Class, Reason, Stacktrace) + end. + +if_match_must_not_exist(Req, State) -> + case cowboy_req:header(<<"if-match">>, Req) of + undefined -> is_put_to_missing_resource(Req, State); + _ -> precondition_failed(Req, State) + end. + +if_unmodified_since_exists(Req, State) -> + try cowboy_req:parse_header(<<"if-unmodified-since">>, Req) of + undefined -> + if_none_match_exists(Req, State); + IfUnmodifiedSince -> + if_unmodified_since(Req, State, IfUnmodifiedSince) + catch _:_ -> + if_none_match_exists(Req, State) + end. + +%% If LastModified is the atom 'no_call', we continue. +if_unmodified_since(Req, State, IfUnmodifiedSince) -> + try last_modified(Req, State) of + {LastModified, Req2, State2} -> + case LastModified > IfUnmodifiedSince of + true -> precondition_failed(Req2, State2); + false -> if_none_match_exists(Req2, State2) + end + catch Class:Reason:Stacktrace -> + error_terminate(Req, State, Class, Reason, Stacktrace) + end. + +if_none_match_exists(Req, State) -> + case cowboy_req:parse_header(<<"if-none-match">>, Req) of + undefined -> + if_modified_since_exists(Req, State); + '*' -> + precondition_is_head_get(Req, State); + EtagsList -> + if_none_match(Req, State, EtagsList) + end. + +if_none_match(Req, State, EtagsList) -> + try generate_etag(Req, State) of + {Etag, Req2, State2} -> + case Etag of + undefined -> + precondition_failed(Req2, State2); + Etag -> + case is_weak_match(Etag, EtagsList) of + true -> precondition_is_head_get(Req2, State2); + false -> method(Req2, State2) + end + end + catch Class:Reason:Stacktrace -> + error_terminate(Req, State, Class, Reason, Stacktrace) + end. + +%% Weak Etag comparison: only check the opaque tag. +is_weak_match(_, []) -> + false; +is_weak_match({_, Tag}, [{_, Tag}|_]) -> + true; +is_weak_match(Etag, [_|Tail]) -> + is_weak_match(Etag, Tail). + +precondition_is_head_get(Req, State=#state{method=Method}) + when Method =:= <<"HEAD">>; Method =:= <<"GET">> -> + not_modified(Req, State); +precondition_is_head_get(Req, State) -> + precondition_failed(Req, State). + +if_modified_since_exists(Req, State) -> + try cowboy_req:parse_header(<<"if-modified-since">>, Req) of + undefined -> + method(Req, State); + IfModifiedSince -> + if_modified_since_now(Req, State, IfModifiedSince) + catch _:_ -> + method(Req, State) + end. 
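The if-match, if-none-match and if-(un)modified-since logic above is driven by generate_etag/2 and last_modified/2, whose results are cached in the state record so each is called at most once per request. A sketch with hypothetical values:

generate_etag(Req, State) ->
    {{strong, <<"v1">>}, Req, State}.

last_modified(Req, State) ->
    {{{2022, 9, 9}, {22, 1, 2}}, Req, State}.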
+ +if_modified_since_now(Req, State, IfModifiedSince) -> + case IfModifiedSince > erlang:universaltime() of + true -> method(Req, State); + false -> if_modified_since(Req, State, IfModifiedSince) + end. + +if_modified_since(Req, State, IfModifiedSince) -> + try last_modified(Req, State) of + {undefined, Req2, State2} -> + method(Req2, State2); + {LastModified, Req2, State2} -> + case LastModified > IfModifiedSince of + true -> method(Req2, State2); + false -> not_modified(Req2, State2) + end + catch Class:Reason:Stacktrace -> + error_terminate(Req, State, Class, Reason, Stacktrace) + end. + +not_modified(Req, State) -> + Req2 = cowboy_req:delete_resp_header(<<"content-type">>, Req), + try set_resp_etag(Req2, State) of + {Req3, State2} -> + try set_resp_expires(Req3, State2) of + {Req4, State3} -> + respond(Req4, State3, 304) + catch Class:Reason:Stacktrace -> + error_terminate(Req, State2, Class, Reason, Stacktrace) + end + catch Class:Reason:Stacktrace -> + error_terminate(Req, State, Class, Reason, Stacktrace) + end. + +precondition_failed(Req, State) -> + respond(Req, State, 412). + +is_put_to_missing_resource(Req, State=#state{method= <<"PUT">>}) -> + moved_permanently(Req, State, fun is_conflict/2); +is_put_to_missing_resource(Req, State) -> + previously_existed(Req, State). + +%% moved_permanently/2 should return either false or {true, Location} +%% with Location the full new URI of the resource. +moved_permanently(Req, State, OnFalse) -> + case call(Req, State, moved_permanently) of + {{true, Location}, Req2, State2} -> + Req3 = cowboy_req:set_resp_header( + <<"location">>, Location, Req2), + respond(Req3, State2, 301); + {false, Req2, State2} -> + OnFalse(Req2, State2); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + no_call -> + OnFalse(Req, State) + end. + +previously_existed(Req, State) -> + expect(Req, State, previously_existed, false, + fun (R, S) -> is_post_to_missing_resource(R, S, 404) end, + fun (R, S) -> moved_permanently(R, S, fun moved_temporarily/2) end). + +%% moved_temporarily/2 should return either false or {true, Location} +%% with Location the full new URI of the resource. +moved_temporarily(Req, State) -> + case call(Req, State, moved_temporarily) of + {{true, Location}, Req2, State2} -> + Req3 = cowboy_req:set_resp_header( + <<"location">>, Location, Req2), + respond(Req3, State2, 307); + {false, Req2, State2} -> + is_post_to_missing_resource(Req2, State2, 410); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + no_call -> + is_post_to_missing_resource(Req, State, 410) + end. + +is_post_to_missing_resource(Req, State=#state{method= <<"POST">>}, OnFalse) -> + allow_missing_post(Req, State, OnFalse); +is_post_to_missing_resource(Req, State, OnFalse) -> + respond(Req, State, OnFalse). + +allow_missing_post(Req, State, OnFalse) -> + expect(Req, State, allow_missing_post, true, fun accept_resource/2, OnFalse). 
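When the resource is missing, the flow above consults moved_permanently/2 and then moved_temporarily/2; returning {true, URI} produces a 301 or 307 with a location header, while false continues towards the 404/410 handling. A sketch (the target URI is made up):

moved_permanently(Req, State) ->
    {{true, <<"/items/42">>}, Req, State}.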
+ +method(Req, State=#state{method= <<"DELETE">>}) -> + delete_resource(Req, State); +method(Req, State=#state{method= <<"PUT">>}) -> + is_conflict(Req, State); +method(Req, State=#state{method=Method}) + when Method =:= <<"POST">>; Method =:= <<"PATCH">> -> + accept_resource(Req, State); +method(Req, State=#state{method=Method}) + when Method =:= <<"GET">>; Method =:= <<"HEAD">> -> + set_resp_body_etag(Req, State); +method(Req, State) -> + multiple_choices(Req, State). + +%% delete_resource/2 should start deleting the resource and return. +delete_resource(Req, State) -> + expect(Req, State, delete_resource, false, 500, fun delete_completed/2). + +%% delete_completed/2 indicates whether the resource has been deleted yet. +delete_completed(Req, State) -> + expect(Req, State, delete_completed, true, fun has_resp_body/2, 202). + +is_conflict(Req, State) -> + expect(Req, State, is_conflict, false, fun accept_resource/2, 409). + +%% content_types_accepted should return a list of media types and their +%% associated callback functions in the same format as content_types_provided. +%% +%% The callback will then be called and is expected to process the content +%% pushed to the resource in the request body. +%% +%% content_types_accepted SHOULD return a different list +%% for each HTTP method. +accept_resource(Req, State) -> + case call(Req, State, content_types_accepted) of + no_call -> + respond(Req, State, 415); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {CTA, Req2, State2} -> + CTA2 = [normalize_content_types(P) || P <- CTA], + try cowboy_req:parse_header(<<"content-type">>, Req2) of + %% We do not match against the boundary parameter for multipart. + {Type = <<"multipart">>, SubType, Params} -> + ContentType = {Type, SubType, lists:keydelete(<<"boundary">>, 1, Params)}, + choose_content_type(Req2, State2, ContentType, CTA2); + ContentType -> + choose_content_type(Req2, State2, ContentType, CTA2) + catch _:_ -> + respond(Req2, State2, 415) + end + end. + +%% The special content type '*' will always match. It can be used as a +%% catch-all content type for accepting any kind of request content. +%% Note that because it will always match, it should be the last of the +%% list of content types, otherwise it'll shadow the ones following. +choose_content_type(Req, State, _ContentType, []) -> + respond(Req, State, 415); +choose_content_type(Req, State, ContentType, [{Accepted, Fun}|_Tail]) + when Accepted =:= '*'; Accepted =:= ContentType -> + process_content_type(Req, State, Fun); +%% The special parameter '*' will always match any kind of content type +%% parameters. +%% Note that because it will always match, it should be the last of the +%% list for specific content type, otherwise it'll shadow the ones following. +choose_content_type(Req, State, {Type, SubType, Param}, + [{{Type, SubType, AcceptedParam}, Fun}|_Tail]) + when AcceptedParam =:= '*'; AcceptedParam =:= Param -> + process_content_type(Req, State, Fun); +choose_content_type(Req, State, ContentType, [_Any|Tail]) -> + choose_content_type(Req, State, ContentType, Tail). 
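content_types_accepted/2 mirrors content_types_provided/2 for request bodies: each accepted media type maps to an AcceptCallback that reads and processes the body, returning true, false, or one of the {created, URI} / {see_other, URI} / {true, URI} tuples handled by process_content_type/3 next. A hedged sketch (from_json and its body handling are hypothetical, assuming cowboy_req:read_body/1):

content_types_accepted(Req, State) ->
    {[{{<<"application">>, <<"json">>, '*'}, from_json}], Req, State}.

from_json(Req0, State) ->
    {ok, _Body, Req} = cowboy_req:read_body(Req0),
    %% true means the body was accepted and processed successfully.
    {true, Req, State}.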
+ +process_content_type(Req, State=#state{method=Method, exists=Exists}, Fun) -> + try case call(Req, State, Fun) of + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {true, Req2, State2} when Exists -> + next(Req2, State2, fun has_resp_body/2); + {true, Req2, State2} -> + next(Req2, State2, fun maybe_created/2); + {false, Req2, State2} -> + respond(Req2, State2, 400); + {{created, ResURL}, Req2, State2} when Method =:= <<"POST">> -> + Req3 = cowboy_req:set_resp_header( + <<"location">>, ResURL, Req2), + respond(Req3, State2, 201); + {{see_other, ResURL}, Req2, State2} when Method =:= <<"POST">> -> + Req3 = cowboy_req:set_resp_header( + <<"location">>, ResURL, Req2), + respond(Req3, State2, 303); + {{true, ResURL}, Req2, State2} when Method =:= <<"POST">> -> + Req3 = cowboy_req:set_resp_header( + <<"location">>, ResURL, Req2), + if + Exists -> respond(Req3, State2, 303); + true -> respond(Req3, State2, 201) + end + end catch Class:Reason = {case_clause, no_call}:Stacktrace -> + error_terminate(Req, State, Class, Reason, Stacktrace) + end. + +%% If PUT was used then the resource has been created at the current URL. +%% Otherwise, if a location header has been set then the resource has been +%% created at a new URL. If not, send a 200 or 204 as expected from a +%% POST or PATCH request. +maybe_created(Req, State=#state{method= <<"PUT">>}) -> + respond(Req, State, 201); +maybe_created(Req, State) -> + case cowboy_req:has_resp_header(<<"location">>, Req) of + true -> respond(Req, State, 201); + false -> has_resp_body(Req, State) + end. + +has_resp_body(Req, State) -> + case cowboy_req:has_resp_body(Req) of + true -> multiple_choices(Req, State); + false -> respond(Req, State, 204) + end. + +%% Set the Etag header if any for the response provided. +set_resp_body_etag(Req, State) -> + try set_resp_etag(Req, State) of + {Req2, State2} -> + set_resp_body_last_modified(Req2, State2) + catch Class:Reason:Stacktrace -> + error_terminate(Req, State, Class, Reason, Stacktrace) + end. + +%% Set the Last-Modified header if any for the response provided. +set_resp_body_last_modified(Req, State) -> + try last_modified(Req, State) of + {LastModified, Req2, State2} -> + case LastModified of + LastModified when is_atom(LastModified) -> + set_resp_body_expires(Req2, State2); + LastModified -> + LastModifiedBin = cowboy_clock:rfc1123(LastModified), + Req3 = cowboy_req:set_resp_header( + <<"last-modified">>, LastModifiedBin, Req2), + set_resp_body_expires(Req3, State2) + end + catch Class:Reason:Stacktrace -> + error_terminate(Req, State, Class, Reason, Stacktrace) + end. + +%% Set the Expires header if any for the response provided. +set_resp_body_expires(Req, State) -> + try set_resp_expires(Req, State) of + {Req2, State2} -> + if_range(Req2, State2) + catch Class:Reason:Stacktrace -> + error_terminate(Req, State, Class, Reason, Stacktrace) + end. + +%% When both the if-range and range headers are set, we perform +%% a strong comparison. If it fails, we send a full response. +if_range(Req=#{headers := #{<<"if-range">> := _, <<"range">> := _}}, + State=#state{etag=Etag}) -> + try cowboy_req:parse_header(<<"if-range">>, Req) of + %% Strong etag comparison is an exact match with the generate_etag result. 
+ Etag={strong, _} -> + range(Req, State); + %% We cannot do a strong date comparison because we have + %% no way of knowing whether the representation changed + %% twice during the second covered by the presented + %% validator. (RFC7232 2.2.2) + _ -> + set_resp_body(Req, State) + catch _:_ -> + set_resp_body(Req, State) + end; +if_range(Req, State) -> + range(Req, State). + +range(Req, State=#state{ranges_a=[]}) -> + set_resp_body(Req, State); +range(Req, State) -> + try cowboy_req:parse_header(<<"range">>, Req) of + undefined -> + set_resp_body(Req, State); + %% @todo Maybe change parse_header to return <<"bytes">> in 3.0. + {bytes, BytesRange} -> + choose_range(Req, State, {<<"bytes">>, BytesRange}); + Range -> + choose_range(Req, State, Range) + catch _:_ -> + %% We send a 416 response back when we can't parse the + %% range header at all. I'm not sure this is the right + %% way to go but at least this can help clients identify + %% what went wrong when their range requests never work. + range_not_satisfiable(Req, State, undefined) + end. + +choose_range(Req, State=#state{ranges_a=RangesAccepted}, Range={RangeUnit, _}) -> + case lists:keyfind(RangeUnit, 1, RangesAccepted) of + {_, Callback} -> + %% We pass the selected range onward in the Req. + range_satisfiable(Req#{range => Range}, State, Callback); + false -> + set_resp_body(Req, State) + end. + +range_satisfiable(Req, State, Callback) -> + case call(Req, State, range_satisfiable) of + no_call -> + set_ranged_body(Req, State, Callback); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {true, Req2, State2} -> + set_ranged_body(Req2, State2, Callback); + {false, Req2, State2} -> + range_not_satisfiable(Req2, State2, undefined); + {{false, Int}, Req2, State2} when is_integer(Int) -> + range_not_satisfiable(Req2, State2, [<<"*/">>, integer_to_binary(Int)]); + {{false, Iodata}, Req2, State2} when is_binary(Iodata); is_list(Iodata) -> + range_not_satisfiable(Req2, State2, Iodata) + end. + +%% When the callback selected is 'auto' and the range unit +%% is bytes, we call the normal provide callback and split +%% the content automatically. +set_ranged_body(Req=#{range := {<<"bytes">>, _}}, State, auto) -> + set_ranged_body_auto(Req, State); +set_ranged_body(Req, State, Callback) -> + set_ranged_body_callback(Req, State, Callback). + +set_ranged_body_auto(Req, State=#state{handler=Handler, content_type_a={_, Callback}}) -> + try case call(Req, State, Callback) of + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {Body, Req2, State2} -> + maybe_set_ranged_body_auto(Req2, State2, Body) + end catch Class:{case_clause, no_call}:Stacktrace -> + error_terminate(Req, State, Class, {error, {missing_callback, {Handler, Callback, 2}}, + 'A callback specified in content_types_provided/2 is not exported.'}, + Stacktrace) + end. 
+ +maybe_set_ranged_body_auto(Req=#{range := {_, Ranges}}, State, Body) -> + Size = case Body of + {sendfile, _, Bytes, _} -> Bytes; + _ -> iolist_size(Body) + end, + Checks = [case Range of + {From, infinity} -> From < Size; + {From, To} -> (From < Size) andalso (From =< To) andalso (To =< Size); + Neg -> (Neg =/= 0) andalso (-Neg < Size) + end || Range <- Ranges], + case lists:usort(Checks) of + [true] -> set_ranged_body_auto(Req, State, Body); + _ -> range_not_satisfiable(Req, State, [<<"*/">>, integer_to_binary(Size)]) + end. + +%% We might also want to have some checks about range order, +%% number of ranges, and perhaps also join ranges that are +%% too close into one contiguous range. Some of these can +%% be done before calling the ProvideCallback. + +set_ranged_body_auto(Req=#{range := {_, Ranges}}, State, Body) -> + Parts = [ranged_partition(Range, Body) || Range <- Ranges], + case Parts of + [OnePart] -> set_one_ranged_body(Req, State, OnePart); + _ when is_tuple(Body) -> send_multipart_ranged_body(Req, State, Parts); + _ -> set_multipart_ranged_body(Req, State, Parts) + end. + +ranged_partition(Range, {sendfile, Offset0, Bytes0, Path}) -> + {From, To, Offset, Bytes} = case Range of + {From0, infinity} -> {From0, Bytes0 - 1, Offset0 + From0, Bytes0 - From0}; + {From0, To0} -> {From0, To0, Offset0 + From0, 1 + To0 - From0}; + Neg -> {Bytes0 + Neg, Bytes0 - 1, Offset0 + Bytes0 + Neg, -Neg} + end, + {{From, To, Bytes0}, {sendfile, Offset, Bytes, Path}}; +ranged_partition(Range, Data0) -> + Total = iolist_size(Data0), + {From, To, Data} = case Range of + {From0, infinity} -> + {_, Data1} = cow_iolists:split(From0, Data0), + {From0, Total - 1, Data1}; + {From0, To0} -> + {_, Data1} = cow_iolists:split(From0, Data0), + {Data2, _} = cow_iolists:split(To0 - From0 + 1, Data1), + {From0, To0, Data2}; + Neg -> + {_, Data1} = cow_iolists:split(Total + Neg, Data0), + {Total + Neg, Total - 1, Data1} + end, + {{From, To, Total}, Data}. + +-ifdef(TEST). +ranged_partition_test_() -> + Tests = [ + %% Sendfile with open-ended range. + {{0, infinity}, {sendfile, 0, 12, "t"}, {{0, 11, 12}, {sendfile, 0, 12, "t"}}}, + {{6, infinity}, {sendfile, 0, 12, "t"}, {{6, 11, 12}, {sendfile, 6, 6, "t"}}}, + {{11, infinity}, {sendfile, 0, 12, "t"}, {{11, 11, 12}, {sendfile, 11, 1, "t"}}}, + %% Sendfile with open-ended range. Sendfile tuple has an offset originally. + {{0, infinity}, {sendfile, 3, 12, "t"}, {{0, 11, 12}, {sendfile, 3, 12, "t"}}}, + {{6, infinity}, {sendfile, 3, 12, "t"}, {{6, 11, 12}, {sendfile, 9, 6, "t"}}}, + {{11, infinity}, {sendfile, 3, 12, "t"}, {{11, 11, 12}, {sendfile, 14, 1, "t"}}}, + %% Sendfile with a specific range. + {{0, 11}, {sendfile, 0, 12, "t"}, {{0, 11, 12}, {sendfile, 0, 12, "t"}}}, + {{6, 11}, {sendfile, 0, 12, "t"}, {{6, 11, 12}, {sendfile, 6, 6, "t"}}}, + {{11, 11}, {sendfile, 0, 12, "t"}, {{11, 11, 12}, {sendfile, 11, 1, "t"}}}, + {{1, 10}, {sendfile, 0, 12, "t"}, {{1, 10, 12}, {sendfile, 1, 10, "t"}}}, + %% Sendfile with a specific range. Sendfile tuple has an offset originally. + {{0, 11}, {sendfile, 3, 12, "t"}, {{0, 11, 12}, {sendfile, 3, 12, "t"}}}, + {{6, 11}, {sendfile, 3, 12, "t"}, {{6, 11, 12}, {sendfile, 9, 6, "t"}}}, + {{11, 11}, {sendfile, 3, 12, "t"}, {{11, 11, 12}, {sendfile, 14, 1, "t"}}}, + {{1, 10}, {sendfile, 3, 12, "t"}, {{1, 10, 12}, {sendfile, 4, 10, "t"}}}, + %% Sendfile with negative range. 
+ {-12, {sendfile, 0, 12, "t"}, {{0, 11, 12}, {sendfile, 0, 12, "t"}}}, + {-6, {sendfile, 0, 12, "t"}, {{6, 11, 12}, {sendfile, 6, 6, "t"}}}, + {-1, {sendfile, 0, 12, "t"}, {{11, 11, 12}, {sendfile, 11, 1, "t"}}}, + %% Sendfile with negative range. Sendfile tuple has an offset originally. + {-12, {sendfile, 3, 12, "t"}, {{0, 11, 12}, {sendfile, 3, 12, "t"}}}, + {-6, {sendfile, 3, 12, "t"}, {{6, 11, 12}, {sendfile, 9, 6, "t"}}}, + {-1, {sendfile, 3, 12, "t"}, {{11, 11, 12}, {sendfile, 14, 1, "t"}}}, + %% Iodata with open-ended range. + {{0, infinity}, <<"Hello world!">>, {{0, 11, 12}, <<"Hello world!">>}}, + {{6, infinity}, <<"Hello world!">>, {{6, 11, 12}, <<"world!">>}}, + {{11, infinity}, <<"Hello world!">>, {{11, 11, 12}, <<"!">>}}, + %% Iodata with a specific range. The resulting data is + %% wrapped in a list because of how cow_iolists:split/2 works. + {{0, 11}, <<"Hello world!">>, {{0, 11, 12}, [<<"Hello world!">>]}}, + {{6, 11}, <<"Hello world!">>, {{6, 11, 12}, [<<"world!">>]}}, + {{11, 11}, <<"Hello world!">>, {{11, 11, 12}, [<<"!">>]}}, + {{1, 10}, <<"Hello world!">>, {{1, 10, 12}, [<<"ello world">>]}}, + %% Iodata with negative range. + {-12, <<"Hello world!">>, {{0, 11, 12}, <<"Hello world!">>}}, + {-6, <<"Hello world!">>, {{6, 11, 12}, <<"world!">>}}, + {-1, <<"Hello world!">>, {{11, 11, 12}, <<"!">>}} + ], + [{iolist_to_binary(io_lib:format("range ~p data ~p", [VR, VD])), + fun() -> R = ranged_partition(VR, VD) end} || {VR, VD, R} <- Tests]. +-endif. + +set_ranged_body_callback(Req, State=#state{handler=Handler}, Callback) -> + try case call(Req, State, Callback) of + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + %% When we receive a single range, we send it directly. + {[OneRange], Req2, State2} -> + set_one_ranged_body(Req2, State2, OneRange); + %% When we receive multiple ranges we have to send them as multipart/byteranges. + %% This also applies to non-bytes units. (RFC7233 A) If users don't want to use + %% this for non-bytes units they can always return a single range with a binary + %% content-range information. + {Ranges, Req2, State2} when length(Ranges) > 1 -> + %% We have to check whether there are sendfile tuples in the + %% ranges to be sent. If there are we must use stream_reply. + HasSendfile = [] =/= [true || {_, {sendfile, _, _, _}} <- Ranges], + case HasSendfile of + true -> send_multipart_ranged_body(Req2, State2, Ranges); + false -> set_multipart_ranged_body(Req2, State2, Ranges) + end + end catch Class:{case_clause, no_call}:Stacktrace -> + error_terminate(Req, State, Class, {error, {missing_callback, {Handler, Callback, 2}}, + 'A callback specified in ranges_provided/2 is not exported.'}, + Stacktrace) + end. + +set_one_ranged_body(Req0, State, OneRange) -> + {ContentRange, Body} = prepare_range(Req0, OneRange), + Req1 = cowboy_req:set_resp_header(<<"content-range">>, ContentRange, Req0), + Req = cowboy_req:set_resp_body(Body, Req1), + respond(Req, State, 206). 
+ +set_multipart_ranged_body(Req, State, [FirstRange|MoreRanges]) -> + Boundary = cow_multipart:boundary(), + ContentType = cowboy_req:resp_header(<<"content-type">>, Req), + {FirstContentRange, FirstPartBody} = prepare_range(Req, FirstRange), + FirstPartHead = cow_multipart:first_part(Boundary, [ + {<<"content-type">>, ContentType}, + {<<"content-range">>, FirstContentRange} + ]), + MoreParts = [begin + {NextContentRange, NextPartBody} = prepare_range(Req, NextRange), + NextPartHead = cow_multipart:part(Boundary, [ + {<<"content-type">>, ContentType}, + {<<"content-range">>, NextContentRange} + ]), + [NextPartHead, NextPartBody] + end || NextRange <- MoreRanges], + Body = [FirstPartHead, FirstPartBody, MoreParts, cow_multipart:close(Boundary)], + Req2 = cowboy_req:set_resp_header(<<"content-type">>, + [<<"multipart/byteranges; boundary=">>, Boundary], Req), + Req3 = cowboy_req:set_resp_body(Body, Req2), + respond(Req3, State, 206). + +%% Similar to set_multipart_ranged_body except we have to stream +%% the data because the parts contain sendfile tuples. +send_multipart_ranged_body(Req, State, [FirstRange|MoreRanges]) -> + Boundary = cow_multipart:boundary(), + ContentType = cowboy_req:resp_header(<<"content-type">>, Req), + Req2 = cowboy_req:set_resp_header(<<"content-type">>, + [<<"multipart/byteranges; boundary=">>, Boundary], Req), + Req3 = cowboy_req:stream_reply(206, Req2), + {FirstContentRange, FirstPartBody} = prepare_range(Req, FirstRange), + FirstPartHead = cow_multipart:first_part(Boundary, [ + {<<"content-type">>, ContentType}, + {<<"content-range">>, FirstContentRange} + ]), + cowboy_req:stream_body(FirstPartHead, nofin, Req3), + cowboy_req:stream_body(FirstPartBody, nofin, Req3), + _ = [begin + {NextContentRange, NextPartBody} = prepare_range(Req, NextRange), + NextPartHead = cow_multipart:part(Boundary, [ + {<<"content-type">>, ContentType}, + {<<"content-range">>, NextContentRange} + ]), + cowboy_req:stream_body(NextPartHead, nofin, Req3), + cowboy_req:stream_body(NextPartBody, nofin, Req3), + [NextPartHead, NextPartBody] + end || NextRange <- MoreRanges], + cowboy_req:stream_body(cow_multipart:close(Boundary), fin, Req3), + terminate(Req3, State). + +prepare_range(#{range := {RangeUnit, _}}, {{From, To, Total0}, Body}) -> + Total = case Total0 of + '*' -> <<"*">>; + _ -> integer_to_binary(Total0) + end, + ContentRange = [RangeUnit, $\s, integer_to_binary(From), + $-, integer_to_binary(To), $/, Total], + {ContentRange, Body}; +prepare_range(#{range := {RangeUnit, _}}, {RangeData, Body}) -> + {[RangeUnit, $\s, RangeData], Body}. + +%% We send the content-range header when we can on error. +range_not_satisfiable(Req, State, undefined) -> + respond(Req, State, 416); +range_not_satisfiable(Req0=#{range := {RangeUnit, _}}, State, RangeData) -> + Req = cowboy_req:set_resp_header(<<"content-range">>, + [RangeUnit, $\s, RangeData], Req0), + respond(Req, State, 416). + +%% Set the response headers and call the callback found using +%% content_types_provided/2 to obtain the request body and add +%% it to the response. 
+set_resp_body(Req, State=#state{handler=Handler, content_type_a={_, Callback}}) -> + try case call(Req, State, Callback) of + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {Body, Req2, State2} -> + Req3 = cowboy_req:set_resp_body(Body, Req2), + multiple_choices(Req3, State2) + end catch Class:{case_clause, no_call}:Stacktrace -> + error_terminate(Req, State, Class, {error, {missing_callback, {Handler, Callback, 2}}, + 'A callback specified in content_types_provided/2 is not exported.'}, + Stacktrace) + end. + +multiple_choices(Req, State) -> + expect(Req, State, multiple_choices, false, 200, 300). + +%% Response utility functions. + +set_resp_etag(Req, State) -> + {Etag, Req2, State2} = generate_etag(Req, State), + case Etag of + undefined -> + {Req2, State2}; + Etag -> + Req3 = cowboy_req:set_resp_header( + <<"etag">>, encode_etag(Etag), Req2), + {Req3, State2} + end. + +-spec encode_etag({strong | weak, binary()}) -> iolist(). +encode_etag({strong, Etag}) -> [$",Etag,$"]; +encode_etag({weak, Etag}) -> ["W/\"",Etag,$"]. + +set_resp_expires(Req, State) -> + {Expires, Req2, State2} = expires(Req, State), + case Expires of + Expires when is_atom(Expires) -> + {Req2, State2}; + Expires when is_binary(Expires) -> + Req3 = cowboy_req:set_resp_header( + <<"expires">>, Expires, Req2), + {Req3, State2}; + Expires -> + ExpiresBin = cowboy_clock:rfc1123(Expires), + Req3 = cowboy_req:set_resp_header( + <<"expires">>, ExpiresBin, Req2), + {Req3, State2} + end. + +%% Info retrieval. No logic. + +generate_etag(Req, State=#state{etag=no_call}) -> + {undefined, Req, State}; +generate_etag(Req, State=#state{etag=undefined}) -> + case unsafe_call(Req, State, generate_etag) of + no_call -> + {undefined, Req, State#state{etag=no_call}}; + {Etag, Req2, State2} when is_binary(Etag) -> + Etag2 = cow_http_hd:parse_etag(Etag), + {Etag2, Req2, State2#state{etag=Etag2}}; + {Etag, Req2, State2} -> + {Etag, Req2, State2#state{etag=Etag}} + end; +generate_etag(Req, State=#state{etag=Etag}) -> + {Etag, Req, State}. + +last_modified(Req, State=#state{last_modified=no_call}) -> + {undefined, Req, State}; +last_modified(Req, State=#state{last_modified=undefined}) -> + case unsafe_call(Req, State, last_modified) of + no_call -> + {undefined, Req, State#state{last_modified=no_call}}; + {LastModified, Req2, State2} -> + {LastModified, Req2, State2#state{last_modified=LastModified}} + end; +last_modified(Req, State=#state{last_modified=LastModified}) -> + {LastModified, Req, State}. + +expires(Req, State=#state{expires=no_call}) -> + {undefined, Req, State}; +expires(Req, State=#state{expires=undefined}) -> + case unsafe_call(Req, State, expires) of + no_call -> + {undefined, Req, State#state{expires=no_call}}; + {Expires, Req2, State2} -> + {Expires, Req2, State2#state{expires=Expires}} + end; +expires(Req, State=#state{expires=Expires}) -> + {Expires, Req, State}. + +%% REST primitives. + +expect(Req, State, Callback, Expected, OnTrue, OnFalse) -> + case call(Req, State, Callback) of + no_call -> + next(Req, State, OnTrue); + {stop, Req2, State2} -> + terminate(Req2, State2); + {Switch, Req2, State2} when element(1, Switch) =:= switch_handler -> + switch_handler(Switch, Req2, State2); + {Expected, Req2, State2} -> + next(Req2, State2, OnTrue); + {_Unexpected, Req2, State2} -> + next(Req2, State2, OnFalse) + end. 
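Besides their normal return values, all callbacks dispatched through call/3 below may return stop (terminate after replying) or a switch_handler tuple, which expect/6 above and the other steps pass to switch_handler/3 to hand the request to another sub-protocol. A sketch of a ProvideCallback switching to cowboy_loop to keep streaming after the REST flow ends (the loop handler code itself is not shown):

to_html(Req0, State) ->
    Req = cowboy_req:stream_reply(200, Req0),
    cowboy_req:stream_body(<<"first chunk">>, nofin, Req),
    %% Later messages are handled by the loop handler's info/3 callback.
    {{switch_handler, cowboy_loop}, Req, State}.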
+ +call(Req0, State=#state{handler=Handler, + handler_state=HandlerState0}, Callback) -> + case erlang:function_exported(Handler, Callback, 2) of + true -> + try Handler:Callback(Req0, HandlerState0) of + no_call -> + no_call; + {Result, Req, HandlerState} -> + {Result, Req, State#state{handler_state=HandlerState}} + catch Class:Reason:Stacktrace -> + error_terminate(Req0, State, Class, Reason, Stacktrace) + end; + false -> + no_call + end. + +unsafe_call(Req0, State=#state{handler=Handler, + handler_state=HandlerState0}, Callback) -> + case erlang:function_exported(Handler, Callback, 2) of + false -> + no_call; + true -> + case Handler:Callback(Req0, HandlerState0) of + no_call -> + no_call; + {Result, Req, HandlerState} -> + {Result, Req, State#state{handler_state=HandlerState}} + end + end. + +next(Req, State, Next) when is_function(Next) -> + Next(Req, State); +next(Req, State, StatusCode) when is_integer(StatusCode) -> + respond(Req, State, StatusCode). + +respond(Req0, State, StatusCode) -> + %% We remove the content-type header when there is no body, + %% except when the status code is 200 because it might have + %% been intended (for example sending an empty file). + Req = case cowboy_req:has_resp_body(Req0) of + true when StatusCode =:= 200 -> Req0; + true -> Req0; + false -> cowboy_req:delete_resp_header(<<"content-type">>, Req0) + end, + terminate(cowboy_req:reply(StatusCode, Req), State). + +switch_handler({switch_handler, Mod}, Req, #state{handler_state=HandlerState}) -> + {Mod, Req, HandlerState}; +switch_handler({switch_handler, Mod, Opts}, Req, #state{handler_state=HandlerState}) -> + {Mod, Req, HandlerState, Opts}. + +-spec error_terminate(cowboy_req:req(), #state{}, atom(), any(), any()) -> no_return(). +error_terminate(Req, #state{handler=Handler, handler_state=HandlerState}, Class, Reason, Stacktrace) -> + cowboy_handler:terminate({crash, Class, Reason}, Req, HandlerState, Handler), + erlang:raise(Class, Reason, Stacktrace). + +terminate(Req, #state{handler=Handler, handler_state=HandlerState}) -> + Result = cowboy_handler:terminate(normal, Req, HandlerState, Handler), + {ok, Req, Result}. diff --git a/deps/cowboy/src/cowboy_router.erl b/deps/cowboy/src/cowboy_router.erl new file mode 100644 index 0000000..0b7fe41 --- /dev/null +++ b/deps/cowboy/src/cowboy_router.erl @@ -0,0 +1,603 @@ +%% Copyright (c) 2011-2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +%% Routing middleware. +%% +%% Resolve the handler to be used for the request based on the +%% routing information found in the dispatch environment value. +%% When found, the handler module and associated data are added to +%% the environment as the handler and handler_opts values +%% respectively. +%% +%% If the route cannot be found, processing stops with either +%% a 400 or a 404 reply. +-module(cowboy_router). 
+-behaviour(cowboy_middleware). + +-export([compile/1]). +-export([execute/2]). + +-type bindings() :: #{atom() => any()}. +-type tokens() :: [binary()]. +-export_type([bindings/0]). +-export_type([tokens/0]). + +-type route_match() :: '_' | iodata(). +-type route_path() :: {Path::route_match(), Handler::module(), Opts::any()} + | {Path::route_match(), cowboy:fields(), Handler::module(), Opts::any()}. +-type route_rule() :: {Host::route_match(), Paths::[route_path()]} + | {Host::route_match(), cowboy:fields(), Paths::[route_path()]}. +-type routes() :: [route_rule()]. +-export_type([routes/0]). + +-type dispatch_match() :: '_' | <<_:8>> | [binary() | '_' | '...' | atom()]. +-type dispatch_path() :: {dispatch_match(), cowboy:fields(), module(), any()}. +-type dispatch_rule() :: {Host::dispatch_match(), cowboy:fields(), Paths::[dispatch_path()]}. +-opaque dispatch_rules() :: [dispatch_rule()]. +-export_type([dispatch_rules/0]). + +-spec compile(routes()) -> dispatch_rules(). +compile(Routes) -> + compile(Routes, []). + +compile([], Acc) -> + lists:reverse(Acc); +compile([{Host, Paths}|Tail], Acc) -> + compile([{Host, [], Paths}|Tail], Acc); +compile([{HostMatch, Fields, Paths}|Tail], Acc) -> + HostRules = case HostMatch of + '_' -> '_'; + _ -> compile_host(HostMatch) + end, + PathRules = compile_paths(Paths, []), + Hosts = case HostRules of + '_' -> [{'_', Fields, PathRules}]; + _ -> [{R, Fields, PathRules} || R <- HostRules] + end, + compile(Tail, Hosts ++ Acc). + +compile_host(HostMatch) when is_list(HostMatch) -> + compile_host(list_to_binary(HostMatch)); +compile_host(HostMatch) when is_binary(HostMatch) -> + compile_rules(HostMatch, $., [], [], <<>>). + +compile_paths([], Acc) -> + lists:reverse(Acc); +compile_paths([{PathMatch, Handler, Opts}|Tail], Acc) -> + compile_paths([{PathMatch, [], Handler, Opts}|Tail], Acc); +compile_paths([{PathMatch, Fields, Handler, Opts}|Tail], Acc) + when is_list(PathMatch) -> + compile_paths([{iolist_to_binary(PathMatch), + Fields, Handler, Opts}|Tail], Acc); +compile_paths([{'_', Fields, Handler, Opts}|Tail], Acc) -> + compile_paths(Tail, [{'_', Fields, Handler, Opts}] ++ Acc); +compile_paths([{<<"*">>, Fields, Handler, Opts}|Tail], Acc) -> + compile_paths(Tail, [{<<"*">>, Fields, Handler, Opts}|Acc]); +compile_paths([{<< $/, PathMatch/bits >>, Fields, Handler, Opts}|Tail], + Acc) -> + PathRules = compile_rules(PathMatch, $/, [], [], <<>>), + Paths = [{lists:reverse(R), Fields, Handler, Opts} || R <- PathRules], + compile_paths(Tail, Paths ++ Acc); +compile_paths([{PathMatch, _, _, _}|_], _) -> + error({badarg, "The following route MUST begin with a slash: " + ++ binary_to_list(PathMatch)}). + +compile_rules(<<>>, _, Segments, Rules, <<>>) -> + [Segments|Rules]; +compile_rules(<<>>, _, Segments, Rules, Acc) -> + [[Acc|Segments]|Rules]; +compile_rules(<< S, Rest/bits >>, S, Segments, Rules, <<>>) -> + compile_rules(Rest, S, Segments, Rules, <<>>); +compile_rules(<< S, Rest/bits >>, S, Segments, Rules, Acc) -> + compile_rules(Rest, S, [Acc|Segments], Rules, <<>>); +%% Colon on path segment start is special, otherwise allow. 
+compile_rules(<< $:, Rest/bits >>, S, Segments, Rules, <<>>) -> + {NameBin, Rest2} = compile_binding(Rest, S, <<>>), + Name = binary_to_atom(NameBin, utf8), + compile_rules(Rest2, S, Segments, Rules, Name); +compile_rules(<< $[, $., $., $., $], Rest/bits >>, S, Segments, Rules, Acc) + when Acc =:= <<>> -> + compile_rules(Rest, S, ['...'|Segments], Rules, Acc); +compile_rules(<< $[, $., $., $., $], Rest/bits >>, S, Segments, Rules, Acc) -> + compile_rules(Rest, S, ['...', Acc|Segments], Rules, Acc); +compile_rules(<< $[, S, Rest/bits >>, S, Segments, Rules, Acc) -> + compile_brackets(Rest, S, [Acc|Segments], Rules); +compile_rules(<< $[, Rest/bits >>, S, Segments, Rules, <<>>) -> + compile_brackets(Rest, S, Segments, Rules); +%% Open bracket in the middle of a segment. +compile_rules(<< $[, _/bits >>, _, _, _, _) -> + error(badarg); +%% Missing an open bracket. +compile_rules(<< $], _/bits >>, _, _, _, _) -> + error(badarg); +compile_rules(<< C, Rest/bits >>, S, Segments, Rules, Acc) -> + compile_rules(Rest, S, Segments, Rules, << Acc/binary, C >>). + +%% Everything past $: until the segment separator ($. for hosts, +%% $/ for paths) or $[ or $] or end of binary is the binding name. +compile_binding(<<>>, _, <<>>) -> + error(badarg); +compile_binding(Rest = <<>>, _, Acc) -> + {Acc, Rest}; +compile_binding(Rest = << C, _/bits >>, S, Acc) + when C =:= S; C =:= $[; C =:= $] -> + {Acc, Rest}; +compile_binding(<< C, Rest/bits >>, S, Acc) -> + compile_binding(Rest, S, << Acc/binary, C >>). + +compile_brackets(Rest, S, Segments, Rules) -> + {Bracket, Rest2} = compile_brackets_split(Rest, <<>>, 0), + Rules1 = compile_rules(Rest2, S, Segments, [], <<>>), + Rules2 = compile_rules(<< Bracket/binary, Rest2/binary >>, + S, Segments, [], <<>>), + Rules ++ Rules2 ++ Rules1. + +%% Missing a close bracket. +compile_brackets_split(<<>>, _, _) -> + error(badarg); +%% Make sure we don't confuse the closing bracket we're looking for. +compile_brackets_split(<< C, Rest/bits >>, Acc, N) when C =:= $[ -> + compile_brackets_split(Rest, << Acc/binary, C >>, N + 1); +compile_brackets_split(<< C, Rest/bits >>, Acc, N) when C =:= $], N > 0 -> + compile_brackets_split(Rest, << Acc/binary, C >>, N - 1); +%% That's the right one. +compile_brackets_split(<< $], Rest/bits >>, Acc, 0) -> + {Acc, Rest}; +compile_brackets_split(<< C, Rest/bits >>, Acc, N) -> + compile_brackets_split(Rest, << Acc/binary, C >>, N). + +-spec execute(Req, Env) + -> {ok, Req, Env} | {stop, Req} + when Req::cowboy_req:req(), Env::cowboy_middleware:env(). +execute(Req=#{host := Host, path := Path}, Env=#{dispatch := Dispatch0}) -> + Dispatch = case Dispatch0 of + {persistent_term, Key} -> persistent_term:get(Key); + _ -> Dispatch0 + end, + case match(Dispatch, Host, Path) of + {ok, Handler, HandlerOpts, Bindings, HostInfo, PathInfo} -> + {ok, Req#{ + host_info => HostInfo, + path_info => PathInfo, + bindings => Bindings + }, Env#{ + handler => Handler, + handler_opts => HandlerOpts + }}; + {error, notfound, host} -> + {stop, cowboy_req:reply(400, Req)}; + {error, badrequest, path} -> + {stop, cowboy_req:reply(400, Req)}; + {error, notfound, path} -> + {stop, cowboy_req:reply(404, Req)} + end. + +%% Internal. + +%% Match hostname tokens and path tokens against dispatch rules. +%% +%% It is typically used for matching tokens for the hostname and path of +%% the request against a global dispatch rule for your listener. +%% +%% Dispatch rules are a list of {Hostname, PathRules} tuples, with +%% PathRules being a list of {Path, HandlerMod, HandlerOpts}. 
+%% +%% Hostname and Path are match rules and can be either the +%% atom '_', which matches everything, `<<"*">>', which match the +%% wildcard path, or a list of tokens. +%% +%% Each token can be either a binary, the atom '_', +%% the atom '...' or a named atom. A binary token must match exactly, +%% '_' matches everything for a single token, '...' matches +%% everything for the rest of the tokens and a named atom will bind the +%% corresponding token value and return it. +%% +%% The list of hostname tokens is reversed before matching. For example, if +%% we were to match "www.ninenines.eu", we would first match "eu", then +%% "ninenines", then "www". This means that in the context of hostnames, +%% the '...' atom matches properly the lower levels of the domain +%% as would be expected. +%% +%% When a result is found, this function will return the handler module and +%% options found in the dispatch list, a key-value list of bindings and +%% the tokens that were matched by the '...' atom for both the +%% hostname and path. +-spec match(dispatch_rules(), Host::binary() | tokens(), Path::binary()) + -> {ok, module(), any(), bindings(), + HostInfo::undefined | tokens(), + PathInfo::undefined | tokens()} + | {error, notfound, host} | {error, notfound, path} + | {error, badrequest, path}. +match([], _, _) -> + {error, notfound, host}; +%% If the host is '_' then there can be no constraints. +match([{'_', [], PathMatchs}|_Tail], _, Path) -> + match_path(PathMatchs, undefined, Path, #{}); +match([{HostMatch, Fields, PathMatchs}|Tail], Tokens, Path) + when is_list(Tokens) -> + case list_match(Tokens, HostMatch, #{}) of + false -> + match(Tail, Tokens, Path); + {true, Bindings, HostInfo} -> + HostInfo2 = case HostInfo of + undefined -> undefined; + _ -> lists:reverse(HostInfo) + end, + case check_constraints(Fields, Bindings) of + {ok, Bindings2} -> + match_path(PathMatchs, HostInfo2, Path, Bindings2); + nomatch -> + match(Tail, Tokens, Path) + end + end; +match(Dispatch, Host, Path) -> + match(Dispatch, split_host(Host), Path). + +-spec match_path([dispatch_path()], + HostInfo::undefined | tokens(), binary() | tokens(), bindings()) + -> {ok, module(), any(), bindings(), + HostInfo::undefined | tokens(), + PathInfo::undefined | tokens()} + | {error, notfound, path} | {error, badrequest, path}. +match_path([], _, _, _) -> + {error, notfound, path}; +%% If the path is '_' then there can be no constraints. +match_path([{'_', [], Handler, Opts}|_Tail], HostInfo, _, Bindings) -> + {ok, Handler, Opts, Bindings, HostInfo, undefined}; +match_path([{<<"*">>, _, Handler, Opts}|_Tail], HostInfo, <<"*">>, Bindings) -> + {ok, Handler, Opts, Bindings, HostInfo, undefined}; +match_path([_|Tail], HostInfo, <<"*">>, Bindings) -> + match_path(Tail, HostInfo, <<"*">>, Bindings); +match_path([{PathMatch, Fields, Handler, Opts}|Tail], HostInfo, Tokens, + Bindings) when is_list(Tokens) -> + case list_match(Tokens, PathMatch, Bindings) of + false -> + match_path(Tail, HostInfo, Tokens, Bindings); + {true, PathBinds, PathInfo} -> + case check_constraints(Fields, PathBinds) of + {ok, PathBinds2} -> + {ok, Handler, Opts, PathBinds2, HostInfo, PathInfo}; + nomatch -> + match_path(Tail, HostInfo, Tokens, Bindings) + end + end; +match_path(_Dispatch, _HostInfo, badrequest, _Bindings) -> + {error, badrequest, path}; +match_path(Dispatch, HostInfo, Path, Bindings) -> + match_path(Dispatch, HostInfo, split_path(Path), Bindings). 
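+
+%% For illustration (my_handler is a hypothetical placeholder): with a
+%% dispatch compiled from
+%%
+%%   [{"[...]ninenines.eu", [{"/hats/:name", my_handler, []}]}]
+%%
+%% a request for host "bugs.ninenines.eu" and path "/hats/top" matches with
+%% Bindings = #{name => <<"top">>}, HostInfo = [<<"bugs">>] and
+%% PathInfo = undefined, following the rules described above.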
+ +check_constraints([], Bindings) -> + {ok, Bindings}; +check_constraints([Field|Tail], Bindings) when is_atom(Field) -> + check_constraints(Tail, Bindings); +check_constraints([Field|Tail], Bindings) -> + Name = element(1, Field), + case Bindings of + #{Name := Value0} -> + Constraints = element(2, Field), + case cowboy_constraints:validate(Value0, Constraints) of + {ok, Value} -> + check_constraints(Tail, Bindings#{Name => Value}); + {error, _} -> + nomatch + end; + _ -> + check_constraints(Tail, Bindings) + end. + +-spec split_host(binary()) -> tokens(). +split_host(Host) -> + split_host(Host, []). + +split_host(Host, Acc) -> + case binary:match(Host, <<".">>) of + nomatch when Host =:= <<>> -> + Acc; + nomatch -> + [Host|Acc]; + {Pos, _} -> + << Segment:Pos/binary, _:8, Rest/bits >> = Host, + false = byte_size(Segment) == 0, + split_host(Rest, [Segment|Acc]) + end. + +%% Following RFC2396, this function may return path segments containing any +%% character, including / if, and only if, a / was escaped +%% and part of a path segment. +-spec split_path(binary()) -> tokens() | badrequest. +split_path(<< $/, Path/bits >>) -> + split_path(Path, []); +split_path(_) -> + badrequest. + +split_path(Path, Acc) -> + try + case binary:match(Path, <<"/">>) of + nomatch when Path =:= <<>> -> + remove_dot_segments(lists:reverse([cow_uri:urldecode(S) || S <- Acc]), []); + nomatch -> + remove_dot_segments(lists:reverse([cow_uri:urldecode(S) || S <- [Path|Acc]]), []); + {Pos, _} -> + << Segment:Pos/binary, _:8, Rest/bits >> = Path, + split_path(Rest, [Segment|Acc]) + end + catch error:_ -> + badrequest + end. + +remove_dot_segments([], Acc) -> + lists:reverse(Acc); +remove_dot_segments([<<".">>|Segments], Acc) -> + remove_dot_segments(Segments, Acc); +remove_dot_segments([<<"..">>|Segments], Acc=[]) -> + remove_dot_segments(Segments, Acc); +remove_dot_segments([<<"..">>|Segments], [_|Acc]) -> + remove_dot_segments(Segments, Acc); +remove_dot_segments([S|Segments], Acc) -> + remove_dot_segments(Segments, [S|Acc]). + +-ifdef(TEST). +remove_dot_segments_test_() -> + Tests = [ + {[<<"a">>, <<"b">>, <<"c">>, <<".">>, <<"..">>, <<"..">>, <<"g">>], [<<"a">>, <<"g">>]}, + {[<<"mid">>, <<"content=5">>, <<"..">>, <<"6">>], [<<"mid">>, <<"6">>]}, + {[<<"..">>, <<"a">>], [<<"a">>]} + ], + [fun() -> R = remove_dot_segments(S, []) end || {S, R} <- Tests]. +-endif. + +-spec list_match(tokens(), dispatch_match(), bindings()) + -> {true, bindings(), undefined | tokens()} | false. +%% Atom '...' matches any trailing path, stop right now. +list_match(List, ['...'], Binds) -> + {true, Binds, List}; +%% Atom '_' matches anything, continue. +list_match([_E|Tail], ['_'|TailMatch], Binds) -> + list_match(Tail, TailMatch, Binds); +%% Both values match, continue. +list_match([E|Tail], [E|TailMatch], Binds) -> + list_match(Tail, TailMatch, Binds); +%% Bind E to the variable name V and continue, +%% unless V was already defined and E isn't identical to the previous value. +list_match([E|Tail], [V|TailMatch], Binds) when is_atom(V) -> + case Binds of + %% @todo This isn't right, the constraint must be applied FIRST + %% otherwise we can't check for example ints in both host/path. + #{V := E} -> + list_match(Tail, TailMatch, Binds); + #{V := _} -> + false; + _ -> + list_match(Tail, TailMatch, Binds#{V => E}) + end; +%% Match complete. +list_match([], [], Binds) -> + {true, Binds, undefined}; +%% Values don't match, stop. +list_match(_List, _Match, _Binds) -> + false. + +%% Tests. + +-ifdef(TEST). 
+compile_test_() -> + Tests = [ + %% Match any host and path. + {[{'_', [{'_', h, o}]}], + [{'_', [], [{'_', [], h, o}]}]}, + {[{"cowboy.example.org", + [{"/", ha, oa}, {"/path/to/resource", hb, ob}]}], + [{[<<"org">>, <<"example">>, <<"cowboy">>], [], [ + {[], [], ha, oa}, + {[<<"path">>, <<"to">>, <<"resource">>], [], hb, ob}]}]}, + {[{'_', [{"/path/to/resource/", h, o}]}], + [{'_', [], [{[<<"path">>, <<"to">>, <<"resource">>], [], h, o}]}]}, + % Cyrillic from a latin1 encoded file. + {[{'_', [{[47,208,191,209,131,209,130,209,140,47,208,186,47,209,128, + 208,181,209,129,209,131,209,128,209,129,209,131,47], h, o}]}], + [{'_', [], [{[<<208,191,209,131,209,130,209,140>>, <<208,186>>, + <<209,128,208,181,209,129,209,131,209,128,209,129,209,131>>], + [], h, o}]}]}, + {[{"cowboy.example.org.", [{'_', h, o}]}], + [{[<<"org">>, <<"example">>, <<"cowboy">>], [], [{'_', [], h, o}]}]}, + {[{".cowboy.example.org", [{'_', h, o}]}], + [{[<<"org">>, <<"example">>, <<"cowboy">>], [], [{'_', [], h, o}]}]}, + % Cyrillic from a latin1 encoded file. + {[{[208,189,208,181,208,186,208,184,208,185,46,209,129,208,176, + 208,185,209,130,46,209,128,209,132,46], [{'_', h, o}]}], + [{[<<209,128,209,132>>, <<209,129,208,176,208,185,209,130>>, + <<208,189,208,181,208,186,208,184,208,185>>], + [], [{'_', [], h, o}]}]}, + {[{":subdomain.example.org", [{"/hats/:name/prices", h, o}]}], + [{[<<"org">>, <<"example">>, subdomain], [], [ + {[<<"hats">>, name, <<"prices">>], [], h, o}]}]}, + {[{"ninenines.:_", [{"/hats/:_", h, o}]}], + [{['_', <<"ninenines">>], [], [{[<<"hats">>, '_'], [], h, o}]}]}, + {[{"[www.]ninenines.eu", + [{"/horses", h, o}, {"/hats/[page/:number]", h, o}]}], [ + {[<<"eu">>, <<"ninenines">>], [], [ + {[<<"horses">>], [], h, o}, + {[<<"hats">>], [], h, o}, + {[<<"hats">>, <<"page">>, number], [], h, o}]}, + {[<<"eu">>, <<"ninenines">>, <<"www">>], [], [ + {[<<"horses">>], [], h, o}, + {[<<"hats">>], [], h, o}, + {[<<"hats">>, <<"page">>, number], [], h, o}]}]}, + {[{'_', [{"/hats/:page/:number", h, o}]}], [{'_', [], [ + {[<<"hats">>, page, number], [], h, o}]}]}, + {[{'_', [{"/hats/[page/[:number]]", h, o}]}], [{'_', [], [ + {[<<"hats">>], [], h, o}, + {[<<"hats">>, <<"page">>], [], h, o}, + {[<<"hats">>, <<"page">>, number], [], h, o}]}]}, + {[{"[...]ninenines.eu", [{"/hats/[...]", h, o}]}], + [{[<<"eu">>, <<"ninenines">>, '...'], [], [ + {[<<"hats">>, '...'], [], h, o}]}]}, + %% Path segment containing a colon. + {[{'_', [{"/foo/bar:blah", h, o}]}], [{'_', [], [ + {[<<"foo">>, <<"bar:blah">>], [], h, o}]}]} + ], + [{lists:flatten(io_lib:format("~p", [Rt])), + fun() -> Rs = compile(Rt) end} || {Rt, Rs} <- Tests]. + +split_host_test_() -> + Tests = [ + {<<"">>, []}, + {<<"*">>, [<<"*">>]}, + {<<"cowboy.ninenines.eu">>, + [<<"eu">>, <<"ninenines">>, <<"cowboy">>]}, + {<<"ninenines.eu">>, + [<<"eu">>, <<"ninenines">>]}, + {<<"ninenines.eu.">>, + [<<"eu">>, <<"ninenines">>]}, + {<<"a.b.c.d.e.f.g.h.i.j.k.l.m.n.o.p.q.r.s.t.u.v.w.x.y.z">>, + [<<"z">>, <<"y">>, <<"x">>, <<"w">>, <<"v">>, <<"u">>, <<"t">>, + <<"s">>, <<"r">>, <<"q">>, <<"p">>, <<"o">>, <<"n">>, <<"m">>, + <<"l">>, <<"k">>, <<"j">>, <<"i">>, <<"h">>, <<"g">>, <<"f">>, + <<"e">>, <<"d">>, <<"c">>, <<"b">>, <<"a">>]} + ], + [{H, fun() -> R = split_host(H) end} || {H, R} <- Tests]. 
+ +split_path_test_() -> + Tests = [ + {<<"/">>, []}, + {<<"/extend//cowboy">>, [<<"extend">>, <<>>, <<"cowboy">>]}, + {<<"/users">>, [<<"users">>]}, + {<<"/users/42/friends">>, [<<"users">>, <<"42">>, <<"friends">>]}, + {<<"/users/a%20b/c%21d">>, [<<"users">>, <<"a b">>, <<"c!d">>]} + ], + [{P, fun() -> R = split_path(P) end} || {P, R} <- Tests]. + +match_test_() -> + Dispatch = [ + {[<<"eu">>, <<"ninenines">>, '_', <<"www">>], [], [ + {[<<"users">>, '_', <<"mails">>], [], match_any_subdomain_users, []} + ]}, + {[<<"eu">>, <<"ninenines">>], [], [ + {[<<"users">>, id, <<"friends">>], [], match_extend_users_friends, []}, + {'_', [], match_extend, []} + ]}, + {[var, <<"ninenines">>], [], [ + {[<<"threads">>, var], [], match_duplicate_vars, + [we, {expect, two}, var, here]} + ]}, + {[ext, <<"erlang">>], [], [ + {'_', [], match_erlang_ext, []} + ]}, + {'_', [], [ + {[<<"users">>, id, <<"friends">>], [], match_users_friends, []}, + {'_', [], match_any, []} + ]} + ], + Tests = [ + {<<"any">>, <<"/">>, {ok, match_any, [], #{}}}, + {<<"www.any.ninenines.eu">>, <<"/users/42/mails">>, + {ok, match_any_subdomain_users, [], #{}}}, + {<<"www.ninenines.eu">>, <<"/users/42/mails">>, + {ok, match_any, [], #{}}}, + {<<"www.ninenines.eu">>, <<"/">>, + {ok, match_any, [], #{}}}, + {<<"www.any.ninenines.eu">>, <<"/not_users/42/mails">>, + {error, notfound, path}}, + {<<"ninenines.eu">>, <<"/">>, + {ok, match_extend, [], #{}}}, + {<<"ninenines.eu">>, <<"/users/42/friends">>, + {ok, match_extend_users_friends, [], #{id => <<"42">>}}}, + {<<"erlang.fr">>, '_', + {ok, match_erlang_ext, [], #{ext => <<"fr">>}}}, + {<<"any">>, <<"/users/444/friends">>, + {ok, match_users_friends, [], #{id => <<"444">>}}}, + {<<"any">>, <<"/users//friends">>, + {ok, match_users_friends, [], #{id => <<>>}}} + ], + [{lists:flatten(io_lib:format("~p, ~p", [H, P])), fun() -> + {ok, Handler, Opts, Binds, undefined, undefined} + = match(Dispatch, H, P) + end} || {H, P, {ok, Handler, Opts, Binds}} <- Tests]. + +match_info_test_() -> + Dispatch = [ + {[<<"eu">>, <<"ninenines">>, <<"www">>], [], [ + {[<<"pathinfo">>, <<"is">>, <<"next">>, '...'], [], match_path, []} + ]}, + {[<<"eu">>, <<"ninenines">>, '...'], [], [ + {'_', [], match_any, []} + ]} + ], + Tests = [ + {<<"ninenines.eu">>, <<"/">>, + {ok, match_any, [], #{}, [], undefined}}, + {<<"bugs.ninenines.eu">>, <<"/">>, + {ok, match_any, [], #{}, [<<"bugs">>], undefined}}, + {<<"cowboy.bugs.ninenines.eu">>, <<"/">>, + {ok, match_any, [], #{}, [<<"cowboy">>, <<"bugs">>], undefined}}, + {<<"www.ninenines.eu">>, <<"/pathinfo/is/next">>, + {ok, match_path, [], #{}, undefined, []}}, + {<<"www.ninenines.eu">>, <<"/pathinfo/is/next/path_info">>, + {ok, match_path, [], #{}, undefined, [<<"path_info">>]}}, + {<<"www.ninenines.eu">>, <<"/pathinfo/is/next/foo/bar">>, + {ok, match_path, [], #{}, undefined, [<<"foo">>, <<"bar">>]}} + ], + [{lists:flatten(io_lib:format("~p, ~p", [H, P])), fun() -> + R = match(Dispatch, H, P) + end} || {H, P, R} <- Tests]. 
+ +match_constraints_test() -> + Dispatch0 = [{'_', [], + [{[<<"path">>, value], [{value, int}], match, []}]}], + {ok, _, [], #{value := 123}, _, _} = match(Dispatch0, + <<"ninenines.eu">>, <<"/path/123">>), + {ok, _, [], #{value := 123}, _, _} = match(Dispatch0, + <<"ninenines.eu">>, <<"/path/123/">>), + {error, notfound, path} = match(Dispatch0, + <<"ninenines.eu">>, <<"/path/NaN/">>), + Dispatch1 = [{'_', [], + [{[<<"path">>, value, <<"more">>], [{value, nonempty}], match, []}]}], + {ok, _, [], #{value := <<"something">>}, _, _} = match(Dispatch1, + <<"ninenines.eu">>, <<"/path/something/more">>), + {error, notfound, path} = match(Dispatch1, + <<"ninenines.eu">>, <<"/path//more">>), + Dispatch2 = [{'_', [], [{[<<"path">>, username], + [{username, fun(_, Value) -> + case cowboy_bstr:to_lower(Value) of + Value -> {ok, Value}; + _ -> {error, not_lowercase} + end end}], + match, []}]}], + {ok, _, [], #{username := <<"essen">>}, _, _} = match(Dispatch2, + <<"ninenines.eu">>, <<"/path/essen">>), + {error, notfound, path} = match(Dispatch2, + <<"ninenines.eu">>, <<"/path/ESSEN">>), + ok. + +match_same_bindings_test() -> + Dispatch = [{[same, same], [], [{'_', [], match, []}]}], + {ok, _, [], #{same := <<"eu">>}, _, _} = match(Dispatch, + <<"eu.eu">>, <<"/">>), + {error, notfound, host} = match(Dispatch, + <<"ninenines.eu">>, <<"/">>), + Dispatch2 = [{[<<"eu">>, <<"ninenines">>, user], [], + [{[<<"path">>, user], [], match, []}]}], + {ok, _, [], #{user := <<"essen">>}, _, _} = match(Dispatch2, + <<"essen.ninenines.eu">>, <<"/path/essen">>), + {ok, _, [], #{user := <<"essen">>}, _, _} = match(Dispatch2, + <<"essen.ninenines.eu">>, <<"/path/essen/">>), + {error, notfound, path} = match(Dispatch2, + <<"essen.ninenines.eu">>, <<"/path/notessen">>), + Dispatch3 = [{'_', [], [{[same, same], [], match, []}]}], + {ok, _, [], #{same := <<"path">>}, _, _} = match(Dispatch3, + <<"ninenines.eu">>, <<"/path/path">>), + {error, notfound, path} = match(Dispatch3, + <<"ninenines.eu">>, <<"/path/to">>), + ok. +-endif. diff --git a/deps/cowboy/src/cowboy_static.erl b/deps/cowboy/src/cowboy_static.erl new file mode 100644 index 0000000..b0cf146 --- /dev/null +++ b/deps/cowboy/src/cowboy_static.erl @@ -0,0 +1,418 @@ +%% Copyright (c) 2013-2017, Loรฏc Hoguin +%% Copyright (c) 2011, Magnus Klaar +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_static). + +-export([init/2]). +-export([malformed_request/2]). +-export([forbidden/2]). +-export([content_types_provided/2]). +-export([charsets_provided/2]). +-export([ranges_provided/2]). +-export([resource_exists/2]). +-export([last_modified/2]). +-export([generate_etag/2]). +-export([get_file/2]). + +-type extra_charset() :: {charset, module(), function()} | {charset, binary()}. +-type extra_etag() :: {etag, module(), function()} | {etag, false}. 
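+
+%% For illustration only (application and path names are placeholders):
+%% this handler is typically mounted through cowboy_router, for example
+%%
+%%   {"/static/[...]", cowboy_static, {priv_dir, my_app, "static",
+%%       [{mimetypes, cow_mimetypes, all}]}}
+%%
+%% which serves files from my_app's priv/static directory and resolves
+%% content types through the mimetypes "extra" option defined below.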
+-type extra_mimetypes() :: {mimetypes, module(), function()} + | {mimetypes, binary() | {binary(), binary(), [{binary(), binary()}]}}. +-type extra() :: [extra_charset() | extra_etag() | extra_mimetypes()]. +-type opts() :: {file | dir, string() | binary()} + | {file | dir, string() | binary(), extra()} + | {priv_file | priv_dir, atom(), string() | binary()} + | {priv_file | priv_dir, atom(), string() | binary(), extra()}. +-export_type([opts/0]). + +-include_lib("kernel/include/file.hrl"). + +-type state() :: {binary(), {direct | archive, #file_info{}} + | {error, atom()}, extra()}. + +%% Resolve the file that will be sent and get its file information. +%% If the handler is configured to manage a directory, check that the +%% requested file is inside the configured directory. + +-spec init(Req, opts()) -> {cowboy_rest, Req, error | state()} when Req::cowboy_req:req(). +init(Req, {Name, Path}) -> + init_opts(Req, {Name, Path, []}); +init(Req, {Name, App, Path}) + when Name =:= priv_file; Name =:= priv_dir -> + init_opts(Req, {Name, App, Path, []}); +init(Req, Opts) -> + init_opts(Req, Opts). + +init_opts(Req, {priv_file, App, Path, Extra}) -> + {PrivPath, HowToAccess} = priv_path(App, Path), + init_info(Req, absname(PrivPath), HowToAccess, Extra); +init_opts(Req, {file, Path, Extra}) -> + init_info(Req, absname(Path), direct, Extra); +init_opts(Req, {priv_dir, App, Path, Extra}) -> + {PrivPath, HowToAccess} = priv_path(App, Path), + init_dir(Req, PrivPath, HowToAccess, Extra); +init_opts(Req, {dir, Path, Extra}) -> + init_dir(Req, Path, direct, Extra). + +priv_path(App, Path) -> + case code:priv_dir(App) of + {error, bad_name} -> + error({badarg, "Can't resolve the priv_dir of application " + ++ atom_to_list(App)}); + PrivDir when is_list(Path) -> + { + PrivDir ++ "/" ++ Path, + how_to_access_app_priv(PrivDir) + }; + PrivDir when is_binary(Path) -> + { + << (list_to_binary(PrivDir))/binary, $/, Path/binary >>, + how_to_access_app_priv(PrivDir) + } + end. + +how_to_access_app_priv(PrivDir) -> + %% If the priv directory is not a directory, it must be + %% inside an Erlang application .ez archive. We call + %% how_to_access_app_priv1() to find the corresponding archive. + case filelib:is_dir(PrivDir) of + true -> direct; + false -> how_to_access_app_priv1(PrivDir) + end. + +how_to_access_app_priv1(Dir) -> + %% We go "up" by one path component at a time and look for a + %% regular file. + Archive = filename:dirname(Dir), + case Archive of + Dir -> + %% filename:dirname() returned its argument: + %% we reach the root directory. We found no + %% archive so we return 'direct': the given priv + %% directory doesn't exist. + direct; + _ -> + case filelib:is_regular(Archive) of + true -> {archive, Archive}; + false -> how_to_access_app_priv1(Archive) + end + end. + +absname(Path) when is_list(Path) -> + filename:absname(list_to_binary(Path)); +absname(Path) when is_binary(Path) -> + filename:absname(Path). + +init_dir(Req, Path, HowToAccess, Extra) when is_list(Path) -> + init_dir(Req, list_to_binary(Path), HowToAccess, Extra); +init_dir(Req, Path, HowToAccess, Extra) -> + Dir = fullpath(filename:absname(Path)), + case cowboy_req:path_info(Req) of + %% When dir/priv_dir are used and there is no path_info + %% this is a configuration error and we abort immediately. 
+ undefined -> + {ok, cowboy_req:reply(500, Req), error}; + PathInfo -> + case validate_reserved(PathInfo) of + error -> + {cowboy_rest, Req, error}; + ok -> + Filepath = filename:join([Dir|PathInfo]), + Len = byte_size(Dir), + case fullpath(Filepath) of + << Dir:Len/binary, $/, _/binary >> -> + init_info(Req, Filepath, HowToAccess, Extra); + << Dir:Len/binary >> -> + init_info(Req, Filepath, HowToAccess, Extra); + _ -> + {cowboy_rest, Req, error} + end + end + end. + +validate_reserved([]) -> + ok; +validate_reserved([P|Tail]) -> + case validate_reserved1(P) of + ok -> validate_reserved(Tail); + error -> error + end. + +%% We always reject forward slash, backward slash and NUL as +%% those have special meanings across the supported platforms. +%% We could support the backward slash on some platforms but +%% for the sake of consistency and simplicity we don't. +validate_reserved1(<<>>) -> + ok; +validate_reserved1(<<$/, _/bits>>) -> + error; +validate_reserved1(<<$\\, _/bits>>) -> + error; +validate_reserved1(<<0, _/bits>>) -> + error; +validate_reserved1(<<_, Rest/bits>>) -> + validate_reserved1(Rest). + +fullpath(Path) -> + fullpath(filename:split(Path), []). +fullpath([], Acc) -> + filename:join(lists:reverse(Acc)); +fullpath([<<".">>|Tail], Acc) -> + fullpath(Tail, Acc); +fullpath([<<"..">>|Tail], Acc=[_]) -> + fullpath(Tail, Acc); +fullpath([<<"..">>|Tail], [_|Acc]) -> + fullpath(Tail, Acc); +fullpath([Segment|Tail], Acc) -> + fullpath(Tail, [Segment|Acc]). + +init_info(Req, Path, HowToAccess, Extra) -> + Info = read_file_info(Path, HowToAccess), + {cowboy_rest, Req, {Path, Info, Extra}}. + +read_file_info(Path, direct) -> + case file:read_file_info(Path, [{time, universal}]) of + {ok, Info} -> {direct, Info}; + Error -> Error + end; +read_file_info(Path, {archive, Archive}) -> + case file:read_file_info(Archive, [{time, universal}]) of + {ok, ArchiveInfo} -> + %% The Erlang application archive is fine. + %% Now check if the requested file is in that + %% archive. We also need the file_info to merge + %% them with the archive's one. + PathS = binary_to_list(Path), + case erl_prim_loader:read_file_info(PathS) of + {ok, ContainedFileInfo} -> + Info = fix_archived_file_info( + ArchiveInfo, + ContainedFileInfo), + {archive, Info}; + error -> + {error, enoent} + end; + Error -> + Error + end. + +fix_archived_file_info(ArchiveInfo, ContainedFileInfo) -> + %% We merge the archive and content #file_info because we are + %% interested by the timestamps of the archive, but the type and + %% size of the contained file/directory. + %% + %% We reset the access to 'read', because we won't rewrite the + %% archive. + ArchiveInfo#file_info{ + size = ContainedFileInfo#file_info.size, + type = ContainedFileInfo#file_info.type, + access = read + }. + +-ifdef(TEST). +fullpath_test_() -> + Tests = [ + {<<"/home/cowboy">>, <<"/home/cowboy">>}, + {<<"/home/cowboy">>, <<"/home/cowboy/">>}, + {<<"/home/cowboy">>, <<"/home/cowboy/./">>}, + {<<"/home/cowboy">>, <<"/home/cowboy/./././././.">>}, + {<<"/home/cowboy">>, <<"/home/cowboy/abc/..">>}, + {<<"/home/cowboy">>, <<"/home/cowboy/abc/../">>}, + {<<"/home/cowboy">>, <<"/home/cowboy/abc/./../.">>}, + {<<"/">>, <<"/home/cowboy/../../../../../..">>}, + {<<"/etc/passwd">>, <<"/home/cowboy/../../etc/passwd">>} + ], + [{P, fun() -> R = fullpath(P) end} || {R, P} <- Tests]. 
+ +good_path_check_test_() -> + Tests = [ + <<"/home/cowboy/file">>, + <<"/home/cowboy/file/">>, + <<"/home/cowboy/./file">>, + <<"/home/cowboy/././././././file">>, + <<"/home/cowboy/abc/../file">>, + <<"/home/cowboy/abc/../file">>, + <<"/home/cowboy/abc/./.././file">> + ], + [{P, fun() -> + case fullpath(P) of + << "/home/cowboy/", _/bits >> -> ok + end + end} || P <- Tests]. + +bad_path_check_test_() -> + Tests = [ + <<"/home/cowboy/../../../../../../file">>, + <<"/home/cowboy/../../etc/passwd">> + ], + [{P, fun() -> + error = case fullpath(P) of + << "/home/cowboy/", _/bits >> -> ok; + _ -> error + end + end} || P <- Tests]. + +good_path_win32_check_test_() -> + Tests = case os:type() of + {unix, _} -> + []; + {win32, _} -> + [ + <<"c:/home/cowboy/file">>, + <<"c:/home/cowboy/file/">>, + <<"c:/home/cowboy/./file">>, + <<"c:/home/cowboy/././././././file">>, + <<"c:/home/cowboy/abc/../file">>, + <<"c:/home/cowboy/abc/../file">>, + <<"c:/home/cowboy/abc/./.././file">> + ] + end, + [{P, fun() -> + case fullpath(P) of + << "c:/home/cowboy/", _/bits >> -> ok + end + end} || P <- Tests]. + +bad_path_win32_check_test_() -> + Tests = case os:type() of + {unix, _} -> + []; + {win32, _} -> + [ + <<"c:/home/cowboy/../../secretfile.bat">>, + <<"c:/home/cowboy/c:/secretfile.bat">>, + <<"c:/home/cowboy/..\\..\\secretfile.bat">>, + <<"c:/home/cowboy/c:\\secretfile.bat">> + ] + end, + [{P, fun() -> + error = case fullpath(P) of + << "c:/home/cowboy/", _/bits >> -> ok; + _ -> error + end + end} || P <- Tests]. +-endif. + +%% Reject requests that tried to access a file outside +%% the target directory, or used reserved characters. + +-spec malformed_request(Req, State) + -> {boolean(), Req, State}. +malformed_request(Req, State) -> + {State =:= error, Req, State}. + +%% Directories, files that can't be accessed at all and +%% files with no read flag are forbidden. + +-spec forbidden(Req, State) + -> {boolean(), Req, State} + when State::state(). +forbidden(Req, State={_, {_, #file_info{type=directory}}, _}) -> + {true, Req, State}; +forbidden(Req, State={_, {error, eacces}, _}) -> + {true, Req, State}; +forbidden(Req, State={_, {_, #file_info{access=Access}}, _}) + when Access =:= write; Access =:= none -> + {true, Req, State}; +forbidden(Req, State) -> + {false, Req, State}. + +%% Detect the mimetype of the file. + +-spec content_types_provided(Req, State) + -> {[{binary(), get_file}], Req, State} + when State::state(). +content_types_provided(Req, State={Path, _, Extra}) when is_list(Extra) -> + case lists:keyfind(mimetypes, 1, Extra) of + false -> + {[{cow_mimetypes:web(Path), get_file}], Req, State}; + {mimetypes, Module, Function} -> + {[{Module:Function(Path), get_file}], Req, State}; + {mimetypes, Type} -> + {[{Type, get_file}], Req, State} + end. + +%% Detect the charset of the file. + +-spec charsets_provided(Req, State) + -> {[binary()], Req, State} + when State::state(). +charsets_provided(Req, State={Path, _, Extra}) -> + case lists:keyfind(charset, 1, Extra) of + %% We simulate the callback not being exported. + false -> + no_call; + {charset, Module, Function} -> + {[Module:Function(Path)], Req, State}; + {charset, Charset} when is_binary(Charset) -> + {[Charset], Req, State} + end. + +%% Enable support for range requests. + +-spec ranges_provided(Req, State) + -> {[{binary(), auto}], Req, State} + when State::state(). +ranges_provided(Req, State) -> + {[{<<"bytes">>, auto}], Req, State}. + +%% Assume the resource doesn't exist if it's not a regular file. 
+ +-spec resource_exists(Req, State) + -> {boolean(), Req, State} + when State::state(). +resource_exists(Req, State={_, {_, #file_info{type=regular}}, _}) -> + {true, Req, State}; +resource_exists(Req, State) -> + {false, Req, State}. + +%% Generate an etag for the file. + +-spec generate_etag(Req, State) + -> {{strong | weak, binary()}, Req, State} + when State::state(). +generate_etag(Req, State={Path, {_, #file_info{size=Size, mtime=Mtime}}, + Extra}) -> + case lists:keyfind(etag, 1, Extra) of + false -> + {generate_default_etag(Size, Mtime), Req, State}; + {etag, Module, Function} -> + {Module:Function(Path, Size, Mtime), Req, State}; + {etag, false} -> + {undefined, Req, State} + end. + +generate_default_etag(Size, Mtime) -> + {strong, integer_to_binary(erlang:phash2({Size, Mtime}, 16#ffffffff))}. + +%% Return the time of last modification of the file. + +-spec last_modified(Req, State) + -> {calendar:datetime(), Req, State} + when State::state(). +last_modified(Req, State={_, {_, #file_info{mtime=Modified}}, _}) -> + {Modified, Req, State}. + +%% Stream the file. + +-spec get_file(Req, State) + -> {{sendfile, 0, non_neg_integer(), binary()}, Req, State} + when State::state(). +get_file(Req, State={Path, {direct, #file_info{size=Size}}, _}) -> + {{sendfile, 0, Size, Path}, Req, State}; +get_file(Req, State={Path, {archive, _}, _}) -> + PathS = binary_to_list(Path), + {ok, Bin, _} = erl_prim_loader:get_file(PathS), + {Bin, Req, State}. diff --git a/deps/cowboy/src/cowboy_stream.erl b/deps/cowboy/src/cowboy_stream.erl new file mode 100644 index 0000000..2dad6d0 --- /dev/null +++ b/deps/cowboy/src/cowboy_stream.erl @@ -0,0 +1,193 @@ +%% Copyright (c) 2015-2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_stream). + +-type state() :: any(). +-type human_reason() :: atom(). + +-type streamid() :: any(). +-export_type([streamid/0]). + +-type fin() :: fin | nofin. +-export_type([fin/0]). + +%% @todo Perhaps it makes more sense to have resp_body in this module? + +-type resp_command() + :: {response, cowboy:http_status(), cowboy:http_headers(), cowboy_req:resp_body()}. +-export_type([resp_command/0]). + +-type commands() :: [{inform, cowboy:http_status(), cowboy:http_headers()} + | resp_command() + | {headers, cowboy:http_status(), cowboy:http_headers()} + | {data, fin(), cowboy_req:resp_body()} + | {trailers, cowboy:http_headers()} + | {push, binary(), binary(), binary(), inet:port_number(), + binary(), binary(), cowboy:http_headers()} + | {flow, pos_integer()} + | {spawn, pid(), timeout()} + | {error_response, cowboy:http_status(), cowboy:http_headers(), iodata()} + | {switch_protocol, cowboy:http_headers(), module(), state()} + | {internal_error, any(), human_reason()} + | {set_options, map()} + | {log, logger:level(), io:format(), list()} + | stop]. 
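+
+%% For illustration (status, headers and body values are placeholders):
+%% a stream handler returns a list of such commands from its callbacks,
+%% executed in order by the protocol code, e.g.
+%%
+%%   [{response, 200, #{<<"content-type">> => <<"text/plain">>}, <<"Hello!">>}]
+%%
+%% or, when streaming a body,
+%%
+%%   [{headers, 200, #{}}, {data, nofin, <<"Hello">>}, {data, fin, <<"!">>}].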
+-export_type([commands/0]). + +-type reason() :: normal | switch_protocol + | {internal_error, timeout | {error | exit | throw, any()}, human_reason()} + | {socket_error, closed | atom(), human_reason()} + | {stream_error, cow_http2:error(), human_reason()} + | {connection_error, cow_http2:error(), human_reason()} + | {stop, cow_http2:frame() | {exit, any()}, human_reason()}. +-export_type([reason/0]). + +-type partial_req() :: map(). %% @todo Take what's in cowboy_req with everything? optional. +-export_type([partial_req/0]). + +-callback init(streamid(), cowboy_req:req(), cowboy:opts()) -> {commands(), state()}. +-callback data(streamid(), fin(), binary(), State) -> {commands(), State} when State::state(). +-callback info(streamid(), any(), State) -> {commands(), State} when State::state(). +-callback terminate(streamid(), reason(), state()) -> any(). +-callback early_error(streamid(), reason(), partial_req(), Resp, cowboy:opts()) + -> Resp when Resp::resp_command(). + +%% @todo To optimize the number of active timers we could have a command +%% that enables a timeout that is called in the absence of any other call, +%% similar to what gen_server does. However the nice thing about this is +%% that the connection process can keep a single timer around (the same +%% one that would be used to detect half-closed sockets) and use this +%% timer and other events to trigger the timeout in streams at their +%% intended time. +%% +%% This same timer can be used to try and send PING frames to help detect +%% that the connection is indeed unresponsive. + +-export([init/3]). +-export([data/4]). +-export([info/3]). +-export([terminate/3]). +-export([early_error/5]). +-export([make_error_log/5]). + +%% Note that this and other functions in this module do NOT catch +%% exceptions. We want the exception to go all the way down to the +%% protocol code. +%% +%% OK the failure scenario is not so clear. The problem is +%% that the failure at any point in init/3 will result in the +%% corresponding state being lost. I am unfortunately not +%% confident we can do anything about this. If the crashing +%% handler just created a process, we'll never know about it. +%% Therefore at this time I choose to leave all failure handling +%% to the protocol process. +%% +%% Note that a failure in init/3 will result in terminate/3 +%% NOT being called. This is because the state is not available. + +-spec init(streamid(), cowboy_req:req(), cowboy:opts()) + -> {commands(), {module(), state()} | undefined}. +init(StreamID, Req, Opts) -> + case maps:get(stream_handlers, Opts, [cowboy_stream_h]) of + [] -> + {[], undefined}; + [Handler|Tail] -> + %% We call the next handler and remove it from the list of + %% stream handlers. This means that handlers that run after + %% it have no knowledge it exists. Should user require this + %% knowledge they can just define a separate option that will + %% be left untouched. + {Commands, State} = Handler:init(StreamID, Req, Opts#{stream_handlers => Tail}), + {Commands, {Handler, State}} + end. + +-spec data(streamid(), fin(), binary(), {Handler, State} | undefined) + -> {commands(), {Handler, State} | undefined} + when Handler::module(), State::state(). +data(_, _, _, undefined) -> + {[], undefined}; +data(StreamID, IsFin, Data, {Handler, State0}) -> + {Commands, State} = Handler:data(StreamID, IsFin, Data, State0), + {Commands, {Handler, State}}. 
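+
+%% For illustration (the listener name and Dispatch value are placeholders):
+%% the handler chain walked by init/3 above is configured through the
+%% stream_handlers protocol option, e.g.
+%%
+%%   cowboy:start_clear(my_http, [{port, 8080}], #{
+%%       env => #{dispatch => Dispatch},
+%%       stream_handlers => [cowboy_compress_h, cowboy_stream_h]
+%%   }).
+%%
+%% Each handler receives the remaining list and calls the next one,
+%% as done in init/3 above.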
+ +-spec info(streamid(), any(), {Handler, State} | undefined) + -> {commands(), {Handler, State} | undefined} + when Handler::module(), State::state(). +info(_, _, undefined) -> + {[], undefined}; +info(StreamID, Info, {Handler, State0}) -> + {Commands, State} = Handler:info(StreamID, Info, State0), + {Commands, {Handler, State}}. + +-spec terminate(streamid(), reason(), {module(), state()} | undefined) -> ok. +terminate(_, _, undefined) -> + ok; +terminate(StreamID, Reason, {Handler, State}) -> + _ = Handler:terminate(StreamID, Reason, State), + ok. + +-spec early_error(streamid(), reason(), partial_req(), Resp, cowboy:opts()) + -> Resp when Resp::resp_command(). +early_error(StreamID, Reason, PartialReq, Resp, Opts) -> + case maps:get(stream_handlers, Opts, [cowboy_stream_h]) of + [] -> + Resp; + [Handler|Tail] -> + %% This is the same behavior as in init/3. + Handler:early_error(StreamID, Reason, + PartialReq, Resp, Opts#{stream_handlers => Tail}) + end. + +-spec make_error_log(init | data | info | terminate | early_error, + list(), error | exit | throw, any(), list()) + -> {log, error, string(), list()}. +make_error_log(init, [StreamID, Req, Opts], Class, Exception, Stacktrace) -> + {log, error, + "Unhandled exception ~p:~p in cowboy_stream:init(~p, Req, Opts)~n" + "Stacktrace: ~p~n" + "Req: ~p~n" + "Opts: ~p~n", + [Class, Exception, StreamID, Stacktrace, Req, Opts]}; +make_error_log(data, [StreamID, IsFin, Data, State], Class, Exception, Stacktrace) -> + {log, error, + "Unhandled exception ~p:~p in cowboy_stream:data(~p, ~p, Data, State)~n" + "Stacktrace: ~p~n" + "Data: ~p~n" + "State: ~p~n", + [Class, Exception, StreamID, IsFin, Stacktrace, Data, State]}; +make_error_log(info, [StreamID, Msg, State], Class, Exception, Stacktrace) -> + {log, error, + "Unhandled exception ~p:~p in cowboy_stream:info(~p, Msg, State)~n" + "Stacktrace: ~p~n" + "Msg: ~p~n" + "State: ~p~n", + [Class, Exception, StreamID, Stacktrace, Msg, State]}; +make_error_log(terminate, [StreamID, Reason, State], Class, Exception, Stacktrace) -> + {log, error, + "Unhandled exception ~p:~p in cowboy_stream:terminate(~p, Reason, State)~n" + "Stacktrace: ~p~n" + "Reason: ~p~n" + "State: ~p~n", + [Class, Exception, StreamID, Stacktrace, Reason, State]}; +make_error_log(early_error, [StreamID, Reason, PartialReq, Resp, Opts], + Class, Exception, Stacktrace) -> + {log, error, + "Unhandled exception ~p:~p in cowboy_stream:early_error(~p, Reason, PartialReq, Resp, Opts)~n" + "Stacktrace: ~p~n" + "Reason: ~p~n" + "PartialReq: ~p~n" + "Resp: ~p~n" + "Opts: ~p~n", + [Class, Exception, StreamID, Stacktrace, Reason, PartialReq, Resp, Opts]}. diff --git a/deps/cowboy/src/cowboy_stream_h.erl b/deps/cowboy/src/cowboy_stream_h.erl new file mode 100644 index 0000000..f516f3d --- /dev/null +++ b/deps/cowboy/src/cowboy_stream_h.erl @@ -0,0 +1,324 @@ +%% Copyright (c) 2016-2017, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_stream_h). +-behavior(cowboy_stream). + +-export([init/3]). +-export([data/4]). +-export([info/3]). +-export([terminate/3]). +-export([early_error/5]). + +-export([request_process/3]). +-export([resume/5]). + +-record(state, { + next :: any(), + ref = undefined :: ranch:ref(), + pid = undefined :: pid(), + expect = undefined :: undefined | continue, + read_body_pid = undefined :: pid() | undefined, + read_body_ref = undefined :: reference() | undefined, + read_body_timer_ref = undefined :: reference() | undefined, + read_body_length = 0 :: non_neg_integer() | infinity | auto, + read_body_is_fin = nofin :: nofin | {fin, non_neg_integer()}, + read_body_buffer = <<>> :: binary(), + body_length = 0 :: non_neg_integer(), + stream_body_pid = undefined :: pid() | undefined, + stream_body_status = normal :: normal | blocking | blocked +}). + +-spec init(cowboy_stream:streamid(), cowboy_req:req(), cowboy:opts()) + -> {[{spawn, pid(), timeout()}], #state{}}. +init(StreamID, Req=#{ref := Ref}, Opts) -> + Env = maps:get(env, Opts, #{}), + Middlewares = maps:get(middlewares, Opts, [cowboy_router, cowboy_handler]), + Shutdown = maps:get(shutdown_timeout, Opts, 5000), + Pid = proc_lib:spawn_link(?MODULE, request_process, [Req, Env, Middlewares]), + Expect = expect(Req), + {Commands, Next} = cowboy_stream:init(StreamID, Req, Opts), + {[{spawn, Pid, Shutdown}|Commands], + #state{next=Next, ref=Ref, pid=Pid, expect=Expect}}. + +%% Ignore the expect header in HTTP/1.0. +expect(#{version := 'HTTP/1.0'}) -> + undefined; +expect(Req) -> + try cowboy_req:parse_header(<<"expect">>, Req) of + Expect -> + Expect + catch _:_ -> + undefined + end. + +%% If we receive data and stream is waiting for data: +%% If we accumulated enough data or IsFin=fin, send it. +%% If we are in auto mode, send it and update flow control. +%% If not, buffer it. +%% If not, buffer it. +%% +%% We always reset the expect field when we receive data, +%% since the client started sending the request body before +%% we could send a 100 continue response. + +-spec data(cowboy_stream:streamid(), cowboy_stream:fin(), cowboy_req:resp_body(), State) + -> {cowboy_stream:commands(), State} when State::#state{}. +%% Stream isn't waiting for data. +data(StreamID, IsFin, Data, State=#state{ + read_body_ref=undefined, read_body_buffer=Buffer, body_length=BodyLen}) -> + do_data(StreamID, IsFin, Data, [], State#state{ + expect=undefined, + read_body_is_fin=IsFin, + read_body_buffer= << Buffer/binary, Data/binary >>, + body_length=BodyLen + byte_size(Data) + }); +%% Stream is waiting for data using auto mode. +%% +%% There is no buffering done in auto mode. +data(StreamID, IsFin, Data, State=#state{read_body_pid=Pid, read_body_ref=Ref, + read_body_length=auto, body_length=BodyLen}) -> + send_request_body(Pid, Ref, IsFin, BodyLen, Data), + do_data(StreamID, IsFin, Data, [{flow, byte_size(Data)}], State#state{ + read_body_ref=undefined, + %% @todo This is wrong, it's missing byte_size(Data). + body_length=BodyLen + }); +%% Stream is waiting for data but we didn't receive enough to send yet. 
+data(StreamID, IsFin=nofin, Data, State=#state{
+ read_body_length=ReadLen, read_body_buffer=Buffer, body_length=BodyLen})
+ when byte_size(Data) + byte_size(Buffer) < ReadLen ->
+ do_data(StreamID, IsFin, Data, [], State#state{
+ expect=undefined,
+ read_body_buffer= << Buffer/binary, Data/binary >>,
+ body_length=BodyLen + byte_size(Data)
+ });
+%% Stream is waiting for data and we received enough to send.
+data(StreamID, IsFin, Data, State=#state{read_body_pid=Pid, read_body_ref=Ref,
+ read_body_timer_ref=TRef, read_body_buffer=Buffer, body_length=BodyLen0}) ->
+ BodyLen = BodyLen0 + byte_size(Data),
+ ok = erlang:cancel_timer(TRef, [{async, true}, {info, false}]),
+ send_request_body(Pid, Ref, IsFin, BodyLen, << Buffer/binary, Data/binary >>),
+ do_data(StreamID, IsFin, Data, [], State#state{
+ expect=undefined,
+ read_body_ref=undefined,
+ read_body_timer_ref=undefined,
+ read_body_buffer= <<>>,
+ body_length=BodyLen
+ }).
+
+do_data(StreamID, IsFin, Data, Commands1, State=#state{next=Next0}) ->
+ {Commands2, Next} = cowboy_stream:data(StreamID, IsFin, Data, Next0),
+ {Commands1 ++ Commands2, State#state{next=Next}}.
+
+-spec info(cowboy_stream:streamid(), any(), State)
+ -> {cowboy_stream:commands(), State} when State::#state{}.
+info(StreamID, Info={'EXIT', Pid, normal}, State=#state{pid=Pid}) ->
+ do_info(StreamID, Info, [stop], State);
+info(StreamID, Info={'EXIT', Pid, {{request_error, Reason, _HumanReadable}, _}},
+ State=#state{pid=Pid}) ->
+ Status = case Reason of
+ timeout -> 408;
+ payload_too_large -> 413;
+ _ -> 400
+ end,
+ %% @todo Headers? Details in body? Log the crash? More stuff in debug only?
+ do_info(StreamID, Info, [
+ {error_response, Status, #{<<"content-length">> => <<"0">>}, <<>>},
+ stop
+ ], State);
+info(StreamID, Exit={'EXIT', Pid, {Reason, Stacktrace}}, State=#state{ref=Ref, pid=Pid}) ->
+ Commands0 = [{internal_error, Exit, 'Stream process crashed.'}],
+ Commands = case Reason of
+ normal -> Commands0;
+ shutdown -> Commands0;
+ {shutdown, _} -> Commands0;
+ _ -> [{log, error,
+ "Ranch listener ~p, connection process ~p, stream ~p "
+ "had its request process ~p exit with reason "
+ "~999999p and stacktrace ~999999p~n",
+ [Ref, self(), StreamID, Pid, Reason, Stacktrace]}
+ |Commands0]
+ end,
+ do_info(StreamID, Exit, [
+ {error_response, 500, #{<<"content-length">> => <<"0">>}, <<>>}
+ |Commands], State);
+%% Request body, auto mode, no body buffered.
+info(StreamID, Info={read_body, Pid, Ref, auto, infinity}, State=#state{read_body_buffer= <<>>}) ->
+ do_info(StreamID, Info, [], State#state{
+ read_body_pid=Pid,
+ read_body_ref=Ref,
+ read_body_length=auto
+ });
+%% Request body, auto mode, body buffered or complete.
+info(StreamID, Info={read_body, Pid, Ref, auto, infinity}, State=#state{
+ read_body_is_fin=IsFin, read_body_buffer=Buffer, body_length=BodyLen}) ->
+ send_request_body(Pid, Ref, IsFin, BodyLen, Buffer),
+ do_info(StreamID, Info, [{flow, byte_size(Buffer)}],
+ State#state{read_body_buffer= <<>>});
+%% Request body, body buffered large enough or complete.
+%%
+%% We do not send a 100 continue response if the client
+%% already started sending the body.
+info(StreamID, Info={read_body, Pid, Ref, Length, _}, State=#state{
+ read_body_is_fin=IsFin, read_body_buffer=Buffer, body_length=BodyLen})
+ when IsFin =:= fin; byte_size(Buffer) >= Length ->
+ send_request_body(Pid, Ref, IsFin, BodyLen, Buffer),
+ do_info(StreamID, Info, [], State#state{read_body_buffer= <<>>});
+%% Request body, not enough to send yet.
+info(StreamID, Info={read_body, Pid, Ref, Length, Period}, State=#state{expect=Expect}) -> + Commands = case Expect of + continue -> [{inform, 100, #{}}, {flow, Length}]; + undefined -> [{flow, Length}] + end, + TRef = erlang:send_after(Period, self(), {{self(), StreamID}, {read_body_timeout, Ref}}), + do_info(StreamID, Info, Commands, State#state{ + read_body_pid=Pid, + read_body_ref=Ref, + read_body_timer_ref=TRef, + read_body_length=Length + }); +%% Request body reading timeout; send what we got. +info(StreamID, Info={read_body_timeout, Ref}, State=#state{read_body_pid=Pid, read_body_ref=Ref, + read_body_is_fin=IsFin, read_body_buffer=Buffer, body_length=BodyLen}) -> + send_request_body(Pid, Ref, IsFin, BodyLen, Buffer), + do_info(StreamID, Info, [], State#state{ + read_body_ref=undefined, + read_body_timer_ref=undefined, + read_body_buffer= <<>> + }); +info(StreamID, Info={read_body_timeout, _}, State) -> + do_info(StreamID, Info, [], State); +%% Response. +%% +%% We reset the expect field when a 100 continue response +%% is sent or when any final response is sent. +info(StreamID, Inform={inform, Status, _}, State0) -> + State = case cow_http:status_to_integer(Status) of + 100 -> State0#state{expect=undefined}; + _ -> State0 + end, + do_info(StreamID, Inform, [Inform], State); +info(StreamID, Response={response, _, _, _}, State) -> + do_info(StreamID, Response, [Response], State#state{expect=undefined}); +info(StreamID, Headers={headers, _, _}, State) -> + do_info(StreamID, Headers, [Headers], State#state{expect=undefined}); +%% Sending data involves the data message, the stream_buffer_full alarm +%% and the connection_buffer_full alarm. We stop sending acks when an alarm is on. +%% +%% We only apply backpressure when the message includes a pid. Otherwise +%% it is a message from Cowboy, or the user circumventing the backpressure. +%% +%% We currently do not support sending data from multiple processes concurrently. +info(StreamID, Data={data, _, _}, State) -> + do_info(StreamID, Data, [Data], State); +info(StreamID, Data0={data, Pid, _, _}, State0=#state{stream_body_status=Status}) -> + State = case Status of + normal -> + Pid ! {data_ack, self()}, + State0; + blocking -> + State0#state{stream_body_pid=Pid, stream_body_status=blocked}; + blocked -> + State0 + end, + Data = erlang:delete_element(2, Data0), + do_info(StreamID, Data, [Data], State); +info(StreamID, Alarm={alarm, Name, on}, State0=#state{stream_body_status=Status}) + when Name =:= connection_buffer_full; Name =:= stream_buffer_full -> + State = case Status of + normal -> State0#state{stream_body_status=blocking}; + _ -> State0 + end, + do_info(StreamID, Alarm, [], State); +info(StreamID, Alarm={alarm, Name, off}, State=#state{stream_body_pid=Pid, stream_body_status=Status}) + when Name =:= connection_buffer_full; Name =:= stream_buffer_full -> + _ = case Status of + normal -> ok; + blocking -> ok; + blocked -> Pid ! {data_ack, self()} + end, + do_info(StreamID, Alarm, [], State#state{stream_body_pid=undefined, stream_body_status=normal}); +info(StreamID, Trailers={trailers, _}, State) -> + do_info(StreamID, Trailers, [Trailers], State); +info(StreamID, Push={push, _, _, _, _, _, _, _}, State) -> + do_info(StreamID, Push, [Push], State); +info(StreamID, SwitchProtocol={switch_protocol, _, _, _}, State) -> + do_info(StreamID, SwitchProtocol, [SwitchProtocol], State#state{expect=undefined}); +%% Convert the set_options message to a command. 
+info(StreamID, SetOptions={set_options, _}, State) -> + do_info(StreamID, SetOptions, [SetOptions], State); +%% Unknown message, either stray or meant for a handler down the line. +info(StreamID, Info, State) -> + do_info(StreamID, Info, [], State). + +do_info(StreamID, Info, Commands1, State0=#state{next=Next0}) -> + {Commands2, Next} = cowboy_stream:info(StreamID, Info, Next0), + {Commands1 ++ Commands2, State0#state{next=Next}}. + +-spec terminate(cowboy_stream:streamid(), cowboy_stream:reason(), #state{}) -> ok. +terminate(StreamID, Reason, #state{next=Next}) -> + cowboy_stream:terminate(StreamID, Reason, Next). + +-spec early_error(cowboy_stream:streamid(), cowboy_stream:reason(), + cowboy_stream:partial_req(), Resp, cowboy:opts()) -> Resp + when Resp::cowboy_stream:resp_command(). +early_error(StreamID, Reason, PartialReq, Resp, Opts) -> + cowboy_stream:early_error(StreamID, Reason, PartialReq, Resp, Opts). + +send_request_body(Pid, Ref, nofin, _, Data) -> + Pid ! {request_body, Ref, nofin, Data}, + ok; +send_request_body(Pid, Ref, fin, BodyLen, Data) -> + Pid ! {request_body, Ref, fin, BodyLen, Data}, + ok. + +%% Request process. + +%% We add the stacktrace to exit exceptions here in order +%% to simplify the debugging of errors. The proc_lib library +%% already adds the stacktrace to other types of exceptions. +-spec request_process(cowboy_req:req(), cowboy_middleware:env(), [module()]) -> ok. +request_process(Req, Env, Middlewares) -> + try + execute(Req, Env, Middlewares) + catch + exit:Reason={shutdown, _}:Stacktrace -> + erlang:raise(exit, Reason, Stacktrace); + exit:Reason:Stacktrace when Reason =/= normal, Reason =/= shutdown -> + erlang:raise(exit, {Reason, Stacktrace}, Stacktrace) + end. + +execute(_, _, []) -> + ok; +execute(Req, Env, [Middleware|Tail]) -> + case Middleware:execute(Req, Env) of + {ok, Req2, Env2} -> + execute(Req2, Env2, Tail); + {suspend, Module, Function, Args} -> + proc_lib:hibernate(?MODULE, resume, [Env, Tail, Module, Function, Args]); + {stop, _Req2} -> + ok + end. + +-spec resume(cowboy_middleware:env(), [module()], module(), atom(), [any()]) -> ok. +resume(Env, Tail, Module, Function, Args) -> + case apply(Module, Function, Args) of + {ok, Req2, Env2} -> + execute(Req2, Env2, Tail); + {suspend, Module2, Function2, Args2} -> + proc_lib:hibernate(?MODULE, resume, [Env, Tail, Module2, Function2, Args2]); + {stop, _Req2} -> + ok + end. diff --git a/deps/cowboy/src/cowboy_sub_protocol.erl b/deps/cowboy/src/cowboy_sub_protocol.erl new file mode 100644 index 0000000..6714289 --- /dev/null +++ b/deps/cowboy/src/cowboy_sub_protocol.erl @@ -0,0 +1,24 @@ +%% Copyright (c) 2013-2017, Loรฏc Hoguin +%% Copyright (c) 2013, James Fish +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cowboy_sub_protocol). 
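+
+%% For illustration (a minimal sketch; the handler shown is hypothetical):
+%% a sub protocol is selected by returning its module from a handler's
+%% init/2, for example
+%%
+%%   init(Req, State) ->
+%%       {cowboy_websocket, Req, State}.
+%%
+%% cowboy_handler then invokes that module's upgrade/4 or upgrade/5
+%% callback as specified below.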
+
+-callback upgrade(Req, Env, module(), any())
+    -> {ok, Req, Env} | {suspend, module(), atom(), [any()]} | {stop, Req}
+    when Req::cowboy_req:req(), Env::cowboy_middleware:env().
+
+-callback upgrade(Req, Env, module(), any(), any())
+    -> {ok, Req, Env} | {suspend, module(), atom(), [any()]} | {stop, Req}
+    when Req::cowboy_req:req(), Env::cowboy_middleware:env().
diff --git a/deps/cowboy/src/cowboy_sup.erl b/deps/cowboy/src/cowboy_sup.erl
new file mode 100644
index 0000000..d3ac3b0
--- /dev/null
+++ b/deps/cowboy/src/cowboy_sup.erl
@@ -0,0 +1,30 @@
+%% Copyright (c) 2011-2017, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cowboy_sup).
+-behaviour(supervisor).
+
+-export([start_link/0]).
+-export([init/1]).
+
+-spec start_link() -> {ok, pid()}.
+start_link() ->
+    supervisor:start_link({local, ?MODULE}, ?MODULE, []).
+
+-spec init([])
+    -> {ok, {{supervisor:strategy(), 10, 10}, [supervisor:child_spec()]}}.
+init([]) ->
+    Procs = [{cowboy_clock, {cowboy_clock, start_link, []},
+        permanent, 5000, worker, [cowboy_clock]}],
+    {ok, {{one_for_one, 10, 10}, Procs}}.
diff --git a/deps/cowboy/src/cowboy_tls.erl b/deps/cowboy/src/cowboy_tls.erl
new file mode 100644
index 0000000..c049ecb
--- /dev/null
+++ b/deps/cowboy/src/cowboy_tls.erl
@@ -0,0 +1,56 @@
+%% Copyright (c) 2015-2017, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cowboy_tls).
+-behavior(ranch_protocol).
+
+-export([start_link/3]).
+-export([start_link/4]).
+-export([connection_process/4]).
+
+%% Ranch 1.
+-spec start_link(ranch:ref(), ssl:sslsocket(), module(), cowboy:opts()) -> {ok, pid()}.
+start_link(Ref, _Socket, Transport, Opts) ->
+    start_link(Ref, Transport, Opts).
+
+%% Ranch 2.
+-spec start_link(ranch:ref(), module(), cowboy:opts()) -> {ok, pid()}.
+start_link(Ref, Transport, Opts) ->
+    Pid = proc_lib:spawn_link(?MODULE, connection_process,
+        [self(), Ref, Transport, Opts]),
+    {ok, Pid}.
+
+-spec connection_process(pid(), ranch:ref(), module(), cowboy:opts()) -> ok.
+connection_process(Parent, Ref, Transport, Opts) ->
+    ProxyInfo = case maps:get(proxy_header, Opts, false) of
+        true ->
+            {ok, ProxyInfo0} = ranch:recv_proxy_header(Ref, 1000),
+            ProxyInfo0;
+        false ->
+            undefined
+    end,
+    {ok, Socket} = ranch:handshake(Ref),
+    case ssl:negotiated_protocol(Socket) of
+        {ok, <<"h2">>} ->
+            init(Parent, Ref, Socket, Transport, ProxyInfo, Opts, cowboy_http2);
+        _ -> %% http/1.1 or no protocol negotiated.
+            init(Parent, Ref, Socket, Transport, ProxyInfo, Opts, cowboy_http)
+    end.
+
+init(Parent, Ref, Socket, Transport, ProxyInfo, Opts, Protocol) ->
+    _ = case maps:get(connection_type, Opts, supervisor) of
+        worker -> ok;
+        supervisor -> process_flag(trap_exit, true)
+    end,
+    Protocol:init(Parent, Ref, Socket, Transport, ProxyInfo, Opts).
diff --git a/deps/cowboy/src/cowboy_tracer_h.erl b/deps/cowboy/src/cowboy_tracer_h.erl
new file mode 100644
index 0000000..9a19ae1
--- /dev/null
+++ b/deps/cowboy/src/cowboy_tracer_h.erl
@@ -0,0 +1,192 @@
+%% Copyright (c) 2017, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cowboy_tracer_h).
+-behavior(cowboy_stream).
+
+-export([init/3]).
+-export([data/4]).
+-export([info/3]).
+-export([terminate/3]).
+-export([early_error/5]).
+
+-export([set_trace_patterns/0]).
+
+-export([tracer_process/3]).
+-export([system_continue/3]).
+-export([system_terminate/4]).
+-export([system_code_change/4]).
+
+-type match_predicate()
+    :: fun((cowboy_stream:streamid(), cowboy_req:req(), cowboy:opts()) -> boolean()).
+
+-type tracer_match_specs() :: [match_predicate()
+    | {method, binary()}
+    | {host, binary()}
+    | {path, binary()}
+    | {path_start, binary()}
+    | {header, binary()}
+    | {header, binary(), binary()}
+    | {peer_ip, inet:ip_address()}
+].
+-export_type([tracer_match_specs/0]).
+
+-type tracer_callback() :: fun((init | terminate | tuple(), any()) -> any()).
+-export_type([tracer_callback/0]).
+
+-spec init(cowboy_stream:streamid(), cowboy_req:req(), cowboy:opts())
+    -> {cowboy_stream:commands(), any()}.
+init(StreamID, Req, Opts) ->
+    init_tracer(StreamID, Req, Opts),
+    cowboy_stream:init(StreamID, Req, Opts).
+
+-spec data(cowboy_stream:streamid(), cowboy_stream:fin(), cowboy_req:resp_body(), State)
+    -> {cowboy_stream:commands(), State} when State::any().
+data(StreamID, IsFin, Data, Next) ->
+    cowboy_stream:data(StreamID, IsFin, Data, Next).
+
+-spec info(cowboy_stream:streamid(), any(), State)
+    -> {cowboy_stream:commands(), State} when State::any().
+info(StreamID, Info, Next) ->
+    cowboy_stream:info(StreamID, Info, Next).
+
+-spec terminate(cowboy_stream:streamid(), cowboy_stream:reason(), any()) -> any().
+terminate(StreamID, Reason, Next) ->
+    cowboy_stream:terminate(StreamID, Reason, Next).
+
+-spec early_error(cowboy_stream:streamid(), cowboy_stream:reason(),
+    cowboy_stream:partial_req(), Resp, cowboy:opts()) -> Resp
+    when Resp::cowboy_stream:resp_command().
+early_error(StreamID, Reason, PartialReq, Resp, Opts) ->
+    cowboy_stream:early_error(StreamID, Reason, PartialReq, Resp, Opts).
+
+%% API.
+
+%% These trace patterns are most likely not suitable for production.
+-spec set_trace_patterns() -> ok.
+set_trace_patterns() ->
+    erlang:trace_pattern({'_', '_', '_'}, [{'_', [], [{return_trace}]}], [local]),
+    erlang:trace_pattern(on_load, [{'_', [], [{return_trace}]}], [local]),
+    ok.
+
+%% Internal.
+
+init_tracer(StreamID, Req, Opts=#{tracer_match_specs := List, tracer_callback := _}) ->
+    case match(List, StreamID, Req, Opts) of
+        false ->
+            ok;
+        true ->
+            start_tracer(StreamID, Req, Opts)
+    end;
+%% When the options tracer_match_specs or tracer_callback
+%% are not provided we do not enable tracing.
+init_tracer(_, _, _) ->
+    ok.
+
+match([], _, _, _) ->
+    true;
+match([Predicate|Tail], StreamID, Req, Opts) when is_function(Predicate) ->
+    case Predicate(StreamID, Req, Opts) of
+        true -> match(Tail, StreamID, Req, Opts);
+        false -> false
+    end;
+match([{method, Value}|Tail], StreamID, Req=#{method := Value}, Opts) ->
+    match(Tail, StreamID, Req, Opts);
+match([{host, Value}|Tail], StreamID, Req=#{host := Value}, Opts) ->
+    match(Tail, StreamID, Req, Opts);
+match([{path, Value}|Tail], StreamID, Req=#{path := Value}, Opts) ->
+    match(Tail, StreamID, Req, Opts);
+match([{path_start, PathStart}|Tail], StreamID, Req=#{path := Path}, Opts) ->
+    Len = byte_size(PathStart),
+    case Path of
+        <<PathStart:Len/binary, _/bits>> -> match(Tail, StreamID, Req, Opts);
+        _ -> false
+    end;
+match([{header, Name}|Tail], StreamID, Req=#{headers := Headers}, Opts) ->
+    case Headers of
+        #{Name := _} -> match(Tail, StreamID, Req, Opts);
+        _ -> false
+    end;
+match([{header, Name, Value}|Tail], StreamID, Req=#{headers := Headers}, Opts) ->
+    case Headers of
+        #{Name := Value} -> match(Tail, StreamID, Req, Opts);
+        _ -> false
+    end;
+match([{peer_ip, IP}|Tail], StreamID, Req=#{peer := {IP, _}}, Opts) ->
+    match(Tail, StreamID, Req, Opts);
+match(_, _, _, _) ->
+    false.
+
+%% We only start the tracer if one wasn't started before.
+start_tracer(StreamID, Req, Opts) ->
+    case erlang:trace_info(self(), tracer) of
+        {tracer, []} ->
+            TracerPid = proc_lib:spawn_link(?MODULE, tracer_process, [StreamID, Req, Opts]),
+            %% The default flags are probably not suitable for production.
+            Flags = maps:get(tracer_flags, Opts, [
+                send, 'receive', call, return_to,
+                procs, ports, monotonic_timestamp,
+                %% The set_on_spawn flag is necessary to catch events
+                %% from request processes.
+                set_on_spawn
+            ]),
+            erlang:trace(self(), true, [{tracer, TracerPid}|Flags]),
+            ok;
+        _ ->
+            ok
+    end.
+
+%% Tracer process.
+
+-spec tracer_process(_, _, _) -> no_return().
+tracer_process(StreamID, Req=#{pid := Parent}, Opts=#{tracer_callback := Fun}) ->
+    %% This is necessary because otherwise the tracer could stop
+    %% before it has finished processing the events in its queue.
+    process_flag(trap_exit, true),
+    State = Fun(init, {StreamID, Req, Opts}),
+    tracer_loop(Parent, Opts, State).
+
+tracer_loop(Parent, Opts=#{tracer_callback := Fun}, State0) ->
+    receive
+        Msg when element(1, Msg) =:= trace; element(1, Msg) =:= trace_ts ->
+            State = Fun(Msg, State0),
+            tracer_loop(Parent, Opts, State);
+        {'EXIT', Parent, Reason} ->
+            tracer_terminate(Reason, Opts, State0);
+        {system, From, Request} ->
+            sys:handle_system_msg(Request, From, Parent, ?MODULE, [], {Opts, State0});
+        Msg ->
+            cowboy:log(warning, "~p: Tracer process received stray message ~9999p~n",
                [?MODULE, Msg], Opts),
+            tracer_loop(Parent, Opts, State0)
+    end.
+
+-spec tracer_terminate(_, _, _) -> no_return().
+tracer_terminate(Reason, #{tracer_callback := Fun}, State) ->
+    _ = Fun(terminate, State),
+    exit(Reason).
+
+%% System callbacks.
+
+-spec system_continue(pid(), _, {cowboy:opts(), any()}) -> no_return().
+system_continue(Parent, _, {Opts, State}) ->
+    tracer_loop(Parent, Opts, State).
+
+-spec system_terminate(any(), _, _, _) -> no_return().
+system_terminate(Reason, _, _, {Opts, State}) ->
+    tracer_terminate(Reason, Opts, State).
+
+-spec system_code_change(Misc, _, _, _) -> {ok, Misc} when Misc::any().
+system_code_change(Misc, _, _, _) ->
+    {ok, Misc}.
diff --git a/deps/cowboy/src/cowboy_websocket.erl b/deps/cowboy/src/cowboy_websocket.erl
new file mode 100644
index 0000000..e7d8f31
--- /dev/null
+++ b/deps/cowboy/src/cowboy_websocket.erl
@@ -0,0 +1,707 @@
+%% Copyright (c) 2011-2017, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% Cowboy supports versions 7 through 17 of the Websocket drafts.
+%% It also supports RFC6455, the proposed standard for Websocket.
+-module(cowboy_websocket).
+-behaviour(cowboy_sub_protocol).
+
+-export([is_upgrade_request/1]).
+-export([upgrade/4]).
+-export([upgrade/5]).
+-export([takeover/7]).
+-export([loop/3]).
+
+-export([system_continue/3]).
+-export([system_terminate/4]).
+-export([system_code_change/4]).
+
+-type commands() :: [cow_ws:frame()
+    | {active, boolean()}
+    | {deflate, boolean()}
+    | {set_options, map()}
+    | {shutdown_reason, any()}
+].
+-export_type([commands/0]).
+
+-type call_result(State) :: {commands(), State} | {commands(), State, hibernate}.
+
+-type deprecated_call_result(State) :: {ok, State}
+    | {ok, State, hibernate}
+    | {reply, cow_ws:frame() | [cow_ws:frame()], State}
+    | {reply, cow_ws:frame() | [cow_ws:frame()], State, hibernate}
+    | {stop, State}.
+
+-type terminate_reason() :: normal | stop | timeout
+    | remote | {remote, cow_ws:close_code(), binary()}
+    | {error, badencoding | badframe | closed | atom()}
+    | {crash, error | exit | throw, any()}.
+
+-callback init(Req, any())
+    -> {ok | module(), Req, any()}
+    | {module(), Req, any(), any()}
+    when Req::cowboy_req:req().
+
+-callback websocket_init(State)
+    -> call_result(State) | deprecated_call_result(State) when State::any().
+-optional_callbacks([websocket_init/1]).
+ +-callback websocket_handle(ping | pong | {text | binary | ping | pong, binary()}, State) + -> call_result(State) | deprecated_call_result(State) when State::any(). +-callback websocket_info(any(), State) + -> call_result(State) | deprecated_call_result(State) when State::any(). + +-callback terminate(any(), cowboy_req:req(), any()) -> ok. +-optional_callbacks([terminate/3]). + +-type opts() :: #{ + active_n => pos_integer(), + compress => boolean(), + deflate_opts => cow_ws:deflate_opts(), + idle_timeout => timeout(), + max_frame_size => non_neg_integer() | infinity, + req_filter => fun((cowboy_req:req()) -> map()), + validate_utf8 => boolean() +}. +-export_type([opts/0]). + +-record(state, { + parent :: undefined | pid(), + ref :: ranch:ref(), + socket = undefined :: inet:socket() | {pid(), cowboy_stream:streamid()} | undefined, + transport = undefined :: module() | undefined, + opts = #{} :: opts(), + active = true :: boolean(), + handler :: module(), + key = undefined :: undefined | binary(), + timeout_ref = undefined :: undefined | reference(), + messages = undefined :: undefined | {atom(), atom(), atom()} + | {atom(), atom(), atom(), atom()}, + hibernate = false :: boolean(), + frag_state = undefined :: cow_ws:frag_state(), + frag_buffer = <<>> :: binary(), + utf8_state :: cow_ws:utf8_state(), + deflate = true :: boolean(), + extensions = #{} :: map(), + req = #{} :: map(), + shutdown_reason = normal :: any() +}). + +%% Because the HTTP/1.1 and HTTP/2 handshakes are so different, +%% this function is necessary to figure out whether a request +%% is trying to upgrade to the Websocket protocol. + +-spec is_upgrade_request(cowboy_req:req()) -> boolean(). +is_upgrade_request(#{version := 'HTTP/2', method := <<"CONNECT">>, protocol := Protocol}) -> + <<"websocket">> =:= cowboy_bstr:to_lower(Protocol); +is_upgrade_request(Req=#{version := 'HTTP/1.1', method := <<"GET">>}) -> + ConnTokens = cowboy_req:parse_header(<<"connection">>, Req, []), + case lists:member(<<"upgrade">>, ConnTokens) of + false -> + false; + true -> + UpgradeTokens = cowboy_req:parse_header(<<"upgrade">>, Req), + lists:member(<<"websocket">>, UpgradeTokens) + end; +is_upgrade_request(_) -> + false. + +%% Stream process. + +-spec upgrade(Req, Env, module(), any()) + -> {ok, Req, Env} + when Req::cowboy_req:req(), Env::cowboy_middleware:env(). +upgrade(Req, Env, Handler, HandlerState) -> + upgrade(Req, Env, Handler, HandlerState, #{}). + +-spec upgrade(Req, Env, module(), any(), opts()) + -> {ok, Req, Env} + when Req::cowboy_req:req(), Env::cowboy_middleware:env(). +%% @todo Immediately crash if a response has already been sent. +upgrade(Req0=#{version := Version}, Env, Handler, HandlerState, Opts) -> + FilteredReq = case maps:get(req_filter, Opts, undefined) of + undefined -> maps:with([method, version, scheme, host, port, path, qs, peer], Req0); + FilterFun -> FilterFun(Req0) + end, + Utf8State = case maps:get(validate_utf8, Opts, true) of + true -> 0; + false -> undefined + end, + State0 = #state{opts=Opts, handler=Handler, utf8_state=Utf8State, req=FilteredReq}, + try websocket_upgrade(State0, Req0) of + {ok, State, Req} -> + websocket_handshake(State, Req, HandlerState, Env); + %% The status code 426 is specific to HTTP/1.1 connections. + {error, upgrade_required} when Version =:= 'HTTP/1.1' -> + {ok, cowboy_req:reply(426, #{ + <<"connection">> => <<"upgrade">>, + <<"upgrade">> => <<"websocket">> + }, Req0), Env}; + %% Use a generic 400 error for HTTP/2. 
+ {error, upgrade_required} -> + {ok, cowboy_req:reply(400, Req0), Env} + catch _:_ -> + %% @todo Probably log something here? + %% @todo Test that we can have 2 /ws 400 status code in a row on the same connection. + %% @todo Does this even work? + {ok, cowboy_req:reply(400, Req0), Env} + end. + +websocket_upgrade(State, Req=#{version := Version}) -> + case is_upgrade_request(Req) of + false -> + {error, upgrade_required}; + true when Version =:= 'HTTP/1.1' -> + Key = cowboy_req:header(<<"sec-websocket-key">>, Req), + false = Key =:= undefined, + websocket_version(State#state{key=Key}, Req); + true -> + websocket_version(State, Req) + end. + +websocket_version(State, Req) -> + WsVersion = cowboy_req:parse_header(<<"sec-websocket-version">>, Req), + case WsVersion of + 7 -> ok; + 8 -> ok; + 13 -> ok + end, + websocket_extensions(State, Req#{websocket_version => WsVersion}). + +websocket_extensions(State=#state{opts=Opts}, Req) -> + %% @todo We want different options for this. For example + %% * compress everything auto + %% * compress only text auto + %% * compress only binary auto + %% * compress nothing auto (but still enabled it) + %% * disable compression + Compress = maps:get(compress, Opts, false), + case {Compress, cowboy_req:parse_header(<<"sec-websocket-extensions">>, Req)} of + {true, Extensions} when Extensions =/= undefined -> + websocket_extensions(State, Req, Extensions, []); + _ -> + {ok, State, Req} + end. + +websocket_extensions(State, Req, [], []) -> + {ok, State, Req}; +websocket_extensions(State, Req, [], [<<", ">>|RespHeader]) -> + {ok, State, cowboy_req:set_resp_header(<<"sec-websocket-extensions">>, lists:reverse(RespHeader), Req)}; +%% For HTTP/2 we ARE on the controlling process and do NOT want to update the owner. +websocket_extensions(State=#state{opts=Opts, extensions=Extensions}, + Req=#{pid := Pid, version := Version}, + [{<<"permessage-deflate">>, Params}|Tail], RespHeader) -> + DeflateOpts0 = maps:get(deflate_opts, Opts, #{}), + DeflateOpts = case Version of + 'HTTP/1.1' -> DeflateOpts0#{owner => Pid}; + _ -> DeflateOpts0 + end, + try cow_ws:negotiate_permessage_deflate(Params, Extensions, DeflateOpts) of + {ok, RespExt, Extensions2} -> + websocket_extensions(State#state{extensions=Extensions2}, + Req, Tail, [<<", ">>, RespExt|RespHeader]); + ignore -> + websocket_extensions(State, Req, Tail, RespHeader) + catch exit:{error, incompatible_zlib_version, _} -> + websocket_extensions(State, Req, Tail, RespHeader) + end; +websocket_extensions(State=#state{opts=Opts, extensions=Extensions}, + Req=#{pid := Pid, version := Version}, + [{<<"x-webkit-deflate-frame">>, Params}|Tail], RespHeader) -> + DeflateOpts0 = maps:get(deflate_opts, Opts, #{}), + DeflateOpts = case Version of + 'HTTP/1.1' -> DeflateOpts0#{owner => Pid}; + _ -> DeflateOpts0 + end, + try cow_ws:negotiate_x_webkit_deflate_frame(Params, Extensions, DeflateOpts) of + {ok, RespExt, Extensions2} -> + websocket_extensions(State#state{extensions=Extensions2}, + Req, Tail, [<<", ">>, RespExt|RespHeader]); + ignore -> + websocket_extensions(State, Req, Tail, RespHeader) + catch exit:{error, incompatible_zlib_version, _} -> + websocket_extensions(State, Req, Tail, RespHeader) + end; +websocket_extensions(State, Req, [_|Tail], RespHeader) -> + websocket_extensions(State, Req, Tail, RespHeader). + +-spec websocket_handshake(#state{}, Req, any(), Env) + -> {ok, Req, Env} + when Req::cowboy_req:req(), Env::cowboy_middleware:env(). 
+websocket_handshake(State=#state{key=Key}, + Req=#{version := 'HTTP/1.1', pid := Pid, streamid := StreamID}, + HandlerState, Env) -> + Challenge = base64:encode(crypto:hash(sha, + << Key/binary, "258EAFA5-E914-47DA-95CA-C5AB0DC85B11" >>)), + %% @todo We don't want date and server headers. + Headers = cowboy_req:response_headers(#{ + <<"connection">> => <<"Upgrade">>, + <<"upgrade">> => <<"websocket">>, + <<"sec-websocket-accept">> => Challenge + }, Req), + Pid ! {{Pid, StreamID}, {switch_protocol, Headers, ?MODULE, {State, HandlerState}}}, + {ok, Req, Env}; +%% For HTTP/2 we do not let the process die, we instead keep it +%% for the Websocket stream. This is because in HTTP/2 we only +%% have a stream, it doesn't take over the whole connection. +websocket_handshake(State, Req=#{ref := Ref, pid := Pid, streamid := StreamID}, + HandlerState, _Env) -> + %% @todo We don't want date and server headers. + Headers = cowboy_req:response_headers(#{}, Req), + Pid ! {{Pid, StreamID}, {switch_protocol, Headers, ?MODULE, {State, HandlerState}}}, + takeover(Pid, Ref, {Pid, StreamID}, undefined, undefined, <<>>, + {State, HandlerState}). + +%% Connection process. + +-record(ps_header, { + buffer = <<>> :: binary() +}). + +-record(ps_payload, { + type :: cow_ws:frame_type(), + len :: non_neg_integer(), + mask_key :: cow_ws:mask_key(), + rsv :: cow_ws:rsv(), + close_code = undefined :: undefined | cow_ws:close_code(), + unmasked = <<>> :: binary(), + unmasked_len = 0 :: non_neg_integer(), + buffer = <<>> :: binary() +}). + +-type parse_state() :: #ps_header{} | #ps_payload{}. + +-spec takeover(pid(), ranch:ref(), inet:socket() | {pid(), cowboy_stream:streamid()}, + module() | undefined, any(), binary(), + {#state{}, any()}) -> no_return(). +takeover(Parent, Ref, Socket, Transport, _Opts, Buffer, + {State0=#state{handler=Handler}, HandlerState}) -> + %% @todo We should have an option to disable this behavior. + ranch:remove_connection(Ref), + Messages = case Transport of + undefined -> undefined; + _ -> Transport:messages() + end, + State = loop_timeout(State0#state{parent=Parent, + ref=Ref, socket=Socket, transport=Transport, + key=undefined, messages=Messages}), + %% We call parse_header/3 immediately because there might be + %% some data in the buffer that was sent along with the handshake. + %% While it is not allowed by the protocol to send frames immediately, + %% we still want to process that data if any. + case erlang:function_exported(Handler, websocket_init, 1) of + true -> handler_call(State, HandlerState, #ps_header{buffer=Buffer}, + websocket_init, undefined, fun after_init/3); + false -> after_init(State, HandlerState, #ps_header{buffer=Buffer}) + end. + +after_init(State=#state{active=true}, HandlerState, ParseState) -> + %% Enable active,N for HTTP/1.1, and auto read_body for HTTP/2. + %% We must do this only after calling websocket_init/1 (if any) + %% to give the handler a chance to disable active mode immediately. + setopts_active(State), + maybe_read_body(State), + parse_header(State, HandlerState, ParseState); +after_init(State, HandlerState, ParseState) -> + parse_header(State, HandlerState, ParseState). + +%% We have two ways of reading the body for Websocket. For HTTP/1.1 +%% we have full control of the socket and can therefore use active,N. +%% For HTTP/2 we are just a stream, and are instead using read_body +%% (automatic mode). 
Technically HTTP/2 will only go passive after +%% receiving the next data message, while HTTP/1.1 goes passive +%% immediately but there might still be data to be processed in +%% the message queue. + +setopts_active(#state{transport=undefined}) -> + ok; +setopts_active(#state{socket=Socket, transport=Transport, opts=Opts}) -> + N = maps:get(active_n, Opts, 100), + Transport:setopts(Socket, [{active, N}]). + +maybe_read_body(#state{socket=Stream={Pid, _}, transport=undefined, active=true}) -> + %% @todo Keep Ref around. + ReadBodyRef = make_ref(), + Pid ! {Stream, {read_body, self(), ReadBodyRef, auto, infinity}}, + ok; +maybe_read_body(_) -> + ok. + +active(State) -> + setopts_active(State), + maybe_read_body(State), + State#state{active=true}. + +passive(State=#state{transport=undefined}) -> + %% Unfortunately we cannot currently cancel read_body. + %% But that's OK, we will just stop reading the body + %% after the next message. + State#state{active=false}; +passive(State=#state{socket=Socket, transport=Transport, messages=Messages}) -> + Transport:setopts(Socket, [{active, false}]), + flush_passive(Socket, Messages), + State#state{active=false}. + +flush_passive(Socket, Messages) -> + receive + {Passive, Socket} when Passive =:= element(4, Messages); + %% Hardcoded for compatibility with Ranch 1.x. + Passive =:= tcp_passive; Passive =:= ssl_passive -> + flush_passive(Socket, Messages) + after 0 -> + ok + end. + +before_loop(State=#state{hibernate=true}, HandlerState, ParseState) -> + proc_lib:hibernate(?MODULE, loop, + [State#state{hibernate=false}, HandlerState, ParseState]); +before_loop(State, HandlerState, ParseState) -> + loop(State, HandlerState, ParseState). + +-spec loop_timeout(#state{}) -> #state{}. +loop_timeout(State=#state{opts=Opts, timeout_ref=PrevRef}) -> + _ = case PrevRef of + undefined -> ignore; + PrevRef -> erlang:cancel_timer(PrevRef) + end, + case maps:get(idle_timeout, Opts, 60000) of + infinity -> + State#state{timeout_ref=undefined}; + Timeout -> + TRef = erlang:start_timer(Timeout, self(), ?MODULE), + State#state{timeout_ref=TRef} + end. + +-spec loop(#state{}, any(), parse_state()) -> no_return(). +loop(State=#state{parent=Parent, socket=Socket, messages=Messages, + timeout_ref=TRef}, HandlerState, ParseState) -> + receive + %% Socket messages. (HTTP/1.1) + {OK, Socket, Data} when OK =:= element(1, Messages) -> + State2 = loop_timeout(State), + parse(State2, HandlerState, ParseState, Data); + {Closed, Socket} when Closed =:= element(2, Messages) -> + terminate(State, HandlerState, {error, closed}); + {Error, Socket, Reason} when Error =:= element(3, Messages) -> + terminate(State, HandlerState, {error, Reason}); + {Passive, Socket} when Passive =:= element(4, Messages); + %% Hardcoded for compatibility with Ranch 1.x. + Passive =:= tcp_passive; Passive =:= ssl_passive -> + setopts_active(State), + loop(State, HandlerState, ParseState); + %% Body reading messages. (HTTP/2) + {request_body, _Ref, nofin, Data} -> + maybe_read_body(State), + State2 = loop_timeout(State), + parse(State2, HandlerState, ParseState, Data); + %% @todo We need to handle this case as if it was an {error, closed} + %% but not before we finish processing frames. We probably should have + %% a check in before_loop to let us stop looping if a flag is set. + {request_body, _Ref, fin, _, Data} -> + maybe_read_body(State), + State2 = loop_timeout(State), + parse(State2, HandlerState, ParseState, Data); + %% Timeouts. 
+        {timeout, TRef, ?MODULE} ->
+            websocket_close(State, HandlerState, timeout);
+        {timeout, OlderTRef, ?MODULE} when is_reference(OlderTRef) ->
+            before_loop(State, HandlerState, ParseState);
+        %% System messages.
+        {'EXIT', Parent, Reason} ->
+            %% @todo We should exit gracefully.
+            exit(Reason);
+        {system, From, Request} ->
+            sys:handle_system_msg(Request, From, Parent, ?MODULE, [],
+                {State, HandlerState, ParseState});
+        %% Calls from supervisor module.
+        {'$gen_call', From, Call} ->
+            cowboy_children:handle_supervisor_call(Call, From, [], ?MODULE),
+            before_loop(State, HandlerState, ParseState);
+        Message ->
+            handler_call(State, HandlerState, ParseState,
+                websocket_info, Message, fun before_loop/3)
+    end.
+
+parse(State, HandlerState, PS=#ps_header{buffer=Buffer}, Data) ->
+    parse_header(State, HandlerState, PS#ps_header{
+        buffer= <<Buffer/binary, Data/binary>>});
+parse(State, HandlerState, PS=#ps_payload{buffer=Buffer}, Data) ->
+    parse_payload(State, HandlerState, PS#ps_payload{buffer= <<>>},
+        <<Buffer/binary, Data/binary>>).
+
+parse_header(State=#state{opts=Opts, frag_state=FragState, extensions=Extensions},
+        HandlerState, ParseState=#ps_header{buffer=Data}) ->
+    MaxFrameSize = maps:get(max_frame_size, Opts, infinity),
+    case cow_ws:parse_header(Data, Extensions, FragState) of
+        %% All frames sent from the client to the server are masked.
+        {_, _, _, _, undefined, _} ->
+            websocket_close(State, HandlerState, {error, badframe});
+        {_, _, _, Len, _, _} when Len > MaxFrameSize ->
+            websocket_close(State, HandlerState, {error, badsize});
+        {Type, FragState2, Rsv, Len, MaskKey, Rest} ->
+            parse_payload(State#state{frag_state=FragState2}, HandlerState,
+                #ps_payload{type=Type, len=Len, mask_key=MaskKey, rsv=Rsv}, Rest);
+        more ->
+            before_loop(State, HandlerState, ParseState);
+        error ->
+            websocket_close(State, HandlerState, {error, badframe})
+    end.
+
+parse_payload(State=#state{frag_state=FragState, utf8_state=Incomplete, extensions=Extensions},
+        HandlerState, ParseState=#ps_payload{
+            type=Type, len=Len, mask_key=MaskKey, rsv=Rsv,
+            unmasked=Unmasked, unmasked_len=UnmaskedLen}, Data) ->
+    case cow_ws:parse_payload(Data, MaskKey, Incomplete, UnmaskedLen,
+            Type, Len, FragState, Extensions, Rsv) of
+        {ok, CloseCode, Payload, Utf8State, Rest} ->
+            dispatch_frame(State#state{utf8_state=Utf8State}, HandlerState,
+                ParseState#ps_payload{unmasked= <<Unmasked/binary, Payload/binary>>,
+                    close_code=CloseCode}, Rest);
+        {ok, Payload, Utf8State, Rest} ->
+            dispatch_frame(State#state{utf8_state=Utf8State}, HandlerState,
+                ParseState#ps_payload{unmasked= <<Unmasked/binary, Payload/binary>>},
+                Rest);
+        {more, CloseCode, Payload, Utf8State} ->
+            before_loop(State#state{utf8_state=Utf8State}, HandlerState,
+                ParseState#ps_payload{len=Len - byte_size(Data), close_code=CloseCode,
+                    unmasked= <<Unmasked/binary, Payload/binary>>,
+                    unmasked_len=UnmaskedLen + byte_size(Data)});
+        {more, Payload, Utf8State} ->
+            before_loop(State#state{utf8_state=Utf8State}, HandlerState,
+                ParseState#ps_payload{len=Len - byte_size(Data),
+                    unmasked= <<Unmasked/binary, Payload/binary>>,
+                    unmasked_len=UnmaskedLen + byte_size(Data)});
+        Error = {error, _Reason} ->
+            websocket_close(State, HandlerState, Error)
+    end.
+
+dispatch_frame(State=#state{opts=Opts, frag_state=FragState, frag_buffer=SoFar}, HandlerState,
+        #ps_payload{type=Type0, unmasked=Payload0, close_code=CloseCode0}, RemainingData) ->
+    MaxFrameSize = maps:get(max_frame_size, Opts, infinity),
+    case cow_ws:make_frame(Type0, Payload0, CloseCode0, FragState) of
+        %% @todo Allow receiving fragments.
+ {fragment, _, _, Payload} when byte_size(Payload) + byte_size(SoFar) > MaxFrameSize -> + websocket_close(State, HandlerState, {error, badsize}); + {fragment, nofin, _, Payload} -> + parse_header(State#state{frag_buffer= << SoFar/binary, Payload/binary >>}, + HandlerState, #ps_header{buffer=RemainingData}); + {fragment, fin, Type, Payload} -> + handler_call(State#state{frag_state=undefined, frag_buffer= <<>>}, HandlerState, + #ps_header{buffer=RemainingData}, + websocket_handle, {Type, << SoFar/binary, Payload/binary >>}, + fun parse_header/3); + close -> + websocket_close(State, HandlerState, remote); + {close, CloseCode, Payload} -> + websocket_close(State, HandlerState, {remote, CloseCode, Payload}); + Frame = ping -> + transport_send(State, nofin, frame(pong, State)), + handler_call(State, HandlerState, + #ps_header{buffer=RemainingData}, + websocket_handle, Frame, fun parse_header/3); + Frame = {ping, Payload} -> + transport_send(State, nofin, frame({pong, Payload}, State)), + handler_call(State, HandlerState, + #ps_header{buffer=RemainingData}, + websocket_handle, Frame, fun parse_header/3); + Frame -> + handler_call(State, HandlerState, + #ps_header{buffer=RemainingData}, + websocket_handle, Frame, fun parse_header/3) + end. + +handler_call(State=#state{handler=Handler}, HandlerState, + ParseState, Callback, Message, NextState) -> + try case Callback of + websocket_init -> Handler:websocket_init(HandlerState); + _ -> Handler:Callback(Message, HandlerState) + end of + {Commands, HandlerState2} when is_list(Commands) -> + handler_call_result(State, + HandlerState2, ParseState, NextState, Commands); + {Commands, HandlerState2, hibernate} when is_list(Commands) -> + handler_call_result(State#state{hibernate=true}, + HandlerState2, ParseState, NextState, Commands); + %% The following call results are deprecated. + {ok, HandlerState2} -> + NextState(State, HandlerState2, ParseState); + {ok, HandlerState2, hibernate} -> + NextState(State#state{hibernate=true}, HandlerState2, ParseState); + {reply, Payload, HandlerState2} -> + case websocket_send(Payload, State) of + ok -> + NextState(State, HandlerState2, ParseState); + stop -> + terminate(State, HandlerState2, stop); + Error = {error, _} -> + terminate(State, HandlerState2, Error) + end; + {reply, Payload, HandlerState2, hibernate} -> + case websocket_send(Payload, State) of + ok -> + NextState(State#state{hibernate=true}, + HandlerState2, ParseState); + stop -> + terminate(State, HandlerState2, stop); + Error = {error, _} -> + terminate(State, HandlerState2, Error) + end; + {stop, HandlerState2} -> + websocket_close(State, HandlerState2, stop) + catch Class:Reason:Stacktrace -> + websocket_send_close(State, {crash, Class, Reason}), + handler_terminate(State, HandlerState, {crash, Class, Reason}), + erlang:raise(Class, Reason, Stacktrace) + end. + +-spec handler_call_result(#state{}, any(), parse_state(), fun(), commands()) -> no_return(). +handler_call_result(State0, HandlerState, ParseState, NextState, Commands) -> + case commands(Commands, State0, []) of + {ok, State} -> + NextState(State, HandlerState, ParseState); + {stop, State} -> + terminate(State, HandlerState, stop); + {Error = {error, _}, State} -> + terminate(State, HandlerState, Error) + end. 
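
(Not part of the vendored sources.) For orientation, the command lists processed by handler_call/6 and handler_call_result/5 above are what a user-defined websocket handler returns. A minimal sketch of such a handler, with a hypothetical module name and message shapes, could look like this:

```erlang
%% Hypothetical echo handler, for illustration only; not included in this patch.
-module(echo_ws_handler).
-behaviour(cowboy_websocket).

-export([init/2]).
-export([websocket_init/1]).
-export([websocket_handle/2]).
-export([websocket_info/2]).

%% Switch the request over to the cowboy_websocket sub protocol.
init(Req, State) ->
    {cowboy_websocket, Req, State, #{idle_timeout => 30000}}.

%% Runs once in the connection/stream process; greet the client.
websocket_init(State) ->
    {[{text, <<"hello">>}], State}.

%% Echo text frames back; ignore everything else.
websocket_handle({text, Msg}, State) ->
    {[{text, Msg}], State};
websocket_handle(_Frame, State) ->
    {[], State}.

%% Forward Erlang messages such as {send, Text} to the client.
websocket_info({send, Text}, State) ->
    {[{text, Text}], State};
websocket_info(_Info, State) ->
    {[], State}.
```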
+ +commands([], State, []) -> + {ok, State}; +commands([], State, Data) -> + Result = transport_send(State, nofin, lists:reverse(Data)), + {Result, State}; +commands([{active, Active}|Tail], State0=#state{active=Active0}, Data) when is_boolean(Active) -> + State = if + Active, not Active0 -> + active(State0); + Active0, not Active -> + passive(State0); + true -> + State0 + end, + commands(Tail, State#state{active=Active}, Data); +commands([{deflate, Deflate}|Tail], State, Data) when is_boolean(Deflate) -> + commands(Tail, State#state{deflate=Deflate}, Data); +commands([{set_options, SetOpts}|Tail], State0=#state{opts=Opts}, Data) -> + State = case SetOpts of + #{idle_timeout := IdleTimeout} -> + loop_timeout(State0#state{opts=Opts#{idle_timeout => IdleTimeout}}); + _ -> + State0 + end, + commands(Tail, State, Data); +commands([{shutdown_reason, ShutdownReason}|Tail], State, Data) -> + commands(Tail, State#state{shutdown_reason=ShutdownReason}, Data); +commands([Frame|Tail], State, Data0) -> + Data = [frame(Frame, State)|Data0], + case is_close_frame(Frame) of + true -> + _ = transport_send(State, fin, lists:reverse(Data)), + {stop, State}; + false -> + commands(Tail, State, Data) + end. + +transport_send(#state{socket=Stream={Pid, _}, transport=undefined}, IsFin, Data) -> + Pid ! {Stream, {data, IsFin, Data}}, + ok; +transport_send(#state{socket=Socket, transport=Transport}, _, Data) -> + Transport:send(Socket, Data). + +-spec websocket_send(cow_ws:frame(), #state{}) -> ok | stop | {error, atom()}. +websocket_send(Frames, State) when is_list(Frames) -> + websocket_send_many(Frames, State, []); +websocket_send(Frame, State) -> + Data = frame(Frame, State), + case is_close_frame(Frame) of + true -> + _ = transport_send(State, fin, Data), + stop; + false -> + transport_send(State, nofin, Data) + end. + +websocket_send_many([], State, Acc) -> + transport_send(State, nofin, lists:reverse(Acc)); +websocket_send_many([Frame|Tail], State, Acc0) -> + Acc = [frame(Frame, State)|Acc0], + case is_close_frame(Frame) of + true -> + _ = transport_send(State, fin, lists:reverse(Acc)), + stop; + false -> + websocket_send_many(Tail, State, Acc) + end. + +is_close_frame(close) -> true; +is_close_frame({close, _}) -> true; +is_close_frame({close, _, _}) -> true; +is_close_frame(_) -> false. + +-spec websocket_close(#state{}, any(), terminate_reason()) -> no_return(). +websocket_close(State, HandlerState, Reason) -> + websocket_send_close(State, Reason), + terminate(State, HandlerState, Reason). + +websocket_send_close(State, Reason) -> + _ = case Reason of + Normal when Normal =:= stop; Normal =:= timeout -> + transport_send(State, fin, frame({close, 1000, <<>>}, State)); + {error, badframe} -> + transport_send(State, fin, frame({close, 1002, <<>>}, State)); + {error, badencoding} -> + transport_send(State, fin, frame({close, 1007, <<>>}, State)); + {error, badsize} -> + transport_send(State, fin, frame({close, 1009, <<>>}, State)); + {crash, _, _} -> + transport_send(State, fin, frame({close, 1011, <<>>}, State)); + remote -> + transport_send(State, fin, frame(close, State)); + {remote, Code, _} -> + transport_send(State, fin, frame({close, Code, <<>>}, State)) + end, + ok. + +%% Don't compress frames while deflate is disabled. +frame(Frame, #state{deflate=false, extensions=Extensions}) -> + cow_ws:frame(Frame, Extensions#{deflate => false}); +frame(Frame, #state{extensions=Extensions}) -> + cow_ws:frame(Frame, Extensions). + +-spec terminate(#state{}, any(), terminate_reason()) -> no_return(). 
+terminate(State=#state{shutdown_reason=Shutdown}, HandlerState, Reason) -> + handler_terminate(State, HandlerState, Reason), + case Shutdown of + normal -> exit(normal); + _ -> exit({shutdown, Shutdown}) + end. + +handler_terminate(#state{handler=Handler, req=Req}, HandlerState, Reason) -> + cowboy_handler:terminate(Reason, Req, HandlerState, Handler). + +%% System callbacks. + +-spec system_continue(_, _, {#state{}, any(), parse_state()}) -> no_return(). +system_continue(_, _, {State, HandlerState, ParseState}) -> + loop(State, HandlerState, ParseState). + +-spec system_terminate(any(), _, _, {#state{}, any(), parse_state()}) -> no_return(). +system_terminate(Reason, _, _, {State, HandlerState, _}) -> + %% @todo We should exit gracefully, if possible. + terminate(State, HandlerState, Reason). + +-spec system_code_change(Misc, _, _, _) + -> {ok, Misc} when Misc::{#state{}, any(), parse_state()}. +system_code_change(Misc, _, _, _) -> + {ok, Misc}. diff --git a/deps/cowboy_telemetry/.fetch b/deps/cowboy_telemetry/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/cowboy_telemetry/.hex b/deps/cowboy_telemetry/.hex new file mode 100644 index 0000000000000000000000000000000000000000..e9c5bbefc0e702c49c6e35af2ec0ddfcf858170e GIT binary patch literal 283 zcmZ9H%~As)5QH(&=*jX)Qn36DH{T>RFoPQnkRls3XJ1^_qt~vk{=WX1H}jUncII!~ zpjdwTDc$6%<1EDEoG#ydD0;}h!%h|dM`c3#L6KUk~Cq;8fCW9dX*tYpRGe> z!C6FxNhT6c1uMRBznsS29f@7%h+V&vxUX1vrM#r}72EBw@QDjA{We_Mb@}h&DL}-e zn2mLow2E#b8#2J8DHG_G?E?FpVgwDN0BCjaMw%3e2eI?Ht;C1%{9H&peh#r?tACl? Ho4x!6e&kR7 literal 0 HcmV?d00001 diff --git a/deps/cowboy_telemetry/LICENSE b/deps/cowboy_telemetry/LICENSE new file mode 100644 index 0000000..6be8bd4 --- /dev/null +++ b/deps/cowboy_telemetry/LICENSE @@ -0,0 +1,191 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + Copyright 2020, Anonymous . + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/deps/cowboy_telemetry/README.md b/deps/cowboy_telemetry/README.md new file mode 100644 index 0000000..7f2482b --- /dev/null +++ b/deps/cowboy_telemetry/README.md @@ -0,0 +1,70 @@ +cowboy_telemetry +===== + +[![Hex.pm Version](https://img.shields.io/hexpm/v/cowboy_telemetry.svg)](https://hex.pm/packages/cowboy_telemetry) +[![Erlang CI](https://github.com/beam-telemetry/cowboy_telemetry/workflows/Erlang%20CI/badge.svg?branch=main)](https://github.com/beam-telemetry/cowboy_telemetry/actions) + +[Telemetry](https://github.com/beam-telemetry/telemetry) instrumentation for the [Cowboy](https://github.com/ninenines/cowboy) HTTP server. + +This package contains a [`cowboy_stream`](https://ninenines.eu/docs/en/cowboy/2.8/manual/cowboy_stream/) handler that will instrument each request and emit `telemetry` events. + +## Usage + +Configure your cowboy server with the `cowboy_telemetry_h` stream handler first. + +```erlang +cowboy:start_clear(http, [{port, Port}], #{ + env => #{dispatch => Dispatch}, + stream_handlers => [cowboy_telemetry_h, cowboy_stream_h] +}. +``` + +## Telemetry Events + +#### `[cowboy, request, start]` + +A span event emitted at the beginning of a request. + +* `measurements`: `#{system_time => erlang:system_time()}` +* `metadata`: `#{stream_id => cowboy_stream:streamid(), req => cowboy_req:req()}` + +#### `[cowboy, request, stop]` + +A span event emitted at the end of a request. 
+ +* `measurements`: `measurements()` +* `metadata`: `metadata()` + +If the request is terminated early - by the client or by the server - before a response is sent, the metadata will also contain an `error`: + +* `metadata`: `metadata()` + `#{error => cowboy_stream:reason()}` + +#### `[cowboy, request, exception]` + +A span event emitted if the request process exits. + +* `measurements`: `measurements()` +* `metadata`: `metadata()` + `#{kind => exit, stacktrace => list()}` + +#### `[cowboy, request, early_error]` + +A single event emitted when Cowboy itself returns an `early_error` response before executing any handlers. + +* `measurements`: `#{system_time => erlang:system_time(), resp_body_length => non_neg_integer()}` +* `metadata`: `metadata()` without `procs` or `informational` + +### Types + +* `measurements()`: + * `duration :: req_start - req_end` see [`cowboy_metrics_h`](https://github.com/ninenines/cowboy/blob/master/src/cowboy_metrics_h.erl#L75) + * `req_body_duration :: req_body_start - req_body_end` see [`cowboy_metrics_h`](https://github.com/ninenines/cowboy/blob/master/src/cowboy_metrics_h.erl#L80) + * `resp_duration :: resp_start - resp_end` see [`cowboy_metrics_h`](https://github.com/ninenines/cowboy/blob/master/src/cowboy_metrics_h.erl#L87) + * `req_body_length :: non_neg_integer()` + * `resp_body_length :: non_neg_integer()` +* `metadata()`: + * `pid`, `streamid`, `req`, `resp_headers`, `resp_status`, and `ref` from `cowboy_metrics_h:metrics()` +* `cowboy_metrics_h:metrics()`: Defined in [`cowboy_metrics_h`](https://github.com/ninenines/cowboy/blob/master/src/cowboy_metrics_h.erl#L46) + +Note: + +* The `telemetry` handlers are executed from the cowboy connection process, not from the request process. diff --git a/deps/cowboy_telemetry/hex_metadata.config b/deps/cowboy_telemetry/hex_metadata.config new file mode 100644 index 0000000..aa66e70 --- /dev/null +++ b/deps/cowboy_telemetry/hex_metadata.config @@ -0,0 +1,20 @@ +{<<"app">>,<<"cowboy_telemetry">>}. +{<<"build_tools">>,[<<"rebar3">>]}. +{<<"description">>,<<"Telemetry instrumentation for Cowboy">>}. +{<<"files">>, + [<<"LICENSE">>,<<"README.md">>,<<"rebar.config">>,<<"rebar.lock">>, + <<"src/cowboy_telemetry.app.src">>,<<"src/cowboy_telemetry_h.erl">>]}. +{<<"licenses">>,[<<"Apache 2.0">>]}. +{<<"links">>, + [{<<"Github">>,<<"https://github.com/beam-telemetry/cowboy_telemetry">>}]}. +{<<"name">>,<<"cowboy_telemetry">>}. +{<<"requirements">>, + [{<<"cowboy">>, + [{<<"app">>,<<"cowboy">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 2.7">>}]}, + {<<"telemetry">>, + [{<<"app">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"requirement">>,<<"~> 1.0">>}]}]}. +{<<"version">>,<<"0.4.0">>}. diff --git a/deps/cowboy_telemetry/rebar.config b/deps/cowboy_telemetry/rebar.config new file mode 100644 index 0000000..488e06d --- /dev/null +++ b/deps/cowboy_telemetry/rebar.config @@ -0,0 +1,6 @@ +{erl_opts, [debug_info]}. +{project_plugins, [rebar3_hex]}. +{deps, [ + {telemetry, "~> 1.0"}, + {cowboy, "~> 2.7"} +]}. diff --git a/deps/cowboy_telemetry/rebar.lock b/deps/cowboy_telemetry/rebar.lock new file mode 100644 index 0000000..2853a25 --- /dev/null +++ b/deps/cowboy_telemetry/rebar.lock @@ -0,0 +1,17 @@ +{"1.2.0", +[{<<"cowboy">>,{pkg,<<"cowboy">>,<<"2.8.0">>},0}, + {<<"cowlib">>,{pkg,<<"cowlib">>,<<"2.9.1">>},1}, + {<<"ranch">>,{pkg,<<"ranch">>,<<"1.7.1">>},1}, + {<<"telemetry">>,{pkg,<<"telemetry">>,<<"1.0.0">>},0}]}. 
+[ +{pkg_hash,[ + {<<"cowboy">>, <<"F3DC62E35797ECD9AC1B50DB74611193C29815401E53BAC9A5C0577BD7BC667D">>}, + {<<"cowlib">>, <<"61A6C7C50CF07FDD24B2F45B89500BB93B6686579B069A89F88CB211E1125C78">>}, + {<<"ranch">>, <<"6B1FAB51B49196860B733A49C07604465A47BDB78AA10C1C16A3D199F7F8C881">>}, + {<<"telemetry">>, <<"0F453A102CDF13D506B7C0AB158324C337C41F1CC7548F0BC0E130BBF0AE9452">>}]}, +{pkg_hash_ext,[ + {<<"cowboy">>, <<"4643E4FBA74AC96D4D152C75803DE6FAD0B3FA5DF354C71AFDD6CBEEB15FAC8A">>}, + {<<"cowlib">>, <<"E4175DC240A70D996156160891E1C62238EDE1729E45740BDD38064DAD476170">>}, + {<<"ranch">>, <<"451D8527787DF716D99DC36162FCA05934915DB0B6141BBDAC2EA8D3C7AFC7D7">>}, + {<<"telemetry">>, <<"73BC09FA59B4A0284EFB4624335583C528E07EC9AE76ACA96EA0673850AEC57A">>}]} +]. diff --git a/deps/cowboy_telemetry/src/cowboy_telemetry.app.src b/deps/cowboy_telemetry/src/cowboy_telemetry.app.src new file mode 100644 index 0000000..940fa10 --- /dev/null +++ b/deps/cowboy_telemetry/src/cowboy_telemetry.app.src @@ -0,0 +1,10 @@ +{application,cowboy_telemetry, + [{description,"Telemetry instrumentation for Cowboy"}, + {vsn,"0.4.0"}, + {registered,[]}, + {applications,[kernel,stdlib,telemetry]}, + {env,[]}, + {modules,[]}, + {licenses,["Apache 2.0"]}, + {links,[{"Github", + "https://github.com/beam-telemetry/cowboy_telemetry"}]}]}. diff --git a/deps/cowboy_telemetry/src/cowboy_telemetry_h.erl b/deps/cowboy_telemetry/src/cowboy_telemetry_h.erl new file mode 100644 index 0000000..b490121 --- /dev/null +++ b/deps/cowboy_telemetry/src/cowboy_telemetry_h.erl @@ -0,0 +1,92 @@ +-module(cowboy_telemetry_h). +-behavior(cowboy_stream). + +-export([init/3]). +-export([data/4]). +-export([info/3]). +-export([terminate/3]). +-export([early_error/5]). + +init(StreamID, Req, Opts) -> + telemetry:execute( + [cowboy, request, start], + #{system_time => erlang:system_time()}, + #{streamid => StreamID, req => Req}), + cowboy_metrics_h:init(StreamID, Req, add_metrics_callback(Opts)). + +info(StreamID, Info, State) -> + cowboy_metrics_h:info(StreamID, Info, State). + +data(StreamID, IsFin, Data, State) -> + cowboy_metrics_h:data(StreamID, IsFin, Data, State). + +terminate(StreamID, Reason, State) -> + cowboy_metrics_h:terminate(StreamID, Reason, State). + +early_error(StreamID, Reason, PartialReq, Resp, Opts) -> + cowboy_metrics_h:early_error(StreamID, Reason, PartialReq, Resp, add_metrics_callback(Opts)). + +% + +add_metrics_callback(Opts) -> + maps:put(metrics_callback, fun metrics_callback/1, Opts). + +metrics_callback(#{early_error_time := Time} = Metrics) when is_number(Time) -> + {RespBodyLength, Metadata} = maps:take(resp_body_length, Metrics), + telemetry:execute( + [cowboy, request, early_error], + #{system_time => erlang:system_time(), resp_body_length => RespBodyLength}, + Metadata); +metrics_callback(#{reason := {internal_error, {'EXIT', _, {Reason, Stacktrace}}, _}} = Metrics) -> + telemetry:execute( + [cowboy, request, exception], + measurements(Metrics), + (metadata(Metrics))#{kind => exit, reason => Reason, stacktrace => Stacktrace}); +metrics_callback(#{reason := {ErrorType, _, _} = Reason} = Metrics) + when ErrorType == socket_error; + ErrorType == connection_error -> + telemetry:execute( + [cowboy, request, stop], + measurements(Metrics), + (metadata(Metrics))#{error => Reason}); +metrics_callback(Metrics) -> + telemetry:execute( + [cowboy, request, stop], + measurements(Metrics), + metadata(Metrics)). 
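
(Illustration only, not part of this patch.) To consume the events that metrics_callback/1 above emits, an application attaches its own handler through the standard `telemetry` API; the handler id, module name, and log lines below are hypothetical:

```erlang
%% Hypothetical consumer of the cowboy_telemetry events; not part of this patch.
-module(request_telemetry_logger).

-export([attach/0]).
-export([handle_event/4]).

attach() ->
    Events = [
        [cowboy, request, start],
        [cowboy, request, stop],
        [cowboy, request, exception],
        [cowboy, request, early_error]
    ],
    telemetry:attach_many(<<"request-telemetry-logger">>, Events,
        fun ?MODULE:handle_event/4, undefined).

%% Executed from the Cowboy connection process (see the README note above),
%% so keep the work done here minimal.
handle_event([cowboy, request, stop], #{duration := Duration}, #{resp_status := Status}, _Config) ->
    logger:info("request done: status=~p duration=~p (native time units)", [Status, Duration]);
handle_event(Event, _Measurements, _Metadata, _Config) ->
    logger:debug("cowboy telemetry event: ~p", [Event]).
```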
+ +measurements(Metrics) -> + #{req_body_length := ReqBodyLength, resp_body_length := RespBodyLength} = Metrics, + + #{ + duration => duration(req_start, req_end, Metrics), + req_body_duration => duration(req_body_start, req_body_end, Metrics), + resp_duration => duration(resp_start, resp_end, Metrics), + req_body_length => ReqBodyLength, + resp_body_length => RespBodyLength + }. + +metadata(Metrics) -> + #{ + pid := Pid, + streamid := Streamid, + req := Req, + resp_headers := RespHeaders, + resp_status := RespStatus, + ref := Ref + } = Metrics, + + #{ + pid => Pid, + streamid => Streamid, + req => Req, + resp_headers => RespHeaders, + resp_status => RespStatus, + ref => Ref + }. + +duration(StartKey, EndKey, Metrics) -> + case Metrics of + #{StartKey := Start, EndKey := End} when is_integer(Start), is_integer(End) -> End - Start; + #{} -> 0 + end. diff --git a/deps/cowlib/.fetch b/deps/cowlib/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/cowlib/.hex b/deps/cowlib/.hex new file mode 100644 index 0000000000000000000000000000000000000000..cf2fb2644f5314e25169b80a26eaac4b215cb888 GIT binary patch literal 281 zcmZ9HOH#uy5JXiD7*-r5C5^3-waZPY`5Bu;cFOs{o{OWfWZPBU@Ac2rOgnYE$-b!} zcXiIS>fYvMvF%+ZljG%9N=*-*^EhVddPK+&QF<`8V8#hE4V|EHN`cTUX7sEfc;edP zA%VwE-D{zRK5V<3>RzMnsnF5XwVN?*-DeFJD)xZ2vVNUrt|II0`d + +Permission to use, copy, modify, and/or distribute this software for any +purpose with or without fee is hereby granted, provided that the above +copyright notice and this permission notice appear in all copies. + +THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. diff --git a/deps/cowlib/Makefile b/deps/cowlib/Makefile new file mode 100644 index 0000000..f67c726 --- /dev/null +++ b/deps/cowlib/Makefile @@ -0,0 +1,113 @@ +# See LICENSE for licensing information. + +PROJECT = cowlib +PROJECT_DESCRIPTION = Support library for manipulating Web protocols. +PROJECT_VERSION = 2.11.0 + +# Options. + +#ERLC_OPTS += +bin_opt_info +ifdef HIPE + ERLC_OPTS += -smp +native + TEST_ERLC_OPTS += -smp +native +endif + +DIALYZER_OPTS = -Werror_handling -Wunmatched_returns + +# Dependencies. + +LOCAL_DEPS = crypto + +DOC_DEPS = asciideck + +TEST_DEPS = $(if $(CI_ERLANG_MK),ci.erlang.mk) base32 horse proper jsx \ + structured-header-tests uritemplate-tests +dep_base32 = git https://github.com/dnsimple/base32_erlang main +dep_horse = git https://github.com/ninenines/horse.git master +dep_jsx = git https://github.com/talentdeficit/jsx v2.10.0 +dep_structured-header-tests = git https://github.com/httpwg/structured-header-tests e614583397e7f65e0082c0fff3929f32a298b9f2 +dep_uritemplate-tests = git https://github.com/uri-templates/uritemplate-test master + +# CI configuration. + +dep_ci.erlang.mk = git https://github.com/ninenines/ci.erlang.mk master +DEP_EARLY_PLUGINS = ci.erlang.mk + +AUTO_CI_OTP ?= OTP-21+ +AUTO_CI_HIPE ?= OTP-LATEST +# AUTO_CI_ERLLVM ?= OTP-LATEST +AUTO_CI_WINDOWS ?= OTP-21+ + +# Hex configuration. 
+ +define HEX_TARBALL_EXTRA_METADATA +#{ + licenses => [<<"ISC">>], + links => #{ + <<"Function reference">> => <<"https://ninenines.eu/docs/en/cowlib/2.11/manual/">>, + <<"GitHub">> => <<"https://github.com/ninenines/cowlib">>, + <<"Sponsor">> => <<"https://github.com/sponsors/essen">> + } +} +endef + +# Standard targets. + +include erlang.mk + +# Compile options. + +TEST_ERLC_OPTS += +'{parse_transform, eunit_autoexport}' +'{parse_transform, horse_autoexport}' + +# Mimetypes module generator. + +GEN_URL = http://svn.apache.org/repos/asf/httpd/httpd/trunk/docs/conf/mime.types +GEN_SRC = src/cow_mimetypes.erl.src +GEN_OUT = src/cow_mimetypes.erl + +.PHONY: gen + +gen: + $(gen_verbose) cat $(GEN_SRC) \ + | head -n `grep -n "%% GENERATED" $(GEN_SRC) | cut -d : -f 1` \ + > $(GEN_OUT) + $(gen_verbose) wget -qO - $(GEN_URL) \ + | grep -v ^# \ + | awk '{for (i=2; i<=NF; i++) if ($$i != "") { \ + split($$1, a, "/"); \ + print "all_ext(<<\"" $$i "\">>) -> {<<\"" \ + a[1] "\">>, <<\"" a[2] "\">>, []};"}}' \ + | sort \ + | uniq -w 25 \ + >> $(GEN_OUT) + $(gen_verbose) cat $(GEN_SRC) \ + | tail -n +`grep -n "%% GENERATED" $(GEN_SRC) | cut -d : -f 1` \ + >> $(GEN_OUT) + +# Performance testing. + +ifeq ($(MAKECMDGOALS),perfs) +.NOTPARALLEL: +endif + +.PHONY: perfs + +perfs: test-build + $(gen_verbose) erl -noshell -pa ebin -eval 'horse:app_perf($(PROJECT)), erlang:halt().' + +# Prepare for the release. + +prepare_tag: + $(verbose) $(warning Hex metadata: $(HEX_TARBALL_EXTRA_METADATA)) + $(verbose) echo + $(verbose) echo -n "Most recent tag: " + $(verbose) git tag --sort taggerdate | tail -n1 + $(verbose) git verify-tag `git tag --sort taggerdate | tail -n1` + $(verbose) echo -n "MAKEFILE: " + $(verbose) grep -m1 PROJECT_VERSION Makefile + $(verbose) echo -n "APP: " + $(verbose) grep -m1 vsn ebin/$(PROJECT).app | sed 's/ //g' + $(verbose) echo + $(verbose) echo "Dependencies:" + $(verbose) grep ^DEPS Makefile || echo "DEPS =" + $(verbose) grep ^dep_ Makefile || true diff --git a/deps/cowlib/README.asciidoc b/deps/cowlib/README.asciidoc new file mode 100644 index 0000000..949e22d --- /dev/null +++ b/deps/cowlib/README.asciidoc @@ -0,0 +1,18 @@ += Cowlib + +Cowlib is a support library for manipulating Web protocols. + +== Goals + +Cowlib provides libraries for parsing and building messages +for various Web protocols, including HTTP/1.1, HTTP/2 and +Websocket. + +It is optimized for completeness rather than speed. No value +is ignored, they are all returned. + +== Support + +* Official IRC Channel: #ninenines on irc.freenode.net +* https://ninenines.eu/services[Commercial Support] +* https://github.com/sponsors/essen[Sponsor me!] diff --git a/deps/cowlib/ebin/cowlib.app b/deps/cowlib/ebin/cowlib.app new file mode 100644 index 0000000..bb49c80 --- /dev/null +++ b/deps/cowlib/ebin/cowlib.app @@ -0,0 +1,8 @@ +{application, 'cowlib', [ + {description, "Support library for manipulating Web protocols."}, + {vsn, "2.11.0"}, + {modules, ['cow_base64url','cow_cookie','cow_date','cow_hpack','cow_http','cow_http2','cow_http2_machine','cow_http_hd','cow_http_struct_hd','cow_http_te','cow_iolists','cow_link','cow_mimetypes','cow_multipart','cow_qs','cow_spdy','cow_sse','cow_uri','cow_uri_template','cow_ws']}, + {registered, []}, + {applications, [kernel,stdlib,crypto]}, + {env, []} +]}. 
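As a quick illustration of the modules listed in the `.app` file above (editor's sketch, not part of the vendored files; output shown is the expected shape, with keys and values returned as binaries), two of cowlib's parsers can be exercised directly from an Erlang shell:

%% Editor's example session, for illustration only.
1> cow_qs:parse_qs(<<"a=b&c=d">>).
[{<<"a">>,<<"b">>},{<<"c">>,<<"d">>}]
2> cow_cookie:parse_cookie(<<"session=abc; theme=dark">>).
[{<<"session">>,<<"abc">>},{<<"theme">>,<<"dark">>}]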
\ No newline at end of file diff --git a/deps/cowlib/erlang.mk b/deps/cowlib/erlang.mk new file mode 100644 index 0000000..9174521 --- /dev/null +++ b/deps/cowlib/erlang.mk @@ -0,0 +1,8109 @@ +# Copyright (c) 2013-2016, Loรฏc Hoguin +# +# Permission to use, copy, modify, and/or distribute this software for any +# purpose with or without fee is hereby granted, provided that the above +# copyright notice and this permission notice appear in all copies. +# +# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +.PHONY: all app apps deps search rel relup docs install-docs check tests clean distclean help erlang-mk + +ERLANG_MK_FILENAME := $(realpath $(lastword $(MAKEFILE_LIST))) +export ERLANG_MK_FILENAME + +ERLANG_MK_VERSION = 2020.03.05-27-g7f608c6-dirty +ERLANG_MK_WITHOUT = + +# Make 3.81 and 3.82 are deprecated. + +ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.81) +$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html) +endif + +ifeq ($(MAKELEVEL)$(MAKE_VERSION),03.82) +$(warning Please upgrade to GNU Make 4 or later: https://erlang.mk/guide/installation.html) +endif + +# Core configuration. + +PROJECT ?= $(notdir $(CURDIR)) +PROJECT := $(strip $(PROJECT)) + +PROJECT_VERSION ?= rolling +PROJECT_MOD ?= $(PROJECT)_app +PROJECT_ENV ?= [] + +# Verbosity. + +V ?= 0 + +verbose_0 = @ +verbose_2 = set -x; +verbose = $(verbose_$(V)) + +ifeq ($(V),3) +SHELL := $(SHELL) -x +endif + +gen_verbose_0 = @echo " GEN " $@; +gen_verbose_2 = set -x; +gen_verbose = $(gen_verbose_$(V)) + +gen_verbose_esc_0 = @echo " GEN " $$@; +gen_verbose_esc_2 = set -x; +gen_verbose_esc = $(gen_verbose_esc_$(V)) + +# Temporary files directory. + +ERLANG_MK_TMP ?= $(CURDIR)/.erlang.mk +export ERLANG_MK_TMP + +# "erl" command. + +ERL = erl +A1 -noinput -boot no_dot_erlang + +# Platform detection. + +ifeq ($(PLATFORM),) +UNAME_S := $(shell uname -s) + +ifeq ($(UNAME_S),Linux) +PLATFORM = linux +else ifeq ($(UNAME_S),Darwin) +PLATFORM = darwin +else ifeq ($(UNAME_S),SunOS) +PLATFORM = solaris +else ifeq ($(UNAME_S),GNU) +PLATFORM = gnu +else ifeq ($(UNAME_S),FreeBSD) +PLATFORM = freebsd +else ifeq ($(UNAME_S),NetBSD) +PLATFORM = netbsd +else ifeq ($(UNAME_S),OpenBSD) +PLATFORM = openbsd +else ifeq ($(UNAME_S),DragonFly) +PLATFORM = dragonfly +else ifeq ($(shell uname -o),Msys) +PLATFORM = msys2 +else +$(error Unable to detect platform. Please open a ticket with the output of uname -a.) +endif + +export PLATFORM +endif + +# Core targets. + +all:: deps app rel + +# Noop to avoid a Make warning when there's nothing to do. +rel:: + $(verbose) : + +relup:: deps app + +check:: tests + +clean:: clean-crashdump + +clean-crashdump: +ifneq ($(wildcard erl_crash.dump),) + $(gen_verbose) rm -f erl_crash.dump +endif + +distclean:: clean distclean-tmp + +$(ERLANG_MK_TMP): + $(verbose) mkdir -p $(ERLANG_MK_TMP) + +distclean-tmp: + $(gen_verbose) rm -rf $(ERLANG_MK_TMP) + +help:: + $(verbose) printf "%s\n" \ + "erlang.mk (version $(ERLANG_MK_VERSION)) is distributed under the terms of the ISC License." 
\ + "Copyright (c) 2013-2016 Loรฏc Hoguin " \ + "" \ + "Usage: [V=1] $(MAKE) [target]..." \ + "" \ + "Core targets:" \ + " all Run deps, app and rel targets in that order" \ + " app Compile the project" \ + " deps Fetch dependencies (if needed) and compile them" \ + " fetch-deps Fetch dependencies recursively (if needed) without compiling them" \ + " list-deps List dependencies recursively on stdout" \ + " search q=... Search for a package in the built-in index" \ + " rel Build a release for this project, if applicable" \ + " docs Build the documentation for this project" \ + " install-docs Install the man pages for this project" \ + " check Compile and run all tests and analysis for this project" \ + " tests Run the tests for this project" \ + " clean Delete temporary and output files from most targets" \ + " distclean Delete all temporary and output files" \ + " help Display this help and exit" \ + " erlang-mk Update erlang.mk to the latest version" + +# Core functions. + +empty := +space := $(empty) $(empty) +tab := $(empty) $(empty) +comma := , + +define newline + + +endef + +define comma_list +$(subst $(space),$(comma),$(strip $(1))) +endef + +define escape_dquotes +$(subst ",\",$1) +endef + +# Adding erlang.mk to make Erlang scripts who call init:get_plain_arguments() happy. +define erlang +$(ERL) $2 -pz $(ERLANG_MK_TMP)/rebar/ebin -eval "$(subst $(newline),,$(call escape_dquotes,$1))" -- erlang.mk +endef + +ifeq ($(PLATFORM),msys2) +core_native_path = $(shell cygpath -m $1) +else +core_native_path = $1 +endif + +core_http_get = curl -Lf$(if $(filter-out 0,$(V)),,s)o $(call core_native_path,$1) $2 + +core_eq = $(and $(findstring $(1),$(2)),$(findstring $(2),$(1))) + +# We skip files that contain spaces because they end up causing issues. +core_find = $(if $(wildcard $1),$(shell find $(1:%/=%) \( -type l -o -type f \) -name $(subst *,\*,$2) | grep -v " ")) + +core_lc = $(subst A,a,$(subst B,b,$(subst C,c,$(subst D,d,$(subst E,e,$(subst F,f,$(subst G,g,$(subst H,h,$(subst I,i,$(subst J,j,$(subst K,k,$(subst L,l,$(subst M,m,$(subst N,n,$(subst O,o,$(subst P,p,$(subst Q,q,$(subst R,r,$(subst S,s,$(subst T,t,$(subst U,u,$(subst V,v,$(subst W,w,$(subst X,x,$(subst Y,y,$(subst Z,z,$(1))))))))))))))))))))))))))) + +core_ls = $(filter-out $(1),$(shell echo $(1))) + +# @todo Use a solution that does not require using perl. +core_relpath = $(shell perl -e 'use File::Spec; print File::Spec->abs2rel(@ARGV) . "\n"' $1 $2) + +define core_render + printf -- '$(subst $(newline),\n,$(subst %,%%,$(subst ','\'',$(subst $(tab),$(WS),$(call $(1))))))\n' > $(2) +endef + +# Automated update. + +ERLANG_MK_REPO ?= https://github.com/ninenines/erlang.mk +ERLANG_MK_COMMIT ?= +ERLANG_MK_BUILD_CONFIG ?= build.config +ERLANG_MK_BUILD_DIR ?= .erlang.mk.build + +erlang-mk: WITHOUT ?= $(ERLANG_MK_WITHOUT) +erlang-mk: +ifdef ERLANG_MK_COMMIT + $(verbose) git clone $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR) + $(verbose) cd $(ERLANG_MK_BUILD_DIR) && git checkout $(ERLANG_MK_COMMIT) +else + $(verbose) git clone --depth 1 $(ERLANG_MK_REPO) $(ERLANG_MK_BUILD_DIR) +endif + $(verbose) if [ -f $(ERLANG_MK_BUILD_CONFIG) ]; then cp $(ERLANG_MK_BUILD_CONFIG) $(ERLANG_MK_BUILD_DIR)/build.config; fi + $(gen_verbose) $(MAKE) --no-print-directory -C $(ERLANG_MK_BUILD_DIR) WITHOUT='$(strip $(WITHOUT))' UPGRADE=1 + $(verbose) cp $(ERLANG_MK_BUILD_DIR)/erlang.mk ./erlang.mk + $(verbose) rm -rf $(ERLANG_MK_BUILD_DIR) + $(verbose) rm -rf $(ERLANG_MK_TMP) + +# The erlang.mk package index is bundled in the default erlang.mk build. 
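For reference, the `erlang-mk` self-update target defined above is driven entirely by the variables it reads. Illustrative invocations (editor's note, not part of the vendored file; values in angle brackets are placeholders):

  # make erlang-mk                           -- clone the latest erlang.mk and rebuild ./erlang.mk in place
  # make erlang-mk ERLANG_MK_COMMIT=<ref>    -- update to a specific commit or tag instead of master
  # make erlang-mk WITHOUT='<plugins>'       -- rebuild with the listed plugin files left out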
+# Search for the string "copyright" to skip to the rest of the code. + +# Copyright (c) 2015-2017, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: distclean-kerl + +KERL_INSTALL_DIR ?= $(HOME)/erlang + +ifeq ($(strip $(KERL)),) +KERL := $(ERLANG_MK_TMP)/kerl/kerl +endif + +KERL_DIR = $(ERLANG_MK_TMP)/kerl + +export KERL + +KERL_GIT ?= https://github.com/kerl/kerl +KERL_COMMIT ?= master + +KERL_MAKEFLAGS ?= + +OTP_GIT ?= https://github.com/erlang/otp + +define kerl_otp_target +$(KERL_INSTALL_DIR)/$(1): $(KERL) + $(verbose) if [ ! -d $$@ ]; then \ + MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $(1) $(1); \ + $(KERL) install $(1) $(KERL_INSTALL_DIR)/$(1); \ + fi +endef + +define kerl_hipe_target +$(KERL_INSTALL_DIR)/$1-native: $(KERL) + $(verbose) if [ ! -d $$@ ]; then \ + KERL_CONFIGURE_OPTIONS=--enable-native-libs \ + MAKEFLAGS="$(KERL_MAKEFLAGS)" $(KERL) build git $(OTP_GIT) $1 $1-native; \ + $(KERL) install $1-native $(KERL_INSTALL_DIR)/$1-native; \ + fi +endef + +$(KERL): $(KERL_DIR) + +$(KERL_DIR): | $(ERLANG_MK_TMP) + $(gen_verbose) git clone --depth 1 $(KERL_GIT) $(ERLANG_MK_TMP)/kerl + $(verbose) cd $(ERLANG_MK_TMP)/kerl && git checkout $(KERL_COMMIT) + $(verbose) chmod +x $(KERL) + +distclean:: distclean-kerl + +distclean-kerl: + $(gen_verbose) rm -rf $(KERL_DIR) + +# Allow users to select which version of Erlang/OTP to use for a project. + +ifneq ($(strip $(LATEST_ERLANG_OTP)),) +# In some environments it is necessary to filter out master. +ERLANG_OTP := $(notdir $(lastword $(sort\ + $(filter-out $(KERL_INSTALL_DIR)/master $(KERL_INSTALL_DIR)/OTP_R%,\ + $(filter-out %-rc1 %-rc2 %-rc3,$(wildcard $(KERL_INSTALL_DIR)/*[^-native])))))) +endif + +ERLANG_OTP ?= +ERLANG_HIPE ?= + +# Use kerl to enforce a specific Erlang/OTP version for a project. +ifneq ($(strip $(ERLANG_OTP)),) +export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_OTP)/bin:$(PATH) +SHELL := env PATH=$(PATH) $(SHELL) +$(eval $(call kerl_otp_target,$(ERLANG_OTP))) + +# Build Erlang/OTP only if it doesn't already exist. +ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_OTP))$(BUILD_ERLANG_OTP),) +$(info Building Erlang/OTP $(ERLANG_OTP)... Please wait...) +$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_OTP) ERLANG_OTP=$(ERLANG_OTP) BUILD_ERLANG_OTP=1 >&2) +endif + +else +# Same for a HiPE enabled VM. +ifneq ($(strip $(ERLANG_HIPE)),) +export PATH := $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native/bin:$(PATH) +SHELL := env PATH=$(PATH) $(SHELL) +$(eval $(call kerl_hipe_target,$(ERLANG_HIPE))) + +# Build Erlang/OTP only if it doesn't already exist. +ifeq ($(wildcard $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native)$(BUILD_ERLANG_OTP),) +$(info Building HiPE-enabled Erlang/OTP $(ERLANG_OTP)... Please wait...) 
+$(shell $(MAKE) $(KERL_INSTALL_DIR)/$(ERLANG_HIPE)-native ERLANG_HIPE=$(ERLANG_HIPE) BUILD_ERLANG_OTP=1 >&2) +endif + +endif +endif + +PACKAGES += aberth +pkg_aberth_name = aberth +pkg_aberth_description = Generic BERT-RPC server in Erlang +pkg_aberth_homepage = https://github.com/a13x/aberth +pkg_aberth_fetch = git +pkg_aberth_repo = https://github.com/a13x/aberth +pkg_aberth_commit = master + +PACKAGES += active +pkg_active_name = active +pkg_active_description = Active development for Erlang: rebuild and reload source/binary files while the VM is running +pkg_active_homepage = https://github.com/proger/active +pkg_active_fetch = git +pkg_active_repo = https://github.com/proger/active +pkg_active_commit = master + +PACKAGES += actordb_core +pkg_actordb_core_name = actordb_core +pkg_actordb_core_description = ActorDB main source +pkg_actordb_core_homepage = http://www.actordb.com/ +pkg_actordb_core_fetch = git +pkg_actordb_core_repo = https://github.com/biokoda/actordb_core +pkg_actordb_core_commit = master + +PACKAGES += actordb_thrift +pkg_actordb_thrift_name = actordb_thrift +pkg_actordb_thrift_description = Thrift API for ActorDB +pkg_actordb_thrift_homepage = http://www.actordb.com/ +pkg_actordb_thrift_fetch = git +pkg_actordb_thrift_repo = https://github.com/biokoda/actordb_thrift +pkg_actordb_thrift_commit = master + +PACKAGES += aleppo +pkg_aleppo_name = aleppo +pkg_aleppo_description = Alternative Erlang Pre-Processor +pkg_aleppo_homepage = https://github.com/ErlyORM/aleppo +pkg_aleppo_fetch = git +pkg_aleppo_repo = https://github.com/ErlyORM/aleppo +pkg_aleppo_commit = master + +PACKAGES += alog +pkg_alog_name = alog +pkg_alog_description = Simply the best logging framework for Erlang +pkg_alog_homepage = https://github.com/siberian-fast-food/alogger +pkg_alog_fetch = git +pkg_alog_repo = https://github.com/siberian-fast-food/alogger +pkg_alog_commit = master + +PACKAGES += amqp_client +pkg_amqp_client_name = amqp_client +pkg_amqp_client_description = RabbitMQ Erlang AMQP client +pkg_amqp_client_homepage = https://www.rabbitmq.com/erlang-client-user-guide.html +pkg_amqp_client_fetch = git +pkg_amqp_client_repo = https://github.com/rabbitmq/rabbitmq-erlang-client.git +pkg_amqp_client_commit = master + +PACKAGES += annotations +pkg_annotations_name = annotations +pkg_annotations_description = Simple code instrumentation utilities +pkg_annotations_homepage = https://github.com/hyperthunk/annotations +pkg_annotations_fetch = git +pkg_annotations_repo = https://github.com/hyperthunk/annotations +pkg_annotations_commit = master + +PACKAGES += antidote +pkg_antidote_name = antidote +pkg_antidote_description = Large-scale computation without synchronisation +pkg_antidote_homepage = https://syncfree.lip6.fr/ +pkg_antidote_fetch = git +pkg_antidote_repo = https://github.com/SyncFree/antidote +pkg_antidote_commit = master + +PACKAGES += apns +pkg_apns_name = apns +pkg_apns_description = Apple Push Notification Server for Erlang +pkg_apns_homepage = http://inaka.github.com/apns4erl +pkg_apns_fetch = git +pkg_apns_repo = https://github.com/inaka/apns4erl +pkg_apns_commit = master + +PACKAGES += asciideck +pkg_asciideck_name = asciideck +pkg_asciideck_description = Asciidoc for Erlang. 
+pkg_asciideck_homepage = https://ninenines.eu +pkg_asciideck_fetch = git +pkg_asciideck_repo = https://github.com/ninenines/asciideck +pkg_asciideck_commit = master + +PACKAGES += azdht +pkg_azdht_name = azdht +pkg_azdht_description = Azureus Distributed Hash Table (DHT) in Erlang +pkg_azdht_homepage = https://github.com/arcusfelis/azdht +pkg_azdht_fetch = git +pkg_azdht_repo = https://github.com/arcusfelis/azdht +pkg_azdht_commit = master + +PACKAGES += backoff +pkg_backoff_name = backoff +pkg_backoff_description = Simple exponential backoffs in Erlang +pkg_backoff_homepage = https://github.com/ferd/backoff +pkg_backoff_fetch = git +pkg_backoff_repo = https://github.com/ferd/backoff +pkg_backoff_commit = master + +PACKAGES += barrel_tcp +pkg_barrel_tcp_name = barrel_tcp +pkg_barrel_tcp_description = barrel is a generic TCP acceptor pool with low latency in Erlang. +pkg_barrel_tcp_homepage = https://github.com/benoitc-attic/barrel_tcp +pkg_barrel_tcp_fetch = git +pkg_barrel_tcp_repo = https://github.com/benoitc-attic/barrel_tcp +pkg_barrel_tcp_commit = master + +PACKAGES += basho_bench +pkg_basho_bench_name = basho_bench +pkg_basho_bench_description = A load-generation and testing tool for basically whatever you can write a returning Erlang function for. +pkg_basho_bench_homepage = https://github.com/basho/basho_bench +pkg_basho_bench_fetch = git +pkg_basho_bench_repo = https://github.com/basho/basho_bench +pkg_basho_bench_commit = master + +PACKAGES += bcrypt +pkg_bcrypt_name = bcrypt +pkg_bcrypt_description = Bcrypt Erlang / C library +pkg_bcrypt_homepage = https://github.com/erlangpack/bcrypt +pkg_bcrypt_fetch = git +pkg_bcrypt_repo = https://github.com/erlangpack/bcrypt.git +pkg_bcrypt_commit = master + +PACKAGES += beam +pkg_beam_name = beam +pkg_beam_description = BEAM emulator written in Erlang +pkg_beam_homepage = https://github.com/tonyrog/beam +pkg_beam_fetch = git +pkg_beam_repo = https://github.com/tonyrog/beam +pkg_beam_commit = master + +PACKAGES += beanstalk +pkg_beanstalk_name = beanstalk +pkg_beanstalk_description = An Erlang client for beanstalkd +pkg_beanstalk_homepage = https://github.com/tim/erlang-beanstalk +pkg_beanstalk_fetch = git +pkg_beanstalk_repo = https://github.com/tim/erlang-beanstalk +pkg_beanstalk_commit = master + +PACKAGES += bear +pkg_bear_name = bear +pkg_bear_description = a set of statistics functions for erlang +pkg_bear_homepage = https://github.com/boundary/bear +pkg_bear_fetch = git +pkg_bear_repo = https://github.com/boundary/bear +pkg_bear_commit = master + +PACKAGES += bertconf +pkg_bertconf_name = bertconf +pkg_bertconf_description = Make ETS tables out of statc BERT files that are auto-reloaded +pkg_bertconf_homepage = https://github.com/ferd/bertconf +pkg_bertconf_fetch = git +pkg_bertconf_repo = https://github.com/ferd/bertconf +pkg_bertconf_commit = master + +PACKAGES += bifrost +pkg_bifrost_name = bifrost +pkg_bifrost_description = Erlang FTP Server Framework +pkg_bifrost_homepage = https://github.com/thorstadt/bifrost +pkg_bifrost_fetch = git +pkg_bifrost_repo = https://github.com/thorstadt/bifrost +pkg_bifrost_commit = master + +PACKAGES += binpp +pkg_binpp_name = binpp +pkg_binpp_description = Erlang Binary Pretty Printer +pkg_binpp_homepage = https://github.com/jtendo/binpp +pkg_binpp_fetch = git +pkg_binpp_repo = https://github.com/jtendo/binpp +pkg_binpp_commit = master + +PACKAGES += bisect +pkg_bisect_name = bisect +pkg_bisect_description = Ordered fixed-size binary dictionary in Erlang +pkg_bisect_homepage = 
https://github.com/knutin/bisect +pkg_bisect_fetch = git +pkg_bisect_repo = https://github.com/knutin/bisect +pkg_bisect_commit = master + +PACKAGES += bitcask +pkg_bitcask_name = bitcask +pkg_bitcask_description = because you need another a key/value storage engine +pkg_bitcask_homepage = https://github.com/basho/bitcask +pkg_bitcask_fetch = git +pkg_bitcask_repo = https://github.com/basho/bitcask +pkg_bitcask_commit = develop + +PACKAGES += bitstore +pkg_bitstore_name = bitstore +pkg_bitstore_description = A document based ontology development environment +pkg_bitstore_homepage = https://github.com/bdionne/bitstore +pkg_bitstore_fetch = git +pkg_bitstore_repo = https://github.com/bdionne/bitstore +pkg_bitstore_commit = master + +PACKAGES += bootstrap +pkg_bootstrap_name = bootstrap +pkg_bootstrap_description = A simple, yet powerful Erlang cluster bootstrapping application. +pkg_bootstrap_homepage = https://github.com/schlagert/bootstrap +pkg_bootstrap_fetch = git +pkg_bootstrap_repo = https://github.com/schlagert/bootstrap +pkg_bootstrap_commit = master + +PACKAGES += boss +pkg_boss_name = boss +pkg_boss_description = Erlang web MVC, now featuring Comet +pkg_boss_homepage = https://github.com/ChicagoBoss/ChicagoBoss +pkg_boss_fetch = git +pkg_boss_repo = https://github.com/ChicagoBoss/ChicagoBoss +pkg_boss_commit = master + +PACKAGES += boss_db +pkg_boss_db_name = boss_db +pkg_boss_db_description = BossDB: a sharded, caching, pooling, evented ORM for Erlang +pkg_boss_db_homepage = https://github.com/ErlyORM/boss_db +pkg_boss_db_fetch = git +pkg_boss_db_repo = https://github.com/ErlyORM/boss_db +pkg_boss_db_commit = master + +PACKAGES += brod +pkg_brod_name = brod +pkg_brod_description = Kafka client in Erlang +pkg_brod_homepage = https://github.com/klarna/brod +pkg_brod_fetch = git +pkg_brod_repo = https://github.com/klarna/brod.git +pkg_brod_commit = master + +PACKAGES += bson +pkg_bson_name = bson +pkg_bson_description = BSON documents in Erlang, see bsonspec.org +pkg_bson_homepage = https://github.com/comtihon/bson-erlang +pkg_bson_fetch = git +pkg_bson_repo = https://github.com/comtihon/bson-erlang +pkg_bson_commit = master + +PACKAGES += bullet +pkg_bullet_name = bullet +pkg_bullet_description = Simple, reliable, efficient streaming for Cowboy. 
+pkg_bullet_homepage = http://ninenines.eu +pkg_bullet_fetch = git +pkg_bullet_repo = https://github.com/ninenines/bullet +pkg_bullet_commit = master + +PACKAGES += cache +pkg_cache_name = cache +pkg_cache_description = Erlang in-memory cache +pkg_cache_homepage = https://github.com/fogfish/cache +pkg_cache_fetch = git +pkg_cache_repo = https://github.com/fogfish/cache +pkg_cache_commit = master + +PACKAGES += cake +pkg_cake_name = cake +pkg_cake_description = Really simple terminal colorization +pkg_cake_homepage = https://github.com/darach/cake-erl +pkg_cake_fetch = git +pkg_cake_repo = https://github.com/darach/cake-erl +pkg_cake_commit = master + +PACKAGES += carotene +pkg_carotene_name = carotene +pkg_carotene_description = Real-time server +pkg_carotene_homepage = https://github.com/carotene/carotene +pkg_carotene_fetch = git +pkg_carotene_repo = https://github.com/carotene/carotene +pkg_carotene_commit = master + +PACKAGES += cberl +pkg_cberl_name = cberl +pkg_cberl_description = NIF based Erlang bindings for Couchbase +pkg_cberl_homepage = https://github.com/chitika/cberl +pkg_cberl_fetch = git +pkg_cberl_repo = https://github.com/chitika/cberl +pkg_cberl_commit = master + +PACKAGES += cecho +pkg_cecho_name = cecho +pkg_cecho_description = An ncurses library for Erlang +pkg_cecho_homepage = https://github.com/mazenharake/cecho +pkg_cecho_fetch = git +pkg_cecho_repo = https://github.com/mazenharake/cecho +pkg_cecho_commit = master + +PACKAGES += cferl +pkg_cferl_name = cferl +pkg_cferl_description = Rackspace / Open Stack Cloud Files Erlang Client +pkg_cferl_homepage = https://github.com/ddossot/cferl +pkg_cferl_fetch = git +pkg_cferl_repo = https://github.com/ddossot/cferl +pkg_cferl_commit = master + +PACKAGES += chaos_monkey +pkg_chaos_monkey_name = chaos_monkey +pkg_chaos_monkey_description = This is The CHAOS MONKEY. It will kill your processes. +pkg_chaos_monkey_homepage = https://github.com/dLuna/chaos_monkey +pkg_chaos_monkey_fetch = git +pkg_chaos_monkey_repo = https://github.com/dLuna/chaos_monkey +pkg_chaos_monkey_commit = master + +PACKAGES += check_node +pkg_check_node_name = check_node +pkg_check_node_description = Nagios Scripts for monitoring Riak +pkg_check_node_homepage = https://github.com/basho-labs/riak_nagios +pkg_check_node_fetch = git +pkg_check_node_repo = https://github.com/basho-labs/riak_nagios +pkg_check_node_commit = master + +PACKAGES += chronos +pkg_chronos_name = chronos +pkg_chronos_description = Timer module for Erlang that makes it easy to abstact time out of the tests. +pkg_chronos_homepage = https://github.com/lehoff/chronos +pkg_chronos_fetch = git +pkg_chronos_repo = https://github.com/lehoff/chronos +pkg_chronos_commit = master + +PACKAGES += chumak +pkg_chumak_name = chumak +pkg_chumak_description = Pure Erlang implementation of ZeroMQ Message Transport Protocol. 
+pkg_chumak_homepage = http://choven.ca +pkg_chumak_fetch = git +pkg_chumak_repo = https://github.com/chovencorp/chumak +pkg_chumak_commit = master + +PACKAGES += cl +pkg_cl_name = cl +pkg_cl_description = OpenCL binding for Erlang +pkg_cl_homepage = https://github.com/tonyrog/cl +pkg_cl_fetch = git +pkg_cl_repo = https://github.com/tonyrog/cl +pkg_cl_commit = master + +PACKAGES += clique +pkg_clique_name = clique +pkg_clique_description = CLI Framework for Erlang +pkg_clique_homepage = https://github.com/basho/clique +pkg_clique_fetch = git +pkg_clique_repo = https://github.com/basho/clique +pkg_clique_commit = develop + +PACKAGES += cloudi_core +pkg_cloudi_core_name = cloudi_core +pkg_cloudi_core_description = CloudI internal service runtime +pkg_cloudi_core_homepage = http://cloudi.org/ +pkg_cloudi_core_fetch = git +pkg_cloudi_core_repo = https://github.com/CloudI/cloudi_core +pkg_cloudi_core_commit = master + +PACKAGES += cloudi_service_api_requests +pkg_cloudi_service_api_requests_name = cloudi_service_api_requests +pkg_cloudi_service_api_requests_description = CloudI Service API requests (JSON-RPC/Erlang-term support) +pkg_cloudi_service_api_requests_homepage = http://cloudi.org/ +pkg_cloudi_service_api_requests_fetch = git +pkg_cloudi_service_api_requests_repo = https://github.com/CloudI/cloudi_service_api_requests +pkg_cloudi_service_api_requests_commit = master + +PACKAGES += cloudi_service_db +pkg_cloudi_service_db_name = cloudi_service_db +pkg_cloudi_service_db_description = CloudI Database (in-memory/testing/generic) +pkg_cloudi_service_db_homepage = http://cloudi.org/ +pkg_cloudi_service_db_fetch = git +pkg_cloudi_service_db_repo = https://github.com/CloudI/cloudi_service_db +pkg_cloudi_service_db_commit = master + +PACKAGES += cloudi_service_db_cassandra +pkg_cloudi_service_db_cassandra_name = cloudi_service_db_cassandra +pkg_cloudi_service_db_cassandra_description = Cassandra CloudI Service +pkg_cloudi_service_db_cassandra_homepage = http://cloudi.org/ +pkg_cloudi_service_db_cassandra_fetch = git +pkg_cloudi_service_db_cassandra_repo = https://github.com/CloudI/cloudi_service_db_cassandra +pkg_cloudi_service_db_cassandra_commit = master + +PACKAGES += cloudi_service_db_cassandra_cql +pkg_cloudi_service_db_cassandra_cql_name = cloudi_service_db_cassandra_cql +pkg_cloudi_service_db_cassandra_cql_description = Cassandra CQL CloudI Service +pkg_cloudi_service_db_cassandra_cql_homepage = http://cloudi.org/ +pkg_cloudi_service_db_cassandra_cql_fetch = git +pkg_cloudi_service_db_cassandra_cql_repo = https://github.com/CloudI/cloudi_service_db_cassandra_cql +pkg_cloudi_service_db_cassandra_cql_commit = master + +PACKAGES += cloudi_service_db_couchdb +pkg_cloudi_service_db_couchdb_name = cloudi_service_db_couchdb +pkg_cloudi_service_db_couchdb_description = CouchDB CloudI Service +pkg_cloudi_service_db_couchdb_homepage = http://cloudi.org/ +pkg_cloudi_service_db_couchdb_fetch = git +pkg_cloudi_service_db_couchdb_repo = https://github.com/CloudI/cloudi_service_db_couchdb +pkg_cloudi_service_db_couchdb_commit = master + +PACKAGES += cloudi_service_db_elasticsearch +pkg_cloudi_service_db_elasticsearch_name = cloudi_service_db_elasticsearch +pkg_cloudi_service_db_elasticsearch_description = elasticsearch CloudI Service +pkg_cloudi_service_db_elasticsearch_homepage = http://cloudi.org/ +pkg_cloudi_service_db_elasticsearch_fetch = git +pkg_cloudi_service_db_elasticsearch_repo = https://github.com/CloudI/cloudi_service_db_elasticsearch +pkg_cloudi_service_db_elasticsearch_commit = master + 
+PACKAGES += cloudi_service_db_memcached +pkg_cloudi_service_db_memcached_name = cloudi_service_db_memcached +pkg_cloudi_service_db_memcached_description = memcached CloudI Service +pkg_cloudi_service_db_memcached_homepage = http://cloudi.org/ +pkg_cloudi_service_db_memcached_fetch = git +pkg_cloudi_service_db_memcached_repo = https://github.com/CloudI/cloudi_service_db_memcached +pkg_cloudi_service_db_memcached_commit = master + +PACKAGES += cloudi_service_db_mysql +pkg_cloudi_service_db_mysql_name = cloudi_service_db_mysql +pkg_cloudi_service_db_mysql_description = MySQL CloudI Service +pkg_cloudi_service_db_mysql_homepage = http://cloudi.org/ +pkg_cloudi_service_db_mysql_fetch = git +pkg_cloudi_service_db_mysql_repo = https://github.com/CloudI/cloudi_service_db_mysql +pkg_cloudi_service_db_mysql_commit = master + +PACKAGES += cloudi_service_db_pgsql +pkg_cloudi_service_db_pgsql_name = cloudi_service_db_pgsql +pkg_cloudi_service_db_pgsql_description = PostgreSQL CloudI Service +pkg_cloudi_service_db_pgsql_homepage = http://cloudi.org/ +pkg_cloudi_service_db_pgsql_fetch = git +pkg_cloudi_service_db_pgsql_repo = https://github.com/CloudI/cloudi_service_db_pgsql +pkg_cloudi_service_db_pgsql_commit = master + +PACKAGES += cloudi_service_db_riak +pkg_cloudi_service_db_riak_name = cloudi_service_db_riak +pkg_cloudi_service_db_riak_description = Riak CloudI Service +pkg_cloudi_service_db_riak_homepage = http://cloudi.org/ +pkg_cloudi_service_db_riak_fetch = git +pkg_cloudi_service_db_riak_repo = https://github.com/CloudI/cloudi_service_db_riak +pkg_cloudi_service_db_riak_commit = master + +PACKAGES += cloudi_service_db_tokyotyrant +pkg_cloudi_service_db_tokyotyrant_name = cloudi_service_db_tokyotyrant +pkg_cloudi_service_db_tokyotyrant_description = Tokyo Tyrant CloudI Service +pkg_cloudi_service_db_tokyotyrant_homepage = http://cloudi.org/ +pkg_cloudi_service_db_tokyotyrant_fetch = git +pkg_cloudi_service_db_tokyotyrant_repo = https://github.com/CloudI/cloudi_service_db_tokyotyrant +pkg_cloudi_service_db_tokyotyrant_commit = master + +PACKAGES += cloudi_service_filesystem +pkg_cloudi_service_filesystem_name = cloudi_service_filesystem +pkg_cloudi_service_filesystem_description = Filesystem CloudI Service +pkg_cloudi_service_filesystem_homepage = http://cloudi.org/ +pkg_cloudi_service_filesystem_fetch = git +pkg_cloudi_service_filesystem_repo = https://github.com/CloudI/cloudi_service_filesystem +pkg_cloudi_service_filesystem_commit = master + +PACKAGES += cloudi_service_http_client +pkg_cloudi_service_http_client_name = cloudi_service_http_client +pkg_cloudi_service_http_client_description = HTTP client CloudI Service +pkg_cloudi_service_http_client_homepage = http://cloudi.org/ +pkg_cloudi_service_http_client_fetch = git +pkg_cloudi_service_http_client_repo = https://github.com/CloudI/cloudi_service_http_client +pkg_cloudi_service_http_client_commit = master + +PACKAGES += cloudi_service_http_cowboy +pkg_cloudi_service_http_cowboy_name = cloudi_service_http_cowboy +pkg_cloudi_service_http_cowboy_description = cowboy HTTP/HTTPS CloudI Service +pkg_cloudi_service_http_cowboy_homepage = http://cloudi.org/ +pkg_cloudi_service_http_cowboy_fetch = git +pkg_cloudi_service_http_cowboy_repo = https://github.com/CloudI/cloudi_service_http_cowboy +pkg_cloudi_service_http_cowboy_commit = master + +PACKAGES += cloudi_service_http_elli +pkg_cloudi_service_http_elli_name = cloudi_service_http_elli +pkg_cloudi_service_http_elli_description = elli HTTP CloudI Service +pkg_cloudi_service_http_elli_homepage = 
http://cloudi.org/ +pkg_cloudi_service_http_elli_fetch = git +pkg_cloudi_service_http_elli_repo = https://github.com/CloudI/cloudi_service_http_elli +pkg_cloudi_service_http_elli_commit = master + +PACKAGES += cloudi_service_map_reduce +pkg_cloudi_service_map_reduce_name = cloudi_service_map_reduce +pkg_cloudi_service_map_reduce_description = Map/Reduce CloudI Service +pkg_cloudi_service_map_reduce_homepage = http://cloudi.org/ +pkg_cloudi_service_map_reduce_fetch = git +pkg_cloudi_service_map_reduce_repo = https://github.com/CloudI/cloudi_service_map_reduce +pkg_cloudi_service_map_reduce_commit = master + +PACKAGES += cloudi_service_oauth1 +pkg_cloudi_service_oauth1_name = cloudi_service_oauth1 +pkg_cloudi_service_oauth1_description = OAuth v1.0 CloudI Service +pkg_cloudi_service_oauth1_homepage = http://cloudi.org/ +pkg_cloudi_service_oauth1_fetch = git +pkg_cloudi_service_oauth1_repo = https://github.com/CloudI/cloudi_service_oauth1 +pkg_cloudi_service_oauth1_commit = master + +PACKAGES += cloudi_service_queue +pkg_cloudi_service_queue_name = cloudi_service_queue +pkg_cloudi_service_queue_description = Persistent Queue Service +pkg_cloudi_service_queue_homepage = http://cloudi.org/ +pkg_cloudi_service_queue_fetch = git +pkg_cloudi_service_queue_repo = https://github.com/CloudI/cloudi_service_queue +pkg_cloudi_service_queue_commit = master + +PACKAGES += cloudi_service_quorum +pkg_cloudi_service_quorum_name = cloudi_service_quorum +pkg_cloudi_service_quorum_description = CloudI Quorum Service +pkg_cloudi_service_quorum_homepage = http://cloudi.org/ +pkg_cloudi_service_quorum_fetch = git +pkg_cloudi_service_quorum_repo = https://github.com/CloudI/cloudi_service_quorum +pkg_cloudi_service_quorum_commit = master + +PACKAGES += cloudi_service_router +pkg_cloudi_service_router_name = cloudi_service_router +pkg_cloudi_service_router_description = CloudI Router Service +pkg_cloudi_service_router_homepage = http://cloudi.org/ +pkg_cloudi_service_router_fetch = git +pkg_cloudi_service_router_repo = https://github.com/CloudI/cloudi_service_router +pkg_cloudi_service_router_commit = master + +PACKAGES += cloudi_service_tcp +pkg_cloudi_service_tcp_name = cloudi_service_tcp +pkg_cloudi_service_tcp_description = TCP CloudI Service +pkg_cloudi_service_tcp_homepage = http://cloudi.org/ +pkg_cloudi_service_tcp_fetch = git +pkg_cloudi_service_tcp_repo = https://github.com/CloudI/cloudi_service_tcp +pkg_cloudi_service_tcp_commit = master + +PACKAGES += cloudi_service_timers +pkg_cloudi_service_timers_name = cloudi_service_timers +pkg_cloudi_service_timers_description = Timers CloudI Service +pkg_cloudi_service_timers_homepage = http://cloudi.org/ +pkg_cloudi_service_timers_fetch = git +pkg_cloudi_service_timers_repo = https://github.com/CloudI/cloudi_service_timers +pkg_cloudi_service_timers_commit = master + +PACKAGES += cloudi_service_udp +pkg_cloudi_service_udp_name = cloudi_service_udp +pkg_cloudi_service_udp_description = UDP CloudI Service +pkg_cloudi_service_udp_homepage = http://cloudi.org/ +pkg_cloudi_service_udp_fetch = git +pkg_cloudi_service_udp_repo = https://github.com/CloudI/cloudi_service_udp +pkg_cloudi_service_udp_commit = master + +PACKAGES += cloudi_service_validate +pkg_cloudi_service_validate_name = cloudi_service_validate +pkg_cloudi_service_validate_description = CloudI Validate Service +pkg_cloudi_service_validate_homepage = http://cloudi.org/ +pkg_cloudi_service_validate_fetch = git +pkg_cloudi_service_validate_repo = https://github.com/CloudI/cloudi_service_validate 
+pkg_cloudi_service_validate_commit = master + +PACKAGES += cloudi_service_zeromq +pkg_cloudi_service_zeromq_name = cloudi_service_zeromq +pkg_cloudi_service_zeromq_description = ZeroMQ CloudI Service +pkg_cloudi_service_zeromq_homepage = http://cloudi.org/ +pkg_cloudi_service_zeromq_fetch = git +pkg_cloudi_service_zeromq_repo = https://github.com/CloudI/cloudi_service_zeromq +pkg_cloudi_service_zeromq_commit = master + +PACKAGES += cluster_info +pkg_cluster_info_name = cluster_info +pkg_cluster_info_description = Fork of Hibari's nifty cluster_info OTP app +pkg_cluster_info_homepage = https://github.com/basho/cluster_info +pkg_cluster_info_fetch = git +pkg_cluster_info_repo = https://github.com/basho/cluster_info +pkg_cluster_info_commit = master + +PACKAGES += color +pkg_color_name = color +pkg_color_description = ANSI colors for your Erlang +pkg_color_homepage = https://github.com/julianduque/erlang-color +pkg_color_fetch = git +pkg_color_repo = https://github.com/julianduque/erlang-color +pkg_color_commit = master + +PACKAGES += confetti +pkg_confetti_name = confetti +pkg_confetti_description = Erlang configuration provider / application:get_env/2 on steroids +pkg_confetti_homepage = https://github.com/jtendo/confetti +pkg_confetti_fetch = git +pkg_confetti_repo = https://github.com/jtendo/confetti +pkg_confetti_commit = master + +PACKAGES += couchbeam +pkg_couchbeam_name = couchbeam +pkg_couchbeam_description = Apache CouchDB client in Erlang +pkg_couchbeam_homepage = https://github.com/benoitc/couchbeam +pkg_couchbeam_fetch = git +pkg_couchbeam_repo = https://github.com/benoitc/couchbeam +pkg_couchbeam_commit = master + +PACKAGES += covertool +pkg_covertool_name = covertool +pkg_covertool_description = Tool to convert Erlang cover data files into Cobertura XML reports +pkg_covertool_homepage = https://github.com/idubrov/covertool +pkg_covertool_fetch = git +pkg_covertool_repo = https://github.com/idubrov/covertool +pkg_covertool_commit = master + +PACKAGES += cowboy +pkg_cowboy_name = cowboy +pkg_cowboy_description = Small, fast and modular HTTP server. +pkg_cowboy_homepage = http://ninenines.eu +pkg_cowboy_fetch = git +pkg_cowboy_repo = https://github.com/ninenines/cowboy +pkg_cowboy_commit = 1.0.4 + +PACKAGES += cowdb +pkg_cowdb_name = cowdb +pkg_cowdb_description = Pure Key/Value database library for Erlang Applications +pkg_cowdb_homepage = https://github.com/refuge/cowdb +pkg_cowdb_fetch = git +pkg_cowdb_repo = https://github.com/refuge/cowdb +pkg_cowdb_commit = master + +PACKAGES += cowlib +pkg_cowlib_name = cowlib +pkg_cowlib_description = Support library for manipulating Web protocols. 
+pkg_cowlib_homepage = http://ninenines.eu +pkg_cowlib_fetch = git +pkg_cowlib_repo = https://github.com/ninenines/cowlib +pkg_cowlib_commit = 1.0.2 + +PACKAGES += cpg +pkg_cpg_name = cpg +pkg_cpg_description = CloudI Process Groups +pkg_cpg_homepage = https://github.com/okeuday/cpg +pkg_cpg_fetch = git +pkg_cpg_repo = https://github.com/okeuday/cpg +pkg_cpg_commit = master + +PACKAGES += cqerl +pkg_cqerl_name = cqerl +pkg_cqerl_description = Native Erlang CQL client for Cassandra +pkg_cqerl_homepage = https://matehat.github.io/cqerl/ +pkg_cqerl_fetch = git +pkg_cqerl_repo = https://github.com/matehat/cqerl +pkg_cqerl_commit = master + +PACKAGES += cr +pkg_cr_name = cr +pkg_cr_description = Chain Replication +pkg_cr_homepage = https://synrc.com/apps/cr/doc/cr.htm +pkg_cr_fetch = git +pkg_cr_repo = https://github.com/spawnproc/cr +pkg_cr_commit = master + +PACKAGES += cuttlefish +pkg_cuttlefish_name = cuttlefish +pkg_cuttlefish_description = never lose your childlike sense of wonder baby cuttlefish, promise me? +pkg_cuttlefish_homepage = https://github.com/basho/cuttlefish +pkg_cuttlefish_fetch = git +pkg_cuttlefish_repo = https://github.com/basho/cuttlefish +pkg_cuttlefish_commit = master + +PACKAGES += damocles +pkg_damocles_name = damocles +pkg_damocles_description = Erlang library for generating adversarial network conditions for QAing distributed applications/systems on a single Linux box. +pkg_damocles_homepage = https://github.com/lostcolony/damocles +pkg_damocles_fetch = git +pkg_damocles_repo = https://github.com/lostcolony/damocles +pkg_damocles_commit = master + +PACKAGES += debbie +pkg_debbie_name = debbie +pkg_debbie_description = .DEB Built In Erlang +pkg_debbie_homepage = https://github.com/crownedgrouse/debbie +pkg_debbie_fetch = git +pkg_debbie_repo = https://github.com/crownedgrouse/debbie +pkg_debbie_commit = master + +PACKAGES += decimal +pkg_decimal_name = decimal +pkg_decimal_description = An Erlang decimal arithmetic library +pkg_decimal_homepage = https://github.com/tim/erlang-decimal +pkg_decimal_fetch = git +pkg_decimal_repo = https://github.com/tim/erlang-decimal +pkg_decimal_commit = master + +PACKAGES += detergent +pkg_detergent_name = detergent +pkg_detergent_description = An emulsifying Erlang SOAP library +pkg_detergent_homepage = https://github.com/devinus/detergent +pkg_detergent_fetch = git +pkg_detergent_repo = https://github.com/devinus/detergent +pkg_detergent_commit = master + +PACKAGES += detest +pkg_detest_name = detest +pkg_detest_description = Tool for running tests on a cluster of erlang nodes +pkg_detest_homepage = https://github.com/biokoda/detest +pkg_detest_fetch = git +pkg_detest_repo = https://github.com/biokoda/detest +pkg_detest_commit = master + +PACKAGES += dh_date +pkg_dh_date_name = dh_date +pkg_dh_date_description = Date formatting / parsing library for erlang +pkg_dh_date_homepage = https://github.com/daleharvey/dh_date +pkg_dh_date_fetch = git +pkg_dh_date_repo = https://github.com/daleharvey/dh_date +pkg_dh_date_commit = master + +PACKAGES += dirbusterl +pkg_dirbusterl_name = dirbusterl +pkg_dirbusterl_description = DirBuster successor in Erlang +pkg_dirbusterl_homepage = https://github.com/silentsignal/DirBustErl +pkg_dirbusterl_fetch = git +pkg_dirbusterl_repo = https://github.com/silentsignal/DirBustErl +pkg_dirbusterl_commit = master + +PACKAGES += dispcount +pkg_dispcount_name = dispcount +pkg_dispcount_description = Erlang task dispatcher based on ETS counters. 
+pkg_dispcount_homepage = https://github.com/ferd/dispcount +pkg_dispcount_fetch = git +pkg_dispcount_repo = https://github.com/ferd/dispcount +pkg_dispcount_commit = master + +PACKAGES += dlhttpc +pkg_dlhttpc_name = dlhttpc +pkg_dlhttpc_description = dispcount-based lhttpc fork for massive amounts of requests to limited endpoints +pkg_dlhttpc_homepage = https://github.com/ferd/dlhttpc +pkg_dlhttpc_fetch = git +pkg_dlhttpc_repo = https://github.com/ferd/dlhttpc +pkg_dlhttpc_commit = master + +PACKAGES += dns +pkg_dns_name = dns +pkg_dns_description = Erlang DNS library +pkg_dns_homepage = https://github.com/aetrion/dns_erlang +pkg_dns_fetch = git +pkg_dns_repo = https://github.com/aetrion/dns_erlang +pkg_dns_commit = master + +PACKAGES += dnssd +pkg_dnssd_name = dnssd +pkg_dnssd_description = Erlang interface to Apple's Bonjour D NS Service Discovery implementation +pkg_dnssd_homepage = https://github.com/benoitc/dnssd_erlang +pkg_dnssd_fetch = git +pkg_dnssd_repo = https://github.com/benoitc/dnssd_erlang +pkg_dnssd_commit = master + +PACKAGES += dynamic_compile +pkg_dynamic_compile_name = dynamic_compile +pkg_dynamic_compile_description = compile and load erlang modules from string input +pkg_dynamic_compile_homepage = https://github.com/jkvor/dynamic_compile +pkg_dynamic_compile_fetch = git +pkg_dynamic_compile_repo = https://github.com/jkvor/dynamic_compile +pkg_dynamic_compile_commit = master + +PACKAGES += e2 +pkg_e2_name = e2 +pkg_e2_description = Library to simply writing correct OTP applications. +pkg_e2_homepage = http://e2project.org +pkg_e2_fetch = git +pkg_e2_repo = https://github.com/gar1t/e2 +pkg_e2_commit = master + +PACKAGES += eamf +pkg_eamf_name = eamf +pkg_eamf_description = eAMF provides Action Message Format (AMF) support for Erlang +pkg_eamf_homepage = https://github.com/mrinalwadhwa/eamf +pkg_eamf_fetch = git +pkg_eamf_repo = https://github.com/mrinalwadhwa/eamf +pkg_eamf_commit = master + +PACKAGES += eavro +pkg_eavro_name = eavro +pkg_eavro_description = Apache Avro encoder/decoder +pkg_eavro_homepage = https://github.com/SIfoxDevTeam/eavro +pkg_eavro_fetch = git +pkg_eavro_repo = https://github.com/SIfoxDevTeam/eavro +pkg_eavro_commit = master + +PACKAGES += ecapnp +pkg_ecapnp_name = ecapnp +pkg_ecapnp_description = Cap'n Proto library for Erlang +pkg_ecapnp_homepage = https://github.com/kaos/ecapnp +pkg_ecapnp_fetch = git +pkg_ecapnp_repo = https://github.com/kaos/ecapnp +pkg_ecapnp_commit = master + +PACKAGES += econfig +pkg_econfig_name = econfig +pkg_econfig_description = simple Erlang config handler using INI files +pkg_econfig_homepage = https://github.com/benoitc/econfig +pkg_econfig_fetch = git +pkg_econfig_repo = https://github.com/benoitc/econfig +pkg_econfig_commit = master + +PACKAGES += edate +pkg_edate_name = edate +pkg_edate_description = date manipulation library for erlang +pkg_edate_homepage = https://github.com/dweldon/edate +pkg_edate_fetch = git +pkg_edate_repo = https://github.com/dweldon/edate +pkg_edate_commit = master + +PACKAGES += edgar +pkg_edgar_name = edgar +pkg_edgar_description = Erlang Does GNU AR +pkg_edgar_homepage = https://github.com/crownedgrouse/edgar +pkg_edgar_fetch = git +pkg_edgar_repo = https://github.com/crownedgrouse/edgar +pkg_edgar_commit = master + +PACKAGES += edis +pkg_edis_name = edis +pkg_edis_description = An Erlang implementation of Redis KV Store +pkg_edis_homepage = http://inaka.github.com/edis/ +pkg_edis_fetch = git +pkg_edis_repo = https://github.com/inaka/edis +pkg_edis_commit = master + +PACKAGES += edns 
+pkg_edns_name = edns +pkg_edns_description = Erlang/OTP DNS server +pkg_edns_homepage = https://github.com/hcvst/erlang-dns +pkg_edns_fetch = git +pkg_edns_repo = https://github.com/hcvst/erlang-dns +pkg_edns_commit = master + +PACKAGES += edown +pkg_edown_name = edown +pkg_edown_description = EDoc extension for generating Github-flavored Markdown +pkg_edown_homepage = https://github.com/uwiger/edown +pkg_edown_fetch = git +pkg_edown_repo = https://github.com/uwiger/edown +pkg_edown_commit = master + +PACKAGES += eep +pkg_eep_name = eep +pkg_eep_description = Erlang Easy Profiling (eep) application provides a way to analyze application performance and call hierarchy +pkg_eep_homepage = https://github.com/virtan/eep +pkg_eep_fetch = git +pkg_eep_repo = https://github.com/virtan/eep +pkg_eep_commit = master + +PACKAGES += eep_app +pkg_eep_app_name = eep_app +pkg_eep_app_description = Embedded Event Processing +pkg_eep_app_homepage = https://github.com/darach/eep-erl +pkg_eep_app_fetch = git +pkg_eep_app_repo = https://github.com/darach/eep-erl +pkg_eep_app_commit = master + +PACKAGES += efene +pkg_efene_name = efene +pkg_efene_description = Alternative syntax for the Erlang Programming Language focusing on simplicity, ease of use and programmer UX +pkg_efene_homepage = https://github.com/efene/efene +pkg_efene_fetch = git +pkg_efene_repo = https://github.com/efene/efene +pkg_efene_commit = master + +PACKAGES += egeoip +pkg_egeoip_name = egeoip +pkg_egeoip_description = Erlang IP Geolocation module, currently supporting the MaxMind GeoLite City Database. +pkg_egeoip_homepage = https://github.com/mochi/egeoip +pkg_egeoip_fetch = git +pkg_egeoip_repo = https://github.com/mochi/egeoip +pkg_egeoip_commit = master + +PACKAGES += ehsa +pkg_ehsa_name = ehsa +pkg_ehsa_description = Erlang HTTP server basic and digest authentication modules +pkg_ehsa_homepage = https://bitbucket.org/a12n/ehsa +pkg_ehsa_fetch = hg +pkg_ehsa_repo = https://bitbucket.org/a12n/ehsa +pkg_ehsa_commit = default + +PACKAGES += ej +pkg_ej_name = ej +pkg_ej_description = Helper module for working with Erlang terms representing JSON +pkg_ej_homepage = https://github.com/seth/ej +pkg_ej_fetch = git +pkg_ej_repo = https://github.com/seth/ej +pkg_ej_commit = master + +PACKAGES += ejabberd +pkg_ejabberd_name = ejabberd +pkg_ejabberd_description = Robust, ubiquitous and massively scalable Jabber / XMPP Instant Messaging platform +pkg_ejabberd_homepage = https://github.com/processone/ejabberd +pkg_ejabberd_fetch = git +pkg_ejabberd_repo = https://github.com/processone/ejabberd +pkg_ejabberd_commit = master + +PACKAGES += ejwt +pkg_ejwt_name = ejwt +pkg_ejwt_description = erlang library for JSON Web Token +pkg_ejwt_homepage = https://github.com/artefactop/ejwt +pkg_ejwt_fetch = git +pkg_ejwt_repo = https://github.com/artefactop/ejwt +pkg_ejwt_commit = master + +PACKAGES += ekaf +pkg_ekaf_name = ekaf +pkg_ekaf_description = A minimal, high-performance Kafka client in Erlang. +pkg_ekaf_homepage = https://github.com/helpshift/ekaf +pkg_ekaf_fetch = git +pkg_ekaf_repo = https://github.com/helpshift/ekaf +pkg_ekaf_commit = master + +PACKAGES += elarm +pkg_elarm_name = elarm +pkg_elarm_description = Alarm Manager for Erlang. 
+pkg_elarm_homepage = https://github.com/esl/elarm +pkg_elarm_fetch = git +pkg_elarm_repo = https://github.com/esl/elarm +pkg_elarm_commit = master + +PACKAGES += eleveldb +pkg_eleveldb_name = eleveldb +pkg_eleveldb_description = Erlang LevelDB API +pkg_eleveldb_homepage = https://github.com/basho/eleveldb +pkg_eleveldb_fetch = git +pkg_eleveldb_repo = https://github.com/basho/eleveldb +pkg_eleveldb_commit = master + +PACKAGES += elixir +pkg_elixir_name = elixir +pkg_elixir_description = Elixir is a dynamic, functional language designed for building scalable and maintainable applications +pkg_elixir_homepage = https://elixir-lang.org/ +pkg_elixir_fetch = git +pkg_elixir_repo = https://github.com/elixir-lang/elixir +pkg_elixir_commit = master + +PACKAGES += elli +pkg_elli_name = elli +pkg_elli_description = Simple, robust and performant Erlang web server +pkg_elli_homepage = https://github.com/elli-lib/elli +pkg_elli_fetch = git +pkg_elli_repo = https://github.com/elli-lib/elli +pkg_elli_commit = master + +PACKAGES += elvis +pkg_elvis_name = elvis +pkg_elvis_description = Erlang Style Reviewer +pkg_elvis_homepage = https://github.com/inaka/elvis +pkg_elvis_fetch = git +pkg_elvis_repo = https://github.com/inaka/elvis +pkg_elvis_commit = master + +PACKAGES += emagick +pkg_emagick_name = emagick +pkg_emagick_description = Wrapper for Graphics/ImageMagick command line tool. +pkg_emagick_homepage = https://github.com/kivra/emagick +pkg_emagick_fetch = git +pkg_emagick_repo = https://github.com/kivra/emagick +pkg_emagick_commit = master + +PACKAGES += emysql +pkg_emysql_name = emysql +pkg_emysql_description = Stable, pure Erlang MySQL driver. +pkg_emysql_homepage = https://github.com/Eonblast/Emysql +pkg_emysql_fetch = git +pkg_emysql_repo = https://github.com/Eonblast/Emysql +pkg_emysql_commit = master + +PACKAGES += enm +pkg_enm_name = enm +pkg_enm_description = Erlang driver for nanomsg +pkg_enm_homepage = https://github.com/basho/enm +pkg_enm_fetch = git +pkg_enm_repo = https://github.com/basho/enm +pkg_enm_commit = master + +PACKAGES += entop +pkg_entop_name = entop +pkg_entop_description = A top-like tool for monitoring an Erlang node +pkg_entop_homepage = https://github.com/mazenharake/entop +pkg_entop_fetch = git +pkg_entop_repo = https://github.com/mazenharake/entop +pkg_entop_commit = master + +PACKAGES += epcap +pkg_epcap_name = epcap +pkg_epcap_description = Erlang packet capture interface using pcap +pkg_epcap_homepage = https://github.com/msantos/epcap +pkg_epcap_fetch = git +pkg_epcap_repo = https://github.com/msantos/epcap +pkg_epcap_commit = master + +PACKAGES += eper +pkg_eper_name = eper +pkg_eper_description = Erlang performance and debugging tools. +pkg_eper_homepage = https://github.com/massemanet/eper +pkg_eper_fetch = git +pkg_eper_repo = https://github.com/massemanet/eper +pkg_eper_commit = master + +PACKAGES += epgsql +pkg_epgsql_name = epgsql +pkg_epgsql_description = Erlang PostgreSQL client library. +pkg_epgsql_homepage = https://github.com/epgsql/epgsql +pkg_epgsql_fetch = git +pkg_epgsql_repo = https://github.com/epgsql/epgsql +pkg_epgsql_commit = master + +PACKAGES += episcina +pkg_episcina_name = episcina +pkg_episcina_description = A simple non intrusive resource pool for connections +pkg_episcina_homepage = https://github.com/erlware/episcina +pkg_episcina_fetch = git +pkg_episcina_repo = https://github.com/erlware/episcina +pkg_episcina_commit = master + +PACKAGES += eplot +pkg_eplot_name = eplot +pkg_eplot_description = A plot engine written in erlang. 
+pkg_eplot_homepage = https://github.com/psyeugenic/eplot +pkg_eplot_fetch = git +pkg_eplot_repo = https://github.com/psyeugenic/eplot +pkg_eplot_commit = master + +PACKAGES += epocxy +pkg_epocxy_name = epocxy +pkg_epocxy_description = Erlang Patterns of Concurrency +pkg_epocxy_homepage = https://github.com/duomark/epocxy +pkg_epocxy_fetch = git +pkg_epocxy_repo = https://github.com/duomark/epocxy +pkg_epocxy_commit = master + +PACKAGES += epubnub +pkg_epubnub_name = epubnub +pkg_epubnub_description = Erlang PubNub API +pkg_epubnub_homepage = https://github.com/tsloughter/epubnub +pkg_epubnub_fetch = git +pkg_epubnub_repo = https://github.com/tsloughter/epubnub +pkg_epubnub_commit = master + +PACKAGES += eqm +pkg_eqm_name = eqm +pkg_eqm_description = Erlang pub sub with supply-demand channels +pkg_eqm_homepage = https://github.com/loucash/eqm +pkg_eqm_fetch = git +pkg_eqm_repo = https://github.com/loucash/eqm +pkg_eqm_commit = master + +PACKAGES += eredis +pkg_eredis_name = eredis +pkg_eredis_description = Erlang Redis client +pkg_eredis_homepage = https://github.com/wooga/eredis +pkg_eredis_fetch = git +pkg_eredis_repo = https://github.com/wooga/eredis +pkg_eredis_commit = master + +PACKAGES += eredis_pool +pkg_eredis_pool_name = eredis_pool +pkg_eredis_pool_description = eredis_pool is Pool of Redis clients, using eredis and poolboy. +pkg_eredis_pool_homepage = https://github.com/hiroeorz/eredis_pool +pkg_eredis_pool_fetch = git +pkg_eredis_pool_repo = https://github.com/hiroeorz/eredis_pool +pkg_eredis_pool_commit = master + +PACKAGES += erl_streams +pkg_erl_streams_name = erl_streams +pkg_erl_streams_description = Streams in Erlang +pkg_erl_streams_homepage = https://github.com/epappas/erl_streams +pkg_erl_streams_fetch = git +pkg_erl_streams_repo = https://github.com/epappas/erl_streams +pkg_erl_streams_commit = master + +PACKAGES += erlang_cep +pkg_erlang_cep_name = erlang_cep +pkg_erlang_cep_description = A basic CEP package written in erlang +pkg_erlang_cep_homepage = https://github.com/danmacklin/erlang_cep +pkg_erlang_cep_fetch = git +pkg_erlang_cep_repo = https://github.com/danmacklin/erlang_cep +pkg_erlang_cep_commit = master + +PACKAGES += erlang_js +pkg_erlang_js_name = erlang_js +pkg_erlang_js_description = A linked-in driver for Erlang to Mozilla's Spidermonkey Javascript runtime. +pkg_erlang_js_homepage = https://github.com/basho/erlang_js +pkg_erlang_js_fetch = git +pkg_erlang_js_repo = https://github.com/basho/erlang_js +pkg_erlang_js_commit = master + +PACKAGES += erlang_localtime +pkg_erlang_localtime_name = erlang_localtime +pkg_erlang_localtime_description = Erlang library for conversion from one local time to another +pkg_erlang_localtime_homepage = https://github.com/dmitryme/erlang_localtime +pkg_erlang_localtime_fetch = git +pkg_erlang_localtime_repo = https://github.com/dmitryme/erlang_localtime +pkg_erlang_localtime_commit = master + +PACKAGES += erlang_smtp +pkg_erlang_smtp_name = erlang_smtp +pkg_erlang_smtp_description = Erlang SMTP and POP3 server code. 
+pkg_erlang_smtp_homepage = https://github.com/tonyg/erlang-smtp +pkg_erlang_smtp_fetch = git +pkg_erlang_smtp_repo = https://github.com/tonyg/erlang-smtp +pkg_erlang_smtp_commit = master + +PACKAGES += erlang_term +pkg_erlang_term_name = erlang_term +pkg_erlang_term_description = Erlang Term Info +pkg_erlang_term_homepage = https://github.com/okeuday/erlang_term +pkg_erlang_term_fetch = git +pkg_erlang_term_repo = https://github.com/okeuday/erlang_term +pkg_erlang_term_commit = master + +PACKAGES += erlastic_search +pkg_erlastic_search_name = erlastic_search +pkg_erlastic_search_description = An Erlang app for communicating with Elastic Search's rest interface. +pkg_erlastic_search_homepage = https://github.com/tsloughter/erlastic_search +pkg_erlastic_search_fetch = git +pkg_erlastic_search_repo = https://github.com/tsloughter/erlastic_search +pkg_erlastic_search_commit = master + +PACKAGES += erlasticsearch +pkg_erlasticsearch_name = erlasticsearch +pkg_erlasticsearch_description = Erlang thrift interface to elastic_search +pkg_erlasticsearch_homepage = https://github.com/dieswaytoofast/erlasticsearch +pkg_erlasticsearch_fetch = git +pkg_erlasticsearch_repo = https://github.com/dieswaytoofast/erlasticsearch +pkg_erlasticsearch_commit = master + +PACKAGES += erlbrake +pkg_erlbrake_name = erlbrake +pkg_erlbrake_description = Erlang Airbrake notification client +pkg_erlbrake_homepage = https://github.com/kenpratt/erlbrake +pkg_erlbrake_fetch = git +pkg_erlbrake_repo = https://github.com/kenpratt/erlbrake +pkg_erlbrake_commit = master + +PACKAGES += erlcloud +pkg_erlcloud_name = erlcloud +pkg_erlcloud_description = Cloud Computing library for erlang (Amazon EC2, S3, SQS, SimpleDB, Mechanical Turk, ELB) +pkg_erlcloud_homepage = https://github.com/gleber/erlcloud +pkg_erlcloud_fetch = git +pkg_erlcloud_repo = https://github.com/gleber/erlcloud +pkg_erlcloud_commit = master + +PACKAGES += erlcron +pkg_erlcron_name = erlcron +pkg_erlcron_description = Erlang cronish system +pkg_erlcron_homepage = https://github.com/erlware/erlcron +pkg_erlcron_fetch = git +pkg_erlcron_repo = https://github.com/erlware/erlcron +pkg_erlcron_commit = master + +PACKAGES += erldb +pkg_erldb_name = erldb +pkg_erldb_description = ORM (Object-relational mapping) application implemented in Erlang +pkg_erldb_homepage = http://erldb.org +pkg_erldb_fetch = git +pkg_erldb_repo = https://github.com/erldb/erldb +pkg_erldb_commit = master + +PACKAGES += erldis +pkg_erldis_name = erldis +pkg_erldis_description = redis erlang client library +pkg_erldis_homepage = https://github.com/cstar/erldis +pkg_erldis_fetch = git +pkg_erldis_repo = https://github.com/cstar/erldis +pkg_erldis_commit = master + +PACKAGES += erldns +pkg_erldns_name = erldns +pkg_erldns_description = DNS server, in erlang. 
+pkg_erldns_homepage = https://github.com/aetrion/erl-dns +pkg_erldns_fetch = git +pkg_erldns_repo = https://github.com/aetrion/erl-dns +pkg_erldns_commit = master + +PACKAGES += erldocker +pkg_erldocker_name = erldocker +pkg_erldocker_description = Docker Remote API client for Erlang +pkg_erldocker_homepage = https://github.com/proger/erldocker +pkg_erldocker_fetch = git +pkg_erldocker_repo = https://github.com/proger/erldocker +pkg_erldocker_commit = master + +PACKAGES += erlfsmon +pkg_erlfsmon_name = erlfsmon +pkg_erlfsmon_description = Erlang filesystem event watcher for Linux and OSX +pkg_erlfsmon_homepage = https://github.com/proger/erlfsmon +pkg_erlfsmon_fetch = git +pkg_erlfsmon_repo = https://github.com/proger/erlfsmon +pkg_erlfsmon_commit = master + +PACKAGES += erlgit +pkg_erlgit_name = erlgit +pkg_erlgit_description = Erlang convenience wrapper around git executable +pkg_erlgit_homepage = https://github.com/gleber/erlgit +pkg_erlgit_fetch = git +pkg_erlgit_repo = https://github.com/gleber/erlgit +pkg_erlgit_commit = master + +PACKAGES += erlguten +pkg_erlguten_name = erlguten +pkg_erlguten_description = ErlGuten is a system for high-quality typesetting, written purely in Erlang. +pkg_erlguten_homepage = https://github.com/richcarl/erlguten +pkg_erlguten_fetch = git +pkg_erlguten_repo = https://github.com/richcarl/erlguten +pkg_erlguten_commit = master + +PACKAGES += erlmc +pkg_erlmc_name = erlmc +pkg_erlmc_description = Erlang memcached binary protocol client +pkg_erlmc_homepage = https://github.com/jkvor/erlmc +pkg_erlmc_fetch = git +pkg_erlmc_repo = https://github.com/jkvor/erlmc +pkg_erlmc_commit = master + +PACKAGES += erlmongo +pkg_erlmongo_name = erlmongo +pkg_erlmongo_description = Record based Erlang driver for MongoDB with gridfs support +pkg_erlmongo_homepage = https://github.com/SergejJurecko/erlmongo +pkg_erlmongo_fetch = git +pkg_erlmongo_repo = https://github.com/SergejJurecko/erlmongo +pkg_erlmongo_commit = master + +PACKAGES += erlog +pkg_erlog_name = erlog +pkg_erlog_description = Prolog interpreter in and for Erlang +pkg_erlog_homepage = https://github.com/rvirding/erlog +pkg_erlog_fetch = git +pkg_erlog_repo = https://github.com/rvirding/erlog +pkg_erlog_commit = master + +PACKAGES += erlpass +pkg_erlpass_name = erlpass +pkg_erlpass_description = A library to handle password hashing and changing in a safe manner, independent from any kind of storage whatsoever. +pkg_erlpass_homepage = https://github.com/ferd/erlpass +pkg_erlpass_fetch = git +pkg_erlpass_repo = https://github.com/ferd/erlpass +pkg_erlpass_commit = master + +PACKAGES += erlport +pkg_erlport_name = erlport +pkg_erlport_description = ErlPort - connect Erlang to other languages +pkg_erlport_homepage = https://github.com/hdima/erlport +pkg_erlport_fetch = git +pkg_erlport_repo = https://github.com/hdima/erlport +pkg_erlport_commit = master + +PACKAGES += erlsh +pkg_erlsh_name = erlsh +pkg_erlsh_description = Erlang shell tools +pkg_erlsh_homepage = https://github.com/proger/erlsh +pkg_erlsh_fetch = git +pkg_erlsh_repo = https://github.com/proger/erlsh +pkg_erlsh_commit = master + +PACKAGES += erlsha2 +pkg_erlsha2_name = erlsha2 +pkg_erlsha2_description = SHA-224, SHA-256, SHA-384, SHA-512 implemented in Erlang NIFs. 
+pkg_erlsha2_homepage = https://github.com/vinoski/erlsha2 +pkg_erlsha2_fetch = git +pkg_erlsha2_repo = https://github.com/vinoski/erlsha2 +pkg_erlsha2_commit = master + +PACKAGES += erlsom +pkg_erlsom_name = erlsom +pkg_erlsom_description = XML parser for Erlang +pkg_erlsom_homepage = https://github.com/willemdj/erlsom +pkg_erlsom_fetch = git +pkg_erlsom_repo = https://github.com/willemdj/erlsom +pkg_erlsom_commit = master + +PACKAGES += erlubi +pkg_erlubi_name = erlubi +pkg_erlubi_description = Ubigraph Erlang Client (and Process Visualizer) +pkg_erlubi_homepage = https://github.com/krestenkrab/erlubi +pkg_erlubi_fetch = git +pkg_erlubi_repo = https://github.com/krestenkrab/erlubi +pkg_erlubi_commit = master + +PACKAGES += erlvolt +pkg_erlvolt_name = erlvolt +pkg_erlvolt_description = VoltDB Erlang Client Driver +pkg_erlvolt_homepage = https://github.com/VoltDB/voltdb-client-erlang +pkg_erlvolt_fetch = git +pkg_erlvolt_repo = https://github.com/VoltDB/voltdb-client-erlang +pkg_erlvolt_commit = master + +PACKAGES += erlware_commons +pkg_erlware_commons_name = erlware_commons +pkg_erlware_commons_description = Erlware Commons is an Erlware project focused on all aspects of reusable Erlang components. +pkg_erlware_commons_homepage = https://github.com/erlware/erlware_commons +pkg_erlware_commons_fetch = git +pkg_erlware_commons_repo = https://github.com/erlware/erlware_commons +pkg_erlware_commons_commit = master + +PACKAGES += erlydtl +pkg_erlydtl_name = erlydtl +pkg_erlydtl_description = Django Template Language for Erlang. +pkg_erlydtl_homepage = https://github.com/erlydtl/erlydtl +pkg_erlydtl_fetch = git +pkg_erlydtl_repo = https://github.com/erlydtl/erlydtl +pkg_erlydtl_commit = master + +PACKAGES += errd +pkg_errd_name = errd +pkg_errd_description = Erlang RRDTool library +pkg_errd_homepage = https://github.com/archaelus/errd +pkg_errd_fetch = git +pkg_errd_repo = https://github.com/archaelus/errd +pkg_errd_commit = master + +PACKAGES += erserve +pkg_erserve_name = erserve +pkg_erserve_description = Erlang/Rserve communication interface +pkg_erserve_homepage = https://github.com/del/erserve +pkg_erserve_fetch = git +pkg_erserve_repo = https://github.com/del/erserve +pkg_erserve_commit = master + +PACKAGES += erwa +pkg_erwa_name = erwa +pkg_erwa_description = A WAMP router and client written in Erlang. 
+pkg_erwa_homepage = https://github.com/bwegh/erwa +pkg_erwa_fetch = git +pkg_erwa_repo = https://github.com/bwegh/erwa +pkg_erwa_commit = master + +PACKAGES += escalus +pkg_escalus_name = escalus +pkg_escalus_description = An XMPP client library in Erlang for conveniently testing XMPP servers +pkg_escalus_homepage = https://github.com/esl/escalus +pkg_escalus_fetch = git +pkg_escalus_repo = https://github.com/esl/escalus +pkg_escalus_commit = master + +PACKAGES += esh_mk +pkg_esh_mk_name = esh_mk +pkg_esh_mk_description = esh template engine plugin for erlang.mk +pkg_esh_mk_homepage = https://github.com/crownedgrouse/esh.mk +pkg_esh_mk_fetch = git +pkg_esh_mk_repo = https://github.com/crownedgrouse/esh.mk.git +pkg_esh_mk_commit = master + +PACKAGES += espec +pkg_espec_name = espec +pkg_espec_description = ESpec: Behaviour driven development framework for Erlang +pkg_espec_homepage = https://github.com/lucaspiller/espec +pkg_espec_fetch = git +pkg_espec_repo = https://github.com/lucaspiller/espec +pkg_espec_commit = master + +PACKAGES += estatsd +pkg_estatsd_name = estatsd +pkg_estatsd_description = Erlang stats aggregation app that periodically flushes data to graphite +pkg_estatsd_homepage = https://github.com/RJ/estatsd +pkg_estatsd_fetch = git +pkg_estatsd_repo = https://github.com/RJ/estatsd +pkg_estatsd_commit = master + +PACKAGES += etap +pkg_etap_name = etap +pkg_etap_description = etap is a simple erlang testing library that provides TAP compliant output. +pkg_etap_homepage = https://github.com/ngerakines/etap +pkg_etap_fetch = git +pkg_etap_repo = https://github.com/ngerakines/etap +pkg_etap_commit = master + +PACKAGES += etest +pkg_etest_name = etest +pkg_etest_description = A lightweight, convention over configuration test framework for Erlang +pkg_etest_homepage = https://github.com/wooga/etest +pkg_etest_fetch = git +pkg_etest_repo = https://github.com/wooga/etest +pkg_etest_commit = master + +PACKAGES += etest_http +pkg_etest_http_name = etest_http +pkg_etest_http_description = etest Assertions around HTTP (client-side) +pkg_etest_http_homepage = https://github.com/wooga/etest_http +pkg_etest_http_fetch = git +pkg_etest_http_repo = https://github.com/wooga/etest_http +pkg_etest_http_commit = master + +PACKAGES += etoml +pkg_etoml_name = etoml +pkg_etoml_description = TOML language erlang parser +pkg_etoml_homepage = https://github.com/kalta/etoml +pkg_etoml_fetch = git +pkg_etoml_repo = https://github.com/kalta/etoml +pkg_etoml_commit = master + +PACKAGES += eunit +pkg_eunit_name = eunit +pkg_eunit_description = The EUnit lightweight unit testing framework for Erlang - this is the canonical development repository. +pkg_eunit_homepage = https://github.com/richcarl/eunit +pkg_eunit_fetch = git +pkg_eunit_repo = https://github.com/richcarl/eunit +pkg_eunit_commit = master + +PACKAGES += eunit_formatters +pkg_eunit_formatters_name = eunit_formatters +pkg_eunit_formatters_description = Because eunit's output sucks. Let's make it better. 
+pkg_eunit_formatters_homepage = https://github.com/seancribbs/eunit_formatters +pkg_eunit_formatters_fetch = git +pkg_eunit_formatters_repo = https://github.com/seancribbs/eunit_formatters +pkg_eunit_formatters_commit = master + +PACKAGES += euthanasia +pkg_euthanasia_name = euthanasia +pkg_euthanasia_description = Merciful killer for your Erlang processes +pkg_euthanasia_homepage = https://github.com/doubleyou/euthanasia +pkg_euthanasia_fetch = git +pkg_euthanasia_repo = https://github.com/doubleyou/euthanasia +pkg_euthanasia_commit = master + +PACKAGES += evum +pkg_evum_name = evum +pkg_evum_description = Spawn Linux VMs as Erlang processes in the Erlang VM +pkg_evum_homepage = https://github.com/msantos/evum +pkg_evum_fetch = git +pkg_evum_repo = https://github.com/msantos/evum +pkg_evum_commit = master + +PACKAGES += exec +pkg_exec_name = erlexec +pkg_exec_description = Execute and control OS processes from Erlang/OTP. +pkg_exec_homepage = http://saleyn.github.com/erlexec +pkg_exec_fetch = git +pkg_exec_repo = https://github.com/saleyn/erlexec +pkg_exec_commit = master + +PACKAGES += exml +pkg_exml_name = exml +pkg_exml_description = XML parsing library in Erlang +pkg_exml_homepage = https://github.com/paulgray/exml +pkg_exml_fetch = git +pkg_exml_repo = https://github.com/paulgray/exml +pkg_exml_commit = master + +PACKAGES += exometer +pkg_exometer_name = exometer +pkg_exometer_description = Basic measurement objects and probe behavior +pkg_exometer_homepage = https://github.com/Feuerlabs/exometer +pkg_exometer_fetch = git +pkg_exometer_repo = https://github.com/Feuerlabs/exometer +pkg_exometer_commit = master + +PACKAGES += exs1024 +pkg_exs1024_name = exs1024 +pkg_exs1024_description = Xorshift1024star pseudo random number generator for Erlang. +pkg_exs1024_homepage = https://github.com/jj1bdx/exs1024 +pkg_exs1024_fetch = git +pkg_exs1024_repo = https://github.com/jj1bdx/exs1024 +pkg_exs1024_commit = master + +PACKAGES += exs64 +pkg_exs64_name = exs64 +pkg_exs64_description = Xorshift64star pseudo random number generator for Erlang. +pkg_exs64_homepage = https://github.com/jj1bdx/exs64 +pkg_exs64_fetch = git +pkg_exs64_repo = https://github.com/jj1bdx/exs64 +pkg_exs64_commit = master + +PACKAGES += exsplus116 +pkg_exsplus116_name = exsplus116 +pkg_exsplus116_description = Xorshift116plus for Erlang +pkg_exsplus116_homepage = https://github.com/jj1bdx/exsplus116 +pkg_exsplus116_fetch = git +pkg_exsplus116_repo = https://github.com/jj1bdx/exsplus116 +pkg_exsplus116_commit = master + +PACKAGES += exsplus128 +pkg_exsplus128_name = exsplus128 +pkg_exsplus128_description = Xorshift128plus pseudo random number generator for Erlang. +pkg_exsplus128_homepage = https://github.com/jj1bdx/exsplus128 +pkg_exsplus128_fetch = git +pkg_exsplus128_repo = https://github.com/jj1bdx/exsplus128 +pkg_exsplus128_commit = master + +PACKAGES += ezmq +pkg_ezmq_name = ezmq +pkg_ezmq_description = zMQ implemented in Erlang +pkg_ezmq_homepage = https://github.com/RoadRunnr/ezmq +pkg_ezmq_fetch = git +pkg_ezmq_repo = https://github.com/RoadRunnr/ezmq +pkg_ezmq_commit = master + +PACKAGES += ezmtp +pkg_ezmtp_name = ezmtp +pkg_ezmtp_description = ZMTP protocol in pure Erlang. 
+pkg_ezmtp_homepage = https://github.com/a13x/ezmtp +pkg_ezmtp_fetch = git +pkg_ezmtp_repo = https://github.com/a13x/ezmtp +pkg_ezmtp_commit = master + +PACKAGES += fast_disk_log +pkg_fast_disk_log_name = fast_disk_log +pkg_fast_disk_log_description = Pool-based asynchronous Erlang disk logger +pkg_fast_disk_log_homepage = https://github.com/lpgauth/fast_disk_log +pkg_fast_disk_log_fetch = git +pkg_fast_disk_log_repo = https://github.com/lpgauth/fast_disk_log +pkg_fast_disk_log_commit = master + +PACKAGES += feeder +pkg_feeder_name = feeder +pkg_feeder_description = Stream parse RSS and Atom formatted XML feeds. +pkg_feeder_homepage = https://github.com/michaelnisi/feeder +pkg_feeder_fetch = git +pkg_feeder_repo = https://github.com/michaelnisi/feeder +pkg_feeder_commit = master + +PACKAGES += find_crate +pkg_find_crate_name = find_crate +pkg_find_crate_description = Find Rust libs and exes in Erlang application priv directory +pkg_find_crate_homepage = https://github.com/goertzenator/find_crate +pkg_find_crate_fetch = git +pkg_find_crate_repo = https://github.com/goertzenator/find_crate +pkg_find_crate_commit = master + +PACKAGES += fix +pkg_fix_name = fix +pkg_fix_description = http://fixprotocol.org/ implementation. +pkg_fix_homepage = https://github.com/maxlapshin/fix +pkg_fix_fetch = git +pkg_fix_repo = https://github.com/maxlapshin/fix +pkg_fix_commit = master + +PACKAGES += flower +pkg_flower_name = flower +pkg_flower_description = FlowER - an Erlang OpenFlow development platform +pkg_flower_homepage = https://github.com/travelping/flower +pkg_flower_fetch = git +pkg_flower_repo = https://github.com/travelping/flower +pkg_flower_commit = master + +PACKAGES += fn +pkg_fn_name = fn +pkg_fn_description = Function utilities for Erlang +pkg_fn_homepage = https://github.com/reiddraper/fn +pkg_fn_fetch = git +pkg_fn_repo = https://github.com/reiddraper/fn +pkg_fn_commit = master + +PACKAGES += folsom +pkg_folsom_name = folsom +pkg_folsom_description = Expose Erlang Events and Metrics +pkg_folsom_homepage = https://github.com/boundary/folsom +pkg_folsom_fetch = git +pkg_folsom_repo = https://github.com/boundary/folsom +pkg_folsom_commit = master + +PACKAGES += folsom_cowboy +pkg_folsom_cowboy_name = folsom_cowboy +pkg_folsom_cowboy_description = A Cowboy based Folsom HTTP Wrapper. 
+pkg_folsom_cowboy_homepage = https://github.com/boundary/folsom_cowboy +pkg_folsom_cowboy_fetch = git +pkg_folsom_cowboy_repo = https://github.com/boundary/folsom_cowboy +pkg_folsom_cowboy_commit = master + +PACKAGES += folsomite +pkg_folsomite_name = folsomite +pkg_folsomite_description = blow up your graphite / riemann server with folsom metrics +pkg_folsomite_homepage = https://github.com/campanja/folsomite +pkg_folsomite_fetch = git +pkg_folsomite_repo = https://github.com/campanja/folsomite +pkg_folsomite_commit = master + +PACKAGES += fs +pkg_fs_name = fs +pkg_fs_description = Erlang FileSystem Listener +pkg_fs_homepage = https://github.com/synrc/fs +pkg_fs_fetch = git +pkg_fs_repo = https://github.com/synrc/fs +pkg_fs_commit = master + +PACKAGES += fuse +pkg_fuse_name = fuse +pkg_fuse_description = A Circuit Breaker for Erlang +pkg_fuse_homepage = https://github.com/jlouis/fuse +pkg_fuse_fetch = git +pkg_fuse_repo = https://github.com/jlouis/fuse +pkg_fuse_commit = master + +PACKAGES += gcm +pkg_gcm_name = gcm +pkg_gcm_description = An Erlang application for Google Cloud Messaging +pkg_gcm_homepage = https://github.com/pdincau/gcm-erlang +pkg_gcm_fetch = git +pkg_gcm_repo = https://github.com/pdincau/gcm-erlang +pkg_gcm_commit = master + +PACKAGES += gcprof +pkg_gcprof_name = gcprof +pkg_gcprof_description = Garbage Collection profiler for Erlang +pkg_gcprof_homepage = https://github.com/knutin/gcprof +pkg_gcprof_fetch = git +pkg_gcprof_repo = https://github.com/knutin/gcprof +pkg_gcprof_commit = master + +PACKAGES += geas +pkg_geas_name = geas +pkg_geas_description = Guess Erlang Application Scattering +pkg_geas_homepage = https://github.com/crownedgrouse/geas +pkg_geas_fetch = git +pkg_geas_repo = https://github.com/crownedgrouse/geas +pkg_geas_commit = master + +PACKAGES += geef +pkg_geef_name = geef +pkg_geef_description = Git NEEEEF (Erlang NIF) +pkg_geef_homepage = https://github.com/carlosmn/geef +pkg_geef_fetch = git +pkg_geef_repo = https://github.com/carlosmn/geef +pkg_geef_commit = master + +PACKAGES += gen_coap +pkg_gen_coap_name = gen_coap +pkg_gen_coap_description = Generic Erlang CoAP Client/Server +pkg_gen_coap_homepage = https://github.com/gotthardp/gen_coap +pkg_gen_coap_fetch = git +pkg_gen_coap_repo = https://github.com/gotthardp/gen_coap +pkg_gen_coap_commit = master + +PACKAGES += gen_cycle +pkg_gen_cycle_name = gen_cycle +pkg_gen_cycle_description = Simple, generic OTP behaviour for recurring tasks +pkg_gen_cycle_homepage = https://github.com/aerosol/gen_cycle +pkg_gen_cycle_fetch = git +pkg_gen_cycle_repo = https://github.com/aerosol/gen_cycle +pkg_gen_cycle_commit = develop + +PACKAGES += gen_icmp +pkg_gen_icmp_name = gen_icmp +pkg_gen_icmp_description = Erlang interface to ICMP sockets +pkg_gen_icmp_homepage = https://github.com/msantos/gen_icmp +pkg_gen_icmp_fetch = git +pkg_gen_icmp_repo = https://github.com/msantos/gen_icmp +pkg_gen_icmp_commit = master + +PACKAGES += gen_leader +pkg_gen_leader_name = gen_leader +pkg_gen_leader_description = leader election behavior +pkg_gen_leader_homepage = https://github.com/garret-smith/gen_leader_revival +pkg_gen_leader_fetch = git +pkg_gen_leader_repo = https://github.com/garret-smith/gen_leader_revival +pkg_gen_leader_commit = master + +PACKAGES += gen_nb_server +pkg_gen_nb_server_name = gen_nb_server +pkg_gen_nb_server_description = OTP behavior for writing non-blocking servers +pkg_gen_nb_server_homepage = https://github.com/kevsmith/gen_nb_server +pkg_gen_nb_server_fetch = git +pkg_gen_nb_server_repo = 
https://github.com/kevsmith/gen_nb_server +pkg_gen_nb_server_commit = master + +PACKAGES += gen_paxos +pkg_gen_paxos_name = gen_paxos +pkg_gen_paxos_description = An Erlang/OTP-style implementation of the PAXOS distributed consensus protocol +pkg_gen_paxos_homepage = https://github.com/gburd/gen_paxos +pkg_gen_paxos_fetch = git +pkg_gen_paxos_repo = https://github.com/gburd/gen_paxos +pkg_gen_paxos_commit = master + +PACKAGES += gen_rpc +pkg_gen_rpc_name = gen_rpc +pkg_gen_rpc_description = A scalable RPC library for Erlang-VM based languages +pkg_gen_rpc_homepage = https://github.com/priestjim/gen_rpc.git +pkg_gen_rpc_fetch = git +pkg_gen_rpc_repo = https://github.com/priestjim/gen_rpc.git +pkg_gen_rpc_commit = master + +PACKAGES += gen_smtp +pkg_gen_smtp_name = gen_smtp +pkg_gen_smtp_description = A generic Erlang SMTP server and client that can be extended via callback modules +pkg_gen_smtp_homepage = https://github.com/Vagabond/gen_smtp +pkg_gen_smtp_fetch = git +pkg_gen_smtp_repo = https://github.com/Vagabond/gen_smtp +pkg_gen_smtp_commit = master + +PACKAGES += gen_tracker +pkg_gen_tracker_name = gen_tracker +pkg_gen_tracker_description = supervisor with ets handling of children and their metadata +pkg_gen_tracker_homepage = https://github.com/erlyvideo/gen_tracker +pkg_gen_tracker_fetch = git +pkg_gen_tracker_repo = https://github.com/erlyvideo/gen_tracker +pkg_gen_tracker_commit = master + +PACKAGES += gen_unix +pkg_gen_unix_name = gen_unix +pkg_gen_unix_description = Erlang Unix socket interface +pkg_gen_unix_homepage = https://github.com/msantos/gen_unix +pkg_gen_unix_fetch = git +pkg_gen_unix_repo = https://github.com/msantos/gen_unix +pkg_gen_unix_commit = master + +PACKAGES += geode +pkg_geode_name = geode +pkg_geode_description = geohash/proximity lookup in pure, uncut erlang. +pkg_geode_homepage = https://github.com/bradfordw/geode +pkg_geode_fetch = git +pkg_geode_repo = https://github.com/bradfordw/geode +pkg_geode_commit = master + +PACKAGES += getopt +pkg_getopt_name = getopt +pkg_getopt_description = Module to parse command line arguments using the GNU getopt syntax +pkg_getopt_homepage = https://github.com/jcomellas/getopt +pkg_getopt_fetch = git +pkg_getopt_repo = https://github.com/jcomellas/getopt +pkg_getopt_commit = master + +PACKAGES += gettext +pkg_gettext_name = gettext +pkg_gettext_description = Erlang internationalization library. 
+pkg_gettext_homepage = https://github.com/etnt/gettext +pkg_gettext_fetch = git +pkg_gettext_repo = https://github.com/etnt/gettext +pkg_gettext_commit = master + +PACKAGES += giallo +pkg_giallo_name = giallo +pkg_giallo_description = Small and flexible web framework on top of Cowboy +pkg_giallo_homepage = https://github.com/kivra/giallo +pkg_giallo_fetch = git +pkg_giallo_repo = https://github.com/kivra/giallo +pkg_giallo_commit = master + +PACKAGES += gin +pkg_gin_name = gin +pkg_gin_description = The guards and for Erlang parse_transform +pkg_gin_homepage = https://github.com/mad-cocktail/gin +pkg_gin_fetch = git +pkg_gin_repo = https://github.com/mad-cocktail/gin +pkg_gin_commit = master + +PACKAGES += gitty +pkg_gitty_name = gitty +pkg_gitty_description = Git access in erlang +pkg_gitty_homepage = https://github.com/maxlapshin/gitty +pkg_gitty_fetch = git +pkg_gitty_repo = https://github.com/maxlapshin/gitty +pkg_gitty_commit = master + +PACKAGES += gold_fever +pkg_gold_fever_name = gold_fever +pkg_gold_fever_description = A Treasure Hunt for Erlangers +pkg_gold_fever_homepage = https://github.com/inaka/gold_fever +pkg_gold_fever_fetch = git +pkg_gold_fever_repo = https://github.com/inaka/gold_fever +pkg_gold_fever_commit = master + +PACKAGES += gpb +pkg_gpb_name = gpb +pkg_gpb_description = A Google Protobuf implementation for Erlang +pkg_gpb_homepage = https://github.com/tomas-abrahamsson/gpb +pkg_gpb_fetch = git +pkg_gpb_repo = https://github.com/tomas-abrahamsson/gpb +pkg_gpb_commit = master + +PACKAGES += gproc +pkg_gproc_name = gproc +pkg_gproc_description = Extended process registry for Erlang +pkg_gproc_homepage = https://github.com/uwiger/gproc +pkg_gproc_fetch = git +pkg_gproc_repo = https://github.com/uwiger/gproc +pkg_gproc_commit = master + +PACKAGES += grapherl +pkg_grapherl_name = grapherl +pkg_grapherl_description = Create graphs of Erlang systems and programs +pkg_grapherl_homepage = https://github.com/eproxus/grapherl +pkg_grapherl_fetch = git +pkg_grapherl_repo = https://github.com/eproxus/grapherl +pkg_grapherl_commit = master + +PACKAGES += grpc +pkg_grpc_name = grpc +pkg_grpc_description = gRPC server in Erlang +pkg_grpc_homepage = https://github.com/Bluehouse-Technology/grpc +pkg_grpc_fetch = git +pkg_grpc_repo = https://github.com/Bluehouse-Technology/grpc +pkg_grpc_commit = master + +PACKAGES += grpc_client +pkg_grpc_client_name = grpc_client +pkg_grpc_client_description = gRPC client in Erlang +pkg_grpc_client_homepage = https://github.com/Bluehouse-Technology/grpc_client +pkg_grpc_client_fetch = git +pkg_grpc_client_repo = https://github.com/Bluehouse-Technology/grpc_client +pkg_grpc_client_commit = master + +PACKAGES += gun +pkg_gun_name = gun +pkg_gun_description = Asynchronous SPDY, HTTP and Websocket client written in Erlang. +pkg_gun_homepage = http://ninenines.eu +pkg_gun_fetch = git +pkg_gun_repo = https://github.com/ninenines/gun +pkg_gun_commit = master + +PACKAGES += gut +pkg_gut_name = gut +pkg_gut_description = gut is a template printing, aka scaffolding, tool for Erlang. 
Like rails generate or yeoman +pkg_gut_homepage = https://github.com/unbalancedparentheses/gut +pkg_gut_fetch = git +pkg_gut_repo = https://github.com/unbalancedparentheses/gut +pkg_gut_commit = master + +PACKAGES += hackney +pkg_hackney_name = hackney +pkg_hackney_description = simple HTTP client in Erlang +pkg_hackney_homepage = https://github.com/benoitc/hackney +pkg_hackney_fetch = git +pkg_hackney_repo = https://github.com/benoitc/hackney +pkg_hackney_commit = master + +PACKAGES += hamcrest +pkg_hamcrest_name = hamcrest +pkg_hamcrest_description = Erlang port of Hamcrest +pkg_hamcrest_homepage = https://github.com/hyperthunk/hamcrest-erlang +pkg_hamcrest_fetch = git +pkg_hamcrest_repo = https://github.com/hyperthunk/hamcrest-erlang +pkg_hamcrest_commit = master + +PACKAGES += hanoidb +pkg_hanoidb_name = hanoidb +pkg_hanoidb_description = Erlang LSM BTree Storage +pkg_hanoidb_homepage = https://github.com/krestenkrab/hanoidb +pkg_hanoidb_fetch = git +pkg_hanoidb_repo = https://github.com/krestenkrab/hanoidb +pkg_hanoidb_commit = master + +PACKAGES += hottub +pkg_hottub_name = hottub +pkg_hottub_description = Permanent Erlang Worker Pool +pkg_hottub_homepage = https://github.com/bfrog/hottub +pkg_hottub_fetch = git +pkg_hottub_repo = https://github.com/bfrog/hottub +pkg_hottub_commit = master + +PACKAGES += hpack +pkg_hpack_name = hpack +pkg_hpack_description = HPACK Implementation for Erlang +pkg_hpack_homepage = https://github.com/joedevivo/hpack +pkg_hpack_fetch = git +pkg_hpack_repo = https://github.com/joedevivo/hpack +pkg_hpack_commit = master + +PACKAGES += hyper +pkg_hyper_name = hyper +pkg_hyper_description = Erlang implementation of HyperLogLog +pkg_hyper_homepage = https://github.com/GameAnalytics/hyper +pkg_hyper_fetch = git +pkg_hyper_repo = https://github.com/GameAnalytics/hyper +pkg_hyper_commit = master + +PACKAGES += i18n +pkg_i18n_name = i18n +pkg_i18n_description = International components for unicode from Erlang (unicode, date, string, number, format, locale, localization, transliteration, icu4e) +pkg_i18n_homepage = https://github.com/erlang-unicode/i18n +pkg_i18n_fetch = git +pkg_i18n_repo = https://github.com/erlang-unicode/i18n +pkg_i18n_commit = master + +PACKAGES += ibrowse +pkg_ibrowse_name = ibrowse +pkg_ibrowse_description = Erlang HTTP client +pkg_ibrowse_homepage = https://github.com/cmullaparthi/ibrowse +pkg_ibrowse_fetch = git +pkg_ibrowse_repo = https://github.com/cmullaparthi/ibrowse +pkg_ibrowse_commit = master + +PACKAGES += idna +pkg_idna_name = idna +pkg_idna_description = Erlang IDNA lib +pkg_idna_homepage = https://github.com/benoitc/erlang-idna +pkg_idna_fetch = git +pkg_idna_repo = https://github.com/benoitc/erlang-idna +pkg_idna_commit = master + +PACKAGES += ierlang +pkg_ierlang_name = ierlang +pkg_ierlang_description = An Erlang language kernel for IPython. 
+pkg_ierlang_homepage = https://github.com/robbielynch/ierlang +pkg_ierlang_fetch = git +pkg_ierlang_repo = https://github.com/robbielynch/ierlang +pkg_ierlang_commit = master + +PACKAGES += iota +pkg_iota_name = iota +pkg_iota_description = iota (Inter-dependency Objective Testing Apparatus) - a tool to enforce clean separation of responsibilities in Erlang code +pkg_iota_homepage = https://github.com/jpgneves/iota +pkg_iota_fetch = git +pkg_iota_repo = https://github.com/jpgneves/iota +pkg_iota_commit = master + +PACKAGES += irc_lib +pkg_irc_lib_name = irc_lib +pkg_irc_lib_description = Erlang irc client library +pkg_irc_lib_homepage = https://github.com/OtpChatBot/irc_lib +pkg_irc_lib_fetch = git +pkg_irc_lib_repo = https://github.com/OtpChatBot/irc_lib +pkg_irc_lib_commit = master + +PACKAGES += ircd +pkg_ircd_name = ircd +pkg_ircd_description = A pluggable IRC daemon application/library for Erlang. +pkg_ircd_homepage = https://github.com/tonyg/erlang-ircd +pkg_ircd_fetch = git +pkg_ircd_repo = https://github.com/tonyg/erlang-ircd +pkg_ircd_commit = master + +PACKAGES += iris +pkg_iris_name = iris +pkg_iris_description = Iris Erlang binding +pkg_iris_homepage = https://github.com/project-iris/iris-erl +pkg_iris_fetch = git +pkg_iris_repo = https://github.com/project-iris/iris-erl +pkg_iris_commit = master + +PACKAGES += iso8601 +pkg_iso8601_name = iso8601 +pkg_iso8601_description = Erlang ISO 8601 date formatter/parser +pkg_iso8601_homepage = https://github.com/seansawyer/erlang_iso8601 +pkg_iso8601_fetch = git +pkg_iso8601_repo = https://github.com/seansawyer/erlang_iso8601 +pkg_iso8601_commit = master + +PACKAGES += jamdb_sybase +pkg_jamdb_sybase_name = jamdb_sybase +pkg_jamdb_sybase_description = Erlang driver for SAP Sybase ASE +pkg_jamdb_sybase_homepage = https://github.com/erlangbureau/jamdb_sybase +pkg_jamdb_sybase_fetch = git +pkg_jamdb_sybase_repo = https://github.com/erlangbureau/jamdb_sybase +pkg_jamdb_sybase_commit = master + +PACKAGES += jerg +pkg_jerg_name = jerg +pkg_jerg_description = JSON Schema to Erlang Records Generator +pkg_jerg_homepage = https://github.com/ddossot/jerg +pkg_jerg_fetch = git +pkg_jerg_repo = https://github.com/ddossot/jerg +pkg_jerg_commit = master + +PACKAGES += jesse +pkg_jesse_name = jesse +pkg_jesse_description = jesse (JSon Schema Erlang) is an implementation of a json schema validator for Erlang. +pkg_jesse_homepage = https://github.com/for-GET/jesse +pkg_jesse_fetch = git +pkg_jesse_repo = https://github.com/for-GET/jesse +pkg_jesse_commit = master + +PACKAGES += jiffy +pkg_jiffy_name = jiffy +pkg_jiffy_description = JSON NIFs for Erlang. 
+pkg_jiffy_homepage = https://github.com/davisp/jiffy +pkg_jiffy_fetch = git +pkg_jiffy_repo = https://github.com/davisp/jiffy +pkg_jiffy_commit = master + +PACKAGES += jiffy_v +pkg_jiffy_v_name = jiffy_v +pkg_jiffy_v_description = JSON validation utility +pkg_jiffy_v_homepage = https://github.com/shizzard/jiffy-v +pkg_jiffy_v_fetch = git +pkg_jiffy_v_repo = https://github.com/shizzard/jiffy-v +pkg_jiffy_v_commit = master + +PACKAGES += jobs +pkg_jobs_name = jobs +pkg_jobs_description = a Job scheduler for load regulation +pkg_jobs_homepage = https://github.com/esl/jobs +pkg_jobs_fetch = git +pkg_jobs_repo = https://github.com/esl/jobs +pkg_jobs_commit = master + +PACKAGES += joxa +pkg_joxa_name = joxa +pkg_joxa_description = A Modern Lisp for the Erlang VM +pkg_joxa_homepage = https://github.com/joxa/joxa +pkg_joxa_fetch = git +pkg_joxa_repo = https://github.com/joxa/joxa +pkg_joxa_commit = master + +PACKAGES += json +pkg_json_name = json +pkg_json_description = a high level json library for erlang (17.0+) +pkg_json_homepage = https://github.com/talentdeficit/json +pkg_json_fetch = git +pkg_json_repo = https://github.com/talentdeficit/json +pkg_json_commit = master + +PACKAGES += json_rec +pkg_json_rec_name = json_rec +pkg_json_rec_description = JSON to erlang record +pkg_json_rec_homepage = https://github.com/justinkirby/json_rec +pkg_json_rec_fetch = git +pkg_json_rec_repo = https://github.com/justinkirby/json_rec +pkg_json_rec_commit = master + +PACKAGES += jsone +pkg_jsone_name = jsone +pkg_jsone_description = An Erlang library for encoding, decoding JSON data. +pkg_jsone_homepage = https://github.com/sile/jsone.git +pkg_jsone_fetch = git +pkg_jsone_repo = https://github.com/sile/jsone.git +pkg_jsone_commit = master + +PACKAGES += jsonerl +pkg_jsonerl_name = jsonerl +pkg_jsonerl_description = yet another but slightly different erlang <-> json encoder/decoder +pkg_jsonerl_homepage = https://github.com/lambder/jsonerl +pkg_jsonerl_fetch = git +pkg_jsonerl_repo = https://github.com/lambder/jsonerl +pkg_jsonerl_commit = master + +PACKAGES += jsonpath +pkg_jsonpath_name = jsonpath +pkg_jsonpath_description = Fast Erlang JSON data retrieval and updates via javascript-like notation +pkg_jsonpath_homepage = https://github.com/GeneStevens/jsonpath +pkg_jsonpath_fetch = git +pkg_jsonpath_repo = https://github.com/GeneStevens/jsonpath +pkg_jsonpath_commit = master + +PACKAGES += jsonx +pkg_jsonx_name = jsonx +pkg_jsonx_description = JSONX is an Erlang library for efficient decode and encode JSON, written in C. +pkg_jsonx_homepage = https://github.com/iskra/jsonx +pkg_jsonx_fetch = git +pkg_jsonx_repo = https://github.com/iskra/jsonx +pkg_jsonx_commit = master + +PACKAGES += jsx +pkg_jsx_name = jsx +pkg_jsx_description = An Erlang application for consuming, producing and manipulating JSON. 
+pkg_jsx_homepage = https://github.com/talentdeficit/jsx +pkg_jsx_fetch = git +pkg_jsx_repo = https://github.com/talentdeficit/jsx +pkg_jsx_commit = main + +PACKAGES += kafka +pkg_kafka_name = kafka +pkg_kafka_description = Kafka consumer and producer in Erlang +pkg_kafka_homepage = https://github.com/wooga/kafka-erlang +pkg_kafka_fetch = git +pkg_kafka_repo = https://github.com/wooga/kafka-erlang +pkg_kafka_commit = master + +PACKAGES += kafka_protocol +pkg_kafka_protocol_name = kafka_protocol +pkg_kafka_protocol_description = Kafka protocol Erlang library +pkg_kafka_protocol_homepage = https://github.com/klarna/kafka_protocol +pkg_kafka_protocol_fetch = git +pkg_kafka_protocol_repo = https://github.com/klarna/kafka_protocol.git +pkg_kafka_protocol_commit = master + +PACKAGES += kai +pkg_kai_name = kai +pkg_kai_description = DHT storage by Takeshi Inoue +pkg_kai_homepage = https://github.com/synrc/kai +pkg_kai_fetch = git +pkg_kai_repo = https://github.com/synrc/kai +pkg_kai_commit = master + +PACKAGES += katja +pkg_katja_name = katja +pkg_katja_description = A simple Riemann client written in Erlang. +pkg_katja_homepage = https://github.com/nifoc/katja +pkg_katja_fetch = git +pkg_katja_repo = https://github.com/nifoc/katja +pkg_katja_commit = master + +PACKAGES += kdht +pkg_kdht_name = kdht +pkg_kdht_description = kdht is an erlang DHT implementation +pkg_kdht_homepage = https://github.com/kevinlynx/kdht +pkg_kdht_fetch = git +pkg_kdht_repo = https://github.com/kevinlynx/kdht +pkg_kdht_commit = master + +PACKAGES += key2value +pkg_key2value_name = key2value +pkg_key2value_description = Erlang 2-way map +pkg_key2value_homepage = https://github.com/okeuday/key2value +pkg_key2value_fetch = git +pkg_key2value_repo = https://github.com/okeuday/key2value +pkg_key2value_commit = master + +PACKAGES += keys1value +pkg_keys1value_name = keys1value +pkg_keys1value_description = Erlang set associative map for key lists +pkg_keys1value_homepage = https://github.com/okeuday/keys1value +pkg_keys1value_fetch = git +pkg_keys1value_repo = https://github.com/okeuday/keys1value +pkg_keys1value_commit = master + +PACKAGES += kinetic +pkg_kinetic_name = kinetic +pkg_kinetic_description = Erlang Kinesis Client +pkg_kinetic_homepage = https://github.com/AdRoll/kinetic +pkg_kinetic_fetch = git +pkg_kinetic_repo = https://github.com/AdRoll/kinetic +pkg_kinetic_commit = master + +PACKAGES += kjell +pkg_kjell_name = kjell +pkg_kjell_description = Erlang Shell +pkg_kjell_homepage = https://github.com/karlll/kjell +pkg_kjell_fetch = git +pkg_kjell_repo = https://github.com/karlll/kjell +pkg_kjell_commit = master + +PACKAGES += kraken +pkg_kraken_name = kraken +pkg_kraken_description = Distributed Pubsub Server for Realtime Apps +pkg_kraken_homepage = https://github.com/Asana/kraken +pkg_kraken_fetch = git +pkg_kraken_repo = https://github.com/Asana/kraken +pkg_kraken_commit = master + +PACKAGES += kucumberl +pkg_kucumberl_name = kucumberl +pkg_kucumberl_description = A pure-erlang, open-source, implementation of Cucumber +pkg_kucumberl_homepage = https://github.com/openshine/kucumberl +pkg_kucumberl_fetch = git +pkg_kucumberl_repo = https://github.com/openshine/kucumberl +pkg_kucumberl_commit = master + +PACKAGES += kvc +pkg_kvc_name = kvc +pkg_kvc_description = KVC - Key Value Coding for Erlang data structures +pkg_kvc_homepage = https://github.com/etrepum/kvc +pkg_kvc_fetch = git +pkg_kvc_repo = https://github.com/etrepum/kvc +pkg_kvc_commit = master + +PACKAGES += kvlists +pkg_kvlists_name = kvlists 
+pkg_kvlists_description = Lists of key-value pairs (decoded JSON) in Erlang +pkg_kvlists_homepage = https://github.com/jcomellas/kvlists +pkg_kvlists_fetch = git +pkg_kvlists_repo = https://github.com/jcomellas/kvlists +pkg_kvlists_commit = master + +PACKAGES += kvs +pkg_kvs_name = kvs +pkg_kvs_description = Container and Iterator +pkg_kvs_homepage = https://github.com/synrc/kvs +pkg_kvs_fetch = git +pkg_kvs_repo = https://github.com/synrc/kvs +pkg_kvs_commit = master + +PACKAGES += lager +pkg_lager_name = lager +pkg_lager_description = A logging framework for Erlang/OTP. +pkg_lager_homepage = https://github.com/erlang-lager/lager +pkg_lager_fetch = git +pkg_lager_repo = https://github.com/erlang-lager/lager +pkg_lager_commit = master + +PACKAGES += lager_amqp_backend +pkg_lager_amqp_backend_name = lager_amqp_backend +pkg_lager_amqp_backend_description = AMQP RabbitMQ Lager backend +pkg_lager_amqp_backend_homepage = https://github.com/jbrisbin/lager_amqp_backend +pkg_lager_amqp_backend_fetch = git +pkg_lager_amqp_backend_repo = https://github.com/jbrisbin/lager_amqp_backend +pkg_lager_amqp_backend_commit = master + +PACKAGES += lager_syslog +pkg_lager_syslog_name = lager_syslog +pkg_lager_syslog_description = Syslog backend for lager +pkg_lager_syslog_homepage = https://github.com/erlang-lager/lager_syslog +pkg_lager_syslog_fetch = git +pkg_lager_syslog_repo = https://github.com/erlang-lager/lager_syslog +pkg_lager_syslog_commit = master + +PACKAGES += lambdapad +pkg_lambdapad_name = lambdapad +pkg_lambdapad_description = Static site generator using Erlang. Yes, Erlang. +pkg_lambdapad_homepage = https://github.com/gar1t/lambdapad +pkg_lambdapad_fetch = git +pkg_lambdapad_repo = https://github.com/gar1t/lambdapad +pkg_lambdapad_commit = master + +PACKAGES += lasp +pkg_lasp_name = lasp +pkg_lasp_description = A Language for Distributed, Eventually Consistent Computations +pkg_lasp_homepage = http://lasp-lang.org/ +pkg_lasp_fetch = git +pkg_lasp_repo = https://github.com/lasp-lang/lasp +pkg_lasp_commit = master + +PACKAGES += lasse +pkg_lasse_name = lasse +pkg_lasse_description = SSE handler for Cowboy +pkg_lasse_homepage = https://github.com/inaka/lasse +pkg_lasse_fetch = git +pkg_lasse_repo = https://github.com/inaka/lasse +pkg_lasse_commit = master + +PACKAGES += ldap +pkg_ldap_name = ldap +pkg_ldap_description = LDAP server written in Erlang +pkg_ldap_homepage = https://github.com/spawnproc/ldap +pkg_ldap_fetch = git +pkg_ldap_repo = https://github.com/spawnproc/ldap +pkg_ldap_commit = master + +PACKAGES += lethink +pkg_lethink_name = lethink +pkg_lethink_description = erlang driver for rethinkdb +pkg_lethink_homepage = https://github.com/taybin/lethink +pkg_lethink_fetch = git +pkg_lethink_repo = https://github.com/taybin/lethink +pkg_lethink_commit = master + +PACKAGES += lfe +pkg_lfe_name = lfe +pkg_lfe_description = Lisp Flavoured Erlang (LFE) +pkg_lfe_homepage = https://github.com/rvirding/lfe +pkg_lfe_fetch = git +pkg_lfe_repo = https://github.com/rvirding/lfe +pkg_lfe_commit = master + +PACKAGES += ling +pkg_ling_name = ling +pkg_ling_description = Erlang on Xen +pkg_ling_homepage = https://github.com/cloudozer/ling +pkg_ling_fetch = git +pkg_ling_repo = https://github.com/cloudozer/ling +pkg_ling_commit = master + +PACKAGES += live +pkg_live_name = live +pkg_live_description = Automated module and configuration reloader. 
+pkg_live_homepage = http://ninenines.eu +pkg_live_fetch = git +pkg_live_repo = https://github.com/ninenines/live +pkg_live_commit = master + +PACKAGES += lmq +pkg_lmq_name = lmq +pkg_lmq_description = Lightweight Message Queue +pkg_lmq_homepage = https://github.com/iij/lmq +pkg_lmq_fetch = git +pkg_lmq_repo = https://github.com/iij/lmq +pkg_lmq_commit = master + +PACKAGES += locker +pkg_locker_name = locker +pkg_locker_description = Atomic distributed 'check and set' for short-lived keys +pkg_locker_homepage = https://github.com/wooga/locker +pkg_locker_fetch = git +pkg_locker_repo = https://github.com/wooga/locker +pkg_locker_commit = master + +PACKAGES += locks +pkg_locks_name = locks +pkg_locks_description = A scalable, deadlock-resolving resource locker +pkg_locks_homepage = https://github.com/uwiger/locks +pkg_locks_fetch = git +pkg_locks_repo = https://github.com/uwiger/locks +pkg_locks_commit = master + +PACKAGES += log4erl +pkg_log4erl_name = log4erl +pkg_log4erl_description = A logger for erlang in the spirit of Log4J. +pkg_log4erl_homepage = https://github.com/ahmednawras/log4erl +pkg_log4erl_fetch = git +pkg_log4erl_repo = https://github.com/ahmednawras/log4erl +pkg_log4erl_commit = master + +PACKAGES += lol +pkg_lol_name = lol +pkg_lol_description = Lisp on erLang, and programming is fun again +pkg_lol_homepage = https://github.com/b0oh/lol +pkg_lol_fetch = git +pkg_lol_repo = https://github.com/b0oh/lol +pkg_lol_commit = master + +PACKAGES += lucid +pkg_lucid_name = lucid +pkg_lucid_description = HTTP/2 server written in Erlang +pkg_lucid_homepage = https://github.com/tatsuhiro-t/lucid +pkg_lucid_fetch = git +pkg_lucid_repo = https://github.com/tatsuhiro-t/lucid +pkg_lucid_commit = master + +PACKAGES += luerl +pkg_luerl_name = luerl +pkg_luerl_description = Lua in Erlang +pkg_luerl_homepage = https://github.com/rvirding/luerl +pkg_luerl_fetch = git +pkg_luerl_repo = https://github.com/rvirding/luerl +pkg_luerl_commit = develop + +PACKAGES += luwak +pkg_luwak_name = luwak +pkg_luwak_description = Large-object storage interface for Riak +pkg_luwak_homepage = https://github.com/basho/luwak +pkg_luwak_fetch = git +pkg_luwak_repo = https://github.com/basho/luwak +pkg_luwak_commit = master + +PACKAGES += lux +pkg_lux_name = lux +pkg_lux_description = Lux (LUcid eXpect scripting) simplifies test automation and provides an Expect-style execution of commands +pkg_lux_homepage = https://github.com/hawk/lux +pkg_lux_fetch = git +pkg_lux_repo = https://github.com/hawk/lux +pkg_lux_commit = master + +PACKAGES += machi +pkg_machi_name = machi +pkg_machi_description = Machi file store +pkg_machi_homepage = https://github.com/basho/machi +pkg_machi_fetch = git +pkg_machi_repo = https://github.com/basho/machi +pkg_machi_commit = master + +PACKAGES += mad +pkg_mad_name = mad +pkg_mad_description = Small and Fast Rebar Replacement +pkg_mad_homepage = https://github.com/synrc/mad +pkg_mad_fetch = git +pkg_mad_repo = https://github.com/synrc/mad +pkg_mad_commit = master + +PACKAGES += marina +pkg_marina_name = marina +pkg_marina_description = Non-blocking Erlang Cassandra CQL3 client +pkg_marina_homepage = https://github.com/lpgauth/marina +pkg_marina_fetch = git +pkg_marina_repo = https://github.com/lpgauth/marina +pkg_marina_commit = master + +PACKAGES += mavg +pkg_mavg_name = mavg +pkg_mavg_description = Erlang :: Exponential moving average library +pkg_mavg_homepage = https://github.com/EchoTeam/mavg +pkg_mavg_fetch = git +pkg_mavg_repo = https://github.com/EchoTeam/mavg +pkg_mavg_commit = 
master + +PACKAGES += mc_erl +pkg_mc_erl_name = mc_erl +pkg_mc_erl_description = mc-erl is a server for Minecraft 1.4.7 written in Erlang. +pkg_mc_erl_homepage = https://github.com/clonejo/mc-erl +pkg_mc_erl_fetch = git +pkg_mc_erl_repo = https://github.com/clonejo/mc-erl +pkg_mc_erl_commit = master + +PACKAGES += mcd +pkg_mcd_name = mcd +pkg_mcd_description = Fast memcached protocol client in pure Erlang +pkg_mcd_homepage = https://github.com/EchoTeam/mcd +pkg_mcd_fetch = git +pkg_mcd_repo = https://github.com/EchoTeam/mcd +pkg_mcd_commit = master + +PACKAGES += mcerlang +pkg_mcerlang_name = mcerlang +pkg_mcerlang_description = The McErlang model checker for Erlang +pkg_mcerlang_homepage = https://github.com/fredlund/McErlang +pkg_mcerlang_fetch = git +pkg_mcerlang_repo = https://github.com/fredlund/McErlang +pkg_mcerlang_commit = master + +PACKAGES += meck +pkg_meck_name = meck +pkg_meck_description = A mocking library for Erlang +pkg_meck_homepage = https://github.com/eproxus/meck +pkg_meck_fetch = git +pkg_meck_repo = https://github.com/eproxus/meck +pkg_meck_commit = master + +PACKAGES += mekao +pkg_mekao_name = mekao +pkg_mekao_description = SQL constructor +pkg_mekao_homepage = https://github.com/ddosia/mekao +pkg_mekao_fetch = git +pkg_mekao_repo = https://github.com/ddosia/mekao +pkg_mekao_commit = master + +PACKAGES += memo +pkg_memo_name = memo +pkg_memo_description = Erlang memoization server +pkg_memo_homepage = https://github.com/tuncer/memo +pkg_memo_fetch = git +pkg_memo_repo = https://github.com/tuncer/memo +pkg_memo_commit = master + +PACKAGES += merge_index +pkg_merge_index_name = merge_index +pkg_merge_index_description = MergeIndex is an Erlang library for storing ordered sets on disk. It is very similar to an SSTable (in Google's Bigtable) or an HFile (in Hadoop). +pkg_merge_index_homepage = https://github.com/basho/merge_index +pkg_merge_index_fetch = git +pkg_merge_index_repo = https://github.com/basho/merge_index +pkg_merge_index_commit = master + +PACKAGES += merl +pkg_merl_name = merl +pkg_merl_description = Metaprogramming in Erlang +pkg_merl_homepage = https://github.com/richcarl/merl +pkg_merl_fetch = git +pkg_merl_repo = https://github.com/richcarl/merl +pkg_merl_commit = master + +PACKAGES += mimerl +pkg_mimerl_name = mimerl +pkg_mimerl_description = library to handle mimetypes +pkg_mimerl_homepage = https://github.com/benoitc/mimerl +pkg_mimerl_fetch = git +pkg_mimerl_repo = https://github.com/benoitc/mimerl +pkg_mimerl_commit = master + +PACKAGES += mimetypes +pkg_mimetypes_name = mimetypes +pkg_mimetypes_description = Erlang MIME types library +pkg_mimetypes_homepage = https://github.com/spawngrid/mimetypes +pkg_mimetypes_fetch = git +pkg_mimetypes_repo = https://github.com/spawngrid/mimetypes +pkg_mimetypes_commit = master + +PACKAGES += mixer +pkg_mixer_name = mixer +pkg_mixer_description = Mix in functions from other modules +pkg_mixer_homepage = https://github.com/chef/mixer +pkg_mixer_fetch = git +pkg_mixer_repo = https://github.com/chef/mixer +pkg_mixer_commit = master + +PACKAGES += mochiweb +pkg_mochiweb_name = mochiweb +pkg_mochiweb_description = MochiWeb is an Erlang library for building lightweight HTTP servers. 
+pkg_mochiweb_homepage = https://github.com/mochi/mochiweb +pkg_mochiweb_fetch = git +pkg_mochiweb_repo = https://github.com/mochi/mochiweb +pkg_mochiweb_commit = master + +PACKAGES += mochiweb_xpath +pkg_mochiweb_xpath_name = mochiweb_xpath +pkg_mochiweb_xpath_description = XPath support for mochiweb's html parser +pkg_mochiweb_xpath_homepage = https://github.com/retnuh/mochiweb_xpath +pkg_mochiweb_xpath_fetch = git +pkg_mochiweb_xpath_repo = https://github.com/retnuh/mochiweb_xpath +pkg_mochiweb_xpath_commit = master + +PACKAGES += mockgyver +pkg_mockgyver_name = mockgyver +pkg_mockgyver_description = A mocking library for Erlang +pkg_mockgyver_homepage = https://github.com/klajo/mockgyver +pkg_mockgyver_fetch = git +pkg_mockgyver_repo = https://github.com/klajo/mockgyver +pkg_mockgyver_commit = master + +PACKAGES += modlib +pkg_modlib_name = modlib +pkg_modlib_description = Web framework based on Erlang's inets httpd +pkg_modlib_homepage = https://github.com/gar1t/modlib +pkg_modlib_fetch = git +pkg_modlib_repo = https://github.com/gar1t/modlib +pkg_modlib_commit = master + +PACKAGES += mongodb +pkg_mongodb_name = mongodb +pkg_mongodb_description = MongoDB driver for Erlang +pkg_mongodb_homepage = https://github.com/comtihon/mongodb-erlang +pkg_mongodb_fetch = git +pkg_mongodb_repo = https://github.com/comtihon/mongodb-erlang +pkg_mongodb_commit = master + +PACKAGES += mongooseim +pkg_mongooseim_name = mongooseim +pkg_mongooseim_description = Jabber / XMPP server with focus on performance and scalability, by Erlang Solutions +pkg_mongooseim_homepage = https://www.erlang-solutions.com/products/mongooseim-massively-scalable-ejabberd-platform +pkg_mongooseim_fetch = git +pkg_mongooseim_repo = https://github.com/esl/MongooseIM +pkg_mongooseim_commit = master + +PACKAGES += moyo +pkg_moyo_name = moyo +pkg_moyo_description = Erlang utility functions library +pkg_moyo_homepage = https://github.com/dwango/moyo +pkg_moyo_fetch = git +pkg_moyo_repo = https://github.com/dwango/moyo +pkg_moyo_commit = master + +PACKAGES += msgpack +pkg_msgpack_name = msgpack +pkg_msgpack_description = MessagePack (de)serializer implementation for Erlang +pkg_msgpack_homepage = https://github.com/msgpack/msgpack-erlang +pkg_msgpack_fetch = git +pkg_msgpack_repo = https://github.com/msgpack/msgpack-erlang +pkg_msgpack_commit = master + +PACKAGES += mu2 +pkg_mu2_name = mu2 +pkg_mu2_description = Erlang mutation testing tool +pkg_mu2_homepage = https://github.com/ramsay-t/mu2 +pkg_mu2_fetch = git +pkg_mu2_repo = https://github.com/ramsay-t/mu2 +pkg_mu2_commit = master + +PACKAGES += mustache +pkg_mustache_name = mustache +pkg_mustache_description = Mustache template engine for Erlang. 
+pkg_mustache_homepage = https://github.com/mojombo/mustache.erl +pkg_mustache_fetch = git +pkg_mustache_repo = https://github.com/mojombo/mustache.erl +pkg_mustache_commit = master + +PACKAGES += myproto +pkg_myproto_name = myproto +pkg_myproto_description = MySQL Server Protocol in Erlang +pkg_myproto_homepage = https://github.com/altenwald/myproto +pkg_myproto_fetch = git +pkg_myproto_repo = https://github.com/altenwald/myproto +pkg_myproto_commit = master + +PACKAGES += mysql +pkg_mysql_name = mysql +pkg_mysql_description = MySQL client library for Erlang/OTP +pkg_mysql_homepage = https://github.com/mysql-otp/mysql-otp +pkg_mysql_fetch = git +pkg_mysql_repo = https://github.com/mysql-otp/mysql-otp +pkg_mysql_commit = 1.5.1 + +PACKAGES += n2o +pkg_n2o_name = n2o +pkg_n2o_description = WebSocket Application Server +pkg_n2o_homepage = https://github.com/5HT/n2o +pkg_n2o_fetch = git +pkg_n2o_repo = https://github.com/5HT/n2o +pkg_n2o_commit = master + +PACKAGES += nat_upnp +pkg_nat_upnp_name = nat_upnp +pkg_nat_upnp_description = Erlang library to map your internal port to an external one using UPnP IGD +pkg_nat_upnp_homepage = https://github.com/benoitc/nat_upnp +pkg_nat_upnp_fetch = git +pkg_nat_upnp_repo = https://github.com/benoitc/nat_upnp +pkg_nat_upnp_commit = master + +PACKAGES += neo4j +pkg_neo4j_name = neo4j +pkg_neo4j_description = Erlang client library for Neo4J. +pkg_neo4j_homepage = https://github.com/dmitriid/neo4j-erlang +pkg_neo4j_fetch = git +pkg_neo4j_repo = https://github.com/dmitriid/neo4j-erlang +pkg_neo4j_commit = master + +PACKAGES += neotoma +pkg_neotoma_name = neotoma +pkg_neotoma_description = Erlang library and packrat parser-generator for parsing expression grammars. +pkg_neotoma_homepage = https://github.com/seancribbs/neotoma +pkg_neotoma_fetch = git +pkg_neotoma_repo = https://github.com/seancribbs/neotoma +pkg_neotoma_commit = master + +PACKAGES += newrelic +pkg_newrelic_name = newrelic +pkg_newrelic_description = Erlang library for sending metrics to New Relic +pkg_newrelic_homepage = https://github.com/wooga/newrelic-erlang +pkg_newrelic_fetch = git +pkg_newrelic_repo = https://github.com/wooga/newrelic-erlang +pkg_newrelic_commit = master + +PACKAGES += nifty +pkg_nifty_name = nifty +pkg_nifty_description = Erlang NIF wrapper generator +pkg_nifty_homepage = https://github.com/parapluu/nifty +pkg_nifty_fetch = git +pkg_nifty_repo = https://github.com/parapluu/nifty +pkg_nifty_commit = master + +PACKAGES += nitrogen_core +pkg_nitrogen_core_name = nitrogen_core +pkg_nitrogen_core_description = The core Nitrogen library. 
+pkg_nitrogen_core_homepage = http://nitrogenproject.com/ +pkg_nitrogen_core_fetch = git +pkg_nitrogen_core_repo = https://github.com/nitrogen/nitrogen_core +pkg_nitrogen_core_commit = master + +PACKAGES += nkbase +pkg_nkbase_name = nkbase +pkg_nkbase_description = NkBASE distributed database +pkg_nkbase_homepage = https://github.com/Nekso/nkbase +pkg_nkbase_fetch = git +pkg_nkbase_repo = https://github.com/Nekso/nkbase +pkg_nkbase_commit = develop + +PACKAGES += nkdocker +pkg_nkdocker_name = nkdocker +pkg_nkdocker_description = Erlang Docker client +pkg_nkdocker_homepage = https://github.com/Nekso/nkdocker +pkg_nkdocker_fetch = git +pkg_nkdocker_repo = https://github.com/Nekso/nkdocker +pkg_nkdocker_commit = master + +PACKAGES += nkpacket +pkg_nkpacket_name = nkpacket +pkg_nkpacket_description = Generic Erlang transport layer +pkg_nkpacket_homepage = https://github.com/Nekso/nkpacket +pkg_nkpacket_fetch = git +pkg_nkpacket_repo = https://github.com/Nekso/nkpacket +pkg_nkpacket_commit = master + +PACKAGES += nksip +pkg_nksip_name = nksip +pkg_nksip_description = Erlang SIP application server +pkg_nksip_homepage = https://github.com/kalta/nksip +pkg_nksip_fetch = git +pkg_nksip_repo = https://github.com/kalta/nksip +pkg_nksip_commit = master + +PACKAGES += nodefinder +pkg_nodefinder_name = nodefinder +pkg_nodefinder_description = automatic node discovery via UDP multicast +pkg_nodefinder_homepage = https://github.com/erlanger/nodefinder +pkg_nodefinder_fetch = git +pkg_nodefinder_repo = https://github.com/okeuday/nodefinder +pkg_nodefinder_commit = master + +PACKAGES += nprocreg +pkg_nprocreg_name = nprocreg +pkg_nprocreg_description = Minimal Distributed Erlang Process Registry +pkg_nprocreg_homepage = http://nitrogenproject.com/ +pkg_nprocreg_fetch = git +pkg_nprocreg_repo = https://github.com/nitrogen/nprocreg +pkg_nprocreg_commit = master + +PACKAGES += oauth +pkg_oauth_name = oauth +pkg_oauth_description = An Erlang OAuth 1.0 implementation +pkg_oauth_homepage = https://github.com/tim/erlang-oauth +pkg_oauth_fetch = git +pkg_oauth_repo = https://github.com/tim/erlang-oauth +pkg_oauth_commit = master + +PACKAGES += oauth2 +pkg_oauth2_name = oauth2 +pkg_oauth2_description = Erlang Oauth2 implementation +pkg_oauth2_homepage = https://github.com/kivra/oauth2 +pkg_oauth2_fetch = git +pkg_oauth2_repo = https://github.com/kivra/oauth2 +pkg_oauth2_commit = master + +PACKAGES += observer_cli +pkg_observer_cli_name = observer_cli +pkg_observer_cli_description = Visualize Erlang/Elixir Nodes On The Command Line +pkg_observer_cli_homepage = http://zhongwencool.github.io/observer_cli +pkg_observer_cli_fetch = git +pkg_observer_cli_repo = https://github.com/zhongwencool/observer_cli +pkg_observer_cli_commit = master + +PACKAGES += octopus +pkg_octopus_name = octopus +pkg_octopus_description = Small and flexible pool manager written in Erlang +pkg_octopus_homepage = https://github.com/erlangbureau/octopus +pkg_octopus_fetch = git +pkg_octopus_repo = https://github.com/erlangbureau/octopus +pkg_octopus_commit = master + +PACKAGES += of_protocol +pkg_of_protocol_name = of_protocol +pkg_of_protocol_description = OpenFlow Protocol Library for Erlang +pkg_of_protocol_homepage = https://github.com/FlowForwarding/of_protocol +pkg_of_protocol_fetch = git +pkg_of_protocol_repo = https://github.com/FlowForwarding/of_protocol +pkg_of_protocol_commit = master + +PACKAGES += opencouch +pkg_opencouch_name = couch +pkg_opencouch_description = An embeddable document oriented database compatible with Apache CouchDB 
+pkg_opencouch_homepage = https://github.com/benoitc/opencouch +pkg_opencouch_fetch = git +pkg_opencouch_repo = https://github.com/benoitc/opencouch +pkg_opencouch_commit = master + +PACKAGES += openflow +pkg_openflow_name = openflow +pkg_openflow_description = An OpenFlow controller written in pure erlang +pkg_openflow_homepage = https://github.com/renatoaguiar/erlang-openflow +pkg_openflow_fetch = git +pkg_openflow_repo = https://github.com/renatoaguiar/erlang-openflow +pkg_openflow_commit = master + +PACKAGES += openid +pkg_openid_name = openid +pkg_openid_description = Erlang OpenID +pkg_openid_homepage = https://github.com/brendonh/erl_openid +pkg_openid_fetch = git +pkg_openid_repo = https://github.com/brendonh/erl_openid +pkg_openid_commit = master + +PACKAGES += openpoker +pkg_openpoker_name = openpoker +pkg_openpoker_description = Genesis Texas hold'em Game Server +pkg_openpoker_homepage = https://github.com/hpyhacking/openpoker +pkg_openpoker_fetch = git +pkg_openpoker_repo = https://github.com/hpyhacking/openpoker +pkg_openpoker_commit = master + +PACKAGES += otpbp +pkg_otpbp_name = otpbp +pkg_otpbp_description = Parse transformer for use new OTP functions in old Erlang/OTP releases (R15, R16, 17, 18, 19) +pkg_otpbp_homepage = https://github.com/Ledest/otpbp +pkg_otpbp_fetch = git +pkg_otpbp_repo = https://github.com/Ledest/otpbp +pkg_otpbp_commit = master + +PACKAGES += pal +pkg_pal_name = pal +pkg_pal_description = Pragmatic Authentication Library +pkg_pal_homepage = https://github.com/manifest/pal +pkg_pal_fetch = git +pkg_pal_repo = https://github.com/manifest/pal +pkg_pal_commit = master + +PACKAGES += parse_trans +pkg_parse_trans_name = parse_trans +pkg_parse_trans_description = Parse transform utilities for Erlang +pkg_parse_trans_homepage = https://github.com/uwiger/parse_trans +pkg_parse_trans_fetch = git +pkg_parse_trans_repo = https://github.com/uwiger/parse_trans +pkg_parse_trans_commit = master + +PACKAGES += parsexml +pkg_parsexml_name = parsexml +pkg_parsexml_description = Simple DOM XML parser with convenient and very simple API +pkg_parsexml_homepage = https://github.com/maxlapshin/parsexml +pkg_parsexml_fetch = git +pkg_parsexml_repo = https://github.com/maxlapshin/parsexml +pkg_parsexml_commit = master + +PACKAGES += partisan +pkg_partisan_name = partisan +pkg_partisan_description = High-performance, high-scalability distributed computing with Erlang and Elixir. +pkg_partisan_homepage = http://partisan.cloud +pkg_partisan_fetch = git +pkg_partisan_repo = https://github.com/lasp-lang/partisan +pkg_partisan_commit = master + +PACKAGES += pegjs +pkg_pegjs_name = pegjs +pkg_pegjs_description = An implementation of PEG.js grammar for Erlang. 
+pkg_pegjs_homepage = https://github.com/dmitriid/pegjs +pkg_pegjs_fetch = git +pkg_pegjs_repo = https://github.com/dmitriid/pegjs +pkg_pegjs_commit = master + +PACKAGES += percept2 +pkg_percept2_name = percept2 +pkg_percept2_description = Concurrent profiling tool for Erlang +pkg_percept2_homepage = https://github.com/huiqing/percept2 +pkg_percept2_fetch = git +pkg_percept2_repo = https://github.com/huiqing/percept2 +pkg_percept2_commit = master + +PACKAGES += pgo +pkg_pgo_name = pgo +pkg_pgo_description = Erlang Postgres client and connection pool +pkg_pgo_homepage = https://github.com/erleans/pgo.git +pkg_pgo_fetch = git +pkg_pgo_repo = https://github.com/erleans/pgo.git +pkg_pgo_commit = master + +PACKAGES += pgsql +pkg_pgsql_name = pgsql +pkg_pgsql_description = Erlang PostgreSQL driver +pkg_pgsql_homepage = https://github.com/semiocast/pgsql +pkg_pgsql_fetch = git +pkg_pgsql_repo = https://github.com/semiocast/pgsql +pkg_pgsql_commit = master + +PACKAGES += pkgx +pkg_pkgx_name = pkgx +pkg_pkgx_description = Build .deb packages from Erlang releases +pkg_pkgx_homepage = https://github.com/arjan/pkgx +pkg_pkgx_fetch = git +pkg_pkgx_repo = https://github.com/arjan/pkgx +pkg_pkgx_commit = master + +PACKAGES += pkt +pkg_pkt_name = pkt +pkg_pkt_description = Erlang network protocol library +pkg_pkt_homepage = https://github.com/msantos/pkt +pkg_pkt_fetch = git +pkg_pkt_repo = https://github.com/msantos/pkt +pkg_pkt_commit = master + +PACKAGES += plain_fsm +pkg_plain_fsm_name = plain_fsm +pkg_plain_fsm_description = A behaviour/support library for writing plain Erlang FSMs. +pkg_plain_fsm_homepage = https://github.com/uwiger/plain_fsm +pkg_plain_fsm_fetch = git +pkg_plain_fsm_repo = https://github.com/uwiger/plain_fsm +pkg_plain_fsm_commit = master + +PACKAGES += plumtree +pkg_plumtree_name = plumtree +pkg_plumtree_description = Epidemic Broadcast Trees +pkg_plumtree_homepage = https://github.com/helium/plumtree +pkg_plumtree_fetch = git +pkg_plumtree_repo = https://github.com/helium/plumtree +pkg_plumtree_commit = master + +PACKAGES += pmod_transform +pkg_pmod_transform_name = pmod_transform +pkg_pmod_transform_description = Parse transform for parameterized modules +pkg_pmod_transform_homepage = https://github.com/erlang/pmod_transform +pkg_pmod_transform_fetch = git +pkg_pmod_transform_repo = https://github.com/erlang/pmod_transform +pkg_pmod_transform_commit = master + +PACKAGES += pobox +pkg_pobox_name = pobox +pkg_pobox_description = External buffer processes to protect against mailbox overflow in Erlang +pkg_pobox_homepage = https://github.com/ferd/pobox +pkg_pobox_fetch = git +pkg_pobox_repo = https://github.com/ferd/pobox +pkg_pobox_commit = master + +PACKAGES += ponos +pkg_ponos_name = ponos +pkg_ponos_description = ponos is a simple yet powerful load generator written in erlang +pkg_ponos_homepage = https://github.com/klarna/ponos +pkg_ponos_fetch = git +pkg_ponos_repo = https://github.com/klarna/ponos +pkg_ponos_commit = master + +PACKAGES += poolboy +pkg_poolboy_name = poolboy +pkg_poolboy_description = A hunky Erlang worker pool factory +pkg_poolboy_homepage = https://github.com/devinus/poolboy +pkg_poolboy_fetch = git +pkg_poolboy_repo = https://github.com/devinus/poolboy +pkg_poolboy_commit = master + +PACKAGES += pooler +pkg_pooler_name = pooler +pkg_pooler_description = An OTP Process Pool Application +pkg_pooler_homepage = https://github.com/seth/pooler +pkg_pooler_fetch = git +pkg_pooler_repo = https://github.com/seth/pooler +pkg_pooler_commit = master + +PACKAGES += 
pqueue +pkg_pqueue_name = pqueue +pkg_pqueue_description = Erlang Priority Queues +pkg_pqueue_homepage = https://github.com/okeuday/pqueue +pkg_pqueue_fetch = git +pkg_pqueue_repo = https://github.com/okeuday/pqueue +pkg_pqueue_commit = master + +PACKAGES += procket +pkg_procket_name = procket +pkg_procket_description = Erlang interface to low level socket operations +pkg_procket_homepage = http://blog.listincomprehension.com/search/label/procket +pkg_procket_fetch = git +pkg_procket_repo = https://github.com/msantos/procket +pkg_procket_commit = master + +PACKAGES += prometheus +pkg_prometheus_name = prometheus +pkg_prometheus_description = Prometheus.io client in Erlang +pkg_prometheus_homepage = https://github.com/deadtrickster/prometheus.erl +pkg_prometheus_fetch = git +pkg_prometheus_repo = https://github.com/deadtrickster/prometheus.erl +pkg_prometheus_commit = master + +PACKAGES += prop +pkg_prop_name = prop +pkg_prop_description = An Erlang code scaffolding and generator system. +pkg_prop_homepage = https://github.com/nuex/prop +pkg_prop_fetch = git +pkg_prop_repo = https://github.com/nuex/prop +pkg_prop_commit = master + +PACKAGES += proper +pkg_proper_name = proper +pkg_proper_description = PropEr: a QuickCheck-inspired property-based testing tool for Erlang. +pkg_proper_homepage = http://proper.softlab.ntua.gr +pkg_proper_fetch = git +pkg_proper_repo = https://github.com/manopapad/proper +pkg_proper_commit = master + +PACKAGES += props +pkg_props_name = props +pkg_props_description = Property structure library +pkg_props_homepage = https://github.com/greyarea/props +pkg_props_fetch = git +pkg_props_repo = https://github.com/greyarea/props +pkg_props_commit = master + +PACKAGES += protobuffs +pkg_protobuffs_name = protobuffs +pkg_protobuffs_description = An implementation of Google's Protocol Buffers for Erlang, based on ngerakines/erlang_protobuffs. +pkg_protobuffs_homepage = https://github.com/basho/erlang_protobuffs +pkg_protobuffs_fetch = git +pkg_protobuffs_repo = https://github.com/basho/erlang_protobuffs +pkg_protobuffs_commit = master + +PACKAGES += psycho +pkg_psycho_name = psycho +pkg_psycho_description = HTTP server that provides a WSGI-like interface for applications and middleware. +pkg_psycho_homepage = https://github.com/gar1t/psycho +pkg_psycho_fetch = git +pkg_psycho_repo = https://github.com/gar1t/psycho +pkg_psycho_commit = master + +PACKAGES += purity +pkg_purity_name = purity +pkg_purity_description = A side-effect analyzer for Erlang +pkg_purity_homepage = https://github.com/mpitid/purity +pkg_purity_fetch = git +pkg_purity_repo = https://github.com/mpitid/purity +pkg_purity_commit = master + +PACKAGES += push_service +pkg_push_service_name = push_service +pkg_push_service_description = Push service +pkg_push_service_homepage = https://github.com/hairyhum/push_service +pkg_push_service_fetch = git +pkg_push_service_repo = https://github.com/hairyhum/push_service +pkg_push_service_commit = master + +PACKAGES += qdate +pkg_qdate_name = qdate +pkg_qdate_description = Date, time, and timezone parsing, formatting, and conversion for Erlang. 
+pkg_qdate_homepage = https://github.com/choptastic/qdate +pkg_qdate_fetch = git +pkg_qdate_repo = https://github.com/choptastic/qdate +pkg_qdate_commit = master + +PACKAGES += qrcode +pkg_qrcode_name = qrcode +pkg_qrcode_description = QR Code encoder in Erlang +pkg_qrcode_homepage = https://github.com/komone/qrcode +pkg_qrcode_fetch = git +pkg_qrcode_repo = https://github.com/komone/qrcode +pkg_qrcode_commit = master + +PACKAGES += quest +pkg_quest_name = quest +pkg_quest_description = Learn Erlang through this set of challenges. An interactive system for getting to know Erlang. +pkg_quest_homepage = https://github.com/eriksoe/ErlangQuest +pkg_quest_fetch = git +pkg_quest_repo = https://github.com/eriksoe/ErlangQuest +pkg_quest_commit = master + +PACKAGES += quickrand +pkg_quickrand_name = quickrand +pkg_quickrand_description = Quick Erlang Random Number Generation +pkg_quickrand_homepage = https://github.com/okeuday/quickrand +pkg_quickrand_fetch = git +pkg_quickrand_repo = https://github.com/okeuday/quickrand +pkg_quickrand_commit = master + +PACKAGES += rabbit +pkg_rabbit_name = rabbit +pkg_rabbit_description = RabbitMQ Server +pkg_rabbit_homepage = https://www.rabbitmq.com/ +pkg_rabbit_fetch = git +pkg_rabbit_repo = https://github.com/rabbitmq/rabbitmq-server.git +pkg_rabbit_commit = master + +PACKAGES += rabbit_exchange_type_riak +pkg_rabbit_exchange_type_riak_name = rabbit_exchange_type_riak +pkg_rabbit_exchange_type_riak_description = Custom RabbitMQ exchange type for sticking messages in Riak +pkg_rabbit_exchange_type_riak_homepage = https://github.com/jbrisbin/riak-exchange +pkg_rabbit_exchange_type_riak_fetch = git +pkg_rabbit_exchange_type_riak_repo = https://github.com/jbrisbin/riak-exchange +pkg_rabbit_exchange_type_riak_commit = master + +PACKAGES += rack +pkg_rack_name = rack +pkg_rack_description = Rack handler for erlang +pkg_rack_homepage = https://github.com/erlyvideo/rack +pkg_rack_fetch = git +pkg_rack_repo = https://github.com/erlyvideo/rack +pkg_rack_commit = master + +PACKAGES += radierl +pkg_radierl_name = radierl +pkg_radierl_description = RADIUS protocol stack implemented in Erlang. +pkg_radierl_homepage = https://github.com/vances/radierl +pkg_radierl_fetch = git +pkg_radierl_repo = https://github.com/vances/radierl +pkg_radierl_commit = master + +PACKAGES += rafter +pkg_rafter_name = rafter +pkg_rafter_description = An Erlang library application which implements the Raft consensus protocol +pkg_rafter_homepage = https://github.com/andrewjstone/rafter +pkg_rafter_fetch = git +pkg_rafter_repo = https://github.com/andrewjstone/rafter +pkg_rafter_commit = master + +PACKAGES += ranch +pkg_ranch_name = ranch +pkg_ranch_description = Socket acceptor pool for TCP protocols. +pkg_ranch_homepage = http://ninenines.eu +pkg_ranch_fetch = git +pkg_ranch_repo = https://github.com/ninenines/ranch +pkg_ranch_commit = 1.2.1 + +PACKAGES += rbeacon +pkg_rbeacon_name = rbeacon +pkg_rbeacon_description = LAN discovery and presence in Erlang. +pkg_rbeacon_homepage = https://github.com/refuge/rbeacon +pkg_rbeacon_fetch = git +pkg_rbeacon_repo = https://github.com/refuge/rbeacon +pkg_rbeacon_commit = master + +PACKAGES += rebar +pkg_rebar_name = rebar +pkg_rebar_description = Erlang build tool that makes it easy to compile and test Erlang applications, port drivers and releases. 
+pkg_rebar_homepage = http://www.rebar3.org +pkg_rebar_fetch = git +pkg_rebar_repo = https://github.com/rebar/rebar3 +pkg_rebar_commit = master + +PACKAGES += rebus +pkg_rebus_name = rebus +pkg_rebus_description = A stupid simple, internal, pub/sub event bus written in- and for Erlang. +pkg_rebus_homepage = https://github.com/olle/rebus +pkg_rebus_fetch = git +pkg_rebus_repo = https://github.com/olle/rebus +pkg_rebus_commit = master + +PACKAGES += rec2json +pkg_rec2json_name = rec2json +pkg_rec2json_description = Compile erlang record definitions into modules to convert them to/from json easily. +pkg_rec2json_homepage = https://github.com/lordnull/rec2json +pkg_rec2json_fetch = git +pkg_rec2json_repo = https://github.com/lordnull/rec2json +pkg_rec2json_commit = master + +PACKAGES += recon +pkg_recon_name = recon +pkg_recon_description = Collection of functions and scripts to debug Erlang in production. +pkg_recon_homepage = https://github.com/ferd/recon +pkg_recon_fetch = git +pkg_recon_repo = https://github.com/ferd/recon +pkg_recon_commit = master + +PACKAGES += record_info +pkg_record_info_name = record_info +pkg_record_info_description = Convert between record and proplist +pkg_record_info_homepage = https://github.com/bipthelin/erlang-record_info +pkg_record_info_fetch = git +pkg_record_info_repo = https://github.com/bipthelin/erlang-record_info +pkg_record_info_commit = master + +PACKAGES += redgrid +pkg_redgrid_name = redgrid +pkg_redgrid_description = automatic Erlang node discovery via redis +pkg_redgrid_homepage = https://github.com/jkvor/redgrid +pkg_redgrid_fetch = git +pkg_redgrid_repo = https://github.com/jkvor/redgrid +pkg_redgrid_commit = master + +PACKAGES += redo +pkg_redo_name = redo +pkg_redo_description = pipelined erlang redis client +pkg_redo_homepage = https://github.com/jkvor/redo +pkg_redo_fetch = git +pkg_redo_repo = https://github.com/jkvor/redo +pkg_redo_commit = master + +PACKAGES += reload_mk +pkg_reload_mk_name = reload_mk +pkg_reload_mk_description = Live reload plugin for erlang.mk. +pkg_reload_mk_homepage = https://github.com/bullno1/reload.mk +pkg_reload_mk_fetch = git +pkg_reload_mk_repo = https://github.com/bullno1/reload.mk +pkg_reload_mk_commit = master + +PACKAGES += reltool_util +pkg_reltool_util_name = reltool_util +pkg_reltool_util_description = Erlang reltool utility functionality application +pkg_reltool_util_homepage = https://github.com/okeuday/reltool_util +pkg_reltool_util_fetch = git +pkg_reltool_util_repo = https://github.com/okeuday/reltool_util +pkg_reltool_util_commit = master + +PACKAGES += relx +pkg_relx_name = relx +pkg_relx_description = Sane, simple release creation for Erlang +pkg_relx_homepage = https://github.com/erlware/relx +pkg_relx_fetch = git +pkg_relx_repo = https://github.com/erlware/relx +pkg_relx_commit = master + +PACKAGES += resource_discovery +pkg_resource_discovery_name = resource_discovery +pkg_resource_discovery_description = An application used to dynamically discover resources present in an Erlang node cluster. 
+pkg_resource_discovery_homepage = http://erlware.org/ +pkg_resource_discovery_fetch = git +pkg_resource_discovery_repo = https://github.com/erlware/resource_discovery +pkg_resource_discovery_commit = master + +PACKAGES += restc +pkg_restc_name = restc +pkg_restc_description = Erlang Rest Client +pkg_restc_homepage = https://github.com/kivra/restclient +pkg_restc_fetch = git +pkg_restc_repo = https://github.com/kivra/restclient +pkg_restc_commit = master + +PACKAGES += rfc4627_jsonrpc +pkg_rfc4627_jsonrpc_name = rfc4627_jsonrpc +pkg_rfc4627_jsonrpc_description = Erlang RFC4627 (JSON) codec and JSON-RPC server implementation. +pkg_rfc4627_jsonrpc_homepage = https://github.com/tonyg/erlang-rfc4627 +pkg_rfc4627_jsonrpc_fetch = git +pkg_rfc4627_jsonrpc_repo = https://github.com/tonyg/erlang-rfc4627 +pkg_rfc4627_jsonrpc_commit = master + +PACKAGES += riak_control +pkg_riak_control_name = riak_control +pkg_riak_control_description = Webmachine-based administration interface for Riak. +pkg_riak_control_homepage = https://github.com/basho/riak_control +pkg_riak_control_fetch = git +pkg_riak_control_repo = https://github.com/basho/riak_control +pkg_riak_control_commit = master + +PACKAGES += riak_core +pkg_riak_core_name = riak_core +pkg_riak_core_description = Distributed systems infrastructure used by Riak. +pkg_riak_core_homepage = https://github.com/basho/riak_core +pkg_riak_core_fetch = git +pkg_riak_core_repo = https://github.com/basho/riak_core +pkg_riak_core_commit = master + +PACKAGES += riak_dt +pkg_riak_dt_name = riak_dt +pkg_riak_dt_description = Convergent replicated datatypes in Erlang +pkg_riak_dt_homepage = https://github.com/basho/riak_dt +pkg_riak_dt_fetch = git +pkg_riak_dt_repo = https://github.com/basho/riak_dt +pkg_riak_dt_commit = master + +PACKAGES += riak_ensemble +pkg_riak_ensemble_name = riak_ensemble +pkg_riak_ensemble_description = Multi-Paxos framework in Erlang +pkg_riak_ensemble_homepage = https://github.com/basho/riak_ensemble +pkg_riak_ensemble_fetch = git +pkg_riak_ensemble_repo = https://github.com/basho/riak_ensemble +pkg_riak_ensemble_commit = master + +PACKAGES += riak_kv +pkg_riak_kv_name = riak_kv +pkg_riak_kv_description = Riak Key/Value Store +pkg_riak_kv_homepage = https://github.com/basho/riak_kv +pkg_riak_kv_fetch = git +pkg_riak_kv_repo = https://github.com/basho/riak_kv +pkg_riak_kv_commit = master + +PACKAGES += riak_pg +pkg_riak_pg_name = riak_pg +pkg_riak_pg_description = Distributed process groups with riak_core. 
+pkg_riak_pg_homepage = https://github.com/cmeiklejohn/riak_pg +pkg_riak_pg_fetch = git +pkg_riak_pg_repo = https://github.com/cmeiklejohn/riak_pg +pkg_riak_pg_commit = master + +PACKAGES += riak_pipe +pkg_riak_pipe_name = riak_pipe +pkg_riak_pipe_description = Riak Pipelines +pkg_riak_pipe_homepage = https://github.com/basho/riak_pipe +pkg_riak_pipe_fetch = git +pkg_riak_pipe_repo = https://github.com/basho/riak_pipe +pkg_riak_pipe_commit = master + +PACKAGES += riak_sysmon +pkg_riak_sysmon_name = riak_sysmon +pkg_riak_sysmon_description = Simple OTP app for managing Erlang VM system_monitor event messages +pkg_riak_sysmon_homepage = https://github.com/basho/riak_sysmon +pkg_riak_sysmon_fetch = git +pkg_riak_sysmon_repo = https://github.com/basho/riak_sysmon +pkg_riak_sysmon_commit = master + +PACKAGES += riak_test +pkg_riak_test_name = riak_test +pkg_riak_test_description = I'm in your cluster, testing your riaks +pkg_riak_test_homepage = https://github.com/basho/riak_test +pkg_riak_test_fetch = git +pkg_riak_test_repo = https://github.com/basho/riak_test +pkg_riak_test_commit = master + +PACKAGES += riakc +pkg_riakc_name = riakc +pkg_riakc_description = Erlang clients for Riak. +pkg_riakc_homepage = https://github.com/basho/riak-erlang-client +pkg_riakc_fetch = git +pkg_riakc_repo = https://github.com/basho/riak-erlang-client +pkg_riakc_commit = master + +PACKAGES += riakhttpc +pkg_riakhttpc_name = riakhttpc +pkg_riakhttpc_description = Riak Erlang client using the HTTP interface +pkg_riakhttpc_homepage = https://github.com/basho/riak-erlang-http-client +pkg_riakhttpc_fetch = git +pkg_riakhttpc_repo = https://github.com/basho/riak-erlang-http-client +pkg_riakhttpc_commit = master + +PACKAGES += riaknostic +pkg_riaknostic_name = riaknostic +pkg_riaknostic_description = A diagnostic tool for Riak installations, to find common errors asap +pkg_riaknostic_homepage = https://github.com/basho/riaknostic +pkg_riaknostic_fetch = git +pkg_riaknostic_repo = https://github.com/basho/riaknostic +pkg_riaknostic_commit = master + +PACKAGES += riakpool +pkg_riakpool_name = riakpool +pkg_riakpool_description = erlang riak client pool +pkg_riakpool_homepage = https://github.com/dweldon/riakpool +pkg_riakpool_fetch = git +pkg_riakpool_repo = https://github.com/dweldon/riakpool +pkg_riakpool_commit = master + +PACKAGES += rivus_cep +pkg_rivus_cep_name = rivus_cep +pkg_rivus_cep_description = Complex event processing in Erlang +pkg_rivus_cep_homepage = https://github.com/vascokk/rivus_cep +pkg_rivus_cep_fetch = git +pkg_rivus_cep_repo = https://github.com/vascokk/rivus_cep +pkg_rivus_cep_commit = master + +PACKAGES += rlimit +pkg_rlimit_name = rlimit +pkg_rlimit_description = Magnus Klaar's rate limiter code from etorrent +pkg_rlimit_homepage = https://github.com/jlouis/rlimit +pkg_rlimit_fetch = git +pkg_rlimit_repo = https://github.com/jlouis/rlimit +pkg_rlimit_commit = master + +PACKAGES += rust_mk +pkg_rust_mk_name = rust_mk +pkg_rust_mk_description = Build Rust crates in an Erlang application +pkg_rust_mk_homepage = https://github.com/goertzenator/rust.mk +pkg_rust_mk_fetch = git +pkg_rust_mk_repo = https://github.com/goertzenator/rust.mk +pkg_rust_mk_commit = master + +PACKAGES += safetyvalve +pkg_safetyvalve_name = safetyvalve +pkg_safetyvalve_description = A safety valve for your erlang node +pkg_safetyvalve_homepage = https://github.com/jlouis/safetyvalve +pkg_safetyvalve_fetch = git +pkg_safetyvalve_repo = https://github.com/jlouis/safetyvalve +pkg_safetyvalve_commit = master + +PACKAGES += 
seestar +pkg_seestar_name = seestar +pkg_seestar_description = The Erlang client for Cassandra 1.2+ binary protocol +pkg_seestar_homepage = https://github.com/iamaleksey/seestar +pkg_seestar_fetch = git +pkg_seestar_repo = https://github.com/iamaleksey/seestar +pkg_seestar_commit = master + +PACKAGES += service +pkg_service_name = service +pkg_service_description = A minimal Erlang behavior for creating CloudI internal services +pkg_service_homepage = http://cloudi.org/ +pkg_service_fetch = git +pkg_service_repo = https://github.com/CloudI/service +pkg_service_commit = master + +PACKAGES += setup +pkg_setup_name = setup +pkg_setup_description = Generic setup utility for Erlang-based systems +pkg_setup_homepage = https://github.com/uwiger/setup +pkg_setup_fetch = git +pkg_setup_repo = https://github.com/uwiger/setup +pkg_setup_commit = master + +PACKAGES += sext +pkg_sext_name = sext +pkg_sext_description = Sortable Erlang Term Serialization +pkg_sext_homepage = https://github.com/uwiger/sext +pkg_sext_fetch = git +pkg_sext_repo = https://github.com/uwiger/sext +pkg_sext_commit = master + +PACKAGES += sfmt +pkg_sfmt_name = sfmt +pkg_sfmt_description = SFMT pseudo random number generator for Erlang. +pkg_sfmt_homepage = https://github.com/jj1bdx/sfmt-erlang +pkg_sfmt_fetch = git +pkg_sfmt_repo = https://github.com/jj1bdx/sfmt-erlang +pkg_sfmt_commit = master + +PACKAGES += sgte +pkg_sgte_name = sgte +pkg_sgte_description = A simple Erlang Template Engine +pkg_sgte_homepage = https://github.com/filippo/sgte +pkg_sgte_fetch = git +pkg_sgte_repo = https://github.com/filippo/sgte +pkg_sgte_commit = master + +PACKAGES += sheriff +pkg_sheriff_name = sheriff +pkg_sheriff_description = Parse transform for type based validation. +pkg_sheriff_homepage = http://ninenines.eu +pkg_sheriff_fetch = git +pkg_sheriff_repo = https://github.com/extend/sheriff +pkg_sheriff_commit = master + +PACKAGES += shotgun +pkg_shotgun_name = shotgun +pkg_shotgun_description = better than just a gun +pkg_shotgun_homepage = https://github.com/inaka/shotgun +pkg_shotgun_fetch = git +pkg_shotgun_repo = https://github.com/inaka/shotgun +pkg_shotgun_commit = master + +PACKAGES += sidejob +pkg_sidejob_name = sidejob +pkg_sidejob_description = Parallel worker and capacity limiting library for Erlang +pkg_sidejob_homepage = https://github.com/basho/sidejob +pkg_sidejob_fetch = git +pkg_sidejob_repo = https://github.com/basho/sidejob +pkg_sidejob_commit = master + +PACKAGES += sieve +pkg_sieve_name = sieve +pkg_sieve_description = sieve is a simple TCP routing proxy (layer 7) in erlang +pkg_sieve_homepage = https://github.com/benoitc/sieve +pkg_sieve_fetch = git +pkg_sieve_repo = https://github.com/benoitc/sieve +pkg_sieve_commit = master + +PACKAGES += sighandler +pkg_sighandler_name = sighandler +pkg_sighandler_description = Handle UNIX signals in Er lang +pkg_sighandler_homepage = https://github.com/jkingsbery/sighandler +pkg_sighandler_fetch = git +pkg_sighandler_repo = https://github.com/jkingsbery/sighandler +pkg_sighandler_commit = master + +PACKAGES += simhash +pkg_simhash_name = simhash +pkg_simhash_description = Simhashing for Erlang -- hashing algorithm to find near-duplicates in binary data. +pkg_simhash_homepage = https://github.com/ferd/simhash +pkg_simhash_fetch = git +pkg_simhash_repo = https://github.com/ferd/simhash +pkg_simhash_commit = master + +PACKAGES += simple_bridge +pkg_simple_bridge_name = simple_bridge +pkg_simple_bridge_description = A simple, standardized interface library to Erlang HTTP Servers. 
+pkg_simple_bridge_homepage = https://github.com/nitrogen/simple_bridge +pkg_simple_bridge_fetch = git +pkg_simple_bridge_repo = https://github.com/nitrogen/simple_bridge +pkg_simple_bridge_commit = master + +PACKAGES += simple_oauth2 +pkg_simple_oauth2_name = simple_oauth2 +pkg_simple_oauth2_description = Simple erlang OAuth2 client module for any http server framework (Google, Facebook, Yandex, Vkontakte are preconfigured) +pkg_simple_oauth2_homepage = https://github.com/virtan/simple_oauth2 +pkg_simple_oauth2_fetch = git +pkg_simple_oauth2_repo = https://github.com/virtan/simple_oauth2 +pkg_simple_oauth2_commit = master + +PACKAGES += skel +pkg_skel_name = skel +pkg_skel_description = A Streaming Process-based Skeleton Library for Erlang +pkg_skel_homepage = https://github.com/ParaPhrase/skel +pkg_skel_fetch = git +pkg_skel_repo = https://github.com/ParaPhrase/skel +pkg_skel_commit = master + +PACKAGES += slack +pkg_slack_name = slack +pkg_slack_description = Minimal slack notification OTP library. +pkg_slack_homepage = https://github.com/DonBranson/slack +pkg_slack_fetch = git +pkg_slack_repo = https://github.com/DonBranson/slack.git +pkg_slack_commit = master + +PACKAGES += smother +pkg_smother_name = smother +pkg_smother_description = Extended code coverage metrics for Erlang. +pkg_smother_homepage = https://ramsay-t.github.io/Smother/ +pkg_smother_fetch = git +pkg_smother_repo = https://github.com/ramsay-t/Smother +pkg_smother_commit = master + +PACKAGES += snappyer +pkg_snappyer_name = snappyer +pkg_snappyer_description = Snappy as nif for Erlang +pkg_snappyer_homepage = https://github.com/zmstone/snappyer +pkg_snappyer_fetch = git +pkg_snappyer_repo = https://github.com/zmstone/snappyer.git +pkg_snappyer_commit = master + +PACKAGES += social +pkg_social_name = social +pkg_social_description = Cowboy handler for social login via OAuth2 providers +pkg_social_homepage = https://github.com/dvv/social +pkg_social_fetch = git +pkg_social_repo = https://github.com/dvv/social +pkg_social_commit = master + +PACKAGES += spapi_router +pkg_spapi_router_name = spapi_router +pkg_spapi_router_description = Partially-connected Erlang clustering +pkg_spapi_router_homepage = https://github.com/spilgames/spapi-router +pkg_spapi_router_fetch = git +pkg_spapi_router_repo = https://github.com/spilgames/spapi-router +pkg_spapi_router_commit = master + +PACKAGES += sqerl +pkg_sqerl_name = sqerl +pkg_sqerl_description = An Erlang-flavoured SQL DSL +pkg_sqerl_homepage = https://github.com/hairyhum/sqerl +pkg_sqerl_fetch = git +pkg_sqerl_repo = https://github.com/hairyhum/sqerl +pkg_sqerl_commit = master + +PACKAGES += srly +pkg_srly_name = srly +pkg_srly_description = Native Erlang Unix serial interface +pkg_srly_homepage = https://github.com/msantos/srly +pkg_srly_fetch = git +pkg_srly_repo = https://github.com/msantos/srly +pkg_srly_commit = master + +PACKAGES += sshrpc +pkg_sshrpc_name = sshrpc +pkg_sshrpc_description = Erlang SSH RPC module (experimental) +pkg_sshrpc_homepage = https://github.com/jj1bdx/sshrpc +pkg_sshrpc_fetch = git +pkg_sshrpc_repo = https://github.com/jj1bdx/sshrpc +pkg_sshrpc_commit = master + +PACKAGES += stable +pkg_stable_name = stable +pkg_stable_description = Library of assorted helpers for Cowboy web server. 
+pkg_stable_homepage = https://github.com/dvv/stable +pkg_stable_fetch = git +pkg_stable_repo = https://github.com/dvv/stable +pkg_stable_commit = master + +PACKAGES += statebox +pkg_statebox_name = statebox +pkg_statebox_description = Erlang state monad with merge/conflict-resolution capabilities. Useful for Riak. +pkg_statebox_homepage = https://github.com/mochi/statebox +pkg_statebox_fetch = git +pkg_statebox_repo = https://github.com/mochi/statebox +pkg_statebox_commit = master + +PACKAGES += statebox_riak +pkg_statebox_riak_name = statebox_riak +pkg_statebox_riak_description = Convenience library that makes it easier to use statebox with riak, extracted from best practices in our production code at Mochi Media. +pkg_statebox_riak_homepage = https://github.com/mochi/statebox_riak +pkg_statebox_riak_fetch = git +pkg_statebox_riak_repo = https://github.com/mochi/statebox_riak +pkg_statebox_riak_commit = master + +PACKAGES += statman +pkg_statman_name = statman +pkg_statman_description = Efficiently collect massive volumes of metrics inside the Erlang VM +pkg_statman_homepage = https://github.com/knutin/statman +pkg_statman_fetch = git +pkg_statman_repo = https://github.com/knutin/statman +pkg_statman_commit = master + +PACKAGES += statsderl +pkg_statsderl_name = statsderl +pkg_statsderl_description = StatsD client (erlang) +pkg_statsderl_homepage = https://github.com/lpgauth/statsderl +pkg_statsderl_fetch = git +pkg_statsderl_repo = https://github.com/lpgauth/statsderl +pkg_statsderl_commit = master + +PACKAGES += stdinout_pool +pkg_stdinout_pool_name = stdinout_pool +pkg_stdinout_pool_description = stdinout_pool : stuff goes in, stuff goes out. there's never any miscommunication. +pkg_stdinout_pool_homepage = https://github.com/mattsta/erlang-stdinout-pool +pkg_stdinout_pool_fetch = git +pkg_stdinout_pool_repo = https://github.com/mattsta/erlang-stdinout-pool +pkg_stdinout_pool_commit = master + +PACKAGES += stockdb +pkg_stockdb_name = stockdb +pkg_stockdb_description = Database for storing Stock Exchange quotes in erlang +pkg_stockdb_homepage = https://github.com/maxlapshin/stockdb +pkg_stockdb_fetch = git +pkg_stockdb_repo = https://github.com/maxlapshin/stockdb +pkg_stockdb_commit = master + +PACKAGES += stripe +pkg_stripe_name = stripe +pkg_stripe_description = Erlang interface to the stripe.com API +pkg_stripe_homepage = https://github.com/mattsta/stripe-erlang +pkg_stripe_fetch = git +pkg_stripe_repo = https://github.com/mattsta/stripe-erlang +pkg_stripe_commit = v1 + +PACKAGES += subproc +pkg_subproc_name = subproc +pkg_subproc_description = unix subprocess manager with {active,once|false} modes +pkg_subproc_homepage = http://dozzie.jarowit.net/trac/wiki/subproc +pkg_subproc_fetch = git +pkg_subproc_repo = https://github.com/dozzie/subproc +pkg_subproc_commit = v0.1.0 + +PACKAGES += supervisor3 +pkg_supervisor3_name = supervisor3 +pkg_supervisor3_description = OTP supervisor with additional strategies +pkg_supervisor3_homepage = https://github.com/klarna/supervisor3 +pkg_supervisor3_fetch = git +pkg_supervisor3_repo = https://github.com/klarna/supervisor3.git +pkg_supervisor3_commit = master + +PACKAGES += surrogate +pkg_surrogate_name = surrogate +pkg_surrogate_description = Proxy server written in erlang. Supports reverse proxy load balancing and forward proxy with http (including CONNECT), socks4, socks5, and transparent proxy modes. 
+pkg_surrogate_homepage = https://github.com/skruger/Surrogate +pkg_surrogate_fetch = git +pkg_surrogate_repo = https://github.com/skruger/Surrogate +pkg_surrogate_commit = master + +PACKAGES += swab +pkg_swab_name = swab +pkg_swab_description = General purpose buffer handling module +pkg_swab_homepage = https://github.com/crownedgrouse/swab +pkg_swab_fetch = git +pkg_swab_repo = https://github.com/crownedgrouse/swab +pkg_swab_commit = master + +PACKAGES += swarm +pkg_swarm_name = swarm +pkg_swarm_description = Fast and simple acceptor pool for Erlang +pkg_swarm_homepage = https://github.com/jeremey/swarm +pkg_swarm_fetch = git +pkg_swarm_repo = https://github.com/jeremey/swarm +pkg_swarm_commit = master + +PACKAGES += switchboard +pkg_switchboard_name = switchboard +pkg_switchboard_description = A framework for processing email using worker plugins. +pkg_switchboard_homepage = https://github.com/thusfresh/switchboard +pkg_switchboard_fetch = git +pkg_switchboard_repo = https://github.com/thusfresh/switchboard +pkg_switchboard_commit = master + +PACKAGES += syn +pkg_syn_name = syn +pkg_syn_description = A global Process Registry and Process Group manager for Erlang. +pkg_syn_homepage = https://github.com/ostinelli/syn +pkg_syn_fetch = git +pkg_syn_repo = https://github.com/ostinelli/syn +pkg_syn_commit = master + +PACKAGES += sync +pkg_sync_name = sync +pkg_sync_description = On-the-fly recompiling and reloading in Erlang. +pkg_sync_homepage = https://github.com/rustyio/sync +pkg_sync_fetch = git +pkg_sync_repo = https://github.com/rustyio/sync +pkg_sync_commit = master + +PACKAGES += syntaxerl +pkg_syntaxerl_name = syntaxerl +pkg_syntaxerl_description = Syntax checker for Erlang +pkg_syntaxerl_homepage = https://github.com/ten0s/syntaxerl +pkg_syntaxerl_fetch = git +pkg_syntaxerl_repo = https://github.com/ten0s/syntaxerl +pkg_syntaxerl_commit = master + +PACKAGES += syslog +pkg_syslog_name = syslog +pkg_syslog_description = Erlang port driver for interacting with syslog via syslog(3) +pkg_syslog_homepage = https://github.com/Vagabond/erlang-syslog +pkg_syslog_fetch = git +pkg_syslog_repo = https://github.com/Vagabond/erlang-syslog +pkg_syslog_commit = master + +PACKAGES += taskforce +pkg_taskforce_name = taskforce +pkg_taskforce_description = Erlang worker pools for controlled parallelisation of arbitrary tasks. +pkg_taskforce_homepage = https://github.com/g-andrade/taskforce +pkg_taskforce_fetch = git +pkg_taskforce_repo = https://github.com/g-andrade/taskforce +pkg_taskforce_commit = master + +PACKAGES += tddreloader +pkg_tddreloader_name = tddreloader +pkg_tddreloader_description = Shell utility for recompiling, reloading, and testing code as it changes +pkg_tddreloader_homepage = https://github.com/version2beta/tddreloader +pkg_tddreloader_fetch = git +pkg_tddreloader_repo = https://github.com/version2beta/tddreloader +pkg_tddreloader_commit = master + +PACKAGES += tempo +pkg_tempo_name = tempo +pkg_tempo_description = NIF-based date and time parsing and formatting for Erlang. +pkg_tempo_homepage = https://github.com/selectel/tempo +pkg_tempo_fetch = git +pkg_tempo_repo = https://github.com/selectel/tempo +pkg_tempo_commit = master + +PACKAGES += ticktick +pkg_ticktick_name = ticktick +pkg_ticktick_description = Ticktick is an id generator for message service. 
+pkg_ticktick_homepage = https://github.com/ericliang/ticktick +pkg_ticktick_fetch = git +pkg_ticktick_repo = https://github.com/ericliang/ticktick +pkg_ticktick_commit = master + +PACKAGES += tinymq +pkg_tinymq_name = tinymq +pkg_tinymq_description = TinyMQ - a diminutive, in-memory message queue +pkg_tinymq_homepage = https://github.com/ChicagoBoss/tinymq +pkg_tinymq_fetch = git +pkg_tinymq_repo = https://github.com/ChicagoBoss/tinymq +pkg_tinymq_commit = master + +PACKAGES += tinymt +pkg_tinymt_name = tinymt +pkg_tinymt_description = TinyMT pseudo random number generator for Erlang. +pkg_tinymt_homepage = https://github.com/jj1bdx/tinymt-erlang +pkg_tinymt_fetch = git +pkg_tinymt_repo = https://github.com/jj1bdx/tinymt-erlang +pkg_tinymt_commit = master + +PACKAGES += tirerl +pkg_tirerl_name = tirerl +pkg_tirerl_description = Erlang interface to Elastic Search +pkg_tirerl_homepage = https://github.com/inaka/tirerl +pkg_tirerl_fetch = git +pkg_tirerl_repo = https://github.com/inaka/tirerl +pkg_tirerl_commit = master + +PACKAGES += toml +pkg_toml_name = toml +pkg_toml_description = TOML (0.4.0) config parser +pkg_toml_homepage = http://dozzie.jarowit.net/trac/wiki/TOML +pkg_toml_fetch = git +pkg_toml_repo = https://github.com/dozzie/toml +pkg_toml_commit = v0.2.0 + +PACKAGES += traffic_tools +pkg_traffic_tools_name = traffic_tools +pkg_traffic_tools_description = Simple traffic limiting library +pkg_traffic_tools_homepage = https://github.com/systra/traffic_tools +pkg_traffic_tools_fetch = git +pkg_traffic_tools_repo = https://github.com/systra/traffic_tools +pkg_traffic_tools_commit = master + +PACKAGES += trails +pkg_trails_name = trails +pkg_trails_description = A couple of improvements over Cowboy Routes +pkg_trails_homepage = http://inaka.github.io/cowboy-trails/ +pkg_trails_fetch = git +pkg_trails_repo = https://github.com/inaka/cowboy-trails +pkg_trails_commit = master + +PACKAGES += trane +pkg_trane_name = trane +pkg_trane_description = SAX style broken HTML parser in Erlang +pkg_trane_homepage = https://github.com/massemanet/trane +pkg_trane_fetch = git +pkg_trane_repo = https://github.com/massemanet/trane +pkg_trane_commit = master + +PACKAGES += transit +pkg_transit_name = transit +pkg_transit_description = transit format for erlang +pkg_transit_homepage = https://github.com/isaiah/transit-erlang +pkg_transit_fetch = git +pkg_transit_repo = https://github.com/isaiah/transit-erlang +pkg_transit_commit = master + +PACKAGES += trie +pkg_trie_name = trie +pkg_trie_description = Erlang Trie Implementation +pkg_trie_homepage = https://github.com/okeuday/trie +pkg_trie_fetch = git +pkg_trie_repo = https://github.com/okeuday/trie +pkg_trie_commit = master + +PACKAGES += triq +pkg_triq_name = triq +pkg_triq_description = Trifork QuickCheck +pkg_triq_homepage = https://triq.gitlab.io +pkg_triq_fetch = git +pkg_triq_repo = https://gitlab.com/triq/triq.git +pkg_triq_commit = master + +PACKAGES += tunctl +pkg_tunctl_name = tunctl +pkg_tunctl_description = Erlang TUN/TAP interface +pkg_tunctl_homepage = https://github.com/msantos/tunctl +pkg_tunctl_fetch = git +pkg_tunctl_repo = https://github.com/msantos/tunctl +pkg_tunctl_commit = master + +PACKAGES += twerl +pkg_twerl_name = twerl +pkg_twerl_description = Erlang client for the Twitter Streaming API +pkg_twerl_homepage = https://github.com/lucaspiller/twerl +pkg_twerl_fetch = git +pkg_twerl_repo = https://github.com/lucaspiller/twerl +pkg_twerl_commit = oauth + +PACKAGES += twitter_erlang +pkg_twitter_erlang_name = twitter_erlang 
+pkg_twitter_erlang_description = An Erlang twitter client +pkg_twitter_erlang_homepage = https://github.com/ngerakines/erlang_twitter +pkg_twitter_erlang_fetch = git +pkg_twitter_erlang_repo = https://github.com/ngerakines/erlang_twitter +pkg_twitter_erlang_commit = master + +PACKAGES += ucol_nif +pkg_ucol_nif_name = ucol_nif +pkg_ucol_nif_description = ICU based collation Erlang module +pkg_ucol_nif_homepage = https://github.com/refuge/ucol_nif +pkg_ucol_nif_fetch = git +pkg_ucol_nif_repo = https://github.com/refuge/ucol_nif +pkg_ucol_nif_commit = master + +PACKAGES += unicorn +pkg_unicorn_name = unicorn +pkg_unicorn_description = Generic configuration server +pkg_unicorn_homepage = https://github.com/shizzard/unicorn +pkg_unicorn_fetch = git +pkg_unicorn_repo = https://github.com/shizzard/unicorn +pkg_unicorn_commit = master + +PACKAGES += unsplit +pkg_unsplit_name = unsplit +pkg_unsplit_description = Resolves conflicts in Mnesia after network splits +pkg_unsplit_homepage = https://github.com/uwiger/unsplit +pkg_unsplit_fetch = git +pkg_unsplit_repo = https://github.com/uwiger/unsplit +pkg_unsplit_commit = master + +PACKAGES += uuid +pkg_uuid_name = uuid +pkg_uuid_description = Erlang UUID Implementation +pkg_uuid_homepage = https://github.com/okeuday/uuid +pkg_uuid_fetch = git +pkg_uuid_repo = https://github.com/okeuday/uuid +pkg_uuid_commit = master + +PACKAGES += ux +pkg_ux_name = ux +pkg_ux_description = Unicode eXtention for Erlang (Strings, Collation) +pkg_ux_homepage = https://github.com/erlang-unicode/ux +pkg_ux_fetch = git +pkg_ux_repo = https://github.com/erlang-unicode/ux +pkg_ux_commit = master + +PACKAGES += vert +pkg_vert_name = vert +pkg_vert_description = Erlang binding to libvirt virtualization API +pkg_vert_homepage = https://github.com/msantos/erlang-libvirt +pkg_vert_fetch = git +pkg_vert_repo = https://github.com/msantos/erlang-libvirt +pkg_vert_commit = master + +PACKAGES += verx +pkg_verx_name = verx +pkg_verx_description = Erlang implementation of the libvirtd remote protocol +pkg_verx_homepage = https://github.com/msantos/verx +pkg_verx_fetch = git +pkg_verx_repo = https://github.com/msantos/verx +pkg_verx_commit = master + +PACKAGES += vmq_acl +pkg_vmq_acl_name = vmq_acl +pkg_vmq_acl_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_acl_homepage = https://verne.mq/ +pkg_vmq_acl_fetch = git +pkg_vmq_acl_repo = https://github.com/erlio/vmq_acl +pkg_vmq_acl_commit = master + +PACKAGES += vmq_bridge +pkg_vmq_bridge_name = vmq_bridge +pkg_vmq_bridge_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_bridge_homepage = https://verne.mq/ +pkg_vmq_bridge_fetch = git +pkg_vmq_bridge_repo = https://github.com/erlio/vmq_bridge +pkg_vmq_bridge_commit = master + +PACKAGES += vmq_graphite +pkg_vmq_graphite_name = vmq_graphite +pkg_vmq_graphite_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_graphite_homepage = https://verne.mq/ +pkg_vmq_graphite_fetch = git +pkg_vmq_graphite_repo = https://github.com/erlio/vmq_graphite +pkg_vmq_graphite_commit = master + +PACKAGES += vmq_passwd +pkg_vmq_passwd_name = vmq_passwd +pkg_vmq_passwd_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_passwd_homepage = https://verne.mq/ +pkg_vmq_passwd_fetch = git +pkg_vmq_passwd_repo = https://github.com/erlio/vmq_passwd +pkg_vmq_passwd_commit = master + +PACKAGES += vmq_server +pkg_vmq_server_name = vmq_server +pkg_vmq_server_description = Component of VerneMQ: A distributed MQTT 
message broker +pkg_vmq_server_homepage = https://verne.mq/ +pkg_vmq_server_fetch = git +pkg_vmq_server_repo = https://github.com/erlio/vmq_server +pkg_vmq_server_commit = master + +PACKAGES += vmq_snmp +pkg_vmq_snmp_name = vmq_snmp +pkg_vmq_snmp_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_snmp_homepage = https://verne.mq/ +pkg_vmq_snmp_fetch = git +pkg_vmq_snmp_repo = https://github.com/erlio/vmq_snmp +pkg_vmq_snmp_commit = master + +PACKAGES += vmq_systree +pkg_vmq_systree_name = vmq_systree +pkg_vmq_systree_description = Component of VerneMQ: A distributed MQTT message broker +pkg_vmq_systree_homepage = https://verne.mq/ +pkg_vmq_systree_fetch = git +pkg_vmq_systree_repo = https://github.com/erlio/vmq_systree +pkg_vmq_systree_commit = master + +PACKAGES += vmstats +pkg_vmstats_name = vmstats +pkg_vmstats_description = tiny Erlang app that works in conjunction with statsderl in order to generate information on the Erlang VM for graphite logs. +pkg_vmstats_homepage = https://github.com/ferd/vmstats +pkg_vmstats_fetch = git +pkg_vmstats_repo = https://github.com/ferd/vmstats +pkg_vmstats_commit = master + +PACKAGES += walrus +pkg_walrus_name = walrus +pkg_walrus_description = Walrus - Mustache-like Templating +pkg_walrus_homepage = https://github.com/devinus/walrus +pkg_walrus_fetch = git +pkg_walrus_repo = https://github.com/devinus/walrus +pkg_walrus_commit = master + +PACKAGES += webmachine +pkg_webmachine_name = webmachine +pkg_webmachine_description = A REST-based system for building web applications. +pkg_webmachine_homepage = https://github.com/basho/webmachine +pkg_webmachine_fetch = git +pkg_webmachine_repo = https://github.com/basho/webmachine +pkg_webmachine_commit = master + +PACKAGES += websocket_client +pkg_websocket_client_name = websocket_client +pkg_websocket_client_description = Erlang websocket client (ws and wss supported) +pkg_websocket_client_homepage = https://github.com/jeremyong/websocket_client +pkg_websocket_client_fetch = git +pkg_websocket_client_repo = https://github.com/jeremyong/websocket_client +pkg_websocket_client_commit = master + +PACKAGES += worker_pool +pkg_worker_pool_name = worker_pool +pkg_worker_pool_description = a simple erlang worker pool +pkg_worker_pool_homepage = https://github.com/inaka/worker_pool +pkg_worker_pool_fetch = git +pkg_worker_pool_repo = https://github.com/inaka/worker_pool +pkg_worker_pool_commit = master + +PACKAGES += wrangler +pkg_wrangler_name = wrangler +pkg_wrangler_description = Import of the Wrangler svn repository. 
+pkg_wrangler_homepage = http://www.cs.kent.ac.uk/projects/wrangler/Home.html +pkg_wrangler_fetch = git +pkg_wrangler_repo = https://github.com/RefactoringTools/wrangler +pkg_wrangler_commit = master + +PACKAGES += wsock +pkg_wsock_name = wsock +pkg_wsock_description = Erlang library to build WebSocket clients and servers +pkg_wsock_homepage = https://github.com/madtrick/wsock +pkg_wsock_fetch = git +pkg_wsock_repo = https://github.com/madtrick/wsock +pkg_wsock_commit = master + +PACKAGES += xhttpc +pkg_xhttpc_name = xhttpc +pkg_xhttpc_description = Extensible HTTP Client for Erlang +pkg_xhttpc_homepage = https://github.com/seriyps/xhttpc +pkg_xhttpc_fetch = git +pkg_xhttpc_repo = https://github.com/seriyps/xhttpc +pkg_xhttpc_commit = master + +PACKAGES += xref_runner +pkg_xref_runner_name = xref_runner +pkg_xref_runner_description = Erlang Xref Runner (inspired in rebar xref) +pkg_xref_runner_homepage = https://github.com/inaka/xref_runner +pkg_xref_runner_fetch = git +pkg_xref_runner_repo = https://github.com/inaka/xref_runner +pkg_xref_runner_commit = master + +PACKAGES += yamerl +pkg_yamerl_name = yamerl +pkg_yamerl_description = YAML 1.2 parser in pure Erlang +pkg_yamerl_homepage = https://github.com/yakaz/yamerl +pkg_yamerl_fetch = git +pkg_yamerl_repo = https://github.com/yakaz/yamerl +pkg_yamerl_commit = master + +PACKAGES += yamler +pkg_yamler_name = yamler +pkg_yamler_description = libyaml-based yaml loader for Erlang +pkg_yamler_homepage = https://github.com/goertzenator/yamler +pkg_yamler_fetch = git +pkg_yamler_repo = https://github.com/goertzenator/yamler +pkg_yamler_commit = master + +PACKAGES += yaws +pkg_yaws_name = yaws +pkg_yaws_description = Yaws webserver +pkg_yaws_homepage = http://yaws.hyber.org +pkg_yaws_fetch = git +pkg_yaws_repo = https://github.com/klacke/yaws +pkg_yaws_commit = master + +PACKAGES += zab_engine +pkg_zab_engine_name = zab_engine +pkg_zab_engine_description = zab propotocol implement by erlang +pkg_zab_engine_homepage = https://github.com/xinmingyao/zab_engine +pkg_zab_engine_fetch = git +pkg_zab_engine_repo = https://github.com/xinmingyao/zab_engine +pkg_zab_engine_commit = master + +PACKAGES += zabbix_sender +pkg_zabbix_sender_name = zabbix_sender +pkg_zabbix_sender_description = Zabbix trapper for sending data to Zabbix in pure Erlang +pkg_zabbix_sender_homepage = https://github.com/stalkermn/zabbix_sender +pkg_zabbix_sender_fetch = git +pkg_zabbix_sender_repo = https://github.com/stalkermn/zabbix_sender.git +pkg_zabbix_sender_commit = master + +PACKAGES += zeta +pkg_zeta_name = zeta +pkg_zeta_description = HTTP access log parser in Erlang +pkg_zeta_homepage = https://github.com/s1n4/zeta +pkg_zeta_fetch = git +pkg_zeta_repo = https://github.com/s1n4/zeta +pkg_zeta_commit = master + +PACKAGES += zippers +pkg_zippers_name = zippers +pkg_zippers_description = A library for functional zipper data structures in Erlang. Read more on zippers +pkg_zippers_homepage = https://github.com/ferd/zippers +pkg_zippers_fetch = git +pkg_zippers_repo = https://github.com/ferd/zippers +pkg_zippers_commit = master + +PACKAGES += zlists +pkg_zlists_name = zlists +pkg_zlists_description = Erlang lazy lists library. 
+pkg_zlists_homepage = https://github.com/vjache/erlang-zlists +pkg_zlists_fetch = git +pkg_zlists_repo = https://github.com/vjache/erlang-zlists +pkg_zlists_commit = master + +PACKAGES += zraft_lib +pkg_zraft_lib_name = zraft_lib +pkg_zraft_lib_description = Erlang raft consensus protocol implementation +pkg_zraft_lib_homepage = https://github.com/dreyk/zraft_lib +pkg_zraft_lib_fetch = git +pkg_zraft_lib_repo = https://github.com/dreyk/zraft_lib +pkg_zraft_lib_commit = master + +PACKAGES += zucchini +pkg_zucchini_name = zucchini +pkg_zucchini_description = An Erlang INI parser +pkg_zucchini_homepage = https://github.com/devinus/zucchini +pkg_zucchini_fetch = git +pkg_zucchini_repo = https://github.com/devinus/zucchini +pkg_zucchini_commit = master + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: search + +define pkg_print + $(verbose) printf "%s\n" \ + $(if $(call core_eq,$(1),$(pkg_$(1)_name)),,"Pkg name: $(1)") \ + "App name: $(pkg_$(1)_name)" \ + "Description: $(pkg_$(1)_description)" \ + "Home page: $(pkg_$(1)_homepage)" \ + "Fetch with: $(pkg_$(1)_fetch)" \ + "Repository: $(pkg_$(1)_repo)" \ + "Commit: $(pkg_$(1)_commit)" \ + "" + +endef + +search: +ifdef q + $(foreach p,$(PACKAGES), \ + $(if $(findstring $(call core_lc,$(q)),$(call core_lc,$(pkg_$(p)_name) $(pkg_$(p)_description))), \ + $(call pkg_print,$(p)))) +else + $(foreach p,$(PACKAGES),$(call pkg_print,$(p))) +endif + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: distclean-deps clean-tmp-deps.log + +# Configuration. + +ifdef OTP_DEPS +$(warning The variable OTP_DEPS is deprecated in favor of LOCAL_DEPS.) +endif + +IGNORE_DEPS ?= +export IGNORE_DEPS + +APPS_DIR ?= $(CURDIR)/apps +export APPS_DIR + +DEPS_DIR ?= $(CURDIR)/deps +export DEPS_DIR + +REBAR_DEPS_DIR = $(DEPS_DIR) +export REBAR_DEPS_DIR + +REBAR_GIT ?= https://github.com/rebar/rebar +REBAR_COMMIT ?= 576e12171ab8d69b048b827b92aa65d067deea01 + +HEX_CORE_GIT ?= https://github.com/hexpm/hex_core +HEX_CORE_COMMIT ?= v0.7.0 + +PACKAGES += hex_core +pkg_hex_core_name = hex_core +pkg_hex_core_description = Reference implementation of Hex specifications +pkg_hex_core_homepage = $(HEX_CORE_GIT) +pkg_hex_core_fetch = git +pkg_hex_core_repo = $(HEX_CORE_GIT) +pkg_hex_core_commit = $(HEX_CORE_COMMIT) + +# External "early" plugins (see core/plugins.mk for regular plugins). +# They both use the core_dep_plugin macro. + +define core_dep_plugin +ifeq ($(2),$(PROJECT)) +-include $$(patsubst $(PROJECT)/%,%,$(1)) +else +-include $(DEPS_DIR)/$(1) + +$(DEPS_DIR)/$(1): $(DEPS_DIR)/$(2) ; +endif +endef + +DEP_EARLY_PLUGINS ?= + +$(foreach p,$(DEP_EARLY_PLUGINS),\ + $(eval $(if $(findstring /,$p),\ + $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\ + $(call core_dep_plugin,$p/early-plugins.mk,$p)))) + +# Query functions. 
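The two helpers just defined are the user-facing side of this package index: pkg_print formats one pkg_* record per field, and the search target filters PACKAGES case-insensitively against a q= term (or dumps the whole index when q is not given). A minimal usage sketch, assuming a project that includes this erlang.mk (core_lc and core_eq are defined earlier in the file); the poolboy record is taken verbatim from the index above and further matches are elided:

  $ make search q=pool
  App name: poolboy
  Description: A hunky Erlang worker pool factory
  Home page: https://github.com/devinus/poolboy
  Fetch with: git
  Repository: https://github.com/devinus/poolboy
  Commit: master

  App name: pooler
  ...

The query helpers defined next resolve a dependency's fetch method, repository and commit, preferring an explicit dep_<name> line in the project Makefile over the pkg_<name>_* defaults above.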
+ +query_fetch_method = $(if $(dep_$(1)),$(call _qfm_dep,$(word 1,$(dep_$(1)))),$(call _qfm_pkg,$(1))) +_qfm_dep = $(if $(dep_fetch_$(1)),$(1),$(if $(IS_DEP),legacy,fail)) +_qfm_pkg = $(if $(pkg_$(1)_fetch),$(pkg_$(1)_fetch),fail) + +query_name = $(if $(dep_$(1)),$(1),$(if $(pkg_$(1)_name),$(pkg_$(1)_name),$(1))) + +query_repo = $(call _qr,$(1),$(call query_fetch_method,$(1))) +_qr = $(if $(query_repo_$(2)),$(call query_repo_$(2),$(1)),$(call dep_repo,$(1))) + +query_repo_default = $(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_repo)) +query_repo_git = $(patsubst git://github.com/%,https://github.com/%,$(call query_repo_default,$(1))) +query_repo_git-subfolder = $(call query_repo_git,$(1)) +query_repo_git-submodule = - +query_repo_hg = $(call query_repo_default,$(1)) +query_repo_svn = $(call query_repo_default,$(1)) +query_repo_cp = $(call query_repo_default,$(1)) +query_repo_ln = $(call query_repo_default,$(1)) +query_repo_hex = https://hex.pm/packages/$(if $(word 3,$(dep_$(1))),$(word 3,$(dep_$(1))),$(1)) +query_repo_fail = - +query_repo_legacy = - + +query_version = $(call _qv,$(1),$(call query_fetch_method,$(1))) +_qv = $(if $(query_version_$(2)),$(call query_version_$(2),$(1)),$(call dep_commit,$(1))) + +query_version_default = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 3,$(dep_$(1))),$(pkg_$(1)_commit))) +query_version_git = $(call query_version_default,$(1)) +query_version_git-subfolder = $(call query_version_git,$(1)) +query_version_git-submodule = - +query_version_hg = $(call query_version_default,$(1)) +query_version_svn = - +query_version_cp = - +query_version_ln = - +query_version_hex = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(word 2,$(dep_$(1))),$(pkg_$(1)_commit))) +query_version_fail = - +query_version_legacy = - + +query_extra = $(call _qe,$(1),$(call query_fetch_method,$(1))) +_qe = $(if $(query_extra_$(2)),$(call query_extra_$(2),$(1)),-) + +query_extra_git = - +query_extra_git-subfolder = $(if $(dep_$(1)),subfolder=$(word 4,$(dep_$(1))),-) +query_extra_git-submodule = - +query_extra_hg = - +query_extra_svn = - +query_extra_cp = - +query_extra_ln = - +query_extra_hex = $(if $(dep_$(1)),package-name=$(word 3,$(dep_$(1))),-) +query_extra_fail = - +query_extra_legacy = - + +query_absolute_path = $(addprefix $(DEPS_DIR)/,$(call query_name,$(1))) + +# Deprecated legacy query functions. +dep_fetch = $(call query_fetch_method,$(1)) +dep_name = $(call query_name,$(1)) +dep_repo = $(call query_repo_git,$(1)) +dep_commit = $(if $(dep_$(1)_commit),$(dep_$(1)_commit),$(if $(dep_$(1)),$(if $(filter hex,$(word 1,$(dep_$(1)))),$(word 2,$(dep_$(1))),$(word 3,$(dep_$(1)))),$(pkg_$(1)_commit))) + +LOCAL_DEPS_DIRS = $(foreach a,$(LOCAL_DEPS),$(if $(wildcard $(APPS_DIR)/$(a)),$(APPS_DIR)/$(a))) +ALL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(foreach dep,$(filter-out $(IGNORE_DEPS),$(BUILD_DEPS) $(DEPS)),$(call dep_name,$(dep)))) + +# When we are calling an app directly we don't want to include it here +# otherwise it'll be treated both as an apps and a top-level project. 
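All of the query_* helpers above follow the same precedence: when the project Makefile defines dep_<name>, its words win (fetch method, then repository, then commit or version, with hex dependencies using word 2 as the version); otherwise the pkg_<name>_* entries from the index are used. A minimal sketch of a project Makefile fragment; poolboy's values come from the index above, while the cowlib and jsx lines (and their version numbers) are illustrative stand-ins for any git and hex dependency:

  DEPS = poolboy cowlib jsx
  dep_cowlib = git https://github.com/ninenines/cowlib 2.11.0
  dep_jsx = hex 3.1.0

  # What the helpers resolve for these declarations:
  #   $(call query_fetch_method,poolboy) -> git (from pkg_poolboy_fetch)
  #   $(call query_repo,poolboy)         -> https://github.com/devinus/poolboy
  #   $(call query_version,cowlib)       -> 2.11.0 (word 3 of dep_cowlib)
  #   $(call query_repo,jsx)             -> https://hex.pm/packages/jsx

The directory lists that follow (ALL_APPS_DIRS, plus the filtering described in the comment just above) feed the apps and deps build loops further down.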
+ALL_APPS_DIRS = $(if $(wildcard $(APPS_DIR)/),$(filter-out $(APPS_DIR),$(shell find $(APPS_DIR) -maxdepth 1 -type d))) +ifdef ROOT_DIR +ifndef IS_APP +ALL_APPS_DIRS := $(filter-out $(APPS_DIR)/$(notdir $(CURDIR)),$(ALL_APPS_DIRS)) +endif +endif + +ifeq ($(filter $(APPS_DIR) $(DEPS_DIR),$(subst :, ,$(ERL_LIBS))),) +ifeq ($(ERL_LIBS),) + ERL_LIBS = $(APPS_DIR):$(DEPS_DIR) +else + ERL_LIBS := $(ERL_LIBS):$(APPS_DIR):$(DEPS_DIR) +endif +endif +export ERL_LIBS + +export NO_AUTOPATCH + +# Verbosity. + +dep_verbose_0 = @echo " DEP $1 ($(call dep_commit,$1))"; +dep_verbose_2 = set -x; +dep_verbose = $(dep_verbose_$(V)) + +# Optimization: don't recompile deps unless truly necessary. + +ifndef IS_DEP +ifneq ($(MAKELEVEL),0) +$(shell rm -f ebin/dep_built) +endif +endif + +# Core targets. + +ALL_APPS_DIRS_TO_BUILD = $(if $(LOCAL_DEPS_DIRS)$(IS_APP),$(LOCAL_DEPS_DIRS),$(ALL_APPS_DIRS)) + +apps:: $(ALL_APPS_DIRS) clean-tmp-deps.log | $(ERLANG_MK_TMP) +# Create ebin directory for all apps to make sure Erlang recognizes them +# as proper OTP applications when using -include_lib. This is a temporary +# fix, a proper fix would be to compile apps/* in the right order. +ifndef IS_APP +ifneq ($(ALL_APPS_DIRS),) + $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \ + mkdir -p $$dep/ebin; \ + done +endif +endif +# At the toplevel: if LOCAL_DEPS is defined with at least one local app, only +# compile that list of apps. Otherwise, compile everything. +# Within an app: compile all LOCAL_DEPS that are (uncompiled) local apps. +ifneq ($(ALL_APPS_DIRS_TO_BUILD),) + $(verbose) set -e; for dep in $(ALL_APPS_DIRS_TO_BUILD); do \ + if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/apps.log; then \ + :; \ + else \ + echo $$dep >> $(ERLANG_MK_TMP)/apps.log; \ + $(MAKE) -C $$dep $(if $(IS_TEST),test-build-app) IS_APP=1; \ + fi \ + done +endif + +clean-tmp-deps.log: +ifeq ($(IS_APP)$(IS_DEP),) + $(verbose) rm -f $(ERLANG_MK_TMP)/apps.log $(ERLANG_MK_TMP)/deps.log +endif + +# Erlang.mk does not rebuild dependencies after they were compiled +# once. If a developer is working on the top-level project and some +# dependencies at the same time, he may want to change this behavior. +# There are two solutions: +# 1. Set `FULL=1` so that all dependencies are visited and +# recursively recompiled if necessary. +# 2. Set `FORCE_REBUILD=` to the specific list of dependencies that +# should be recompiled (instead of the whole set). + +FORCE_REBUILD ?= + +ifeq ($(origin FULL),undefined) +ifneq ($(strip $(force_rebuild_dep)$(FORCE_REBUILD)),) +define force_rebuild_dep +echo "$(FORCE_REBUILD)" | grep -qw "$$(basename "$1")" +endef +endif +endif + +ifneq ($(SKIP_DEPS),) +deps:: +else +deps:: $(ALL_DEPS_DIRS) apps clean-tmp-deps.log | $(ERLANG_MK_TMP) +ifneq ($(ALL_DEPS_DIRS),) + $(verbose) set -e; for dep in $(ALL_DEPS_DIRS); do \ + if grep -qs ^$$dep$$ $(ERLANG_MK_TMP)/deps.log; then \ + :; \ + else \ + echo $$dep >> $(ERLANG_MK_TMP)/deps.log; \ + if [ -z "$(strip $(FULL))" ] $(if $(force_rebuild_dep),&& ! ($(call force_rebuild_dep,$$dep)),) && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \ + :; \ + elif [ -f $$dep/GNUmakefile ] || [ -f $$dep/makefile ] || [ -f $$dep/Makefile ]; then \ + $(MAKE) -C $$dep IS_DEP=1; \ + if [ ! -L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \ + else \ + echo "Error: No Makefile to build dependency $$dep." >&2; \ + exit 2; \ + fi \ + fi \ + done +endif +endif + +# Deps related targets. 
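As the comment block above explains, a dependency is normally compiled once and then skipped for as long as its ebin/dep_built marker exists and it is not a symlink; the deps loop only descends into it again when FULL is set or when force_rebuild_dep matches its name. A minimal sketch of both options, with the dependency names chosen only as examples:

  # One-off, from the command line: visit all deps and recompile as needed.
  #   $ make FULL=1

  # Or, in the project Makefile, restrict rebuilding to specific deps:
  FORCE_REBUILD = cowlib ranch

The autopatch helpers defined next are what rewrite a freshly fetched dependency's build files (rebar-based or otherwise) so that this plain-make deps loop can build it.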
+ +# @todo rename GNUmakefile and makefile into Makefile first, if they exist +# While Makefile file could be GNUmakefile or makefile, +# in practice only Makefile is needed so far. +define dep_autopatch + if [ -f $(DEPS_DIR)/$(1)/erlang.mk ]; then \ + rm -rf $(DEPS_DIR)/$1/ebin/; \ + $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \ + $(call dep_autopatch_erlang_mk,$(1)); \ + elif [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \ + if [ -f $(DEPS_DIR)/$1/rebar.lock ]; then \ + $(call dep_autopatch2,$1); \ + elif [ 0 != `grep -c "include ../\w*\.mk" $(DEPS_DIR)/$(1)/Makefile` ]; then \ + $(call dep_autopatch2,$(1)); \ + elif [ 0 != `grep -ci "^[^#].*rebar" $(DEPS_DIR)/$(1)/Makefile` ]; then \ + $(call dep_autopatch2,$(1)); \ + elif [ -n "`find $(DEPS_DIR)/$(1)/ -type f -name \*.mk -not -name erlang.mk -exec grep -i "^[^#].*rebar" '{}' \;`" ]; then \ + $(call dep_autopatch2,$(1)); \ + fi \ + else \ + if [ ! -d $(DEPS_DIR)/$(1)/src/ ]; then \ + $(call dep_autopatch_noop,$(1)); \ + else \ + $(call dep_autopatch2,$(1)); \ + fi \ + fi +endef + +define dep_autopatch2 + ! test -f $(DEPS_DIR)/$1/ebin/$1.app || \ + mv -n $(DEPS_DIR)/$1/ebin/$1.app $(DEPS_DIR)/$1/src/$1.app.src; \ + rm -f $(DEPS_DIR)/$1/ebin/$1.app; \ + if [ -f $(DEPS_DIR)/$1/src/$1.app.src.script ]; then \ + $(call erlang,$(call dep_autopatch_appsrc_script.erl,$(1))); \ + fi; \ + $(call erlang,$(call dep_autopatch_appsrc.erl,$(1))); \ + if [ -f $(DEPS_DIR)/$(1)/rebar -o -f $(DEPS_DIR)/$(1)/rebar.config -o -f $(DEPS_DIR)/$(1)/rebar.config.script -o -f $(DEPS_DIR)/$1/rebar.lock ]; then \ + $(call dep_autopatch_fetch_rebar); \ + $(call dep_autopatch_rebar,$(1)); \ + else \ + $(call dep_autopatch_gen,$(1)); \ + fi +endef + +define dep_autopatch_noop + printf "noop:\n" > $(DEPS_DIR)/$(1)/Makefile +endef + +# Replace "include erlang.mk" with a line that will load the parent Erlang.mk +# if given. Do it for all 3 possible Makefile file names. +ifeq ($(NO_AUTOPATCH_ERLANG_MK),) +define dep_autopatch_erlang_mk + for f in Makefile makefile GNUmakefile; do \ + if [ -f $(DEPS_DIR)/$1/$$f ]; then \ + sed -i.bak s/'include *erlang.mk'/'include $$(if $$(ERLANG_MK_FILENAME),$$(ERLANG_MK_FILENAME),erlang.mk)'/ $(DEPS_DIR)/$1/$$f; \ + fi \ + done +endef +else +define dep_autopatch_erlang_mk + : +endef +endif + +define dep_autopatch_gen + printf "%s\n" \ + "ERLC_OPTS = +debug_info" \ + "include ../../erlang.mk" > $(DEPS_DIR)/$(1)/Makefile +endef + +# We use flock/lockf when available to avoid concurrency issues. +define dep_autopatch_fetch_rebar + if command -v flock >/dev/null; then \ + flock $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \ + elif command -v lockf >/dev/null; then \ + lockf $(ERLANG_MK_TMP)/rebar.lock sh -c "$(call dep_autopatch_fetch_rebar2)"; \ + else \ + $(call dep_autopatch_fetch_rebar2); \ + fi +endef + +define dep_autopatch_fetch_rebar2 + if [ ! 
-d $(ERLANG_MK_TMP)/rebar ]; then \ + git clone -q -n -- $(REBAR_GIT) $(ERLANG_MK_TMP)/rebar; \ + cd $(ERLANG_MK_TMP)/rebar; \ + git checkout -q $(REBAR_COMMIT); \ + ./bootstrap; \ + cd -; \ + fi +endef + +define dep_autopatch_rebar + if [ -f $(DEPS_DIR)/$(1)/Makefile ]; then \ + mv $(DEPS_DIR)/$(1)/Makefile $(DEPS_DIR)/$(1)/Makefile.orig.mk; \ + fi; \ + $(call erlang,$(call dep_autopatch_rebar.erl,$(1))); \ + rm -f $(DEPS_DIR)/$(1)/ebin/$(1).app +endef + +define dep_autopatch_rebar.erl + application:load(rebar), + application:set_env(rebar, log_level, debug), + rmemo:start(), + Conf1 = case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config)") of + {ok, Conf0} -> Conf0; + _ -> [] + end, + {Conf, OsEnv} = fun() -> + case filelib:is_file("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)") of + false -> {Conf1, []}; + true -> + Bindings0 = erl_eval:new_bindings(), + Bindings1 = erl_eval:add_binding('CONFIG', Conf1, Bindings0), + Bindings = erl_eval:add_binding('SCRIPT', "$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings1), + Before = os:getenv(), + {ok, Conf2} = file:script("$(call core_native_path,$(DEPS_DIR)/$1/rebar.config.script)", Bindings), + {Conf2, lists:foldl(fun(E, Acc) -> lists:delete(E, Acc) end, os:getenv(), Before)} + end + end(), + Write = fun (Text) -> + file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/Makefile)", Text, [append]) + end, + Escape = fun (Text) -> + re:replace(Text, "\\\\$$", "\$$$$", [global, {return, list}]) + end, + Write("IGNORE_DEPS += edown eper eunit_formatters meck node_package " + "rebar_lock_deps_plugin rebar_vsn_plugin reltool_util\n"), + Write("C_SRC_DIR = /path/do/not/exist\n"), + Write("C_SRC_TYPE = rebar\n"), + Write("DRV_CFLAGS = -fPIC\nexport DRV_CFLAGS\n"), + Write(["ERLANG_ARCH = ", rebar_utils:wordsize(), "\nexport ERLANG_ARCH\n"]), + ToList = fun + (V) when is_atom(V) -> atom_to_list(V); + (V) when is_list(V) -> "'\\"" ++ V ++ "\\"'" + end, + fun() -> + Write("ERLC_OPTS = +debug_info\nexport ERLC_OPTS\n"), + case lists:keyfind(erl_opts, 1, Conf) of + false -> ok; + {_, ErlOpts} -> + lists:foreach(fun + ({d, D}) -> + Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n"); + ({d, DKey, DVal}) -> + Write("ERLC_OPTS += -D" ++ ToList(DKey) ++ "=" ++ ToList(DVal) ++ "\n"); + ({i, I}) -> + Write(["ERLC_OPTS += -I ", I, "\n"]); + ({platform_define, Regex, D}) -> + case rebar_utils:is_arch(Regex) of + true -> Write("ERLC_OPTS += -D" ++ ToList(D) ++ "=1\n"); + false -> ok + end; + ({parse_transform, PT}) -> + Write("ERLC_OPTS += +'{parse_transform, " ++ ToList(PT) ++ "}'\n"); + (_) -> ok + end, ErlOpts) + end, + Write("\n") + end(), + GetHexVsn = fun(N, NP) -> + case file:consult("$(call core_native_path,$(DEPS_DIR)/$1/rebar.lock)") of + {ok, Lock} -> + io:format("~p~n", [Lock]), + case lists:keyfind("1.1.0", 1, Lock) of + {_, LockPkgs} -> + io:format("~p~n", [LockPkgs]), + case lists:keyfind(atom_to_binary(N, latin1), 1, LockPkgs) of + {_, {pkg, _, Vsn}, _} -> + io:format("~p~n", [Vsn]), + {N, {hex, NP, binary_to_list(Vsn)}}; + _ -> + false + end; + _ -> + false + end; + _ -> + false + end + end, + SemVsn = fun + ("~>" ++ S0) -> + S = case S0 of + " " ++ S1 -> S1; + _ -> S0 + end, + case length([ok || $$. 
<- S]) of + 0 -> S ++ ".0.0"; + 1 -> S ++ ".0"; + _ -> S + end; + (S) -> S + end, + fun() -> + File = case lists:keyfind(deps, 1, Conf) of + false -> []; + {_, Deps} -> + [begin case case Dep of + N when is_atom(N) -> GetHexVsn(N, N); + {N, S} when is_atom(N), is_list(S) -> {N, {hex, N, SemVsn(S)}}; + {N, {pkg, NP}} when is_atom(N) -> GetHexVsn(N, NP); + {N, S, {pkg, NP}} -> {N, {hex, NP, S}}; + {N, S} when is_tuple(S) -> {N, S}; + {N, _, S} -> {N, S}; + {N, _, S, _} -> {N, S}; + _ -> false + end of + false -> ok; + {Name, Source} -> + {Method, Repo, Commit} = case Source of + {hex, NPV, V} -> {hex, V, NPV}; + {git, R} -> {git, R, master}; + {M, R, {branch, C}} -> {M, R, C}; + {M, R, {ref, C}} -> {M, R, C}; + {M, R, {tag, C}} -> {M, R, C}; + {M, R, C} -> {M, R, C} + end, + Write(io_lib:format("DEPS += ~s\ndep_~s = ~s ~s ~s~n", [Name, Name, Method, Repo, Commit])) + end end || Dep <- Deps] + end + end(), + fun() -> + case lists:keyfind(erl_first_files, 1, Conf) of + false -> ok; + {_, Files} -> + Names = [[" ", case lists:reverse(F) of + "lre." ++ Elif -> lists:reverse(Elif); + "lrx." ++ Elif -> lists:reverse(Elif); + "lry." ++ Elif -> lists:reverse(Elif); + Elif -> lists:reverse(Elif) + end] || "src/" ++ F <- Files], + Write(io_lib:format("COMPILE_FIRST +=~s\n", [Names])) + end + end(), + Write("\n\nrebar_dep: preprocess pre-deps deps pre-app app\n"), + Write("\npreprocess::\n"), + Write("\npre-deps::\n"), + Write("\npre-app::\n"), + PatchHook = fun(Cmd) -> + Cmd2 = re:replace(Cmd, "^([g]?make)(.*)( -C.*)", "\\\\1\\\\3\\\\2", [{return, list}]), + case Cmd2 of + "make -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1); + "gmake -C" ++ Cmd1 -> "$$\(MAKE) -C" ++ Escape(Cmd1); + "make " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1); + "gmake " ++ Cmd1 -> "$$\(MAKE) -f Makefile.orig.mk " ++ Escape(Cmd1); + _ -> Escape(Cmd) + end + end, + fun() -> + case lists:keyfind(pre_hooks, 1, Conf) of + false -> ok; + {_, Hooks} -> + [case H of + {'get-deps', Cmd} -> + Write("\npre-deps::\n\t" ++ PatchHook(Cmd) ++ "\n"); + {compile, Cmd} -> + Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n"); + {Regex, compile, Cmd} -> + case rebar_utils:is_arch(Regex) of + true -> Write("\npre-app::\n\tCC=$$\(CC) " ++ PatchHook(Cmd) ++ "\n"); + false -> ok + end; + _ -> ok + end || H <- Hooks] + end + end(), + ShellToMk = fun(V0) -> + V1 = re:replace(V0, "[$$][(]", "$$\(shell ", [global]), + V = re:replace(V1, "([$$])(?![(])(\\\\w*)", "\\\\1(\\\\2)", [global]), + re:replace(V, "-Werror\\\\b", "", [{return, list}, global]) + end, + PortSpecs = fun() -> + case lists:keyfind(port_specs, 1, Conf) of + false -> + case filelib:is_dir("$(call core_native_path,$(DEPS_DIR)/$1/c_src)") of + false -> []; + true -> + [{"priv/" ++ proplists:get_value(so_name, Conf, "$(1)_drv.so"), + proplists:get_value(port_sources, Conf, ["c_src/*.c"]), []}] + end; + {_, Specs} -> + lists:flatten([case S of + {Output, Input} -> {ShellToMk(Output), Input, []}; + {Regex, Output, Input} -> + case rebar_utils:is_arch(Regex) of + true -> {ShellToMk(Output), Input, []}; + false -> [] + end; + {Regex, Output, Input, [{env, Env}]} -> + case rebar_utils:is_arch(Regex) of + true -> {ShellToMk(Output), Input, Env}; + false -> [] + end + end || S <- Specs]) + end + end(), + PortSpecWrite = fun (Text) -> + file:write_file("$(call core_native_path,$(DEPS_DIR)/$1/c_src/Makefile.erlang.mk)", Text, [append]) + end, + case PortSpecs of + [] -> ok; + _ -> + Write("\npre-app::\n\t@$$\(MAKE) --no-print-directory -f c_src/Makefile.erlang.mk\n"), + 
PortSpecWrite(io_lib:format("ERL_CFLAGS ?= -finline-functions -Wall -fPIC -I \\"~s/erts-~s/include\\" -I \\"~s\\"\n", + [code:root_dir(), erlang:system_info(version), code:lib_dir(erl_interface, include)])), + PortSpecWrite(io_lib:format("ERL_LDFLAGS ?= -L \\"~s\\" -lei\n", + [code:lib_dir(erl_interface, lib)])), + [PortSpecWrite(["\n", E, "\n"]) || E <- OsEnv], + FilterEnv = fun(Env) -> + lists:flatten([case E of + {_, _} -> E; + {Regex, K, V} -> + case rebar_utils:is_arch(Regex) of + true -> {K, V}; + false -> [] + end + end || E <- Env]) + end, + MergeEnv = fun(Env) -> + lists:foldl(fun ({K, V}, Acc) -> + case lists:keyfind(K, 1, Acc) of + false -> [{K, rebar_utils:expand_env_variable(V, K, "")}|Acc]; + {_, V0} -> [{K, rebar_utils:expand_env_variable(V, K, V0)}|Acc] + end + end, [], Env) + end, + PortEnv = case lists:keyfind(port_env, 1, Conf) of + false -> []; + {_, PortEnv0} -> FilterEnv(PortEnv0) + end, + PortSpec = fun ({Output, Input0, Env}) -> + filelib:ensure_dir("$(call core_native_path,$(DEPS_DIR)/$1/)" ++ Output), + Input = [[" ", I] || I <- Input0], + PortSpecWrite([ + [["\n", K, " = ", ShellToMk(V)] || {K, V} <- lists:reverse(MergeEnv(PortEnv))], + case $(PLATFORM) of + darwin -> "\n\nLDFLAGS += -flat_namespace -undefined suppress"; + _ -> "" + end, + "\n\nall:: ", Output, "\n\t@:\n\n", + "%.o: %.c\n\t$$\(CC) -c -o $$\@ $$\< $$\(CFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n", + "%.o: %.C\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n", + "%.o: %.cc\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n", + "%.o: %.cpp\n\t$$\(CXX) -c -o $$\@ $$\< $$\(CXXFLAGS) $$\(ERL_CFLAGS) $$\(DRV_CFLAGS) $$\(EXE_CFLAGS)\n\n", + [[Output, ": ", K, " += ", ShellToMk(V), "\n"] || {K, V} <- lists:reverse(MergeEnv(FilterEnv(Env)))], + Output, ": $$\(foreach ext,.c .C .cc .cpp,", + "$$\(patsubst %$$\(ext),%.o,$$\(filter %$$\(ext),$$\(wildcard", Input, "))))\n", + "\t$$\(CC) -o $$\@ $$\? 
$$\(LDFLAGS) $$\(ERL_LDFLAGS) $$\(DRV_LDFLAGS) $$\(EXE_LDFLAGS)", + case {filename:extension(Output), $(PLATFORM)} of + {[], _} -> "\n"; + {_, darwin} -> "\n"; + _ -> " -shared\n" + end]) + end, + [PortSpec(S) || S <- PortSpecs] + end, + fun() -> + case lists:keyfind(plugins, 1, Conf) of + false -> ok; + {_, Plugins0} -> + Plugins = [P || P <- Plugins0, is_tuple(P)], + case lists:keyfind('lfe-compile', 1, Plugins) of + false -> ok; + _ -> Write("\nBUILD_DEPS = lfe lfe.mk\ndep_lfe.mk = git https://github.com/ninenines/lfe.mk master\nDEP_PLUGINS = lfe.mk\n") + end + end + end(), + Write("\ninclude $$\(if $$\(ERLANG_MK_FILENAME),$$\(ERLANG_MK_FILENAME),erlang.mk)"), + RunPlugin = fun(Plugin, Step) -> + case erlang:function_exported(Plugin, Step, 2) of + false -> ok; + true -> + c:cd("$(call core_native_path,$(DEPS_DIR)/$1/)"), + Ret = Plugin:Step({config, "", Conf, dict:new(), dict:new(), dict:new(), + dict:store(base_dir, "", dict:new())}, undefined), + io:format("rebar plugin ~p step ~p ret ~p~n", [Plugin, Step, Ret]) + end + end, + fun() -> + case lists:keyfind(plugins, 1, Conf) of + false -> ok; + {_, Plugins0} -> + Plugins = [P || P <- Plugins0, is_atom(P)], + [begin + case lists:keyfind(deps, 1, Conf) of + false -> ok; + {_, Deps} -> + case lists:keyfind(P, 1, Deps) of + false -> ok; + _ -> + Path = "$(call core_native_path,$(DEPS_DIR)/)" ++ atom_to_list(P), + io:format("~s", [os:cmd("$(MAKE) -C $(call core_native_path,$(DEPS_DIR)/$1) " ++ Path)]), + io:format("~s", [os:cmd("$(MAKE) -C " ++ Path ++ " IS_DEP=1")]), + code:add_patha(Path ++ "/ebin") + end + end + end || P <- Plugins], + [case code:load_file(P) of + {module, P} -> ok; + _ -> + case lists:keyfind(plugin_dir, 1, Conf) of + false -> ok; + {_, PluginsDir} -> + ErlFile = "$(call core_native_path,$(DEPS_DIR)/$1/)" ++ PluginsDir ++ "/" ++ atom_to_list(P) ++ ".erl", + {ok, P, Bin} = compile:file(ErlFile, [binary]), + {module, P} = code:load_binary(P, ErlFile, Bin) + end + end || P <- Plugins], + [RunPlugin(P, preprocess) || P <- Plugins], + [RunPlugin(P, pre_compile) || P <- Plugins], + [RunPlugin(P, compile) || P <- Plugins] + end + end(), + halt() +endef + +define dep_autopatch_appsrc_script.erl + AppSrc = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)", + AppSrcScript = AppSrc ++ ".script", + {ok, Conf0} = file:consult(AppSrc), + Bindings0 = erl_eval:new_bindings(), + Bindings1 = erl_eval:add_binding('CONFIG', Conf0, Bindings0), + Bindings = erl_eval:add_binding('SCRIPT', AppSrcScript, Bindings1), + Conf = case file:script(AppSrcScript, Bindings) of + {ok, [C]} -> C; + {ok, C} -> C + end, + ok = file:write_file(AppSrc, io_lib:format("~p.~n", [Conf])), + halt() +endef + +define dep_autopatch_appsrc.erl + AppSrcOut = "$(call core_native_path,$(DEPS_DIR)/$1/src/$1.app.src)", + AppSrcIn = case filelib:is_regular(AppSrcOut) of false -> "$(call core_native_path,$(DEPS_DIR)/$1/ebin/$1.app)"; true -> AppSrcOut end, + case filelib:is_regular(AppSrcIn) of + false -> ok; + true -> + {ok, [{application, $(1), L0}]} = file:consult(AppSrcIn), + L1 = lists:keystore(modules, 1, L0, {modules, []}), + L2 = case lists:keyfind(vsn, 1, L1) of + {_, git} -> lists:keyreplace(vsn, 1, L1, {vsn, lists:droplast(os:cmd("git -C $(DEPS_DIR)/$1 describe --dirty --tags --always"))}); + {_, {cmd, _}} -> lists:keyreplace(vsn, 1, L1, {vsn, "cmd"}); + _ -> L1 + end, + L3 = case lists:keyfind(registered, 1, L2) of false -> [{registered, []}|L2]; _ -> L2 end, + ok = file:write_file(AppSrcOut, io_lib:format("~p.~n", [{application, $(1), L3}])), + case 
AppSrcOut of AppSrcIn -> ok; _ -> ok = file:delete(AppSrcIn) end + end, + halt() +endef + +define dep_fetch_git + git clone -q -n -- $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \ + cd $(DEPS_DIR)/$(call dep_name,$(1)) && git checkout -q $(call dep_commit,$(1)); +endef + +define dep_fetch_git-subfolder + mkdir -p $(ERLANG_MK_TMP)/git-subfolder; \ + git clone -q -n -- $(call dep_repo,$1) \ + $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1); \ + cd $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1) \ + && git checkout -q $(call dep_commit,$1); \ + ln -s $(ERLANG_MK_TMP)/git-subfolder/$(call dep_name,$1)/$(word 4,$(dep_$(1))) \ + $(DEPS_DIR)/$(call dep_name,$1); +endef + +define dep_fetch_git-submodule + git submodule update --init -- $(DEPS_DIR)/$1; +endef + +define dep_fetch_hg + hg clone -q -U $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); \ + cd $(DEPS_DIR)/$(call dep_name,$(1)) && hg update -q $(call dep_commit,$(1)); +endef + +define dep_fetch_svn + svn checkout -q $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); +endef + +define dep_fetch_cp + cp -R $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); +endef + +define dep_fetch_ln + ln -s $(call dep_repo,$(1)) $(DEPS_DIR)/$(call dep_name,$(1)); +endef + +# @todo Handle errors. +define dep_fetch_hex.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = hex_core:default_config(), + {ok, {200, #{}, Tarball}} = hex_repo:get_tarball(Config, <<"$(strip $3)">>, <<"$(strip $2)">>), + {ok, #{}} = hex_tarball:unpack(Tarball, "$(DEPS_DIR)/$1"), + halt(0) +endef + +# Hex only has a package version. No need to look in the Erlang.mk packages. +define dep_fetch_hex + if [ ! -e $(DEPS_DIR)/hex_core ]; then \ + echo "Error: Dependency hex_core missing. BUILD_DEPS += hex_core to fix." >&2; \ + exit 81; \ + fi; \ + if [ ! -e $(DEPS_DIR)/hex_core/ebin/dep_built ]; then \ + $(MAKE) -C $(DEPS_DIR)/hex_core IS_DEP=1; \ + touch $(DEPS_DIR)/hex_core/ebin/dep_built; \ + fi; \ + $(call erlang,$(call dep_fetch_hex.erl,$1,$(word 2,$(dep_$1)),\ + $(if $(word 3,$(dep_$1)),$(word 3,$(dep_$1)),$1))); +endef + +define dep_fetch_fail + echo "Error: Unknown or invalid dependency: $(1)." >&2; \ + exit 78; +endef + +# Kept for compatibility purposes with older Erlang.mk configuration. +define dep_fetch_legacy + $(warning WARNING: '$(1)' dependency configuration uses deprecated format.) \ + git clone -q -n -- $(word 1,$(dep_$(1))) $(DEPS_DIR)/$(1); \ + cd $(DEPS_DIR)/$(1) && git checkout -q $(if $(word 2,$(dep_$(1))),$(word 2,$(dep_$(1))),master); +endef + +define dep_target +$(DEPS_DIR)/$(call dep_name,$1): | $(ERLANG_MK_TMP) + $(eval DEP_NAME := $(call dep_name,$1)) + $(eval DEP_STR := $(if $(filter $1,$(DEP_NAME)),$1,"$1 ($(DEP_NAME))")) + $(verbose) if test -d $(APPS_DIR)/$(DEP_NAME); then \ + echo "Error: Dependency" $(DEP_STR) "conflicts with application found in $(APPS_DIR)/$(DEP_NAME)." >&2; \ + exit 17; \ + fi + $(verbose) mkdir -p $(DEPS_DIR) + $(dep_verbose) $(call dep_fetch_$(strip $(call dep_fetch,$(1))),$(1)) + $(verbose) if [ -f $(DEPS_DIR)/$(1)/configure.ac -o -f $(DEPS_DIR)/$(1)/configure.in ] \ + && [ ! 
-f $(DEPS_DIR)/$(1)/configure ]; then \ + echo " AUTO " $(DEP_STR); \ + cd $(DEPS_DIR)/$(1) && autoreconf -Wall -vif -I m4; \ + fi + - $(verbose) if [ -f $(DEPS_DIR)/$(DEP_NAME)/configure ]; then \ + echo " CONF " $(DEP_STR); \ + cd $(DEPS_DIR)/$(DEP_NAME) && ./configure; \ + fi +ifeq ($(filter $(1),$(NO_AUTOPATCH)),) + $(verbose) $$(MAKE) --no-print-directory autopatch-$(DEP_NAME) +endif + +.PHONY: autopatch-$(call dep_name,$1) + +autopatch-$(call dep_name,$1):: + $(verbose) if [ "$(1)" = "amqp_client" -a "$(RABBITMQ_CLIENT_PATCH)" ]; then \ + if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \ + echo " PATCH Downloading rabbitmq-codegen"; \ + git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \ + fi; \ + if [ ! -d $(DEPS_DIR)/rabbitmq-server ]; then \ + echo " PATCH Downloading rabbitmq-server"; \ + git clone https://github.com/rabbitmq/rabbitmq-server.git $(DEPS_DIR)/rabbitmq-server; \ + fi; \ + ln -s $(DEPS_DIR)/amqp_client/deps/rabbit_common-0.0.0 $(DEPS_DIR)/rabbit_common; \ + elif [ "$(1)" = "rabbit" -a "$(RABBITMQ_SERVER_PATCH)" ]; then \ + if [ ! -d $(DEPS_DIR)/rabbitmq-codegen ]; then \ + echo " PATCH Downloading rabbitmq-codegen"; \ + git clone https://github.com/rabbitmq/rabbitmq-codegen.git $(DEPS_DIR)/rabbitmq-codegen; \ + fi \ + elif [ "$1" = "elixir" -a "$(ELIXIR_PATCH)" ]; then \ + ln -s lib/elixir/ebin $(DEPS_DIR)/elixir/; \ + else \ + $$(call dep_autopatch,$(call dep_name,$1)) \ + fi +endef + +$(foreach dep,$(BUILD_DEPS) $(DEPS),$(eval $(call dep_target,$(dep)))) + +ifndef IS_APP +clean:: clean-apps + +clean-apps: + $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \ + $(MAKE) -C $$dep clean IS_APP=1; \ + done + +distclean:: distclean-apps + +distclean-apps: + $(verbose) set -e; for dep in $(ALL_APPS_DIRS) ; do \ + $(MAKE) -C $$dep distclean IS_APP=1; \ + done +endif + +ifndef SKIP_DEPS +distclean:: distclean-deps + +distclean-deps: + $(gen_verbose) rm -rf $(DEPS_DIR) +endif + +# Forward-declare variables used in core/deps-tools.mk. This is required +# in case plugins use them. + +ERLANG_MK_RECURSIVE_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-deps-list.log +ERLANG_MK_RECURSIVE_DOC_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-doc-deps-list.log +ERLANG_MK_RECURSIVE_REL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-rel-deps-list.log +ERLANG_MK_RECURSIVE_TEST_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-test-deps-list.log +ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST = $(ERLANG_MK_TMP)/recursive-shell-deps-list.log + +ERLANG_MK_QUERY_DEPS_FILE = $(ERLANG_MK_TMP)/query-deps.log +ERLANG_MK_QUERY_DOC_DEPS_FILE = $(ERLANG_MK_TMP)/query-doc-deps.log +ERLANG_MK_QUERY_REL_DEPS_FILE = $(ERLANG_MK_TMP)/query-rel-deps.log +ERLANG_MK_QUERY_TEST_DEPS_FILE = $(ERLANG_MK_TMP)/query-test-deps.log +ERLANG_MK_QUERY_SHELL_DEPS_FILE = $(ERLANG_MK_TMP)/query-shell-deps.log + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: clean-app + +# Configuration. + +ERLC_OPTS ?= -Werror +debug_info +warn_export_vars +warn_shadow_vars \ + +warn_obsolete_guard # +bin_opt_info +warn_export_all +warn_missing_spec +COMPILE_FIRST ?= +COMPILE_FIRST_PATHS = $(addprefix src/,$(addsuffix .erl,$(COMPILE_FIRST))) +ERLC_EXCLUDE ?= +ERLC_EXCLUDE_PATHS = $(addprefix src/,$(addsuffix .erl,$(ERLC_EXCLUDE))) + +ERLC_ASN1_OPTS ?= + +ERLC_MIB_OPTS ?= +COMPILE_MIB_FIRST ?= +COMPILE_MIB_FIRST_PATHS = $(addprefix mibs/,$(addsuffix .mib,$(COMPILE_MIB_FIRST))) + +# Verbosity. 
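+
+# Example: build output verbosity is selected with the V variable; V=0
+# prints the short labels defined below, while V=2 traces each command
+# through `set -x`, e.g.:
+#
+#     make V=2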
+ +app_verbose_0 = @echo " APP " $(PROJECT); +app_verbose_2 = set -x; +app_verbose = $(app_verbose_$(V)) + +appsrc_verbose_0 = @echo " APP " $(PROJECT).app.src; +appsrc_verbose_2 = set -x; +appsrc_verbose = $(appsrc_verbose_$(V)) + +makedep_verbose_0 = @echo " DEPEND" $(PROJECT).d; +makedep_verbose_2 = set -x; +makedep_verbose = $(makedep_verbose_$(V)) + +erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\ + $(filter %.erl %.core,$(?F))); +erlc_verbose_2 = set -x; +erlc_verbose = $(erlc_verbose_$(V)) + +xyrl_verbose_0 = @echo " XYRL " $(filter %.xrl %.yrl,$(?F)); +xyrl_verbose_2 = set -x; +xyrl_verbose = $(xyrl_verbose_$(V)) + +asn1_verbose_0 = @echo " ASN1 " $(filter %.asn1,$(?F)); +asn1_verbose_2 = set -x; +asn1_verbose = $(asn1_verbose_$(V)) + +mib_verbose_0 = @echo " MIB " $(filter %.bin %.mib,$(?F)); +mib_verbose_2 = set -x; +mib_verbose = $(mib_verbose_$(V)) + +ifneq ($(wildcard src/),) + +# Targets. + +app:: $(if $(wildcard ebin/test),clean) deps + $(verbose) $(MAKE) --no-print-directory $(PROJECT).d + $(verbose) $(MAKE) --no-print-directory app-build + +ifeq ($(wildcard src/$(PROJECT_MOD).erl),) +define app_file +{application, '$(PROJECT)', [ + {description, "$(PROJECT_DESCRIPTION)"}, + {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP), + {id$(comma)$(space)"$(1)"}$(comma)) + {modules, [$(call comma_list,$(2))]}, + {registered, []}, + {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]}, + {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),) +]}. +endef +else +define app_file +{application, '$(PROJECT)', [ + {description, "$(PROJECT_DESCRIPTION)"}, + {vsn, "$(PROJECT_VERSION)"},$(if $(IS_DEP), + {id$(comma)$(space)"$(1)"}$(comma)) + {modules, [$(call comma_list,$(2))]}, + {registered, [$(call comma_list,$(PROJECT)_sup $(PROJECT_REGISTERED))]}, + {applications, [$(call comma_list,kernel stdlib $(OTP_DEPS) $(LOCAL_DEPS) $(foreach dep,$(DEPS),$(call dep_name,$(dep))))]}, + {mod, {$(PROJECT_MOD), []}}, + {env, $(subst \,\\,$(PROJECT_ENV))}$(if $(findstring {,$(PROJECT_APP_EXTRA_KEYS)),$(comma)$(newline)$(tab)$(subst \,\\,$(PROJECT_APP_EXTRA_KEYS)),) +]}. +endef +endif + +app-build: ebin/$(PROJECT).app + $(verbose) : + +# Source files. + +ALL_SRC_FILES := $(sort $(call core_find,src/,*)) + +ERL_FILES := $(filter %.erl,$(ALL_SRC_FILES)) +CORE_FILES := $(filter %.core,$(ALL_SRC_FILES)) + +# ASN.1 files. + +ifneq ($(wildcard asn1/),) +ASN1_FILES = $(sort $(call core_find,asn1/,*.asn1)) +ERL_FILES += $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES)))) + +define compile_asn1 + $(verbose) mkdir -p include/ + $(asn1_verbose) erlc -v -I include/ -o asn1/ +noobj $(ERLC_ASN1_OPTS) $(1) + $(verbose) mv asn1/*.erl src/ + -$(verbose) mv asn1/*.hrl include/ + $(verbose) mv asn1/*.asn1db include/ +endef + +$(PROJECT).d:: $(ASN1_FILES) + $(if $(strip $?),$(call compile_asn1,$?)) +endif + +# SNMP MIB files. + +ifneq ($(wildcard mibs/),) +MIB_FILES = $(sort $(call core_find,mibs/,*.mib)) + +$(PROJECT).d:: $(COMPILE_MIB_FIRST_PATHS) $(MIB_FILES) + $(verbose) mkdir -p include/ priv/mibs/ + $(mib_verbose) erlc -v $(ERLC_MIB_OPTS) -o priv/mibs/ -I priv/mibs/ $? + $(mib_verbose) erlc -o include/ -- $(addprefix priv/mibs/,$(patsubst %.mib,%.bin,$(notdir $?))) +endif + +# Leex and Yecc files. 
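+
+# Example (file name is illustrative): a lexer definition kept in
+# src/my_lexer.xrl is first turned into src/my_lexer.erl and then compiled
+# like any other Erlang module, following the rules below.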
+ +XRL_FILES := $(filter %.xrl,$(ALL_SRC_FILES)) +XRL_ERL_FILES = $(addprefix src/,$(patsubst %.xrl,%.erl,$(notdir $(XRL_FILES)))) +ERL_FILES += $(XRL_ERL_FILES) + +YRL_FILES := $(filter %.yrl,$(ALL_SRC_FILES)) +YRL_ERL_FILES = $(addprefix src/,$(patsubst %.yrl,%.erl,$(notdir $(YRL_FILES)))) +ERL_FILES += $(YRL_ERL_FILES) + +$(PROJECT).d:: $(XRL_FILES) $(YRL_FILES) + $(if $(strip $?),$(xyrl_verbose) erlc -v -o src/ $(YRL_ERLC_OPTS) $?) + +# Erlang and Core Erlang files. + +define makedep.erl + E = ets:new(makedep, [bag]), + G = digraph:new([acyclic]), + ErlFiles = lists:usort(string:tokens("$(ERL_FILES)", " ")), + DepsDir = "$(call core_native_path,$(DEPS_DIR))", + AppsDir = "$(call core_native_path,$(APPS_DIR))", + DepsDirsSrc = "$(if $(wildcard $(DEPS_DIR)/*/src), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/src)))", + DepsDirsInc = "$(if $(wildcard $(DEPS_DIR)/*/include), $(call core_native_path,$(wildcard $(DEPS_DIR)/*/include)))", + AppsDirsSrc = "$(if $(wildcard $(APPS_DIR)/*/src), $(call core_native_path,$(wildcard $(APPS_DIR)/*/src)))", + AppsDirsInc = "$(if $(wildcard $(APPS_DIR)/*/include), $(call core_native_path,$(wildcard $(APPS_DIR)/*/include)))", + DepsDirs = lists:usort(string:tokens(DepsDirsSrc++DepsDirsInc, " ")), + AppsDirs = lists:usort(string:tokens(AppsDirsSrc++AppsDirsInc, " ")), + Modules = [{list_to_atom(filename:basename(F, ".erl")), F} || F <- ErlFiles], + Add = fun (Mod, Dep) -> + case lists:keyfind(Dep, 1, Modules) of + false -> ok; + {_, DepFile} -> + {_, ModFile} = lists:keyfind(Mod, 1, Modules), + ets:insert(E, {ModFile, DepFile}), + digraph:add_vertex(G, Mod), + digraph:add_vertex(G, Dep), + digraph:add_edge(G, Mod, Dep) + end + end, + AddHd = fun (F, Mod, DepFile) -> + case file:open(DepFile, [read]) of + {error, enoent} -> + ok; + {ok, Fd} -> + {_, ModFile} = lists:keyfind(Mod, 1, Modules), + case ets:match(E, {ModFile, DepFile}) of + [] -> + ets:insert(E, {ModFile, DepFile}), + F(F, Fd, Mod,0); + _ -> ok + end + end + end, + SearchHrl = fun + F(_Hrl, []) -> {error,enoent}; + F(Hrl, [Dir|Dirs]) -> + HrlF = filename:join([Dir,Hrl]), + case filelib:is_file(HrlF) of + true -> + {ok, HrlF}; + false -> F(Hrl,Dirs) + end + end, + Attr = fun + (_F, Mod, behavior, Dep) -> + Add(Mod, Dep); + (_F, Mod, behaviour, Dep) -> + Add(Mod, Dep); + (_F, Mod, compile, {parse_transform, Dep}) -> + Add(Mod, Dep); + (_F, Mod, compile, Opts) when is_list(Opts) -> + case proplists:get_value(parse_transform, Opts) of + undefined -> ok; + Dep -> Add(Mod, Dep) + end; + (F, Mod, include, Hrl) -> + case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of + {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl); + {error, _} -> false + end; + (F, Mod, include_lib, Hrl) -> + case SearchHrl(Hrl, ["src", "include",AppsDir,DepsDir]++AppsDirs++DepsDirs) of + {ok, FoundHrl} -> AddHd(F, Mod, FoundHrl); + {error, _} -> false + end; + (F, Mod, import, {Imp, _}) -> + IsFile = + case lists:keyfind(Imp, 1, Modules) of + false -> false; + {_, FilePath} -> filelib:is_file(FilePath) + end, + case IsFile of + false -> ok; + true -> Add(Mod, Imp) + end; + (_, _, _, _) -> ok + end, + MakeDepend = fun + (F, Fd, Mod, StartLocation) -> + {ok, Filename} = file:pid2name(Fd), + case io:parse_erl_form(Fd, undefined, StartLocation) of + {ok, AbsData, EndLocation} -> + case AbsData of + {attribute, _, Key, Value} -> + Attr(F, Mod, Key, Value), + F(F, Fd, Mod, EndLocation); + _ -> F(F, Fd, Mod, EndLocation) + end; + {eof, _ } -> file:close(Fd); + {error, ErrorDescription } -> + file:close(Fd); + 
{error, ErrorInfo, ErrorLocation} -> + F(F, Fd, Mod, ErrorLocation) + end, + ok + end, + [begin + Mod = list_to_atom(filename:basename(F, ".erl")), + case file:open(F, [read]) of + {ok, Fd} -> MakeDepend(MakeDepend, Fd, Mod,0); + {error, enoent} -> ok + end + end || F <- ErlFiles], + Depend = sofs:to_external(sofs:relation_to_family(sofs:relation(ets:tab2list(E)))), + CompileFirst = [X || X <- lists:reverse(digraph_utils:topsort(G)), [] =/= digraph:in_neighbours(G, X)], + TargetPath = fun(Target) -> + case lists:keyfind(Target, 1, Modules) of + false -> ""; + {_, DepFile} -> + DirSubname = tl(string:tokens(filename:dirname(DepFile), "/")), + string:join(DirSubname ++ [atom_to_list(Target)], "/") + end + end, + Output0 = [ + "# Generated by Erlang.mk. Edit at your own risk!\n\n", + [[F, "::", [[" ", D] || D <- Deps], "; @touch \$$@\n"] || {F, Deps} <- Depend], + "\nCOMPILE_FIRST +=", [[" ", TargetPath(CF)] || CF <- CompileFirst], "\n" + ], + Output = case "รฉ" of + [233] -> unicode:characters_to_binary(Output0); + _ -> Output0 + end, + ok = file:write_file("$(1)", Output), + halt() +endef + +ifeq ($(if $(NO_MAKEDEP),$(wildcard $(PROJECT).d),),) +$(PROJECT).d:: $(ERL_FILES) $(call core_find,include/,*.hrl) $(MAKEFILE_LIST) + $(makedep_verbose) $(call erlang,$(call makedep.erl,$@)) +endif + +ifeq ($(IS_APP)$(IS_DEP),) +ifneq ($(words $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES)),0) +# Rebuild everything when the Makefile changes. +$(ERLANG_MK_TMP)/last-makefile-change: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP) + $(verbose) if test -f $@; then \ + touch $(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES); \ + touch -c $(PROJECT).d; \ + fi + $(verbose) touch $@ + +$(ERL_FILES) $(CORE_FILES) $(ASN1_FILES) $(MIB_FILES) $(XRL_FILES) $(YRL_FILES):: $(ERLANG_MK_TMP)/last-makefile-change +ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change +endif +endif + +$(PROJECT).d:: + $(verbose) : + +include $(wildcard $(PROJECT).d) + +ebin/$(PROJECT).app:: ebin/ + +ebin/: + $(verbose) mkdir -p ebin/ + +define compile_erl + $(erlc_verbose) erlc -v $(if $(IS_DEP),$(filter-out -Werror,$(ERLC_OPTS)),$(ERLC_OPTS)) -o ebin/ \ + -pa ebin/ -I include/ $(filter-out $(ERLC_EXCLUDE_PATHS),$(COMPILE_FIRST_PATHS) $(1)) +endef + +define validate_app_file + case file:consult("ebin/$(PROJECT).app") of + {ok, _} -> halt(); + _ -> halt(1) + end +endef + +ebin/$(PROJECT).app:: $(ERL_FILES) $(CORE_FILES) $(wildcard src/$(PROJECT).app.src) + $(eval FILES_TO_COMPILE := $(filter-out src/$(PROJECT).app.src,$?)) + $(if $(strip $(FILES_TO_COMPILE)),$(call compile_erl,$(FILES_TO_COMPILE))) +# Older git versions do not have the --first-parent flag. Do without in that case. + $(eval GITDESCRIBE := $(shell git describe --dirty --abbrev=7 --tags --always --first-parent 2>/dev/null \ + || git describe --dirty --abbrev=7 --tags --always 2>/dev/null || true)) + $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \ + $(filter-out $(ERLC_EXCLUDE_PATHS),$(ERL_FILES) $(CORE_FILES) $(BEAM_FILES))))))) +ifeq ($(wildcard src/$(PROJECT).app.src),) + $(app_verbose) printf '$(subst %,%%,$(subst $(newline),\n,$(subst ','\'',$(call app_file,$(GITDESCRIBE),$(MODULES)))))' \ + > ebin/$(PROJECT).app + $(verbose) if ! $(call erlang,$(call validate_app_file)); then \ + echo "The .app file produced is invalid. Please verify the value of PROJECT_ENV." 
>&2; \ + exit 1; \ + fi +else + $(verbose) if [ -z "$$(grep -e '^[^%]*{\s*modules\s*,' src/$(PROJECT).app.src)" ]; then \ + echo "Empty modules entry not found in $(PROJECT).app.src. Please consult the erlang.mk documentation for instructions." >&2; \ + exit 1; \ + fi + $(appsrc_verbose) cat src/$(PROJECT).app.src \ + | sed "s/{[[:space:]]*modules[[:space:]]*,[[:space:]]*\[\]}/{modules, \[$(call comma_list,$(MODULES))\]}/" \ + | sed "s/{id,[[:space:]]*\"git\"}/{id, \"$(subst /,\/,$(GITDESCRIBE))\"}/" \ + > ebin/$(PROJECT).app +endif +ifneq ($(wildcard src/$(PROJECT).appup),) + $(verbose) cp src/$(PROJECT).appup ebin/ +endif + +clean:: clean-app + +clean-app: + $(gen_verbose) rm -rf $(PROJECT).d ebin/ priv/mibs/ $(XRL_ERL_FILES) $(YRL_ERL_FILES) \ + $(addprefix include/,$(patsubst %.mib,%.hrl,$(notdir $(MIB_FILES)))) \ + $(addprefix include/,$(patsubst %.asn1,%.hrl,$(notdir $(ASN1_FILES)))) \ + $(addprefix include/,$(patsubst %.asn1,%.asn1db,$(notdir $(ASN1_FILES)))) \ + $(addprefix src/,$(patsubst %.asn1,%.erl,$(notdir $(ASN1_FILES)))) + +endif + +# Copyright (c) 2016, Loรฏc Hoguin +# Copyright (c) 2015, Viktor Sรถderqvist +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: docs-deps + +# Configuration. + +ALL_DOC_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(DOC_DEPS)) + +# Targets. + +$(foreach dep,$(DOC_DEPS),$(eval $(call dep_target,$(dep)))) + +ifneq ($(SKIP_DEPS),) +doc-deps: +else +doc-deps: $(ALL_DOC_DEPS_DIRS) + $(verbose) set -e; for dep in $(ALL_DOC_DEPS_DIRS) ; do $(MAKE) -C $$dep IS_DEP=1; done +endif + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: rel-deps + +# Configuration. + +ALL_REL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(REL_DEPS)) + +# Targets. + +$(foreach dep,$(REL_DEPS),$(eval $(call dep_target,$(dep)))) + +ifneq ($(SKIP_DEPS),) +rel-deps: +else +rel-deps: $(ALL_REL_DEPS_DIRS) + $(verbose) set -e; for dep in $(ALL_REL_DEPS_DIRS) ; do $(MAKE) -C $$dep; done +endif + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: test-deps test-dir test-build clean-test-dir + +# Configuration. + +TEST_DIR ?= $(CURDIR)/test + +ALL_TEST_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(TEST_DEPS)) + +TEST_ERLC_OPTS ?= +debug_info +warn_export_vars +warn_shadow_vars +warn_obsolete_guard +TEST_ERLC_OPTS += -DTEST=1 + +# Targets. + +$(foreach dep,$(TEST_DEPS),$(eval $(call dep_target,$(dep)))) + +ifneq ($(SKIP_DEPS),) +test-deps: +else +test-deps: $(ALL_TEST_DEPS_DIRS) + $(verbose) set -e; for dep in $(ALL_TEST_DEPS_DIRS) ; do \ + if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \ + :; \ + else \ + $(MAKE) -C $$dep IS_DEP=1; \ + if [ ! 
-L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \ + fi \ + done +endif + +ifneq ($(wildcard $(TEST_DIR)),) +test-dir: $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build + @: + +test_erlc_verbose_0 = @echo " ERLC " $(filter-out $(patsubst %,%.erl,$(ERLC_EXCLUDE)),\ + $(filter %.erl %.core,$(notdir $(FILES_TO_COMPILE)))); +test_erlc_verbose_2 = set -x; +test_erlc_verbose = $(test_erlc_verbose_$(V)) + +define compile_test_erl + $(test_erlc_verbose) erlc -v $(TEST_ERLC_OPTS) -o $(TEST_DIR) \ + -pa ebin/ -I include/ $(1) +endef + +ERL_TEST_FILES = $(call core_find,$(TEST_DIR)/,*.erl) +$(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build: $(ERL_TEST_FILES) $(MAKEFILE_LIST) + $(eval FILES_TO_COMPILE := $(if $(filter $(MAKEFILE_LIST),$?),$(filter $(ERL_TEST_FILES),$^),$?)) + $(if $(strip $(FILES_TO_COMPILE)),$(call compile_test_erl,$(FILES_TO_COMPILE)) && touch $@) +endif + +test-build:: IS_TEST=1 +test-build:: ERLC_OPTS=$(TEST_ERLC_OPTS) +test-build:: $(if $(wildcard src),$(if $(wildcard ebin/test),,clean)) $(if $(IS_APP),,deps test-deps) +# We already compiled everything when IS_APP=1. +ifndef IS_APP +ifneq ($(wildcard src),) + $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" + $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" + $(gen_verbose) touch ebin/test +endif +ifneq ($(wildcard $(TEST_DIR)),) + $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" +endif +endif + +# Roughly the same as test-build, but when IS_APP=1. +# We only care about compiling the current application. +ifdef IS_APP +test-build-app:: ERLC_OPTS=$(TEST_ERLC_OPTS) +test-build-app:: deps test-deps +ifneq ($(wildcard src),) + $(verbose) $(MAKE) --no-print-directory $(PROJECT).d ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" + $(verbose) $(MAKE) --no-print-directory app-build ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" + $(gen_verbose) touch ebin/test +endif +ifneq ($(wildcard $(TEST_DIR)),) + $(verbose) $(MAKE) --no-print-directory test-dir ERLC_OPTS="$(call escape_dquotes,$(TEST_ERLC_OPTS))" +endif +endif + +clean:: clean-test-dir + +clean-test-dir: +ifneq ($(wildcard $(TEST_DIR)/*.beam),) + $(gen_verbose) rm -f $(TEST_DIR)/*.beam $(ERLANG_MK_TMP)/$(PROJECT).last-testdir-build +endif + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: rebar.config + +# We strip out -Werror because we don't want to fail due to +# warnings when used as a dependency. + +compat_prepare_erlc_opts = $(shell echo "$1" | sed 's/, */,/g') + +define compat_convert_erlc_opts +$(if $(filter-out -Werror,$1),\ + $(if $(findstring +,$1),\ + $(shell echo $1 | cut -b 2-))) +endef + +define compat_erlc_opts_to_list +[$(call comma_list,$(foreach o,$(call compat_prepare_erlc_opts,$1),$(call compat_convert_erlc_opts,$o)))] +endef + +define compat_rebar_config +{deps, [ +$(call comma_list,$(foreach d,$(DEPS),\ + $(if $(filter hex,$(call dep_fetch,$d)),\ + {$(call dep_name,$d)$(comma)"$(call dep_repo,$d)"},\ + {$(call dep_name,$d)$(comma)".*"$(comma){git,"$(call dep_repo,$d)"$(comma)"$(call dep_commit,$d)"}}))) +]}. +{erl_opts, $(call compat_erlc_opts_to_list,$(ERLC_OPTS))}. +endef + +rebar.config: + $(gen_verbose) $(call core_render,compat_rebar_config,rebar.config) + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. 
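+
+# The AsciiDoc targets below only become active when asciideck is listed as
+# a dependency, for instance by adding this to the project Makefile:
+#
+#     DOC_DEPS = asciideck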
+ +ifeq ($(filter asciideck,$(DEPS) $(DOC_DEPS)),asciideck) + +.PHONY: asciidoc asciidoc-guide asciidoc-manual install-asciidoc distclean-asciidoc-guide distclean-asciidoc-manual + +# Core targets. + +docs:: asciidoc + +distclean:: distclean-asciidoc-guide distclean-asciidoc-manual + +# Plugin-specific targets. + +asciidoc: asciidoc-guide asciidoc-manual + +# User guide. + +ifeq ($(wildcard doc/src/guide/book.asciidoc),) +asciidoc-guide: +else +asciidoc-guide: distclean-asciidoc-guide doc-deps + a2x -v -f pdf doc/src/guide/book.asciidoc && mv doc/src/guide/book.pdf doc/guide.pdf + a2x -v -f chunked doc/src/guide/book.asciidoc && mv doc/src/guide/book.chunked/ doc/html/ + +distclean-asciidoc-guide: + $(gen_verbose) rm -rf doc/html/ doc/guide.pdf +endif + +# Man pages. + +ASCIIDOC_MANUAL_FILES := $(wildcard doc/src/manual/*.asciidoc) + +ifeq ($(ASCIIDOC_MANUAL_FILES),) +asciidoc-manual: +else + +# Configuration. + +MAN_INSTALL_PATH ?= /usr/local/share/man +MAN_SECTIONS ?= 3 7 +MAN_PROJECT ?= $(shell echo $(PROJECT) | sed 's/^./\U&\E/') +MAN_VERSION ?= $(PROJECT_VERSION) + +# Plugin-specific targets. + +define asciidoc2man.erl +try + [begin + io:format(" ADOC ~s~n", [F]), + ok = asciideck:to_manpage(asciideck:parse_file(F), #{ + compress => gzip, + outdir => filename:dirname(F), + extra2 => "$(MAN_PROJECT) $(MAN_VERSION)", + extra3 => "$(MAN_PROJECT) Function Reference" + }) + end || F <- [$(shell echo $(addprefix $(comma)\",$(addsuffix \",$1)) | sed 's/^.//')]], + halt(0) +catch C:E -> + io:format("Exception ~p:~p~nStacktrace: ~p~n", [C, E, erlang:get_stacktrace()]), + halt(1) +end. +endef + +asciidoc-manual:: doc-deps + +asciidoc-manual:: $(ASCIIDOC_MANUAL_FILES) + $(gen_verbose) $(call erlang,$(call asciidoc2man.erl,$?)) + $(verbose) $(foreach s,$(MAN_SECTIONS),mkdir -p doc/man$s/ && mv doc/src/manual/*.$s.gz doc/man$s/;) + +install-docs:: install-asciidoc + +install-asciidoc: asciidoc-manual + $(foreach s,$(MAN_SECTIONS),\ + mkdir -p $(MAN_INSTALL_PATH)/man$s/ && \ + install -g `id -g` -o `id -u` -m 0644 doc/man$s/*.gz $(MAN_INSTALL_PATH)/man$s/;) + +distclean-asciidoc-manual: + $(gen_verbose) rm -rf $(addprefix doc/man,$(MAN_SECTIONS)) +endif +endif + +# Copyright (c) 2014-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: bootstrap bootstrap-lib bootstrap-rel new list-templates + +# Core targets. + +help:: + $(verbose) printf "%s\n" "" \ + "Bootstrap targets:" \ + " bootstrap Generate a skeleton of an OTP application" \ + " bootstrap-lib Generate a skeleton of an OTP library" \ + " bootstrap-rel Generate the files needed to build a release" \ + " new-app in=NAME Create a new local OTP application NAME" \ + " new-lib in=NAME Create a new local OTP library NAME" \ + " new t=TPL n=NAME Generate a module NAME based on the template TPL" \ + " new t=T n=N in=APP Generate a module NAME based on the template TPL in APP" \ + " list-templates List available templates" + +# Bootstrap templates. + +define bs_appsrc +{application, $p, [ + {description, ""}, + {vsn, "0.1.0"}, + {id, "git"}, + {modules, []}, + {registered, []}, + {applications, [ + kernel, + stdlib + ]}, + {mod, {$p_app, []}}, + {env, []} +]}. +endef + +define bs_appsrc_lib +{application, $p, [ + {description, ""}, + {vsn, "0.1.0"}, + {id, "git"}, + {modules, []}, + {registered, []}, + {applications, [ + kernel, + stdlib + ]} +]}. +endef + +# To prevent autocompletion issues with ZSH, we add "include erlang.mk" +# separately during the actual bootstrap. 
+define bs_Makefile +PROJECT = $p +PROJECT_DESCRIPTION = New project +PROJECT_VERSION = 0.1.0 +$(if $(SP), +# Whitespace to be used when creating files from templates. +SP = $(SP) +) +endef + +define bs_apps_Makefile +PROJECT = $p +PROJECT_DESCRIPTION = New project +PROJECT_VERSION = 0.1.0 +$(if $(SP), +# Whitespace to be used when creating files from templates. +SP = $(SP) +) +# Make sure we know where the applications are located. +ROOT_DIR ?= $(call core_relpath,$(dir $(ERLANG_MK_FILENAME)),$(APPS_DIR)/app) +APPS_DIR ?= .. +DEPS_DIR ?= $(call core_relpath,$(DEPS_DIR),$(APPS_DIR)/app) + +include $$(ROOT_DIR)/erlang.mk +endef + +define bs_app +-module($p_app). +-behaviour(application). + +-export([start/2]). +-export([stop/1]). + +start(_Type, _Args) -> + $p_sup:start_link(). + +stop(_State) -> + ok. +endef + +define bs_relx_config +{release, {$p_release, "1"}, [$p, sasl, runtime_tools]}. +{extended_start_script, true}. +{sys_config, "config/sys.config"}. +{vm_args, "config/vm.args"}. +endef + +define bs_sys_config +[ +]. +endef + +define bs_vm_args +-name $p@127.0.0.1 +-setcookie $p +-heart +endef + +# Normal templates. + +define tpl_supervisor +-module($(n)). +-behaviour(supervisor). + +-export([start_link/0]). +-export([init/1]). + +start_link() -> + supervisor:start_link({local, ?MODULE}, ?MODULE, []). + +init([]) -> + Procs = [], + {ok, {{one_for_one, 1, 5}, Procs}}. +endef + +define tpl_gen_server +-module($(n)). +-behaviour(gen_server). + +%% API. +-export([start_link/0]). + +%% gen_server. +-export([init/1]). +-export([handle_call/3]). +-export([handle_cast/2]). +-export([handle_info/2]). +-export([terminate/2]). +-export([code_change/3]). + +-record(state, { +}). + +%% API. + +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_server:start_link(?MODULE, [], []). + +%% gen_server. + +init([]) -> + {ok, #state{}}. + +handle_call(_Request, _From, State) -> + {reply, ignored, State}. + +handle_cast(_Msg, State) -> + {noreply, State}. + +handle_info(_Info, State) -> + {noreply, State}. + +terminate(_Reason, _State) -> + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. +endef + +define tpl_module +-module($(n)). +-export([]). +endef + +define tpl_cowboy_http +-module($(n)). +-behaviour(cowboy_http_handler). + +-export([init/3]). +-export([handle/2]). +-export([terminate/3]). + +-record(state, { +}). + +init(_, Req, _Opts) -> + {ok, Req, #state{}}. + +handle(Req, State=#state{}) -> + {ok, Req2} = cowboy_req:reply(200, Req), + {ok, Req2, State}. + +terminate(_Reason, _Req, _State) -> + ok. +endef + +define tpl_gen_fsm +-module($(n)). +-behaviour(gen_fsm). + +%% API. +-export([start_link/0]). + +%% gen_fsm. +-export([init/1]). +-export([state_name/2]). +-export([handle_event/3]). +-export([state_name/3]). +-export([handle_sync_event/4]). +-export([handle_info/3]). +-export([terminate/3]). +-export([code_change/4]). + +-record(state, { +}). + +%% API. + +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_fsm:start_link(?MODULE, [], []). + +%% gen_fsm. + +init([]) -> + {ok, state_name, #state{}}. + +state_name(_Event, StateData) -> + {next_state, state_name, StateData}. + +handle_event(_Event, StateName, StateData) -> + {next_state, StateName, StateData}. + +state_name(_Event, _From, StateData) -> + {reply, ignored, state_name, StateData}. + +handle_sync_event(_Event, _From, StateName, StateData) -> + {reply, ignored, StateName, StateData}. + +handle_info(_Info, StateName, StateData) -> + {next_state, StateName, StateData}. 
+ +terminate(_Reason, _StateName, _StateData) -> + ok. + +code_change(_OldVsn, StateName, StateData, _Extra) -> + {ok, StateName, StateData}. +endef + +define tpl_gen_statem +-module($(n)). +-behaviour(gen_statem). + +%% API. +-export([start_link/0]). + +%% gen_statem. +-export([callback_mode/0]). +-export([init/1]). +-export([state_name/3]). +-export([handle_event/4]). +-export([terminate/3]). +-export([code_change/4]). + +-record(state, { +}). + +%% API. + +-spec start_link() -> {ok, pid()}. +start_link() -> + gen_statem:start_link(?MODULE, [], []). + +%% gen_statem. + +callback_mode() -> + state_functions. + +init([]) -> + {ok, state_name, #state{}}. + +state_name(_EventType, _EventData, StateData) -> + {next_state, state_name, StateData}. + +handle_event(_EventType, _EventData, StateName, StateData) -> + {next_state, StateName, StateData}. + +terminate(_Reason, _StateName, _StateData) -> + ok. + +code_change(_OldVsn, StateName, StateData, _Extra) -> + {ok, StateName, StateData}. +endef + +define tpl_cowboy_loop +-module($(n)). +-behaviour(cowboy_loop_handler). + +-export([init/3]). +-export([info/3]). +-export([terminate/3]). + +-record(state, { +}). + +init(_, Req, _Opts) -> + {loop, Req, #state{}, 5000, hibernate}. + +info(_Info, Req, State) -> + {loop, Req, State, hibernate}. + +terminate(_Reason, _Req, _State) -> + ok. +endef + +define tpl_cowboy_rest +-module($(n)). + +-export([init/3]). +-export([content_types_provided/2]). +-export([get_html/2]). + +init(_, _Req, _Opts) -> + {upgrade, protocol, cowboy_rest}. + +content_types_provided(Req, State) -> + {[{{<<"text">>, <<"html">>, '*'}, get_html}], Req, State}. + +get_html(Req, State) -> + {<<"This is REST!">>, Req, State}. +endef + +define tpl_cowboy_ws +-module($(n)). +-behaviour(cowboy_websocket_handler). + +-export([init/3]). +-export([websocket_init/3]). +-export([websocket_handle/3]). +-export([websocket_info/3]). +-export([websocket_terminate/3]). + +-record(state, { +}). + +init(_, _, _) -> + {upgrade, protocol, cowboy_websocket}. + +websocket_init(_, Req, _Opts) -> + Req2 = cowboy_req:compact(Req), + {ok, Req2, #state{}}. + +websocket_handle({text, Data}, Req, State) -> + {reply, {text, Data}, Req, State}; +websocket_handle({binary, Data}, Req, State) -> + {reply, {binary, Data}, Req, State}; +websocket_handle(_Frame, Req, State) -> + {ok, Req, State}. + +websocket_info(_Info, Req, State) -> + {ok, Req, State}. + +websocket_terminate(_Reason, _Req, _State) -> + ok. +endef + +define tpl_ranch_protocol +-module($(n)). +-behaviour(ranch_protocol). + +-export([start_link/4]). +-export([init/4]). + +-type opts() :: []. +-export_type([opts/0]). + +-record(state, { + socket :: inet:socket(), + transport :: module() +}). + +start_link(Ref, Socket, Transport, Opts) -> + Pid = spawn_link(?MODULE, init, [Ref, Socket, Transport, Opts]), + {ok, Pid}. + +-spec init(ranch:ref(), inet:socket(), module(), opts()) -> ok. +init(Ref, Socket, Transport, _Opts) -> + ok = ranch:accept_ack(Ref), + loop(#state{socket=Socket, transport=Transport}). + +loop(State) -> + loop(State). +endef + +# Plugin-specific targets. 
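+
+# Example (module and application names are illustrative): render one of the
+# templates above into a new module with the `new` target, e.g.:
+#
+#     make new t=gen_server n=my_server
+#     make new t=cowboy_http n=my_handler in=my_app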
+ +ifndef WS +ifdef SP +WS = $(subst a,,a $(wordlist 1,$(SP),a a a a a a a a a a a a a a a a a a a a)) +else +WS = $(tab) +endif +endif + +bootstrap: +ifneq ($(wildcard src/),) + $(error Error: src/ directory already exists) +endif + $(eval p := $(PROJECT)) + $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\ + $(error Error: Invalid characters in the application name)) + $(eval n := $(PROJECT)_sup) + $(verbose) $(call core_render,bs_Makefile,Makefile) + $(verbose) echo "include erlang.mk" >> Makefile + $(verbose) mkdir src/ +ifdef LEGACY + $(verbose) $(call core_render,bs_appsrc,src/$(PROJECT).app.src) +endif + $(verbose) $(call core_render,bs_app,src/$(PROJECT)_app.erl) + $(verbose) $(call core_render,tpl_supervisor,src/$(PROJECT)_sup.erl) + +bootstrap-lib: +ifneq ($(wildcard src/),) + $(error Error: src/ directory already exists) +endif + $(eval p := $(PROJECT)) + $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\ + $(error Error: Invalid characters in the application name)) + $(verbose) $(call core_render,bs_Makefile,Makefile) + $(verbose) echo "include erlang.mk" >> Makefile + $(verbose) mkdir src/ +ifdef LEGACY + $(verbose) $(call core_render,bs_appsrc_lib,src/$(PROJECT).app.src) +endif + +bootstrap-rel: +ifneq ($(wildcard relx.config),) + $(error Error: relx.config already exists) +endif +ifneq ($(wildcard config/),) + $(error Error: config/ directory already exists) +endif + $(eval p := $(PROJECT)) + $(verbose) $(call core_render,bs_relx_config,relx.config) + $(verbose) mkdir config/ + $(verbose) $(call core_render,bs_sys_config,config/sys.config) + $(verbose) $(call core_render,bs_vm_args,config/vm.args) + +new-app: +ifndef in + $(error Usage: $(MAKE) new-app in=APP) +endif +ifneq ($(wildcard $(APPS_DIR)/$in),) + $(error Error: Application $in already exists) +endif + $(eval p := $(in)) + $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\ + $(error Error: Invalid characters in the application name)) + $(eval n := $(in)_sup) + $(verbose) mkdir -p $(APPS_DIR)/$p/src/ + $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile) +ifdef LEGACY + $(verbose) $(call core_render,bs_appsrc,$(APPS_DIR)/$p/src/$p.app.src) +endif + $(verbose) $(call core_render,bs_app,$(APPS_DIR)/$p/src/$p_app.erl) + $(verbose) $(call core_render,tpl_supervisor,$(APPS_DIR)/$p/src/$p_sup.erl) + +new-lib: +ifndef in + $(error Usage: $(MAKE) new-lib in=APP) +endif +ifneq ($(wildcard $(APPS_DIR)/$in),) + $(error Error: Application $in already exists) +endif + $(eval p := $(in)) + $(if $(shell echo $p | LC_ALL=C grep -x "[a-z0-9_]*"),,\ + $(error Error: Invalid characters in the application name)) + $(verbose) mkdir -p $(APPS_DIR)/$p/src/ + $(verbose) $(call core_render,bs_apps_Makefile,$(APPS_DIR)/$p/Makefile) +ifdef LEGACY + $(verbose) $(call core_render,bs_appsrc_lib,$(APPS_DIR)/$p/src/$p.app.src) +endif + +new: +ifeq ($(wildcard src/)$(in),) + $(error Error: src/ directory does not exist) +endif +ifndef t + $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP]) +endif +ifndef n + $(error Usage: $(MAKE) new t=TEMPLATE n=NAME [in=APP]) +endif +ifdef in + $(verbose) $(call core_render,tpl_$(t),$(APPS_DIR)/$(in)/src/$(n).erl) +else + $(verbose) $(call core_render,tpl_$(t),src/$(n).erl) +endif + +list-templates: + $(verbose) @echo Available templates: + $(verbose) printf " %s\n" $(sort $(patsubst tpl_%,%,$(filter tpl_%,$(.VARIABLES)))) + +# Copyright (c) 2014-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. 
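+
+# Sketch of the defaults defined below: C sources found under c_src/ are
+# linked into priv/$(PROJECT) with a platform-specific extension; setting
+# C_SRC_TYPE to anything other than "shared" produces an executable instead.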
+ +.PHONY: clean-c_src distclean-c_src-env + +# Configuration. + +C_SRC_DIR ?= $(CURDIR)/c_src +C_SRC_ENV ?= $(C_SRC_DIR)/env.mk +C_SRC_OUTPUT ?= $(CURDIR)/priv/$(PROJECT) +C_SRC_TYPE ?= shared + +# System type and C compiler/flags. + +ifeq ($(PLATFORM),msys2) + C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= .exe + C_SRC_OUTPUT_SHARED_EXTENSION ?= .dll +else + C_SRC_OUTPUT_EXECUTABLE_EXTENSION ?= + C_SRC_OUTPUT_SHARED_EXTENSION ?= .so +endif + +ifeq ($(C_SRC_TYPE),shared) + C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_SHARED_EXTENSION) +else + C_SRC_OUTPUT_FILE = $(C_SRC_OUTPUT)$(C_SRC_OUTPUT_EXECUTABLE_EXTENSION) +endif + +ifeq ($(PLATFORM),msys2) +# We hardcode the compiler used on MSYS2. The default CC=cc does +# not produce working code. The "gcc" MSYS2 package also doesn't. + CC = /mingw64/bin/gcc + export CC + CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes + CXXFLAGS ?= -O3 -finline-functions -Wall +else ifeq ($(PLATFORM),darwin) + CC ?= cc + CFLAGS ?= -O3 -std=c99 -arch x86_64 -Wall -Wmissing-prototypes + CXXFLAGS ?= -O3 -arch x86_64 -Wall + LDFLAGS ?= -arch x86_64 -flat_namespace -undefined suppress +else ifeq ($(PLATFORM),freebsd) + CC ?= cc + CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes + CXXFLAGS ?= -O3 -finline-functions -Wall +else ifeq ($(PLATFORM),linux) + CC ?= gcc + CFLAGS ?= -O3 -std=c99 -finline-functions -Wall -Wmissing-prototypes + CXXFLAGS ?= -O3 -finline-functions -Wall +endif + +ifneq ($(PLATFORM),msys2) + CFLAGS += -fPIC + CXXFLAGS += -fPIC +endif + +CFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)" +CXXFLAGS += -I"$(ERTS_INCLUDE_DIR)" -I"$(ERL_INTERFACE_INCLUDE_DIR)" + +LDLIBS += -L"$(ERL_INTERFACE_LIB_DIR)" -lei + +# Verbosity. + +c_verbose_0 = @echo " C " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F)); +c_verbose = $(c_verbose_$(V)) + +cpp_verbose_0 = @echo " CPP " $(filter-out $(notdir $(MAKEFILE_LIST) $(C_SRC_ENV)),$(^F)); +cpp_verbose = $(cpp_verbose_$(V)) + +link_verbose_0 = @echo " LD " $(@F); +link_verbose = $(link_verbose_$(V)) + +# Targets. + +ifeq ($(wildcard $(C_SRC_DIR)),) +else ifneq ($(wildcard $(C_SRC_DIR)/Makefile),) +app:: app-c_src + +test-build:: app-c_src + +app-c_src: + $(MAKE) -C $(C_SRC_DIR) + +clean:: + $(MAKE) -C $(C_SRC_DIR) clean + +else + +ifeq ($(SOURCES),) +SOURCES := $(sort $(foreach pat,*.c *.C *.cc *.cpp,$(call core_find,$(C_SRC_DIR)/,$(pat)))) +endif +OBJECTS = $(addsuffix .o, $(basename $(SOURCES))) + +COMPILE_C = $(c_verbose) $(CC) $(CFLAGS) $(CPPFLAGS) -c +COMPILE_CPP = $(cpp_verbose) $(CXX) $(CXXFLAGS) $(CPPFLAGS) -c + +app:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE) + +test-build:: $(C_SRC_ENV) $(C_SRC_OUTPUT_FILE) + +$(C_SRC_OUTPUT_FILE): $(OBJECTS) + $(verbose) mkdir -p $(dir $@) + $(link_verbose) $(CC) $(OBJECTS) \ + $(LDFLAGS) $(if $(filter $(C_SRC_TYPE),shared),-shared) $(LDLIBS) \ + -o $(C_SRC_OUTPUT_FILE) + +$(OBJECTS): $(MAKEFILE_LIST) $(C_SRC_ENV) + +%.o: %.c + $(COMPILE_C) $(OUTPUT_OPTION) $< + +%.o: %.cc + $(COMPILE_CPP) $(OUTPUT_OPTION) $< + +%.o: %.C + $(COMPILE_CPP) $(OUTPUT_OPTION) $< + +%.o: %.cpp + $(COMPILE_CPP) $(OUTPUT_OPTION) $< + +clean:: clean-c_src + +clean-c_src: + $(gen_verbose) rm -f $(C_SRC_OUTPUT_FILE) $(OBJECTS) + +endif + +ifneq ($(wildcard $(C_SRC_DIR)),) +ERL_ERTS_DIR = $(shell $(ERL) -eval 'io:format("~s~n", [code:lib_dir(erts)]), halt().') + +$(C_SRC_ENV): + $(verbose) $(ERL) -eval "file:write_file(\"$(call core_native_path,$(C_SRC_ENV))\", \ + io_lib:format( \ + \"# Generated by Erlang.mk. 
Edit at your own risk!~n~n\" \ + \"ERTS_INCLUDE_DIR ?= ~s/erts-~s/include/~n\" \ + \"ERL_INTERFACE_INCLUDE_DIR ?= ~s~n\" \ + \"ERL_INTERFACE_LIB_DIR ?= ~s~n\" \ + \"ERTS_DIR ?= $(ERL_ERTS_DIR)~n\", \ + [code:root_dir(), erlang:system_info(version), \ + code:lib_dir(erl_interface, include), \ + code:lib_dir(erl_interface, lib)])), \ + halt()." + +distclean:: distclean-c_src-env + +distclean-c_src-env: + $(gen_verbose) rm -f $(C_SRC_ENV) + +-include $(C_SRC_ENV) + +ifneq ($(ERL_ERTS_DIR),$(ERTS_DIR)) +$(shell rm -f $(C_SRC_ENV)) +endif +endif + +# Templates. + +define bs_c_nif +#include "erl_nif.h" + +static int loads = 0; + +static int load(ErlNifEnv* env, void** priv_data, ERL_NIF_TERM load_info) +{ + /* Initialize private data. */ + *priv_data = NULL; + + loads++; + + return 0; +} + +static int upgrade(ErlNifEnv* env, void** priv_data, void** old_priv_data, ERL_NIF_TERM load_info) +{ + /* Convert the private data to the new version. */ + *priv_data = *old_priv_data; + + loads++; + + return 0; +} + +static void unload(ErlNifEnv* env, void* priv_data) +{ + if (loads == 1) { + /* Destroy the private data. */ + } + + loads--; +} + +static ERL_NIF_TERM hello(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) +{ + if (enif_is_atom(env, argv[0])) { + return enif_make_tuple2(env, + enif_make_atom(env, "hello"), + argv[0]); + } + + return enif_make_tuple2(env, + enif_make_atom(env, "error"), + enif_make_atom(env, "badarg")); +} + +static ErlNifFunc nif_funcs[] = { + {"hello", 1, hello} +}; + +ERL_NIF_INIT($n, nif_funcs, load, NULL, upgrade, unload) +endef + +define bs_erl_nif +-module($n). + +-export([hello/1]). + +-on_load(on_load/0). +on_load() -> + PrivDir = case code:priv_dir(?MODULE) of + {error, _} -> + AppPath = filename:dirname(filename:dirname(code:which(?MODULE))), + filename:join(AppPath, "priv"); + Path -> + Path + end, + erlang:load_nif(filename:join(PrivDir, atom_to_list(?MODULE)), 0). + +hello(_) -> + erlang:nif_error({not_loaded, ?MODULE}). +endef + +new-nif: +ifneq ($(wildcard $(C_SRC_DIR)/$n.c),) + $(error Error: $(C_SRC_DIR)/$n.c already exists) +endif +ifneq ($(wildcard src/$n.erl),) + $(error Error: src/$n.erl already exists) +endif +ifndef n + $(error Usage: $(MAKE) new-nif n=NAME [in=APP]) +endif +ifdef in + $(verbose) $(MAKE) -C $(APPS_DIR)/$(in)/ new-nif n=$n in= +else + $(verbose) mkdir -p $(C_SRC_DIR) src/ + $(verbose) $(call core_render,bs_c_nif,$(C_SRC_DIR)/$n.c) + $(verbose) $(call core_render,bs_erl_nif,src/$n.erl) +endif + +# Copyright (c) 2015-2017, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. 
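+
+# Example taken from the help text further below: list the Erlang/OTP
+# releases to exercise in CI_OTP, e.g.:
+#
+#     CI_OTP = OTP-17.3.4 OTP-17.5.3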
+ +.PHONY: ci ci-prepare ci-setup + +CI_OTP ?= +CI_HIPE ?= +CI_ERLLVM ?= + +ifeq ($(CI_VM),native) +ERLC_OPTS += +native +TEST_ERLC_OPTS += +native +else ifeq ($(CI_VM),erllvm) +ERLC_OPTS += +native +'{hipe, [to_llvm]}' +TEST_ERLC_OPTS += +native +'{hipe, [to_llvm]}' +endif + +ifeq ($(strip $(CI_OTP) $(CI_HIPE) $(CI_ERLLVM)),) +ci:: +else + +ci:: $(addprefix ci-,$(CI_OTP) $(addsuffix -native,$(CI_HIPE)) $(addsuffix -erllvm,$(CI_ERLLVM))) + +ci-prepare: $(addprefix $(KERL_INSTALL_DIR)/,$(CI_OTP) $(addsuffix -native,$(CI_HIPE))) + +ci-setup:: + $(verbose) : + +ci-extra:: + $(verbose) : + +ci_verbose_0 = @echo " CI " $(1); +ci_verbose = $(ci_verbose_$(V)) + +define ci_target +ci-$1: $(KERL_INSTALL_DIR)/$2 + $(verbose) $(MAKE) --no-print-directory clean + $(ci_verbose) \ + PATH="$(KERL_INSTALL_DIR)/$2/bin:$(PATH)" \ + CI_OTP_RELEASE="$1" \ + CT_OPTS="-label $1" \ + CI_VM="$3" \ + $(MAKE) ci-setup tests + $(verbose) $(MAKE) --no-print-directory ci-extra +endef + +$(foreach otp,$(CI_OTP),$(eval $(call ci_target,$(otp),$(otp),otp))) +$(foreach otp,$(CI_HIPE),$(eval $(call ci_target,$(otp)-native,$(otp)-native,native))) +$(foreach otp,$(CI_ERLLVM),$(eval $(call ci_target,$(otp)-erllvm,$(otp)-native,erllvm))) + +$(foreach otp,$(filter-out $(ERLANG_OTP),$(CI_OTP)),$(eval $(call kerl_otp_target,$(otp)))) +$(foreach otp,$(filter-out $(ERLANG_HIPE),$(sort $(CI_HIPE) $(CI_ERLLLVM))),$(eval $(call kerl_hipe_target,$(otp)))) + +help:: + $(verbose) printf "%s\n" "" \ + "Continuous Integration targets:" \ + " ci Run '$(MAKE) tests' on all configured Erlang versions." \ + "" \ + "The CI_OTP variable must be defined with the Erlang versions" \ + "that must be tested. For example: CI_OTP = OTP-17.3.4 OTP-17.5.3" + +endif + +# Copyright (c) 2020, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +ifdef CONCUERROR_TESTS + +.PHONY: concuerror distclean-concuerror + +# Configuration + +CONCUERROR_LOGS_DIR ?= $(CURDIR)/logs +CONCUERROR_OPTS ?= + +# Core targets. + +check:: concuerror + +ifndef KEEP_LOGS +distclean:: distclean-concuerror +endif + +# Plugin-specific targets. + +$(ERLANG_MK_TMP)/Concuerror/bin/concuerror: | $(ERLANG_MK_TMP) + $(verbose) git clone https://github.com/parapluu/Concuerror $(ERLANG_MK_TMP)/Concuerror + $(verbose) $(MAKE) -C $(ERLANG_MK_TMP)/Concuerror + +$(CONCUERROR_LOGS_DIR): + $(verbose) mkdir -p $(CONCUERROR_LOGS_DIR) + +define concuerror_html_report + + + + +Concuerror HTML report + + +

+<h1>Concuerror HTML report</h1>
+
+<p>Generated on $(concuerror_date)</p>
+
+<ul>
+$(foreach t,$(concuerror_targets),
+<li>$(t)</li>)
+</ul>
+</body>
+</html>
+ + +endef + +concuerror: $(addprefix concuerror-,$(subst :,-,$(CONCUERROR_TESTS))) + $(eval concuerror_date := $(shell date)) + $(eval concuerror_targets := $^) + $(verbose) $(call core_render,concuerror_html_report,$(CONCUERROR_LOGS_DIR)/concuerror.html) + +define concuerror_target +.PHONY: concuerror-$1-$2 + +concuerror-$1-$2: test-build | $(ERLANG_MK_TMP)/Concuerror/bin/concuerror $(CONCUERROR_LOGS_DIR) + $(ERLANG_MK_TMP)/Concuerror/bin/concuerror \ + --pa $(CURDIR)/ebin --pa $(TEST_DIR) \ + -o $(CONCUERROR_LOGS_DIR)/concuerror-$1-$2.txt \ + $$(CONCUERROR_OPTS) -m $1 -t $2 +endef + +$(foreach test,$(CONCUERROR_TESTS),$(eval $(call concuerror_target,$(firstword $(subst :, ,$(test))),$(lastword $(subst :, ,$(test)))))) + +distclean-concuerror: + $(gen_verbose) rm -rf $(CONCUERROR_LOGS_DIR) + +endif + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: ct apps-ct distclean-ct + +# Configuration. + +CT_OPTS ?= + +ifneq ($(wildcard $(TEST_DIR)),) +ifndef CT_SUITES +CT_SUITES := $(sort $(subst _SUITE.erl,,$(notdir $(call core_find,$(TEST_DIR)/,*_SUITE.erl)))) +endif +endif +CT_SUITES ?= +CT_LOGS_DIR ?= $(CURDIR)/logs + +# Core targets. + +tests:: ct + +ifndef KEEP_LOGS +distclean:: distclean-ct +endif + +help:: + $(verbose) printf "%s\n" "" \ + "Common_test targets:" \ + " ct Run all the common_test suites for this project" \ + "" \ + "All your common_test suites have their associated targets." \ + "A suite named http_SUITE can be ran using the ct-http target." + +# Plugin-specific targets. + +CT_RUN = ct_run \ + -no_auto_compile \ + -noinput \ + -pa $(CURDIR)/ebin $(TEST_DIR) \ + -dir $(TEST_DIR) \ + -logdir $(CT_LOGS_DIR) + +ifeq ($(CT_SUITES),) +ct: $(if $(IS_APP)$(ROOT_DIR),,apps-ct) +else +# We do not run tests if we are in an apps/* with no test directory. +ifneq ($(IS_APP)$(wildcard $(TEST_DIR)),1) +ct: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-ct) + $(verbose) mkdir -p $(CT_LOGS_DIR) + $(gen_verbose) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(CT_SUITES)) $(CT_OPTS) +endif +endif + +ifneq ($(ALL_APPS_DIRS),) +define ct_app_target +apps-ct-$1: test-build + $$(MAKE) -C $1 ct IS_APP=1 +endef + +$(foreach app,$(ALL_APPS_DIRS),$(eval $(call ct_app_target,$(app)))) + +apps-ct: $(addprefix apps-ct-,$(ALL_APPS_DIRS)) +endif + +ifdef t +ifeq (,$(findstring :,$t)) +CT_EXTRA = -group $t +else +t_words = $(subst :, ,$t) +CT_EXTRA = -group $(firstword $(t_words)) -case $(lastword $(t_words)) +endif +else +ifdef c +CT_EXTRA = -case $c +else +CT_EXTRA = +endif +endif + +define ct_suite_target +ct-$(1): test-build + $(verbose) mkdir -p $(CT_LOGS_DIR) + $(gen_verbose_esc) $(CT_RUN) -sname ct_$(PROJECT) -suite $(addsuffix _SUITE,$(1)) $(CT_EXTRA) $(CT_OPTS) +endef + +$(foreach test,$(CT_SUITES),$(eval $(call ct_suite_target,$(test)))) + +distclean-ct: + $(gen_verbose) rm -rf $(CT_LOGS_DIR) + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: plt distclean-plt dialyze + +# Configuration. + +DIALYZER_PLT ?= $(CURDIR)/.$(PROJECT).plt +export DIALYZER_PLT + +PLT_APPS ?= +DIALYZER_DIRS ?= --src -r $(wildcard src) $(ALL_APPS_DIRS) +DIALYZER_OPTS ?= -Werror_handling -Wrace_conditions -Wunmatched_returns # -Wunderspecs +DIALYZER_PLT_OPTS ?= + +# Core targets. 
+ +check:: dialyze + +distclean:: distclean-plt + +help:: + $(verbose) printf "%s\n" "" \ + "Dialyzer targets:" \ + " plt Build a PLT file for this project" \ + " dialyze Analyze the project using Dialyzer" + +# Plugin-specific targets. + +define filter_opts.erl + Opts = init:get_plain_arguments(), + {Filtered, _} = lists:foldl(fun + (O, {Os, true}) -> {[O|Os], false}; + (O = "-D", {Os, _}) -> {[O|Os], true}; + (O = [\\$$-, \\$$D, _ | _], {Os, _}) -> {[O|Os], false}; + (O = "-I", {Os, _}) -> {[O|Os], true}; + (O = [\\$$-, \\$$I, _ | _], {Os, _}) -> {[O|Os], false}; + (O = "-pa", {Os, _}) -> {[O|Os], true}; + (_, Acc) -> Acc + end, {[], false}, Opts), + io:format("~s~n", [string:join(lists:reverse(Filtered), " ")]), + halt(). +endef + +# DIALYZER_PLT is a variable understood directly by Dialyzer. +# +# We append the path to erts at the end of the PLT. This works +# because the PLT file is in the external term format and the +# function binary_to_term/1 ignores any trailing data. +$(DIALYZER_PLT): deps app + $(eval DEPS_LOG := $(shell test -f $(ERLANG_MK_TMP)/deps.log && \ + while read p; do test -d $$p/ebin && echo $$p/ebin; done <$(ERLANG_MK_TMP)/deps.log)) + $(verbose) dialyzer --build_plt $(DIALYZER_PLT_OPTS) --apps \ + erts kernel stdlib $(PLT_APPS) $(OTP_DEPS) $(LOCAL_DEPS) $(DEPS_LOG) || test $$? -eq 2 + $(verbose) $(ERL) -eval 'io:format("~n~s~n", [code:lib_dir(erts)]), halt().' >> $@ + +plt: $(DIALYZER_PLT) + +distclean-plt: + $(gen_verbose) rm -f $(DIALYZER_PLT) + +ifneq ($(wildcard $(DIALYZER_PLT)),) +dialyze: $(if $(filter --src,$(DIALYZER_DIRS)),,deps app) + $(verbose) if ! tail -n1 $(DIALYZER_PLT) | \ + grep -q "^`$(ERL) -eval 'io:format("~s", [code:lib_dir(erts)]), halt().'`$$"; then \ + rm $(DIALYZER_PLT); \ + $(MAKE) plt; \ + fi +else +dialyze: $(DIALYZER_PLT) +endif + $(verbose) dialyzer --no_native `$(ERL) \ + -eval "$(subst $(newline),,$(call escape_dquotes,$(call filter_opts.erl)))" \ + -extra $(ERLC_OPTS)` $(DIALYZER_DIRS) $(DIALYZER_OPTS) $(if $(wildcard ebin/),-pa ebin/) + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: distclean-edoc edoc + +# Configuration. + +EDOC_OPTS ?= +EDOC_SRC_DIRS ?= +EDOC_OUTPUT ?= doc + +define edoc.erl + SrcPaths = lists:foldl(fun(P, Acc) -> + filelib:wildcard(atom_to_list(P) ++ "/{src,c_src}") ++ Acc + end, [], [$(call comma_list,$(patsubst %,'%',$(call core_native_path,$(EDOC_SRC_DIRS))))]), + DefaultOpts = [{dir, "$(EDOC_OUTPUT)"}, {source_path, SrcPaths}, {subpackages, false}], + edoc:application($(1), ".", [$(2)] ++ DefaultOpts), + halt(0). +endef + +# Core targets. + +ifneq ($(strip $(EDOC_SRC_DIRS)$(wildcard doc/overview.edoc)),) +docs:: edoc +endif + +distclean:: distclean-edoc + +# Plugin-specific targets. + +edoc: distclean-edoc doc-deps + $(gen_verbose) $(call erlang,$(call edoc.erl,$(PROJECT),$(EDOC_OPTS))) + +distclean-edoc: + $(gen_verbose) rm -f $(EDOC_OUTPUT)/*.css $(EDOC_OUTPUT)/*.html $(EDOC_OUTPUT)/*.png $(EDOC_OUTPUT)/edoc-info + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +# Configuration. + +DTL_FULL_PATH ?= +DTL_PATH ?= templates/ +DTL_PREFIX ?= +DTL_SUFFIX ?= _dtl +DTL_OPTS ?= + +# Verbosity. + +dtl_verbose_0 = @echo " DTL " $(filter %.dtl,$(?F)); +dtl_verbose = $(dtl_verbose_$(V)) + +# Core targets. 
+ +DTL_PATH := $(abspath $(DTL_PATH)) +DTL_FILES := $(sort $(call core_find,$(DTL_PATH),*.dtl)) + +ifneq ($(DTL_FILES),) + +DTL_NAMES = $(addprefix $(DTL_PREFIX),$(addsuffix $(DTL_SUFFIX),$(DTL_FILES:$(DTL_PATH)/%.dtl=%))) +DTL_MODULES = $(if $(DTL_FULL_PATH),$(subst /,_,$(DTL_NAMES)),$(notdir $(DTL_NAMES))) +BEAM_FILES += $(addsuffix .beam,$(addprefix ebin/,$(DTL_MODULES))) + +ifneq ($(words $(DTL_FILES)),0) +# Rebuild templates when the Makefile changes. +$(ERLANG_MK_TMP)/last-makefile-change-erlydtl: $(MAKEFILE_LIST) | $(ERLANG_MK_TMP) + $(verbose) if test -f $@; then \ + touch $(DTL_FILES); \ + fi + $(verbose) touch $@ + +ebin/$(PROJECT).app:: $(ERLANG_MK_TMP)/last-makefile-change-erlydtl +endif + +define erlydtl_compile.erl + [begin + Module0 = case "$(strip $(DTL_FULL_PATH))" of + "" -> + filename:basename(F, ".dtl"); + _ -> + "$(call core_native_path,$(DTL_PATH))/" ++ F2 = filename:rootname(F, ".dtl"), + re:replace(F2, "/", "_", [{return, list}, global]) + end, + Module = list_to_atom("$(DTL_PREFIX)" ++ string:to_lower(Module0) ++ "$(DTL_SUFFIX)"), + case erlydtl:compile(F, Module, [$(DTL_OPTS)] ++ [{out_dir, "ebin/"}, return_errors]) of + ok -> ok; + {ok, _} -> ok + end + end || F <- string:tokens("$(1)", " ")], + halt(). +endef + +ebin/$(PROJECT).app:: $(DTL_FILES) | ebin/ + $(if $(strip $?),\ + $(dtl_verbose) $(call erlang,$(call erlydtl_compile.erl,$(call core_native_path,$?)),\ + -pa ebin/)) + +endif + +# Copyright (c) 2016, Loรฏc Hoguin +# Copyright (c) 2014, Dave Cottlehuber +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: distclean-escript escript escript-zip + +# Configuration. + +ESCRIPT_NAME ?= $(PROJECT) +ESCRIPT_FILE ?= $(ESCRIPT_NAME) + +ESCRIPT_SHEBANG ?= /usr/bin/env escript +ESCRIPT_COMMENT ?= This is an -*- erlang -*- file +ESCRIPT_EMU_ARGS ?= -escript main $(ESCRIPT_NAME) + +ESCRIPT_ZIP ?= 7z a -tzip -mx=9 -mtc=off $(if $(filter-out 0,$(V)),,> /dev/null) +ESCRIPT_ZIP_FILE ?= $(ERLANG_MK_TMP)/escript.zip + +# Core targets. + +distclean:: distclean-escript + +help:: + $(verbose) printf "%s\n" "" \ + "Escript targets:" \ + " escript Build an executable escript archive" \ + +# Plugin-specific targets. + +escript-zip:: FULL=1 +escript-zip:: deps app + $(verbose) mkdir -p $(dir $(ESCRIPT_ZIP)) + $(verbose) rm -f $(ESCRIPT_ZIP_FILE) + $(gen_verbose) cd .. && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) $(PROJECT)/ebin/* +ifneq ($(DEPS),) + $(verbose) cd $(DEPS_DIR) && $(ESCRIPT_ZIP) $(ESCRIPT_ZIP_FILE) \ + $(subst $(DEPS_DIR)/,,$(addsuffix /*,$(wildcard \ + $(addsuffix /ebin,$(shell cat $(ERLANG_MK_TMP)/deps.log))))) +endif + +escript:: escript-zip + $(gen_verbose) printf "%s\n" \ + "#!$(ESCRIPT_SHEBANG)" \ + "%% $(ESCRIPT_COMMENT)" \ + "%%! $(ESCRIPT_EMU_ARGS)" > $(ESCRIPT_FILE) + $(verbose) cat $(ESCRIPT_ZIP_FILE) >> $(ESCRIPT_FILE) + $(verbose) chmod +x $(ESCRIPT_FILE) + +distclean-escript: + $(gen_verbose) rm -f $(ESCRIPT_FILE) + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# Copyright (c) 2014, Enrique Fernandez +# This file is contributed to erlang.mk and subject to the terms of the ISC License. + +.PHONY: eunit apps-eunit + +# Configuration + +EUNIT_OPTS ?= +EUNIT_ERL_OPTS ?= + +# Core targets. + +tests:: eunit + +help:: + $(verbose) printf "%s\n" "" \ + "EUnit targets:" \ + " eunit Run all the EUnit tests for this project" + +# Plugin-specific targets. 
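+#
+# Illustrative usage (module and test names are examples, not part of the
+# generated project): `make eunit` runs every module; `make eunit t=my_mod`
+# runs eunit:test(['my_mod']); and `make eunit t=my_mod:my_test` runs the
+# single fun my_mod:my_test/0. The t variable is handled further below.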
+ +define eunit.erl + $(call cover.erl) + CoverSetup(), + case eunit:test($1, [$(EUNIT_OPTS)]) of + ok -> ok; + error -> halt(2) + end, + CoverExport("$(call core_native_path,$(COVER_DATA_DIR))/eunit.coverdata"), + halt() +endef + +EUNIT_ERL_OPTS += -pa $(TEST_DIR) $(CURDIR)/ebin + +ifdef t +ifeq (,$(findstring :,$(t))) +eunit: test-build cover-data-dir + $(gen_verbose) $(call erlang,$(call eunit.erl,['$(t)']),$(EUNIT_ERL_OPTS)) +else +eunit: test-build cover-data-dir + $(gen_verbose) $(call erlang,$(call eunit.erl,fun $(t)/0),$(EUNIT_ERL_OPTS)) +endif +else +EUNIT_EBIN_MODS = $(notdir $(basename $(ERL_FILES) $(BEAM_FILES))) +EUNIT_TEST_MODS = $(notdir $(basename $(call core_find,$(TEST_DIR)/,*.erl))) + +EUNIT_MODS = $(foreach mod,$(EUNIT_EBIN_MODS) $(filter-out \ + $(patsubst %,%_tests,$(EUNIT_EBIN_MODS)),$(EUNIT_TEST_MODS)),'$(mod)') + +eunit: test-build $(if $(IS_APP)$(ROOT_DIR),,apps-eunit) cover-data-dir +ifneq ($(wildcard src/ $(TEST_DIR)),) + $(gen_verbose) $(call erlang,$(call eunit.erl,[$(call comma_list,$(EUNIT_MODS))]),$(EUNIT_ERL_OPTS)) +endif + +ifneq ($(ALL_APPS_DIRS),) +apps-eunit: test-build + $(verbose) eunit_retcode=0 ; for app in $(ALL_APPS_DIRS); do $(MAKE) -C $$app eunit IS_APP=1; \ + [ $$? -ne 0 ] && eunit_retcode=1 ; done ; \ + exit $$eunit_retcode +endif +endif + +# Copyright (c) 2020, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +# We automatically depend on hex_core when the project isn't already. +$(if $(filter hex_core,$(DEPS) $(BUILD_DEPS) $(DOC_DEPS) $(REL_DEPS) $(TEST_DEPS)),,\ + $(eval $(call dep_target,hex_core))) + +hex-core: $(DEPS_DIR)/hex_core + $(verbose) if [ ! -e $(DEPS_DIR)/hex_core/ebin/dep_built ]; then \ + $(MAKE) -C $(DEPS_DIR)/hex_core IS_DEP=1; \ + touch $(DEPS_DIR)/hex_core/ebin/dep_built; \ + fi + +# @todo This must also apply to fetching. +HEX_CONFIG ?= + +define hex_config.erl + begin + Config0 = hex_core:default_config(), + Config0$(HEX_CONFIG) + end +endef + +# @todo Something is wrong about the password I couldn't log into hex.pm. +define hex_user_create.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + case hex_api_user:create(Config, <<"$(strip $1)">>, <<"$(strip $2)">>, <<"$(strip $3)">>) of + {ok, {201, _, #{<<"email">> := Email, <<"url">> := URL, <<"username">> := Username}}} -> + io:format("User ~s (~s) created at ~s~n" + "Please check your inbox for a confirmation email.~n" + "You must confirm before you are allowed to publish packages.~n", + [Username, Email, URL]), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(80) + end +endef + +# The $(info ) call inserts a new line after the password prompt. 
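+#
+# Illustrative usage: `make hex-user-create` prompts for Username, Password and
+# Email unless HEX_USERNAME, HEX_PASSWORD and HEX_EMAIL are already set on the
+# command line or in the environment.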
+hex-user-create: hex-core + $(if $(HEX_USERNAME),,$(eval HEX_USERNAME := $(shell read -p "Username: " username; echo $$username))) + $(if $(HEX_PASSWORD),,$(eval HEX_PASSWORD := $(shell stty -echo; read -p "Password: " password; stty echo; echo $$password) $(info ))) + $(if $(HEX_EMAIL),,$(eval HEX_EMAIL := $(shell read -p "Email: " email; echo $$email))) + $(gen_verbose) $(call erlang,$(call hex_user_create.erl,$(HEX_USERNAME),$(HEX_PASSWORD),$(HEX_EMAIL))) + +define hex_key_add.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => iolist_to_binary([<<"Basic ">>, base64:encode(<<"$(strip $1):$(strip $2)">>)])}, + Permissions = [ + case string:split(P, <<":">>) of + [D] -> #{domain => D}; + [D, R] -> #{domain => D, resource => R} + end + || P <- string:split(<<"$(strip $4)">>, <<",">>, all)], + case hex_api_key:add(ConfigF, <<"$(strip $3)">>, Permissions) of + {ok, {201, _, #{<<"secret">> := Secret}}} -> + io:format("Key ~s created for user ~s~nSecret: ~s~n" + "Please store the secret in a secure location, such as a password store.~n" + "The secret will be requested for most Hex-related operations.~n", + [<<"$(strip $3)">>, <<"$(strip $1)">>, Secret]), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(81) + end +endef + +hex-key-add: hex-core + $(if $(HEX_USERNAME),,$(eval HEX_USERNAME := $(shell read -p "Username: " username; echo $$username))) + $(if $(HEX_PASSWORD),,$(eval HEX_PASSWORD := $(shell stty -echo; read -p "Password: " password; stty echo; echo $$password) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_key_add.erl,$(HEX_USERNAME),$(HEX_PASSWORD),\ + $(if $(name),$(name),$(shell hostname)-erlang-mk),\ + $(if $(perm),$(perm),api))) + +HEX_TARBALL_EXTRA_METADATA ?= + +# @todo Check that we can += files +HEX_TARBALL_FILES ?= \ + $(wildcard early-plugins.mk) \ + $(wildcard ebin/$(PROJECT).app) \ + $(wildcard ebin/$(PROJECT).appup) \ + $(wildcard $(notdir $(ERLANG_MK_FILENAME))) \ + $(call core_find,include/,*.hrl) \ + $(wildcard LICENSE*) \ + $(wildcard Makefile) \ + $(wildcard plugins.mk) \ + $(call core_find,priv/,*) \ + $(wildcard README*) \ + $(wildcard rebar.config) \ + $(call core_find,src/,*) + +HEX_TARBALL_OUTPUT_FILE ?= $(ERLANG_MK_TMP)/$(PROJECT).tar + +# @todo Need to check for rebar.config and/or the absence of DEPS to know +# whether a project will work with Rebar. +# +# @todo contributors licenses links in HEX_TARBALL_EXTRA_METADATA + +# In order to build the requirements metadata we look into DEPS. +# We do not require that the project use Hex dependencies, however +# Hex.pm does require that the package name and version numbers +# correspond to a real Hex package. 
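+#
+# For example (illustrative): with DEPS = cowlib and dep_cowlib = hex 2.11.0,
+# the requirements map built below would contain
+#   #{<<"cowlib">> => #{<<"app">> => <<"cowlib">>, <<"optional">> => false,
+#                       <<"requirement">> => <<"2.11.0">>}}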
+define hex_tarball_create.erl + Files0 = [$(call comma_list,$(patsubst %,"%",$(HEX_TARBALL_FILES)))], + Requirements0 = #{ + $(foreach d,$(DEPS), + <<"$(if $(subst hex,,$(call query_fetch_method,$d)),$d,$(if $(word 3,$(dep_$d)),$(word 3,$(dep_$d)),$d))">> => #{ + <<"app">> => <<"$d">>, + <<"optional">> => false, + <<"requirement">> => <<"$(call query_version,$d)">> + },) + $(if $(DEPS),dummy => dummy) + }, + Requirements = maps:remove(dummy, Requirements0), + Metadata0 = #{ + app => <<"$(strip $(PROJECT))">>, + build_tools => [<<"make">>, <<"rebar3">>], + description => <<"$(strip $(PROJECT_DESCRIPTION))">>, + files => [unicode:characters_to_binary(F) || F <- Files0], + name => <<"$(strip $(PROJECT))">>, + requirements => Requirements, + version => <<"$(strip $(PROJECT_VERSION))">> + }, + Metadata = Metadata0$(HEX_TARBALL_EXTRA_METADATA), + Files = [case file:read_file(F) of + {ok, Bin} -> + {F, Bin}; + {error, Reason} -> + io:format("Error trying to open file ~0p: ~0p~n", [F, Reason]), + halt(82) + end || F <- Files0], + case hex_tarball:create(Metadata, Files) of + {ok, #{tarball := Tarball}} -> + ok = file:write_file("$(strip $(HEX_TARBALL_OUTPUT_FILE))", Tarball), + halt(0); + {error, Reason} -> + io:format("Error ~0p~n", [Reason]), + halt(83) + end +endef + +hex_tar_verbose_0 = @echo " TAR $(notdir $(ERLANG_MK_TMP))/$(@F)"; +hex_tar_verbose_2 = set -x; +hex_tar_verbose = $(hex_tar_verbose_$(V)) + +$(HEX_TARBALL_OUTPUT_FILE): hex-core app + $(hex_tar_verbose) $(call erlang,$(call hex_tarball_create.erl)) + +hex-tarball-create: $(HEX_TARBALL_OUTPUT_FILE) + +define hex_release_publish_summary.erl + {ok, Tarball} = erl_tar:open("$(strip $(HEX_TARBALL_OUTPUT_FILE))", [read]), + ok = erl_tar:extract(Tarball, [{cwd, "$(ERLANG_MK_TMP)"}, {files, ["metadata.config"]}]), + {ok, Metadata} = file:consult("$(ERLANG_MK_TMP)/metadata.config"), + #{ + <<"name">> := Name, + <<"version">> := Version, + <<"files">> := Files, + <<"requirements">> := Deps + } = maps:from_list(Metadata), + io:format("Publishing ~s ~s~n Dependencies:~n", [Name, Version]), + case Deps of + [] -> + io:format(" (none)~n"); + _ -> + [begin + #{<<"app">> := DA, <<"requirement">> := DR} = maps:from_list(D), + io:format(" ~s ~s~n", [DA, DR]) + end || {_, D} <- Deps] + end, + io:format(" Included files:~n"), + [io:format(" ~s~n", [F]) || F <- Files], + io:format("You may also review the contents of the tarball file.~n" + "Please enter your secret key to proceed.~n"), + halt(0) +endef + +define hex_release_publish.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => <<"$(strip $1)">>}, + {ok, Tarball} = file:read_file("$(strip $(HEX_TARBALL_OUTPUT_FILE))"), + case hex_api_release:publish(ConfigF, Tarball, [{replace, $2}]) of + {ok, {200, _, #{}}} -> + io:format("Release replaced~n"), + halt(0); + {ok, {201, _, #{}}} -> + io:format("Release published~n"), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(84) + end +endef + +hex-release-tarball: hex-core $(HEX_TARBALL_OUTPUT_FILE) + $(verbose) $(call erlang,$(call hex_release_publish_summary.erl)) + +hex-release-publish: hex-core hex-release-tarball + $(if $(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_release_publish.erl,$(HEX_SECRET),false)) + +hex-release-replace: hex-core hex-release-tarball + $(if 
$(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_release_publish.erl,$(HEX_SECRET),true)) + +define hex_release_delete.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => <<"$(strip $1)">>}, + case hex_api_release:delete(ConfigF, <<"$(strip $(PROJECT))">>, <<"$(strip $(PROJECT_VERSION))">>) of + {ok, {204, _, _}} -> + io:format("Release $(strip $(PROJECT_VERSION)) deleted~n"), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(85) + end +endef + +hex-release-delete: hex-core + $(if $(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_release_delete.erl,$(HEX_SECRET))) + +define hex_release_retire.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => <<"$(strip $1)">>}, + Params = #{<<"reason">> => <<"$(strip $3)">>, <<"message">> => <<"$(strip $4)">>}, + case hex_api_release:retire(ConfigF, <<"$(strip $(PROJECT))">>, <<"$(strip $2)">>, Params) of + {ok, {204, _, _}} -> + io:format("Release $(strip $2) has been retired~n"), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(86) + end +endef + +hex-release-retire: hex-core + $(if $(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_release_retire.erl,$(HEX_SECRET),\ + $(if $(HEX_VERSION),$(HEX_VERSION),$(PROJECT_VERSION)),\ + $(if $(HEX_REASON),$(HEX_REASON),invalid),\ + $(HEX_MESSAGE))) + +define hex_release_unretire.erl + {ok, _} = application:ensure_all_started(ssl), + {ok, _} = application:ensure_all_started(inets), + Config = $(hex_config.erl), + ConfigF = Config#{api_key => <<"$(strip $1)">>}, + case hex_api_release:unretire(ConfigF, <<"$(strip $(PROJECT))">>, <<"$(strip $2)">>) of + {ok, {204, _, _}} -> + io:format("Release $(strip $2) is not retired anymore~n"), + halt(0); + {ok, {Status, _, Errors}} -> + io:format("Error ~b: ~0p~n", [Status, Errors]), + halt(87) + end +endef + +hex-release-unretire: hex-core + $(if $(HEX_SECRET),,$(eval HEX_SECRET := $(shell stty -echo; read -p "Secret: " secret; stty echo; echo $$secret) $(info ))) + $(gen_verbose) $(call erlang,$(call hex_release_unretire.erl,$(HEX_SECRET),\ + $(if $(HEX_VERSION),$(HEX_VERSION),$(PROJECT_VERSION)))) + +# Copyright (c) 2015-2017, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +ifeq ($(filter proper,$(DEPS) $(TEST_DEPS)),proper) +.PHONY: proper + +# Targets. 
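+#
+# Illustrative note: properties are discovered as exported zero-arity functions
+# whose names start with prop_ (e.g. a hypothetical prop_roundtrip/0); each one
+# is run through proper:quickcheck/2 by the snippet below.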
+ +tests:: proper + +define proper_check.erl + $(call cover.erl) + code:add_pathsa([ + "$(call core_native_path,$(CURDIR)/ebin)", + "$(call core_native_path,$(DEPS_DIR)/*/ebin)", + "$(call core_native_path,$(TEST_DIR))"]), + Module = fun(M) -> + [true] =:= lists:usort([ + case atom_to_list(F) of + "prop_" ++ _ -> + io:format("Testing ~p:~p/0~n", [M, F]), + proper:quickcheck(M:F(), nocolors); + _ -> + true + end + || {F, 0} <- M:module_info(exports)]) + end, + try begin + CoverSetup(), + Res = case $(1) of + all -> [true] =:= lists:usort([Module(M) || M <- [$(call comma_list,$(3))]]); + module -> Module($(2)); + function -> proper:quickcheck($(2), nocolors) + end, + CoverExport("$(COVER_DATA_DIR)/proper.coverdata"), + Res + end of + true -> halt(0); + _ -> halt(1) + catch error:undef -> + io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]), + halt(0) + end. +endef + +ifdef t +ifeq (,$(findstring :,$(t))) +proper: test-build cover-data-dir + $(verbose) $(call erlang,$(call proper_check.erl,module,$(t))) +else +proper: test-build cover-data-dir + $(verbose) echo Testing $(t)/0 + $(verbose) $(call erlang,$(call proper_check.erl,function,$(t)())) +endif +else +proper: test-build cover-data-dir + $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \ + $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam)))))) + $(gen_verbose) $(call erlang,$(call proper_check.erl,all,undefined,$(MODULES))) +endif +endif + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +# Verbosity. + +proto_verbose_0 = @echo " PROTO " $(filter %.proto,$(?F)); +proto_verbose = $(proto_verbose_$(V)) + +# Core targets. + +ifneq ($(wildcard src/),) +ifneq ($(filter gpb protobuffs,$(BUILD_DEPS) $(DEPS)),) +PROTO_FILES := $(filter %.proto,$(ALL_SRC_FILES)) +ERL_FILES += $(addprefix src/,$(patsubst %.proto,%_pb.erl,$(notdir $(PROTO_FILES)))) + +ifeq ($(PROTO_FILES),) +$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: + $(verbose) : +else +# Rebuild proto files when the Makefile changes. +# We exclude $(PROJECT).d to avoid a circular dependency. +$(ERLANG_MK_TMP)/last-makefile-change-protobuffs: $(filter-out $(PROJECT).d,$(MAKEFILE_LIST)) | $(ERLANG_MK_TMP) + $(verbose) if test -f $@; then \ + touch $(PROTO_FILES); \ + fi + $(verbose) touch $@ + +$(PROJECT).d:: $(ERLANG_MK_TMP)/last-makefile-change-protobuffs +endif + +ifeq ($(filter gpb,$(BUILD_DEPS) $(DEPS)),) +define compile_proto.erl + [begin + protobuffs_compile:generate_source(F, [ + {output_include_dir, "./include"}, + {output_src_dir, "./src"}]) + end || F <- string:tokens("$1", " ")], + halt(). +endef +else +define compile_proto.erl + [begin + gpb_compile:file(F, [ + {include_as_lib, true}, + {module_name_suffix, "_pb"}, + {o_hrl, "./include"}, + {o_erl, "./src"}]) + end || F <- string:tokens("$1", " ")], + halt(). +endef +endif + +ifneq ($(PROTO_FILES),) +$(PROJECT).d:: $(PROTO_FILES) + $(verbose) mkdir -p ebin/ include/ + $(if $(strip $?),$(proto_verbose) $(call erlang,$(call compile_proto.erl,$?))) +endif +endif +endif + +# Copyright (c) 2013-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: relx-rel relx-relup distclean-relx-rel run + +# Configuration. 
+ +RELX ?= $(ERLANG_MK_TMP)/relx +RELX_CONFIG ?= $(CURDIR)/relx.config + +RELX_URL ?= https://erlang.mk/res/relx-v3.27.0 +RELX_OPTS ?= +RELX_OUTPUT_DIR ?= _rel +RELX_REL_EXT ?= +RELX_TAR ?= 1 + +ifdef SFX + RELX_TAR = 1 +endif + +ifeq ($(firstword $(RELX_OPTS)),-o) + RELX_OUTPUT_DIR = $(word 2,$(RELX_OPTS)) +else + RELX_OPTS += -o $(RELX_OUTPUT_DIR) +endif + +# Core targets. + +ifeq ($(IS_DEP),) +ifneq ($(wildcard $(RELX_CONFIG)),) +rel:: relx-rel + +relup:: relx-relup +endif +endif + +distclean:: distclean-relx-rel + +# Plugin-specific targets. + +$(RELX): | $(ERLANG_MK_TMP) + $(gen_verbose) $(call core_http_get,$(RELX),$(RELX_URL)) + $(verbose) chmod +x $(RELX) + +relx-rel: $(RELX) rel-deps app + $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release + $(verbose) $(MAKE) relx-post-rel +ifeq ($(RELX_TAR),1) + $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) tar +endif + +relx-relup: $(RELX) rel-deps app + $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) release + $(MAKE) relx-post-rel + $(verbose) $(RELX) $(if $(filter 1,$V),-V 3) -c $(RELX_CONFIG) $(RELX_OPTS) relup $(if $(filter 1,$(RELX_TAR)),tar) + +distclean-relx-rel: + $(gen_verbose) rm -rf $(RELX_OUTPUT_DIR) + +# Default hooks. +relx-post-rel:: + $(verbose) : + +# Run target. + +ifeq ($(wildcard $(RELX_CONFIG)),) +run:: +else + +define get_relx_release.erl + {ok, Config} = file:consult("$(call core_native_path,$(RELX_CONFIG))"), + {release, {Name, Vsn0}, _} = lists:keyfind(release, 1, Config), + Vsn = case Vsn0 of + {cmd, Cmd} -> os:cmd(Cmd); + semver -> ""; + {semver, _} -> ""; + VsnStr -> Vsn0 + end, + Extended = case lists:keyfind(extended_start_script, 1, Config) of + {_, true} -> "1"; + _ -> "" + end, + io:format("~s ~s ~s", [Name, Vsn, Extended]), + halt(0). +endef + +RELX_REL := $(shell $(call erlang,$(get_relx_release.erl))) +RELX_REL_NAME := $(word 1,$(RELX_REL)) +RELX_REL_VSN := $(word 2,$(RELX_REL)) +RELX_REL_CMD := $(if $(word 3,$(RELX_REL)),console) + +ifeq ($(PLATFORM),msys2) +RELX_REL_EXT := .cmd +endif + +run:: all + $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) $(RELX_REL_CMD) + +ifdef RELOAD +rel:: + $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) ping + $(verbose) $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/bin/$(RELX_REL_NAME)$(RELX_REL_EXT) \ + eval "io:format(\"~p~n\", [c:lm()])" +endif + +help:: + $(verbose) printf "%s\n" "" \ + "Relx targets:" \ + " run Compile the project, build the release and run it" + +endif + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# Copyright (c) 2014, M Robert Martin +# This file is contributed to erlang.mk and subject to the terms of the ISC License. + +.PHONY: shell + +# Configuration. + +SHELL_ERL ?= erl +SHELL_PATHS ?= $(CURDIR)/ebin $(TEST_DIR) +SHELL_OPTS ?= + +ALL_SHELL_DEPS_DIRS = $(addprefix $(DEPS_DIR)/,$(SHELL_DEPS)) + +# Core targets + +help:: + $(verbose) printf "%s\n" "" \ + "Shell targets:" \ + " shell Run an erlang shell with SHELL_OPTS or reasonable default" + +# Plugin-specific targets. + +$(foreach dep,$(SHELL_DEPS),$(eval $(call dep_target,$(dep)))) + +ifneq ($(SKIP_DEPS),) +build-shell-deps: +else +build-shell-deps: $(ALL_SHELL_DEPS_DIRS) + $(verbose) set -e; for dep in $(ALL_SHELL_DEPS_DIRS) ; do \ + if [ -z "$(strip $(FULL))" ] && [ ! -L $$dep ] && [ -f $$dep/ebin/dep_built ]; then \ + :; \ + else \ + $(MAKE) -C $$dep IS_DEP=1; \ + if [ ! 
-L $$dep ] && [ -d $$dep/ebin ]; then touch $$dep/ebin/dep_built; fi; \ + fi \ + done +endif + +shell:: build-shell-deps + $(gen_verbose) $(SHELL_ERL) -pa $(SHELL_PATHS) $(SHELL_OPTS) + +# Copyright 2017, Stanislaw Klekot +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: distclean-sphinx sphinx + +# Configuration. + +SPHINX_BUILD ?= sphinx-build +SPHINX_SOURCE ?= doc +SPHINX_CONFDIR ?= +SPHINX_FORMATS ?= html +SPHINX_DOCTREES ?= $(ERLANG_MK_TMP)/sphinx.doctrees +SPHINX_OPTS ?= + +#sphinx_html_opts = +#sphinx_html_output = html +#sphinx_man_opts = +#sphinx_man_output = man +#sphinx_latex_opts = +#sphinx_latex_output = latex + +# Helpers. + +sphinx_build_0 = @echo " SPHINX" $1; $(SPHINX_BUILD) -N -q +sphinx_build_1 = $(SPHINX_BUILD) -N +sphinx_build_2 = set -x; $(SPHINX_BUILD) +sphinx_build = $(sphinx_build_$(V)) + +define sphinx.build +$(call sphinx_build,$1) -b $1 -d $(SPHINX_DOCTREES) $(if $(SPHINX_CONFDIR),-c $(SPHINX_CONFDIR)) $(SPHINX_OPTS) $(sphinx_$1_opts) -- $(SPHINX_SOURCE) $(call sphinx.output,$1) + +endef + +define sphinx.output +$(if $(sphinx_$1_output),$(sphinx_$1_output),$1) +endef + +# Targets. + +ifneq ($(wildcard $(if $(SPHINX_CONFDIR),$(SPHINX_CONFDIR),$(SPHINX_SOURCE))/conf.py),) +docs:: sphinx +distclean:: distclean-sphinx +endif + +help:: + $(verbose) printf "%s\n" "" \ + "Sphinx targets:" \ + " sphinx Generate Sphinx documentation." \ + "" \ + "ReST sources and 'conf.py' file are expected in directory pointed by" \ + "SPHINX_SOURCE ('doc' by default). SPHINX_FORMATS lists formats to build (only" \ + "'html' format is generated by default); target directory can be specified by" \ + 'setting sphinx_$${format}_output, for example: sphinx_html_output = output/html' \ + "Additional Sphinx options can be set in SPHINX_OPTS." + +# Plugin-specific targets. + +sphinx: + $(foreach F,$(SPHINX_FORMATS),$(call sphinx.build,$F)) + +distclean-sphinx: + $(gen_verbose) rm -rf $(filter-out $(SPHINX_SOURCE),$(foreach F,$(SPHINX_FORMATS),$(call sphinx.output,$F))) + +# Copyright (c) 2017, Jean-Sรฉbastien Pรฉdron +# This file is contributed to erlang.mk and subject to the terms of the ISC License. + +.PHONY: show-ERL_LIBS show-ERLC_OPTS show-TEST_ERLC_OPTS + +show-ERL_LIBS: + @echo $(ERL_LIBS) + +show-ERLC_OPTS: + @$(foreach opt,$(ERLC_OPTS) -pa ebin -I include,echo "$(opt)";) + +show-TEST_ERLC_OPTS: + @$(foreach opt,$(TEST_ERLC_OPTS) -pa ebin -I include,echo "$(opt)";) + +# Copyright (c) 2015-2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +ifeq ($(filter triq,$(DEPS) $(TEST_DEPS)),triq) +.PHONY: triq + +# Targets. + +tests:: triq + +define triq_check.erl + $(call cover.erl) + code:add_pathsa([ + "$(call core_native_path,$(CURDIR)/ebin)", + "$(call core_native_path,$(DEPS_DIR)/*/ebin)", + "$(call core_native_path,$(TEST_DIR))"]), + try begin + CoverSetup(), + Res = case $(1) of + all -> [true] =:= lists:usort([triq:check(M) || M <- [$(call comma_list,$(3))]]); + module -> triq:check($(2)); + function -> triq:check($(2)) + end, + CoverExport("$(COVER_DATA_DIR)/triq.coverdata"), + Res + end of + true -> halt(0); + _ -> halt(1) + catch error:undef -> + io:format("Undefined property or module?~n~p~n", [erlang:get_stacktrace()]), + halt(0) + end. 
+endef + +ifdef t +ifeq (,$(findstring :,$(t))) +triq: test-build cover-data-dir + $(verbose) $(call erlang,$(call triq_check.erl,module,$(t))) +else +triq: test-build cover-data-dir + $(verbose) echo Testing $(t)/0 + $(verbose) $(call erlang,$(call triq_check.erl,function,$(t)())) +endif +else +triq: test-build cover-data-dir + $(eval MODULES := $(patsubst %,'%',$(sort $(notdir $(basename \ + $(wildcard ebin/*.beam) $(call core_find,$(TEST_DIR)/,*.beam)))))) + $(gen_verbose) $(call erlang,$(call triq_check.erl,all,undefined,$(MODULES))) +endif +endif + +# Copyright (c) 2016, Loรฏc Hoguin +# Copyright (c) 2015, Erlang Solutions Ltd. +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: xref distclean-xref + +# Configuration. + +ifeq ($(XREF_CONFIG),) + XREFR_ARGS := +else + XREFR_ARGS := -c $(XREF_CONFIG) +endif + +XREFR ?= $(CURDIR)/xrefr +export XREFR + +XREFR_URL ?= https://github.com/inaka/xref_runner/releases/download/1.1.0/xrefr + +# Core targets. + +help:: + $(verbose) printf '%s\n' '' \ + 'Xref targets:' \ + ' xref Run Xrefr using $$XREF_CONFIG as config file if defined' + +distclean:: distclean-xref + +# Plugin-specific targets. + +$(XREFR): + $(gen_verbose) $(call core_http_get,$(XREFR),$(XREFR_URL)) + $(verbose) chmod +x $(XREFR) + +xref: deps app $(XREFR) + $(gen_verbose) $(XREFR) $(XREFR_ARGS) + +distclean-xref: + $(gen_verbose) rm -rf $(XREFR) + +# Copyright (c) 2016, Loรฏc Hoguin +# Copyright (c) 2015, Viktor Sรถderqvist +# This file is part of erlang.mk and subject to the terms of the ISC License. + +COVER_REPORT_DIR ?= cover +COVER_DATA_DIR ?= $(COVER_REPORT_DIR) + +ifdef COVER +COVER_APPS ?= $(notdir $(ALL_APPS_DIRS)) +COVER_DEPS ?= +endif + +# Code coverage for Common Test. + +ifdef COVER +ifdef CT_RUN +ifneq ($(wildcard $(TEST_DIR)),) +test-build:: $(TEST_DIR)/ct.cover.spec + +$(TEST_DIR)/ct.cover.spec: cover-data-dir + $(gen_verbose) printf "%s\n" \ + "{incl_app, '$(PROJECT)', details}." \ + "{incl_dirs, '$(PROJECT)', [\"$(call core_native_path,$(CURDIR)/ebin)\" \ + $(foreach a,$(COVER_APPS),$(comma) \"$(call core_native_path,$(APPS_DIR)/$a/ebin)\") \ + $(foreach d,$(COVER_DEPS),$(comma) \"$(call core_native_path,$(DEPS_DIR)/$d/ebin)\")]}." \ + '{export,"$(call core_native_path,$(abspath $(COVER_DATA_DIR))/ct.coverdata)"}.' > $@ + +CT_RUN += -cover $(TEST_DIR)/ct.cover.spec +endif +endif +endif + +# Code coverage for other tools. 
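+#
+# The cover.erl helper defined below is spliced (via $(call cover.erl)) into
+# the eunit, proper and triq snippets: CoverSetup() cover-compiles ebin/ plus
+# the COVER_APPS and COVER_DEPS ebin directories, and CoverExport/1 writes the
+# collected data to the given .coverdata file under COVER_DATA_DIR.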
+ +ifdef COVER +define cover.erl + CoverSetup = fun() -> + Dirs = ["$(call core_native_path,$(CURDIR)/ebin)" + $(foreach a,$(COVER_APPS),$(comma) "$(call core_native_path,$(APPS_DIR)/$a/ebin)") + $(foreach d,$(COVER_DEPS),$(comma) "$(call core_native_path,$(DEPS_DIR)/$d/ebin)")], + [begin + case filelib:is_dir(Dir) of + false -> false; + true -> + case cover:compile_beam_directory(Dir) of + {error, _} -> halt(1); + _ -> true + end + end + end || Dir <- Dirs] + end, + CoverExport = fun(Filename) -> cover:export(Filename) end, +endef +else +define cover.erl + CoverSetup = fun() -> ok end, + CoverExport = fun(_) -> ok end, +endef +endif + +# Core targets + +ifdef COVER +ifneq ($(COVER_REPORT_DIR),) +tests:: + $(verbose) $(MAKE) --no-print-directory cover-report +endif + +cover-data-dir: | $(COVER_DATA_DIR) + +$(COVER_DATA_DIR): + $(verbose) mkdir -p $(COVER_DATA_DIR) +else +cover-data-dir: +endif + +clean:: coverdata-clean + +ifneq ($(COVER_REPORT_DIR),) +distclean:: cover-report-clean +endif + +help:: + $(verbose) printf "%s\n" "" \ + "Cover targets:" \ + " cover-report Generate a HTML coverage report from previously collected" \ + " cover data." \ + " all.coverdata Merge all coverdata files into all.coverdata." \ + "" \ + "If COVER=1 is set, coverage data is generated by the targets eunit and ct. The" \ + "target tests additionally generates a HTML coverage report from the combined" \ + "coverdata files from each of these testing tools. HTML reports can be disabled" \ + "by setting COVER_REPORT_DIR to empty." + +# Plugin specific targets + +COVERDATA = $(filter-out $(COVER_DATA_DIR)/all.coverdata,$(wildcard $(COVER_DATA_DIR)/*.coverdata)) + +.PHONY: coverdata-clean +coverdata-clean: + $(gen_verbose) rm -f $(COVER_DATA_DIR)/*.coverdata $(TEST_DIR)/ct.cover.spec + +# Merge all coverdata files into one. +define cover_export.erl + $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),) + cover:export("$(COVER_DATA_DIR)/$@"), halt(0). +endef + +all.coverdata: $(COVERDATA) cover-data-dir + $(gen_verbose) $(call erlang,$(cover_export.erl)) + +# These are only defined if COVER_REPORT_DIR is non-empty. Set COVER_REPORT_DIR to +# empty if you want the coverdata files but not the HTML report. +ifneq ($(COVER_REPORT_DIR),) + +.PHONY: cover-report-clean cover-report + +cover-report-clean: + $(gen_verbose) rm -rf $(COVER_REPORT_DIR) +ifneq ($(COVER_REPORT_DIR),$(COVER_DATA_DIR)) + $(if $(shell ls -A $(COVER_DATA_DIR)/),,$(verbose) rmdir $(COVER_DATA_DIR)) +endif + +ifeq ($(COVERDATA),) +cover-report: +else + +# Modules which include eunit.hrl always contain one line without coverage +# because eunit defines test/0 which is never called. We compensate for this. 
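+# For illustration (not upstream text): if src/my_mod.erl contains
+#   -include_lib("eunit/include/eunit.hrl").
+# then EUNIT_HRL_MODS below picks up 'my_mod' and the report subtracts one
+# from that module's count of not-covered lines to account for the unused test/0.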
+EUNIT_HRL_MODS = $(subst $(space),$(comma),$(shell \ + grep -H -e '^\s*-include.*include/eunit\.hrl"' src/*.erl \ + | sed "s/^src\/\(.*\)\.erl:.*/'\1'/" | uniq)) + +define cover_report.erl + $(foreach f,$(COVERDATA),cover:import("$(f)") == ok orelse halt(1),) + Ms = cover:imported_modules(), + [cover:analyse_to_file(M, "$(COVER_REPORT_DIR)/" ++ atom_to_list(M) + ++ ".COVER.html", [html]) || M <- Ms], + Report = [begin {ok, R} = cover:analyse(M, module), R end || M <- Ms], + EunitHrlMods = [$(EUNIT_HRL_MODS)], + Report1 = [{M, {Y, case lists:member(M, EunitHrlMods) of + true -> N - 1; false -> N end}} || {M, {Y, N}} <- Report], + TotalY = lists:sum([Y || {_, {Y, _}} <- Report1]), + TotalN = lists:sum([N || {_, {_, N}} <- Report1]), + Perc = fun(Y, N) -> case Y + N of 0 -> 100; S -> round(100 * Y / S) end end, + TotalPerc = Perc(TotalY, TotalN), + {ok, F} = file:open("$(COVER_REPORT_DIR)/index.html", [write]), + io:format(F, "~n" + "~n" + "Coverage report~n" + "~n", []), + io:format(F, "

<h1>Coverage</h1>~n", []),
+ io:format(F, "<p>Total: ~p%</p>~n", [TotalPerc]),
+ io:format(F, "<table><tr><th>Module</th><th>Coverage</th></tr>~n", []),
+ [io:format(F, "<tr><td><a href=\"~p.COVER.html\">~p</a></td>"
+ "<td>~p%</td></tr>~n",
+ [M, M, Perc(Y, N)]) || {M, {Y, N}} <- Report1],
+ How = "$(subst $(space),$(comma)$(space),$(basename $(COVERDATA)))",
+ Date = "$(shell date -u "+%Y-%m-%dT%H:%M:%SZ")",
+ io:format(F, "</table>~n"
+ "<p>Generated using ~s and erlang.mk on ~s.</p>
~n" + "", [How, Date]), + halt(). +endef + +cover-report: + $(verbose) mkdir -p $(COVER_REPORT_DIR) + $(gen_verbose) $(call erlang,$(cover_report.erl)) + +endif +endif # ifneq ($(COVER_REPORT_DIR),) + +# Copyright (c) 2016, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +.PHONY: sfx + +ifdef RELX_REL +ifdef SFX + +# Configuration. + +SFX_ARCHIVE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME)/$(RELX_REL_NAME)-$(RELX_REL_VSN).tar.gz +SFX_OUTPUT_FILE ?= $(RELX_OUTPUT_DIR)/$(RELX_REL_NAME).run + +# Core targets. + +rel:: sfx + +# Plugin-specific targets. + +define sfx_stub +#!/bin/sh + +TMPDIR=`mktemp -d` +ARCHIVE=`awk '/^__ARCHIVE_BELOW__$$/ {print NR + 1; exit 0;}' $$0` +FILENAME=$$(basename $$0) +REL=$${FILENAME%.*} + +tail -n+$$ARCHIVE $$0 | tar -xzf - -C $$TMPDIR + +$$TMPDIR/bin/$$REL console +RET=$$? + +rm -rf $$TMPDIR + +exit $$RET + +__ARCHIVE_BELOW__ +endef + +sfx: + $(verbose) $(call core_render,sfx_stub,$(SFX_OUTPUT_FILE)) + $(gen_verbose) cat $(SFX_ARCHIVE) >> $(SFX_OUTPUT_FILE) + $(verbose) chmod +x $(SFX_OUTPUT_FILE) + +endif +endif + +# Copyright (c) 2013-2017, Loรฏc Hoguin +# This file is part of erlang.mk and subject to the terms of the ISC License. + +# External plugins. + +DEP_PLUGINS ?= + +$(foreach p,$(DEP_PLUGINS),\ + $(eval $(if $(findstring /,$p),\ + $(call core_dep_plugin,$p,$(firstword $(subst /, ,$p))),\ + $(call core_dep_plugin,$p/plugins.mk,$p)))) + +help:: help-plugins + +help-plugins:: + $(verbose) : + +# Copyright (c) 2013-2015, Loรฏc Hoguin +# Copyright (c) 2015-2016, Jean-Sรฉbastien Pรฉdron +# This file is part of erlang.mk and subject to the terms of the ISC License. + +# Fetch dependencies recursively (without building them). + +.PHONY: fetch-deps fetch-doc-deps fetch-rel-deps fetch-test-deps \ + fetch-shell-deps + +.PHONY: $(ERLANG_MK_RECURSIVE_DEPS_LIST) \ + $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \ + $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \ + $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \ + $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST) + +fetch-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST) +fetch-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) +fetch-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) +fetch-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) +fetch-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST) + +ifneq ($(SKIP_DEPS),) +$(ERLANG_MK_RECURSIVE_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): + $(verbose) :> $@ +else +# By default, we fetch "normal" dependencies. They are also included no +# matter the type of requested dependencies. +# +# $(ALL_DEPS_DIRS) includes $(BUILD_DEPS). + +$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) +$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_DOC_DEPS_DIRS) +$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_REL_DEPS_DIRS) +$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_TEST_DEPS_DIRS) +$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): $(LOCAL_DEPS_DIRS) $(ALL_DEPS_DIRS) $(ALL_SHELL_DEPS_DIRS) + +# Allow to use fetch-deps and $(DEP_TYPES) to fetch multiple types of +# dependencies with a single target. 
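+#
+# For example (illustrative): `make fetch-deps DEP_TYPES="doc test"` also
+# fetches DOC_DEPS and TEST_DEPS recursively, without building them.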
+ifneq ($(filter doc,$(DEP_TYPES)),) +$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_DOC_DEPS_DIRS) +endif +ifneq ($(filter rel,$(DEP_TYPES)),) +$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_REL_DEPS_DIRS) +endif +ifneq ($(filter test,$(DEP_TYPES)),) +$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_TEST_DEPS_DIRS) +endif +ifneq ($(filter shell,$(DEP_TYPES)),) +$(ERLANG_MK_RECURSIVE_DEPS_LIST): $(ALL_SHELL_DEPS_DIRS) +endif + +ERLANG_MK_RECURSIVE_TMP_LIST := $(abspath $(ERLANG_MK_TMP)/recursive-tmp-deps-$(shell echo $$PPID).log) + +$(ERLANG_MK_RECURSIVE_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) \ +$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST): | $(ERLANG_MK_TMP) +ifeq ($(IS_APP)$(IS_DEP),) + $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST) +endif + $(verbose) touch $(ERLANG_MK_RECURSIVE_TMP_LIST) + $(verbose) set -e; for dep in $^ ; do \ + if ! grep -qs ^$$dep$$ $(ERLANG_MK_RECURSIVE_TMP_LIST); then \ + echo $$dep >> $(ERLANG_MK_RECURSIVE_TMP_LIST); \ + if grep -qs -E "^[[:blank:]]*include[[:blank:]]+(erlang\.mk|.*/erlang\.mk|.*ERLANG_MK_FILENAME.*)$$" \ + $$dep/GNUmakefile $$dep/makefile $$dep/Makefile; then \ + $(MAKE) -C $$dep fetch-deps \ + IS_DEP=1 \ + ERLANG_MK_RECURSIVE_TMP_LIST=$(ERLANG_MK_RECURSIVE_TMP_LIST); \ + fi \ + fi \ + done +ifeq ($(IS_APP)$(IS_DEP),) + $(verbose) sort < $(ERLANG_MK_RECURSIVE_TMP_LIST) | \ + uniq > $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted + $(verbose) cmp -s $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ \ + || mv $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted $@ + $(verbose) rm -f $(ERLANG_MK_RECURSIVE_TMP_LIST).sorted + $(verbose) rm $(ERLANG_MK_RECURSIVE_TMP_LIST) +endif +endif # ifneq ($(SKIP_DEPS),) + +# List dependencies recursively. + +.PHONY: list-deps list-doc-deps list-rel-deps list-test-deps \ + list-shell-deps + +list-deps: $(ERLANG_MK_RECURSIVE_DEPS_LIST) +list-doc-deps: $(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST) +list-rel-deps: $(ERLANG_MK_RECURSIVE_REL_DEPS_LIST) +list-test-deps: $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) +list-shell-deps: $(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST) + +list-deps list-doc-deps list-rel-deps list-test-deps list-shell-deps: + $(verbose) cat $^ + +# Query dependencies recursively. 
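+#
+# For example (illustrative): `make query-deps QUERY="name version"` prints one
+# "<project>: <dep> <version>" line per dependency, recursing into dependencies
+# that also use erlang.mk.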
+ +.PHONY: query-deps query-doc-deps query-rel-deps query-test-deps \ + query-shell-deps + +QUERY ?= name fetch_method repo version + +define query_target +$(1): $(2) clean-tmp-query.log +ifeq ($(IS_APP)$(IS_DEP),) + $(verbose) rm -f $(4) +endif + $(verbose) $(foreach dep,$(3),\ + echo $(PROJECT): $(foreach q,$(QUERY),$(call query_$(q),$(dep))) >> $(4) ;) + $(if $(filter-out query-deps,$(1)),,\ + $(verbose) set -e; for dep in $(3) ; do \ + if grep -qs ^$$$$dep$$$$ $(ERLANG_MK_TMP)/query.log; then \ + :; \ + else \ + echo $$$$dep >> $(ERLANG_MK_TMP)/query.log; \ + $(MAKE) -C $(DEPS_DIR)/$$$$dep $$@ QUERY="$(QUERY)" IS_DEP=1 || true; \ + fi \ + done) +ifeq ($(IS_APP)$(IS_DEP),) + $(verbose) touch $(4) + $(verbose) cat $(4) +endif +endef + +clean-tmp-query.log: +ifeq ($(IS_DEP),) + $(verbose) rm -f $(ERLANG_MK_TMP)/query.log +endif + +$(eval $(call query_target,query-deps,$(ERLANG_MK_RECURSIVE_DEPS_LIST),$(BUILD_DEPS) $(DEPS),$(ERLANG_MK_QUERY_DEPS_FILE))) +$(eval $(call query_target,query-doc-deps,$(ERLANG_MK_RECURSIVE_DOC_DEPS_LIST),$(DOC_DEPS),$(ERLANG_MK_QUERY_DOC_DEPS_FILE))) +$(eval $(call query_target,query-rel-deps,$(ERLANG_MK_RECURSIVE_REL_DEPS_LIST),$(REL_DEPS),$(ERLANG_MK_QUERY_REL_DEPS_FILE))) +$(eval $(call query_target,query-test-deps,$(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST),$(TEST_DEPS),$(ERLANG_MK_QUERY_TEST_DEPS_FILE))) +$(eval $(call query_target,query-shell-deps,$(ERLANG_MK_RECURSIVE_SHELL_DEPS_LIST),$(SHELL_DEPS),$(ERLANG_MK_QUERY_SHELL_DEPS_FILE))) diff --git a/deps/cowlib/hex_metadata.config b/deps/cowlib/hex_metadata.config new file mode 100644 index 0000000..fd3ae1d --- /dev/null +++ b/deps/cowlib/hex_metadata.config @@ -0,0 +1,25 @@ +{<<"app">>,<<"cowlib">>}. +{<<"build_tools">>,[<<"make">>,<<"rebar3">>]}. +{<<"description">>,<<"Support library for manipulating Web protocols.">>}. +{<<"files">>, + [<<"ebin/cowlib.app">>,<<"erlang.mk">>,<<"include/cow_inline.hrl">>, + <<"include/cow_parse.hrl">>,<<"LICENSE">>,<<"Makefile">>, + <<"README.asciidoc">>,<<"src/cow_base64url.erl">>,<<"src/cow_date.erl">>, + <<"src/cow_http2.erl">>,<<"src/cow_multipart.erl">>,<<"src/cow_qs.erl">>, + <<"src/cow_spdy.erl">>,<<"src/cow_spdy.hrl">>,<<"src/cow_sse.erl">>, + <<"src/cow_uri.erl">>,<<"src/cow_iolists.erl">>,<<"src/cow_mimetypes.erl">>, + <<"src/cow_mimetypes.erl.src">>,<<"src/cow_ws.erl">>, + <<"src/cow_http_te.erl">>,<<"src/cow_http.erl">>, + <<"src/cow_uri_template.erl">>,<<"src/cow_hpack_dec_huffman_lookup.hrl">>, + <<"src/cow_http_hd.erl">>,<<"src/cow_http_struct_hd.erl">>, + <<"src/cow_link.erl">>,<<"src/cow_http2_machine.erl">>, + <<"src/cow_cookie.erl">>,<<"src/cow_hpack.erl">>]}. +{<<"licenses">>,[<<"ISC">>]}. +{<<"links">>, + [{<<"Function reference">>, + <<"https://ninenines.eu/docs/en/cowlib/2.11/manual/">>}, + {<<"GitHub">>,<<"https://github.com/ninenines/cowlib">>}, + {<<"Sponsor">>,<<"https://github.com/sponsors/essen">>}]}. +{<<"name">>,<<"cowlib">>}. +{<<"requirements">>,[]}. +{<<"version">>,<<"2.11.0">>}. diff --git a/deps/cowlib/include/cow_inline.hrl b/deps/cowlib/include/cow_inline.hrl new file mode 100644 index 0000000..f0d12eb --- /dev/null +++ b/deps/cowlib/include/cow_inline.hrl @@ -0,0 +1,447 @@ +%% Copyright (c) 2014-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. 
+%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-ifndef(COW_INLINE_HRL). +-define(COW_INLINE_HRL, 1). + +%% LC(Character) + +-define(LC(C), case C of + $A -> $a; + $B -> $b; + $C -> $c; + $D -> $d; + $E -> $e; + $F -> $f; + $G -> $g; + $H -> $h; + $I -> $i; + $J -> $j; + $K -> $k; + $L -> $l; + $M -> $m; + $N -> $n; + $O -> $o; + $P -> $p; + $Q -> $q; + $R -> $r; + $S -> $s; + $T -> $t; + $U -> $u; + $V -> $v; + $W -> $w; + $X -> $x; + $Y -> $y; + $Z -> $z; + _ -> C +end). + +%% LOWER(Bin) +%% +%% Lowercase the entire binary string in a binary comprehension. + +-define(LOWER(Bin), << << ?LC(C) >> || << C >> <= Bin >>). + +%% LOWERCASE(Function, Rest, Acc, ...) +%% +%% To be included at the end of a case block. +%% Defined for up to 10 extra arguments. + +-define(LOWER(Function, Rest, Acc), case C of + $A -> Function(Rest, << Acc/binary, $a >>); + $B -> Function(Rest, << Acc/binary, $b >>); + $C -> Function(Rest, << Acc/binary, $c >>); + $D -> Function(Rest, << Acc/binary, $d >>); + $E -> Function(Rest, << Acc/binary, $e >>); + $F -> Function(Rest, << Acc/binary, $f >>); + $G -> Function(Rest, << Acc/binary, $g >>); + $H -> Function(Rest, << Acc/binary, $h >>); + $I -> Function(Rest, << Acc/binary, $i >>); + $J -> Function(Rest, << Acc/binary, $j >>); + $K -> Function(Rest, << Acc/binary, $k >>); + $L -> Function(Rest, << Acc/binary, $l >>); + $M -> Function(Rest, << Acc/binary, $m >>); + $N -> Function(Rest, << Acc/binary, $n >>); + $O -> Function(Rest, << Acc/binary, $o >>); + $P -> Function(Rest, << Acc/binary, $p >>); + $Q -> Function(Rest, << Acc/binary, $q >>); + $R -> Function(Rest, << Acc/binary, $r >>); + $S -> Function(Rest, << Acc/binary, $s >>); + $T -> Function(Rest, << Acc/binary, $t >>); + $U -> Function(Rest, << Acc/binary, $u >>); + $V -> Function(Rest, << Acc/binary, $v >>); + $W -> Function(Rest, << Acc/binary, $w >>); + $X -> Function(Rest, << Acc/binary, $x >>); + $Y -> Function(Rest, << Acc/binary, $y >>); + $Z -> Function(Rest, << Acc/binary, $z >>); + C -> Function(Rest, << Acc/binary, C >>) +end). 
+ +-define(LOWER(Function, Rest, A0, Acc), case C of + $A -> Function(Rest, A0, << Acc/binary, $a >>); + $B -> Function(Rest, A0, << Acc/binary, $b >>); + $C -> Function(Rest, A0, << Acc/binary, $c >>); + $D -> Function(Rest, A0, << Acc/binary, $d >>); + $E -> Function(Rest, A0, << Acc/binary, $e >>); + $F -> Function(Rest, A0, << Acc/binary, $f >>); + $G -> Function(Rest, A0, << Acc/binary, $g >>); + $H -> Function(Rest, A0, << Acc/binary, $h >>); + $I -> Function(Rest, A0, << Acc/binary, $i >>); + $J -> Function(Rest, A0, << Acc/binary, $j >>); + $K -> Function(Rest, A0, << Acc/binary, $k >>); + $L -> Function(Rest, A0, << Acc/binary, $l >>); + $M -> Function(Rest, A0, << Acc/binary, $m >>); + $N -> Function(Rest, A0, << Acc/binary, $n >>); + $O -> Function(Rest, A0, << Acc/binary, $o >>); + $P -> Function(Rest, A0, << Acc/binary, $p >>); + $Q -> Function(Rest, A0, << Acc/binary, $q >>); + $R -> Function(Rest, A0, << Acc/binary, $r >>); + $S -> Function(Rest, A0, << Acc/binary, $s >>); + $T -> Function(Rest, A0, << Acc/binary, $t >>); + $U -> Function(Rest, A0, << Acc/binary, $u >>); + $V -> Function(Rest, A0, << Acc/binary, $v >>); + $W -> Function(Rest, A0, << Acc/binary, $w >>); + $X -> Function(Rest, A0, << Acc/binary, $x >>); + $Y -> Function(Rest, A0, << Acc/binary, $y >>); + $Z -> Function(Rest, A0, << Acc/binary, $z >>); + C -> Function(Rest, A0, << Acc/binary, C >>) +end). + +-define(LOWER(Function, Rest, A0, A1, Acc), case C of + $A -> Function(Rest, A0, A1, << Acc/binary, $a >>); + $B -> Function(Rest, A0, A1, << Acc/binary, $b >>); + $C -> Function(Rest, A0, A1, << Acc/binary, $c >>); + $D -> Function(Rest, A0, A1, << Acc/binary, $d >>); + $E -> Function(Rest, A0, A1, << Acc/binary, $e >>); + $F -> Function(Rest, A0, A1, << Acc/binary, $f >>); + $G -> Function(Rest, A0, A1, << Acc/binary, $g >>); + $H -> Function(Rest, A0, A1, << Acc/binary, $h >>); + $I -> Function(Rest, A0, A1, << Acc/binary, $i >>); + $J -> Function(Rest, A0, A1, << Acc/binary, $j >>); + $K -> Function(Rest, A0, A1, << Acc/binary, $k >>); + $L -> Function(Rest, A0, A1, << Acc/binary, $l >>); + $M -> Function(Rest, A0, A1, << Acc/binary, $m >>); + $N -> Function(Rest, A0, A1, << Acc/binary, $n >>); + $O -> Function(Rest, A0, A1, << Acc/binary, $o >>); + $P -> Function(Rest, A0, A1, << Acc/binary, $p >>); + $Q -> Function(Rest, A0, A1, << Acc/binary, $q >>); + $R -> Function(Rest, A0, A1, << Acc/binary, $r >>); + $S -> Function(Rest, A0, A1, << Acc/binary, $s >>); + $T -> Function(Rest, A0, A1, << Acc/binary, $t >>); + $U -> Function(Rest, A0, A1, << Acc/binary, $u >>); + $V -> Function(Rest, A0, A1, << Acc/binary, $v >>); + $W -> Function(Rest, A0, A1, << Acc/binary, $w >>); + $X -> Function(Rest, A0, A1, << Acc/binary, $x >>); + $Y -> Function(Rest, A0, A1, << Acc/binary, $y >>); + $Z -> Function(Rest, A0, A1, << Acc/binary, $z >>); + C -> Function(Rest, A0, A1, << Acc/binary, C >>) +end). 
+ +-define(LOWER(Function, Rest, A0, A1, A2, Acc), case C of + $A -> Function(Rest, A0, A1, A2, << Acc/binary, $a >>); + $B -> Function(Rest, A0, A1, A2, << Acc/binary, $b >>); + $C -> Function(Rest, A0, A1, A2, << Acc/binary, $c >>); + $D -> Function(Rest, A0, A1, A2, << Acc/binary, $d >>); + $E -> Function(Rest, A0, A1, A2, << Acc/binary, $e >>); + $F -> Function(Rest, A0, A1, A2, << Acc/binary, $f >>); + $G -> Function(Rest, A0, A1, A2, << Acc/binary, $g >>); + $H -> Function(Rest, A0, A1, A2, << Acc/binary, $h >>); + $I -> Function(Rest, A0, A1, A2, << Acc/binary, $i >>); + $J -> Function(Rest, A0, A1, A2, << Acc/binary, $j >>); + $K -> Function(Rest, A0, A1, A2, << Acc/binary, $k >>); + $L -> Function(Rest, A0, A1, A2, << Acc/binary, $l >>); + $M -> Function(Rest, A0, A1, A2, << Acc/binary, $m >>); + $N -> Function(Rest, A0, A1, A2, << Acc/binary, $n >>); + $O -> Function(Rest, A0, A1, A2, << Acc/binary, $o >>); + $P -> Function(Rest, A0, A1, A2, << Acc/binary, $p >>); + $Q -> Function(Rest, A0, A1, A2, << Acc/binary, $q >>); + $R -> Function(Rest, A0, A1, A2, << Acc/binary, $r >>); + $S -> Function(Rest, A0, A1, A2, << Acc/binary, $s >>); + $T -> Function(Rest, A0, A1, A2, << Acc/binary, $t >>); + $U -> Function(Rest, A0, A1, A2, << Acc/binary, $u >>); + $V -> Function(Rest, A0, A1, A2, << Acc/binary, $v >>); + $W -> Function(Rest, A0, A1, A2, << Acc/binary, $w >>); + $X -> Function(Rest, A0, A1, A2, << Acc/binary, $x >>); + $Y -> Function(Rest, A0, A1, A2, << Acc/binary, $y >>); + $Z -> Function(Rest, A0, A1, A2, << Acc/binary, $z >>); + C -> Function(Rest, A0, A1, A2, << Acc/binary, C >>) +end). + +-define(LOWER(Function, Rest, A0, A1, A2, A3, Acc), case C of + $A -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $a >>); + $B -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $b >>); + $C -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $c >>); + $D -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $d >>); + $E -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $e >>); + $F -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $f >>); + $G -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $g >>); + $H -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $h >>); + $I -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $i >>); + $J -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $j >>); + $K -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $k >>); + $L -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $l >>); + $M -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $m >>); + $N -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $n >>); + $O -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $o >>); + $P -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $p >>); + $Q -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $q >>); + $R -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $r >>); + $S -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $s >>); + $T -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $t >>); + $U -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $u >>); + $V -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $v >>); + $W -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $w >>); + $X -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $x >>); + $Y -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $y >>); + $Z -> Function(Rest, A0, A1, A2, A3, << Acc/binary, $z >>); + C -> Function(Rest, A0, A1, A2, A3, << Acc/binary, C >>) +end). 
+ +-define(LOWER(Function, Rest, A0, A1, A2, A3, A4, Acc), case C of + $A -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $a >>); + $B -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $b >>); + $C -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $c >>); + $D -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $d >>); + $E -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $e >>); + $F -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $f >>); + $G -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $g >>); + $H -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $h >>); + $I -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $i >>); + $J -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $j >>); + $K -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $k >>); + $L -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $l >>); + $M -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $m >>); + $N -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $n >>); + $O -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $o >>); + $P -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $p >>); + $Q -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $q >>); + $R -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $r >>); + $S -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $s >>); + $T -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $t >>); + $U -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $u >>); + $V -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $v >>); + $W -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $w >>); + $X -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $x >>); + $Y -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $y >>); + $Z -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, $z >>); + C -> Function(Rest, A0, A1, A2, A3, A4, << Acc/binary, C >>) +end). 
+ +-define(LOWER(Function, Rest, A0, A1, A2, A3, A4, A5, Acc), case C of + $A -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $a >>); + $B -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $b >>); + $C -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $c >>); + $D -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $d >>); + $E -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $e >>); + $F -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $f >>); + $G -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $g >>); + $H -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $h >>); + $I -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $i >>); + $J -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $j >>); + $K -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $k >>); + $L -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $l >>); + $M -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $m >>); + $N -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $n >>); + $O -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $o >>); + $P -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $p >>); + $Q -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $q >>); + $R -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $r >>); + $S -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $s >>); + $T -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $t >>); + $U -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $u >>); + $V -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $v >>); + $W -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $w >>); + $X -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $x >>); + $Y -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $y >>); + $Z -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, $z >>); + C -> Function(Rest, A0, A1, A2, A3, A4, A5, << Acc/binary, C >>) +end). 
+ +-define(LOWER(Function, Rest, A0, A1, A2, A3, A4, A5, A6, Acc), case C of + $A -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $a >>); + $B -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $b >>); + $C -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $c >>); + $D -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $d >>); + $E -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $e >>); + $F -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $f >>); + $G -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $g >>); + $H -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $h >>); + $I -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $i >>); + $J -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $j >>); + $K -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $k >>); + $L -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $l >>); + $M -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $m >>); + $N -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $n >>); + $O -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $o >>); + $P -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $p >>); + $Q -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $q >>); + $R -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $r >>); + $S -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $s >>); + $T -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $t >>); + $U -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $u >>); + $V -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $v >>); + $W -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $w >>); + $X -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $x >>); + $Y -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $y >>); + $Z -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, $z >>); + C -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, << Acc/binary, C >>) +end). 
+ +-define(LOWER(Function, Rest, A0, A1, A2, A3, A4, A5, A6, A7, Acc), case C of + $A -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $a >>); + $B -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $b >>); + $C -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $c >>); + $D -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $d >>); + $E -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $e >>); + $F -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $f >>); + $G -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $g >>); + $H -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $h >>); + $I -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $i >>); + $J -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $j >>); + $K -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $k >>); + $L -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $l >>); + $M -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $m >>); + $N -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $n >>); + $O -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $o >>); + $P -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $p >>); + $Q -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $q >>); + $R -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $r >>); + $S -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $s >>); + $T -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $t >>); + $U -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $u >>); + $V -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $v >>); + $W -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $w >>); + $X -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $x >>); + $Y -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $y >>); + $Z -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, $z >>); + C -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, << Acc/binary, C >>) +end). 
+ +-define(LOWER(Function, Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, Acc), case C of + $A -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $a >>); + $B -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $b >>); + $C -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $c >>); + $D -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $d >>); + $E -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $e >>); + $F -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $f >>); + $G -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $g >>); + $H -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $h >>); + $I -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $i >>); + $J -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $j >>); + $K -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $k >>); + $L -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $l >>); + $M -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $m >>); + $N -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $n >>); + $O -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $o >>); + $P -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $p >>); + $Q -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $q >>); + $R -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $r >>); + $S -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $s >>); + $T -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $t >>); + $U -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $u >>); + $V -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $v >>); + $W -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $w >>); + $X -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $x >>); + $Y -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $y >>); + $Z -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, $z >>); + C -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, << Acc/binary, C >>) +end). 
+ +-define(LOWER(Function, Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, Acc), case C of + $A -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $a >>); + $B -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $b >>); + $C -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $c >>); + $D -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $d >>); + $E -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $e >>); + $F -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $f >>); + $G -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $g >>); + $H -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $h >>); + $I -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $i >>); + $J -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $j >>); + $K -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $k >>); + $L -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $l >>); + $M -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $m >>); + $N -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $n >>); + $O -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $o >>); + $P -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $p >>); + $Q -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $q >>); + $R -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $r >>); + $S -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $s >>); + $T -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $t >>); + $U -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $u >>); + $V -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $v >>); + $W -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $w >>); + $X -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $x >>); + $Y -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $y >>); + $Z -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, $z >>); + C -> Function(Rest, A0, A1, A2, A3, A4, A5, A6, A7, A8, A9, << Acc/binary, C >>) +end). + +%% HEX(C) + +-define(HEX(C), (?HEXHL(C bsr 4)), (?HEXHL(C band 16#0f))). + +-define(HEXHL(HL), + case HL of + 0 -> $0; + 1 -> $1; + 2 -> $2; + 3 -> $3; + 4 -> $4; + 5 -> $5; + 6 -> $6; + 7 -> $7; + 8 -> $8; + 9 -> $9; + 10 -> $A; + 11 -> $B; + 12 -> $C; + 13 -> $D; + 14 -> $E; + 15 -> $F + end +). + +%% UNHEX(H, L) + +-define(UNHEX(H, L), (?UNHEX(H) bsl 4 bor ?UNHEX(L))). + +-define(UNHEX(C), + case C of + $0 -> 0; + $1 -> 1; + $2 -> 2; + $3 -> 3; + $4 -> 4; + $5 -> 5; + $6 -> 6; + $7 -> 7; + $8 -> 8; + $9 -> 9; + $A -> 10; + $B -> 11; + $C -> 12; + $D -> 13; + $E -> 14; + $F -> 15; + $a -> 10; + $b -> 11; + $c -> 12; + $d -> 13; + $e -> 14; + $f -> 15 + end +). + +-endif. 
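+
+%% Usage sketch (illustrative only): these macros are meant to be expanded
+%% inside tight parsing loops. Assuming the lower-arity ?LOWER(Function,
+%% Rest, Acc) variant defined earlier in this header, a header-name
+%% lowercasing loop could look like the hypothetical function below:
+%%
+%%   lowercase(<<>>, Acc) -> Acc;
+%%   lowercase(<<C, Rest/bits>>, Acc) -> ?LOWER(lowercase, Rest, Acc).
+%%
+%%   %% lowercase(<<"Content-Type">>, <<>>) returns <<"content-type">>.
+%%
+%% Similarly, << ?HEX($/) >> builds the two ASCII hex digits <<"2F">>.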
diff --git a/deps/cowlib/include/cow_parse.hrl b/deps/cowlib/include/cow_parse.hrl new file mode 100644 index 0000000..ee4af70 --- /dev/null +++ b/deps/cowlib/include/cow_parse.hrl @@ -0,0 +1,83 @@ +%% Copyright (c) 2015-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-ifndef(COW_PARSE_HRL). +-define(COW_PARSE_HRL, 1). + +-define(IS_ALPHA(C), + (C =:= $a) or (C =:= $b) or (C =:= $c) or (C =:= $d) or (C =:= $e) or + (C =:= $f) or (C =:= $g) or (C =:= $h) or (C =:= $i) or (C =:= $j) or + (C =:= $k) or (C =:= $l) or (C =:= $m) or (C =:= $n) or (C =:= $o) or + (C =:= $p) or (C =:= $q) or (C =:= $r) or (C =:= $s) or (C =:= $t) or + (C =:= $u) or (C =:= $v) or (C =:= $w) or (C =:= $x) or (C =:= $y) or + (C =:= $z) or + (C =:= $A) or (C =:= $B) or (C =:= $C) or (C =:= $D) or (C =:= $E) or + (C =:= $F) or (C =:= $G) or (C =:= $H) or (C =:= $I) or (C =:= $J) or + (C =:= $K) or (C =:= $L) or (C =:= $M) or (C =:= $N) or (C =:= $O) or + (C =:= $P) or (C =:= $Q) or (C =:= $R) or (C =:= $S) or (C =:= $T) or + (C =:= $U) or (C =:= $V) or (C =:= $W) or (C =:= $X) or (C =:= $Y) or + (C =:= $Z) +). + +-define(IS_ALPHANUM(C), ?IS_ALPHA(C) or ?IS_DIGIT(C)). +-define(IS_CHAR(C), C > 0, C < 128). + +-define(IS_DIGIT(C), + (C =:= $0) or (C =:= $1) or (C =:= $2) or (C =:= $3) or (C =:= $4) or + (C =:= $5) or (C =:= $6) or (C =:= $7) or (C =:= $8) or (C =:= $9)). + +-define(IS_ETAGC(C), C =:= 16#21; C >= 16#23, C =/= 16#7f). + +-define(IS_HEX(C), + ?IS_DIGIT(C) or + (C =:= $a) or (C =:= $b) or (C =:= $c) or + (C =:= $d) or (C =:= $e) or (C =:= $f) or + (C =:= $A) or (C =:= $B) or (C =:= $C) or + (C =:= $D) or (C =:= $E) or (C =:= $F)). + +-define(IS_LHEX(C), + ?IS_DIGIT(C) or + (C =:= $a) or (C =:= $b) or (C =:= $c) or + (C =:= $d) or (C =:= $e) or (C =:= $f)). + +-define(IS_TOKEN(C), + ?IS_ALPHA(C) or ?IS_DIGIT(C) or + (C =:= $!) or (C =:= $#) or (C =:= $$) or (C =:= $%) or (C =:= $&) or + (C =:= $') or (C =:= $*) or (C =:= $+) or (C =:= $-) or (C =:= $.) or + (C =:= $^) or (C =:= $_) or (C =:= $`) or (C =:= $|) or (C =:= $~)). + +-define(IS_TOKEN68(C), + ?IS_ALPHA(C) or ?IS_DIGIT(C) or + (C =:= $-) or (C =:= $.) or (C =:= $_) or + (C =:= $~) or (C =:= $+) or (C =:= $/)). + +-define(IS_URI_UNRESERVED(C), + ?IS_ALPHA(C) or ?IS_DIGIT(C) or + (C =:= $-) or (C =:= $.) or (C =:= $_) or (C =:= $~)). + +-define(IS_URI_GEN_DELIMS(C), + (C =:= $:) or (C =:= $/) or (C =:= $?) or (C =:= $#) or + (C =:= $[) or (C =:= $]) or (C =:= $@)). + +-define(IS_URI_SUB_DELIMS(C), + (C =:= $!) or (C =:= $$) or (C =:= $&) or (C =:= $') or + (C =:= $() or (C =:= $)) or (C =:= $*) or (C =:= $+) or + (C =:= $,) or (C =:= $;) or (C =:= $=)). + +-define(IS_VCHAR(C), C =:= $\t; C > 31, C < 127). +-define(IS_VCHAR_OBS(C), C =:= $\t; C > 31, C =/= 127). +-define(IS_WS(C), (C =:= $\s) or (C =:= $\t)). 
+-define(IS_WS_COMMA(C), ?IS_WS(C) or (C =:= $,)).
+
+-endif.
diff --git a/deps/cowlib/src/cow_base64url.erl b/deps/cowlib/src/cow_base64url.erl
new file mode 100644
index 0000000..17ec46c
--- /dev/null
+++ b/deps/cowlib/src/cow_base64url.erl
@@ -0,0 +1,81 @@
+%% Copyright (c) 2017-2018, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% This module implements "base64url" following the algorithm
+%% found in Appendix C of RFC7515. The option #{padding => false}
+%% must be given to reproduce this variant exactly. The default
+%% will leave the padding characters.
+-module(cow_base64url).
+
+-export([decode/1]).
+-export([decode/2]).
+-export([encode/1]).
+-export([encode/2]).
+
+-ifdef(TEST).
+-include_lib("proper/include/proper.hrl").
+-endif.
+
+decode(Enc) ->
+    decode(Enc, #{}).
+
+decode(Enc0, Opts) ->
+    Enc1 = << << case C of
+        $- -> $+;
+        $_ -> $/;
+        _ -> C
+    end >> || << C >> <= Enc0 >>,
+    Enc = case Opts of
+        #{padding := false} ->
+            case byte_size(Enc1) rem 4 of
+                0 -> Enc1;
+                2 -> << Enc1/binary, "==" >>;
+                3 -> << Enc1/binary, "=" >>
+            end;
+        _ ->
+            Enc1
+    end,
+    base64:decode(Enc).
+
+encode(Dec) ->
+    encode(Dec, #{}).
+
+encode(Dec, Opts) ->
+    encode(base64:encode(Dec), Opts, <<>>).
+
+encode(<<$+, R/bits>>, Opts, Acc) -> encode(R, Opts, <<Acc/binary, $->>);
+encode(<<$/, R/bits>>, Opts, Acc) -> encode(R, Opts, <<Acc/binary, $_>>);
+encode(<<$=, _/bits>>, #{padding := false}, Acc) -> Acc;
+encode(<<C, R/bits>>, Opts, Acc) -> encode(R, Opts, <<Acc/binary, C>>);
+encode(<<>>, _, Acc) -> Acc.
+
+-ifdef(TEST).
+
+rfc7515_test() ->
+    Dec = <<3,236,255,224,193>>,
+    Enc = <<"A-z_4ME">>,
+    Pad = <<"A-z_4ME=">>,
+    Dec = decode(<<Enc/binary, $=>>),
+    Dec = decode(Enc, #{padding => false}),
+    Pad = encode(Dec),
+    Enc = encode(Dec, #{padding => false}),
+    ok.
+
+prop_identity() ->
+    ?FORALL(B, binary(), B =:= decode(encode(B))).
+
+prop_identity_no_padding() ->
+    ?FORALL(B, binary(), B =:= decode(encode(B, #{padding => false}), #{padding => false})).
+
+-endif.
diff --git a/deps/cowlib/src/cow_cookie.erl b/deps/cowlib/src/cow_cookie.erl
new file mode 100644
index 0000000..93a8e61
--- /dev/null
+++ b/deps/cowlib/src/cow_cookie.erl
@@ -0,0 +1,428 @@
+%% Copyright (c) 2013-2020, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS.
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cow_cookie). + +-export([parse_cookie/1]). +-export([parse_set_cookie/1]). +-export([cookie/1]). +-export([setcookie/3]). + +-type cookie_attrs() :: #{ + expires => calendar:datetime(), + max_age => calendar:datetime(), + domain => binary(), + path => binary(), + secure => true, + http_only => true, + same_site => strict | lax | none +}. +-export_type([cookie_attrs/0]). + +-type cookie_opts() :: #{ + domain => binary(), + http_only => boolean(), + max_age => non_neg_integer(), + path => binary(), + same_site => strict | lax | none, + secure => boolean() +}. +-export_type([cookie_opts/0]). + +-include("cow_inline.hrl"). + +%% Cookie header. + +-spec parse_cookie(binary()) -> [{binary(), binary()}]. +parse_cookie(Cookie) -> + parse_cookie(Cookie, []). + +parse_cookie(<<>>, Acc) -> + lists:reverse(Acc); +parse_cookie(<< $\s, Rest/binary >>, Acc) -> + parse_cookie(Rest, Acc); +parse_cookie(<< $\t, Rest/binary >>, Acc) -> + parse_cookie(Rest, Acc); +parse_cookie(<< $,, Rest/binary >>, Acc) -> + parse_cookie(Rest, Acc); +parse_cookie(<< $;, Rest/binary >>, Acc) -> + parse_cookie(Rest, Acc); +parse_cookie(Cookie, Acc) -> + parse_cookie_name(Cookie, Acc, <<>>). + +parse_cookie_name(<<>>, Acc, Name) -> + lists:reverse([{<<>>, parse_cookie_trim(Name)}|Acc]); +parse_cookie_name(<< $=, _/binary >>, _, <<>>) -> + error(badarg); +parse_cookie_name(<< $=, Rest/binary >>, Acc, Name) -> + parse_cookie_value(Rest, Acc, Name, <<>>); +parse_cookie_name(<< $,, _/binary >>, _, _) -> + error(badarg); +parse_cookie_name(<< $;, Rest/binary >>, Acc, Name) -> + parse_cookie(Rest, [{<<>>, parse_cookie_trim(Name)}|Acc]); +parse_cookie_name(<< $\t, _/binary >>, _, _) -> + error(badarg); +parse_cookie_name(<< $\r, _/binary >>, _, _) -> + error(badarg); +parse_cookie_name(<< $\n, _/binary >>, _, _) -> + error(badarg); +parse_cookie_name(<< $\013, _/binary >>, _, _) -> + error(badarg); +parse_cookie_name(<< $\014, _/binary >>, _, _) -> + error(badarg); +parse_cookie_name(<< C, Rest/binary >>, Acc, Name) -> + parse_cookie_name(Rest, Acc, << Name/binary, C >>). + +parse_cookie_value(<<>>, Acc, Name, Value) -> + lists:reverse([{Name, parse_cookie_trim(Value)}|Acc]); +parse_cookie_value(<< $;, Rest/binary >>, Acc, Name, Value) -> + parse_cookie(Rest, [{Name, parse_cookie_trim(Value)}|Acc]); +parse_cookie_value(<< $\t, _/binary >>, _, _, _) -> + error(badarg); +parse_cookie_value(<< $\r, _/binary >>, _, _, _) -> + error(badarg); +parse_cookie_value(<< $\n, _/binary >>, _, _, _) -> + error(badarg); +parse_cookie_value(<< $\013, _/binary >>, _, _, _) -> + error(badarg); +parse_cookie_value(<< $\014, _/binary >>, _, _, _) -> + error(badarg); +parse_cookie_value(<< C, Rest/binary >>, Acc, Name, Value) -> + parse_cookie_value(Rest, Acc, Name, << Value/binary, C >>). + +parse_cookie_trim(Value = <<>>) -> + Value; +parse_cookie_trim(Value) -> + case binary:last(Value) of + $\s -> + Size = byte_size(Value) - 1, + << Value2:Size/binary, _ >> = Value, + parse_cookie_trim(Value2); + _ -> + Value + end. + +-ifdef(TEST). +parse_cookie_test_() -> + %% {Value, Result}. + Tests = [ + {<<"name=value; name2=value2">>, [ + {<<"name">>, <<"value">>}, + {<<"name2">>, <<"value2">>} + ]}, + %% Space in value. 
+        {<<"foo=Thu Jul 11 2013 15:38:43 GMT+0400 (MSK)">>,
+            [{<<"foo">>, <<"Thu Jul 11 2013 15:38:43 GMT+0400 (MSK)">>}]},
+        %% Comma in value. Google Analytics sets that kind of cookies.
+        {<<"refk=sOUZDzq2w2; sk=B602064E0139D842D620C7569640DBB4C81C45080651"
+            "9CC124EF794863E10E80; __utma=64249653.825741573.1380181332.1400"
+            "015657.1400019557.703; __utmb=64249653.1.10.1400019557; __utmc="
+            "64249653; __utmz=64249653.1400019557.703.13.utmcsr=bluesky.chic"
+            "agotribune.com|utmccn=(referral)|utmcmd=referral|utmcct=/origin"
+            "als/chi-12-indispensable-digital-tools-bsi,0,0.storygallery">>, [
+            {<<"refk">>, <<"sOUZDzq2w2">>},
+            {<<"sk">>, <<"B602064E0139D842D620C7569640DBB4C81C45080651"
+                "9CC124EF794863E10E80">>},
+            {<<"__utma">>, <<"64249653.825741573.1380181332.1400"
+                "015657.1400019557.703">>},
+            {<<"__utmb">>, <<"64249653.1.10.1400019557">>},
+            {<<"__utmc">>, <<"64249653">>},
+            {<<"__utmz">>, <<"64249653.1400019557.703.13.utmcsr=bluesky.chic"
+                "agotribune.com|utmccn=(referral)|utmcmd=referral|utmcct=/origin"
+                "als/chi-12-indispensable-digital-tools-bsi,0,0.storygallery">>}
+        ]},
+        %% Potential edge cases (initially from Mochiweb).
+        {<<"foo=\\x">>, [{<<"foo">>, <<"\\x">>}]},
+        {<<"foo=;bar=">>, [{<<"foo">>, <<>>}, {<<"bar">>, <<>>}]},
+        {<<"foo=\\\";;bar=good ">>,
+            [{<<"foo">>, <<"\\\"">>}, {<<"bar">>, <<"good">>}]},
+        {<<"foo=\"\\\";bar=good">>,
+            [{<<"foo">>, <<"\"\\\"">>}, {<<"bar">>, <<"good">>}]},
+        {<<>>, []}, %% Flash player.
+        {<<"foo=bar , baz=wibble ">>, [{<<"foo">>, <<"bar , baz=wibble">>}]},
+        %% Technically invalid, but seen in the wild
+        {<<"foo">>, [{<<>>, <<"foo">>}]},
+        {<<"foo ">>, [{<<>>, <<"foo">>}]},
+        {<<"foo;">>, [{<<>>, <<"foo">>}]},
+        {<<"bar;foo=1">>, [{<<>>, <<"bar">>}, {<<"foo">>, <<"1">>}]}
+    ],
+    [{V, fun() -> R = parse_cookie(V) end} || {V, R} <- Tests].
+
+parse_cookie_error_test_() ->
+    %% Value.
+    Tests = [
+        <<"=">>
+    ],
+    [{V, fun() -> {'EXIT', {badarg, _}} = (catch parse_cookie(V)) end} || V <- Tests].
+-endif.
+
+%% Set-Cookie header.
+
+-spec parse_set_cookie(binary())
+    -> {ok, binary(), binary(), cookie_attrs()}
+    | ignore.
+parse_set_cookie(SetCookie) ->
+    {NameValuePair, UnparsedAttrs} = take_until_semicolon(SetCookie, <<>>),
+    {Name, Value} = case binary:split(NameValuePair, <<$=>>) of
+        [Value0] -> {<<>>, trim(Value0)};
+        [Name0, Value0] -> {trim(Name0), trim(Value0)}
+    end,
+    case {Name, Value} of
+        {<<>>, <<>>} ->
+            ignore;
+        _ ->
+            Attrs = parse_set_cookie_attrs(UnparsedAttrs, #{}),
+            {ok, Name, Value, Attrs}
+    end.
+
+parse_set_cookie_attrs(<<>>, Attrs) ->
+    Attrs;
+parse_set_cookie_attrs(<<$;,Rest0/bits>>, Attrs) ->
+    {Av, Rest} = take_until_semicolon(Rest0, <<>>),
+    {Name, Value} = case binary:split(Av, <<$=>>) of
+        [Name0] -> {trim(Name0), <<>>};
+        [Name0, Value0] -> {trim(Name0), trim(Value0)}
+    end,
+    case parse_set_cookie_attr(?LOWER(Name), Value) of
+        {ok, AttrName, AttrValue} ->
+            parse_set_cookie_attrs(Rest, Attrs#{AttrName => AttrValue});
+        {ignore, AttrName} ->
+            parse_set_cookie_attrs(Rest, maps:remove(AttrName, Attrs));
+        ignore ->
+            parse_set_cookie_attrs(Rest, Attrs)
+    end.
+
+take_until_semicolon(Rest = <<$;,_/bits>>, Acc) -> {Acc, Rest};
+take_until_semicolon(<<C, R/bits>>, Acc) -> take_until_semicolon(R, <<Acc/binary, C>>);
+take_until_semicolon(<<>>, Acc) -> {Acc, <<>>}.
+
+trim(String) ->
+    string:trim(String, both, [$\s, $\t]).
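+
+%% Example (illustrative values): parsing a Set-Cookie header returns the
+%% cookie name, its value and the recognised attributes as a map:
+%%
+%%   {ok, <<"sessionid">>, <<"38afes7a8">>, #{http_only := true, path := <<"/">>}} =
+%%       cow_cookie:parse_set_cookie(<<"sessionid=38afes7a8; Path=/; HttpOnly">>).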
+ +parse_set_cookie_attr(<<"expires">>, Value) -> + try cow_date:parse_date(Value) of + DateTime -> + {ok, expires, DateTime} + catch _:_ -> + ignore + end; +parse_set_cookie_attr(<<"max-age">>, Value) -> + try binary_to_integer(Value) of + MaxAge when MaxAge =< 0 -> + %% Year 0 corresponds to 1 BC. + {ok, max_age, {{0, 1, 1}, {0, 0, 0}}}; + MaxAge -> + CurrentTime = erlang:universaltime(), + {ok, max_age, calendar:gregorian_seconds_to_datetime( + calendar:datetime_to_gregorian_seconds(CurrentTime) + MaxAge)} + catch _:_ -> + ignore + end; +parse_set_cookie_attr(<<"domain">>, Value) -> + case Value of + <<>> -> + ignore; + <<".",Rest/bits>> -> + {ok, domain, ?LOWER(Rest)}; + _ -> + {ok, domain, ?LOWER(Value)} + end; +parse_set_cookie_attr(<<"path">>, Value) -> + case Value of + <<"/",_/bits>> -> + {ok, path, Value}; + %% When the path is not absolute, or the path is empty, the default-path will be used. + %% Note that the default-path is also used when there are no path attributes, + %% so we are simply ignoring the attribute here. + _ -> + {ignore, path} + end; +parse_set_cookie_attr(<<"secure">>, _) -> + {ok, secure, true}; +parse_set_cookie_attr(<<"httponly">>, _) -> + {ok, http_only, true}; +parse_set_cookie_attr(<<"samesite">>, Value) -> + case ?LOWER(Value) of + <<"strict">> -> + {ok, same_site, strict}; + <<"lax">> -> + {ok, same_site, lax}; + %% Clients may have different defaults than "None". + <<"none">> -> + {ok, same_site, none}; + %% Unknown values and lack of value are equivalent. + _ -> + ignore + end; +parse_set_cookie_attr(_, _) -> + ignore. + +-ifdef(TEST). +parse_set_cookie_test_() -> + Tests = [ + {<<"a=b">>, {ok, <<"a">>, <<"b">>, #{}}}, + {<<"a=b; Secure">>, {ok, <<"a">>, <<"b">>, #{secure => true}}}, + {<<"a=b; HttpOnly">>, {ok, <<"a">>, <<"b">>, #{http_only => true}}}, + {<<"a=b; Expires=Wed, 21 Oct 2015 07:28:00 GMT; Expires=Wed, 21 Oct 2015 07:29:00 GMT">>, + {ok, <<"a">>, <<"b">>, #{expires => {{2015,10,21},{7,29,0}}}}}, + {<<"a=b; Max-Age=999; Max-Age=0">>, + {ok, <<"a">>, <<"b">>, #{max_age => {{0,1,1},{0,0,0}}}}}, + {<<"a=b; Domain=example.org; Domain=foo.example.org">>, + {ok, <<"a">>, <<"b">>, #{domain => <<"foo.example.org">>}}}, + {<<"a=b; Path=/path/to/resource; Path=/">>, + {ok, <<"a">>, <<"b">>, #{path => <<"/">>}}}, + {<<"a=b; SameSite=Lax; SameSite=Strict">>, + {ok, <<"a">>, <<"b">>, #{same_site => strict}}} + ], + [{SetCookie, fun() -> Res = parse_set_cookie(SetCookie) end} + || {SetCookie, Res} <- Tests]. +-endif. + +%% Build a cookie header. + +-spec cookie([{iodata(), iodata()}]) -> iolist(). +cookie([]) -> + []; +cookie([{<<>>, Value}]) -> + [Value]; +cookie([{Name, Value}]) -> + [Name, $=, Value]; +cookie([{<<>>, Value}|Tail]) -> + [Value, $;, $\s|cookie(Tail)]; +cookie([{Name, Value}|Tail]) -> + [Name, $=, Value, $;, $\s|cookie(Tail)]. + +-ifdef(TEST). +cookie_test_() -> + Tests = [ + {[], <<>>}, + {[{<<"a">>, <<"b">>}], <<"a=b">>}, + {[{<<"a">>, <<"b">>}, {<<"c">>, <<"d">>}], <<"a=b; c=d">>}, + {[{<<>>, <<"b">>}, {<<"c">>, <<"d">>}], <<"b; c=d">>}, + {[{<<"a">>, <<"b">>}, {<<>>, <<"d">>}], <<"a=b; d">>} + ], + [{Res, fun() -> Res = iolist_to_binary(cookie(Cookies)) end} + || {Cookies, Res} <- Tests]. +-endif. + +%% Convert a cookie name, value and options to its iodata form. +%% +%% Initially from Mochiweb: +%% * Copyright 2007 Mochi Media, Inc. +%% Initial binary implementation: +%% * Copyright 2011 Thomas Burdick +%% +%% @todo Rename the function to set_cookie eventually. + +-spec setcookie(iodata(), iodata(), cookie_opts()) -> iolist(). 
+setcookie(Name, Value, Opts) -> + nomatch = binary:match(iolist_to_binary(Name), [<<$=>>, <<$,>>, <<$;>>, + <<$\s>>, <<$\t>>, <<$\r>>, <<$\n>>, <<$\013>>, <<$\014>>]), + nomatch = binary:match(iolist_to_binary(Value), [<<$,>>, <<$;>>, + <<$\s>>, <<$\t>>, <<$\r>>, <<$\n>>, <<$\013>>, <<$\014>>]), + [Name, <<"=">>, Value, <<"; Version=1">>, attributes(maps:to_list(Opts))]. + +attributes([]) -> []; +attributes([{domain, Domain}|Tail]) -> [<<"; Domain=">>, Domain|attributes(Tail)]; +attributes([{http_only, false}|Tail]) -> attributes(Tail); +attributes([{http_only, true}|Tail]) -> [<<"; HttpOnly">>|attributes(Tail)]; +%% MSIE requires an Expires date in the past to delete a cookie. +attributes([{max_age, 0}|Tail]) -> + [<<"; Expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0">>|attributes(Tail)]; +attributes([{max_age, MaxAge}|Tail]) when is_integer(MaxAge), MaxAge > 0 -> + Secs = calendar:datetime_to_gregorian_seconds(calendar:universal_time()), + Expires = cow_date:rfc2109(calendar:gregorian_seconds_to_datetime(Secs + MaxAge)), + [<<"; Expires=">>, Expires, <<"; Max-Age=">>, integer_to_list(MaxAge)|attributes(Tail)]; +attributes([Opt={max_age, _}|_]) -> + error({badarg, Opt}); +attributes([{path, Path}|Tail]) -> [<<"; Path=">>, Path|attributes(Tail)]; +attributes([{secure, false}|Tail]) -> attributes(Tail); +attributes([{secure, true}|Tail]) -> [<<"; Secure">>|attributes(Tail)]; +attributes([{same_site, lax}|Tail]) -> [<<"; SameSite=Lax">>|attributes(Tail)]; +attributes([{same_site, strict}|Tail]) -> [<<"; SameSite=Strict">>|attributes(Tail)]; +attributes([{same_site, none}|Tail]) -> [<<"; SameSite=None">>|attributes(Tail)]; +%% Skip unknown options. +attributes([_|Tail]) -> attributes(Tail). + +-ifdef(TEST). +setcookie_test_() -> + %% {Name, Value, Opts, Result} + Tests = [ + {<<"Customer">>, <<"WILE_E_COYOTE">>, + #{http_only => true, domain => <<"acme.com">>}, + <<"Customer=WILE_E_COYOTE; Version=1; " + "Domain=acme.com; HttpOnly">>}, + {<<"Customer">>, <<"WILE_E_COYOTE">>, + #{path => <<"/acme">>}, + <<"Customer=WILE_E_COYOTE; Version=1; Path=/acme">>}, + {<<"Customer">>, <<"WILE_E_COYOTE">>, + #{secure => true}, + <<"Customer=WILE_E_COYOTE; Version=1; Secure">>}, + {<<"Customer">>, <<"WILE_E_COYOTE">>, + #{secure => false, http_only => false}, + <<"Customer=WILE_E_COYOTE; Version=1">>}, + {<<"Customer">>, <<"WILE_E_COYOTE">>, + #{same_site => lax}, + <<"Customer=WILE_E_COYOTE; Version=1; SameSite=Lax">>}, + {<<"Customer">>, <<"WILE_E_COYOTE">>, + #{same_site => strict}, + <<"Customer=WILE_E_COYOTE; Version=1; SameSite=Strict">>}, + {<<"Customer">>, <<"WILE_E_COYOTE">>, + #{path => <<"/acme">>, badoption => <<"negatory">>}, + <<"Customer=WILE_E_COYOTE; Version=1; Path=/acme">>} + ], + [{R, fun() -> R = iolist_to_binary(setcookie(N, V, O)) end} + || {N, V, O, R} <- Tests]. + +setcookie_max_age_test() -> + F = fun(N, V, O) -> + binary:split(iolist_to_binary( + setcookie(N, V, O)), <<";">>, [global]) + end, + [<<"Customer=WILE_E_COYOTE">>, + <<" Version=1">>, + <<" Expires=", _/binary>>, + <<" Max-Age=111">>, + <<" Secure">>] = F(<<"Customer">>, <<"WILE_E_COYOTE">>, + #{max_age => 111, secure => true}), + case catch F(<<"Customer">>, <<"WILE_E_COYOTE">>, #{max_age => -111}) of + {'EXIT', {{badarg, {max_age, -111}}, _}} -> ok + end, + [<<"Customer=WILE_E_COYOTE">>, + <<" Version=1">>, + <<" Expires=", _/binary>>, + <<" Max-Age=86417">>] = F(<<"Customer">>, <<"WILE_E_COYOTE">>, + #{max_age => 86417}), + ok. 
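+
+%% Worked example (illustrative values): deleting a cookie with
+%% max_age => 0 emits the fixed Expires date in the past together with
+%% Max-Age=0, per the MSIE workaround noted above:
+%%
+%%   iolist_to_binary(cow_cookie:setcookie(<<"sid">>, <<"deleted">>, #{max_age => 0})).
+%%   %% <<"sid=deleted; Version=1; Expires=Thu, 01-Jan-1970 00:00:01 GMT; Max-Age=0">>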
+
+setcookie_failures_test_() ->
+    F = fun(N, V) ->
+        try setcookie(N, V, #{}) of
+            _ ->
+                false
+        catch _:_ ->
+            true
+        end
+    end,
+    Tests = [
+        {<<"Na=me">>, <<"Value">>},
+        {<<"Name;">>, <<"Value">>},
+        {<<"\r\name">>, <<"Value">>},
+        {<<"Name">>, <<"Value;">>},
+        {<<"Name">>, <<"\value">>}
+    ],
+    [{iolist_to_binary(io_lib:format("{~p, ~p} failure", [N, V])),
+        fun() -> true = F(N, V) end}
+        || {N, V} <- Tests].
+-endif.
diff --git a/deps/cowlib/src/cow_date.erl b/deps/cowlib/src/cow_date.erl
new file mode 100644
index 0000000..36ce861
--- /dev/null
+++ b/deps/cowlib/src/cow_date.erl
@@ -0,0 +1,434 @@
+%% Copyright (c) 2013-2018, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cow_date).
+
+-export([parse_date/1]).
+-export([rfc1123/1]).
+-export([rfc2109/1]).
+-export([rfc7231/1]).
+
+-ifdef(TEST).
+-include_lib("proper/include/proper.hrl").
+-endif.
+
+%% @doc Parse the HTTP date (IMF-fixdate, rfc850, asctime).
+
+-define(DIGITS(A, B), ((A - $0) * 10 + (B - $0))).
+-define(DIGITS(A, B, C, D), ((A - $0) * 1000 + (B - $0) * 100 + (C - $0) * 10 + (D - $0))).
+
+-spec parse_date(binary()) -> calendar:datetime().
+parse_date(DateBin) ->
+    Date = {{_, _, D}, {H, M, S}} = http_date(DateBin),
+    true = D >= 0 andalso D =< 31,
+    true = H >= 0 andalso H =< 23,
+    true = M >= 0 andalso M =< 59,
+    true = S >= 0 andalso S =< 60, %% Leap second.
+    Date.
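+
+%% For example, the IMF-fixdate and rfc850 forms below parse to the same
+%% value (see http_date_test_/0 further down for the asctime form):
+%%
+%%   {{1994,11,6},{8,49,37}} = cow_date:parse_date(<<"Sun, 06 Nov 1994 08:49:37 GMT">>),
+%%   {{1994,11,6},{8,49,37}} = cow_date:parse_date(<<"Sunday, 06-Nov-94 08:49:37 GMT">>).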
+ +http_date(<<"Mon, ", D1, D2, " ", R/bits >>) -> fixdate(R, ?DIGITS(D1, D2)); +http_date(<<"Tue, ", D1, D2, " ", R/bits >>) -> fixdate(R, ?DIGITS(D1, D2)); +http_date(<<"Wed, ", D1, D2, " ", R/bits >>) -> fixdate(R, ?DIGITS(D1, D2)); +http_date(<<"Thu, ", D1, D2, " ", R/bits >>) -> fixdate(R, ?DIGITS(D1, D2)); +http_date(<<"Fri, ", D1, D2, " ", R/bits >>) -> fixdate(R, ?DIGITS(D1, D2)); +http_date(<<"Sat, ", D1, D2, " ", R/bits >>) -> fixdate(R, ?DIGITS(D1, D2)); +http_date(<<"Sun, ", D1, D2, " ", R/bits >>) -> fixdate(R, ?DIGITS(D1, D2)); +http_date(<<"Monday, ", D1, D2, "-", R/bits >>) -> rfc850_date(R, ?DIGITS(D1, D2)); +http_date(<<"Tuesday, ", D1, D2, "-", R/bits >>) -> rfc850_date(R, ?DIGITS(D1, D2)); +http_date(<<"Wednesday, ", D1, D2, "-", R/bits >>) -> rfc850_date(R, ?DIGITS(D1, D2)); +http_date(<<"Thursday, ", D1, D2, "-", R/bits >>) -> rfc850_date(R, ?DIGITS(D1, D2)); +http_date(<<"Friday, ", D1, D2, "-", R/bits >>) -> rfc850_date(R, ?DIGITS(D1, D2)); +http_date(<<"Saturday, ", D1, D2, "-", R/bits >>) -> rfc850_date(R, ?DIGITS(D1, D2)); +http_date(<<"Sunday, ", D1, D2, "-", R/bits >>) -> rfc850_date(R, ?DIGITS(D1, D2)); +http_date(<<"Mon ", R/bits >>) -> asctime_date(R); +http_date(<<"Tue ", R/bits >>) -> asctime_date(R); +http_date(<<"Wed ", R/bits >>) -> asctime_date(R); +http_date(<<"Thu ", R/bits >>) -> asctime_date(R); +http_date(<<"Fri ", R/bits >>) -> asctime_date(R); +http_date(<<"Sat ", R/bits >>) -> asctime_date(R); +http_date(<<"Sun ", R/bits >>) -> asctime_date(R). + +fixdate(<<"Jan ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 1, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +fixdate(<<"Feb ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 2, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +fixdate(<<"Mar ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 3, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +fixdate(<<"Apr ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 4, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +fixdate(<<"May ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 5, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +fixdate(<<"Jun ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 6, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +fixdate(<<"Jul ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 7, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +fixdate(<<"Aug ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 8, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +fixdate(<<"Sep ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 9, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +fixdate(<<"Oct ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 10, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +fixdate(<<"Nov ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 11, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; 
+fixdate(<<"Dec ", Y1, Y2, Y3, Y4, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 12, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}. + +rfc850_date(<<"Jan-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 1, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +rfc850_date(<<"Feb-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 2, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +rfc850_date(<<"Mar-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 3, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +rfc850_date(<<"Apr-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 4, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +rfc850_date(<<"May-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 5, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +rfc850_date(<<"Jun-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 6, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +rfc850_date(<<"Jul-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 7, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +rfc850_date(<<"Aug-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 8, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +rfc850_date(<<"Sep-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 9, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +rfc850_date(<<"Oct-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 10, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +rfc850_date(<<"Nov-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 11, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +rfc850_date(<<"Dec-", Y1, Y2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " GMT">>, Day) -> + {{rfc850_year(?DIGITS(Y1, Y2)), 12, Day}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}. + +rfc850_year(Y) when Y > 50 -> Y + 1900; +rfc850_year(Y) -> Y + 2000. 
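+
+%% Two-digit rfc850 years pivot at 50: for example rfc850_year(94) gives
+%% 1994 while rfc850_year(20) gives 2020, matching the 1951..2050 range
+%% exercised by the test generators below.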
+ +asctime_date(<<"Jan ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 1, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +asctime_date(<<"Feb ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 2, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +asctime_date(<<"Mar ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 3, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +asctime_date(<<"Apr ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 4, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +asctime_date(<<"May ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 5, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +asctime_date(<<"Jun ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 6, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +asctime_date(<<"Jul ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 7, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +asctime_date(<<"Aug ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 8, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +asctime_date(<<"Sep ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 9, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +asctime_date(<<"Oct ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 10, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +asctime_date(<<"Nov ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 11, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}; +asctime_date(<<"Dec ", D1, D2, " ", H1, H2, ":", M1, M2, ":", S1, S2, " ", Y1, Y2, Y3, Y4 >>) -> + {{?DIGITS(Y1, Y2, Y3, Y4), 12, asctime_day(D1, D2)}, {?DIGITS(H1, H2), ?DIGITS(M1, M2), ?DIGITS(S1, S2)}}. + +asctime_day($\s, D2) -> (D2 - $0); +asctime_day(D1, D2) -> (D1 - $0) * 10 + (D2 - $0). + +-ifdef(TEST). +day_name() -> oneof(["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]). +day_name_l() -> oneof(["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]). +year() -> integer(1951, 2050). +month() -> integer(1, 12). +day() -> integer(1, 31). +hour() -> integer(0, 23). +minute() -> integer(0, 59). +second() -> integer(0, 60). + +fixdate_gen() -> + ?LET({DayName, Y, Mo, D, H, Mi, S}, + {day_name(), year(), month(), day(), hour(), minute(), second()}, + {{{Y, Mo, D}, {H, Mi, S}}, + list_to_binary([DayName, ", ", pad_int(D), " ", month(Mo), " ", integer_to_binary(Y), + " ", pad_int(H), ":", pad_int(Mi), ":", pad_int(S), " GMT"])}). + +rfc850_gen() -> + ?LET({DayName, Y, Mo, D, H, Mi, S}, + {day_name_l(), year(), month(), day(), hour(), minute(), second()}, + {{{Y, Mo, D}, {H, Mi, S}}, + list_to_binary([DayName, ", ", pad_int(D), "-", month(Mo), "-", pad_int(Y rem 100), + " ", pad_int(H), ":", pad_int(Mi), ":", pad_int(S), " GMT"])}). 
+ +asctime_gen() -> + ?LET({DayName, Y, Mo, D, H, Mi, S}, + {day_name(), year(), month(), day(), hour(), minute(), second()}, + {{{Y, Mo, D}, {H, Mi, S}}, + list_to_binary([DayName, " ", month(Mo), " ", + if D < 10 -> << $\s, (D + $0) >>; true -> integer_to_binary(D) end, + " ", pad_int(H), ":", pad_int(Mi), ":", pad_int(S), " ", integer_to_binary(Y)])}). + +prop_http_date() -> + ?FORALL({Date, DateBin}, + oneof([fixdate_gen(), rfc850_gen(), asctime_gen()]), + Date =:= parse_date(DateBin)). + +http_date_test_() -> + Tests = [ + {<<"Sun, 06 Nov 1994 08:49:37 GMT">>, {{1994, 11, 6}, {8, 49, 37}}}, + {<<"Sunday, 06-Nov-94 08:49:37 GMT">>, {{1994, 11, 6}, {8, 49, 37}}}, + {<<"Sun Nov 6 08:49:37 1994">>, {{1994, 11, 6}, {8, 49, 37}}} + ], + [{V, fun() -> R = http_date(V) end} || {V, R} <- Tests]. + +horse_http_date_fixdate() -> + horse:repeat(200000, + http_date(<<"Sun, 06 Nov 1994 08:49:37 GMT">>) + ). + +horse_http_date_rfc850() -> + horse:repeat(200000, + http_date(<<"Sunday, 06-Nov-94 08:49:37 GMT">>) + ). + +horse_http_date_asctime() -> + horse:repeat(200000, + http_date(<<"Sun Nov 6 08:49:37 1994">>) + ). +-endif. + +%% @doc Return the date formatted according to RFC1123. + +-spec rfc1123(calendar:datetime()) -> binary(). +rfc1123(DateTime) -> + rfc7231(DateTime). + +%% @doc Return the date formatted according to RFC2109. + +-spec rfc2109(calendar:datetime()) -> binary(). +rfc2109({Date = {Y, Mo, D}, {H, Mi, S}}) -> + Wday = calendar:day_of_the_week(Date), + << (weekday(Wday))/binary, ", ", + (pad_int(D))/binary, "-", + (month(Mo))/binary, "-", + (year(Y))/binary, " ", + (pad_int(H))/binary, ":", + (pad_int(Mi))/binary, ":", + (pad_int(S))/binary, " GMT" >>. + +-ifdef(TEST). +rfc2109_test_() -> + Tests = [ + {<<"Sat, 14-May-2011 14:25:33 GMT">>, {{2011, 5, 14}, {14, 25, 33}}}, + {<<"Sun, 01-Jan-2012 00:00:00 GMT">>, {{2012, 1, 1}, { 0, 0, 0}}} + ], + [{R, fun() -> R = rfc2109(D) end} || {R, D} <- Tests]. + +horse_rfc2109_20130101_000000() -> + horse:repeat(100000, + rfc2109({{2013, 1, 1}, {0, 0, 0}}) + ). + +horse_rfc2109_20131231_235959() -> + horse:repeat(100000, + rfc2109({{2013, 12, 31}, {23, 59, 59}}) + ). + +horse_rfc2109_12340506_070809() -> + horse:repeat(100000, + rfc2109({{1234, 5, 6}, {7, 8, 9}}) + ). +-endif. + +%% @doc Return the date formatted according to RFC7231. + +-spec rfc7231(calendar:datetime()) -> binary(). +rfc7231({Date = {Y, Mo, D}, {H, Mi, S}}) -> + Wday = calendar:day_of_the_week(Date), + << (weekday(Wday))/binary, ", ", + (pad_int(D))/binary, " ", + (month(Mo))/binary, " ", + (year(Y))/binary, " ", + (pad_int(H))/binary, ":", + (pad_int(Mi))/binary, ":", + (pad_int(S))/binary, " GMT" >>. + +-ifdef(TEST). +rfc7231_test_() -> + Tests = [ + {<<"Sat, 14 May 2011 14:25:33 GMT">>, {{2011, 5, 14}, {14, 25, 33}}}, + {<<"Sun, 01 Jan 2012 00:00:00 GMT">>, {{2012, 1, 1}, { 0, 0, 0}}} + ], + [{R, fun() -> R = rfc7231(D) end} || {R, D} <- Tests]. + +horse_rfc7231_20130101_000000() -> + horse:repeat(100000, + rfc7231({{2013, 1, 1}, {0, 0, 0}}) + ). + +horse_rfc7231_20131231_235959() -> + horse:repeat(100000, + rfc7231({{2013, 12, 31}, {23, 59, 59}}) + ). + +horse_rfc7231_12340506_070809() -> + horse:repeat(100000, + rfc7231({{1234, 5, 6}, {7, 8, 9}}) + ). +-endif. + +%% Internal. + +-spec pad_int(0..59) -> <<_:16>>. 
+pad_int( 0) -> <<"00">>; +pad_int( 1) -> <<"01">>; +pad_int( 2) -> <<"02">>; +pad_int( 3) -> <<"03">>; +pad_int( 4) -> <<"04">>; +pad_int( 5) -> <<"05">>; +pad_int( 6) -> <<"06">>; +pad_int( 7) -> <<"07">>; +pad_int( 8) -> <<"08">>; +pad_int( 9) -> <<"09">>; +pad_int(10) -> <<"10">>; +pad_int(11) -> <<"11">>; +pad_int(12) -> <<"12">>; +pad_int(13) -> <<"13">>; +pad_int(14) -> <<"14">>; +pad_int(15) -> <<"15">>; +pad_int(16) -> <<"16">>; +pad_int(17) -> <<"17">>; +pad_int(18) -> <<"18">>; +pad_int(19) -> <<"19">>; +pad_int(20) -> <<"20">>; +pad_int(21) -> <<"21">>; +pad_int(22) -> <<"22">>; +pad_int(23) -> <<"23">>; +pad_int(24) -> <<"24">>; +pad_int(25) -> <<"25">>; +pad_int(26) -> <<"26">>; +pad_int(27) -> <<"27">>; +pad_int(28) -> <<"28">>; +pad_int(29) -> <<"29">>; +pad_int(30) -> <<"30">>; +pad_int(31) -> <<"31">>; +pad_int(32) -> <<"32">>; +pad_int(33) -> <<"33">>; +pad_int(34) -> <<"34">>; +pad_int(35) -> <<"35">>; +pad_int(36) -> <<"36">>; +pad_int(37) -> <<"37">>; +pad_int(38) -> <<"38">>; +pad_int(39) -> <<"39">>; +pad_int(40) -> <<"40">>; +pad_int(41) -> <<"41">>; +pad_int(42) -> <<"42">>; +pad_int(43) -> <<"43">>; +pad_int(44) -> <<"44">>; +pad_int(45) -> <<"45">>; +pad_int(46) -> <<"46">>; +pad_int(47) -> <<"47">>; +pad_int(48) -> <<"48">>; +pad_int(49) -> <<"49">>; +pad_int(50) -> <<"50">>; +pad_int(51) -> <<"51">>; +pad_int(52) -> <<"52">>; +pad_int(53) -> <<"53">>; +pad_int(54) -> <<"54">>; +pad_int(55) -> <<"55">>; +pad_int(56) -> <<"56">>; +pad_int(57) -> <<"57">>; +pad_int(58) -> <<"58">>; +pad_int(59) -> <<"59">>; +pad_int(60) -> <<"60">>; +pad_int(Int) -> integer_to_binary(Int). + +-spec weekday(1..7) -> <<_:24>>. +weekday(1) -> <<"Mon">>; +weekday(2) -> <<"Tue">>; +weekday(3) -> <<"Wed">>; +weekday(4) -> <<"Thu">>; +weekday(5) -> <<"Fri">>; +weekday(6) -> <<"Sat">>; +weekday(7) -> <<"Sun">>. + +-spec month(1..12) -> <<_:24>>. +month( 1) -> <<"Jan">>; +month( 2) -> <<"Feb">>; +month( 3) -> <<"Mar">>; +month( 4) -> <<"Apr">>; +month( 5) -> <<"May">>; +month( 6) -> <<"Jun">>; +month( 7) -> <<"Jul">>; +month( 8) -> <<"Aug">>; +month( 9) -> <<"Sep">>; +month(10) -> <<"Oct">>; +month(11) -> <<"Nov">>; +month(12) -> <<"Dec">>. + +-spec year(pos_integer()) -> <<_:32>>. 
+year(1970) -> <<"1970">>; +year(1971) -> <<"1971">>; +year(1972) -> <<"1972">>; +year(1973) -> <<"1973">>; +year(1974) -> <<"1974">>; +year(1975) -> <<"1975">>; +year(1976) -> <<"1976">>; +year(1977) -> <<"1977">>; +year(1978) -> <<"1978">>; +year(1979) -> <<"1979">>; +year(1980) -> <<"1980">>; +year(1981) -> <<"1981">>; +year(1982) -> <<"1982">>; +year(1983) -> <<"1983">>; +year(1984) -> <<"1984">>; +year(1985) -> <<"1985">>; +year(1986) -> <<"1986">>; +year(1987) -> <<"1987">>; +year(1988) -> <<"1988">>; +year(1989) -> <<"1989">>; +year(1990) -> <<"1990">>; +year(1991) -> <<"1991">>; +year(1992) -> <<"1992">>; +year(1993) -> <<"1993">>; +year(1994) -> <<"1994">>; +year(1995) -> <<"1995">>; +year(1996) -> <<"1996">>; +year(1997) -> <<"1997">>; +year(1998) -> <<"1998">>; +year(1999) -> <<"1999">>; +year(2000) -> <<"2000">>; +year(2001) -> <<"2001">>; +year(2002) -> <<"2002">>; +year(2003) -> <<"2003">>; +year(2004) -> <<"2004">>; +year(2005) -> <<"2005">>; +year(2006) -> <<"2006">>; +year(2007) -> <<"2007">>; +year(2008) -> <<"2008">>; +year(2009) -> <<"2009">>; +year(2010) -> <<"2010">>; +year(2011) -> <<"2011">>; +year(2012) -> <<"2012">>; +year(2013) -> <<"2013">>; +year(2014) -> <<"2014">>; +year(2015) -> <<"2015">>; +year(2016) -> <<"2016">>; +year(2017) -> <<"2017">>; +year(2018) -> <<"2018">>; +year(2019) -> <<"2019">>; +year(2020) -> <<"2020">>; +year(2021) -> <<"2021">>; +year(2022) -> <<"2022">>; +year(2023) -> <<"2023">>; +year(2024) -> <<"2024">>; +year(2025) -> <<"2025">>; +year(2026) -> <<"2026">>; +year(2027) -> <<"2027">>; +year(2028) -> <<"2028">>; +year(2029) -> <<"2029">>; +year(Year) -> integer_to_binary(Year). diff --git a/deps/cowlib/src/cow_hpack.erl b/deps/cowlib/src/cow_hpack.erl new file mode 100644 index 0000000..4a02d79 --- /dev/null +++ b/deps/cowlib/src/cow_hpack.erl @@ -0,0 +1,1449 @@ +%% Copyright (c) 2015-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +%% The current implementation is not suitable for use in +%% intermediaries as the information about headers that +%% should never be indexed is currently lost. + +-module(cow_hpack). +-dialyzer(no_improper_lists). + +-export([init/0]). +-export([init/1]). +-export([set_max_size/2]). + +-export([decode/1]). +-export([decode/2]). + +-export([encode/1]). +-export([encode/2]). +-export([encode/3]). + +-record(state, { + size = 0 :: non_neg_integer(), + max_size = 4096 :: non_neg_integer(), + configured_max_size = 4096 :: non_neg_integer(), + dyn_table = [] :: [{pos_integer(), {binary(), binary()}}] +}). + +-opaque state() :: #state{}. +-export_type([state/0]). + +-type opts() :: map(). +-export_type([opts/0]). + +-ifdef(TEST). +-include_lib("proper/include/proper.hrl"). +-endif. + +%% State initialization. + +-spec init() -> state(). +init() -> + #state{}. + +-spec init(non_neg_integer()) -> state(). 
+init(MaxSize) ->
+    #state{max_size=MaxSize, configured_max_size=MaxSize}.
+
+%% Update the configured max size.
+%%
+%% When decoding, the local endpoint also needs to send a SETTINGS
+%% frame with this value and it is then up to the remote endpoint
+%% to decide what actual limit it will use. The actual limit is
+%% signaled via dynamic table size updates in the encoded data.
+%%
+%% When encoding, the local endpoint will call this function after
+%% receiving a SETTINGS frame with this value. The encoder will
+%% then use this value as the new max after signaling via a dynamic
+%% table size update. The value given as argument may be lower
+%% than the one received in the SETTINGS.
+
+-spec set_max_size(non_neg_integer(), State) -> State when State::state().
+set_max_size(MaxSize, State) ->
+    State#state{configured_max_size=MaxSize}.
+
+%% Decoding.
+
+-spec decode(binary()) -> {cow_http:headers(), state()}.
+decode(Data) ->
+    decode(Data, init()).
+
+-spec decode(binary(), State) -> {cow_http:headers(), State} when State::state().
+%% Dynamic table size update is only allowed at the beginning of a HEADERS block.
+decode(<< 0:2, 1:1, Rest/bits >>, State=#state{configured_max_size=ConfigMaxSize}) ->
+    {MaxSize, Rest2} = dec_int5(Rest),
+    if
+        MaxSize =< ConfigMaxSize ->
+            State2 = table_update_size(MaxSize, State),
+            decode(Rest2, State2)
+    end;
+decode(Data, State) ->
+    decode(Data, State, []).
+
+decode(<<>>, State, Acc) ->
+    {lists:reverse(Acc), State};
+%% Indexed header field representation.
+decode(<< 1:1, Rest/bits >>, State, Acc) ->
+    dec_indexed(Rest, State, Acc);
+%% Literal header field with incremental indexing: new name.
+decode(<< 0:1, 1:1, 0:6, Rest/bits >>, State, Acc) ->
+    dec_lit_index_new_name(Rest, State, Acc);
+%% Literal header field with incremental indexing: indexed name.
+decode(<< 0:1, 1:1, Rest/bits >>, State, Acc) ->
+    dec_lit_index_indexed_name(Rest, State, Acc);
+%% Literal header field without indexing: new name.
+decode(<< 0:8, Rest/bits >>, State, Acc) ->
+    dec_lit_no_index_new_name(Rest, State, Acc);
+%% Literal header field without indexing: indexed name.
+decode(<< 0:4, Rest/bits >>, State, Acc) ->
+    dec_lit_no_index_indexed_name(Rest, State, Acc);
+%% Literal header field never indexed: new name.
+%% @todo Keep track of "never indexed" headers.
+decode(<< 0:3, 1:1, 0:4, Rest/bits >>, State, Acc) ->
+    dec_lit_no_index_new_name(Rest, State, Acc);
+%% Literal header field never indexed: indexed name.
+%% @todo Keep track of "never indexed" headers.
+decode(<< 0:3, 1:1, Rest/bits >>, State, Acc) ->
+    dec_lit_no_index_indexed_name(Rest, State, Acc).
+
+%% Indexed header field representation.
+
+%% We do the integer decoding inline where appropriate, falling
+%% back to dec_big_int for larger values.
+dec_indexed(<<2#1111111:7, 0:1, Int:7, Rest/bits>>, State, Acc) ->
+    {Name, Value} = table_get(127 + Int, State),
+    decode(Rest, State, [{Name, Value}|Acc]);
+dec_indexed(<<2#1111111:7, Rest0/bits>>, State, Acc) ->
+    {Index, Rest} = dec_big_int(Rest0, 127, 0),
+    {Name, Value} = table_get(Index, State),
+    decode(Rest, State, [{Name, Value}|Acc]);
+dec_indexed(<<Index:7, Rest/bits>>, State, Acc) ->
+    {Name, Value} = table_get(Index, State),
+    decode(Rest, State, [{Name, Value}|Acc]).
+
+%% Literal header field with incremental indexing.
+
+dec_lit_index_new_name(Rest, State, Acc) ->
+    {Name, Rest2} = dec_str(Rest),
+    dec_lit_index(Rest2, State, Acc, Name).
+
+%% We do the integer decoding inline where appropriate, falling
+%% back to dec_big_int for larger values.
+dec_lit_index_indexed_name(<<2#111111:6, 0:1, Int:7, Rest/bits>>, State, Acc) ->
+    Name = table_get_name(63 + Int, State),
+    dec_lit_index(Rest, State, Acc, Name);
+dec_lit_index_indexed_name(<<2#111111:6, Rest0/bits>>, State, Acc) ->
+    {Index, Rest} = dec_big_int(Rest0, 63, 0),
+    Name = table_get_name(Index, State),
+    dec_lit_index(Rest, State, Acc, Name);
+dec_lit_index_indexed_name(<<Index:6, Rest/bits>>, State, Acc) ->
+    Name = table_get_name(Index, State),
+    dec_lit_index(Rest, State, Acc, Name).
+
+dec_lit_index(Rest, State, Acc, Name) ->
+    {Value, Rest2} = dec_str(Rest),
+    State2 = table_insert({Name, Value}, State),
+    decode(Rest2, State2, [{Name, Value}|Acc]).
+
+%% Literal header field without indexing.
+
+dec_lit_no_index_new_name(Rest, State, Acc) ->
+    {Name, Rest2} = dec_str(Rest),
+    dec_lit_no_index(Rest2, State, Acc, Name).
+
+%% We do the integer decoding inline where appropriate, falling
+%% back to dec_big_int for larger values.
+dec_lit_no_index_indexed_name(<<2#1111:4, 0:1, Int:7, Rest/bits>>, State, Acc) ->
+    Name = table_get_name(15 + Int, State),
+    dec_lit_no_index(Rest, State, Acc, Name);
+dec_lit_no_index_indexed_name(<<2#1111:4, Rest0/bits>>, State, Acc) ->
+    {Index, Rest} = dec_big_int(Rest0, 15, 0),
+    Name = table_get_name(Index, State),
+    dec_lit_no_index(Rest, State, Acc, Name);
+dec_lit_no_index_indexed_name(<<Index:4, Rest/bits>>, State, Acc) ->
+    Name = table_get_name(Index, State),
+    dec_lit_no_index(Rest, State, Acc, Name).
+
+dec_lit_no_index(Rest, State, Acc, Name) ->
+    {Value, Rest2} = dec_str(Rest),
+    decode(Rest2, State, [{Name, Value}|Acc]).
+
+%% @todo Literal header field never indexed.
+
+%% Decode an integer.
+
+%% The HPACK format has 4 different integer prefixes length (from 4 to 7)
+%% and each can be used to create an indefinite length integer if all bits
+%% of the prefix are set to 1.
+
+dec_int5(<< 2#11111:5, Rest/bits >>) ->
+    dec_big_int(Rest, 31, 0);
+dec_int5(<< Int:5, Rest/bits >>) ->
+    {Int, Rest}.
+
+dec_big_int(<< 0:1, Value:7, Rest/bits >>, Int, M) ->
+    {Int + (Value bsl M), Rest};
+dec_big_int(<< 1:1, Value:7, Rest/bits >>, Int, M) ->
+    dec_big_int(Rest, Int + (Value bsl M), M + 7).
+
+%% Decode a string.
+
+dec_str(<<0:1, 2#1111111:7, Rest0/bits>>) ->
+    {Length, Rest1} = dec_big_int(Rest0, 127, 0),
+    <<Str:Length/binary, Rest/bits>> = Rest1,
+    {Str, Rest};
+dec_str(<<0:1, Length:7, Rest0/bits>>) ->
+    <<Str:Length/binary, Rest/bits>> = Rest0,
+    {Str, Rest};
+dec_str(<<1:1, 2#1111111:7, Rest0/bits>>) ->
+    {Length, Rest} = dec_big_int(Rest0, 127, 0),
+    dec_huffman(Rest, Length, 0, <<>>);
+dec_str(<<1:1, Length:7, Rest/bits>>) ->
+    dec_huffman(Rest, Length, 0, <<>>).
+
+%% We use a lookup table that allows us to benefit from
+%% the binary match context optimization. A more naive
+%% implementation using bit pattern matching cannot reuse
+%% a match context because it wouldn't always match on
+%% byte boundaries.
+%%
+%% See cow_hpack_dec_huffman_lookup.hrl for more details.
+
+dec_huffman(<<A:4, B:4, R/bits>>, Len, Huff0, Acc) when Len > 1 ->
+    {_, CharA, Huff1} = dec_huffman_lookup(Huff0, A),
+    {_, CharB, Huff} = dec_huffman_lookup(Huff1, B),
+    case {CharA, CharB} of
+        {undefined, undefined} -> dec_huffman(R, Len - 1, Huff, Acc);
+        {CharA, undefined} -> dec_huffman(R, Len - 1, Huff, <<Acc/binary, CharA>>);
+        {undefined, CharB} -> dec_huffman(R, Len - 1, Huff, <<Acc/binary, CharB>>);
+        {CharA, CharB} -> dec_huffman(R, Len - 1, Huff, <<Acc/binary, CharA, CharB>>)
+    end;
+dec_huffman(<<A:4, B:4, Rest/bits>>, 1, Huff0, Acc) ->
+    {_, CharA, Huff} = dec_huffman_lookup(Huff0, A),
+    {ok, CharB, _} = dec_huffman_lookup(Huff, B),
+    case {CharA, CharB} of
+        %% {undefined, undefined} (> 7-bit final padding) is rejected with a crash.
+		{CharA, undefined} ->
+			{<<Acc/binary, CharA>>, Rest};
+		{undefined, CharB} ->
+			{<<Acc/binary, CharB>>, Rest};
+		_ ->
+			{<<Acc/binary, CharA, CharB>>, Rest}
+	end;
+%% Can only be reached when the string length to decode is 0.
+dec_huffman(Rest, 0, _, <<>>) ->
+	{<<>>, Rest}.
+
+-include("cow_hpack_dec_huffman_lookup.hrl").
+
+-ifdef(TEST).
+%% Test case extracted from h2spec.
+decode_reject_eos_test() ->
+	{'EXIT', _} = (catch decode(<<16#0085f2b24a84ff874951fffffffa7f:120>>)),
+	ok.
+
+req_decode_test() ->
+	%% First request (raw then huffman).
+	{Headers1, State1} = decode(<< 16#828684410f7777772e6578616d706c652e636f6d:160 >>),
+	{Headers1, State1} = decode(<< 16#828684418cf1e3c2e5f23a6ba0ab90f4ff:136 >>),
+	Headers1 = [
+		{<<":method">>, <<"GET">>},
+		{<<":scheme">>, <<"http">>},
+		{<<":path">>, <<"/">>},
+		{<<":authority">>, <<"www.example.com">>}
+	],
+	#state{size=57, dyn_table=[{57,{<<":authority">>, <<"www.example.com">>}}]} = State1,
+	%% Second request (raw then huffman).
+	{Headers2, State2} = decode(<< 16#828684be58086e6f2d6361636865:112 >>, State1),
+	{Headers2, State2} = decode(<< 16#828684be5886a8eb10649cbf:96 >>, State1),
+	Headers2 = [
+		{<<":method">>, <<"GET">>},
+		{<<":scheme">>, <<"http">>},
+		{<<":path">>, <<"/">>},
+		{<<":authority">>, <<"www.example.com">>},
+		{<<"cache-control">>, <<"no-cache">>}
+	],
+	#state{size=110, dyn_table=[
+		{53,{<<"cache-control">>, <<"no-cache">>}},
+		{57,{<<":authority">>, <<"www.example.com">>}}]} = State2,
+	%% Third request (raw then huffman).
+	{Headers3, State3} = decode(<< 16#828785bf400a637573746f6d2d6b65790c637573746f6d2d76616c7565:232 >>, State2),
+	{Headers3, State3} = decode(<< 16#828785bf408825a849e95ba97d7f8925a849e95bb8e8b4bf:192 >>, State2),
+	Headers3 = [
+		{<<":method">>, <<"GET">>},
+		{<<":scheme">>, <<"https">>},
+		{<<":path">>, <<"/index.html">>},
+		{<<":authority">>, <<"www.example.com">>},
+		{<<"custom-key">>, <<"custom-value">>}
+	],
+	#state{size=164, dyn_table=[
+		{54,{<<"custom-key">>, <<"custom-value">>}},
+		{53,{<<"cache-control">>, <<"no-cache">>}},
+		{57,{<<":authority">>, <<"www.example.com">>}}]} = State3,
+	ok.
+
+resp_decode_test() ->
+	%% Use a max_size of 256 to trigger header evictions.
+	State0 = init(256),
+	%% First response (raw then huffman).
+	{Headers1, State1} = decode(<< 16#4803333032580770726976617465611d4d6f6e2c203231204f637420323031332032303a31333a323120474d546e1768747470733a2f2f7777772e6578616d706c652e636f6d:560 >>, State0),
+	{Headers1, State1} = decode(<< 16#488264025885aec3771a4b6196d07abe941054d444a8200595040b8166e082a62d1bff6e919d29ad171863c78f0b97c8e9ae82ae43d3:432 >>, State0),
+	Headers1 = [
+		{<<":status">>, <<"302">>},
+		{<<"cache-control">>, <<"private">>},
+		{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>},
+		{<<"location">>, <<"https://www.example.com">>}
+	],
+	#state{size=222, dyn_table=[
+		{63,{<<"location">>, <<"https://www.example.com">>}},
+		{65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}},
+		{52,{<<"cache-control">>, <<"private">>}},
+		{42,{<<":status">>, <<"302">>}}]} = State1,
+	%% Second response (raw then huffman).
+ {Headers2, State2} = decode(<< 16#4803333037c1c0bf:64 >>, State1), + {Headers2, State2} = decode(<< 16#4883640effc1c0bf:64 >>, State1), + Headers2 = [ + {<<":status">>, <<"307">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}, + {<<"location">>, <<"https://www.example.com">>} + ], + #state{size=222, dyn_table=[ + {42,{<<":status">>, <<"307">>}}, + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}]} = State2, + %% Third response (raw then huffman). + {Headers3, State3} = decode(<< 16#88c1611d4d6f6e2c203231204f637420323031332032303a31333a323220474d54c05a04677a69707738666f6f3d4153444a4b48514b425a584f5157454f50495541585157454f49553b206d61782d6167653d333630303b2076657273696f6e3d31:784 >>, State2), + {Headers3, State3} = decode(<< 16#88c16196d07abe941054d444a8200595040b8166e084a62d1bffc05a839bd9ab77ad94e7821dd7f2e6c7b335dfdfcd5b3960d5af27087f3672c1ab270fb5291f9587316065c003ed4ee5b1063d5007:632 >>, State2), + Headers3 = [ + {<<":status">>, <<"200">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:22 GMT">>}, + {<<"location">>, <<"https://www.example.com">>}, + {<<"content-encoding">>, <<"gzip">>}, + {<<"set-cookie">>, <<"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1">>} + ], + #state{size=215, dyn_table=[ + {98,{<<"set-cookie">>, <<"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1">>}}, + {52,{<<"content-encoding">>, <<"gzip">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:22 GMT">>}}]} = State3, + ok. + +table_update_decode_test() -> + %% Use a max_size of 256 to trigger header evictions + %% when the code is not updating the max size. + State0 = init(256), + %% First response (raw then huffman). + {Headers1, State1} = decode(<< 16#4803333032580770726976617465611d4d6f6e2c203231204f637420323031332032303a31333a323120474d546e1768747470733a2f2f7777772e6578616d706c652e636f6d:560 >>, State0), + {Headers1, State1} = decode(<< 16#488264025885aec3771a4b6196d07abe941054d444a8200595040b8166e082a62d1bff6e919d29ad171863c78f0b97c8e9ae82ae43d3:432 >>, State0), + Headers1 = [ + {<<":status">>, <<"302">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}, + {<<"location">>, <<"https://www.example.com">>} + ], + #state{size=222, configured_max_size=256, dyn_table=[ + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = State1, + %% Set a new configured max_size to avoid header evictions. + State2 = set_max_size(512, State1), + %% Second response with the table size update (raw then huffman). 
+ MaxSize = enc_big_int(512 - 31, <<>>), + {Headers2, State3} = decode( + iolist_to_binary([<< 2#00111111>>, MaxSize, <<16#4803333037c1c0bf:64>>]), + State2), + {Headers2, State3} = decode( + iolist_to_binary([<< 2#00111111>>, MaxSize, <<16#4883640effc1c0bf:64>>]), + State2), + Headers2 = [ + {<<":status">>, <<"307">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}, + {<<"location">>, <<"https://www.example.com">>} + ], + #state{size=264, configured_max_size=512, dyn_table=[ + {42,{<<":status">>, <<"307">>}}, + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = State3, + ok. + +table_update_decode_smaller_test() -> + %% Use a max_size of 256 to trigger header evictions + %% when the code is not updating the max size. + State0 = init(256), + %% First response (raw then huffman). + {Headers1, State1} = decode(<< 16#4803333032580770726976617465611d4d6f6e2c203231204f637420323031332032303a31333a323120474d546e1768747470733a2f2f7777772e6578616d706c652e636f6d:560 >>, State0), + {Headers1, State1} = decode(<< 16#488264025885aec3771a4b6196d07abe941054d444a8200595040b8166e082a62d1bff6e919d29ad171863c78f0b97c8e9ae82ae43d3:432 >>, State0), + Headers1 = [ + {<<":status">>, <<"302">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}, + {<<"location">>, <<"https://www.example.com">>} + ], + #state{size=222, configured_max_size=256, dyn_table=[ + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = State1, + %% Set a new configured max_size to avoid header evictions. + State2 = set_max_size(512, State1), + %% Second response with the table size update smaller than the limit (raw then huffman). + MaxSize = enc_big_int(400 - 31, <<>>), + {Headers2, State3} = decode( + iolist_to_binary([<< 2#00111111>>, MaxSize, <<16#4803333037c1c0bf:64>>]), + State2), + {Headers2, State3} = decode( + iolist_to_binary([<< 2#00111111>>, MaxSize, <<16#4883640effc1c0bf:64>>]), + State2), + Headers2 = [ + {<<":status">>, <<"307">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}, + {<<"location">>, <<"https://www.example.com">>} + ], + #state{size=264, configured_max_size=512, dyn_table=[ + {42,{<<":status">>, <<"307">>}}, + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = State3, + ok. + +table_update_decode_too_large_test() -> + %% Use a max_size of 256 to trigger header evictions + %% when the code is not updating the max size. + State0 = init(256), + %% First response (raw then huffman). 
+ {Headers1, State1} = decode(<< 16#4803333032580770726976617465611d4d6f6e2c203231204f637420323031332032303a31333a323120474d546e1768747470733a2f2f7777772e6578616d706c652e636f6d:560 >>, State0), + {Headers1, State1} = decode(<< 16#488264025885aec3771a4b6196d07abe941054d444a8200595040b8166e082a62d1bff6e919d29ad171863c78f0b97c8e9ae82ae43d3:432 >>, State0), + Headers1 = [ + {<<":status">>, <<"302">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}, + {<<"location">>, <<"https://www.example.com">>} + ], + #state{size=222, configured_max_size=256, dyn_table=[ + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = State1, + %% Set a new configured max_size to avoid header evictions. + State2 = set_max_size(512, State1), + %% Second response with the table size update (raw then huffman). + MaxSize = enc_big_int(1024 - 31, <<>>), + {'EXIT', _} = (catch decode( + iolist_to_binary([<< 2#00111111>>, MaxSize, <<16#4803333037c1c0bf:64>>]), + State2)), + {'EXIT', _} = (catch decode( + iolist_to_binary([<< 2#00111111>>, MaxSize, <<16#4883640effc1c0bf:64>>]), + State2)), + ok. + +table_update_decode_zero_test() -> + State0 = init(256), + %% First response (raw then huffman). + {Headers1, State1} = decode(<< 16#4803333032580770726976617465611d4d6f6e2c203231204f637420323031332032303a31333a323120474d546e1768747470733a2f2f7777772e6578616d706c652e636f6d:560 >>, State0), + {Headers1, State1} = decode(<< 16#488264025885aec3771a4b6196d07abe941054d444a8200595040b8166e082a62d1bff6e919d29ad171863c78f0b97c8e9ae82ae43d3:432 >>, State0), + Headers1 = [ + {<<":status">>, <<"302">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}, + {<<"location">>, <<"https://www.example.com">>} + ], + #state{size=222, configured_max_size=256, dyn_table=[ + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = State1, + %% Set a new configured max_size to avoid header evictions. + State2 = set_max_size(512, State1), + %% Second response with the table size update (raw then huffman). + %% We set the table size to 0 to evict all values before setting + %% it to 512 so we only get the second request indexed. + MaxSize = enc_big_int(512 - 31, <<>>), + {Headers1, State3} = decode(iolist_to_binary([ + <<2#00100000, 2#00111111>>, MaxSize, + <<16#4803333032580770726976617465611d4d6f6e2c203231204f637420323031332032303a31333a323120474d546e1768747470733a2f2f7777772e6578616d706c652e636f6d:560>>]), + State2), + {Headers1, State3} = decode(iolist_to_binary([ + <<2#00100000, 2#00111111>>, MaxSize, + <<16#488264025885aec3771a4b6196d07abe941054d444a8200595040b8166e082a62d1bff6e919d29ad171863c78f0b97c8e9ae82ae43d3:432>>]), + State2), + #state{size=222, configured_max_size=512, dyn_table=[ + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = State3, + ok. + +horse_decode_raw() -> + horse:repeat(20000, + do_horse_decode_raw() + ). 
+
+do_horse_decode_raw() ->
+	{_, State1} = decode(<<16#828684410f7777772e6578616d706c652e636f6d:160>>),
+	{_, State2} = decode(<<16#828684be58086e6f2d6361636865:112>>, State1),
+	{_, _} = decode(<<16#828785bf400a637573746f6d2d6b65790c637573746f6d2d76616c7565:232>>, State2),
+	ok.
+
+horse_decode_huffman() ->
+	horse:repeat(20000,
+		do_horse_decode_huffman()
+	).
+
+do_horse_decode_huffman() ->
+	{_, State1} = decode(<<16#828684418cf1e3c2e5f23a6ba0ab90f4ff:136>>),
+	{_, State2} = decode(<<16#828684be5886a8eb10649cbf:96>>, State1),
+	{_, _} = decode(<<16#828785bf408825a849e95ba97d7f8925a849e95bb8e8b4bf:192>>, State2),
+	ok.
+-endif.
+
+%% Encoding.
+
+-spec encode(cow_http:headers()) -> {iodata(), state()}.
+encode(Headers) ->
+	encode(Headers, init(), huffman, []).
+
+-spec encode(cow_http:headers(), State) -> {iodata(), State} when State::state().
+encode(Headers, State=#state{max_size=MaxSize, configured_max_size=MaxSize}) ->
+	encode(Headers, State, huffman, []);
+encode(Headers, State0=#state{configured_max_size=MaxSize}) ->
+	State1 = table_update_size(MaxSize, State0),
+	{Data, State} = encode(Headers, State1, huffman, []),
+	{[enc_int5(MaxSize, 2#001)|Data], State}.
+
+-spec encode(cow_http:headers(), State, opts()) -> {iodata(), State} when State::state().
+encode(Headers, State=#state{max_size=MaxSize, configured_max_size=MaxSize}, Opts) ->
+	encode(Headers, State, huffman_opt(Opts), []);
+encode(Headers, State0=#state{configured_max_size=MaxSize}, Opts) ->
+	State1 = table_update_size(MaxSize, State0),
+	{Data, State} = encode(Headers, State1, huffman_opt(Opts), []),
+	{[enc_int5(MaxSize, 2#001)|Data], State}.
+
+huffman_opt(#{huffman := false}) -> no_huffman;
+huffman_opt(_) -> huffman.
+
+%% @todo Handle cases where no/never indexing is expected.
+encode([], State, _, Acc) ->
+	{lists:reverse(Acc), State};
+encode([{Name, Value0}|Tail], State, HuffmanOpt, Acc) ->
+	%% We conditionally call iolist_to_binary/1 because a small
+	%% but noticeable speed improvement happens when we do this.
+	Value = if
+		is_binary(Value0) -> Value0;
+		true -> iolist_to_binary(Value0)
+	end,
+	Header = {Name, Value},
+	case table_find(Header, State) of
+		%% Indexed header field representation.
+		{field, Index} ->
+			encode(Tail, State, HuffmanOpt,
+				[enc_int7(Index, 2#1)|Acc]);
+		%% Literal header field representation: indexed name.
+		{name, Index} ->
+			State2 = table_insert(Header, State),
+			encode(Tail, State2, HuffmanOpt,
+				[[enc_int6(Index, 2#01)|enc_str(Value, HuffmanOpt)]|Acc]);
+		%% Literal header field representation: new name.
+		not_found ->
+			State2 = table_insert(Header, State),
+			encode(Tail, State2, HuffmanOpt,
+				[[<< 0:1, 1:1, 0:6 >>|[enc_str(Name, HuffmanOpt)|enc_str(Value, HuffmanOpt)]]|Acc])
+	end.
+
+%% Encode an integer.
+
+enc_int5(Int, Prefix) when Int < 31 ->
+	<< Prefix:3, Int:5 >>;
+enc_int5(Int, Prefix) ->
+	enc_big_int(Int - 31, << Prefix:3, 2#11111:5 >>).
+
+enc_int6(Int, Prefix) when Int < 63 ->
+	<< Prefix:2, Int:6 >>;
+enc_int6(Int, Prefix) ->
+	enc_big_int(Int - 63, << Prefix:2, 2#111111:6 >>).
+
+enc_int7(Int, Prefix) when Int < 127 ->
+	<< Prefix:1, Int:7 >>;
+enc_int7(Int, Prefix) ->
+	enc_big_int(Int - 127, << Prefix:1, 2#1111111:7 >>).
+
+enc_big_int(Int, Acc) when Int < 128 ->
+	<<Acc/binary, Int:8>>;
+enc_big_int(Int, Acc) ->
+	enc_big_int(Int bsr 7, <<Acc/binary, 1:1, Int:7>>).
+
+%% Encode a string.
+
+enc_str(Str, huffman) ->
+	Str2 = enc_huffman(Str, <<>>),
+	[enc_int7(byte_size(Str2), 2#1)|Str2];
+enc_str(Str, no_huffman) ->
+	[enc_int7(byte_size(Str), 2#0)|Str].
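+
+%% Editor's note (illustrative, not part of the upstream cowlib source):
+%% a worked example of the prefixed-integer encoding above, following
+%% RFC 7541 Appendix C.1.2. Encoding 1337 with a 5-bit prefix does not
+%% fit in 5 bits, so the prefix is filled with ones and the remainder
+%% (1337 - 31 = 1306) is emitted 7 bits at a time, least significant
+%% group first, with the high bit of each byte marking continuation:
+%%
+%%   enc_int5(1337, 2#001) =:= <<2#001:3, 2#11111:5, 1:1, 26:7, 0:1, 10:7>>
+%%
+%% because 1306 = 26 + (10 bsl 7). Decoding reverses this: dec_int5/1
+%% sees the all-ones prefix (31) and dec_big_int/3 accumulates
+%% 31 + 26 + (10 bsl 7) = 1337.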
+ +enc_huffman(<<>>, Acc) -> + case bit_size(Acc) rem 8 of + 1 -> << Acc/bits, 2#1111111:7 >>; + 2 -> << Acc/bits, 2#111111:6 >>; + 3 -> << Acc/bits, 2#11111:5 >>; + 4 -> << Acc/bits, 2#1111:4 >>; + 5 -> << Acc/bits, 2#111:3 >>; + 6 -> << Acc/bits, 2#11:2 >>; + 7 -> << Acc/bits, 2#1:1 >>; + 0 -> Acc + end; +enc_huffman(<< 0, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111000:13 >>); +enc_huffman(<< 1, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111011000:23 >>); +enc_huffman(<< 2, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111100010:28 >>); +enc_huffman(<< 3, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111100011:28 >>); +enc_huffman(<< 4, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111100100:28 >>); +enc_huffman(<< 5, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111100101:28 >>); +enc_huffman(<< 6, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111100110:28 >>); +enc_huffman(<< 7, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111100111:28 >>); +enc_huffman(<< 8, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111101000:28 >>); +enc_huffman(<< 9, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111101010:24 >>); +enc_huffman(<< 10, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111111111100:30 >>); +enc_huffman(<< 11, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111101001:28 >>); +enc_huffman(<< 12, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111101010:28 >>); +enc_huffman(<< 13, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111111111101:30 >>); +enc_huffman(<< 14, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111101011:28 >>); +enc_huffman(<< 15, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111101100:28 >>); +enc_huffman(<< 16, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111101101:28 >>); +enc_huffman(<< 17, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111101110:28 >>); +enc_huffman(<< 18, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111101111:28 >>); +enc_huffman(<< 19, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111110000:28 >>); +enc_huffman(<< 20, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111110001:28 >>); +enc_huffman(<< 21, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111110010:28 >>); +enc_huffman(<< 22, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111111111110:30 >>); +enc_huffman(<< 23, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111110011:28 >>); +enc_huffman(<< 24, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111110100:28 >>); +enc_huffman(<< 25, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111110101:28 >>); +enc_huffman(<< 26, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111110110:28 >>); +enc_huffman(<< 27, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111110111:28 >>); +enc_huffman(<< 28, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111111000:28 >>); +enc_huffman(<< 29, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111111001:28 >>); +enc_huffman(<< 30, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111111010:28 >>); +enc_huffman(<< 31, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111111011:28 >>); 
+enc_huffman(<< 32, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#010100:6 >>); +enc_huffman(<< 33, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111000:10 >>); +enc_huffman(<< 34, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111001:10 >>); +enc_huffman(<< 35, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111010:12 >>); +enc_huffman(<< 36, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111001:13 >>); +enc_huffman(<< 37, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#010101:6 >>); +enc_huffman(<< 38, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111000:8 >>); +enc_huffman(<< 39, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111010:11 >>); +enc_huffman(<< 40, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111010:10 >>); +enc_huffman(<< 41, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111011:10 >>); +enc_huffman(<< 42, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111001:8 >>); +enc_huffman(<< 43, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111011:11 >>); +enc_huffman(<< 44, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111010:8 >>); +enc_huffman(<< 45, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#010110:6 >>); +enc_huffman(<< 46, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#010111:6 >>); +enc_huffman(<< 47, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#011000:6 >>); +enc_huffman(<< 48, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#00000:5 >>); +enc_huffman(<< 49, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#00001:5 >>); +enc_huffman(<< 50, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#00010:5 >>); +enc_huffman(<< 51, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#011001:6 >>); +enc_huffman(<< 52, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#011010:6 >>); +enc_huffman(<< 53, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#011011:6 >>); +enc_huffman(<< 54, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#011100:6 >>); +enc_huffman(<< 55, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#011101:6 >>); +enc_huffman(<< 56, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#011110:6 >>); +enc_huffman(<< 57, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#011111:6 >>); +enc_huffman(<< 58, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1011100:7 >>); +enc_huffman(<< 59, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111011:8 >>); +enc_huffman(<< 60, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111100:15 >>); +enc_huffman(<< 61, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#100000:6 >>); +enc_huffman(<< 62, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111011:12 >>); +enc_huffman(<< 63, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111100:10 >>); +enc_huffman(<< 64, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111010:13 >>); +enc_huffman(<< 65, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#100001:6 >>); +enc_huffman(<< 66, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1011101:7 >>); +enc_huffman(<< 67, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1011110:7 >>); +enc_huffman(<< 68, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1011111:7 >>); +enc_huffman(<< 69, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1100000:7 >>); +enc_huffman(<< 70, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1100001:7 >>); +enc_huffman(<< 71, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1100010:7 >>); +enc_huffman(<< 72, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1100011:7 >>); +enc_huffman(<< 73, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1100100:7 >>); +enc_huffman(<< 74, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1100101:7 >>); +enc_huffman(<< 75, R/bits >>, A) -> enc_huffman(R, << A/bits, 
2#1100110:7 >>); +enc_huffman(<< 76, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1100111:7 >>); +enc_huffman(<< 77, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1101000:7 >>); +enc_huffman(<< 78, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1101001:7 >>); +enc_huffman(<< 79, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1101010:7 >>); +enc_huffman(<< 80, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1101011:7 >>); +enc_huffman(<< 81, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1101100:7 >>); +enc_huffman(<< 82, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1101101:7 >>); +enc_huffman(<< 83, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1101110:7 >>); +enc_huffman(<< 84, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1101111:7 >>); +enc_huffman(<< 85, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1110000:7 >>); +enc_huffman(<< 86, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1110001:7 >>); +enc_huffman(<< 87, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1110010:7 >>); +enc_huffman(<< 88, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111100:8 >>); +enc_huffman(<< 89, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1110011:7 >>); +enc_huffman(<< 90, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111101:8 >>); +enc_huffman(<< 91, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111011:13 >>); +enc_huffman(<< 92, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111110000:19 >>); +enc_huffman(<< 93, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111100:13 >>); +enc_huffman(<< 94, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111100:14 >>); +enc_huffman(<< 95, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#100010:6 >>); +enc_huffman(<< 96, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111101:15 >>); +enc_huffman(<< 97, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#00011:5 >>); +enc_huffman(<< 98, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#100011:6 >>); +enc_huffman(<< 99, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#00100:5 >>); +enc_huffman(<< 100, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#100100:6 >>); +enc_huffman(<< 101, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#00101:5 >>); +enc_huffman(<< 102, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#100101:6 >>); +enc_huffman(<< 103, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#100110:6 >>); +enc_huffman(<< 104, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#100111:6 >>); +enc_huffman(<< 105, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#00110:5 >>); +enc_huffman(<< 106, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1110100:7 >>); +enc_huffman(<< 107, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1110101:7 >>); +enc_huffman(<< 108, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#101000:6 >>); +enc_huffman(<< 109, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#101001:6 >>); +enc_huffman(<< 110, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#101010:6 >>); +enc_huffman(<< 111, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#00111:5 >>); +enc_huffman(<< 112, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#101011:6 >>); +enc_huffman(<< 113, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1110110:7 >>); +enc_huffman(<< 114, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#101100:6 >>); +enc_huffman(<< 115, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#01000:5 >>); +enc_huffman(<< 116, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#01001:5 >>); +enc_huffman(<< 117, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#101101:6 >>); +enc_huffman(<< 118, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1110111:7 >>); +enc_huffman(<< 119, R/bits >>, A) -> enc_huffman(R, << 
A/bits, 2#1111000:7 >>); +enc_huffman(<< 120, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111001:7 >>); +enc_huffman(<< 121, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111010:7 >>); +enc_huffman(<< 122, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111011:7 >>); +enc_huffman(<< 123, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111110:15 >>); +enc_huffman(<< 124, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111100:11 >>); +enc_huffman(<< 125, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111101:14 >>); +enc_huffman(<< 126, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111101:13 >>); +enc_huffman(<< 127, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111111100:28 >>); +enc_huffman(<< 128, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111100110:20 >>); +enc_huffman(<< 129, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111010010:22 >>); +enc_huffman(<< 130, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111100111:20 >>); +enc_huffman(<< 131, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111101000:20 >>); +enc_huffman(<< 132, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111010011:22 >>); +enc_huffman(<< 133, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111010100:22 >>); +enc_huffman(<< 134, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111010101:22 >>); +enc_huffman(<< 135, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111011001:23 >>); +enc_huffman(<< 136, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111010110:22 >>); +enc_huffman(<< 137, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111011010:23 >>); +enc_huffman(<< 138, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111011011:23 >>); +enc_huffman(<< 139, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111011100:23 >>); +enc_huffman(<< 140, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111011101:23 >>); +enc_huffman(<< 141, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111011110:23 >>); +enc_huffman(<< 142, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111101011:24 >>); +enc_huffman(<< 143, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111011111:23 >>); +enc_huffman(<< 144, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111101100:24 >>); +enc_huffman(<< 145, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111101101:24 >>); +enc_huffman(<< 146, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111010111:22 >>); +enc_huffman(<< 147, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111100000:23 >>); +enc_huffman(<< 148, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111101110:24 >>); +enc_huffman(<< 149, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111100001:23 >>); +enc_huffman(<< 150, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111100010:23 >>); +enc_huffman(<< 151, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111100011:23 >>); +enc_huffman(<< 152, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111100100:23 >>); +enc_huffman(<< 153, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111011100:21 >>); +enc_huffman(<< 154, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111011000:22 >>); +enc_huffman(<< 155, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111100101:23 >>); +enc_huffman(<< 156, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111011001:22 >>); 
+enc_huffman(<< 157, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111100110:23 >>); +enc_huffman(<< 158, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111100111:23 >>); +enc_huffman(<< 159, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111101111:24 >>); +enc_huffman(<< 160, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111011010:22 >>); +enc_huffman(<< 161, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111011101:21 >>); +enc_huffman(<< 162, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111101001:20 >>); +enc_huffman(<< 163, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111011011:22 >>); +enc_huffman(<< 164, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111011100:22 >>); +enc_huffman(<< 165, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111101000:23 >>); +enc_huffman(<< 166, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111101001:23 >>); +enc_huffman(<< 167, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111011110:21 >>); +enc_huffman(<< 168, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111101010:23 >>); +enc_huffman(<< 169, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111011101:22 >>); +enc_huffman(<< 170, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111011110:22 >>); +enc_huffman(<< 171, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111110000:24 >>); +enc_huffman(<< 172, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111011111:21 >>); +enc_huffman(<< 173, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111011111:22 >>); +enc_huffman(<< 174, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111101011:23 >>); +enc_huffman(<< 175, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111101100:23 >>); +enc_huffman(<< 176, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111100000:21 >>); +enc_huffman(<< 177, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111100001:21 >>); +enc_huffman(<< 178, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111100000:22 >>); +enc_huffman(<< 179, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111100010:21 >>); +enc_huffman(<< 180, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111101101:23 >>); +enc_huffman(<< 181, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111100001:22 >>); +enc_huffman(<< 182, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111101110:23 >>); +enc_huffman(<< 183, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111101111:23 >>); +enc_huffman(<< 184, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111101010:20 >>); +enc_huffman(<< 185, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111100010:22 >>); +enc_huffman(<< 186, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111100011:22 >>); +enc_huffman(<< 187, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111100100:22 >>); +enc_huffman(<< 188, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111110000:23 >>); +enc_huffman(<< 189, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111100101:22 >>); +enc_huffman(<< 190, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111100110:22 >>); +enc_huffman(<< 191, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111110001:23 >>); +enc_huffman(<< 192, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111100000:26 >>); +enc_huffman(<< 193, R/bits >>, A) -> enc_huffman(R, 
<< A/bits, 2#11111111111111111111100001:26 >>); +enc_huffman(<< 194, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111101011:20 >>); +enc_huffman(<< 195, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111110001:19 >>); +enc_huffman(<< 196, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111100111:22 >>); +enc_huffman(<< 197, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111110010:23 >>); +enc_huffman(<< 198, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111101000:22 >>); +enc_huffman(<< 199, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111101100:25 >>); +enc_huffman(<< 200, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111100010:26 >>); +enc_huffman(<< 201, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111100011:26 >>); +enc_huffman(<< 202, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111100100:26 >>); +enc_huffman(<< 203, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111011110:27 >>); +enc_huffman(<< 204, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111011111:27 >>); +enc_huffman(<< 205, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111100101:26 >>); +enc_huffman(<< 206, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111110001:24 >>); +enc_huffman(<< 207, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111101101:25 >>); +enc_huffman(<< 208, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111110010:19 >>); +enc_huffman(<< 209, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111100011:21 >>); +enc_huffman(<< 210, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111100110:26 >>); +enc_huffman(<< 211, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111100000:27 >>); +enc_huffman(<< 212, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111100001:27 >>); +enc_huffman(<< 213, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111100111:26 >>); +enc_huffman(<< 214, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111100010:27 >>); +enc_huffman(<< 215, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111110010:24 >>); +enc_huffman(<< 216, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111100100:21 >>); +enc_huffman(<< 217, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111100101:21 >>); +enc_huffman(<< 218, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111101000:26 >>); +enc_huffman(<< 219, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111101001:26 >>); +enc_huffman(<< 220, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111111101:28 >>); +enc_huffman(<< 221, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111100011:27 >>); +enc_huffman(<< 222, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111100100:27 >>); +enc_huffman(<< 223, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111100101:27 >>); +enc_huffman(<< 224, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111101100:20 >>); +enc_huffman(<< 225, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111110011:24 >>); +enc_huffman(<< 226, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111101101:20 >>); +enc_huffman(<< 227, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111100110:21 >>); +enc_huffman(<< 228, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111101001:22 >>); +enc_huffman(<< 229, R/bits >>, A) 
-> enc_huffman(R, << A/bits, 2#111111111111111100111:21 >>); +enc_huffman(<< 230, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111101000:21 >>); +enc_huffman(<< 231, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111110011:23 >>); +enc_huffman(<< 232, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111101010:22 >>); +enc_huffman(<< 233, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111101011:22 >>); +enc_huffman(<< 234, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111101110:25 >>); +enc_huffman(<< 235, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111101111:25 >>); +enc_huffman(<< 236, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111110100:24 >>); +enc_huffman(<< 237, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111110101:24 >>); +enc_huffman(<< 238, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111101010:26 >>); +enc_huffman(<< 239, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111110100:23 >>); +enc_huffman(<< 240, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111101011:26 >>); +enc_huffman(<< 241, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111100110:27 >>); +enc_huffman(<< 242, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111101100:26 >>); +enc_huffman(<< 243, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111101101:26 >>); +enc_huffman(<< 244, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111100111:27 >>); +enc_huffman(<< 245, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111101000:27 >>); +enc_huffman(<< 246, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111101001:27 >>); +enc_huffman(<< 247, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111101010:27 >>); +enc_huffman(<< 248, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111101011:27 >>); +enc_huffman(<< 249, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#1111111111111111111111111110:28 >>); +enc_huffman(<< 250, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111101100:27 >>); +enc_huffman(<< 251, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111101101:27 >>); +enc_huffman(<< 252, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111101110:27 >>); +enc_huffman(<< 253, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111101111:27 >>); +enc_huffman(<< 254, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#111111111111111111111110000:27 >>); +enc_huffman(<< 255, R/bits >>, A) -> enc_huffman(R, << A/bits, 2#11111111111111111111101110:26 >>). + +-ifdef(TEST). +req_encode_test() -> + %% First request (raw then huffman). + Headers1 = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"http">>}, + {<<":path">>, <<"/">>}, + {<<":authority">>, <<"www.example.com">>} + ], + {Raw1, State1} = encode(Headers1, init(), #{huffman => false}), + << 16#828684410f7777772e6578616d706c652e636f6d:160 >> = iolist_to_binary(Raw1), + {Huff1, State1} = encode(Headers1), + << 16#828684418cf1e3c2e5f23a6ba0ab90f4ff:136 >> = iolist_to_binary(Huff1), + #state{size=57, dyn_table=[{57,{<<":authority">>, <<"www.example.com">>}}]} = State1, + %% Second request (raw then huffman). 
+ Headers2 = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"http">>}, + {<<":path">>, <<"/">>}, + {<<":authority">>, <<"www.example.com">>}, + {<<"cache-control">>, <<"no-cache">>} + ], + {Raw2, State2} = encode(Headers2, State1, #{huffman => false}), + << 16#828684be58086e6f2d6361636865:112 >> = iolist_to_binary(Raw2), + {Huff2, State2} = encode(Headers2, State1), + << 16#828684be5886a8eb10649cbf:96 >> = iolist_to_binary(Huff2), + #state{size=110, dyn_table=[ + {53,{<<"cache-control">>, <<"no-cache">>}}, + {57,{<<":authority">>, <<"www.example.com">>}}]} = State2, + %% Third request (raw then huffman). + Headers3 = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"https">>}, + {<<":path">>, <<"/index.html">>}, + {<<":authority">>, <<"www.example.com">>}, + {<<"custom-key">>, <<"custom-value">>} + ], + {Raw3, State3} = encode(Headers3, State2, #{huffman => false}), + << 16#828785bf400a637573746f6d2d6b65790c637573746f6d2d76616c7565:232 >> = iolist_to_binary(Raw3), + {Huff3, State3} = encode(Headers3, State2), + << 16#828785bf408825a849e95ba97d7f8925a849e95bb8e8b4bf:192 >> = iolist_to_binary(Huff3), + #state{size=164, dyn_table=[ + {54,{<<"custom-key">>, <<"custom-value">>}}, + {53,{<<"cache-control">>, <<"no-cache">>}}, + {57,{<<":authority">>, <<"www.example.com">>}}]} = State3, + ok. + +resp_encode_test() -> + %% Use a max_size of 256 to trigger header evictions. + State0 = init(256), + %% First response (raw then huffman). + Headers1 = [ + {<<":status">>, <<"302">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}, + {<<"location">>, <<"https://www.example.com">>} + ], + {Raw1, State1} = encode(Headers1, State0, #{huffman => false}), + << 16#4803333032580770726976617465611d4d6f6e2c203231204f637420323031332032303a31333a323120474d546e1768747470733a2f2f7777772e6578616d706c652e636f6d:560 >> = iolist_to_binary(Raw1), + {Huff1, State1} = encode(Headers1, State0), + << 16#488264025885aec3771a4b6196d07abe941054d444a8200595040b8166e082a62d1bff6e919d29ad171863c78f0b97c8e9ae82ae43d3:432 >> = iolist_to_binary(Huff1), + #state{size=222, dyn_table=[ + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = State1, + %% Second response (raw then huffman). + Headers2 = [ + {<<":status">>, <<"307">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}, + {<<"location">>, <<"https://www.example.com">>} + ], + {Raw2, State2} = encode(Headers2, State1, #{huffman => false}), + << 16#4803333037c1c0bf:64 >> = iolist_to_binary(Raw2), + {Huff2, State2} = encode(Headers2, State1), + << 16#4883640effc1c0bf:64 >> = iolist_to_binary(Huff2), + #state{size=222, dyn_table=[ + {42,{<<":status">>, <<"307">>}}, + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}]} = State2, + %% Third response (raw then huffman). 
+ Headers3 = [ + {<<":status">>, <<"200">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:22 GMT">>}, + {<<"location">>, <<"https://www.example.com">>}, + {<<"content-encoding">>, <<"gzip">>}, + {<<"set-cookie">>, <<"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1">>} + ], + {Raw3, State3} = encode(Headers3, State2, #{huffman => false}), + << 16#88c1611d4d6f6e2c203231204f637420323031332032303a31333a323220474d54c05a04677a69707738666f6f3d4153444a4b48514b425a584f5157454f50495541585157454f49553b206d61782d6167653d333630303b2076657273696f6e3d31:784 >> = iolist_to_binary(Raw3), + {Huff3, State3} = encode(Headers3, State2), + << 16#88c16196d07abe941054d444a8200595040b8166e084a62d1bffc05a839bd9ab77ad94e7821dd7f2e6c7b335dfdfcd5b3960d5af27087f3672c1ab270fb5291f9587316065c003ed4ee5b1063d5007:632 >> = iolist_to_binary(Huff3), + #state{size=215, dyn_table=[ + {98,{<<"set-cookie">>, <<"foo=ASDJKHQKBZXOQWEOPIUAXQWEOIU; max-age=3600; version=1">>}}, + {52,{<<"content-encoding">>, <<"gzip">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:22 GMT">>}}]} = State3, + ok. + +%% This test assumes that table updates work correctly when decoding. +table_update_encode_test() -> + %% Use a max_size of 256 to trigger header evictions + %% when the code is not updating the max size. + DecState0 = EncState0 = init(256), + %% First response. + Headers1 = [ + {<<":status">>, <<"302">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}, + {<<"location">>, <<"https://www.example.com">>} + ], + {Encoded1, EncState1} = encode(Headers1, EncState0), + {Headers1, DecState1} = decode(iolist_to_binary(Encoded1), DecState0), + #state{size=222, configured_max_size=256, dyn_table=[ + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = DecState1, + #state{size=222, configured_max_size=256, dyn_table=[ + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = EncState1, + %% Set a new configured max_size to avoid header evictions. + DecState2 = set_max_size(512, DecState1), + EncState2 = set_max_size(512, EncState1), + %% Second response. + Headers2 = [ + {<<":status">>, <<"307">>}, + {<<"cache-control">>, <<"private">>}, + {<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}, + {<<"location">>, <<"https://www.example.com">>} + ], + {Encoded2, EncState3} = encode(Headers2, EncState2), + {Headers2, DecState3} = decode(iolist_to_binary(Encoded2), DecState2), + #state{size=264, max_size=512, dyn_table=[ + {42,{<<":status">>, <<"307">>}}, + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = DecState3, + #state{size=264, max_size=512, dyn_table=[ + {42,{<<":status">>, <<"307">>}}, + {63,{<<"location">>, <<"https://www.example.com">>}}, + {65,{<<"date">>, <<"Mon, 21 Oct 2013 20:13:21 GMT">>}}, + {52,{<<"cache-control">>, <<"private">>}}, + {42,{<<":status">>, <<"302">>}}]} = EncState3, + ok. + +%% Check that encode/2 is using the new table size after calling +%% set_max_size/1 and that adding entries larger than the max size +%% results in an empty table. 
+table_update_encode_max_size_0_test() -> + %% Encoding starts with default max size + EncState0 = init(), + %% Decoding starts with max size of 0 + DecState0 = init(0), + %% First request. + Headers1 = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"http">>}, + {<<":path">>, <<"/">>}, + {<<":authority">>, <<"www.example.com">>} + ], + {Encoded1, EncState1} = encode(Headers1, EncState0), + {Headers1, DecState1} = decode(iolist_to_binary(Encoded1), DecState0), + #state{size=57, dyn_table=[{57,{<<":authority">>, <<"www.example.com">>}}]} = EncState1, + #state{size=0, dyn_table=[]} = DecState1, + %% Settings received after the first request. + EncState2 = set_max_size(0, EncState1), + #state{configured_max_size=0, max_size=4096, + size=57, dyn_table=[{57,{<<":authority">>, <<"www.example.com">>}}]} = EncState2, + %% Second request. + Headers2 = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"http">>}, + {<<":path">>, <<"/">>}, + {<<":authority">>, <<"www.example.com">>}, + {<<"cache-control">>, <<"no-cache">>} + ], + {Encoded2, EncState3} = encode(Headers2, EncState2), + {Headers2, DecState2} = decode(iolist_to_binary(Encoded2), DecState1), + #state{configured_max_size=0, max_size=0, size=0, dyn_table=[]} = EncState3, + #state{size=0, dyn_table=[]} = DecState2, + ok. + +encode_iolist_test() -> + Headers = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"http">>}, + {<<":path">>, <<"/">>}, + {<<":authority">>, <<"www.example.com">>}, + {<<"content-type">>, [<<"image">>,<<"/">>,<<"png">>,<<>>]} + ], + {_, _} = encode(Headers), + ok. + +horse_encode_raw() -> + horse:repeat(20000, + do_horse_encode_raw() + ). + +do_horse_encode_raw() -> + Headers1 = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"http">>}, + {<<":path">>, <<"/">>}, + {<<":authority">>, <<"www.example.com">>} + ], + {_, State1} = encode(Headers1, init(), #{huffman => false}), + Headers2 = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"http">>}, + {<<":path">>, <<"/">>}, + {<<":authority">>, <<"www.example.com">>}, + {<<"cache-control">>, <<"no-cache">>} + ], + {_, State2} = encode(Headers2, State1, #{huffman => false}), + Headers3 = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"https">>}, + {<<":path">>, <<"/index.html">>}, + {<<":authority">>, <<"www.example.com">>}, + {<<"custom-key">>, <<"custom-value">>} + ], + {_, _} = encode(Headers3, State2, #{huffman => false}), + ok. + +horse_encode_huffman() -> + horse:repeat(20000, + do_horse_encode_huffman() + ). + +do_horse_encode_huffman() -> + Headers1 = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"http">>}, + {<<":path">>, <<"/">>}, + {<<":authority">>, <<"www.example.com">>} + ], + {_, State1} = encode(Headers1), + Headers2 = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"http">>}, + {<<":path">>, <<"/">>}, + {<<":authority">>, <<"www.example.com">>}, + {<<"cache-control">>, <<"no-cache">>} + ], + {_, State2} = encode(Headers2, State1), + Headers3 = [ + {<<":method">>, <<"GET">>}, + {<<":scheme">>, <<"https">>}, + {<<":path">>, <<"/index.html">>}, + {<<":authority">>, <<"www.example.com">>}, + {<<"custom-key">>, <<"custom-value">>} + ], + {_, _} = encode(Headers3, State2), + ok. +-endif. + +%% Static and dynamic tables. + +%% @todo There must be a more efficient way. +table_find(Header = {Name, _}, State) -> + case table_find_field(Header, State) of + not_found -> + case table_find_name(Name, State) of + NotFound = not_found -> + NotFound; + Found -> + {name, Found} + end; + Found -> + {field, Found} + end. 
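+
+%% Editor's note (illustrative, not part of the upstream cowlib source):
+%% with an empty dynamic table, table_find/2 resolves against the RFC 7541
+%% static table defined below, preferring an exact name/value match over a
+%% name-only match:
+%%
+%%   {field, 2} = table_find({<<":method">>, <<"GET">>}, init()),
+%%   {name, 58} = table_find({<<"user-agent">>, <<"curl/7.79">>}, init()),
+%%   not_found  = table_find({<<"x-custom">>, <<"1">>}, init()).
+%%
+%% The encoder uses these three outcomes to pick between the indexed,
+%% indexed-name literal and new-name literal representations in encode/4.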
+ +table_find_field({<<":authority">>, <<>>}, _) -> 1; +table_find_field({<<":method">>, <<"GET">>}, _) -> 2; +table_find_field({<<":method">>, <<"POST">>}, _) -> 3; +table_find_field({<<":path">>, <<"/">>}, _) -> 4; +table_find_field({<<":path">>, <<"/index.html">>}, _) -> 5; +table_find_field({<<":scheme">>, <<"http">>}, _) -> 6; +table_find_field({<<":scheme">>, <<"https">>}, _) -> 7; +table_find_field({<<":status">>, <<"200">>}, _) -> 8; +table_find_field({<<":status">>, <<"204">>}, _) -> 9; +table_find_field({<<":status">>, <<"206">>}, _) -> 10; +table_find_field({<<":status">>, <<"304">>}, _) -> 11; +table_find_field({<<":status">>, <<"400">>}, _) -> 12; +table_find_field({<<":status">>, <<"404">>}, _) -> 13; +table_find_field({<<":status">>, <<"500">>}, _) -> 14; +table_find_field({<<"accept-charset">>, <<>>}, _) -> 15; +table_find_field({<<"accept-encoding">>, <<"gzip, deflate">>}, _) -> 16; +table_find_field({<<"accept-language">>, <<>>}, _) -> 17; +table_find_field({<<"accept-ranges">>, <<>>}, _) -> 18; +table_find_field({<<"accept">>, <<>>}, _) -> 19; +table_find_field({<<"access-control-allow-origin">>, <<>>}, _) -> 20; +table_find_field({<<"age">>, <<>>}, _) -> 21; +table_find_field({<<"allow">>, <<>>}, _) -> 22; +table_find_field({<<"authorization">>, <<>>}, _) -> 23; +table_find_field({<<"cache-control">>, <<>>}, _) -> 24; +table_find_field({<<"content-disposition">>, <<>>}, _) -> 25; +table_find_field({<<"content-encoding">>, <<>>}, _) -> 26; +table_find_field({<<"content-language">>, <<>>}, _) -> 27; +table_find_field({<<"content-length">>, <<>>}, _) -> 28; +table_find_field({<<"content-location">>, <<>>}, _) -> 29; +table_find_field({<<"content-range">>, <<>>}, _) -> 30; +table_find_field({<<"content-type">>, <<>>}, _) -> 31; +table_find_field({<<"cookie">>, <<>>}, _) -> 32; +table_find_field({<<"date">>, <<>>}, _) -> 33; +table_find_field({<<"etag">>, <<>>}, _) -> 34; +table_find_field({<<"expect">>, <<>>}, _) -> 35; +table_find_field({<<"expires">>, <<>>}, _) -> 36; +table_find_field({<<"from">>, <<>>}, _) -> 37; +table_find_field({<<"host">>, <<>>}, _) -> 38; +table_find_field({<<"if-match">>, <<>>}, _) -> 39; +table_find_field({<<"if-modified-since">>, <<>>}, _) -> 40; +table_find_field({<<"if-none-match">>, <<>>}, _) -> 41; +table_find_field({<<"if-range">>, <<>>}, _) -> 42; +table_find_field({<<"if-unmodified-since">>, <<>>}, _) -> 43; +table_find_field({<<"last-modified">>, <<>>}, _) -> 44; +table_find_field({<<"link">>, <<>>}, _) -> 45; +table_find_field({<<"location">>, <<>>}, _) -> 46; +table_find_field({<<"max-forwards">>, <<>>}, _) -> 47; +table_find_field({<<"proxy-authenticate">>, <<>>}, _) -> 48; +table_find_field({<<"proxy-authorization">>, <<>>}, _) -> 49; +table_find_field({<<"range">>, <<>>}, _) -> 50; +table_find_field({<<"referer">>, <<>>}, _) -> 51; +table_find_field({<<"refresh">>, <<>>}, _) -> 52; +table_find_field({<<"retry-after">>, <<>>}, _) -> 53; +table_find_field({<<"server">>, <<>>}, _) -> 54; +table_find_field({<<"set-cookie">>, <<>>}, _) -> 55; +table_find_field({<<"strict-transport-security">>, <<>>}, _) -> 56; +table_find_field({<<"transfer-encoding">>, <<>>}, _) -> 57; +table_find_field({<<"user-agent">>, <<>>}, _) -> 58; +table_find_field({<<"vary">>, <<>>}, _) -> 59; +table_find_field({<<"via">>, <<>>}, _) -> 60; +table_find_field({<<"www-authenticate">>, <<>>}, _) -> 61; +table_find_field(Header, #state{dyn_table=DynamicTable}) -> + table_find_field_dyn(Header, DynamicTable, 62). 
+ +table_find_field_dyn(_, [], _) -> not_found; +table_find_field_dyn(Header, [{_, Header}|_], Index) -> Index; +table_find_field_dyn(Header, [_|Tail], Index) -> table_find_field_dyn(Header, Tail, Index + 1). + +table_find_name(<<":authority">>, _) -> 1; +table_find_name(<<":method">>, _) -> 2; +table_find_name(<<":path">>, _) -> 4; +table_find_name(<<":scheme">>, _) -> 6; +table_find_name(<<":status">>, _) -> 8; +table_find_name(<<"accept-charset">>, _) -> 15; +table_find_name(<<"accept-encoding">>, _) -> 16; +table_find_name(<<"accept-language">>, _) -> 17; +table_find_name(<<"accept-ranges">>, _) -> 18; +table_find_name(<<"accept">>, _) -> 19; +table_find_name(<<"access-control-allow-origin">>, _) -> 20; +table_find_name(<<"age">>, _) -> 21; +table_find_name(<<"allow">>, _) -> 22; +table_find_name(<<"authorization">>, _) -> 23; +table_find_name(<<"cache-control">>, _) -> 24; +table_find_name(<<"content-disposition">>, _) -> 25; +table_find_name(<<"content-encoding">>, _) -> 26; +table_find_name(<<"content-language">>, _) -> 27; +table_find_name(<<"content-length">>, _) -> 28; +table_find_name(<<"content-location">>, _) -> 29; +table_find_name(<<"content-range">>, _) -> 30; +table_find_name(<<"content-type">>, _) -> 31; +table_find_name(<<"cookie">>, _) -> 32; +table_find_name(<<"date">>, _) -> 33; +table_find_name(<<"etag">>, _) -> 34; +table_find_name(<<"expect">>, _) -> 35; +table_find_name(<<"expires">>, _) -> 36; +table_find_name(<<"from">>, _) -> 37; +table_find_name(<<"host">>, _) -> 38; +table_find_name(<<"if-match">>, _) -> 39; +table_find_name(<<"if-modified-since">>, _) -> 40; +table_find_name(<<"if-none-match">>, _) -> 41; +table_find_name(<<"if-range">>, _) -> 42; +table_find_name(<<"if-unmodified-since">>, _) -> 43; +table_find_name(<<"last-modified">>, _) -> 44; +table_find_name(<<"link">>, _) -> 45; +table_find_name(<<"location">>, _) -> 46; +table_find_name(<<"max-forwards">>, _) -> 47; +table_find_name(<<"proxy-authenticate">>, _) -> 48; +table_find_name(<<"proxy-authorization">>, _) -> 49; +table_find_name(<<"range">>, _) -> 50; +table_find_name(<<"referer">>, _) -> 51; +table_find_name(<<"refresh">>, _) -> 52; +table_find_name(<<"retry-after">>, _) -> 53; +table_find_name(<<"server">>, _) -> 54; +table_find_name(<<"set-cookie">>, _) -> 55; +table_find_name(<<"strict-transport-security">>, _) -> 56; +table_find_name(<<"transfer-encoding">>, _) -> 57; +table_find_name(<<"user-agent">>, _) -> 58; +table_find_name(<<"vary">>, _) -> 59; +table_find_name(<<"via">>, _) -> 60; +table_find_name(<<"www-authenticate">>, _) -> 61; +table_find_name(Name, #state{dyn_table=DynamicTable}) -> + table_find_name_dyn(Name, DynamicTable, 62). + +table_find_name_dyn(_, [], _) -> not_found; +table_find_name_dyn(Name, [{Name, _}|_], Index) -> Index; +table_find_name_dyn(Name, [_|Tail], Index) -> table_find_name_dyn(Name, Tail, Index + 1). 
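+
+%% Editor's note (illustrative, not part of the upstream cowlib source):
+%% dynamic table entries are addressed after the 61 static entries, newest
+%% first, which is why the *_dyn helpers above start counting at index 62.
+%% For example, after the three requests in req_decode_test/0 the dynamic
+%% table holds, from index 62 upwards:
+%%
+%%   62 -> {<<"custom-key">>, <<"custom-value">>}
+%%   63 -> {<<"cache-control">>, <<"no-cache">>}
+%%   64 -> {<<":authority">>, <<"www.example.com">>}
+%%
+%% so an older entry's index grows by one each time a new entry is inserted.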
+ +table_get(1, _) -> {<<":authority">>, <<>>}; +table_get(2, _) -> {<<":method">>, <<"GET">>}; +table_get(3, _) -> {<<":method">>, <<"POST">>}; +table_get(4, _) -> {<<":path">>, <<"/">>}; +table_get(5, _) -> {<<":path">>, <<"/index.html">>}; +table_get(6, _) -> {<<":scheme">>, <<"http">>}; +table_get(7, _) -> {<<":scheme">>, <<"https">>}; +table_get(8, _) -> {<<":status">>, <<"200">>}; +table_get(9, _) -> {<<":status">>, <<"204">>}; +table_get(10, _) -> {<<":status">>, <<"206">>}; +table_get(11, _) -> {<<":status">>, <<"304">>}; +table_get(12, _) -> {<<":status">>, <<"400">>}; +table_get(13, _) -> {<<":status">>, <<"404">>}; +table_get(14, _) -> {<<":status">>, <<"500">>}; +table_get(15, _) -> {<<"accept-charset">>, <<>>}; +table_get(16, _) -> {<<"accept-encoding">>, <<"gzip, deflate">>}; +table_get(17, _) -> {<<"accept-language">>, <<>>}; +table_get(18, _) -> {<<"accept-ranges">>, <<>>}; +table_get(19, _) -> {<<"accept">>, <<>>}; +table_get(20, _) -> {<<"access-control-allow-origin">>, <<>>}; +table_get(21, _) -> {<<"age">>, <<>>}; +table_get(22, _) -> {<<"allow">>, <<>>}; +table_get(23, _) -> {<<"authorization">>, <<>>}; +table_get(24, _) -> {<<"cache-control">>, <<>>}; +table_get(25, _) -> {<<"content-disposition">>, <<>>}; +table_get(26, _) -> {<<"content-encoding">>, <<>>}; +table_get(27, _) -> {<<"content-language">>, <<>>}; +table_get(28, _) -> {<<"content-length">>, <<>>}; +table_get(29, _) -> {<<"content-location">>, <<>>}; +table_get(30, _) -> {<<"content-range">>, <<>>}; +table_get(31, _) -> {<<"content-type">>, <<>>}; +table_get(32, _) -> {<<"cookie">>, <<>>}; +table_get(33, _) -> {<<"date">>, <<>>}; +table_get(34, _) -> {<<"etag">>, <<>>}; +table_get(35, _) -> {<<"expect">>, <<>>}; +table_get(36, _) -> {<<"expires">>, <<>>}; +table_get(37, _) -> {<<"from">>, <<>>}; +table_get(38, _) -> {<<"host">>, <<>>}; +table_get(39, _) -> {<<"if-match">>, <<>>}; +table_get(40, _) -> {<<"if-modified-since">>, <<>>}; +table_get(41, _) -> {<<"if-none-match">>, <<>>}; +table_get(42, _) -> {<<"if-range">>, <<>>}; +table_get(43, _) -> {<<"if-unmodified-since">>, <<>>}; +table_get(44, _) -> {<<"last-modified">>, <<>>}; +table_get(45, _) -> {<<"link">>, <<>>}; +table_get(46, _) -> {<<"location">>, <<>>}; +table_get(47, _) -> {<<"max-forwards">>, <<>>}; +table_get(48, _) -> {<<"proxy-authenticate">>, <<>>}; +table_get(49, _) -> {<<"proxy-authorization">>, <<>>}; +table_get(50, _) -> {<<"range">>, <<>>}; +table_get(51, _) -> {<<"referer">>, <<>>}; +table_get(52, _) -> {<<"refresh">>, <<>>}; +table_get(53, _) -> {<<"retry-after">>, <<>>}; +table_get(54, _) -> {<<"server">>, <<>>}; +table_get(55, _) -> {<<"set-cookie">>, <<>>}; +table_get(56, _) -> {<<"strict-transport-security">>, <<>>}; +table_get(57, _) -> {<<"transfer-encoding">>, <<>>}; +table_get(58, _) -> {<<"user-agent">>, <<>>}; +table_get(59, _) -> {<<"vary">>, <<>>}; +table_get(60, _) -> {<<"via">>, <<>>}; +table_get(61, _) -> {<<"www-authenticate">>, <<>>}; +table_get(Index, #state{dyn_table=DynamicTable}) -> + {_, Header} = lists:nth(Index - 61, DynamicTable), + Header. 
+ +table_get_name(1, _) -> <<":authority">>; +table_get_name(2, _) -> <<":method">>; +table_get_name(3, _) -> <<":method">>; +table_get_name(4, _) -> <<":path">>; +table_get_name(5, _) -> <<":path">>; +table_get_name(6, _) -> <<":scheme">>; +table_get_name(7, _) -> <<":scheme">>; +table_get_name(8, _) -> <<":status">>; +table_get_name(9, _) -> <<":status">>; +table_get_name(10, _) -> <<":status">>; +table_get_name(11, _) -> <<":status">>; +table_get_name(12, _) -> <<":status">>; +table_get_name(13, _) -> <<":status">>; +table_get_name(14, _) -> <<":status">>; +table_get_name(15, _) -> <<"accept-charset">>; +table_get_name(16, _) -> <<"accept-encoding">>; +table_get_name(17, _) -> <<"accept-language">>; +table_get_name(18, _) -> <<"accept-ranges">>; +table_get_name(19, _) -> <<"accept">>; +table_get_name(20, _) -> <<"access-control-allow-origin">>; +table_get_name(21, _) -> <<"age">>; +table_get_name(22, _) -> <<"allow">>; +table_get_name(23, _) -> <<"authorization">>; +table_get_name(24, _) -> <<"cache-control">>; +table_get_name(25, _) -> <<"content-disposition">>; +table_get_name(26, _) -> <<"content-encoding">>; +table_get_name(27, _) -> <<"content-language">>; +table_get_name(28, _) -> <<"content-length">>; +table_get_name(29, _) -> <<"content-location">>; +table_get_name(30, _) -> <<"content-range">>; +table_get_name(31, _) -> <<"content-type">>; +table_get_name(32, _) -> <<"cookie">>; +table_get_name(33, _) -> <<"date">>; +table_get_name(34, _) -> <<"etag">>; +table_get_name(35, _) -> <<"expect">>; +table_get_name(36, _) -> <<"expires">>; +table_get_name(37, _) -> <<"from">>; +table_get_name(38, _) -> <<"host">>; +table_get_name(39, _) -> <<"if-match">>; +table_get_name(40, _) -> <<"if-modified-since">>; +table_get_name(41, _) -> <<"if-none-match">>; +table_get_name(42, _) -> <<"if-range">>; +table_get_name(43, _) -> <<"if-unmodified-since">>; +table_get_name(44, _) -> <<"last-modified">>; +table_get_name(45, _) -> <<"link">>; +table_get_name(46, _) -> <<"location">>; +table_get_name(47, _) -> <<"max-forwards">>; +table_get_name(48, _) -> <<"proxy-authenticate">>; +table_get_name(49, _) -> <<"proxy-authorization">>; +table_get_name(50, _) -> <<"range">>; +table_get_name(51, _) -> <<"referer">>; +table_get_name(52, _) -> <<"refresh">>; +table_get_name(53, _) -> <<"retry-after">>; +table_get_name(54, _) -> <<"server">>; +table_get_name(55, _) -> <<"set-cookie">>; +table_get_name(56, _) -> <<"strict-transport-security">>; +table_get_name(57, _) -> <<"transfer-encoding">>; +table_get_name(58, _) -> <<"user-agent">>; +table_get_name(59, _) -> <<"vary">>; +table_get_name(60, _) -> <<"via">>; +table_get_name(61, _) -> <<"www-authenticate">>; +table_get_name(Index, #state{dyn_table=DynamicTable}) -> + {_, {Name, _}} = lists:nth(Index - 61, DynamicTable), + Name. 
+ +table_insert(Entry = {Name, Value}, State=#state{size=Size, max_size=MaxSize, dyn_table=DynamicTable}) -> + EntrySize = byte_size(Name) + byte_size(Value) + 32, + if + EntrySize + Size =< MaxSize -> + %% Add entry without eviction + State#state{size=Size + EntrySize, dyn_table=[{EntrySize, Entry}|DynamicTable]}; + EntrySize =< MaxSize -> + %% Evict, then add entry + {DynamicTable2, Size2} = table_resize(DynamicTable, MaxSize - EntrySize, 0, []), + State#state{size=Size2 + EntrySize, dyn_table=[{EntrySize, Entry}|DynamicTable2]}; + EntrySize > MaxSize -> + %% "an attempt to add an entry larger than the + %% maximum size causes the table to be emptied + %% of all existing entries and results in an + %% empty table" (RFC 7541, 4.4) + State#state{size=0, dyn_table=[]} + end. + +table_resize([], _, Size, Acc) -> + {lists:reverse(Acc), Size}; +table_resize([{EntrySize, _}|_], MaxSize, Size, Acc) when Size + EntrySize > MaxSize -> + {lists:reverse(Acc), Size}; +table_resize([Entry = {EntrySize, _}|Tail], MaxSize, Size, Acc) -> + table_resize(Tail, MaxSize, Size + EntrySize, [Entry|Acc]). + +table_update_size(0, State) -> + State#state{size=0, max_size=0, dyn_table=[]}; +table_update_size(MaxSize, State=#state{size=CurrentSize}) + when CurrentSize =< MaxSize -> + State#state{max_size=MaxSize}; +table_update_size(MaxSize, State=#state{dyn_table=DynTable}) -> + {DynTable2, Size} = table_resize(DynTable, MaxSize, 0, []), + State#state{size=Size, max_size=MaxSize, dyn_table=DynTable2}. + +-ifdef(TEST). +prop_str_raw() -> + ?FORALL(Str, binary(), begin + {Str, <<>>} =:= dec_str(iolist_to_binary(enc_str(Str, no_huffman))) + end). + +prop_str_huffman() -> + ?FORALL(Str, binary(), begin + {Str, <<>>} =:= dec_str(iolist_to_binary(enc_str(Str, huffman))) + end). +-endif. diff --git a/deps/cowlib/src/cow_hpack_dec_huffman_lookup.hrl b/deps/cowlib/src/cow_hpack_dec_huffman_lookup.hrl new file mode 100644 index 0000000..5ed4d39 --- /dev/null +++ b/deps/cowlib/src/cow_hpack_dec_huffman_lookup.hrl @@ -0,0 +1,4132 @@ +%% Copyright (c) 2019, Loïc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +%% This lookup function was created by converting the +%% table from Nginx[1] into a form better suitable for +%% Erlang/OTP. This particular table takes a byte-sized +%% state and 4 bits to determine whether to emit a +%% character and what the next state is. It is most +%% appropriate for Erlang/OTP because we can benefit +%% from binary pattern matching optimizations by +%% matching the binary one byte at a time, calling +%% this lookup function twice. This and similar +%% algorithms are discussed here[2] and there[3]. +%% +%% It is possible to write a lookup table taking +%% a full byte instead of just 4 bits, but this +%% would make this function take 65536 clauses instead +%% of the current 4096.
This could be done later +%% as a further optimization but might not yield +%% significant improvements. +%% +%% [1] https://hg.nginx.org/nginx/file/tip/src/http/v2/ngx_http_v2_huff_decode.c +%% [2] http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.9.4248&rep=rep1&type=pdf +%% [3] https://commandlinefanatic.com/cgi-bin/showarticle.cgi?article=art007 + +dec_huffman_lookup(16#00, 16#0) -> {more, undefined, 16#04}; +dec_huffman_lookup(16#00, 16#1) -> {more, undefined, 16#05}; +dec_huffman_lookup(16#00, 16#2) -> {more, undefined, 16#07}; +dec_huffman_lookup(16#00, 16#3) -> {more, undefined, 16#08}; +dec_huffman_lookup(16#00, 16#4) -> {more, undefined, 16#0b}; +dec_huffman_lookup(16#00, 16#5) -> {more, undefined, 16#0c}; +dec_huffman_lookup(16#00, 16#6) -> {more, undefined, 16#10}; +dec_huffman_lookup(16#00, 16#7) -> {more, undefined, 16#13}; +dec_huffman_lookup(16#00, 16#8) -> {more, undefined, 16#19}; +dec_huffman_lookup(16#00, 16#9) -> {more, undefined, 16#1c}; +dec_huffman_lookup(16#00, 16#a) -> {more, undefined, 16#20}; +dec_huffman_lookup(16#00, 16#b) -> {more, undefined, 16#23}; +dec_huffman_lookup(16#00, 16#c) -> {more, undefined, 16#2a}; +dec_huffman_lookup(16#00, 16#d) -> {more, undefined, 16#31}; +dec_huffman_lookup(16#00, 16#e) -> {more, undefined, 16#39}; +dec_huffman_lookup(16#00, 16#f) -> {ok, undefined, 16#40}; +dec_huffman_lookup(16#01, 16#0) -> {ok, 16#30, 16#00}; +dec_huffman_lookup(16#01, 16#1) -> {ok, 16#31, 16#00}; +dec_huffman_lookup(16#01, 16#2) -> {ok, 16#32, 16#00}; +dec_huffman_lookup(16#01, 16#3) -> {ok, 16#61, 16#00}; +dec_huffman_lookup(16#01, 16#4) -> {ok, 16#63, 16#00}; +dec_huffman_lookup(16#01, 16#5) -> {ok, 16#65, 16#00}; +dec_huffman_lookup(16#01, 16#6) -> {ok, 16#69, 16#00}; +dec_huffman_lookup(16#01, 16#7) -> {ok, 16#6f, 16#00}; +dec_huffman_lookup(16#01, 16#8) -> {ok, 16#73, 16#00}; +dec_huffman_lookup(16#01, 16#9) -> {ok, 16#74, 16#00}; +dec_huffman_lookup(16#01, 16#a) -> {more, undefined, 16#0d}; +dec_huffman_lookup(16#01, 16#b) -> {more, undefined, 16#0e}; +dec_huffman_lookup(16#01, 16#c) -> {more, undefined, 16#11}; +dec_huffman_lookup(16#01, 16#d) -> {more, undefined, 16#12}; +dec_huffman_lookup(16#01, 16#e) -> {more, undefined, 16#14}; +dec_huffman_lookup(16#01, 16#f) -> {more, undefined, 16#15}; +dec_huffman_lookup(16#02, 16#0) -> {more, 16#30, 16#01}; +dec_huffman_lookup(16#02, 16#1) -> {ok, 16#30, 16#16}; +dec_huffman_lookup(16#02, 16#2) -> {more, 16#31, 16#01}; +dec_huffman_lookup(16#02, 16#3) -> {ok, 16#31, 16#16}; +dec_huffman_lookup(16#02, 16#4) -> {more, 16#32, 16#01}; +dec_huffman_lookup(16#02, 16#5) -> {ok, 16#32, 16#16}; +dec_huffman_lookup(16#02, 16#6) -> {more, 16#61, 16#01}; +dec_huffman_lookup(16#02, 16#7) -> {ok, 16#61, 16#16}; +dec_huffman_lookup(16#02, 16#8) -> {more, 16#63, 16#01}; +dec_huffman_lookup(16#02, 16#9) -> {ok, 16#63, 16#16}; +dec_huffman_lookup(16#02, 16#a) -> {more, 16#65, 16#01}; +dec_huffman_lookup(16#02, 16#b) -> {ok, 16#65, 16#16}; +dec_huffman_lookup(16#02, 16#c) -> {more, 16#69, 16#01}; +dec_huffman_lookup(16#02, 16#d) -> {ok, 16#69, 16#16}; +dec_huffman_lookup(16#02, 16#e) -> {more, 16#6f, 16#01}; +dec_huffman_lookup(16#02, 16#f) -> {ok, 16#6f, 16#16}; +dec_huffman_lookup(16#03, 16#0) -> {more, 16#30, 16#02}; +dec_huffman_lookup(16#03, 16#1) -> {more, 16#30, 16#09}; +dec_huffman_lookup(16#03, 16#2) -> {more, 16#30, 16#17}; +dec_huffman_lookup(16#03, 16#3) -> {ok, 16#30, 16#28}; +dec_huffman_lookup(16#03, 16#4) -> {more, 16#31, 16#02}; +dec_huffman_lookup(16#03, 16#5) -> {more, 16#31, 16#09}; 
+dec_huffman_lookup(16#03, 16#6) -> {more, 16#31, 16#17}; +dec_huffman_lookup(16#03, 16#7) -> {ok, 16#31, 16#28}; +dec_huffman_lookup(16#03, 16#8) -> {more, 16#32, 16#02}; +dec_huffman_lookup(16#03, 16#9) -> {more, 16#32, 16#09}; +dec_huffman_lookup(16#03, 16#a) -> {more, 16#32, 16#17}; +dec_huffman_lookup(16#03, 16#b) -> {ok, 16#32, 16#28}; +dec_huffman_lookup(16#03, 16#c) -> {more, 16#61, 16#02}; +dec_huffman_lookup(16#03, 16#d) -> {more, 16#61, 16#09}; +dec_huffman_lookup(16#03, 16#e) -> {more, 16#61, 16#17}; +dec_huffman_lookup(16#03, 16#f) -> {ok, 16#61, 16#28}; +dec_huffman_lookup(16#04, 16#0) -> {more, 16#30, 16#03}; +dec_huffman_lookup(16#04, 16#1) -> {more, 16#30, 16#06}; +dec_huffman_lookup(16#04, 16#2) -> {more, 16#30, 16#0a}; +dec_huffman_lookup(16#04, 16#3) -> {more, 16#30, 16#0f}; +dec_huffman_lookup(16#04, 16#4) -> {more, 16#30, 16#18}; +dec_huffman_lookup(16#04, 16#5) -> {more, 16#30, 16#1f}; +dec_huffman_lookup(16#04, 16#6) -> {more, 16#30, 16#29}; +dec_huffman_lookup(16#04, 16#7) -> {ok, 16#30, 16#38}; +dec_huffman_lookup(16#04, 16#8) -> {more, 16#31, 16#03}; +dec_huffman_lookup(16#04, 16#9) -> {more, 16#31, 16#06}; +dec_huffman_lookup(16#04, 16#a) -> {more, 16#31, 16#0a}; +dec_huffman_lookup(16#04, 16#b) -> {more, 16#31, 16#0f}; +dec_huffman_lookup(16#04, 16#c) -> {more, 16#31, 16#18}; +dec_huffman_lookup(16#04, 16#d) -> {more, 16#31, 16#1f}; +dec_huffman_lookup(16#04, 16#e) -> {more, 16#31, 16#29}; +dec_huffman_lookup(16#04, 16#f) -> {ok, 16#31, 16#38}; +dec_huffman_lookup(16#05, 16#0) -> {more, 16#32, 16#03}; +dec_huffman_lookup(16#05, 16#1) -> {more, 16#32, 16#06}; +dec_huffman_lookup(16#05, 16#2) -> {more, 16#32, 16#0a}; +dec_huffman_lookup(16#05, 16#3) -> {more, 16#32, 16#0f}; +dec_huffman_lookup(16#05, 16#4) -> {more, 16#32, 16#18}; +dec_huffman_lookup(16#05, 16#5) -> {more, 16#32, 16#1f}; +dec_huffman_lookup(16#05, 16#6) -> {more, 16#32, 16#29}; +dec_huffman_lookup(16#05, 16#7) -> {ok, 16#32, 16#38}; +dec_huffman_lookup(16#05, 16#8) -> {more, 16#61, 16#03}; +dec_huffman_lookup(16#05, 16#9) -> {more, 16#61, 16#06}; +dec_huffman_lookup(16#05, 16#a) -> {more, 16#61, 16#0a}; +dec_huffman_lookup(16#05, 16#b) -> {more, 16#61, 16#0f}; +dec_huffman_lookup(16#05, 16#c) -> {more, 16#61, 16#18}; +dec_huffman_lookup(16#05, 16#d) -> {more, 16#61, 16#1f}; +dec_huffman_lookup(16#05, 16#e) -> {more, 16#61, 16#29}; +dec_huffman_lookup(16#05, 16#f) -> {ok, 16#61, 16#38}; +dec_huffman_lookup(16#06, 16#0) -> {more, 16#63, 16#02}; +dec_huffman_lookup(16#06, 16#1) -> {more, 16#63, 16#09}; +dec_huffman_lookup(16#06, 16#2) -> {more, 16#63, 16#17}; +dec_huffman_lookup(16#06, 16#3) -> {ok, 16#63, 16#28}; +dec_huffman_lookup(16#06, 16#4) -> {more, 16#65, 16#02}; +dec_huffman_lookup(16#06, 16#5) -> {more, 16#65, 16#09}; +dec_huffman_lookup(16#06, 16#6) -> {more, 16#65, 16#17}; +dec_huffman_lookup(16#06, 16#7) -> {ok, 16#65, 16#28}; +dec_huffman_lookup(16#06, 16#8) -> {more, 16#69, 16#02}; +dec_huffman_lookup(16#06, 16#9) -> {more, 16#69, 16#09}; +dec_huffman_lookup(16#06, 16#a) -> {more, 16#69, 16#17}; +dec_huffman_lookup(16#06, 16#b) -> {ok, 16#69, 16#28}; +dec_huffman_lookup(16#06, 16#c) -> {more, 16#6f, 16#02}; +dec_huffman_lookup(16#06, 16#d) -> {more, 16#6f, 16#09}; +dec_huffman_lookup(16#06, 16#e) -> {more, 16#6f, 16#17}; +dec_huffman_lookup(16#06, 16#f) -> {ok, 16#6f, 16#28}; +dec_huffman_lookup(16#07, 16#0) -> {more, 16#63, 16#03}; +dec_huffman_lookup(16#07, 16#1) -> {more, 16#63, 16#06}; +dec_huffman_lookup(16#07, 16#2) -> {more, 16#63, 16#0a}; +dec_huffman_lookup(16#07, 16#3) -> 
{more, 16#63, 16#0f}; +dec_huffman_lookup(16#07, 16#4) -> {more, 16#63, 16#18}; +dec_huffman_lookup(16#07, 16#5) -> {more, 16#63, 16#1f}; +dec_huffman_lookup(16#07, 16#6) -> {more, 16#63, 16#29}; +dec_huffman_lookup(16#07, 16#7) -> {ok, 16#63, 16#38}; +dec_huffman_lookup(16#07, 16#8) -> {more, 16#65, 16#03}; +dec_huffman_lookup(16#07, 16#9) -> {more, 16#65, 16#06}; +dec_huffman_lookup(16#07, 16#a) -> {more, 16#65, 16#0a}; +dec_huffman_lookup(16#07, 16#b) -> {more, 16#65, 16#0f}; +dec_huffman_lookup(16#07, 16#c) -> {more, 16#65, 16#18}; +dec_huffman_lookup(16#07, 16#d) -> {more, 16#65, 16#1f}; +dec_huffman_lookup(16#07, 16#e) -> {more, 16#65, 16#29}; +dec_huffman_lookup(16#07, 16#f) -> {ok, 16#65, 16#38}; +dec_huffman_lookup(16#08, 16#0) -> {more, 16#69, 16#03}; +dec_huffman_lookup(16#08, 16#1) -> {more, 16#69, 16#06}; +dec_huffman_lookup(16#08, 16#2) -> {more, 16#69, 16#0a}; +dec_huffman_lookup(16#08, 16#3) -> {more, 16#69, 16#0f}; +dec_huffman_lookup(16#08, 16#4) -> {more, 16#69, 16#18}; +dec_huffman_lookup(16#08, 16#5) -> {more, 16#69, 16#1f}; +dec_huffman_lookup(16#08, 16#6) -> {more, 16#69, 16#29}; +dec_huffman_lookup(16#08, 16#7) -> {ok, 16#69, 16#38}; +dec_huffman_lookup(16#08, 16#8) -> {more, 16#6f, 16#03}; +dec_huffman_lookup(16#08, 16#9) -> {more, 16#6f, 16#06}; +dec_huffman_lookup(16#08, 16#a) -> {more, 16#6f, 16#0a}; +dec_huffman_lookup(16#08, 16#b) -> {more, 16#6f, 16#0f}; +dec_huffman_lookup(16#08, 16#c) -> {more, 16#6f, 16#18}; +dec_huffman_lookup(16#08, 16#d) -> {more, 16#6f, 16#1f}; +dec_huffman_lookup(16#08, 16#e) -> {more, 16#6f, 16#29}; +dec_huffman_lookup(16#08, 16#f) -> {ok, 16#6f, 16#38}; +dec_huffman_lookup(16#09, 16#0) -> {more, 16#73, 16#01}; +dec_huffman_lookup(16#09, 16#1) -> {ok, 16#73, 16#16}; +dec_huffman_lookup(16#09, 16#2) -> {more, 16#74, 16#01}; +dec_huffman_lookup(16#09, 16#3) -> {ok, 16#74, 16#16}; +dec_huffman_lookup(16#09, 16#4) -> {ok, 16#20, 16#00}; +dec_huffman_lookup(16#09, 16#5) -> {ok, 16#25, 16#00}; +dec_huffman_lookup(16#09, 16#6) -> {ok, 16#2d, 16#00}; +dec_huffman_lookup(16#09, 16#7) -> {ok, 16#2e, 16#00}; +dec_huffman_lookup(16#09, 16#8) -> {ok, 16#2f, 16#00}; +dec_huffman_lookup(16#09, 16#9) -> {ok, 16#33, 16#00}; +dec_huffman_lookup(16#09, 16#a) -> {ok, 16#34, 16#00}; +dec_huffman_lookup(16#09, 16#b) -> {ok, 16#35, 16#00}; +dec_huffman_lookup(16#09, 16#c) -> {ok, 16#36, 16#00}; +dec_huffman_lookup(16#09, 16#d) -> {ok, 16#37, 16#00}; +dec_huffman_lookup(16#09, 16#e) -> {ok, 16#38, 16#00}; +dec_huffman_lookup(16#09, 16#f) -> {ok, 16#39, 16#00}; +dec_huffman_lookup(16#0a, 16#0) -> {more, 16#73, 16#02}; +dec_huffman_lookup(16#0a, 16#1) -> {more, 16#73, 16#09}; +dec_huffman_lookup(16#0a, 16#2) -> {more, 16#73, 16#17}; +dec_huffman_lookup(16#0a, 16#3) -> {ok, 16#73, 16#28}; +dec_huffman_lookup(16#0a, 16#4) -> {more, 16#74, 16#02}; +dec_huffman_lookup(16#0a, 16#5) -> {more, 16#74, 16#09}; +dec_huffman_lookup(16#0a, 16#6) -> {more, 16#74, 16#17}; +dec_huffman_lookup(16#0a, 16#7) -> {ok, 16#74, 16#28}; +dec_huffman_lookup(16#0a, 16#8) -> {more, 16#20, 16#01}; +dec_huffman_lookup(16#0a, 16#9) -> {ok, 16#20, 16#16}; +dec_huffman_lookup(16#0a, 16#a) -> {more, 16#25, 16#01}; +dec_huffman_lookup(16#0a, 16#b) -> {ok, 16#25, 16#16}; +dec_huffman_lookup(16#0a, 16#c) -> {more, 16#2d, 16#01}; +dec_huffman_lookup(16#0a, 16#d) -> {ok, 16#2d, 16#16}; +dec_huffman_lookup(16#0a, 16#e) -> {more, 16#2e, 16#01}; +dec_huffman_lookup(16#0a, 16#f) -> {ok, 16#2e, 16#16}; +dec_huffman_lookup(16#0b, 16#0) -> {more, 16#73, 16#03}; +dec_huffman_lookup(16#0b, 16#1) -> {more, 
16#73, 16#06}; +dec_huffman_lookup(16#0b, 16#2) -> {more, 16#73, 16#0a}; +dec_huffman_lookup(16#0b, 16#3) -> {more, 16#73, 16#0f}; +dec_huffman_lookup(16#0b, 16#4) -> {more, 16#73, 16#18}; +dec_huffman_lookup(16#0b, 16#5) -> {more, 16#73, 16#1f}; +dec_huffman_lookup(16#0b, 16#6) -> {more, 16#73, 16#29}; +dec_huffman_lookup(16#0b, 16#7) -> {ok, 16#73, 16#38}; +dec_huffman_lookup(16#0b, 16#8) -> {more, 16#74, 16#03}; +dec_huffman_lookup(16#0b, 16#9) -> {more, 16#74, 16#06}; +dec_huffman_lookup(16#0b, 16#a) -> {more, 16#74, 16#0a}; +dec_huffman_lookup(16#0b, 16#b) -> {more, 16#74, 16#0f}; +dec_huffman_lookup(16#0b, 16#c) -> {more, 16#74, 16#18}; +dec_huffman_lookup(16#0b, 16#d) -> {more, 16#74, 16#1f}; +dec_huffman_lookup(16#0b, 16#e) -> {more, 16#74, 16#29}; +dec_huffman_lookup(16#0b, 16#f) -> {ok, 16#74, 16#38}; +dec_huffman_lookup(16#0c, 16#0) -> {more, 16#20, 16#02}; +dec_huffman_lookup(16#0c, 16#1) -> {more, 16#20, 16#09}; +dec_huffman_lookup(16#0c, 16#2) -> {more, 16#20, 16#17}; +dec_huffman_lookup(16#0c, 16#3) -> {ok, 16#20, 16#28}; +dec_huffman_lookup(16#0c, 16#4) -> {more, 16#25, 16#02}; +dec_huffman_lookup(16#0c, 16#5) -> {more, 16#25, 16#09}; +dec_huffman_lookup(16#0c, 16#6) -> {more, 16#25, 16#17}; +dec_huffman_lookup(16#0c, 16#7) -> {ok, 16#25, 16#28}; +dec_huffman_lookup(16#0c, 16#8) -> {more, 16#2d, 16#02}; +dec_huffman_lookup(16#0c, 16#9) -> {more, 16#2d, 16#09}; +dec_huffman_lookup(16#0c, 16#a) -> {more, 16#2d, 16#17}; +dec_huffman_lookup(16#0c, 16#b) -> {ok, 16#2d, 16#28}; +dec_huffman_lookup(16#0c, 16#c) -> {more, 16#2e, 16#02}; +dec_huffman_lookup(16#0c, 16#d) -> {more, 16#2e, 16#09}; +dec_huffman_lookup(16#0c, 16#e) -> {more, 16#2e, 16#17}; +dec_huffman_lookup(16#0c, 16#f) -> {ok, 16#2e, 16#28}; +dec_huffman_lookup(16#0d, 16#0) -> {more, 16#20, 16#03}; +dec_huffman_lookup(16#0d, 16#1) -> {more, 16#20, 16#06}; +dec_huffman_lookup(16#0d, 16#2) -> {more, 16#20, 16#0a}; +dec_huffman_lookup(16#0d, 16#3) -> {more, 16#20, 16#0f}; +dec_huffman_lookup(16#0d, 16#4) -> {more, 16#20, 16#18}; +dec_huffman_lookup(16#0d, 16#5) -> {more, 16#20, 16#1f}; +dec_huffman_lookup(16#0d, 16#6) -> {more, 16#20, 16#29}; +dec_huffman_lookup(16#0d, 16#7) -> {ok, 16#20, 16#38}; +dec_huffman_lookup(16#0d, 16#8) -> {more, 16#25, 16#03}; +dec_huffman_lookup(16#0d, 16#9) -> {more, 16#25, 16#06}; +dec_huffman_lookup(16#0d, 16#a) -> {more, 16#25, 16#0a}; +dec_huffman_lookup(16#0d, 16#b) -> {more, 16#25, 16#0f}; +dec_huffman_lookup(16#0d, 16#c) -> {more, 16#25, 16#18}; +dec_huffman_lookup(16#0d, 16#d) -> {more, 16#25, 16#1f}; +dec_huffman_lookup(16#0d, 16#e) -> {more, 16#25, 16#29}; +dec_huffman_lookup(16#0d, 16#f) -> {ok, 16#25, 16#38}; +dec_huffman_lookup(16#0e, 16#0) -> {more, 16#2d, 16#03}; +dec_huffman_lookup(16#0e, 16#1) -> {more, 16#2d, 16#06}; +dec_huffman_lookup(16#0e, 16#2) -> {more, 16#2d, 16#0a}; +dec_huffman_lookup(16#0e, 16#3) -> {more, 16#2d, 16#0f}; +dec_huffman_lookup(16#0e, 16#4) -> {more, 16#2d, 16#18}; +dec_huffman_lookup(16#0e, 16#5) -> {more, 16#2d, 16#1f}; +dec_huffman_lookup(16#0e, 16#6) -> {more, 16#2d, 16#29}; +dec_huffman_lookup(16#0e, 16#7) -> {ok, 16#2d, 16#38}; +dec_huffman_lookup(16#0e, 16#8) -> {more, 16#2e, 16#03}; +dec_huffman_lookup(16#0e, 16#9) -> {more, 16#2e, 16#06}; +dec_huffman_lookup(16#0e, 16#a) -> {more, 16#2e, 16#0a}; +dec_huffman_lookup(16#0e, 16#b) -> {more, 16#2e, 16#0f}; +dec_huffman_lookup(16#0e, 16#c) -> {more, 16#2e, 16#18}; +dec_huffman_lookup(16#0e, 16#d) -> {more, 16#2e, 16#1f}; +dec_huffman_lookup(16#0e, 16#e) -> {more, 16#2e, 16#29}; 
+dec_huffman_lookup(16#0e, 16#f) -> {ok, 16#2e, 16#38}; +dec_huffman_lookup(16#0f, 16#0) -> {more, 16#2f, 16#01}; +dec_huffman_lookup(16#0f, 16#1) -> {ok, 16#2f, 16#16}; +dec_huffman_lookup(16#0f, 16#2) -> {more, 16#33, 16#01}; +dec_huffman_lookup(16#0f, 16#3) -> {ok, 16#33, 16#16}; +dec_huffman_lookup(16#0f, 16#4) -> {more, 16#34, 16#01}; +dec_huffman_lookup(16#0f, 16#5) -> {ok, 16#34, 16#16}; +dec_huffman_lookup(16#0f, 16#6) -> {more, 16#35, 16#01}; +dec_huffman_lookup(16#0f, 16#7) -> {ok, 16#35, 16#16}; +dec_huffman_lookup(16#0f, 16#8) -> {more, 16#36, 16#01}; +dec_huffman_lookup(16#0f, 16#9) -> {ok, 16#36, 16#16}; +dec_huffman_lookup(16#0f, 16#a) -> {more, 16#37, 16#01}; +dec_huffman_lookup(16#0f, 16#b) -> {ok, 16#37, 16#16}; +dec_huffman_lookup(16#0f, 16#c) -> {more, 16#38, 16#01}; +dec_huffman_lookup(16#0f, 16#d) -> {ok, 16#38, 16#16}; +dec_huffman_lookup(16#0f, 16#e) -> {more, 16#39, 16#01}; +dec_huffman_lookup(16#0f, 16#f) -> {ok, 16#39, 16#16}; +dec_huffman_lookup(16#10, 16#0) -> {more, 16#2f, 16#02}; +dec_huffman_lookup(16#10, 16#1) -> {more, 16#2f, 16#09}; +dec_huffman_lookup(16#10, 16#2) -> {more, 16#2f, 16#17}; +dec_huffman_lookup(16#10, 16#3) -> {ok, 16#2f, 16#28}; +dec_huffman_lookup(16#10, 16#4) -> {more, 16#33, 16#02}; +dec_huffman_lookup(16#10, 16#5) -> {more, 16#33, 16#09}; +dec_huffman_lookup(16#10, 16#6) -> {more, 16#33, 16#17}; +dec_huffman_lookup(16#10, 16#7) -> {ok, 16#33, 16#28}; +dec_huffman_lookup(16#10, 16#8) -> {more, 16#34, 16#02}; +dec_huffman_lookup(16#10, 16#9) -> {more, 16#34, 16#09}; +dec_huffman_lookup(16#10, 16#a) -> {more, 16#34, 16#17}; +dec_huffman_lookup(16#10, 16#b) -> {ok, 16#34, 16#28}; +dec_huffman_lookup(16#10, 16#c) -> {more, 16#35, 16#02}; +dec_huffman_lookup(16#10, 16#d) -> {more, 16#35, 16#09}; +dec_huffman_lookup(16#10, 16#e) -> {more, 16#35, 16#17}; +dec_huffman_lookup(16#10, 16#f) -> {ok, 16#35, 16#28}; +dec_huffman_lookup(16#11, 16#0) -> {more, 16#2f, 16#03}; +dec_huffman_lookup(16#11, 16#1) -> {more, 16#2f, 16#06}; +dec_huffman_lookup(16#11, 16#2) -> {more, 16#2f, 16#0a}; +dec_huffman_lookup(16#11, 16#3) -> {more, 16#2f, 16#0f}; +dec_huffman_lookup(16#11, 16#4) -> {more, 16#2f, 16#18}; +dec_huffman_lookup(16#11, 16#5) -> {more, 16#2f, 16#1f}; +dec_huffman_lookup(16#11, 16#6) -> {more, 16#2f, 16#29}; +dec_huffman_lookup(16#11, 16#7) -> {ok, 16#2f, 16#38}; +dec_huffman_lookup(16#11, 16#8) -> {more, 16#33, 16#03}; +dec_huffman_lookup(16#11, 16#9) -> {more, 16#33, 16#06}; +dec_huffman_lookup(16#11, 16#a) -> {more, 16#33, 16#0a}; +dec_huffman_lookup(16#11, 16#b) -> {more, 16#33, 16#0f}; +dec_huffman_lookup(16#11, 16#c) -> {more, 16#33, 16#18}; +dec_huffman_lookup(16#11, 16#d) -> {more, 16#33, 16#1f}; +dec_huffman_lookup(16#11, 16#e) -> {more, 16#33, 16#29}; +dec_huffman_lookup(16#11, 16#f) -> {ok, 16#33, 16#38}; +dec_huffman_lookup(16#12, 16#0) -> {more, 16#34, 16#03}; +dec_huffman_lookup(16#12, 16#1) -> {more, 16#34, 16#06}; +dec_huffman_lookup(16#12, 16#2) -> {more, 16#34, 16#0a}; +dec_huffman_lookup(16#12, 16#3) -> {more, 16#34, 16#0f}; +dec_huffman_lookup(16#12, 16#4) -> {more, 16#34, 16#18}; +dec_huffman_lookup(16#12, 16#5) -> {more, 16#34, 16#1f}; +dec_huffman_lookup(16#12, 16#6) -> {more, 16#34, 16#29}; +dec_huffman_lookup(16#12, 16#7) -> {ok, 16#34, 16#38}; +dec_huffman_lookup(16#12, 16#8) -> {more, 16#35, 16#03}; +dec_huffman_lookup(16#12, 16#9) -> {more, 16#35, 16#06}; +dec_huffman_lookup(16#12, 16#a) -> {more, 16#35, 16#0a}; +dec_huffman_lookup(16#12, 16#b) -> {more, 16#35, 16#0f}; +dec_huffman_lookup(16#12, 16#c) -> {more, 
16#35, 16#18}; +dec_huffman_lookup(16#12, 16#d) -> {more, 16#35, 16#1f}; +dec_huffman_lookup(16#12, 16#e) -> {more, 16#35, 16#29}; +dec_huffman_lookup(16#12, 16#f) -> {ok, 16#35, 16#38}; +dec_huffman_lookup(16#13, 16#0) -> {more, 16#36, 16#02}; +dec_huffman_lookup(16#13, 16#1) -> {more, 16#36, 16#09}; +dec_huffman_lookup(16#13, 16#2) -> {more, 16#36, 16#17}; +dec_huffman_lookup(16#13, 16#3) -> {ok, 16#36, 16#28}; +dec_huffman_lookup(16#13, 16#4) -> {more, 16#37, 16#02}; +dec_huffman_lookup(16#13, 16#5) -> {more, 16#37, 16#09}; +dec_huffman_lookup(16#13, 16#6) -> {more, 16#37, 16#17}; +dec_huffman_lookup(16#13, 16#7) -> {ok, 16#37, 16#28}; +dec_huffman_lookup(16#13, 16#8) -> {more, 16#38, 16#02}; +dec_huffman_lookup(16#13, 16#9) -> {more, 16#38, 16#09}; +dec_huffman_lookup(16#13, 16#a) -> {more, 16#38, 16#17}; +dec_huffman_lookup(16#13, 16#b) -> {ok, 16#38, 16#28}; +dec_huffman_lookup(16#13, 16#c) -> {more, 16#39, 16#02}; +dec_huffman_lookup(16#13, 16#d) -> {more, 16#39, 16#09}; +dec_huffman_lookup(16#13, 16#e) -> {more, 16#39, 16#17}; +dec_huffman_lookup(16#13, 16#f) -> {ok, 16#39, 16#28}; +dec_huffman_lookup(16#14, 16#0) -> {more, 16#36, 16#03}; +dec_huffman_lookup(16#14, 16#1) -> {more, 16#36, 16#06}; +dec_huffman_lookup(16#14, 16#2) -> {more, 16#36, 16#0a}; +dec_huffman_lookup(16#14, 16#3) -> {more, 16#36, 16#0f}; +dec_huffman_lookup(16#14, 16#4) -> {more, 16#36, 16#18}; +dec_huffman_lookup(16#14, 16#5) -> {more, 16#36, 16#1f}; +dec_huffman_lookup(16#14, 16#6) -> {more, 16#36, 16#29}; +dec_huffman_lookup(16#14, 16#7) -> {ok, 16#36, 16#38}; +dec_huffman_lookup(16#14, 16#8) -> {more, 16#37, 16#03}; +dec_huffman_lookup(16#14, 16#9) -> {more, 16#37, 16#06}; +dec_huffman_lookup(16#14, 16#a) -> {more, 16#37, 16#0a}; +dec_huffman_lookup(16#14, 16#b) -> {more, 16#37, 16#0f}; +dec_huffman_lookup(16#14, 16#c) -> {more, 16#37, 16#18}; +dec_huffman_lookup(16#14, 16#d) -> {more, 16#37, 16#1f}; +dec_huffman_lookup(16#14, 16#e) -> {more, 16#37, 16#29}; +dec_huffman_lookup(16#14, 16#f) -> {ok, 16#37, 16#38}; +dec_huffman_lookup(16#15, 16#0) -> {more, 16#38, 16#03}; +dec_huffman_lookup(16#15, 16#1) -> {more, 16#38, 16#06}; +dec_huffman_lookup(16#15, 16#2) -> {more, 16#38, 16#0a}; +dec_huffman_lookup(16#15, 16#3) -> {more, 16#38, 16#0f}; +dec_huffman_lookup(16#15, 16#4) -> {more, 16#38, 16#18}; +dec_huffman_lookup(16#15, 16#5) -> {more, 16#38, 16#1f}; +dec_huffman_lookup(16#15, 16#6) -> {more, 16#38, 16#29}; +dec_huffman_lookup(16#15, 16#7) -> {ok, 16#38, 16#38}; +dec_huffman_lookup(16#15, 16#8) -> {more, 16#39, 16#03}; +dec_huffman_lookup(16#15, 16#9) -> {more, 16#39, 16#06}; +dec_huffman_lookup(16#15, 16#a) -> {more, 16#39, 16#0a}; +dec_huffman_lookup(16#15, 16#b) -> {more, 16#39, 16#0f}; +dec_huffman_lookup(16#15, 16#c) -> {more, 16#39, 16#18}; +dec_huffman_lookup(16#15, 16#d) -> {more, 16#39, 16#1f}; +dec_huffman_lookup(16#15, 16#e) -> {more, 16#39, 16#29}; +dec_huffman_lookup(16#15, 16#f) -> {ok, 16#39, 16#38}; +dec_huffman_lookup(16#16, 16#0) -> {more, undefined, 16#1a}; +dec_huffman_lookup(16#16, 16#1) -> {more, undefined, 16#1b}; +dec_huffman_lookup(16#16, 16#2) -> {more, undefined, 16#1d}; +dec_huffman_lookup(16#16, 16#3) -> {more, undefined, 16#1e}; +dec_huffman_lookup(16#16, 16#4) -> {more, undefined, 16#21}; +dec_huffman_lookup(16#16, 16#5) -> {more, undefined, 16#22}; +dec_huffman_lookup(16#16, 16#6) -> {more, undefined, 16#24}; +dec_huffman_lookup(16#16, 16#7) -> {more, undefined, 16#25}; +dec_huffman_lookup(16#16, 16#8) -> {more, undefined, 16#2b}; +dec_huffman_lookup(16#16, 16#9) -> 
{more, undefined, 16#2e}; +dec_huffman_lookup(16#16, 16#a) -> {more, undefined, 16#32}; +dec_huffman_lookup(16#16, 16#b) -> {more, undefined, 16#35}; +dec_huffman_lookup(16#16, 16#c) -> {more, undefined, 16#3a}; +dec_huffman_lookup(16#16, 16#d) -> {more, undefined, 16#3d}; +dec_huffman_lookup(16#16, 16#e) -> {more, undefined, 16#41}; +dec_huffman_lookup(16#16, 16#f) -> {ok, undefined, 16#44}; +dec_huffman_lookup(16#17, 16#0) -> {ok, 16#3d, 16#00}; +dec_huffman_lookup(16#17, 16#1) -> {ok, 16#41, 16#00}; +dec_huffman_lookup(16#17, 16#2) -> {ok, 16#5f, 16#00}; +dec_huffman_lookup(16#17, 16#3) -> {ok, 16#62, 16#00}; +dec_huffman_lookup(16#17, 16#4) -> {ok, 16#64, 16#00}; +dec_huffman_lookup(16#17, 16#5) -> {ok, 16#66, 16#00}; +dec_huffman_lookup(16#17, 16#6) -> {ok, 16#67, 16#00}; +dec_huffman_lookup(16#17, 16#7) -> {ok, 16#68, 16#00}; +dec_huffman_lookup(16#17, 16#8) -> {ok, 16#6c, 16#00}; +dec_huffman_lookup(16#17, 16#9) -> {ok, 16#6d, 16#00}; +dec_huffman_lookup(16#17, 16#a) -> {ok, 16#6e, 16#00}; +dec_huffman_lookup(16#17, 16#b) -> {ok, 16#70, 16#00}; +dec_huffman_lookup(16#17, 16#c) -> {ok, 16#72, 16#00}; +dec_huffman_lookup(16#17, 16#d) -> {ok, 16#75, 16#00}; +dec_huffman_lookup(16#17, 16#e) -> {more, undefined, 16#26}; +dec_huffman_lookup(16#17, 16#f) -> {more, undefined, 16#27}; +dec_huffman_lookup(16#18, 16#0) -> {more, 16#3d, 16#01}; +dec_huffman_lookup(16#18, 16#1) -> {ok, 16#3d, 16#16}; +dec_huffman_lookup(16#18, 16#2) -> {more, 16#41, 16#01}; +dec_huffman_lookup(16#18, 16#3) -> {ok, 16#41, 16#16}; +dec_huffman_lookup(16#18, 16#4) -> {more, 16#5f, 16#01}; +dec_huffman_lookup(16#18, 16#5) -> {ok, 16#5f, 16#16}; +dec_huffman_lookup(16#18, 16#6) -> {more, 16#62, 16#01}; +dec_huffman_lookup(16#18, 16#7) -> {ok, 16#62, 16#16}; +dec_huffman_lookup(16#18, 16#8) -> {more, 16#64, 16#01}; +dec_huffman_lookup(16#18, 16#9) -> {ok, 16#64, 16#16}; +dec_huffman_lookup(16#18, 16#a) -> {more, 16#66, 16#01}; +dec_huffman_lookup(16#18, 16#b) -> {ok, 16#66, 16#16}; +dec_huffman_lookup(16#18, 16#c) -> {more, 16#67, 16#01}; +dec_huffman_lookup(16#18, 16#d) -> {ok, 16#67, 16#16}; +dec_huffman_lookup(16#18, 16#e) -> {more, 16#68, 16#01}; +dec_huffman_lookup(16#18, 16#f) -> {ok, 16#68, 16#16}; +dec_huffman_lookup(16#19, 16#0) -> {more, 16#3d, 16#02}; +dec_huffman_lookup(16#19, 16#1) -> {more, 16#3d, 16#09}; +dec_huffman_lookup(16#19, 16#2) -> {more, 16#3d, 16#17}; +dec_huffman_lookup(16#19, 16#3) -> {ok, 16#3d, 16#28}; +dec_huffman_lookup(16#19, 16#4) -> {more, 16#41, 16#02}; +dec_huffman_lookup(16#19, 16#5) -> {more, 16#41, 16#09}; +dec_huffman_lookup(16#19, 16#6) -> {more, 16#41, 16#17}; +dec_huffman_lookup(16#19, 16#7) -> {ok, 16#41, 16#28}; +dec_huffman_lookup(16#19, 16#8) -> {more, 16#5f, 16#02}; +dec_huffman_lookup(16#19, 16#9) -> {more, 16#5f, 16#09}; +dec_huffman_lookup(16#19, 16#a) -> {more, 16#5f, 16#17}; +dec_huffman_lookup(16#19, 16#b) -> {ok, 16#5f, 16#28}; +dec_huffman_lookup(16#19, 16#c) -> {more, 16#62, 16#02}; +dec_huffman_lookup(16#19, 16#d) -> {more, 16#62, 16#09}; +dec_huffman_lookup(16#19, 16#e) -> {more, 16#62, 16#17}; +dec_huffman_lookup(16#19, 16#f) -> {ok, 16#62, 16#28}; +dec_huffman_lookup(16#1a, 16#0) -> {more, 16#3d, 16#03}; +dec_huffman_lookup(16#1a, 16#1) -> {more, 16#3d, 16#06}; +dec_huffman_lookup(16#1a, 16#2) -> {more, 16#3d, 16#0a}; +dec_huffman_lookup(16#1a, 16#3) -> {more, 16#3d, 16#0f}; +dec_huffman_lookup(16#1a, 16#4) -> {more, 16#3d, 16#18}; +dec_huffman_lookup(16#1a, 16#5) -> {more, 16#3d, 16#1f}; +dec_huffman_lookup(16#1a, 16#6) -> {more, 16#3d, 16#29}; 
+dec_huffman_lookup(16#1a, 16#7) -> {ok, 16#3d, 16#38}; +dec_huffman_lookup(16#1a, 16#8) -> {more, 16#41, 16#03}; +dec_huffman_lookup(16#1a, 16#9) -> {more, 16#41, 16#06}; +dec_huffman_lookup(16#1a, 16#a) -> {more, 16#41, 16#0a}; +dec_huffman_lookup(16#1a, 16#b) -> {more, 16#41, 16#0f}; +dec_huffman_lookup(16#1a, 16#c) -> {more, 16#41, 16#18}; +dec_huffman_lookup(16#1a, 16#d) -> {more, 16#41, 16#1f}; +dec_huffman_lookup(16#1a, 16#e) -> {more, 16#41, 16#29}; +dec_huffman_lookup(16#1a, 16#f) -> {ok, 16#41, 16#38}; +dec_huffman_lookup(16#1b, 16#0) -> {more, 16#5f, 16#03}; +dec_huffman_lookup(16#1b, 16#1) -> {more, 16#5f, 16#06}; +dec_huffman_lookup(16#1b, 16#2) -> {more, 16#5f, 16#0a}; +dec_huffman_lookup(16#1b, 16#3) -> {more, 16#5f, 16#0f}; +dec_huffman_lookup(16#1b, 16#4) -> {more, 16#5f, 16#18}; +dec_huffman_lookup(16#1b, 16#5) -> {more, 16#5f, 16#1f}; +dec_huffman_lookup(16#1b, 16#6) -> {more, 16#5f, 16#29}; +dec_huffman_lookup(16#1b, 16#7) -> {ok, 16#5f, 16#38}; +dec_huffman_lookup(16#1b, 16#8) -> {more, 16#62, 16#03}; +dec_huffman_lookup(16#1b, 16#9) -> {more, 16#62, 16#06}; +dec_huffman_lookup(16#1b, 16#a) -> {more, 16#62, 16#0a}; +dec_huffman_lookup(16#1b, 16#b) -> {more, 16#62, 16#0f}; +dec_huffman_lookup(16#1b, 16#c) -> {more, 16#62, 16#18}; +dec_huffman_lookup(16#1b, 16#d) -> {more, 16#62, 16#1f}; +dec_huffman_lookup(16#1b, 16#e) -> {more, 16#62, 16#29}; +dec_huffman_lookup(16#1b, 16#f) -> {ok, 16#62, 16#38}; +dec_huffman_lookup(16#1c, 16#0) -> {more, 16#64, 16#02}; +dec_huffman_lookup(16#1c, 16#1) -> {more, 16#64, 16#09}; +dec_huffman_lookup(16#1c, 16#2) -> {more, 16#64, 16#17}; +dec_huffman_lookup(16#1c, 16#3) -> {ok, 16#64, 16#28}; +dec_huffman_lookup(16#1c, 16#4) -> {more, 16#66, 16#02}; +dec_huffman_lookup(16#1c, 16#5) -> {more, 16#66, 16#09}; +dec_huffman_lookup(16#1c, 16#6) -> {more, 16#66, 16#17}; +dec_huffman_lookup(16#1c, 16#7) -> {ok, 16#66, 16#28}; +dec_huffman_lookup(16#1c, 16#8) -> {more, 16#67, 16#02}; +dec_huffman_lookup(16#1c, 16#9) -> {more, 16#67, 16#09}; +dec_huffman_lookup(16#1c, 16#a) -> {more, 16#67, 16#17}; +dec_huffman_lookup(16#1c, 16#b) -> {ok, 16#67, 16#28}; +dec_huffman_lookup(16#1c, 16#c) -> {more, 16#68, 16#02}; +dec_huffman_lookup(16#1c, 16#d) -> {more, 16#68, 16#09}; +dec_huffman_lookup(16#1c, 16#e) -> {more, 16#68, 16#17}; +dec_huffman_lookup(16#1c, 16#f) -> {ok, 16#68, 16#28}; +dec_huffman_lookup(16#1d, 16#0) -> {more, 16#64, 16#03}; +dec_huffman_lookup(16#1d, 16#1) -> {more, 16#64, 16#06}; +dec_huffman_lookup(16#1d, 16#2) -> {more, 16#64, 16#0a}; +dec_huffman_lookup(16#1d, 16#3) -> {more, 16#64, 16#0f}; +dec_huffman_lookup(16#1d, 16#4) -> {more, 16#64, 16#18}; +dec_huffman_lookup(16#1d, 16#5) -> {more, 16#64, 16#1f}; +dec_huffman_lookup(16#1d, 16#6) -> {more, 16#64, 16#29}; +dec_huffman_lookup(16#1d, 16#7) -> {ok, 16#64, 16#38}; +dec_huffman_lookup(16#1d, 16#8) -> {more, 16#66, 16#03}; +dec_huffman_lookup(16#1d, 16#9) -> {more, 16#66, 16#06}; +dec_huffman_lookup(16#1d, 16#a) -> {more, 16#66, 16#0a}; +dec_huffman_lookup(16#1d, 16#b) -> {more, 16#66, 16#0f}; +dec_huffman_lookup(16#1d, 16#c) -> {more, 16#66, 16#18}; +dec_huffman_lookup(16#1d, 16#d) -> {more, 16#66, 16#1f}; +dec_huffman_lookup(16#1d, 16#e) -> {more, 16#66, 16#29}; +dec_huffman_lookup(16#1d, 16#f) -> {ok, 16#66, 16#38}; +dec_huffman_lookup(16#1e, 16#0) -> {more, 16#67, 16#03}; +dec_huffman_lookup(16#1e, 16#1) -> {more, 16#67, 16#06}; +dec_huffman_lookup(16#1e, 16#2) -> {more, 16#67, 16#0a}; +dec_huffman_lookup(16#1e, 16#3) -> {more, 16#67, 16#0f}; +dec_huffman_lookup(16#1e, 16#4) -> 
{more, 16#67, 16#18}; +dec_huffman_lookup(16#1e, 16#5) -> {more, 16#67, 16#1f}; +dec_huffman_lookup(16#1e, 16#6) -> {more, 16#67, 16#29}; +dec_huffman_lookup(16#1e, 16#7) -> {ok, 16#67, 16#38}; +dec_huffman_lookup(16#1e, 16#8) -> {more, 16#68, 16#03}; +dec_huffman_lookup(16#1e, 16#9) -> {more, 16#68, 16#06}; +dec_huffman_lookup(16#1e, 16#a) -> {more, 16#68, 16#0a}; +dec_huffman_lookup(16#1e, 16#b) -> {more, 16#68, 16#0f}; +dec_huffman_lookup(16#1e, 16#c) -> {more, 16#68, 16#18}; +dec_huffman_lookup(16#1e, 16#d) -> {more, 16#68, 16#1f}; +dec_huffman_lookup(16#1e, 16#e) -> {more, 16#68, 16#29}; +dec_huffman_lookup(16#1e, 16#f) -> {ok, 16#68, 16#38}; +dec_huffman_lookup(16#1f, 16#0) -> {more, 16#6c, 16#01}; +dec_huffman_lookup(16#1f, 16#1) -> {ok, 16#6c, 16#16}; +dec_huffman_lookup(16#1f, 16#2) -> {more, 16#6d, 16#01}; +dec_huffman_lookup(16#1f, 16#3) -> {ok, 16#6d, 16#16}; +dec_huffman_lookup(16#1f, 16#4) -> {more, 16#6e, 16#01}; +dec_huffman_lookup(16#1f, 16#5) -> {ok, 16#6e, 16#16}; +dec_huffman_lookup(16#1f, 16#6) -> {more, 16#70, 16#01}; +dec_huffman_lookup(16#1f, 16#7) -> {ok, 16#70, 16#16}; +dec_huffman_lookup(16#1f, 16#8) -> {more, 16#72, 16#01}; +dec_huffman_lookup(16#1f, 16#9) -> {ok, 16#72, 16#16}; +dec_huffman_lookup(16#1f, 16#a) -> {more, 16#75, 16#01}; +dec_huffman_lookup(16#1f, 16#b) -> {ok, 16#75, 16#16}; +dec_huffman_lookup(16#1f, 16#c) -> {ok, 16#3a, 16#00}; +dec_huffman_lookup(16#1f, 16#d) -> {ok, 16#42, 16#00}; +dec_huffman_lookup(16#1f, 16#e) -> {ok, 16#43, 16#00}; +dec_huffman_lookup(16#1f, 16#f) -> {ok, 16#44, 16#00}; +dec_huffman_lookup(16#20, 16#0) -> {more, 16#6c, 16#02}; +dec_huffman_lookup(16#20, 16#1) -> {more, 16#6c, 16#09}; +dec_huffman_lookup(16#20, 16#2) -> {more, 16#6c, 16#17}; +dec_huffman_lookup(16#20, 16#3) -> {ok, 16#6c, 16#28}; +dec_huffman_lookup(16#20, 16#4) -> {more, 16#6d, 16#02}; +dec_huffman_lookup(16#20, 16#5) -> {more, 16#6d, 16#09}; +dec_huffman_lookup(16#20, 16#6) -> {more, 16#6d, 16#17}; +dec_huffman_lookup(16#20, 16#7) -> {ok, 16#6d, 16#28}; +dec_huffman_lookup(16#20, 16#8) -> {more, 16#6e, 16#02}; +dec_huffman_lookup(16#20, 16#9) -> {more, 16#6e, 16#09}; +dec_huffman_lookup(16#20, 16#a) -> {more, 16#6e, 16#17}; +dec_huffman_lookup(16#20, 16#b) -> {ok, 16#6e, 16#28}; +dec_huffman_lookup(16#20, 16#c) -> {more, 16#70, 16#02}; +dec_huffman_lookup(16#20, 16#d) -> {more, 16#70, 16#09}; +dec_huffman_lookup(16#20, 16#e) -> {more, 16#70, 16#17}; +dec_huffman_lookup(16#20, 16#f) -> {ok, 16#70, 16#28}; +dec_huffman_lookup(16#21, 16#0) -> {more, 16#6c, 16#03}; +dec_huffman_lookup(16#21, 16#1) -> {more, 16#6c, 16#06}; +dec_huffman_lookup(16#21, 16#2) -> {more, 16#6c, 16#0a}; +dec_huffman_lookup(16#21, 16#3) -> {more, 16#6c, 16#0f}; +dec_huffman_lookup(16#21, 16#4) -> {more, 16#6c, 16#18}; +dec_huffman_lookup(16#21, 16#5) -> {more, 16#6c, 16#1f}; +dec_huffman_lookup(16#21, 16#6) -> {more, 16#6c, 16#29}; +dec_huffman_lookup(16#21, 16#7) -> {ok, 16#6c, 16#38}; +dec_huffman_lookup(16#21, 16#8) -> {more, 16#6d, 16#03}; +dec_huffman_lookup(16#21, 16#9) -> {more, 16#6d, 16#06}; +dec_huffman_lookup(16#21, 16#a) -> {more, 16#6d, 16#0a}; +dec_huffman_lookup(16#21, 16#b) -> {more, 16#6d, 16#0f}; +dec_huffman_lookup(16#21, 16#c) -> {more, 16#6d, 16#18}; +dec_huffman_lookup(16#21, 16#d) -> {more, 16#6d, 16#1f}; +dec_huffman_lookup(16#21, 16#e) -> {more, 16#6d, 16#29}; +dec_huffman_lookup(16#21, 16#f) -> {ok, 16#6d, 16#38}; +dec_huffman_lookup(16#22, 16#0) -> {more, 16#6e, 16#03}; +dec_huffman_lookup(16#22, 16#1) -> {more, 16#6e, 16#06}; +dec_huffman_lookup(16#22, 
16#2) -> {more, 16#6e, 16#0a}; +dec_huffman_lookup(16#22, 16#3) -> {more, 16#6e, 16#0f}; +dec_huffman_lookup(16#22, 16#4) -> {more, 16#6e, 16#18}; +dec_huffman_lookup(16#22, 16#5) -> {more, 16#6e, 16#1f}; +dec_huffman_lookup(16#22, 16#6) -> {more, 16#6e, 16#29}; +dec_huffman_lookup(16#22, 16#7) -> {ok, 16#6e, 16#38}; +dec_huffman_lookup(16#22, 16#8) -> {more, 16#70, 16#03}; +dec_huffman_lookup(16#22, 16#9) -> {more, 16#70, 16#06}; +dec_huffman_lookup(16#22, 16#a) -> {more, 16#70, 16#0a}; +dec_huffman_lookup(16#22, 16#b) -> {more, 16#70, 16#0f}; +dec_huffman_lookup(16#22, 16#c) -> {more, 16#70, 16#18}; +dec_huffman_lookup(16#22, 16#d) -> {more, 16#70, 16#1f}; +dec_huffman_lookup(16#22, 16#e) -> {more, 16#70, 16#29}; +dec_huffman_lookup(16#22, 16#f) -> {ok, 16#70, 16#38}; +dec_huffman_lookup(16#23, 16#0) -> {more, 16#72, 16#02}; +dec_huffman_lookup(16#23, 16#1) -> {more, 16#72, 16#09}; +dec_huffman_lookup(16#23, 16#2) -> {more, 16#72, 16#17}; +dec_huffman_lookup(16#23, 16#3) -> {ok, 16#72, 16#28}; +dec_huffman_lookup(16#23, 16#4) -> {more, 16#75, 16#02}; +dec_huffman_lookup(16#23, 16#5) -> {more, 16#75, 16#09}; +dec_huffman_lookup(16#23, 16#6) -> {more, 16#75, 16#17}; +dec_huffman_lookup(16#23, 16#7) -> {ok, 16#75, 16#28}; +dec_huffman_lookup(16#23, 16#8) -> {more, 16#3a, 16#01}; +dec_huffman_lookup(16#23, 16#9) -> {ok, 16#3a, 16#16}; +dec_huffman_lookup(16#23, 16#a) -> {more, 16#42, 16#01}; +dec_huffman_lookup(16#23, 16#b) -> {ok, 16#42, 16#16}; +dec_huffman_lookup(16#23, 16#c) -> {more, 16#43, 16#01}; +dec_huffman_lookup(16#23, 16#d) -> {ok, 16#43, 16#16}; +dec_huffman_lookup(16#23, 16#e) -> {more, 16#44, 16#01}; +dec_huffman_lookup(16#23, 16#f) -> {ok, 16#44, 16#16}; +dec_huffman_lookup(16#24, 16#0) -> {more, 16#72, 16#03}; +dec_huffman_lookup(16#24, 16#1) -> {more, 16#72, 16#06}; +dec_huffman_lookup(16#24, 16#2) -> {more, 16#72, 16#0a}; +dec_huffman_lookup(16#24, 16#3) -> {more, 16#72, 16#0f}; +dec_huffman_lookup(16#24, 16#4) -> {more, 16#72, 16#18}; +dec_huffman_lookup(16#24, 16#5) -> {more, 16#72, 16#1f}; +dec_huffman_lookup(16#24, 16#6) -> {more, 16#72, 16#29}; +dec_huffman_lookup(16#24, 16#7) -> {ok, 16#72, 16#38}; +dec_huffman_lookup(16#24, 16#8) -> {more, 16#75, 16#03}; +dec_huffman_lookup(16#24, 16#9) -> {more, 16#75, 16#06}; +dec_huffman_lookup(16#24, 16#a) -> {more, 16#75, 16#0a}; +dec_huffman_lookup(16#24, 16#b) -> {more, 16#75, 16#0f}; +dec_huffman_lookup(16#24, 16#c) -> {more, 16#75, 16#18}; +dec_huffman_lookup(16#24, 16#d) -> {more, 16#75, 16#1f}; +dec_huffman_lookup(16#24, 16#e) -> {more, 16#75, 16#29}; +dec_huffman_lookup(16#24, 16#f) -> {ok, 16#75, 16#38}; +dec_huffman_lookup(16#25, 16#0) -> {more, 16#3a, 16#02}; +dec_huffman_lookup(16#25, 16#1) -> {more, 16#3a, 16#09}; +dec_huffman_lookup(16#25, 16#2) -> {more, 16#3a, 16#17}; +dec_huffman_lookup(16#25, 16#3) -> {ok, 16#3a, 16#28}; +dec_huffman_lookup(16#25, 16#4) -> {more, 16#42, 16#02}; +dec_huffman_lookup(16#25, 16#5) -> {more, 16#42, 16#09}; +dec_huffman_lookup(16#25, 16#6) -> {more, 16#42, 16#17}; +dec_huffman_lookup(16#25, 16#7) -> {ok, 16#42, 16#28}; +dec_huffman_lookup(16#25, 16#8) -> {more, 16#43, 16#02}; +dec_huffman_lookup(16#25, 16#9) -> {more, 16#43, 16#09}; +dec_huffman_lookup(16#25, 16#a) -> {more, 16#43, 16#17}; +dec_huffman_lookup(16#25, 16#b) -> {ok, 16#43, 16#28}; +dec_huffman_lookup(16#25, 16#c) -> {more, 16#44, 16#02}; +dec_huffman_lookup(16#25, 16#d) -> {more, 16#44, 16#09}; +dec_huffman_lookup(16#25, 16#e) -> {more, 16#44, 16#17}; +dec_huffman_lookup(16#25, 16#f) -> {ok, 16#44, 16#28}; 
+dec_huffman_lookup(16#26, 16#0) -> {more, 16#3a, 16#03}; +dec_huffman_lookup(16#26, 16#1) -> {more, 16#3a, 16#06}; +dec_huffman_lookup(16#26, 16#2) -> {more, 16#3a, 16#0a}; +dec_huffman_lookup(16#26, 16#3) -> {more, 16#3a, 16#0f}; +dec_huffman_lookup(16#26, 16#4) -> {more, 16#3a, 16#18}; +dec_huffman_lookup(16#26, 16#5) -> {more, 16#3a, 16#1f}; +dec_huffman_lookup(16#26, 16#6) -> {more, 16#3a, 16#29}; +dec_huffman_lookup(16#26, 16#7) -> {ok, 16#3a, 16#38}; +dec_huffman_lookup(16#26, 16#8) -> {more, 16#42, 16#03}; +dec_huffman_lookup(16#26, 16#9) -> {more, 16#42, 16#06}; +dec_huffman_lookup(16#26, 16#a) -> {more, 16#42, 16#0a}; +dec_huffman_lookup(16#26, 16#b) -> {more, 16#42, 16#0f}; +dec_huffman_lookup(16#26, 16#c) -> {more, 16#42, 16#18}; +dec_huffman_lookup(16#26, 16#d) -> {more, 16#42, 16#1f}; +dec_huffman_lookup(16#26, 16#e) -> {more, 16#42, 16#29}; +dec_huffman_lookup(16#26, 16#f) -> {ok, 16#42, 16#38}; +dec_huffman_lookup(16#27, 16#0) -> {more, 16#43, 16#03}; +dec_huffman_lookup(16#27, 16#1) -> {more, 16#43, 16#06}; +dec_huffman_lookup(16#27, 16#2) -> {more, 16#43, 16#0a}; +dec_huffman_lookup(16#27, 16#3) -> {more, 16#43, 16#0f}; +dec_huffman_lookup(16#27, 16#4) -> {more, 16#43, 16#18}; +dec_huffman_lookup(16#27, 16#5) -> {more, 16#43, 16#1f}; +dec_huffman_lookup(16#27, 16#6) -> {more, 16#43, 16#29}; +dec_huffman_lookup(16#27, 16#7) -> {ok, 16#43, 16#38}; +dec_huffman_lookup(16#27, 16#8) -> {more, 16#44, 16#03}; +dec_huffman_lookup(16#27, 16#9) -> {more, 16#44, 16#06}; +dec_huffman_lookup(16#27, 16#a) -> {more, 16#44, 16#0a}; +dec_huffman_lookup(16#27, 16#b) -> {more, 16#44, 16#0f}; +dec_huffman_lookup(16#27, 16#c) -> {more, 16#44, 16#18}; +dec_huffman_lookup(16#27, 16#d) -> {more, 16#44, 16#1f}; +dec_huffman_lookup(16#27, 16#e) -> {more, 16#44, 16#29}; +dec_huffman_lookup(16#27, 16#f) -> {ok, 16#44, 16#38}; +dec_huffman_lookup(16#28, 16#0) -> {more, undefined, 16#2c}; +dec_huffman_lookup(16#28, 16#1) -> {more, undefined, 16#2d}; +dec_huffman_lookup(16#28, 16#2) -> {more, undefined, 16#2f}; +dec_huffman_lookup(16#28, 16#3) -> {more, undefined, 16#30}; +dec_huffman_lookup(16#28, 16#4) -> {more, undefined, 16#33}; +dec_huffman_lookup(16#28, 16#5) -> {more, undefined, 16#34}; +dec_huffman_lookup(16#28, 16#6) -> {more, undefined, 16#36}; +dec_huffman_lookup(16#28, 16#7) -> {more, undefined, 16#37}; +dec_huffman_lookup(16#28, 16#8) -> {more, undefined, 16#3b}; +dec_huffman_lookup(16#28, 16#9) -> {more, undefined, 16#3c}; +dec_huffman_lookup(16#28, 16#a) -> {more, undefined, 16#3e}; +dec_huffman_lookup(16#28, 16#b) -> {more, undefined, 16#3f}; +dec_huffman_lookup(16#28, 16#c) -> {more, undefined, 16#42}; +dec_huffman_lookup(16#28, 16#d) -> {more, undefined, 16#43}; +dec_huffman_lookup(16#28, 16#e) -> {more, undefined, 16#45}; +dec_huffman_lookup(16#28, 16#f) -> {ok, undefined, 16#48}; +dec_huffman_lookup(16#29, 16#0) -> {ok, 16#45, 16#00}; +dec_huffman_lookup(16#29, 16#1) -> {ok, 16#46, 16#00}; +dec_huffman_lookup(16#29, 16#2) -> {ok, 16#47, 16#00}; +dec_huffman_lookup(16#29, 16#3) -> {ok, 16#48, 16#00}; +dec_huffman_lookup(16#29, 16#4) -> {ok, 16#49, 16#00}; +dec_huffman_lookup(16#29, 16#5) -> {ok, 16#4a, 16#00}; +dec_huffman_lookup(16#29, 16#6) -> {ok, 16#4b, 16#00}; +dec_huffman_lookup(16#29, 16#7) -> {ok, 16#4c, 16#00}; +dec_huffman_lookup(16#29, 16#8) -> {ok, 16#4d, 16#00}; +dec_huffman_lookup(16#29, 16#9) -> {ok, 16#4e, 16#00}; +dec_huffman_lookup(16#29, 16#a) -> {ok, 16#4f, 16#00}; +dec_huffman_lookup(16#29, 16#b) -> {ok, 16#50, 16#00}; +dec_huffman_lookup(16#29, 16#c) -> {ok, 
16#51, 16#00}; +dec_huffman_lookup(16#29, 16#d) -> {ok, 16#52, 16#00}; +dec_huffman_lookup(16#29, 16#e) -> {ok, 16#53, 16#00}; +dec_huffman_lookup(16#29, 16#f) -> {ok, 16#54, 16#00}; +dec_huffman_lookup(16#2a, 16#0) -> {more, 16#45, 16#01}; +dec_huffman_lookup(16#2a, 16#1) -> {ok, 16#45, 16#16}; +dec_huffman_lookup(16#2a, 16#2) -> {more, 16#46, 16#01}; +dec_huffman_lookup(16#2a, 16#3) -> {ok, 16#46, 16#16}; +dec_huffman_lookup(16#2a, 16#4) -> {more, 16#47, 16#01}; +dec_huffman_lookup(16#2a, 16#5) -> {ok, 16#47, 16#16}; +dec_huffman_lookup(16#2a, 16#6) -> {more, 16#48, 16#01}; +dec_huffman_lookup(16#2a, 16#7) -> {ok, 16#48, 16#16}; +dec_huffman_lookup(16#2a, 16#8) -> {more, 16#49, 16#01}; +dec_huffman_lookup(16#2a, 16#9) -> {ok, 16#49, 16#16}; +dec_huffman_lookup(16#2a, 16#a) -> {more, 16#4a, 16#01}; +dec_huffman_lookup(16#2a, 16#b) -> {ok, 16#4a, 16#16}; +dec_huffman_lookup(16#2a, 16#c) -> {more, 16#4b, 16#01}; +dec_huffman_lookup(16#2a, 16#d) -> {ok, 16#4b, 16#16}; +dec_huffman_lookup(16#2a, 16#e) -> {more, 16#4c, 16#01}; +dec_huffman_lookup(16#2a, 16#f) -> {ok, 16#4c, 16#16}; +dec_huffman_lookup(16#2b, 16#0) -> {more, 16#45, 16#02}; +dec_huffman_lookup(16#2b, 16#1) -> {more, 16#45, 16#09}; +dec_huffman_lookup(16#2b, 16#2) -> {more, 16#45, 16#17}; +dec_huffman_lookup(16#2b, 16#3) -> {ok, 16#45, 16#28}; +dec_huffman_lookup(16#2b, 16#4) -> {more, 16#46, 16#02}; +dec_huffman_lookup(16#2b, 16#5) -> {more, 16#46, 16#09}; +dec_huffman_lookup(16#2b, 16#6) -> {more, 16#46, 16#17}; +dec_huffman_lookup(16#2b, 16#7) -> {ok, 16#46, 16#28}; +dec_huffman_lookup(16#2b, 16#8) -> {more, 16#47, 16#02}; +dec_huffman_lookup(16#2b, 16#9) -> {more, 16#47, 16#09}; +dec_huffman_lookup(16#2b, 16#a) -> {more, 16#47, 16#17}; +dec_huffman_lookup(16#2b, 16#b) -> {ok, 16#47, 16#28}; +dec_huffman_lookup(16#2b, 16#c) -> {more, 16#48, 16#02}; +dec_huffman_lookup(16#2b, 16#d) -> {more, 16#48, 16#09}; +dec_huffman_lookup(16#2b, 16#e) -> {more, 16#48, 16#17}; +dec_huffman_lookup(16#2b, 16#f) -> {ok, 16#48, 16#28}; +dec_huffman_lookup(16#2c, 16#0) -> {more, 16#45, 16#03}; +dec_huffman_lookup(16#2c, 16#1) -> {more, 16#45, 16#06}; +dec_huffman_lookup(16#2c, 16#2) -> {more, 16#45, 16#0a}; +dec_huffman_lookup(16#2c, 16#3) -> {more, 16#45, 16#0f}; +dec_huffman_lookup(16#2c, 16#4) -> {more, 16#45, 16#18}; +dec_huffman_lookup(16#2c, 16#5) -> {more, 16#45, 16#1f}; +dec_huffman_lookup(16#2c, 16#6) -> {more, 16#45, 16#29}; +dec_huffman_lookup(16#2c, 16#7) -> {ok, 16#45, 16#38}; +dec_huffman_lookup(16#2c, 16#8) -> {more, 16#46, 16#03}; +dec_huffman_lookup(16#2c, 16#9) -> {more, 16#46, 16#06}; +dec_huffman_lookup(16#2c, 16#a) -> {more, 16#46, 16#0a}; +dec_huffman_lookup(16#2c, 16#b) -> {more, 16#46, 16#0f}; +dec_huffman_lookup(16#2c, 16#c) -> {more, 16#46, 16#18}; +dec_huffman_lookup(16#2c, 16#d) -> {more, 16#46, 16#1f}; +dec_huffman_lookup(16#2c, 16#e) -> {more, 16#46, 16#29}; +dec_huffman_lookup(16#2c, 16#f) -> {ok, 16#46, 16#38}; +dec_huffman_lookup(16#2d, 16#0) -> {more, 16#47, 16#03}; +dec_huffman_lookup(16#2d, 16#1) -> {more, 16#47, 16#06}; +dec_huffman_lookup(16#2d, 16#2) -> {more, 16#47, 16#0a}; +dec_huffman_lookup(16#2d, 16#3) -> {more, 16#47, 16#0f}; +dec_huffman_lookup(16#2d, 16#4) -> {more, 16#47, 16#18}; +dec_huffman_lookup(16#2d, 16#5) -> {more, 16#47, 16#1f}; +dec_huffman_lookup(16#2d, 16#6) -> {more, 16#47, 16#29}; +dec_huffman_lookup(16#2d, 16#7) -> {ok, 16#47, 16#38}; +dec_huffman_lookup(16#2d, 16#8) -> {more, 16#48, 16#03}; +dec_huffman_lookup(16#2d, 16#9) -> {more, 16#48, 16#06}; +dec_huffman_lookup(16#2d, 16#a) -> 
{more, 16#48, 16#0a}; +dec_huffman_lookup(16#2d, 16#b) -> {more, 16#48, 16#0f}; +dec_huffman_lookup(16#2d, 16#c) -> {more, 16#48, 16#18}; +dec_huffman_lookup(16#2d, 16#d) -> {more, 16#48, 16#1f}; +dec_huffman_lookup(16#2d, 16#e) -> {more, 16#48, 16#29}; +dec_huffman_lookup(16#2d, 16#f) -> {ok, 16#48, 16#38}; +dec_huffman_lookup(16#2e, 16#0) -> {more, 16#49, 16#02}; +dec_huffman_lookup(16#2e, 16#1) -> {more, 16#49, 16#09}; +dec_huffman_lookup(16#2e, 16#2) -> {more, 16#49, 16#17}; +dec_huffman_lookup(16#2e, 16#3) -> {ok, 16#49, 16#28}; +dec_huffman_lookup(16#2e, 16#4) -> {more, 16#4a, 16#02}; +dec_huffman_lookup(16#2e, 16#5) -> {more, 16#4a, 16#09}; +dec_huffman_lookup(16#2e, 16#6) -> {more, 16#4a, 16#17}; +dec_huffman_lookup(16#2e, 16#7) -> {ok, 16#4a, 16#28}; +dec_huffman_lookup(16#2e, 16#8) -> {more, 16#4b, 16#02}; +dec_huffman_lookup(16#2e, 16#9) -> {more, 16#4b, 16#09}; +dec_huffman_lookup(16#2e, 16#a) -> {more, 16#4b, 16#17}; +dec_huffman_lookup(16#2e, 16#b) -> {ok, 16#4b, 16#28}; +dec_huffman_lookup(16#2e, 16#c) -> {more, 16#4c, 16#02}; +dec_huffman_lookup(16#2e, 16#d) -> {more, 16#4c, 16#09}; +dec_huffman_lookup(16#2e, 16#e) -> {more, 16#4c, 16#17}; +dec_huffman_lookup(16#2e, 16#f) -> {ok, 16#4c, 16#28}; +dec_huffman_lookup(16#2f, 16#0) -> {more, 16#49, 16#03}; +dec_huffman_lookup(16#2f, 16#1) -> {more, 16#49, 16#06}; +dec_huffman_lookup(16#2f, 16#2) -> {more, 16#49, 16#0a}; +dec_huffman_lookup(16#2f, 16#3) -> {more, 16#49, 16#0f}; +dec_huffman_lookup(16#2f, 16#4) -> {more, 16#49, 16#18}; +dec_huffman_lookup(16#2f, 16#5) -> {more, 16#49, 16#1f}; +dec_huffman_lookup(16#2f, 16#6) -> {more, 16#49, 16#29}; +dec_huffman_lookup(16#2f, 16#7) -> {ok, 16#49, 16#38}; +dec_huffman_lookup(16#2f, 16#8) -> {more, 16#4a, 16#03}; +dec_huffman_lookup(16#2f, 16#9) -> {more, 16#4a, 16#06}; +dec_huffman_lookup(16#2f, 16#a) -> {more, 16#4a, 16#0a}; +dec_huffman_lookup(16#2f, 16#b) -> {more, 16#4a, 16#0f}; +dec_huffman_lookup(16#2f, 16#c) -> {more, 16#4a, 16#18}; +dec_huffman_lookup(16#2f, 16#d) -> {more, 16#4a, 16#1f}; +dec_huffman_lookup(16#2f, 16#e) -> {more, 16#4a, 16#29}; +dec_huffman_lookup(16#2f, 16#f) -> {ok, 16#4a, 16#38}; +dec_huffman_lookup(16#30, 16#0) -> {more, 16#4b, 16#03}; +dec_huffman_lookup(16#30, 16#1) -> {more, 16#4b, 16#06}; +dec_huffman_lookup(16#30, 16#2) -> {more, 16#4b, 16#0a}; +dec_huffman_lookup(16#30, 16#3) -> {more, 16#4b, 16#0f}; +dec_huffman_lookup(16#30, 16#4) -> {more, 16#4b, 16#18}; +dec_huffman_lookup(16#30, 16#5) -> {more, 16#4b, 16#1f}; +dec_huffman_lookup(16#30, 16#6) -> {more, 16#4b, 16#29}; +dec_huffman_lookup(16#30, 16#7) -> {ok, 16#4b, 16#38}; +dec_huffman_lookup(16#30, 16#8) -> {more, 16#4c, 16#03}; +dec_huffman_lookup(16#30, 16#9) -> {more, 16#4c, 16#06}; +dec_huffman_lookup(16#30, 16#a) -> {more, 16#4c, 16#0a}; +dec_huffman_lookup(16#30, 16#b) -> {more, 16#4c, 16#0f}; +dec_huffman_lookup(16#30, 16#c) -> {more, 16#4c, 16#18}; +dec_huffman_lookup(16#30, 16#d) -> {more, 16#4c, 16#1f}; +dec_huffman_lookup(16#30, 16#e) -> {more, 16#4c, 16#29}; +dec_huffman_lookup(16#30, 16#f) -> {ok, 16#4c, 16#38}; +dec_huffman_lookup(16#31, 16#0) -> {more, 16#4d, 16#01}; +dec_huffman_lookup(16#31, 16#1) -> {ok, 16#4d, 16#16}; +dec_huffman_lookup(16#31, 16#2) -> {more, 16#4e, 16#01}; +dec_huffman_lookup(16#31, 16#3) -> {ok, 16#4e, 16#16}; +dec_huffman_lookup(16#31, 16#4) -> {more, 16#4f, 16#01}; +dec_huffman_lookup(16#31, 16#5) -> {ok, 16#4f, 16#16}; +dec_huffman_lookup(16#31, 16#6) -> {more, 16#50, 16#01}; +dec_huffman_lookup(16#31, 16#7) -> {ok, 16#50, 16#16}; 
+dec_huffman_lookup(16#31, 16#8) -> {more, 16#51, 16#01}; +dec_huffman_lookup(16#31, 16#9) -> {ok, 16#51, 16#16}; +dec_huffman_lookup(16#31, 16#a) -> {more, 16#52, 16#01}; +dec_huffman_lookup(16#31, 16#b) -> {ok, 16#52, 16#16}; +dec_huffman_lookup(16#31, 16#c) -> {more, 16#53, 16#01}; +dec_huffman_lookup(16#31, 16#d) -> {ok, 16#53, 16#16}; +dec_huffman_lookup(16#31, 16#e) -> {more, 16#54, 16#01}; +dec_huffman_lookup(16#31, 16#f) -> {ok, 16#54, 16#16}; +dec_huffman_lookup(16#32, 16#0) -> {more, 16#4d, 16#02}; +dec_huffman_lookup(16#32, 16#1) -> {more, 16#4d, 16#09}; +dec_huffman_lookup(16#32, 16#2) -> {more, 16#4d, 16#17}; +dec_huffman_lookup(16#32, 16#3) -> {ok, 16#4d, 16#28}; +dec_huffman_lookup(16#32, 16#4) -> {more, 16#4e, 16#02}; +dec_huffman_lookup(16#32, 16#5) -> {more, 16#4e, 16#09}; +dec_huffman_lookup(16#32, 16#6) -> {more, 16#4e, 16#17}; +dec_huffman_lookup(16#32, 16#7) -> {ok, 16#4e, 16#28}; +dec_huffman_lookup(16#32, 16#8) -> {more, 16#4f, 16#02}; +dec_huffman_lookup(16#32, 16#9) -> {more, 16#4f, 16#09}; +dec_huffman_lookup(16#32, 16#a) -> {more, 16#4f, 16#17}; +dec_huffman_lookup(16#32, 16#b) -> {ok, 16#4f, 16#28}; +dec_huffman_lookup(16#32, 16#c) -> {more, 16#50, 16#02}; +dec_huffman_lookup(16#32, 16#d) -> {more, 16#50, 16#09}; +dec_huffman_lookup(16#32, 16#e) -> {more, 16#50, 16#17}; +dec_huffman_lookup(16#32, 16#f) -> {ok, 16#50, 16#28}; +dec_huffman_lookup(16#33, 16#0) -> {more, 16#4d, 16#03}; +dec_huffman_lookup(16#33, 16#1) -> {more, 16#4d, 16#06}; +dec_huffman_lookup(16#33, 16#2) -> {more, 16#4d, 16#0a}; +dec_huffman_lookup(16#33, 16#3) -> {more, 16#4d, 16#0f}; +dec_huffman_lookup(16#33, 16#4) -> {more, 16#4d, 16#18}; +dec_huffman_lookup(16#33, 16#5) -> {more, 16#4d, 16#1f}; +dec_huffman_lookup(16#33, 16#6) -> {more, 16#4d, 16#29}; +dec_huffman_lookup(16#33, 16#7) -> {ok, 16#4d, 16#38}; +dec_huffman_lookup(16#33, 16#8) -> {more, 16#4e, 16#03}; +dec_huffman_lookup(16#33, 16#9) -> {more, 16#4e, 16#06}; +dec_huffman_lookup(16#33, 16#a) -> {more, 16#4e, 16#0a}; +dec_huffman_lookup(16#33, 16#b) -> {more, 16#4e, 16#0f}; +dec_huffman_lookup(16#33, 16#c) -> {more, 16#4e, 16#18}; +dec_huffman_lookup(16#33, 16#d) -> {more, 16#4e, 16#1f}; +dec_huffman_lookup(16#33, 16#e) -> {more, 16#4e, 16#29}; +dec_huffman_lookup(16#33, 16#f) -> {ok, 16#4e, 16#38}; +dec_huffman_lookup(16#34, 16#0) -> {more, 16#4f, 16#03}; +dec_huffman_lookup(16#34, 16#1) -> {more, 16#4f, 16#06}; +dec_huffman_lookup(16#34, 16#2) -> {more, 16#4f, 16#0a}; +dec_huffman_lookup(16#34, 16#3) -> {more, 16#4f, 16#0f}; +dec_huffman_lookup(16#34, 16#4) -> {more, 16#4f, 16#18}; +dec_huffman_lookup(16#34, 16#5) -> {more, 16#4f, 16#1f}; +dec_huffman_lookup(16#34, 16#6) -> {more, 16#4f, 16#29}; +dec_huffman_lookup(16#34, 16#7) -> {ok, 16#4f, 16#38}; +dec_huffman_lookup(16#34, 16#8) -> {more, 16#50, 16#03}; +dec_huffman_lookup(16#34, 16#9) -> {more, 16#50, 16#06}; +dec_huffman_lookup(16#34, 16#a) -> {more, 16#50, 16#0a}; +dec_huffman_lookup(16#34, 16#b) -> {more, 16#50, 16#0f}; +dec_huffman_lookup(16#34, 16#c) -> {more, 16#50, 16#18}; +dec_huffman_lookup(16#34, 16#d) -> {more, 16#50, 16#1f}; +dec_huffman_lookup(16#34, 16#e) -> {more, 16#50, 16#29}; +dec_huffman_lookup(16#34, 16#f) -> {ok, 16#50, 16#38}; +dec_huffman_lookup(16#35, 16#0) -> {more, 16#51, 16#02}; +dec_huffman_lookup(16#35, 16#1) -> {more, 16#51, 16#09}; +dec_huffman_lookup(16#35, 16#2) -> {more, 16#51, 16#17}; +dec_huffman_lookup(16#35, 16#3) -> {ok, 16#51, 16#28}; +dec_huffman_lookup(16#35, 16#4) -> {more, 16#52, 16#02}; +dec_huffman_lookup(16#35, 16#5) -> {more, 
16#52, 16#09}; +dec_huffman_lookup(16#35, 16#6) -> {more, 16#52, 16#17}; +dec_huffman_lookup(16#35, 16#7) -> {ok, 16#52, 16#28}; +dec_huffman_lookup(16#35, 16#8) -> {more, 16#53, 16#02}; +dec_huffman_lookup(16#35, 16#9) -> {more, 16#53, 16#09}; +dec_huffman_lookup(16#35, 16#a) -> {more, 16#53, 16#17}; +dec_huffman_lookup(16#35, 16#b) -> {ok, 16#53, 16#28}; +dec_huffman_lookup(16#35, 16#c) -> {more, 16#54, 16#02}; +dec_huffman_lookup(16#35, 16#d) -> {more, 16#54, 16#09}; +dec_huffman_lookup(16#35, 16#e) -> {more, 16#54, 16#17}; +dec_huffman_lookup(16#35, 16#f) -> {ok, 16#54, 16#28}; +dec_huffman_lookup(16#36, 16#0) -> {more, 16#51, 16#03}; +dec_huffman_lookup(16#36, 16#1) -> {more, 16#51, 16#06}; +dec_huffman_lookup(16#36, 16#2) -> {more, 16#51, 16#0a}; +dec_huffman_lookup(16#36, 16#3) -> {more, 16#51, 16#0f}; +dec_huffman_lookup(16#36, 16#4) -> {more, 16#51, 16#18}; +dec_huffman_lookup(16#36, 16#5) -> {more, 16#51, 16#1f}; +dec_huffman_lookup(16#36, 16#6) -> {more, 16#51, 16#29}; +dec_huffman_lookup(16#36, 16#7) -> {ok, 16#51, 16#38}; +dec_huffman_lookup(16#36, 16#8) -> {more, 16#52, 16#03}; +dec_huffman_lookup(16#36, 16#9) -> {more, 16#52, 16#06}; +dec_huffman_lookup(16#36, 16#a) -> {more, 16#52, 16#0a}; +dec_huffman_lookup(16#36, 16#b) -> {more, 16#52, 16#0f}; +dec_huffman_lookup(16#36, 16#c) -> {more, 16#52, 16#18}; +dec_huffman_lookup(16#36, 16#d) -> {more, 16#52, 16#1f}; +dec_huffman_lookup(16#36, 16#e) -> {more, 16#52, 16#29}; +dec_huffman_lookup(16#36, 16#f) -> {ok, 16#52, 16#38}; +dec_huffman_lookup(16#37, 16#0) -> {more, 16#53, 16#03}; +dec_huffman_lookup(16#37, 16#1) -> {more, 16#53, 16#06}; +dec_huffman_lookup(16#37, 16#2) -> {more, 16#53, 16#0a}; +dec_huffman_lookup(16#37, 16#3) -> {more, 16#53, 16#0f}; +dec_huffman_lookup(16#37, 16#4) -> {more, 16#53, 16#18}; +dec_huffman_lookup(16#37, 16#5) -> {more, 16#53, 16#1f}; +dec_huffman_lookup(16#37, 16#6) -> {more, 16#53, 16#29}; +dec_huffman_lookup(16#37, 16#7) -> {ok, 16#53, 16#38}; +dec_huffman_lookup(16#37, 16#8) -> {more, 16#54, 16#03}; +dec_huffman_lookup(16#37, 16#9) -> {more, 16#54, 16#06}; +dec_huffman_lookup(16#37, 16#a) -> {more, 16#54, 16#0a}; +dec_huffman_lookup(16#37, 16#b) -> {more, 16#54, 16#0f}; +dec_huffman_lookup(16#37, 16#c) -> {more, 16#54, 16#18}; +dec_huffman_lookup(16#37, 16#d) -> {more, 16#54, 16#1f}; +dec_huffman_lookup(16#37, 16#e) -> {more, 16#54, 16#29}; +dec_huffman_lookup(16#37, 16#f) -> {ok, 16#54, 16#38}; +dec_huffman_lookup(16#38, 16#0) -> {ok, 16#55, 16#00}; +dec_huffman_lookup(16#38, 16#1) -> {ok, 16#56, 16#00}; +dec_huffman_lookup(16#38, 16#2) -> {ok, 16#57, 16#00}; +dec_huffman_lookup(16#38, 16#3) -> {ok, 16#59, 16#00}; +dec_huffman_lookup(16#38, 16#4) -> {ok, 16#6a, 16#00}; +dec_huffman_lookup(16#38, 16#5) -> {ok, 16#6b, 16#00}; +dec_huffman_lookup(16#38, 16#6) -> {ok, 16#71, 16#00}; +dec_huffman_lookup(16#38, 16#7) -> {ok, 16#76, 16#00}; +dec_huffman_lookup(16#38, 16#8) -> {ok, 16#77, 16#00}; +dec_huffman_lookup(16#38, 16#9) -> {ok, 16#78, 16#00}; +dec_huffman_lookup(16#38, 16#a) -> {ok, 16#79, 16#00}; +dec_huffman_lookup(16#38, 16#b) -> {ok, 16#7a, 16#00}; +dec_huffman_lookup(16#38, 16#c) -> {more, undefined, 16#46}; +dec_huffman_lookup(16#38, 16#d) -> {more, undefined, 16#47}; +dec_huffman_lookup(16#38, 16#e) -> {more, undefined, 16#49}; +dec_huffman_lookup(16#38, 16#f) -> {ok, undefined, 16#4a}; +dec_huffman_lookup(16#39, 16#0) -> {more, 16#55, 16#01}; +dec_huffman_lookup(16#39, 16#1) -> {ok, 16#55, 16#16}; +dec_huffman_lookup(16#39, 16#2) -> {more, 16#56, 16#01}; +dec_huffman_lookup(16#39, 
16#3) -> {ok, 16#56, 16#16}; +dec_huffman_lookup(16#39, 16#4) -> {more, 16#57, 16#01}; +dec_huffman_lookup(16#39, 16#5) -> {ok, 16#57, 16#16}; +dec_huffman_lookup(16#39, 16#6) -> {more, 16#59, 16#01}; +dec_huffman_lookup(16#39, 16#7) -> {ok, 16#59, 16#16}; +dec_huffman_lookup(16#39, 16#8) -> {more, 16#6a, 16#01}; +dec_huffman_lookup(16#39, 16#9) -> {ok, 16#6a, 16#16}; +dec_huffman_lookup(16#39, 16#a) -> {more, 16#6b, 16#01}; +dec_huffman_lookup(16#39, 16#b) -> {ok, 16#6b, 16#16}; +dec_huffman_lookup(16#39, 16#c) -> {more, 16#71, 16#01}; +dec_huffman_lookup(16#39, 16#d) -> {ok, 16#71, 16#16}; +dec_huffman_lookup(16#39, 16#e) -> {more, 16#76, 16#01}; +dec_huffman_lookup(16#39, 16#f) -> {ok, 16#76, 16#16}; +dec_huffman_lookup(16#3a, 16#0) -> {more, 16#55, 16#02}; +dec_huffman_lookup(16#3a, 16#1) -> {more, 16#55, 16#09}; +dec_huffman_lookup(16#3a, 16#2) -> {more, 16#55, 16#17}; +dec_huffman_lookup(16#3a, 16#3) -> {ok, 16#55, 16#28}; +dec_huffman_lookup(16#3a, 16#4) -> {more, 16#56, 16#02}; +dec_huffman_lookup(16#3a, 16#5) -> {more, 16#56, 16#09}; +dec_huffman_lookup(16#3a, 16#6) -> {more, 16#56, 16#17}; +dec_huffman_lookup(16#3a, 16#7) -> {ok, 16#56, 16#28}; +dec_huffman_lookup(16#3a, 16#8) -> {more, 16#57, 16#02}; +dec_huffman_lookup(16#3a, 16#9) -> {more, 16#57, 16#09}; +dec_huffman_lookup(16#3a, 16#a) -> {more, 16#57, 16#17}; +dec_huffman_lookup(16#3a, 16#b) -> {ok, 16#57, 16#28}; +dec_huffman_lookup(16#3a, 16#c) -> {more, 16#59, 16#02}; +dec_huffman_lookup(16#3a, 16#d) -> {more, 16#59, 16#09}; +dec_huffman_lookup(16#3a, 16#e) -> {more, 16#59, 16#17}; +dec_huffman_lookup(16#3a, 16#f) -> {ok, 16#59, 16#28}; +dec_huffman_lookup(16#3b, 16#0) -> {more, 16#55, 16#03}; +dec_huffman_lookup(16#3b, 16#1) -> {more, 16#55, 16#06}; +dec_huffman_lookup(16#3b, 16#2) -> {more, 16#55, 16#0a}; +dec_huffman_lookup(16#3b, 16#3) -> {more, 16#55, 16#0f}; +dec_huffman_lookup(16#3b, 16#4) -> {more, 16#55, 16#18}; +dec_huffman_lookup(16#3b, 16#5) -> {more, 16#55, 16#1f}; +dec_huffman_lookup(16#3b, 16#6) -> {more, 16#55, 16#29}; +dec_huffman_lookup(16#3b, 16#7) -> {ok, 16#55, 16#38}; +dec_huffman_lookup(16#3b, 16#8) -> {more, 16#56, 16#03}; +dec_huffman_lookup(16#3b, 16#9) -> {more, 16#56, 16#06}; +dec_huffman_lookup(16#3b, 16#a) -> {more, 16#56, 16#0a}; +dec_huffman_lookup(16#3b, 16#b) -> {more, 16#56, 16#0f}; +dec_huffman_lookup(16#3b, 16#c) -> {more, 16#56, 16#18}; +dec_huffman_lookup(16#3b, 16#d) -> {more, 16#56, 16#1f}; +dec_huffman_lookup(16#3b, 16#e) -> {more, 16#56, 16#29}; +dec_huffman_lookup(16#3b, 16#f) -> {ok, 16#56, 16#38}; +dec_huffman_lookup(16#3c, 16#0) -> {more, 16#57, 16#03}; +dec_huffman_lookup(16#3c, 16#1) -> {more, 16#57, 16#06}; +dec_huffman_lookup(16#3c, 16#2) -> {more, 16#57, 16#0a}; +dec_huffman_lookup(16#3c, 16#3) -> {more, 16#57, 16#0f}; +dec_huffman_lookup(16#3c, 16#4) -> {more, 16#57, 16#18}; +dec_huffman_lookup(16#3c, 16#5) -> {more, 16#57, 16#1f}; +dec_huffman_lookup(16#3c, 16#6) -> {more, 16#57, 16#29}; +dec_huffman_lookup(16#3c, 16#7) -> {ok, 16#57, 16#38}; +dec_huffman_lookup(16#3c, 16#8) -> {more, 16#59, 16#03}; +dec_huffman_lookup(16#3c, 16#9) -> {more, 16#59, 16#06}; +dec_huffman_lookup(16#3c, 16#a) -> {more, 16#59, 16#0a}; +dec_huffman_lookup(16#3c, 16#b) -> {more, 16#59, 16#0f}; +dec_huffman_lookup(16#3c, 16#c) -> {more, 16#59, 16#18}; +dec_huffman_lookup(16#3c, 16#d) -> {more, 16#59, 16#1f}; +dec_huffman_lookup(16#3c, 16#e) -> {more, 16#59, 16#29}; +dec_huffman_lookup(16#3c, 16#f) -> {ok, 16#59, 16#38}; +dec_huffman_lookup(16#3d, 16#0) -> {more, 16#6a, 16#02}; 
+dec_huffman_lookup(16#3d, 16#1) -> {more, 16#6a, 16#09}; +dec_huffman_lookup(16#3d, 16#2) -> {more, 16#6a, 16#17}; +dec_huffman_lookup(16#3d, 16#3) -> {ok, 16#6a, 16#28}; +dec_huffman_lookup(16#3d, 16#4) -> {more, 16#6b, 16#02}; +dec_huffman_lookup(16#3d, 16#5) -> {more, 16#6b, 16#09}; +dec_huffman_lookup(16#3d, 16#6) -> {more, 16#6b, 16#17}; +dec_huffman_lookup(16#3d, 16#7) -> {ok, 16#6b, 16#28}; +dec_huffman_lookup(16#3d, 16#8) -> {more, 16#71, 16#02}; +dec_huffman_lookup(16#3d, 16#9) -> {more, 16#71, 16#09}; +dec_huffman_lookup(16#3d, 16#a) -> {more, 16#71, 16#17}; +dec_huffman_lookup(16#3d, 16#b) -> {ok, 16#71, 16#28}; +dec_huffman_lookup(16#3d, 16#c) -> {more, 16#76, 16#02}; +dec_huffman_lookup(16#3d, 16#d) -> {more, 16#76, 16#09}; +dec_huffman_lookup(16#3d, 16#e) -> {more, 16#76, 16#17}; +dec_huffman_lookup(16#3d, 16#f) -> {ok, 16#76, 16#28}; +dec_huffman_lookup(16#3e, 16#0) -> {more, 16#6a, 16#03}; +dec_huffman_lookup(16#3e, 16#1) -> {more, 16#6a, 16#06}; +dec_huffman_lookup(16#3e, 16#2) -> {more, 16#6a, 16#0a}; +dec_huffman_lookup(16#3e, 16#3) -> {more, 16#6a, 16#0f}; +dec_huffman_lookup(16#3e, 16#4) -> {more, 16#6a, 16#18}; +dec_huffman_lookup(16#3e, 16#5) -> {more, 16#6a, 16#1f}; +dec_huffman_lookup(16#3e, 16#6) -> {more, 16#6a, 16#29}; +dec_huffman_lookup(16#3e, 16#7) -> {ok, 16#6a, 16#38}; +dec_huffman_lookup(16#3e, 16#8) -> {more, 16#6b, 16#03}; +dec_huffman_lookup(16#3e, 16#9) -> {more, 16#6b, 16#06}; +dec_huffman_lookup(16#3e, 16#a) -> {more, 16#6b, 16#0a}; +dec_huffman_lookup(16#3e, 16#b) -> {more, 16#6b, 16#0f}; +dec_huffman_lookup(16#3e, 16#c) -> {more, 16#6b, 16#18}; +dec_huffman_lookup(16#3e, 16#d) -> {more, 16#6b, 16#1f}; +dec_huffman_lookup(16#3e, 16#e) -> {more, 16#6b, 16#29}; +dec_huffman_lookup(16#3e, 16#f) -> {ok, 16#6b, 16#38}; +dec_huffman_lookup(16#3f, 16#0) -> {more, 16#71, 16#03}; +dec_huffman_lookup(16#3f, 16#1) -> {more, 16#71, 16#06}; +dec_huffman_lookup(16#3f, 16#2) -> {more, 16#71, 16#0a}; +dec_huffman_lookup(16#3f, 16#3) -> {more, 16#71, 16#0f}; +dec_huffman_lookup(16#3f, 16#4) -> {more, 16#71, 16#18}; +dec_huffman_lookup(16#3f, 16#5) -> {more, 16#71, 16#1f}; +dec_huffman_lookup(16#3f, 16#6) -> {more, 16#71, 16#29}; +dec_huffman_lookup(16#3f, 16#7) -> {ok, 16#71, 16#38}; +dec_huffman_lookup(16#3f, 16#8) -> {more, 16#76, 16#03}; +dec_huffman_lookup(16#3f, 16#9) -> {more, 16#76, 16#06}; +dec_huffman_lookup(16#3f, 16#a) -> {more, 16#76, 16#0a}; +dec_huffman_lookup(16#3f, 16#b) -> {more, 16#76, 16#0f}; +dec_huffman_lookup(16#3f, 16#c) -> {more, 16#76, 16#18}; +dec_huffman_lookup(16#3f, 16#d) -> {more, 16#76, 16#1f}; +dec_huffman_lookup(16#3f, 16#e) -> {more, 16#76, 16#29}; +dec_huffman_lookup(16#3f, 16#f) -> {ok, 16#76, 16#38}; +dec_huffman_lookup(16#40, 16#0) -> {more, 16#77, 16#01}; +dec_huffman_lookup(16#40, 16#1) -> {ok, 16#77, 16#16}; +dec_huffman_lookup(16#40, 16#2) -> {more, 16#78, 16#01}; +dec_huffman_lookup(16#40, 16#3) -> {ok, 16#78, 16#16}; +dec_huffman_lookup(16#40, 16#4) -> {more, 16#79, 16#01}; +dec_huffman_lookup(16#40, 16#5) -> {ok, 16#79, 16#16}; +dec_huffman_lookup(16#40, 16#6) -> {more, 16#7a, 16#01}; +dec_huffman_lookup(16#40, 16#7) -> {ok, 16#7a, 16#16}; +dec_huffman_lookup(16#40, 16#8) -> {ok, 16#26, 16#00}; +dec_huffman_lookup(16#40, 16#9) -> {ok, 16#2a, 16#00}; +dec_huffman_lookup(16#40, 16#a) -> {ok, 16#2c, 16#00}; +dec_huffman_lookup(16#40, 16#b) -> {ok, 16#3b, 16#00}; +dec_huffman_lookup(16#40, 16#c) -> {ok, 16#58, 16#00}; +dec_huffman_lookup(16#40, 16#d) -> {ok, 16#5a, 16#00}; +dec_huffman_lookup(16#40, 16#e) -> {more, 
undefined, 16#4b}; +dec_huffman_lookup(16#40, 16#f) -> {ok, undefined, 16#4e}; +dec_huffman_lookup(16#41, 16#0) -> {more, 16#77, 16#02}; +dec_huffman_lookup(16#41, 16#1) -> {more, 16#77, 16#09}; +dec_huffman_lookup(16#41, 16#2) -> {more, 16#77, 16#17}; +dec_huffman_lookup(16#41, 16#3) -> {ok, 16#77, 16#28}; +dec_huffman_lookup(16#41, 16#4) -> {more, 16#78, 16#02}; +dec_huffman_lookup(16#41, 16#5) -> {more, 16#78, 16#09}; +dec_huffman_lookup(16#41, 16#6) -> {more, 16#78, 16#17}; +dec_huffman_lookup(16#41, 16#7) -> {ok, 16#78, 16#28}; +dec_huffman_lookup(16#41, 16#8) -> {more, 16#79, 16#02}; +dec_huffman_lookup(16#41, 16#9) -> {more, 16#79, 16#09}; +dec_huffman_lookup(16#41, 16#a) -> {more, 16#79, 16#17}; +dec_huffman_lookup(16#41, 16#b) -> {ok, 16#79, 16#28}; +dec_huffman_lookup(16#41, 16#c) -> {more, 16#7a, 16#02}; +dec_huffman_lookup(16#41, 16#d) -> {more, 16#7a, 16#09}; +dec_huffman_lookup(16#41, 16#e) -> {more, 16#7a, 16#17}; +dec_huffman_lookup(16#41, 16#f) -> {ok, 16#7a, 16#28}; +dec_huffman_lookup(16#42, 16#0) -> {more, 16#77, 16#03}; +dec_huffman_lookup(16#42, 16#1) -> {more, 16#77, 16#06}; +dec_huffman_lookup(16#42, 16#2) -> {more, 16#77, 16#0a}; +dec_huffman_lookup(16#42, 16#3) -> {more, 16#77, 16#0f}; +dec_huffman_lookup(16#42, 16#4) -> {more, 16#77, 16#18}; +dec_huffman_lookup(16#42, 16#5) -> {more, 16#77, 16#1f}; +dec_huffman_lookup(16#42, 16#6) -> {more, 16#77, 16#29}; +dec_huffman_lookup(16#42, 16#7) -> {ok, 16#77, 16#38}; +dec_huffman_lookup(16#42, 16#8) -> {more, 16#78, 16#03}; +dec_huffman_lookup(16#42, 16#9) -> {more, 16#78, 16#06}; +dec_huffman_lookup(16#42, 16#a) -> {more, 16#78, 16#0a}; +dec_huffman_lookup(16#42, 16#b) -> {more, 16#78, 16#0f}; +dec_huffman_lookup(16#42, 16#c) -> {more, 16#78, 16#18}; +dec_huffman_lookup(16#42, 16#d) -> {more, 16#78, 16#1f}; +dec_huffman_lookup(16#42, 16#e) -> {more, 16#78, 16#29}; +dec_huffman_lookup(16#42, 16#f) -> {ok, 16#78, 16#38}; +dec_huffman_lookup(16#43, 16#0) -> {more, 16#79, 16#03}; +dec_huffman_lookup(16#43, 16#1) -> {more, 16#79, 16#06}; +dec_huffman_lookup(16#43, 16#2) -> {more, 16#79, 16#0a}; +dec_huffman_lookup(16#43, 16#3) -> {more, 16#79, 16#0f}; +dec_huffman_lookup(16#43, 16#4) -> {more, 16#79, 16#18}; +dec_huffman_lookup(16#43, 16#5) -> {more, 16#79, 16#1f}; +dec_huffman_lookup(16#43, 16#6) -> {more, 16#79, 16#29}; +dec_huffman_lookup(16#43, 16#7) -> {ok, 16#79, 16#38}; +dec_huffman_lookup(16#43, 16#8) -> {more, 16#7a, 16#03}; +dec_huffman_lookup(16#43, 16#9) -> {more, 16#7a, 16#06}; +dec_huffman_lookup(16#43, 16#a) -> {more, 16#7a, 16#0a}; +dec_huffman_lookup(16#43, 16#b) -> {more, 16#7a, 16#0f}; +dec_huffman_lookup(16#43, 16#c) -> {more, 16#7a, 16#18}; +dec_huffman_lookup(16#43, 16#d) -> {more, 16#7a, 16#1f}; +dec_huffman_lookup(16#43, 16#e) -> {more, 16#7a, 16#29}; +dec_huffman_lookup(16#43, 16#f) -> {ok, 16#7a, 16#38}; +dec_huffman_lookup(16#44, 16#0) -> {more, 16#26, 16#01}; +dec_huffman_lookup(16#44, 16#1) -> {ok, 16#26, 16#16}; +dec_huffman_lookup(16#44, 16#2) -> {more, 16#2a, 16#01}; +dec_huffman_lookup(16#44, 16#3) -> {ok, 16#2a, 16#16}; +dec_huffman_lookup(16#44, 16#4) -> {more, 16#2c, 16#01}; +dec_huffman_lookup(16#44, 16#5) -> {ok, 16#2c, 16#16}; +dec_huffman_lookup(16#44, 16#6) -> {more, 16#3b, 16#01}; +dec_huffman_lookup(16#44, 16#7) -> {ok, 16#3b, 16#16}; +dec_huffman_lookup(16#44, 16#8) -> {more, 16#58, 16#01}; +dec_huffman_lookup(16#44, 16#9) -> {ok, 16#58, 16#16}; +dec_huffman_lookup(16#44, 16#a) -> {more, 16#5a, 16#01}; +dec_huffman_lookup(16#44, 16#b) -> {ok, 16#5a, 16#16}; 
+dec_huffman_lookup(16#44, 16#c) -> {more, undefined, 16#4c}; +dec_huffman_lookup(16#44, 16#d) -> {more, undefined, 16#4d}; +dec_huffman_lookup(16#44, 16#e) -> {more, undefined, 16#4f}; +dec_huffman_lookup(16#44, 16#f) -> {ok, undefined, 16#51}; +dec_huffman_lookup(16#45, 16#0) -> {more, 16#26, 16#02}; +dec_huffman_lookup(16#45, 16#1) -> {more, 16#26, 16#09}; +dec_huffman_lookup(16#45, 16#2) -> {more, 16#26, 16#17}; +dec_huffman_lookup(16#45, 16#3) -> {ok, 16#26, 16#28}; +dec_huffman_lookup(16#45, 16#4) -> {more, 16#2a, 16#02}; +dec_huffman_lookup(16#45, 16#5) -> {more, 16#2a, 16#09}; +dec_huffman_lookup(16#45, 16#6) -> {more, 16#2a, 16#17}; +dec_huffman_lookup(16#45, 16#7) -> {ok, 16#2a, 16#28}; +dec_huffman_lookup(16#45, 16#8) -> {more, 16#2c, 16#02}; +dec_huffman_lookup(16#45, 16#9) -> {more, 16#2c, 16#09}; +dec_huffman_lookup(16#45, 16#a) -> {more, 16#2c, 16#17}; +dec_huffman_lookup(16#45, 16#b) -> {ok, 16#2c, 16#28}; +dec_huffman_lookup(16#45, 16#c) -> {more, 16#3b, 16#02}; +dec_huffman_lookup(16#45, 16#d) -> {more, 16#3b, 16#09}; +dec_huffman_lookup(16#45, 16#e) -> {more, 16#3b, 16#17}; +dec_huffman_lookup(16#45, 16#f) -> {ok, 16#3b, 16#28}; +dec_huffman_lookup(16#46, 16#0) -> {more, 16#26, 16#03}; +dec_huffman_lookup(16#46, 16#1) -> {more, 16#26, 16#06}; +dec_huffman_lookup(16#46, 16#2) -> {more, 16#26, 16#0a}; +dec_huffman_lookup(16#46, 16#3) -> {more, 16#26, 16#0f}; +dec_huffman_lookup(16#46, 16#4) -> {more, 16#26, 16#18}; +dec_huffman_lookup(16#46, 16#5) -> {more, 16#26, 16#1f}; +dec_huffman_lookup(16#46, 16#6) -> {more, 16#26, 16#29}; +dec_huffman_lookup(16#46, 16#7) -> {ok, 16#26, 16#38}; +dec_huffman_lookup(16#46, 16#8) -> {more, 16#2a, 16#03}; +dec_huffman_lookup(16#46, 16#9) -> {more, 16#2a, 16#06}; +dec_huffman_lookup(16#46, 16#a) -> {more, 16#2a, 16#0a}; +dec_huffman_lookup(16#46, 16#b) -> {more, 16#2a, 16#0f}; +dec_huffman_lookup(16#46, 16#c) -> {more, 16#2a, 16#18}; +dec_huffman_lookup(16#46, 16#d) -> {more, 16#2a, 16#1f}; +dec_huffman_lookup(16#46, 16#e) -> {more, 16#2a, 16#29}; +dec_huffman_lookup(16#46, 16#f) -> {ok, 16#2a, 16#38}; +dec_huffman_lookup(16#47, 16#0) -> {more, 16#2c, 16#03}; +dec_huffman_lookup(16#47, 16#1) -> {more, 16#2c, 16#06}; +dec_huffman_lookup(16#47, 16#2) -> {more, 16#2c, 16#0a}; +dec_huffman_lookup(16#47, 16#3) -> {more, 16#2c, 16#0f}; +dec_huffman_lookup(16#47, 16#4) -> {more, 16#2c, 16#18}; +dec_huffman_lookup(16#47, 16#5) -> {more, 16#2c, 16#1f}; +dec_huffman_lookup(16#47, 16#6) -> {more, 16#2c, 16#29}; +dec_huffman_lookup(16#47, 16#7) -> {ok, 16#2c, 16#38}; +dec_huffman_lookup(16#47, 16#8) -> {more, 16#3b, 16#03}; +dec_huffman_lookup(16#47, 16#9) -> {more, 16#3b, 16#06}; +dec_huffman_lookup(16#47, 16#a) -> {more, 16#3b, 16#0a}; +dec_huffman_lookup(16#47, 16#b) -> {more, 16#3b, 16#0f}; +dec_huffman_lookup(16#47, 16#c) -> {more, 16#3b, 16#18}; +dec_huffman_lookup(16#47, 16#d) -> {more, 16#3b, 16#1f}; +dec_huffman_lookup(16#47, 16#e) -> {more, 16#3b, 16#29}; +dec_huffman_lookup(16#47, 16#f) -> {ok, 16#3b, 16#38}; +dec_huffman_lookup(16#48, 16#0) -> {more, 16#58, 16#02}; +dec_huffman_lookup(16#48, 16#1) -> {more, 16#58, 16#09}; +dec_huffman_lookup(16#48, 16#2) -> {more, 16#58, 16#17}; +dec_huffman_lookup(16#48, 16#3) -> {ok, 16#58, 16#28}; +dec_huffman_lookup(16#48, 16#4) -> {more, 16#5a, 16#02}; +dec_huffman_lookup(16#48, 16#5) -> {more, 16#5a, 16#09}; +dec_huffman_lookup(16#48, 16#6) -> {more, 16#5a, 16#17}; +dec_huffman_lookup(16#48, 16#7) -> {ok, 16#5a, 16#28}; +dec_huffman_lookup(16#48, 16#8) -> {ok, 16#21, 16#00}; 
+dec_huffman_lookup(16#48, 16#9) -> {ok, 16#22, 16#00}; +dec_huffman_lookup(16#48, 16#a) -> {ok, 16#28, 16#00}; +dec_huffman_lookup(16#48, 16#b) -> {ok, 16#29, 16#00}; +dec_huffman_lookup(16#48, 16#c) -> {ok, 16#3f, 16#00}; +dec_huffman_lookup(16#48, 16#d) -> {more, undefined, 16#50}; +dec_huffman_lookup(16#48, 16#e) -> {more, undefined, 16#52}; +dec_huffman_lookup(16#48, 16#f) -> {ok, undefined, 16#54}; +dec_huffman_lookup(16#49, 16#0) -> {more, 16#58, 16#03}; +dec_huffman_lookup(16#49, 16#1) -> {more, 16#58, 16#06}; +dec_huffman_lookup(16#49, 16#2) -> {more, 16#58, 16#0a}; +dec_huffman_lookup(16#49, 16#3) -> {more, 16#58, 16#0f}; +dec_huffman_lookup(16#49, 16#4) -> {more, 16#58, 16#18}; +dec_huffman_lookup(16#49, 16#5) -> {more, 16#58, 16#1f}; +dec_huffman_lookup(16#49, 16#6) -> {more, 16#58, 16#29}; +dec_huffman_lookup(16#49, 16#7) -> {ok, 16#58, 16#38}; +dec_huffman_lookup(16#49, 16#8) -> {more, 16#5a, 16#03}; +dec_huffman_lookup(16#49, 16#9) -> {more, 16#5a, 16#06}; +dec_huffman_lookup(16#49, 16#a) -> {more, 16#5a, 16#0a}; +dec_huffman_lookup(16#49, 16#b) -> {more, 16#5a, 16#0f}; +dec_huffman_lookup(16#49, 16#c) -> {more, 16#5a, 16#18}; +dec_huffman_lookup(16#49, 16#d) -> {more, 16#5a, 16#1f}; +dec_huffman_lookup(16#49, 16#e) -> {more, 16#5a, 16#29}; +dec_huffman_lookup(16#49, 16#f) -> {ok, 16#5a, 16#38}; +dec_huffman_lookup(16#4a, 16#0) -> {more, 16#21, 16#01}; +dec_huffman_lookup(16#4a, 16#1) -> {ok, 16#21, 16#16}; +dec_huffman_lookup(16#4a, 16#2) -> {more, 16#22, 16#01}; +dec_huffman_lookup(16#4a, 16#3) -> {ok, 16#22, 16#16}; +dec_huffman_lookup(16#4a, 16#4) -> {more, 16#28, 16#01}; +dec_huffman_lookup(16#4a, 16#5) -> {ok, 16#28, 16#16}; +dec_huffman_lookup(16#4a, 16#6) -> {more, 16#29, 16#01}; +dec_huffman_lookup(16#4a, 16#7) -> {ok, 16#29, 16#16}; +dec_huffman_lookup(16#4a, 16#8) -> {more, 16#3f, 16#01}; +dec_huffman_lookup(16#4a, 16#9) -> {ok, 16#3f, 16#16}; +dec_huffman_lookup(16#4a, 16#a) -> {ok, 16#27, 16#00}; +dec_huffman_lookup(16#4a, 16#b) -> {ok, 16#2b, 16#00}; +dec_huffman_lookup(16#4a, 16#c) -> {ok, 16#7c, 16#00}; +dec_huffman_lookup(16#4a, 16#d) -> {more, undefined, 16#53}; +dec_huffman_lookup(16#4a, 16#e) -> {more, undefined, 16#55}; +dec_huffman_lookup(16#4a, 16#f) -> {ok, undefined, 16#58}; +dec_huffman_lookup(16#4b, 16#0) -> {more, 16#21, 16#02}; +dec_huffman_lookup(16#4b, 16#1) -> {more, 16#21, 16#09}; +dec_huffman_lookup(16#4b, 16#2) -> {more, 16#21, 16#17}; +dec_huffman_lookup(16#4b, 16#3) -> {ok, 16#21, 16#28}; +dec_huffman_lookup(16#4b, 16#4) -> {more, 16#22, 16#02}; +dec_huffman_lookup(16#4b, 16#5) -> {more, 16#22, 16#09}; +dec_huffman_lookup(16#4b, 16#6) -> {more, 16#22, 16#17}; +dec_huffman_lookup(16#4b, 16#7) -> {ok, 16#22, 16#28}; +dec_huffman_lookup(16#4b, 16#8) -> {more, 16#28, 16#02}; +dec_huffman_lookup(16#4b, 16#9) -> {more, 16#28, 16#09}; +dec_huffman_lookup(16#4b, 16#a) -> {more, 16#28, 16#17}; +dec_huffman_lookup(16#4b, 16#b) -> {ok, 16#28, 16#28}; +dec_huffman_lookup(16#4b, 16#c) -> {more, 16#29, 16#02}; +dec_huffman_lookup(16#4b, 16#d) -> {more, 16#29, 16#09}; +dec_huffman_lookup(16#4b, 16#e) -> {more, 16#29, 16#17}; +dec_huffman_lookup(16#4b, 16#f) -> {ok, 16#29, 16#28}; +dec_huffman_lookup(16#4c, 16#0) -> {more, 16#21, 16#03}; +dec_huffman_lookup(16#4c, 16#1) -> {more, 16#21, 16#06}; +dec_huffman_lookup(16#4c, 16#2) -> {more, 16#21, 16#0a}; +dec_huffman_lookup(16#4c, 16#3) -> {more, 16#21, 16#0f}; +dec_huffman_lookup(16#4c, 16#4) -> {more, 16#21, 16#18}; +dec_huffman_lookup(16#4c, 16#5) -> {more, 16#21, 16#1f}; +dec_huffman_lookup(16#4c, 16#6) 
-> {more, 16#21, 16#29}; +dec_huffman_lookup(16#4c, 16#7) -> {ok, 16#21, 16#38}; +dec_huffman_lookup(16#4c, 16#8) -> {more, 16#22, 16#03}; +dec_huffman_lookup(16#4c, 16#9) -> {more, 16#22, 16#06}; +dec_huffman_lookup(16#4c, 16#a) -> {more, 16#22, 16#0a}; +dec_huffman_lookup(16#4c, 16#b) -> {more, 16#22, 16#0f}; +dec_huffman_lookup(16#4c, 16#c) -> {more, 16#22, 16#18}; +dec_huffman_lookup(16#4c, 16#d) -> {more, 16#22, 16#1f}; +dec_huffman_lookup(16#4c, 16#e) -> {more, 16#22, 16#29}; +dec_huffman_lookup(16#4c, 16#f) -> {ok, 16#22, 16#38}; +dec_huffman_lookup(16#4d, 16#0) -> {more, 16#28, 16#03}; +dec_huffman_lookup(16#4d, 16#1) -> {more, 16#28, 16#06}; +dec_huffman_lookup(16#4d, 16#2) -> {more, 16#28, 16#0a}; +dec_huffman_lookup(16#4d, 16#3) -> {more, 16#28, 16#0f}; +dec_huffman_lookup(16#4d, 16#4) -> {more, 16#28, 16#18}; +dec_huffman_lookup(16#4d, 16#5) -> {more, 16#28, 16#1f}; +dec_huffman_lookup(16#4d, 16#6) -> {more, 16#28, 16#29}; +dec_huffman_lookup(16#4d, 16#7) -> {ok, 16#28, 16#38}; +dec_huffman_lookup(16#4d, 16#8) -> {more, 16#29, 16#03}; +dec_huffman_lookup(16#4d, 16#9) -> {more, 16#29, 16#06}; +dec_huffman_lookup(16#4d, 16#a) -> {more, 16#29, 16#0a}; +dec_huffman_lookup(16#4d, 16#b) -> {more, 16#29, 16#0f}; +dec_huffman_lookup(16#4d, 16#c) -> {more, 16#29, 16#18}; +dec_huffman_lookup(16#4d, 16#d) -> {more, 16#29, 16#1f}; +dec_huffman_lookup(16#4d, 16#e) -> {more, 16#29, 16#29}; +dec_huffman_lookup(16#4d, 16#f) -> {ok, 16#29, 16#38}; +dec_huffman_lookup(16#4e, 16#0) -> {more, 16#3f, 16#02}; +dec_huffman_lookup(16#4e, 16#1) -> {more, 16#3f, 16#09}; +dec_huffman_lookup(16#4e, 16#2) -> {more, 16#3f, 16#17}; +dec_huffman_lookup(16#4e, 16#3) -> {ok, 16#3f, 16#28}; +dec_huffman_lookup(16#4e, 16#4) -> {more, 16#27, 16#01}; +dec_huffman_lookup(16#4e, 16#5) -> {ok, 16#27, 16#16}; +dec_huffman_lookup(16#4e, 16#6) -> {more, 16#2b, 16#01}; +dec_huffman_lookup(16#4e, 16#7) -> {ok, 16#2b, 16#16}; +dec_huffman_lookup(16#4e, 16#8) -> {more, 16#7c, 16#01}; +dec_huffman_lookup(16#4e, 16#9) -> {ok, 16#7c, 16#16}; +dec_huffman_lookup(16#4e, 16#a) -> {ok, 16#23, 16#00}; +dec_huffman_lookup(16#4e, 16#b) -> {ok, 16#3e, 16#00}; +dec_huffman_lookup(16#4e, 16#c) -> {more, undefined, 16#56}; +dec_huffman_lookup(16#4e, 16#d) -> {more, undefined, 16#57}; +dec_huffman_lookup(16#4e, 16#e) -> {more, undefined, 16#59}; +dec_huffman_lookup(16#4e, 16#f) -> {ok, undefined, 16#5a}; +dec_huffman_lookup(16#4f, 16#0) -> {more, 16#3f, 16#03}; +dec_huffman_lookup(16#4f, 16#1) -> {more, 16#3f, 16#06}; +dec_huffman_lookup(16#4f, 16#2) -> {more, 16#3f, 16#0a}; +dec_huffman_lookup(16#4f, 16#3) -> {more, 16#3f, 16#0f}; +dec_huffman_lookup(16#4f, 16#4) -> {more, 16#3f, 16#18}; +dec_huffman_lookup(16#4f, 16#5) -> {more, 16#3f, 16#1f}; +dec_huffman_lookup(16#4f, 16#6) -> {more, 16#3f, 16#29}; +dec_huffman_lookup(16#4f, 16#7) -> {ok, 16#3f, 16#38}; +dec_huffman_lookup(16#4f, 16#8) -> {more, 16#27, 16#02}; +dec_huffman_lookup(16#4f, 16#9) -> {more, 16#27, 16#09}; +dec_huffman_lookup(16#4f, 16#a) -> {more, 16#27, 16#17}; +dec_huffman_lookup(16#4f, 16#b) -> {ok, 16#27, 16#28}; +dec_huffman_lookup(16#4f, 16#c) -> {more, 16#2b, 16#02}; +dec_huffman_lookup(16#4f, 16#d) -> {more, 16#2b, 16#09}; +dec_huffman_lookup(16#4f, 16#e) -> {more, 16#2b, 16#17}; +dec_huffman_lookup(16#4f, 16#f) -> {ok, 16#2b, 16#28}; +dec_huffman_lookup(16#50, 16#0) -> {more, 16#27, 16#03}; +dec_huffman_lookup(16#50, 16#1) -> {more, 16#27, 16#06}; +dec_huffman_lookup(16#50, 16#2) -> {more, 16#27, 16#0a}; +dec_huffman_lookup(16#50, 16#3) -> {more, 16#27, 16#0f}; 
+dec_huffman_lookup(16#50, 16#4) -> {more, 16#27, 16#18}; +dec_huffman_lookup(16#50, 16#5) -> {more, 16#27, 16#1f}; +dec_huffman_lookup(16#50, 16#6) -> {more, 16#27, 16#29}; +dec_huffman_lookup(16#50, 16#7) -> {ok, 16#27, 16#38}; +dec_huffman_lookup(16#50, 16#8) -> {more, 16#2b, 16#03}; +dec_huffman_lookup(16#50, 16#9) -> {more, 16#2b, 16#06}; +dec_huffman_lookup(16#50, 16#a) -> {more, 16#2b, 16#0a}; +dec_huffman_lookup(16#50, 16#b) -> {more, 16#2b, 16#0f}; +dec_huffman_lookup(16#50, 16#c) -> {more, 16#2b, 16#18}; +dec_huffman_lookup(16#50, 16#d) -> {more, 16#2b, 16#1f}; +dec_huffman_lookup(16#50, 16#e) -> {more, 16#2b, 16#29}; +dec_huffman_lookup(16#50, 16#f) -> {ok, 16#2b, 16#38}; +dec_huffman_lookup(16#51, 16#0) -> {more, 16#7c, 16#02}; +dec_huffman_lookup(16#51, 16#1) -> {more, 16#7c, 16#09}; +dec_huffman_lookup(16#51, 16#2) -> {more, 16#7c, 16#17}; +dec_huffman_lookup(16#51, 16#3) -> {ok, 16#7c, 16#28}; +dec_huffman_lookup(16#51, 16#4) -> {more, 16#23, 16#01}; +dec_huffman_lookup(16#51, 16#5) -> {ok, 16#23, 16#16}; +dec_huffman_lookup(16#51, 16#6) -> {more, 16#3e, 16#01}; +dec_huffman_lookup(16#51, 16#7) -> {ok, 16#3e, 16#16}; +dec_huffman_lookup(16#51, 16#8) -> {ok, 16#00, 16#00}; +dec_huffman_lookup(16#51, 16#9) -> {ok, 16#24, 16#00}; +dec_huffman_lookup(16#51, 16#a) -> {ok, 16#40, 16#00}; +dec_huffman_lookup(16#51, 16#b) -> {ok, 16#5b, 16#00}; +dec_huffman_lookup(16#51, 16#c) -> {ok, 16#5d, 16#00}; +dec_huffman_lookup(16#51, 16#d) -> {ok, 16#7e, 16#00}; +dec_huffman_lookup(16#51, 16#e) -> {more, undefined, 16#5b}; +dec_huffman_lookup(16#51, 16#f) -> {ok, undefined, 16#5c}; +dec_huffman_lookup(16#52, 16#0) -> {more, 16#7c, 16#03}; +dec_huffman_lookup(16#52, 16#1) -> {more, 16#7c, 16#06}; +dec_huffman_lookup(16#52, 16#2) -> {more, 16#7c, 16#0a}; +dec_huffman_lookup(16#52, 16#3) -> {more, 16#7c, 16#0f}; +dec_huffman_lookup(16#52, 16#4) -> {more, 16#7c, 16#18}; +dec_huffman_lookup(16#52, 16#5) -> {more, 16#7c, 16#1f}; +dec_huffman_lookup(16#52, 16#6) -> {more, 16#7c, 16#29}; +dec_huffman_lookup(16#52, 16#7) -> {ok, 16#7c, 16#38}; +dec_huffman_lookup(16#52, 16#8) -> {more, 16#23, 16#02}; +dec_huffman_lookup(16#52, 16#9) -> {more, 16#23, 16#09}; +dec_huffman_lookup(16#52, 16#a) -> {more, 16#23, 16#17}; +dec_huffman_lookup(16#52, 16#b) -> {ok, 16#23, 16#28}; +dec_huffman_lookup(16#52, 16#c) -> {more, 16#3e, 16#02}; +dec_huffman_lookup(16#52, 16#d) -> {more, 16#3e, 16#09}; +dec_huffman_lookup(16#52, 16#e) -> {more, 16#3e, 16#17}; +dec_huffman_lookup(16#52, 16#f) -> {ok, 16#3e, 16#28}; +dec_huffman_lookup(16#53, 16#0) -> {more, 16#23, 16#03}; +dec_huffman_lookup(16#53, 16#1) -> {more, 16#23, 16#06}; +dec_huffman_lookup(16#53, 16#2) -> {more, 16#23, 16#0a}; +dec_huffman_lookup(16#53, 16#3) -> {more, 16#23, 16#0f}; +dec_huffman_lookup(16#53, 16#4) -> {more, 16#23, 16#18}; +dec_huffman_lookup(16#53, 16#5) -> {more, 16#23, 16#1f}; +dec_huffman_lookup(16#53, 16#6) -> {more, 16#23, 16#29}; +dec_huffman_lookup(16#53, 16#7) -> {ok, 16#23, 16#38}; +dec_huffman_lookup(16#53, 16#8) -> {more, 16#3e, 16#03}; +dec_huffman_lookup(16#53, 16#9) -> {more, 16#3e, 16#06}; +dec_huffman_lookup(16#53, 16#a) -> {more, 16#3e, 16#0a}; +dec_huffman_lookup(16#53, 16#b) -> {more, 16#3e, 16#0f}; +dec_huffman_lookup(16#53, 16#c) -> {more, 16#3e, 16#18}; +dec_huffman_lookup(16#53, 16#d) -> {more, 16#3e, 16#1f}; +dec_huffman_lookup(16#53, 16#e) -> {more, 16#3e, 16#29}; +dec_huffman_lookup(16#53, 16#f) -> {ok, 16#3e, 16#38}; +dec_huffman_lookup(16#54, 16#0) -> {more, 16#00, 16#01}; +dec_huffman_lookup(16#54, 16#1) -> {ok, 
16#00, 16#16}; +dec_huffman_lookup(16#54, 16#2) -> {more, 16#24, 16#01}; +dec_huffman_lookup(16#54, 16#3) -> {ok, 16#24, 16#16}; +dec_huffman_lookup(16#54, 16#4) -> {more, 16#40, 16#01}; +dec_huffman_lookup(16#54, 16#5) -> {ok, 16#40, 16#16}; +dec_huffman_lookup(16#54, 16#6) -> {more, 16#5b, 16#01}; +dec_huffman_lookup(16#54, 16#7) -> {ok, 16#5b, 16#16}; +dec_huffman_lookup(16#54, 16#8) -> {more, 16#5d, 16#01}; +dec_huffman_lookup(16#54, 16#9) -> {ok, 16#5d, 16#16}; +dec_huffman_lookup(16#54, 16#a) -> {more, 16#7e, 16#01}; +dec_huffman_lookup(16#54, 16#b) -> {ok, 16#7e, 16#16}; +dec_huffman_lookup(16#54, 16#c) -> {ok, 16#5e, 16#00}; +dec_huffman_lookup(16#54, 16#d) -> {ok, 16#7d, 16#00}; +dec_huffman_lookup(16#54, 16#e) -> {more, undefined, 16#5d}; +dec_huffman_lookup(16#54, 16#f) -> {ok, undefined, 16#5e}; +dec_huffman_lookup(16#55, 16#0) -> {more, 16#00, 16#02}; +dec_huffman_lookup(16#55, 16#1) -> {more, 16#00, 16#09}; +dec_huffman_lookup(16#55, 16#2) -> {more, 16#00, 16#17}; +dec_huffman_lookup(16#55, 16#3) -> {ok, 16#00, 16#28}; +dec_huffman_lookup(16#55, 16#4) -> {more, 16#24, 16#02}; +dec_huffman_lookup(16#55, 16#5) -> {more, 16#24, 16#09}; +dec_huffman_lookup(16#55, 16#6) -> {more, 16#24, 16#17}; +dec_huffman_lookup(16#55, 16#7) -> {ok, 16#24, 16#28}; +dec_huffman_lookup(16#55, 16#8) -> {more, 16#40, 16#02}; +dec_huffman_lookup(16#55, 16#9) -> {more, 16#40, 16#09}; +dec_huffman_lookup(16#55, 16#a) -> {more, 16#40, 16#17}; +dec_huffman_lookup(16#55, 16#b) -> {ok, 16#40, 16#28}; +dec_huffman_lookup(16#55, 16#c) -> {more, 16#5b, 16#02}; +dec_huffman_lookup(16#55, 16#d) -> {more, 16#5b, 16#09}; +dec_huffman_lookup(16#55, 16#e) -> {more, 16#5b, 16#17}; +dec_huffman_lookup(16#55, 16#f) -> {ok, 16#5b, 16#28}; +dec_huffman_lookup(16#56, 16#0) -> {more, 16#00, 16#03}; +dec_huffman_lookup(16#56, 16#1) -> {more, 16#00, 16#06}; +dec_huffman_lookup(16#56, 16#2) -> {more, 16#00, 16#0a}; +dec_huffman_lookup(16#56, 16#3) -> {more, 16#00, 16#0f}; +dec_huffman_lookup(16#56, 16#4) -> {more, 16#00, 16#18}; +dec_huffman_lookup(16#56, 16#5) -> {more, 16#00, 16#1f}; +dec_huffman_lookup(16#56, 16#6) -> {more, 16#00, 16#29}; +dec_huffman_lookup(16#56, 16#7) -> {ok, 16#00, 16#38}; +dec_huffman_lookup(16#56, 16#8) -> {more, 16#24, 16#03}; +dec_huffman_lookup(16#56, 16#9) -> {more, 16#24, 16#06}; +dec_huffman_lookup(16#56, 16#a) -> {more, 16#24, 16#0a}; +dec_huffman_lookup(16#56, 16#b) -> {more, 16#24, 16#0f}; +dec_huffman_lookup(16#56, 16#c) -> {more, 16#24, 16#18}; +dec_huffman_lookup(16#56, 16#d) -> {more, 16#24, 16#1f}; +dec_huffman_lookup(16#56, 16#e) -> {more, 16#24, 16#29}; +dec_huffman_lookup(16#56, 16#f) -> {ok, 16#24, 16#38}; +dec_huffman_lookup(16#57, 16#0) -> {more, 16#40, 16#03}; +dec_huffman_lookup(16#57, 16#1) -> {more, 16#40, 16#06}; +dec_huffman_lookup(16#57, 16#2) -> {more, 16#40, 16#0a}; +dec_huffman_lookup(16#57, 16#3) -> {more, 16#40, 16#0f}; +dec_huffman_lookup(16#57, 16#4) -> {more, 16#40, 16#18}; +dec_huffman_lookup(16#57, 16#5) -> {more, 16#40, 16#1f}; +dec_huffman_lookup(16#57, 16#6) -> {more, 16#40, 16#29}; +dec_huffman_lookup(16#57, 16#7) -> {ok, 16#40, 16#38}; +dec_huffman_lookup(16#57, 16#8) -> {more, 16#5b, 16#03}; +dec_huffman_lookup(16#57, 16#9) -> {more, 16#5b, 16#06}; +dec_huffman_lookup(16#57, 16#a) -> {more, 16#5b, 16#0a}; +dec_huffman_lookup(16#57, 16#b) -> {more, 16#5b, 16#0f}; +dec_huffman_lookup(16#57, 16#c) -> {more, 16#5b, 16#18}; +dec_huffman_lookup(16#57, 16#d) -> {more, 16#5b, 16#1f}; +dec_huffman_lookup(16#57, 16#e) -> {more, 16#5b, 16#29}; 
+dec_huffman_lookup(16#57, 16#f) -> {ok, 16#5b, 16#38}; +dec_huffman_lookup(16#58, 16#0) -> {more, 16#5d, 16#02}; +dec_huffman_lookup(16#58, 16#1) -> {more, 16#5d, 16#09}; +dec_huffman_lookup(16#58, 16#2) -> {more, 16#5d, 16#17}; +dec_huffman_lookup(16#58, 16#3) -> {ok, 16#5d, 16#28}; +dec_huffman_lookup(16#58, 16#4) -> {more, 16#7e, 16#02}; +dec_huffman_lookup(16#58, 16#5) -> {more, 16#7e, 16#09}; +dec_huffman_lookup(16#58, 16#6) -> {more, 16#7e, 16#17}; +dec_huffman_lookup(16#58, 16#7) -> {ok, 16#7e, 16#28}; +dec_huffman_lookup(16#58, 16#8) -> {more, 16#5e, 16#01}; +dec_huffman_lookup(16#58, 16#9) -> {ok, 16#5e, 16#16}; +dec_huffman_lookup(16#58, 16#a) -> {more, 16#7d, 16#01}; +dec_huffman_lookup(16#58, 16#b) -> {ok, 16#7d, 16#16}; +dec_huffman_lookup(16#58, 16#c) -> {ok, 16#3c, 16#00}; +dec_huffman_lookup(16#58, 16#d) -> {ok, 16#60, 16#00}; +dec_huffman_lookup(16#58, 16#e) -> {ok, 16#7b, 16#00}; +dec_huffman_lookup(16#58, 16#f) -> {ok, undefined, 16#5f}; +dec_huffman_lookup(16#59, 16#0) -> {more, 16#5d, 16#03}; +dec_huffman_lookup(16#59, 16#1) -> {more, 16#5d, 16#06}; +dec_huffman_lookup(16#59, 16#2) -> {more, 16#5d, 16#0a}; +dec_huffman_lookup(16#59, 16#3) -> {more, 16#5d, 16#0f}; +dec_huffman_lookup(16#59, 16#4) -> {more, 16#5d, 16#18}; +dec_huffman_lookup(16#59, 16#5) -> {more, 16#5d, 16#1f}; +dec_huffman_lookup(16#59, 16#6) -> {more, 16#5d, 16#29}; +dec_huffman_lookup(16#59, 16#7) -> {ok, 16#5d, 16#38}; +dec_huffman_lookup(16#59, 16#8) -> {more, 16#7e, 16#03}; +dec_huffman_lookup(16#59, 16#9) -> {more, 16#7e, 16#06}; +dec_huffman_lookup(16#59, 16#a) -> {more, 16#7e, 16#0a}; +dec_huffman_lookup(16#59, 16#b) -> {more, 16#7e, 16#0f}; +dec_huffman_lookup(16#59, 16#c) -> {more, 16#7e, 16#18}; +dec_huffman_lookup(16#59, 16#d) -> {more, 16#7e, 16#1f}; +dec_huffman_lookup(16#59, 16#e) -> {more, 16#7e, 16#29}; +dec_huffman_lookup(16#59, 16#f) -> {ok, 16#7e, 16#38}; +dec_huffman_lookup(16#5a, 16#0) -> {more, 16#5e, 16#02}; +dec_huffman_lookup(16#5a, 16#1) -> {more, 16#5e, 16#09}; +dec_huffman_lookup(16#5a, 16#2) -> {more, 16#5e, 16#17}; +dec_huffman_lookup(16#5a, 16#3) -> {ok, 16#5e, 16#28}; +dec_huffman_lookup(16#5a, 16#4) -> {more, 16#7d, 16#02}; +dec_huffman_lookup(16#5a, 16#5) -> {more, 16#7d, 16#09}; +dec_huffman_lookup(16#5a, 16#6) -> {more, 16#7d, 16#17}; +dec_huffman_lookup(16#5a, 16#7) -> {ok, 16#7d, 16#28}; +dec_huffman_lookup(16#5a, 16#8) -> {more, 16#3c, 16#01}; +dec_huffman_lookup(16#5a, 16#9) -> {ok, 16#3c, 16#16}; +dec_huffman_lookup(16#5a, 16#a) -> {more, 16#60, 16#01}; +dec_huffman_lookup(16#5a, 16#b) -> {ok, 16#60, 16#16}; +dec_huffman_lookup(16#5a, 16#c) -> {more, 16#7b, 16#01}; +dec_huffman_lookup(16#5a, 16#d) -> {ok, 16#7b, 16#16}; +dec_huffman_lookup(16#5a, 16#e) -> {more, undefined, 16#60}; +dec_huffman_lookup(16#5a, 16#f) -> {ok, undefined, 16#6e}; +dec_huffman_lookup(16#5b, 16#0) -> {more, 16#5e, 16#03}; +dec_huffman_lookup(16#5b, 16#1) -> {more, 16#5e, 16#06}; +dec_huffman_lookup(16#5b, 16#2) -> {more, 16#5e, 16#0a}; +dec_huffman_lookup(16#5b, 16#3) -> {more, 16#5e, 16#0f}; +dec_huffman_lookup(16#5b, 16#4) -> {more, 16#5e, 16#18}; +dec_huffman_lookup(16#5b, 16#5) -> {more, 16#5e, 16#1f}; +dec_huffman_lookup(16#5b, 16#6) -> {more, 16#5e, 16#29}; +dec_huffman_lookup(16#5b, 16#7) -> {ok, 16#5e, 16#38}; +dec_huffman_lookup(16#5b, 16#8) -> {more, 16#7d, 16#03}; +dec_huffman_lookup(16#5b, 16#9) -> {more, 16#7d, 16#06}; +dec_huffman_lookup(16#5b, 16#a) -> {more, 16#7d, 16#0a}; +dec_huffman_lookup(16#5b, 16#b) -> {more, 16#7d, 16#0f}; +dec_huffman_lookup(16#5b, 16#c) -> 
{more, 16#7d, 16#18}; +dec_huffman_lookup(16#5b, 16#d) -> {more, 16#7d, 16#1f}; +dec_huffman_lookup(16#5b, 16#e) -> {more, 16#7d, 16#29}; +dec_huffman_lookup(16#5b, 16#f) -> {ok, 16#7d, 16#38}; +dec_huffman_lookup(16#5c, 16#0) -> {more, 16#3c, 16#02}; +dec_huffman_lookup(16#5c, 16#1) -> {more, 16#3c, 16#09}; +dec_huffman_lookup(16#5c, 16#2) -> {more, 16#3c, 16#17}; +dec_huffman_lookup(16#5c, 16#3) -> {ok, 16#3c, 16#28}; +dec_huffman_lookup(16#5c, 16#4) -> {more, 16#60, 16#02}; +dec_huffman_lookup(16#5c, 16#5) -> {more, 16#60, 16#09}; +dec_huffman_lookup(16#5c, 16#6) -> {more, 16#60, 16#17}; +dec_huffman_lookup(16#5c, 16#7) -> {ok, 16#60, 16#28}; +dec_huffman_lookup(16#5c, 16#8) -> {more, 16#7b, 16#02}; +dec_huffman_lookup(16#5c, 16#9) -> {more, 16#7b, 16#09}; +dec_huffman_lookup(16#5c, 16#a) -> {more, 16#7b, 16#17}; +dec_huffman_lookup(16#5c, 16#b) -> {ok, 16#7b, 16#28}; +dec_huffman_lookup(16#5c, 16#c) -> {more, undefined, 16#61}; +dec_huffman_lookup(16#5c, 16#d) -> {more, undefined, 16#65}; +dec_huffman_lookup(16#5c, 16#e) -> {more, undefined, 16#6f}; +dec_huffman_lookup(16#5c, 16#f) -> {ok, undefined, 16#85}; +dec_huffman_lookup(16#5d, 16#0) -> {more, 16#3c, 16#03}; +dec_huffman_lookup(16#5d, 16#1) -> {more, 16#3c, 16#06}; +dec_huffman_lookup(16#5d, 16#2) -> {more, 16#3c, 16#0a}; +dec_huffman_lookup(16#5d, 16#3) -> {more, 16#3c, 16#0f}; +dec_huffman_lookup(16#5d, 16#4) -> {more, 16#3c, 16#18}; +dec_huffman_lookup(16#5d, 16#5) -> {more, 16#3c, 16#1f}; +dec_huffman_lookup(16#5d, 16#6) -> {more, 16#3c, 16#29}; +dec_huffman_lookup(16#5d, 16#7) -> {ok, 16#3c, 16#38}; +dec_huffman_lookup(16#5d, 16#8) -> {more, 16#60, 16#03}; +dec_huffman_lookup(16#5d, 16#9) -> {more, 16#60, 16#06}; +dec_huffman_lookup(16#5d, 16#a) -> {more, 16#60, 16#0a}; +dec_huffman_lookup(16#5d, 16#b) -> {more, 16#60, 16#0f}; +dec_huffman_lookup(16#5d, 16#c) -> {more, 16#60, 16#18}; +dec_huffman_lookup(16#5d, 16#d) -> {more, 16#60, 16#1f}; +dec_huffman_lookup(16#5d, 16#e) -> {more, 16#60, 16#29}; +dec_huffman_lookup(16#5d, 16#f) -> {ok, 16#60, 16#38}; +dec_huffman_lookup(16#5e, 16#0) -> {more, 16#7b, 16#03}; +dec_huffman_lookup(16#5e, 16#1) -> {more, 16#7b, 16#06}; +dec_huffman_lookup(16#5e, 16#2) -> {more, 16#7b, 16#0a}; +dec_huffman_lookup(16#5e, 16#3) -> {more, 16#7b, 16#0f}; +dec_huffman_lookup(16#5e, 16#4) -> {more, 16#7b, 16#18}; +dec_huffman_lookup(16#5e, 16#5) -> {more, 16#7b, 16#1f}; +dec_huffman_lookup(16#5e, 16#6) -> {more, 16#7b, 16#29}; +dec_huffman_lookup(16#5e, 16#7) -> {ok, 16#7b, 16#38}; +dec_huffman_lookup(16#5e, 16#8) -> {more, undefined, 16#62}; +dec_huffman_lookup(16#5e, 16#9) -> {more, undefined, 16#63}; +dec_huffman_lookup(16#5e, 16#a) -> {more, undefined, 16#66}; +dec_huffman_lookup(16#5e, 16#b) -> {more, undefined, 16#69}; +dec_huffman_lookup(16#5e, 16#c) -> {more, undefined, 16#70}; +dec_huffman_lookup(16#5e, 16#d) -> {more, undefined, 16#77}; +dec_huffman_lookup(16#5e, 16#e) -> {more, undefined, 16#86}; +dec_huffman_lookup(16#5e, 16#f) -> {ok, undefined, 16#99}; +dec_huffman_lookup(16#5f, 16#0) -> {ok, 16#5c, 16#00}; +dec_huffman_lookup(16#5f, 16#1) -> {ok, 16#c3, 16#00}; +dec_huffman_lookup(16#5f, 16#2) -> {ok, 16#d0, 16#00}; +dec_huffman_lookup(16#5f, 16#3) -> {more, undefined, 16#64}; +dec_huffman_lookup(16#5f, 16#4) -> {more, undefined, 16#67}; +dec_huffman_lookup(16#5f, 16#5) -> {more, undefined, 16#68}; +dec_huffman_lookup(16#5f, 16#6) -> {more, undefined, 16#6a}; +dec_huffman_lookup(16#5f, 16#7) -> {more, undefined, 16#6b}; +dec_huffman_lookup(16#5f, 16#8) -> {more, undefined, 16#71}; 
+dec_huffman_lookup(16#5f, 16#9) -> {more, undefined, 16#74}; +dec_huffman_lookup(16#5f, 16#a) -> {more, undefined, 16#78}; +dec_huffman_lookup(16#5f, 16#b) -> {more, undefined, 16#7e}; +dec_huffman_lookup(16#5f, 16#c) -> {more, undefined, 16#87}; +dec_huffman_lookup(16#5f, 16#d) -> {more, undefined, 16#8e}; +dec_huffman_lookup(16#5f, 16#e) -> {more, undefined, 16#9a}; +dec_huffman_lookup(16#5f, 16#f) -> {ok, undefined, 16#a9}; +dec_huffman_lookup(16#60, 16#0) -> {more, 16#5c, 16#01}; +dec_huffman_lookup(16#60, 16#1) -> {ok, 16#5c, 16#16}; +dec_huffman_lookup(16#60, 16#2) -> {more, 16#c3, 16#01}; +dec_huffman_lookup(16#60, 16#3) -> {ok, 16#c3, 16#16}; +dec_huffman_lookup(16#60, 16#4) -> {more, 16#d0, 16#01}; +dec_huffman_lookup(16#60, 16#5) -> {ok, 16#d0, 16#16}; +dec_huffman_lookup(16#60, 16#6) -> {ok, 16#80, 16#00}; +dec_huffman_lookup(16#60, 16#7) -> {ok, 16#82, 16#00}; +dec_huffman_lookup(16#60, 16#8) -> {ok, 16#83, 16#00}; +dec_huffman_lookup(16#60, 16#9) -> {ok, 16#a2, 16#00}; +dec_huffman_lookup(16#60, 16#a) -> {ok, 16#b8, 16#00}; +dec_huffman_lookup(16#60, 16#b) -> {ok, 16#c2, 16#00}; +dec_huffman_lookup(16#60, 16#c) -> {ok, 16#e0, 16#00}; +dec_huffman_lookup(16#60, 16#d) -> {ok, 16#e2, 16#00}; +dec_huffman_lookup(16#60, 16#e) -> {more, undefined, 16#6c}; +dec_huffman_lookup(16#60, 16#f) -> {more, undefined, 16#6d}; +dec_huffman_lookup(16#61, 16#0) -> {more, 16#5c, 16#02}; +dec_huffman_lookup(16#61, 16#1) -> {more, 16#5c, 16#09}; +dec_huffman_lookup(16#61, 16#2) -> {more, 16#5c, 16#17}; +dec_huffman_lookup(16#61, 16#3) -> {ok, 16#5c, 16#28}; +dec_huffman_lookup(16#61, 16#4) -> {more, 16#c3, 16#02}; +dec_huffman_lookup(16#61, 16#5) -> {more, 16#c3, 16#09}; +dec_huffman_lookup(16#61, 16#6) -> {more, 16#c3, 16#17}; +dec_huffman_lookup(16#61, 16#7) -> {ok, 16#c3, 16#28}; +dec_huffman_lookup(16#61, 16#8) -> {more, 16#d0, 16#02}; +dec_huffman_lookup(16#61, 16#9) -> {more, 16#d0, 16#09}; +dec_huffman_lookup(16#61, 16#a) -> {more, 16#d0, 16#17}; +dec_huffman_lookup(16#61, 16#b) -> {ok, 16#d0, 16#28}; +dec_huffman_lookup(16#61, 16#c) -> {more, 16#80, 16#01}; +dec_huffman_lookup(16#61, 16#d) -> {ok, 16#80, 16#16}; +dec_huffman_lookup(16#61, 16#e) -> {more, 16#82, 16#01}; +dec_huffman_lookup(16#61, 16#f) -> {ok, 16#82, 16#16}; +dec_huffman_lookup(16#62, 16#0) -> {more, 16#5c, 16#03}; +dec_huffman_lookup(16#62, 16#1) -> {more, 16#5c, 16#06}; +dec_huffman_lookup(16#62, 16#2) -> {more, 16#5c, 16#0a}; +dec_huffman_lookup(16#62, 16#3) -> {more, 16#5c, 16#0f}; +dec_huffman_lookup(16#62, 16#4) -> {more, 16#5c, 16#18}; +dec_huffman_lookup(16#62, 16#5) -> {more, 16#5c, 16#1f}; +dec_huffman_lookup(16#62, 16#6) -> {more, 16#5c, 16#29}; +dec_huffman_lookup(16#62, 16#7) -> {ok, 16#5c, 16#38}; +dec_huffman_lookup(16#62, 16#8) -> {more, 16#c3, 16#03}; +dec_huffman_lookup(16#62, 16#9) -> {more, 16#c3, 16#06}; +dec_huffman_lookup(16#62, 16#a) -> {more, 16#c3, 16#0a}; +dec_huffman_lookup(16#62, 16#b) -> {more, 16#c3, 16#0f}; +dec_huffman_lookup(16#62, 16#c) -> {more, 16#c3, 16#18}; +dec_huffman_lookup(16#62, 16#d) -> {more, 16#c3, 16#1f}; +dec_huffman_lookup(16#62, 16#e) -> {more, 16#c3, 16#29}; +dec_huffman_lookup(16#62, 16#f) -> {ok, 16#c3, 16#38}; +dec_huffman_lookup(16#63, 16#0) -> {more, 16#d0, 16#03}; +dec_huffman_lookup(16#63, 16#1) -> {more, 16#d0, 16#06}; +dec_huffman_lookup(16#63, 16#2) -> {more, 16#d0, 16#0a}; +dec_huffman_lookup(16#63, 16#3) -> {more, 16#d0, 16#0f}; +dec_huffman_lookup(16#63, 16#4) -> {more, 16#d0, 16#18}; +dec_huffman_lookup(16#63, 16#5) -> {more, 16#d0, 16#1f}; 
+dec_huffman_lookup(16#63, 16#6) -> {more, 16#d0, 16#29}; +dec_huffman_lookup(16#63, 16#7) -> {ok, 16#d0, 16#38}; +dec_huffman_lookup(16#63, 16#8) -> {more, 16#80, 16#02}; +dec_huffman_lookup(16#63, 16#9) -> {more, 16#80, 16#09}; +dec_huffman_lookup(16#63, 16#a) -> {more, 16#80, 16#17}; +dec_huffman_lookup(16#63, 16#b) -> {ok, 16#80, 16#28}; +dec_huffman_lookup(16#63, 16#c) -> {more, 16#82, 16#02}; +dec_huffman_lookup(16#63, 16#d) -> {more, 16#82, 16#09}; +dec_huffman_lookup(16#63, 16#e) -> {more, 16#82, 16#17}; +dec_huffman_lookup(16#63, 16#f) -> {ok, 16#82, 16#28}; +dec_huffman_lookup(16#64, 16#0) -> {more, 16#80, 16#03}; +dec_huffman_lookup(16#64, 16#1) -> {more, 16#80, 16#06}; +dec_huffman_lookup(16#64, 16#2) -> {more, 16#80, 16#0a}; +dec_huffman_lookup(16#64, 16#3) -> {more, 16#80, 16#0f}; +dec_huffman_lookup(16#64, 16#4) -> {more, 16#80, 16#18}; +dec_huffman_lookup(16#64, 16#5) -> {more, 16#80, 16#1f}; +dec_huffman_lookup(16#64, 16#6) -> {more, 16#80, 16#29}; +dec_huffman_lookup(16#64, 16#7) -> {ok, 16#80, 16#38}; +dec_huffman_lookup(16#64, 16#8) -> {more, 16#82, 16#03}; +dec_huffman_lookup(16#64, 16#9) -> {more, 16#82, 16#06}; +dec_huffman_lookup(16#64, 16#a) -> {more, 16#82, 16#0a}; +dec_huffman_lookup(16#64, 16#b) -> {more, 16#82, 16#0f}; +dec_huffman_lookup(16#64, 16#c) -> {more, 16#82, 16#18}; +dec_huffman_lookup(16#64, 16#d) -> {more, 16#82, 16#1f}; +dec_huffman_lookup(16#64, 16#e) -> {more, 16#82, 16#29}; +dec_huffman_lookup(16#64, 16#f) -> {ok, 16#82, 16#38}; +dec_huffman_lookup(16#65, 16#0) -> {more, 16#83, 16#01}; +dec_huffman_lookup(16#65, 16#1) -> {ok, 16#83, 16#16}; +dec_huffman_lookup(16#65, 16#2) -> {more, 16#a2, 16#01}; +dec_huffman_lookup(16#65, 16#3) -> {ok, 16#a2, 16#16}; +dec_huffman_lookup(16#65, 16#4) -> {more, 16#b8, 16#01}; +dec_huffman_lookup(16#65, 16#5) -> {ok, 16#b8, 16#16}; +dec_huffman_lookup(16#65, 16#6) -> {more, 16#c2, 16#01}; +dec_huffman_lookup(16#65, 16#7) -> {ok, 16#c2, 16#16}; +dec_huffman_lookup(16#65, 16#8) -> {more, 16#e0, 16#01}; +dec_huffman_lookup(16#65, 16#9) -> {ok, 16#e0, 16#16}; +dec_huffman_lookup(16#65, 16#a) -> {more, 16#e2, 16#01}; +dec_huffman_lookup(16#65, 16#b) -> {ok, 16#e2, 16#16}; +dec_huffman_lookup(16#65, 16#c) -> {ok, 16#99, 16#00}; +dec_huffman_lookup(16#65, 16#d) -> {ok, 16#a1, 16#00}; +dec_huffman_lookup(16#65, 16#e) -> {ok, 16#a7, 16#00}; +dec_huffman_lookup(16#65, 16#f) -> {ok, 16#ac, 16#00}; +dec_huffman_lookup(16#66, 16#0) -> {more, 16#83, 16#02}; +dec_huffman_lookup(16#66, 16#1) -> {more, 16#83, 16#09}; +dec_huffman_lookup(16#66, 16#2) -> {more, 16#83, 16#17}; +dec_huffman_lookup(16#66, 16#3) -> {ok, 16#83, 16#28}; +dec_huffman_lookup(16#66, 16#4) -> {more, 16#a2, 16#02}; +dec_huffman_lookup(16#66, 16#5) -> {more, 16#a2, 16#09}; +dec_huffman_lookup(16#66, 16#6) -> {more, 16#a2, 16#17}; +dec_huffman_lookup(16#66, 16#7) -> {ok, 16#a2, 16#28}; +dec_huffman_lookup(16#66, 16#8) -> {more, 16#b8, 16#02}; +dec_huffman_lookup(16#66, 16#9) -> {more, 16#b8, 16#09}; +dec_huffman_lookup(16#66, 16#a) -> {more, 16#b8, 16#17}; +dec_huffman_lookup(16#66, 16#b) -> {ok, 16#b8, 16#28}; +dec_huffman_lookup(16#66, 16#c) -> {more, 16#c2, 16#02}; +dec_huffman_lookup(16#66, 16#d) -> {more, 16#c2, 16#09}; +dec_huffman_lookup(16#66, 16#e) -> {more, 16#c2, 16#17}; +dec_huffman_lookup(16#66, 16#f) -> {ok, 16#c2, 16#28}; +dec_huffman_lookup(16#67, 16#0) -> {more, 16#83, 16#03}; +dec_huffman_lookup(16#67, 16#1) -> {more, 16#83, 16#06}; +dec_huffman_lookup(16#67, 16#2) -> {more, 16#83, 16#0a}; +dec_huffman_lookup(16#67, 16#3) -> {more, 16#83, 
16#0f}; +dec_huffman_lookup(16#67, 16#4) -> {more, 16#83, 16#18}; +dec_huffman_lookup(16#67, 16#5) -> {more, 16#83, 16#1f}; +dec_huffman_lookup(16#67, 16#6) -> {more, 16#83, 16#29}; +dec_huffman_lookup(16#67, 16#7) -> {ok, 16#83, 16#38}; +dec_huffman_lookup(16#67, 16#8) -> {more, 16#a2, 16#03}; +dec_huffman_lookup(16#67, 16#9) -> {more, 16#a2, 16#06}; +dec_huffman_lookup(16#67, 16#a) -> {more, 16#a2, 16#0a}; +dec_huffman_lookup(16#67, 16#b) -> {more, 16#a2, 16#0f}; +dec_huffman_lookup(16#67, 16#c) -> {more, 16#a2, 16#18}; +dec_huffman_lookup(16#67, 16#d) -> {more, 16#a2, 16#1f}; +dec_huffman_lookup(16#67, 16#e) -> {more, 16#a2, 16#29}; +dec_huffman_lookup(16#67, 16#f) -> {ok, 16#a2, 16#38}; +dec_huffman_lookup(16#68, 16#0) -> {more, 16#b8, 16#03}; +dec_huffman_lookup(16#68, 16#1) -> {more, 16#b8, 16#06}; +dec_huffman_lookup(16#68, 16#2) -> {more, 16#b8, 16#0a}; +dec_huffman_lookup(16#68, 16#3) -> {more, 16#b8, 16#0f}; +dec_huffman_lookup(16#68, 16#4) -> {more, 16#b8, 16#18}; +dec_huffman_lookup(16#68, 16#5) -> {more, 16#b8, 16#1f}; +dec_huffman_lookup(16#68, 16#6) -> {more, 16#b8, 16#29}; +dec_huffman_lookup(16#68, 16#7) -> {ok, 16#b8, 16#38}; +dec_huffman_lookup(16#68, 16#8) -> {more, 16#c2, 16#03}; +dec_huffman_lookup(16#68, 16#9) -> {more, 16#c2, 16#06}; +dec_huffman_lookup(16#68, 16#a) -> {more, 16#c2, 16#0a}; +dec_huffman_lookup(16#68, 16#b) -> {more, 16#c2, 16#0f}; +dec_huffman_lookup(16#68, 16#c) -> {more, 16#c2, 16#18}; +dec_huffman_lookup(16#68, 16#d) -> {more, 16#c2, 16#1f}; +dec_huffman_lookup(16#68, 16#e) -> {more, 16#c2, 16#29}; +dec_huffman_lookup(16#68, 16#f) -> {ok, 16#c2, 16#38}; +dec_huffman_lookup(16#69, 16#0) -> {more, 16#e0, 16#02}; +dec_huffman_lookup(16#69, 16#1) -> {more, 16#e0, 16#09}; +dec_huffman_lookup(16#69, 16#2) -> {more, 16#e0, 16#17}; +dec_huffman_lookup(16#69, 16#3) -> {ok, 16#e0, 16#28}; +dec_huffman_lookup(16#69, 16#4) -> {more, 16#e2, 16#02}; +dec_huffman_lookup(16#69, 16#5) -> {more, 16#e2, 16#09}; +dec_huffman_lookup(16#69, 16#6) -> {more, 16#e2, 16#17}; +dec_huffman_lookup(16#69, 16#7) -> {ok, 16#e2, 16#28}; +dec_huffman_lookup(16#69, 16#8) -> {more, 16#99, 16#01}; +dec_huffman_lookup(16#69, 16#9) -> {ok, 16#99, 16#16}; +dec_huffman_lookup(16#69, 16#a) -> {more, 16#a1, 16#01}; +dec_huffman_lookup(16#69, 16#b) -> {ok, 16#a1, 16#16}; +dec_huffman_lookup(16#69, 16#c) -> {more, 16#a7, 16#01}; +dec_huffman_lookup(16#69, 16#d) -> {ok, 16#a7, 16#16}; +dec_huffman_lookup(16#69, 16#e) -> {more, 16#ac, 16#01}; +dec_huffman_lookup(16#69, 16#f) -> {ok, 16#ac, 16#16}; +dec_huffman_lookup(16#6a, 16#0) -> {more, 16#e0, 16#03}; +dec_huffman_lookup(16#6a, 16#1) -> {more, 16#e0, 16#06}; +dec_huffman_lookup(16#6a, 16#2) -> {more, 16#e0, 16#0a}; +dec_huffman_lookup(16#6a, 16#3) -> {more, 16#e0, 16#0f}; +dec_huffman_lookup(16#6a, 16#4) -> {more, 16#e0, 16#18}; +dec_huffman_lookup(16#6a, 16#5) -> {more, 16#e0, 16#1f}; +dec_huffman_lookup(16#6a, 16#6) -> {more, 16#e0, 16#29}; +dec_huffman_lookup(16#6a, 16#7) -> {ok, 16#e0, 16#38}; +dec_huffman_lookup(16#6a, 16#8) -> {more, 16#e2, 16#03}; +dec_huffman_lookup(16#6a, 16#9) -> {more, 16#e2, 16#06}; +dec_huffman_lookup(16#6a, 16#a) -> {more, 16#e2, 16#0a}; +dec_huffman_lookup(16#6a, 16#b) -> {more, 16#e2, 16#0f}; +dec_huffman_lookup(16#6a, 16#c) -> {more, 16#e2, 16#18}; +dec_huffman_lookup(16#6a, 16#d) -> {more, 16#e2, 16#1f}; +dec_huffman_lookup(16#6a, 16#e) -> {more, 16#e2, 16#29}; +dec_huffman_lookup(16#6a, 16#f) -> {ok, 16#e2, 16#38}; +dec_huffman_lookup(16#6b, 16#0) -> {more, 16#99, 16#02}; +dec_huffman_lookup(16#6b, 16#1) 
-> {more, 16#99, 16#09}; +dec_huffman_lookup(16#6b, 16#2) -> {more, 16#99, 16#17}; +dec_huffman_lookup(16#6b, 16#3) -> {ok, 16#99, 16#28}; +dec_huffman_lookup(16#6b, 16#4) -> {more, 16#a1, 16#02}; +dec_huffman_lookup(16#6b, 16#5) -> {more, 16#a1, 16#09}; +dec_huffman_lookup(16#6b, 16#6) -> {more, 16#a1, 16#17}; +dec_huffman_lookup(16#6b, 16#7) -> {ok, 16#a1, 16#28}; +dec_huffman_lookup(16#6b, 16#8) -> {more, 16#a7, 16#02}; +dec_huffman_lookup(16#6b, 16#9) -> {more, 16#a7, 16#09}; +dec_huffman_lookup(16#6b, 16#a) -> {more, 16#a7, 16#17}; +dec_huffman_lookup(16#6b, 16#b) -> {ok, 16#a7, 16#28}; +dec_huffman_lookup(16#6b, 16#c) -> {more, 16#ac, 16#02}; +dec_huffman_lookup(16#6b, 16#d) -> {more, 16#ac, 16#09}; +dec_huffman_lookup(16#6b, 16#e) -> {more, 16#ac, 16#17}; +dec_huffman_lookup(16#6b, 16#f) -> {ok, 16#ac, 16#28}; +dec_huffman_lookup(16#6c, 16#0) -> {more, 16#99, 16#03}; +dec_huffman_lookup(16#6c, 16#1) -> {more, 16#99, 16#06}; +dec_huffman_lookup(16#6c, 16#2) -> {more, 16#99, 16#0a}; +dec_huffman_lookup(16#6c, 16#3) -> {more, 16#99, 16#0f}; +dec_huffman_lookup(16#6c, 16#4) -> {more, 16#99, 16#18}; +dec_huffman_lookup(16#6c, 16#5) -> {more, 16#99, 16#1f}; +dec_huffman_lookup(16#6c, 16#6) -> {more, 16#99, 16#29}; +dec_huffman_lookup(16#6c, 16#7) -> {ok, 16#99, 16#38}; +dec_huffman_lookup(16#6c, 16#8) -> {more, 16#a1, 16#03}; +dec_huffman_lookup(16#6c, 16#9) -> {more, 16#a1, 16#06}; +dec_huffman_lookup(16#6c, 16#a) -> {more, 16#a1, 16#0a}; +dec_huffman_lookup(16#6c, 16#b) -> {more, 16#a1, 16#0f}; +dec_huffman_lookup(16#6c, 16#c) -> {more, 16#a1, 16#18}; +dec_huffman_lookup(16#6c, 16#d) -> {more, 16#a1, 16#1f}; +dec_huffman_lookup(16#6c, 16#e) -> {more, 16#a1, 16#29}; +dec_huffman_lookup(16#6c, 16#f) -> {ok, 16#a1, 16#38}; +dec_huffman_lookup(16#6d, 16#0) -> {more, 16#a7, 16#03}; +dec_huffman_lookup(16#6d, 16#1) -> {more, 16#a7, 16#06}; +dec_huffman_lookup(16#6d, 16#2) -> {more, 16#a7, 16#0a}; +dec_huffman_lookup(16#6d, 16#3) -> {more, 16#a7, 16#0f}; +dec_huffman_lookup(16#6d, 16#4) -> {more, 16#a7, 16#18}; +dec_huffman_lookup(16#6d, 16#5) -> {more, 16#a7, 16#1f}; +dec_huffman_lookup(16#6d, 16#6) -> {more, 16#a7, 16#29}; +dec_huffman_lookup(16#6d, 16#7) -> {ok, 16#a7, 16#38}; +dec_huffman_lookup(16#6d, 16#8) -> {more, 16#ac, 16#03}; +dec_huffman_lookup(16#6d, 16#9) -> {more, 16#ac, 16#06}; +dec_huffman_lookup(16#6d, 16#a) -> {more, 16#ac, 16#0a}; +dec_huffman_lookup(16#6d, 16#b) -> {more, 16#ac, 16#0f}; +dec_huffman_lookup(16#6d, 16#c) -> {more, 16#ac, 16#18}; +dec_huffman_lookup(16#6d, 16#d) -> {more, 16#ac, 16#1f}; +dec_huffman_lookup(16#6d, 16#e) -> {more, 16#ac, 16#29}; +dec_huffman_lookup(16#6d, 16#f) -> {ok, 16#ac, 16#38}; +dec_huffman_lookup(16#6e, 16#0) -> {more, undefined, 16#72}; +dec_huffman_lookup(16#6e, 16#1) -> {more, undefined, 16#73}; +dec_huffman_lookup(16#6e, 16#2) -> {more, undefined, 16#75}; +dec_huffman_lookup(16#6e, 16#3) -> {more, undefined, 16#76}; +dec_huffman_lookup(16#6e, 16#4) -> {more, undefined, 16#79}; +dec_huffman_lookup(16#6e, 16#5) -> {more, undefined, 16#7b}; +dec_huffman_lookup(16#6e, 16#6) -> {more, undefined, 16#7f}; +dec_huffman_lookup(16#6e, 16#7) -> {more, undefined, 16#82}; +dec_huffman_lookup(16#6e, 16#8) -> {more, undefined, 16#88}; +dec_huffman_lookup(16#6e, 16#9) -> {more, undefined, 16#8b}; +dec_huffman_lookup(16#6e, 16#a) -> {more, undefined, 16#8f}; +dec_huffman_lookup(16#6e, 16#b) -> {more, undefined, 16#92}; +dec_huffman_lookup(16#6e, 16#c) -> {more, undefined, 16#9b}; +dec_huffman_lookup(16#6e, 16#d) -> {more, undefined, 16#a2}; 
+dec_huffman_lookup(16#6e, 16#e) -> {more, undefined, 16#aa}; +dec_huffman_lookup(16#6e, 16#f) -> {ok, undefined, 16#b4}; +dec_huffman_lookup(16#6f, 16#0) -> {ok, 16#b0, 16#00}; +dec_huffman_lookup(16#6f, 16#1) -> {ok, 16#b1, 16#00}; +dec_huffman_lookup(16#6f, 16#2) -> {ok, 16#b3, 16#00}; +dec_huffman_lookup(16#6f, 16#3) -> {ok, 16#d1, 16#00}; +dec_huffman_lookup(16#6f, 16#4) -> {ok, 16#d8, 16#00}; +dec_huffman_lookup(16#6f, 16#5) -> {ok, 16#d9, 16#00}; +dec_huffman_lookup(16#6f, 16#6) -> {ok, 16#e3, 16#00}; +dec_huffman_lookup(16#6f, 16#7) -> {ok, 16#e5, 16#00}; +dec_huffman_lookup(16#6f, 16#8) -> {ok, 16#e6, 16#00}; +dec_huffman_lookup(16#6f, 16#9) -> {more, undefined, 16#7a}; +dec_huffman_lookup(16#6f, 16#a) -> {more, undefined, 16#7c}; +dec_huffman_lookup(16#6f, 16#b) -> {more, undefined, 16#7d}; +dec_huffman_lookup(16#6f, 16#c) -> {more, undefined, 16#80}; +dec_huffman_lookup(16#6f, 16#d) -> {more, undefined, 16#81}; +dec_huffman_lookup(16#6f, 16#e) -> {more, undefined, 16#83}; +dec_huffman_lookup(16#6f, 16#f) -> {more, undefined, 16#84}; +dec_huffman_lookup(16#70, 16#0) -> {more, 16#b0, 16#01}; +dec_huffman_lookup(16#70, 16#1) -> {ok, 16#b0, 16#16}; +dec_huffman_lookup(16#70, 16#2) -> {more, 16#b1, 16#01}; +dec_huffman_lookup(16#70, 16#3) -> {ok, 16#b1, 16#16}; +dec_huffman_lookup(16#70, 16#4) -> {more, 16#b3, 16#01}; +dec_huffman_lookup(16#70, 16#5) -> {ok, 16#b3, 16#16}; +dec_huffman_lookup(16#70, 16#6) -> {more, 16#d1, 16#01}; +dec_huffman_lookup(16#70, 16#7) -> {ok, 16#d1, 16#16}; +dec_huffman_lookup(16#70, 16#8) -> {more, 16#d8, 16#01}; +dec_huffman_lookup(16#70, 16#9) -> {ok, 16#d8, 16#16}; +dec_huffman_lookup(16#70, 16#a) -> {more, 16#d9, 16#01}; +dec_huffman_lookup(16#70, 16#b) -> {ok, 16#d9, 16#16}; +dec_huffman_lookup(16#70, 16#c) -> {more, 16#e3, 16#01}; +dec_huffman_lookup(16#70, 16#d) -> {ok, 16#e3, 16#16}; +dec_huffman_lookup(16#70, 16#e) -> {more, 16#e5, 16#01}; +dec_huffman_lookup(16#70, 16#f) -> {ok, 16#e5, 16#16}; +dec_huffman_lookup(16#71, 16#0) -> {more, 16#b0, 16#02}; +dec_huffman_lookup(16#71, 16#1) -> {more, 16#b0, 16#09}; +dec_huffman_lookup(16#71, 16#2) -> {more, 16#b0, 16#17}; +dec_huffman_lookup(16#71, 16#3) -> {ok, 16#b0, 16#28}; +dec_huffman_lookup(16#71, 16#4) -> {more, 16#b1, 16#02}; +dec_huffman_lookup(16#71, 16#5) -> {more, 16#b1, 16#09}; +dec_huffman_lookup(16#71, 16#6) -> {more, 16#b1, 16#17}; +dec_huffman_lookup(16#71, 16#7) -> {ok, 16#b1, 16#28}; +dec_huffman_lookup(16#71, 16#8) -> {more, 16#b3, 16#02}; +dec_huffman_lookup(16#71, 16#9) -> {more, 16#b3, 16#09}; +dec_huffman_lookup(16#71, 16#a) -> {more, 16#b3, 16#17}; +dec_huffman_lookup(16#71, 16#b) -> {ok, 16#b3, 16#28}; +dec_huffman_lookup(16#71, 16#c) -> {more, 16#d1, 16#02}; +dec_huffman_lookup(16#71, 16#d) -> {more, 16#d1, 16#09}; +dec_huffman_lookup(16#71, 16#e) -> {more, 16#d1, 16#17}; +dec_huffman_lookup(16#71, 16#f) -> {ok, 16#d1, 16#28}; +dec_huffman_lookup(16#72, 16#0) -> {more, 16#b0, 16#03}; +dec_huffman_lookup(16#72, 16#1) -> {more, 16#b0, 16#06}; +dec_huffman_lookup(16#72, 16#2) -> {more, 16#b0, 16#0a}; +dec_huffman_lookup(16#72, 16#3) -> {more, 16#b0, 16#0f}; +dec_huffman_lookup(16#72, 16#4) -> {more, 16#b0, 16#18}; +dec_huffman_lookup(16#72, 16#5) -> {more, 16#b0, 16#1f}; +dec_huffman_lookup(16#72, 16#6) -> {more, 16#b0, 16#29}; +dec_huffman_lookup(16#72, 16#7) -> {ok, 16#b0, 16#38}; +dec_huffman_lookup(16#72, 16#8) -> {more, 16#b1, 16#03}; +dec_huffman_lookup(16#72, 16#9) -> {more, 16#b1, 16#06}; +dec_huffman_lookup(16#72, 16#a) -> {more, 16#b1, 16#0a}; +dec_huffman_lookup(16#72, 
16#b) -> {more, 16#b1, 16#0f}; +dec_huffman_lookup(16#72, 16#c) -> {more, 16#b1, 16#18}; +dec_huffman_lookup(16#72, 16#d) -> {more, 16#b1, 16#1f}; +dec_huffman_lookup(16#72, 16#e) -> {more, 16#b1, 16#29}; +dec_huffman_lookup(16#72, 16#f) -> {ok, 16#b1, 16#38}; +dec_huffman_lookup(16#73, 16#0) -> {more, 16#b3, 16#03}; +dec_huffman_lookup(16#73, 16#1) -> {more, 16#b3, 16#06}; +dec_huffman_lookup(16#73, 16#2) -> {more, 16#b3, 16#0a}; +dec_huffman_lookup(16#73, 16#3) -> {more, 16#b3, 16#0f}; +dec_huffman_lookup(16#73, 16#4) -> {more, 16#b3, 16#18}; +dec_huffman_lookup(16#73, 16#5) -> {more, 16#b3, 16#1f}; +dec_huffman_lookup(16#73, 16#6) -> {more, 16#b3, 16#29}; +dec_huffman_lookup(16#73, 16#7) -> {ok, 16#b3, 16#38}; +dec_huffman_lookup(16#73, 16#8) -> {more, 16#d1, 16#03}; +dec_huffman_lookup(16#73, 16#9) -> {more, 16#d1, 16#06}; +dec_huffman_lookup(16#73, 16#a) -> {more, 16#d1, 16#0a}; +dec_huffman_lookup(16#73, 16#b) -> {more, 16#d1, 16#0f}; +dec_huffman_lookup(16#73, 16#c) -> {more, 16#d1, 16#18}; +dec_huffman_lookup(16#73, 16#d) -> {more, 16#d1, 16#1f}; +dec_huffman_lookup(16#73, 16#e) -> {more, 16#d1, 16#29}; +dec_huffman_lookup(16#73, 16#f) -> {ok, 16#d1, 16#38}; +dec_huffman_lookup(16#74, 16#0) -> {more, 16#d8, 16#02}; +dec_huffman_lookup(16#74, 16#1) -> {more, 16#d8, 16#09}; +dec_huffman_lookup(16#74, 16#2) -> {more, 16#d8, 16#17}; +dec_huffman_lookup(16#74, 16#3) -> {ok, 16#d8, 16#28}; +dec_huffman_lookup(16#74, 16#4) -> {more, 16#d9, 16#02}; +dec_huffman_lookup(16#74, 16#5) -> {more, 16#d9, 16#09}; +dec_huffman_lookup(16#74, 16#6) -> {more, 16#d9, 16#17}; +dec_huffman_lookup(16#74, 16#7) -> {ok, 16#d9, 16#28}; +dec_huffman_lookup(16#74, 16#8) -> {more, 16#e3, 16#02}; +dec_huffman_lookup(16#74, 16#9) -> {more, 16#e3, 16#09}; +dec_huffman_lookup(16#74, 16#a) -> {more, 16#e3, 16#17}; +dec_huffman_lookup(16#74, 16#b) -> {ok, 16#e3, 16#28}; +dec_huffman_lookup(16#74, 16#c) -> {more, 16#e5, 16#02}; +dec_huffman_lookup(16#74, 16#d) -> {more, 16#e5, 16#09}; +dec_huffman_lookup(16#74, 16#e) -> {more, 16#e5, 16#17}; +dec_huffman_lookup(16#74, 16#f) -> {ok, 16#e5, 16#28}; +dec_huffman_lookup(16#75, 16#0) -> {more, 16#d8, 16#03}; +dec_huffman_lookup(16#75, 16#1) -> {more, 16#d8, 16#06}; +dec_huffman_lookup(16#75, 16#2) -> {more, 16#d8, 16#0a}; +dec_huffman_lookup(16#75, 16#3) -> {more, 16#d8, 16#0f}; +dec_huffman_lookup(16#75, 16#4) -> {more, 16#d8, 16#18}; +dec_huffman_lookup(16#75, 16#5) -> {more, 16#d8, 16#1f}; +dec_huffman_lookup(16#75, 16#6) -> {more, 16#d8, 16#29}; +dec_huffman_lookup(16#75, 16#7) -> {ok, 16#d8, 16#38}; +dec_huffman_lookup(16#75, 16#8) -> {more, 16#d9, 16#03}; +dec_huffman_lookup(16#75, 16#9) -> {more, 16#d9, 16#06}; +dec_huffman_lookup(16#75, 16#a) -> {more, 16#d9, 16#0a}; +dec_huffman_lookup(16#75, 16#b) -> {more, 16#d9, 16#0f}; +dec_huffman_lookup(16#75, 16#c) -> {more, 16#d9, 16#18}; +dec_huffman_lookup(16#75, 16#d) -> {more, 16#d9, 16#1f}; +dec_huffman_lookup(16#75, 16#e) -> {more, 16#d9, 16#29}; +dec_huffman_lookup(16#75, 16#f) -> {ok, 16#d9, 16#38}; +dec_huffman_lookup(16#76, 16#0) -> {more, 16#e3, 16#03}; +dec_huffman_lookup(16#76, 16#1) -> {more, 16#e3, 16#06}; +dec_huffman_lookup(16#76, 16#2) -> {more, 16#e3, 16#0a}; +dec_huffman_lookup(16#76, 16#3) -> {more, 16#e3, 16#0f}; +dec_huffman_lookup(16#76, 16#4) -> {more, 16#e3, 16#18}; +dec_huffman_lookup(16#76, 16#5) -> {more, 16#e3, 16#1f}; +dec_huffman_lookup(16#76, 16#6) -> {more, 16#e3, 16#29}; +dec_huffman_lookup(16#76, 16#7) -> {ok, 16#e3, 16#38}; +dec_huffman_lookup(16#76, 16#8) -> {more, 16#e5, 16#03}; 
+dec_huffman_lookup(16#76, 16#9) -> {more, 16#e5, 16#06}; +dec_huffman_lookup(16#76, 16#a) -> {more, 16#e5, 16#0a}; +dec_huffman_lookup(16#76, 16#b) -> {more, 16#e5, 16#0f}; +dec_huffman_lookup(16#76, 16#c) -> {more, 16#e5, 16#18}; +dec_huffman_lookup(16#76, 16#d) -> {more, 16#e5, 16#1f}; +dec_huffman_lookup(16#76, 16#e) -> {more, 16#e5, 16#29}; +dec_huffman_lookup(16#76, 16#f) -> {ok, 16#e5, 16#38}; +dec_huffman_lookup(16#77, 16#0) -> {more, 16#e6, 16#01}; +dec_huffman_lookup(16#77, 16#1) -> {ok, 16#e6, 16#16}; +dec_huffman_lookup(16#77, 16#2) -> {ok, 16#81, 16#00}; +dec_huffman_lookup(16#77, 16#3) -> {ok, 16#84, 16#00}; +dec_huffman_lookup(16#77, 16#4) -> {ok, 16#85, 16#00}; +dec_huffman_lookup(16#77, 16#5) -> {ok, 16#86, 16#00}; +dec_huffman_lookup(16#77, 16#6) -> {ok, 16#88, 16#00}; +dec_huffman_lookup(16#77, 16#7) -> {ok, 16#92, 16#00}; +dec_huffman_lookup(16#77, 16#8) -> {ok, 16#9a, 16#00}; +dec_huffman_lookup(16#77, 16#9) -> {ok, 16#9c, 16#00}; +dec_huffman_lookup(16#77, 16#a) -> {ok, 16#a0, 16#00}; +dec_huffman_lookup(16#77, 16#b) -> {ok, 16#a3, 16#00}; +dec_huffman_lookup(16#77, 16#c) -> {ok, 16#a4, 16#00}; +dec_huffman_lookup(16#77, 16#d) -> {ok, 16#a9, 16#00}; +dec_huffman_lookup(16#77, 16#e) -> {ok, 16#aa, 16#00}; +dec_huffman_lookup(16#77, 16#f) -> {ok, 16#ad, 16#00}; +dec_huffman_lookup(16#78, 16#0) -> {more, 16#e6, 16#02}; +dec_huffman_lookup(16#78, 16#1) -> {more, 16#e6, 16#09}; +dec_huffman_lookup(16#78, 16#2) -> {more, 16#e6, 16#17}; +dec_huffman_lookup(16#78, 16#3) -> {ok, 16#e6, 16#28}; +dec_huffman_lookup(16#78, 16#4) -> {more, 16#81, 16#01}; +dec_huffman_lookup(16#78, 16#5) -> {ok, 16#81, 16#16}; +dec_huffman_lookup(16#78, 16#6) -> {more, 16#84, 16#01}; +dec_huffman_lookup(16#78, 16#7) -> {ok, 16#84, 16#16}; +dec_huffman_lookup(16#78, 16#8) -> {more, 16#85, 16#01}; +dec_huffman_lookup(16#78, 16#9) -> {ok, 16#85, 16#16}; +dec_huffman_lookup(16#78, 16#a) -> {more, 16#86, 16#01}; +dec_huffman_lookup(16#78, 16#b) -> {ok, 16#86, 16#16}; +dec_huffman_lookup(16#78, 16#c) -> {more, 16#88, 16#01}; +dec_huffman_lookup(16#78, 16#d) -> {ok, 16#88, 16#16}; +dec_huffman_lookup(16#78, 16#e) -> {more, 16#92, 16#01}; +dec_huffman_lookup(16#78, 16#f) -> {ok, 16#92, 16#16}; +dec_huffman_lookup(16#79, 16#0) -> {more, 16#e6, 16#03}; +dec_huffman_lookup(16#79, 16#1) -> {more, 16#e6, 16#06}; +dec_huffman_lookup(16#79, 16#2) -> {more, 16#e6, 16#0a}; +dec_huffman_lookup(16#79, 16#3) -> {more, 16#e6, 16#0f}; +dec_huffman_lookup(16#79, 16#4) -> {more, 16#e6, 16#18}; +dec_huffman_lookup(16#79, 16#5) -> {more, 16#e6, 16#1f}; +dec_huffman_lookup(16#79, 16#6) -> {more, 16#e6, 16#29}; +dec_huffman_lookup(16#79, 16#7) -> {ok, 16#e6, 16#38}; +dec_huffman_lookup(16#79, 16#8) -> {more, 16#81, 16#02}; +dec_huffman_lookup(16#79, 16#9) -> {more, 16#81, 16#09}; +dec_huffman_lookup(16#79, 16#a) -> {more, 16#81, 16#17}; +dec_huffman_lookup(16#79, 16#b) -> {ok, 16#81, 16#28}; +dec_huffman_lookup(16#79, 16#c) -> {more, 16#84, 16#02}; +dec_huffman_lookup(16#79, 16#d) -> {more, 16#84, 16#09}; +dec_huffman_lookup(16#79, 16#e) -> {more, 16#84, 16#17}; +dec_huffman_lookup(16#79, 16#f) -> {ok, 16#84, 16#28}; +dec_huffman_lookup(16#7a, 16#0) -> {more, 16#81, 16#03}; +dec_huffman_lookup(16#7a, 16#1) -> {more, 16#81, 16#06}; +dec_huffman_lookup(16#7a, 16#2) -> {more, 16#81, 16#0a}; +dec_huffman_lookup(16#7a, 16#3) -> {more, 16#81, 16#0f}; +dec_huffman_lookup(16#7a, 16#4) -> {more, 16#81, 16#18}; +dec_huffman_lookup(16#7a, 16#5) -> {more, 16#81, 16#1f}; +dec_huffman_lookup(16#7a, 16#6) -> {more, 16#81, 16#29}; 
+dec_huffman_lookup(16#7a, 16#7) -> {ok, 16#81, 16#38}; +dec_huffman_lookup(16#7a, 16#8) -> {more, 16#84, 16#03}; +dec_huffman_lookup(16#7a, 16#9) -> {more, 16#84, 16#06}; +dec_huffman_lookup(16#7a, 16#a) -> {more, 16#84, 16#0a}; +dec_huffman_lookup(16#7a, 16#b) -> {more, 16#84, 16#0f}; +dec_huffman_lookup(16#7a, 16#c) -> {more, 16#84, 16#18}; +dec_huffman_lookup(16#7a, 16#d) -> {more, 16#84, 16#1f}; +dec_huffman_lookup(16#7a, 16#e) -> {more, 16#84, 16#29}; +dec_huffman_lookup(16#7a, 16#f) -> {ok, 16#84, 16#38}; +dec_huffman_lookup(16#7b, 16#0) -> {more, 16#85, 16#02}; +dec_huffman_lookup(16#7b, 16#1) -> {more, 16#85, 16#09}; +dec_huffman_lookup(16#7b, 16#2) -> {more, 16#85, 16#17}; +dec_huffman_lookup(16#7b, 16#3) -> {ok, 16#85, 16#28}; +dec_huffman_lookup(16#7b, 16#4) -> {more, 16#86, 16#02}; +dec_huffman_lookup(16#7b, 16#5) -> {more, 16#86, 16#09}; +dec_huffman_lookup(16#7b, 16#6) -> {more, 16#86, 16#17}; +dec_huffman_lookup(16#7b, 16#7) -> {ok, 16#86, 16#28}; +dec_huffman_lookup(16#7b, 16#8) -> {more, 16#88, 16#02}; +dec_huffman_lookup(16#7b, 16#9) -> {more, 16#88, 16#09}; +dec_huffman_lookup(16#7b, 16#a) -> {more, 16#88, 16#17}; +dec_huffman_lookup(16#7b, 16#b) -> {ok, 16#88, 16#28}; +dec_huffman_lookup(16#7b, 16#c) -> {more, 16#92, 16#02}; +dec_huffman_lookup(16#7b, 16#d) -> {more, 16#92, 16#09}; +dec_huffman_lookup(16#7b, 16#e) -> {more, 16#92, 16#17}; +dec_huffman_lookup(16#7b, 16#f) -> {ok, 16#92, 16#28}; +dec_huffman_lookup(16#7c, 16#0) -> {more, 16#85, 16#03}; +dec_huffman_lookup(16#7c, 16#1) -> {more, 16#85, 16#06}; +dec_huffman_lookup(16#7c, 16#2) -> {more, 16#85, 16#0a}; +dec_huffman_lookup(16#7c, 16#3) -> {more, 16#85, 16#0f}; +dec_huffman_lookup(16#7c, 16#4) -> {more, 16#85, 16#18}; +dec_huffman_lookup(16#7c, 16#5) -> {more, 16#85, 16#1f}; +dec_huffman_lookup(16#7c, 16#6) -> {more, 16#85, 16#29}; +dec_huffman_lookup(16#7c, 16#7) -> {ok, 16#85, 16#38}; +dec_huffman_lookup(16#7c, 16#8) -> {more, 16#86, 16#03}; +dec_huffman_lookup(16#7c, 16#9) -> {more, 16#86, 16#06}; +dec_huffman_lookup(16#7c, 16#a) -> {more, 16#86, 16#0a}; +dec_huffman_lookup(16#7c, 16#b) -> {more, 16#86, 16#0f}; +dec_huffman_lookup(16#7c, 16#c) -> {more, 16#86, 16#18}; +dec_huffman_lookup(16#7c, 16#d) -> {more, 16#86, 16#1f}; +dec_huffman_lookup(16#7c, 16#e) -> {more, 16#86, 16#29}; +dec_huffman_lookup(16#7c, 16#f) -> {ok, 16#86, 16#38}; +dec_huffman_lookup(16#7d, 16#0) -> {more, 16#88, 16#03}; +dec_huffman_lookup(16#7d, 16#1) -> {more, 16#88, 16#06}; +dec_huffman_lookup(16#7d, 16#2) -> {more, 16#88, 16#0a}; +dec_huffman_lookup(16#7d, 16#3) -> {more, 16#88, 16#0f}; +dec_huffman_lookup(16#7d, 16#4) -> {more, 16#88, 16#18}; +dec_huffman_lookup(16#7d, 16#5) -> {more, 16#88, 16#1f}; +dec_huffman_lookup(16#7d, 16#6) -> {more, 16#88, 16#29}; +dec_huffman_lookup(16#7d, 16#7) -> {ok, 16#88, 16#38}; +dec_huffman_lookup(16#7d, 16#8) -> {more, 16#92, 16#03}; +dec_huffman_lookup(16#7d, 16#9) -> {more, 16#92, 16#06}; +dec_huffman_lookup(16#7d, 16#a) -> {more, 16#92, 16#0a}; +dec_huffman_lookup(16#7d, 16#b) -> {more, 16#92, 16#0f}; +dec_huffman_lookup(16#7d, 16#c) -> {more, 16#92, 16#18}; +dec_huffman_lookup(16#7d, 16#d) -> {more, 16#92, 16#1f}; +dec_huffman_lookup(16#7d, 16#e) -> {more, 16#92, 16#29}; +dec_huffman_lookup(16#7d, 16#f) -> {ok, 16#92, 16#38}; +dec_huffman_lookup(16#7e, 16#0) -> {more, 16#9a, 16#01}; +dec_huffman_lookup(16#7e, 16#1) -> {ok, 16#9a, 16#16}; +dec_huffman_lookup(16#7e, 16#2) -> {more, 16#9c, 16#01}; +dec_huffman_lookup(16#7e, 16#3) -> {ok, 16#9c, 16#16}; +dec_huffman_lookup(16#7e, 16#4) -> 
{more, 16#a0, 16#01}; +dec_huffman_lookup(16#7e, 16#5) -> {ok, 16#a0, 16#16}; +dec_huffman_lookup(16#7e, 16#6) -> {more, 16#a3, 16#01}; +dec_huffman_lookup(16#7e, 16#7) -> {ok, 16#a3, 16#16}; +dec_huffman_lookup(16#7e, 16#8) -> {more, 16#a4, 16#01}; +dec_huffman_lookup(16#7e, 16#9) -> {ok, 16#a4, 16#16}; +dec_huffman_lookup(16#7e, 16#a) -> {more, 16#a9, 16#01}; +dec_huffman_lookup(16#7e, 16#b) -> {ok, 16#a9, 16#16}; +dec_huffman_lookup(16#7e, 16#c) -> {more, 16#aa, 16#01}; +dec_huffman_lookup(16#7e, 16#d) -> {ok, 16#aa, 16#16}; +dec_huffman_lookup(16#7e, 16#e) -> {more, 16#ad, 16#01}; +dec_huffman_lookup(16#7e, 16#f) -> {ok, 16#ad, 16#16}; +dec_huffman_lookup(16#7f, 16#0) -> {more, 16#9a, 16#02}; +dec_huffman_lookup(16#7f, 16#1) -> {more, 16#9a, 16#09}; +dec_huffman_lookup(16#7f, 16#2) -> {more, 16#9a, 16#17}; +dec_huffman_lookup(16#7f, 16#3) -> {ok, 16#9a, 16#28}; +dec_huffman_lookup(16#7f, 16#4) -> {more, 16#9c, 16#02}; +dec_huffman_lookup(16#7f, 16#5) -> {more, 16#9c, 16#09}; +dec_huffman_lookup(16#7f, 16#6) -> {more, 16#9c, 16#17}; +dec_huffman_lookup(16#7f, 16#7) -> {ok, 16#9c, 16#28}; +dec_huffman_lookup(16#7f, 16#8) -> {more, 16#a0, 16#02}; +dec_huffman_lookup(16#7f, 16#9) -> {more, 16#a0, 16#09}; +dec_huffman_lookup(16#7f, 16#a) -> {more, 16#a0, 16#17}; +dec_huffman_lookup(16#7f, 16#b) -> {ok, 16#a0, 16#28}; +dec_huffman_lookup(16#7f, 16#c) -> {more, 16#a3, 16#02}; +dec_huffman_lookup(16#7f, 16#d) -> {more, 16#a3, 16#09}; +dec_huffman_lookup(16#7f, 16#e) -> {more, 16#a3, 16#17}; +dec_huffman_lookup(16#7f, 16#f) -> {ok, 16#a3, 16#28}; +dec_huffman_lookup(16#80, 16#0) -> {more, 16#9a, 16#03}; +dec_huffman_lookup(16#80, 16#1) -> {more, 16#9a, 16#06}; +dec_huffman_lookup(16#80, 16#2) -> {more, 16#9a, 16#0a}; +dec_huffman_lookup(16#80, 16#3) -> {more, 16#9a, 16#0f}; +dec_huffman_lookup(16#80, 16#4) -> {more, 16#9a, 16#18}; +dec_huffman_lookup(16#80, 16#5) -> {more, 16#9a, 16#1f}; +dec_huffman_lookup(16#80, 16#6) -> {more, 16#9a, 16#29}; +dec_huffman_lookup(16#80, 16#7) -> {ok, 16#9a, 16#38}; +dec_huffman_lookup(16#80, 16#8) -> {more, 16#9c, 16#03}; +dec_huffman_lookup(16#80, 16#9) -> {more, 16#9c, 16#06}; +dec_huffman_lookup(16#80, 16#a) -> {more, 16#9c, 16#0a}; +dec_huffman_lookup(16#80, 16#b) -> {more, 16#9c, 16#0f}; +dec_huffman_lookup(16#80, 16#c) -> {more, 16#9c, 16#18}; +dec_huffman_lookup(16#80, 16#d) -> {more, 16#9c, 16#1f}; +dec_huffman_lookup(16#80, 16#e) -> {more, 16#9c, 16#29}; +dec_huffman_lookup(16#80, 16#f) -> {ok, 16#9c, 16#38}; +dec_huffman_lookup(16#81, 16#0) -> {more, 16#a0, 16#03}; +dec_huffman_lookup(16#81, 16#1) -> {more, 16#a0, 16#06}; +dec_huffman_lookup(16#81, 16#2) -> {more, 16#a0, 16#0a}; +dec_huffman_lookup(16#81, 16#3) -> {more, 16#a0, 16#0f}; +dec_huffman_lookup(16#81, 16#4) -> {more, 16#a0, 16#18}; +dec_huffman_lookup(16#81, 16#5) -> {more, 16#a0, 16#1f}; +dec_huffman_lookup(16#81, 16#6) -> {more, 16#a0, 16#29}; +dec_huffman_lookup(16#81, 16#7) -> {ok, 16#a0, 16#38}; +dec_huffman_lookup(16#81, 16#8) -> {more, 16#a3, 16#03}; +dec_huffman_lookup(16#81, 16#9) -> {more, 16#a3, 16#06}; +dec_huffman_lookup(16#81, 16#a) -> {more, 16#a3, 16#0a}; +dec_huffman_lookup(16#81, 16#b) -> {more, 16#a3, 16#0f}; +dec_huffman_lookup(16#81, 16#c) -> {more, 16#a3, 16#18}; +dec_huffman_lookup(16#81, 16#d) -> {more, 16#a3, 16#1f}; +dec_huffman_lookup(16#81, 16#e) -> {more, 16#a3, 16#29}; +dec_huffman_lookup(16#81, 16#f) -> {ok, 16#a3, 16#38}; +dec_huffman_lookup(16#82, 16#0) -> {more, 16#a4, 16#02}; +dec_huffman_lookup(16#82, 16#1) -> {more, 16#a4, 16#09}; 
+dec_huffman_lookup(16#82, 16#2) -> {more, 16#a4, 16#17}; +dec_huffman_lookup(16#82, 16#3) -> {ok, 16#a4, 16#28}; +dec_huffman_lookup(16#82, 16#4) -> {more, 16#a9, 16#02}; +dec_huffman_lookup(16#82, 16#5) -> {more, 16#a9, 16#09}; +dec_huffman_lookup(16#82, 16#6) -> {more, 16#a9, 16#17}; +dec_huffman_lookup(16#82, 16#7) -> {ok, 16#a9, 16#28}; +dec_huffman_lookup(16#82, 16#8) -> {more, 16#aa, 16#02}; +dec_huffman_lookup(16#82, 16#9) -> {more, 16#aa, 16#09}; +dec_huffman_lookup(16#82, 16#a) -> {more, 16#aa, 16#17}; +dec_huffman_lookup(16#82, 16#b) -> {ok, 16#aa, 16#28}; +dec_huffman_lookup(16#82, 16#c) -> {more, 16#ad, 16#02}; +dec_huffman_lookup(16#82, 16#d) -> {more, 16#ad, 16#09}; +dec_huffman_lookup(16#82, 16#e) -> {more, 16#ad, 16#17}; +dec_huffman_lookup(16#82, 16#f) -> {ok, 16#ad, 16#28}; +dec_huffman_lookup(16#83, 16#0) -> {more, 16#a4, 16#03}; +dec_huffman_lookup(16#83, 16#1) -> {more, 16#a4, 16#06}; +dec_huffman_lookup(16#83, 16#2) -> {more, 16#a4, 16#0a}; +dec_huffman_lookup(16#83, 16#3) -> {more, 16#a4, 16#0f}; +dec_huffman_lookup(16#83, 16#4) -> {more, 16#a4, 16#18}; +dec_huffman_lookup(16#83, 16#5) -> {more, 16#a4, 16#1f}; +dec_huffman_lookup(16#83, 16#6) -> {more, 16#a4, 16#29}; +dec_huffman_lookup(16#83, 16#7) -> {ok, 16#a4, 16#38}; +dec_huffman_lookup(16#83, 16#8) -> {more, 16#a9, 16#03}; +dec_huffman_lookup(16#83, 16#9) -> {more, 16#a9, 16#06}; +dec_huffman_lookup(16#83, 16#a) -> {more, 16#a9, 16#0a}; +dec_huffman_lookup(16#83, 16#b) -> {more, 16#a9, 16#0f}; +dec_huffman_lookup(16#83, 16#c) -> {more, 16#a9, 16#18}; +dec_huffman_lookup(16#83, 16#d) -> {more, 16#a9, 16#1f}; +dec_huffman_lookup(16#83, 16#e) -> {more, 16#a9, 16#29}; +dec_huffman_lookup(16#83, 16#f) -> {ok, 16#a9, 16#38}; +dec_huffman_lookup(16#84, 16#0) -> {more, 16#aa, 16#03}; +dec_huffman_lookup(16#84, 16#1) -> {more, 16#aa, 16#06}; +dec_huffman_lookup(16#84, 16#2) -> {more, 16#aa, 16#0a}; +dec_huffman_lookup(16#84, 16#3) -> {more, 16#aa, 16#0f}; +dec_huffman_lookup(16#84, 16#4) -> {more, 16#aa, 16#18}; +dec_huffman_lookup(16#84, 16#5) -> {more, 16#aa, 16#1f}; +dec_huffman_lookup(16#84, 16#6) -> {more, 16#aa, 16#29}; +dec_huffman_lookup(16#84, 16#7) -> {ok, 16#aa, 16#38}; +dec_huffman_lookup(16#84, 16#8) -> {more, 16#ad, 16#03}; +dec_huffman_lookup(16#84, 16#9) -> {more, 16#ad, 16#06}; +dec_huffman_lookup(16#84, 16#a) -> {more, 16#ad, 16#0a}; +dec_huffman_lookup(16#84, 16#b) -> {more, 16#ad, 16#0f}; +dec_huffman_lookup(16#84, 16#c) -> {more, 16#ad, 16#18}; +dec_huffman_lookup(16#84, 16#d) -> {more, 16#ad, 16#1f}; +dec_huffman_lookup(16#84, 16#e) -> {more, 16#ad, 16#29}; +dec_huffman_lookup(16#84, 16#f) -> {ok, 16#ad, 16#38}; +dec_huffman_lookup(16#85, 16#0) -> {more, undefined, 16#89}; +dec_huffman_lookup(16#85, 16#1) -> {more, undefined, 16#8a}; +dec_huffman_lookup(16#85, 16#2) -> {more, undefined, 16#8c}; +dec_huffman_lookup(16#85, 16#3) -> {more, undefined, 16#8d}; +dec_huffman_lookup(16#85, 16#4) -> {more, undefined, 16#90}; +dec_huffman_lookup(16#85, 16#5) -> {more, undefined, 16#91}; +dec_huffman_lookup(16#85, 16#6) -> {more, undefined, 16#93}; +dec_huffman_lookup(16#85, 16#7) -> {more, undefined, 16#96}; +dec_huffman_lookup(16#85, 16#8) -> {more, undefined, 16#9c}; +dec_huffman_lookup(16#85, 16#9) -> {more, undefined, 16#9f}; +dec_huffman_lookup(16#85, 16#a) -> {more, undefined, 16#a3}; +dec_huffman_lookup(16#85, 16#b) -> {more, undefined, 16#a6}; +dec_huffman_lookup(16#85, 16#c) -> {more, undefined, 16#ab}; +dec_huffman_lookup(16#85, 16#d) -> {more, undefined, 16#ae}; +dec_huffman_lookup(16#85, 16#e) 
-> {more, undefined, 16#b5}; +dec_huffman_lookup(16#85, 16#f) -> {ok, undefined, 16#be}; +dec_huffman_lookup(16#86, 16#0) -> {ok, 16#b2, 16#00}; +dec_huffman_lookup(16#86, 16#1) -> {ok, 16#b5, 16#00}; +dec_huffman_lookup(16#86, 16#2) -> {ok, 16#b9, 16#00}; +dec_huffman_lookup(16#86, 16#3) -> {ok, 16#ba, 16#00}; +dec_huffman_lookup(16#86, 16#4) -> {ok, 16#bb, 16#00}; +dec_huffman_lookup(16#86, 16#5) -> {ok, 16#bd, 16#00}; +dec_huffman_lookup(16#86, 16#6) -> {ok, 16#be, 16#00}; +dec_huffman_lookup(16#86, 16#7) -> {ok, 16#c4, 16#00}; +dec_huffman_lookup(16#86, 16#8) -> {ok, 16#c6, 16#00}; +dec_huffman_lookup(16#86, 16#9) -> {ok, 16#e4, 16#00}; +dec_huffman_lookup(16#86, 16#a) -> {ok, 16#e8, 16#00}; +dec_huffman_lookup(16#86, 16#b) -> {ok, 16#e9, 16#00}; +dec_huffman_lookup(16#86, 16#c) -> {more, undefined, 16#94}; +dec_huffman_lookup(16#86, 16#d) -> {more, undefined, 16#95}; +dec_huffman_lookup(16#86, 16#e) -> {more, undefined, 16#97}; +dec_huffman_lookup(16#86, 16#f) -> {more, undefined, 16#98}; +dec_huffman_lookup(16#87, 16#0) -> {more, 16#b2, 16#01}; +dec_huffman_lookup(16#87, 16#1) -> {ok, 16#b2, 16#16}; +dec_huffman_lookup(16#87, 16#2) -> {more, 16#b5, 16#01}; +dec_huffman_lookup(16#87, 16#3) -> {ok, 16#b5, 16#16}; +dec_huffman_lookup(16#87, 16#4) -> {more, 16#b9, 16#01}; +dec_huffman_lookup(16#87, 16#5) -> {ok, 16#b9, 16#16}; +dec_huffman_lookup(16#87, 16#6) -> {more, 16#ba, 16#01}; +dec_huffman_lookup(16#87, 16#7) -> {ok, 16#ba, 16#16}; +dec_huffman_lookup(16#87, 16#8) -> {more, 16#bb, 16#01}; +dec_huffman_lookup(16#87, 16#9) -> {ok, 16#bb, 16#16}; +dec_huffman_lookup(16#87, 16#a) -> {more, 16#bd, 16#01}; +dec_huffman_lookup(16#87, 16#b) -> {ok, 16#bd, 16#16}; +dec_huffman_lookup(16#87, 16#c) -> {more, 16#be, 16#01}; +dec_huffman_lookup(16#87, 16#d) -> {ok, 16#be, 16#16}; +dec_huffman_lookup(16#87, 16#e) -> {more, 16#c4, 16#01}; +dec_huffman_lookup(16#87, 16#f) -> {ok, 16#c4, 16#16}; +dec_huffman_lookup(16#88, 16#0) -> {more, 16#b2, 16#02}; +dec_huffman_lookup(16#88, 16#1) -> {more, 16#b2, 16#09}; +dec_huffman_lookup(16#88, 16#2) -> {more, 16#b2, 16#17}; +dec_huffman_lookup(16#88, 16#3) -> {ok, 16#b2, 16#28}; +dec_huffman_lookup(16#88, 16#4) -> {more, 16#b5, 16#02}; +dec_huffman_lookup(16#88, 16#5) -> {more, 16#b5, 16#09}; +dec_huffman_lookup(16#88, 16#6) -> {more, 16#b5, 16#17}; +dec_huffman_lookup(16#88, 16#7) -> {ok, 16#b5, 16#28}; +dec_huffman_lookup(16#88, 16#8) -> {more, 16#b9, 16#02}; +dec_huffman_lookup(16#88, 16#9) -> {more, 16#b9, 16#09}; +dec_huffman_lookup(16#88, 16#a) -> {more, 16#b9, 16#17}; +dec_huffman_lookup(16#88, 16#b) -> {ok, 16#b9, 16#28}; +dec_huffman_lookup(16#88, 16#c) -> {more, 16#ba, 16#02}; +dec_huffman_lookup(16#88, 16#d) -> {more, 16#ba, 16#09}; +dec_huffman_lookup(16#88, 16#e) -> {more, 16#ba, 16#17}; +dec_huffman_lookup(16#88, 16#f) -> {ok, 16#ba, 16#28}; +dec_huffman_lookup(16#89, 16#0) -> {more, 16#b2, 16#03}; +dec_huffman_lookup(16#89, 16#1) -> {more, 16#b2, 16#06}; +dec_huffman_lookup(16#89, 16#2) -> {more, 16#b2, 16#0a}; +dec_huffman_lookup(16#89, 16#3) -> {more, 16#b2, 16#0f}; +dec_huffman_lookup(16#89, 16#4) -> {more, 16#b2, 16#18}; +dec_huffman_lookup(16#89, 16#5) -> {more, 16#b2, 16#1f}; +dec_huffman_lookup(16#89, 16#6) -> {more, 16#b2, 16#29}; +dec_huffman_lookup(16#89, 16#7) -> {ok, 16#b2, 16#38}; +dec_huffman_lookup(16#89, 16#8) -> {more, 16#b5, 16#03}; +dec_huffman_lookup(16#89, 16#9) -> {more, 16#b5, 16#06}; +dec_huffman_lookup(16#89, 16#a) -> {more, 16#b5, 16#0a}; +dec_huffman_lookup(16#89, 16#b) -> {more, 16#b5, 16#0f}; 
+dec_huffman_lookup(16#89, 16#c) -> {more, 16#b5, 16#18}; +dec_huffman_lookup(16#89, 16#d) -> {more, 16#b5, 16#1f}; +dec_huffman_lookup(16#89, 16#e) -> {more, 16#b5, 16#29}; +dec_huffman_lookup(16#89, 16#f) -> {ok, 16#b5, 16#38}; +dec_huffman_lookup(16#8a, 16#0) -> {more, 16#b9, 16#03}; +dec_huffman_lookup(16#8a, 16#1) -> {more, 16#b9, 16#06}; +dec_huffman_lookup(16#8a, 16#2) -> {more, 16#b9, 16#0a}; +dec_huffman_lookup(16#8a, 16#3) -> {more, 16#b9, 16#0f}; +dec_huffman_lookup(16#8a, 16#4) -> {more, 16#b9, 16#18}; +dec_huffman_lookup(16#8a, 16#5) -> {more, 16#b9, 16#1f}; +dec_huffman_lookup(16#8a, 16#6) -> {more, 16#b9, 16#29}; +dec_huffman_lookup(16#8a, 16#7) -> {ok, 16#b9, 16#38}; +dec_huffman_lookup(16#8a, 16#8) -> {more, 16#ba, 16#03}; +dec_huffman_lookup(16#8a, 16#9) -> {more, 16#ba, 16#06}; +dec_huffman_lookup(16#8a, 16#a) -> {more, 16#ba, 16#0a}; +dec_huffman_lookup(16#8a, 16#b) -> {more, 16#ba, 16#0f}; +dec_huffman_lookup(16#8a, 16#c) -> {more, 16#ba, 16#18}; +dec_huffman_lookup(16#8a, 16#d) -> {more, 16#ba, 16#1f}; +dec_huffman_lookup(16#8a, 16#e) -> {more, 16#ba, 16#29}; +dec_huffman_lookup(16#8a, 16#f) -> {ok, 16#ba, 16#38}; +dec_huffman_lookup(16#8b, 16#0) -> {more, 16#bb, 16#02}; +dec_huffman_lookup(16#8b, 16#1) -> {more, 16#bb, 16#09}; +dec_huffman_lookup(16#8b, 16#2) -> {more, 16#bb, 16#17}; +dec_huffman_lookup(16#8b, 16#3) -> {ok, 16#bb, 16#28}; +dec_huffman_lookup(16#8b, 16#4) -> {more, 16#bd, 16#02}; +dec_huffman_lookup(16#8b, 16#5) -> {more, 16#bd, 16#09}; +dec_huffman_lookup(16#8b, 16#6) -> {more, 16#bd, 16#17}; +dec_huffman_lookup(16#8b, 16#7) -> {ok, 16#bd, 16#28}; +dec_huffman_lookup(16#8b, 16#8) -> {more, 16#be, 16#02}; +dec_huffman_lookup(16#8b, 16#9) -> {more, 16#be, 16#09}; +dec_huffman_lookup(16#8b, 16#a) -> {more, 16#be, 16#17}; +dec_huffman_lookup(16#8b, 16#b) -> {ok, 16#be, 16#28}; +dec_huffman_lookup(16#8b, 16#c) -> {more, 16#c4, 16#02}; +dec_huffman_lookup(16#8b, 16#d) -> {more, 16#c4, 16#09}; +dec_huffman_lookup(16#8b, 16#e) -> {more, 16#c4, 16#17}; +dec_huffman_lookup(16#8b, 16#f) -> {ok, 16#c4, 16#28}; +dec_huffman_lookup(16#8c, 16#0) -> {more, 16#bb, 16#03}; +dec_huffman_lookup(16#8c, 16#1) -> {more, 16#bb, 16#06}; +dec_huffman_lookup(16#8c, 16#2) -> {more, 16#bb, 16#0a}; +dec_huffman_lookup(16#8c, 16#3) -> {more, 16#bb, 16#0f}; +dec_huffman_lookup(16#8c, 16#4) -> {more, 16#bb, 16#18}; +dec_huffman_lookup(16#8c, 16#5) -> {more, 16#bb, 16#1f}; +dec_huffman_lookup(16#8c, 16#6) -> {more, 16#bb, 16#29}; +dec_huffman_lookup(16#8c, 16#7) -> {ok, 16#bb, 16#38}; +dec_huffman_lookup(16#8c, 16#8) -> {more, 16#bd, 16#03}; +dec_huffman_lookup(16#8c, 16#9) -> {more, 16#bd, 16#06}; +dec_huffman_lookup(16#8c, 16#a) -> {more, 16#bd, 16#0a}; +dec_huffman_lookup(16#8c, 16#b) -> {more, 16#bd, 16#0f}; +dec_huffman_lookup(16#8c, 16#c) -> {more, 16#bd, 16#18}; +dec_huffman_lookup(16#8c, 16#d) -> {more, 16#bd, 16#1f}; +dec_huffman_lookup(16#8c, 16#e) -> {more, 16#bd, 16#29}; +dec_huffman_lookup(16#8c, 16#f) -> {ok, 16#bd, 16#38}; +dec_huffman_lookup(16#8d, 16#0) -> {more, 16#be, 16#03}; +dec_huffman_lookup(16#8d, 16#1) -> {more, 16#be, 16#06}; +dec_huffman_lookup(16#8d, 16#2) -> {more, 16#be, 16#0a}; +dec_huffman_lookup(16#8d, 16#3) -> {more, 16#be, 16#0f}; +dec_huffman_lookup(16#8d, 16#4) -> {more, 16#be, 16#18}; +dec_huffman_lookup(16#8d, 16#5) -> {more, 16#be, 16#1f}; +dec_huffman_lookup(16#8d, 16#6) -> {more, 16#be, 16#29}; +dec_huffman_lookup(16#8d, 16#7) -> {ok, 16#be, 16#38}; +dec_huffman_lookup(16#8d, 16#8) -> {more, 16#c4, 16#03}; +dec_huffman_lookup(16#8d, 16#9) -> 
{more, 16#c4, 16#06}; +dec_huffman_lookup(16#8d, 16#a) -> {more, 16#c4, 16#0a}; +dec_huffman_lookup(16#8d, 16#b) -> {more, 16#c4, 16#0f}; +dec_huffman_lookup(16#8d, 16#c) -> {more, 16#c4, 16#18}; +dec_huffman_lookup(16#8d, 16#d) -> {more, 16#c4, 16#1f}; +dec_huffman_lookup(16#8d, 16#e) -> {more, 16#c4, 16#29}; +dec_huffman_lookup(16#8d, 16#f) -> {ok, 16#c4, 16#38}; +dec_huffman_lookup(16#8e, 16#0) -> {more, 16#c6, 16#01}; +dec_huffman_lookup(16#8e, 16#1) -> {ok, 16#c6, 16#16}; +dec_huffman_lookup(16#8e, 16#2) -> {more, 16#e4, 16#01}; +dec_huffman_lookup(16#8e, 16#3) -> {ok, 16#e4, 16#16}; +dec_huffman_lookup(16#8e, 16#4) -> {more, 16#e8, 16#01}; +dec_huffman_lookup(16#8e, 16#5) -> {ok, 16#e8, 16#16}; +dec_huffman_lookup(16#8e, 16#6) -> {more, 16#e9, 16#01}; +dec_huffman_lookup(16#8e, 16#7) -> {ok, 16#e9, 16#16}; +dec_huffman_lookup(16#8e, 16#8) -> {ok, 16#01, 16#00}; +dec_huffman_lookup(16#8e, 16#9) -> {ok, 16#87, 16#00}; +dec_huffman_lookup(16#8e, 16#a) -> {ok, 16#89, 16#00}; +dec_huffman_lookup(16#8e, 16#b) -> {ok, 16#8a, 16#00}; +dec_huffman_lookup(16#8e, 16#c) -> {ok, 16#8b, 16#00}; +dec_huffman_lookup(16#8e, 16#d) -> {ok, 16#8c, 16#00}; +dec_huffman_lookup(16#8e, 16#e) -> {ok, 16#8d, 16#00}; +dec_huffman_lookup(16#8e, 16#f) -> {ok, 16#8f, 16#00}; +dec_huffman_lookup(16#8f, 16#0) -> {more, 16#c6, 16#02}; +dec_huffman_lookup(16#8f, 16#1) -> {more, 16#c6, 16#09}; +dec_huffman_lookup(16#8f, 16#2) -> {more, 16#c6, 16#17}; +dec_huffman_lookup(16#8f, 16#3) -> {ok, 16#c6, 16#28}; +dec_huffman_lookup(16#8f, 16#4) -> {more, 16#e4, 16#02}; +dec_huffman_lookup(16#8f, 16#5) -> {more, 16#e4, 16#09}; +dec_huffman_lookup(16#8f, 16#6) -> {more, 16#e4, 16#17}; +dec_huffman_lookup(16#8f, 16#7) -> {ok, 16#e4, 16#28}; +dec_huffman_lookup(16#8f, 16#8) -> {more, 16#e8, 16#02}; +dec_huffman_lookup(16#8f, 16#9) -> {more, 16#e8, 16#09}; +dec_huffman_lookup(16#8f, 16#a) -> {more, 16#e8, 16#17}; +dec_huffman_lookup(16#8f, 16#b) -> {ok, 16#e8, 16#28}; +dec_huffman_lookup(16#8f, 16#c) -> {more, 16#e9, 16#02}; +dec_huffman_lookup(16#8f, 16#d) -> {more, 16#e9, 16#09}; +dec_huffman_lookup(16#8f, 16#e) -> {more, 16#e9, 16#17}; +dec_huffman_lookup(16#8f, 16#f) -> {ok, 16#e9, 16#28}; +dec_huffman_lookup(16#90, 16#0) -> {more, 16#c6, 16#03}; +dec_huffman_lookup(16#90, 16#1) -> {more, 16#c6, 16#06}; +dec_huffman_lookup(16#90, 16#2) -> {more, 16#c6, 16#0a}; +dec_huffman_lookup(16#90, 16#3) -> {more, 16#c6, 16#0f}; +dec_huffman_lookup(16#90, 16#4) -> {more, 16#c6, 16#18}; +dec_huffman_lookup(16#90, 16#5) -> {more, 16#c6, 16#1f}; +dec_huffman_lookup(16#90, 16#6) -> {more, 16#c6, 16#29}; +dec_huffman_lookup(16#90, 16#7) -> {ok, 16#c6, 16#38}; +dec_huffman_lookup(16#90, 16#8) -> {more, 16#e4, 16#03}; +dec_huffman_lookup(16#90, 16#9) -> {more, 16#e4, 16#06}; +dec_huffman_lookup(16#90, 16#a) -> {more, 16#e4, 16#0a}; +dec_huffman_lookup(16#90, 16#b) -> {more, 16#e4, 16#0f}; +dec_huffman_lookup(16#90, 16#c) -> {more, 16#e4, 16#18}; +dec_huffman_lookup(16#90, 16#d) -> {more, 16#e4, 16#1f}; +dec_huffman_lookup(16#90, 16#e) -> {more, 16#e4, 16#29}; +dec_huffman_lookup(16#90, 16#f) -> {ok, 16#e4, 16#38}; +dec_huffman_lookup(16#91, 16#0) -> {more, 16#e8, 16#03}; +dec_huffman_lookup(16#91, 16#1) -> {more, 16#e8, 16#06}; +dec_huffman_lookup(16#91, 16#2) -> {more, 16#e8, 16#0a}; +dec_huffman_lookup(16#91, 16#3) -> {more, 16#e8, 16#0f}; +dec_huffman_lookup(16#91, 16#4) -> {more, 16#e8, 16#18}; +dec_huffman_lookup(16#91, 16#5) -> {more, 16#e8, 16#1f}; +dec_huffman_lookup(16#91, 16#6) -> {more, 16#e8, 16#29}; +dec_huffman_lookup(16#91, 16#7) 
-> {ok, 16#e8, 16#38}; +dec_huffman_lookup(16#91, 16#8) -> {more, 16#e9, 16#03}; +dec_huffman_lookup(16#91, 16#9) -> {more, 16#e9, 16#06}; +dec_huffman_lookup(16#91, 16#a) -> {more, 16#e9, 16#0a}; +dec_huffman_lookup(16#91, 16#b) -> {more, 16#e9, 16#0f}; +dec_huffman_lookup(16#91, 16#c) -> {more, 16#e9, 16#18}; +dec_huffman_lookup(16#91, 16#d) -> {more, 16#e9, 16#1f}; +dec_huffman_lookup(16#91, 16#e) -> {more, 16#e9, 16#29}; +dec_huffman_lookup(16#91, 16#f) -> {ok, 16#e9, 16#38}; +dec_huffman_lookup(16#92, 16#0) -> {more, 16#01, 16#01}; +dec_huffman_lookup(16#92, 16#1) -> {ok, 16#01, 16#16}; +dec_huffman_lookup(16#92, 16#2) -> {more, 16#87, 16#01}; +dec_huffman_lookup(16#92, 16#3) -> {ok, 16#87, 16#16}; +dec_huffman_lookup(16#92, 16#4) -> {more, 16#89, 16#01}; +dec_huffman_lookup(16#92, 16#5) -> {ok, 16#89, 16#16}; +dec_huffman_lookup(16#92, 16#6) -> {more, 16#8a, 16#01}; +dec_huffman_lookup(16#92, 16#7) -> {ok, 16#8a, 16#16}; +dec_huffman_lookup(16#92, 16#8) -> {more, 16#8b, 16#01}; +dec_huffman_lookup(16#92, 16#9) -> {ok, 16#8b, 16#16}; +dec_huffman_lookup(16#92, 16#a) -> {more, 16#8c, 16#01}; +dec_huffman_lookup(16#92, 16#b) -> {ok, 16#8c, 16#16}; +dec_huffman_lookup(16#92, 16#c) -> {more, 16#8d, 16#01}; +dec_huffman_lookup(16#92, 16#d) -> {ok, 16#8d, 16#16}; +dec_huffman_lookup(16#92, 16#e) -> {more, 16#8f, 16#01}; +dec_huffman_lookup(16#92, 16#f) -> {ok, 16#8f, 16#16}; +dec_huffman_lookup(16#93, 16#0) -> {more, 16#01, 16#02}; +dec_huffman_lookup(16#93, 16#1) -> {more, 16#01, 16#09}; +dec_huffman_lookup(16#93, 16#2) -> {more, 16#01, 16#17}; +dec_huffman_lookup(16#93, 16#3) -> {ok, 16#01, 16#28}; +dec_huffman_lookup(16#93, 16#4) -> {more, 16#87, 16#02}; +dec_huffman_lookup(16#93, 16#5) -> {more, 16#87, 16#09}; +dec_huffman_lookup(16#93, 16#6) -> {more, 16#87, 16#17}; +dec_huffman_lookup(16#93, 16#7) -> {ok, 16#87, 16#28}; +dec_huffman_lookup(16#93, 16#8) -> {more, 16#89, 16#02}; +dec_huffman_lookup(16#93, 16#9) -> {more, 16#89, 16#09}; +dec_huffman_lookup(16#93, 16#a) -> {more, 16#89, 16#17}; +dec_huffman_lookup(16#93, 16#b) -> {ok, 16#89, 16#28}; +dec_huffman_lookup(16#93, 16#c) -> {more, 16#8a, 16#02}; +dec_huffman_lookup(16#93, 16#d) -> {more, 16#8a, 16#09}; +dec_huffman_lookup(16#93, 16#e) -> {more, 16#8a, 16#17}; +dec_huffman_lookup(16#93, 16#f) -> {ok, 16#8a, 16#28}; +dec_huffman_lookup(16#94, 16#0) -> {more, 16#01, 16#03}; +dec_huffman_lookup(16#94, 16#1) -> {more, 16#01, 16#06}; +dec_huffman_lookup(16#94, 16#2) -> {more, 16#01, 16#0a}; +dec_huffman_lookup(16#94, 16#3) -> {more, 16#01, 16#0f}; +dec_huffman_lookup(16#94, 16#4) -> {more, 16#01, 16#18}; +dec_huffman_lookup(16#94, 16#5) -> {more, 16#01, 16#1f}; +dec_huffman_lookup(16#94, 16#6) -> {more, 16#01, 16#29}; +dec_huffman_lookup(16#94, 16#7) -> {ok, 16#01, 16#38}; +dec_huffman_lookup(16#94, 16#8) -> {more, 16#87, 16#03}; +dec_huffman_lookup(16#94, 16#9) -> {more, 16#87, 16#06}; +dec_huffman_lookup(16#94, 16#a) -> {more, 16#87, 16#0a}; +dec_huffman_lookup(16#94, 16#b) -> {more, 16#87, 16#0f}; +dec_huffman_lookup(16#94, 16#c) -> {more, 16#87, 16#18}; +dec_huffman_lookup(16#94, 16#d) -> {more, 16#87, 16#1f}; +dec_huffman_lookup(16#94, 16#e) -> {more, 16#87, 16#29}; +dec_huffman_lookup(16#94, 16#f) -> {ok, 16#87, 16#38}; +dec_huffman_lookup(16#95, 16#0) -> {more, 16#89, 16#03}; +dec_huffman_lookup(16#95, 16#1) -> {more, 16#89, 16#06}; +dec_huffman_lookup(16#95, 16#2) -> {more, 16#89, 16#0a}; +dec_huffman_lookup(16#95, 16#3) -> {more, 16#89, 16#0f}; +dec_huffman_lookup(16#95, 16#4) -> {more, 16#89, 16#18}; 
+dec_huffman_lookup(16#95, 16#5) -> {more, 16#89, 16#1f}; +dec_huffman_lookup(16#95, 16#6) -> {more, 16#89, 16#29}; +dec_huffman_lookup(16#95, 16#7) -> {ok, 16#89, 16#38}; +dec_huffman_lookup(16#95, 16#8) -> {more, 16#8a, 16#03}; +dec_huffman_lookup(16#95, 16#9) -> {more, 16#8a, 16#06}; +dec_huffman_lookup(16#95, 16#a) -> {more, 16#8a, 16#0a}; +dec_huffman_lookup(16#95, 16#b) -> {more, 16#8a, 16#0f}; +dec_huffman_lookup(16#95, 16#c) -> {more, 16#8a, 16#18}; +dec_huffman_lookup(16#95, 16#d) -> {more, 16#8a, 16#1f}; +dec_huffman_lookup(16#95, 16#e) -> {more, 16#8a, 16#29}; +dec_huffman_lookup(16#95, 16#f) -> {ok, 16#8a, 16#38}; +dec_huffman_lookup(16#96, 16#0) -> {more, 16#8b, 16#02}; +dec_huffman_lookup(16#96, 16#1) -> {more, 16#8b, 16#09}; +dec_huffman_lookup(16#96, 16#2) -> {more, 16#8b, 16#17}; +dec_huffman_lookup(16#96, 16#3) -> {ok, 16#8b, 16#28}; +dec_huffman_lookup(16#96, 16#4) -> {more, 16#8c, 16#02}; +dec_huffman_lookup(16#96, 16#5) -> {more, 16#8c, 16#09}; +dec_huffman_lookup(16#96, 16#6) -> {more, 16#8c, 16#17}; +dec_huffman_lookup(16#96, 16#7) -> {ok, 16#8c, 16#28}; +dec_huffman_lookup(16#96, 16#8) -> {more, 16#8d, 16#02}; +dec_huffman_lookup(16#96, 16#9) -> {more, 16#8d, 16#09}; +dec_huffman_lookup(16#96, 16#a) -> {more, 16#8d, 16#17}; +dec_huffman_lookup(16#96, 16#b) -> {ok, 16#8d, 16#28}; +dec_huffman_lookup(16#96, 16#c) -> {more, 16#8f, 16#02}; +dec_huffman_lookup(16#96, 16#d) -> {more, 16#8f, 16#09}; +dec_huffman_lookup(16#96, 16#e) -> {more, 16#8f, 16#17}; +dec_huffman_lookup(16#96, 16#f) -> {ok, 16#8f, 16#28}; +dec_huffman_lookup(16#97, 16#0) -> {more, 16#8b, 16#03}; +dec_huffman_lookup(16#97, 16#1) -> {more, 16#8b, 16#06}; +dec_huffman_lookup(16#97, 16#2) -> {more, 16#8b, 16#0a}; +dec_huffman_lookup(16#97, 16#3) -> {more, 16#8b, 16#0f}; +dec_huffman_lookup(16#97, 16#4) -> {more, 16#8b, 16#18}; +dec_huffman_lookup(16#97, 16#5) -> {more, 16#8b, 16#1f}; +dec_huffman_lookup(16#97, 16#6) -> {more, 16#8b, 16#29}; +dec_huffman_lookup(16#97, 16#7) -> {ok, 16#8b, 16#38}; +dec_huffman_lookup(16#97, 16#8) -> {more, 16#8c, 16#03}; +dec_huffman_lookup(16#97, 16#9) -> {more, 16#8c, 16#06}; +dec_huffman_lookup(16#97, 16#a) -> {more, 16#8c, 16#0a}; +dec_huffman_lookup(16#97, 16#b) -> {more, 16#8c, 16#0f}; +dec_huffman_lookup(16#97, 16#c) -> {more, 16#8c, 16#18}; +dec_huffman_lookup(16#97, 16#d) -> {more, 16#8c, 16#1f}; +dec_huffman_lookup(16#97, 16#e) -> {more, 16#8c, 16#29}; +dec_huffman_lookup(16#97, 16#f) -> {ok, 16#8c, 16#38}; +dec_huffman_lookup(16#98, 16#0) -> {more, 16#8d, 16#03}; +dec_huffman_lookup(16#98, 16#1) -> {more, 16#8d, 16#06}; +dec_huffman_lookup(16#98, 16#2) -> {more, 16#8d, 16#0a}; +dec_huffman_lookup(16#98, 16#3) -> {more, 16#8d, 16#0f}; +dec_huffman_lookup(16#98, 16#4) -> {more, 16#8d, 16#18}; +dec_huffman_lookup(16#98, 16#5) -> {more, 16#8d, 16#1f}; +dec_huffman_lookup(16#98, 16#6) -> {more, 16#8d, 16#29}; +dec_huffman_lookup(16#98, 16#7) -> {ok, 16#8d, 16#38}; +dec_huffman_lookup(16#98, 16#8) -> {more, 16#8f, 16#03}; +dec_huffman_lookup(16#98, 16#9) -> {more, 16#8f, 16#06}; +dec_huffman_lookup(16#98, 16#a) -> {more, 16#8f, 16#0a}; +dec_huffman_lookup(16#98, 16#b) -> {more, 16#8f, 16#0f}; +dec_huffman_lookup(16#98, 16#c) -> {more, 16#8f, 16#18}; +dec_huffman_lookup(16#98, 16#d) -> {more, 16#8f, 16#1f}; +dec_huffman_lookup(16#98, 16#e) -> {more, 16#8f, 16#29}; +dec_huffman_lookup(16#98, 16#f) -> {ok, 16#8f, 16#38}; +dec_huffman_lookup(16#99, 16#0) -> {more, undefined, 16#9d}; +dec_huffman_lookup(16#99, 16#1) -> {more, undefined, 16#9e}; +dec_huffman_lookup(16#99, 
16#2) -> {more, undefined, 16#a0}; +dec_huffman_lookup(16#99, 16#3) -> {more, undefined, 16#a1}; +dec_huffman_lookup(16#99, 16#4) -> {more, undefined, 16#a4}; +dec_huffman_lookup(16#99, 16#5) -> {more, undefined, 16#a5}; +dec_huffman_lookup(16#99, 16#6) -> {more, undefined, 16#a7}; +dec_huffman_lookup(16#99, 16#7) -> {more, undefined, 16#a8}; +dec_huffman_lookup(16#99, 16#8) -> {more, undefined, 16#ac}; +dec_huffman_lookup(16#99, 16#9) -> {more, undefined, 16#ad}; +dec_huffman_lookup(16#99, 16#a) -> {more, undefined, 16#af}; +dec_huffman_lookup(16#99, 16#b) -> {more, undefined, 16#b1}; +dec_huffman_lookup(16#99, 16#c) -> {more, undefined, 16#b6}; +dec_huffman_lookup(16#99, 16#d) -> {more, undefined, 16#b9}; +dec_huffman_lookup(16#99, 16#e) -> {more, undefined, 16#bf}; +dec_huffman_lookup(16#99, 16#f) -> {ok, undefined, 16#cf}; +dec_huffman_lookup(16#9a, 16#0) -> {ok, 16#93, 16#00}; +dec_huffman_lookup(16#9a, 16#1) -> {ok, 16#95, 16#00}; +dec_huffman_lookup(16#9a, 16#2) -> {ok, 16#96, 16#00}; +dec_huffman_lookup(16#9a, 16#3) -> {ok, 16#97, 16#00}; +dec_huffman_lookup(16#9a, 16#4) -> {ok, 16#98, 16#00}; +dec_huffman_lookup(16#9a, 16#5) -> {ok, 16#9b, 16#00}; +dec_huffman_lookup(16#9a, 16#6) -> {ok, 16#9d, 16#00}; +dec_huffman_lookup(16#9a, 16#7) -> {ok, 16#9e, 16#00}; +dec_huffman_lookup(16#9a, 16#8) -> {ok, 16#a5, 16#00}; +dec_huffman_lookup(16#9a, 16#9) -> {ok, 16#a6, 16#00}; +dec_huffman_lookup(16#9a, 16#a) -> {ok, 16#a8, 16#00}; +dec_huffman_lookup(16#9a, 16#b) -> {ok, 16#ae, 16#00}; +dec_huffman_lookup(16#9a, 16#c) -> {ok, 16#af, 16#00}; +dec_huffman_lookup(16#9a, 16#d) -> {ok, 16#b4, 16#00}; +dec_huffman_lookup(16#9a, 16#e) -> {ok, 16#b6, 16#00}; +dec_huffman_lookup(16#9a, 16#f) -> {ok, 16#b7, 16#00}; +dec_huffman_lookup(16#9b, 16#0) -> {more, 16#93, 16#01}; +dec_huffman_lookup(16#9b, 16#1) -> {ok, 16#93, 16#16}; +dec_huffman_lookup(16#9b, 16#2) -> {more, 16#95, 16#01}; +dec_huffman_lookup(16#9b, 16#3) -> {ok, 16#95, 16#16}; +dec_huffman_lookup(16#9b, 16#4) -> {more, 16#96, 16#01}; +dec_huffman_lookup(16#9b, 16#5) -> {ok, 16#96, 16#16}; +dec_huffman_lookup(16#9b, 16#6) -> {more, 16#97, 16#01}; +dec_huffman_lookup(16#9b, 16#7) -> {ok, 16#97, 16#16}; +dec_huffman_lookup(16#9b, 16#8) -> {more, 16#98, 16#01}; +dec_huffman_lookup(16#9b, 16#9) -> {ok, 16#98, 16#16}; +dec_huffman_lookup(16#9b, 16#a) -> {more, 16#9b, 16#01}; +dec_huffman_lookup(16#9b, 16#b) -> {ok, 16#9b, 16#16}; +dec_huffman_lookup(16#9b, 16#c) -> {more, 16#9d, 16#01}; +dec_huffman_lookup(16#9b, 16#d) -> {ok, 16#9d, 16#16}; +dec_huffman_lookup(16#9b, 16#e) -> {more, 16#9e, 16#01}; +dec_huffman_lookup(16#9b, 16#f) -> {ok, 16#9e, 16#16}; +dec_huffman_lookup(16#9c, 16#0) -> {more, 16#93, 16#02}; +dec_huffman_lookup(16#9c, 16#1) -> {more, 16#93, 16#09}; +dec_huffman_lookup(16#9c, 16#2) -> {more, 16#93, 16#17}; +dec_huffman_lookup(16#9c, 16#3) -> {ok, 16#93, 16#28}; +dec_huffman_lookup(16#9c, 16#4) -> {more, 16#95, 16#02}; +dec_huffman_lookup(16#9c, 16#5) -> {more, 16#95, 16#09}; +dec_huffman_lookup(16#9c, 16#6) -> {more, 16#95, 16#17}; +dec_huffman_lookup(16#9c, 16#7) -> {ok, 16#95, 16#28}; +dec_huffman_lookup(16#9c, 16#8) -> {more, 16#96, 16#02}; +dec_huffman_lookup(16#9c, 16#9) -> {more, 16#96, 16#09}; +dec_huffman_lookup(16#9c, 16#a) -> {more, 16#96, 16#17}; +dec_huffman_lookup(16#9c, 16#b) -> {ok, 16#96, 16#28}; +dec_huffman_lookup(16#9c, 16#c) -> {more, 16#97, 16#02}; +dec_huffman_lookup(16#9c, 16#d) -> {more, 16#97, 16#09}; +dec_huffman_lookup(16#9c, 16#e) -> {more, 16#97, 16#17}; +dec_huffman_lookup(16#9c, 16#f) -> {ok, 
16#97, 16#28}; +dec_huffman_lookup(16#9d, 16#0) -> {more, 16#93, 16#03}; +dec_huffman_lookup(16#9d, 16#1) -> {more, 16#93, 16#06}; +dec_huffman_lookup(16#9d, 16#2) -> {more, 16#93, 16#0a}; +dec_huffman_lookup(16#9d, 16#3) -> {more, 16#93, 16#0f}; +dec_huffman_lookup(16#9d, 16#4) -> {more, 16#93, 16#18}; +dec_huffman_lookup(16#9d, 16#5) -> {more, 16#93, 16#1f}; +dec_huffman_lookup(16#9d, 16#6) -> {more, 16#93, 16#29}; +dec_huffman_lookup(16#9d, 16#7) -> {ok, 16#93, 16#38}; +dec_huffman_lookup(16#9d, 16#8) -> {more, 16#95, 16#03}; +dec_huffman_lookup(16#9d, 16#9) -> {more, 16#95, 16#06}; +dec_huffman_lookup(16#9d, 16#a) -> {more, 16#95, 16#0a}; +dec_huffman_lookup(16#9d, 16#b) -> {more, 16#95, 16#0f}; +dec_huffman_lookup(16#9d, 16#c) -> {more, 16#95, 16#18}; +dec_huffman_lookup(16#9d, 16#d) -> {more, 16#95, 16#1f}; +dec_huffman_lookup(16#9d, 16#e) -> {more, 16#95, 16#29}; +dec_huffman_lookup(16#9d, 16#f) -> {ok, 16#95, 16#38}; +dec_huffman_lookup(16#9e, 16#0) -> {more, 16#96, 16#03}; +dec_huffman_lookup(16#9e, 16#1) -> {more, 16#96, 16#06}; +dec_huffman_lookup(16#9e, 16#2) -> {more, 16#96, 16#0a}; +dec_huffman_lookup(16#9e, 16#3) -> {more, 16#96, 16#0f}; +dec_huffman_lookup(16#9e, 16#4) -> {more, 16#96, 16#18}; +dec_huffman_lookup(16#9e, 16#5) -> {more, 16#96, 16#1f}; +dec_huffman_lookup(16#9e, 16#6) -> {more, 16#96, 16#29}; +dec_huffman_lookup(16#9e, 16#7) -> {ok, 16#96, 16#38}; +dec_huffman_lookup(16#9e, 16#8) -> {more, 16#97, 16#03}; +dec_huffman_lookup(16#9e, 16#9) -> {more, 16#97, 16#06}; +dec_huffman_lookup(16#9e, 16#a) -> {more, 16#97, 16#0a}; +dec_huffman_lookup(16#9e, 16#b) -> {more, 16#97, 16#0f}; +dec_huffman_lookup(16#9e, 16#c) -> {more, 16#97, 16#18}; +dec_huffman_lookup(16#9e, 16#d) -> {more, 16#97, 16#1f}; +dec_huffman_lookup(16#9e, 16#e) -> {more, 16#97, 16#29}; +dec_huffman_lookup(16#9e, 16#f) -> {ok, 16#97, 16#38}; +dec_huffman_lookup(16#9f, 16#0) -> {more, 16#98, 16#02}; +dec_huffman_lookup(16#9f, 16#1) -> {more, 16#98, 16#09}; +dec_huffman_lookup(16#9f, 16#2) -> {more, 16#98, 16#17}; +dec_huffman_lookup(16#9f, 16#3) -> {ok, 16#98, 16#28}; +dec_huffman_lookup(16#9f, 16#4) -> {more, 16#9b, 16#02}; +dec_huffman_lookup(16#9f, 16#5) -> {more, 16#9b, 16#09}; +dec_huffman_lookup(16#9f, 16#6) -> {more, 16#9b, 16#17}; +dec_huffman_lookup(16#9f, 16#7) -> {ok, 16#9b, 16#28}; +dec_huffman_lookup(16#9f, 16#8) -> {more, 16#9d, 16#02}; +dec_huffman_lookup(16#9f, 16#9) -> {more, 16#9d, 16#09}; +dec_huffman_lookup(16#9f, 16#a) -> {more, 16#9d, 16#17}; +dec_huffman_lookup(16#9f, 16#b) -> {ok, 16#9d, 16#28}; +dec_huffman_lookup(16#9f, 16#c) -> {more, 16#9e, 16#02}; +dec_huffman_lookup(16#9f, 16#d) -> {more, 16#9e, 16#09}; +dec_huffman_lookup(16#9f, 16#e) -> {more, 16#9e, 16#17}; +dec_huffman_lookup(16#9f, 16#f) -> {ok, 16#9e, 16#28}; +dec_huffman_lookup(16#a0, 16#0) -> {more, 16#98, 16#03}; +dec_huffman_lookup(16#a0, 16#1) -> {more, 16#98, 16#06}; +dec_huffman_lookup(16#a0, 16#2) -> {more, 16#98, 16#0a}; +dec_huffman_lookup(16#a0, 16#3) -> {more, 16#98, 16#0f}; +dec_huffman_lookup(16#a0, 16#4) -> {more, 16#98, 16#18}; +dec_huffman_lookup(16#a0, 16#5) -> {more, 16#98, 16#1f}; +dec_huffman_lookup(16#a0, 16#6) -> {more, 16#98, 16#29}; +dec_huffman_lookup(16#a0, 16#7) -> {ok, 16#98, 16#38}; +dec_huffman_lookup(16#a0, 16#8) -> {more, 16#9b, 16#03}; +dec_huffman_lookup(16#a0, 16#9) -> {more, 16#9b, 16#06}; +dec_huffman_lookup(16#a0, 16#a) -> {more, 16#9b, 16#0a}; +dec_huffman_lookup(16#a0, 16#b) -> {more, 16#9b, 16#0f}; +dec_huffman_lookup(16#a0, 16#c) -> {more, 16#9b, 16#18}; 
+dec_huffman_lookup(16#a0, 16#d) -> {more, 16#9b, 16#1f}; +dec_huffman_lookup(16#a0, 16#e) -> {more, 16#9b, 16#29}; +dec_huffman_lookup(16#a0, 16#f) -> {ok, 16#9b, 16#38}; +dec_huffman_lookup(16#a1, 16#0) -> {more, 16#9d, 16#03}; +dec_huffman_lookup(16#a1, 16#1) -> {more, 16#9d, 16#06}; +dec_huffman_lookup(16#a1, 16#2) -> {more, 16#9d, 16#0a}; +dec_huffman_lookup(16#a1, 16#3) -> {more, 16#9d, 16#0f}; +dec_huffman_lookup(16#a1, 16#4) -> {more, 16#9d, 16#18}; +dec_huffman_lookup(16#a1, 16#5) -> {more, 16#9d, 16#1f}; +dec_huffman_lookup(16#a1, 16#6) -> {more, 16#9d, 16#29}; +dec_huffman_lookup(16#a1, 16#7) -> {ok, 16#9d, 16#38}; +dec_huffman_lookup(16#a1, 16#8) -> {more, 16#9e, 16#03}; +dec_huffman_lookup(16#a1, 16#9) -> {more, 16#9e, 16#06}; +dec_huffman_lookup(16#a1, 16#a) -> {more, 16#9e, 16#0a}; +dec_huffman_lookup(16#a1, 16#b) -> {more, 16#9e, 16#0f}; +dec_huffman_lookup(16#a1, 16#c) -> {more, 16#9e, 16#18}; +dec_huffman_lookup(16#a1, 16#d) -> {more, 16#9e, 16#1f}; +dec_huffman_lookup(16#a1, 16#e) -> {more, 16#9e, 16#29}; +dec_huffman_lookup(16#a1, 16#f) -> {ok, 16#9e, 16#38}; +dec_huffman_lookup(16#a2, 16#0) -> {more, 16#a5, 16#01}; +dec_huffman_lookup(16#a2, 16#1) -> {ok, 16#a5, 16#16}; +dec_huffman_lookup(16#a2, 16#2) -> {more, 16#a6, 16#01}; +dec_huffman_lookup(16#a2, 16#3) -> {ok, 16#a6, 16#16}; +dec_huffman_lookup(16#a2, 16#4) -> {more, 16#a8, 16#01}; +dec_huffman_lookup(16#a2, 16#5) -> {ok, 16#a8, 16#16}; +dec_huffman_lookup(16#a2, 16#6) -> {more, 16#ae, 16#01}; +dec_huffman_lookup(16#a2, 16#7) -> {ok, 16#ae, 16#16}; +dec_huffman_lookup(16#a2, 16#8) -> {more, 16#af, 16#01}; +dec_huffman_lookup(16#a2, 16#9) -> {ok, 16#af, 16#16}; +dec_huffman_lookup(16#a2, 16#a) -> {more, 16#b4, 16#01}; +dec_huffman_lookup(16#a2, 16#b) -> {ok, 16#b4, 16#16}; +dec_huffman_lookup(16#a2, 16#c) -> {more, 16#b6, 16#01}; +dec_huffman_lookup(16#a2, 16#d) -> {ok, 16#b6, 16#16}; +dec_huffman_lookup(16#a2, 16#e) -> {more, 16#b7, 16#01}; +dec_huffman_lookup(16#a2, 16#f) -> {ok, 16#b7, 16#16}; +dec_huffman_lookup(16#a3, 16#0) -> {more, 16#a5, 16#02}; +dec_huffman_lookup(16#a3, 16#1) -> {more, 16#a5, 16#09}; +dec_huffman_lookup(16#a3, 16#2) -> {more, 16#a5, 16#17}; +dec_huffman_lookup(16#a3, 16#3) -> {ok, 16#a5, 16#28}; +dec_huffman_lookup(16#a3, 16#4) -> {more, 16#a6, 16#02}; +dec_huffman_lookup(16#a3, 16#5) -> {more, 16#a6, 16#09}; +dec_huffman_lookup(16#a3, 16#6) -> {more, 16#a6, 16#17}; +dec_huffman_lookup(16#a3, 16#7) -> {ok, 16#a6, 16#28}; +dec_huffman_lookup(16#a3, 16#8) -> {more, 16#a8, 16#02}; +dec_huffman_lookup(16#a3, 16#9) -> {more, 16#a8, 16#09}; +dec_huffman_lookup(16#a3, 16#a) -> {more, 16#a8, 16#17}; +dec_huffman_lookup(16#a3, 16#b) -> {ok, 16#a8, 16#28}; +dec_huffman_lookup(16#a3, 16#c) -> {more, 16#ae, 16#02}; +dec_huffman_lookup(16#a3, 16#d) -> {more, 16#ae, 16#09}; +dec_huffman_lookup(16#a3, 16#e) -> {more, 16#ae, 16#17}; +dec_huffman_lookup(16#a3, 16#f) -> {ok, 16#ae, 16#28}; +dec_huffman_lookup(16#a4, 16#0) -> {more, 16#a5, 16#03}; +dec_huffman_lookup(16#a4, 16#1) -> {more, 16#a5, 16#06}; +dec_huffman_lookup(16#a4, 16#2) -> {more, 16#a5, 16#0a}; +dec_huffman_lookup(16#a4, 16#3) -> {more, 16#a5, 16#0f}; +dec_huffman_lookup(16#a4, 16#4) -> {more, 16#a5, 16#18}; +dec_huffman_lookup(16#a4, 16#5) -> {more, 16#a5, 16#1f}; +dec_huffman_lookup(16#a4, 16#6) -> {more, 16#a5, 16#29}; +dec_huffman_lookup(16#a4, 16#7) -> {ok, 16#a5, 16#38}; +dec_huffman_lookup(16#a4, 16#8) -> {more, 16#a6, 16#03}; +dec_huffman_lookup(16#a4, 16#9) -> {more, 16#a6, 16#06}; +dec_huffman_lookup(16#a4, 16#a) -> {more, 
16#a6, 16#0a}; +dec_huffman_lookup(16#a4, 16#b) -> {more, 16#a6, 16#0f}; +dec_huffman_lookup(16#a4, 16#c) -> {more, 16#a6, 16#18}; +dec_huffman_lookup(16#a4, 16#d) -> {more, 16#a6, 16#1f}; +dec_huffman_lookup(16#a4, 16#e) -> {more, 16#a6, 16#29}; +dec_huffman_lookup(16#a4, 16#f) -> {ok, 16#a6, 16#38}; +dec_huffman_lookup(16#a5, 16#0) -> {more, 16#a8, 16#03}; +dec_huffman_lookup(16#a5, 16#1) -> {more, 16#a8, 16#06}; +dec_huffman_lookup(16#a5, 16#2) -> {more, 16#a8, 16#0a}; +dec_huffman_lookup(16#a5, 16#3) -> {more, 16#a8, 16#0f}; +dec_huffman_lookup(16#a5, 16#4) -> {more, 16#a8, 16#18}; +dec_huffman_lookup(16#a5, 16#5) -> {more, 16#a8, 16#1f}; +dec_huffman_lookup(16#a5, 16#6) -> {more, 16#a8, 16#29}; +dec_huffman_lookup(16#a5, 16#7) -> {ok, 16#a8, 16#38}; +dec_huffman_lookup(16#a5, 16#8) -> {more, 16#ae, 16#03}; +dec_huffman_lookup(16#a5, 16#9) -> {more, 16#ae, 16#06}; +dec_huffman_lookup(16#a5, 16#a) -> {more, 16#ae, 16#0a}; +dec_huffman_lookup(16#a5, 16#b) -> {more, 16#ae, 16#0f}; +dec_huffman_lookup(16#a5, 16#c) -> {more, 16#ae, 16#18}; +dec_huffman_lookup(16#a5, 16#d) -> {more, 16#ae, 16#1f}; +dec_huffman_lookup(16#a5, 16#e) -> {more, 16#ae, 16#29}; +dec_huffman_lookup(16#a5, 16#f) -> {ok, 16#ae, 16#38}; +dec_huffman_lookup(16#a6, 16#0) -> {more, 16#af, 16#02}; +dec_huffman_lookup(16#a6, 16#1) -> {more, 16#af, 16#09}; +dec_huffman_lookup(16#a6, 16#2) -> {more, 16#af, 16#17}; +dec_huffman_lookup(16#a6, 16#3) -> {ok, 16#af, 16#28}; +dec_huffman_lookup(16#a6, 16#4) -> {more, 16#b4, 16#02}; +dec_huffman_lookup(16#a6, 16#5) -> {more, 16#b4, 16#09}; +dec_huffman_lookup(16#a6, 16#6) -> {more, 16#b4, 16#17}; +dec_huffman_lookup(16#a6, 16#7) -> {ok, 16#b4, 16#28}; +dec_huffman_lookup(16#a6, 16#8) -> {more, 16#b6, 16#02}; +dec_huffman_lookup(16#a6, 16#9) -> {more, 16#b6, 16#09}; +dec_huffman_lookup(16#a6, 16#a) -> {more, 16#b6, 16#17}; +dec_huffman_lookup(16#a6, 16#b) -> {ok, 16#b6, 16#28}; +dec_huffman_lookup(16#a6, 16#c) -> {more, 16#b7, 16#02}; +dec_huffman_lookup(16#a6, 16#d) -> {more, 16#b7, 16#09}; +dec_huffman_lookup(16#a6, 16#e) -> {more, 16#b7, 16#17}; +dec_huffman_lookup(16#a6, 16#f) -> {ok, 16#b7, 16#28}; +dec_huffman_lookup(16#a7, 16#0) -> {more, 16#af, 16#03}; +dec_huffman_lookup(16#a7, 16#1) -> {more, 16#af, 16#06}; +dec_huffman_lookup(16#a7, 16#2) -> {more, 16#af, 16#0a}; +dec_huffman_lookup(16#a7, 16#3) -> {more, 16#af, 16#0f}; +dec_huffman_lookup(16#a7, 16#4) -> {more, 16#af, 16#18}; +dec_huffman_lookup(16#a7, 16#5) -> {more, 16#af, 16#1f}; +dec_huffman_lookup(16#a7, 16#6) -> {more, 16#af, 16#29}; +dec_huffman_lookup(16#a7, 16#7) -> {ok, 16#af, 16#38}; +dec_huffman_lookup(16#a7, 16#8) -> {more, 16#b4, 16#03}; +dec_huffman_lookup(16#a7, 16#9) -> {more, 16#b4, 16#06}; +dec_huffman_lookup(16#a7, 16#a) -> {more, 16#b4, 16#0a}; +dec_huffman_lookup(16#a7, 16#b) -> {more, 16#b4, 16#0f}; +dec_huffman_lookup(16#a7, 16#c) -> {more, 16#b4, 16#18}; +dec_huffman_lookup(16#a7, 16#d) -> {more, 16#b4, 16#1f}; +dec_huffman_lookup(16#a7, 16#e) -> {more, 16#b4, 16#29}; +dec_huffman_lookup(16#a7, 16#f) -> {ok, 16#b4, 16#38}; +dec_huffman_lookup(16#a8, 16#0) -> {more, 16#b6, 16#03}; +dec_huffman_lookup(16#a8, 16#1) -> {more, 16#b6, 16#06}; +dec_huffman_lookup(16#a8, 16#2) -> {more, 16#b6, 16#0a}; +dec_huffman_lookup(16#a8, 16#3) -> {more, 16#b6, 16#0f}; +dec_huffman_lookup(16#a8, 16#4) -> {more, 16#b6, 16#18}; +dec_huffman_lookup(16#a8, 16#5) -> {more, 16#b6, 16#1f}; +dec_huffman_lookup(16#a8, 16#6) -> {more, 16#b6, 16#29}; +dec_huffman_lookup(16#a8, 16#7) -> {ok, 16#b6, 16#38}; 
+dec_huffman_lookup(16#a8, 16#8) -> {more, 16#b7, 16#03}; +dec_huffman_lookup(16#a8, 16#9) -> {more, 16#b7, 16#06}; +dec_huffman_lookup(16#a8, 16#a) -> {more, 16#b7, 16#0a}; +dec_huffman_lookup(16#a8, 16#b) -> {more, 16#b7, 16#0f}; +dec_huffman_lookup(16#a8, 16#c) -> {more, 16#b7, 16#18}; +dec_huffman_lookup(16#a8, 16#d) -> {more, 16#b7, 16#1f}; +dec_huffman_lookup(16#a8, 16#e) -> {more, 16#b7, 16#29}; +dec_huffman_lookup(16#a8, 16#f) -> {ok, 16#b7, 16#38}; +dec_huffman_lookup(16#a9, 16#0) -> {ok, 16#bc, 16#00}; +dec_huffman_lookup(16#a9, 16#1) -> {ok, 16#bf, 16#00}; +dec_huffman_lookup(16#a9, 16#2) -> {ok, 16#c5, 16#00}; +dec_huffman_lookup(16#a9, 16#3) -> {ok, 16#e7, 16#00}; +dec_huffman_lookup(16#a9, 16#4) -> {ok, 16#ef, 16#00}; +dec_huffman_lookup(16#a9, 16#5) -> {more, undefined, 16#b0}; +dec_huffman_lookup(16#a9, 16#6) -> {more, undefined, 16#b2}; +dec_huffman_lookup(16#a9, 16#7) -> {more, undefined, 16#b3}; +dec_huffman_lookup(16#a9, 16#8) -> {more, undefined, 16#b7}; +dec_huffman_lookup(16#a9, 16#9) -> {more, undefined, 16#b8}; +dec_huffman_lookup(16#a9, 16#a) -> {more, undefined, 16#ba}; +dec_huffman_lookup(16#a9, 16#b) -> {more, undefined, 16#bb}; +dec_huffman_lookup(16#a9, 16#c) -> {more, undefined, 16#c0}; +dec_huffman_lookup(16#a9, 16#d) -> {more, undefined, 16#c7}; +dec_huffman_lookup(16#a9, 16#e) -> {more, undefined, 16#d0}; +dec_huffman_lookup(16#a9, 16#f) -> {ok, undefined, 16#df}; +dec_huffman_lookup(16#aa, 16#0) -> {more, 16#bc, 16#01}; +dec_huffman_lookup(16#aa, 16#1) -> {ok, 16#bc, 16#16}; +dec_huffman_lookup(16#aa, 16#2) -> {more, 16#bf, 16#01}; +dec_huffman_lookup(16#aa, 16#3) -> {ok, 16#bf, 16#16}; +dec_huffman_lookup(16#aa, 16#4) -> {more, 16#c5, 16#01}; +dec_huffman_lookup(16#aa, 16#5) -> {ok, 16#c5, 16#16}; +dec_huffman_lookup(16#aa, 16#6) -> {more, 16#e7, 16#01}; +dec_huffman_lookup(16#aa, 16#7) -> {ok, 16#e7, 16#16}; +dec_huffman_lookup(16#aa, 16#8) -> {more, 16#ef, 16#01}; +dec_huffman_lookup(16#aa, 16#9) -> {ok, 16#ef, 16#16}; +dec_huffman_lookup(16#aa, 16#a) -> {ok, 16#09, 16#00}; +dec_huffman_lookup(16#aa, 16#b) -> {ok, 16#8e, 16#00}; +dec_huffman_lookup(16#aa, 16#c) -> {ok, 16#90, 16#00}; +dec_huffman_lookup(16#aa, 16#d) -> {ok, 16#91, 16#00}; +dec_huffman_lookup(16#aa, 16#e) -> {ok, 16#94, 16#00}; +dec_huffman_lookup(16#aa, 16#f) -> {ok, 16#9f, 16#00}; +dec_huffman_lookup(16#ab, 16#0) -> {more, 16#bc, 16#02}; +dec_huffman_lookup(16#ab, 16#1) -> {more, 16#bc, 16#09}; +dec_huffman_lookup(16#ab, 16#2) -> {more, 16#bc, 16#17}; +dec_huffman_lookup(16#ab, 16#3) -> {ok, 16#bc, 16#28}; +dec_huffman_lookup(16#ab, 16#4) -> {more, 16#bf, 16#02}; +dec_huffman_lookup(16#ab, 16#5) -> {more, 16#bf, 16#09}; +dec_huffman_lookup(16#ab, 16#6) -> {more, 16#bf, 16#17}; +dec_huffman_lookup(16#ab, 16#7) -> {ok, 16#bf, 16#28}; +dec_huffman_lookup(16#ab, 16#8) -> {more, 16#c5, 16#02}; +dec_huffman_lookup(16#ab, 16#9) -> {more, 16#c5, 16#09}; +dec_huffman_lookup(16#ab, 16#a) -> {more, 16#c5, 16#17}; +dec_huffman_lookup(16#ab, 16#b) -> {ok, 16#c5, 16#28}; +dec_huffman_lookup(16#ab, 16#c) -> {more, 16#e7, 16#02}; +dec_huffman_lookup(16#ab, 16#d) -> {more, 16#e7, 16#09}; +dec_huffman_lookup(16#ab, 16#e) -> {more, 16#e7, 16#17}; +dec_huffman_lookup(16#ab, 16#f) -> {ok, 16#e7, 16#28}; +dec_huffman_lookup(16#ac, 16#0) -> {more, 16#bc, 16#03}; +dec_huffman_lookup(16#ac, 16#1) -> {more, 16#bc, 16#06}; +dec_huffman_lookup(16#ac, 16#2) -> {more, 16#bc, 16#0a}; +dec_huffman_lookup(16#ac, 16#3) -> {more, 16#bc, 16#0f}; +dec_huffman_lookup(16#ac, 16#4) -> {more, 16#bc, 16#18}; 
+dec_huffman_lookup(16#ac, 16#5) -> {more, 16#bc, 16#1f}; +dec_huffman_lookup(16#ac, 16#6) -> {more, 16#bc, 16#29}; +dec_huffman_lookup(16#ac, 16#7) -> {ok, 16#bc, 16#38}; +dec_huffman_lookup(16#ac, 16#8) -> {more, 16#bf, 16#03}; +dec_huffman_lookup(16#ac, 16#9) -> {more, 16#bf, 16#06}; +dec_huffman_lookup(16#ac, 16#a) -> {more, 16#bf, 16#0a}; +dec_huffman_lookup(16#ac, 16#b) -> {more, 16#bf, 16#0f}; +dec_huffman_lookup(16#ac, 16#c) -> {more, 16#bf, 16#18}; +dec_huffman_lookup(16#ac, 16#d) -> {more, 16#bf, 16#1f}; +dec_huffman_lookup(16#ac, 16#e) -> {more, 16#bf, 16#29}; +dec_huffman_lookup(16#ac, 16#f) -> {ok, 16#bf, 16#38}; +dec_huffman_lookup(16#ad, 16#0) -> {more, 16#c5, 16#03}; +dec_huffman_lookup(16#ad, 16#1) -> {more, 16#c5, 16#06}; +dec_huffman_lookup(16#ad, 16#2) -> {more, 16#c5, 16#0a}; +dec_huffman_lookup(16#ad, 16#3) -> {more, 16#c5, 16#0f}; +dec_huffman_lookup(16#ad, 16#4) -> {more, 16#c5, 16#18}; +dec_huffman_lookup(16#ad, 16#5) -> {more, 16#c5, 16#1f}; +dec_huffman_lookup(16#ad, 16#6) -> {more, 16#c5, 16#29}; +dec_huffman_lookup(16#ad, 16#7) -> {ok, 16#c5, 16#38}; +dec_huffman_lookup(16#ad, 16#8) -> {more, 16#e7, 16#03}; +dec_huffman_lookup(16#ad, 16#9) -> {more, 16#e7, 16#06}; +dec_huffman_lookup(16#ad, 16#a) -> {more, 16#e7, 16#0a}; +dec_huffman_lookup(16#ad, 16#b) -> {more, 16#e7, 16#0f}; +dec_huffman_lookup(16#ad, 16#c) -> {more, 16#e7, 16#18}; +dec_huffman_lookup(16#ad, 16#d) -> {more, 16#e7, 16#1f}; +dec_huffman_lookup(16#ad, 16#e) -> {more, 16#e7, 16#29}; +dec_huffman_lookup(16#ad, 16#f) -> {ok, 16#e7, 16#38}; +dec_huffman_lookup(16#ae, 16#0) -> {more, 16#ef, 16#02}; +dec_huffman_lookup(16#ae, 16#1) -> {more, 16#ef, 16#09}; +dec_huffman_lookup(16#ae, 16#2) -> {more, 16#ef, 16#17}; +dec_huffman_lookup(16#ae, 16#3) -> {ok, 16#ef, 16#28}; +dec_huffman_lookup(16#ae, 16#4) -> {more, 16#09, 16#01}; +dec_huffman_lookup(16#ae, 16#5) -> {ok, 16#09, 16#16}; +dec_huffman_lookup(16#ae, 16#6) -> {more, 16#8e, 16#01}; +dec_huffman_lookup(16#ae, 16#7) -> {ok, 16#8e, 16#16}; +dec_huffman_lookup(16#ae, 16#8) -> {more, 16#90, 16#01}; +dec_huffman_lookup(16#ae, 16#9) -> {ok, 16#90, 16#16}; +dec_huffman_lookup(16#ae, 16#a) -> {more, 16#91, 16#01}; +dec_huffman_lookup(16#ae, 16#b) -> {ok, 16#91, 16#16}; +dec_huffman_lookup(16#ae, 16#c) -> {more, 16#94, 16#01}; +dec_huffman_lookup(16#ae, 16#d) -> {ok, 16#94, 16#16}; +dec_huffman_lookup(16#ae, 16#e) -> {more, 16#9f, 16#01}; +dec_huffman_lookup(16#ae, 16#f) -> {ok, 16#9f, 16#16}; +dec_huffman_lookup(16#af, 16#0) -> {more, 16#ef, 16#03}; +dec_huffman_lookup(16#af, 16#1) -> {more, 16#ef, 16#06}; +dec_huffman_lookup(16#af, 16#2) -> {more, 16#ef, 16#0a}; +dec_huffman_lookup(16#af, 16#3) -> {more, 16#ef, 16#0f}; +dec_huffman_lookup(16#af, 16#4) -> {more, 16#ef, 16#18}; +dec_huffman_lookup(16#af, 16#5) -> {more, 16#ef, 16#1f}; +dec_huffman_lookup(16#af, 16#6) -> {more, 16#ef, 16#29}; +dec_huffman_lookup(16#af, 16#7) -> {ok, 16#ef, 16#38}; +dec_huffman_lookup(16#af, 16#8) -> {more, 16#09, 16#02}; +dec_huffman_lookup(16#af, 16#9) -> {more, 16#09, 16#09}; +dec_huffman_lookup(16#af, 16#a) -> {more, 16#09, 16#17}; +dec_huffman_lookup(16#af, 16#b) -> {ok, 16#09, 16#28}; +dec_huffman_lookup(16#af, 16#c) -> {more, 16#8e, 16#02}; +dec_huffman_lookup(16#af, 16#d) -> {more, 16#8e, 16#09}; +dec_huffman_lookup(16#af, 16#e) -> {more, 16#8e, 16#17}; +dec_huffman_lookup(16#af, 16#f) -> {ok, 16#8e, 16#28}; +dec_huffman_lookup(16#b0, 16#0) -> {more, 16#09, 16#03}; +dec_huffman_lookup(16#b0, 16#1) -> {more, 16#09, 16#06}; +dec_huffman_lookup(16#b0, 16#2) -> {more, 
16#09, 16#0a}; +dec_huffman_lookup(16#b0, 16#3) -> {more, 16#09, 16#0f}; +dec_huffman_lookup(16#b0, 16#4) -> {more, 16#09, 16#18}; +dec_huffman_lookup(16#b0, 16#5) -> {more, 16#09, 16#1f}; +dec_huffman_lookup(16#b0, 16#6) -> {more, 16#09, 16#29}; +dec_huffman_lookup(16#b0, 16#7) -> {ok, 16#09, 16#38}; +dec_huffman_lookup(16#b0, 16#8) -> {more, 16#8e, 16#03}; +dec_huffman_lookup(16#b0, 16#9) -> {more, 16#8e, 16#06}; +dec_huffman_lookup(16#b0, 16#a) -> {more, 16#8e, 16#0a}; +dec_huffman_lookup(16#b0, 16#b) -> {more, 16#8e, 16#0f}; +dec_huffman_lookup(16#b0, 16#c) -> {more, 16#8e, 16#18}; +dec_huffman_lookup(16#b0, 16#d) -> {more, 16#8e, 16#1f}; +dec_huffman_lookup(16#b0, 16#e) -> {more, 16#8e, 16#29}; +dec_huffman_lookup(16#b0, 16#f) -> {ok, 16#8e, 16#38}; +dec_huffman_lookup(16#b1, 16#0) -> {more, 16#90, 16#02}; +dec_huffman_lookup(16#b1, 16#1) -> {more, 16#90, 16#09}; +dec_huffman_lookup(16#b1, 16#2) -> {more, 16#90, 16#17}; +dec_huffman_lookup(16#b1, 16#3) -> {ok, 16#90, 16#28}; +dec_huffman_lookup(16#b1, 16#4) -> {more, 16#91, 16#02}; +dec_huffman_lookup(16#b1, 16#5) -> {more, 16#91, 16#09}; +dec_huffman_lookup(16#b1, 16#6) -> {more, 16#91, 16#17}; +dec_huffman_lookup(16#b1, 16#7) -> {ok, 16#91, 16#28}; +dec_huffman_lookup(16#b1, 16#8) -> {more, 16#94, 16#02}; +dec_huffman_lookup(16#b1, 16#9) -> {more, 16#94, 16#09}; +dec_huffman_lookup(16#b1, 16#a) -> {more, 16#94, 16#17}; +dec_huffman_lookup(16#b1, 16#b) -> {ok, 16#94, 16#28}; +dec_huffman_lookup(16#b1, 16#c) -> {more, 16#9f, 16#02}; +dec_huffman_lookup(16#b1, 16#d) -> {more, 16#9f, 16#09}; +dec_huffman_lookup(16#b1, 16#e) -> {more, 16#9f, 16#17}; +dec_huffman_lookup(16#b1, 16#f) -> {ok, 16#9f, 16#28}; +dec_huffman_lookup(16#b2, 16#0) -> {more, 16#90, 16#03}; +dec_huffman_lookup(16#b2, 16#1) -> {more, 16#90, 16#06}; +dec_huffman_lookup(16#b2, 16#2) -> {more, 16#90, 16#0a}; +dec_huffman_lookup(16#b2, 16#3) -> {more, 16#90, 16#0f}; +dec_huffman_lookup(16#b2, 16#4) -> {more, 16#90, 16#18}; +dec_huffman_lookup(16#b2, 16#5) -> {more, 16#90, 16#1f}; +dec_huffman_lookup(16#b2, 16#6) -> {more, 16#90, 16#29}; +dec_huffman_lookup(16#b2, 16#7) -> {ok, 16#90, 16#38}; +dec_huffman_lookup(16#b2, 16#8) -> {more, 16#91, 16#03}; +dec_huffman_lookup(16#b2, 16#9) -> {more, 16#91, 16#06}; +dec_huffman_lookup(16#b2, 16#a) -> {more, 16#91, 16#0a}; +dec_huffman_lookup(16#b2, 16#b) -> {more, 16#91, 16#0f}; +dec_huffman_lookup(16#b2, 16#c) -> {more, 16#91, 16#18}; +dec_huffman_lookup(16#b2, 16#d) -> {more, 16#91, 16#1f}; +dec_huffman_lookup(16#b2, 16#e) -> {more, 16#91, 16#29}; +dec_huffman_lookup(16#b2, 16#f) -> {ok, 16#91, 16#38}; +dec_huffman_lookup(16#b3, 16#0) -> {more, 16#94, 16#03}; +dec_huffman_lookup(16#b3, 16#1) -> {more, 16#94, 16#06}; +dec_huffman_lookup(16#b3, 16#2) -> {more, 16#94, 16#0a}; +dec_huffman_lookup(16#b3, 16#3) -> {more, 16#94, 16#0f}; +dec_huffman_lookup(16#b3, 16#4) -> {more, 16#94, 16#18}; +dec_huffman_lookup(16#b3, 16#5) -> {more, 16#94, 16#1f}; +dec_huffman_lookup(16#b3, 16#6) -> {more, 16#94, 16#29}; +dec_huffman_lookup(16#b3, 16#7) -> {ok, 16#94, 16#38}; +dec_huffman_lookup(16#b3, 16#8) -> {more, 16#9f, 16#03}; +dec_huffman_lookup(16#b3, 16#9) -> {more, 16#9f, 16#06}; +dec_huffman_lookup(16#b3, 16#a) -> {more, 16#9f, 16#0a}; +dec_huffman_lookup(16#b3, 16#b) -> {more, 16#9f, 16#0f}; +dec_huffman_lookup(16#b3, 16#c) -> {more, 16#9f, 16#18}; +dec_huffman_lookup(16#b3, 16#d) -> {more, 16#9f, 16#1f}; +dec_huffman_lookup(16#b3, 16#e) -> {more, 16#9f, 16#29}; +dec_huffman_lookup(16#b3, 16#f) -> {ok, 16#9f, 16#38}; 
+dec_huffman_lookup(16#b4, 16#0) -> {ok, 16#ab, 16#00}; +dec_huffman_lookup(16#b4, 16#1) -> {ok, 16#ce, 16#00}; +dec_huffman_lookup(16#b4, 16#2) -> {ok, 16#d7, 16#00}; +dec_huffman_lookup(16#b4, 16#3) -> {ok, 16#e1, 16#00}; +dec_huffman_lookup(16#b4, 16#4) -> {ok, 16#ec, 16#00}; +dec_huffman_lookup(16#b4, 16#5) -> {ok, 16#ed, 16#00}; +dec_huffman_lookup(16#b4, 16#6) -> {more, undefined, 16#bc}; +dec_huffman_lookup(16#b4, 16#7) -> {more, undefined, 16#bd}; +dec_huffman_lookup(16#b4, 16#8) -> {more, undefined, 16#c1}; +dec_huffman_lookup(16#b4, 16#9) -> {more, undefined, 16#c4}; +dec_huffman_lookup(16#b4, 16#a) -> {more, undefined, 16#c8}; +dec_huffman_lookup(16#b4, 16#b) -> {more, undefined, 16#cb}; +dec_huffman_lookup(16#b4, 16#c) -> {more, undefined, 16#d1}; +dec_huffman_lookup(16#b4, 16#d) -> {more, undefined, 16#d8}; +dec_huffman_lookup(16#b4, 16#e) -> {more, undefined, 16#e0}; +dec_huffman_lookup(16#b4, 16#f) -> {ok, undefined, 16#ee}; +dec_huffman_lookup(16#b5, 16#0) -> {more, 16#ab, 16#01}; +dec_huffman_lookup(16#b5, 16#1) -> {ok, 16#ab, 16#16}; +dec_huffman_lookup(16#b5, 16#2) -> {more, 16#ce, 16#01}; +dec_huffman_lookup(16#b5, 16#3) -> {ok, 16#ce, 16#16}; +dec_huffman_lookup(16#b5, 16#4) -> {more, 16#d7, 16#01}; +dec_huffman_lookup(16#b5, 16#5) -> {ok, 16#d7, 16#16}; +dec_huffman_lookup(16#b5, 16#6) -> {more, 16#e1, 16#01}; +dec_huffman_lookup(16#b5, 16#7) -> {ok, 16#e1, 16#16}; +dec_huffman_lookup(16#b5, 16#8) -> {more, 16#ec, 16#01}; +dec_huffman_lookup(16#b5, 16#9) -> {ok, 16#ec, 16#16}; +dec_huffman_lookup(16#b5, 16#a) -> {more, 16#ed, 16#01}; +dec_huffman_lookup(16#b5, 16#b) -> {ok, 16#ed, 16#16}; +dec_huffman_lookup(16#b5, 16#c) -> {ok, 16#c7, 16#00}; +dec_huffman_lookup(16#b5, 16#d) -> {ok, 16#cf, 16#00}; +dec_huffman_lookup(16#b5, 16#e) -> {ok, 16#ea, 16#00}; +dec_huffman_lookup(16#b5, 16#f) -> {ok, 16#eb, 16#00}; +dec_huffman_lookup(16#b6, 16#0) -> {more, 16#ab, 16#02}; +dec_huffman_lookup(16#b6, 16#1) -> {more, 16#ab, 16#09}; +dec_huffman_lookup(16#b6, 16#2) -> {more, 16#ab, 16#17}; +dec_huffman_lookup(16#b6, 16#3) -> {ok, 16#ab, 16#28}; +dec_huffman_lookup(16#b6, 16#4) -> {more, 16#ce, 16#02}; +dec_huffman_lookup(16#b6, 16#5) -> {more, 16#ce, 16#09}; +dec_huffman_lookup(16#b6, 16#6) -> {more, 16#ce, 16#17}; +dec_huffman_lookup(16#b6, 16#7) -> {ok, 16#ce, 16#28}; +dec_huffman_lookup(16#b6, 16#8) -> {more, 16#d7, 16#02}; +dec_huffman_lookup(16#b6, 16#9) -> {more, 16#d7, 16#09}; +dec_huffman_lookup(16#b6, 16#a) -> {more, 16#d7, 16#17}; +dec_huffman_lookup(16#b6, 16#b) -> {ok, 16#d7, 16#28}; +dec_huffman_lookup(16#b6, 16#c) -> {more, 16#e1, 16#02}; +dec_huffman_lookup(16#b6, 16#d) -> {more, 16#e1, 16#09}; +dec_huffman_lookup(16#b6, 16#e) -> {more, 16#e1, 16#17}; +dec_huffman_lookup(16#b6, 16#f) -> {ok, 16#e1, 16#28}; +dec_huffman_lookup(16#b7, 16#0) -> {more, 16#ab, 16#03}; +dec_huffman_lookup(16#b7, 16#1) -> {more, 16#ab, 16#06}; +dec_huffman_lookup(16#b7, 16#2) -> {more, 16#ab, 16#0a}; +dec_huffman_lookup(16#b7, 16#3) -> {more, 16#ab, 16#0f}; +dec_huffman_lookup(16#b7, 16#4) -> {more, 16#ab, 16#18}; +dec_huffman_lookup(16#b7, 16#5) -> {more, 16#ab, 16#1f}; +dec_huffman_lookup(16#b7, 16#6) -> {more, 16#ab, 16#29}; +dec_huffman_lookup(16#b7, 16#7) -> {ok, 16#ab, 16#38}; +dec_huffman_lookup(16#b7, 16#8) -> {more, 16#ce, 16#03}; +dec_huffman_lookup(16#b7, 16#9) -> {more, 16#ce, 16#06}; +dec_huffman_lookup(16#b7, 16#a) -> {more, 16#ce, 16#0a}; +dec_huffman_lookup(16#b7, 16#b) -> {more, 16#ce, 16#0f}; +dec_huffman_lookup(16#b7, 16#c) -> {more, 16#ce, 16#18}; 
+dec_huffman_lookup(16#b7, 16#d) -> {more, 16#ce, 16#1f}; +dec_huffman_lookup(16#b7, 16#e) -> {more, 16#ce, 16#29}; +dec_huffman_lookup(16#b7, 16#f) -> {ok, 16#ce, 16#38}; +dec_huffman_lookup(16#b8, 16#0) -> {more, 16#d7, 16#03}; +dec_huffman_lookup(16#b8, 16#1) -> {more, 16#d7, 16#06}; +dec_huffman_lookup(16#b8, 16#2) -> {more, 16#d7, 16#0a}; +dec_huffman_lookup(16#b8, 16#3) -> {more, 16#d7, 16#0f}; +dec_huffman_lookup(16#b8, 16#4) -> {more, 16#d7, 16#18}; +dec_huffman_lookup(16#b8, 16#5) -> {more, 16#d7, 16#1f}; +dec_huffman_lookup(16#b8, 16#6) -> {more, 16#d7, 16#29}; +dec_huffman_lookup(16#b8, 16#7) -> {ok, 16#d7, 16#38}; +dec_huffman_lookup(16#b8, 16#8) -> {more, 16#e1, 16#03}; +dec_huffman_lookup(16#b8, 16#9) -> {more, 16#e1, 16#06}; +dec_huffman_lookup(16#b8, 16#a) -> {more, 16#e1, 16#0a}; +dec_huffman_lookup(16#b8, 16#b) -> {more, 16#e1, 16#0f}; +dec_huffman_lookup(16#b8, 16#c) -> {more, 16#e1, 16#18}; +dec_huffman_lookup(16#b8, 16#d) -> {more, 16#e1, 16#1f}; +dec_huffman_lookup(16#b8, 16#e) -> {more, 16#e1, 16#29}; +dec_huffman_lookup(16#b8, 16#f) -> {ok, 16#e1, 16#38}; +dec_huffman_lookup(16#b9, 16#0) -> {more, 16#ec, 16#02}; +dec_huffman_lookup(16#b9, 16#1) -> {more, 16#ec, 16#09}; +dec_huffman_lookup(16#b9, 16#2) -> {more, 16#ec, 16#17}; +dec_huffman_lookup(16#b9, 16#3) -> {ok, 16#ec, 16#28}; +dec_huffman_lookup(16#b9, 16#4) -> {more, 16#ed, 16#02}; +dec_huffman_lookup(16#b9, 16#5) -> {more, 16#ed, 16#09}; +dec_huffman_lookup(16#b9, 16#6) -> {more, 16#ed, 16#17}; +dec_huffman_lookup(16#b9, 16#7) -> {ok, 16#ed, 16#28}; +dec_huffman_lookup(16#b9, 16#8) -> {more, 16#c7, 16#01}; +dec_huffman_lookup(16#b9, 16#9) -> {ok, 16#c7, 16#16}; +dec_huffman_lookup(16#b9, 16#a) -> {more, 16#cf, 16#01}; +dec_huffman_lookup(16#b9, 16#b) -> {ok, 16#cf, 16#16}; +dec_huffman_lookup(16#b9, 16#c) -> {more, 16#ea, 16#01}; +dec_huffman_lookup(16#b9, 16#d) -> {ok, 16#ea, 16#16}; +dec_huffman_lookup(16#b9, 16#e) -> {more, 16#eb, 16#01}; +dec_huffman_lookup(16#b9, 16#f) -> {ok, 16#eb, 16#16}; +dec_huffman_lookup(16#ba, 16#0) -> {more, 16#ec, 16#03}; +dec_huffman_lookup(16#ba, 16#1) -> {more, 16#ec, 16#06}; +dec_huffman_lookup(16#ba, 16#2) -> {more, 16#ec, 16#0a}; +dec_huffman_lookup(16#ba, 16#3) -> {more, 16#ec, 16#0f}; +dec_huffman_lookup(16#ba, 16#4) -> {more, 16#ec, 16#18}; +dec_huffman_lookup(16#ba, 16#5) -> {more, 16#ec, 16#1f}; +dec_huffman_lookup(16#ba, 16#6) -> {more, 16#ec, 16#29}; +dec_huffman_lookup(16#ba, 16#7) -> {ok, 16#ec, 16#38}; +dec_huffman_lookup(16#ba, 16#8) -> {more, 16#ed, 16#03}; +dec_huffman_lookup(16#ba, 16#9) -> {more, 16#ed, 16#06}; +dec_huffman_lookup(16#ba, 16#a) -> {more, 16#ed, 16#0a}; +dec_huffman_lookup(16#ba, 16#b) -> {more, 16#ed, 16#0f}; +dec_huffman_lookup(16#ba, 16#c) -> {more, 16#ed, 16#18}; +dec_huffman_lookup(16#ba, 16#d) -> {more, 16#ed, 16#1f}; +dec_huffman_lookup(16#ba, 16#e) -> {more, 16#ed, 16#29}; +dec_huffman_lookup(16#ba, 16#f) -> {ok, 16#ed, 16#38}; +dec_huffman_lookup(16#bb, 16#0) -> {more, 16#c7, 16#02}; +dec_huffman_lookup(16#bb, 16#1) -> {more, 16#c7, 16#09}; +dec_huffman_lookup(16#bb, 16#2) -> {more, 16#c7, 16#17}; +dec_huffman_lookup(16#bb, 16#3) -> {ok, 16#c7, 16#28}; +dec_huffman_lookup(16#bb, 16#4) -> {more, 16#cf, 16#02}; +dec_huffman_lookup(16#bb, 16#5) -> {more, 16#cf, 16#09}; +dec_huffman_lookup(16#bb, 16#6) -> {more, 16#cf, 16#17}; +dec_huffman_lookup(16#bb, 16#7) -> {ok, 16#cf, 16#28}; +dec_huffman_lookup(16#bb, 16#8) -> {more, 16#ea, 16#02}; +dec_huffman_lookup(16#bb, 16#9) -> {more, 16#ea, 16#09}; +dec_huffman_lookup(16#bb, 16#a) -> {more, 
16#ea, 16#17}; +dec_huffman_lookup(16#bb, 16#b) -> {ok, 16#ea, 16#28}; +dec_huffman_lookup(16#bb, 16#c) -> {more, 16#eb, 16#02}; +dec_huffman_lookup(16#bb, 16#d) -> {more, 16#eb, 16#09}; +dec_huffman_lookup(16#bb, 16#e) -> {more, 16#eb, 16#17}; +dec_huffman_lookup(16#bb, 16#f) -> {ok, 16#eb, 16#28}; +dec_huffman_lookup(16#bc, 16#0) -> {more, 16#c7, 16#03}; +dec_huffman_lookup(16#bc, 16#1) -> {more, 16#c7, 16#06}; +dec_huffman_lookup(16#bc, 16#2) -> {more, 16#c7, 16#0a}; +dec_huffman_lookup(16#bc, 16#3) -> {more, 16#c7, 16#0f}; +dec_huffman_lookup(16#bc, 16#4) -> {more, 16#c7, 16#18}; +dec_huffman_lookup(16#bc, 16#5) -> {more, 16#c7, 16#1f}; +dec_huffman_lookup(16#bc, 16#6) -> {more, 16#c7, 16#29}; +dec_huffman_lookup(16#bc, 16#7) -> {ok, 16#c7, 16#38}; +dec_huffman_lookup(16#bc, 16#8) -> {more, 16#cf, 16#03}; +dec_huffman_lookup(16#bc, 16#9) -> {more, 16#cf, 16#06}; +dec_huffman_lookup(16#bc, 16#a) -> {more, 16#cf, 16#0a}; +dec_huffman_lookup(16#bc, 16#b) -> {more, 16#cf, 16#0f}; +dec_huffman_lookup(16#bc, 16#c) -> {more, 16#cf, 16#18}; +dec_huffman_lookup(16#bc, 16#d) -> {more, 16#cf, 16#1f}; +dec_huffman_lookup(16#bc, 16#e) -> {more, 16#cf, 16#29}; +dec_huffman_lookup(16#bc, 16#f) -> {ok, 16#cf, 16#38}; +dec_huffman_lookup(16#bd, 16#0) -> {more, 16#ea, 16#03}; +dec_huffman_lookup(16#bd, 16#1) -> {more, 16#ea, 16#06}; +dec_huffman_lookup(16#bd, 16#2) -> {more, 16#ea, 16#0a}; +dec_huffman_lookup(16#bd, 16#3) -> {more, 16#ea, 16#0f}; +dec_huffman_lookup(16#bd, 16#4) -> {more, 16#ea, 16#18}; +dec_huffman_lookup(16#bd, 16#5) -> {more, 16#ea, 16#1f}; +dec_huffman_lookup(16#bd, 16#6) -> {more, 16#ea, 16#29}; +dec_huffman_lookup(16#bd, 16#7) -> {ok, 16#ea, 16#38}; +dec_huffman_lookup(16#bd, 16#8) -> {more, 16#eb, 16#03}; +dec_huffman_lookup(16#bd, 16#9) -> {more, 16#eb, 16#06}; +dec_huffman_lookup(16#bd, 16#a) -> {more, 16#eb, 16#0a}; +dec_huffman_lookup(16#bd, 16#b) -> {more, 16#eb, 16#0f}; +dec_huffman_lookup(16#bd, 16#c) -> {more, 16#eb, 16#18}; +dec_huffman_lookup(16#bd, 16#d) -> {more, 16#eb, 16#1f}; +dec_huffman_lookup(16#bd, 16#e) -> {more, 16#eb, 16#29}; +dec_huffman_lookup(16#bd, 16#f) -> {ok, 16#eb, 16#38}; +dec_huffman_lookup(16#be, 16#0) -> {more, undefined, 16#c2}; +dec_huffman_lookup(16#be, 16#1) -> {more, undefined, 16#c3}; +dec_huffman_lookup(16#be, 16#2) -> {more, undefined, 16#c5}; +dec_huffman_lookup(16#be, 16#3) -> {more, undefined, 16#c6}; +dec_huffman_lookup(16#be, 16#4) -> {more, undefined, 16#c9}; +dec_huffman_lookup(16#be, 16#5) -> {more, undefined, 16#ca}; +dec_huffman_lookup(16#be, 16#6) -> {more, undefined, 16#cc}; +dec_huffman_lookup(16#be, 16#7) -> {more, undefined, 16#cd}; +dec_huffman_lookup(16#be, 16#8) -> {more, undefined, 16#d2}; +dec_huffman_lookup(16#be, 16#9) -> {more, undefined, 16#d5}; +dec_huffman_lookup(16#be, 16#a) -> {more, undefined, 16#d9}; +dec_huffman_lookup(16#be, 16#b) -> {more, undefined, 16#dc}; +dec_huffman_lookup(16#be, 16#c) -> {more, undefined, 16#e1}; +dec_huffman_lookup(16#be, 16#d) -> {more, undefined, 16#e7}; +dec_huffman_lookup(16#be, 16#e) -> {more, undefined, 16#ef}; +dec_huffman_lookup(16#be, 16#f) -> {ok, undefined, 16#f6}; +dec_huffman_lookup(16#bf, 16#0) -> {ok, 16#c0, 16#00}; +dec_huffman_lookup(16#bf, 16#1) -> {ok, 16#c1, 16#00}; +dec_huffman_lookup(16#bf, 16#2) -> {ok, 16#c8, 16#00}; +dec_huffman_lookup(16#bf, 16#3) -> {ok, 16#c9, 16#00}; +dec_huffman_lookup(16#bf, 16#4) -> {ok, 16#ca, 16#00}; +dec_huffman_lookup(16#bf, 16#5) -> {ok, 16#cd, 16#00}; +dec_huffman_lookup(16#bf, 16#6) -> {ok, 16#d2, 16#00}; 
+dec_huffman_lookup(16#bf, 16#7) -> {ok, 16#d5, 16#00}; +dec_huffman_lookup(16#bf, 16#8) -> {ok, 16#da, 16#00}; +dec_huffman_lookup(16#bf, 16#9) -> {ok, 16#db, 16#00}; +dec_huffman_lookup(16#bf, 16#a) -> {ok, 16#ee, 16#00}; +dec_huffman_lookup(16#bf, 16#b) -> {ok, 16#f0, 16#00}; +dec_huffman_lookup(16#bf, 16#c) -> {ok, 16#f2, 16#00}; +dec_huffman_lookup(16#bf, 16#d) -> {ok, 16#f3, 16#00}; +dec_huffman_lookup(16#bf, 16#e) -> {ok, 16#ff, 16#00}; +dec_huffman_lookup(16#bf, 16#f) -> {more, undefined, 16#ce}; +dec_huffman_lookup(16#c0, 16#0) -> {more, 16#c0, 16#01}; +dec_huffman_lookup(16#c0, 16#1) -> {ok, 16#c0, 16#16}; +dec_huffman_lookup(16#c0, 16#2) -> {more, 16#c1, 16#01}; +dec_huffman_lookup(16#c0, 16#3) -> {ok, 16#c1, 16#16}; +dec_huffman_lookup(16#c0, 16#4) -> {more, 16#c8, 16#01}; +dec_huffman_lookup(16#c0, 16#5) -> {ok, 16#c8, 16#16}; +dec_huffman_lookup(16#c0, 16#6) -> {more, 16#c9, 16#01}; +dec_huffman_lookup(16#c0, 16#7) -> {ok, 16#c9, 16#16}; +dec_huffman_lookup(16#c0, 16#8) -> {more, 16#ca, 16#01}; +dec_huffman_lookup(16#c0, 16#9) -> {ok, 16#ca, 16#16}; +dec_huffman_lookup(16#c0, 16#a) -> {more, 16#cd, 16#01}; +dec_huffman_lookup(16#c0, 16#b) -> {ok, 16#cd, 16#16}; +dec_huffman_lookup(16#c0, 16#c) -> {more, 16#d2, 16#01}; +dec_huffman_lookup(16#c0, 16#d) -> {ok, 16#d2, 16#16}; +dec_huffman_lookup(16#c0, 16#e) -> {more, 16#d5, 16#01}; +dec_huffman_lookup(16#c0, 16#f) -> {ok, 16#d5, 16#16}; +dec_huffman_lookup(16#c1, 16#0) -> {more, 16#c0, 16#02}; +dec_huffman_lookup(16#c1, 16#1) -> {more, 16#c0, 16#09}; +dec_huffman_lookup(16#c1, 16#2) -> {more, 16#c0, 16#17}; +dec_huffman_lookup(16#c1, 16#3) -> {ok, 16#c0, 16#28}; +dec_huffman_lookup(16#c1, 16#4) -> {more, 16#c1, 16#02}; +dec_huffman_lookup(16#c1, 16#5) -> {more, 16#c1, 16#09}; +dec_huffman_lookup(16#c1, 16#6) -> {more, 16#c1, 16#17}; +dec_huffman_lookup(16#c1, 16#7) -> {ok, 16#c1, 16#28}; +dec_huffman_lookup(16#c1, 16#8) -> {more, 16#c8, 16#02}; +dec_huffman_lookup(16#c1, 16#9) -> {more, 16#c8, 16#09}; +dec_huffman_lookup(16#c1, 16#a) -> {more, 16#c8, 16#17}; +dec_huffman_lookup(16#c1, 16#b) -> {ok, 16#c8, 16#28}; +dec_huffman_lookup(16#c1, 16#c) -> {more, 16#c9, 16#02}; +dec_huffman_lookup(16#c1, 16#d) -> {more, 16#c9, 16#09}; +dec_huffman_lookup(16#c1, 16#e) -> {more, 16#c9, 16#17}; +dec_huffman_lookup(16#c1, 16#f) -> {ok, 16#c9, 16#28}; +dec_huffman_lookup(16#c2, 16#0) -> {more, 16#c0, 16#03}; +dec_huffman_lookup(16#c2, 16#1) -> {more, 16#c0, 16#06}; +dec_huffman_lookup(16#c2, 16#2) -> {more, 16#c0, 16#0a}; +dec_huffman_lookup(16#c2, 16#3) -> {more, 16#c0, 16#0f}; +dec_huffman_lookup(16#c2, 16#4) -> {more, 16#c0, 16#18}; +dec_huffman_lookup(16#c2, 16#5) -> {more, 16#c0, 16#1f}; +dec_huffman_lookup(16#c2, 16#6) -> {more, 16#c0, 16#29}; +dec_huffman_lookup(16#c2, 16#7) -> {ok, 16#c0, 16#38}; +dec_huffman_lookup(16#c2, 16#8) -> {more, 16#c1, 16#03}; +dec_huffman_lookup(16#c2, 16#9) -> {more, 16#c1, 16#06}; +dec_huffman_lookup(16#c2, 16#a) -> {more, 16#c1, 16#0a}; +dec_huffman_lookup(16#c2, 16#b) -> {more, 16#c1, 16#0f}; +dec_huffman_lookup(16#c2, 16#c) -> {more, 16#c1, 16#18}; +dec_huffman_lookup(16#c2, 16#d) -> {more, 16#c1, 16#1f}; +dec_huffman_lookup(16#c2, 16#e) -> {more, 16#c1, 16#29}; +dec_huffman_lookup(16#c2, 16#f) -> {ok, 16#c1, 16#38}; +dec_huffman_lookup(16#c3, 16#0) -> {more, 16#c8, 16#03}; +dec_huffman_lookup(16#c3, 16#1) -> {more, 16#c8, 16#06}; +dec_huffman_lookup(16#c3, 16#2) -> {more, 16#c8, 16#0a}; +dec_huffman_lookup(16#c3, 16#3) -> {more, 16#c8, 16#0f}; +dec_huffman_lookup(16#c3, 16#4) -> {more, 16#c8, 
16#18}; +dec_huffman_lookup(16#c3, 16#5) -> {more, 16#c8, 16#1f}; +dec_huffman_lookup(16#c3, 16#6) -> {more, 16#c8, 16#29}; +dec_huffman_lookup(16#c3, 16#7) -> {ok, 16#c8, 16#38}; +dec_huffman_lookup(16#c3, 16#8) -> {more, 16#c9, 16#03}; +dec_huffman_lookup(16#c3, 16#9) -> {more, 16#c9, 16#06}; +dec_huffman_lookup(16#c3, 16#a) -> {more, 16#c9, 16#0a}; +dec_huffman_lookup(16#c3, 16#b) -> {more, 16#c9, 16#0f}; +dec_huffman_lookup(16#c3, 16#c) -> {more, 16#c9, 16#18}; +dec_huffman_lookup(16#c3, 16#d) -> {more, 16#c9, 16#1f}; +dec_huffman_lookup(16#c3, 16#e) -> {more, 16#c9, 16#29}; +dec_huffman_lookup(16#c3, 16#f) -> {ok, 16#c9, 16#38}; +dec_huffman_lookup(16#c4, 16#0) -> {more, 16#ca, 16#02}; +dec_huffman_lookup(16#c4, 16#1) -> {more, 16#ca, 16#09}; +dec_huffman_lookup(16#c4, 16#2) -> {more, 16#ca, 16#17}; +dec_huffman_lookup(16#c4, 16#3) -> {ok, 16#ca, 16#28}; +dec_huffman_lookup(16#c4, 16#4) -> {more, 16#cd, 16#02}; +dec_huffman_lookup(16#c4, 16#5) -> {more, 16#cd, 16#09}; +dec_huffman_lookup(16#c4, 16#6) -> {more, 16#cd, 16#17}; +dec_huffman_lookup(16#c4, 16#7) -> {ok, 16#cd, 16#28}; +dec_huffman_lookup(16#c4, 16#8) -> {more, 16#d2, 16#02}; +dec_huffman_lookup(16#c4, 16#9) -> {more, 16#d2, 16#09}; +dec_huffman_lookup(16#c4, 16#a) -> {more, 16#d2, 16#17}; +dec_huffman_lookup(16#c4, 16#b) -> {ok, 16#d2, 16#28}; +dec_huffman_lookup(16#c4, 16#c) -> {more, 16#d5, 16#02}; +dec_huffman_lookup(16#c4, 16#d) -> {more, 16#d5, 16#09}; +dec_huffman_lookup(16#c4, 16#e) -> {more, 16#d5, 16#17}; +dec_huffman_lookup(16#c4, 16#f) -> {ok, 16#d5, 16#28}; +dec_huffman_lookup(16#c5, 16#0) -> {more, 16#ca, 16#03}; +dec_huffman_lookup(16#c5, 16#1) -> {more, 16#ca, 16#06}; +dec_huffman_lookup(16#c5, 16#2) -> {more, 16#ca, 16#0a}; +dec_huffman_lookup(16#c5, 16#3) -> {more, 16#ca, 16#0f}; +dec_huffman_lookup(16#c5, 16#4) -> {more, 16#ca, 16#18}; +dec_huffman_lookup(16#c5, 16#5) -> {more, 16#ca, 16#1f}; +dec_huffman_lookup(16#c5, 16#6) -> {more, 16#ca, 16#29}; +dec_huffman_lookup(16#c5, 16#7) -> {ok, 16#ca, 16#38}; +dec_huffman_lookup(16#c5, 16#8) -> {more, 16#cd, 16#03}; +dec_huffman_lookup(16#c5, 16#9) -> {more, 16#cd, 16#06}; +dec_huffman_lookup(16#c5, 16#a) -> {more, 16#cd, 16#0a}; +dec_huffman_lookup(16#c5, 16#b) -> {more, 16#cd, 16#0f}; +dec_huffman_lookup(16#c5, 16#c) -> {more, 16#cd, 16#18}; +dec_huffman_lookup(16#c5, 16#d) -> {more, 16#cd, 16#1f}; +dec_huffman_lookup(16#c5, 16#e) -> {more, 16#cd, 16#29}; +dec_huffman_lookup(16#c5, 16#f) -> {ok, 16#cd, 16#38}; +dec_huffman_lookup(16#c6, 16#0) -> {more, 16#d2, 16#03}; +dec_huffman_lookup(16#c6, 16#1) -> {more, 16#d2, 16#06}; +dec_huffman_lookup(16#c6, 16#2) -> {more, 16#d2, 16#0a}; +dec_huffman_lookup(16#c6, 16#3) -> {more, 16#d2, 16#0f}; +dec_huffman_lookup(16#c6, 16#4) -> {more, 16#d2, 16#18}; +dec_huffman_lookup(16#c6, 16#5) -> {more, 16#d2, 16#1f}; +dec_huffman_lookup(16#c6, 16#6) -> {more, 16#d2, 16#29}; +dec_huffman_lookup(16#c6, 16#7) -> {ok, 16#d2, 16#38}; +dec_huffman_lookup(16#c6, 16#8) -> {more, 16#d5, 16#03}; +dec_huffman_lookup(16#c6, 16#9) -> {more, 16#d5, 16#06}; +dec_huffman_lookup(16#c6, 16#a) -> {more, 16#d5, 16#0a}; +dec_huffman_lookup(16#c6, 16#b) -> {more, 16#d5, 16#0f}; +dec_huffman_lookup(16#c6, 16#c) -> {more, 16#d5, 16#18}; +dec_huffman_lookup(16#c6, 16#d) -> {more, 16#d5, 16#1f}; +dec_huffman_lookup(16#c6, 16#e) -> {more, 16#d5, 16#29}; +dec_huffman_lookup(16#c6, 16#f) -> {ok, 16#d5, 16#38}; +dec_huffman_lookup(16#c7, 16#0) -> {more, 16#da, 16#01}; +dec_huffman_lookup(16#c7, 16#1) -> {ok, 16#da, 16#16}; +dec_huffman_lookup(16#c7, 
16#2) -> {more, 16#db, 16#01}; +dec_huffman_lookup(16#c7, 16#3) -> {ok, 16#db, 16#16}; +dec_huffman_lookup(16#c7, 16#4) -> {more, 16#ee, 16#01}; +dec_huffman_lookup(16#c7, 16#5) -> {ok, 16#ee, 16#16}; +dec_huffman_lookup(16#c7, 16#6) -> {more, 16#f0, 16#01}; +dec_huffman_lookup(16#c7, 16#7) -> {ok, 16#f0, 16#16}; +dec_huffman_lookup(16#c7, 16#8) -> {more, 16#f2, 16#01}; +dec_huffman_lookup(16#c7, 16#9) -> {ok, 16#f2, 16#16}; +dec_huffman_lookup(16#c7, 16#a) -> {more, 16#f3, 16#01}; +dec_huffman_lookup(16#c7, 16#b) -> {ok, 16#f3, 16#16}; +dec_huffman_lookup(16#c7, 16#c) -> {more, 16#ff, 16#01}; +dec_huffman_lookup(16#c7, 16#d) -> {ok, 16#ff, 16#16}; +dec_huffman_lookup(16#c7, 16#e) -> {ok, 16#cb, 16#00}; +dec_huffman_lookup(16#c7, 16#f) -> {ok, 16#cc, 16#00}; +dec_huffman_lookup(16#c8, 16#0) -> {more, 16#da, 16#02}; +dec_huffman_lookup(16#c8, 16#1) -> {more, 16#da, 16#09}; +dec_huffman_lookup(16#c8, 16#2) -> {more, 16#da, 16#17}; +dec_huffman_lookup(16#c8, 16#3) -> {ok, 16#da, 16#28}; +dec_huffman_lookup(16#c8, 16#4) -> {more, 16#db, 16#02}; +dec_huffman_lookup(16#c8, 16#5) -> {more, 16#db, 16#09}; +dec_huffman_lookup(16#c8, 16#6) -> {more, 16#db, 16#17}; +dec_huffman_lookup(16#c8, 16#7) -> {ok, 16#db, 16#28}; +dec_huffman_lookup(16#c8, 16#8) -> {more, 16#ee, 16#02}; +dec_huffman_lookup(16#c8, 16#9) -> {more, 16#ee, 16#09}; +dec_huffman_lookup(16#c8, 16#a) -> {more, 16#ee, 16#17}; +dec_huffman_lookup(16#c8, 16#b) -> {ok, 16#ee, 16#28}; +dec_huffman_lookup(16#c8, 16#c) -> {more, 16#f0, 16#02}; +dec_huffman_lookup(16#c8, 16#d) -> {more, 16#f0, 16#09}; +dec_huffman_lookup(16#c8, 16#e) -> {more, 16#f0, 16#17}; +dec_huffman_lookup(16#c8, 16#f) -> {ok, 16#f0, 16#28}; +dec_huffman_lookup(16#c9, 16#0) -> {more, 16#da, 16#03}; +dec_huffman_lookup(16#c9, 16#1) -> {more, 16#da, 16#06}; +dec_huffman_lookup(16#c9, 16#2) -> {more, 16#da, 16#0a}; +dec_huffman_lookup(16#c9, 16#3) -> {more, 16#da, 16#0f}; +dec_huffman_lookup(16#c9, 16#4) -> {more, 16#da, 16#18}; +dec_huffman_lookup(16#c9, 16#5) -> {more, 16#da, 16#1f}; +dec_huffman_lookup(16#c9, 16#6) -> {more, 16#da, 16#29}; +dec_huffman_lookup(16#c9, 16#7) -> {ok, 16#da, 16#38}; +dec_huffman_lookup(16#c9, 16#8) -> {more, 16#db, 16#03}; +dec_huffman_lookup(16#c9, 16#9) -> {more, 16#db, 16#06}; +dec_huffman_lookup(16#c9, 16#a) -> {more, 16#db, 16#0a}; +dec_huffman_lookup(16#c9, 16#b) -> {more, 16#db, 16#0f}; +dec_huffman_lookup(16#c9, 16#c) -> {more, 16#db, 16#18}; +dec_huffman_lookup(16#c9, 16#d) -> {more, 16#db, 16#1f}; +dec_huffman_lookup(16#c9, 16#e) -> {more, 16#db, 16#29}; +dec_huffman_lookup(16#c9, 16#f) -> {ok, 16#db, 16#38}; +dec_huffman_lookup(16#ca, 16#0) -> {more, 16#ee, 16#03}; +dec_huffman_lookup(16#ca, 16#1) -> {more, 16#ee, 16#06}; +dec_huffman_lookup(16#ca, 16#2) -> {more, 16#ee, 16#0a}; +dec_huffman_lookup(16#ca, 16#3) -> {more, 16#ee, 16#0f}; +dec_huffman_lookup(16#ca, 16#4) -> {more, 16#ee, 16#18}; +dec_huffman_lookup(16#ca, 16#5) -> {more, 16#ee, 16#1f}; +dec_huffman_lookup(16#ca, 16#6) -> {more, 16#ee, 16#29}; +dec_huffman_lookup(16#ca, 16#7) -> {ok, 16#ee, 16#38}; +dec_huffman_lookup(16#ca, 16#8) -> {more, 16#f0, 16#03}; +dec_huffman_lookup(16#ca, 16#9) -> {more, 16#f0, 16#06}; +dec_huffman_lookup(16#ca, 16#a) -> {more, 16#f0, 16#0a}; +dec_huffman_lookup(16#ca, 16#b) -> {more, 16#f0, 16#0f}; +dec_huffman_lookup(16#ca, 16#c) -> {more, 16#f0, 16#18}; +dec_huffman_lookup(16#ca, 16#d) -> {more, 16#f0, 16#1f}; +dec_huffman_lookup(16#ca, 16#e) -> {more, 16#f0, 16#29}; +dec_huffman_lookup(16#ca, 16#f) -> {ok, 16#f0, 16#38}; 
+dec_huffman_lookup(16#cb, 16#0) -> {more, 16#f2, 16#02}; +dec_huffman_lookup(16#cb, 16#1) -> {more, 16#f2, 16#09}; +dec_huffman_lookup(16#cb, 16#2) -> {more, 16#f2, 16#17}; +dec_huffman_lookup(16#cb, 16#3) -> {ok, 16#f2, 16#28}; +dec_huffman_lookup(16#cb, 16#4) -> {more, 16#f3, 16#02}; +dec_huffman_lookup(16#cb, 16#5) -> {more, 16#f3, 16#09}; +dec_huffman_lookup(16#cb, 16#6) -> {more, 16#f3, 16#17}; +dec_huffman_lookup(16#cb, 16#7) -> {ok, 16#f3, 16#28}; +dec_huffman_lookup(16#cb, 16#8) -> {more, 16#ff, 16#02}; +dec_huffman_lookup(16#cb, 16#9) -> {more, 16#ff, 16#09}; +dec_huffman_lookup(16#cb, 16#a) -> {more, 16#ff, 16#17}; +dec_huffman_lookup(16#cb, 16#b) -> {ok, 16#ff, 16#28}; +dec_huffman_lookup(16#cb, 16#c) -> {more, 16#cb, 16#01}; +dec_huffman_lookup(16#cb, 16#d) -> {ok, 16#cb, 16#16}; +dec_huffman_lookup(16#cb, 16#e) -> {more, 16#cc, 16#01}; +dec_huffman_lookup(16#cb, 16#f) -> {ok, 16#cc, 16#16}; +dec_huffman_lookup(16#cc, 16#0) -> {more, 16#f2, 16#03}; +dec_huffman_lookup(16#cc, 16#1) -> {more, 16#f2, 16#06}; +dec_huffman_lookup(16#cc, 16#2) -> {more, 16#f2, 16#0a}; +dec_huffman_lookup(16#cc, 16#3) -> {more, 16#f2, 16#0f}; +dec_huffman_lookup(16#cc, 16#4) -> {more, 16#f2, 16#18}; +dec_huffman_lookup(16#cc, 16#5) -> {more, 16#f2, 16#1f}; +dec_huffman_lookup(16#cc, 16#6) -> {more, 16#f2, 16#29}; +dec_huffman_lookup(16#cc, 16#7) -> {ok, 16#f2, 16#38}; +dec_huffman_lookup(16#cc, 16#8) -> {more, 16#f3, 16#03}; +dec_huffman_lookup(16#cc, 16#9) -> {more, 16#f3, 16#06}; +dec_huffman_lookup(16#cc, 16#a) -> {more, 16#f3, 16#0a}; +dec_huffman_lookup(16#cc, 16#b) -> {more, 16#f3, 16#0f}; +dec_huffman_lookup(16#cc, 16#c) -> {more, 16#f3, 16#18}; +dec_huffman_lookup(16#cc, 16#d) -> {more, 16#f3, 16#1f}; +dec_huffman_lookup(16#cc, 16#e) -> {more, 16#f3, 16#29}; +dec_huffman_lookup(16#cc, 16#f) -> {ok, 16#f3, 16#38}; +dec_huffman_lookup(16#cd, 16#0) -> {more, 16#ff, 16#03}; +dec_huffman_lookup(16#cd, 16#1) -> {more, 16#ff, 16#06}; +dec_huffman_lookup(16#cd, 16#2) -> {more, 16#ff, 16#0a}; +dec_huffman_lookup(16#cd, 16#3) -> {more, 16#ff, 16#0f}; +dec_huffman_lookup(16#cd, 16#4) -> {more, 16#ff, 16#18}; +dec_huffman_lookup(16#cd, 16#5) -> {more, 16#ff, 16#1f}; +dec_huffman_lookup(16#cd, 16#6) -> {more, 16#ff, 16#29}; +dec_huffman_lookup(16#cd, 16#7) -> {ok, 16#ff, 16#38}; +dec_huffman_lookup(16#cd, 16#8) -> {more, 16#cb, 16#02}; +dec_huffman_lookup(16#cd, 16#9) -> {more, 16#cb, 16#09}; +dec_huffman_lookup(16#cd, 16#a) -> {more, 16#cb, 16#17}; +dec_huffman_lookup(16#cd, 16#b) -> {ok, 16#cb, 16#28}; +dec_huffman_lookup(16#cd, 16#c) -> {more, 16#cc, 16#02}; +dec_huffman_lookup(16#cd, 16#d) -> {more, 16#cc, 16#09}; +dec_huffman_lookup(16#cd, 16#e) -> {more, 16#cc, 16#17}; +dec_huffman_lookup(16#cd, 16#f) -> {ok, 16#cc, 16#28}; +dec_huffman_lookup(16#ce, 16#0) -> {more, 16#cb, 16#03}; +dec_huffman_lookup(16#ce, 16#1) -> {more, 16#cb, 16#06}; +dec_huffman_lookup(16#ce, 16#2) -> {more, 16#cb, 16#0a}; +dec_huffman_lookup(16#ce, 16#3) -> {more, 16#cb, 16#0f}; +dec_huffman_lookup(16#ce, 16#4) -> {more, 16#cb, 16#18}; +dec_huffman_lookup(16#ce, 16#5) -> {more, 16#cb, 16#1f}; +dec_huffman_lookup(16#ce, 16#6) -> {more, 16#cb, 16#29}; +dec_huffman_lookup(16#ce, 16#7) -> {ok, 16#cb, 16#38}; +dec_huffman_lookup(16#ce, 16#8) -> {more, 16#cc, 16#03}; +dec_huffman_lookup(16#ce, 16#9) -> {more, 16#cc, 16#06}; +dec_huffman_lookup(16#ce, 16#a) -> {more, 16#cc, 16#0a}; +dec_huffman_lookup(16#ce, 16#b) -> {more, 16#cc, 16#0f}; +dec_huffman_lookup(16#ce, 16#c) -> {more, 16#cc, 16#18}; +dec_huffman_lookup(16#ce, 16#d) -> 
{more, 16#cc, 16#1f}; +dec_huffman_lookup(16#ce, 16#e) -> {more, 16#cc, 16#29}; +dec_huffman_lookup(16#ce, 16#f) -> {ok, 16#cc, 16#38}; +dec_huffman_lookup(16#cf, 16#0) -> {more, undefined, 16#d3}; +dec_huffman_lookup(16#cf, 16#1) -> {more, undefined, 16#d4}; +dec_huffman_lookup(16#cf, 16#2) -> {more, undefined, 16#d6}; +dec_huffman_lookup(16#cf, 16#3) -> {more, undefined, 16#d7}; +dec_huffman_lookup(16#cf, 16#4) -> {more, undefined, 16#da}; +dec_huffman_lookup(16#cf, 16#5) -> {more, undefined, 16#db}; +dec_huffman_lookup(16#cf, 16#6) -> {more, undefined, 16#dd}; +dec_huffman_lookup(16#cf, 16#7) -> {more, undefined, 16#de}; +dec_huffman_lookup(16#cf, 16#8) -> {more, undefined, 16#e2}; +dec_huffman_lookup(16#cf, 16#9) -> {more, undefined, 16#e4}; +dec_huffman_lookup(16#cf, 16#a) -> {more, undefined, 16#e8}; +dec_huffman_lookup(16#cf, 16#b) -> {more, undefined, 16#eb}; +dec_huffman_lookup(16#cf, 16#c) -> {more, undefined, 16#f0}; +dec_huffman_lookup(16#cf, 16#d) -> {more, undefined, 16#f3}; +dec_huffman_lookup(16#cf, 16#e) -> {more, undefined, 16#f7}; +dec_huffman_lookup(16#cf, 16#f) -> {ok, undefined, 16#fa}; +dec_huffman_lookup(16#d0, 16#0) -> {ok, 16#d3, 16#00}; +dec_huffman_lookup(16#d0, 16#1) -> {ok, 16#d4, 16#00}; +dec_huffman_lookup(16#d0, 16#2) -> {ok, 16#d6, 16#00}; +dec_huffman_lookup(16#d0, 16#3) -> {ok, 16#dd, 16#00}; +dec_huffman_lookup(16#d0, 16#4) -> {ok, 16#de, 16#00}; +dec_huffman_lookup(16#d0, 16#5) -> {ok, 16#df, 16#00}; +dec_huffman_lookup(16#d0, 16#6) -> {ok, 16#f1, 16#00}; +dec_huffman_lookup(16#d0, 16#7) -> {ok, 16#f4, 16#00}; +dec_huffman_lookup(16#d0, 16#8) -> {ok, 16#f5, 16#00}; +dec_huffman_lookup(16#d0, 16#9) -> {ok, 16#f6, 16#00}; +dec_huffman_lookup(16#d0, 16#a) -> {ok, 16#f7, 16#00}; +dec_huffman_lookup(16#d0, 16#b) -> {ok, 16#f8, 16#00}; +dec_huffman_lookup(16#d0, 16#c) -> {ok, 16#fa, 16#00}; +dec_huffman_lookup(16#d0, 16#d) -> {ok, 16#fb, 16#00}; +dec_huffman_lookup(16#d0, 16#e) -> {ok, 16#fc, 16#00}; +dec_huffman_lookup(16#d0, 16#f) -> {ok, 16#fd, 16#00}; +dec_huffman_lookup(16#d1, 16#0) -> {more, 16#d3, 16#01}; +dec_huffman_lookup(16#d1, 16#1) -> {ok, 16#d3, 16#16}; +dec_huffman_lookup(16#d1, 16#2) -> {more, 16#d4, 16#01}; +dec_huffman_lookup(16#d1, 16#3) -> {ok, 16#d4, 16#16}; +dec_huffman_lookup(16#d1, 16#4) -> {more, 16#d6, 16#01}; +dec_huffman_lookup(16#d1, 16#5) -> {ok, 16#d6, 16#16}; +dec_huffman_lookup(16#d1, 16#6) -> {more, 16#dd, 16#01}; +dec_huffman_lookup(16#d1, 16#7) -> {ok, 16#dd, 16#16}; +dec_huffman_lookup(16#d1, 16#8) -> {more, 16#de, 16#01}; +dec_huffman_lookup(16#d1, 16#9) -> {ok, 16#de, 16#16}; +dec_huffman_lookup(16#d1, 16#a) -> {more, 16#df, 16#01}; +dec_huffman_lookup(16#d1, 16#b) -> {ok, 16#df, 16#16}; +dec_huffman_lookup(16#d1, 16#c) -> {more, 16#f1, 16#01}; +dec_huffman_lookup(16#d1, 16#d) -> {ok, 16#f1, 16#16}; +dec_huffman_lookup(16#d1, 16#e) -> {more, 16#f4, 16#01}; +dec_huffman_lookup(16#d1, 16#f) -> {ok, 16#f4, 16#16}; +dec_huffman_lookup(16#d2, 16#0) -> {more, 16#d3, 16#02}; +dec_huffman_lookup(16#d2, 16#1) -> {more, 16#d3, 16#09}; +dec_huffman_lookup(16#d2, 16#2) -> {more, 16#d3, 16#17}; +dec_huffman_lookup(16#d2, 16#3) -> {ok, 16#d3, 16#28}; +dec_huffman_lookup(16#d2, 16#4) -> {more, 16#d4, 16#02}; +dec_huffman_lookup(16#d2, 16#5) -> {more, 16#d4, 16#09}; +dec_huffman_lookup(16#d2, 16#6) -> {more, 16#d4, 16#17}; +dec_huffman_lookup(16#d2, 16#7) -> {ok, 16#d4, 16#28}; +dec_huffman_lookup(16#d2, 16#8) -> {more, 16#d6, 16#02}; +dec_huffman_lookup(16#d2, 16#9) -> {more, 16#d6, 16#09}; +dec_huffman_lookup(16#d2, 16#a) -> {more, 
16#d6, 16#17}; +dec_huffman_lookup(16#d2, 16#b) -> {ok, 16#d6, 16#28}; +dec_huffman_lookup(16#d2, 16#c) -> {more, 16#dd, 16#02}; +dec_huffman_lookup(16#d2, 16#d) -> {more, 16#dd, 16#09}; +dec_huffman_lookup(16#d2, 16#e) -> {more, 16#dd, 16#17}; +dec_huffman_lookup(16#d2, 16#f) -> {ok, 16#dd, 16#28}; +dec_huffman_lookup(16#d3, 16#0) -> {more, 16#d3, 16#03}; +dec_huffman_lookup(16#d3, 16#1) -> {more, 16#d3, 16#06}; +dec_huffman_lookup(16#d3, 16#2) -> {more, 16#d3, 16#0a}; +dec_huffman_lookup(16#d3, 16#3) -> {more, 16#d3, 16#0f}; +dec_huffman_lookup(16#d3, 16#4) -> {more, 16#d3, 16#18}; +dec_huffman_lookup(16#d3, 16#5) -> {more, 16#d3, 16#1f}; +dec_huffman_lookup(16#d3, 16#6) -> {more, 16#d3, 16#29}; +dec_huffman_lookup(16#d3, 16#7) -> {ok, 16#d3, 16#38}; +dec_huffman_lookup(16#d3, 16#8) -> {more, 16#d4, 16#03}; +dec_huffman_lookup(16#d3, 16#9) -> {more, 16#d4, 16#06}; +dec_huffman_lookup(16#d3, 16#a) -> {more, 16#d4, 16#0a}; +dec_huffman_lookup(16#d3, 16#b) -> {more, 16#d4, 16#0f}; +dec_huffman_lookup(16#d3, 16#c) -> {more, 16#d4, 16#18}; +dec_huffman_lookup(16#d3, 16#d) -> {more, 16#d4, 16#1f}; +dec_huffman_lookup(16#d3, 16#e) -> {more, 16#d4, 16#29}; +dec_huffman_lookup(16#d3, 16#f) -> {ok, 16#d4, 16#38}; +dec_huffman_lookup(16#d4, 16#0) -> {more, 16#d6, 16#03}; +dec_huffman_lookup(16#d4, 16#1) -> {more, 16#d6, 16#06}; +dec_huffman_lookup(16#d4, 16#2) -> {more, 16#d6, 16#0a}; +dec_huffman_lookup(16#d4, 16#3) -> {more, 16#d6, 16#0f}; +dec_huffman_lookup(16#d4, 16#4) -> {more, 16#d6, 16#18}; +dec_huffman_lookup(16#d4, 16#5) -> {more, 16#d6, 16#1f}; +dec_huffman_lookup(16#d4, 16#6) -> {more, 16#d6, 16#29}; +dec_huffman_lookup(16#d4, 16#7) -> {ok, 16#d6, 16#38}; +dec_huffman_lookup(16#d4, 16#8) -> {more, 16#dd, 16#03}; +dec_huffman_lookup(16#d4, 16#9) -> {more, 16#dd, 16#06}; +dec_huffman_lookup(16#d4, 16#a) -> {more, 16#dd, 16#0a}; +dec_huffman_lookup(16#d4, 16#b) -> {more, 16#dd, 16#0f}; +dec_huffman_lookup(16#d4, 16#c) -> {more, 16#dd, 16#18}; +dec_huffman_lookup(16#d4, 16#d) -> {more, 16#dd, 16#1f}; +dec_huffman_lookup(16#d4, 16#e) -> {more, 16#dd, 16#29}; +dec_huffman_lookup(16#d4, 16#f) -> {ok, 16#dd, 16#38}; +dec_huffman_lookup(16#d5, 16#0) -> {more, 16#de, 16#02}; +dec_huffman_lookup(16#d5, 16#1) -> {more, 16#de, 16#09}; +dec_huffman_lookup(16#d5, 16#2) -> {more, 16#de, 16#17}; +dec_huffman_lookup(16#d5, 16#3) -> {ok, 16#de, 16#28}; +dec_huffman_lookup(16#d5, 16#4) -> {more, 16#df, 16#02}; +dec_huffman_lookup(16#d5, 16#5) -> {more, 16#df, 16#09}; +dec_huffman_lookup(16#d5, 16#6) -> {more, 16#df, 16#17}; +dec_huffman_lookup(16#d5, 16#7) -> {ok, 16#df, 16#28}; +dec_huffman_lookup(16#d5, 16#8) -> {more, 16#f1, 16#02}; +dec_huffman_lookup(16#d5, 16#9) -> {more, 16#f1, 16#09}; +dec_huffman_lookup(16#d5, 16#a) -> {more, 16#f1, 16#17}; +dec_huffman_lookup(16#d5, 16#b) -> {ok, 16#f1, 16#28}; +dec_huffman_lookup(16#d5, 16#c) -> {more, 16#f4, 16#02}; +dec_huffman_lookup(16#d5, 16#d) -> {more, 16#f4, 16#09}; +dec_huffman_lookup(16#d5, 16#e) -> {more, 16#f4, 16#17}; +dec_huffman_lookup(16#d5, 16#f) -> {ok, 16#f4, 16#28}; +dec_huffman_lookup(16#d6, 16#0) -> {more, 16#de, 16#03}; +dec_huffman_lookup(16#d6, 16#1) -> {more, 16#de, 16#06}; +dec_huffman_lookup(16#d6, 16#2) -> {more, 16#de, 16#0a}; +dec_huffman_lookup(16#d6, 16#3) -> {more, 16#de, 16#0f}; +dec_huffman_lookup(16#d6, 16#4) -> {more, 16#de, 16#18}; +dec_huffman_lookup(16#d6, 16#5) -> {more, 16#de, 16#1f}; +dec_huffman_lookup(16#d6, 16#6) -> {more, 16#de, 16#29}; +dec_huffman_lookup(16#d6, 16#7) -> {ok, 16#de, 16#38}; 
+dec_huffman_lookup(16#d6, 16#8) -> {more, 16#df, 16#03}; +dec_huffman_lookup(16#d6, 16#9) -> {more, 16#df, 16#06}; +dec_huffman_lookup(16#d6, 16#a) -> {more, 16#df, 16#0a}; +dec_huffman_lookup(16#d6, 16#b) -> {more, 16#df, 16#0f}; +dec_huffman_lookup(16#d6, 16#c) -> {more, 16#df, 16#18}; +dec_huffman_lookup(16#d6, 16#d) -> {more, 16#df, 16#1f}; +dec_huffman_lookup(16#d6, 16#e) -> {more, 16#df, 16#29}; +dec_huffman_lookup(16#d6, 16#f) -> {ok, 16#df, 16#38}; +dec_huffman_lookup(16#d7, 16#0) -> {more, 16#f1, 16#03}; +dec_huffman_lookup(16#d7, 16#1) -> {more, 16#f1, 16#06}; +dec_huffman_lookup(16#d7, 16#2) -> {more, 16#f1, 16#0a}; +dec_huffman_lookup(16#d7, 16#3) -> {more, 16#f1, 16#0f}; +dec_huffman_lookup(16#d7, 16#4) -> {more, 16#f1, 16#18}; +dec_huffman_lookup(16#d7, 16#5) -> {more, 16#f1, 16#1f}; +dec_huffman_lookup(16#d7, 16#6) -> {more, 16#f1, 16#29}; +dec_huffman_lookup(16#d7, 16#7) -> {ok, 16#f1, 16#38}; +dec_huffman_lookup(16#d7, 16#8) -> {more, 16#f4, 16#03}; +dec_huffman_lookup(16#d7, 16#9) -> {more, 16#f4, 16#06}; +dec_huffman_lookup(16#d7, 16#a) -> {more, 16#f4, 16#0a}; +dec_huffman_lookup(16#d7, 16#b) -> {more, 16#f4, 16#0f}; +dec_huffman_lookup(16#d7, 16#c) -> {more, 16#f4, 16#18}; +dec_huffman_lookup(16#d7, 16#d) -> {more, 16#f4, 16#1f}; +dec_huffman_lookup(16#d7, 16#e) -> {more, 16#f4, 16#29}; +dec_huffman_lookup(16#d7, 16#f) -> {ok, 16#f4, 16#38}; +dec_huffman_lookup(16#d8, 16#0) -> {more, 16#f5, 16#01}; +dec_huffman_lookup(16#d8, 16#1) -> {ok, 16#f5, 16#16}; +dec_huffman_lookup(16#d8, 16#2) -> {more, 16#f6, 16#01}; +dec_huffman_lookup(16#d8, 16#3) -> {ok, 16#f6, 16#16}; +dec_huffman_lookup(16#d8, 16#4) -> {more, 16#f7, 16#01}; +dec_huffman_lookup(16#d8, 16#5) -> {ok, 16#f7, 16#16}; +dec_huffman_lookup(16#d8, 16#6) -> {more, 16#f8, 16#01}; +dec_huffman_lookup(16#d8, 16#7) -> {ok, 16#f8, 16#16}; +dec_huffman_lookup(16#d8, 16#8) -> {more, 16#fa, 16#01}; +dec_huffman_lookup(16#d8, 16#9) -> {ok, 16#fa, 16#16}; +dec_huffman_lookup(16#d8, 16#a) -> {more, 16#fb, 16#01}; +dec_huffman_lookup(16#d8, 16#b) -> {ok, 16#fb, 16#16}; +dec_huffman_lookup(16#d8, 16#c) -> {more, 16#fc, 16#01}; +dec_huffman_lookup(16#d8, 16#d) -> {ok, 16#fc, 16#16}; +dec_huffman_lookup(16#d8, 16#e) -> {more, 16#fd, 16#01}; +dec_huffman_lookup(16#d8, 16#f) -> {ok, 16#fd, 16#16}; +dec_huffman_lookup(16#d9, 16#0) -> {more, 16#f5, 16#02}; +dec_huffman_lookup(16#d9, 16#1) -> {more, 16#f5, 16#09}; +dec_huffman_lookup(16#d9, 16#2) -> {more, 16#f5, 16#17}; +dec_huffman_lookup(16#d9, 16#3) -> {ok, 16#f5, 16#28}; +dec_huffman_lookup(16#d9, 16#4) -> {more, 16#f6, 16#02}; +dec_huffman_lookup(16#d9, 16#5) -> {more, 16#f6, 16#09}; +dec_huffman_lookup(16#d9, 16#6) -> {more, 16#f6, 16#17}; +dec_huffman_lookup(16#d9, 16#7) -> {ok, 16#f6, 16#28}; +dec_huffman_lookup(16#d9, 16#8) -> {more, 16#f7, 16#02}; +dec_huffman_lookup(16#d9, 16#9) -> {more, 16#f7, 16#09}; +dec_huffman_lookup(16#d9, 16#a) -> {more, 16#f7, 16#17}; +dec_huffman_lookup(16#d9, 16#b) -> {ok, 16#f7, 16#28}; +dec_huffman_lookup(16#d9, 16#c) -> {more, 16#f8, 16#02}; +dec_huffman_lookup(16#d9, 16#d) -> {more, 16#f8, 16#09}; +dec_huffman_lookup(16#d9, 16#e) -> {more, 16#f8, 16#17}; +dec_huffman_lookup(16#d9, 16#f) -> {ok, 16#f8, 16#28}; +dec_huffman_lookup(16#da, 16#0) -> {more, 16#f5, 16#03}; +dec_huffman_lookup(16#da, 16#1) -> {more, 16#f5, 16#06}; +dec_huffman_lookup(16#da, 16#2) -> {more, 16#f5, 16#0a}; +dec_huffman_lookup(16#da, 16#3) -> {more, 16#f5, 16#0f}; +dec_huffman_lookup(16#da, 16#4) -> {more, 16#f5, 16#18}; +dec_huffman_lookup(16#da, 16#5) -> {more, 
16#f5, 16#1f}; +dec_huffman_lookup(16#da, 16#6) -> {more, 16#f5, 16#29}; +dec_huffman_lookup(16#da, 16#7) -> {ok, 16#f5, 16#38}; +dec_huffman_lookup(16#da, 16#8) -> {more, 16#f6, 16#03}; +dec_huffman_lookup(16#da, 16#9) -> {more, 16#f6, 16#06}; +dec_huffman_lookup(16#da, 16#a) -> {more, 16#f6, 16#0a}; +dec_huffman_lookup(16#da, 16#b) -> {more, 16#f6, 16#0f}; +dec_huffman_lookup(16#da, 16#c) -> {more, 16#f6, 16#18}; +dec_huffman_lookup(16#da, 16#d) -> {more, 16#f6, 16#1f}; +dec_huffman_lookup(16#da, 16#e) -> {more, 16#f6, 16#29}; +dec_huffman_lookup(16#da, 16#f) -> {ok, 16#f6, 16#38}; +dec_huffman_lookup(16#db, 16#0) -> {more, 16#f7, 16#03}; +dec_huffman_lookup(16#db, 16#1) -> {more, 16#f7, 16#06}; +dec_huffman_lookup(16#db, 16#2) -> {more, 16#f7, 16#0a}; +dec_huffman_lookup(16#db, 16#3) -> {more, 16#f7, 16#0f}; +dec_huffman_lookup(16#db, 16#4) -> {more, 16#f7, 16#18}; +dec_huffman_lookup(16#db, 16#5) -> {more, 16#f7, 16#1f}; +dec_huffman_lookup(16#db, 16#6) -> {more, 16#f7, 16#29}; +dec_huffman_lookup(16#db, 16#7) -> {ok, 16#f7, 16#38}; +dec_huffman_lookup(16#db, 16#8) -> {more, 16#f8, 16#03}; +dec_huffman_lookup(16#db, 16#9) -> {more, 16#f8, 16#06}; +dec_huffman_lookup(16#db, 16#a) -> {more, 16#f8, 16#0a}; +dec_huffman_lookup(16#db, 16#b) -> {more, 16#f8, 16#0f}; +dec_huffman_lookup(16#db, 16#c) -> {more, 16#f8, 16#18}; +dec_huffman_lookup(16#db, 16#d) -> {more, 16#f8, 16#1f}; +dec_huffman_lookup(16#db, 16#e) -> {more, 16#f8, 16#29}; +dec_huffman_lookup(16#db, 16#f) -> {ok, 16#f8, 16#38}; +dec_huffman_lookup(16#dc, 16#0) -> {more, 16#fa, 16#02}; +dec_huffman_lookup(16#dc, 16#1) -> {more, 16#fa, 16#09}; +dec_huffman_lookup(16#dc, 16#2) -> {more, 16#fa, 16#17}; +dec_huffman_lookup(16#dc, 16#3) -> {ok, 16#fa, 16#28}; +dec_huffman_lookup(16#dc, 16#4) -> {more, 16#fb, 16#02}; +dec_huffman_lookup(16#dc, 16#5) -> {more, 16#fb, 16#09}; +dec_huffman_lookup(16#dc, 16#6) -> {more, 16#fb, 16#17}; +dec_huffman_lookup(16#dc, 16#7) -> {ok, 16#fb, 16#28}; +dec_huffman_lookup(16#dc, 16#8) -> {more, 16#fc, 16#02}; +dec_huffman_lookup(16#dc, 16#9) -> {more, 16#fc, 16#09}; +dec_huffman_lookup(16#dc, 16#a) -> {more, 16#fc, 16#17}; +dec_huffman_lookup(16#dc, 16#b) -> {ok, 16#fc, 16#28}; +dec_huffman_lookup(16#dc, 16#c) -> {more, 16#fd, 16#02}; +dec_huffman_lookup(16#dc, 16#d) -> {more, 16#fd, 16#09}; +dec_huffman_lookup(16#dc, 16#e) -> {more, 16#fd, 16#17}; +dec_huffman_lookup(16#dc, 16#f) -> {ok, 16#fd, 16#28}; +dec_huffman_lookup(16#dd, 16#0) -> {more, 16#fa, 16#03}; +dec_huffman_lookup(16#dd, 16#1) -> {more, 16#fa, 16#06}; +dec_huffman_lookup(16#dd, 16#2) -> {more, 16#fa, 16#0a}; +dec_huffman_lookup(16#dd, 16#3) -> {more, 16#fa, 16#0f}; +dec_huffman_lookup(16#dd, 16#4) -> {more, 16#fa, 16#18}; +dec_huffman_lookup(16#dd, 16#5) -> {more, 16#fa, 16#1f}; +dec_huffman_lookup(16#dd, 16#6) -> {more, 16#fa, 16#29}; +dec_huffman_lookup(16#dd, 16#7) -> {ok, 16#fa, 16#38}; +dec_huffman_lookup(16#dd, 16#8) -> {more, 16#fb, 16#03}; +dec_huffman_lookup(16#dd, 16#9) -> {more, 16#fb, 16#06}; +dec_huffman_lookup(16#dd, 16#a) -> {more, 16#fb, 16#0a}; +dec_huffman_lookup(16#dd, 16#b) -> {more, 16#fb, 16#0f}; +dec_huffman_lookup(16#dd, 16#c) -> {more, 16#fb, 16#18}; +dec_huffman_lookup(16#dd, 16#d) -> {more, 16#fb, 16#1f}; +dec_huffman_lookup(16#dd, 16#e) -> {more, 16#fb, 16#29}; +dec_huffman_lookup(16#dd, 16#f) -> {ok, 16#fb, 16#38}; +dec_huffman_lookup(16#de, 16#0) -> {more, 16#fc, 16#03}; +dec_huffman_lookup(16#de, 16#1) -> {more, 16#fc, 16#06}; +dec_huffman_lookup(16#de, 16#2) -> {more, 16#fc, 16#0a}; 
+dec_huffman_lookup(16#de, 16#3) -> {more, 16#fc, 16#0f}; +dec_huffman_lookup(16#de, 16#4) -> {more, 16#fc, 16#18}; +dec_huffman_lookup(16#de, 16#5) -> {more, 16#fc, 16#1f}; +dec_huffman_lookup(16#de, 16#6) -> {more, 16#fc, 16#29}; +dec_huffman_lookup(16#de, 16#7) -> {ok, 16#fc, 16#38}; +dec_huffman_lookup(16#de, 16#8) -> {more, 16#fd, 16#03}; +dec_huffman_lookup(16#de, 16#9) -> {more, 16#fd, 16#06}; +dec_huffman_lookup(16#de, 16#a) -> {more, 16#fd, 16#0a}; +dec_huffman_lookup(16#de, 16#b) -> {more, 16#fd, 16#0f}; +dec_huffman_lookup(16#de, 16#c) -> {more, 16#fd, 16#18}; +dec_huffman_lookup(16#de, 16#d) -> {more, 16#fd, 16#1f}; +dec_huffman_lookup(16#de, 16#e) -> {more, 16#fd, 16#29}; +dec_huffman_lookup(16#de, 16#f) -> {ok, 16#fd, 16#38}; +dec_huffman_lookup(16#df, 16#0) -> {ok, 16#fe, 16#00}; +dec_huffman_lookup(16#df, 16#1) -> {more, undefined, 16#e3}; +dec_huffman_lookup(16#df, 16#2) -> {more, undefined, 16#e5}; +dec_huffman_lookup(16#df, 16#3) -> {more, undefined, 16#e6}; +dec_huffman_lookup(16#df, 16#4) -> {more, undefined, 16#e9}; +dec_huffman_lookup(16#df, 16#5) -> {more, undefined, 16#ea}; +dec_huffman_lookup(16#df, 16#6) -> {more, undefined, 16#ec}; +dec_huffman_lookup(16#df, 16#7) -> {more, undefined, 16#ed}; +dec_huffman_lookup(16#df, 16#8) -> {more, undefined, 16#f1}; +dec_huffman_lookup(16#df, 16#9) -> {more, undefined, 16#f2}; +dec_huffman_lookup(16#df, 16#a) -> {more, undefined, 16#f4}; +dec_huffman_lookup(16#df, 16#b) -> {more, undefined, 16#f5}; +dec_huffman_lookup(16#df, 16#c) -> {more, undefined, 16#f8}; +dec_huffman_lookup(16#df, 16#d) -> {more, undefined, 16#f9}; +dec_huffman_lookup(16#df, 16#e) -> {more, undefined, 16#fb}; +dec_huffman_lookup(16#df, 16#f) -> {ok, undefined, 16#fc}; +dec_huffman_lookup(16#e0, 16#0) -> {more, 16#fe, 16#01}; +dec_huffman_lookup(16#e0, 16#1) -> {ok, 16#fe, 16#16}; +dec_huffman_lookup(16#e0, 16#2) -> {ok, 16#02, 16#00}; +dec_huffman_lookup(16#e0, 16#3) -> {ok, 16#03, 16#00}; +dec_huffman_lookup(16#e0, 16#4) -> {ok, 16#04, 16#00}; +dec_huffman_lookup(16#e0, 16#5) -> {ok, 16#05, 16#00}; +dec_huffman_lookup(16#e0, 16#6) -> {ok, 16#06, 16#00}; +dec_huffman_lookup(16#e0, 16#7) -> {ok, 16#07, 16#00}; +dec_huffman_lookup(16#e0, 16#8) -> {ok, 16#08, 16#00}; +dec_huffman_lookup(16#e0, 16#9) -> {ok, 16#0b, 16#00}; +dec_huffman_lookup(16#e0, 16#a) -> {ok, 16#0c, 16#00}; +dec_huffman_lookup(16#e0, 16#b) -> {ok, 16#0e, 16#00}; +dec_huffman_lookup(16#e0, 16#c) -> {ok, 16#0f, 16#00}; +dec_huffman_lookup(16#e0, 16#d) -> {ok, 16#10, 16#00}; +dec_huffman_lookup(16#e0, 16#e) -> {ok, 16#11, 16#00}; +dec_huffman_lookup(16#e0, 16#f) -> {ok, 16#12, 16#00}; +dec_huffman_lookup(16#e1, 16#0) -> {more, 16#fe, 16#02}; +dec_huffman_lookup(16#e1, 16#1) -> {more, 16#fe, 16#09}; +dec_huffman_lookup(16#e1, 16#2) -> {more, 16#fe, 16#17}; +dec_huffman_lookup(16#e1, 16#3) -> {ok, 16#fe, 16#28}; +dec_huffman_lookup(16#e1, 16#4) -> {more, 16#02, 16#01}; +dec_huffman_lookup(16#e1, 16#5) -> {ok, 16#02, 16#16}; +dec_huffman_lookup(16#e1, 16#6) -> {more, 16#03, 16#01}; +dec_huffman_lookup(16#e1, 16#7) -> {ok, 16#03, 16#16}; +dec_huffman_lookup(16#e1, 16#8) -> {more, 16#04, 16#01}; +dec_huffman_lookup(16#e1, 16#9) -> {ok, 16#04, 16#16}; +dec_huffman_lookup(16#e1, 16#a) -> {more, 16#05, 16#01}; +dec_huffman_lookup(16#e1, 16#b) -> {ok, 16#05, 16#16}; +dec_huffman_lookup(16#e1, 16#c) -> {more, 16#06, 16#01}; +dec_huffman_lookup(16#e1, 16#d) -> {ok, 16#06, 16#16}; +dec_huffman_lookup(16#e1, 16#e) -> {more, 16#07, 16#01}; +dec_huffman_lookup(16#e1, 16#f) -> {ok, 16#07, 16#16}; 
+dec_huffman_lookup(16#e2, 16#0) -> {more, 16#fe, 16#03}; +dec_huffman_lookup(16#e2, 16#1) -> {more, 16#fe, 16#06}; +dec_huffman_lookup(16#e2, 16#2) -> {more, 16#fe, 16#0a}; +dec_huffman_lookup(16#e2, 16#3) -> {more, 16#fe, 16#0f}; +dec_huffman_lookup(16#e2, 16#4) -> {more, 16#fe, 16#18}; +dec_huffman_lookup(16#e2, 16#5) -> {more, 16#fe, 16#1f}; +dec_huffman_lookup(16#e2, 16#6) -> {more, 16#fe, 16#29}; +dec_huffman_lookup(16#e2, 16#7) -> {ok, 16#fe, 16#38}; +dec_huffman_lookup(16#e2, 16#8) -> {more, 16#02, 16#02}; +dec_huffman_lookup(16#e2, 16#9) -> {more, 16#02, 16#09}; +dec_huffman_lookup(16#e2, 16#a) -> {more, 16#02, 16#17}; +dec_huffman_lookup(16#e2, 16#b) -> {ok, 16#02, 16#28}; +dec_huffman_lookup(16#e2, 16#c) -> {more, 16#03, 16#02}; +dec_huffman_lookup(16#e2, 16#d) -> {more, 16#03, 16#09}; +dec_huffman_lookup(16#e2, 16#e) -> {more, 16#03, 16#17}; +dec_huffman_lookup(16#e2, 16#f) -> {ok, 16#03, 16#28}; +dec_huffman_lookup(16#e3, 16#0) -> {more, 16#02, 16#03}; +dec_huffman_lookup(16#e3, 16#1) -> {more, 16#02, 16#06}; +dec_huffman_lookup(16#e3, 16#2) -> {more, 16#02, 16#0a}; +dec_huffman_lookup(16#e3, 16#3) -> {more, 16#02, 16#0f}; +dec_huffman_lookup(16#e3, 16#4) -> {more, 16#02, 16#18}; +dec_huffman_lookup(16#e3, 16#5) -> {more, 16#02, 16#1f}; +dec_huffman_lookup(16#e3, 16#6) -> {more, 16#02, 16#29}; +dec_huffman_lookup(16#e3, 16#7) -> {ok, 16#02, 16#38}; +dec_huffman_lookup(16#e3, 16#8) -> {more, 16#03, 16#03}; +dec_huffman_lookup(16#e3, 16#9) -> {more, 16#03, 16#06}; +dec_huffman_lookup(16#e3, 16#a) -> {more, 16#03, 16#0a}; +dec_huffman_lookup(16#e3, 16#b) -> {more, 16#03, 16#0f}; +dec_huffman_lookup(16#e3, 16#c) -> {more, 16#03, 16#18}; +dec_huffman_lookup(16#e3, 16#d) -> {more, 16#03, 16#1f}; +dec_huffman_lookup(16#e3, 16#e) -> {more, 16#03, 16#29}; +dec_huffman_lookup(16#e3, 16#f) -> {ok, 16#03, 16#38}; +dec_huffman_lookup(16#e4, 16#0) -> {more, 16#04, 16#02}; +dec_huffman_lookup(16#e4, 16#1) -> {more, 16#04, 16#09}; +dec_huffman_lookup(16#e4, 16#2) -> {more, 16#04, 16#17}; +dec_huffman_lookup(16#e4, 16#3) -> {ok, 16#04, 16#28}; +dec_huffman_lookup(16#e4, 16#4) -> {more, 16#05, 16#02}; +dec_huffman_lookup(16#e4, 16#5) -> {more, 16#05, 16#09}; +dec_huffman_lookup(16#e4, 16#6) -> {more, 16#05, 16#17}; +dec_huffman_lookup(16#e4, 16#7) -> {ok, 16#05, 16#28}; +dec_huffman_lookup(16#e4, 16#8) -> {more, 16#06, 16#02}; +dec_huffman_lookup(16#e4, 16#9) -> {more, 16#06, 16#09}; +dec_huffman_lookup(16#e4, 16#a) -> {more, 16#06, 16#17}; +dec_huffman_lookup(16#e4, 16#b) -> {ok, 16#06, 16#28}; +dec_huffman_lookup(16#e4, 16#c) -> {more, 16#07, 16#02}; +dec_huffman_lookup(16#e4, 16#d) -> {more, 16#07, 16#09}; +dec_huffman_lookup(16#e4, 16#e) -> {more, 16#07, 16#17}; +dec_huffman_lookup(16#e4, 16#f) -> {ok, 16#07, 16#28}; +dec_huffman_lookup(16#e5, 16#0) -> {more, 16#04, 16#03}; +dec_huffman_lookup(16#e5, 16#1) -> {more, 16#04, 16#06}; +dec_huffman_lookup(16#e5, 16#2) -> {more, 16#04, 16#0a}; +dec_huffman_lookup(16#e5, 16#3) -> {more, 16#04, 16#0f}; +dec_huffman_lookup(16#e5, 16#4) -> {more, 16#04, 16#18}; +dec_huffman_lookup(16#e5, 16#5) -> {more, 16#04, 16#1f}; +dec_huffman_lookup(16#e5, 16#6) -> {more, 16#04, 16#29}; +dec_huffman_lookup(16#e5, 16#7) -> {ok, 16#04, 16#38}; +dec_huffman_lookup(16#e5, 16#8) -> {more, 16#05, 16#03}; +dec_huffman_lookup(16#e5, 16#9) -> {more, 16#05, 16#06}; +dec_huffman_lookup(16#e5, 16#a) -> {more, 16#05, 16#0a}; +dec_huffman_lookup(16#e5, 16#b) -> {more, 16#05, 16#0f}; +dec_huffman_lookup(16#e5, 16#c) -> {more, 16#05, 16#18}; +dec_huffman_lookup(16#e5, 16#d) -> 
{more, 16#05, 16#1f}; +dec_huffman_lookup(16#e5, 16#e) -> {more, 16#05, 16#29}; +dec_huffman_lookup(16#e5, 16#f) -> {ok, 16#05, 16#38}; +dec_huffman_lookup(16#e6, 16#0) -> {more, 16#06, 16#03}; +dec_huffman_lookup(16#e6, 16#1) -> {more, 16#06, 16#06}; +dec_huffman_lookup(16#e6, 16#2) -> {more, 16#06, 16#0a}; +dec_huffman_lookup(16#e6, 16#3) -> {more, 16#06, 16#0f}; +dec_huffman_lookup(16#e6, 16#4) -> {more, 16#06, 16#18}; +dec_huffman_lookup(16#e6, 16#5) -> {more, 16#06, 16#1f}; +dec_huffman_lookup(16#e6, 16#6) -> {more, 16#06, 16#29}; +dec_huffman_lookup(16#e6, 16#7) -> {ok, 16#06, 16#38}; +dec_huffman_lookup(16#e6, 16#8) -> {more, 16#07, 16#03}; +dec_huffman_lookup(16#e6, 16#9) -> {more, 16#07, 16#06}; +dec_huffman_lookup(16#e6, 16#a) -> {more, 16#07, 16#0a}; +dec_huffman_lookup(16#e6, 16#b) -> {more, 16#07, 16#0f}; +dec_huffman_lookup(16#e6, 16#c) -> {more, 16#07, 16#18}; +dec_huffman_lookup(16#e6, 16#d) -> {more, 16#07, 16#1f}; +dec_huffman_lookup(16#e6, 16#e) -> {more, 16#07, 16#29}; +dec_huffman_lookup(16#e6, 16#f) -> {ok, 16#07, 16#38}; +dec_huffman_lookup(16#e7, 16#0) -> {more, 16#08, 16#01}; +dec_huffman_lookup(16#e7, 16#1) -> {ok, 16#08, 16#16}; +dec_huffman_lookup(16#e7, 16#2) -> {more, 16#0b, 16#01}; +dec_huffman_lookup(16#e7, 16#3) -> {ok, 16#0b, 16#16}; +dec_huffman_lookup(16#e7, 16#4) -> {more, 16#0c, 16#01}; +dec_huffman_lookup(16#e7, 16#5) -> {ok, 16#0c, 16#16}; +dec_huffman_lookup(16#e7, 16#6) -> {more, 16#0e, 16#01}; +dec_huffman_lookup(16#e7, 16#7) -> {ok, 16#0e, 16#16}; +dec_huffman_lookup(16#e7, 16#8) -> {more, 16#0f, 16#01}; +dec_huffman_lookup(16#e7, 16#9) -> {ok, 16#0f, 16#16}; +dec_huffman_lookup(16#e7, 16#a) -> {more, 16#10, 16#01}; +dec_huffman_lookup(16#e7, 16#b) -> {ok, 16#10, 16#16}; +dec_huffman_lookup(16#e7, 16#c) -> {more, 16#11, 16#01}; +dec_huffman_lookup(16#e7, 16#d) -> {ok, 16#11, 16#16}; +dec_huffman_lookup(16#e7, 16#e) -> {more, 16#12, 16#01}; +dec_huffman_lookup(16#e7, 16#f) -> {ok, 16#12, 16#16}; +dec_huffman_lookup(16#e8, 16#0) -> {more, 16#08, 16#02}; +dec_huffman_lookup(16#e8, 16#1) -> {more, 16#08, 16#09}; +dec_huffman_lookup(16#e8, 16#2) -> {more, 16#08, 16#17}; +dec_huffman_lookup(16#e8, 16#3) -> {ok, 16#08, 16#28}; +dec_huffman_lookup(16#e8, 16#4) -> {more, 16#0b, 16#02}; +dec_huffman_lookup(16#e8, 16#5) -> {more, 16#0b, 16#09}; +dec_huffman_lookup(16#e8, 16#6) -> {more, 16#0b, 16#17}; +dec_huffman_lookup(16#e8, 16#7) -> {ok, 16#0b, 16#28}; +dec_huffman_lookup(16#e8, 16#8) -> {more, 16#0c, 16#02}; +dec_huffman_lookup(16#e8, 16#9) -> {more, 16#0c, 16#09}; +dec_huffman_lookup(16#e8, 16#a) -> {more, 16#0c, 16#17}; +dec_huffman_lookup(16#e8, 16#b) -> {ok, 16#0c, 16#28}; +dec_huffman_lookup(16#e8, 16#c) -> {more, 16#0e, 16#02}; +dec_huffman_lookup(16#e8, 16#d) -> {more, 16#0e, 16#09}; +dec_huffman_lookup(16#e8, 16#e) -> {more, 16#0e, 16#17}; +dec_huffman_lookup(16#e8, 16#f) -> {ok, 16#0e, 16#28}; +dec_huffman_lookup(16#e9, 16#0) -> {more, 16#08, 16#03}; +dec_huffman_lookup(16#e9, 16#1) -> {more, 16#08, 16#06}; +dec_huffman_lookup(16#e9, 16#2) -> {more, 16#08, 16#0a}; +dec_huffman_lookup(16#e9, 16#3) -> {more, 16#08, 16#0f}; +dec_huffman_lookup(16#e9, 16#4) -> {more, 16#08, 16#18}; +dec_huffman_lookup(16#e9, 16#5) -> {more, 16#08, 16#1f}; +dec_huffman_lookup(16#e9, 16#6) -> {more, 16#08, 16#29}; +dec_huffman_lookup(16#e9, 16#7) -> {ok, 16#08, 16#38}; +dec_huffman_lookup(16#e9, 16#8) -> {more, 16#0b, 16#03}; +dec_huffman_lookup(16#e9, 16#9) -> {more, 16#0b, 16#06}; +dec_huffman_lookup(16#e9, 16#a) -> {more, 16#0b, 16#0a}; +dec_huffman_lookup(16#e9, 
16#b) -> {more, 16#0b, 16#0f}; +dec_huffman_lookup(16#e9, 16#c) -> {more, 16#0b, 16#18}; +dec_huffman_lookup(16#e9, 16#d) -> {more, 16#0b, 16#1f}; +dec_huffman_lookup(16#e9, 16#e) -> {more, 16#0b, 16#29}; +dec_huffman_lookup(16#e9, 16#f) -> {ok, 16#0b, 16#38}; +dec_huffman_lookup(16#ea, 16#0) -> {more, 16#0c, 16#03}; +dec_huffman_lookup(16#ea, 16#1) -> {more, 16#0c, 16#06}; +dec_huffman_lookup(16#ea, 16#2) -> {more, 16#0c, 16#0a}; +dec_huffman_lookup(16#ea, 16#3) -> {more, 16#0c, 16#0f}; +dec_huffman_lookup(16#ea, 16#4) -> {more, 16#0c, 16#18}; +dec_huffman_lookup(16#ea, 16#5) -> {more, 16#0c, 16#1f}; +dec_huffman_lookup(16#ea, 16#6) -> {more, 16#0c, 16#29}; +dec_huffman_lookup(16#ea, 16#7) -> {ok, 16#0c, 16#38}; +dec_huffman_lookup(16#ea, 16#8) -> {more, 16#0e, 16#03}; +dec_huffman_lookup(16#ea, 16#9) -> {more, 16#0e, 16#06}; +dec_huffman_lookup(16#ea, 16#a) -> {more, 16#0e, 16#0a}; +dec_huffman_lookup(16#ea, 16#b) -> {more, 16#0e, 16#0f}; +dec_huffman_lookup(16#ea, 16#c) -> {more, 16#0e, 16#18}; +dec_huffman_lookup(16#ea, 16#d) -> {more, 16#0e, 16#1f}; +dec_huffman_lookup(16#ea, 16#e) -> {more, 16#0e, 16#29}; +dec_huffman_lookup(16#ea, 16#f) -> {ok, 16#0e, 16#38}; +dec_huffman_lookup(16#eb, 16#0) -> {more, 16#0f, 16#02}; +dec_huffman_lookup(16#eb, 16#1) -> {more, 16#0f, 16#09}; +dec_huffman_lookup(16#eb, 16#2) -> {more, 16#0f, 16#17}; +dec_huffman_lookup(16#eb, 16#3) -> {ok, 16#0f, 16#28}; +dec_huffman_lookup(16#eb, 16#4) -> {more, 16#10, 16#02}; +dec_huffman_lookup(16#eb, 16#5) -> {more, 16#10, 16#09}; +dec_huffman_lookup(16#eb, 16#6) -> {more, 16#10, 16#17}; +dec_huffman_lookup(16#eb, 16#7) -> {ok, 16#10, 16#28}; +dec_huffman_lookup(16#eb, 16#8) -> {more, 16#11, 16#02}; +dec_huffman_lookup(16#eb, 16#9) -> {more, 16#11, 16#09}; +dec_huffman_lookup(16#eb, 16#a) -> {more, 16#11, 16#17}; +dec_huffman_lookup(16#eb, 16#b) -> {ok, 16#11, 16#28}; +dec_huffman_lookup(16#eb, 16#c) -> {more, 16#12, 16#02}; +dec_huffman_lookup(16#eb, 16#d) -> {more, 16#12, 16#09}; +dec_huffman_lookup(16#eb, 16#e) -> {more, 16#12, 16#17}; +dec_huffman_lookup(16#eb, 16#f) -> {ok, 16#12, 16#28}; +dec_huffman_lookup(16#ec, 16#0) -> {more, 16#0f, 16#03}; +dec_huffman_lookup(16#ec, 16#1) -> {more, 16#0f, 16#06}; +dec_huffman_lookup(16#ec, 16#2) -> {more, 16#0f, 16#0a}; +dec_huffman_lookup(16#ec, 16#3) -> {more, 16#0f, 16#0f}; +dec_huffman_lookup(16#ec, 16#4) -> {more, 16#0f, 16#18}; +dec_huffman_lookup(16#ec, 16#5) -> {more, 16#0f, 16#1f}; +dec_huffman_lookup(16#ec, 16#6) -> {more, 16#0f, 16#29}; +dec_huffman_lookup(16#ec, 16#7) -> {ok, 16#0f, 16#38}; +dec_huffman_lookup(16#ec, 16#8) -> {more, 16#10, 16#03}; +dec_huffman_lookup(16#ec, 16#9) -> {more, 16#10, 16#06}; +dec_huffman_lookup(16#ec, 16#a) -> {more, 16#10, 16#0a}; +dec_huffman_lookup(16#ec, 16#b) -> {more, 16#10, 16#0f}; +dec_huffman_lookup(16#ec, 16#c) -> {more, 16#10, 16#18}; +dec_huffman_lookup(16#ec, 16#d) -> {more, 16#10, 16#1f}; +dec_huffman_lookup(16#ec, 16#e) -> {more, 16#10, 16#29}; +dec_huffman_lookup(16#ec, 16#f) -> {ok, 16#10, 16#38}; +dec_huffman_lookup(16#ed, 16#0) -> {more, 16#11, 16#03}; +dec_huffman_lookup(16#ed, 16#1) -> {more, 16#11, 16#06}; +dec_huffman_lookup(16#ed, 16#2) -> {more, 16#11, 16#0a}; +dec_huffman_lookup(16#ed, 16#3) -> {more, 16#11, 16#0f}; +dec_huffman_lookup(16#ed, 16#4) -> {more, 16#11, 16#18}; +dec_huffman_lookup(16#ed, 16#5) -> {more, 16#11, 16#1f}; +dec_huffman_lookup(16#ed, 16#6) -> {more, 16#11, 16#29}; +dec_huffman_lookup(16#ed, 16#7) -> {ok, 16#11, 16#38}; +dec_huffman_lookup(16#ed, 16#8) -> {more, 16#12, 16#03}; 
+dec_huffman_lookup(16#ed, 16#9) -> {more, 16#12, 16#06}; +dec_huffman_lookup(16#ed, 16#a) -> {more, 16#12, 16#0a}; +dec_huffman_lookup(16#ed, 16#b) -> {more, 16#12, 16#0f}; +dec_huffman_lookup(16#ed, 16#c) -> {more, 16#12, 16#18}; +dec_huffman_lookup(16#ed, 16#d) -> {more, 16#12, 16#1f}; +dec_huffman_lookup(16#ed, 16#e) -> {more, 16#12, 16#29}; +dec_huffman_lookup(16#ed, 16#f) -> {ok, 16#12, 16#38}; +dec_huffman_lookup(16#ee, 16#0) -> {ok, 16#13, 16#00}; +dec_huffman_lookup(16#ee, 16#1) -> {ok, 16#14, 16#00}; +dec_huffman_lookup(16#ee, 16#2) -> {ok, 16#15, 16#00}; +dec_huffman_lookup(16#ee, 16#3) -> {ok, 16#17, 16#00}; +dec_huffman_lookup(16#ee, 16#4) -> {ok, 16#18, 16#00}; +dec_huffman_lookup(16#ee, 16#5) -> {ok, 16#19, 16#00}; +dec_huffman_lookup(16#ee, 16#6) -> {ok, 16#1a, 16#00}; +dec_huffman_lookup(16#ee, 16#7) -> {ok, 16#1b, 16#00}; +dec_huffman_lookup(16#ee, 16#8) -> {ok, 16#1c, 16#00}; +dec_huffman_lookup(16#ee, 16#9) -> {ok, 16#1d, 16#00}; +dec_huffman_lookup(16#ee, 16#a) -> {ok, 16#1e, 16#00}; +dec_huffman_lookup(16#ee, 16#b) -> {ok, 16#1f, 16#00}; +dec_huffman_lookup(16#ee, 16#c) -> {ok, 16#7f, 16#00}; +dec_huffman_lookup(16#ee, 16#d) -> {ok, 16#dc, 16#00}; +dec_huffman_lookup(16#ee, 16#e) -> {ok, 16#f9, 16#00}; +dec_huffman_lookup(16#ee, 16#f) -> {ok, undefined, 16#fd}; +dec_huffman_lookup(16#ef, 16#0) -> {more, 16#13, 16#01}; +dec_huffman_lookup(16#ef, 16#1) -> {ok, 16#13, 16#16}; +dec_huffman_lookup(16#ef, 16#2) -> {more, 16#14, 16#01}; +dec_huffman_lookup(16#ef, 16#3) -> {ok, 16#14, 16#16}; +dec_huffman_lookup(16#ef, 16#4) -> {more, 16#15, 16#01}; +dec_huffman_lookup(16#ef, 16#5) -> {ok, 16#15, 16#16}; +dec_huffman_lookup(16#ef, 16#6) -> {more, 16#17, 16#01}; +dec_huffman_lookup(16#ef, 16#7) -> {ok, 16#17, 16#16}; +dec_huffman_lookup(16#ef, 16#8) -> {more, 16#18, 16#01}; +dec_huffman_lookup(16#ef, 16#9) -> {ok, 16#18, 16#16}; +dec_huffman_lookup(16#ef, 16#a) -> {more, 16#19, 16#01}; +dec_huffman_lookup(16#ef, 16#b) -> {ok, 16#19, 16#16}; +dec_huffman_lookup(16#ef, 16#c) -> {more, 16#1a, 16#01}; +dec_huffman_lookup(16#ef, 16#d) -> {ok, 16#1a, 16#16}; +dec_huffman_lookup(16#ef, 16#e) -> {more, 16#1b, 16#01}; +dec_huffman_lookup(16#ef, 16#f) -> {ok, 16#1b, 16#16}; +dec_huffman_lookup(16#f0, 16#0) -> {more, 16#13, 16#02}; +dec_huffman_lookup(16#f0, 16#1) -> {more, 16#13, 16#09}; +dec_huffman_lookup(16#f0, 16#2) -> {more, 16#13, 16#17}; +dec_huffman_lookup(16#f0, 16#3) -> {ok, 16#13, 16#28}; +dec_huffman_lookup(16#f0, 16#4) -> {more, 16#14, 16#02}; +dec_huffman_lookup(16#f0, 16#5) -> {more, 16#14, 16#09}; +dec_huffman_lookup(16#f0, 16#6) -> {more, 16#14, 16#17}; +dec_huffman_lookup(16#f0, 16#7) -> {ok, 16#14, 16#28}; +dec_huffman_lookup(16#f0, 16#8) -> {more, 16#15, 16#02}; +dec_huffman_lookup(16#f0, 16#9) -> {more, 16#15, 16#09}; +dec_huffman_lookup(16#f0, 16#a) -> {more, 16#15, 16#17}; +dec_huffman_lookup(16#f0, 16#b) -> {ok, 16#15, 16#28}; +dec_huffman_lookup(16#f0, 16#c) -> {more, 16#17, 16#02}; +dec_huffman_lookup(16#f0, 16#d) -> {more, 16#17, 16#09}; +dec_huffman_lookup(16#f0, 16#e) -> {more, 16#17, 16#17}; +dec_huffman_lookup(16#f0, 16#f) -> {ok, 16#17, 16#28}; +dec_huffman_lookup(16#f1, 16#0) -> {more, 16#13, 16#03}; +dec_huffman_lookup(16#f1, 16#1) -> {more, 16#13, 16#06}; +dec_huffman_lookup(16#f1, 16#2) -> {more, 16#13, 16#0a}; +dec_huffman_lookup(16#f1, 16#3) -> {more, 16#13, 16#0f}; +dec_huffman_lookup(16#f1, 16#4) -> {more, 16#13, 16#18}; +dec_huffman_lookup(16#f1, 16#5) -> {more, 16#13, 16#1f}; +dec_huffman_lookup(16#f1, 16#6) -> {more, 16#13, 16#29}; 
+dec_huffman_lookup(16#f1, 16#7) -> {ok, 16#13, 16#38}; +dec_huffman_lookup(16#f1, 16#8) -> {more, 16#14, 16#03}; +dec_huffman_lookup(16#f1, 16#9) -> {more, 16#14, 16#06}; +dec_huffman_lookup(16#f1, 16#a) -> {more, 16#14, 16#0a}; +dec_huffman_lookup(16#f1, 16#b) -> {more, 16#14, 16#0f}; +dec_huffman_lookup(16#f1, 16#c) -> {more, 16#14, 16#18}; +dec_huffman_lookup(16#f1, 16#d) -> {more, 16#14, 16#1f}; +dec_huffman_lookup(16#f1, 16#e) -> {more, 16#14, 16#29}; +dec_huffman_lookup(16#f1, 16#f) -> {ok, 16#14, 16#38}; +dec_huffman_lookup(16#f2, 16#0) -> {more, 16#15, 16#03}; +dec_huffman_lookup(16#f2, 16#1) -> {more, 16#15, 16#06}; +dec_huffman_lookup(16#f2, 16#2) -> {more, 16#15, 16#0a}; +dec_huffman_lookup(16#f2, 16#3) -> {more, 16#15, 16#0f}; +dec_huffman_lookup(16#f2, 16#4) -> {more, 16#15, 16#18}; +dec_huffman_lookup(16#f2, 16#5) -> {more, 16#15, 16#1f}; +dec_huffman_lookup(16#f2, 16#6) -> {more, 16#15, 16#29}; +dec_huffman_lookup(16#f2, 16#7) -> {ok, 16#15, 16#38}; +dec_huffman_lookup(16#f2, 16#8) -> {more, 16#17, 16#03}; +dec_huffman_lookup(16#f2, 16#9) -> {more, 16#17, 16#06}; +dec_huffman_lookup(16#f2, 16#a) -> {more, 16#17, 16#0a}; +dec_huffman_lookup(16#f2, 16#b) -> {more, 16#17, 16#0f}; +dec_huffman_lookup(16#f2, 16#c) -> {more, 16#17, 16#18}; +dec_huffman_lookup(16#f2, 16#d) -> {more, 16#17, 16#1f}; +dec_huffman_lookup(16#f2, 16#e) -> {more, 16#17, 16#29}; +dec_huffman_lookup(16#f2, 16#f) -> {ok, 16#17, 16#38}; +dec_huffman_lookup(16#f3, 16#0) -> {more, 16#18, 16#02}; +dec_huffman_lookup(16#f3, 16#1) -> {more, 16#18, 16#09}; +dec_huffman_lookup(16#f3, 16#2) -> {more, 16#18, 16#17}; +dec_huffman_lookup(16#f3, 16#3) -> {ok, 16#18, 16#28}; +dec_huffman_lookup(16#f3, 16#4) -> {more, 16#19, 16#02}; +dec_huffman_lookup(16#f3, 16#5) -> {more, 16#19, 16#09}; +dec_huffman_lookup(16#f3, 16#6) -> {more, 16#19, 16#17}; +dec_huffman_lookup(16#f3, 16#7) -> {ok, 16#19, 16#28}; +dec_huffman_lookup(16#f3, 16#8) -> {more, 16#1a, 16#02}; +dec_huffman_lookup(16#f3, 16#9) -> {more, 16#1a, 16#09}; +dec_huffman_lookup(16#f3, 16#a) -> {more, 16#1a, 16#17}; +dec_huffman_lookup(16#f3, 16#b) -> {ok, 16#1a, 16#28}; +dec_huffman_lookup(16#f3, 16#c) -> {more, 16#1b, 16#02}; +dec_huffman_lookup(16#f3, 16#d) -> {more, 16#1b, 16#09}; +dec_huffman_lookup(16#f3, 16#e) -> {more, 16#1b, 16#17}; +dec_huffman_lookup(16#f3, 16#f) -> {ok, 16#1b, 16#28}; +dec_huffman_lookup(16#f4, 16#0) -> {more, 16#18, 16#03}; +dec_huffman_lookup(16#f4, 16#1) -> {more, 16#18, 16#06}; +dec_huffman_lookup(16#f4, 16#2) -> {more, 16#18, 16#0a}; +dec_huffman_lookup(16#f4, 16#3) -> {more, 16#18, 16#0f}; +dec_huffman_lookup(16#f4, 16#4) -> {more, 16#18, 16#18}; +dec_huffman_lookup(16#f4, 16#5) -> {more, 16#18, 16#1f}; +dec_huffman_lookup(16#f4, 16#6) -> {more, 16#18, 16#29}; +dec_huffman_lookup(16#f4, 16#7) -> {ok, 16#18, 16#38}; +dec_huffman_lookup(16#f4, 16#8) -> {more, 16#19, 16#03}; +dec_huffman_lookup(16#f4, 16#9) -> {more, 16#19, 16#06}; +dec_huffman_lookup(16#f4, 16#a) -> {more, 16#19, 16#0a}; +dec_huffman_lookup(16#f4, 16#b) -> {more, 16#19, 16#0f}; +dec_huffman_lookup(16#f4, 16#c) -> {more, 16#19, 16#18}; +dec_huffman_lookup(16#f4, 16#d) -> {more, 16#19, 16#1f}; +dec_huffman_lookup(16#f4, 16#e) -> {more, 16#19, 16#29}; +dec_huffman_lookup(16#f4, 16#f) -> {ok, 16#19, 16#38}; +dec_huffman_lookup(16#f5, 16#0) -> {more, 16#1a, 16#03}; +dec_huffman_lookup(16#f5, 16#1) -> {more, 16#1a, 16#06}; +dec_huffman_lookup(16#f5, 16#2) -> {more, 16#1a, 16#0a}; +dec_huffman_lookup(16#f5, 16#3) -> {more, 16#1a, 16#0f}; +dec_huffman_lookup(16#f5, 16#4) -> 
{more, 16#1a, 16#18}; +dec_huffman_lookup(16#f5, 16#5) -> {more, 16#1a, 16#1f}; +dec_huffman_lookup(16#f5, 16#6) -> {more, 16#1a, 16#29}; +dec_huffman_lookup(16#f5, 16#7) -> {ok, 16#1a, 16#38}; +dec_huffman_lookup(16#f5, 16#8) -> {more, 16#1b, 16#03}; +dec_huffman_lookup(16#f5, 16#9) -> {more, 16#1b, 16#06}; +dec_huffman_lookup(16#f5, 16#a) -> {more, 16#1b, 16#0a}; +dec_huffman_lookup(16#f5, 16#b) -> {more, 16#1b, 16#0f}; +dec_huffman_lookup(16#f5, 16#c) -> {more, 16#1b, 16#18}; +dec_huffman_lookup(16#f5, 16#d) -> {more, 16#1b, 16#1f}; +dec_huffman_lookup(16#f5, 16#e) -> {more, 16#1b, 16#29}; +dec_huffman_lookup(16#f5, 16#f) -> {ok, 16#1b, 16#38}; +dec_huffman_lookup(16#f6, 16#0) -> {more, 16#1c, 16#01}; +dec_huffman_lookup(16#f6, 16#1) -> {ok, 16#1c, 16#16}; +dec_huffman_lookup(16#f6, 16#2) -> {more, 16#1d, 16#01}; +dec_huffman_lookup(16#f6, 16#3) -> {ok, 16#1d, 16#16}; +dec_huffman_lookup(16#f6, 16#4) -> {more, 16#1e, 16#01}; +dec_huffman_lookup(16#f6, 16#5) -> {ok, 16#1e, 16#16}; +dec_huffman_lookup(16#f6, 16#6) -> {more, 16#1f, 16#01}; +dec_huffman_lookup(16#f6, 16#7) -> {ok, 16#1f, 16#16}; +dec_huffman_lookup(16#f6, 16#8) -> {more, 16#7f, 16#01}; +dec_huffman_lookup(16#f6, 16#9) -> {ok, 16#7f, 16#16}; +dec_huffman_lookup(16#f6, 16#a) -> {more, 16#dc, 16#01}; +dec_huffman_lookup(16#f6, 16#b) -> {ok, 16#dc, 16#16}; +dec_huffman_lookup(16#f6, 16#c) -> {more, 16#f9, 16#01}; +dec_huffman_lookup(16#f6, 16#d) -> {ok, 16#f9, 16#16}; +dec_huffman_lookup(16#f6, 16#e) -> {more, undefined, 16#fe}; +dec_huffman_lookup(16#f6, 16#f) -> {ok, undefined, 16#ff}; +dec_huffman_lookup(16#f7, 16#0) -> {more, 16#1c, 16#02}; +dec_huffman_lookup(16#f7, 16#1) -> {more, 16#1c, 16#09}; +dec_huffman_lookup(16#f7, 16#2) -> {more, 16#1c, 16#17}; +dec_huffman_lookup(16#f7, 16#3) -> {ok, 16#1c, 16#28}; +dec_huffman_lookup(16#f7, 16#4) -> {more, 16#1d, 16#02}; +dec_huffman_lookup(16#f7, 16#5) -> {more, 16#1d, 16#09}; +dec_huffman_lookup(16#f7, 16#6) -> {more, 16#1d, 16#17}; +dec_huffman_lookup(16#f7, 16#7) -> {ok, 16#1d, 16#28}; +dec_huffman_lookup(16#f7, 16#8) -> {more, 16#1e, 16#02}; +dec_huffman_lookup(16#f7, 16#9) -> {more, 16#1e, 16#09}; +dec_huffman_lookup(16#f7, 16#a) -> {more, 16#1e, 16#17}; +dec_huffman_lookup(16#f7, 16#b) -> {ok, 16#1e, 16#28}; +dec_huffman_lookup(16#f7, 16#c) -> {more, 16#1f, 16#02}; +dec_huffman_lookup(16#f7, 16#d) -> {more, 16#1f, 16#09}; +dec_huffman_lookup(16#f7, 16#e) -> {more, 16#1f, 16#17}; +dec_huffman_lookup(16#f7, 16#f) -> {ok, 16#1f, 16#28}; +dec_huffman_lookup(16#f8, 16#0) -> {more, 16#1c, 16#03}; +dec_huffman_lookup(16#f8, 16#1) -> {more, 16#1c, 16#06}; +dec_huffman_lookup(16#f8, 16#2) -> {more, 16#1c, 16#0a}; +dec_huffman_lookup(16#f8, 16#3) -> {more, 16#1c, 16#0f}; +dec_huffman_lookup(16#f8, 16#4) -> {more, 16#1c, 16#18}; +dec_huffman_lookup(16#f8, 16#5) -> {more, 16#1c, 16#1f}; +dec_huffman_lookup(16#f8, 16#6) -> {more, 16#1c, 16#29}; +dec_huffman_lookup(16#f8, 16#7) -> {ok, 16#1c, 16#38}; +dec_huffman_lookup(16#f8, 16#8) -> {more, 16#1d, 16#03}; +dec_huffman_lookup(16#f8, 16#9) -> {more, 16#1d, 16#06}; +dec_huffman_lookup(16#f8, 16#a) -> {more, 16#1d, 16#0a}; +dec_huffman_lookup(16#f8, 16#b) -> {more, 16#1d, 16#0f}; +dec_huffman_lookup(16#f8, 16#c) -> {more, 16#1d, 16#18}; +dec_huffman_lookup(16#f8, 16#d) -> {more, 16#1d, 16#1f}; +dec_huffman_lookup(16#f8, 16#e) -> {more, 16#1d, 16#29}; +dec_huffman_lookup(16#f8, 16#f) -> {ok, 16#1d, 16#38}; +dec_huffman_lookup(16#f9, 16#0) -> {more, 16#1e, 16#03}; +dec_huffman_lookup(16#f9, 16#1) -> {more, 16#1e, 16#06}; 
+dec_huffman_lookup(16#f9, 16#2) -> {more, 16#1e, 16#0a}; +dec_huffman_lookup(16#f9, 16#3) -> {more, 16#1e, 16#0f}; +dec_huffman_lookup(16#f9, 16#4) -> {more, 16#1e, 16#18}; +dec_huffman_lookup(16#f9, 16#5) -> {more, 16#1e, 16#1f}; +dec_huffman_lookup(16#f9, 16#6) -> {more, 16#1e, 16#29}; +dec_huffman_lookup(16#f9, 16#7) -> {ok, 16#1e, 16#38}; +dec_huffman_lookup(16#f9, 16#8) -> {more, 16#1f, 16#03}; +dec_huffman_lookup(16#f9, 16#9) -> {more, 16#1f, 16#06}; +dec_huffman_lookup(16#f9, 16#a) -> {more, 16#1f, 16#0a}; +dec_huffman_lookup(16#f9, 16#b) -> {more, 16#1f, 16#0f}; +dec_huffman_lookup(16#f9, 16#c) -> {more, 16#1f, 16#18}; +dec_huffman_lookup(16#f9, 16#d) -> {more, 16#1f, 16#1f}; +dec_huffman_lookup(16#f9, 16#e) -> {more, 16#1f, 16#29}; +dec_huffman_lookup(16#f9, 16#f) -> {ok, 16#1f, 16#38}; +dec_huffman_lookup(16#fa, 16#0) -> {more, 16#7f, 16#02}; +dec_huffman_lookup(16#fa, 16#1) -> {more, 16#7f, 16#09}; +dec_huffman_lookup(16#fa, 16#2) -> {more, 16#7f, 16#17}; +dec_huffman_lookup(16#fa, 16#3) -> {ok, 16#7f, 16#28}; +dec_huffman_lookup(16#fa, 16#4) -> {more, 16#dc, 16#02}; +dec_huffman_lookup(16#fa, 16#5) -> {more, 16#dc, 16#09}; +dec_huffman_lookup(16#fa, 16#6) -> {more, 16#dc, 16#17}; +dec_huffman_lookup(16#fa, 16#7) -> {ok, 16#dc, 16#28}; +dec_huffman_lookup(16#fa, 16#8) -> {more, 16#f9, 16#02}; +dec_huffman_lookup(16#fa, 16#9) -> {more, 16#f9, 16#09}; +dec_huffman_lookup(16#fa, 16#a) -> {more, 16#f9, 16#17}; +dec_huffman_lookup(16#fa, 16#b) -> {ok, 16#f9, 16#28}; +dec_huffman_lookup(16#fa, 16#c) -> {ok, 16#0a, 16#00}; +dec_huffman_lookup(16#fa, 16#d) -> {ok, 16#0d, 16#00}; +dec_huffman_lookup(16#fa, 16#e) -> {ok, 16#16, 16#00}; +dec_huffman_lookup(16#fa, 16#f) -> error; +dec_huffman_lookup(16#fb, 16#0) -> {more, 16#7f, 16#03}; +dec_huffman_lookup(16#fb, 16#1) -> {more, 16#7f, 16#06}; +dec_huffman_lookup(16#fb, 16#2) -> {more, 16#7f, 16#0a}; +dec_huffman_lookup(16#fb, 16#3) -> {more, 16#7f, 16#0f}; +dec_huffman_lookup(16#fb, 16#4) -> {more, 16#7f, 16#18}; +dec_huffman_lookup(16#fb, 16#5) -> {more, 16#7f, 16#1f}; +dec_huffman_lookup(16#fb, 16#6) -> {more, 16#7f, 16#29}; +dec_huffman_lookup(16#fb, 16#7) -> {ok, 16#7f, 16#38}; +dec_huffman_lookup(16#fb, 16#8) -> {more, 16#dc, 16#03}; +dec_huffman_lookup(16#fb, 16#9) -> {more, 16#dc, 16#06}; +dec_huffman_lookup(16#fb, 16#a) -> {more, 16#dc, 16#0a}; +dec_huffman_lookup(16#fb, 16#b) -> {more, 16#dc, 16#0f}; +dec_huffman_lookup(16#fb, 16#c) -> {more, 16#dc, 16#18}; +dec_huffman_lookup(16#fb, 16#d) -> {more, 16#dc, 16#1f}; +dec_huffman_lookup(16#fb, 16#e) -> {more, 16#dc, 16#29}; +dec_huffman_lookup(16#fb, 16#f) -> {ok, 16#dc, 16#38}; +dec_huffman_lookup(16#fc, 16#0) -> {more, 16#f9, 16#03}; +dec_huffman_lookup(16#fc, 16#1) -> {more, 16#f9, 16#06}; +dec_huffman_lookup(16#fc, 16#2) -> {more, 16#f9, 16#0a}; +dec_huffman_lookup(16#fc, 16#3) -> {more, 16#f9, 16#0f}; +dec_huffman_lookup(16#fc, 16#4) -> {more, 16#f9, 16#18}; +dec_huffman_lookup(16#fc, 16#5) -> {more, 16#f9, 16#1f}; +dec_huffman_lookup(16#fc, 16#6) -> {more, 16#f9, 16#29}; +dec_huffman_lookup(16#fc, 16#7) -> {ok, 16#f9, 16#38}; +dec_huffman_lookup(16#fc, 16#8) -> {more, 16#0a, 16#01}; +dec_huffman_lookup(16#fc, 16#9) -> {ok, 16#0a, 16#16}; +dec_huffman_lookup(16#fc, 16#a) -> {more, 16#0d, 16#01}; +dec_huffman_lookup(16#fc, 16#b) -> {ok, 16#0d, 16#16}; +dec_huffman_lookup(16#fc, 16#c) -> {more, 16#16, 16#01}; +dec_huffman_lookup(16#fc, 16#d) -> {ok, 16#16, 16#16}; +dec_huffman_lookup(16#fc, 16#e) -> error; +dec_huffman_lookup(16#fc, 16#f) -> error; +dec_huffman_lookup(16#fd, 
16#0) -> {more, 16#0a, 16#02};
+dec_huffman_lookup(16#fd, 16#1) -> {more, 16#0a, 16#09};
+dec_huffman_lookup(16#fd, 16#2) -> {more, 16#0a, 16#17};
+dec_huffman_lookup(16#fd, 16#3) -> {ok, 16#0a, 16#28};
+dec_huffman_lookup(16#fd, 16#4) -> {more, 16#0d, 16#02};
+dec_huffman_lookup(16#fd, 16#5) -> {more, 16#0d, 16#09};
+dec_huffman_lookup(16#fd, 16#6) -> {more, 16#0d, 16#17};
+dec_huffman_lookup(16#fd, 16#7) -> {ok, 16#0d, 16#28};
+dec_huffman_lookup(16#fd, 16#8) -> {more, 16#16, 16#02};
+dec_huffman_lookup(16#fd, 16#9) -> {more, 16#16, 16#09};
+dec_huffman_lookup(16#fd, 16#a) -> {more, 16#16, 16#17};
+dec_huffman_lookup(16#fd, 16#b) -> {ok, 16#16, 16#28};
+dec_huffman_lookup(16#fd, 16#c) -> error;
+dec_huffman_lookup(16#fd, 16#d) -> error;
+dec_huffman_lookup(16#fd, 16#e) -> error;
+dec_huffman_lookup(16#fd, 16#f) -> error;
+dec_huffman_lookup(16#fe, 16#0) -> {more, 16#0a, 16#03};
+dec_huffman_lookup(16#fe, 16#1) -> {more, 16#0a, 16#06};
+dec_huffman_lookup(16#fe, 16#2) -> {more, 16#0a, 16#0a};
+dec_huffman_lookup(16#fe, 16#3) -> {more, 16#0a, 16#0f};
+dec_huffman_lookup(16#fe, 16#4) -> {more, 16#0a, 16#18};
+dec_huffman_lookup(16#fe, 16#5) -> {more, 16#0a, 16#1f};
+dec_huffman_lookup(16#fe, 16#6) -> {more, 16#0a, 16#29};
+dec_huffman_lookup(16#fe, 16#7) -> {ok, 16#0a, 16#38};
+dec_huffman_lookup(16#fe, 16#8) -> {more, 16#0d, 16#03};
+dec_huffman_lookup(16#fe, 16#9) -> {more, 16#0d, 16#06};
+dec_huffman_lookup(16#fe, 16#a) -> {more, 16#0d, 16#0a};
+dec_huffman_lookup(16#fe, 16#b) -> {more, 16#0d, 16#0f};
+dec_huffman_lookup(16#fe, 16#c) -> {more, 16#0d, 16#18};
+dec_huffman_lookup(16#fe, 16#d) -> {more, 16#0d, 16#1f};
+dec_huffman_lookup(16#fe, 16#e) -> {more, 16#0d, 16#29};
+dec_huffman_lookup(16#fe, 16#f) -> {ok, 16#0d, 16#38};
+dec_huffman_lookup(16#ff, 16#0) -> {more, 16#16, 16#03};
+dec_huffman_lookup(16#ff, 16#1) -> {more, 16#16, 16#06};
+dec_huffman_lookup(16#ff, 16#2) -> {more, 16#16, 16#0a};
+dec_huffman_lookup(16#ff, 16#3) -> {more, 16#16, 16#0f};
+dec_huffman_lookup(16#ff, 16#4) -> {more, 16#16, 16#18};
+dec_huffman_lookup(16#ff, 16#5) -> {more, 16#16, 16#1f};
+dec_huffman_lookup(16#ff, 16#6) -> {more, 16#16, 16#29};
+dec_huffman_lookup(16#ff, 16#7) -> {ok, 16#16, 16#38};
+dec_huffman_lookup(16#ff, 16#8) -> error;
+dec_huffman_lookup(16#ff, 16#9) -> error;
+dec_huffman_lookup(16#ff, 16#a) -> error;
+dec_huffman_lookup(16#ff, 16#b) -> error;
+dec_huffman_lookup(16#ff, 16#c) -> error;
+dec_huffman_lookup(16#ff, 16#d) -> error;
+dec_huffman_lookup(16#ff, 16#e) -> error;
+dec_huffman_lookup(16#ff, 16#f) -> error.
diff --git a/deps/cowlib/src/cow_http.erl b/deps/cowlib/src/cow_http.erl
new file mode 100644
index 0000000..bfaace3
--- /dev/null
+++ b/deps/cowlib/src/cow_http.erl
@@ -0,0 +1,426 @@
+%% Copyright (c) 2013-2018, Loïc Hoguin <essen@ninenines.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cow_http).
+
+-export([parse_request_line/1]).
+-export([parse_status_line/1]).
+-export([status_to_integer/1]).
+-export([parse_headers/1]).
+
+-export([parse_fullpath/1]).
+-export([parse_version/1]).
+
+-export([request/4]).
+-export([response/3]).
+-export([headers/1]).
+-export([version/1]).
+
+-type version() :: 'HTTP/1.0' | 'HTTP/1.1'.
+-export_type([version/0]).
+
+-type status() :: 100..999.
+-export_type([status/0]).
+
+-type headers() :: [{binary(), iodata()}].
+-export_type([headers/0]).
+
+-include("cow_inline.hrl").
+
+%% @doc Parse the request line.
+
+-spec parse_request_line(binary()) -> {binary(), binary(), version(), binary()}.
+parse_request_line(Data) ->
+ {Pos, _} = binary:match(Data, <<"\r">>),
+ <<RequestLine:Pos/binary, "\r\n", Rest/bits>> = Data,
+ [Method, Target, Version0] = binary:split(RequestLine, <<$\s>>, [trim_all, global]),
+ Version = case Version0 of
+ <<"HTTP/1.1">> -> 'HTTP/1.1';
+ <<"HTTP/1.0">> -> 'HTTP/1.0'
+ end,
+ {Method, Target, Version, Rest}.
+
+-ifdef(TEST).
+parse_request_line_test_() ->
+ Tests = [
+ {<<"GET /path HTTP/1.0\r\nRest">>,
+ {<<"GET">>, <<"/path">>, 'HTTP/1.0', <<"Rest">>}},
+ {<<"GET /path HTTP/1.1\r\nRest">>,
+ {<<"GET">>, <<"/path">>, 'HTTP/1.1', <<"Rest">>}},
+ {<<"CONNECT proxy.example.org:1080 HTTP/1.1\r\nRest">>,
+ {<<"CONNECT">>, <<"proxy.example.org:1080">>, 'HTTP/1.1', <<"Rest">>}}
+ ],
+ [{V, fun() -> R = parse_request_line(V) end}
+ || {V, R} <- Tests].
+
+parse_request_line_error_test_() ->
+ Tests = [
+ <<>>,
+ <<"GET">>,
+ <<"GET /path\r\n">>,
+ <<"GET /path HTTP/1.1">>,
+ <<"GET /path HTTP/1.1\r">>,
+ <<"GET /path HTTP/1.1\n">>,
+ <<"GET /path HTTP/0.9\r\n">>,
+ <<"content-type: text/plain\r\n">>,
+ <<0:80, "\r\n">>
+ ],
+ [{V, fun() -> {'EXIT', _} = (catch parse_request_line(V)) end}
+ || V <- Tests].
+
+horse_parse_request_line_get_path() ->
+ horse:repeat(200000,
+ parse_request_line(<<"GET /path HTTP/1.1\r\n">>)
+ ).
+-endif.
+
+%% @doc Parse the status line.
+
+-spec parse_status_line(binary()) -> {version(), status(), binary(), binary()}.
+parse_status_line(<< "HTTP/1.1 200 OK\r\n", Rest/bits >>) ->
+ {'HTTP/1.1', 200, <<"OK">>, Rest};
+parse_status_line(<< "HTTP/1.1 404 Not Found\r\n", Rest/bits >>) ->
+ {'HTTP/1.1', 404, <<"Not Found">>, Rest};
+parse_status_line(<< "HTTP/1.1 500 Internal Server Error\r\n", Rest/bits >>) ->
+ {'HTTP/1.1', 500, <<"Internal Server Error">>, Rest};
+parse_status_line(<< "HTTP/1.1 ", Status/bits >>) ->
+ parse_status_line(Status, 'HTTP/1.1');
+parse_status_line(<< "HTTP/1.0 ", Status/bits >>) ->
+ parse_status_line(Status, 'HTTP/1.0').
+
+parse_status_line(<<H, T, U, " ", Rest/bits>>, Version) ->
+ Status = status_to_integer(H, T, U),
+ {Pos, _} = binary:match(Rest, <<"\r">>),
+ << StatusStr:Pos/binary, "\r\n", Rest2/bits >> = Rest,
+ {Version, Status, StatusStr, Rest2}.
+
+-spec status_to_integer(status() | binary()) -> status().
+status_to_integer(Status) when is_integer(Status) ->
+ Status;
+status_to_integer(Status) ->
+ case Status of
+ <<H, T, U>> ->
+ status_to_integer(H, T, U);
+ <<H, T, U, " ", _/bits>> ->
+ status_to_integer(H, T, U)
+ end.
+
+status_to_integer(H, T, U)
+ when $0 =< H, H =< $9, $0 =< T, T =< $9, $0 =< U, U =< $9 ->
+ (H - $0) * 100 + (T - $0) * 10 + (U - $0).
+
+-ifdef(TEST).
+parse_status_line_test_() -> + Tests = [ + {<<"HTTP/1.1 200 OK\r\nRest">>, + {'HTTP/1.1', 200, <<"OK">>, <<"Rest">>}}, + {<<"HTTP/1.0 404 Not Found\r\nRest">>, + {'HTTP/1.0', 404, <<"Not Found">>, <<"Rest">>}}, + {<<"HTTP/1.1 500 Something very funny here\r\nRest">>, + {'HTTP/1.1', 500, <<"Something very funny here">>, <<"Rest">>}}, + {<<"HTTP/1.1 200 \r\nRest">>, + {'HTTP/1.1', 200, <<>>, <<"Rest">>}} + ], + [{V, fun() -> R = parse_status_line(V) end} + || {V, R} <- Tests]. + +parse_status_line_error_test_() -> + Tests = [ + <<>>, + <<"HTTP/1.1">>, + <<"HTTP/1.1 200\r\n">>, + <<"HTTP/1.1 200 OK">>, + <<"HTTP/1.1 200 OK\r">>, + <<"HTTP/1.1 200 OK\n">>, + <<"HTTP/0.9 200 OK\r\n">>, + <<"HTTP/1.1 42 Answer\r\n">>, + <<"HTTP/1.1 999999999 More than OK\r\n">>, + <<"content-type: text/plain\r\n">>, + <<0:80, "\r\n">> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_status_line(V)) end} + || V <- Tests]. + +horse_parse_status_line_200() -> + horse:repeat(200000, + parse_status_line(<<"HTTP/1.1 200 OK\r\n">>) + ). + +horse_parse_status_line_404() -> + horse:repeat(200000, + parse_status_line(<<"HTTP/1.1 404 Not Found\r\n">>) + ). + +horse_parse_status_line_500() -> + horse:repeat(200000, + parse_status_line(<<"HTTP/1.1 500 Internal Server Error\r\n">>) + ). + +horse_parse_status_line_other() -> + horse:repeat(200000, + parse_status_line(<<"HTTP/1.1 416 Requested range not satisfiable\r\n">>) + ). +-endif. + +%% @doc Parse the list of headers. + +-spec parse_headers(binary()) -> {[{binary(), binary()}], binary()}. +parse_headers(Data) -> + parse_header(Data, []). + +parse_header(<< $\r, $\n, Rest/bits >>, Acc) -> + {lists:reverse(Acc), Rest}; +parse_header(Data, Acc) -> + parse_hd_name(Data, Acc, <<>>). + +parse_hd_name(<< C, Rest/bits >>, Acc, SoFar) -> + case C of + $: -> parse_hd_before_value(Rest, Acc, SoFar); + $\s -> parse_hd_name_ws(Rest, Acc, SoFar); + $\t -> parse_hd_name_ws(Rest, Acc, SoFar); + _ -> ?LOWER(parse_hd_name, Rest, Acc, SoFar) + end. + +parse_hd_name_ws(<< C, Rest/bits >>, Acc, Name) -> + case C of + $: -> parse_hd_before_value(Rest, Acc, Name); + $\s -> parse_hd_name_ws(Rest, Acc, Name); + $\t -> parse_hd_name_ws(Rest, Acc, Name) + end. + +parse_hd_before_value(<< $\s, Rest/bits >>, Acc, Name) -> + parse_hd_before_value(Rest, Acc, Name); +parse_hd_before_value(<< $\t, Rest/bits >>, Acc, Name) -> + parse_hd_before_value(Rest, Acc, Name); +parse_hd_before_value(Data, Acc, Name) -> + parse_hd_value(Data, Acc, Name, <<>>). + +parse_hd_value(<< $\r, Rest/bits >>, Acc, Name, SoFar) -> + case Rest of + << $\n, C, Rest2/bits >> when C =:= $\s; C =:= $\t -> + parse_hd_value(Rest2, Acc, Name, << SoFar/binary, C >>); + << $\n, Rest2/bits >> -> + Value = clean_value_ws_end(SoFar, byte_size(SoFar) - 1), + parse_header(Rest2, [{Name, Value}|Acc]) + end; +parse_hd_value(<< C, Rest/bits >>, Acc, Name, SoFar) -> + parse_hd_value(Rest, Acc, Name, << SoFar/binary, C >>). + +%% This function has been copied from cowboy_http. +clean_value_ws_end(_, -1) -> + <<>>; +clean_value_ws_end(Value, N) -> + case binary:at(Value, N) of + $\s -> clean_value_ws_end(Value, N - 1); + $\t -> clean_value_ws_end(Value, N - 1); + _ -> + S = N + 1, + << Value2:S/binary, _/bits >> = Value, + Value2 + end. + +-ifdef(TEST). 
+parse_headers_test_() -> + Tests = [ + {<<"\r\nRest">>, + {[], <<"Rest">>}}, + {<<"Server: Erlang/R17 \r\n\r\n">>, + {[{<<"server">>, <<"Erlang/R17">>}], <<>>}}, + {<<"Server: Erlang/R17\r\n" + "Date: Sun, 23 Feb 2014 09:30:39 GMT\r\n" + "Multiline-Header: why hello!\r\n" + " I didn't see you all the way over there!\r\n" + "Content-Length: 12\r\n" + "Content-Type: text/plain\r\n" + "\r\nRest">>, + {[{<<"server">>, <<"Erlang/R17">>}, + {<<"date">>, <<"Sun, 23 Feb 2014 09:30:39 GMT">>}, + {<<"multiline-header">>, + <<"why hello! I didn't see you all the way over there!">>}, + {<<"content-length">>, <<"12">>}, + {<<"content-type">>, <<"text/plain">>}], + <<"Rest">>}} + ], + [{V, fun() -> R = parse_headers(V) end} + || {V, R} <- Tests]. + +parse_headers_error_test_() -> + Tests = [ + <<>>, + <<"\r">>, + <<"Malformed\r\n\r\n">>, + <<"content-type: text/plain\r\nMalformed\r\n\r\n">>, + <<"HTTP/1.1 200 OK\r\n\r\n">>, + <<0:80, "\r\n\r\n">>, + <<"content-type: text/plain\r\ncontent-length: 12\r\n">> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_headers(V)) end} + || V <- Tests]. + +horse_parse_headers() -> + horse:repeat(50000, + parse_headers(<<"Server: Erlang/R17\r\n" + "Date: Sun, 23 Feb 2014 09:30:39 GMT\r\n" + "Multiline-Header: why hello!\r\n" + " I didn't see you all the way over there!\r\n" + "Content-Length: 12\r\n" + "Content-Type: text/plain\r\n" + "\r\nRest">>) + ). +-endif. + +%% @doc Extract path and query string from a binary, +%% removing any fragment component. + +-spec parse_fullpath(binary()) -> {binary(), binary()}. +parse_fullpath(Fullpath) -> + parse_fullpath(Fullpath, <<>>). + +parse_fullpath(<<>>, Path) -> {Path, <<>>}; +parse_fullpath(<< $#, _/bits >>, Path) -> {Path, <<>>}; +parse_fullpath(<< $?, Qs/bits >>, Path) -> parse_fullpath_query(Qs, Path, <<>>); +parse_fullpath(<< C, Rest/bits >>, SoFar) -> parse_fullpath(Rest, << SoFar/binary, C >>). + +parse_fullpath_query(<<>>, Path, Query) -> {Path, Query}; +parse_fullpath_query(<< $#, _/bits >>, Path, Query) -> {Path, Query}; +parse_fullpath_query(<< C, Rest/bits >>, Path, SoFar) -> + parse_fullpath_query(Rest, Path, << SoFar/binary, C >>). + +-ifdef(TEST). +parse_fullpath_test() -> + {<<"*">>, <<>>} = parse_fullpath(<<"*">>), + {<<"/">>, <<>>} = parse_fullpath(<<"/">>), + {<<"/path/to/resource">>, <<>>} = parse_fullpath(<<"/path/to/resource#fragment">>), + {<<"/path/to/resource">>, <<>>} = parse_fullpath(<<"/path/to/resource">>), + {<<"/">>, <<>>} = parse_fullpath(<<"/?">>), + {<<"/">>, <<"q=cowboy">>} = parse_fullpath(<<"/?q=cowboy#fragment">>), + {<<"/">>, <<"q=cowboy">>} = parse_fullpath(<<"/?q=cowboy">>), + {<<"/path/to/resource">>, <<"q=cowboy">>} + = parse_fullpath(<<"/path/to/resource?q=cowboy">>), + ok. +-endif. + +%% @doc Convert an HTTP version to atom. + +-spec parse_version(binary()) -> version(). +parse_version(<<"HTTP/1.1">>) -> 'HTTP/1.1'; +parse_version(<<"HTTP/1.0">>) -> 'HTTP/1.0'. + +-ifdef(TEST). +parse_version_test() -> + 'HTTP/1.1' = parse_version(<<"HTTP/1.1">>), + 'HTTP/1.0' = parse_version(<<"HTTP/1.0">>), + {'EXIT', _} = (catch parse_version(<<"HTTP/1.2">>)), + ok. +-endif. + +%% @doc Return formatted request-line and headers. +%% @todo Add tests when the corresponding reverse functions are added. + +-spec request(binary(), iodata(), version(), headers()) -> iodata(). +request(Method, Path, Version, Headers) -> + [Method, <<" ">>, Path, <<" ">>, version(Version), <<"\r\n">>, + [[N, <<": ">>, V, <<"\r\n">>] || {N, V} <- Headers], + <<"\r\n">>]. 
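%% Illustrative sketch (not part of the vendored cow_http.erl above): how the
%% status-line and header parsers shown here compose on a well-formed response.
%% The sample binary and the demo function name are assumptions for the example.
demo_parse_response() ->
    Input = <<"HTTP/1.1 200 OK\r\n"
        "content-type: text/plain\r\n"
        "content-length: 2\r\n"
        "\r\n"
        "ok">>,
    {'HTTP/1.1', 200, <<"OK">>, Rest} = cow_http:parse_status_line(Input),
    %% Header names come back lowercased; the remainder after the blank line
    %% is returned untouched so the caller can read the body separately.
    {[{<<"content-type">>, <<"text/plain">>},
      {<<"content-length">>, <<"2">>}], <<"ok">>} = cow_http:parse_headers(Rest),
    ok.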
+ +-spec response(status() | binary(), version(), headers()) -> iodata(). +response(Status, Version, Headers) -> + [version(Version), <<" ">>, status(Status), <<"\r\n">>, + headers(Headers), <<"\r\n">>]. + +-spec headers(headers()) -> iodata(). +headers(Headers) -> + [[N, <<": ">>, V, <<"\r\n">>] || {N, V} <- Headers]. + +%% @doc Return the version as a binary. + +-spec version(version()) -> binary(). +version('HTTP/1.1') -> <<"HTTP/1.1">>; +version('HTTP/1.0') -> <<"HTTP/1.0">>. + +-ifdef(TEST). +version_test() -> + <<"HTTP/1.1">> = version('HTTP/1.1'), + <<"HTTP/1.0">> = version('HTTP/1.0'), + {'EXIT', _} = (catch version('HTTP/1.2')), + ok. +-endif. + +%% @doc Return the status code and string as binary. + +-spec status(status() | binary()) -> binary(). +status(100) -> <<"100 Continue">>; +status(101) -> <<"101 Switching Protocols">>; +status(102) -> <<"102 Processing">>; +status(103) -> <<"103 Early Hints">>; +status(200) -> <<"200 OK">>; +status(201) -> <<"201 Created">>; +status(202) -> <<"202 Accepted">>; +status(203) -> <<"203 Non-Authoritative Information">>; +status(204) -> <<"204 No Content">>; +status(205) -> <<"205 Reset Content">>; +status(206) -> <<"206 Partial Content">>; +status(207) -> <<"207 Multi-Status">>; +status(208) -> <<"208 Already Reported">>; +status(226) -> <<"226 IM Used">>; +status(300) -> <<"300 Multiple Choices">>; +status(301) -> <<"301 Moved Permanently">>; +status(302) -> <<"302 Found">>; +status(303) -> <<"303 See Other">>; +status(304) -> <<"304 Not Modified">>; +status(305) -> <<"305 Use Proxy">>; +status(306) -> <<"306 Switch Proxy">>; +status(307) -> <<"307 Temporary Redirect">>; +status(308) -> <<"308 Permanent Redirect">>; +status(400) -> <<"400 Bad Request">>; +status(401) -> <<"401 Unauthorized">>; +status(402) -> <<"402 Payment Required">>; +status(403) -> <<"403 Forbidden">>; +status(404) -> <<"404 Not Found">>; +status(405) -> <<"405 Method Not Allowed">>; +status(406) -> <<"406 Not Acceptable">>; +status(407) -> <<"407 Proxy Authentication Required">>; +status(408) -> <<"408 Request Timeout">>; +status(409) -> <<"409 Conflict">>; +status(410) -> <<"410 Gone">>; +status(411) -> <<"411 Length Required">>; +status(412) -> <<"412 Precondition Failed">>; +status(413) -> <<"413 Request Entity Too Large">>; +status(414) -> <<"414 Request-URI Too Long">>; +status(415) -> <<"415 Unsupported Media Type">>; +status(416) -> <<"416 Requested Range Not Satisfiable">>; +status(417) -> <<"417 Expectation Failed">>; +status(418) -> <<"418 I'm a teapot">>; +status(421) -> <<"421 Misdirected Request">>; +status(422) -> <<"422 Unprocessable Entity">>; +status(423) -> <<"423 Locked">>; +status(424) -> <<"424 Failed Dependency">>; +status(425) -> <<"425 Unordered Collection">>; +status(426) -> <<"426 Upgrade Required">>; +status(428) -> <<"428 Precondition Required">>; +status(429) -> <<"429 Too Many Requests">>; +status(431) -> <<"431 Request Header Fields Too Large">>; +status(451) -> <<"451 Unavailable For Legal Reasons">>; +status(500) -> <<"500 Internal Server Error">>; +status(501) -> <<"501 Not Implemented">>; +status(502) -> <<"502 Bad Gateway">>; +status(503) -> <<"503 Service Unavailable">>; +status(504) -> <<"504 Gateway Timeout">>; +status(505) -> <<"505 HTTP Version Not Supported">>; +status(506) -> <<"506 Variant Also Negotiates">>; +status(507) -> <<"507 Insufficient Storage">>; +status(508) -> <<"508 Loop Detected">>; +status(510) -> <<"510 Not Extended">>; +status(511) -> <<"511 Network Authentication Required">>; +status(B) when is_binary(B) -> 
B.
diff --git a/deps/cowlib/src/cow_http2.erl b/deps/cowlib/src/cow_http2.erl
new file mode 100644
index 0000000..225d2ec
--- /dev/null
+++ b/deps/cowlib/src/cow_http2.erl
@@ -0,0 +1,483 @@
+%% Copyright (c) 2015-2018, Loïc Hoguin <essen@ninenines.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cow_http2).
+
+%% Parsing.
+-export([parse_sequence/1]).
+-export([parse/1]).
+-export([parse/2]).
+-export([parse_settings_payload/1]).
+
+%% Building.
+-export([data/3]).
+-export([data_header/3]).
+-export([headers/3]).
+-export([priority/4]).
+-export([rst_stream/2]).
+-export([settings/1]).
+-export([settings_payload/1]).
+-export([settings_ack/0]).
+-export([push_promise/3]).
+-export([ping/1]).
+-export([ping_ack/1]).
+-export([goaway/3]).
+-export([window_update/1]).
+-export([window_update/2]).
+
+-type streamid() :: pos_integer().
+-export_type([streamid/0]).
+
+-type fin() :: fin | nofin.
+-export_type([fin/0]).
+
+-type head_fin() :: head_fin | head_nofin.
+-export_type([head_fin/0]).
+
+-type exclusive() :: exclusive | shared.
+-type weight() :: 1..256.
+-type settings() :: map().
+
+-type error() :: no_error
+ | protocol_error
+ | internal_error
+ | flow_control_error
+ | settings_timeout
+ | stream_closed
+ | frame_size_error
+ | refused_stream
+ | cancel
+ | compression_error
+ | connect_error
+ | enhance_your_calm
+ | inadequate_security
+ | http_1_1_required
+ | unknown_error.
+-export_type([error/0]).
+
+-type frame() :: {data, streamid(), fin(), binary()}
+ | {headers, streamid(), fin(), head_fin(), binary()}
+ | {headers, streamid(), fin(), head_fin(), exclusive(), streamid(), weight(), binary()}
+ | {priority, streamid(), exclusive(), streamid(), weight()}
+ | {rst_stream, streamid(), error()}
+ | {settings, settings()}
+ | settings_ack
+ | {push_promise, streamid(), head_fin(), streamid(), binary()}
+ | {ping, integer()}
+ | {ping_ack, integer()}
+ | {goaway, streamid(), error(), binary()}
+ | {window_update, non_neg_integer()}
+ | {window_update, streamid(), non_neg_integer()}
+ | {continuation, streamid(), head_fin(), binary()}.
+-export_type([frame/0]).
+
+%% Parsing.
+
+-spec parse_sequence(binary())
+ -> {ok, binary()} | more | {connection_error, error(), atom()}.
+parse_sequence(<<"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n", Rest/bits>>) ->
+ {ok, Rest};
+parse_sequence(Data) when byte_size(Data) >= 24 ->
+ {connection_error, protocol_error,
+ 'The connection preface was invalid. (RFC7540 3.5)'};
+parse_sequence(Data) ->
+ Len = byte_size(Data),
+ <<Preface:Len/binary, _/bits>> = <<"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n">>,
+ case Data of
+ Preface ->
+ more;
+ _ ->
+ {connection_error, protocol_error,
+ 'The connection preface was invalid. (RFC7540 3.5)'}
+ end.
+
+parse(<< Len:24, _/bits >>, MaxFrameSize) when Len > MaxFrameSize ->
+ {connection_error, frame_size_error, 'The frame size exceeded SETTINGS_MAX_FRAME_SIZE.
(RFC7540 4.2)'}; +parse(Data, _) -> + parse(Data). + +%% +%% DATA frames. +%% +parse(<< _:24, 0:8, _:9, 0:31, _/bits >>) -> + {connection_error, protocol_error, 'DATA frames MUST be associated with a stream. (RFC7540 6.1)'}; +parse(<< 0:24, 0:8, _:4, 1:1, _:35, _/bits >>) -> + {connection_error, frame_size_error, 'DATA frames with padding flag MUST have a length > 0. (RFC7540 6.1)'}; +parse(<< Len0:24, 0:8, _:4, 1:1, _:35, PadLen:8, _/bits >>) when PadLen >= Len0 -> + {connection_error, protocol_error, 'Length of padding MUST be less than length of payload. (RFC7540 6.1)'}; +%% No padding. +parse(<< Len:24, 0:8, _:4, 0:1, _:2, FlagEndStream:1, _:1, StreamID:31, Data:Len/binary, Rest/bits >>) -> + {ok, {data, StreamID, parse_fin(FlagEndStream), Data}, Rest}; +%% Padding. +parse(<< Len0:24, 0:8, _:4, 1:1, _:2, FlagEndStream:1, _:1, StreamID:31, PadLen:8, Rest0/bits >>) + when byte_size(Rest0) >= Len0 - 1 -> + Len = Len0 - PadLen - 1, + case Rest0 of + << Data:Len/binary, 0:PadLen/unit:8, Rest/bits >> -> + {ok, {data, StreamID, parse_fin(FlagEndStream), Data}, Rest}; + _ -> + {connection_error, protocol_error, 'Padding octets MUST be set to zero. (RFC7540 6.1)'} + end; +%% +%% HEADERS frames. +%% +parse(<< _:24, 1:8, _:9, 0:31, _/bits >>) -> + {connection_error, protocol_error, 'HEADERS frames MUST be associated with a stream. (RFC7540 6.2)'}; +parse(<< 0:24, 1:8, _:4, 1:1, _:35, _/bits >>) -> + {connection_error, frame_size_error, 'HEADERS frames with padding flag MUST have a length > 0. (RFC7540 6.1)'}; +parse(<< Len:24, 1:8, _:2, 1:1, _:37, _/bits >>) when Len < 5 -> + {connection_error, frame_size_error, 'HEADERS frames with priority flag MUST have a length >= 5. (RFC7540 6.1)'}; +parse(<< Len:24, 1:8, _:2, 1:1, _:1, 1:1, _:35, _/bits >>) when Len < 6 -> + {connection_error, frame_size_error, 'HEADERS frames with padding and priority flags MUST have a length >= 6. (RFC7540 6.1)'}; +parse(<< Len0:24, 1:8, _:4, 1:1, _:35, PadLen:8, _/bits >>) when PadLen >= Len0 -> + {connection_error, protocol_error, 'Length of padding MUST be less than length of payload. (RFC7540 6.2)'}; +parse(<< Len0:24, 1:8, _:2, 1:1, _:1, 1:1, _:35, PadLen:8, _/bits >>) when PadLen >= Len0 - 5 -> + {connection_error, protocol_error, 'Length of padding MUST be less than length of payload. (RFC7540 6.2)'}; +%% No padding, no priority. +parse(<< Len:24, 1:8, _:2, 0:1, _:1, 0:1, FlagEndHeaders:1, _:1, FlagEndStream:1, _:1, StreamID:31, + HeaderBlockFragment:Len/binary, Rest/bits >>) -> + {ok, {headers, StreamID, parse_fin(FlagEndStream), parse_head_fin(FlagEndHeaders), HeaderBlockFragment}, Rest}; +%% Padding, no priority. +parse(<< Len0:24, 1:8, _:2, 0:1, _:1, 1:1, FlagEndHeaders:1, _:1, FlagEndStream:1, _:1, StreamID:31, + PadLen:8, Rest0/bits >>) when byte_size(Rest0) >= Len0 - 1 -> + Len = Len0 - PadLen - 1, + case Rest0 of + << HeaderBlockFragment:Len/binary, 0:PadLen/unit:8, Rest/bits >> -> + {ok, {headers, StreamID, parse_fin(FlagEndStream), parse_head_fin(FlagEndHeaders), HeaderBlockFragment}, Rest}; + _ -> + {connection_error, protocol_error, 'Padding octets MUST be set to zero. (RFC7540 6.2)'} + end; +%% No padding, priority. +parse(<< _:24, 1:8, _:2, 1:1, _:1, 0:1, _:4, StreamID:31, _:1, StreamID:31, _/bits >>) -> + {connection_error, protocol_error, + 'HEADERS frames cannot define a stream that depends on itself. 
(RFC7540 5.3.1)'}; +parse(<< Len0:24, 1:8, _:2, 1:1, _:1, 0:1, FlagEndHeaders:1, _:1, FlagEndStream:1, _:1, StreamID:31, + E:1, DepStreamID:31, Weight:8, Rest0/bits >>) when byte_size(Rest0) >= Len0 - 5 -> + Len = Len0 - 5, + << HeaderBlockFragment:Len/binary, Rest/bits >> = Rest0, + {ok, {headers, StreamID, parse_fin(FlagEndStream), parse_head_fin(FlagEndHeaders), + parse_exclusive(E), DepStreamID, Weight + 1, HeaderBlockFragment}, Rest}; +%% Padding, priority. +parse(<< _:24, 1:8, _:2, 1:1, _:1, 1:1, _:4, StreamID:31, _:9, StreamID:31, _/bits >>) -> + {connection_error, protocol_error, + 'HEADERS frames cannot define a stream that depends on itself. (RFC7540 5.3.1)'}; +parse(<< Len0:24, 1:8, _:2, 1:1, _:1, 1:1, FlagEndHeaders:1, _:1, FlagEndStream:1, _:1, StreamID:31, + PadLen:8, E:1, DepStreamID:31, Weight:8, Rest0/bits >>) when byte_size(Rest0) >= Len0 - 6 -> + Len = Len0 - PadLen - 6, + case Rest0 of + << HeaderBlockFragment:Len/binary, 0:PadLen/unit:8, Rest/bits >> -> + {ok, {headers, StreamID, parse_fin(FlagEndStream), parse_head_fin(FlagEndHeaders), + parse_exclusive(E), DepStreamID, Weight + 1, HeaderBlockFragment}, Rest}; + _ -> + {connection_error, protocol_error, 'Padding octets MUST be set to zero. (RFC7540 6.2)'} + end; +%% +%% PRIORITY frames. +%% +parse(<< 5:24, 2:8, _:9, 0:31, _/bits >>) -> + {connection_error, protocol_error, 'PRIORITY frames MUST be associated with a stream. (RFC7540 6.3)'}; +parse(<< 5:24, 2:8, _:9, StreamID:31, _:1, StreamID:31, _:8, Rest/bits >>) -> + {stream_error, StreamID, protocol_error, + 'PRIORITY frames cannot make a stream depend on itself. (RFC7540 5.3.1)', Rest}; +parse(<< 5:24, 2:8, _:9, StreamID:31, E:1, DepStreamID:31, Weight:8, Rest/bits >>) -> + {ok, {priority, StreamID, parse_exclusive(E), DepStreamID, Weight + 1}, Rest}; +%% @todo figure out how to best deal with frame size errors; if we have everything fine +%% if not we might want to inform the caller how much he should expect so that it can +%% decide if it should just close the connection +parse(<< BadLen:24, 2:8, _:9, StreamID:31, _:BadLen/binary, Rest/bits >>) -> + {stream_error, StreamID, frame_size_error, 'PRIORITY frames MUST be 5 bytes wide. (RFC7540 6.3)', Rest}; +%% +%% RST_STREAM frames. +%% +parse(<< 4:24, 3:8, _:9, 0:31, _/bits >>) -> + {connection_error, protocol_error, 'RST_STREAM frames MUST be associated with a stream. (RFC7540 6.4)'}; +parse(<< 4:24, 3:8, _:9, StreamID:31, ErrorCode:32, Rest/bits >>) -> + {ok, {rst_stream, StreamID, parse_error_code(ErrorCode)}, Rest}; +%% @todo same as priority +parse(<< _:24, 3:8, _:9, _:31, _/bits >>) -> + {connection_error, frame_size_error, 'RST_STREAM frames MUST be 4 bytes wide. (RFC7540 6.4)'}; +%% +%% SETTINGS frames. +%% +parse(<< 0:24, 4:8, _:7, 1:1, _:1, 0:31, Rest/bits >>) -> + {ok, settings_ack, Rest}; +parse(<< _:24, 4:8, _:7, 1:1, _:1, 0:31, _/bits >>) -> + {connection_error, frame_size_error, 'SETTINGS frames with the ACK flag set MUST have a length of 0. (RFC7540 6.5)'}; +parse(<< Len:24, 4:8, _:7, 0:1, _:1, 0:31, _/bits >>) when Len rem 6 =/= 0 -> + {connection_error, frame_size_error, 'SETTINGS frames MUST have a length multiple of 6. (RFC7540 6.5)'}; +parse(<< Len:24, 4:8, _:7, 0:1, _:1, 0:31, Rest/bits >>) when byte_size(Rest) >= Len -> + parse_settings_payload(Rest, Len, #{}); +parse(<< _:24, 4:8, _:8, _:1, StreamID:31, _/bits >>) when StreamID =/= 0 -> + {connection_error, protocol_error, 'SETTINGS frames MUST NOT be associated with a stream. (RFC7540 6.5)'}; +%% +%% PUSH_PROMISE frames. 
+%% +parse(<< Len:24, 5:8, _:40, _/bits >>) when Len < 4 -> + {connection_error, frame_size_error, 'PUSH_PROMISE frames MUST have a length >= 4. (RFC7540 4.2, RFC7540 6.6)'}; +parse(<< Len:24, 5:8, _:4, 1:1, _:35, _/bits >>) when Len < 5 -> + {connection_error, frame_size_error, 'PUSH_PROMISE frames with padding flag MUST have a length >= 5. (RFC7540 4.2, RFC7540 6.6)'}; +parse(<< _:24, 5:8, _:9, 0:31, _/bits >>) -> + {connection_error, protocol_error, 'PUSH_PROMISE frames MUST be associated with a stream. (RFC7540 6.6)'}; +parse(<< Len0:24, 5:8, _:4, 1:1, _:35, PadLen:8, _/bits >>) when PadLen >= Len0 - 4 -> + {connection_error, protocol_error, 'Length of padding MUST be less than length of payload. (RFC7540 6.6)'}; +parse(<< Len0:24, 5:8, _:4, 0:1, FlagEndHeaders:1, _:3, StreamID:31, _:1, PromisedStreamID:31, Rest0/bits >>) + when byte_size(Rest0) >= Len0 - 4 -> + Len = Len0 - 4, + << HeaderBlockFragment:Len/binary, Rest/bits >> = Rest0, + {ok, {push_promise, StreamID, parse_head_fin(FlagEndHeaders), PromisedStreamID, HeaderBlockFragment}, Rest}; +parse(<< Len0:24, 5:8, _:4, 1:1, FlagEndHeaders:1, _:2, StreamID:31, PadLen:8, _:1, PromisedStreamID:31, Rest0/bits >>) + when byte_size(Rest0) >= Len0 - 5 -> + Len = Len0 - 5, + case Rest0 of + << HeaderBlockFragment:Len/binary, 0:PadLen/unit:8, Rest/bits >> -> + {ok, {push_promise, StreamID, parse_head_fin(FlagEndHeaders), PromisedStreamID, HeaderBlockFragment}, Rest}; + _ -> + {connection_error, protocol_error, 'Padding octets MUST be set to zero. (RFC7540 6.6)'} + end; +%% +%% PING frames. +%% +parse(<< 8:24, 6:8, _:7, 1:1, _:1, 0:31, Opaque:64, Rest/bits >>) -> + {ok, {ping_ack, Opaque}, Rest}; +parse(<< 8:24, 6:8, _:7, 0:1, _:1, 0:31, Opaque:64, Rest/bits >>) -> + {ok, {ping, Opaque}, Rest}; +parse(<< 8:24, 6:8, _:104, _/bits >>) -> + {connection_error, protocol_error, 'PING frames MUST NOT be associated with a stream. (RFC7540 6.7)'}; +parse(<< Len:24, 6:8, _/bits >>) when Len =/= 8 -> + {connection_error, frame_size_error, 'PING frames MUST be 8 bytes wide. (RFC7540 6.7)'}; +%% +%% GOAWAY frames. +%% +parse(<< Len0:24, 7:8, _:9, 0:31, _:1, LastStreamID:31, ErrorCode:32, Rest0/bits >>) when byte_size(Rest0) >= Len0 - 8 -> + Len = Len0 - 8, + << DebugData:Len/binary, Rest/bits >> = Rest0, + {ok, {goaway, LastStreamID, parse_error_code(ErrorCode), DebugData}, Rest}; +parse(<< Len:24, 7:8, _:40, _/bits >>) when Len < 8 -> + {connection_error, frame_size_error, 'GOAWAY frames MUST have a length >= 8. (RFC7540 4.2, RFC7540 6.8)'}; +parse(<< _:24, 7:8, _:40, _/bits >>) -> + {connection_error, protocol_error, 'GOAWAY frames MUST NOT be associated with a stream. (RFC7540 6.8)'}; +%% +%% WINDOW_UPDATE frames. +%% +parse(<< 4:24, 8:8, _:9, 0:31, _:1, 0:31, _/bits >>) -> + {connection_error, protocol_error, 'WINDOW_UPDATE frames MUST have a non-zero increment. (RFC7540 6.9)'}; +parse(<< 4:24, 8:8, _:9, 0:31, _:1, Increment:31, Rest/bits >>) -> + {ok, {window_update, Increment}, Rest}; +parse(<< 4:24, 8:8, _:9, StreamID:31, _:1, 0:31, Rest/bits >>) -> + {stream_error, StreamID, protocol_error, 'WINDOW_UPDATE frames MUST have a non-zero increment. (RFC7540 6.9)', Rest}; +parse(<< 4:24, 8:8, _:9, StreamID:31, _:1, Increment:31, Rest/bits >>) -> + {ok, {window_update, StreamID, Increment}, Rest}; +parse(<< Len:24, 8:8, _/bits >>) when Len =/= 4-> + {connection_error, frame_size_error, 'WINDOW_UPDATE frames MUST be 4 bytes wide. (RFC7540 6.9)'}; +%% +%% CONTINUATION frames. 
+%% +parse(<< _:24, 9:8, _:9, 0:31, _/bits >>) -> + {connection_error, protocol_error, 'CONTINUATION frames MUST be associated with a stream. (RFC7540 6.10)'}; +parse(<< Len:24, 9:8, _:5, FlagEndHeaders:1, _:3, StreamID:31, HeaderBlockFragment:Len/binary, Rest/bits >>) -> + {ok, {continuation, StreamID, parse_head_fin(FlagEndHeaders), HeaderBlockFragment}, Rest}; +%% +%% Unknown frames are ignored. +%% +parse(<< Len:24, Type:8, _:40, _:Len/binary, Rest/bits >>) when Type > 9 -> + {ignore, Rest}; +%% +%% Incomplete frames. +%% +parse(_) -> + more. + +-ifdef(TEST). +parse_ping_test() -> + Ping = ping(1234567890), + _ = [more = parse(binary:part(Ping, 0, I)) || I <- lists:seq(1, byte_size(Ping) - 1)], + {ok, {ping, 1234567890}, <<>>} = parse(Ping), + {ok, {ping, 1234567890}, << 42 >>} = parse(<< Ping/binary, 42 >>), + ok. + +parse_windows_update_test() -> + WindowUpdate = << 4:24, 8:8, 0:9, 0:31, 0:1, 12345:31 >>, + _ = [more = parse(binary:part(WindowUpdate, 0, I)) || I <- lists:seq(1, byte_size(WindowUpdate) - 1)], + {ok, {window_update, 12345}, <<>>} = parse(WindowUpdate), + {ok, {window_update, 12345}, << 42 >>} = parse(<< WindowUpdate/binary, 42 >>), + ok. + +parse_settings_test() -> + more = parse(<< 0:24, 4:8, 1:8, 0:8 >>), + {ok, settings_ack, <<>>} = parse(<< 0:24, 4:8, 1:8, 0:32 >>), + {connection_error, protocol_error, _} = parse(<< 0:24, 4:8, 1:8, 0:1, 1:31 >>), + ok. +-endif. + +parse_fin(0) -> nofin; +parse_fin(1) -> fin. + +parse_head_fin(0) -> head_nofin; +parse_head_fin(1) -> head_fin. + +parse_exclusive(0) -> shared; +parse_exclusive(1) -> exclusive. + +parse_error_code( 0) -> no_error; +parse_error_code( 1) -> protocol_error; +parse_error_code( 2) -> internal_error; +parse_error_code( 3) -> flow_control_error; +parse_error_code( 4) -> settings_timeout; +parse_error_code( 5) -> stream_closed; +parse_error_code( 6) -> frame_size_error; +parse_error_code( 7) -> refused_stream; +parse_error_code( 8) -> cancel; +parse_error_code( 9) -> compression_error; +parse_error_code(10) -> connect_error; +parse_error_code(11) -> enhance_your_calm; +parse_error_code(12) -> inadequate_security; +parse_error_code(13) -> http_1_1_required; +parse_error_code(_) -> unknown_error. + +parse_settings_payload(SettingsPayload) -> + {ok, {settings, Settings}, <<>>} + = parse_settings_payload(SettingsPayload, byte_size(SettingsPayload), #{}), + Settings. + +parse_settings_payload(Rest, 0, Settings) -> + {ok, {settings, Settings}, Rest}; +%% SETTINGS_HEADER_TABLE_SIZE. +parse_settings_payload(<< 1:16, Value:32, Rest/bits >>, Len, Settings) -> + parse_settings_payload(Rest, Len - 6, Settings#{header_table_size => Value}); +%% SETTINGS_ENABLE_PUSH. +parse_settings_payload(<< 2:16, 0:32, Rest/bits >>, Len, Settings) -> + parse_settings_payload(Rest, Len - 6, Settings#{enable_push => false}); +parse_settings_payload(<< 2:16, 1:32, Rest/bits >>, Len, Settings) -> + parse_settings_payload(Rest, Len - 6, Settings#{enable_push => true}); +parse_settings_payload(<< 2:16, _:32, _/bits >>, _, _) -> + {connection_error, protocol_error, 'The SETTINGS_ENABLE_PUSH value MUST be 0 or 1. (RFC7540 6.5.2)'}; +%% SETTINGS_MAX_CONCURRENT_STREAMS. +parse_settings_payload(<< 3:16, Value:32, Rest/bits >>, Len, Settings) -> + parse_settings_payload(Rest, Len - 6, Settings#{max_concurrent_streams => Value}); +%% SETTINGS_INITIAL_WINDOW_SIZE. 
+parse_settings_payload(<< 4:16, Value:32, _/bits >>, _, _) when Value > 16#7fffffff -> + {connection_error, flow_control_error, 'The maximum SETTINGS_INITIAL_WINDOW_SIZE value is 0x7fffffff. (RFC7540 6.5.2)'}; +parse_settings_payload(<< 4:16, Value:32, Rest/bits >>, Len, Settings) -> + parse_settings_payload(Rest, Len - 6, Settings#{initial_window_size => Value}); +%% SETTINGS_MAX_FRAME_SIZE. +parse_settings_payload(<< 5:16, Value:32, _/bits >>, _, _) when Value =< 16#3fff -> + {connection_error, protocol_error, 'The SETTINGS_MAX_FRAME_SIZE value must be > 0x3fff. (RFC7540 6.5.2)'}; +parse_settings_payload(<< 5:16, Value:32, Rest/bits >>, Len, Settings) when Value =< 16#ffffff -> + parse_settings_payload(Rest, Len - 6, Settings#{max_frame_size => Value}); +parse_settings_payload(<< 5:16, _:32, _/bits >>, _, _) -> + {connection_error, protocol_error, 'The SETTINGS_MAX_FRAME_SIZE value must be =< 0xffffff. (RFC7540 6.5.2)'}; +%% SETTINGS_MAX_HEADER_LIST_SIZE. +parse_settings_payload(<< 6:16, Value:32, Rest/bits >>, Len, Settings) -> + parse_settings_payload(Rest, Len - 6, Settings#{max_header_list_size => Value}); +%% SETTINGS_ENABLE_CONNECT_PROTOCOL. +parse_settings_payload(<< 8:16, 0:32, Rest/bits >>, Len, Settings) -> + parse_settings_payload(Rest, Len - 6, Settings#{enable_connect_protocol => false}); +parse_settings_payload(<< 8:16, 1:32, Rest/bits >>, Len, Settings) -> + parse_settings_payload(Rest, Len - 6, Settings#{enable_connect_protocol => true}); +parse_settings_payload(<< 8:16, _:32, _/bits >>, _, _) -> + {connection_error, protocol_error, 'The SETTINGS_ENABLE_CONNECT_PROTOCOL value MUST be 0 or 1. (draft-h2-websockets-01 3)'}; +%% Ignore unknown settings. +parse_settings_payload(<< _:48, Rest/bits >>, Len, Settings) -> + parse_settings_payload(Rest, Len - 6, Settings). + +%% Building. + +data(StreamID, IsFin, Data) -> + [data_header(StreamID, IsFin, iolist_size(Data)), Data]. + +data_header(StreamID, IsFin, Len) -> + FlagEndStream = flag_fin(IsFin), + << Len:24, 0:15, FlagEndStream:1, 0:1, StreamID:31 >>. + +%% @todo Check size of HeaderBlock and use CONTINUATION frames if needed. +headers(StreamID, IsFin, HeaderBlock) -> + Len = iolist_size(HeaderBlock), + FlagEndStream = flag_fin(IsFin), + FlagEndHeaders = 1, + [<< Len:24, 1:8, 0:5, FlagEndHeaders:1, 0:1, FlagEndStream:1, 0:1, StreamID:31 >>, HeaderBlock]. + +priority(StreamID, E, DepStreamID, Weight) -> + FlagExclusive = exclusive(E), + << 5:24, 2:8, 0:9, StreamID:31, FlagExclusive:1, DepStreamID:31, Weight:8 >>. + +rst_stream(StreamID, Reason) -> + ErrorCode = error_code(Reason), + << 4:24, 3:8, 0:9, StreamID:31, ErrorCode:32 >>. + +settings(Settings) -> + Payload = settings_payload(Settings), + Len = iolist_size(Payload), + [<< Len:24, 4:8, 0:40 >>, Payload]. + +settings_payload(Settings) -> + [case Key of + header_table_size -> <<1:16, Value:32>>; + enable_push when Value -> <<2:16, 1:32>>; + enable_push -> <<2:16, 0:32>>; + max_concurrent_streams when Value =:= infinity -> <<>>; + max_concurrent_streams -> <<3:16, Value:32>>; + initial_window_size -> <<4:16, Value:32>>; + max_frame_size -> <<5:16, Value:32>>; + max_header_list_size when Value =:= infinity -> <<>>; + max_header_list_size -> <<6:16, Value:32>>; + enable_connect_protocol when Value -> <<8:16, 1:32>>; + enable_connect_protocol -> <<8:16, 0:32>> + end || {Key, Value} <- maps:to_list(Settings)]. + +settings_ack() -> + << 0:24, 4:8, 1:8, 0:32 >>. + +%% @todo Check size of HeaderBlock and use CONTINUATION frames if needed. 
+push_promise(StreamID, PromisedStreamID, HeaderBlock) ->
+ Len = iolist_size(HeaderBlock) + 4,
+ FlagEndHeaders = 1,
+ [<< Len:24, 5:8, 0:5, FlagEndHeaders:1, 0:3, StreamID:31, 0:1, PromisedStreamID:31 >>, HeaderBlock].
+
+ping(Opaque) ->
+ << 8:24, 6:8, 0:40, Opaque:64 >>.
+
+ping_ack(Opaque) ->
+ << 8:24, 6:8, 0:7, 1:1, 0:32, Opaque:64 >>.
+
+goaway(LastStreamID, Reason, DebugData) ->
+ ErrorCode = error_code(Reason),
+ Len = iolist_size(DebugData) + 8,
+ [<< Len:24, 7:8, 0:41, LastStreamID:31, ErrorCode:32 >>, DebugData].
+
+window_update(Increment) ->
+ window_update(0, Increment).
+
+window_update(StreamID, Increment) when Increment =< 16#7fffffff ->
+ << 4:24, 8:8, 0:8, StreamID:32, 0:1, Increment:31 >>.
+
+flag_fin(nofin) -> 0;
+flag_fin(fin) -> 1.
+
+exclusive(shared) -> 0;
+exclusive(exclusive) -> 1.
+
+error_code(no_error) -> 0;
+error_code(protocol_error) -> 1;
+error_code(internal_error) -> 2;
+error_code(flow_control_error) -> 3;
+error_code(settings_timeout) -> 4;
+error_code(stream_closed) -> 5;
+error_code(frame_size_error) -> 6;
+error_code(refused_stream) -> 7;
+error_code(cancel) -> 8;
+error_code(compression_error) -> 9;
+error_code(connect_error) -> 10;
+error_code(enhance_your_calm) -> 11;
+error_code(inadequate_security) -> 12;
+error_code(http_1_1_required) -> 13.
diff --git a/deps/cowlib/src/cow_http2_machine.erl b/deps/cowlib/src/cow_http2_machine.erl
new file mode 100644
index 0000000..35eb72e
--- /dev/null
+++ b/deps/cowlib/src/cow_http2_machine.erl
@@ -0,0 +1,1647 @@
+%% Copyright (c) 2018, Loïc Hoguin <essen@ninenines.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cow_http2_machine).
+
+-export([init/2]).
+-export([init_stream/2]).
+-export([init_upgrade_stream/2]).
+-export([frame/2]).
+-export([ignored_frame/1]).
+-export([timeout/3]).
+-export([prepare_headers/5]).
+-export([prepare_push_promise/4]).
+-export([prepare_trailers/3]).
+-export([send_or_queue_data/4]).
+-export([ensure_window/2]).
+-export([ensure_window/3]).
+-export([update_window/2]).
+-export([update_window/3]).
+-export([reset_stream/2]).
+-export([get_connection_local_buffer_size/1]).
+-export([get_local_setting/2]).
+-export([get_remote_settings/1]).
+-export([get_last_streamid/1]).
+-export([set_last_streamid/1]).
+-export([get_stream_local_buffer_size/2]).
+-export([get_stream_local_state/2]).
+-export([get_stream_remote_state/2]).
+-export([is_lingering_stream/2]).
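%% Illustrative sketch (not part of the vendored files): a SETTINGS frame built
%% with the cow_http2 builders above parses back to the same map. The concrete
%% setting values and the demo function name are assumptions for the example.
demo_settings_roundtrip() ->
    Frame = iolist_to_binary(cow_http2:settings(#{enable_push => false,
        max_frame_size => 32768})),
    %% parse/1 returns the decoded frame and whatever bytes follow it.
    {ok, {settings, #{enable_push := false, max_frame_size := 32768}}, <<>>} =
        cow_http2:parse(Frame),
    ok.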
+ +-type opts() :: #{ + connection_window_margin_size => 0..16#7fffffff, + connection_window_update_threshold => 0..16#7fffffff, + enable_connect_protocol => boolean(), + initial_connection_window_size => 65535..16#7fffffff, + initial_stream_window_size => 0..16#7fffffff, + max_connection_window_size => 0..16#7fffffff, + max_concurrent_streams => non_neg_integer() | infinity, + max_decode_table_size => non_neg_integer(), + max_encode_table_size => non_neg_integer(), + max_frame_size_received => 16384..16777215, + max_frame_size_sent => 16384..16777215 | infinity, + max_stream_window_size => 0..16#7fffffff, + message_tag => any(), + preface_timeout => timeout(), + settings_timeout => timeout(), + stream_window_data_threshold => 0..16#7fffffff, + stream_window_margin_size => 0..16#7fffffff, + stream_window_update_threshold => 0..16#7fffffff +}. +-export_type([opts/0]). + +%% The order of the fields is significant. +-record(sendfile, { + offset :: non_neg_integer(), + bytes :: pos_integer(), + path :: file:name_all() +}). + +-record(stream, { + id = undefined :: cow_http2:streamid(), + + %% Request method. + method = undefined :: binary(), + + %% Whether we finished sending data. + local = idle :: idle | cow_http2:fin(), + + %% Local flow control window (how much we can send). + local_window :: integer(), + + %% Buffered data waiting for the flow control window to increase. + local_buffer = queue:new() :: + queue:queue({cow_http2:fin(), non_neg_integer(), {data, iodata()} | #sendfile{}}), + local_buffer_size = 0 :: non_neg_integer(), + local_trailers = undefined :: undefined | cow_http:headers(), + + %% Whether we finished receiving data. + remote = idle :: idle | cow_http2:fin(), + + %% Remote flow control window (how much we accept to receive). + remote_window :: integer(), + + %% Size expected and read from the request body. + remote_expected_size = undefined :: undefined | non_neg_integer(), + remote_read_size = 0 :: non_neg_integer(), + + %% Unparsed te header. Used to know if we can send trailers. + %% Note that we can always send trailers to the server. + te :: undefined | binary() +}). + +-type stream() :: #stream{}. + +-type continued_frame() :: + {headers, cow_http2:streamid(), cow_http2:fin(), cow_http2:head_fin(), binary()} | + {push_promise, cow_http2:streamid(), cow_http2:head_fin(), cow_http2:streamid(), binary()}. + +-record(http2_machine, { + %% Whether the HTTP/2 endpoint is a client or a server. + mode :: client | server, + + %% HTTP/2 SETTINGS customization. + opts = #{} :: opts(), + + %% Connection-wide frame processing state. + state = settings :: settings | normal + | {continuation, request | response | trailers | push_promise, continued_frame()}, + + %% Timer for the connection preface. + preface_timer = undefined :: undefined | reference(), + + %% Timer for the ack for a SETTINGS frame we sent. + settings_timer = undefined :: undefined | reference(), + + %% Settings are separate for each endpoint. In addition, settings + %% must be acknowledged before they can be expected to be applied. + local_settings = #{ +% header_table_size => 4096, +% enable_push => true, +% max_concurrent_streams => infinity, + initial_window_size => 65535 +% max_frame_size => 16384 +% max_header_list_size => infinity + } :: map(), + next_settings = undefined :: undefined | map(), + remote_settings = #{ + initial_window_size => 65535 + } :: map(), + + %% Connection-wide flow control window. + local_window = 65535 :: integer(), %% How much we can send. 
+ remote_window = 65535 :: integer(), %% How much we accept to receive. + + %% Stream identifiers. + local_streamid :: pos_integer(), %% The next streamid to be used. + remote_streamid = 0 :: non_neg_integer(), %% The last streamid received. + last_remote_streamid = 16#7fffffff :: non_neg_integer(), %% Used in GOAWAY. + + %% Currently active HTTP/2 streams. Streams may be initiated either + %% by the client or by the server through PUSH_PROMISE frames. + streams = #{} :: #{cow_http2:streamid() => stream()}, + + %% HTTP/2 streams that have recently been reset locally. + %% We are expected to keep receiving additional frames after + %% sending an RST_STREAM. + local_lingering_streams = [] :: [cow_http2:streamid()], + + %% HTTP/2 streams that have recently been reset remotely. + %% We keep a few of these around in order to reject subsequent + %% frames on these streams. + remote_lingering_streams = [] :: [cow_http2:streamid()], + + %% HPACK decoding and encoding state. + decode_state = cow_hpack:init() :: cow_hpack:state(), + encode_state = cow_hpack:init() :: cow_hpack:state() +}). + +-opaque http2_machine() :: #http2_machine{}. +-export_type([http2_machine/0]). + +-type pseudo_headers() :: #{} %% Trailers + | #{ %% Responses. + status := cow_http:status() + } | #{ %% Normal CONNECT requests. + method := binary(), + authority := binary() + } | #{ %% Other requests and extended CONNECT requests. + method := binary(), + scheme := binary(), + authority := binary(), + path := binary(), + protocol => binary() + }. + +%% Returns true when the given StreamID is for a local-initiated stream. +-define(IS_SERVER_LOCAL(StreamID), ((StreamID rem 2) =:= 0)). +-define(IS_CLIENT_LOCAL(StreamID), ((StreamID rem 2) =:= 1)). +-define(IS_LOCAL(Mode, StreamID), ( + ((Mode =:= server) andalso ?IS_SERVER_LOCAL(StreamID)) + orelse + ((Mode =:= client) andalso ?IS_CLIENT_LOCAL(StreamID)) +)). + +-spec init(client | server, opts()) -> {ok, iodata(), http2_machine()}. +init(client, Opts) -> + NextSettings = settings_init(Opts), + client_preface(#http2_machine{ + mode=client, + opts=Opts, + preface_timer=start_timer(preface_timeout, Opts), + settings_timer=start_timer(settings_timeout, Opts), + next_settings=NextSettings, + local_streamid=1 + }); +init(server, Opts) -> + NextSettings = settings_init(Opts), + common_preface(#http2_machine{ + mode=server, + opts=Opts, + preface_timer=start_timer(preface_timeout, Opts), + settings_timer=start_timer(settings_timeout, Opts), + next_settings=NextSettings, + local_streamid=2 + }). + +%% @todo In Cowlib 3.0 we should always include MessageTag in the message. +%% It can be set to 'undefined' if the option is missing. +start_timer(Name, Opts=#{message_tag := MessageTag}) -> + case maps:get(Name, Opts, 5000) of + infinity -> undefined; + Timeout -> erlang:start_timer(Timeout, self(), {?MODULE, MessageTag, Name}) + end; +start_timer(Name, Opts) -> + case maps:get(Name, Opts, 5000) of + infinity -> undefined; + Timeout -> erlang:start_timer(Timeout, self(), {?MODULE, Name}) + end. + +client_preface(State0) -> + {ok, CommonPreface, State} = common_preface(State0), + {ok, [ + <<"PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n">>, + CommonPreface + ], State}. + +%% We send next_settings and use defaults until we get an ack. +%% +%% We also send a WINDOW_UPDATE frame for the connection when +%% the user specified an initial_connection_window_size. 
+common_preface(State=#http2_machine{opts=Opts, next_settings=NextSettings}) -> + case maps:get(initial_connection_window_size, Opts, 65535) of + 65535 -> + {ok, cow_http2:settings(NextSettings), State}; + Size -> + {ok, [ + cow_http2:settings(NextSettings), + cow_http2:window_update(Size - 65535) + ], update_window(Size - 65535, State)} + end. + +settings_init(Opts) -> + S0 = setting_from_opt(#{}, Opts, max_decode_table_size, + header_table_size, 4096), + S1 = setting_from_opt(S0, Opts, max_concurrent_streams, + max_concurrent_streams, infinity), + S2 = setting_from_opt(S1, Opts, initial_stream_window_size, + initial_window_size, 65535), + S3 = setting_from_opt(S2, Opts, max_frame_size_received, + max_frame_size, 16384), + %% @todo max_header_list_size + setting_from_opt(S3, Opts, enable_connect_protocol, + enable_connect_protocol, false). + +setting_from_opt(Settings, Opts, OptName, SettingName, Default) -> + case maps:get(OptName, Opts, Default) of + Default -> Settings; + Value -> Settings#{SettingName => Value} + end. + +-spec init_stream(binary(), State) + -> {ok, cow_http2:streamid(), State} when State::http2_machine(). +init_stream(Method, State=#http2_machine{mode=client, local_streamid=LocalStreamID, + local_settings=#{initial_window_size := RemoteWindow}, + remote_settings=#{initial_window_size := LocalWindow}}) -> + Stream = #stream{id=LocalStreamID, method=Method, + local_window=LocalWindow, remote_window=RemoteWindow}, + {ok, LocalStreamID, stream_store(Stream, State#http2_machine{ + local_streamid=LocalStreamID + 2})}. + +-spec init_upgrade_stream(binary(), State) + -> {ok, cow_http2:streamid(), State} when State::http2_machine(). +init_upgrade_stream(Method, State=#http2_machine{mode=server, remote_streamid=0, + local_settings=#{initial_window_size := RemoteWindow}, + remote_settings=#{initial_window_size := LocalWindow}}) -> + Stream = #stream{id=1, method=Method, + remote=fin, remote_expected_size=0, + local_window=LocalWindow, remote_window=RemoteWindow, te=undefined}, + {ok, 1, stream_store(Stream, State#http2_machine{remote_streamid=1})}. + +-spec frame(cow_http2:frame(), State) + -> {ok, State} + | {ok, {data, cow_http2:streamid(), cow_http2:fin(), binary()}, State} + | {ok, {headers, cow_http2:streamid(), cow_http2:fin(), + cow_http:headers(), pseudo_headers(), non_neg_integer() | undefined}, State} + | {ok, {trailers, cow_http2:streamid(), cow_http:headers()}, State} + | {ok, {rst_stream, cow_http2:streamid(), cow_http2:error()}, State} + | {ok, {push_promise, cow_http2:streamid(), cow_http2:streamid(), + cow_http:headers(), pseudo_headers()}, State} + | {ok, {goaway, cow_http2:streamid(), cow_http2:error(), binary()}, State} + | {send, [{cow_http2:streamid(), cow_http2:fin(), + [{data, iodata()} | #sendfile{} | {trailers, cow_http:headers()}]}], State} + | {error, {stream_error, cow_http2:streamid(), cow_http2:error(), atom()}, State} + | {error, {connection_error, cow_http2:error(), atom()}, State} + when State::http2_machine(). 
+frame(Frame, State=#http2_machine{state=settings, preface_timer=TRef}) -> + ok = case TRef of + undefined -> ok; + _ -> erlang:cancel_timer(TRef, [{async, true}, {info, false}]) + end, + settings_frame(Frame, State#http2_machine{state=normal, preface_timer=undefined}); +frame(Frame, State=#http2_machine{state={continuation, _, _}}) -> + maybe_discard_result(continuation_frame(Frame, State)); +frame(settings_ack, State=#http2_machine{state=normal}) -> + settings_ack_frame(State); +frame(Frame, State=#http2_machine{state=normal}) -> + Result = case element(1, Frame) of + data -> data_frame(Frame, State); + headers -> headers_frame(Frame, State); + priority -> priority_frame(Frame, State); + rst_stream -> rst_stream_frame(Frame, State); + settings -> settings_frame(Frame, State); + push_promise -> push_promise_frame(Frame, State); + ping -> ping_frame(Frame, State); + ping_ack -> ping_ack_frame(Frame, State); + goaway -> goaway_frame(Frame, State); + window_update -> window_update_frame(Frame, State); + continuation -> unexpected_continuation_frame(Frame, State); + _ -> ignored_frame(State) + end, + maybe_discard_result(Result). + +%% RFC7540 6.9. After sending a GOAWAY frame, the sender can discard frames for +%% streams initiated by the receiver with identifiers higher than the identified +%% last stream. However, any frames that alter connection state cannot be +%% completely ignored. For instance, HEADERS, PUSH_PROMISE, and CONTINUATION +%% frames MUST be minimally processed to ensure the state maintained for header +%% compression is consistent. +maybe_discard_result(FrameResult={ok, Result, State=#http2_machine{mode=Mode, + last_remote_streamid=MaxID}}) + when element(1, Result) =/= goaway -> + case element(2, Result) of + StreamID when StreamID > MaxID, not ?IS_LOCAL(Mode, StreamID) -> + {ok, State}; + _StreamID -> + FrameResult + end; +maybe_discard_result(FrameResult) -> + FrameResult. + +%% DATA frame. + +data_frame({data, StreamID, _, _}, State=#http2_machine{mode=Mode, + local_streamid=LocalStreamID, remote_streamid=RemoteStreamID}) + when (?IS_LOCAL(Mode, StreamID) andalso (StreamID >= LocalStreamID)) + orelse ((not ?IS_LOCAL(Mode, StreamID)) andalso (StreamID > RemoteStreamID)) -> + {error, {connection_error, protocol_error, + 'DATA frame received on a stream in idle state. (RFC7540 5.1)'}, + State}; +data_frame({data, _, _, Data}, State=#http2_machine{remote_window=ConnWindow}) + when byte_size(Data) > ConnWindow -> + {error, {connection_error, flow_control_error, + 'DATA frame overflowed the connection flow control window. (RFC7540 6.9, RFC7540 6.9.1)'}, + State}; +data_frame(Frame={data, StreamID, _, Data}, State0=#http2_machine{ + remote_window=ConnWindow, local_lingering_streams=Lingering}) -> + DataLen = byte_size(Data), + State = State0#http2_machine{remote_window=ConnWindow - DataLen}, + case stream_get(StreamID, State) of + #stream{remote_window=StreamWindow} when StreamWindow < DataLen -> + stream_reset(StreamID, State, flow_control_error, + 'DATA frame overflowed the stream flow control window. (RFC7540 6.9, RFC7540 6.9.1)'); + Stream = #stream{remote=nofin} -> + data_frame(Frame, State, Stream, DataLen); + #stream{remote=idle} -> + stream_reset(StreamID, State, protocol_error, + 'DATA frame received before a HEADERS frame. (RFC7540 8.1, RFC7540 8.1.2.6)'); + #stream{remote=fin} -> + stream_reset(StreamID, State, stream_closed, + 'DATA frame received for a half-closed (remote) stream. 
(RFC7540 5.1)'); + undefined -> + %% After we send an RST_STREAM frame and terminate a stream, + %% the remote endpoint might still be sending us some more + %% frames until it can process this RST_STREAM. + case lists:member(StreamID, Lingering) of + true -> + {ok, State}; + false -> + {error, {connection_error, stream_closed, + 'DATA frame received for a closed stream. (RFC7540 5.1)'}, + State} + end + end. + +data_frame(Frame={data, _, IsFin, _}, State0, Stream0=#stream{id=StreamID, + remote_window=StreamWindow, remote_read_size=StreamRead}, DataLen) -> + Stream = Stream0#stream{remote=IsFin, + remote_window=StreamWindow - DataLen, + remote_read_size=StreamRead + DataLen}, + State = stream_store(Stream, State0), + case is_body_size_valid(Stream) of + true -> + {ok, Frame, State}; + false -> + stream_reset(StreamID, State, protocol_error, + 'The total size of DATA frames is different than the content-length. (RFC7540 8.1.2.6)') + end. + +%% It's always valid when no content-length header was specified. +is_body_size_valid(#stream{remote_expected_size=undefined}) -> + true; +%% We didn't finish reading the body but the size is already larger than expected. +is_body_size_valid(#stream{remote=nofin, remote_expected_size=Expected, + remote_read_size=Read}) when Read > Expected -> + false; +is_body_size_valid(#stream{remote=nofin}) -> + true; +is_body_size_valid(#stream{remote=fin, remote_expected_size=Expected, + remote_read_size=Expected}) -> + true; +%% We finished reading the body and the size read is not the one expected. +is_body_size_valid(_) -> + false. + +%% HEADERS frame. +%% +%% We always close the connection when we detect errors before +%% decoding the headers to not waste resources on non-compliant +%% endpoints, making us stricter than the RFC requires. + +%% Convenience record to manipulate the tuple. +%% The order of the fields matter. +-record(headers, { + id :: cow_http2:streamid(), + fin :: cow_http2:fin(), + head :: cow_http2:head_fin(), + data :: binary() +}). + +headers_frame(Frame=#headers{}, State=#http2_machine{mode=Mode}) -> + case Mode of + server -> server_headers_frame(Frame, State); + client -> client_headers_frame(Frame, State) + end; +%% @todo Handle the PRIORITY data, but only if this returns an ok tuple. +%% @todo Do not lose the PRIORITY information if CONTINUATION frames follow. +headers_frame({headers, StreamID, IsFin, IsHeadFin, + _IsExclusive, _DepStreamID, _Weight, HeaderData}, + State=#http2_machine{mode=Mode}) -> + HeadersFrame = #headers{id=StreamID, fin=IsFin, head=IsHeadFin, data=HeaderData}, + case Mode of + server -> server_headers_frame(HeadersFrame, State); + client -> client_headers_frame(HeadersFrame, State) + end. + +%% Reject HEADERS frames with even-numbered streamid. +server_headers_frame(#headers{id=StreamID}, State) + when ?IS_SERVER_LOCAL(StreamID) -> + {error, {connection_error, protocol_error, + 'HEADERS frame received with even-numbered streamid. (RFC7540 5.1.1)'}, + State}; +%% HEADERS frame on an idle stream: new request. +server_headers_frame(Frame=#headers{id=StreamID, head=IsHeadFin}, + State=#http2_machine{mode=server, remote_streamid=RemoteStreamID}) + when StreamID > RemoteStreamID -> + case IsHeadFin of + head_fin -> + headers_decode(Frame, State, request, undefined); + head_nofin -> + {ok, State#http2_machine{state={continuation, request, Frame}}} + end; +%% Either a HEADERS frame received on (half-)closed stream, +%% or a HEADERS frame containing the trailers. 
+server_headers_frame(Frame=#headers{id=StreamID, fin=IsFin, head=IsHeadFin}, State) -> + case stream_get(StreamID, State) of + %% Trailers. + Stream = #stream{remote=nofin} when IsFin =:= fin -> + case IsHeadFin of + head_fin -> + headers_decode(Frame, State, trailers, Stream); + head_nofin -> + {ok, State#http2_machine{state={continuation, trailers, Frame}}} + end; + #stream{remote=nofin} -> + {error, {connection_error, protocol_error, + 'Trailing HEADERS frame received without the END_STREAM flag set. (RFC7540 8.1, RFC7540 8.1.2.6)'}, + State}; + _ -> + {error, {connection_error, stream_closed, + 'HEADERS frame received on a stream in closed or half-closed state. (RFC7540 5.1)'}, + State} + end. + +%% Either a HEADERS frame received on an (half-)closed stream, +%% or a HEADERS frame containing the response or the trailers. +client_headers_frame(Frame=#headers{id=StreamID, fin=IsFin, head=IsHeadFin}, + State=#http2_machine{local_streamid=LocalStreamID, remote_streamid=RemoteStreamID}) + when (?IS_CLIENT_LOCAL(StreamID) andalso (StreamID < LocalStreamID)) + orelse ((not ?IS_CLIENT_LOCAL(StreamID)) andalso (StreamID =< RemoteStreamID)) -> + case stream_get(StreamID, State) of + Stream = #stream{remote=idle} -> + case IsHeadFin of + head_fin -> + headers_decode(Frame, State, response, Stream); + head_nofin -> + {ok, State#http2_machine{state={continuation, response, Frame}}} + end; + Stream = #stream{remote=nofin} when IsFin =:= fin -> + case IsHeadFin of + head_fin -> + headers_decode(Frame, State, trailers, Stream); + head_nofin -> + {ok, State#http2_machine{state={continuation, trailers, Frame}}} + end; + #stream{remote=nofin} -> + {error, {connection_error, protocol_error, + 'Trailing HEADERS frame received without the END_STREAM flag set. (RFC7540 8.1, RFC7540 8.1.2.6)'}, + State}; + _ -> + {error, {connection_error, stream_closed, + 'HEADERS frame received on a stream in closed or half-closed state. (RFC7540 5.1)'}, + State} + end; +%% Reject HEADERS frames received on idle streams. +client_headers_frame(_, State) -> + {error, {connection_error, protocol_error, + 'HEADERS frame received on an idle stream. (RFC7540 5.1.1)'}, + State}. + +headers_decode(Frame=#headers{head=head_fin, data=HeaderData}, + State=#http2_machine{decode_state=DecodeState0}, Type, Stream) -> + try cow_hpack:decode(HeaderData, DecodeState0) of + {Headers, DecodeState} when Type =:= request -> + headers_enforce_concurrency_limit(Frame, + State#http2_machine{decode_state=DecodeState}, Type, Stream, Headers); + {Headers, DecodeState} -> + headers_pseudo_headers(Frame, + State#http2_machine{decode_state=DecodeState}, Type, Stream, Headers) + catch _:_ -> + {error, {connection_error, compression_error, + 'Error while trying to decode HPACK-encoded header block. (RFC7540 4.3)'}, + State} + end. + +headers_enforce_concurrency_limit(Frame=#headers{id=StreamID}, + State=#http2_machine{local_settings=LocalSettings, streams=Streams}, + Type, Stream, Headers) -> + MaxConcurrentStreams = maps:get(max_concurrent_streams, LocalSettings, infinity), + %% Using < is correct because this new stream is not included + %% in the Streams variable yet and so we'll end up with +1 stream. + case map_size(Streams) < MaxConcurrentStreams of + true -> + headers_pseudo_headers(Frame, State, Type, Stream, Headers); + false -> + {error, {stream_error, StreamID, refused_stream, + 'Maximum number of concurrent streams has been reached. (RFC7540 5.1.2)'}, + State} + end. 
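+
+%% For illustration only (the settings value below is assumed, not mandated):
+%% with local_settings => #{max_concurrent_streams => 100} and 99 entries in
+%% the Streams map, map_size(Streams) < 100 holds, the new stream is accepted
+%% and the total becomes exactly 100. With 100 active streams the check fails
+%% and the frame is answered with
+%%   {error, {stream_error, StreamID, refused_stream, _HumanReadable}, State}.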
+ +headers_pseudo_headers(Frame, State=#http2_machine{local_settings=LocalSettings}, + Type, Stream, Headers0) when Type =:= request; Type =:= push_promise -> + IsExtendedConnectEnabled = maps:get(enable_connect_protocol, LocalSettings, false), + case request_pseudo_headers(Headers0, #{}) of + %% Extended CONNECT method (RFC8441). + {ok, PseudoHeaders=#{method := <<"CONNECT">>, scheme := _, + authority := _, path := _, protocol := _}, Headers} + when IsExtendedConnectEnabled -> + headers_regular_headers(Frame, State, Type, Stream, PseudoHeaders, Headers); + {ok, #{method := <<"CONNECT">>, scheme := _, + authority := _, path := _}, _} + when IsExtendedConnectEnabled -> + headers_malformed(Frame, State, + 'The :protocol pseudo-header MUST be sent with an extended CONNECT. (RFC8441 4)'); + {ok, #{protocol := _}, _} -> + headers_malformed(Frame, State, + 'The :protocol pseudo-header is only defined for the extended CONNECT. (RFC8441 4)'); + %% Normal CONNECT (no scheme/path). + {ok, PseudoHeaders=#{method := <<"CONNECT">>, authority := _}, Headers} + when map_size(PseudoHeaders) =:= 2 -> + headers_regular_headers(Frame, State, Type, Stream, PseudoHeaders, Headers); + {ok, #{method := <<"CONNECT">>}, _} -> + headers_malformed(Frame, State, + 'CONNECT requests only use the :method and :authority pseudo-headers. (RFC7540 8.3)'); + %% Other requests. + {ok, PseudoHeaders=#{method := _, scheme := _, path := _}, Headers} -> + headers_regular_headers(Frame, State, Type, Stream, PseudoHeaders, Headers); + {ok, _, _} -> + headers_malformed(Frame, State, + 'A required pseudo-header was not found. (RFC7540 8.1.2.3)'); + {error, HumanReadable} -> + headers_malformed(Frame, State, HumanReadable) + end; +headers_pseudo_headers(Frame=#headers{id=StreamID}, + State, Type=response, Stream, Headers0) -> + case response_pseudo_headers(Headers0, #{}) of + {ok, PseudoHeaders=#{status := _}, Headers} -> + headers_regular_headers(Frame, State, Type, Stream, PseudoHeaders, Headers); + {ok, _, _} -> + stream_reset(StreamID, State, protocol_error, + 'A required pseudo-header was not found. (RFC7540 8.1.2.4)'); + {error, HumanReadable} -> + stream_reset(StreamID, State, protocol_error, HumanReadable) + end; +headers_pseudo_headers(Frame=#headers{id=StreamID}, + State, Type=trailers, Stream, Headers) -> + case trailers_contain_pseudo_headers(Headers) of + false -> + headers_regular_headers(Frame, State, Type, Stream, #{}, Headers); + true -> + stream_reset(StreamID, State, protocol_error, + 'Trailer header blocks must not contain pseudo-headers. (RFC7540 8.1.2.1)') + end. + +headers_malformed(#headers{id=StreamID}, State, HumanReadable) -> + {error, {stream_error, StreamID, protocol_error, HumanReadable}, State}. + +request_pseudo_headers([{<<":method">>, _}|_], #{method := _}) -> + {error, 'Multiple :method pseudo-headers were found. (RFC7540 8.1.2.3)'}; +request_pseudo_headers([{<<":method">>, Method}|Tail], PseudoHeaders) -> + request_pseudo_headers(Tail, PseudoHeaders#{method => Method}); +request_pseudo_headers([{<<":scheme">>, _}|_], #{scheme := _}) -> + {error, 'Multiple :scheme pseudo-headers were found. (RFC7540 8.1.2.3)'}; +request_pseudo_headers([{<<":scheme">>, Scheme}|Tail], PseudoHeaders) -> + request_pseudo_headers(Tail, PseudoHeaders#{scheme => Scheme}); +request_pseudo_headers([{<<":authority">>, _}|_], #{authority := _}) -> + {error, 'Multiple :authority pseudo-headers were found. 
(RFC7540 8.1.2.3)'}; +request_pseudo_headers([{<<":authority">>, Authority}|Tail], PseudoHeaders) -> + request_pseudo_headers(Tail, PseudoHeaders#{authority => Authority}); +request_pseudo_headers([{<<":path">>, _}|_], #{path := _}) -> + {error, 'Multiple :path pseudo-headers were found. (RFC7540 8.1.2.3)'}; +request_pseudo_headers([{<<":path">>, Path}|Tail], PseudoHeaders) -> + request_pseudo_headers(Tail, PseudoHeaders#{path => Path}); +request_pseudo_headers([{<<":protocol">>, _}|_], #{protocol := _}) -> + {error, 'Multiple :protocol pseudo-headers were found. (RFC7540 8.1.2.3)'}; +request_pseudo_headers([{<<":protocol">>, Protocol}|Tail], PseudoHeaders) -> + request_pseudo_headers(Tail, PseudoHeaders#{protocol => Protocol}); +request_pseudo_headers([{<<":", _/bits>>, _}|_], _) -> + {error, 'An unknown or invalid pseudo-header was found. (RFC7540 8.1.2.1)'}; +request_pseudo_headers(Headers, PseudoHeaders) -> + {ok, PseudoHeaders, Headers}. + +response_pseudo_headers([{<<":status">>, _}|_], #{status := _}) -> + {error, 'Multiple :status pseudo-headers were found. (RFC7540 8.1.2.3)'}; +response_pseudo_headers([{<<":status">>, Status}|Tail], PseudoHeaders) -> + try cow_http:status_to_integer(Status) of + IntStatus -> + response_pseudo_headers(Tail, PseudoHeaders#{status => IntStatus}) + catch _:_ -> + {error, 'The :status pseudo-header value is invalid. (RFC7540 8.1.2.4)'} + end; +response_pseudo_headers([{<<":", _/bits>>, _}|_], _) -> + {error, 'An unknown or invalid pseudo-header was found. (RFC7540 8.1.2.1)'}; +response_pseudo_headers(Headers, PseudoHeaders) -> + {ok, PseudoHeaders, Headers}. + +trailers_contain_pseudo_headers([]) -> + false; +trailers_contain_pseudo_headers([{<<":", _/bits>>, _}|_]) -> + true; +trailers_contain_pseudo_headers([_|Tail]) -> + trailers_contain_pseudo_headers(Tail). + +%% Rejecting invalid regular headers might be a bit too strong for clients. +headers_regular_headers(Frame=#headers{id=StreamID}, + State, Type, Stream, PseudoHeaders, Headers) -> + case regular_headers(Headers, Type) of + ok when Type =:= request -> + request_expected_size(Frame, State, Type, Stream, PseudoHeaders, Headers); + ok when Type =:= push_promise -> + push_promise_frame(Frame, State, Stream, PseudoHeaders, Headers); + ok when Type =:= response -> + response_expected_size(Frame, State, Type, Stream, PseudoHeaders, Headers); + ok when Type =:= trailers -> + trailers_frame(Frame, State, Stream, Headers); + {error, HumanReadable} when Type =:= request -> + headers_malformed(Frame, State, HumanReadable); + {error, HumanReadable} -> + stream_reset(StreamID, State, protocol_error, HumanReadable) + end. + +regular_headers([{<<>>, _}|_], _) -> + {error, 'Empty header names are not valid regular headers. (CVE-2019-9516)'}; +regular_headers([{<<":", _/bits>>, _}|_], _) -> + {error, 'Pseudo-headers were found after regular headers. (RFC7540 8.1.2.1)'}; +regular_headers([{<<"connection">>, _}|_], _) -> + {error, 'The connection header is not allowed. (RFC7540 8.1.2.2)'}; +regular_headers([{<<"keep-alive">>, _}|_], _) -> + {error, 'The keep-alive header is not allowed. (RFC7540 8.1.2.2)'}; +regular_headers([{<<"proxy-authenticate">>, _}|_], _) -> + {error, 'The proxy-authenticate header is not allowed. (RFC7540 8.1.2.2)'}; +regular_headers([{<<"proxy-authorization">>, _}|_], _) -> + {error, 'The proxy-authorization header is not allowed. (RFC7540 8.1.2.2)'}; +regular_headers([{<<"transfer-encoding">>, _}|_], _) -> + {error, 'The transfer-encoding header is not allowed. 
(RFC7540 8.1.2.2)'}; +regular_headers([{<<"upgrade">>, _}|_], _) -> + {error, 'The upgrade header is not allowed. (RFC7540 8.1.2.2)'}; +regular_headers([{<<"te">>, Value}|_], request) when Value =/= <<"trailers">> -> + {error, 'The te header with a value other than "trailers" is not allowed. (RFC7540 8.1.2.2)'}; +regular_headers([{<<"te">>, _}|_], Type) when Type =/= request -> + {error, 'The te header is only allowed in request headers. (RFC7540 8.1.2.2)'}; +regular_headers([{Name, _}|Tail], Type) -> + Pattern = [ + <<$A>>, <<$B>>, <<$C>>, <<$D>>, <<$E>>, <<$F>>, <<$G>>, <<$H>>, <<$I>>, + <<$J>>, <<$K>>, <<$L>>, <<$M>>, <<$N>>, <<$O>>, <<$P>>, <<$Q>>, <<$R>>, + <<$S>>, <<$T>>, <<$U>>, <<$V>>, <<$W>>, <<$X>>, <<$Y>>, <<$Z>> + ], + case binary:match(Name, Pattern) of + nomatch -> regular_headers(Tail, Type); + _ -> {error, 'Header names must be lowercase. (RFC7540 8.1.2)'} + end; +regular_headers([], _) -> + ok. + +request_expected_size(Frame=#headers{fin=IsFin}, State, Type, Stream, PseudoHeaders, Headers) -> + case [CL || {<<"content-length">>, CL} <- Headers] of + [] when IsFin =:= fin -> + headers_frame(Frame, State, Type, Stream, PseudoHeaders, Headers, 0); + [] -> + headers_frame(Frame, State, Type, Stream, PseudoHeaders, Headers, undefined); + [<<"0">>] when IsFin =:= fin -> + headers_frame(Frame, State, Type, Stream, PseudoHeaders, Headers, 0); + [_] when IsFin =:= fin -> + headers_malformed(Frame, State, + 'HEADERS frame with the END_STREAM flag contains a non-zero content-length. (RFC7540 8.1.2.6)'); + [BinLen] -> + headers_parse_expected_size(Frame, State, Type, Stream, + PseudoHeaders, Headers, BinLen); + _ -> + headers_malformed(Frame, State, + 'Multiple content-length headers were received. (RFC7230 3.3.2)') + end. + +response_expected_size(Frame=#headers{id=StreamID, fin=IsFin}, State, Type, + Stream=#stream{method=Method}, PseudoHeaders=#{status := Status}, Headers) -> + case [CL || {<<"content-length">>, CL} <- Headers] of + [] when IsFin =:= fin -> + headers_frame(Frame, State, Type, Stream, PseudoHeaders, Headers, 0); + [] -> + headers_frame(Frame, State, Type, Stream, PseudoHeaders, Headers, undefined); + [_] when Status >= 100, Status =< 199 -> + stream_reset(StreamID, State, protocol_error, + 'Content-length header received in a 1xx response. (RFC7230 3.3.2)'); + [_] when Status =:= 204 -> + stream_reset(StreamID, State, protocol_error, + 'Content-length header received in a 204 response. (RFC7230 3.3.2)'); + [_] when Status >= 200, Status =< 299, Method =:= <<"CONNECT">> -> + stream_reset(StreamID, State, protocol_error, + 'Content-length header received in a 2xx response to a CONNECT request. (RFC7230 3.3.2).'); + %% Responses to HEAD requests, and 304 responses may contain + %% a content-length header that must be ignored. (RFC7230 3.3.2) + [_] when Method =:= <<"HEAD">> -> + headers_frame(Frame, State, Type, Stream, PseudoHeaders, Headers, 0); + [_] when Status =:= 304 -> + headers_frame(Frame, State, Type, Stream, PseudoHeaders, Headers, 0); + [<<"0">>] when IsFin =:= fin -> + headers_frame(Frame, State, Type, Stream, PseudoHeaders, Headers, 0); + [_] when IsFin =:= fin -> + stream_reset(StreamID, State, protocol_error, + 'HEADERS frame with the END_STREAM flag contains a non-zero content-length. (RFC7540 8.1.2.6)'); + [BinLen] -> + headers_parse_expected_size(Frame, State, Type, Stream, + PseudoHeaders, Headers, BinLen); + _ -> + stream_reset(StreamID, State, protocol_error, + 'Multiple content-length headers were received. (RFC7230 3.3.2)') + end. 
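+
+%% A rough illustration of the clauses above (header values are made up):
+%% a 204 response carrying a content-length is rejected with a stream reset;
+%% a response to a HEAD request may carry one, but the expected body size is
+%% forced to 0; and a plain 200 response with {<<"content-length">>, <<"12">>}
+%% goes through headers_parse_expected_size/7, where the value parses to 12.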
+ +headers_parse_expected_size(Frame=#headers{id=StreamID}, + State, Type, Stream, PseudoHeaders, Headers, BinLen) -> + try cow_http_hd:parse_content_length(BinLen) of + Len -> + headers_frame(Frame, State, Type, Stream, PseudoHeaders, Headers, Len) + catch + _:_ -> + HumanReadable = 'The content-length header is invalid. (RFC7230 3.3.2)', + case Type of + request -> headers_malformed(Frame, State, HumanReadable); + response -> stream_reset(StreamID, State, protocol_error, HumanReadable) + end + end. + +headers_frame(#headers{id=StreamID, fin=IsFin}, State0=#http2_machine{ + local_settings=#{initial_window_size := RemoteWindow}, + remote_settings=#{initial_window_size := LocalWindow}}, + Type, Stream0, PseudoHeaders, Headers, Len) -> + {Stream, State1} = case Type of + request -> + TE = case lists:keyfind(<<"te">>, 1, Headers) of + {_, TE0} -> TE0; + false -> undefined + end, + {#stream{id=StreamID, method=maps:get(method, PseudoHeaders), + remote=IsFin, remote_expected_size=Len, + local_window=LocalWindow, remote_window=RemoteWindow, te=TE}, + State0#http2_machine{remote_streamid=StreamID}}; + response -> + Stream1 = case PseudoHeaders of + #{status := Status} when Status >= 100, Status =< 199 -> Stream0; + _ -> Stream0#stream{remote=IsFin, remote_expected_size=Len} + end, + {Stream1, State0} + end, + State = stream_store(Stream, State1), + {ok, {headers, StreamID, IsFin, Headers, PseudoHeaders, Len}, State}. + +trailers_frame(#headers{id=StreamID}, State0, Stream0, Headers) -> + Stream = Stream0#stream{remote=fin}, + State = stream_store(Stream, State0), + case is_body_size_valid(Stream) of + true -> + {ok, {trailers, StreamID, Headers}, State}; + false -> + stream_reset(StreamID, State, protocol_error, + 'The total size of DATA frames is different than the content-length. (RFC7540 8.1.2.6)') + end. + +%% PRIORITY frame. +%% +%% @todo Handle PRIORITY frames. + +priority_frame(_Frame, State) -> + {ok, State}. + +%% RST_STREAM frame. + +rst_stream_frame({rst_stream, StreamID, _}, State=#http2_machine{mode=Mode, + local_streamid=LocalStreamID, remote_streamid=RemoteStreamID}) + when (?IS_LOCAL(Mode, StreamID) andalso (StreamID >= LocalStreamID)) + orelse ((not ?IS_LOCAL(Mode, StreamID)) andalso (StreamID > RemoteStreamID)) -> + {error, {connection_error, protocol_error, + 'RST_STREAM frame received on a stream in idle state. (RFC7540 5.1)'}, + State}; +rst_stream_frame({rst_stream, StreamID, Reason}, State=#http2_machine{ + streams=Streams0, remote_lingering_streams=Lingering0}) -> + Streams = maps:remove(StreamID, Streams0), + %% We only keep up to 10 streams in this state. @todo Make it configurable? + Lingering = [StreamID|lists:sublist(Lingering0, 10 - 1)], + {ok, {rst_stream, StreamID, Reason}, + State#http2_machine{streams=Streams, remote_lingering_streams=Lingering}}. + +%% SETTINGS frame. 
+ +settings_frame({settings, Settings}, State0=#http2_machine{ + opts=Opts, remote_settings=Settings0}) -> + State1 = State0#http2_machine{remote_settings=maps:merge(Settings0, Settings)}, + State2 = maps:fold(fun + (header_table_size, NewSize, State=#http2_machine{encode_state=EncodeState0}) -> + MaxSize = maps:get(max_encode_table_size, Opts, 4096), + EncodeState = cow_hpack:set_max_size(min(NewSize, MaxSize), EncodeState0), + State#http2_machine{encode_state=EncodeState}; + (initial_window_size, NewWindowSize, State) -> + OldWindowSize = maps:get(initial_window_size, Settings0, 65535), + streams_update_local_window(State, NewWindowSize - OldWindowSize); + (_, _, State) -> + State + end, State1, Settings), + case Settings of + #{initial_window_size := _} -> send_data(State2); + _ -> {ok, State2} + end; +%% We expect to receive a SETTINGS frame as part of the preface. +settings_frame(_F, State=#http2_machine{mode=server}) -> + {error, {connection_error, protocol_error, + 'The preface sequence must be followed by a SETTINGS frame. (RFC7540 3.5)'}, + State}; +settings_frame(_F, State) -> + {error, {connection_error, protocol_error, + 'The preface must begin with a SETTINGS frame. (RFC7540 3.5)'}, + State}. + +%% When SETTINGS_INITIAL_WINDOW_SIZE changes we need to update +%% the local stream windows for all active streams and perhaps +%% resume sending data. +streams_update_local_window(State=#http2_machine{streams=Streams0}, Increment) -> + Streams = maps:map(fun(_, S=#stream{local_window=StreamWindow}) -> + S#stream{local_window=StreamWindow + Increment} + end, Streams0), + State#http2_machine{streams=Streams}. + +%% Ack for a previously sent SETTINGS frame. + +settings_ack_frame(State0=#http2_machine{settings_timer=TRef, + local_settings=Local0, next_settings=NextSettings}) -> + ok = case TRef of + undefined -> ok; + _ -> erlang:cancel_timer(TRef, [{async, true}, {info, false}]) + end, + Local = maps:merge(Local0, NextSettings), + State1 = State0#http2_machine{settings_timer=undefined, + local_settings=Local, next_settings=#{}}, + {ok, maps:fold(fun + (header_table_size, MaxSize, State=#http2_machine{decode_state=DecodeState0}) -> + DecodeState = cow_hpack:set_max_size(MaxSize, DecodeState0), + State#http2_machine{decode_state=DecodeState}; + (initial_window_size, NewWindowSize, State) -> + OldWindowSize = maps:get(initial_window_size, Local0, 65535), + streams_update_remote_window(State, NewWindowSize - OldWindowSize); + (_, _, State) -> + State + end, State1, NextSettings)}. + +%% When we receive an ack to a SETTINGS frame we sent we need to update +%% the remote stream windows for all active streams. +streams_update_remote_window(State=#http2_machine{streams=Streams0}, Increment) -> + Streams = maps:map(fun(_, S=#stream{remote_window=StreamWindow}) -> + S#stream{remote_window=StreamWindow + Increment} + end, Streams0), + State#http2_machine{streams=Streams}. + +%% PUSH_PROMISE frame. + +%% Convenience record to manipulate the tuple. +%% The order of the fields matter. +-record(push_promise, { + id :: cow_http2:streamid(), + head :: cow_http2:head_fin(), + promised_id :: cow_http2:streamid(), + data :: binary() +}). + +push_promise_frame(_, State=#http2_machine{mode=server}) -> + {error, {connection_error, protocol_error, + 'PUSH_PROMISE frames MUST NOT be sent by the client. 
(RFC7540 6.6)'}, + State}; +push_promise_frame(_, State=#http2_machine{local_settings=#{enable_push := false}}) -> + {error, {connection_error, protocol_error, + 'PUSH_PROMISE frame received despite SETTINGS_ENABLE_PUSH set to 0. (RFC7540 6.6)'}, + State}; +push_promise_frame(#push_promise{promised_id=PromisedStreamID}, + State=#http2_machine{remote_streamid=RemoteStreamID}) + when PromisedStreamID =< RemoteStreamID -> + {error, {connection_error, protocol_error, + 'PUSH_PROMISE frame received for a promised stream in closed or half-closed state. (RFC7540 5.1, RFC7540 6.6)'}, + State}; +push_promise_frame(#push_promise{id=StreamID}, State) + when not ?IS_CLIENT_LOCAL(StreamID) -> + {error, {connection_error, protocol_error, + 'PUSH_PROMISE frame received on a server-initiated stream. (RFC7540 6.6)'}, + State}; +push_promise_frame(Frame=#push_promise{id=StreamID, head=IsHeadFin, + promised_id=PromisedStreamID, data=HeaderData}, State) -> + case stream_get(StreamID, State) of + Stream=#stream{remote=idle} -> + case IsHeadFin of + head_fin -> + headers_decode(#headers{id=PromisedStreamID, + fin=fin, head=IsHeadFin, data=HeaderData}, + State, push_promise, Stream); + head_nofin -> + {ok, State#http2_machine{state={continuation, push_promise, Frame}}} + end; + _ -> +%% @todo Check if the stream is lingering. If it is, decode the frame +%% and do what? That's the big question and why it's not implemented yet. +% However, an endpoint that +% has sent RST_STREAM on the associated stream MUST handle PUSH_PROMISE +% frames that might have been created before the RST_STREAM frame is +% received and processed. (RFC7540 6.6) + {error, {connection_error, stream_closed, + 'PUSH_PROMISE frame received on a stream in closed or half-closed state. (RFC7540 5.1, RFC7540 6.6)'}, + State} + end. + +push_promise_frame(#headers{id=PromisedStreamID}, + State0=#http2_machine{ + local_settings=#{initial_window_size := RemoteWindow}, + remote_settings=#{initial_window_size := LocalWindow}}, + #stream{id=StreamID}, PseudoHeaders=#{method := Method}, Headers) -> + TE = case lists:keyfind(<<"te">>, 1, Headers) of + {_, TE0} -> TE0; + false -> undefined + end, + PromisedStream = #stream{id=PromisedStreamID, method=Method, + local=fin, local_window=LocalWindow, + remote_window=RemoteWindow, te=TE}, + State = stream_store(PromisedStream, + State0#http2_machine{remote_streamid=PromisedStreamID}), + {ok, {push_promise, StreamID, PromisedStreamID, Headers, PseudoHeaders}, State}. + +%% PING frame. + +ping_frame({ping, _}, State) -> + {ok, State}. + +%% Ack for a previously sent PING frame. +%% +%% @todo Might want to check contents but probably a waste of time. + +ping_ack_frame({ping_ack, _}, State) -> + {ok, State}. + +%% GOAWAY frame. + +goaway_frame(Frame={goaway, _, _, _}, State) -> + {ok, Frame, State}. + +%% WINDOW_UPDATE frame. + +%% Connection-wide WINDOW_UPDATE frame. +window_update_frame({window_update, Increment}, State=#http2_machine{local_window=ConnWindow}) + when ConnWindow + Increment > 16#7fffffff -> + {error, {connection_error, flow_control_error, + 'The flow control window must not be greater than 2^31-1. (RFC7540 6.9.1)'}, + State}; +window_update_frame({window_update, Increment}, State=#http2_machine{local_window=ConnWindow}) -> + send_data(State#http2_machine{local_window=ConnWindow + Increment}); +%% Stream-specific WINDOW_UPDATE frame. 
+window_update_frame({window_update, StreamID, _}, State=#http2_machine{mode=Mode,
+ local_streamid=LocalStreamID, remote_streamid=RemoteStreamID})
+ when (?IS_LOCAL(Mode, StreamID) andalso (StreamID >= LocalStreamID))
+ orelse ((not ?IS_LOCAL(Mode, StreamID)) andalso (StreamID > RemoteStreamID)) ->
+ {error, {connection_error, protocol_error,
+ 'WINDOW_UPDATE frame received on a stream in idle state. (RFC7540 5.1)'},
+ State};
+window_update_frame({window_update, StreamID, Increment},
+ State0=#http2_machine{remote_lingering_streams=Lingering}) ->
+ case stream_get(StreamID, State0) of
+ #stream{local_window=StreamWindow} when StreamWindow + Increment > 16#7fffffff ->
+ stream_reset(StreamID, State0, flow_control_error,
+ 'The flow control window must not be greater than 2^31-1. (RFC7540 6.9.1)');
+ Stream0 = #stream{local_window=StreamWindow} ->
+ send_data(Stream0#stream{local_window=StreamWindow + Increment}, State0);
+ undefined ->
+ %% WINDOW_UPDATE frames may be received for a short period of time
+ %% after a stream is closed. They must be ignored.
+ case lists:member(StreamID, Lingering) of
+ false -> {ok, State0};
+ true -> stream_reset(StreamID, State0, stream_closed,
+ 'WINDOW_UPDATE frame received after the stream was reset. (RFC7540 5.1)')
+ end
+ end.
+
+%% CONTINUATION frame.
+
+%% Convenience record to manipulate the tuple.
+%% The order of the fields matter.
+-record(continuation, {
+ id :: cow_http2:streamid(),
+ head :: cow_http2:head_fin(),
+ data :: binary()
+}).
+
+unexpected_continuation_frame(#continuation{}, State) ->
+ {error, {connection_error, protocol_error,
+ 'CONTINUATION frames MUST be preceded by a HEADERS or PUSH_PROMISE frame. (RFC7540 6.10)'},
+ State}.
+
+continuation_frame(#continuation{id=StreamID, head=head_fin, data=HeaderFragment1},
+ State=#http2_machine{state={continuation, Type,
+ Frame=#headers{id=StreamID, data=HeaderFragment0}}}) ->
+ HeaderData = <<HeaderFragment0/binary, HeaderFragment1/binary>>,
+ headers_decode(Frame#headers{head=head_fin, data=HeaderData},
+ State#http2_machine{state=normal}, Type, stream_get(StreamID, State));
+continuation_frame(#continuation{id=StreamID, head=head_fin, data=HeaderFragment1},
+ State=#http2_machine{state={continuation, Type, #push_promise{
+ id=StreamID, promised_id=PromisedStreamID, data=HeaderFragment0}}}) ->
+ HeaderData = <<HeaderFragment0/binary, HeaderFragment1/binary>>,
+ headers_decode(#headers{id=PromisedStreamID, fin=fin, head=head_fin, data=HeaderData},
+ State#http2_machine{state=normal}, Type, undefined);
+continuation_frame(#continuation{id=StreamID, data=HeaderFragment1},
+ State=#http2_machine{state={continuation, Type, ContinuedFrame0}})
+ when element(2, ContinuedFrame0) =:= StreamID ->
+ ContinuedFrame = case ContinuedFrame0 of
+ #headers{data=HeaderFragment0} ->
+ HeaderData = <<HeaderFragment0/binary, HeaderFragment1/binary>>,
+ ContinuedFrame0#headers{data=HeaderData};
+ #push_promise{data=HeaderFragment0} ->
+ HeaderData = <<HeaderFragment0/binary, HeaderFragment1/binary>>,
+ ContinuedFrame0#push_promise{data=HeaderData}
+ end,
+ {ok, State#http2_machine{state={continuation, Type, ContinuedFrame}}};
+continuation_frame(_F, State) ->
+ {error, {connection_error, protocol_error,
+ 'An invalid frame was received in the middle of a header block. (RFC7540 6.2)'},
+ State}.
+
+%% Ignored frames.
+
+-spec ignored_frame(State)
+ -> {ok, State}
+ | {error, {connection_error, protocol_error, atom()}, State}
+ when State::http2_machine().
+ignored_frame(State=#http2_machine{state={continuation, _, _}}) ->
+ {error, {connection_error, protocol_error,
+ 'An invalid frame was received in the middle of a header block. 
(RFC7540 6.2)'}, + State}; +%% @todo It might be useful to error out when we receive +%% too many unknown frames. (RFC7540 10.5) +ignored_frame(State) -> + {ok, State}. + +%% Timeouts. + +-spec timeout(preface_timeout | settings_timeout, reference(), State) + -> {ok, State} + | {error, {connection_error, cow_http2:error(), atom()}, State} + when State::http2_machine(). +timeout(preface_timeout, TRef, State=#http2_machine{preface_timer=TRef}) -> + {error, {connection_error, protocol_error, + 'The preface was not received in a reasonable amount of time.'}, + State}; +timeout(settings_timeout, TRef, State=#http2_machine{settings_timer=TRef}) -> + {error, {connection_error, settings_timeout, + 'The SETTINGS ack was not received within the configured time. (RFC7540 6.5.3)'}, + State}; +timeout(_, _, State) -> + {ok, State}. + +%% Functions for sending a message header or body. Note that +%% this module does not send data directly, instead it returns +%% a value that can then be used to send the frames. + +-spec prepare_headers(cow_http2:streamid(), State, idle | cow_http2:fin(), + pseudo_headers(), cow_http:headers()) + -> {ok, cow_http2:fin(), iodata(), State} when State::http2_machine(). +prepare_headers(StreamID, State=#http2_machine{encode_state=EncodeState0}, + IsFin0, PseudoHeaders, Headers0) -> + Stream = #stream{method=Method, local=idle} = stream_get(StreamID, State), + IsFin = case {IsFin0, Method} of + {idle, _} -> nofin; + {_, <<"HEAD">>} -> fin; + _ -> IsFin0 + end, + Headers = merge_pseudo_headers(PseudoHeaders, remove_http11_headers(Headers0)), + {HeaderBlock, EncodeState} = cow_hpack:encode(Headers, EncodeState0), + {ok, IsFin, HeaderBlock, stream_store(Stream#stream{local=IsFin0}, + State#http2_machine{encode_state=EncodeState})}. + +-spec prepare_push_promise(cow_http2:streamid(), State, pseudo_headers(), cow_http:headers()) + -> {ok, cow_http2:streamid(), iodata(), State} + | {error, no_push} when State::http2_machine(). +prepare_push_promise(_, #http2_machine{remote_settings=#{enable_push := false}}, _, _) -> + {error, no_push}; +prepare_push_promise(StreamID, State=#http2_machine{encode_state=EncodeState0, + local_settings=#{initial_window_size := RemoteWindow}, + remote_settings=#{initial_window_size := LocalWindow}, + local_streamid=LocalStreamID}, PseudoHeaders, Headers0) -> + #stream{local=idle} = stream_get(StreamID, State), + TE = case lists:keyfind(<<"te">>, 1, Headers0) of + {_, TE0} -> TE0; + false -> undefined + end, + Headers = merge_pseudo_headers(PseudoHeaders, remove_http11_headers(Headers0)), + {HeaderBlock, EncodeState} = cow_hpack:encode(Headers, EncodeState0), + {ok, LocalStreamID, HeaderBlock, stream_store( + #stream{id=LocalStreamID, method=maps:get(method, PseudoHeaders), + remote=fin, remote_expected_size=0, + local_window=LocalWindow, remote_window=RemoteWindow, te=TE}, + State#http2_machine{encode_state=EncodeState, local_streamid=LocalStreamID + 2})}. + +remove_http11_headers(Headers) -> + RemoveHeaders0 = [ + <<"keep-alive">>, + <<"proxy-connection">>, + <<"transfer-encoding">>, + <<"upgrade">> + ], + RemoveHeaders = case lists:keyfind(<<"connection">>, 1, Headers) of + false -> + RemoveHeaders0; + {_, ConnHd} -> + %% We do not need to worry about any "close" header because + %% that header name is reserved. + Connection = cow_http_hd:parse_connection(ConnHd), + Connection ++ [<<"connection">>|RemoveHeaders0] + end, + lists:filter(fun({Name, _}) -> + not lists:member(Name, RemoveHeaders) + end, Headers). 
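+
+%% A minimal sketch of the scrubbing above, with made-up input: given
+%% [{<<"connection">>, <<"upgrade">>}, {<<"upgrade">>, <<"websocket">>},
+%%  {<<"content-type">>, <<"text/plain">>}], parse_connection/1 nominates
+%% <<"upgrade">>, so the connection header and the header it names are dropped
+%% together with the static HTTP/1.1-only list, leaving only
+%% [{<<"content-type">>, <<"text/plain">>}] to be HPACK-encoded.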
+ +merge_pseudo_headers(PseudoHeaders, Headers0) -> + lists:foldl(fun + ({status, Status}, Acc) when is_integer(Status) -> + [{<<":status">>, integer_to_binary(Status)}|Acc]; + ({Name, Value}, Acc) -> + [{iolist_to_binary([$:, atom_to_binary(Name, latin1)]), Value}|Acc] + end, Headers0, maps:to_list(PseudoHeaders)). + +-spec prepare_trailers(cow_http2:streamid(), State, cow_http:headers()) + -> {ok, iodata(), State} when State::http2_machine(). +prepare_trailers(StreamID, State=#http2_machine{encode_state=EncodeState0}, Trailers) -> + Stream = #stream{local=nofin} = stream_get(StreamID, State), + {HeaderBlock, EncodeState} = cow_hpack:encode(Trailers, EncodeState0), + {ok, HeaderBlock, stream_store(Stream#stream{local=fin}, + State#http2_machine{encode_state=EncodeState})}. + +-spec send_or_queue_data(cow_http2:streamid(), State, cow_http2:fin(), DataOrFileOrTrailers) + -> {ok, State} + | {send, [{cow_http2:streamid(), cow_http2:fin(), [DataOrFileOrTrailers]}], State} + when State::http2_machine(), DataOrFileOrTrailers:: + {data, iodata()} | #sendfile{} | {trailers, cow_http:headers()}. +send_or_queue_data(StreamID, State0=#http2_machine{opts=Opts, local_window=ConnWindow}, + IsFin0, DataOrFileOrTrailers0) -> + %% @todo Probably just ignore if the method was HEAD. + Stream0 = #stream{ + local=nofin, + local_window=StreamWindow, + local_buffer_size=BufferSize, + te=TE0 + } = stream_get(StreamID, State0), + DataOrFileOrTrailers = case DataOrFileOrTrailers0 of + {trailers, _} -> + %% We only accept TE headers containing exactly "trailers" (RFC7540 8.1.2.1). + TE = try cow_http_hd:parse_te(TE0) of + {trailers, []} -> trailers; + _ -> no_trailers + catch _:_ -> + %% If we can't parse the TE header, assume we can't send trailers. + no_trailers + end, + case TE of + trailers -> + DataOrFileOrTrailers0; + no_trailers -> + {data, <<>>} + end; + _ -> + DataOrFileOrTrailers0 + end, + SendSize = case DataOrFileOrTrailers of + {data, D} -> BufferSize + iolist_size(D); + #sendfile{bytes=B} -> BufferSize + B; + {trailers, _} -> 0 + end, + MinSendSize = maps:get(stream_window_data_threshold, Opts, 16384), + if + %% If we cannot send the data all at once and the window + %% is smaller than we are willing to send at a minimum, + %% we queue the data directly. + (StreamWindow < MinSendSize) + andalso ((StreamWindow < SendSize) orelse (ConnWindow < SendSize)) -> + {ok, stream_store(queue_data(Stream0, IsFin0, DataOrFileOrTrailers, in), State0)}; + true -> + case send_or_queue_data(Stream0, State0, [], IsFin0, DataOrFileOrTrailers, in) of + {ok, Stream, State, []} -> + {ok, stream_store(Stream, State)}; + {ok, Stream=#stream{local=IsFin}, State, SendData} -> + {send, [{StreamID, IsFin, lists:reverse(SendData)}], stream_store(Stream, State)} + end + end. + +%% Internal data sending/queuing functions. + +%% @todo Should we ever want to implement the PRIORITY mechanism, +%% this would be the place to do it. Right now, we just go over +%% all streams and send what we can until either everything is +%% sent or we run out of space in the window. +send_data(State0=#http2_machine{streams=Streams0}) -> + Iterator = maps:iterator(Streams0), + case send_data_for_all_streams(maps:next(Iterator), Streams0, State0, []) of + {ok, Streams, State, []} -> + {ok, State#http2_machine{streams=Streams}}; + {ok, Streams, State, Send} -> + {send, Send, State#http2_machine{streams=Streams}} + end. 
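+
+%% For example (shape only; stream ids and payloads are made up): when the
+%% refreshed windows let two streams make progress, the result may look like
+%%   {send, [{3, nofin, [{data, <<"chunk">>}]},
+%%           {1, fin, [{data, <<"last chunk">>}]}], State}
+%% and the caller is expected to encode and write those DATA frames; when no
+%% stream can send anything, the plain {ok, State} tuple is returned instead.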
+ +send_data_for_all_streams(none, Streams, State, Send) -> + {ok, Streams, State, Send}; +%% While technically we should never get < 0 here, let's be on the safe side. +send_data_for_all_streams(_, Streams, State=#http2_machine{local_window=ConnWindow}, Send) + when ConnWindow =< 0 -> + {ok, Streams, State, Send}; +%% We rely on send_data_for_one_stream/3 to do all the necessary checks about the stream. +send_data_for_all_streams({StreamID, Stream0, Iterator}, Streams, State0, Send) -> + case send_data_for_one_stream(Stream0, State0, []) of + {ok, Stream, State, []} -> + send_data_for_all_streams(maps:next(Iterator), + Streams#{StreamID => Stream}, State, Send); + %% We need to remove the stream here because we do not use stream_store/2. + {ok, #stream{local=fin, remote=fin}, State, SendData} -> + send_data_for_all_streams(maps:next(Iterator), + maps:remove(StreamID, Streams), State, [{StreamID, fin, SendData}|Send]); + {ok, Stream=#stream{local=IsFin}, State, SendData} -> + send_data_for_all_streams(maps:next(Iterator), + Streams#{StreamID => Stream}, State, [{StreamID, IsFin, SendData}|Send]) + end. + +send_data(Stream0, State0) -> + case send_data_for_one_stream(Stream0, State0, []) of + {ok, Stream, State, []} -> + {ok, stream_store(Stream, State)}; + {ok, Stream=#stream{id=StreamID, local=IsFin}, State, SendData} -> + {send, [{StreamID, IsFin, SendData}], stream_store(Stream, State)} + end. + +send_data_for_one_stream(Stream=#stream{local=nofin, local_buffer_size=0, + local_trailers=Trailers}, State, SendAcc) when Trailers =/= undefined -> + {ok, Stream, State, lists:reverse([{trailers, Trailers}|SendAcc])}; +send_data_for_one_stream(Stream=#stream{local=nofin, local_buffer=Q0, local_buffer_size=0}, + State, SendAcc) -> + case queue:len(Q0) of + 0 -> + {ok, Stream, State, lists:reverse(SendAcc)}; + 1 -> + %% We know there is a final empty data frame in the queue. + %% We need to mark the stream as complete. + {{value, {fin, 0, _}}, Q} = queue:out(Q0), + {ok, Stream#stream{local=fin, local_buffer=Q}, State, lists:reverse(SendAcc)} + end; +send_data_for_one_stream(Stream=#stream{local=IsFin, local_window=StreamWindow, + local_buffer_size=BufferSize}, State=#http2_machine{local_window=ConnWindow}, SendAcc) + when ConnWindow =< 0; IsFin =:= fin; StreamWindow =< 0; BufferSize =:= 0 -> + {ok, Stream, State, lists:reverse(SendAcc)}; +send_data_for_one_stream(Stream0=#stream{local_window=StreamWindow, + local_buffer=Q0, local_buffer_size=BufferSize}, + State0=#http2_machine{opts=Opts, local_window=ConnWindow}, SendAcc0) -> + MinSendSize = maps:get(stream_window_data_threshold, Opts, 16384), + if + %% If we cannot send the entire buffer at once and the window + %% is smaller than we are willing to send at a minimum, do nothing. + %% + %% We only do this check the first time we go through this function; + %% we want to send as much data as possible IF we send some. + (SendAcc0 =:= []) andalso (StreamWindow < MinSendSize) + andalso ((StreamWindow < BufferSize) orelse (ConnWindow < BufferSize)) -> + {ok, Stream0, State0, []}; + true -> + %% We know there is an item in the queue. + {{value, {IsFin, DataSize, Data}}, Q} = queue:out(Q0), + Stream1 = Stream0#stream{local_buffer=Q, local_buffer_size=BufferSize - DataSize}, + {ok, Stream, State, SendAcc} + = send_or_queue_data(Stream1, State0, SendAcc0, IsFin, Data, in_r), + send_data_for_one_stream(Stream, State, SendAcc) + end. + +%% We can send trailers immediately if the queue is empty, otherwise we queue. 
+%% We always send trailer frames even if the window is empty. +send_or_queue_data(Stream=#stream{local_buffer_size=0}, + State, SendAcc, fin, {trailers, Trailers}, _) -> + {ok, Stream, State, [{trailers, Trailers}|SendAcc]}; +send_or_queue_data(Stream, State, SendAcc, fin, {trailers, Trailers}, _) -> + {ok, Stream#stream{local_trailers=Trailers}, State, SendAcc}; +%% Send data immediately if we can, buffer otherwise. +send_or_queue_data(Stream=#stream{local_window=StreamWindow}, + State=#http2_machine{local_window=ConnWindow}, + SendAcc, IsFin, Data, In) + when ConnWindow =< 0; StreamWindow =< 0 -> + {ok, queue_data(Stream, IsFin, Data, In), State, SendAcc}; +send_or_queue_data(Stream=#stream{local_window=StreamWindow}, + State=#http2_machine{opts=Opts, remote_settings=RemoteSettings, + local_window=ConnWindow}, SendAcc, IsFin, Data, In) -> + RemoteMaxFrameSize = maps:get(max_frame_size, RemoteSettings, 16384), + ConfiguredMaxFrameSize = maps:get(max_frame_size_sent, Opts, infinity), + MaxSendSize = min( + min(ConnWindow, StreamWindow), + min(RemoteMaxFrameSize, ConfiguredMaxFrameSize) + ), + case Data of + File = #sendfile{bytes=Bytes} when Bytes =< MaxSendSize -> + {ok, Stream#stream{local=IsFin, local_window=StreamWindow - Bytes}, + State#http2_machine{local_window=ConnWindow - Bytes}, + [File|SendAcc]}; + File = #sendfile{offset=Offset, bytes=Bytes} -> + send_or_queue_data(Stream#stream{local_window=StreamWindow - MaxSendSize}, + State#http2_machine{local_window=ConnWindow - MaxSendSize}, + [File#sendfile{bytes=MaxSendSize}|SendAcc], IsFin, + File#sendfile{offset=Offset + MaxSendSize, bytes=Bytes - MaxSendSize}, In); + {data, Iolist0} -> + IolistSize = iolist_size(Iolist0), + if + IolistSize =< MaxSendSize -> + {ok, Stream#stream{local=IsFin, local_window=StreamWindow - IolistSize}, + State#http2_machine{local_window=ConnWindow - IolistSize}, + [{data, Iolist0}|SendAcc]}; + true -> + {Iolist, More} = cow_iolists:split(MaxSendSize, Iolist0), + send_or_queue_data(Stream#stream{local_window=StreamWindow - MaxSendSize}, + State#http2_machine{local_window=ConnWindow - MaxSendSize}, + [{data, Iolist}|SendAcc], IsFin, {data, More}, In) + end + end. + +queue_data(Stream=#stream{local_buffer=Q0, local_buffer_size=Size0}, IsFin, Data, In) -> + DataSize = case Data of + {sendfile, _, Bytes, _} -> Bytes; + {data, Iolist} -> iolist_size(Iolist) + end, + %% Never queue non-final empty data frames. + case {DataSize, IsFin} of + {0, nofin} -> + Stream; + _ -> + Q = queue:In({IsFin, DataSize, Data}, Q0), + Stream#stream{local_buffer=Q, local_buffer_size=Size0 + DataSize} + end. + +%% Public interface to update the flow control window. +%% +%% The ensure_window function applies heuristics to avoid updating the +%% window when it is not necessary. The update_window function updates +%% the window unconditionally. +%% +%% The ensure_window function should be called when requesting more +%% data (for example when reading a request or response body) as well +%% as when receiving new data. Failure to do so may result in the +%% window being depleted. +%% +%% The heuristics dictating whether the window must be updated and +%% what the window size is depends on three options (margin, max +%% and threshold) along with the Size argument. The window increment +%% returned by this function may therefore be smaller than the Size +%% argument. On the other hand the total window allocated over many +%% calls may end up being larger than the initial Size argument. 
As +%% a result, it is the responsibility of the caller to ensure that +%% the Size argument is never lower than 0. + +-spec ensure_window(non_neg_integer(), State) + -> ok | {ok, pos_integer(), State} when State::http2_machine(). +ensure_window(Size, State=#http2_machine{opts=Opts, remote_window=RemoteWindow}) -> + case ensure_window(Size, RemoteWindow, connection, Opts) of + ok -> + ok; + {ok, Increment} -> + {ok, Increment, State#http2_machine{remote_window=RemoteWindow + Increment}} + end. + +-spec ensure_window(cow_http2:streamid(), non_neg_integer(), State) + -> ok | {ok, pos_integer(), State} when State::http2_machine(). +ensure_window(StreamID, Size, State=#http2_machine{opts=Opts}) -> + case stream_get(StreamID, State) of + %% For simplicity's sake, we do not consider attempts to ensure the window + %% of a terminated stream to be errors. We simply act as if the stream + %% window is large enough. + undefined -> + ok; + Stream = #stream{remote_window=RemoteWindow} -> + case ensure_window(Size, RemoteWindow, stream, Opts) of + ok -> + ok; + {ok, Increment} -> + {ok, Increment, stream_store(Stream#stream{remote_window=RemoteWindow + Increment}, State)} + end + end. + +%% No need to update the window when we are not expecting data. +ensure_window(0, _, _, _) -> + ok; +%% No need to update the window when it is already high enough. +ensure_window(Size, Window, _, _) when Size =< Window -> + ok; +ensure_window(Size0, Window, Type, Opts) -> + Threshold = ensure_window_threshold(Type, Opts), + if + %% We do not update the window when it is higher than the threshold. + Window > Threshold -> + ok; + true -> + Margin = ensure_window_margin(Type, Opts), + Size = Size0 + Margin, + MaxWindow = ensure_window_max(Type, Opts), + Increment = if + %% We cannot go above the maximum window size. + Size > MaxWindow -> MaxWindow - Window; + true -> Size - Window + end, + case Increment of + 0 -> ok; + _ -> {ok, Increment} + end + end. + +%% Margin defaults to the default initial window size. +ensure_window_margin(connection, Opts) -> + maps:get(connection_window_margin_size, Opts, 65535); +ensure_window_margin(stream, Opts) -> + maps:get(stream_window_margin_size, Opts, 65535). + +%% Max window defaults to the max value allowed by the protocol. +ensure_window_max(connection, Opts) -> + maps:get(max_connection_window_size, Opts, 16#7fffffff); +ensure_window_max(stream, Opts) -> + maps:get(max_stream_window_size, Opts, 16#7fffffff). + +%% Threshold defaults to 10 times the default frame size. +ensure_window_threshold(connection, Opts) -> + maps:get(connection_window_update_threshold, Opts, 163840); +ensure_window_threshold(stream, Opts) -> + maps:get(stream_window_update_threshold, Opts, 163840). + +-spec update_window(1..16#7fffffff, State) + -> State when State::http2_machine(). +update_window(Size, State=#http2_machine{remote_window=RemoteWindow}) + when Size > 0 -> + State#http2_machine{remote_window=RemoteWindow + Size}. + +-spec update_window(cow_http2:streamid(), 1..16#7fffffff, State) + -> State when State::http2_machine(). +update_window(StreamID, Size, State) + when Size > 0 -> + Stream = #stream{remote_window=RemoteWindow} = stream_get(StreamID, State), + stream_store(Stream#stream{remote_window=RemoteWindow + Size}, State). + +%% Public interface to reset streams. + +-spec reset_stream(cow_http2:streamid(), State) + -> {ok, State} | {error, not_found} when State::http2_machine(). 
+reset_stream(StreamID, State=#http2_machine{streams=Streams0}) -> + case maps:take(StreamID, Streams0) of + {_, Streams} -> + {ok, stream_linger(StreamID, State#http2_machine{streams=Streams})}; + error -> + {error, not_found} + end. + +%% Retrieve the buffer size for all streams. + +-spec get_connection_local_buffer_size(http2_machine()) -> non_neg_integer(). +get_connection_local_buffer_size(#http2_machine{streams=Streams}) -> + maps:fold(fun(_, #stream{local_buffer_size=Size}, Acc) -> + Acc + Size + end, 0, Streams). + +%% Retrieve a setting value, or its default value if not set. + +-spec get_local_setting(atom(), http2_machine()) -> atom() | integer(). +get_local_setting(Key, #http2_machine{local_settings=Settings}) -> + maps:get(Key, Settings, default_setting_value(Key)). + +-spec get_remote_settings(http2_machine()) -> map(). +get_remote_settings(#http2_machine{mode=Mode, remote_settings=Settings}) -> + Defaults0 = #{ + header_table_size => default_setting_value(header_table_size), + enable_push => default_setting_value(enable_push), + max_concurrent_streams => default_setting_value(max_concurrent_streams), + initial_window_size => default_setting_value(initial_window_size), + max_frame_size => default_setting_value(max_frame_size), + max_header_list_size => default_setting_value(max_header_list_size) + }, + Defaults = case Mode of + server -> + Defaults0#{enable_connect_protocol => default_setting_value(enable_connect_protocol)}; + client -> + Defaults0 + end, + maps:merge(Defaults, Settings). + +default_setting_value(header_table_size) -> 4096; +default_setting_value(enable_push) -> true; +default_setting_value(max_concurrent_streams) -> infinity; +default_setting_value(initial_window_size) -> 65535; +default_setting_value(max_frame_size) -> 16384; +default_setting_value(max_header_list_size) -> infinity; +default_setting_value(enable_connect_protocol) -> false. + +%% Function to obtain the last known streamid received +%% for the purposes of sending a GOAWAY frame and closing the connection. + +-spec get_last_streamid(http2_machine()) -> cow_http2:streamid(). +get_last_streamid(#http2_machine{remote_streamid=RemoteStreamID}) -> + RemoteStreamID. + +%% Set last accepted streamid to the last known streamid, for the purpose +%% ignoring frames for remote streams created after sending GOAWAY. + +-spec set_last_streamid(http2_machine()) -> {cow_http2:streamid(), http2_machine()}. +set_last_streamid(State=#http2_machine{remote_streamid=StreamID, + last_remote_streamid=LastStreamID}) when StreamID =< LastStreamID-> + {StreamID, State#http2_machine{last_remote_streamid = StreamID}}. + +%% Retrieve the local buffer size for a stream. + +-spec get_stream_local_buffer_size(cow_http2:streamid(), http2_machine()) + -> {ok, non_neg_integer()} | {error, not_found | closed}. +get_stream_local_buffer_size(StreamID, State=#http2_machine{mode=Mode, + local_streamid=LocalStreamID, remote_streamid=RemoteStreamID}) -> + case stream_get(StreamID, State) of + #stream{local_buffer_size=Size} -> + {ok, Size}; + undefined when (?IS_LOCAL(Mode, StreamID) andalso (StreamID < LocalStreamID)) + orelse ((not ?IS_LOCAL(Mode, StreamID)) andalso (StreamID =< RemoteStreamID)) -> + {error, closed}; + undefined -> + {error, not_found} + end. + +%% Retrieve the local state for a stream, including the state in the queue. + +-spec get_stream_local_state(cow_http2:streamid(), http2_machine()) + -> {ok, idle | cow_http2:fin(), empty | nofin | fin} | {error, not_found | closed}. 
+get_stream_local_state(StreamID, State=#http2_machine{mode=Mode,
+ local_streamid=LocalStreamID, remote_streamid=RemoteStreamID}) ->
+ case stream_get(StreamID, State) of
+ #stream{local=IsFin, local_buffer=Q, local_trailers=undefined} ->
+ IsQueueFin = case queue:peek_r(Q) of
+ empty -> empty;
+ {value, {IsQueueFin0, _, _}} -> IsQueueFin0
+ end,
+ {ok, IsFin, IsQueueFin};
+ %% Trailers are queued so the local state is fin after the queue is drained.
+ #stream{local=IsFin} ->
+ {ok, IsFin, fin};
+ undefined when (?IS_LOCAL(Mode, StreamID) andalso (StreamID < LocalStreamID))
+ orelse ((not ?IS_LOCAL(Mode, StreamID)) andalso (StreamID =< RemoteStreamID)) ->
+ {error, closed};
+ undefined ->
+ {error, not_found}
+ end.
+
+%% Retrieve the remote state for a stream.
+
+-spec get_stream_remote_state(cow_http2:streamid(), http2_machine())
+ -> {ok, idle | cow_http2:fin()} | {error, not_found | closed}.
+get_stream_remote_state(StreamID, State=#http2_machine{mode=Mode,
+ local_streamid=LocalStreamID, remote_streamid=RemoteStreamID}) ->
+ case stream_get(StreamID, State) of
+ #stream{remote=IsFin} ->
+ {ok, IsFin};
+ undefined when (?IS_LOCAL(Mode, StreamID) andalso (StreamID < LocalStreamID))
+ orelse ((not ?IS_LOCAL(Mode, StreamID)) andalso (StreamID =< RemoteStreamID)) ->
+ {error, closed};
+ undefined ->
+ {error, not_found}
+ end.
+
+%% Query whether the stream was reset recently by the remote endpoint.
+
+-spec is_lingering_stream(cow_http2:streamid(), http2_machine()) -> boolean().
+is_lingering_stream(StreamID, #http2_machine{
+ local_lingering_streams=Local, remote_lingering_streams=Remote}) ->
+ case lists:member(StreamID, Local) of
+ true -> true;
+ false -> lists:member(StreamID, Remote)
+ end.
+
+%% Stream-related functions.
+
+stream_get(StreamID, #http2_machine{streams=Streams}) ->
+ maps:get(StreamID, Streams, undefined).
+
+stream_store(#stream{id=StreamID, local=fin, remote=fin},
+ State=#http2_machine{streams=Streams0}) ->
+ Streams = maps:remove(StreamID, Streams0),
+ State#http2_machine{streams=Streams};
+stream_store(Stream=#stream{id=StreamID},
+ State=#http2_machine{streams=Streams}) ->
+ State#http2_machine{streams=Streams#{StreamID => Stream}}.
+
+%% @todo Don't send an RST_STREAM if one was already sent.
+stream_reset(StreamID, State, Reason, HumanReadable) ->
+ {error, {stream_error, StreamID, Reason, HumanReadable},
+ stream_linger(StreamID, State)}.
+
+stream_linger(StreamID, State=#http2_machine{local_lingering_streams=Lingering0}) ->
+ %% We only keep up to 100 streams in this state. @todo Make it configurable?
+ Lingering = [StreamID|lists:sublist(Lingering0, 100 - 1)],
+ State#http2_machine{local_lingering_streams=Lingering}.
diff --git a/deps/cowlib/src/cow_http_hd.erl b/deps/cowlib/src/cow_http_hd.erl
new file mode 100644
index 0000000..e2a0a1d
--- /dev/null
+++ b/deps/cowlib/src/cow_http_hd.erl
@@ -0,0 +1,3622 @@
+%% Copyright (c) 2014-2018, Loïc Hoguin
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cow_http_hd). + +%% Functions are ordered by header name, with the parse +%% function before the build function. + +-export([parse_accept/1]). +-export([parse_accept_charset/1]). +% @todo -export([parse_accept_datetime/1]). RFC7089 +-export([parse_accept_encoding/1]). +% @todo -export([parse_accept_features/1]). RFC2295 +-export([parse_accept_language/1]). +-export([parse_accept_ranges/1]). +% @todo -export([parse_access_control_allow_credentials/1]). CORS +-export([access_control_allow_credentials/0]). +% @todo -export([parse_access_control_allow_headers/1]). CORS +-export([access_control_allow_headers/1]). +% @todo -export([parse_access_control_allow_methods/1]). CORS +-export([access_control_allow_methods/1]). +% @todo -export([parse_access_control_allow_origin/1]). CORS +-export([access_control_allow_origin/1]). +% @todo -export([parse_access_control_expose_headers/1]). CORS +-export([access_control_expose_headers/1]). +% @todo -export([parse_access_control_max_age/1]). CORS +-export([access_control_max_age/1]). +-export([parse_access_control_request_headers/1]). +-export([parse_access_control_request_method/1]). +-export([parse_age/1]). +-export([parse_allow/1]). +% @todo -export([parse_alternates/1]). RFC2295 +% @todo -export([parse_authentication_info/1]). RFC2617 +-export([parse_authorization/1]). +-export([parse_cache_control/1]). +-export([parse_connection/1]). +% @todo -export([parse_content_disposition/1]). RFC6266 +-export([parse_content_encoding/1]). +-export([parse_content_language/1]). +-export([parse_content_length/1]). +% @todo -export([parse_content_location/1]). RFC7231 +% @todo -export([parse_content_md5/1]). RFC2616 (deprecated) +-export([parse_content_range/1]). +% @todo -export([parse_content_security_policy/1]). CSP +% @todo -export([parse_content_security_policy_report_only/1]). CSP +-export([parse_content_type/1]). +-export([parse_cookie/1]). +-export([parse_date/1]). +% @todo -export([parse_digest/1]). RFC3230 +% @todo -export([parse_dnt/1]). http://donottrack.us/ +-export([parse_etag/1]). +-export([parse_expect/1]). +-export([parse_expires/1]). +% @todo -export([parse_forwarded/1]). RFC7239 +% @todo -export([parse_from/1]). RFC7231 +-export([parse_host/1]). +-export([parse_http2_settings/1]). +-export([parse_if_match/1]). +-export([parse_if_modified_since/1]). +-export([parse_if_none_match/1]). +-export([parse_if_range/1]). +-export([parse_if_unmodified_since/1]). +% @todo -export([parse_last_event_id/1]). eventsource +-export([parse_last_modified/1]). +-export([parse_link/1]). +% @todo -export([parse_location/1]). RFC7231 +-export([parse_max_forwards/1]). +% @todo -export([parse_memento_datetime/1]). RFC7089 +% @todo -export([parse_negotiate/1]). RFC2295 +-export([parse_origin/1]). +-export([parse_pragma/1]). +% @todo -export([parse_prefer/1]). RFC7240 +-export([parse_proxy_authenticate/1]). +% @todo -export([parse_proxy_authentication_info/1]). RFC2617 +-export([parse_proxy_authorization/1]). +% @todo -export([parse_proxy_support/1]). RFC4559 +% @todo -export([parse_public_key_pins/1]). Key Pinning (upcoming) +% @todo -export([parse_public_key_pins_report_only/1]). Key Pinning (upcoming) +-export([parse_range/1]). 
+% @todo -export([parse_referer/1]). RFC7231 +% @todo -export([parse_refresh/1]). Non-standard (examples: "5", "5; url=http://example.com/") +-export([parse_retry_after/1]). +-export([parse_sec_websocket_accept/1]). +-export([parse_sec_websocket_extensions/1]). +-export([parse_sec_websocket_key/1]). +% @todo -export([parse_sec_websocket_origin/1]). Websocket drafts 7 and 8 +-export([parse_sec_websocket_protocol_req/1]). +-export([parse_sec_websocket_protocol_resp/1]). +-export([parse_sec_websocket_version_req/1]). +-export([parse_sec_websocket_version_resp/1]). +% @todo -export([parse_server/1]). RFC7231 +-export([parse_set_cookie/1]). +% @todo -export([parse_strict_transport_security/1]). RFC6797 +% @todo -export([parse_tcn/1]). RFC2295 +-export([parse_te/1]). +-export([parse_trailer/1]). +-export([parse_transfer_encoding/1]). +-export([parse_upgrade/1]). +% @todo -export([parse_user_agent/1]). RFC7231 +% @todo -export([parse_variant_vary/1]). RFC2295 +-export([parse_variant_key/2]). +-export([variant_key/1]). +-export([parse_variants/1]). +-export([variants/1]). +-export([parse_vary/1]). +% @todo -export([parse_via/1]). RFC7230 +% @todo -export([parse_want_digest/1]). RFC3230 +% @todo -export([parse_warning/1]). RFC7234 +-export([parse_www_authenticate/1]). +% @todo -export([parse_x_content_duration/1]). Gecko/MDN (value: float) +% @todo -export([parse_x_dns_prefetch_control/1]). Various (value: "on"|"off") +-export([parse_x_forwarded_for/1]). +% @todo -export([parse_x_frame_options/1]). RFC7034 + +-type etag() :: {weak | strong, binary()}. +-export_type([etag/0]). + +-type media_type() :: {binary(), binary(), [{binary(), binary()}]}. +-export_type([media_type/0]). + +-type qvalue() :: 0..1000. +-export_type([qvalue/0]). + +-type websocket_version() :: 0..255. +-export_type([websocket_version/0]). + +-include("cow_inline.hrl"). +-include("cow_parse.hrl"). + +-ifdef(TEST). +-include_lib("proper/include/proper.hrl"). + +vector(Min, Max, Dom) -> ?LET(N, choose(Min, Max), vector(N, Dom)). +small_list(Dom) -> vector(0, 10, Dom). +small_non_empty_list(Dom) -> vector(1, 10, Dom). + +alpha_chars() -> "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ". +alphanum_chars() -> "0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ". +digit_chars() -> "0123456789". + +ows() -> list(elements([$\s, $\t])). +alpha() -> elements(alpha_chars()). +alphanum() -> elements(alphanum_chars()). +digit() -> elements(digit_chars()). + +tchar() -> + frequency([ + {1, elements([$!, $#, $$, $%, $&, $', $*, $+, $-, $., $^, $_, $`, $|, $~])}, + {99, elements(alphanum_chars())} + ]). + +token() -> + ?LET(T, + non_empty(list(tchar())), + list_to_binary(T)). + +abnf_char() -> + integer(1, 127). + +vchar() -> + integer(33, 126). + +obs_text() -> + integer(128, 255). + +qdtext() -> + frequency([ + {99, elements("\t\s!#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[]^_`abcdefghijklmnopqrstuvwxyz{|}~")}, + {1, obs_text()} + ]). + +quoted_pair() -> + [$\\, frequency([ + {99, elements("\t\s!\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijklmnopqrstuvwxyz{|}~")}, + {1, obs_text()} + ])]. + +quoted_string() -> + [$", list(frequency([{100, qdtext()}, {1, quoted_pair()}])), $"]. + +%% Helper function for ( token / quoted-string ) values. +unquote([$", V, $"]) -> unquote(V, <<>>); +unquote(V) -> V. + +unquote([], Acc) -> Acc; +unquote([[$\\, C]|Tail], Acc) -> unquote(Tail, << Acc/binary, C >>); +unquote([C|Tail], Acc) -> unquote(Tail, << Acc/binary, C >>). 
+ +parameter() -> + ?SUCHTHAT({K, _, _, _}, + {token(), oneof([token(), quoted_string()]), ows(), ows()}, + K =/= <<"q">>). + +weight() -> + frequency([ + {90, integer(0, 1000)}, + {10, undefined} + ]). + +%% Helper function for weight's qvalue formatting. +qvalue_to_iodata(0) -> <<"0">>; +qvalue_to_iodata(Q) when Q < 10 -> [<<"0.00">>, integer_to_binary(Q)]; +qvalue_to_iodata(Q) when Q < 100 -> [<<"0.0">>, integer_to_binary(Q)]; +qvalue_to_iodata(Q) when Q < 1000 -> [<<"0.">>, integer_to_binary(Q)]; +qvalue_to_iodata(1000) -> <<"1">>. +-endif. + +%% Accept header. + +-spec parse_accept(binary()) -> [{media_type(), qvalue(), [binary() | {binary(), binary()}]}]. +parse_accept(<<"*/*">>) -> + [{{<<"*">>, <<"*">>, []}, 1000, []}]; +parse_accept(Accept) -> + media_range_list(Accept, []). + +media_range_list(<< C, R/bits >>, Acc) when ?IS_TOKEN(C) -> ?LOWER(media_range_type, R, Acc, <<>>); +media_range_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> media_range_list(R, Acc); +media_range_list(<<>>, Acc) -> lists:reverse(Acc). + +media_range_type(<< C, R/bits >>, Acc, T) when ?IS_TOKEN(C) -> ?LOWER(media_range_type, R, Acc, T); +media_range_type(<< $/, C, R/bits >>, Acc, T) when ?IS_TOKEN(C) -> ?LOWER(media_range_subtype, R, Acc, T, <<>>); +%% Special clause for badly behaving user agents that send * instead of */*. +media_range_type(<< $;, R/bits >>, Acc, <<"*">>) -> media_range_before_param(R, Acc, <<"*">>, <<"*">>, []). + +media_range_subtype(<< C, R/bits >>, Acc, T, S) when ?IS_TOKEN(C) -> ?LOWER(media_range_subtype, R, Acc, T, S); +media_range_subtype(R, Acc, T, S) -> media_range_param_sep(R, Acc, T, S, []). + +media_range_param_sep(<<>>, Acc, T, S, P) -> lists:reverse([{{T, S, lists:reverse(P)}, 1000, []}|Acc]); +media_range_param_sep(<< $,, R/bits >>, Acc, T, S, P) -> media_range_list(R, [{{T, S, lists:reverse(P)}, 1000, []}|Acc]); +media_range_param_sep(<< $;, R/bits >>, Acc, T, S, P) -> media_range_before_param(R, Acc, T, S, P); +media_range_param_sep(<< C, R/bits >>, Acc, T, S, P) when ?IS_WS(C) -> media_range_param_sep(R, Acc, T, S, P). + +media_range_before_param(<< C, R/bits >>, Acc, T, S, P) when ?IS_WS(C) -> media_range_before_param(R, Acc, T, S, P); +media_range_before_param(<< $q, $=, R/bits >>, Acc, T, S, P) -> media_range_weight(R, Acc, T, S, P); +media_range_before_param(<< "charset=", $", R/bits >>, Acc, T, S, P) -> media_range_charset_quoted(R, Acc, T, S, P, <<>>); +media_range_before_param(<< "charset=", R/bits >>, Acc, T, S, P) -> media_range_charset(R, Acc, T, S, P, <<>>); +media_range_before_param(<< C, R/bits >>, Acc, T, S, P) when ?IS_TOKEN(C) -> ?LOWER(media_range_param, R, Acc, T, S, P, <<>>). + +media_range_charset_quoted(<< $", R/bits >>, Acc, T, S, P, V) -> + media_range_param_sep(R, Acc, T, S, [{<<"charset">>, V}|P]); +media_range_charset_quoted(<< $\\, C, R/bits >>, Acc, T, S, P, V) when ?IS_VCHAR_OBS(C) -> + ?LOWER(media_range_charset_quoted, R, Acc, T, S, P, V); +media_range_charset_quoted(<< C, R/bits >>, Acc, T, S, P, V) when ?IS_VCHAR_OBS(C) -> + ?LOWER(media_range_charset_quoted, R, Acc, T, S, P, V). + +media_range_charset(<< C, R/bits >>, Acc, T, S, P, V) when ?IS_TOKEN(C) -> + ?LOWER(media_range_charset, R, Acc, T, S, P, V); +media_range_charset(R, Acc, T, S, P, V) -> + media_range_param_sep(R, Acc, T, S, [{<<"charset">>, V}|P]). 
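+
+%% Note: the clauses above special-case the charset parameter so that its
+%% value is lowercased even when it appears as a quoted-string; for example
+%% "text/plain; charset=UTF-8" is parsed with the parameter
+%% {<<"charset">>, <<"utf-8">>}, as covered by parse_accept_test_/0 further down.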
+ +media_range_param(<< $=, $", R/bits >>, Acc, T, S, P, K) -> media_range_quoted(R, Acc, T, S, P, K, <<>>); +media_range_param(<< $=, C, R/bits >>, Acc, T, S, P, K) when ?IS_TOKEN(C) -> media_range_value(R, Acc, T, S, P, K, << C >>); +media_range_param(<< C, R/bits >>, Acc, T, S, P, K) when ?IS_TOKEN(C) -> ?LOWER(media_range_param, R, Acc, T, S, P, K). + +media_range_quoted(<< $", R/bits >>, Acc, T, S, P, K, V) -> media_range_param_sep(R, Acc, T, S, [{K, V}|P]); +media_range_quoted(<< $\\, C, R/bits >>, Acc, T, S, P, K, V) when ?IS_VCHAR_OBS(C) -> media_range_quoted(R, Acc, T, S, P, K, << V/binary, C >>); +media_range_quoted(<< C, R/bits >>, Acc, T, S, P, K, V) when ?IS_VCHAR_OBS(C) -> media_range_quoted(R, Acc, T, S, P, K, << V/binary, C >>). + +media_range_value(<< C, R/bits >>, Acc, T, S, P, K, V) when ?IS_TOKEN(C) -> media_range_value(R, Acc, T, S, P, K, << V/binary, C >>); +media_range_value(R, Acc, T, S, P, K, V) -> media_range_param_sep(R, Acc, T, S, [{K, V}|P]). + +media_range_weight(<< "1.000", R/bits >>, Acc, T, S, P) -> accept_ext_sep(R, Acc, T, S, P, 1000, []); +media_range_weight(<< "1.00", R/bits >>, Acc, T, S, P) -> accept_ext_sep(R, Acc, T, S, P, 1000, []); +media_range_weight(<< "1.0", R/bits >>, Acc, T, S, P) -> accept_ext_sep(R, Acc, T, S, P, 1000, []); +media_range_weight(<< "1.", R/bits >>, Acc, T, S, P) -> accept_ext_sep(R, Acc, T, S, P, 1000, []); +media_range_weight(<< "1", R/bits >>, Acc, T, S, P) -> accept_ext_sep(R, Acc, T, S, P, 1000, []); +media_range_weight(<< "0.", A, B, C, R/bits >>, Acc, T, S, P) when ?IS_DIGIT(A), ?IS_DIGIT(B), ?IS_DIGIT(C) -> + accept_ext_sep(R, Acc, T, S, P, (A - $0) * 100 + (B - $0) * 10 + (C - $0), []); +media_range_weight(<< "0.", A, B, R/bits >>, Acc, T, S, P) when ?IS_DIGIT(A), ?IS_DIGIT(B) -> + accept_ext_sep(R, Acc, T, S, P, (A - $0) * 100 + (B - $0) * 10, []); +media_range_weight(<< "0.", A, R/bits >>, Acc, T, S, P) when ?IS_DIGIT(A) -> + accept_ext_sep(R, Acc, T, S, P, (A - $0) * 100, []); +media_range_weight(<< "0.", R/bits >>, Acc, T, S, P) -> accept_ext_sep(R, Acc, T, S, P, 0, []); +media_range_weight(<< "0", R/bits >>, Acc, T, S, P) -> accept_ext_sep(R, Acc, T, S, P, 0, []); +%% Special clauses for badly behaving user agents that send .123 instead of 0.123. +media_range_weight(<< ".", A, B, C, R/bits >>, Acc, T, S, P) when ?IS_DIGIT(A), ?IS_DIGIT(B), ?IS_DIGIT(C) -> + accept_ext_sep(R, Acc, T, S, P, (A - $0) * 100 + (B - $0) * 10 + (C - $0), []); +media_range_weight(<< ".", A, B, R/bits >>, Acc, T, S, P) when ?IS_DIGIT(A), ?IS_DIGIT(B) -> + accept_ext_sep(R, Acc, T, S, P, (A - $0) * 100 + (B - $0) * 10, []); +media_range_weight(<< ".", A, R/bits >>, Acc, T, S, P) when ?IS_DIGIT(A) -> + accept_ext_sep(R, Acc, T, S, P, (A - $0) * 100, []). + +accept_ext_sep(<<>>, Acc, T, S, P, Q, E) -> lists:reverse([{{T, S, lists:reverse(P)}, Q, lists:reverse(E)}|Acc]); +accept_ext_sep(<< $,, R/bits >>, Acc, T, S, P, Q, E) -> media_range_list(R, [{{T, S, lists:reverse(P)}, Q, lists:reverse(E)}|Acc]); +accept_ext_sep(<< $;, R/bits >>, Acc, T, S, P, Q, E) -> accept_before_ext(R, Acc, T, S, P, Q, E); +accept_ext_sep(<< C, R/bits >>, Acc, T, S, P, Q, E) when ?IS_WS(C) -> accept_ext_sep(R, Acc, T, S, P, Q, E). + +accept_before_ext(<< C, R/bits >>, Acc, T, S, P, Q, E) when ?IS_WS(C) -> accept_before_ext(R, Acc, T, S, P, Q, E); +accept_before_ext(<< C, R/bits >>, Acc, T, S, P, Q, E) when ?IS_TOKEN(C) -> ?LOWER(accept_ext, R, Acc, T, S, P, Q, E, <<>>). 
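+
+%% Note: weights are returned on the qvalue() scale of 0..1000, so "q=0.2"
+%% becomes 200 and a media range without a q parameter defaults to 1000. The
+%% clauses above also tolerate the malformed ".2" form sent by some user
+%% agents, treating it the same as "0.2".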
+ +accept_ext(<< $=, $", R/bits >>, Acc, T, S, P, Q, E, K) -> accept_quoted(R, Acc, T, S, P, Q, E, K, <<>>); +accept_ext(<< $=, C, R/bits >>, Acc, T, S, P, Q, E, K) when ?IS_TOKEN(C) -> accept_value(R, Acc, T, S, P, Q, E, K, << C >>); +accept_ext(<< C, R/bits >>, Acc, T, S, P, Q, E, K) when ?IS_TOKEN(C) -> ?LOWER(accept_ext, R, Acc, T, S, P, Q, E, K); +accept_ext(R, Acc, T, S, P, Q, E, K) -> accept_ext_sep(R, Acc, T, S, P, Q, [K|E]). + +accept_quoted(<< $", R/bits >>, Acc, T, S, P, Q, E, K, V) -> accept_ext_sep(R, Acc, T, S, P, Q, [{K, V}|E]); +accept_quoted(<< $\\, C, R/bits >>, Acc, T, S, P, Q, E, K, V) when ?IS_VCHAR_OBS(C) -> accept_quoted(R, Acc, T, S, P, Q, E, K, << V/binary, C >>); +accept_quoted(<< C, R/bits >>, Acc, T, S, P, Q, E, K, V) when ?IS_VCHAR_OBS(C) -> accept_quoted(R, Acc, T, S, P, Q, E, K, << V/binary, C >>). + +accept_value(<< C, R/bits >>, Acc, T, S, P, Q, E, K, V) when ?IS_TOKEN(C) -> accept_value(R, Acc, T, S, P, Q, E, K, << V/binary, C >>); +accept_value(R, Acc, T, S, P, Q, E, K, V) -> accept_ext_sep(R, Acc, T, S, P, Q, [{K, V}|E]). + +-ifdef(TEST). +accept_ext() -> + oneof([token(), parameter()]). + +accept_exts() -> + frequency([ + {90, []}, + {10, small_list(accept_ext())} + ]). + +accept_param() -> + frequency([ + {90, parameter()}, + {10, {<<"charset">>, oneof([token(), quoted_string()]), <<>>, <<>>}} + ]). + +accept_params() -> + small_list(accept_param()). + +accept() -> + ?LET({T, S, P, W, E}, + {token(), token(), accept_params(), weight(), accept_exts()}, + {T, S, P, W, E, iolist_to_binary([T, $/, S, + [[OWS1, $;, OWS2, K, $=, V] || {K, V, OWS1, OWS2} <- P], + case W of + undefined -> []; + _ -> [ + [<<";q=">>, qvalue_to_iodata(W)], + [case Ext of + {K, V, OWS1, OWS2} -> [OWS1, $;, OWS2, K, $=, V]; + K -> [$;, K] + end || Ext <- E]] + end])} + ). + +prop_parse_accept() -> + ?FORALL(L, + vector(1, 50, accept()), + begin + << _, Accept/binary >> = iolist_to_binary([[$,, A] || {_, _, _, _, _, A} <- L]), + ResL = parse_accept(Accept), + CheckedL = [begin + ExpectedP = [case ?LOWER(K) of + <<"charset">> -> {<<"charset">>, ?LOWER(unquote(V))}; + LowK -> {LowK, unquote(V)} + end || {K, V, _, _} <- P], + ExpectedE = [case Ext of + {K, V, _, _} -> {?LOWER(K), unquote(V)}; + K -> ?LOWER(K) + end || Ext <- E], + ResT =:= ?LOWER(T) + andalso ResS =:= ?LOWER(S) + andalso ResP =:= ExpectedP + andalso (ResW =:= W orelse (W =:= undefined andalso ResW =:= 1000)) + andalso ((W =:= undefined andalso ResE =:= []) orelse (W =/= undefined andalso ResE =:= ExpectedE)) + end || {{T, S, P, W, E, _}, {{ResT, ResS, ResP}, ResW, ResE}} <- lists:zip(L, ResL)], + [true] =:= lists:usort(CheckedL) + end + ). 
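+
+%% The property above builds random Accept headers from the token(),
+%% accept_params(), weight() and accept_exts() generators and checks that
+%% parse_accept/1 lowercases types, subtypes and parameter names, unquotes
+%% quoted-string values, and defaults a missing weight to 1000.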
+ +parse_accept_test_() -> + Tests = [ + {<<>>, []}, + {<<" ">>, []}, + {<<"audio/*; q=0.2, audio/basic">>, [ + {{<<"audio">>, <<"*">>, []}, 200, []}, + {{<<"audio">>, <<"basic">>, []}, 1000, []} + ]}, + {<<"text/plain; q=0.5, text/html, " + "text/x-dvi; q=0.8, text/x-c">>, [ + {{<<"text">>, <<"plain">>, []}, 500, []}, + {{<<"text">>, <<"html">>, []}, 1000, []}, + {{<<"text">>, <<"x-dvi">>, []}, 800, []}, + {{<<"text">>, <<"x-c">>, []}, 1000, []} + ]}, + {<<"text/*, text/html, text/html;level=1, */*">>, [ + {{<<"text">>, <<"*">>, []}, 1000, []}, + {{<<"text">>, <<"html">>, []}, 1000, []}, + {{<<"text">>, <<"html">>, [{<<"level">>, <<"1">>}]}, 1000, []}, + {{<<"*">>, <<"*">>, []}, 1000, []} + ]}, + {<<"text/*;q=0.3, text/html;q=0.7, text/html;level=1, " + "text/html;level=2;q=0.4, */*;q=0.5">>, [ + {{<<"text">>, <<"*">>, []}, 300, []}, + {{<<"text">>, <<"html">>, []}, 700, []}, + {{<<"text">>, <<"html">>, [{<<"level">>, <<"1">>}]}, 1000, []}, + {{<<"text">>, <<"html">>, [{<<"level">>, <<"2">>}]}, 400, []}, + {{<<"*">>, <<"*">>, []}, 500, []} + ]}, + {<<"text/html;level=1;quoted=\"hi hi hi\";" + "q=0.123;standalone;complex=gits, text/plain">>, [ + {{<<"text">>, <<"html">>, + [{<<"level">>, <<"1">>}, {<<"quoted">>, <<"hi hi hi">>}]}, 123, + [<<"standalone">>, {<<"complex">>, <<"gits">>}]}, + {{<<"text">>, <<"plain">>, []}, 1000, []} + ]}, + {<<"text/html, image/gif, image/jpeg, *; q=.2, */*; q=.2">>, [ + {{<<"text">>, <<"html">>, []}, 1000, []}, + {{<<"image">>, <<"gif">>, []}, 1000, []}, + {{<<"image">>, <<"jpeg">>, []}, 1000, []}, + {{<<"*">>, <<"*">>, []}, 200, []}, + {{<<"*">>, <<"*">>, []}, 200, []} + ]}, + {<<"text/plain; charset=UTF-8">>, [ + {{<<"text">>, <<"plain">>, [{<<"charset">>, <<"utf-8">>}]}, 1000, []} + ]} + ], + [{V, fun() -> R = parse_accept(V) end} || {V, R} <- Tests]. + +parse_accept_error_test_() -> + Tests = [ + <<"audio/basic, */;q=0.5">>, + <<"audio/, audio/basic">>, + <<"aud\tio/basic">>, + <<"audio/basic;t=\"zero \\", 0, " woo\"">> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_accept(V)) end} || V <- Tests]. + +horse_parse_accept() -> + horse:repeat(20000, + parse_accept(<<"text/*;q=0.3, text/html;q=0.7, text/html;level=1, " + "text/html;level=2;q=0.4, */*;q=0.5">>) + ). +-endif. + +%% Accept-Charset header. + +-spec parse_accept_charset(binary()) -> [{binary(), qvalue()}]. +parse_accept_charset(Charset) -> + nonempty(conneg_list(Charset, [])). + +conneg_list(<<>>, Acc) -> lists:reverse(Acc); +conneg_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> conneg_list(R, Acc); +conneg_list(<< C, R/bits >>, Acc) when ?IS_TOKEN(C) -> ?LOWER(conneg, R, Acc, <<>>). + +conneg(<< C, R/bits >>, Acc, T) when ?IS_TOKEN(C) -> ?LOWER(conneg, R, Acc, T); +conneg(R, Acc, T) -> conneg_param_sep(R, Acc, T). + +conneg_param_sep(<<>>, Acc, T) -> lists:reverse([{T, 1000}|Acc]); +conneg_param_sep(<< $,, R/bits >>, Acc, T) -> conneg_list(R, [{T, 1000}|Acc]); +conneg_param_sep(<< $;, R/bits >>, Acc, T) -> conneg_before_weight(R, Acc, T); +conneg_param_sep(<< C, R/bits >>, Acc, T) when ?IS_WS(C) -> conneg_param_sep(R, Acc, T). + +conneg_before_weight(<< C, R/bits >>, Acc, T) when ?IS_WS(C) -> conneg_before_weight(R, Acc, T); +conneg_before_weight(<< $q, $=, R/bits >>, Acc, T) -> conneg_weight(R, Acc, T); +%% Special clause for broken user agents that confuse ; and , separators. +conneg_before_weight(<< C, R/bits >>, Acc, T) when ?IS_TOKEN(C) -> ?LOWER(conneg, R, [{T, 1000}|Acc], <<>>). 
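+
+%% Note: conneg_before_weight/3 above tolerates user agents that send ";"
+%% where "," was intended, which is why the invalid value
+%% "ISO-8859-1;utf-8;q=0.7,*;q=0.7" in parse_accept_charset_test_/0 below is
+%% parsed as three separate charsets.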
+ +conneg_weight(<< "1.000", R/bits >>, Acc, T) -> conneg_list_sep(R, [{T, 1000}|Acc]); +conneg_weight(<< "1.00", R/bits >>, Acc, T) -> conneg_list_sep(R, [{T, 1000}|Acc]); +conneg_weight(<< "1.0", R/bits >>, Acc, T) -> conneg_list_sep(R, [{T, 1000}|Acc]); +conneg_weight(<< "1.", R/bits >>, Acc, T) -> conneg_list_sep(R, [{T, 1000}|Acc]); +conneg_weight(<< "1", R/bits >>, Acc, T) -> conneg_list_sep(R, [{T, 1000}|Acc]); +conneg_weight(<< "0.", A, B, C, R/bits >>, Acc, T) when ?IS_DIGIT(A), ?IS_DIGIT(B), ?IS_DIGIT(C) -> + conneg_list_sep(R, [{T, (A - $0) * 100 + (B - $0) * 10 + (C - $0)}|Acc]); +conneg_weight(<< "0.", A, B, R/bits >>, Acc, T) when ?IS_DIGIT(A), ?IS_DIGIT(B) -> + conneg_list_sep(R, [{T, (A - $0) * 100 + (B - $0) * 10}|Acc]); +conneg_weight(<< "0.", A, R/bits >>, Acc, T) when ?IS_DIGIT(A) -> + conneg_list_sep(R, [{T, (A - $0) * 100}|Acc]); +conneg_weight(<< "0.", R/bits >>, Acc, T) -> conneg_list_sep(R, [{T, 0}|Acc]); +conneg_weight(<< "0", R/bits >>, Acc, T) -> conneg_list_sep(R, [{T, 0}|Acc]). + +conneg_list_sep(<<>>, Acc) -> lists:reverse(Acc); +conneg_list_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> conneg_list_sep(R, Acc); +conneg_list_sep(<< $,, R/bits >>, Acc) -> conneg_list(R, Acc). + +-ifdef(TEST). +accept_charset() -> + ?LET({C, W}, + {token(), weight()}, + {C, W, iolist_to_binary([C, case W of + undefined -> []; + _ -> [<<";q=">>, qvalue_to_iodata(W)] + end])} + ). + +prop_parse_accept_charset() -> + ?FORALL(L, + non_empty(list(accept_charset())), + begin + << _, AcceptCharset/binary >> = iolist_to_binary([[$,, A] || {_, _, A} <- L]), + ResL = parse_accept_charset(AcceptCharset), + CheckedL = [begin + ResC =:= ?LOWER(Ch) + andalso (ResW =:= W orelse (W =:= undefined andalso ResW =:= 1000)) + end || {{Ch, W, _}, {ResC, ResW}} <- lists:zip(L, ResL)], + [true] =:= lists:usort(CheckedL) + end). + +parse_accept_charset_test_() -> + Tests = [ + {<<"iso-8859-5, unicode-1-1;q=0.8">>, [ + {<<"iso-8859-5">>, 1000}, + {<<"unicode-1-1">>, 800} + ]}, + %% Some user agents send this invalid value for the Accept-Charset header + {<<"ISO-8859-1;utf-8;q=0.7,*;q=0.7">>, [ + {<<"iso-8859-1">>, 1000}, + {<<"utf-8">>, 700}, + {<<"*">>, 700} + ]} + ], + [{V, fun() -> R = parse_accept_charset(V) end} || {V, R} <- Tests]. + +parse_accept_charset_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_accept_charset(V)) end} || V <- Tests]. + +horse_parse_accept_charset() -> + horse:repeat(20000, + parse_accept_charset(<<"iso-8859-5, unicode-1-1;q=0.8">>) + ). +-endif. + +%% Accept-Encoding header. + +-spec parse_accept_encoding(binary()) -> [{binary(), qvalue()}]. +parse_accept_encoding(Encoding) -> + conneg_list(Encoding, []). + +-ifdef(TEST). +accept_encoding() -> + ?LET({E, W}, + {token(), weight()}, + {E, W, iolist_to_binary([E, case W of + undefined -> []; + _ -> [<<";q=">>, qvalue_to_iodata(W)] + end])} + ). + +%% @todo This property seems useless, see prop_accept_charset. +prop_parse_accept_encoding() -> + ?FORALL(L, + non_empty(list(accept_encoding())), + begin + << _, AcceptEncoding/binary >> = iolist_to_binary([[$,, A] || {_, _, A} <- L]), + ResL = parse_accept_encoding(AcceptEncoding), + CheckedL = [begin + ResE =:= ?LOWER(E) + andalso (ResW =:= W orelse (W =:= undefined andalso ResW =:= 1000)) + end || {{E, W, _}, {ResE, ResW}} <- lists:zip(L, ResL)], + [true] =:= lists:usort(CheckedL) + end). 
+ +parse_accept_encoding_test_() -> + Tests = [ + {<<>>, []}, + {<<"*">>, [{<<"*">>, 1000}]}, + {<<"compress, gzip">>, [ + {<<"compress">>, 1000}, + {<<"gzip">>, 1000} + ]}, + {<<"compress;q=0.5, gzip;q=1.0">>, [ + {<<"compress">>, 500}, + {<<"gzip">>, 1000} + ]}, + {<<"gzip;q=1.0, identity; q=0.5, *;q=0">>, [ + {<<"gzip">>, 1000}, + {<<"identity">>, 500}, + {<<"*">>, 0} + ]} + ], + [{V, fun() -> R = parse_accept_encoding(V) end} || {V, R} <- Tests]. + +horse_parse_accept_encoding() -> + horse:repeat(20000, + parse_accept_encoding(<<"gzip;q=1.0, identity; q=0.5, *;q=0">>) + ). +-endif. + +%% Accept-Language header. + +-spec parse_accept_language(binary()) -> [{binary(), qvalue()}]. +parse_accept_language(LanguageRange) -> + nonempty(language_range_list(LanguageRange, [])). + +language_range_list(<<>>, Acc) -> lists:reverse(Acc); +language_range_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> language_range_list(R, Acc); +language_range_list(<< $*, R/bits >>, Acc) -> language_range_param_sep(R, Acc, <<"*">>); +language_range_list(<< C, R/bits >>, Acc) when ?IS_ALPHA(C) -> + ?LOWER(language_range, R, Acc, 1, <<>>). + +language_range(<< $-, C, R/bits >>, Acc, _, T) when ?IS_ALPHANUM(C) -> + ?LOWER(language_range_sub, R, Acc, 1, << T/binary, $- >>); +language_range(<< C, R/bits >>, Acc, N, T) when ?IS_ALPHA(C), N < 8 -> + ?LOWER(language_range, R, Acc, N + 1, T); +language_range(R, Acc, _, T) -> language_range_param_sep(R, Acc, T). + +language_range_sub(<< $-, R/bits >>, Acc, _, T) -> language_range_sub(R, Acc, 0, << T/binary, $- >>); +language_range_sub(<< C, R/bits >>, Acc, N, T) when ?IS_ALPHANUM(C), N < 8 -> + ?LOWER(language_range_sub, R, Acc, N + 1, T); +language_range_sub(R, Acc, _, T) -> language_range_param_sep(R, Acc, T). + +language_range_param_sep(<<>>, Acc, T) -> lists:reverse([{T, 1000}|Acc]); +language_range_param_sep(<< $,, R/bits >>, Acc, T) -> language_range_list(R, [{T, 1000}|Acc]); +language_range_param_sep(<< $;, R/bits >>, Acc, T) -> language_range_before_weight(R, Acc, T); +language_range_param_sep(<< C, R/bits >>, Acc, T) when ?IS_WS(C) -> language_range_param_sep(R, Acc, T). + +language_range_before_weight(<< C, R/bits >>, Acc, T) when ?IS_WS(C) -> language_range_before_weight(R, Acc, T); +language_range_before_weight(<< $q, $=, R/bits >>, Acc, T) -> language_range_weight(R, Acc, T); +%% Special clause for broken user agents that confuse ; and , separators. +language_range_before_weight(<< C, R/bits >>, Acc, T) when ?IS_ALPHA(C) -> + ?LOWER(language_range, R, [{T, 1000}|Acc], 1, <<>>). 
+ +language_range_weight(<< "1.000", R/bits >>, Acc, T) -> language_range_list_sep(R, [{T, 1000}|Acc]); +language_range_weight(<< "1.00", R/bits >>, Acc, T) -> language_range_list_sep(R, [{T, 1000}|Acc]); +language_range_weight(<< "1.0", R/bits >>, Acc, T) -> language_range_list_sep(R, [{T, 1000}|Acc]); +language_range_weight(<< "1.", R/bits >>, Acc, T) -> language_range_list_sep(R, [{T, 1000}|Acc]); +language_range_weight(<< "1", R/bits >>, Acc, T) -> language_range_list_sep(R, [{T, 1000}|Acc]); +language_range_weight(<< "0.", A, B, C, R/bits >>, Acc, T) when ?IS_DIGIT(A), ?IS_DIGIT(B), ?IS_DIGIT(C) -> + language_range_list_sep(R, [{T, (A - $0) * 100 + (B - $0) * 10 + (C - $0)}|Acc]); +language_range_weight(<< "0.", A, B, R/bits >>, Acc, T) when ?IS_DIGIT(A), ?IS_DIGIT(B) -> + language_range_list_sep(R, [{T, (A - $0) * 100 + (B - $0) * 10}|Acc]); +language_range_weight(<< "0.", A, R/bits >>, Acc, T) when ?IS_DIGIT(A) -> + language_range_list_sep(R, [{T, (A - $0) * 100}|Acc]); +language_range_weight(<< "0.", R/bits >>, Acc, T) -> language_range_list_sep(R, [{T, 0}|Acc]); +language_range_weight(<< "0", R/bits >>, Acc, T) -> language_range_list_sep(R, [{T, 0}|Acc]). + +language_range_list_sep(<<>>, Acc) -> lists:reverse(Acc); +language_range_list_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> language_range_list_sep(R, Acc); +language_range_list_sep(<< $,, R/bits >>, Acc) -> language_range_list(R, Acc). + +-ifdef(TEST). +language_range_tag() -> + vector(1, 8, alpha()). + +language_range_subtag() -> + [$-, vector(1, 8, alphanum())]. + +language_range() -> + [language_range_tag(), small_list(language_range_subtag())]. + +accept_language() -> + ?LET({R, W}, + {language_range(), weight()}, + {iolist_to_binary(R), W, iolist_to_binary([R, case W of + undefined -> []; + _ -> [<<";q=">>, qvalue_to_iodata(W)] + end])} + ). + +prop_parse_accept_language() -> + ?FORALL(L, + non_empty(list(accept_language())), + begin + << _, AcceptLanguage/binary >> = iolist_to_binary([[$,, A] || {_, _, A} <- L]), + ResL = parse_accept_language(AcceptLanguage), + CheckedL = [begin + ResR =:= ?LOWER(R) + andalso (ResW =:= W orelse (W =:= undefined andalso ResW =:= 1000)) + end || {{R, W, _}, {ResR, ResW}} <- lists:zip(L, ResL)], + [true] =:= lists:usort(CheckedL) + end). + +parse_accept_language_test_() -> + Tests = [ + {<<"da, en-gb;q=0.8, en;q=0.7">>, [ + {<<"da">>, 1000}, + {<<"en-gb">>, 800}, + {<<"en">>, 700} + ]}, + {<<"en, en-US, en-cockney, i-cherokee, x-pig-latin, es-419">>, [ + {<<"en">>, 1000}, + {<<"en-us">>, 1000}, + {<<"en-cockney">>, 1000}, + {<<"i-cherokee">>, 1000}, + {<<"x-pig-latin">>, 1000}, + {<<"es-419">>, 1000} + ]} + ], + [{V, fun() -> R = parse_accept_language(V) end} || {V, R} <- Tests]. + +parse_accept_language_error_test_() -> + Tests = [ + <<>>, + <<"loooooong">>, + <<"en-us-loooooong">>, + <<"419-en-us">> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_accept_language(V)) end} || V <- Tests]. + +horse_parse_accept_language() -> + horse:repeat(20000, + parse_accept_language(<<"da, en-gb;q=0.8, en;q=0.7">>) + ). +-endif. + +%% Accept-Ranges header. + +-spec parse_accept_ranges(binary()) -> [binary()]. +parse_accept_ranges(<<"none">>) -> []; +parse_accept_ranges(<<"bytes">>) -> [<<"bytes">>]; +parse_accept_ranges(AcceptRanges) -> + nonempty(token_ci_list(AcceptRanges, [])). + +-ifdef(TEST). 
+parse_accept_ranges_test_() -> + Tests = [ + {<<"bytes">>, [<<"bytes">>]}, + {<<"none">>, []}, + {<<"bytes, pages, kilos">>, [<<"bytes">>, <<"pages">>, <<"kilos">>]} + ], + [{V, fun() -> R = parse_accept_ranges(V) end} || {V, R} <- Tests]. + +parse_accept_ranges_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_accept_ranges(V)) end} || V <- Tests]. + +horse_parse_accept_ranges_none() -> + horse:repeat(200000, + parse_accept_ranges(<<"none">>) + ). + +horse_parse_accept_ranges_bytes() -> + horse:repeat(200000, + parse_accept_ranges(<<"bytes">>) + ). + +horse_parse_accept_ranges_other() -> + horse:repeat(200000, + parse_accept_ranges(<<"bytes, pages, kilos">>) + ). +-endif. + +%% Access-Control-Allow-Credentials header. + +-spec access_control_allow_credentials() -> iodata(). +access_control_allow_credentials() -> <<"true">>. + +%% Access-Control-Allow-Headers header. + +-spec access_control_allow_headers([binary()]) -> iodata(). +access_control_allow_headers(Headers) -> + join_token_list(nonempty(Headers)). + +-ifdef(TEST). +access_control_allow_headers_test_() -> + Tests = [ + {[<<"accept">>], <<"accept">>}, + {[<<"accept">>, <<"authorization">>, <<"content-type">>], <<"accept, authorization, content-type">>} + ], + [{lists:flatten(io_lib:format("~p", [V])), + fun() -> R = iolist_to_binary(access_control_allow_headers(V)) end} || {V, R} <- Tests]. + +access_control_allow_headers_error_test_() -> + Tests = [ + [] + ], + [{lists:flatten(io_lib:format("~p", [V])), + fun() -> {'EXIT', _} = (catch access_control_allow_headers(V)) end} || V <- Tests]. + +horse_access_control_allow_headers() -> + horse:repeat(200000, + access_control_allow_headers([<<"accept">>, <<"authorization">>, <<"content-type">>]) + ). +-endif. + +%% Access-Control-Allow-Methods header. + +-spec access_control_allow_methods([binary()]) -> iodata(). +access_control_allow_methods(Methods) -> + join_token_list(nonempty(Methods)). + +-ifdef(TEST). +access_control_allow_methods_test_() -> + Tests = [ + {[<<"GET">>], <<"GET">>}, + {[<<"GET">>, <<"POST">>, <<"DELETE">>], <<"GET, POST, DELETE">>} + ], + [{lists:flatten(io_lib:format("~p", [V])), + fun() -> R = iolist_to_binary(access_control_allow_methods(V)) end} || {V, R} <- Tests]. + +access_control_allow_methods_error_test_() -> + Tests = [ + [] + ], + [{lists:flatten(io_lib:format("~p", [V])), + fun() -> {'EXIT', _} = (catch access_control_allow_methods(V)) end} || V <- Tests]. + +horse_access_control_allow_methods() -> + horse:repeat(200000, + access_control_allow_methods([<<"GET">>, <<"POST">>, <<"DELETE">>]) + ). +-endif. + +%% Access-Control-Allow-Origin header. + +-spec access_control_allow_origin({binary(), binary(), 0..65535} | reference() | '*') -> iodata(). +access_control_allow_origin({Scheme, Host, Port}) -> + case default_port(Scheme) of + Port -> [Scheme, <<"://">>, Host]; + _ -> [Scheme, <<"://">>, Host, <<":">>, integer_to_binary(Port)] + end; +access_control_allow_origin('*') -> <<$*>>; +access_control_allow_origin(Ref) when is_reference(Ref) -> <<"null">>. + +-ifdef(TEST). 
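+
+%% The tests below illustrate access_control_allow_origin/1: the port is left
+%% out when it matches default_port/1 for the scheme, a reference renders as
+%% the "null" origin, and the atom '*' renders as a literal "*".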
+access_control_allow_origin_test_() -> + Tests = [ + {{<<"http">>, <<"www.example.org">>, 8080}, <<"http://www.example.org:8080">>}, + {{<<"http">>, <<"www.example.org">>, 80}, <<"http://www.example.org">>}, + {{<<"http">>, <<"192.0.2.1">>, 8080}, <<"http://192.0.2.1:8080">>}, + {{<<"http">>, <<"192.0.2.1">>, 80}, <<"http://192.0.2.1">>}, + {{<<"http">>, <<"[2001:db8::1]">>, 8080}, <<"http://[2001:db8::1]:8080">>}, + {{<<"http">>, <<"[2001:db8::1]">>, 80}, <<"http://[2001:db8::1]">>}, + {{<<"http">>, <<"[::ffff:192.0.2.1]">>, 8080}, <<"http://[::ffff:192.0.2.1]:8080">>}, + {{<<"http">>, <<"[::ffff:192.0.2.1]">>, 80}, <<"http://[::ffff:192.0.2.1]">>}, + {make_ref(), <<"null">>}, + {'*', <<$*>>} + ], + [{lists:flatten(io_lib:format("~p", [V])), + fun() -> R = iolist_to_binary(access_control_allow_origin(V)) end} || {V, R} <- Tests]. + +horse_access_control_allow_origin() -> + horse:repeat(200000, + access_control_allow_origin({<<"http">>, <<"example.org">>, 8080}) + ). +-endif. + +%% Access-Control-Expose-Headers header. + +-spec access_control_expose_headers([binary()]) -> iodata(). +access_control_expose_headers(Headers) -> + join_token_list(nonempty(Headers)). + +-ifdef(TEST). +access_control_expose_headers_test_() -> + Tests = [ + {[<<"accept">>], <<"accept">>}, + {[<<"accept">>, <<"authorization">>, <<"content-type">>], <<"accept, authorization, content-type">>} + ], + [{lists:flatten(io_lib:format("~p", [V])), + fun() -> R = iolist_to_binary(access_control_expose_headers(V)) end} || {V, R} <- Tests]. + +access_control_expose_headers_error_test_() -> + Tests = [ + [] + ], + [{lists:flatten(io_lib:format("~p", [V])), + fun() -> {'EXIT', _} = (catch access_control_expose_headers(V)) end} || V <- Tests]. + +horse_access_control_expose_headers() -> + horse:repeat(200000, + access_control_expose_headers([<<"accept">>, <<"authorization">>, <<"content-type">>]) + ). +-endif. + +%% Access-Control-Max-Age header. + +-spec access_control_max_age(non_neg_integer()) -> iodata(). +access_control_max_age(MaxAge) -> integer_to_binary(MaxAge). + +-ifdef(TEST). +access_control_max_age_test_() -> + Tests = [ + {0, <<"0">>}, + {42, <<"42">>}, + {69, <<"69">>}, + {1337, <<"1337">>}, + {3495, <<"3495">>}, + {1234567890, <<"1234567890">>} + ], + [{V, fun() -> R = access_control_max_age(V) end} || {V, R} <- Tests]. +-endif. + +%% Access-Control-Request-Headers header. + +-spec parse_access_control_request_headers(binary()) -> [binary()]. +parse_access_control_request_headers(Headers) -> + token_ci_list(Headers, []). + +-ifdef(TEST). +headers() -> + ?LET(L, + list({ows(), ows(), token()}), + case L of + [] -> {[], <<>>}; + _ -> + << _, Headers/binary >> = iolist_to_binary([[OWS1, $,, OWS2, M] || {OWS1, OWS2, M} <- L]), + {[?LOWER(M) || {_, _, M} <- L], Headers} + end). + +prop_parse_access_control_request_headers() -> + ?FORALL({L, Headers}, + headers(), + L =:= parse_access_control_request_headers(Headers)). + +parse_access_control_request_headers_test_() -> + Tests = [ + {<<>>, []}, + {<<"Content-Type">>, [<<"content-type">>]}, + {<<"accept, authorization, content-type">>, [<<"accept">>, <<"authorization">>, <<"content-type">>]}, + {<<"accept,, , authorization,content-type">>, [<<"accept">>, <<"authorization">>, <<"content-type">>]} + ], + [{V, fun() -> R = parse_access_control_request_headers(V) end} || {V, R} <- Tests]. + +horse_parse_access_control_request_headers() -> + horse:repeat(200000, + parse_access_control_request_headers(<<"accept, authorization, content-type">>) + ). +-endif. 
+
+%% Access-Control-Request-Method header.
+
+-spec parse_access_control_request_method(binary()) -> binary().
+parse_access_control_request_method(Method) ->
+ true = <<>> =/= Method,
+ ok = validate_token(Method),
+ Method.
+
+validate_token(<< C, R/bits >>) when ?IS_TOKEN(C) -> validate_token(R);
+validate_token(<<>>) -> ok.
+
+-ifdef(TEST).
+parse_access_control_request_method_test_() ->
+ Tests = [
+ <<"GET">>,
+ <<"HEAD">>,
+ <<"POST">>,
+ <<"PUT">>,
+ <<"DELETE">>,
+ <<"TRACE">>,
+ <<"CONNECT">>,
+ <<"whatever">>
+ ],
+ [{V, fun() -> V = parse_access_control_request_method(V) end} || V <- Tests].
+
+parse_access_control_request_method_error_test_() ->
+ Tests = [
+ <<>>
+ ],
+ [{V, fun() -> {'EXIT', _} = (catch parse_access_control_request_method(V)) end} || V <- Tests].
+
+horse_parse_access_control_request_method() ->
+ horse:repeat(200000,
+ parse_access_control_request_method(<<"POST">>)
+ ).
+-endif.
+
+%% Age header.
+
+-spec parse_age(binary()) -> non_neg_integer().
+parse_age(Age) ->
+ I = binary_to_integer(Age),
+ true = I >= 0,
+ I.
+
+-ifdef(TEST).
+parse_age_test_() ->
+ Tests = [
+ {<<"0">>, 0},
+ {<<"42">>, 42},
+ {<<"69">>, 69},
+ {<<"1337">>, 1337},
+ {<<"3495">>, 3495},
+ {<<"1234567890">>, 1234567890}
+ ],
+ [{V, fun() -> R = parse_age(V) end} || {V, R} <- Tests].
+
+parse_age_error_test_() ->
+ Tests = [
+ <<>>,
+ <<"123, 123">>,
+ <<"4.17">>
+ ],
+ [{V, fun() -> {'EXIT', _} = (catch parse_age(V)) end} || V <- Tests].
+-endif.
+
+%% Allow header.
+
+-spec parse_allow(binary()) -> [binary()].
+parse_allow(Allow) ->
+ token_list(Allow, []).
+
+-ifdef(TEST).
+allow() ->
+ ?LET(L,
+ list({ows(), ows(), token()}),
+ case L of
+ [] -> {[], <<>>};
+ _ ->
+ << _, Allow/binary >> = iolist_to_binary([[OWS1, $,, OWS2, M] || {OWS1, OWS2, M} <- L]),
+ {[M || {_, _, M} <- L], Allow}
+ end).
+
+prop_parse_allow() ->
+ ?FORALL({L, Allow},
+ allow(),
+ L =:= parse_allow(Allow)).
+
+parse_allow_test_() ->
+ Tests = [
+ {<<>>, []},
+ {<<"GET, HEAD, PUT">>, [<<"GET">>, <<"HEAD">>, <<"PUT">>]}
+ ],
+ [{V, fun() -> R = parse_allow(V) end} || {V, R} <- Tests].
+
+horse_parse_allow() ->
+ horse:repeat(200000,
+ parse_allow(<<"GET, HEAD, PUT">>)
+ ).
+-endif.
+
+%% Authorization header.
+%%
+%% We support Basic, Digest and Bearer schemes only.
+%%
+%% In the Digest case we do not validate that the mandatory
+%% fields are present. When parsing auth-params, we do not
+%% accept BWS characters around the "=".
+
+-spec parse_authorization(binary())
+ -> {basic, binary(), binary()}
+ | {bearer, binary()}
+ | {digest, [{binary(), binary()}]}.
+parse_authorization(<< B, A, S, I, C, " ", R/bits >>)
+ when ((B =:= $B) or (B =:= $b)), ((A =:= $A) or (A =:= $a)),
+ ((S =:= $S) or (S =:= $s)), ((I =:= $I) or (I =:= $i)),
+ ((C =:= $C) or (C =:= $c)) ->
+ auth_basic(base64:decode(R), <<>>);
+parse_authorization(<< B, E1, A, R1, E2, R2, " ", R/bits >>)
+ when (R =/= <<>>), ((B =:= $B) or (B =:= $b)),
+ ((E1 =:= $E) or (E1 =:= $e)), ((A =:= $A) or (A =:= $a)),
+ ((R1 =:= $R) or (R1 =:= $r)), ((E2 =:= $E) or (E2 =:= $e)),
+ ((R2 =:= $R) or (R2 =:= $r)) ->
+ validate_auth_bearer(R),
+ {bearer, R};
+parse_authorization(<< D, I, G, E, S, T, " ", R/bits >>)
+ when ((D =:= $D) or (D =:= $d)), ((I =:= $I) or (I =:= $i)),
+ ((G =:= $G) or (G =:= $g)), ((E =:= $E) or (E =:= $e)),
+ ((S =:= $S) or (S =:= $s)), ((T =:= $T) or (T =:= $t)) ->
+ {digest, nonempty(auth_digest_list(R, []))}.
+
+auth_basic(<< $:, Password/bits >>, UserID) -> {basic, UserID, Password};
+auth_basic(<< C, R/bits >>, UserID) -> auth_basic(R, << UserID/binary, C >>).
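+
+%% Note: the auth-scheme is matched character by character in the clauses
+%% above, so "Basic", "bAsIc" and "BEARER" are all accepted. For example,
+%% parse_authorization(<<"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==">>) returns
+%% {basic, <<"Aladdin">>, <<"open sesame">>} (see parse_authorization_test_/0).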
+ +validate_auth_bearer(<< C, R/bits >>) when ?IS_TOKEN68(C) -> validate_auth_bearer(R); +validate_auth_bearer(<< $=, R/bits >>) -> validate_auth_bearer_eq(R); +validate_auth_bearer(<<>>) -> ok. + +validate_auth_bearer_eq(<< $=, R/bits >>) -> validate_auth_bearer_eq(R); +validate_auth_bearer_eq(<<>>) -> ok. + +auth_digest_list(<<>>, Acc) -> lists:reverse(Acc); +auth_digest_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> auth_digest_list(R, Acc); +auth_digest_list(<< "algorithm=", C, R/bits >>, Acc) when ?IS_TOKEN(C) -> auth_digest_token(R, Acc, <<"algorithm">>, << C >>); +auth_digest_list(<< "cnonce=\"", R/bits >>, Acc) -> auth_digest_quoted(R, Acc, <<"cnonce">>, <<>>); +auth_digest_list(<< "nc=", A, B, C, D, E, F, G, H, R/bits >>, Acc) + when ?IS_LHEX(A), ?IS_LHEX(B), ?IS_LHEX(C), ?IS_LHEX(D), + ?IS_LHEX(E), ?IS_LHEX(F), ?IS_LHEX(G), ?IS_LHEX(H) -> + auth_digest_list_sep(R, [{<<"nc">>, << A, B, C, D, E, F, G, H >>}|Acc]); +auth_digest_list(<< "nonce=\"", R/bits >>, Acc) -> auth_digest_quoted(R, Acc, <<"nonce">>, <<>>); +auth_digest_list(<< "opaque=\"", R/bits >>, Acc) -> auth_digest_quoted(R, Acc, <<"opaque">>, <<>>); +auth_digest_list(<< "qop=", C, R/bits >>, Acc) when ?IS_TOKEN(C) -> auth_digest_token(R, Acc, <<"qop">>, << C >>); +auth_digest_list(<< "realm=\"", R/bits >>, Acc) -> auth_digest_quoted(R, Acc, <<"realm">>, <<>>); +auth_digest_list(<< "response=\"", R/bits >>, Acc) -> auth_digest_quoted(R, Acc, <<"response">>, <<>>); +auth_digest_list(<< "uri=\"", R/bits >>, Acc) -> auth_digest_quoted(R, Acc, <<"uri">>, <<>>); +auth_digest_list(<< "username=\"", R/bits >>, Acc) -> auth_digest_quoted(R, Acc, <<"username">>, <<>>); +auth_digest_list(<< C, R/bits >>, Acc) when ?IS_TOKEN(C) -> + ?LOWER(auth_digest_param, R, Acc, <<>>). + +auth_digest_param(<< $=, $", R/bits >>, Acc, K) -> auth_digest_quoted(R, Acc, K, <<>>); +auth_digest_param(<< $=, C, R/bits >>, Acc, K) when ?IS_TOKEN(C) -> auth_digest_token(R, Acc, K, << C >>); +auth_digest_param(<< C, R/bits >>, Acc, K) when ?IS_TOKEN(C) -> + ?LOWER(auth_digest_param, R, Acc, K). + +auth_digest_token(<< C, R/bits >>, Acc, K, V) when ?IS_TOKEN(C) -> auth_digest_token(R, Acc, K, << V/binary, C >>); +auth_digest_token(R, Acc, K, V) -> auth_digest_list_sep(R, [{K, V}|Acc]). + +auth_digest_quoted(<< $", R/bits >>, Acc, K, V) -> auth_digest_list_sep(R, [{K, V}|Acc]); +auth_digest_quoted(<< $\\, C, R/bits >>, Acc, K, V) when ?IS_VCHAR_OBS(C) -> auth_digest_quoted(R, Acc, K, << V/binary, C >>); +auth_digest_quoted(<< C, R/bits >>, Acc, K, V) when ?IS_VCHAR_OBS(C) -> auth_digest_quoted(R, Acc, K, << V/binary, C >>). + +auth_digest_list_sep(<<>>, Acc) -> lists:reverse(Acc); +auth_digest_list_sep(<< $,, R/bits >>, Acc) -> auth_digest_list(R, Acc); +auth_digest_list_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> auth_digest_list_sep(R, Acc). + +-ifdef(TEST). 
+parse_authorization_test_() -> + Tests = [ + {<<"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==">>, {basic, <<"Aladdin">>, <<"open sesame">>}}, + {<<"bAsIc QWxhZGRpbjpvcGVuIHNlc2FtZQ==">>, {basic, <<"Aladdin">>, <<"open sesame">>}}, + {<<"Bearer mF_9.B5f-4.1JqM">>, {bearer, <<"mF_9.B5f-4.1JqM">>}}, + {<<"bEaRer mF_9.B5f-4.1JqM">>, {bearer, <<"mF_9.B5f-4.1JqM">>}}, + {<<"Digest username=\"Mufasa\"," + "realm=\"testrealm@host.com\"," + "nonce=\"dcd98b7102dd2f0e8b11d0f600bfb0c093\"," + "uri=\"/dir/index.html\"," + "qop=auth," + "nc=00000001," + "cnonce=\"0a4f113b\"," + "response=\"6629fae49393a05397450978507c4ef1\"," + "opaque=\"5ccc069c403ebaf9f0171e9517f40e41\"">>, + {digest, [ + {<<"username">>, <<"Mufasa">>}, + {<<"realm">>, <<"testrealm@host.com">>}, + {<<"nonce">>, <<"dcd98b7102dd2f0e8b11d0f600bfb0c093">>}, + {<<"uri">>, <<"/dir/index.html">>}, + {<<"qop">>, <<"auth">>}, + {<<"nc">>, <<"00000001">>}, + {<<"cnonce">>, <<"0a4f113b">>}, + {<<"response">>, <<"6629fae49393a05397450978507c4ef1">>}, + {<<"opaque">>, <<"5ccc069c403ebaf9f0171e9517f40e41">>}]}} + ], + [{V, fun() -> R = parse_authorization(V) end} || {V, R} <- Tests]. + +horse_parse_authorization_basic() -> + horse:repeat(20000, + parse_authorization(<<"Basic QWxhZGRpbjpvcGVuIHNlc2FtZQ==">>) + ). + +horse_parse_authorization_bearer() -> + horse:repeat(20000, + parse_authorization(<<"Bearer mF_9.B5f-4.1JqM">>) + ). + +horse_parse_authorization_digest() -> + horse:repeat(20000, + parse_authorization( + <<"Digest username=\"Mufasa\"," + "realm=\"testrealm@host.com\"," + "nonce=\"dcd98b7102dd2f0e8b11d0f600bfb0c093\"," + "uri=\"/dir/index.html\"," + "qop=auth," + "nc=00000001," + "cnonce=\"0a4f113b\"," + "response=\"6629fae49393a05397450978507c4ef1\"," + "opaque=\"5ccc069c403ebaf9f0171e9517f40e41\"">>) + ). +-endif. + +%% Cache-Control header. +%% +%% In the fields list case, we do not support escaping, which shouldn't be needed anyway. + +-spec parse_cache_control(binary()) + -> [binary() | {binary(), binary()} | {binary(), non_neg_integer()} | {binary(), [binary()]}]. +parse_cache_control(<<"no-cache">>) -> + [<<"no-cache">>]; +parse_cache_control(<<"max-age=0">>) -> + [{<<"max-age">>, 0}]; +parse_cache_control(CacheControl) -> + nonempty(cache_directive_list(CacheControl, [])). + +cache_directive_list(<<>>, Acc) -> lists:reverse(Acc); +cache_directive_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C)-> cache_directive_list(R, Acc); +cache_directive_list(<< C, R/bits >>, Acc) when ?IS_TOKEN(C) -> + ?LOWER(cache_directive, R, Acc, <<>>). + +cache_directive(<< $=, $", R/bits >>, Acc, T) + when (T =:= <<"no-cache">>) or (T =:= <<"private">>) -> + cache_directive_fields_list(R, Acc, T, []); +cache_directive(<< $=, C, R/bits >>, Acc, T) + when ?IS_DIGIT(C), (T =:= <<"max-age">>) or (T =:= <<"max-stale">>) + or (T =:= <<"min-fresh">>) or (T =:= <<"s-maxage">>) -> + cache_directive_delta(R, Acc, T, (C - $0)); +cache_directive(<< $=, $", R/bits >>, Acc, T) -> cache_directive_quoted_string(R, Acc, T, <<>>); +cache_directive(<< $=, C, R/bits >>, Acc, T) when ?IS_TOKEN(C) -> cache_directive_token(R, Acc, T, << C >>); +cache_directive(<< C, R/bits >>, Acc, T) when ?IS_TOKEN(C) -> + ?LOWER(cache_directive, R, Acc, T); +cache_directive(R, Acc, T) -> cache_directive_list_sep(R, [T|Acc]). + +cache_directive_delta(<< C, R/bits >>, Acc, K, V) when ?IS_DIGIT(C) -> cache_directive_delta(R, Acc, K, V * 10 + (C - $0)); +cache_directive_delta(R, Acc, K, V) -> cache_directive_list_sep(R, [{K, V}|Acc]). 
+ +cache_directive_fields_list(<< C, R/bits >>, Acc, K, L) when ?IS_WS_COMMA(C) -> cache_directive_fields_list(R, Acc, K, L); +cache_directive_fields_list(<< $", R/bits >>, Acc, K, L) -> cache_directive_list_sep(R, [{K, lists:reverse(L)}|Acc]); +cache_directive_fields_list(<< C, R/bits >>, Acc, K, L) when ?IS_TOKEN(C) -> + ?LOWER(cache_directive_field, R, Acc, K, L, <<>>). + +cache_directive_field(<< C, R/bits >>, Acc, K, L, F) when ?IS_TOKEN(C) -> + ?LOWER(cache_directive_field, R, Acc, K, L, F); +cache_directive_field(R, Acc, K, L, F) -> cache_directive_fields_list_sep(R, Acc, K, [F|L]). + +cache_directive_fields_list_sep(<< C, R/bits >>, Acc, K, L) when ?IS_WS(C) -> cache_directive_fields_list_sep(R, Acc, K, L); +cache_directive_fields_list_sep(<< $,, R/bits >>, Acc, K, L) -> cache_directive_fields_list(R, Acc, K, L); +cache_directive_fields_list_sep(<< $", R/bits >>, Acc, K, L) -> cache_directive_list_sep(R, [{K, lists:reverse(L)}|Acc]). + +cache_directive_token(<< C, R/bits >>, Acc, K, V) when ?IS_TOKEN(C) -> cache_directive_token(R, Acc, K, << V/binary, C >>); +cache_directive_token(R, Acc, K, V) -> cache_directive_list_sep(R, [{K, V}|Acc]). + +cache_directive_quoted_string(<< $", R/bits >>, Acc, K, V) -> cache_directive_list_sep(R, [{K, V}|Acc]); +cache_directive_quoted_string(<< $\\, C, R/bits >>, Acc, K, V) when ?IS_VCHAR_OBS(C) -> + cache_directive_quoted_string(R, Acc, K, << V/binary, C >>); +cache_directive_quoted_string(<< C, R/bits >>, Acc, K, V) when ?IS_VCHAR_OBS(C) -> + cache_directive_quoted_string(R, Acc, K, << V/binary, C >>). + +cache_directive_list_sep(<<>>, Acc) -> lists:reverse(Acc); +cache_directive_list_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> cache_directive_list_sep(R, Acc); +cache_directive_list_sep(<< $,, R/bits >>, Acc) -> cache_directive_list(R, Acc). + +-ifdef(TEST). +cache_directive_unreserved_token() -> + ?SUCHTHAT(T, + token(), + T =/= <<"max-age">> andalso T =/= <<"max-stale">> andalso T =/= <<"min-fresh">> + andalso T =/= <<"s-maxage">> andalso T =/= <<"no-cache">> andalso T =/= <<"private">>). + +cache_directive() -> + oneof([ + token(), + {cache_directive_unreserved_token(), token()}, + {cache_directive_unreserved_token(), quoted_string()}, + {elements([<<"max-age">>, <<"max-stale">>, <<"min-fresh">>, <<"s-maxage">>]), non_neg_integer()}, + {fields, elements([<<"no-cache">>, <<"private">>]), small_list(token())} + ]). + +cache_control() -> + ?LET(L, + non_empty(list(cache_directive())), + begin + << _, CacheControl/binary >> = iolist_to_binary([[$,, + case C of + {fields, K, V} -> [K, $=, $", [[F, $,] || F <- V], $"]; + {K, V} when is_integer(V) -> [K, $=, integer_to_binary(V)]; + {K, V} -> [K, $=, V]; + K -> K + end] || C <- L]), + {L, CacheControl} + end). + +prop_parse_cache_control() -> + ?FORALL({L, CacheControl}, + cache_control(), + begin + ResL = parse_cache_control(CacheControl), + CheckedL = [begin + ExpectedCc = case Cc of + {fields, K, V} -> {?LOWER(K), [?LOWER(F) || F <- V]}; + {K, V} -> {?LOWER(K), unquote(V)}; + K -> ?LOWER(K) + end, + ExpectedCc =:= ResCc + end || {Cc, ResCc} <- lists:zip(L, ResL)], + [true] =:= lists:usort(CheckedL) + end). 
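+
+%% The property above covers plain directives, token and quoted-string
+%% arguments, delta-seconds directives and the no-cache/private fields-list
+%% form; for example private="Content-Type" parses to
+%% {<<"private">>, [<<"content-type">>]} and max-age=30 to {<<"max-age">>, 30},
+%% as also shown in parse_cache_control_test_/0 below.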
+ +parse_cache_control_test_() -> + Tests = [ + {<<"no-cache">>, [<<"no-cache">>]}, + {<<"no-store">>, [<<"no-store">>]}, + {<<"max-age=0">>, [{<<"max-age">>, 0}]}, + {<<"max-age=30">>, [{<<"max-age">>, 30}]}, + {<<"private, community=\"UCI\"">>, [<<"private">>, {<<"community">>, <<"UCI">>}]}, + {<<"private=\"Content-Type, Content-Encoding, Content-Language\"">>, + [{<<"private">>, [<<"content-type">>, <<"content-encoding">>, <<"content-language">>]}]} + ], + [{V, fun() -> R = parse_cache_control(V) end} || {V, R} <- Tests]. + +parse_cache_control_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_cache_control(V)) end} || V <- Tests]. + +horse_parse_cache_control_no_cache() -> + horse:repeat(200000, + parse_cache_control(<<"no-cache">>) + ). + +horse_parse_cache_control_max_age_0() -> + horse:repeat(200000, + parse_cache_control(<<"max-age=0">>) + ). + +horse_parse_cache_control_max_age_30() -> + horse:repeat(200000, + parse_cache_control(<<"max-age=30">>) + ). + +horse_parse_cache_control_custom() -> + horse:repeat(200000, + parse_cache_control(<<"private, community=\"UCI\"">>) + ). + +horse_parse_cache_control_fields() -> + horse:repeat(200000, + parse_cache_control(<<"private=\"Content-Type, Content-Encoding, Content-Language\"">>) + ). +-endif. + +%% Connection header. + +-spec parse_connection(binary()) -> [binary()]. +parse_connection(<<"close">>) -> + [<<"close">>]; +parse_connection(<<"keep-alive">>) -> + [<<"keep-alive">>]; +parse_connection(Connection) -> + nonempty(token_ci_list(Connection, [])). + +-ifdef(TEST). +prop_parse_connection() -> + ?FORALL(L, + non_empty(list(token())), + begin + << _, Connection/binary >> = iolist_to_binary([[$,, C] || C <- L]), + ResL = parse_connection(Connection), + CheckedL = [?LOWER(Co) =:= ResC || {Co, ResC} <- lists:zip(L, ResL)], + [true] =:= lists:usort(CheckedL) + end). + +parse_connection_test_() -> + Tests = [ + {<<"close">>, [<<"close">>]}, + {<<"ClOsE">>, [<<"close">>]}, + {<<"Keep-Alive">>, [<<"keep-alive">>]}, + {<<"keep-alive, Upgrade">>, [<<"keep-alive">>, <<"upgrade">>]} + ], + [{V, fun() -> R = parse_connection(V) end} || {V, R} <- Tests]. + +parse_connection_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_connection(V)) end} || V <- Tests]. + +horse_parse_connection_close() -> + horse:repeat(200000, + parse_connection(<<"close">>) + ). + +horse_parse_connection_keepalive() -> + horse:repeat(200000, + parse_connection(<<"keep-alive">>) + ). + +horse_parse_connection_keepalive_upgrade() -> + horse:repeat(200000, + parse_connection(<<"keep-alive, upgrade">>) + ). +-endif. + +%% Content-Encoding header. + +-spec parse_content_encoding(binary()) -> [binary()]. +parse_content_encoding(ContentEncoding) -> + nonempty(token_ci_list(ContentEncoding, [])). + +-ifdef(TEST). +parse_content_encoding_test_() -> + Tests = [ + {<<"gzip">>, [<<"gzip">>]} + ], + [{V, fun() -> R = parse_content_encoding(V) end} || {V, R} <- Tests]. + +parse_content_encoding_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_content_encoding(V)) end} || V <- Tests]. + +horse_parse_content_encoding() -> + horse:repeat(200000, + parse_content_encoding(<<"gzip">>) + ). +-endif. + +%% Content-Language header. +%% +%% We do not support irregular deprecated tags that do not match the ABNF. + +-spec parse_content_language(binary()) -> [binary()]. +parse_content_language(ContentLanguage) -> + nonempty(langtag_list(ContentLanguage, [])). 
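+
+%% Note: language tags are lowercased while being parsed, so "zh-Hant" becomes
+%% <<"zh-hant">> and "MN-cYRL-mn" becomes <<"mn-cyrl-mn">>; tags that do not
+%% match the ABNF, including the irregular deprecated forms mentioned above,
+%% are rejected.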
+ +langtag_list(<<>>, Acc) -> lists:reverse(Acc); +langtag_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> langtag_list(R, Acc); +langtag_list(<< A, B, C, R/bits >>, Acc) when ?IS_ALPHA(A), ?IS_ALPHA(B), ?IS_ALPHA(C) -> + langtag_extlang(R, Acc, << ?LC(A), ?LC(B), ?LC(C) >>, 0); +langtag_list(<< A, B, R/bits >>, Acc) when ?IS_ALPHA(A), ?IS_ALPHA(B) -> + langtag_extlang(R, Acc, << ?LC(A), ?LC(B) >>, 0); +langtag_list(<< X, R/bits >>, Acc) when X =:= $x; X =:= $X -> langtag_privateuse_sub(R, Acc, << $x >>, 0). + +langtag_extlang(<< $-, A, B, C, D, E, F, G, H, R/bits >>, Acc, T, _) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G), ?IS_ALPHANUM(H) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G), ?LC(H) >>); +langtag_extlang(<< $-, A, B, C, D, E, F, G, R/bits >>, Acc, T, _) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G) >>); +langtag_extlang(<< $-, A, B, C, D, E, F, R/bits >>, Acc, T, _) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F) >>); +langtag_extlang(<< $-, A, B, C, D, E, R/bits >>, Acc, T, _) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), ?IS_ALPHANUM(E) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E) >>); +langtag_extlang(<< $-, A, B, C, D, R/bits >>, Acc, T, _) + when ?IS_ALPHA(A), ?IS_ALPHA(B), ?IS_ALPHA(C), ?IS_ALPHA(D) -> + langtag_region(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D) >>); +langtag_extlang(<< $-, A, B, C, R/bits >>, Acc, T, N) + when ?IS_ALPHA(A), ?IS_ALPHA(B), ?IS_ALPHA(C) -> + case N of + 2 -> langtag_script(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C) >>); + _ -> langtag_extlang(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C) >>, N + 1) + end; +langtag_extlang(R, Acc, T, _) -> langtag_region(R, Acc, T). 
+ +langtag_script(<< $-, A, B, C, D, E, F, G, H, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G), ?IS_ALPHANUM(H) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G), ?LC(H) >>); +langtag_script(<< $-, A, B, C, D, E, F, G, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G) >>); +langtag_script(<< $-, A, B, C, D, E, F, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F) >>); +langtag_script(<< $-, A, B, C, D, E, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), ?IS_ALPHANUM(E) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E) >>); +langtag_script(<< $-, A, B, C, D, R/bits >>, Acc, T) + when ?IS_ALPHA(A), ?IS_ALPHA(B), ?IS_ALPHA(C), ?IS_ALPHA(D) -> + langtag_region(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D) >>); +langtag_script(R, Acc, T) -> + langtag_region(R, Acc, T). + +langtag_region(<< $-, A, B, C, D, E, F, G, H, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G), ?IS_ALPHANUM(H) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G), ?LC(H) >>); +langtag_region(<< $-, A, B, C, D, E, F, G, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G) >>); +langtag_region(<< $-, A, B, C, D, E, F, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F) >>); +langtag_region(<< $-, A, B, C, D, E, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), ?IS_ALPHANUM(E) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E) >>); +langtag_region(<< $-, A, B, C, D, R/bits >>, Acc, T) + when ?IS_DIGIT(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D) -> + langtag_variant(R, Acc, << T/binary, $-, A, ?LC(B), ?LC(C), ?LC(D) >>); +langtag_region(<< $-, A, B, R/bits >>, Acc, T) when ?IS_ALPHA(A), ?IS_ALPHA(B) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B) >>); +langtag_region(<< $-, A, B, C, R/bits >>, Acc, T) when ?IS_DIGIT(A), ?IS_DIGIT(B), ?IS_DIGIT(C) -> + langtag_variant(R, Acc, << T/binary, $-, A, B, C >>); +langtag_region(R, Acc, T) -> + langtag_variant(R, Acc, T). 
+ +langtag_variant(<< $-, A, B, C, D, E, F, G, H, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G), ?IS_ALPHANUM(H) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G), ?LC(H) >>); +langtag_variant(<< $-, A, B, C, D, E, F, G, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G) >>); +langtag_variant(<< $-, A, B, C, D, E, F, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F) >>); +langtag_variant(<< $-, A, B, C, D, E, R/bits >>, Acc, T) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), ?IS_ALPHANUM(E) -> + langtag_variant(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E) >>); +langtag_variant(<< $-, A, B, C, D, R/bits >>, Acc, T) + when ?IS_DIGIT(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D) -> + langtag_variant(R, Acc, << T/binary, $-, A, ?LC(B), ?LC(C), ?LC(D) >>); +langtag_variant(R, Acc, T) -> + langtag_extension(R, Acc, T). + +langtag_extension(<< $-, X, R/bits >>, Acc, T) when X =:= $x; X =:= $X -> langtag_privateuse_sub(R, Acc, << T/binary, $-, $x >>, 0); +langtag_extension(<< $-, S, R/bits >>, Acc, T) when ?IS_ALPHANUM(S) -> langtag_extension_sub(R, Acc, << T/binary, $-, ?LC(S) >>, 0); +langtag_extension(R, Acc, T) -> langtag_list_sep(R, [T|Acc]). + +langtag_extension_sub(<< $-, A, B, C, D, E, F, G, H, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G), ?IS_ALPHANUM(H) -> + langtag_extension_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G), ?LC(H) >>, N + 1); +langtag_extension_sub(<< $-, A, B, C, D, E, F, G, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G) -> + langtag_extension_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G) >>, N + 1); +langtag_extension_sub(<< $-, A, B, C, D, E, F, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F) -> + langtag_extension_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F) >>, N + 1); +langtag_extension_sub(<< $-, A, B, C, D, E, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), ?IS_ALPHANUM(E) -> + langtag_extension_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E) >>, N + 1); +langtag_extension_sub(<< $-, A, B, C, D, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D) -> + langtag_extension_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D) >>, N + 1); +langtag_extension_sub(<< $-, A, B, C, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C) -> + langtag_extension_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C) >>, N + 1); +langtag_extension_sub(<< $-, A, B, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B) -> + langtag_extension_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B) 
>>, N + 1); +langtag_extension_sub(R, Acc, T, N) when N > 0 -> + langtag_extension(R, Acc, T). + +langtag_privateuse_sub(<< $-, A, B, C, D, E, F, G, H, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G), ?IS_ALPHANUM(H) -> + langtag_privateuse_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G), ?LC(H) >>, N + 1); +langtag_privateuse_sub(<< $-, A, B, C, D, E, F, G, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F), ?IS_ALPHANUM(G) -> + langtag_privateuse_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F), ?LC(G) >>, N + 1); +langtag_privateuse_sub(<< $-, A, B, C, D, E, F, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), + ?IS_ALPHANUM(E), ?IS_ALPHANUM(F) -> + langtag_privateuse_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E), ?LC(F) >>, N + 1); +langtag_privateuse_sub(<< $-, A, B, C, D, E, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D), ?IS_ALPHANUM(E) -> + langtag_privateuse_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D), ?LC(E) >>, N + 1); +langtag_privateuse_sub(<< $-, A, B, C, D, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C), ?IS_ALPHANUM(D) -> + langtag_privateuse_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C), ?LC(D) >>, N + 1); +langtag_privateuse_sub(<< $-, A, B, C, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B), ?IS_ALPHANUM(C) -> + langtag_privateuse_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B), ?LC(C) >>, N + 1); +langtag_privateuse_sub(<< $-, A, B, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A), ?IS_ALPHANUM(B) -> + langtag_privateuse_sub(R, Acc, << T/binary, $-, ?LC(A), ?LC(B) >>, N + 1); +langtag_privateuse_sub(<< $-, A, R/bits >>, Acc, T, N) + when ?IS_ALPHANUM(A) -> + langtag_privateuse_sub(R, Acc, << T/binary, $-, ?LC(A) >>, N + 1); +langtag_privateuse_sub(R, Acc, T, N) when N > 0 -> langtag_list_sep(R, [T|Acc]). + +langtag_list_sep(<<>>, Acc) -> lists:reverse(Acc); +langtag_list_sep(<< $,, R/bits >>, Acc) -> langtag_list(R, Acc); +langtag_list_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> langtag_list_sep(R, Acc). + +-ifdef(TEST). +langtag_language() -> vector(2, 3, alpha()). +langtag_extlang() -> vector(0, 3, [$-, alpha(), alpha(), alpha()]). +langtag_script() -> oneof([[], [$-, alpha(), alpha(), alpha(), alpha()]]). +langtag_region() -> oneof([[], [$-, alpha(), alpha()], [$-, digit(), digit(), digit()]]). + +langtag_variant() -> + small_list(frequency([ + {4, [$-, vector(5, 8, alphanum())]}, + {1, [$-, digit(), alphanum(), alphanum(), alphanum()]} + ])). + +langtag_extension() -> + small_list([$-, ?SUCHTHAT(S, alphanum(), S =/= $x andalso S =/= $X), + small_non_empty_list([$-, vector(2, 8, alphanum())]) + ]). + +langtag_privateuse() -> oneof([[], [$-, langtag_privateuse_nodash()]]). +langtag_privateuse_nodash() -> [elements([$x, $X]), small_non_empty_list([$-, vector(1, 8, alphanum())])]. +private_language_tag() -> ?LET(T, langtag_privateuse_nodash(), iolist_to_binary(T)). + +language_tag() -> + ?LET(IoList, + [langtag_language(), langtag_extlang(), langtag_script(), langtag_region(), + langtag_variant(), langtag_extension(), langtag_privateuse()], + iolist_to_binary(IoList)). 
+ +content_language() -> + ?LET(L, + non_empty(list(frequency([ + {90, language_tag()}, + {10, private_language_tag()} + ]))), + begin + << _, ContentLanguage/binary >> = iolist_to_binary([[$,, T] || T <- L]), + {L, ContentLanguage} + end). + +prop_parse_content_language() -> + ?FORALL({L, ContentLanguage}, + content_language(), + begin + ResL = parse_content_language(ContentLanguage), + CheckedL = [?LOWER(T) =:= ResT || {T, ResT} <- lists:zip(L, ResL)], + [true] =:= lists:usort(CheckedL) + end). + +parse_content_language_test_() -> + Tests = [ + {<<"de">>, [<<"de">>]}, + {<<"fr">>, [<<"fr">>]}, + {<<"ja">>, [<<"ja">>]}, + {<<"zh-Hant">>, [<<"zh-hant">>]}, + {<<"zh-Hans">>, [<<"zh-hans">>]}, + {<<"sr-Cyrl">>, [<<"sr-cyrl">>]}, + {<<"sr-Latn">>, [<<"sr-latn">>]}, + {<<"zh-cmn-Hans-CN">>, [<<"zh-cmn-hans-cn">>]}, + {<<"cmn-Hans-CN">>, [<<"cmn-hans-cn">>]}, + {<<"zh-yue-HK">>, [<<"zh-yue-hk">>]}, + {<<"yue-HK">>, [<<"yue-hk">>]}, + {<<"zh-Hans-CN">>, [<<"zh-hans-cn">>]}, + {<<"sr-Latn-RS">>, [<<"sr-latn-rs">>]}, + {<<"sl-rozaj">>, [<<"sl-rozaj">>]}, + {<<"sl-rozaj-biske">>, [<<"sl-rozaj-biske">>]}, + {<<"sl-nedis">>, [<<"sl-nedis">>]}, + {<<"de-CH-1901">>, [<<"de-ch-1901">>]}, + {<<"sl-IT-nedis">>, [<<"sl-it-nedis">>]}, + {<<"hy-Latn-IT-arevela">>, [<<"hy-latn-it-arevela">>]}, + {<<"de-DE">>, [<<"de-de">>]}, + {<<"en-US">>, [<<"en-us">>]}, + {<<"es-419">>, [<<"es-419">>]}, + {<<"de-CH-x-phonebk">>, [<<"de-ch-x-phonebk">>]}, + {<<"az-Arab-x-AZE-derbend">>, [<<"az-arab-x-aze-derbend">>]}, + {<<"x-whatever">>, [<<"x-whatever">>]}, + {<<"qaa-Qaaa-QM-x-southern">>, [<<"qaa-qaaa-qm-x-southern">>]}, + {<<"de-Qaaa">>, [<<"de-qaaa">>]}, + {<<"sr-Latn-QM">>, [<<"sr-latn-qm">>]}, + {<<"sr-Qaaa-RS">>, [<<"sr-qaaa-rs">>]}, + {<<"en-US-u-islamcal">>, [<<"en-us-u-islamcal">>]}, + {<<"zh-CN-a-myext-x-private">>, [<<"zh-cn-a-myext-x-private">>]}, + {<<"en-a-myext-b-another">>, [<<"en-a-myext-b-another">>]}, + {<<"mn-Cyrl-MN">>, [<<"mn-cyrl-mn">>]}, + {<<"MN-cYRL-mn">>, [<<"mn-cyrl-mn">>]}, + {<<"mN-cYrL-Mn">>, [<<"mn-cyrl-mn">>]}, + {<<"az-Arab-IR">>, [<<"az-arab-ir">>]}, + {<<"zh-gan">>, [<<"zh-gan">>]}, + {<<"zh-yue">>, [<<"zh-yue">>]}, + {<<"zh-cmn">>, [<<"zh-cmn">>]}, + {<<"de-AT">>, [<<"de-at">>]}, + {<<"de-CH-1996">>, [<<"de-ch-1996">>]}, + {<<"en-Latn-GB-boont-r-extended-sequence-x-private">>, + [<<"en-latn-gb-boont-r-extended-sequence-x-private">>]}, + {<<"el-x-koine">>, [<<"el-x-koine">>]}, + {<<"el-x-attic">>, [<<"el-x-attic">>]}, + {<<"fr, en-US, es-419, az-Arab, x-pig-latin, man-Nkoo-GN">>, + [<<"fr">>, <<"en-us">>, <<"es-419">>, <<"az-arab">>, <<"x-pig-latin">>, <<"man-nkoo-gn">>]}, + {<<"da">>, [<<"da">>]}, + {<<"mi, en">>, [<<"mi">>, <<"en">>]} + ], + [{V, fun() -> R = parse_content_language(V) end} || {V, R} <- Tests]. + +parse_content_language_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_content_language(V)) end} || V <- Tests]. + +horse_parse_content_language() -> + horse:repeat(100000, + parse_content_language(<<"fr, en-US, es-419, az-Arab, x-pig-latin, man-Nkoo-GN">>) + ). +-endif. + +%% Content-Length header. + +-spec parse_content_length(binary()) -> non_neg_integer(). +parse_content_length(ContentLength) -> + I = binary_to_integer(ContentLength), + true = I >= 0, + I. + +-ifdef(TEST). +prop_parse_content_length() -> + ?FORALL( + X, + non_neg_integer(), + X =:= parse_content_length(integer_to_binary(X)) + ). 
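+%% A minimal usage sketch mirroring the tests below (values illustrative,
+%% not upstream documentation): the header must be a bare non-negative
+%% decimal integer, so 1337 = parse_content_length(<<"1337">>) succeeds
+%% while parse_content_length(<<"-1">>) crashes.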
+ +parse_content_length_test_() -> + Tests = [ + {<<"0">>, 0}, + {<<"42">>, 42}, + {<<"69">>, 69}, + {<<"1337">>, 1337}, + {<<"3495">>, 3495}, + {<<"1234567890">>, 1234567890} + ], + [{V, fun() -> R = parse_content_length(V) end} || {V, R} <- Tests]. + +parse_content_length_error_test_() -> + Tests = [ + <<>>, + <<"-1">>, + <<"123, 123">>, + <<"4.17">> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_content_length(V)) end} || V <- Tests]. + +horse_parse_content_length_zero() -> + horse:repeat(100000, + parse_content_length(<<"0">>) + ). + +horse_parse_content_length_giga() -> + horse:repeat(100000, + parse_content_length(<<"1234567890">>) + ). +-endif. + +%% Content-Range header. + +-spec parse_content_range(binary()) + -> {bytes, non_neg_integer(), non_neg_integer(), non_neg_integer() | '*'} + | {bytes, '*', non_neg_integer()} | {binary(), binary()}. +parse_content_range(<<"bytes */", C, R/bits >>) when ?IS_DIGIT(C) -> unsatisfied_range(R, C - $0); +parse_content_range(<<"bytes ", C, R/bits >>) when ?IS_DIGIT(C) -> byte_range_first(R, C - $0); +parse_content_range(<< C, R/bits >>) when ?IS_TOKEN(C) -> + ?LOWER(other_content_range_unit, R, <<>>). + +byte_range_first(<< $-, C, R/bits >>, First) when ?IS_DIGIT(C) -> byte_range_last(R, First, C - $0); +byte_range_first(<< C, R/bits >>, First) when ?IS_DIGIT(C) -> byte_range_first(R, First * 10 + C - $0). + +byte_range_last(<<"/*">>, First, Last) -> {bytes, First, Last, '*'}; +byte_range_last(<< $/, C, R/bits >>, First, Last) when ?IS_DIGIT(C) -> byte_range_complete(R, First, Last, C - $0); +byte_range_last(<< C, R/bits >>, First, Last) when ?IS_DIGIT(C) -> byte_range_last(R, First, Last * 10 + C - $0). + +byte_range_complete(<<>>, First, Last, Complete) -> {bytes, First, Last, Complete}; +byte_range_complete(<< C, R/bits >>, First, Last, Complete) when ?IS_DIGIT(C) -> + byte_range_complete(R, First, Last, Complete * 10 + C - $0). + +unsatisfied_range(<<>>, Complete) -> {bytes, '*', Complete}; +unsatisfied_range(<< C, R/bits >>, Complete) when ?IS_DIGIT(C) -> unsatisfied_range(R, Complete * 10 + C - $0). + +other_content_range_unit(<< $\s, R/bits >>, Unit) -> other_content_range_resp(R, Unit, <<>>); +other_content_range_unit(<< C, R/bits >>, Unit) when ?IS_TOKEN(C) -> + ?LOWER(other_content_range_unit, R, Unit). + +other_content_range_resp(<<>>, Unit, Resp) -> {Unit, Resp}; +other_content_range_resp(<< C, R/bits >>, Unit, Resp) when ?IS_CHAR(C) -> other_content_range_resp(R, Unit, << Resp/binary, C >>). + +-ifdef(TEST). +content_range() -> + ?LET(ContentRange, + oneof([ + ?SUCHTHAT({bytes, First, Last, Complete}, + {bytes, non_neg_integer(), non_neg_integer(), non_neg_integer()}, + First =< Last andalso Last < Complete), + ?SUCHTHAT({bytes, First, Last, '*'}, + {bytes, non_neg_integer(), non_neg_integer(), '*'}, + First =< Last), + {bytes, '*', non_neg_integer()}, + {token(), ?LET(L, list(abnf_char()), list_to_binary(L))} + ]), + {case ContentRange of + {Unit, Resp} when is_binary(Unit) -> {?LOWER(Unit), Resp}; + _ -> ContentRange + end, case ContentRange of + {bytes, First, Last, '*'} -> + << "bytes ", (integer_to_binary(First))/binary, "-", + (integer_to_binary(Last))/binary, "/*">>; + {bytes, First, Last, Complete} -> + << "bytes ", (integer_to_binary(First))/binary, "-", + (integer_to_binary(Last))/binary, "/", (integer_to_binary(Complete))/binary >>; + {bytes, '*', Complete} -> + << "bytes */", (integer_to_binary(Complete))/binary >>; + {Unit, Resp} -> + << Unit/binary, $\s, Resp/binary >> + end}). 
+ +prop_parse_content_range() -> + ?FORALL({Res, ContentRange}, + content_range(), + Res =:= parse_content_range(ContentRange)). + +parse_content_range_test_() -> + Tests = [ + {<<"bytes 21010-47021/47022">>, {bytes, 21010, 47021, 47022}}, + {<<"bytes 500-999/8000">>, {bytes, 500, 999, 8000}}, + {<<"bytes 7000-7999/8000">>, {bytes, 7000, 7999, 8000}}, + {<<"bytes 42-1233/1234">>, {bytes, 42, 1233, 1234}}, + {<<"bytes 42-1233/*">>, {bytes, 42, 1233, '*'}}, + {<<"bytes */1234">>, {bytes, '*', 1234}}, + {<<"bytes 0-499/1234">>, {bytes, 0, 499, 1234}}, + {<<"bytes 500-999/1234">>, {bytes, 500, 999, 1234}}, + {<<"bytes 500-1233/1234">>, {bytes, 500, 1233, 1234}}, + {<<"bytes 734-1233/1234">>, {bytes, 734, 1233, 1234}}, + {<<"bytes */47022">>, {bytes, '*', 47022}}, + {<<"exampleunit 1.2-4.3/25">>, {<<"exampleunit">>, <<"1.2-4.3/25">>}}, + {<<"exampleunit 11.2-14.3/25">>, {<<"exampleunit">>, <<"11.2-14.3/25">>}} + ], + [{V, fun() -> R = parse_content_range(V) end} || {V, R} <- Tests]. + +parse_content_range_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_content_range(V)) end} || V <- Tests]. + +horse_parse_content_range_bytes() -> + horse:repeat(200000, + parse_content_range(<<"bytes 21010-47021/47022">>) + ). + +horse_parse_content_range_other() -> + horse:repeat(200000, + parse_content_range(<<"exampleunit 11.2-14.3/25">>) + ). +-endif. + +%% Content-Type header. + +-spec parse_content_type(binary()) -> media_type(). +parse_content_type(<< C, R/bits >>) when ?IS_TOKEN(C) -> + ?LOWER(media_type, R, <<>>). + +media_type(<< $/, C, R/bits >>, T) when ?IS_TOKEN(C) -> + ?LOWER(media_subtype, R, T, <<>>); +media_type(<< C, R/bits >>, T) when ?IS_TOKEN(C) -> + ?LOWER(media_type, R, T). + +media_subtype(<< C, R/bits >>, T, S) when ?IS_TOKEN(C) -> + ?LOWER(media_subtype, R, T, S); +media_subtype(R, T, S) -> media_param_sep(R, T, S, []). + +media_param_sep(<<>>, T, S, P) -> {T, S, lists:reverse(P)}; +media_param_sep(<< $;, R/bits >>, T, S, P) -> media_before_param(R, T, S, P); +media_param_sep(<< C, R/bits >>, T, S, P) when ?IS_WS(C) -> media_param_sep(R, T, S, P). + +media_before_param(<< C, R/bits >>, T, S, P) when ?IS_WS(C)-> media_before_param(R, T, S, P); +media_before_param(<< "charset=", $", R/bits >>, T, S, P) -> media_charset_quoted(R, T, S, P, <<>>); +media_before_param(<< "charset=", R/bits >>, T, S, P) -> media_charset(R, T, S, P, <<>>); +media_before_param(<< C, R/bits >>, T, S, P) when ?IS_TOKEN(C) -> + ?LOWER(media_param, R, T, S, P, <<>>). + +media_charset_quoted(<< $", R/bits >>, T, S, P, V) -> + media_param_sep(R, T, S, [{<<"charset">>, V}|P]); +media_charset_quoted(<< $\\, C, R/bits >>, T, S, P, V) when ?IS_VCHAR_OBS(C) -> + ?LOWER(media_charset_quoted, R, T, S, P, V); +media_charset_quoted(<< C, R/bits >>, T, S, P, V) when ?IS_VCHAR_OBS(C) -> + ?LOWER(media_charset_quoted, R, T, S, P, V). + +media_charset(<< C, R/bits >>, T, S, P, V) when ?IS_TOKEN(C) -> + ?LOWER(media_charset, R, T, S, P, V); +media_charset(R, T, S, P, V) -> media_param_sep(R, T, S, [{<<"charset">>, V}|P]). + +media_param(<< $=, $", R/bits >>, T, S, P, K) -> media_quoted(R, T, S, P, K, <<>>); +media_param(<< $=, C, R/bits >>, T, S, P, K) when ?IS_TOKEN(C) -> media_value(R, T, S, P, K, << C >>); +media_param(<< C, R/bits >>, T, S, P, K) when ?IS_TOKEN(C) -> + ?LOWER(media_param, R, T, S, P, K). 
+ +media_quoted(<< $", R/bits >>, T, S, P, K, V) -> media_param_sep(R, T, S, [{K, V}|P]); +media_quoted(<< $\\, C, R/bits >>, T, S, P, K, V) when ?IS_VCHAR_OBS(C) -> media_quoted(R, T, S, P, K, << V/binary, C >>); +media_quoted(<< C, R/bits >>, T, S, P, K, V) when ?IS_VCHAR_OBS(C) -> media_quoted(R, T, S, P, K, << V/binary, C >>). + +media_value(<< C, R/bits >>, T, S, P, K, V) when ?IS_TOKEN(C) -> media_value(R, T, S, P, K, << V/binary, C >>); +media_value(R, T, S, P, K, V) -> media_param_sep(R, T, S, [{K, V}|P]). + +-ifdef(TEST). +media_type_parameter() -> + frequency([ + {90, parameter()}, + {10, {<<"charset">>, oneof([token(), quoted_string()]), <<>>, <<>>}} + ]). + +media_type() -> + ?LET({T, S, P}, + {token(), token(), small_list(media_type_parameter())}, + {T, S, P, iolist_to_binary([T, $/, S, [[OWS1, $;, OWS2, K, $=, V] || {K, V, OWS1, OWS2} <- P]])} + ). + +prop_parse_content_type() -> + ?FORALL({T, S, P, MediaType}, + media_type(), + begin + {ResT, ResS, ResP} = parse_content_type(MediaType), + ExpectedP = [case ?LOWER(K) of + <<"charset">> -> {<<"charset">>, ?LOWER(unquote(V))}; + LowK -> {LowK, unquote(V)} + end || {K, V, _, _} <- P], + ResT =:= ?LOWER(T) + andalso ResS =:= ?LOWER(S) + andalso ResP =:= ExpectedP + end + ). + +parse_content_type_test_() -> + Tests = [ + {<<"text/html;charset=utf-8">>, + {<<"text">>, <<"html">>, [{<<"charset">>, <<"utf-8">>}]}}, + {<<"text/html;charset=UTF-8">>, + {<<"text">>, <<"html">>, [{<<"charset">>, <<"utf-8">>}]}}, + {<<"Text/HTML;Charset=\"utf-8\"">>, + {<<"text">>, <<"html">>, [{<<"charset">>, <<"utf-8">>}]}}, + {<<"text/html; charset=\"utf-8\"">>, + {<<"text">>, <<"html">>, [{<<"charset">>, <<"utf-8">>}]}}, + {<<"text/html; charset=ISO-8859-4">>, + {<<"text">>, <<"html">>, [{<<"charset">>, <<"iso-8859-4">>}]}}, + {<<"text/plain; charset=iso-8859-4">>, + {<<"text">>, <<"plain">>, [{<<"charset">>, <<"iso-8859-4">>}]}}, + {<<"multipart/form-data \t;Boundary=\"MultipartIsUgly\"">>, + {<<"multipart">>, <<"form-data">>, [ + {<<"boundary">>, <<"MultipartIsUgly">>} + ]}}, + {<<"foo/bar; one=FirstParam; two=SecondParam">>, + {<<"foo">>, <<"bar">>, [ + {<<"one">>, <<"FirstParam">>}, + {<<"two">>, <<"SecondParam">>} + ]}} + ], + [{V, fun() -> R = parse_content_type(V) end} || {V, R} <- Tests]. + +horse_parse_content_type() -> + horse:repeat(200000, + parse_content_type(<<"text/html;charset=utf-8">>) + ). +-endif. + +%% Cookie header. + +-spec parse_cookie(binary()) -> [{binary(), binary()}]. +parse_cookie(Cookie) -> + cow_cookie:parse_cookie(Cookie). + +%% Date header. + +-spec parse_date(binary()) -> calendar:datetime(). +parse_date(Date) -> + cow_date:parse_date(Date). + +-ifdef(TEST). +parse_date_test_() -> + Tests = [ + {<<"Tue, 15 Nov 1994 08:12:31 GMT">>, {{1994, 11, 15}, {8, 12, 31}}} + ], + [{V, fun() -> R = parse_date(V) end} || {V, R} <- Tests]. +-endif. + +%% ETag header. + +-spec parse_etag(binary()) -> etag(). +parse_etag(<< $W, $/, $", R/bits >>) -> + etag(R, weak, <<>>); +parse_etag(<< $", R/bits >>) -> + etag(R, strong, <<>>). + +etag(<< $" >>, Strength, Tag) -> + {Strength, Tag}; +etag(<< C, R/bits >>, Strength, Tag) when ?IS_ETAGC(C) -> + etag(R, Strength, << Tag/binary, C >>). + +-ifdef(TEST). +etagc() -> + ?SUCHTHAT(C, integer(16#21, 16#ff), C =/= 16#22 andalso C =/= 16#7f). 
+ +etag() -> + ?LET({Strength, Tag}, + {elements([weak, strong]), list(etagc())}, + begin + TagBin = list_to_binary(Tag), + {{Strength, TagBin}, + case Strength of + weak -> << $W, $/, $", TagBin/binary, $" >>; + strong -> << $", TagBin/binary, $" >> + end} + end). + +prop_parse_etag() -> + ?FORALL({Tag, TagBin}, + etag(), + Tag =:= parse_etag(TagBin)). + +parse_etag_test_() -> + Tests = [ + {<<"\"xyzzy\"">>, {strong, <<"xyzzy">>}}, + {<<"W/\"xyzzy\"">>, {weak, <<"xyzzy">>}}, + {<<"\"\"">>, {strong, <<>>}} + ], + [{V, fun() -> R = parse_etag(V) end} || {V, R} <- Tests]. + +parse_etag_error_test_() -> + Tests = [ + <<>>, + <<"\"">>, + <<"W">>, + <<"W/">> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_etag(V)) end} || V <- Tests]. + +horse_parse_etag() -> + horse:repeat(200000, + parse_etag(<<"W/\"xyzzy\"">>) + ). +-endif. + +%% Expect header. + +-spec parse_expect(binary()) -> continue. +parse_expect(<<"100-continue">>) -> + continue; +parse_expect(<<"100-", C, O, N, T, I, M, U, E >>) + when (C =:= $C) or (C =:= $c), (O =:= $O) or (O =:= $o), + (N =:= $N) or (N =:= $n), (T =:= $T) or (T =:= $t), + (I =:= $I) or (I =:= $i), (M =:= $N) or (M =:= $n), + (U =:= $U) or (U =:= $u), (E =:= $E) or (E =:= $e) -> + continue. + +-ifdef(TEST). +expect() -> + ?LET(E, + [$1, $0, $0, $-, + elements([$c, $C]), elements([$o, $O]), elements([$n, $N]), + elements([$t, $T]), elements([$i, $I]), elements([$n, $N]), + elements([$u, $U]), elements([$e, $E])], + list_to_binary(E)). + +prop_parse_expect() -> + ?FORALL(E, expect(), continue =:= parse_expect(E)). + +parse_expect_test_() -> + Tests = [ + <<"100-continue">>, + <<"100-CONTINUE">>, + <<"100-Continue">>, + <<"100-CoNtInUe">> + ], + [{V, fun() -> continue = parse_expect(V) end} || V <- Tests]. + +parse_expect_error_test_() -> + Tests = [ + <<>>, + <<" ">>, + <<"200-OK">>, + <<"Cookies">> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_expect(V)) end} || V <- Tests]. + +horse_parse_expect() -> + horse:repeat(200000, + parse_expect(<<"100-continue">>) + ). +-endif. + +%% Expires header. +%% +%% Recipients must interpret invalid date formats as a date +%% in the past. The value "0" is commonly used. + +-spec parse_expires(binary()) -> calendar:datetime(). +parse_expires(<<"0">>) -> + {{1, 1, 1}, {0, 0, 0}}; +parse_expires(Expires) -> + try + cow_date:parse_date(Expires) + catch _:_ -> + {{1, 1, 1}, {0, 0, 0}} + end. + +-ifdef(TEST). +parse_expires_test_() -> + Tests = [ + {<<"0">>, {{1, 1, 1}, {0, 0, 0}}}, + {<<"Thu, 01 Dec 1994 nope invalid">>, {{1, 1, 1}, {0, 0, 0}}}, + {<<"Thu, 01 Dec 1994 16:00:00 GMT">>, {{1994, 12, 1}, {16, 0, 0}}} + ], + [{V, fun() -> R = parse_expires(V) end} || {V, R} <- Tests]. + +horse_parse_expires_0() -> + horse:repeat(200000, + parse_expires(<<"0">>) + ). + +horse_parse_expires_invalid() -> + horse:repeat(200000, + parse_expires(<<"Thu, 01 Dec 1994 nope invalid">>) + ). +-endif. + +%% Host header. +%% +%% We only seek to have legal characters and separate the +%% host and port values. The number of segments in the host +%% or the size of each segment is not checked. +%% +%% There is no way to distinguish IPv4 addresses from regular +%% names until the last segment is reached therefore we do not +%% differentiate them. +%% +%% The following valid hosts are currently rejected: IPv6 +%% addresses with a zone identifier; IPvFuture addresses; +%% and percent-encoded addresses. + +-spec parse_host(binary()) -> {binary(), 0..65535 | undefined}. 
+parse_host(<< $[, R/bits >>) -> + ipv6_address(R, << $[ >>); +parse_host(Host) -> + reg_name(Host, <<>>). + +ipv6_address(<< $] >>, IP) -> {<< IP/binary, $] >>, undefined}; +ipv6_address(<< $], $:, Port/bits >>, IP) -> {<< IP/binary, $] >>, binary_to_integer(Port)}; +ipv6_address(<< C, R/bits >>, IP) when ?IS_HEX(C) or (C =:= $:) or (C =:= $.) -> + ?LOWER(ipv6_address, R, IP). + +reg_name(<<>>, Name) -> {Name, undefined}; +reg_name(<< $:, Port/bits >>, Name) -> {Name, binary_to_integer(Port)}; +reg_name(<< C, R/bits >>, Name) when ?IS_URI_UNRESERVED(C) or ?IS_URI_SUB_DELIMS(C) -> + ?LOWER(reg_name, R, Name). + +-ifdef(TEST). +host_chars() -> "!$&'()*+,-.0123456789;=ABCDEFGHIJKLMNOPQRSTUVWXYZ_abcdefghijklmnopqrstuvwxyz~". +host() -> vector(1, 255, elements(host_chars())). + +host_port() -> + ?LET({Host, Port}, + {host(), oneof([undefined, integer(1, 65535)])}, + begin + HostBin = list_to_binary(Host), + {{?LOWER(HostBin), Port}, + case Port of + undefined -> HostBin; + _ -> << HostBin/binary, $:, (integer_to_binary(Port))/binary >> + end} + end). + +prop_parse_host() -> + ?FORALL({Res, Host}, host_port(), Res =:= parse_host(Host)). + +parse_host_test_() -> + Tests = [ + {<<>>, {<<>>, undefined}}, + {<<"www.example.org:8080">>, {<<"www.example.org">>, 8080}}, + {<<"www.example.org">>, {<<"www.example.org">>, undefined}}, + {<<"192.0.2.1:8080">>, {<<"192.0.2.1">>, 8080}}, + {<<"192.0.2.1">>, {<<"192.0.2.1">>, undefined}}, + {<<"[2001:db8::1]:8080">>, {<<"[2001:db8::1]">>, 8080}}, + {<<"[2001:db8::1]">>, {<<"[2001:db8::1]">>, undefined}}, + {<<"[::ffff:192.0.2.1]:8080">>, {<<"[::ffff:192.0.2.1]">>, 8080}}, + {<<"[::ffff:192.0.2.1]">>, {<<"[::ffff:192.0.2.1]">>, undefined}} + ], + [{V, fun() -> R = parse_host(V) end} || {V, R} <- Tests]. + +horse_parse_host_blue_example_org() -> + horse:repeat(200000, + parse_host(<<"blue.example.org:8080">>) + ). + +horse_parse_host_ipv4() -> + horse:repeat(200000, + parse_host(<<"192.0.2.1:8080">>) + ). + +horse_parse_host_ipv6() -> + horse:repeat(200000, + parse_host(<<"[2001:db8::1]:8080">>) + ). + +horse_parse_host_ipv6_v4() -> + horse:repeat(200000, + parse_host(<<"[::ffff:192.0.2.1]:8080">>) + ). +-endif. + +%% HTTP2-Settings header. + +-spec parse_http2_settings(binary()) -> map(). +parse_http2_settings(HTTP2Settings) -> + cow_http2:parse_settings_payload(base64:decode(HTTP2Settings)). + +%% If-Match header. + +-spec parse_if_match(binary()) -> '*' | [etag()]. +parse_if_match(<<"*">>) -> + '*'; +parse_if_match(IfMatch) -> + nonempty(etag_list(IfMatch, [])). + +etag_list(<<>>, Acc) -> lists:reverse(Acc); +etag_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> etag_list(R, Acc); +etag_list(<< $W, $/, $", R/bits >>, Acc) -> etag(R, Acc, weak, <<>>); +etag_list(<< $", R/bits >>, Acc) -> etag(R, Acc, strong, <<>>). + +etag(<< $", R/bits >>, Acc, Strength, Tag) -> etag_list_sep(R, [{Strength, Tag}|Acc]); +etag(<< C, R/bits >>, Acc, Strength, Tag) when ?IS_ETAGC(C) -> etag(R, Acc, Strength, << Tag/binary, C >>). + +etag_list_sep(<<>>, Acc) -> lists:reverse(Acc); +etag_list_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> etag_list_sep(R, Acc); +etag_list_sep(<< $,, R/bits >>, Acc) -> etag_list(R, Acc). + +-ifdef(TEST). +prop_parse_if_match() -> + ?FORALL(L, + non_empty(list(etag())), + begin + << _, IfMatch/binary >> = iolist_to_binary([[$,, T] || {_, T} <- L]), + ResL = parse_if_match(IfMatch), + CheckedL = [T =:= ResT || {{T, _}, ResT} <- lists:zip(L, ResL)], + [true] =:= lists:usort(CheckedL) + end). 
+ +parse_if_match_test_() -> + Tests = [ + {<<"\"xyzzy\"">>, [{strong, <<"xyzzy">>}]}, + {<<"\"xyzzy\", \"r2d2xxxx\", \"c3piozzzz\"">>, + [{strong, <<"xyzzy">>}, {strong, <<"r2d2xxxx">>}, {strong, <<"c3piozzzz">>}]}, + {<<"*">>, '*'} + ], + [{V, fun() -> R = parse_if_match(V) end} || {V, R} <- Tests]. + +parse_if_match_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_if_match(V)) end} || V <- Tests]. + +horse_parse_if_match() -> + horse:repeat(200000, + parse_if_match(<<"\"xyzzy\", \"r2d2xxxx\", \"c3piozzzz\"">>) + ). +-endif. + +%% If-Modified-Since header. + +-spec parse_if_modified_since(binary()) -> calendar:datetime(). +parse_if_modified_since(IfModifiedSince) -> + cow_date:parse_date(IfModifiedSince). + +-ifdef(TEST). +parse_if_modified_since_test_() -> + Tests = [ + {<<"Sat, 29 Oct 1994 19:43:31 GMT">>, {{1994, 10, 29}, {19, 43, 31}}} + ], + [{V, fun() -> R = parse_if_modified_since(V) end} || {V, R} <- Tests]. +-endif. + +%% If-None-Match header. + +-spec parse_if_none_match(binary()) -> '*' | [etag()]. +parse_if_none_match(<<"*">>) -> + '*'; +parse_if_none_match(IfNoneMatch) -> + nonempty(etag_list(IfNoneMatch, [])). + +-ifdef(TEST). +parse_if_none_match_test_() -> + Tests = [ + {<<"\"xyzzy\"">>, [{strong, <<"xyzzy">>}]}, + {<<"W/\"xyzzy\"">>, [{weak, <<"xyzzy">>}]}, + {<<"\"xyzzy\", \"r2d2xxxx\", \"c3piozzzz\"">>, + [{strong, <<"xyzzy">>}, {strong, <<"r2d2xxxx">>}, {strong, <<"c3piozzzz">>}]}, + {<<"W/\"xyzzy\", W/\"r2d2xxxx\", W/\"c3piozzzz\"">>, + [{weak, <<"xyzzy">>}, {weak, <<"r2d2xxxx">>}, {weak, <<"c3piozzzz">>}]}, + {<<"*">>, '*'} + ], + [{V, fun() -> R = parse_if_none_match(V) end} || {V, R} <- Tests]. + +parse_if_none_match_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_if_none_match(V)) end} || V <- Tests]. + +horse_parse_if_none_match() -> + horse:repeat(200000, + parse_if_none_match(<<"W/\"xyzzy\", W/\"r2d2xxxx\", W/\"c3piozzzz\"">>) + ). +-endif. + +%% If-Range header. + +-spec parse_if_range(binary()) -> etag() | calendar:datetime(). +parse_if_range(<< $W, $/, $", R/bits >>) -> + etag(R, weak, <<>>); +parse_if_range(<< $", R/bits >>) -> + etag(R, strong, <<>>); +parse_if_range(IfRange) -> + cow_date:parse_date(IfRange). + +-ifdef(TEST). +parse_if_range_test_() -> + Tests = [ + {<<"W/\"xyzzy\"">>, {weak, <<"xyzzy">>}}, + {<<"\"xyzzy\"">>, {strong, <<"xyzzy">>}}, + {<<"Sat, 29 Oct 1994 19:43:31 GMT">>, {{1994, 10, 29}, {19, 43, 31}}} + ], + [{V, fun() -> R = parse_if_range(V) end} || {V, R} <- Tests]. + +parse_if_range_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_if_range(V)) end} || V <- Tests]. + +horse_parse_if_range_etag() -> + horse:repeat(200000, + parse_if_range(<<"\"xyzzy\"">>) + ). + +horse_parse_if_range_date() -> + horse:repeat(200000, + parse_if_range(<<"Sat, 29 Oct 1994 19:43:31 GMT">>) + ). +-endif. + +%% If-Unmodified-Since header. + +-spec parse_if_unmodified_since(binary()) -> calendar:datetime(). +parse_if_unmodified_since(IfModifiedSince) -> + cow_date:parse_date(IfModifiedSince). + +-ifdef(TEST). +parse_if_unmodified_since_test_() -> + Tests = [ + {<<"Sat, 29 Oct 1994 19:43:31 GMT">>, {{1994, 10, 29}, {19, 43, 31}}} + ], + [{V, fun() -> R = parse_if_unmodified_since(V) end} || {V, R} <- Tests]. +-endif. + +%% Last-Modified header. + +-spec parse_last_modified(binary()) -> calendar:datetime(). +parse_last_modified(LastModified) -> + cow_date:parse_date(LastModified). + +-ifdef(TEST). 
+parse_last_modified_test_() -> + Tests = [ + {<<"Tue, 15 Nov 1994 12:45:26 GMT">>, {{1994, 11, 15}, {12, 45, 26}}} + ], + [{V, fun() -> R = parse_last_modified(V) end} || {V, R} <- Tests]. +-endif. + +%% Link header. + +-spec parse_link(binary()) -> [cow_link:link()]. +parse_link(Link) -> + cow_link:parse_link(Link). + +%% Max-Forwards header. + +-spec parse_max_forwards(binary()) -> non_neg_integer(). +parse_max_forwards(MaxForwards) -> + I = binary_to_integer(MaxForwards), + true = I >= 0, + I. + +-ifdef(TEST). +prop_parse_max_forwards() -> + ?FORALL( + X, + non_neg_integer(), + X =:= parse_max_forwards(integer_to_binary(X)) + ). + +parse_max_forwards_test_() -> + Tests = [ + {<<"0">>, 0}, + {<<"42">>, 42}, + {<<"69">>, 69}, + {<<"1337">>, 1337}, + {<<"1234567890">>, 1234567890} + ], + [{V, fun() -> R = parse_max_forwards(V) end} || {V, R} <- Tests]. + +parse_max_forwards_error_test_() -> + Tests = [ + <<>>, + <<"123, 123">>, + <<"4.17">> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_max_forwards(V)) end} || V <- Tests]. +-endif. + +%% Origin header. + +%% According to the RFC6454 we should generate +%% a fresh globally unique identifier and return that value if: +%% - URI does not use a hierarchical element as a naming authority +%% or the URI is not an absolute URI +%% - the implementation doesn't support the protocol given by uri-scheme +%% Thus, erlang reference represents a GUID here. +%% +%% We only seek to have legal characters and separate the +%% host and port values. The number of segments in the host +%% or the size of each segment is not checked. +%% +%% There is no way to distinguish IPv4 addresses from regular +%% names until the last segment is reached therefore we do not +%% differentiate them. +%% +%% @todo The following valid hosts are currently rejected: IPv6 +%% addresses with a zone identifier; IPvFuture addresses; +%% and percent-encoded addresses. + +-spec parse_origin(binary()) -> [{binary(), binary(), 0..65535} | reference()]. +parse_origin(Origins) -> + nonempty(origin_scheme(Origins, [])). + +origin_scheme(<<>>, Acc) -> Acc; +origin_scheme(<< "http://", R/bits >>, Acc) -> origin_host(R, Acc, <<"http">>); +origin_scheme(<< "https://", R/bits >>, Acc) -> origin_host(R, Acc, <<"https">>); +origin_scheme(<< C, R/bits >>, Acc) when ?IS_TOKEN(C) -> origin_scheme(next_origin(R), [make_ref()|Acc]). + +origin_host(<< $[, R/bits >>, Acc, Scheme) -> origin_ipv6_address(R, Acc, Scheme, << $[ >>); +origin_host(Host, Acc, Scheme) -> origin_reg_name(Host, Acc, Scheme, <<>>). + +origin_ipv6_address(<< $] >>, Acc, Scheme, IP) -> + lists:reverse([{Scheme, << IP/binary, $] >>, default_port(Scheme)}|Acc]); +origin_ipv6_address(<< $], $\s, R/bits >>, Acc, Scheme, IP) -> + origin_scheme(R, [{Scheme, << IP/binary, $] >>, default_port(Scheme)}|Acc]); +origin_ipv6_address(<< $], $:, Port/bits >>, Acc, Scheme, IP) -> + origin_port(Port, Acc, Scheme, << IP/binary, $] >>, <<>>); +origin_ipv6_address(<< C, R/bits >>, Acc, Scheme, IP) when ?IS_HEX(C) or (C =:= $:) or (C =:= $.) -> + ?LOWER(origin_ipv6_address, R, Acc, Scheme, IP). 
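+%% Illustrative example, added for clarity (not part of the original module):
+%% per the comment above, an origin whose scheme is not http or https --
+%% including the serialized "null" origin -- is represented by a fresh
+%% Erlang reference, e.g.
+%%   [Ref] = parse_origin(<<"null">>), true = is_reference(Ref).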
+ +origin_reg_name(<<>>, Acc, Scheme, Name) -> + lists:reverse([{Scheme, Name, default_port(Scheme)}|Acc]); +origin_reg_name(<< $\s, R/bits >>, Acc, Scheme, Name) -> + origin_scheme(R, [{Scheme, Name, default_port(Scheme)}|Acc]); +origin_reg_name(<< $:, Port/bits >>, Acc, Scheme, Name) -> + origin_port(Port, Acc, Scheme, Name, <<>>); +origin_reg_name(<< C, R/bits >>, Acc, Scheme, Name) when ?IS_URI_UNRESERVED(C) or ?IS_URI_SUB_DELIMS(C) -> + ?LOWER(origin_reg_name, R, Acc, Scheme, Name). + +origin_port(<<>>, Acc, Scheme, Host, Port) -> + lists:reverse([{Scheme, Host, binary_to_integer(Port)}|Acc]); +origin_port(<< $\s, R/bits >>, Acc, Scheme, Host, Port) -> + origin_scheme(R, [{Scheme, Host, binary_to_integer(Port)}|Acc]); +origin_port(<< C, R/bits >>, Acc, Scheme, Host, Port) when ?IS_DIGIT(C) -> + origin_port(R, Acc, Scheme, Host, << Port/binary, C >>). + +next_origin(<<>>) -> <<>>; +next_origin(<< $\s, C, R/bits >>) when ?IS_TOKEN(C) -> << C, R/bits >>; +next_origin(<< C, R/bits >>) when ?IS_TOKEN(C) or (C =:= $:) or (C =:= $/) -> next_origin(R). + +default_port(<< "http" >>) -> 80; +default_port(<< "https" >>) -> 443. + +-ifdef(TEST). +scheme() -> oneof([<<"http">>, <<"https">>]). + +scheme_host_port() -> + ?LET({Scheme, Host, Port}, + {scheme(), host(), integer(1, 65535)}, + begin + HostBin = list_to_binary(Host), + {[{Scheme, ?LOWER(HostBin), Port}], + case default_port(Scheme) of + Port -> << Scheme/binary, "://", HostBin/binary>>; + _ -> << Scheme/binary, "://", HostBin/binary, $:, (integer_to_binary(Port))/binary >> + end} + end). + +prop_parse_origin() -> + ?FORALL({Res, Origin}, scheme_host_port(), Res =:= parse_origin(Origin)). + +parse_origin_test_() -> + Tests = [ + {<<"http://www.example.org:8080">>, [{<<"http">>, <<"www.example.org">>, 8080}]}, + {<<"http://www.example.org">>, [{<<"http">>, <<"www.example.org">>, 80}]}, + {<<"http://192.0.2.1:8080">>, [{<<"http">>, <<"192.0.2.1">>, 8080}]}, + {<<"http://192.0.2.1">>, [{<<"http">>, <<"192.0.2.1">>, 80}]}, + {<<"http://[2001:db8::1]:8080">>, [{<<"http">>, <<"[2001:db8::1]">>, 8080}]}, + {<<"http://[2001:db8::1]">>, [{<<"http">>, <<"[2001:db8::1]">>, 80}]}, + {<<"http://[::ffff:192.0.2.1]:8080">>, [{<<"http">>, <<"[::ffff:192.0.2.1]">>, 8080}]}, + {<<"http://[::ffff:192.0.2.1]">>, [{<<"http">>, <<"[::ffff:192.0.2.1]">>, 80}]}, + {<<"http://example.org https://blue.example.com:8080">>, + [{<<"http">>, <<"example.org">>, 80}, + {<<"https">>, <<"blue.example.com">>, 8080}]} + ], + [{V, fun() -> R = parse_origin(V) end} || {V, R} <- Tests]. + +parse_origin_reference_test_() -> + Tests = [ + <<"null">>, + <<"httpx://example.org:80">>, + <<"httpx://example.org:80 null">>, + <<"null null">> + ], + [{V, fun() -> [true = is_reference(Ref) || Ref <- parse_origin(V)] end} || V <- Tests]. + +parse_origin_error_test_() -> + Tests = [ + <<>>, + <<"null", $\t, "null">>, + <<"null", $\s, $\s, "null">> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_origin(V)) end} || V <- Tests]. + +horse_parse_origin_blue_example_org() -> + horse:repeat(200000, + parse_origin(<<"http://blue.example.org:8080">>) + ). + +horse_parse_origin_ipv4() -> + horse:repeat(200000, + parse_origin(<<"http://192.0.2.1:8080">>) + ). + +horse_parse_origin_ipv6() -> + horse:repeat(200000, + parse_origin(<<"http://[2001:db8::1]:8080">>) + ). + +horse_parse_origin_ipv6_v4() -> + horse:repeat(200000, + parse_origin(<<"http://[::ffff:192.0.2.1]:8080">>) + ). + +horse_parse_origin_null() -> + horse:repeat(200000, + parse_origin(<<"null">>) + ). +-endif. + +%% Pragma header. 
+%% +%% Legacy header kept for backward compatibility with HTTP/1.0 caches. +%% Only the "no-cache" directive was ever specified, and only for +%% request messages. +%% +%% We take a large shortcut in the parsing of this header, expecting +%% an exact match of "no-cache". + +-spec parse_pragma(binary()) -> cache | no_cache. +parse_pragma(<<"no-cache">>) -> no_cache; +parse_pragma(_) -> cache. + +%% Proxy-Authenticate header. +%% +%% Alias of parse_www_authenticate/1 due to identical syntax. + +-spec parse_proxy_authenticate(binary()) -> [{basic, binary()} + | {bearer | digest | binary(), [{binary(), binary()}]}]. +parse_proxy_authenticate(ProxyAuthenticate) -> + parse_www_authenticate(ProxyAuthenticate). + +%% Proxy-Authorization header. +%% +%% Alias of parse_authorization/1 due to identical syntax. + +-spec parse_proxy_authorization(binary()) + -> {basic, binary(), binary()} + | {bearer, binary()} + | {digest, [{binary(), binary()}]}. +parse_proxy_authorization(ProxyAuthorization) -> + parse_authorization(ProxyAuthorization). + +%% Range header. + +-spec parse_range(binary()) + -> {bytes, [{non_neg_integer(), non_neg_integer() | infinity} | neg_integer()]} + | {binary(), binary()}. +parse_range(<<"bytes=", R/bits >>) -> + bytes_range_set(R, []); +parse_range(<< C, R/bits >>) when ?IS_TOKEN(C) -> + ?LOWER(other_range_unit, R, <<>>). + +bytes_range_set(<<>>, Acc) -> {bytes, lists:reverse(Acc)}; +bytes_range_set(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> bytes_range_set(R, Acc); +bytes_range_set(<< $-, C, R/bits >>, Acc) when ?IS_DIGIT(C) -> bytes_range_suffix_spec(R, Acc, C - $0); +bytes_range_set(<< C, R/bits >>, Acc) when ?IS_DIGIT(C) -> bytes_range_spec(R, Acc, C - $0). + +bytes_range_spec(<< $-, C, R/bits >>, Acc, First) when ?IS_DIGIT(C) -> bytes_range_spec_last(R, Acc, First, C - $0); +bytes_range_spec(<< $-, R/bits >>, Acc, First) -> bytes_range_set_sep(R, [{First, infinity}|Acc]); +bytes_range_spec(<< C, R/bits >>, Acc, First) when ?IS_DIGIT(C) -> bytes_range_spec(R, Acc, First * 10 + C - $0). + +bytes_range_spec_last(<< C, R/bits >>, Acc, First, Last) when ?IS_DIGIT(C) -> bytes_range_spec_last(R, Acc, First, Last * 10 + C - $0); +bytes_range_spec_last(R, Acc, First, Last) -> bytes_range_set_sep(R, [{First, Last}|Acc]). + +bytes_range_suffix_spec(<< C, R/bits >>, Acc, Suffix) when ?IS_DIGIT(C) -> bytes_range_suffix_spec(R, Acc, Suffix * 10 + C - $0); +bytes_range_suffix_spec(R, Acc, Suffix) -> bytes_range_set_sep(R, [-Suffix|Acc]). + +bytes_range_set_sep(<<>>, Acc) -> {bytes, lists:reverse(Acc)}; +bytes_range_set_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> bytes_range_set_sep(R, Acc); +bytes_range_set_sep(<< $,, R/bits >>, Acc) -> bytes_range_set(R, Acc). + +other_range_unit(<< $=, C, R/bits >>, U) when ?IS_VCHAR(C) -> + other_range_set(R, U, << C >>); +other_range_unit(<< C, R/bits >>, U) when ?IS_TOKEN(C) -> + ?LOWER(other_range_unit, R, U). + +other_range_set(<<>>, U, S) -> + {U, S}; +other_range_set(<< C, R/bits >>, U, S) when ?IS_VCHAR(C) -> + other_range_set(R, U, << S/binary, C >>). + +-ifdef(TEST). 
+bytes_range() -> + ?LET(BytesSet, + non_empty(list(oneof([ + ?SUCHTHAT({First, Last}, {pos_integer(), pos_integer()}, First =< Last), + {pos_integer(), infinity}, + ?LET(I, pos_integer(), -I) + ]))), + {{bytes, BytesSet}, begin + << _, Set/bits >> = iolist_to_binary([ + case Spec of + {First, infinity} -> [$,, integer_to_binary(First), $-]; + {First, Last} -> [$,, integer_to_binary(First), $-, integer_to_binary(Last)]; + Suffix -> [$,, integer_to_binary(Suffix)] + end || Spec <- BytesSet]), + <<"bytes=", Set/binary >> + end}). + +other_range() -> + ?LET(Range = {Unit, Set}, + {token(), ?LET(L, non_empty(list(vchar())), list_to_binary(L))}, + {Range, << Unit/binary, $=, Set/binary >>}). + +range() -> + oneof([ + bytes_range(), + other_range() + ]). + +prop_parse_range() -> + ?FORALL({Range, RangeBin}, + range(), + begin + Range2 = case Range of + {bytes, _} -> Range; + {Unit, Set} -> {?LOWER(Unit), Set} + end, + Range2 =:= parse_range(RangeBin) + end). + +parse_range_test_() -> + Tests = [ + {<<"bytes=0-499">>, {bytes, [{0, 499}]}}, + {<<"bytes=500-999">>, {bytes, [{500, 999}]}}, + {<<"bytes=-500">>, {bytes, [-500]}}, + {<<"bytes=9500-">>, {bytes, [{9500, infinity}]}}, + {<<"bytes=0-0,-1">>, {bytes, [{0, 0}, -1]}}, + {<<"bytes=500-600,601-999">>, {bytes, [{500, 600}, {601, 999}]}}, + {<<"bytes=500-700,601-999">>, {bytes, [{500, 700}, {601, 999}]}}, + {<<"books=I-III,V-IX">>, {<<"books">>, <<"I-III,V-IX">>}} + ], + [{V, fun() -> R = parse_range(V) end} || {V, R} <- Tests]. + +parse_range_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_range(V)) end} || V <- Tests]. + +horse_parse_range_first_last() -> + horse:repeat(200000, + parse_range(<<"bytes=500-999">>) + ). + +horse_parse_range_infinity() -> + horse:repeat(200000, + parse_range(<<"bytes=9500-">>) + ). + +horse_parse_range_suffix() -> + horse:repeat(200000, + parse_range(<<"bytes=-500">>) + ). + +horse_parse_range_two() -> + horse:repeat(200000, + parse_range(<<"bytes=500-700,601-999">>) + ). + +horse_parse_range_other() -> + horse:repeat(200000, + parse_range(<<"books=I-III,V-IX">>) + ). +-endif. + +%% Retry-After header. + +-spec parse_retry_after(binary()) -> non_neg_integer() | calendar:datetime(). +parse_retry_after(RetryAfter = << D, _/bits >>) when ?IS_DIGIT(D) -> + I = binary_to_integer(RetryAfter), + true = I >= 0, + I; +parse_retry_after(RetryAfter) -> + cow_date:parse_date(RetryAfter). + +-ifdef(TEST). +parse_retry_after_test_() -> + Tests = [ + {<<"Fri, 31 Dec 1999 23:59:59 GMT">>, {{1999, 12, 31}, {23, 59, 59}}}, + {<<"120">>, 120} + ], + [{V, fun() -> R = parse_retry_after(V) end} || {V, R} <- Tests]. + +parse_retry_after_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_retry_after(V)) end} || V <- Tests]. + +horse_parse_retry_after_date() -> + horse:repeat(200000, + parse_retry_after(<<"Fri, 31 Dec 1999 23:59:59 GMT">>) + ). + +horse_parse_retry_after_delay_seconds() -> + horse:repeat(200000, + parse_retry_after(<<"120">>) + ). +-endif. + +%% Sec-WebSocket-Accept header. +%% +%% The argument is returned without any processing. This value is +%% expected to be matched directly by the client so no parsing is +%% needed. + +-spec parse_sec_websocket_accept(binary()) -> binary(). +parse_sec_websocket_accept(SecWebSocketAccept) -> + SecWebSocketAccept. + +%% Sec-WebSocket-Extensions header. + +-spec parse_sec_websocket_extensions(binary()) -> [{binary(), [binary() | {binary(), binary()}]}]. 
+parse_sec_websocket_extensions(SecWebSocketExtensions) -> + nonempty(ws_extension_list(SecWebSocketExtensions, [])). + +ws_extension_list(<<>>, Acc) -> lists:reverse(Acc); +ws_extension_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> ws_extension_list(R, Acc); +ws_extension_list(<< C, R/bits >>, Acc) when ?IS_TOKEN(C) -> ws_extension(R, Acc, << C >>). + +ws_extension(<< C, R/bits >>, Acc, E) when ?IS_TOKEN(C) -> ws_extension(R, Acc, << E/binary, C >>); +ws_extension(R, Acc, E) -> ws_extension_param_sep(R, Acc, E, []). + +ws_extension_param_sep(<<>>, Acc, E, P) -> lists:reverse([{E, lists:reverse(P)}|Acc]); +ws_extension_param_sep(<< $,, R/bits >>, Acc, E, P) -> ws_extension_list(R, [{E, lists:reverse(P)}|Acc]); +ws_extension_param_sep(<< $;, R/bits >>, Acc, E, P) -> ws_extension_before_param(R, Acc, E, P); +ws_extension_param_sep(<< C, R/bits >>, Acc, E, P) when ?IS_WS(C) -> ws_extension_param_sep(R, Acc, E, P). + +ws_extension_before_param(<< C, R/bits >>, Acc, E, P) when ?IS_WS(C) -> ws_extension_before_param(R, Acc, E, P); +ws_extension_before_param(<< C, R/bits >>, Acc, E, P) when ?IS_TOKEN(C) -> ws_extension_param(R, Acc, E, P, << C >>). + +ws_extension_param(<< $=, $", R/bits >>, Acc, E, P, K) -> ws_extension_quoted(R, Acc, E, P, K, <<>>); +ws_extension_param(<< $=, C, R/bits >>, Acc, E, P, K) when ?IS_TOKEN(C) -> ws_extension_value(R, Acc, E, P, K, << C >>); +ws_extension_param(<< C, R/bits >>, Acc, E, P, K) when ?IS_TOKEN(C) -> ws_extension_param(R, Acc, E, P, << K/binary, C >>); +ws_extension_param(R, Acc, E, P, K) -> ws_extension_param_sep(R, Acc, E, [K|P]). + +ws_extension_quoted(<< $", R/bits >>, Acc, E, P, K, V) -> ws_extension_param_sep(R, Acc, E, [{K, V}|P]); +ws_extension_quoted(<< $\\, C, R/bits >>, Acc, E, P, K, V) when ?IS_TOKEN(C) -> ws_extension_quoted(R, Acc, E, P, K, << V/binary, C >>); +ws_extension_quoted(<< C, R/bits >>, Acc, E, P, K, V) when ?IS_TOKEN(C) -> ws_extension_quoted(R, Acc, E, P, K, << V/binary, C >>). + +ws_extension_value(<< C, R/bits >>, Acc, E, P, K, V) when ?IS_TOKEN(C) -> ws_extension_value(R, Acc, E, P, K, << V/binary, C >>); +ws_extension_value(R, Acc, E, P, K, V) -> ws_extension_param_sep(R, Acc, E, [{K, V}|P]). + +-ifdef(TEST). +quoted_token() -> + ?LET(T, + non_empty(list(frequency([ + {99, tchar()}, + {1, [$\\, tchar()]} + ]))), + [$", T, $"]). + +ws_extension() -> + ?LET({E, PL}, + {token(), small_list({ows(), ows(), oneof([token(), {token(), oneof([token(), quoted_token()])}])})}, + {E, PL, iolist_to_binary([E, + [case P of + {OWS1, OWS2, {K, V}} -> [OWS1, $;, OWS2, K, $=, V]; + {OWS1, OWS2, K} -> [OWS1, $;, OWS2, K] + end || P <- PL] + ])}). + +prop_parse_sec_websocket_extensions() -> + ?FORALL(L, + vector(1, 50, ws_extension()), + begin + << _, SecWebsocketExtensions/binary >> = iolist_to_binary([[$,, E] || {_, _, E} <- L]), + ResL = parse_sec_websocket_extensions(SecWebsocketExtensions), + CheckedL = [begin + ExpectedPL = [case P of + {_, _, {K, V}} -> {K, unquote(V)}; + {_, _, K} -> K + end || P <- PL], + E =:= ResE andalso ExpectedPL =:= ResPL + end || {{E, PL, _}, {ResE, ResPL}} <- lists:zip(L, ResL)], + [true] =:= lists:usort(CheckedL) + end). 
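+%% Clarifying note alongside the property above (illustrative, added for
+%% clarity): quoted parameter values are returned with the surrounding
+%% quotes and backslash escapes removed, so a parameter written as
+%% foo="b\ar" comes back as {<<"foo">>, <<"bar">>}, which is why the
+%% property compares against unquote(V).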
+ +parse_sec_websocket_extensions_test_() -> + Tests = [ + {<<"foo">>, [{<<"foo">>, []}]}, + {<<"bar; baz=2">>, [{<<"bar">>, [{<<"baz">>, <<"2">>}]}]}, + {<<"foo, bar; baz=2">>, [{<<"foo">>, []}, {<<"bar">>, [{<<"baz">>, <<"2">>}]}]}, + {<<"deflate-stream">>, [{<<"deflate-stream">>, []}]}, + {<<"mux; max-channels=4; flow-control, deflate-stream">>, + [{<<"mux">>, [{<<"max-channels">>, <<"4">>}, <<"flow-control">>]}, {<<"deflate-stream">>, []}]}, + {<<"private-extension">>, [{<<"private-extension">>, []}]} + ], + [{V, fun() -> R = parse_sec_websocket_extensions(V) end} || {V, R} <- Tests]. + +parse_sec_websocket_extensions_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_sec_websocket_extensions(V)) end} + || V <- Tests]. + +horse_parse_sec_websocket_extensions() -> + horse:repeat(200000, + parse_sec_websocket_extensions(<<"mux; max-channels=4; flow-control, deflate-stream">>) + ). +-endif. + +%% Sec-WebSocket-Key header. +%% +%% The argument is returned without any processing. This value is +%% expected to be prepended to a static value, the result of which +%% hashed to form a new base64 value returned in Sec-WebSocket-Accept, +%% therefore no parsing is needed. + +-spec parse_sec_websocket_key(binary()) -> binary(). +parse_sec_websocket_key(SecWebSocketKey) -> + SecWebSocketKey. + +%% Sec-WebSocket-Protocol request header. + +-spec parse_sec_websocket_protocol_req(binary()) -> [binary()]. +parse_sec_websocket_protocol_req(SecWebSocketProtocol) -> + nonempty(token_list(SecWebSocketProtocol, [])). + +-ifdef(TEST). +parse_sec_websocket_protocol_req_test_() -> + Tests = [ + {<<"chat, superchat">>, [<<"chat">>, <<"superchat">>]}, + {<<"Chat, SuperChat">>, [<<"Chat">>, <<"SuperChat">>]} + ], + [{V, fun() -> R = parse_sec_websocket_protocol_req(V) end} || {V, R} <- Tests]. + +parse_sec_websocket_protocol_req_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_sec_websocket_protocol_req(V)) end} + || V <- Tests]. + +horse_parse_sec_websocket_protocol_req() -> + horse:repeat(200000, + parse_sec_websocket_protocol_req(<<"chat, superchat">>) + ). +-endif. + +%% Sec-Websocket-Protocol response header. + +-spec parse_sec_websocket_protocol_resp(binary()) -> binary(). +parse_sec_websocket_protocol_resp(Protocol) -> + true = <<>> =/= Protocol, + ok = validate_token(Protocol), + Protocol. + +-ifdef(TEST). +prop_parse_sec_websocket_protocol_resp() -> + ?FORALL(T, + token(), + T =:= parse_sec_websocket_protocol_resp(T)). + +parse_sec_websocket_protocol_resp_test_() -> + Tests = [ + {<<"chat">>, <<"chat">>}, + {<<"CHAT">>, <<"CHAT">>} + ], + [{V, fun() -> R = parse_sec_websocket_protocol_resp(V) end} || {V, R} <- Tests]. + +parse_sec_websocket_protocol_resp_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_sec_websocket_protocol_resp(V)) end} + || V <- Tests]. + +horse_parse_sec_websocket_protocol_resp() -> + horse:repeat(200000, + parse_sec_websocket_protocol_resp(<<"chat">>) + ). +-endif. + +%% Sec-WebSocket-Version request header. + +-spec parse_sec_websocket_version_req(binary()) -> websocket_version(). +parse_sec_websocket_version_req(SecWebSocketVersion) when byte_size(SecWebSocketVersion) < 4 -> + Version = binary_to_integer(SecWebSocketVersion), + true = Version >= 0 andalso Version =< 255, + Version. + +-ifdef(TEST). +prop_parse_sec_websocket_version_req() -> + ?FORALL(Version, + integer(0, 255), + Version =:= parse_sec_websocket_version_req(integer_to_binary(Version))). 
+ +parse_sec_websocket_version_req_test_() -> + Tests = [ + {<<"13">>, 13}, + {<<"25">>, 25} + ], + [{V, fun() -> R = parse_sec_websocket_version_req(V) end} || {V, R} <- Tests]. + +parse_sec_websocket_version_req_error_test_() -> + Tests = [ + <<>>, + <<" ">>, + <<"7, 8, 13">>, + <<"invalid">> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_sec_websocket_version_req(V)) end} + || V <- Tests]. + +horse_parse_sec_websocket_version_req_13() -> + horse:repeat(200000, + parse_sec_websocket_version_req(<<"13">>) + ). + +horse_parse_sec_websocket_version_req_255() -> + horse:repeat(200000, + parse_sec_websocket_version_req(<<"255">>) + ). +-endif. + +%% Sec-WebSocket-Version response header. + +-spec parse_sec_websocket_version_resp(binary()) -> [websocket_version()]. +parse_sec_websocket_version_resp(SecWebSocketVersion) -> + nonempty(ws_version_list(SecWebSocketVersion, [])). + +ws_version_list(<<>>, Acc) -> lists:reverse(Acc); +ws_version_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> ws_version_list(R, Acc); +ws_version_list(<< C, R/bits >>, Acc) when ?IS_DIGIT(C) -> ws_version(R, Acc, C - $0). + +ws_version(<< C, R/bits >>, Acc, V) when ?IS_DIGIT(C) -> ws_version(R, Acc, V * 10 + C - $0); +ws_version(R, Acc, V) -> ws_version_list_sep(R, [V|Acc]). + +ws_version_list_sep(<<>>, Acc) -> lists:reverse(Acc); +ws_version_list_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> ws_version_list_sep(R, Acc); +ws_version_list_sep(<< $,, R/bits >>, Acc) -> ws_version_list(R, Acc). + +-ifdef(TEST). +sec_websocket_version_resp() -> + ?LET(L, + non_empty(list({ows(), ows(), integer(0, 255)})), + begin + << _, SecWebSocketVersion/binary >> = iolist_to_binary( + [[OWS1, $,, OWS2, integer_to_binary(V)] || {OWS1, OWS2, V} <- L]), + {[V || {_, _, V} <- L], SecWebSocketVersion} + end). + +prop_parse_sec_websocket_version_resp() -> + ?FORALL({L, SecWebSocketVersion}, + sec_websocket_version_resp(), + L =:= parse_sec_websocket_version_resp(SecWebSocketVersion)). + +parse_sec_websocket_version_resp_test_() -> + Tests = [ + {<<"13, 8, 7">>, [13, 8, 7]} + ], + [{V, fun() -> R = parse_sec_websocket_version_resp(V) end} || {V, R} <- Tests]. + +parse_sec_websocket_version_resp_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_sec_websocket_version_resp(V)) end} + || V <- Tests]. + +horse_parse_sec_websocket_version_resp() -> + horse:repeat(200000, + parse_sec_websocket_version_resp(<<"13, 8, 7">>) + ). +-endif. + +%% Set-Cookie header. + +-spec parse_set_cookie(binary()) + -> {ok, binary(), binary(), cow_cookie:cookie_attrs()} + | ignore. +parse_set_cookie(SetCookie) -> + cow_cookie:parse_set_cookie(SetCookie). + +%% TE header. +%% +%% This function does not support parsing of transfer-parameter. + +-spec parse_te(binary()) -> {trailers | no_trailers, [{binary(), qvalue()}]}. +parse_te(TE) -> + te_list(TE, no_trailers, []). + +te_list(<<>>, Trail, Acc) -> {Trail, lists:reverse(Acc)}; +te_list(<< C, R/bits >>, Trail, Acc) when ?IS_WS_COMMA(C) -> te_list(R, Trail, Acc); +te_list(<< "trailers", R/bits >>, Trail, Acc) -> te(R, Trail, Acc, <<"trailers">>); +te_list(<< "compress", R/bits >>, Trail, Acc) -> te(R, Trail, Acc, <<"compress">>); +te_list(<< "deflate", R/bits >>, Trail, Acc) -> te(R, Trail, Acc, <<"deflate">>); +te_list(<< "gzip", R/bits >>, Trail, Acc) -> te(R, Trail, Acc, <<"gzip">>); +te_list(<< C, R/bits >>, Trail, Acc) when ?IS_TOKEN(C) -> + ?LOWER(te, R, Trail, Acc, <<>>). 
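+%% Clarifying note (illustrative, not upstream documentation): weights are
+%% returned as qvalue() integers in the 0..1000 range, i.e. the q-value
+%% multiplied by 1000, so "deflate;q=0.5" yields {<<"deflate">>, 500} and
+%% an absent weight defaults to 1000.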
+ +te(<<>>, _, Acc, <<"trailers">>) -> {trailers, lists:reverse(Acc)}; +te(<< $,, R/bits >>, _, Acc, <<"trailers">>) -> te_list(R, trailers, Acc); +te(<< $;, R/bits >>, Trail, Acc, T) when T =/= <<"trailers">> -> te_before_weight(R, Trail, Acc, T); +te(<< C, R/bits >>, _, Acc, <<"trailers">>) when ?IS_WS(C) -> te_list_sep(R, trailers, Acc); +te(<< C, R/bits >>, Trail, Acc, T) when ?IS_TOKEN(C) -> + ?LOWER(te, R, Trail, Acc, T); +te(R, Trail, Acc, T) -> te_param_sep(R, Trail, Acc, T). + +te_param_sep(<<>>, Trail, Acc, T) -> {Trail, lists:reverse([{T, 1000}|Acc])}; +te_param_sep(<< $,, R/bits >>, Trail, Acc, T) -> te_list(R, Trail, [{T, 1000}|Acc]); +te_param_sep(<< C, R/bits >>, Trail, Acc, T) when ?IS_WS(C) -> te_param_sep(R, Trail, Acc, T). + +te_before_weight(<< C, R/bits >>, Trail, Acc, T) when ?IS_WS(C) -> te_before_weight(R, Trail, Acc, T); +te_before_weight(<< $q, $=, R/bits >>, Trail, Acc, T) -> te_weight(R, Trail, Acc, T). + +te_weight(<< "1.000", R/bits >>, Trail, Acc, T) -> te_list_sep(R, Trail, [{T, 1000}|Acc]); +te_weight(<< "1.00", R/bits >>, Trail, Acc, T) -> te_list_sep(R, Trail, [{T, 1000}|Acc]); +te_weight(<< "1.0", R/bits >>, Trail, Acc, T) -> te_list_sep(R, Trail, [{T, 1000}|Acc]); +te_weight(<< "1.", R/bits >>, Trail, Acc, T) -> te_list_sep(R, Trail, [{T, 1000}|Acc]); +te_weight(<< "1", R/bits >>, Trail, Acc, T) -> te_list_sep(R, Trail, [{T, 1000}|Acc]); +te_weight(<< "0.", A, B, C, R/bits >>, Trail, Acc, T) when ?IS_DIGIT(A), ?IS_DIGIT(B), ?IS_DIGIT(C) -> + te_list_sep(R, Trail, [{T, (A - $0) * 100 + (B - $0) * 10 + (C - $0)}|Acc]); +te_weight(<< "0.", A, B, R/bits >>, Trail, Acc, T) when ?IS_DIGIT(A), ?IS_DIGIT(B) -> + te_list_sep(R, Trail, [{T, (A - $0) * 100 + (B - $0) * 10}|Acc]); +te_weight(<< "0.", A, R/bits >>, Trail, Acc, T) when ?IS_DIGIT(A) -> + te_list_sep(R, Trail, [{T, (A - $0) * 100}|Acc]); +te_weight(<< "0.", R/bits >>, Trail, Acc, T) -> te_list_sep(R, Trail, [{T, 0}|Acc]); +te_weight(<< "0", R/bits >>, Trail, Acc, T) -> te_list_sep(R, Trail, [{T, 0}|Acc]). + +te_list_sep(<<>>, Trail, Acc) -> {Trail, lists:reverse(Acc)}; +te_list_sep(<< C, R/bits >>, Trail, Acc) when ?IS_WS(C) -> te_list_sep(R, Trail, Acc); +te_list_sep(<< $,, R/bits >>, Trail, Acc) -> te_list(R, Trail, Acc). + +-ifdef(TEST). +te() -> + ?LET({Trail, L}, + {elements([trailers, no_trailers]), + small_non_empty_list({?SUCHTHAT(T, token(), T =/= <<"trailers">>), weight()})}, + {Trail, L, begin + L2 = case Trail of + no_trailers -> L; + trailers -> + Rand = rand:uniform(length(L) + 1) - 1, + {Before, After} = lists:split(Rand, L), + Before ++ [{<<"trailers">>, undefined}|After] + end, + << _, TE/binary >> = iolist_to_binary([case W of + undefined -> [$,, T]; + _ -> [$,, T, <<";q=">>, qvalue_to_iodata(W)] + end || {T, W} <- L2]), + TE + end} + ). + +prop_parse_te() -> + ?FORALL({Trail, L, TE}, + te(), + begin + {ResTrail, ResL} = parse_te(TE), + CheckedL = [begin + ResT =:= ?LOWER(T) + andalso (ResW =:= W orelse (W =:= undefined andalso ResW =:= 1000)) + end || {{T, W}, {ResT, ResW}} <- lists:zip(L, ResL)], + ResTrail =:= Trail andalso [true] =:= lists:usort(CheckedL) + end). + +parse_te_test_() -> + Tests = [ + {<<"deflate">>, {no_trailers, [{<<"deflate">>, 1000}]}}, + {<<>>, {no_trailers, []}}, + {<<"trailers, deflate;q=0.5">>, {trailers, [{<<"deflate">>, 500}]}} + ], + [{V, fun() -> R = parse_te(V) end} || {V, R} <- Tests]. + +horse_parse_te() -> + horse:repeat(200000, + parse_te(<<"trailers, deflate;q=0.5">>) + ). +-endif. + +%% Trailer header. 
+ +-spec parse_trailer(binary()) -> [binary()]. +parse_trailer(Trailer) -> + nonempty(token_ci_list(Trailer, [])). + +-ifdef(TEST). +parse_trailer_test_() -> + Tests = [ + {<<"Date, Content-MD5">>, [<<"date">>, <<"content-md5">>]} + ], + [{V, fun() -> R = parse_trailer(V) end} || {V, R} <- Tests]. + +parse_trailer_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_trailer(V)) end} || V <- Tests]. + +horse_parse_trailer() -> + horse:repeat(200000, + parse_trailer(<<"Date, Content-MD5">>) + ). +-endif. + +%% Transfer-Encoding header. +%% +%% This function does not support parsing of transfer-parameter. + +-spec parse_transfer_encoding(binary()) -> [binary()]. +parse_transfer_encoding(<<"chunked">>) -> + [<<"chunked">>]; +parse_transfer_encoding(TransferEncoding) -> + nonempty(token_ci_list(TransferEncoding, [])). + +-ifdef(TEST). +prop_parse_transfer_encoding() -> + ?FORALL(L, + non_empty(list(token())), + begin + << _, TransferEncoding/binary >> = iolist_to_binary([[$,, C] || C <- L]), + ResL = parse_transfer_encoding(TransferEncoding), + CheckedL = [?LOWER(Co) =:= ResC || {Co, ResC} <- lists:zip(L, ResL)], + [true] =:= lists:usort(CheckedL) + end). + +parse_transfer_encoding_test_() -> + Tests = [ + {<<"a , , , ">>, [<<"a">>]}, + {<<" , , , a">>, [<<"a">>]}, + {<<"a , , b">>, [<<"a">>, <<"b">>]}, + {<<"chunked">>, [<<"chunked">>]}, + {<<"chunked, something">>, [<<"chunked">>, <<"something">>]}, + {<<"gzip, chunked">>, [<<"gzip">>, <<"chunked">>]} + ], + [{V, fun() -> R = parse_transfer_encoding(V) end} || {V, R} <- Tests]. + +parse_transfer_encoding_error_test_() -> + Tests = [ + <<>>, + <<" ">>, + <<" , ">>, + <<",,,">>, + <<"a b">> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_transfer_encoding(V)) end} + || V <- Tests]. + +horse_parse_transfer_encoding_chunked() -> + horse:repeat(200000, + parse_transfer_encoding(<<"chunked">>) + ). + +horse_parse_transfer_encoding_custom() -> + horse:repeat(200000, + parse_transfer_encoding(<<"chunked, something">>) + ). +-endif. + +%% Upgrade header. +%% +%% It is unclear from the RFC whether the values here are +%% case sensitive. +%% +%% We handle them in a case insensitive manner because they +%% are described as case insensitive in the Websocket RFC. + +-spec parse_upgrade(binary()) -> [binary()]. +parse_upgrade(Upgrade) -> + nonempty(protocol_list(Upgrade, [])). + +protocol_list(<<>>, Acc) -> lists:reverse(Acc); +protocol_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> protocol_list(R, Acc); +protocol_list(<< C, R/bits >>, Acc) when ?IS_TOKEN(C) -> + ?LOWER(protocol_name, R, Acc, <<>>). + +protocol_name(<< $/, C, R/bits >>, Acc, P) -> + ?LOWER(protocol_version, R, Acc, << P/binary, $/ >>); +protocol_name(<< C, R/bits >>, Acc, P) when ?IS_TOKEN(C) -> + ?LOWER(protocol_name, R, Acc, P); +protocol_name(R, Acc, P) -> protocol_list_sep(R, [P|Acc]). + +protocol_version(<< C, R/bits >>, Acc, P) when ?IS_TOKEN(C) -> + ?LOWER(protocol_version, R, Acc, P); +protocol_version(R, Acc, P) -> protocol_list_sep(R, [P|Acc]). + +protocol_list_sep(<<>>, Acc) -> lists:reverse(Acc); +protocol_list_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> protocol_list_sep(R, Acc); +protocol_list_sep(<< $,, R/bits >>, Acc) -> protocol_list(R, Acc). + +-ifdef(TEST). +protocols() -> + ?LET(P, + oneof([token(), [token(), $/, token()]]), + iolist_to_binary(P)). 
+ +prop_parse_upgrade() -> + ?FORALL(L, + non_empty(list(protocols())), + begin + << _, Upgrade/binary >> = iolist_to_binary([[$,, P] || P <- L]), + ResL = parse_upgrade(Upgrade), + CheckedL = [?LOWER(P) =:= ResP || {P, ResP} <- lists:zip(L, ResL)], + [true] =:= lists:usort(CheckedL) + end). + +parse_upgrade_test_() -> + Tests = [ + {<<"HTTP/2.0, SHTTP/1.3, IRC/6.9, RTA/x11">>, + [<<"http/2.0">>, <<"shttp/1.3">>, <<"irc/6.9">>, <<"rta/x11">>]}, + {<<"HTTP/2.0">>, [<<"http/2.0">>]} + ], + [{V, fun() -> R = parse_upgrade(V) end} || {V, R} <- Tests]. + +parse_upgrade_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_upgrade(V)) end} + || V <- Tests]. +-endif. + +%% Variant-Key-06 (draft) header. +%% +%% The Variants header must be parsed first in order to know +%% the NumMembers argument as it is the number of members in +%% the Variants dictionary. + +-spec parse_variant_key(binary(), pos_integer()) -> [[binary()]]. +parse_variant_key(VariantKey, NumMembers) -> + List = cow_http_struct_hd:parse_list(VariantKey), + [case Inner of + {with_params, InnerList, #{}} -> + NumMembers = length(InnerList), + [case Item of + {with_params, {token, Value}, #{}} -> Value; + {with_params, {string, Value}, #{}} -> Value + end || Item <- InnerList] + end || Inner <- List]. + +-ifdef(TEST). +parse_variant_key_test_() -> + Tests = [ + {<<"(en)">>, 1, [[<<"en">>]]}, + {<<"(gzip fr)">>, 2, [[<<"gzip">>, <<"fr">>]]}, + {<<"(gzip fr), (\"identity\" fr)">>, 2, [[<<"gzip">>, <<"fr">>], [<<"identity">>, <<"fr">>]]}, + {<<"(\"gzip \" fr)">>, 2, [[<<"gzip ">>, <<"fr">>]]}, + {<<"(en br)">>, 2, [[<<"en">>, <<"br">>]]}, + {<<"(\"0\")">>, 1, [[<<"0">>]]}, + {<<"(silver), (\"bronze\")">>, 1, [[<<"silver">>], [<<"bronze">>]]}, + {<<"(some_person)">>, 1, [[<<"some_person">>]]}, + {<<"(gold europe)">>, 2, [[<<"gold">>, <<"europe">>]]} + ], + [{V, fun() -> R = parse_variant_key(V, N) end} || {V, N, R} <- Tests]. + +parse_variant_key_error_test_() -> + Tests = [ + {<<"(gzip fr), (identity fr), (br fr oops)">>, 2} + ], + [{V, fun() -> {'EXIT', _} = (catch parse_variant_key(V, N)) end} || {V, N} <- Tests]. +-endif. + +-spec variant_key([[binary()]]) -> iolist(). +%% We assume that the lists are of correct length. +variant_key(VariantKeys) -> + cow_http_struct_hd:list([ + {with_params, [ + {with_params, {string, Value}, #{}} + || Value <- InnerList], #{}} + || InnerList <- VariantKeys]). + +-ifdef(TEST). +variant_key_identity_test_() -> + Tests = [ + {1, [[<<"en">>]]}, + {2, [[<<"gzip">>, <<"fr">>]]}, + {2, [[<<"gzip">>, <<"fr">>], [<<"identity">>, <<"fr">>]]}, + {2, [[<<"gzip ">>, <<"fr">>]]}, + {2, [[<<"en">>, <<"br">>]]}, + {1, [[<<"0">>]]}, + {1, [[<<"silver">>], [<<"bronze">>]]}, + {1, [[<<"some_person">>]]}, + {2, [[<<"gold">>, <<"europe">>]]} + ], + [{lists:flatten(io_lib:format("~p", [V])), + fun() -> V = parse_variant_key(iolist_to_binary(variant_key(V)), N) end} || {N, V} <- Tests]. +-endif. + +%% Variants-06 (draft) header. + +-spec parse_variants(binary()) -> [{binary(), [binary()]}]. +parse_variants(Variants) -> + {Dict0, Order} = cow_http_struct_hd:parse_dictionary(Variants), + Dict = maps:map(fun(_, {with_params, List, #{}}) -> + [case Item of + {with_params, {token, Value}, #{}} -> Value; + {with_params, {string, Value}, #{}} -> Value + end || Item <- List] + end, Dict0), + [{Key, maps:get(Key, Dict)} || Key <- Order]. + +-ifdef(TEST). 
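+%% A usage sketch, added for clarity (header values are illustrative): the
+%% number of members parsed from Variants is what parse_variant_key/2
+%% expects as its second argument, e.g.
+%%   Variants = parse_variants(<<"accept-encoding=(gzip br), accept-language=(en fr)">>),
+%%   Keys = parse_variant_key(<<"(gzip fr)">>, length(Variants)).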
+parse_variants_test_() -> + Tests = [ + {<<"accept-language=(de en jp)">>, [{<<"accept-language">>, [<<"de">>, <<"en">>, <<"jp">>]}]}, + {<<"accept-encoding=(gzip)">>, [{<<"accept-encoding">>, [<<"gzip">>]}]}, + {<<"accept-encoding=()">>, [{<<"accept-encoding">>, []}]}, + {<<"accept-encoding=(gzip br), accept-language=(en fr)">>, [ + {<<"accept-encoding">>, [<<"gzip">>, <<"br">>]}, + {<<"accept-language">>, [<<"en">>, <<"fr">>]} + ]}, + {<<"accept-language=(en fr de), accept-encoding=(gzip br)">>, [ + {<<"accept-language">>, [<<"en">>, <<"fr">>, <<"de">>]}, + {<<"accept-encoding">>, [<<"gzip">>, <<"br">>]} + ]} + ], + [{V, fun() -> R = parse_variants(V) end} || {V, R} <- Tests]. +-endif. + +-spec variants([{binary(), [binary()]}]) -> iolist(). +variants(Variants) -> + cow_http_struct_hd:dictionary([ + {Key, {with_params, [ + {with_params, {string, Value}, #{}} + || Value <- List], #{}}} + || {Key, List} <- Variants]). + +-ifdef(TEST). +variants_identity_test_() -> + Tests = [ + [{<<"accept-language">>, [<<"de">>, <<"en">>, <<"jp">>]}], + [{<<"accept-encoding">>, [<<"gzip">>]}], + [{<<"accept-encoding">>, []}], + [ + {<<"accept-encoding">>, [<<"gzip">>, <<"br">>]}, + {<<"accept-language">>, [<<"en">>, <<"fr">>]} + ], + [ + {<<"accept-language">>, [<<"en">>, <<"fr">>, <<"de">>]}, + {<<"accept-encoding">>, [<<"gzip">>, <<"br">>]} + ] + ], + [{lists:flatten(io_lib:format("~p", [V])), + fun() -> V = parse_variants(iolist_to_binary(variants(V))) end} || V <- Tests]. +-endif. + +%% Vary header. + +-spec parse_vary(binary()) -> '*' | [binary()]. +parse_vary(<<"*">>) -> + '*'; +parse_vary(Vary) -> + nonempty(token_ci_list(Vary, [])). + +-ifdef(TEST). +parse_vary_test_() -> + Tests = [ + {<<"*">>, '*'}, + {<<"Accept-Encoding">>, [<<"accept-encoding">>]}, + {<<"accept-encoding, accept-language">>, [<<"accept-encoding">>, <<"accept-language">>]} + ], + [{V, fun() -> R = parse_vary(V) end} || {V, R} <- Tests]. + +parse_vary_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_vary(V)) end} || V <- Tests]. +-endif. + +%% WWW-Authenticate header. +%% +%% Unknown schemes are represented as the lowercase binary +%% instead of an atom. Unlike with parse_authorization/1, +%% we do not crash on unknown schemes. +%% +%% When parsing auth-params, we do not accept BWS characters around the "=". + +-spec parse_www_authenticate(binary()) -> [{basic, binary()} + | {bearer | digest | binary(), [{binary(), binary()}]}]. +parse_www_authenticate(Authenticate) -> + nonempty(www_auth_list(Authenticate, [])). + +www_auth_list(<<>>, Acc) -> lists:reverse(Acc); +www_auth_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> www_auth_list(R, Acc); +www_auth_list(<< C, R/bits >>, Acc) when ?IS_TOKEN(C) -> + ?LOWER(www_auth_scheme, R, Acc, <<>>). + +www_auth_basic_before_realm(<< C, R/bits >>, Acc) when ?IS_WS(C) -> www_auth_basic_before_realm(R, Acc); +www_auth_basic_before_realm(<< "realm=\"", R/bits >>, Acc) -> www_auth_basic(R, Acc, <<>>). + +www_auth_basic(<< $", R/bits >>, Acc, Realm) -> www_auth_list_sep(R, [{basic, Realm}|Acc]); +www_auth_basic(<< $\\, C, R/bits >>, Acc, Realm) when ?IS_VCHAR_OBS(C) -> www_auth_basic(R, Acc, << Realm/binary, C >>); +www_auth_basic(<< C, R/bits >>, Acc, Realm) when ?IS_VCHAR_OBS(C) -> www_auth_basic(R, Acc, << Realm/binary, C >>). 
+ +www_auth_scheme(<< C, R/bits >>, Acc, Scheme) when ?IS_WS(C) -> + case Scheme of + <<"basic">> -> www_auth_basic_before_realm(R, Acc); + <<"bearer">> -> www_auth_params_list(R, Acc, bearer, []); + <<"digest">> -> www_auth_params_list(R, Acc, digest, []); + _ -> www_auth_params_list(R, Acc, Scheme, []) + end; +www_auth_scheme(<< C, R/bits >>, Acc, Scheme) when ?IS_TOKEN(C) -> + ?LOWER(www_auth_scheme, R, Acc, Scheme). + +www_auth_list_sep(<<>>, Acc) -> lists:reverse(Acc); +www_auth_list_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> www_auth_list_sep(R, Acc); +www_auth_list_sep(<< $,, R/bits >>, Acc) -> www_auth_list(R, Acc). + +www_auth_params_list(<<>>, Acc, Scheme, Params) -> + lists:reverse([{Scheme, lists:reverse(nonempty(Params))}|Acc]); +www_auth_params_list(<< C, R/bits >>, Acc, Scheme, Params) when ?IS_WS_COMMA(C) -> + www_auth_params_list(R, Acc, Scheme, Params); +www_auth_params_list(<< "algorithm=", C, R/bits >>, Acc, Scheme, Params) when ?IS_TOKEN(C) -> + www_auth_token(R, Acc, Scheme, Params, <<"algorithm">>, << C >>); +www_auth_params_list(<< "domain=\"", R/bits >>, Acc, Scheme, Params) -> + www_auth_quoted(R, Acc, Scheme, Params, <<"domain">>, <<>>); +www_auth_params_list(<< "error=\"", R/bits >>, Acc, Scheme, Params) -> + www_auth_quoted(R, Acc, Scheme, Params, <<"error">>, <<>>); +www_auth_params_list(<< "error_description=\"", R/bits >>, Acc, Scheme, Params) -> + www_auth_quoted(R, Acc, Scheme, Params, <<"error_description">>, <<>>); +www_auth_params_list(<< "error_uri=\"", R/bits >>, Acc, Scheme, Params) -> + www_auth_quoted(R, Acc, Scheme, Params, <<"error_uri">>, <<>>); +www_auth_params_list(<< "nonce=\"", R/bits >>, Acc, Scheme, Params) -> + www_auth_quoted(R, Acc, Scheme, Params, <<"nonce">>, <<>>); +www_auth_params_list(<< "opaque=\"", R/bits >>, Acc, Scheme, Params) -> + www_auth_quoted(R, Acc, Scheme, Params, <<"opaque">>, <<>>); +www_auth_params_list(<< "qop=\"", R/bits >>, Acc, Scheme, Params) -> + www_auth_quoted(R, Acc, Scheme, Params, <<"qop">>, <<>>); +www_auth_params_list(<< "realm=\"", R/bits >>, Acc, Scheme, Params) -> + www_auth_quoted(R, Acc, Scheme, Params, <<"realm">>, <<>>); +www_auth_params_list(<< "scope=\"", R/bits >>, Acc, Scheme, Params) -> + www_auth_quoted(R, Acc, Scheme, Params, <<"scope">>, <<>>); +www_auth_params_list(<< "stale=false", R/bits >>, Acc, Scheme, Params) -> + www_auth_params_list_sep(R, Acc, Scheme, [{<<"stale">>, <<"false">>}|Params]); +www_auth_params_list(<< "stale=true", R/bits >>, Acc, Scheme, Params) -> + www_auth_params_list_sep(R, Acc, Scheme, [{<<"stale">>, <<"true">>}|Params]); +www_auth_params_list(<< C, R/bits >>, Acc, Scheme, Params) when ?IS_TOKEN(C) -> + ?LOWER(www_auth_param, R, Acc, Scheme, Params, <<>>). + +www_auth_param(<< $=, $", R/bits >>, Acc, Scheme, Params, K) -> + www_auth_quoted(R, Acc, Scheme, Params, K, <<>>); +www_auth_param(<< $=, C, R/bits >>, Acc, Scheme, Params, K) when ?IS_TOKEN(C) -> + www_auth_token(R, Acc, Scheme, Params, K, << C >>); +www_auth_param(<< C, R/bits >>, Acc, Scheme, Params, K) when ?IS_TOKEN(C) -> + ?LOWER(www_auth_param, R, Acc, Scheme, Params, K); +www_auth_param(R, Acc, Scheme, Params, NewScheme) -> + www_auth_scheme(R, [{Scheme, lists:reverse(Params)}|Acc], NewScheme). + +www_auth_token(<< C, R/bits >>, Acc, Scheme, Params, K, V) when ?IS_TOKEN(C) -> + www_auth_token(R, Acc, Scheme, Params, K, << V/binary, C >>); +www_auth_token(R, Acc, Scheme, Params, K, V) -> + www_auth_params_list_sep(R, Acc, Scheme, [{K, V}|Params]). 
+ +www_auth_quoted(<< $", R/bits >>, Acc, Scheme, Params, K, V) -> + www_auth_params_list_sep(R, Acc, Scheme, [{K, V}|Params]); +www_auth_quoted(<< $\\, C, R/bits >>, Acc, Scheme, Params, K, V) when ?IS_VCHAR_OBS(C) -> + www_auth_quoted(R, Acc, Scheme, Params, K, << V/binary, C >>); +www_auth_quoted(<< C, R/bits >>, Acc, Scheme, Params, K, V) when ?IS_VCHAR_OBS(C) -> + www_auth_quoted(R, Acc, Scheme, Params, K, << V/binary, C >>). + +www_auth_params_list_sep(<<>>, Acc, Scheme, Params) -> + lists:reverse([{Scheme, lists:reverse(Params)}|Acc]); +www_auth_params_list_sep(<< C, R/bits >>, Acc, Scheme, Params) when ?IS_WS(C) -> + www_auth_params_list_sep(R, Acc, Scheme, Params); +www_auth_params_list_sep(<< $,, R/bits >>, Acc, Scheme, Params) -> + www_auth_params_list_after_sep(R, Acc, Scheme, Params). + +www_auth_params_list_after_sep(<<>>, Acc, Scheme, Params) -> + lists:reverse([{Scheme, lists:reverse(Params)}|Acc]); +www_auth_params_list_after_sep(<< C, R/bits >>, Acc, Scheme, Params) when ?IS_WS_COMMA(C) -> + www_auth_params_list_after_sep(R, Acc, Scheme, Params); +www_auth_params_list_after_sep(R, Acc, Scheme, Params) -> + www_auth_params_list(R, Acc, Scheme, Params). + +-ifdef(TEST). +parse_www_authenticate_test_() -> + Tests = [ + {<<"Newauth realm=\"apps\", type=1, title=\"Login to \\\"apps\\\"\", Basic realm=\"simple\"">>, + [{<<"newauth">>, [ + {<<"realm">>, <<"apps">>}, + {<<"type">>, <<"1">>}, + {<<"title">>, <<"Login to \"apps\"">>}]}, + {basic, <<"simple">>}]}, + %% Same test, different order. + {<<"Basic realm=\"simple\", Newauth realm=\"apps\", type=1, title=\"Login to \\\"apps\\\"\"">>, + [{basic, <<"simple">>}, + {<<"newauth">>, [ + {<<"realm">>, <<"apps">>}, + {<<"type">>, <<"1">>}, + {<<"title">>, <<"Login to \"apps\"">>}]}]}, + {<<"Bearer realm=\"example\"">>, + [{bearer, [{<<"realm">>, <<"example">>}]}]}, + {<<"Bearer realm=\"example\", error=\"invalid_token\", error_description=\"The access token expired\"">>, + [{bearer, [ + {<<"realm">>, <<"example">>}, + {<<"error">>, <<"invalid_token">>}, + {<<"error_description">>, <<"The access token expired">>} + ]}]}, + {<<"Basic realm=\"WallyWorld\"">>, + [{basic, <<"WallyWorld">>}]}, + {<<"Digest realm=\"testrealm@host.com\", qop=\"auth,auth-int\", " + "nonce=\"dcd98b7102dd2f0e8b11d0f600bfb0c093\", " + "opaque=\"5ccc069c403ebaf9f0171e9517f40e41\"">>, + [{digest, [ + {<<"realm">>, <<"testrealm@host.com">>}, + {<<"qop">>, <<"auth,auth-int">>}, + {<<"nonce">>, <<"dcd98b7102dd2f0e8b11d0f600bfb0c093">>}, + {<<"opaque">>, <<"5ccc069c403ebaf9f0171e9517f40e41">>} + ]}]} + ], + [{V, fun() -> R = parse_www_authenticate(V) end} || {V, R} <- Tests]. + +parse_www_authenticate_error_test_() -> + Tests = [ + <<>> + ], + [{V, fun() -> {'EXIT', _} = (catch parse_www_authenticate(V)) end} || V <- Tests]. + +horse_parse_www_authenticate() -> + horse:repeat(200000, + parse_www_authenticate(<<"Newauth realm=\"apps\", type=1, title=\"Login to \\\"apps\\\"\", Basic realm=\"simple\"">>) + ). +-endif. + +%% X-Forwarded-For header. +%% +%% This header has no specification but *looks like* it is +%% a list of tokens. +%% +%% This header is deprecated in favor of the Forwarded header. + +-spec parse_x_forwarded_for(binary()) -> [binary()]. +parse_x_forwarded_for(XForwardedFor) -> + nonempty(nodeid_list(XForwardedFor, [])). + +-define(IS_NODEID_TOKEN(C), + ?IS_ALPHA(C) or ?IS_DIGIT(C) + or (C =:= $:) or (C =:= $.) or (C =:= $_) + or (C =:= $-) or (C =:= $[) or (C =:= $])). 
+
+nodeid_list(<<>>, Acc) -> lists:reverse(Acc);
+nodeid_list(<<C, R/bits>>, Acc) when ?IS_WS_COMMA(C) -> nodeid_list(R, Acc);
+nodeid_list(<<C, R/bits>>, Acc) when ?IS_NODEID_TOKEN(C) -> nodeid(R, Acc, <<C>>).
+
+nodeid(<<C, R/bits>>, Acc, T) when ?IS_NODEID_TOKEN(C) -> nodeid(R, Acc, <<T/binary, C>>);
+nodeid(R, Acc, T) -> nodeid_list_sep(R, [T|Acc]).
+
+nodeid_list_sep(<<>>, Acc) -> lists:reverse(Acc);
+nodeid_list_sep(<<C, R/bits>>, Acc) when ?IS_WS(C) -> nodeid_list_sep(R, Acc);
+nodeid_list_sep(<<$,, R/bits>>, Acc) -> nodeid_list(R, Acc).
+
+-ifdef(TEST).
+parse_x_forwarded_for_test_() ->
+    Tests = [
+        {<<"client, proxy1, proxy2">>,
+            [<<"client">>, <<"proxy1">>, <<"proxy2">>]},
+        {<<"128.138.243.150, unknown, 192.52.106.30">>,
+            [<<"128.138.243.150">>, <<"unknown">>, <<"192.52.106.30">>]},
+        %% Examples from Mozilla DN.
+        {<<"2001:db8:85a3:8d3:1319:8a2e:370:7348">>,
+            [<<"2001:db8:85a3:8d3:1319:8a2e:370:7348">>]},
+        {<<"203.0.113.195">>,
+            [<<"203.0.113.195">>]},
+        {<<"203.0.113.195, 70.41.3.18, 150.172.238.178">>,
+            [<<"203.0.113.195">>, <<"70.41.3.18">>, <<"150.172.238.178">>]},
+        %% Examples from RFC7239 modified for x-forwarded-for.
+        {<<"[2001:db8:cafe::17]:4711">>,
+            [<<"[2001:db8:cafe::17]:4711">>]},
+        {<<"192.0.2.43, 198.51.100.17">>,
+            [<<"192.0.2.43">>, <<"198.51.100.17">>]},
+        {<<"_hidden">>,
+            [<<"_hidden">>]},
+        {<<"192.0.2.43,[2001:db8:cafe::17],unknown">>,
+            [<<"192.0.2.43">>, <<"[2001:db8:cafe::17]">>, <<"unknown">>]},
+        {<<"192.0.2.43, [2001:db8:cafe::17], unknown">>,
+            [<<"192.0.2.43">>, <<"[2001:db8:cafe::17]">>, <<"unknown">>]},
+        {<<"192.0.2.43, 2001:db8:cafe::17">>,
+            [<<"192.0.2.43">>, <<"2001:db8:cafe::17">>]},
+        {<<"192.0.2.43, [2001:db8:cafe::17]">>,
+            [<<"192.0.2.43">>, <<"[2001:db8:cafe::17]">>]}
+    ],
+    [{V, fun() -> R = parse_x_forwarded_for(V) end} || {V, R} <- Tests].
+
+parse_x_forwarded_for_error_test_() ->
+    Tests = [
+        <<>>
+    ],
+    [{V, fun() -> {'EXIT', _} = (catch parse_x_forwarded_for(V)) end} || V <- Tests].
+-endif.
+
+%% Internal.
+
+%% Only return if the list is not empty.
+nonempty(L) when L =/= [] -> L.
+
+%% Parse a list of case sensitive tokens.
+token_list(<<>>, Acc) -> lists:reverse(Acc);
+token_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> token_list(R, Acc);
+token_list(<< C, R/bits >>, Acc) when ?IS_TOKEN(C) -> token(R, Acc, << C >>).
+
+token(<< C, R/bits >>, Acc, T) when ?IS_TOKEN(C) -> token(R, Acc, << T/binary, C >>);
+token(R, Acc, T) -> token_list_sep(R, [T|Acc]).
+
+token_list_sep(<<>>, Acc) -> lists:reverse(Acc);
+token_list_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> token_list_sep(R, Acc);
+token_list_sep(<< $,, R/bits >>, Acc) -> token_list(R, Acc).
+
+%% Parse a list of case insensitive tokens.
+token_ci_list(<<>>, Acc) -> lists:reverse(Acc);
+token_ci_list(<< C, R/bits >>, Acc) when ?IS_WS_COMMA(C) -> token_ci_list(R, Acc);
+token_ci_list(<< C, R/bits >>, Acc) when ?IS_TOKEN(C) -> ?LOWER(token_ci, R, Acc, <<>>).
+
+token_ci(<< C, R/bits >>, Acc, T) when ?IS_TOKEN(C) -> ?LOWER(token_ci, R, Acc, T);
+token_ci(R, Acc, T) -> token_ci_list_sep(R, [T|Acc]).
+
+token_ci_list_sep(<<>>, Acc) -> lists:reverse(Acc);
+token_ci_list_sep(<< C, R/bits >>, Acc) when ?IS_WS(C) -> token_ci_list_sep(R, Acc);
+token_ci_list_sep(<< $,, R/bits >>, Acc) -> token_ci_list(R, Acc).
+
+join_token_list([]) -> [];
+join_token_list([H|T]) -> join_token_list(T, [H]).
+
+join_token_list([], Acc) -> lists:reverse(Acc);
+join_token_list([H|T], Acc) -> join_token_list(T, [H,<<", ">>|Acc]).
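A note on the draft Variants / Variant-Key parsers earlier in this file: they are meant to be used together, with the parsed Variants dictionary supplying the NumMembers argument of parse_variant_key/2. An illustrative sketch, using values taken from the test data above:

    Variants = cow_http_hd:parse_variants(<<"accept-encoding=(gzip br), accept-language=(en fr)">>),
    %% Variants =:= [{<<"accept-encoding">>, [<<"gzip">>, <<"br">>]},
    %%               {<<"accept-language">>, [<<"en">>, <<"fr">>]}]
    cow_http_hd:parse_variant_key(<<"(gzip fr)">>, length(Variants)).
    %% => [[<<"gzip">>, <<"fr">>]]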
diff --git a/deps/cowlib/src/cow_http_struct_hd.erl b/deps/cowlib/src/cow_http_struct_hd.erl
new file mode 100644
index 0000000..373c8da
--- /dev/null
+++ b/deps/cowlib/src/cow_http_struct_hd.erl
@@ -0,0 +1,420 @@
+%% Copyright (c) 2019, Loïc Hoguin <essen@ninenines.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% The mapping between Erlang and structured headers types is as follow:
+%%
+%% List: list()
+%% Dictionary: map()
+%% Bare item: one bare_item() that can be of type:
+%% Integer: integer()
+%% Float: float()
+%% String: {string, binary()}
+%% Token: {token, binary()}
+%% Byte sequence: {binary, binary()}
+%% Boolean: boolean()
+%% And finally:
+%% Type with Parameters: {with_params, Type, Parameters}
+%% Parameters: [{binary(), bare_item()}]
+
+-module(cow_http_struct_hd).
+
+-export([parse_dictionary/1]).
+-export([parse_item/1]).
+-export([parse_list/1]).
+-export([dictionary/1]).
+-export([item/1]).
+-export([list/1]).
+
+-include("cow_parse.hrl").
+
+-type sh_list() :: [sh_item() | sh_inner_list()].
+-type sh_inner_list() :: sh_with_params([sh_item()]).
+-type sh_params() :: #{binary() => sh_bare_item() | undefined}.
+-type sh_dictionary() :: {#{binary() => sh_item() | sh_inner_list()}, [binary()]}.
+-type sh_item() :: sh_with_params(sh_bare_item()).
+-type sh_bare_item() :: integer() | float() | boolean()
+    | {string | token | binary, binary()}.
+-type sh_with_params(Type) :: {with_params, Type, sh_params()}.
+
+-define(IS_LC_ALPHA(C),
+    (C =:= $a) or (C =:= $b) or (C =:= $c) or (C =:= $d) or (C =:= $e) or
+    (C =:= $f) or (C =:= $g) or (C =:= $h) or (C =:= $i) or (C =:= $j) or
+    (C =:= $k) or (C =:= $l) or (C =:= $m) or (C =:= $n) or (C =:= $o) or
+    (C =:= $p) or (C =:= $q) or (C =:= $r) or (C =:= $s) or (C =:= $t) or
+    (C =:= $u) or (C =:= $v) or (C =:= $w) or (C =:= $x) or (C =:= $y) or
+    (C =:= $z)
+).
+
+%% Parsing.
+
+-spec parse_dictionary(binary()) -> sh_dictionary().
+parse_dictionary(<<>>) ->
+    {#{}, []};
+parse_dictionary(<<C,R/bits>>) when ?IS_LC_ALPHA(C) ->
+    {Dict, Order, <<>>} = parse_dict_key(R, #{}, [], <<C>>),
+    {Dict, Order}.
+
+parse_dict_key(<<$=,$(,R0/bits>>, Acc, Order, K) ->
+    false = maps:is_key(K, Acc),
+    {Item, R} = parse_inner_list(R0, []),
+    parse_dict_before_sep(R, Acc#{K => Item}, [K|Order]);
+parse_dict_key(<<$=,R0/bits>>, Acc, Order, K) ->
+    false = maps:is_key(K, Acc),
+    {Item, R} = parse_item1(R0),
+    parse_dict_before_sep(R, Acc#{K => Item}, [K|Order]);
+parse_dict_key(<<C,R/bits>>, Acc, Order, K)
+        when ?IS_LC_ALPHA(C) or ?IS_DIGIT(C)
+            or (C =:= $_) or (C =:= $-) or (C =:= $*) ->
+    parse_dict_key(R, Acc, Order, <<K/binary,C>>).
+
+parse_dict_before_sep(<<C,R/bits>>, Acc, Order) when ?IS_WS(C) ->
+    parse_dict_before_sep(R, Acc, Order);
+parse_dict_before_sep(<<C,R/bits>>, Acc, Order) when C =:= $, ->
+    parse_dict_before_member(R, Acc, Order);
+parse_dict_before_sep(<<>>, Acc, Order) ->
+    {Acc, lists:reverse(Order), <<>>}.
+
+parse_dict_before_member(<<C,R/bits>>, Acc, Order) when ?IS_WS(C) ->
+    parse_dict_before_member(R, Acc, Order);
+parse_dict_before_member(<<C,R/bits>>, Acc, Order) when ?IS_LC_ALPHA(C) ->
+    parse_dict_key(R, Acc, Order, <<C>>).
+
+-spec parse_item(binary()) -> sh_item().
+parse_item(Bin) ->
+    {Item, <<>>} = parse_item1(Bin),
+    Item.
+
+parse_item1(Bin) ->
+    case parse_bare_item(Bin) of
+        {Item, <<$;,R/bits>>} ->
+            {Params, Rest} = parse_before_param(R, #{}),
+            {{with_params, Item, Params}, Rest};
+        {Item, Rest} ->
+            {{with_params, Item, #{}}, Rest}
+    end.
+
+-spec parse_list(binary()) -> sh_list().
+parse_list(<<>>) ->
+    [];
+parse_list(Bin) ->
+    parse_list_before_member(Bin, []).
+
+parse_list_member(<<$(,R0/bits>>, Acc) ->
+    {Item, R} = parse_inner_list(R0, []),
+    parse_list_before_sep(R, [Item|Acc]);
+parse_list_member(R0, Acc) ->
+    {Item, R} = parse_item1(R0),
+    parse_list_before_sep(R, [Item|Acc]).
+
+parse_list_before_sep(<<C,R/bits>>, Acc) when ?IS_WS(C) ->
+    parse_list_before_sep(R, Acc);
+parse_list_before_sep(<<$,,R/bits>>, Acc) ->
+    parse_list_before_member(R, Acc);
+parse_list_before_sep(<<>>, Acc) ->
+    lists:reverse(Acc).
+
+parse_list_before_member(<<C,R/bits>>, Acc) when ?IS_WS(C) ->
+    parse_list_before_member(R, Acc);
+parse_list_before_member(R, Acc) ->
+    parse_list_member(R, Acc).
+
+%% Internal.
+
+parse_inner_list(<<C,R/bits>>, Acc) when ?IS_WS(C) ->
+    parse_inner_list(R, Acc);
+parse_inner_list(<<$),$;,R0/bits>>, Acc) ->
+    {Params, R} = parse_before_param(R0, #{}),
+    {{with_params, lists:reverse(Acc), Params}, R};
+parse_inner_list(<<$),R/bits>>, Acc) ->
+    {{with_params, lists:reverse(Acc), #{}}, R};
+parse_inner_list(R0, Acc) ->
+    {Item, R = <<C,_/bits>>} = parse_item1(R0),
+    true = (C =:= $\s) orelse (C =:= $)),
+    parse_inner_list(R, [Item|Acc]).
+
+parse_before_param(<<C,R/bits>>, Acc) when ?IS_WS(C) ->
+    parse_before_param(R, Acc);
+parse_before_param(<<C,R/bits>>, Acc) when ?IS_LC_ALPHA(C) ->
+    parse_param(R, Acc, <<C>>).
+
+parse_param(<<$;,R/bits>>, Acc, K) ->
+    parse_before_param(R, Acc#{K => undefined});
+parse_param(<<$=,R0/bits>>, Acc, K) ->
+    case parse_bare_item(R0) of
+        {Item, <<$;,R/bits>>} ->
+            false = maps:is_key(K, Acc),
+            parse_before_param(R, Acc#{K => Item});
+        {Item, R} ->
+            false = maps:is_key(K, Acc),
+            {Acc#{K => Item}, R}
+    end;
+parse_param(<<C,R/bits>>, Acc, K)
+        when ?IS_LC_ALPHA(C) or ?IS_DIGIT(C)
+            or (C =:= $_) or (C =:= $-) or (C =:= $*) ->
+    parse_param(R, Acc, <<K/binary,C>>);
+parse_param(R, Acc, K) ->
+    false = maps:is_key(K, Acc),
+    {Acc#{K => undefined}, R}.
+
+%% Integer or float.
+parse_bare_item(<<$-,R/bits>>) -> parse_number(R, 0, <<$->>);
+parse_bare_item(<<C,R/bits>>) when ?IS_DIGIT(C) -> parse_number(R, 1, <<C>>);
+%% String.
+parse_bare_item(<<$",R/bits>>) -> parse_string(R, <<>>);
+%% Token.
+parse_bare_item(<<C,R/bits>>) when ?IS_ALPHA(C) -> parse_token(R, <<C>>);
+%% Byte sequence.
+parse_bare_item(<<$*,R/bits>>) -> parse_binary(R, <<>>);
+%% Boolean.
+parse_bare_item(<<"?0",R/bits>>) -> {false, R};
+parse_bare_item(<<"?1",R/bits>>) -> {true, R}.
+
+parse_number(<<C,R/bits>>, L, Acc) when ?IS_DIGIT(C) ->
+    parse_number(R, L+1, <<Acc/binary,C>>);
+parse_number(<<C,R/bits>>, L, Acc) when C =:= $. ->
+    parse_float(R, L, 0, <<Acc/binary,C>>);
+parse_number(R, L, Acc) when L =< 15 ->
+    {binary_to_integer(Acc), R}.
+
+parse_float(<<C,R/bits>>, L1, L2, Acc) when ?IS_DIGIT(C) ->
+    parse_float(R, L1, L2+1, <<Acc/binary,C>>);
+parse_float(R, L1, L2, Acc) when
+        L1 =< 9, L2 =< 6;
+        L1 =< 10, L2 =< 5;
+        L1 =< 11, L2 =< 4;
+        L1 =< 12, L2 =< 3;
+        L1 =< 13, L2 =< 2;
+        L1 =< 14, L2 =< 1 ->
+    {binary_to_float(Acc), R}.
+
+parse_string(<<$\\,$",R/bits>>, Acc) ->
+    parse_string(R, <<Acc/binary,$">>);
+parse_string(<<$\\,$\\,R/bits>>, Acc) ->
+    parse_string(R, <<Acc/binary,$\\>>);
+parse_string(<<$",R/bits>>, Acc) ->
+    {{string, Acc}, R};
+parse_string(<<C,R/bits>>, Acc) when
+        C >= 16#20, C =< 16#21;
+        C >= 16#23, C =< 16#5b;
+        C >= 16#5d, C =< 16#7e ->
+    parse_string(R, <<Acc/binary,C>>).
+
+parse_token(<<C,R/bits>>, Acc) when ?IS_TOKEN(C) or (C =:= $:) or (C =:= $/) ->
+    parse_token(R, <<Acc/binary,C>>);
+parse_token(R, Acc) ->
+    {{token, Acc}, R}.
+
+parse_binary(<<$*,R/bits>>, Acc) ->
+    {{binary, base64:decode(Acc)}, R};
+parse_binary(<<C,R/bits>>, Acc) when ?IS_ALPHANUM(C) or (C =:= $+) or (C =:= $/) or (C =:= $=) ->
+    parse_binary(R, <<Acc/binary,C>>).
+
+-ifdef(TEST).
+parse_struct_hd_test_() ->
+    Files = filelib:wildcard("deps/structured-header-tests/*.json"),
+    lists:flatten([begin
+        {ok, JSON} = file:read_file(File),
+        Tests = jsx:decode(JSON, [return_maps]),
+        [
+            {iolist_to_binary(io_lib:format("~s: ~s", [filename:basename(File), Name])), fun() ->
+                %% The implementation is strict. We fail whenever we can.
+                CanFail = maps:get(<<"can_fail">>, Test, false),
+                MustFail = maps:get(<<"must_fail">>, Test, false),
+                Expected = case MustFail of
+                    true -> undefined;
+                    false -> expected_to_term(maps:get(<<"expected">>, Test))
+                end,
+                Raw = raw_to_binary(Raw0),
+                case HeaderType of
+                    <<"dictionary">> when MustFail; CanFail ->
+                        {'EXIT', _} = (catch parse_dictionary(Raw));
+                    %% The test "binary.json: non-zero pad bits" does not fail
+                    %% due to our reliance on Erlang/OTP's base64 module.
+                    <<"item">> when CanFail ->
+                        case (catch parse_item(Raw)) of
+                            {'EXIT', _} -> ok;
+                            Expected -> ok
+                        end;
+                    <<"item">> when MustFail ->
+                        {'EXIT', _} = (catch parse_item(Raw));
+                    <<"list">> when MustFail; CanFail ->
+                        {'EXIT', _} = (catch parse_list(Raw));
+                    <<"dictionary">> ->
+                        {Expected, _Order} = (catch parse_dictionary(Raw));
+                    <<"item">> ->
+                        Expected = (catch parse_item(Raw));
+                    <<"list">> ->
+                        Expected = (catch parse_list(Raw))
+                end
+            end}
+        || Test=#{
+            <<"name">> := Name,
+            <<"header_type">> := HeaderType,
+            <<"raw">> := Raw0
+        } <- Tests]
+    end || File <- Files]).
+
+%% Item.
+expected_to_term(E=[_, Params]) when is_map(Params) ->
+    e2t(E);
+%% Outer list.
+expected_to_term(Expected) when is_list(Expected) ->
+    [e2t(E) || E <- Expected];
+expected_to_term(Expected) ->
+    e2t(Expected).
+
+%% Dictionary.
+e2t(Dict) when is_map(Dict) ->
+    maps:map(fun(_, V) -> e2t(V) end, Dict);
+%% Inner list.
+e2t([List, Params]) when is_list(List) ->
+    {with_params, [e2t(E) || E <- List],
+        maps:map(fun(_, P) -> e2tb(P) end, Params)};
+%% Item.
+e2t([Bare, Params]) ->
+    {with_params, e2tb(Bare),
+        maps:map(fun(_, P) -> e2tb(P) end, Params)}.
+
+%% Bare item.
+e2tb(#{<<"__type">> := <<"token">>, <<"value">> := V}) ->
+    {token, V};
+e2tb(#{<<"__type">> := <<"binary">>, <<"value">> := V}) ->
+    {binary, base32:decode(V)};
+e2tb(V) when is_binary(V) ->
+    {string, V};
+e2tb(null) ->
+    undefined;
+e2tb(V) ->
+    V.
+
+%% The Cowlib parsers currently do not support resuming parsing
+%% in the case of multiple headers. To make tests work we modify
+%% the raw value the same way Cowboy does when encountering
+%% multiple headers: by adding a comma and space in between.
+%%
+%% Similarly, the Cowlib parsers expect the leading and trailing
+%% whitespace to be removed before calling the parser.
+raw_to_binary(RawList) ->
+    trim_ws(iolist_to_binary(lists:join(<<", ">>, RawList))).
+
+trim_ws(<<C,R/bits>>) when ?IS_WS(C) -> trim_ws(R);
+trim_ws(R) -> trim_ws_end(R, byte_size(R) - 1).
+
+trim_ws_end(_, -1) ->
+    <<>>;
+trim_ws_end(Value, N) ->
+    case binary:at(Value, N) of
+        $\s -> trim_ws_end(Value, N - 1);
+        $\t -> trim_ws_end(Value, N - 1);
+        _ ->
+            S = N + 1,
+            << Value2:S/binary, _/bits >> = Value,
+            Value2
+    end.
+-endif.
+
+%% Building.
+
+-spec dictionary(#{binary() => sh_item() | sh_inner_list()}
+        | [{binary(), sh_item() | sh_inner_list()}])
+    -> iolist().
+%% @todo Also accept this? dictionary({Map, Order}) ->
+dictionary(Map) when is_map(Map) ->
+    dictionary(maps:to_list(Map));
+dictionary(KVList) when is_list(KVList) ->
+    lists:join(<<", ">>, [
+        [Key, $=, item_or_inner_list(Value)]
+    || {Key, Value} <- KVList]).
+
+-spec item(sh_item()) -> iolist().
+item({with_params, BareItem, Params}) ->
+    [bare_item(BareItem), params(Params)].
+
+-spec list(sh_list()) -> iolist().
+list(List) ->
+    lists:join(<<", ">>, [item_or_inner_list(Value) || Value <- List]).
+
+item_or_inner_list(Value={with_params, List, _}) when is_list(List) ->
+    inner_list(Value);
+item_or_inner_list(Value) ->
+    item(Value).
+
+inner_list({with_params, List, Params}) ->
+    [$(, lists:join($\s, [item(Value) || Value <- List]), $), params(Params)].
+
+bare_item({string, String}) ->
+    [$", escape_string(String, <<>>), $"];
+bare_item({token, Token}) ->
+    Token;
+bare_item({binary, Binary}) ->
+    [$*, base64:encode(Binary), $*];
+bare_item(Integer) when is_integer(Integer) ->
+    integer_to_binary(Integer);
+%% In order to properly reproduce the float as a string we
+%% must first determine how many decimals we want in the
+%% fractional component, otherwise rounding errors may occur.
+bare_item(Float) when is_float(Float) ->
+    Decimals = case trunc(Float) of
+        I when I >= 10000000000000 -> 1;
+        I when I >= 1000000000000 -> 2;
+        I when I >= 100000000000 -> 3;
+        I when I >= 10000000000 -> 4;
+        I when I >= 1000000000 -> 5;
+        _ -> 6
+    end,
+    float_to_binary(Float, [{decimals, Decimals}, compact]);
+bare_item(true) ->
+    <<"?1">>;
+bare_item(false) ->
+    <<"?0">>.
+
+escape_string(<<>>, Acc) -> Acc;
+escape_string(<<$\\,R/bits>>, Acc) -> escape_string(R, <<Acc/binary,$\\,$\\>>);
+escape_string(<<$",R/bits>>, Acc) -> escape_string(R, <<Acc/binary,$\\,$">>);
+escape_string(<<C,R/bits>>, Acc) -> escape_string(R, <<Acc/binary,C>>).
+
+params(Params) ->
+    maps:fold(fun
+        (Key, undefined, Acc) ->
+            [[$;, Key]|Acc];
+        (Key, Value, Acc) ->
+            [[$;, Key, $=, bare_item(Value)]|Acc]
+    end, [], Params).
+
+-ifdef(TEST).
+struct_hd_identity_test_() ->
+    Files = filelib:wildcard("deps/structured-header-tests/*.json"),
+    lists:flatten([begin
+        {ok, JSON} = file:read_file(File),
+        Tests = jsx:decode(JSON, [return_maps]),
+        [
+            {iolist_to_binary(io_lib:format("~s: ~s", [filename:basename(File), Name])), fun() ->
+                Expected = expected_to_term(Expected0),
+                case HeaderType of
+                    <<"dictionary">> ->
+                        {Expected, _Order} = parse_dictionary(iolist_to_binary(dictionary(Expected)));
+                    <<"item">> ->
+                        Expected = parse_item(iolist_to_binary(item(Expected)));
+                    <<"list">> ->
+                        Expected = parse_list(iolist_to_binary(list(Expected)))
+                end
+            end}
+        || #{
+            <<"name">> := Name,
+            <<"header_type">> := HeaderType,
+            %% We only run tests that must not fail.
+            <<"expected">> := Expected0
+        } <- Tests]
+    end || File <- Files]).
+-endif.
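The with_params term shape described at the top of this module can be seen end to end with a small dictionary header. A rough sketch, with the result shape following the type mapping above:

    cow_http_struct_hd:parse_dictionary(<<"a=1, b=(gzip fr)">>).
    %% => {#{<<"a">> => {with_params, 1, #{}},
    %%       <<"b">> => {with_params, [{with_params, {token, <<"gzip">>}, #{}},
    %%                                 {with_params, {token, <<"fr">>}, #{}}], #{}}},
    %%     [<<"a">>, <<"b">>]}

The second element of the result preserves member order, which is what parse_variants/1 in cow_http_hd relies on.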
diff --git a/deps/cowlib/src/cow_http_te.erl b/deps/cowlib/src/cow_http_te.erl new file mode 100644 index 0000000..57d5167 --- /dev/null +++ b/deps/cowlib/src/cow_http_te.erl @@ -0,0 +1,373 @@ +%% Copyright (c) 2014-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cow_http_te). + +%% Identity. +-export([stream_identity/2]). +-export([identity/1]). + +%% Chunked. +-export([stream_chunked/2]). +-export([chunk/1]). +-export([last_chunk/0]). + +%% The state type is the same for both identity and chunked. +-type state() :: {non_neg_integer(), non_neg_integer()}. +-export_type([state/0]). + +-type decode_ret() :: more + | {more, Data::binary(), state()} + | {more, Data::binary(), RemLen::non_neg_integer(), state()} + | {more, Data::binary(), Rest::binary(), state()} + | {done, HasTrailers::trailers | no_trailers, Rest::binary()} + | {done, Data::binary(), HasTrailers::trailers | no_trailers, Rest::binary()}. +-export_type([decode_ret/0]). + +-include("cow_parse.hrl"). + +-ifdef(TEST). +dripfeed(<< C, Rest/bits >>, Acc, State, F) -> + case F(<< Acc/binary, C >>, State) of + more -> + dripfeed(Rest, << Acc/binary, C >>, State, F); + {more, _, State2} -> + dripfeed(Rest, <<>>, State2, F); + {more, _, Length, State2} when is_integer(Length) -> + dripfeed(Rest, <<>>, State2, F); + {more, _, Acc2, State2} -> + dripfeed(Rest, Acc2, State2, F); + {done, _, <<>>} -> + ok; + {done, _, _, <<>>} -> + ok + end. +-endif. + +%% Identity. + +%% @doc Decode an identity stream. + +-spec stream_identity(Data, State) + -> {more, Data, Len, State} | {done, Data, Len, Data} + when Data::binary(), State::state(), Len::non_neg_integer(). +stream_identity(Data, {Streamed, Total}) -> + Streamed2 = Streamed + byte_size(Data), + if + Streamed2 < Total -> + {more, Data, Total - Streamed2, {Streamed2, Total}}; + true -> + Size = Total - Streamed, + << Data2:Size/binary, Rest/bits >> = Data, + {done, Data2, Total, Rest} + end. + +-spec identity(Data) -> Data when Data::iodata(). +identity(Data) -> + Data. + +-ifdef(TEST). +stream_identity_test() -> + {done, <<>>, 0, <<>>} + = stream_identity(identity(<<>>), {0, 0}), + {done, <<"\r\n">>, 2, <<>>} + = stream_identity(identity(<<"\r\n">>), {0, 2}), + {done, << 0:80000 >>, 10000, <<>>} + = stream_identity(identity(<< 0:80000 >>), {0, 10000}), + ok. + +stream_identity_parts_test() -> + {more, << 0:8000 >>, 1999, S1} + = stream_identity(<< 0:8000 >>, {0, 2999}), + {more, << 0:8000 >>, 999, S2} + = stream_identity(<< 0:8000 >>, S1), + {done, << 0:7992 >>, 2999, <<>>} + = stream_identity(<< 0:7992 >>, S2), + ok. + +%% Using the same data as the chunked one for comparison. +horse_stream_identity() -> + horse:repeat(10000, + stream_identity(<< + "4\r\n" + "Wiki\r\n" + "5\r\n" + "pedia\r\n" + "e\r\n" + " in\r\n\r\nchunks.\r\n" + "0\r\n" + "\r\n">>, {0, 43}) + ). 
+ +horse_stream_identity_dripfeed() -> + horse:repeat(10000, + dripfeed(<< + "4\r\n" + "Wiki\r\n" + "5\r\n" + "pedia\r\n" + "e\r\n" + " in\r\n\r\nchunks.\r\n" + "0\r\n" + "\r\n">>, <<>>, {0, 43}, fun stream_identity/2) + ). +-endif. + +%% Chunked. + +%% @doc Decode a chunked stream. + +-spec stream_chunked(Data, State) + -> more | {more, Data, State} | {more, Data, non_neg_integer(), State} + | {more, Data, Data, State} + | {done, HasTrailers, Data} | {done, Data, HasTrailers, Data} + when Data::binary(), State::state(), HasTrailers::trailers | no_trailers. +stream_chunked(Data, State) -> + stream_chunked(Data, State, <<>>). + +%% New chunk. +stream_chunked(Data = << C, _/bits >>, {0, Streamed}, Acc) when C =/= $\r -> + case chunked_len(Data, Streamed, Acc, 0) of + {next, Rest, State, Acc2} -> + stream_chunked(Rest, State, Acc2); + {more, State, Acc2} -> + {more, Acc2, Data, State}; + Ret -> + Ret + end; +%% Trailing \r\n before next chunk. +stream_chunked(<< "\r\n", Rest/bits >>, {2, Streamed}, Acc) -> + stream_chunked(Rest, {0, Streamed}, Acc); +%% Trailing \r before next chunk. +stream_chunked(<< "\r" >>, {2, Streamed}, Acc) -> + {more, Acc, {1, Streamed}}; +%% Trailing \n before next chunk. +stream_chunked(<< "\n", Rest/bits >>, {1, Streamed}, Acc) -> + stream_chunked(Rest, {0, Streamed}, Acc); +%% More data needed. +stream_chunked(<<>>, State = {Rem, _}, Acc) -> + {more, Acc, Rem, State}; +%% Chunk data. +stream_chunked(Data, {Rem, Streamed}, Acc) when Rem > 2 -> + DataSize = byte_size(Data), + RemSize = Rem - 2, + case Data of + << Chunk:RemSize/binary, "\r\n", Rest/bits >> -> + stream_chunked(Rest, {0, Streamed + RemSize}, << Acc/binary, Chunk/binary >>); + << Chunk:RemSize/binary, "\r" >> -> + {more, << Acc/binary, Chunk/binary >>, {1, Streamed + RemSize}}; + %% Everything in Data is part of the chunk. If we have more + %% data than the chunk accepts, then this is an error and we crash. + _ when DataSize =< RemSize -> + Rem2 = Rem - DataSize, + {more, << Acc/binary, Data/binary >>, Rem2, {Rem2, Streamed + DataSize}} + end. 
+ +chunked_len(<< $0, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16); +chunked_len(<< $1, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 1); +chunked_len(<< $2, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 2); +chunked_len(<< $3, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 3); +chunked_len(<< $4, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 4); +chunked_len(<< $5, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 5); +chunked_len(<< $6, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 6); +chunked_len(<< $7, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 7); +chunked_len(<< $8, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 8); +chunked_len(<< $9, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 9); +chunked_len(<< $A, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 10); +chunked_len(<< $B, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 11); +chunked_len(<< $C, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 12); +chunked_len(<< $D, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 13); +chunked_len(<< $E, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 14); +chunked_len(<< $F, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 15); +chunked_len(<< $a, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 10); +chunked_len(<< $b, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 11); +chunked_len(<< $c, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 12); +chunked_len(<< $d, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 13); +chunked_len(<< $e, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 14); +chunked_len(<< $f, R/bits >>, S, A, Len) -> chunked_len(R, S, A, Len * 16 + 15); +%% Chunk extensions. +%% +%% Note that we currently skip the first character we encounter here, +%% and not in the skip_chunk_ext function. If we latter implement +%% chunk extensions (unlikely) we will need to change this clause too. +chunked_len(<< C, R/bits >>, S, A, Len) when ?IS_WS(C); C =:= $; -> skip_chunk_ext(R, S, A, Len, 0); +%% Final chunk. +%% +%% When trailers are following we simply return them as the Rest. +%% Then the user code can decide to call the stream_trailers function +%% to parse them. The user can therefore ignore trailers as necessary +%% if they do not wish to handle them. +chunked_len(<< "\r\n\r\n", R/bits >>, _, <<>>, 0) -> {done, no_trailers, R}; +chunked_len(<< "\r\n\r\n", R/bits >>, _, A, 0) -> {done, A, no_trailers, R}; +chunked_len(<< "\r\n", R/bits >>, _, <<>>, 0) when byte_size(R) > 2 -> {done, trailers, R}; +chunked_len(<< "\r\n", R/bits >>, _, A, 0) when byte_size(R) > 2 -> {done, A, trailers, R}; +chunked_len(_, _, _, 0) -> more; +%% Normal chunk. Add 2 to Len for the trailing \r\n. +chunked_len(<< "\r\n", R/bits >>, S, A, Len) -> {next, R, {Len + 2, S}, A}; +chunked_len(<<"\r">>, _, <<>>, _) -> more; +chunked_len(<<"\r">>, S, A, _) -> {more, {0, S}, A}; +chunked_len(<<>>, _, <<>>, _) -> more; +chunked_len(<<>>, S, A, _) -> {more, {0, S}, A}. + +skip_chunk_ext(R = << "\r", _/bits >>, S, A, Len, _) -> chunked_len(R, S, A, Len); +skip_chunk_ext(R = <<>>, S, A, Len, _) -> chunked_len(R, S, A, Len); +%% We skip up to 128 characters of chunk extensions. The value +%% is hardcoded: chunk extensions are very rarely seen in the +%% wild and Cowboy doesn't do anything with them anyway. +%% +%% Line breaks are not allowed in the middle of chunk extensions. 
+skip_chunk_ext(<< C, R/bits >>, S, A, Len, Skipped) when C =/= $\n, Skipped < 128 -> + skip_chunk_ext(R, S, A, Len, Skipped + 1). + +%% @doc Encode a chunk. + +-spec chunk(D) -> D when D::iodata(). +chunk(Data) -> + [integer_to_list(iolist_size(Data), 16), <<"\r\n">>, + Data, <<"\r\n">>]. + +%% @doc Encode the last chunk of a chunked stream. + +-spec last_chunk() -> << _:40 >>. +last_chunk() -> + <<"0\r\n\r\n">>. + +-ifdef(TEST). +stream_chunked_identity_test() -> + {done, <<"Wikipedia in\r\n\r\nchunks.">>, no_trailers, <<>>} + = stream_chunked(iolist_to_binary([ + chunk("Wiki"), + chunk("pedia"), + chunk(" in\r\n\r\nchunks."), + last_chunk() + ]), {0, 0}), + ok. + +stream_chunked_one_pass_test() -> + {done, no_trailers, <<>>} = stream_chunked(<<"0\r\n\r\n">>, {0, 0}), + {done, <<"Wikipedia in\r\n\r\nchunks.">>, no_trailers, <<>>} + = stream_chunked(<< + "4\r\n" + "Wiki\r\n" + "5\r\n" + "pedia\r\n" + "e\r\n" + " in\r\n\r\nchunks.\r\n" + "0\r\n" + "\r\n">>, {0, 0}), + %% Same but with extra spaces or chunk extensions. + {done, <<"Wikipedia in\r\n\r\nchunks.">>, no_trailers, <<>>} + = stream_chunked(<< + "4 \r\n" + "Wiki\r\n" + "5 ; ext = abc\r\n" + "pedia\r\n" + "e;ext=abc\r\n" + " in\r\n\r\nchunks.\r\n" + "0;ext\r\n" + "\r\n">>, {0, 0}), + %% Same but with trailers. + {done, <<"Wikipedia in\r\n\r\nchunks.">>, trailers, Rest} + = stream_chunked(<< + "4\r\n" + "Wiki\r\n" + "5\r\n" + "pedia\r\n" + "e\r\n" + " in\r\n\r\nchunks.\r\n" + "0\r\n" + "x-foo-bar: bar foo\r\n" + "\r\n">>, {0, 0}), + {[{<<"x-foo-bar">>, <<"bar foo">>}], <<>>} = cow_http:parse_headers(Rest), + ok. + +stream_chunked_n_passes_test() -> + S0 = {0, 0}, + more = stream_chunked(<<"4\r">>, S0), + {more, <<>>, 6, S1} = stream_chunked(<<"4\r\n">>, S0), + {more, <<"Wiki">>, 0, S2} = stream_chunked(<<"Wiki\r\n">>, S1), + {more, <<"pedia">>, <<"e\r">>, S3} = stream_chunked(<<"5\r\npedia\r\ne\r">>, S2), + {more, <<" in\r\n\r\nchunks.">>, 2, S4} = stream_chunked(<<"e\r\n in\r\n\r\nchunks.">>, S3), + {done, no_trailers, <<>>} = stream_chunked(<<"\r\n0\r\n\r\n">>, S4), + %% A few extra for coverage purposes. + more = stream_chunked(<<"\n3">>, {1, 0}), + {more, <<"abc">>, 2, {2, 3}} = stream_chunked(<<"\n3\r\nabc">>, {1, 0}), + {more, <<"abc">>, {1, 3}} = stream_chunked(<<"3\r\nabc\r">>, {0, 0}), + {more, <<"abc">>, <<"123">>, {0, 3}} = stream_chunked(<<"3\r\nabc\r\n123">>, {0, 0}), + ok. + +stream_chunked_dripfeed_test() -> + dripfeed(<< + "4\r\n" + "Wiki\r\n" + "5\r\n" + "pedia\r\n" + "e\r\n" + " in\r\n\r\nchunks.\r\n" + "0\r\n" + "\r\n">>, <<>>, {0, 0}, fun stream_chunked/2). + +do_body_to_chunks(_, <<>>, Acc) -> + lists:reverse([<<"0\r\n\r\n">>|Acc]); +do_body_to_chunks(ChunkSize, Body, Acc) -> + BodySize = byte_size(Body), + ChunkSize2 = case BodySize < ChunkSize of + true -> BodySize; + false -> ChunkSize + end, + << Chunk:ChunkSize2/binary, Rest/binary >> = Body, + ChunkSizeBin = list_to_binary(integer_to_list(ChunkSize2, 16)), + do_body_to_chunks(ChunkSize, Rest, + [<< ChunkSizeBin/binary, "\r\n", Chunk/binary, "\r\n" >>|Acc]). + +stream_chunked_dripfeed2_test() -> + Body = list_to_binary(io_lib:format("~p", [lists:seq(1, 100)])), + Body2 = iolist_to_binary(do_body_to_chunks(50, Body, [])), + dripfeed(Body2, <<>>, {0, 0}, fun stream_chunked/2). + +stream_chunked_error_test_() -> + Tests = [ + {<<>>, undefined}, + {<<"\n\naaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa">>, {2, 0}} + ], + [{lists:flatten(io_lib:format("value ~p state ~p", [V, S])), + fun() -> {'EXIT', _} = (catch stream_chunked(V, S)) end} + || {V, S} <- Tests]. 
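On the encoding side, chunk/1 and last_chunk/0 produce the wire format that stream_chunked/2 consumes; a quick round-trip sketch (illustrative, mirroring the identity test above):

    Encoded = iolist_to_binary([cow_http_te:chunk("Wiki"), cow_http_te:chunk("pedia"), cow_http_te:last_chunk()]),
    %% Encoded =:= <<"4\r\nWiki\r\n5\r\npedia\r\n0\r\n\r\n">>
    cow_http_te:stream_chunked(Encoded, {0, 0}).
    %% => {done, <<"Wikipedia">>, no_trailers, <<>>}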
+ +horse_stream_chunked() -> + horse:repeat(10000, + stream_chunked(<< + "4\r\n" + "Wiki\r\n" + "5\r\n" + "pedia\r\n" + "e\r\n" + " in\r\n\r\nchunks.\r\n" + "0\r\n" + "\r\n">>, {0, 0}) + ). + +horse_stream_chunked_dripfeed() -> + horse:repeat(10000, + dripfeed(<< + "4\r\n" + "Wiki\r\n" + "5\r\n" + "pedia\r\n" + "e\r\n" + " in\r\n\r\nchunks.\r\n" + "0\r\n" + "\r\n">>, <<>>, {0, 43}, fun stream_chunked/2) + ). +-endif. diff --git a/deps/cowlib/src/cow_iolists.erl b/deps/cowlib/src/cow_iolists.erl new file mode 100644 index 0000000..dcb48d7 --- /dev/null +++ b/deps/cowlib/src/cow_iolists.erl @@ -0,0 +1,95 @@ +%% Copyright (c) 2017-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cow_iolists). + +-export([split/2]). + +-ifdef(TEST). +-include_lib("proper/include/proper.hrl"). +-endif. + +-spec split(non_neg_integer(), iodata()) -> {iodata(), iodata()}. +split(N, Iolist) -> + case split(N, Iolist, []) of + {ok, Before, After} -> + {Before, After}; + {more, _, Before} -> + {lists:reverse(Before), <<>>} + end. + +split(0, Rest, Acc) -> + {ok, lists:reverse(Acc), Rest}; +split(N, [], Acc) -> + {more, N, Acc}; +split(N, Binary, Acc) when byte_size(Binary) =< N -> + {more, N - byte_size(Binary), [Binary|Acc]}; +split(N, Binary, Acc) when is_binary(Binary) -> + << Before:N/binary, After/bits >> = Binary, + {ok, lists:reverse([Before|Acc]), After}; +split(N, [Binary|Tail], Acc) when byte_size(Binary) =< N -> + split(N - byte_size(Binary), Tail, [Binary|Acc]); +split(N, [Binary|Tail], Acc) when is_binary(Binary) -> + << Before:N/binary, After/bits >> = Binary, + {ok, lists:reverse([Before|Acc]), [After|Tail]}; +split(N, [Char|Tail], Acc) when is_integer(Char) -> + split(N - 1, Tail, [Char|Acc]); +split(N, [List|Tail], Acc0) -> + case split(N, List, Acc0) of + {ok, Before, After} -> + {ok, Before, [After|Tail]}; + {more, More, Acc} -> + split(More, Tail, Acc) + end. + +-ifdef(TEST). + +split_test_() -> + Tests = [ + {10, "Hello world!", "Hello worl", "d!"}, + {10, <<"Hello world!">>, "Hello worl", "d!"}, + {10, ["He", [<<"llo">>], $\s, [["world"], <<"!">>]], "Hello worl", "d!"}, + {10, ["Hello "|<<"world!">>], "Hello worl", "d!"}, + {10, "Hello!", "Hello!", ""}, + {10, <<"Hello!">>, "Hello!", ""}, + {10, ["He", [<<"ll">>], $o, [["!"]]], "Hello!", ""}, + {10, ["Hel"|<<"lo!">>], "Hello!", ""}, + {10, [[<<>>|<<>>], [], <<"Hello world!">>], "Hello worl", "d!"}, + {10, [[<<"He">>|<<"llo">>], [$\s], <<"world!">>], "Hello worl", "d!"}, + {10, [[[]|<<"He">>], [[]|<<"llo wor">>]|<<"ld!">>], "Hello worl", "d!"} + ], + [{iolist_to_binary(V), fun() -> + {B, A} = split(N, V), + true = iolist_to_binary(RB) =:= iolist_to_binary(B), + true = iolist_to_binary(RA) =:= iolist_to_binary(A) + end} || {N, V, RB, RA} <- Tests]. 
+
+prop_split_test() ->
+    ?FORALL({N, Input},
+        {non_neg_integer(), iolist()},
+        begin
+            Size = iolist_size(Input),
+            {Before, After} = split(N, Input),
+            if
+                N >= Size ->
+                    ((iolist_size(After) =:= 0)
+                        andalso iolist_to_binary(Before) =:= iolist_to_binary(Input));
+                true ->
+                    <<ExpectBefore:N/binary, ExpectAfter/bits>> = iolist_to_binary(Input),
+                    (ExpectBefore =:= iolist_to_binary(Before))
+                        andalso (ExpectAfter =:= iolist_to_binary(After))
+            end
+        end).
+
+-endif.
diff --git a/deps/cowlib/src/cow_link.erl b/deps/cowlib/src/cow_link.erl
new file mode 100644
index 0000000..8320297
--- /dev/null
+++ b/deps/cowlib/src/cow_link.erl
@@ -0,0 +1,445 @@
+%% Copyright (c) 2019, Loïc Hoguin <essen@ninenines.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cow_link).
+-compile({no_auto_import, [link/1]}).
+
+-export([parse_link/1]).
+-export([resolve_link/2]).
+-export([resolve_link/3]).
+-export([link/1]).
+
+-include("cow_inline.hrl").
+-include("cow_parse.hrl").
+
+-type link() :: #{
+    target := binary(),
+    rel := binary(),
+    attributes := [{binary(), binary()}]
+}.
+-export_type([link/0]).
+
+-type resolve_opts() :: #{
+    allow_anchor => boolean()
+}.
+
+-type uri() :: uri_string:uri_map() | uri_string:uri_string() | undefined.
+
+%% Parse a link header.
+
+%% This function returns the URI target from the header directly.
+%% Relative URIs must then be resolved as per RFC3986 5. In some
+%% cases it might not be possible to resolve URIs, for example when
+%% the link header is returned with a 404 status code.
+-spec parse_link(binary()) -> [link()].
+parse_link(Link) ->
+    before_target(Link, []).
+
+before_target(<<>>, Acc) -> lists:reverse(Acc);
+before_target(<<$<,R/bits>>, Acc) -> target(R, Acc, <<>>);
+before_target(<<C,R/bits>>, Acc) when ?IS_WS(C) -> before_target(R, Acc).
+
+target(<<$>,R/bits>>, Acc, T) -> param_sep(R, Acc, T, []);
+target(<<C,R/bits>>, Acc, T) -> target(R, Acc, <<T/binary, C>>).
+
+param_sep(<<>>, Acc, T, P) -> lists:reverse(acc_link(Acc, T, P));
+param_sep(<<$,,R/bits>>, Acc, T, P) -> before_target(R, acc_link(Acc, T, P));
+param_sep(<<$;,R/bits>>, Acc, T, P) -> before_param(R, Acc, T, P);
+param_sep(<<C,R/bits>>, Acc, T, P) when ?IS_WS(C) -> param_sep(R, Acc, T, P).
+
+before_param(<<C,R/bits>>, Acc, T, P) when ?IS_WS(C) -> before_param(R, Acc, T, P);
+before_param(<<C,R/bits>>, Acc, T, P) when ?IS_TOKEN(C) -> ?LOWER(param, R, Acc, T, P, <<>>).
+
+param(<<$=,$",R/bits>>, Acc, T, P, K) -> quoted(R, Acc, T, P, K, <<>>);
+param(<<$=,C,R/bits>>, Acc, T, P, K) when ?IS_TOKEN(C) -> value(R, Acc, T, P, K, <<C>>);
+param(<<C,R/bits>>, Acc, T, P, K) when ?IS_TOKEN(C) -> ?LOWER(param, R, Acc, T, P, K).
+
+quoted(<<$",R/bits>>, Acc, T, P, K, V) -> param_sep(R, Acc, T, [{K, V}|P]);
+quoted(<<$\\,C,R/bits>>, Acc, T, P, K, V) when ?IS_VCHAR_OBS(C) -> quoted(R, Acc, T, P, K, <<V/binary, C>>);
+quoted(<<C,R/bits>>, Acc, T, P, K, V) when ?IS_VCHAR_OBS(C) -> quoted(R, Acc, T, P, K, <<V/binary, C>>).
+
+value(<<C,R/bits>>, Acc, T, P, K, V) when ?IS_TOKEN(C) -> value(R, Acc, T, P, K, <<V/binary, C>>);
+value(R, Acc, T, P, K, V) -> param_sep(R, Acc, T, [{K, V}|P]).
+
+acc_link(Acc, Target, Params0) ->
+    Params1 = lists:reverse(Params0),
+    %% The rel parameter MUST be present. (RFC8288 3.3)
+    {value, {_, Rel}, Params2} = lists:keytake(<<"rel">>, 1, Params1),
+    %% Occurrences after the first MUST be ignored by parsers.
+    Params = filter_out_duplicates(Params2, #{}),
+    [#{
+        target => Target,
+        rel => ?LOWER(Rel),
+        attributes => Params
+    }|Acc].
+
+%% This function removes duplicates for attributes that don't allow them.
+filter_out_duplicates([], _) ->
+    [];
+%% The "rel" is mandatory and was already removed from params.
+filter_out_duplicates([{<<"rel">>, _}|Tail], State) ->
+    filter_out_duplicates(Tail, State);
+filter_out_duplicates([{<<"anchor">>, _}|Tail], State=#{anchor := true}) ->
+    filter_out_duplicates(Tail, State);
+filter_out_duplicates([{<<"media">>, _}|Tail], State=#{media := true}) ->
+    filter_out_duplicates(Tail, State);
+filter_out_duplicates([{<<"title">>, _}|Tail], State=#{title := true}) ->
+    filter_out_duplicates(Tail, State);
+filter_out_duplicates([{<<"title*">>, _}|Tail], State=#{title_star := true}) ->
+    filter_out_duplicates(Tail, State);
+filter_out_duplicates([{<<"type">>, _}|Tail], State=#{type := true}) ->
+    filter_out_duplicates(Tail, State);
+filter_out_duplicates([Tuple={<<"anchor">>, _}|Tail], State) ->
+    [Tuple|filter_out_duplicates(Tail, State#{anchor => true})];
+filter_out_duplicates([Tuple={<<"media">>, _}|Tail], State) ->
+    [Tuple|filter_out_duplicates(Tail, State#{media => true})];
+filter_out_duplicates([Tuple={<<"title">>, _}|Tail], State) ->
+    [Tuple|filter_out_duplicates(Tail, State#{title => true})];
+filter_out_duplicates([Tuple={<<"title*">>, _}|Tail], State) ->
+    [Tuple|filter_out_duplicates(Tail, State#{title_star => true})];
+filter_out_duplicates([Tuple={<<"type">>, _}|Tail], State) ->
+    [Tuple|filter_out_duplicates(Tail, State#{type => true})];
+filter_out_duplicates([Tuple|Tail], State) ->
+    [Tuple|filter_out_duplicates(Tail, State)].
+
+-ifdef(TEST).
+parse_link_test_() ->
+    Tests = [
+        {<<>>, []},
+        {<<" ">>, []},
+        %% Examples from the RFC.
+        {<<"<http://example.com/TheBook/chapter2>; rel=\"previous\"; title=\"previous chapter\"">>, [
+            #{
+                target => <<"http://example.com/TheBook/chapter2">>,
+                rel => <<"previous">>,
+                attributes => [
+                    {<<"title">>, <<"previous chapter">>}
+                ]
+            }
+        ]},
+        {<<"</>; rel=\"http://example.net/foo\"">>, [
+            #{
+                target => <<"/">>,
+                rel => <<"http://example.net/foo">>,
+                attributes => []
+            }
+        ]},
+        {<<"</terms>; rel=\"copyright\"; anchor=\"#foo\"">>, [
+            #{
+                target => <<"/terms">>,
+                rel => <<"copyright">>,
+                attributes => [
+                    {<<"anchor">>, <<"#foo">>}
+                ]
+            }
+        ]},
+%        {<<"</TheBook/chapter2>; rel=\"previous\"; title*=UTF-8'de'letztes%20Kapitel, "
+%            "</TheBook/chapter4>; rel=\"next\"; title*=UTF-8'de'n%c3%a4chstes%20Kapitel">>, [
+%            %% @todo
+%        ]}
+        {<<"<http://example.org/>; rel=\"start http://example.net/relation/other\"">>, [
+            #{
+                target => <<"http://example.org/">>,
+                rel => <<"start http://example.net/relation/other">>,
+                attributes => []
+            }
+        ]},
+        {<<"<https://example.org/>; rel=\"start\", "
+            "<https://example.org/index>; rel=\"index\"">>, [
+            #{
+                target => <<"https://example.org/">>,
+                rel => <<"start">>,
+                attributes => []
+            },
+            #{
+                target => <<"https://example.org/index">>,
+                rel => <<"index">>,
+                attributes => []
+            }
+        ]},
+        %% Relation types are case insensitive.
+        {<<"</>; rel=\"SELF\"">>, [
+            #{
+                target => <<"/">>,
+                rel => <<"self">>,
+                attributes => []
+            }
+        ]},
+        {<<"</>; rel=\"HTTP://EXAMPLE.NET/FOO\"">>, [
+            #{
+                target => <<"/">>,
+                rel => <<"http://example.net/foo">>,
+                attributes => []
+            }
+        ]},
+        %% Attribute names are case insensitive.
+        {<<"</terms>; REL=\"copyright\"; ANCHOR=\"#foo\"">>, [
+            #{
+                target => <<"/terms">>,
+                rel => <<"copyright">>,
+                attributes => [
+                    {<<"anchor">>, <<"#foo">>}
+                ]
+            }
+        ]}
+    ],
+    [{V, fun() -> R = parse_link(V) end} || {V, R} <- Tests].
+-endif.
+
+%% Resolve a link based on the context URI and options.
+
+-spec resolve_link(Link, uri()) -> Link | false when Link::link().
+resolve_link(Link, ContextURI) ->
+    resolve_link(Link, ContextURI, #{}).
+
+-spec resolve_link(Link, uri(), resolve_opts()) -> Link | false when Link::link().
+%% When we do not have a context URI we only succeed when the target URI is absolute.
+%% The target URI will only be normalized in that case.
+resolve_link(Link=#{target := TargetURI}, undefined, _) ->
+    case uri_string:parse(TargetURI) of
+        URIMap = #{scheme := _} ->
+            Link#{target => uri_string:normalize(URIMap)};
+        _ ->
+            false
+    end;
+resolve_link(Link=#{attributes := Params}, ContextURI, Opts) ->
+    AllowAnchor = maps:get(allow_anchor, Opts, true),
+    case lists:keyfind(<<"anchor">>, 1, Params) of
+        false ->
+            do_resolve_link(Link, ContextURI);
+        {_, Anchor} when AllowAnchor ->
+            do_resolve_link(Link, resolve(Anchor, ContextURI));
+        _ ->
+            false
+    end.
+
+do_resolve_link(Link=#{target := TargetURI}, ContextURI) ->
+    Link#{target => uri_string:recompose(resolve(TargetURI, ContextURI))}.
+
+-ifdef(TEST).
+resolve_link_test_() ->
+    Tests = [
+        %% No context URI available.
+        {#{target => <<"http://a/b/./c">>}, undefined, #{},
+            #{target => <<"http://a/b/c">>}},
+        {#{target => <<"a/b/./c">>}, undefined, #{},
+            false},
+        %% Context URI available, allow_anchor => true.
+        {#{target => <<"http://a/b">>, attributes => []}, <<"http://a/c">>, #{},
+            #{target => <<"http://a/b">>, attributes => []}},
+        {#{target => <<"b">>, attributes => []}, <<"http://a/c">>, #{},
+            #{target => <<"http://a/b">>, attributes => []}},
+        {#{target => <<"b">>, attributes => [{<<"anchor">>, <<"#frag">>}]}, <<"http://a/c">>, #{},
+            #{target => <<"http://a/b">>, attributes => [{<<"anchor">>, <<"#frag">>}]}},
+        {#{target => <<"b">>, attributes => [{<<"anchor">>, <<"d/e">>}]}, <<"http://a/c">>, #{},
+            #{target => <<"http://a/d/b">>, attributes => [{<<"anchor">>, <<"d/e">>}]}},
+        %% Context URI available, allow_anchor => false.
+        {#{target => <<"http://a/b">>, attributes => []}, <<"http://a/c">>, #{allow_anchor => false},
+            #{target => <<"http://a/b">>, attributes => []}},
+        {#{target => <<"b">>, attributes => []}, <<"http://a/c">>, #{allow_anchor => false},
+            #{target => <<"http://a/b">>, attributes => []}},
+        {#{target => <<"b">>, attributes => [{<<"anchor">>, <<"#frag">>}]},
+            <<"http://a/c">>, #{allow_anchor => false}, false},
+        {#{target => <<"b">>, attributes => [{<<"anchor">>, <<"d/e">>}]},
+            <<"http://a/c">>, #{allow_anchor => false}, false}
+    ],
+    [{iolist_to_binary(io_lib:format("~0p", [L])),
+        fun() -> R = resolve_link(L, C, O) end} || {L, C, O, R} <- Tests].
+-endif.
+
+%% @todo This function has been added to Erlang/OTP 22.3 as uri_string:resolve/2,3.
+resolve(URI, BaseURI) ->
+    case resolve1(ensure_map_uri(URI), BaseURI) of
+        TargetURI = #{path := Path0} ->
+            %% We remove dot segments. Normalizing the entire URI
+            %% will sometimes add an extra slash we don't want.
+            #{path := Path} = uri_string:normalize(#{path => Path0}, [return_map]),
+            TargetURI#{path => Path};
+        TargetURI ->
+            TargetURI
+    end.
+
+resolve1(URI=#{scheme := _}, _) ->
+    URI;
+resolve1(URI=#{host := _}, BaseURI) ->
+    #{scheme := Scheme} = ensure_map_uri(BaseURI),
+    URI#{scheme => Scheme};
+resolve1(URI=#{path := <<>>}, BaseURI0) ->
+    BaseURI = ensure_map_uri(BaseURI0),
+    Keys = case maps:is_key(query, URI) of
+        true -> [scheme, host, port, path];
+        false -> [scheme, host, port, path, query]
+    end,
+    maps:merge(URI, maps:with(Keys, BaseURI));
+resolve1(URI=#{path := <<"/",_/bits>>}, BaseURI0) ->
+    BaseURI = ensure_map_uri(BaseURI0),
+    maps:merge(URI, maps:with([scheme, host, port], BaseURI));
+resolve1(URI=#{path := Path}, BaseURI0) ->
+    BaseURI = ensure_map_uri(BaseURI0),
+    maps:merge(
+        URI#{path := merge_paths(Path, BaseURI)},
+        maps:with([scheme, host, port], BaseURI)).
+
+merge_paths(Path, #{host := _, path := <<>>}) ->
+    <<$/, Path/binary>>;
+merge_paths(Path, #{path := BasePath0}) ->
+    case string:split(BasePath0, <<$/>>, trailing) of
+        [BasePath, _] -> <<BasePath/binary, $/, Path/binary>>;
+        [_] -> <<$/, Path/binary>>
+    end.
+
+ensure_map_uri(URI) when is_map(URI) -> URI;
+ensure_map_uri(URI) -> uri_string:parse(iolist_to_binary(URI)).
+
+-ifdef(TEST).
+resolve_test_() ->
+    Tests = [
+        %% 5.4.1. Normal Examples
+        {<<"g:h">>, <<"g:h">>},
+        {<<"g">>, <<"http://a/b/c/g">>},
+        {<<"./g">>, <<"http://a/b/c/g">>},
+        {<<"g/">>, <<"http://a/b/c/g/">>},
+        {<<"/g">>, <<"http://a/g">>},
+        {<<"//g">>, <<"http://g">>},
+        {<<"?y">>, <<"http://a/b/c/d;p?y">>},
+        {<<"g?y">>, <<"http://a/b/c/g?y">>},
+        {<<"#s">>, <<"http://a/b/c/d;p?q#s">>},
+        {<<"g#s">>, <<"http://a/b/c/g#s">>},
+        {<<"g?y#s">>, <<"http://a/b/c/g?y#s">>},
+        {<<";x">>, <<"http://a/b/c/;x">>},
+        {<<"g;x">>, <<"http://a/b/c/g;x">>},
+        {<<"g;x?y#s">>, <<"http://a/b/c/g;x?y#s">>},
+        {<<"">>, <<"http://a/b/c/d;p?q">>},
+        {<<".">>, <<"http://a/b/c/">>},
+        {<<"./">>, <<"http://a/b/c/">>},
+        {<<"..">>, <<"http://a/b/">>},
+        {<<"../">>, <<"http://a/b/">>},
+        {<<"../g">>, <<"http://a/b/g">>},
+        {<<"../..">>, <<"http://a/">>},
+        {<<"../../">>, <<"http://a/">>},
+        {<<"../../g">>, <<"http://a/g">>},
+        %% 5.4.2. Abnormal Examples
+        {<<"../../../g">>, <<"http://a/g">>},
+        {<<"../../../../g">>, <<"http://a/g">>},
+        {<<"/./g">>, <<"http://a/g">>},
+        {<<"/../g">>, <<"http://a/g">>},
+        {<<"g.">>, <<"http://a/b/c/g.">>},
+        {<<".g">>, <<"http://a/b/c/.g">>},
+        {<<"g..">>, <<"http://a/b/c/g..">>},
+        {<<"..g">>, <<"http://a/b/c/..g">>},
+        {<<"./../g">>, <<"http://a/b/g">>},
+        {<<"./g/.">>, <<"http://a/b/c/g/">>},
+        {<<"g/./h">>, <<"http://a/b/c/g/h">>},
+        {<<"g/../h">>, <<"http://a/b/c/h">>},
+        {<<"g;x=1/./y">>, <<"http://a/b/c/g;x=1/y">>},
+        {<<"g;x=1/../y">>, <<"http://a/b/c/y">>},
+        {<<"g?y/./x">>, <<"http://a/b/c/g?y/./x">>},
+        {<<"g?y/../x">>, <<"http://a/b/c/g?y/../x">>},
+        {<<"g#s/./x">>, <<"http://a/b/c/g#s/./x">>},
+        {<<"g#s/../x">>, <<"http://a/b/c/g#s/../x">>},
+        {<<"http:g">>, <<"http:g">>} %% for strict parsers
+    ],
+    [{V, fun() -> R = uri_string:recompose(resolve(V, <<"http://a/b/c/d;p?q">>)) end} || {V, R} <- Tests].
+-endif.
+
+%% Build a link header.
+
+-spec link([#{
+    target := binary(),
+    rel := binary(),
+    attributes := [{binary(), binary()}]
+}]) -> iodata().
+link(Links) ->
+    lists:join(<<", ">>, [do_link(Link) || Link <- Links]).
+
+do_link(#{target := TargetURI, rel := Rel, attributes := Params}) ->
+    [
+        $<, TargetURI, <<">"
+            "; rel=\"">>, Rel, $",
+        [[<<"; ">>, Key, <<"=\"">>, escape(iolist_to_binary(Value), <<>>), $"]
+            || {Key, Value} <- Params]
+    ].
+
+escape(<<>>, Acc) -> Acc;
+escape(<<$\\,R/bits>>, Acc) -> escape(R, <<Acc/binary,$\\,$\\>>);
+escape(<<$\",R/bits>>, Acc) -> escape(R, <<Acc/binary,$\\,$\">>);
+escape(<<C,R/bits>>, Acc) -> escape(R, <<Acc/binary,C>>).
+
+-ifdef(TEST).
+link_test_() ->
+ Tests = [
+ {<<>>, []},
+ %% Examples from the RFC.
+ {<<"<http://example.com/TheBook/chapter2>; rel=\"previous\"; title=\"previous chapter\"">>, [
+ #{
+ target => <<"http://example.com/TheBook/chapter2">>,
+ rel => <<"previous">>,
+ attributes => [
+ {<<"title">>, <<"previous chapter">>}
+ ]
+ }
+ ]},
+ {<<"</>; rel=\"http://example.net/foo\"">>, [
+ #{
+ target => <<"/">>,
+ rel => <<"http://example.net/foo">>,
+ attributes => []
+ }
+ ]},
+ {<<"</terms>; rel=\"copyright\"; anchor=\"#foo\"">>, [
+ #{
+ target => <<"/terms">>,
+ rel => <<"copyright">>,
+ attributes => [
+ {<<"anchor">>, <<"#foo">>}
+ ]
+ }
+ ]},
+% {<<"</TheBook/chapter2>; rel=\"previous\"; title*=UTF-8'de'letztes%20Kapitel, "
+% "</TheBook/chapter4>; rel=\"next\"; title*=UTF-8'de'n%c3%a4chstes%20Kapitel">>, [
+% %% @todo
+% ]}
+ {<<"<http://example.org/>; rel=\"start http://example.net/relation/other\"">>, [
+ #{
+ target => <<"http://example.org/">>,
+ rel => <<"start http://example.net/relation/other">>,
+ attributes => []
+ }
+ ]},
+ {<<"<https://example.org/>; rel=\"start\", "
+ "<https://example.org/index>; rel=\"index\"">>, [
+ #{
+ target => <<"https://example.org/">>,
+ rel => <<"start">>,
+ attributes => []
+ },
+ #{
+ target => <<"https://example.org/index">>,
+ rel => <<"index">>,
+ attributes => []
+ }
+ ]},
+ {<<"</>; rel=\"previous\"; quoted=\"name=\\\"value\\\"\"">>, [
+ #{
+ target => <<"/">>,
+ rel => <<"previous">>,
+ attributes => [
+ {<<"quoted">>, <<"name=\"value\"">>}
+ ]
+ }
+ ]}
+ ],
+ [{iolist_to_binary(io_lib:format("~0p", [V])),
+ fun() -> R = iolist_to_binary(link(V)) end} || {R, V} <- Tests].
+-endif.
diff --git a/deps/cowlib/src/cow_mimetypes.erl b/deps/cowlib/src/cow_mimetypes.erl
new file mode 100644
index 0000000..07fc69f
--- /dev/null
+++ b/deps/cowlib/src/cow_mimetypes.erl
@@ -0,0 +1,1045 @@
+%% Copyright (c) 2013-2018, Loïc Hoguin <essen@ninenines.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+-module(cow_mimetypes).
+
+-export([all/1]).
+-export([web/1]).
+
+%% @doc Return the mimetype for any file by looking at its extension.
+
+-spec all(binary()) -> {binary(), binary(), []}.
+all(Path) ->
+ case filename:extension(Path) of
+ <<>> -> {<<"application">>, <<"octet-stream">>, []};
+ %% @todo Convert to string:lowercase on OTP-20+.
+ << $., Ext/binary >> -> all_ext(list_to_binary(string:to_lower(binary_to_list(Ext))))
+ end.
+
+%% @doc Return the mimetype for a Web related file by looking at its extension.
+
+-spec web(binary()) -> {binary(), binary(), []}.
+web(Path) ->
+ case filename:extension(Path) of
+ <<>> -> {<<"application">>, <<"octet-stream">>, []};
+ %% @todo Convert to string:lowercase on OTP-20+.
+ << $., Ext/binary >> -> web_ext(list_to_binary(string:to_lower(binary_to_list(Ext))))
+ end.
+
+%% Internal.
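As a quick orientation before the generated table below: both lookups lower-case the extension and fall back to application/octet-stream when the path has no extension. A small, illustrative shell sketch, with results taken from the all_ext/1 clauses that follow (the file path is hypothetical):

%% Illustrative shell session only, not part of the vendored source.
1> cow_mimetypes:all(<<"priv/static/app.css">>).
{<<"text">>,<<"css">>,[]}
2> cow_mimetypes:all(<<"logo.PNG">>).   %% extension is lower-cased before lookup
{<<"image">>,<<"png">>,[]}
3> cow_mimetypes:all(<<"README">>).     %% no extension -> octet-stream fallback
{<<"application">>,<<"octet-stream">>,[]}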
+ +%% GENERATED +all_ext(<<"123">>) -> {<<"application">>, <<"vnd.lotus-1-2-3">>, []}; +all_ext(<<"3dml">>) -> {<<"text">>, <<"vnd.in3d.3dml">>, []}; +all_ext(<<"3ds">>) -> {<<"image">>, <<"x-3ds">>, []}; +all_ext(<<"3g2">>) -> {<<"video">>, <<"3gpp2">>, []}; +all_ext(<<"3gp">>) -> {<<"video">>, <<"3gpp">>, []}; +all_ext(<<"7z">>) -> {<<"application">>, <<"x-7z-compressed">>, []}; +all_ext(<<"aab">>) -> {<<"application">>, <<"x-authorware-bin">>, []}; +all_ext(<<"aac">>) -> {<<"audio">>, <<"x-aac">>, []}; +all_ext(<<"aam">>) -> {<<"application">>, <<"x-authorware-map">>, []}; +all_ext(<<"aas">>) -> {<<"application">>, <<"x-authorware-seg">>, []}; +all_ext(<<"abw">>) -> {<<"application">>, <<"x-abiword">>, []}; +all_ext(<<"ac">>) -> {<<"application">>, <<"pkix-attr-cert">>, []}; +all_ext(<<"acc">>) -> {<<"application">>, <<"vnd.americandynamics.acc">>, []}; +all_ext(<<"ace">>) -> {<<"application">>, <<"x-ace-compressed">>, []}; +all_ext(<<"acu">>) -> {<<"application">>, <<"vnd.acucobol">>, []}; +all_ext(<<"acutc">>) -> {<<"application">>, <<"vnd.acucorp">>, []}; +all_ext(<<"adp">>) -> {<<"audio">>, <<"adpcm">>, []}; +all_ext(<<"aep">>) -> {<<"application">>, <<"vnd.audiograph">>, []}; +all_ext(<<"afm">>) -> {<<"application">>, <<"x-font-type1">>, []}; +all_ext(<<"afp">>) -> {<<"application">>, <<"vnd.ibm.modcap">>, []}; +all_ext(<<"ahead">>) -> {<<"application">>, <<"vnd.ahead.space">>, []}; +all_ext(<<"ai">>) -> {<<"application">>, <<"postscript">>, []}; +all_ext(<<"aif">>) -> {<<"audio">>, <<"x-aiff">>, []}; +all_ext(<<"aifc">>) -> {<<"audio">>, <<"x-aiff">>, []}; +all_ext(<<"aiff">>) -> {<<"audio">>, <<"x-aiff">>, []}; +all_ext(<<"air">>) -> {<<"application">>, <<"vnd.adobe.air-application-installer-package+zip">>, []}; +all_ext(<<"ait">>) -> {<<"application">>, <<"vnd.dvb.ait">>, []}; +all_ext(<<"ami">>) -> {<<"application">>, <<"vnd.amiga.ami">>, []}; +all_ext(<<"apk">>) -> {<<"application">>, <<"vnd.android.package-archive">>, []}; +all_ext(<<"appcache">>) -> {<<"text">>, <<"cache-manifest">>, []}; +all_ext(<<"application">>) -> {<<"application">>, <<"x-ms-application">>, []}; +all_ext(<<"apr">>) -> {<<"application">>, <<"vnd.lotus-approach">>, []}; +all_ext(<<"arc">>) -> {<<"application">>, <<"x-freearc">>, []}; +all_ext(<<"asc">>) -> {<<"application">>, <<"pgp-signature">>, []}; +all_ext(<<"asf">>) -> {<<"video">>, <<"x-ms-asf">>, []}; +all_ext(<<"asm">>) -> {<<"text">>, <<"x-asm">>, []}; +all_ext(<<"aso">>) -> {<<"application">>, <<"vnd.accpac.simply.aso">>, []}; +all_ext(<<"asx">>) -> {<<"video">>, <<"x-ms-asf">>, []}; +all_ext(<<"atc">>) -> {<<"application">>, <<"vnd.acucorp">>, []}; +all_ext(<<"atom">>) -> {<<"application">>, <<"atom+xml">>, []}; +all_ext(<<"atomcat">>) -> {<<"application">>, <<"atomcat+xml">>, []}; +all_ext(<<"atomsvc">>) -> {<<"application">>, <<"atomsvc+xml">>, []}; +all_ext(<<"atx">>) -> {<<"application">>, <<"vnd.antix.game-component">>, []}; +all_ext(<<"au">>) -> {<<"audio">>, <<"basic">>, []}; +all_ext(<<"avi">>) -> {<<"video">>, <<"x-msvideo">>, []}; +all_ext(<<"aw">>) -> {<<"application">>, <<"applixware">>, []}; +all_ext(<<"azf">>) -> {<<"application">>, <<"vnd.airzip.filesecure.azf">>, []}; +all_ext(<<"azs">>) -> {<<"application">>, <<"vnd.airzip.filesecure.azs">>, []}; +all_ext(<<"azw">>) -> {<<"application">>, <<"vnd.amazon.ebook">>, []}; +all_ext(<<"bat">>) -> {<<"application">>, <<"x-msdownload">>, []}; +all_ext(<<"bcpio">>) -> {<<"application">>, <<"x-bcpio">>, []}; +all_ext(<<"bdf">>) -> {<<"application">>, <<"x-font-bdf">>, []}; 
+all_ext(<<"bdm">>) -> {<<"application">>, <<"vnd.syncml.dm+wbxml">>, []}; +all_ext(<<"bed">>) -> {<<"application">>, <<"vnd.realvnc.bed">>, []}; +all_ext(<<"bh2">>) -> {<<"application">>, <<"vnd.fujitsu.oasysprs">>, []}; +all_ext(<<"bin">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"blb">>) -> {<<"application">>, <<"x-blorb">>, []}; +all_ext(<<"blorb">>) -> {<<"application">>, <<"x-blorb">>, []}; +all_ext(<<"bmi">>) -> {<<"application">>, <<"vnd.bmi">>, []}; +all_ext(<<"bmp">>) -> {<<"image">>, <<"bmp">>, []}; +all_ext(<<"book">>) -> {<<"application">>, <<"vnd.framemaker">>, []}; +all_ext(<<"box">>) -> {<<"application">>, <<"vnd.previewsystems.box">>, []}; +all_ext(<<"boz">>) -> {<<"application">>, <<"x-bzip2">>, []}; +all_ext(<<"bpk">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"btif">>) -> {<<"image">>, <<"prs.btif">>, []}; +all_ext(<<"bz2">>) -> {<<"application">>, <<"x-bzip2">>, []}; +all_ext(<<"bz">>) -> {<<"application">>, <<"x-bzip">>, []}; +all_ext(<<"c11amc">>) -> {<<"application">>, <<"vnd.cluetrust.cartomobile-config">>, []}; +all_ext(<<"c11amz">>) -> {<<"application">>, <<"vnd.cluetrust.cartomobile-config-pkg">>, []}; +all_ext(<<"c4d">>) -> {<<"application">>, <<"vnd.clonk.c4group">>, []}; +all_ext(<<"c4f">>) -> {<<"application">>, <<"vnd.clonk.c4group">>, []}; +all_ext(<<"c4g">>) -> {<<"application">>, <<"vnd.clonk.c4group">>, []}; +all_ext(<<"c4p">>) -> {<<"application">>, <<"vnd.clonk.c4group">>, []}; +all_ext(<<"c4u">>) -> {<<"application">>, <<"vnd.clonk.c4group">>, []}; +all_ext(<<"cab">>) -> {<<"application">>, <<"vnd.ms-cab-compressed">>, []}; +all_ext(<<"caf">>) -> {<<"audio">>, <<"x-caf">>, []}; +all_ext(<<"cap">>) -> {<<"application">>, <<"vnd.tcpdump.pcap">>, []}; +all_ext(<<"car">>) -> {<<"application">>, <<"vnd.curl.car">>, []}; +all_ext(<<"cat">>) -> {<<"application">>, <<"vnd.ms-pki.seccat">>, []}; +all_ext(<<"cb7">>) -> {<<"application">>, <<"x-cbr">>, []}; +all_ext(<<"cba">>) -> {<<"application">>, <<"x-cbr">>, []}; +all_ext(<<"cbr">>) -> {<<"application">>, <<"x-cbr">>, []}; +all_ext(<<"cbt">>) -> {<<"application">>, <<"x-cbr">>, []}; +all_ext(<<"cbz">>) -> {<<"application">>, <<"x-cbr">>, []}; +all_ext(<<"cct">>) -> {<<"application">>, <<"x-director">>, []}; +all_ext(<<"cc">>) -> {<<"text">>, <<"x-c">>, []}; +all_ext(<<"ccxml">>) -> {<<"application">>, <<"ccxml+xml">>, []}; +all_ext(<<"cdbcmsg">>) -> {<<"application">>, <<"vnd.contact.cmsg">>, []}; +all_ext(<<"cdf">>) -> {<<"application">>, <<"x-netcdf">>, []}; +all_ext(<<"cdkey">>) -> {<<"application">>, <<"vnd.mediastation.cdkey">>, []}; +all_ext(<<"cdmia">>) -> {<<"application">>, <<"cdmi-capability">>, []}; +all_ext(<<"cdmic">>) -> {<<"application">>, <<"cdmi-container">>, []}; +all_ext(<<"cdmid">>) -> {<<"application">>, <<"cdmi-domain">>, []}; +all_ext(<<"cdmio">>) -> {<<"application">>, <<"cdmi-object">>, []}; +all_ext(<<"cdmiq">>) -> {<<"application">>, <<"cdmi-queue">>, []}; +all_ext(<<"cdx">>) -> {<<"chemical">>, <<"x-cdx">>, []}; +all_ext(<<"cdxml">>) -> {<<"application">>, <<"vnd.chemdraw+xml">>, []}; +all_ext(<<"cdy">>) -> {<<"application">>, <<"vnd.cinderella">>, []}; +all_ext(<<"cer">>) -> {<<"application">>, <<"pkix-cert">>, []}; +all_ext(<<"cfs">>) -> {<<"application">>, <<"x-cfs-compressed">>, []}; +all_ext(<<"cgm">>) -> {<<"image">>, <<"cgm">>, []}; +all_ext(<<"chat">>) -> {<<"application">>, <<"x-chat">>, []}; +all_ext(<<"chm">>) -> {<<"application">>, <<"vnd.ms-htmlhelp">>, []}; +all_ext(<<"chrt">>) -> {<<"application">>, <<"vnd.kde.kchart">>, []}; 
+all_ext(<<"cif">>) -> {<<"chemical">>, <<"x-cif">>, []}; +all_ext(<<"cii">>) -> {<<"application">>, <<"vnd.anser-web-certificate-issue-initiation">>, []}; +all_ext(<<"cil">>) -> {<<"application">>, <<"vnd.ms-artgalry">>, []}; +all_ext(<<"cla">>) -> {<<"application">>, <<"vnd.claymore">>, []}; +all_ext(<<"class">>) -> {<<"application">>, <<"java-vm">>, []}; +all_ext(<<"clkk">>) -> {<<"application">>, <<"vnd.crick.clicker.keyboard">>, []}; +all_ext(<<"clkp">>) -> {<<"application">>, <<"vnd.crick.clicker.palette">>, []}; +all_ext(<<"clkt">>) -> {<<"application">>, <<"vnd.crick.clicker.template">>, []}; +all_ext(<<"clkw">>) -> {<<"application">>, <<"vnd.crick.clicker.wordbank">>, []}; +all_ext(<<"clkx">>) -> {<<"application">>, <<"vnd.crick.clicker">>, []}; +all_ext(<<"clp">>) -> {<<"application">>, <<"x-msclip">>, []}; +all_ext(<<"cmc">>) -> {<<"application">>, <<"vnd.cosmocaller">>, []}; +all_ext(<<"cmdf">>) -> {<<"chemical">>, <<"x-cmdf">>, []}; +all_ext(<<"cml">>) -> {<<"chemical">>, <<"x-cml">>, []}; +all_ext(<<"cmp">>) -> {<<"application">>, <<"vnd.yellowriver-custom-menu">>, []}; +all_ext(<<"cmx">>) -> {<<"image">>, <<"x-cmx">>, []}; +all_ext(<<"cod">>) -> {<<"application">>, <<"vnd.rim.cod">>, []}; +all_ext(<<"com">>) -> {<<"application">>, <<"x-msdownload">>, []}; +all_ext(<<"conf">>) -> {<<"text">>, <<"plain">>, []}; +all_ext(<<"cpio">>) -> {<<"application">>, <<"x-cpio">>, []}; +all_ext(<<"cpp">>) -> {<<"text">>, <<"x-c">>, []}; +all_ext(<<"cpt">>) -> {<<"application">>, <<"mac-compactpro">>, []}; +all_ext(<<"crd">>) -> {<<"application">>, <<"x-mscardfile">>, []}; +all_ext(<<"crl">>) -> {<<"application">>, <<"pkix-crl">>, []}; +all_ext(<<"crt">>) -> {<<"application">>, <<"x-x509-ca-cert">>, []}; +all_ext(<<"cryptonote">>) -> {<<"application">>, <<"vnd.rig.cryptonote">>, []}; +all_ext(<<"csh">>) -> {<<"application">>, <<"x-csh">>, []}; +all_ext(<<"csml">>) -> {<<"chemical">>, <<"x-csml">>, []}; +all_ext(<<"csp">>) -> {<<"application">>, <<"vnd.commonspace">>, []}; +all_ext(<<"css">>) -> {<<"text">>, <<"css">>, []}; +all_ext(<<"cst">>) -> {<<"application">>, <<"x-director">>, []}; +all_ext(<<"csv">>) -> {<<"text">>, <<"csv">>, []}; +all_ext(<<"c">>) -> {<<"text">>, <<"x-c">>, []}; +all_ext(<<"cu">>) -> {<<"application">>, <<"cu-seeme">>, []}; +all_ext(<<"curl">>) -> {<<"text">>, <<"vnd.curl">>, []}; +all_ext(<<"cww">>) -> {<<"application">>, <<"prs.cww">>, []}; +all_ext(<<"cxt">>) -> {<<"application">>, <<"x-director">>, []}; +all_ext(<<"cxx">>) -> {<<"text">>, <<"x-c">>, []}; +all_ext(<<"dae">>) -> {<<"model">>, <<"vnd.collada+xml">>, []}; +all_ext(<<"daf">>) -> {<<"application">>, <<"vnd.mobius.daf">>, []}; +all_ext(<<"dart">>) -> {<<"application">>, <<"vnd.dart">>, []}; +all_ext(<<"dataless">>) -> {<<"application">>, <<"vnd.fdsn.seed">>, []}; +all_ext(<<"davmount">>) -> {<<"application">>, <<"davmount+xml">>, []}; +all_ext(<<"dbk">>) -> {<<"application">>, <<"docbook+xml">>, []}; +all_ext(<<"dcr">>) -> {<<"application">>, <<"x-director">>, []}; +all_ext(<<"dcurl">>) -> {<<"text">>, <<"vnd.curl.dcurl">>, []}; +all_ext(<<"dd2">>) -> {<<"application">>, <<"vnd.oma.dd2+xml">>, []}; +all_ext(<<"ddd">>) -> {<<"application">>, <<"vnd.fujixerox.ddd">>, []}; +all_ext(<<"deb">>) -> {<<"application">>, <<"x-debian-package">>, []}; +all_ext(<<"def">>) -> {<<"text">>, <<"plain">>, []}; +all_ext(<<"deploy">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"der">>) -> {<<"application">>, <<"x-x509-ca-cert">>, []}; +all_ext(<<"dfac">>) -> {<<"application">>, <<"vnd.dreamfactory">>, 
[]}; +all_ext(<<"dgc">>) -> {<<"application">>, <<"x-dgc-compressed">>, []}; +all_ext(<<"dic">>) -> {<<"text">>, <<"x-c">>, []}; +all_ext(<<"dir">>) -> {<<"application">>, <<"x-director">>, []}; +all_ext(<<"dis">>) -> {<<"application">>, <<"vnd.mobius.dis">>, []}; +all_ext(<<"dist">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"distz">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"djv">>) -> {<<"image">>, <<"vnd.djvu">>, []}; +all_ext(<<"djvu">>) -> {<<"image">>, <<"vnd.djvu">>, []}; +all_ext(<<"dll">>) -> {<<"application">>, <<"x-msdownload">>, []}; +all_ext(<<"dmg">>) -> {<<"application">>, <<"x-apple-diskimage">>, []}; +all_ext(<<"dmp">>) -> {<<"application">>, <<"vnd.tcpdump.pcap">>, []}; +all_ext(<<"dms">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"dna">>) -> {<<"application">>, <<"vnd.dna">>, []}; +all_ext(<<"doc">>) -> {<<"application">>, <<"msword">>, []}; +all_ext(<<"docm">>) -> {<<"application">>, <<"vnd.ms-word.document.macroenabled.12">>, []}; +all_ext(<<"docx">>) -> {<<"application">>, <<"vnd.openxmlformats-officedocument.wordprocessingml.document">>, []}; +all_ext(<<"dot">>) -> {<<"application">>, <<"msword">>, []}; +all_ext(<<"dotm">>) -> {<<"application">>, <<"vnd.ms-word.template.macroenabled.12">>, []}; +all_ext(<<"dotx">>) -> {<<"application">>, <<"vnd.openxmlformats-officedocument.wordprocessingml.template">>, []}; +all_ext(<<"dp">>) -> {<<"application">>, <<"vnd.osgi.dp">>, []}; +all_ext(<<"dpg">>) -> {<<"application">>, <<"vnd.dpgraph">>, []}; +all_ext(<<"dra">>) -> {<<"audio">>, <<"vnd.dra">>, []}; +all_ext(<<"dsc">>) -> {<<"text">>, <<"prs.lines.tag">>, []}; +all_ext(<<"dssc">>) -> {<<"application">>, <<"dssc+der">>, []}; +all_ext(<<"dtb">>) -> {<<"application">>, <<"x-dtbook+xml">>, []}; +all_ext(<<"dtd">>) -> {<<"application">>, <<"xml-dtd">>, []}; +all_ext(<<"dts">>) -> {<<"audio">>, <<"vnd.dts">>, []}; +all_ext(<<"dtshd">>) -> {<<"audio">>, <<"vnd.dts.hd">>, []}; +all_ext(<<"dump">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"dvb">>) -> {<<"video">>, <<"vnd.dvb.file">>, []}; +all_ext(<<"dvi">>) -> {<<"application">>, <<"x-dvi">>, []}; +all_ext(<<"dwf">>) -> {<<"model">>, <<"vnd.dwf">>, []}; +all_ext(<<"dwg">>) -> {<<"image">>, <<"vnd.dwg">>, []}; +all_ext(<<"dxf">>) -> {<<"image">>, <<"vnd.dxf">>, []}; +all_ext(<<"dxp">>) -> {<<"application">>, <<"vnd.spotfire.dxp">>, []}; +all_ext(<<"dxr">>) -> {<<"application">>, <<"x-director">>, []}; +all_ext(<<"ecelp4800">>) -> {<<"audio">>, <<"vnd.nuera.ecelp4800">>, []}; +all_ext(<<"ecelp7470">>) -> {<<"audio">>, <<"vnd.nuera.ecelp7470">>, []}; +all_ext(<<"ecelp9600">>) -> {<<"audio">>, <<"vnd.nuera.ecelp9600">>, []}; +all_ext(<<"ecma">>) -> {<<"application">>, <<"ecmascript">>, []}; +all_ext(<<"edm">>) -> {<<"application">>, <<"vnd.novadigm.edm">>, []}; +all_ext(<<"edx">>) -> {<<"application">>, <<"vnd.novadigm.edx">>, []}; +all_ext(<<"efif">>) -> {<<"application">>, <<"vnd.picsel">>, []}; +all_ext(<<"ei6">>) -> {<<"application">>, <<"vnd.pg.osasli">>, []}; +all_ext(<<"elc">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"emf">>) -> {<<"application">>, <<"x-msmetafile">>, []}; +all_ext(<<"eml">>) -> {<<"message">>, <<"rfc822">>, []}; +all_ext(<<"emma">>) -> {<<"application">>, <<"emma+xml">>, []}; +all_ext(<<"emz">>) -> {<<"application">>, <<"x-msmetafile">>, []}; +all_ext(<<"eol">>) -> {<<"audio">>, <<"vnd.digital-winds">>, []}; +all_ext(<<"eot">>) -> {<<"application">>, <<"vnd.ms-fontobject">>, []}; +all_ext(<<"eps">>) -> 
{<<"application">>, <<"postscript">>, []}; +all_ext(<<"epub">>) -> {<<"application">>, <<"epub+zip">>, []}; +all_ext(<<"es3">>) -> {<<"application">>, <<"vnd.eszigno3+xml">>, []}; +all_ext(<<"esa">>) -> {<<"application">>, <<"vnd.osgi.subsystem">>, []}; +all_ext(<<"esf">>) -> {<<"application">>, <<"vnd.epson.esf">>, []}; +all_ext(<<"et3">>) -> {<<"application">>, <<"vnd.eszigno3+xml">>, []}; +all_ext(<<"etx">>) -> {<<"text">>, <<"x-setext">>, []}; +all_ext(<<"eva">>) -> {<<"application">>, <<"x-eva">>, []}; +all_ext(<<"evy">>) -> {<<"application">>, <<"x-envoy">>, []}; +all_ext(<<"exe">>) -> {<<"application">>, <<"x-msdownload">>, []}; +all_ext(<<"exi">>) -> {<<"application">>, <<"exi">>, []}; +all_ext(<<"ext">>) -> {<<"application">>, <<"vnd.novadigm.ext">>, []}; +all_ext(<<"ez2">>) -> {<<"application">>, <<"vnd.ezpix-album">>, []}; +all_ext(<<"ez3">>) -> {<<"application">>, <<"vnd.ezpix-package">>, []}; +all_ext(<<"ez">>) -> {<<"application">>, <<"andrew-inset">>, []}; +all_ext(<<"f4v">>) -> {<<"video">>, <<"x-f4v">>, []}; +all_ext(<<"f77">>) -> {<<"text">>, <<"x-fortran">>, []}; +all_ext(<<"f90">>) -> {<<"text">>, <<"x-fortran">>, []}; +all_ext(<<"fbs">>) -> {<<"image">>, <<"vnd.fastbidsheet">>, []}; +all_ext(<<"fcdt">>) -> {<<"application">>, <<"vnd.adobe.formscentral.fcdt">>, []}; +all_ext(<<"fcs">>) -> {<<"application">>, <<"vnd.isac.fcs">>, []}; +all_ext(<<"fdf">>) -> {<<"application">>, <<"vnd.fdf">>, []}; +all_ext(<<"fe_launch">>) -> {<<"application">>, <<"vnd.denovo.fcselayout-link">>, []}; +all_ext(<<"fg5">>) -> {<<"application">>, <<"vnd.fujitsu.oasysgp">>, []}; +all_ext(<<"fgd">>) -> {<<"application">>, <<"x-director">>, []}; +all_ext(<<"fh4">>) -> {<<"image">>, <<"x-freehand">>, []}; +all_ext(<<"fh5">>) -> {<<"image">>, <<"x-freehand">>, []}; +all_ext(<<"fh7">>) -> {<<"image">>, <<"x-freehand">>, []}; +all_ext(<<"fhc">>) -> {<<"image">>, <<"x-freehand">>, []}; +all_ext(<<"fh">>) -> {<<"image">>, <<"x-freehand">>, []}; +all_ext(<<"fig">>) -> {<<"application">>, <<"x-xfig">>, []}; +all_ext(<<"flac">>) -> {<<"audio">>, <<"x-flac">>, []}; +all_ext(<<"fli">>) -> {<<"video">>, <<"x-fli">>, []}; +all_ext(<<"flo">>) -> {<<"application">>, <<"vnd.micrografx.flo">>, []}; +all_ext(<<"flv">>) -> {<<"video">>, <<"x-flv">>, []}; +all_ext(<<"flw">>) -> {<<"application">>, <<"vnd.kde.kivio">>, []}; +all_ext(<<"flx">>) -> {<<"text">>, <<"vnd.fmi.flexstor">>, []}; +all_ext(<<"fly">>) -> {<<"text">>, <<"vnd.fly">>, []}; +all_ext(<<"fm">>) -> {<<"application">>, <<"vnd.framemaker">>, []}; +all_ext(<<"fnc">>) -> {<<"application">>, <<"vnd.frogans.fnc">>, []}; +all_ext(<<"for">>) -> {<<"text">>, <<"x-fortran">>, []}; +all_ext(<<"fpx">>) -> {<<"image">>, <<"vnd.fpx">>, []}; +all_ext(<<"frame">>) -> {<<"application">>, <<"vnd.framemaker">>, []}; +all_ext(<<"fsc">>) -> {<<"application">>, <<"vnd.fsc.weblaunch">>, []}; +all_ext(<<"fst">>) -> {<<"image">>, <<"vnd.fst">>, []}; +all_ext(<<"ftc">>) -> {<<"application">>, <<"vnd.fluxtime.clip">>, []}; +all_ext(<<"f">>) -> {<<"text">>, <<"x-fortran">>, []}; +all_ext(<<"fti">>) -> {<<"application">>, <<"vnd.anser-web-funds-transfer-initiation">>, []}; +all_ext(<<"fvt">>) -> {<<"video">>, <<"vnd.fvt">>, []}; +all_ext(<<"fxp">>) -> {<<"application">>, <<"vnd.adobe.fxp">>, []}; +all_ext(<<"fxpl">>) -> {<<"application">>, <<"vnd.adobe.fxp">>, []}; +all_ext(<<"fzs">>) -> {<<"application">>, <<"vnd.fuzzysheet">>, []}; +all_ext(<<"g2w">>) -> {<<"application">>, <<"vnd.geoplan">>, []}; +all_ext(<<"g3">>) -> {<<"image">>, <<"g3fax">>, []}; +all_ext(<<"g3w">>) -> 
{<<"application">>, <<"vnd.geospace">>, []}; +all_ext(<<"gac">>) -> {<<"application">>, <<"vnd.groove-account">>, []}; +all_ext(<<"gam">>) -> {<<"application">>, <<"x-tads">>, []}; +all_ext(<<"gbr">>) -> {<<"application">>, <<"rpki-ghostbusters">>, []}; +all_ext(<<"gca">>) -> {<<"application">>, <<"x-gca-compressed">>, []}; +all_ext(<<"gdl">>) -> {<<"model">>, <<"vnd.gdl">>, []}; +all_ext(<<"geo">>) -> {<<"application">>, <<"vnd.dynageo">>, []}; +all_ext(<<"gex">>) -> {<<"application">>, <<"vnd.geometry-explorer">>, []}; +all_ext(<<"ggb">>) -> {<<"application">>, <<"vnd.geogebra.file">>, []}; +all_ext(<<"ggt">>) -> {<<"application">>, <<"vnd.geogebra.tool">>, []}; +all_ext(<<"ghf">>) -> {<<"application">>, <<"vnd.groove-help">>, []}; +all_ext(<<"gif">>) -> {<<"image">>, <<"gif">>, []}; +all_ext(<<"gim">>) -> {<<"application">>, <<"vnd.groove-identity-message">>, []}; +all_ext(<<"gml">>) -> {<<"application">>, <<"gml+xml">>, []}; +all_ext(<<"gmx">>) -> {<<"application">>, <<"vnd.gmx">>, []}; +all_ext(<<"gnumeric">>) -> {<<"application">>, <<"x-gnumeric">>, []}; +all_ext(<<"gph">>) -> {<<"application">>, <<"vnd.flographit">>, []}; +all_ext(<<"gpx">>) -> {<<"application">>, <<"gpx+xml">>, []}; +all_ext(<<"gqf">>) -> {<<"application">>, <<"vnd.grafeq">>, []}; +all_ext(<<"gqs">>) -> {<<"application">>, <<"vnd.grafeq">>, []}; +all_ext(<<"gram">>) -> {<<"application">>, <<"srgs">>, []}; +all_ext(<<"gramps">>) -> {<<"application">>, <<"x-gramps-xml">>, []}; +all_ext(<<"gre">>) -> {<<"application">>, <<"vnd.geometry-explorer">>, []}; +all_ext(<<"grv">>) -> {<<"application">>, <<"vnd.groove-injector">>, []}; +all_ext(<<"grxml">>) -> {<<"application">>, <<"srgs+xml">>, []}; +all_ext(<<"gsf">>) -> {<<"application">>, <<"x-font-ghostscript">>, []}; +all_ext(<<"gtar">>) -> {<<"application">>, <<"x-gtar">>, []}; +all_ext(<<"gtm">>) -> {<<"application">>, <<"vnd.groove-tool-message">>, []}; +all_ext(<<"gtw">>) -> {<<"model">>, <<"vnd.gtw">>, []}; +all_ext(<<"gv">>) -> {<<"text">>, <<"vnd.graphviz">>, []}; +all_ext(<<"gxf">>) -> {<<"application">>, <<"gxf">>, []}; +all_ext(<<"gxt">>) -> {<<"application">>, <<"vnd.geonext">>, []}; +all_ext(<<"h261">>) -> {<<"video">>, <<"h261">>, []}; +all_ext(<<"h263">>) -> {<<"video">>, <<"h263">>, []}; +all_ext(<<"h264">>) -> {<<"video">>, <<"h264">>, []}; +all_ext(<<"hal">>) -> {<<"application">>, <<"vnd.hal+xml">>, []}; +all_ext(<<"hbci">>) -> {<<"application">>, <<"vnd.hbci">>, []}; +all_ext(<<"hdf">>) -> {<<"application">>, <<"x-hdf">>, []}; +all_ext(<<"hh">>) -> {<<"text">>, <<"x-c">>, []}; +all_ext(<<"hlp">>) -> {<<"application">>, <<"winhlp">>, []}; +all_ext(<<"hpgl">>) -> {<<"application">>, <<"vnd.hp-hpgl">>, []}; +all_ext(<<"hpid">>) -> {<<"application">>, <<"vnd.hp-hpid">>, []}; +all_ext(<<"hps">>) -> {<<"application">>, <<"vnd.hp-hps">>, []}; +all_ext(<<"hqx">>) -> {<<"application">>, <<"mac-binhex40">>, []}; +all_ext(<<"h">>) -> {<<"text">>, <<"x-c">>, []}; +all_ext(<<"htke">>) -> {<<"application">>, <<"vnd.kenameaapp">>, []}; +all_ext(<<"html">>) -> {<<"text">>, <<"html">>, []}; +all_ext(<<"htm">>) -> {<<"text">>, <<"html">>, []}; +all_ext(<<"hvd">>) -> {<<"application">>, <<"vnd.yamaha.hv-dic">>, []}; +all_ext(<<"hvp">>) -> {<<"application">>, <<"vnd.yamaha.hv-voice">>, []}; +all_ext(<<"hvs">>) -> {<<"application">>, <<"vnd.yamaha.hv-script">>, []}; +all_ext(<<"i2g">>) -> {<<"application">>, <<"vnd.intergeo">>, []}; +all_ext(<<"icc">>) -> {<<"application">>, <<"vnd.iccprofile">>, []}; +all_ext(<<"ice">>) -> {<<"x-conference">>, <<"x-cooltalk">>, []}; 
+all_ext(<<"icm">>) -> {<<"application">>, <<"vnd.iccprofile">>, []}; +all_ext(<<"ico">>) -> {<<"image">>, <<"x-icon">>, []}; +all_ext(<<"ics">>) -> {<<"text">>, <<"calendar">>, []}; +all_ext(<<"ief">>) -> {<<"image">>, <<"ief">>, []}; +all_ext(<<"ifb">>) -> {<<"text">>, <<"calendar">>, []}; +all_ext(<<"ifm">>) -> {<<"application">>, <<"vnd.shana.informed.formdata">>, []}; +all_ext(<<"iges">>) -> {<<"model">>, <<"iges">>, []}; +all_ext(<<"igl">>) -> {<<"application">>, <<"vnd.igloader">>, []}; +all_ext(<<"igm">>) -> {<<"application">>, <<"vnd.insors.igm">>, []}; +all_ext(<<"igs">>) -> {<<"model">>, <<"iges">>, []}; +all_ext(<<"igx">>) -> {<<"application">>, <<"vnd.micrografx.igx">>, []}; +all_ext(<<"iif">>) -> {<<"application">>, <<"vnd.shana.informed.interchange">>, []}; +all_ext(<<"imp">>) -> {<<"application">>, <<"vnd.accpac.simply.imp">>, []}; +all_ext(<<"ims">>) -> {<<"application">>, <<"vnd.ms-ims">>, []}; +all_ext(<<"ink">>) -> {<<"application">>, <<"inkml+xml">>, []}; +all_ext(<<"inkml">>) -> {<<"application">>, <<"inkml+xml">>, []}; +all_ext(<<"install">>) -> {<<"application">>, <<"x-install-instructions">>, []}; +all_ext(<<"in">>) -> {<<"text">>, <<"plain">>, []}; +all_ext(<<"iota">>) -> {<<"application">>, <<"vnd.astraea-software.iota">>, []}; +all_ext(<<"ipfix">>) -> {<<"application">>, <<"ipfix">>, []}; +all_ext(<<"ipk">>) -> {<<"application">>, <<"vnd.shana.informed.package">>, []}; +all_ext(<<"irm">>) -> {<<"application">>, <<"vnd.ibm.rights-management">>, []}; +all_ext(<<"irp">>) -> {<<"application">>, <<"vnd.irepository.package+xml">>, []}; +all_ext(<<"iso">>) -> {<<"application">>, <<"x-iso9660-image">>, []}; +all_ext(<<"itp">>) -> {<<"application">>, <<"vnd.shana.informed.formtemplate">>, []}; +all_ext(<<"ivp">>) -> {<<"application">>, <<"vnd.immervision-ivp">>, []}; +all_ext(<<"ivu">>) -> {<<"application">>, <<"vnd.immervision-ivu">>, []}; +all_ext(<<"jad">>) -> {<<"text">>, <<"vnd.sun.j2me.app-descriptor">>, []}; +all_ext(<<"jam">>) -> {<<"application">>, <<"vnd.jam">>, []}; +all_ext(<<"jar">>) -> {<<"application">>, <<"java-archive">>, []}; +all_ext(<<"java">>) -> {<<"text">>, <<"x-java-source">>, []}; +all_ext(<<"jisp">>) -> {<<"application">>, <<"vnd.jisp">>, []}; +all_ext(<<"jlt">>) -> {<<"application">>, <<"vnd.hp-jlyt">>, []}; +all_ext(<<"jnlp">>) -> {<<"application">>, <<"x-java-jnlp-file">>, []}; +all_ext(<<"joda">>) -> {<<"application">>, <<"vnd.joost.joda-archive">>, []}; +all_ext(<<"jpeg">>) -> {<<"image">>, <<"jpeg">>, []}; +all_ext(<<"jpe">>) -> {<<"image">>, <<"jpeg">>, []}; +all_ext(<<"jpg">>) -> {<<"image">>, <<"jpeg">>, []}; +all_ext(<<"jpgm">>) -> {<<"video">>, <<"jpm">>, []}; +all_ext(<<"jpgv">>) -> {<<"video">>, <<"jpeg">>, []}; +all_ext(<<"jpm">>) -> {<<"video">>, <<"jpm">>, []}; +all_ext(<<"js">>) -> {<<"application">>, <<"javascript">>, []}; +all_ext(<<"json">>) -> {<<"application">>, <<"json">>, []}; +all_ext(<<"jsonml">>) -> {<<"application">>, <<"jsonml+json">>, []}; +all_ext(<<"kar">>) -> {<<"audio">>, <<"midi">>, []}; +all_ext(<<"karbon">>) -> {<<"application">>, <<"vnd.kde.karbon">>, []}; +all_ext(<<"kfo">>) -> {<<"application">>, <<"vnd.kde.kformula">>, []}; +all_ext(<<"kia">>) -> {<<"application">>, <<"vnd.kidspiration">>, []}; +all_ext(<<"kml">>) -> {<<"application">>, <<"vnd.google-earth.kml+xml">>, []}; +all_ext(<<"kmz">>) -> {<<"application">>, <<"vnd.google-earth.kmz">>, []}; +all_ext(<<"kne">>) -> {<<"application">>, <<"vnd.kinar">>, []}; +all_ext(<<"knp">>) -> {<<"application">>, <<"vnd.kinar">>, []}; +all_ext(<<"kon">>) -> 
{<<"application">>, <<"vnd.kde.kontour">>, []}; +all_ext(<<"kpr">>) -> {<<"application">>, <<"vnd.kde.kpresenter">>, []}; +all_ext(<<"kpt">>) -> {<<"application">>, <<"vnd.kde.kpresenter">>, []}; +all_ext(<<"kpxx">>) -> {<<"application">>, <<"vnd.ds-keypoint">>, []}; +all_ext(<<"ksp">>) -> {<<"application">>, <<"vnd.kde.kspread">>, []}; +all_ext(<<"ktr">>) -> {<<"application">>, <<"vnd.kahootz">>, []}; +all_ext(<<"ktx">>) -> {<<"image">>, <<"ktx">>, []}; +all_ext(<<"ktz">>) -> {<<"application">>, <<"vnd.kahootz">>, []}; +all_ext(<<"kwd">>) -> {<<"application">>, <<"vnd.kde.kword">>, []}; +all_ext(<<"kwt">>) -> {<<"application">>, <<"vnd.kde.kword">>, []}; +all_ext(<<"lasxml">>) -> {<<"application">>, <<"vnd.las.las+xml">>, []}; +all_ext(<<"latex">>) -> {<<"application">>, <<"x-latex">>, []}; +all_ext(<<"lbd">>) -> {<<"application">>, <<"vnd.llamagraphics.life-balance.desktop">>, []}; +all_ext(<<"lbe">>) -> {<<"application">>, <<"vnd.llamagraphics.life-balance.exchange+xml">>, []}; +all_ext(<<"les">>) -> {<<"application">>, <<"vnd.hhe.lesson-player">>, []}; +all_ext(<<"lha">>) -> {<<"application">>, <<"x-lzh-compressed">>, []}; +all_ext(<<"link66">>) -> {<<"application">>, <<"vnd.route66.link66+xml">>, []}; +all_ext(<<"list3820">>) -> {<<"application">>, <<"vnd.ibm.modcap">>, []}; +all_ext(<<"listafp">>) -> {<<"application">>, <<"vnd.ibm.modcap">>, []}; +all_ext(<<"list">>) -> {<<"text">>, <<"plain">>, []}; +all_ext(<<"lnk">>) -> {<<"application">>, <<"x-ms-shortcut">>, []}; +all_ext(<<"log">>) -> {<<"text">>, <<"plain">>, []}; +all_ext(<<"lostxml">>) -> {<<"application">>, <<"lost+xml">>, []}; +all_ext(<<"lrf">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"lrm">>) -> {<<"application">>, <<"vnd.ms-lrm">>, []}; +all_ext(<<"ltf">>) -> {<<"application">>, <<"vnd.frogans.ltf">>, []}; +all_ext(<<"lvp">>) -> {<<"audio">>, <<"vnd.lucent.voice">>, []}; +all_ext(<<"lwp">>) -> {<<"application">>, <<"vnd.lotus-wordpro">>, []}; +all_ext(<<"lzh">>) -> {<<"application">>, <<"x-lzh-compressed">>, []}; +all_ext(<<"m13">>) -> {<<"application">>, <<"x-msmediaview">>, []}; +all_ext(<<"m14">>) -> {<<"application">>, <<"x-msmediaview">>, []}; +all_ext(<<"m1v">>) -> {<<"video">>, <<"mpeg">>, []}; +all_ext(<<"m21">>) -> {<<"application">>, <<"mp21">>, []}; +all_ext(<<"m2a">>) -> {<<"audio">>, <<"mpeg">>, []}; +all_ext(<<"m2v">>) -> {<<"video">>, <<"mpeg">>, []}; +all_ext(<<"m3a">>) -> {<<"audio">>, <<"mpeg">>, []}; +all_ext(<<"m3u8">>) -> {<<"application">>, <<"vnd.apple.mpegurl">>, []}; +all_ext(<<"m3u">>) -> {<<"audio">>, <<"x-mpegurl">>, []}; +all_ext(<<"m4a">>) -> {<<"audio">>, <<"mp4">>, []}; +all_ext(<<"m4u">>) -> {<<"video">>, <<"vnd.mpegurl">>, []}; +all_ext(<<"m4v">>) -> {<<"video">>, <<"x-m4v">>, []}; +all_ext(<<"ma">>) -> {<<"application">>, <<"mathematica">>, []}; +all_ext(<<"mads">>) -> {<<"application">>, <<"mads+xml">>, []}; +all_ext(<<"mag">>) -> {<<"application">>, <<"vnd.ecowin.chart">>, []}; +all_ext(<<"maker">>) -> {<<"application">>, <<"vnd.framemaker">>, []}; +all_ext(<<"man">>) -> {<<"text">>, <<"troff">>, []}; +all_ext(<<"mar">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"mathml">>) -> {<<"application">>, <<"mathml+xml">>, []}; +all_ext(<<"mb">>) -> {<<"application">>, <<"mathematica">>, []}; +all_ext(<<"mbk">>) -> {<<"application">>, <<"vnd.mobius.mbk">>, []}; +all_ext(<<"mbox">>) -> {<<"application">>, <<"mbox">>, []}; +all_ext(<<"mc1">>) -> {<<"application">>, <<"vnd.medcalcdata">>, []}; +all_ext(<<"mcd">>) -> {<<"application">>, <<"vnd.mcd">>, []}; 
+all_ext(<<"mcurl">>) -> {<<"text">>, <<"vnd.curl.mcurl">>, []}; +all_ext(<<"mdb">>) -> {<<"application">>, <<"x-msaccess">>, []}; +all_ext(<<"mdi">>) -> {<<"image">>, <<"vnd.ms-modi">>, []}; +all_ext(<<"mesh">>) -> {<<"model">>, <<"mesh">>, []}; +all_ext(<<"meta4">>) -> {<<"application">>, <<"metalink4+xml">>, []}; +all_ext(<<"metalink">>) -> {<<"application">>, <<"metalink+xml">>, []}; +all_ext(<<"me">>) -> {<<"text">>, <<"troff">>, []}; +all_ext(<<"mets">>) -> {<<"application">>, <<"mets+xml">>, []}; +all_ext(<<"mfm">>) -> {<<"application">>, <<"vnd.mfmp">>, []}; +all_ext(<<"mft">>) -> {<<"application">>, <<"rpki-manifest">>, []}; +all_ext(<<"mgp">>) -> {<<"application">>, <<"vnd.osgeo.mapguide.package">>, []}; +all_ext(<<"mgz">>) -> {<<"application">>, <<"vnd.proteus.magazine">>, []}; +all_ext(<<"mid">>) -> {<<"audio">>, <<"midi">>, []}; +all_ext(<<"midi">>) -> {<<"audio">>, <<"midi">>, []}; +all_ext(<<"mie">>) -> {<<"application">>, <<"x-mie">>, []}; +all_ext(<<"mif">>) -> {<<"application">>, <<"vnd.mif">>, []}; +all_ext(<<"mime">>) -> {<<"message">>, <<"rfc822">>, []}; +all_ext(<<"mj2">>) -> {<<"video">>, <<"mj2">>, []}; +all_ext(<<"mjp2">>) -> {<<"video">>, <<"mj2">>, []}; +all_ext(<<"mk3d">>) -> {<<"video">>, <<"x-matroska">>, []}; +all_ext(<<"mka">>) -> {<<"audio">>, <<"x-matroska">>, []}; +all_ext(<<"mks">>) -> {<<"video">>, <<"x-matroska">>, []}; +all_ext(<<"mkv">>) -> {<<"video">>, <<"x-matroska">>, []}; +all_ext(<<"mlp">>) -> {<<"application">>, <<"vnd.dolby.mlp">>, []}; +all_ext(<<"mmd">>) -> {<<"application">>, <<"vnd.chipnuts.karaoke-mmd">>, []}; +all_ext(<<"mmf">>) -> {<<"application">>, <<"vnd.smaf">>, []}; +all_ext(<<"mmr">>) -> {<<"image">>, <<"vnd.fujixerox.edmics-mmr">>, []}; +all_ext(<<"mng">>) -> {<<"video">>, <<"x-mng">>, []}; +all_ext(<<"mny">>) -> {<<"application">>, <<"x-msmoney">>, []}; +all_ext(<<"mobi">>) -> {<<"application">>, <<"x-mobipocket-ebook">>, []}; +all_ext(<<"mods">>) -> {<<"application">>, <<"mods+xml">>, []}; +all_ext(<<"movie">>) -> {<<"video">>, <<"x-sgi-movie">>, []}; +all_ext(<<"mov">>) -> {<<"video">>, <<"quicktime">>, []}; +all_ext(<<"mp21">>) -> {<<"application">>, <<"mp21">>, []}; +all_ext(<<"mp2a">>) -> {<<"audio">>, <<"mpeg">>, []}; +all_ext(<<"mp2">>) -> {<<"audio">>, <<"mpeg">>, []}; +all_ext(<<"mp3">>) -> {<<"audio">>, <<"mpeg">>, []}; +all_ext(<<"mp4a">>) -> {<<"audio">>, <<"mp4">>, []}; +all_ext(<<"mp4s">>) -> {<<"application">>, <<"mp4">>, []}; +all_ext(<<"mp4">>) -> {<<"video">>, <<"mp4">>, []}; +all_ext(<<"mp4v">>) -> {<<"video">>, <<"mp4">>, []}; +all_ext(<<"mpc">>) -> {<<"application">>, <<"vnd.mophun.certificate">>, []}; +all_ext(<<"mpeg">>) -> {<<"video">>, <<"mpeg">>, []}; +all_ext(<<"mpe">>) -> {<<"video">>, <<"mpeg">>, []}; +all_ext(<<"mpg4">>) -> {<<"video">>, <<"mp4">>, []}; +all_ext(<<"mpga">>) -> {<<"audio">>, <<"mpeg">>, []}; +all_ext(<<"mpg">>) -> {<<"video">>, <<"mpeg">>, []}; +all_ext(<<"mpkg">>) -> {<<"application">>, <<"vnd.apple.installer+xml">>, []}; +all_ext(<<"mpm">>) -> {<<"application">>, <<"vnd.blueice.multipass">>, []}; +all_ext(<<"mpn">>) -> {<<"application">>, <<"vnd.mophun.application">>, []}; +all_ext(<<"mpp">>) -> {<<"application">>, <<"vnd.ms-project">>, []}; +all_ext(<<"mpt">>) -> {<<"application">>, <<"vnd.ms-project">>, []}; +all_ext(<<"mpy">>) -> {<<"application">>, <<"vnd.ibm.minipay">>, []}; +all_ext(<<"mqy">>) -> {<<"application">>, <<"vnd.mobius.mqy">>, []}; +all_ext(<<"mrc">>) -> {<<"application">>, <<"marc">>, []}; +all_ext(<<"mrcx">>) -> {<<"application">>, <<"marcxml+xml">>, []}; 
+all_ext(<<"mscml">>) -> {<<"application">>, <<"mediaservercontrol+xml">>, []}; +all_ext(<<"mseed">>) -> {<<"application">>, <<"vnd.fdsn.mseed">>, []}; +all_ext(<<"mseq">>) -> {<<"application">>, <<"vnd.mseq">>, []}; +all_ext(<<"msf">>) -> {<<"application">>, <<"vnd.epson.msf">>, []}; +all_ext(<<"msh">>) -> {<<"model">>, <<"mesh">>, []}; +all_ext(<<"msi">>) -> {<<"application">>, <<"x-msdownload">>, []}; +all_ext(<<"msl">>) -> {<<"application">>, <<"vnd.mobius.msl">>, []}; +all_ext(<<"ms">>) -> {<<"text">>, <<"troff">>, []}; +all_ext(<<"msty">>) -> {<<"application">>, <<"vnd.muvee.style">>, []}; +all_ext(<<"mts">>) -> {<<"model">>, <<"vnd.mts">>, []}; +all_ext(<<"mus">>) -> {<<"application">>, <<"vnd.musician">>, []}; +all_ext(<<"musicxml">>) -> {<<"application">>, <<"vnd.recordare.musicxml+xml">>, []}; +all_ext(<<"mvb">>) -> {<<"application">>, <<"x-msmediaview">>, []}; +all_ext(<<"mwf">>) -> {<<"application">>, <<"vnd.mfer">>, []}; +all_ext(<<"mxf">>) -> {<<"application">>, <<"mxf">>, []}; +all_ext(<<"mxl">>) -> {<<"application">>, <<"vnd.recordare.musicxml">>, []}; +all_ext(<<"mxml">>) -> {<<"application">>, <<"xv+xml">>, []}; +all_ext(<<"mxs">>) -> {<<"application">>, <<"vnd.triscape.mxs">>, []}; +all_ext(<<"mxu">>) -> {<<"video">>, <<"vnd.mpegurl">>, []}; +all_ext(<<"n3">>) -> {<<"text">>, <<"n3">>, []}; +all_ext(<<"nb">>) -> {<<"application">>, <<"mathematica">>, []}; +all_ext(<<"nbp">>) -> {<<"application">>, <<"vnd.wolfram.player">>, []}; +all_ext(<<"nc">>) -> {<<"application">>, <<"x-netcdf">>, []}; +all_ext(<<"ncx">>) -> {<<"application">>, <<"x-dtbncx+xml">>, []}; +all_ext(<<"nfo">>) -> {<<"text">>, <<"x-nfo">>, []}; +all_ext(<<"n-gage">>) -> {<<"application">>, <<"vnd.nokia.n-gage.symbian.install">>, []}; +all_ext(<<"ngdat">>) -> {<<"application">>, <<"vnd.nokia.n-gage.data">>, []}; +all_ext(<<"nitf">>) -> {<<"application">>, <<"vnd.nitf">>, []}; +all_ext(<<"nlu">>) -> {<<"application">>, <<"vnd.neurolanguage.nlu">>, []}; +all_ext(<<"nml">>) -> {<<"application">>, <<"vnd.enliven">>, []}; +all_ext(<<"nnd">>) -> {<<"application">>, <<"vnd.noblenet-directory">>, []}; +all_ext(<<"nns">>) -> {<<"application">>, <<"vnd.noblenet-sealer">>, []}; +all_ext(<<"nnw">>) -> {<<"application">>, <<"vnd.noblenet-web">>, []}; +all_ext(<<"npx">>) -> {<<"image">>, <<"vnd.net-fpx">>, []}; +all_ext(<<"nsc">>) -> {<<"application">>, <<"x-conference">>, []}; +all_ext(<<"nsf">>) -> {<<"application">>, <<"vnd.lotus-notes">>, []}; +all_ext(<<"ntf">>) -> {<<"application">>, <<"vnd.nitf">>, []}; +all_ext(<<"nzb">>) -> {<<"application">>, <<"x-nzb">>, []}; +all_ext(<<"oa2">>) -> {<<"application">>, <<"vnd.fujitsu.oasys2">>, []}; +all_ext(<<"oa3">>) -> {<<"application">>, <<"vnd.fujitsu.oasys3">>, []}; +all_ext(<<"oas">>) -> {<<"application">>, <<"vnd.fujitsu.oasys">>, []}; +all_ext(<<"obd">>) -> {<<"application">>, <<"x-msbinder">>, []}; +all_ext(<<"obj">>) -> {<<"application">>, <<"x-tgif">>, []}; +all_ext(<<"oda">>) -> {<<"application">>, <<"oda">>, []}; +all_ext(<<"odb">>) -> {<<"application">>, <<"vnd.oasis.opendocument.database">>, []}; +all_ext(<<"odc">>) -> {<<"application">>, <<"vnd.oasis.opendocument.chart">>, []}; +all_ext(<<"odf">>) -> {<<"application">>, <<"vnd.oasis.opendocument.formula">>, []}; +all_ext(<<"odft">>) -> {<<"application">>, <<"vnd.oasis.opendocument.formula-template">>, []}; +all_ext(<<"odg">>) -> {<<"application">>, <<"vnd.oasis.opendocument.graphics">>, []}; +all_ext(<<"odi">>) -> {<<"application">>, <<"vnd.oasis.opendocument.image">>, []}; +all_ext(<<"odm">>) -> 
{<<"application">>, <<"vnd.oasis.opendocument.text-master">>, []}; +all_ext(<<"odp">>) -> {<<"application">>, <<"vnd.oasis.opendocument.presentation">>, []}; +all_ext(<<"ods">>) -> {<<"application">>, <<"vnd.oasis.opendocument.spreadsheet">>, []}; +all_ext(<<"odt">>) -> {<<"application">>, <<"vnd.oasis.opendocument.text">>, []}; +all_ext(<<"oga">>) -> {<<"audio">>, <<"ogg">>, []}; +all_ext(<<"ogg">>) -> {<<"audio">>, <<"ogg">>, []}; +all_ext(<<"ogv">>) -> {<<"video">>, <<"ogg">>, []}; +all_ext(<<"ogx">>) -> {<<"application">>, <<"ogg">>, []}; +all_ext(<<"omdoc">>) -> {<<"application">>, <<"omdoc+xml">>, []}; +all_ext(<<"onepkg">>) -> {<<"application">>, <<"onenote">>, []}; +all_ext(<<"onetmp">>) -> {<<"application">>, <<"onenote">>, []}; +all_ext(<<"onetoc2">>) -> {<<"application">>, <<"onenote">>, []}; +all_ext(<<"onetoc">>) -> {<<"application">>, <<"onenote">>, []}; +all_ext(<<"opf">>) -> {<<"application">>, <<"oebps-package+xml">>, []}; +all_ext(<<"opml">>) -> {<<"text">>, <<"x-opml">>, []}; +all_ext(<<"oprc">>) -> {<<"application">>, <<"vnd.palm">>, []}; +all_ext(<<"org">>) -> {<<"application">>, <<"vnd.lotus-organizer">>, []}; +all_ext(<<"osf">>) -> {<<"application">>, <<"vnd.yamaha.openscoreformat">>, []}; +all_ext(<<"osfpvg">>) -> {<<"application">>, <<"vnd.yamaha.openscoreformat.osfpvg+xml">>, []}; +all_ext(<<"otc">>) -> {<<"application">>, <<"vnd.oasis.opendocument.chart-template">>, []}; +all_ext(<<"otf">>) -> {<<"font">>, <<"otf">>, []}; +all_ext(<<"otg">>) -> {<<"application">>, <<"vnd.oasis.opendocument.graphics-template">>, []}; +all_ext(<<"oth">>) -> {<<"application">>, <<"vnd.oasis.opendocument.text-web">>, []}; +all_ext(<<"oti">>) -> {<<"application">>, <<"vnd.oasis.opendocument.image-template">>, []}; +all_ext(<<"otp">>) -> {<<"application">>, <<"vnd.oasis.opendocument.presentation-template">>, []}; +all_ext(<<"ots">>) -> {<<"application">>, <<"vnd.oasis.opendocument.spreadsheet-template">>, []}; +all_ext(<<"ott">>) -> {<<"application">>, <<"vnd.oasis.opendocument.text-template">>, []}; +all_ext(<<"oxps">>) -> {<<"application">>, <<"oxps">>, []}; +all_ext(<<"oxt">>) -> {<<"application">>, <<"vnd.openofficeorg.extension">>, []}; +all_ext(<<"p10">>) -> {<<"application">>, <<"pkcs10">>, []}; +all_ext(<<"p12">>) -> {<<"application">>, <<"x-pkcs12">>, []}; +all_ext(<<"p7b">>) -> {<<"application">>, <<"x-pkcs7-certificates">>, []}; +all_ext(<<"p7c">>) -> {<<"application">>, <<"pkcs7-mime">>, []}; +all_ext(<<"p7m">>) -> {<<"application">>, <<"pkcs7-mime">>, []}; +all_ext(<<"p7r">>) -> {<<"application">>, <<"x-pkcs7-certreqresp">>, []}; +all_ext(<<"p7s">>) -> {<<"application">>, <<"pkcs7-signature">>, []}; +all_ext(<<"p8">>) -> {<<"application">>, <<"pkcs8">>, []}; +all_ext(<<"pas">>) -> {<<"text">>, <<"x-pascal">>, []}; +all_ext(<<"paw">>) -> {<<"application">>, <<"vnd.pawaafile">>, []}; +all_ext(<<"pbd">>) -> {<<"application">>, <<"vnd.powerbuilder6">>, []}; +all_ext(<<"pbm">>) -> {<<"image">>, <<"x-portable-bitmap">>, []}; +all_ext(<<"pcap">>) -> {<<"application">>, <<"vnd.tcpdump.pcap">>, []}; +all_ext(<<"pcf">>) -> {<<"application">>, <<"x-font-pcf">>, []}; +all_ext(<<"pcl">>) -> {<<"application">>, <<"vnd.hp-pcl">>, []}; +all_ext(<<"pclxl">>) -> {<<"application">>, <<"vnd.hp-pclxl">>, []}; +all_ext(<<"pct">>) -> {<<"image">>, <<"x-pict">>, []}; +all_ext(<<"pcurl">>) -> {<<"application">>, <<"vnd.curl.pcurl">>, []}; +all_ext(<<"pcx">>) -> {<<"image">>, <<"x-pcx">>, []}; +all_ext(<<"pdb">>) -> {<<"application">>, <<"vnd.palm">>, []}; +all_ext(<<"pdf">>) -> {<<"application">>, 
<<"pdf">>, []}; +all_ext(<<"pfa">>) -> {<<"application">>, <<"x-font-type1">>, []}; +all_ext(<<"pfb">>) -> {<<"application">>, <<"x-font-type1">>, []}; +all_ext(<<"pfm">>) -> {<<"application">>, <<"x-font-type1">>, []}; +all_ext(<<"pfr">>) -> {<<"application">>, <<"font-tdpfr">>, []}; +all_ext(<<"pfx">>) -> {<<"application">>, <<"x-pkcs12">>, []}; +all_ext(<<"pgm">>) -> {<<"image">>, <<"x-portable-graymap">>, []}; +all_ext(<<"pgn">>) -> {<<"application">>, <<"x-chess-pgn">>, []}; +all_ext(<<"pgp">>) -> {<<"application">>, <<"pgp-encrypted">>, []}; +all_ext(<<"pic">>) -> {<<"image">>, <<"x-pict">>, []}; +all_ext(<<"pkg">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"pki">>) -> {<<"application">>, <<"pkixcmp">>, []}; +all_ext(<<"pkipath">>) -> {<<"application">>, <<"pkix-pkipath">>, []}; +all_ext(<<"plb">>) -> {<<"application">>, <<"vnd.3gpp.pic-bw-large">>, []}; +all_ext(<<"plc">>) -> {<<"application">>, <<"vnd.mobius.plc">>, []}; +all_ext(<<"plf">>) -> {<<"application">>, <<"vnd.pocketlearn">>, []}; +all_ext(<<"pls">>) -> {<<"application">>, <<"pls+xml">>, []}; +all_ext(<<"pml">>) -> {<<"application">>, <<"vnd.ctc-posml">>, []}; +all_ext(<<"png">>) -> {<<"image">>, <<"png">>, []}; +all_ext(<<"pnm">>) -> {<<"image">>, <<"x-portable-anymap">>, []}; +all_ext(<<"portpkg">>) -> {<<"application">>, <<"vnd.macports.portpkg">>, []}; +all_ext(<<"pot">>) -> {<<"application">>, <<"vnd.ms-powerpoint">>, []}; +all_ext(<<"potm">>) -> {<<"application">>, <<"vnd.ms-powerpoint.template.macroenabled.12">>, []}; +all_ext(<<"potx">>) -> {<<"application">>, <<"vnd.openxmlformats-officedocument.presentationml.template">>, []}; +all_ext(<<"ppam">>) -> {<<"application">>, <<"vnd.ms-powerpoint.addin.macroenabled.12">>, []}; +all_ext(<<"ppd">>) -> {<<"application">>, <<"vnd.cups-ppd">>, []}; +all_ext(<<"ppm">>) -> {<<"image">>, <<"x-portable-pixmap">>, []}; +all_ext(<<"pps">>) -> {<<"application">>, <<"vnd.ms-powerpoint">>, []}; +all_ext(<<"ppsm">>) -> {<<"application">>, <<"vnd.ms-powerpoint.slideshow.macroenabled.12">>, []}; +all_ext(<<"ppsx">>) -> {<<"application">>, <<"vnd.openxmlformats-officedocument.presentationml.slideshow">>, []}; +all_ext(<<"ppt">>) -> {<<"application">>, <<"vnd.ms-powerpoint">>, []}; +all_ext(<<"pptm">>) -> {<<"application">>, <<"vnd.ms-powerpoint.presentation.macroenabled.12">>, []}; +all_ext(<<"pptx">>) -> {<<"application">>, <<"vnd.openxmlformats-officedocument.presentationml.presentation">>, []}; +all_ext(<<"pqa">>) -> {<<"application">>, <<"vnd.palm">>, []}; +all_ext(<<"prc">>) -> {<<"application">>, <<"x-mobipocket-ebook">>, []}; +all_ext(<<"pre">>) -> {<<"application">>, <<"vnd.lotus-freelance">>, []}; +all_ext(<<"prf">>) -> {<<"application">>, <<"pics-rules">>, []}; +all_ext(<<"ps">>) -> {<<"application">>, <<"postscript">>, []}; +all_ext(<<"psb">>) -> {<<"application">>, <<"vnd.3gpp.pic-bw-small">>, []}; +all_ext(<<"psd">>) -> {<<"image">>, <<"vnd.adobe.photoshop">>, []}; +all_ext(<<"psf">>) -> {<<"application">>, <<"x-font-linux-psf">>, []}; +all_ext(<<"pskcxml">>) -> {<<"application">>, <<"pskc+xml">>, []}; +all_ext(<<"p">>) -> {<<"text">>, <<"x-pascal">>, []}; +all_ext(<<"ptid">>) -> {<<"application">>, <<"vnd.pvi.ptid1">>, []}; +all_ext(<<"pub">>) -> {<<"application">>, <<"x-mspublisher">>, []}; +all_ext(<<"pvb">>) -> {<<"application">>, <<"vnd.3gpp.pic-bw-var">>, []}; +all_ext(<<"pwn">>) -> {<<"application">>, <<"vnd.3m.post-it-notes">>, []}; +all_ext(<<"pya">>) -> {<<"audio">>, <<"vnd.ms-playready.media.pya">>, []}; +all_ext(<<"pyv">>) -> {<<"video">>, 
<<"vnd.ms-playready.media.pyv">>, []}; +all_ext(<<"qam">>) -> {<<"application">>, <<"vnd.epson.quickanime">>, []}; +all_ext(<<"qbo">>) -> {<<"application">>, <<"vnd.intu.qbo">>, []}; +all_ext(<<"qfx">>) -> {<<"application">>, <<"vnd.intu.qfx">>, []}; +all_ext(<<"qps">>) -> {<<"application">>, <<"vnd.publishare-delta-tree">>, []}; +all_ext(<<"qt">>) -> {<<"video">>, <<"quicktime">>, []}; +all_ext(<<"qwd">>) -> {<<"application">>, <<"vnd.quark.quarkxpress">>, []}; +all_ext(<<"qwt">>) -> {<<"application">>, <<"vnd.quark.quarkxpress">>, []}; +all_ext(<<"qxb">>) -> {<<"application">>, <<"vnd.quark.quarkxpress">>, []}; +all_ext(<<"qxd">>) -> {<<"application">>, <<"vnd.quark.quarkxpress">>, []}; +all_ext(<<"qxl">>) -> {<<"application">>, <<"vnd.quark.quarkxpress">>, []}; +all_ext(<<"qxt">>) -> {<<"application">>, <<"vnd.quark.quarkxpress">>, []}; +all_ext(<<"ra">>) -> {<<"audio">>, <<"x-pn-realaudio">>, []}; +all_ext(<<"ram">>) -> {<<"audio">>, <<"x-pn-realaudio">>, []}; +all_ext(<<"rar">>) -> {<<"application">>, <<"x-rar-compressed">>, []}; +all_ext(<<"ras">>) -> {<<"image">>, <<"x-cmu-raster">>, []}; +all_ext(<<"rcprofile">>) -> {<<"application">>, <<"vnd.ipunplugged.rcprofile">>, []}; +all_ext(<<"rdf">>) -> {<<"application">>, <<"rdf+xml">>, []}; +all_ext(<<"rdz">>) -> {<<"application">>, <<"vnd.data-vision.rdz">>, []}; +all_ext(<<"rep">>) -> {<<"application">>, <<"vnd.businessobjects">>, []}; +all_ext(<<"res">>) -> {<<"application">>, <<"x-dtbresource+xml">>, []}; +all_ext(<<"rgb">>) -> {<<"image">>, <<"x-rgb">>, []}; +all_ext(<<"rif">>) -> {<<"application">>, <<"reginfo+xml">>, []}; +all_ext(<<"rip">>) -> {<<"audio">>, <<"vnd.rip">>, []}; +all_ext(<<"ris">>) -> {<<"application">>, <<"x-research-info-systems">>, []}; +all_ext(<<"rl">>) -> {<<"application">>, <<"resource-lists+xml">>, []}; +all_ext(<<"rlc">>) -> {<<"image">>, <<"vnd.fujixerox.edmics-rlc">>, []}; +all_ext(<<"rld">>) -> {<<"application">>, <<"resource-lists-diff+xml">>, []}; +all_ext(<<"rm">>) -> {<<"application">>, <<"vnd.rn-realmedia">>, []}; +all_ext(<<"rmi">>) -> {<<"audio">>, <<"midi">>, []}; +all_ext(<<"rmp">>) -> {<<"audio">>, <<"x-pn-realaudio-plugin">>, []}; +all_ext(<<"rms">>) -> {<<"application">>, <<"vnd.jcp.javame.midlet-rms">>, []}; +all_ext(<<"rmvb">>) -> {<<"application">>, <<"vnd.rn-realmedia-vbr">>, []}; +all_ext(<<"rnc">>) -> {<<"application">>, <<"relax-ng-compact-syntax">>, []}; +all_ext(<<"roa">>) -> {<<"application">>, <<"rpki-roa">>, []}; +all_ext(<<"roff">>) -> {<<"text">>, <<"troff">>, []}; +all_ext(<<"rp9">>) -> {<<"application">>, <<"vnd.cloanto.rp9">>, []}; +all_ext(<<"rpss">>) -> {<<"application">>, <<"vnd.nokia.radio-presets">>, []}; +all_ext(<<"rpst">>) -> {<<"application">>, <<"vnd.nokia.radio-preset">>, []}; +all_ext(<<"rq">>) -> {<<"application">>, <<"sparql-query">>, []}; +all_ext(<<"rs">>) -> {<<"application">>, <<"rls-services+xml">>, []}; +all_ext(<<"rsd">>) -> {<<"application">>, <<"rsd+xml">>, []}; +all_ext(<<"rss">>) -> {<<"application">>, <<"rss+xml">>, []}; +all_ext(<<"rtf">>) -> {<<"application">>, <<"rtf">>, []}; +all_ext(<<"rtx">>) -> {<<"text">>, <<"richtext">>, []}; +all_ext(<<"s3m">>) -> {<<"audio">>, <<"s3m">>, []}; +all_ext(<<"saf">>) -> {<<"application">>, <<"vnd.yamaha.smaf-audio">>, []}; +all_ext(<<"sbml">>) -> {<<"application">>, <<"sbml+xml">>, []}; +all_ext(<<"sc">>) -> {<<"application">>, <<"vnd.ibm.secure-container">>, []}; +all_ext(<<"scd">>) -> {<<"application">>, <<"x-msschedule">>, []}; +all_ext(<<"scm">>) -> {<<"application">>, <<"vnd.lotus-screencam">>, []}; 
+all_ext(<<"scq">>) -> {<<"application">>, <<"scvp-cv-request">>, []}; +all_ext(<<"scs">>) -> {<<"application">>, <<"scvp-cv-response">>, []}; +all_ext(<<"scurl">>) -> {<<"text">>, <<"vnd.curl.scurl">>, []}; +all_ext(<<"sda">>) -> {<<"application">>, <<"vnd.stardivision.draw">>, []}; +all_ext(<<"sdc">>) -> {<<"application">>, <<"vnd.stardivision.calc">>, []}; +all_ext(<<"sdd">>) -> {<<"application">>, <<"vnd.stardivision.impress">>, []}; +all_ext(<<"sdkd">>) -> {<<"application">>, <<"vnd.solent.sdkm+xml">>, []}; +all_ext(<<"sdkm">>) -> {<<"application">>, <<"vnd.solent.sdkm+xml">>, []}; +all_ext(<<"sdp">>) -> {<<"application">>, <<"sdp">>, []}; +all_ext(<<"sdw">>) -> {<<"application">>, <<"vnd.stardivision.writer">>, []}; +all_ext(<<"see">>) -> {<<"application">>, <<"vnd.seemail">>, []}; +all_ext(<<"seed">>) -> {<<"application">>, <<"vnd.fdsn.seed">>, []}; +all_ext(<<"sema">>) -> {<<"application">>, <<"vnd.sema">>, []}; +all_ext(<<"semd">>) -> {<<"application">>, <<"vnd.semd">>, []}; +all_ext(<<"semf">>) -> {<<"application">>, <<"vnd.semf">>, []}; +all_ext(<<"ser">>) -> {<<"application">>, <<"java-serialized-object">>, []}; +all_ext(<<"setpay">>) -> {<<"application">>, <<"set-payment-initiation">>, []}; +all_ext(<<"setreg">>) -> {<<"application">>, <<"set-registration-initiation">>, []}; +all_ext(<<"sfd-hdstx">>) -> {<<"application">>, <<"vnd.hydrostatix.sof-data">>, []}; +all_ext(<<"sfs">>) -> {<<"application">>, <<"vnd.spotfire.sfs">>, []}; +all_ext(<<"sfv">>) -> {<<"text">>, <<"x-sfv">>, []}; +all_ext(<<"sgi">>) -> {<<"image">>, <<"sgi">>, []}; +all_ext(<<"sgl">>) -> {<<"application">>, <<"vnd.stardivision.writer-global">>, []}; +all_ext(<<"sgml">>) -> {<<"text">>, <<"sgml">>, []}; +all_ext(<<"sgm">>) -> {<<"text">>, <<"sgml">>, []}; +all_ext(<<"sh">>) -> {<<"application">>, <<"x-sh">>, []}; +all_ext(<<"shar">>) -> {<<"application">>, <<"x-shar">>, []}; +all_ext(<<"shf">>) -> {<<"application">>, <<"shf+xml">>, []}; +all_ext(<<"sid">>) -> {<<"image">>, <<"x-mrsid-image">>, []}; +all_ext(<<"sig">>) -> {<<"application">>, <<"pgp-signature">>, []}; +all_ext(<<"sil">>) -> {<<"audio">>, <<"silk">>, []}; +all_ext(<<"silo">>) -> {<<"model">>, <<"mesh">>, []}; +all_ext(<<"sis">>) -> {<<"application">>, <<"vnd.symbian.install">>, []}; +all_ext(<<"sisx">>) -> {<<"application">>, <<"vnd.symbian.install">>, []}; +all_ext(<<"sit">>) -> {<<"application">>, <<"x-stuffit">>, []}; +all_ext(<<"sitx">>) -> {<<"application">>, <<"x-stuffitx">>, []}; +all_ext(<<"skd">>) -> {<<"application">>, <<"vnd.koan">>, []}; +all_ext(<<"skm">>) -> {<<"application">>, <<"vnd.koan">>, []}; +all_ext(<<"skp">>) -> {<<"application">>, <<"vnd.koan">>, []}; +all_ext(<<"skt">>) -> {<<"application">>, <<"vnd.koan">>, []}; +all_ext(<<"sldm">>) -> {<<"application">>, <<"vnd.ms-powerpoint.slide.macroenabled.12">>, []}; +all_ext(<<"sldx">>) -> {<<"application">>, <<"vnd.openxmlformats-officedocument.presentationml.slide">>, []}; +all_ext(<<"slt">>) -> {<<"application">>, <<"vnd.epson.salt">>, []}; +all_ext(<<"sm">>) -> {<<"application">>, <<"vnd.stepmania.stepchart">>, []}; +all_ext(<<"smf">>) -> {<<"application">>, <<"vnd.stardivision.math">>, []}; +all_ext(<<"smi">>) -> {<<"application">>, <<"smil+xml">>, []}; +all_ext(<<"smil">>) -> {<<"application">>, <<"smil+xml">>, []}; +all_ext(<<"smv">>) -> {<<"video">>, <<"x-smv">>, []}; +all_ext(<<"smzip">>) -> {<<"application">>, <<"vnd.stepmania.package">>, []}; +all_ext(<<"snd">>) -> {<<"audio">>, <<"basic">>, []}; +all_ext(<<"snf">>) -> {<<"application">>, <<"x-font-snf">>, []}; 
+all_ext(<<"so">>) -> {<<"application">>, <<"octet-stream">>, []}; +all_ext(<<"spc">>) -> {<<"application">>, <<"x-pkcs7-certificates">>, []}; +all_ext(<<"spf">>) -> {<<"application">>, <<"vnd.yamaha.smaf-phrase">>, []}; +all_ext(<<"spl">>) -> {<<"application">>, <<"x-futuresplash">>, []}; +all_ext(<<"spot">>) -> {<<"text">>, <<"vnd.in3d.spot">>, []}; +all_ext(<<"spp">>) -> {<<"application">>, <<"scvp-vp-response">>, []}; +all_ext(<<"spq">>) -> {<<"application">>, <<"scvp-vp-request">>, []}; +all_ext(<<"spx">>) -> {<<"audio">>, <<"ogg">>, []}; +all_ext(<<"sql">>) -> {<<"application">>, <<"x-sql">>, []}; +all_ext(<<"src">>) -> {<<"application">>, <<"x-wais-source">>, []}; +all_ext(<<"srt">>) -> {<<"application">>, <<"x-subrip">>, []}; +all_ext(<<"sru">>) -> {<<"application">>, <<"sru+xml">>, []}; +all_ext(<<"srx">>) -> {<<"application">>, <<"sparql-results+xml">>, []}; +all_ext(<<"ssdl">>) -> {<<"application">>, <<"ssdl+xml">>, []}; +all_ext(<<"sse">>) -> {<<"application">>, <<"vnd.kodak-descriptor">>, []}; +all_ext(<<"ssf">>) -> {<<"application">>, <<"vnd.epson.ssf">>, []}; +all_ext(<<"ssml">>) -> {<<"application">>, <<"ssml+xml">>, []}; +all_ext(<<"st">>) -> {<<"application">>, <<"vnd.sailingtracker.track">>, []}; +all_ext(<<"stc">>) -> {<<"application">>, <<"vnd.sun.xml.calc.template">>, []}; +all_ext(<<"std">>) -> {<<"application">>, <<"vnd.sun.xml.draw.template">>, []}; +all_ext(<<"s">>) -> {<<"text">>, <<"x-asm">>, []}; +all_ext(<<"stf">>) -> {<<"application">>, <<"vnd.wt.stf">>, []}; +all_ext(<<"sti">>) -> {<<"application">>, <<"vnd.sun.xml.impress.template">>, []}; +all_ext(<<"stk">>) -> {<<"application">>, <<"hyperstudio">>, []}; +all_ext(<<"stl">>) -> {<<"application">>, <<"vnd.ms-pki.stl">>, []}; +all_ext(<<"str">>) -> {<<"application">>, <<"vnd.pg.format">>, []}; +all_ext(<<"stw">>) -> {<<"application">>, <<"vnd.sun.xml.writer.template">>, []}; +all_ext(<<"sub">>) -> {<<"image">>, <<"vnd.dvb.subtitle">>, []}; +all_ext(<<"sus">>) -> {<<"application">>, <<"vnd.sus-calendar">>, []}; +all_ext(<<"susp">>) -> {<<"application">>, <<"vnd.sus-calendar">>, []}; +all_ext(<<"sv4cpio">>) -> {<<"application">>, <<"x-sv4cpio">>, []}; +all_ext(<<"sv4crc">>) -> {<<"application">>, <<"x-sv4crc">>, []}; +all_ext(<<"svc">>) -> {<<"application">>, <<"vnd.dvb.service">>, []}; +all_ext(<<"svd">>) -> {<<"application">>, <<"vnd.svd">>, []}; +all_ext(<<"svg">>) -> {<<"image">>, <<"svg+xml">>, []}; +all_ext(<<"svgz">>) -> {<<"image">>, <<"svg+xml">>, []}; +all_ext(<<"swa">>) -> {<<"application">>, <<"x-director">>, []}; +all_ext(<<"swf">>) -> {<<"application">>, <<"x-shockwave-flash">>, []}; +all_ext(<<"swi">>) -> {<<"application">>, <<"vnd.aristanetworks.swi">>, []}; +all_ext(<<"sxc">>) -> {<<"application">>, <<"vnd.sun.xml.calc">>, []}; +all_ext(<<"sxd">>) -> {<<"application">>, <<"vnd.sun.xml.draw">>, []}; +all_ext(<<"sxg">>) -> {<<"application">>, <<"vnd.sun.xml.writer.global">>, []}; +all_ext(<<"sxi">>) -> {<<"application">>, <<"vnd.sun.xml.impress">>, []}; +all_ext(<<"sxm">>) -> {<<"application">>, <<"vnd.sun.xml.math">>, []}; +all_ext(<<"sxw">>) -> {<<"application">>, <<"vnd.sun.xml.writer">>, []}; +all_ext(<<"t3">>) -> {<<"application">>, <<"x-t3vm-image">>, []}; +all_ext(<<"taglet">>) -> {<<"application">>, <<"vnd.mynfc">>, []}; +all_ext(<<"tao">>) -> {<<"application">>, <<"vnd.tao.intent-module-archive">>, []}; +all_ext(<<"tar">>) -> {<<"application">>, <<"x-tar">>, []}; +all_ext(<<"tcap">>) -> {<<"application">>, <<"vnd.3gpp2.tcap">>, []}; +all_ext(<<"tcl">>) -> {<<"application">>, <<"x-tcl">>, 
[]}; +all_ext(<<"teacher">>) -> {<<"application">>, <<"vnd.smart.teacher">>, []}; +all_ext(<<"tei">>) -> {<<"application">>, <<"tei+xml">>, []}; +all_ext(<<"teicorpus">>) -> {<<"application">>, <<"tei+xml">>, []}; +all_ext(<<"tex">>) -> {<<"application">>, <<"x-tex">>, []}; +all_ext(<<"texi">>) -> {<<"application">>, <<"x-texinfo">>, []}; +all_ext(<<"texinfo">>) -> {<<"application">>, <<"x-texinfo">>, []}; +all_ext(<<"text">>) -> {<<"text">>, <<"plain">>, []}; +all_ext(<<"tfi">>) -> {<<"application">>, <<"thraud+xml">>, []}; +all_ext(<<"tfm">>) -> {<<"application">>, <<"x-tex-tfm">>, []}; +all_ext(<<"tga">>) -> {<<"image">>, <<"x-tga">>, []}; +all_ext(<<"thmx">>) -> {<<"application">>, <<"vnd.ms-officetheme">>, []}; +all_ext(<<"tiff">>) -> {<<"image">>, <<"tiff">>, []}; +all_ext(<<"tif">>) -> {<<"image">>, <<"tiff">>, []}; +all_ext(<<"tmo">>) -> {<<"application">>, <<"vnd.tmobile-livetv">>, []}; +all_ext(<<"torrent">>) -> {<<"application">>, <<"x-bittorrent">>, []}; +all_ext(<<"tpl">>) -> {<<"application">>, <<"vnd.groove-tool-template">>, []}; +all_ext(<<"tpt">>) -> {<<"application">>, <<"vnd.trid.tpt">>, []}; +all_ext(<<"tra">>) -> {<<"application">>, <<"vnd.trueapp">>, []}; +all_ext(<<"trm">>) -> {<<"application">>, <<"x-msterminal">>, []}; +all_ext(<<"tr">>) -> {<<"text">>, <<"troff">>, []}; +all_ext(<<"tsd">>) -> {<<"application">>, <<"timestamped-data">>, []}; +all_ext(<<"tsv">>) -> {<<"text">>, <<"tab-separated-values">>, []}; +all_ext(<<"ttc">>) -> {<<"font">>, <<"collection">>, []}; +all_ext(<<"t">>) -> {<<"text">>, <<"troff">>, []}; +all_ext(<<"ttf">>) -> {<<"font">>, <<"ttf">>, []}; +all_ext(<<"ttl">>) -> {<<"text">>, <<"turtle">>, []}; +all_ext(<<"twd">>) -> {<<"application">>, <<"vnd.simtech-mindmapper">>, []}; +all_ext(<<"twds">>) -> {<<"application">>, <<"vnd.simtech-mindmapper">>, []}; +all_ext(<<"txd">>) -> {<<"application">>, <<"vnd.genomatix.tuxedo">>, []}; +all_ext(<<"txf">>) -> {<<"application">>, <<"vnd.mobius.txf">>, []}; +all_ext(<<"txt">>) -> {<<"text">>, <<"plain">>, []}; +all_ext(<<"u32">>) -> {<<"application">>, <<"x-authorware-bin">>, []}; +all_ext(<<"udeb">>) -> {<<"application">>, <<"x-debian-package">>, []}; +all_ext(<<"ufd">>) -> {<<"application">>, <<"vnd.ufdl">>, []}; +all_ext(<<"ufdl">>) -> {<<"application">>, <<"vnd.ufdl">>, []}; +all_ext(<<"ulx">>) -> {<<"application">>, <<"x-glulx">>, []}; +all_ext(<<"umj">>) -> {<<"application">>, <<"vnd.umajin">>, []}; +all_ext(<<"unityweb">>) -> {<<"application">>, <<"vnd.unity">>, []}; +all_ext(<<"uoml">>) -> {<<"application">>, <<"vnd.uoml+xml">>, []}; +all_ext(<<"uris">>) -> {<<"text">>, <<"uri-list">>, []}; +all_ext(<<"uri">>) -> {<<"text">>, <<"uri-list">>, []}; +all_ext(<<"urls">>) -> {<<"text">>, <<"uri-list">>, []}; +all_ext(<<"ustar">>) -> {<<"application">>, <<"x-ustar">>, []}; +all_ext(<<"utz">>) -> {<<"application">>, <<"vnd.uiq.theme">>, []}; +all_ext(<<"uu">>) -> {<<"text">>, <<"x-uuencode">>, []}; +all_ext(<<"uva">>) -> {<<"audio">>, <<"vnd.dece.audio">>, []}; +all_ext(<<"uvd">>) -> {<<"application">>, <<"vnd.dece.data">>, []}; +all_ext(<<"uvf">>) -> {<<"application">>, <<"vnd.dece.data">>, []}; +all_ext(<<"uvg">>) -> {<<"image">>, <<"vnd.dece.graphic">>, []}; +all_ext(<<"uvh">>) -> {<<"video">>, <<"vnd.dece.hd">>, []}; +all_ext(<<"uvi">>) -> {<<"image">>, <<"vnd.dece.graphic">>, []}; +all_ext(<<"uvm">>) -> {<<"video">>, <<"vnd.dece.mobile">>, []}; +all_ext(<<"uvp">>) -> {<<"video">>, <<"vnd.dece.pd">>, []}; +all_ext(<<"uvs">>) -> {<<"video">>, <<"vnd.dece.sd">>, []}; +all_ext(<<"uvt">>) -> 
{<<"application">>, <<"vnd.dece.ttml+xml">>, []}; +all_ext(<<"uvu">>) -> {<<"video">>, <<"vnd.uvvu.mp4">>, []}; +all_ext(<<"uvva">>) -> {<<"audio">>, <<"vnd.dece.audio">>, []}; +all_ext(<<"uvvd">>) -> {<<"application">>, <<"vnd.dece.data">>, []}; +all_ext(<<"uvvf">>) -> {<<"application">>, <<"vnd.dece.data">>, []}; +all_ext(<<"uvvg">>) -> {<<"image">>, <<"vnd.dece.graphic">>, []}; +all_ext(<<"uvvh">>) -> {<<"video">>, <<"vnd.dece.hd">>, []}; +all_ext(<<"uvvi">>) -> {<<"image">>, <<"vnd.dece.graphic">>, []}; +all_ext(<<"uvvm">>) -> {<<"video">>, <<"vnd.dece.mobile">>, []}; +all_ext(<<"uvvp">>) -> {<<"video">>, <<"vnd.dece.pd">>, []}; +all_ext(<<"uvvs">>) -> {<<"video">>, <<"vnd.dece.sd">>, []}; +all_ext(<<"uvvt">>) -> {<<"application">>, <<"vnd.dece.ttml+xml">>, []}; +all_ext(<<"uvvu">>) -> {<<"video">>, <<"vnd.uvvu.mp4">>, []}; +all_ext(<<"uvv">>) -> {<<"video">>, <<"vnd.dece.video">>, []}; +all_ext(<<"uvvv">>) -> {<<"video">>, <<"vnd.dece.video">>, []}; +all_ext(<<"uvvx">>) -> {<<"application">>, <<"vnd.dece.unspecified">>, []}; +all_ext(<<"uvvz">>) -> {<<"application">>, <<"vnd.dece.zip">>, []}; +all_ext(<<"uvx">>) -> {<<"application">>, <<"vnd.dece.unspecified">>, []}; +all_ext(<<"uvz">>) -> {<<"application">>, <<"vnd.dece.zip">>, []}; +all_ext(<<"vcard">>) -> {<<"text">>, <<"vcard">>, []}; +all_ext(<<"vcd">>) -> {<<"application">>, <<"x-cdlink">>, []}; +all_ext(<<"vcf">>) -> {<<"text">>, <<"x-vcard">>, []}; +all_ext(<<"vcg">>) -> {<<"application">>, <<"vnd.groove-vcard">>, []}; +all_ext(<<"vcs">>) -> {<<"text">>, <<"x-vcalendar">>, []}; +all_ext(<<"vcx">>) -> {<<"application">>, <<"vnd.vcx">>, []}; +all_ext(<<"vis">>) -> {<<"application">>, <<"vnd.visionary">>, []}; +all_ext(<<"viv">>) -> {<<"video">>, <<"vnd.vivo">>, []}; +all_ext(<<"vob">>) -> {<<"video">>, <<"x-ms-vob">>, []}; +all_ext(<<"vor">>) -> {<<"application">>, <<"vnd.stardivision.writer">>, []}; +all_ext(<<"vox">>) -> {<<"application">>, <<"x-authorware-bin">>, []}; +all_ext(<<"vrml">>) -> {<<"model">>, <<"vrml">>, []}; +all_ext(<<"vsd">>) -> {<<"application">>, <<"vnd.visio">>, []}; +all_ext(<<"vsf">>) -> {<<"application">>, <<"vnd.vsf">>, []}; +all_ext(<<"vss">>) -> {<<"application">>, <<"vnd.visio">>, []}; +all_ext(<<"vst">>) -> {<<"application">>, <<"vnd.visio">>, []}; +all_ext(<<"vsw">>) -> {<<"application">>, <<"vnd.visio">>, []}; +all_ext(<<"vtu">>) -> {<<"model">>, <<"vnd.vtu">>, []}; +all_ext(<<"vxml">>) -> {<<"application">>, <<"voicexml+xml">>, []}; +all_ext(<<"w3d">>) -> {<<"application">>, <<"x-director">>, []}; +all_ext(<<"wad">>) -> {<<"application">>, <<"x-doom">>, []}; +all_ext(<<"wav">>) -> {<<"audio">>, <<"x-wav">>, []}; +all_ext(<<"wax">>) -> {<<"audio">>, <<"x-ms-wax">>, []}; +all_ext(<<"wbmp">>) -> {<<"image">>, <<"vnd.wap.wbmp">>, []}; +all_ext(<<"wbs">>) -> {<<"application">>, <<"vnd.criticaltools.wbs+xml">>, []}; +all_ext(<<"wbxml">>) -> {<<"application">>, <<"vnd.wap.wbxml">>, []}; +all_ext(<<"wcm">>) -> {<<"application">>, <<"vnd.ms-works">>, []}; +all_ext(<<"wdb">>) -> {<<"application">>, <<"vnd.ms-works">>, []}; +all_ext(<<"wdp">>) -> {<<"image">>, <<"vnd.ms-photo">>, []}; +all_ext(<<"weba">>) -> {<<"audio">>, <<"webm">>, []}; +all_ext(<<"webm">>) -> {<<"video">>, <<"webm">>, []}; +all_ext(<<"webp">>) -> {<<"image">>, <<"webp">>, []}; +all_ext(<<"wg">>) -> {<<"application">>, <<"vnd.pmi.widget">>, []}; +all_ext(<<"wgt">>) -> {<<"application">>, <<"widget">>, []}; +all_ext(<<"wks">>) -> {<<"application">>, <<"vnd.ms-works">>, []}; +all_ext(<<"wma">>) -> {<<"audio">>, <<"x-ms-wma">>, []}; 
+all_ext(<<"wmd">>) -> {<<"application">>, <<"x-ms-wmd">>, []}; +all_ext(<<"wmf">>) -> {<<"application">>, <<"x-msmetafile">>, []}; +all_ext(<<"wmlc">>) -> {<<"application">>, <<"vnd.wap.wmlc">>, []}; +all_ext(<<"wmlsc">>) -> {<<"application">>, <<"vnd.wap.wmlscriptc">>, []}; +all_ext(<<"wmls">>) -> {<<"text">>, <<"vnd.wap.wmlscript">>, []}; +all_ext(<<"wml">>) -> {<<"text">>, <<"vnd.wap.wml">>, []}; +all_ext(<<"wm">>) -> {<<"video">>, <<"x-ms-wm">>, []}; +all_ext(<<"wmv">>) -> {<<"video">>, <<"x-ms-wmv">>, []}; +all_ext(<<"wmx">>) -> {<<"video">>, <<"x-ms-wmx">>, []}; +all_ext(<<"wmz">>) -> {<<"application">>, <<"x-msmetafile">>, []}; +all_ext(<<"woff2">>) -> {<<"font">>, <<"woff2">>, []}; +all_ext(<<"woff">>) -> {<<"font">>, <<"woff">>, []}; +all_ext(<<"wpd">>) -> {<<"application">>, <<"vnd.wordperfect">>, []}; +all_ext(<<"wpl">>) -> {<<"application">>, <<"vnd.ms-wpl">>, []}; +all_ext(<<"wps">>) -> {<<"application">>, <<"vnd.ms-works">>, []}; +all_ext(<<"wqd">>) -> {<<"application">>, <<"vnd.wqd">>, []}; +all_ext(<<"wri">>) -> {<<"application">>, <<"x-mswrite">>, []}; +all_ext(<<"wrl">>) -> {<<"model">>, <<"vrml">>, []}; +all_ext(<<"wsdl">>) -> {<<"application">>, <<"wsdl+xml">>, []}; +all_ext(<<"wspolicy">>) -> {<<"application">>, <<"wspolicy+xml">>, []}; +all_ext(<<"wtb">>) -> {<<"application">>, <<"vnd.webturbo">>, []}; +all_ext(<<"wvx">>) -> {<<"video">>, <<"x-ms-wvx">>, []}; +all_ext(<<"x32">>) -> {<<"application">>, <<"x-authorware-bin">>, []}; +all_ext(<<"x3db">>) -> {<<"model">>, <<"x3d+binary">>, []}; +all_ext(<<"x3dbz">>) -> {<<"model">>, <<"x3d+binary">>, []}; +all_ext(<<"x3d">>) -> {<<"model">>, <<"x3d+xml">>, []}; +all_ext(<<"x3dv">>) -> {<<"model">>, <<"x3d+vrml">>, []}; +all_ext(<<"x3dvz">>) -> {<<"model">>, <<"x3d+vrml">>, []}; +all_ext(<<"x3dz">>) -> {<<"model">>, <<"x3d+xml">>, []}; +all_ext(<<"xaml">>) -> {<<"application">>, <<"xaml+xml">>, []}; +all_ext(<<"xap">>) -> {<<"application">>, <<"x-silverlight-app">>, []}; +all_ext(<<"xar">>) -> {<<"application">>, <<"vnd.xara">>, []}; +all_ext(<<"xbap">>) -> {<<"application">>, <<"x-ms-xbap">>, []}; +all_ext(<<"xbd">>) -> {<<"application">>, <<"vnd.fujixerox.docuworks.binder">>, []}; +all_ext(<<"xbm">>) -> {<<"image">>, <<"x-xbitmap">>, []}; +all_ext(<<"xdf">>) -> {<<"application">>, <<"xcap-diff+xml">>, []}; +all_ext(<<"xdm">>) -> {<<"application">>, <<"vnd.syncml.dm+xml">>, []}; +all_ext(<<"xdp">>) -> {<<"application">>, <<"vnd.adobe.xdp+xml">>, []}; +all_ext(<<"xdssc">>) -> {<<"application">>, <<"dssc+xml">>, []}; +all_ext(<<"xdw">>) -> {<<"application">>, <<"vnd.fujixerox.docuworks">>, []}; +all_ext(<<"xenc">>) -> {<<"application">>, <<"xenc+xml">>, []}; +all_ext(<<"xer">>) -> {<<"application">>, <<"patch-ops-error+xml">>, []}; +all_ext(<<"xfdf">>) -> {<<"application">>, <<"vnd.adobe.xfdf">>, []}; +all_ext(<<"xfdl">>) -> {<<"application">>, <<"vnd.xfdl">>, []}; +all_ext(<<"xht">>) -> {<<"application">>, <<"xhtml+xml">>, []}; +all_ext(<<"xhtml">>) -> {<<"application">>, <<"xhtml+xml">>, []}; +all_ext(<<"xhvml">>) -> {<<"application">>, <<"xv+xml">>, []}; +all_ext(<<"xif">>) -> {<<"image">>, <<"vnd.xiff">>, []}; +all_ext(<<"xla">>) -> {<<"application">>, <<"vnd.ms-excel">>, []}; +all_ext(<<"xlam">>) -> {<<"application">>, <<"vnd.ms-excel.addin.macroenabled.12">>, []}; +all_ext(<<"xlc">>) -> {<<"application">>, <<"vnd.ms-excel">>, []}; +all_ext(<<"xlf">>) -> {<<"application">>, <<"x-xliff+xml">>, []}; +all_ext(<<"xlm">>) -> {<<"application">>, <<"vnd.ms-excel">>, []}; +all_ext(<<"xls">>) -> {<<"application">>, 
<<"vnd.ms-excel">>, []}; +all_ext(<<"xlsb">>) -> {<<"application">>, <<"vnd.ms-excel.sheet.binary.macroenabled.12">>, []}; +all_ext(<<"xlsm">>) -> {<<"application">>, <<"vnd.ms-excel.sheet.macroenabled.12">>, []}; +all_ext(<<"xlsx">>) -> {<<"application">>, <<"vnd.openxmlformats-officedocument.spreadsheetml.sheet">>, []}; +all_ext(<<"xlt">>) -> {<<"application">>, <<"vnd.ms-excel">>, []}; +all_ext(<<"xltm">>) -> {<<"application">>, <<"vnd.ms-excel.template.macroenabled.12">>, []}; +all_ext(<<"xltx">>) -> {<<"application">>, <<"vnd.openxmlformats-officedocument.spreadsheetml.template">>, []}; +all_ext(<<"xlw">>) -> {<<"application">>, <<"vnd.ms-excel">>, []}; +all_ext(<<"xm">>) -> {<<"audio">>, <<"xm">>, []}; +all_ext(<<"xml">>) -> {<<"application">>, <<"xml">>, []}; +all_ext(<<"xo">>) -> {<<"application">>, <<"vnd.olpc-sugar">>, []}; +all_ext(<<"xop">>) -> {<<"application">>, <<"xop+xml">>, []}; +all_ext(<<"xpi">>) -> {<<"application">>, <<"x-xpinstall">>, []}; +all_ext(<<"xpl">>) -> {<<"application">>, <<"xproc+xml">>, []}; +all_ext(<<"xpm">>) -> {<<"image">>, <<"x-xpixmap">>, []}; +all_ext(<<"xpr">>) -> {<<"application">>, <<"vnd.is-xpr">>, []}; +all_ext(<<"xps">>) -> {<<"application">>, <<"vnd.ms-xpsdocument">>, []}; +all_ext(<<"xpw">>) -> {<<"application">>, <<"vnd.intercon.formnet">>, []}; +all_ext(<<"xpx">>) -> {<<"application">>, <<"vnd.intercon.formnet">>, []}; +all_ext(<<"xsl">>) -> {<<"application">>, <<"xml">>, []}; +all_ext(<<"xslt">>) -> {<<"application">>, <<"xslt+xml">>, []}; +all_ext(<<"xsm">>) -> {<<"application">>, <<"vnd.syncml+xml">>, []}; +all_ext(<<"xspf">>) -> {<<"application">>, <<"xspf+xml">>, []}; +all_ext(<<"xul">>) -> {<<"application">>, <<"vnd.mozilla.xul+xml">>, []}; +all_ext(<<"xvm">>) -> {<<"application">>, <<"xv+xml">>, []}; +all_ext(<<"xvml">>) -> {<<"application">>, <<"xv+xml">>, []}; +all_ext(<<"xwd">>) -> {<<"image">>, <<"x-xwindowdump">>, []}; +all_ext(<<"xyz">>) -> {<<"chemical">>, <<"x-xyz">>, []}; +all_ext(<<"xz">>) -> {<<"application">>, <<"x-xz">>, []}; +all_ext(<<"yang">>) -> {<<"application">>, <<"yang">>, []}; +all_ext(<<"yin">>) -> {<<"application">>, <<"yin+xml">>, []}; +all_ext(<<"z1">>) -> {<<"application">>, <<"x-zmachine">>, []}; +all_ext(<<"z2">>) -> {<<"application">>, <<"x-zmachine">>, []}; +all_ext(<<"z3">>) -> {<<"application">>, <<"x-zmachine">>, []}; +all_ext(<<"z4">>) -> {<<"application">>, <<"x-zmachine">>, []}; +all_ext(<<"z5">>) -> {<<"application">>, <<"x-zmachine">>, []}; +all_ext(<<"z6">>) -> {<<"application">>, <<"x-zmachine">>, []}; +all_ext(<<"z7">>) -> {<<"application">>, <<"x-zmachine">>, []}; +all_ext(<<"z8">>) -> {<<"application">>, <<"x-zmachine">>, []}; +all_ext(<<"zaz">>) -> {<<"application">>, <<"vnd.zzazz.deck+xml">>, []}; +all_ext(<<"zip">>) -> {<<"application">>, <<"zip">>, []}; +all_ext(<<"zir">>) -> {<<"application">>, <<"vnd.zul">>, []}; +all_ext(<<"zirz">>) -> {<<"application">>, <<"vnd.zul">>, []}; +all_ext(<<"zmm">>) -> {<<"application">>, <<"vnd.handheld-entertainment+xml">>, []}; +%% GENERATED +all_ext(_) -> {<<"application">>, <<"octet-stream">>, []}. 
+ +web_ext(<<"css">>) -> {<<"text">>, <<"css">>, []}; +web_ext(<<"gif">>) -> {<<"image">>, <<"gif">>, []}; +web_ext(<<"html">>) -> {<<"text">>, <<"html">>, []}; +web_ext(<<"htm">>) -> {<<"text">>, <<"html">>, []}; +web_ext(<<"ico">>) -> {<<"image">>, <<"x-icon">>, []}; +web_ext(<<"jpeg">>) -> {<<"image">>, <<"jpeg">>, []}; +web_ext(<<"jpg">>) -> {<<"image">>, <<"jpeg">>, []}; +web_ext(<<"js">>) -> {<<"application">>, <<"javascript">>, []}; +web_ext(<<"mp3">>) -> {<<"audio">>, <<"mpeg">>, []}; +web_ext(<<"mp4">>) -> {<<"video">>, <<"mp4">>, []}; +web_ext(<<"ogg">>) -> {<<"audio">>, <<"ogg">>, []}; +web_ext(<<"ogv">>) -> {<<"video">>, <<"ogg">>, []}; +web_ext(<<"png">>) -> {<<"image">>, <<"png">>, []}; +web_ext(<<"svg">>) -> {<<"image">>, <<"svg+xml">>, []}; +web_ext(<<"wav">>) -> {<<"audio">>, <<"x-wav">>, []}; +web_ext(<<"webm">>) -> {<<"video">>, <<"webm">>, []}; +web_ext(_) -> {<<"application">>, <<"octet-stream">>, []}. diff --git a/deps/cowlib/src/cow_mimetypes.erl.src b/deps/cowlib/src/cow_mimetypes.erl.src new file mode 100644 index 0000000..2c57834 --- /dev/null +++ b/deps/cowlib/src/cow_mimetypes.erl.src @@ -0,0 +1,61 @@ +%% Copyright (c) 2013-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cow_mimetypes). + +-export([all/1]). +-export([web/1]). + +%% @doc Return the mimetype for any file by looking at its extension. + +-spec all(binary()) -> {binary(), binary(), []}. +all(Path) -> + case filename:extension(Path) of + <<>> -> {<<"application">>, <<"octet-stream">>, []}; + %% @todo Convert to string:lowercase on OTP-20+. + << $., Ext/binary >> -> all_ext(list_to_binary(string:to_lower(binary_to_list(Ext)))) + end. + +%% @doc Return the mimetype for a Web related file by looking at its extension. + +-spec web(binary()) -> {binary(), binary(), []}. +web(Path) -> + case filename:extension(Path) of + <<>> -> {<<"application">>, <<"octet-stream">>, []}; + %% @todo Convert to string:lowercase on OTP-20+. + << $., Ext/binary >> -> web_ext(list_to_binary(string:to_lower(binary_to_list(Ext)))) + end. + +%% Internal. + +%% GENERATED +all_ext(_) -> {<<"application">>, <<"octet-stream">>, []}. 
+ +web_ext(<<"css">>) -> {<<"text">>, <<"css">>, []}; +web_ext(<<"gif">>) -> {<<"image">>, <<"gif">>, []}; +web_ext(<<"html">>) -> {<<"text">>, <<"html">>, []}; +web_ext(<<"htm">>) -> {<<"text">>, <<"html">>, []}; +web_ext(<<"ico">>) -> {<<"image">>, <<"x-icon">>, []}; +web_ext(<<"jpeg">>) -> {<<"image">>, <<"jpeg">>, []}; +web_ext(<<"jpg">>) -> {<<"image">>, <<"jpeg">>, []}; +web_ext(<<"js">>) -> {<<"application">>, <<"javascript">>, []}; +web_ext(<<"mp3">>) -> {<<"audio">>, <<"mpeg">>, []}; +web_ext(<<"mp4">>) -> {<<"video">>, <<"mp4">>, []}; +web_ext(<<"ogg">>) -> {<<"audio">>, <<"ogg">>, []}; +web_ext(<<"ogv">>) -> {<<"video">>, <<"ogg">>, []}; +web_ext(<<"png">>) -> {<<"image">>, <<"png">>, []}; +web_ext(<<"svg">>) -> {<<"image">>, <<"svg+xml">>, []}; +web_ext(<<"wav">>) -> {<<"audio">>, <<"x-wav">>, []}; +web_ext(<<"webm">>) -> {<<"video">>, <<"webm">>, []}; +web_ext(_) -> {<<"application">>, <<"octet-stream">>, []}. diff --git a/deps/cowlib/src/cow_multipart.erl b/deps/cowlib/src/cow_multipart.erl new file mode 100644 index 0000000..f418813 --- /dev/null +++ b/deps/cowlib/src/cow_multipart.erl @@ -0,0 +1,775 @@ +%% Copyright (c) 2014-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cow_multipart). + +%% Parsing. +-export([parse_headers/2]). +-export([parse_body/2]). + +%% Building. +-export([boundary/0]). +-export([first_part/2]). +-export([part/2]). +-export([close/1]). + +%% Headers. +-export([form_data/1]). +-export([parse_content_disposition/1]). +-export([parse_content_transfer_encoding/1]). +-export([parse_content_type/1]). + +-type headers() :: [{iodata(), iodata()}]. +-export_type([headers/0]). + +-include("cow_inline.hrl"). + +-define(TEST1_MIME, << + "This is a message with multiple parts in MIME format.\r\n" + "--frontier\r\n" + "Content-Type: text/plain\r\n" + "\r\n" + "This is the body of the message.\r\n" + "--frontier\r\n" + "Content-Type: application/octet-stream\r\n" + "Content-Transfer-Encoding: base64\r\n" + "\r\n" + "PGh0bWw+CiAgPGhlYWQ+CiAgPC9oZWFkPgogIDxib2R5PgogICAgPHA+VGhpcyBpcyB0aGUg\r\n" + "Ym9keSBvZiB0aGUgbWVzc2FnZS48L3A+CiAgPC9ib2R5Pgo8L2h0bWw+Cg==\r\n" + "--frontier--" +>>). +-define(TEST1_BOUNDARY, <<"frontier">>). + +-define(TEST2_MIME, << + "--AaB03x\r\n" + "Content-Disposition: form-data; name=\"submit-name\"\r\n" + "\r\n" + "Larry\r\n" + "--AaB03x\r\n" + "Content-Disposition: form-data; name=\"files\"\r\n" + "Content-Type: multipart/mixed; boundary=BbC04y\r\n" + "\r\n" + "--BbC04y\r\n" + "Content-Disposition: file; filename=\"file1.txt\"\r\n" + "Content-Type: text/plain\r\n" + "\r\n" + "... 
contents of file1.txt ...\r\n" + "--BbC04y\r\n" + "Content-Disposition: file; filename=\"file2.gif\"\r\n" + "Content-Type: image/gif\r\n" + "Content-Transfer-Encoding: binary\r\n" + "\r\n" + "...contents of file2.gif...\r\n" + "--BbC04y--\r\n" + "--AaB03x--" +>>). +-define(TEST2_BOUNDARY, <<"AaB03x">>). + +-define(TEST3_MIME, << + "This is the preamble.\r\n" + "--boundary\r\n" + "Content-Type: text/plain\r\n" + "\r\n" + "This is the body of the message.\r\n" + "--boundary--" + "\r\nThis is the epilogue. Here it includes leading CRLF" +>>). +-define(TEST3_BOUNDARY, <<"boundary">>). + +-define(TEST4_MIME, << + "This is the preamble.\r\n" + "--boundary\r\n" + "Content-Type: text/plain\r\n" + "\r\n" + "This is the body of the message.\r\n" + "--boundary--" + "\r\n" +>>). +-define(TEST4_BOUNDARY, <<"boundary">>). + +%% RFC 2046, Section 5.1.1 +-define(TEST5_MIME, << + "This is the preamble. It is to be ignored, though it\r\n" + "is a handy place for composition agents to include an\r\n" + "explanatory note to non-MIME conformant readers.\r\n" + "\r\n" + "--simple boundary\r\n", + "\r\n" + "This is implicitly typed plain US-ASCII text.\r\n" + "It does NOT end with a linebreak." + "\r\n" + "--simple boundary\r\n", + "Content-type: text/plain; charset=us-ascii\r\n" + "\r\n" + "This is explicitly typed plain US-ASCII text.\r\n" + "It DOES end with a linebreak.\r\n" + "\r\n" + "--simple boundary--\r\n" + "\r\n" + "This is the epilogue. It is also to be ignored." +>>). +-define(TEST5_BOUNDARY, <<"simple boundary">>). + +%% Parsing. +%% +%% The multipart format is defined in RFC 2045. + +%% @doc Parse the headers for the next multipart part. +%% +%% This function skips any preamble before the boundary. +%% The preamble may be retrieved using parse_body/2. +%% +%% This function will accept input of any size, it is +%% up to the caller to limit it if needed. + +-spec parse_headers(binary(), binary()) + -> more | {more, binary()} + | {ok, headers(), binary()} + | {done, binary()}. +%% If the stream starts with the boundary we can make a few assumptions +%% and quickly figure out if we got the complete list of headers. +parse_headers(<< "--", Stream/bits >>, Boundary) -> + BoundarySize = byte_size(Boundary), + case Stream of + %% Last boundary. Return the epilogue. + << Boundary:BoundarySize/binary, "--", Stream2/bits >> -> + {done, Stream2}; + << Boundary:BoundarySize/binary, Stream2/bits >> -> + %% We have all the headers only if there is a \r\n\r\n + %% somewhere in the data after the boundary. + case binary:match(Stream2, <<"\r\n\r\n">>) of + nomatch -> + more; + _ -> + before_parse_headers(Stream2) + end; + %% If there isn't enough to represent Boundary \r\n\r\n + %% then we definitely don't have all the headers. + _ when byte_size(Stream) < byte_size(Boundary) + 4 -> + more; + %% Otherwise we have preamble data to skip. + %% We still got rid of the first two misleading bytes. + _ -> + skip_preamble(Stream, Boundary) + end; +%% Otherwise we have preamble data to skip. +parse_headers(Stream, Boundary) -> + skip_preamble(Stream, Boundary). + +%% We need to find the boundary and a \r\n\r\n after that. +%% Since the boundary isn't at the start, it must be right +%% after a \r\n too. +skip_preamble(Stream, Boundary) -> + case binary:match(Stream, <<"\r\n--", Boundary/bits >>) of + %% No boundary, need more data. + nomatch -> + %% We can safely skip the size of the stream + %% minus the last 3 bytes which may be a partial boundary. 
+ SkipSize = byte_size(Stream) - 3, + case SkipSize > 0 of + false -> + more; + true -> + << _:SkipSize/binary, Stream2/bits >> = Stream, + {more, Stream2} + end; + {Start, Length} -> + Start2 = Start + Length, + << _:Start2/binary, Stream2/bits >> = Stream, + case Stream2 of + %% Last boundary. Return the epilogue. + << "--", Stream3/bits >> -> + {done, Stream3}; + _ -> + case binary:match(Stream, <<"\r\n\r\n">>) of + %% We don't have the full headers. + nomatch -> + {more, Stream2}; + _ -> + before_parse_headers(Stream2) + end + end + end. + +before_parse_headers(<< "\r\n\r\n", Stream/bits >>) -> + %% This indicates that there are no headers, so we can abort immediately. + {ok, [], Stream}; +before_parse_headers(<< "\r\n", Stream/bits >>) -> + %% There is a line break right after the boundary, skip it. + parse_hd_name(Stream, [], <<>>). + +parse_hd_name(<< C, Rest/bits >>, H, SoFar) -> + case C of + $: -> parse_hd_before_value(Rest, H, SoFar); + $\s -> parse_hd_name_ws(Rest, H, SoFar); + $\t -> parse_hd_name_ws(Rest, H, SoFar); + _ -> ?LOWER(parse_hd_name, Rest, H, SoFar) + end. + +parse_hd_name_ws(<< C, Rest/bits >>, H, Name) -> + case C of + $\s -> parse_hd_name_ws(Rest, H, Name); + $\t -> parse_hd_name_ws(Rest, H, Name); + $: -> parse_hd_before_value(Rest, H, Name) + end. + +parse_hd_before_value(<< $\s, Rest/bits >>, H, N) -> + parse_hd_before_value(Rest, H, N); +parse_hd_before_value(<< $\t, Rest/bits >>, H, N) -> + parse_hd_before_value(Rest, H, N); +parse_hd_before_value(Buffer, H, N) -> + parse_hd_value(Buffer, H, N, <<>>). + +parse_hd_value(<< $\r, Rest/bits >>, Headers, Name, SoFar) -> + case Rest of + << "\n\r\n", Rest2/bits >> -> + {ok, [{Name, SoFar}|Headers], Rest2}; + << $\n, C, Rest2/bits >> when C =:= $\s; C =:= $\t -> + parse_hd_value(Rest2, Headers, Name, SoFar); + << $\n, Rest2/bits >> -> + parse_hd_name(Rest2, [{Name, SoFar}|Headers], <<>>) + end; +parse_hd_value(<< C, Rest/bits >>, H, N, SoFar) -> + parse_hd_value(Rest, H, N, << SoFar/binary, C >>). + +%% @doc Parse the body of the current multipart part. +%% +%% The body is everything until the next boundary. + +-spec parse_body(binary(), binary()) + -> {ok, binary()} | {ok, binary(), binary()} + | done | {done, binary()} | {done, binary(), binary()}. +parse_body(Stream, Boundary) -> + BoundarySize = byte_size(Boundary), + case Stream of + << "--", Boundary:BoundarySize/binary, _/bits >> -> + done; + _ -> + case binary:match(Stream, << "\r\n--", Boundary/bits >>) of + %% No boundary, check for a possible partial at the end. + %% Return more or less of the body depending on the result. + nomatch -> + StreamSize = byte_size(Stream), + From = StreamSize - BoundarySize - 3, + MatchOpts = if + %% Binary too small to contain boundary, check it fully. + From < 0 -> []; + %% Optimize, only check the end of the binary. + true -> [{scope, {From, StreamSize - From}}] + end, + case binary:match(Stream, <<"\r">>, MatchOpts) of + nomatch -> + {ok, Stream}; + {Pos, _} -> + case Stream of + << Body:Pos/binary >> -> + {ok, Body}; + << Body:Pos/binary, Rest/bits >> -> + {ok, Body, Rest} + end + end; + %% Boundary found, this is the last chunk of the body. + {Pos, _} -> + case Stream of + << Body:Pos/binary, "\r\n" >> -> + {done, Body}; + << Body:Pos/binary, "\r\n", Rest/bits >> -> + {done, Body, Rest}; + << Body:Pos/binary, Rest/bits >> -> + {done, Body, Rest} + end + end + end. + +-ifdef(TEST). 
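As a reading aid for parse_headers/2 and parse_body/2 above: a caller alternates the two, threading each returned continuation binary into the next call until parse_headers/2 reports done. A minimal sketch, assuming the whole message is already in memory (collect_parts is an invented helper, not part of cowlib):

    collect_parts(Stream, Boundary, Acc) ->
        case cow_multipart:parse_headers(Stream, Boundary) of
            {ok, Headers, Rest} ->
                case cow_multipart:parse_body(Rest, Boundary) of
                    {done, Body, Rest2} ->
                        collect_parts(Rest2, Boundary, [{Headers, Body}|Acc]);
                    done ->
                        %% Empty body: the next boundary follows immediately.
                        collect_parts(Rest, Boundary, [{Headers, <<>>}|Acc])
                end;
            {done, _Epilogue} ->
                lists:reverse(Acc)
        end.

With a streaming source the more / {more, Buffer} returns would also have to be handled by reading more data and retrying; the sketch skips that.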
+parse_test() -> + H1 = [{<<"content-type">>, <<"text/plain">>}], + Body1 = <<"This is the body of the message.">>, + H2 = lists:sort([{<<"content-type">>, <<"application/octet-stream">>}, + {<<"content-transfer-encoding">>, <<"base64">>}]), + Body2 = <<"PGh0bWw+CiAgPGhlYWQ+CiAgPC9oZWFkPgogIDxib2R5PgogICAgPHA+VGhpcyBpcyB0aGUg\r\n" + "Ym9keSBvZiB0aGUgbWVzc2FnZS48L3A+CiAgPC9ib2R5Pgo8L2h0bWw+Cg==">>, + {ok, H1, Rest} = parse_headers(?TEST1_MIME, ?TEST1_BOUNDARY), + {done, Body1, Rest2} = parse_body(Rest, ?TEST1_BOUNDARY), + done = parse_body(Rest2, ?TEST1_BOUNDARY), + {ok, H2Unsorted, Rest3} = parse_headers(Rest2, ?TEST1_BOUNDARY), + H2 = lists:sort(H2Unsorted), + {done, Body2, Rest4} = parse_body(Rest3, ?TEST1_BOUNDARY), + done = parse_body(Rest4, ?TEST1_BOUNDARY), + {done, <<>>} = parse_headers(Rest4, ?TEST1_BOUNDARY), + ok. + +parse_interleaved_test() -> + H1 = [{<<"content-disposition">>, <<"form-data; name=\"submit-name\"">>}], + Body1 = <<"Larry">>, + H2 = lists:sort([{<<"content-disposition">>, <<"form-data; name=\"files\"">>}, + {<<"content-type">>, <<"multipart/mixed; boundary=BbC04y">>}]), + InH1 = lists:sort([{<<"content-disposition">>, <<"file; filename=\"file1.txt\"">>}, + {<<"content-type">>, <<"text/plain">>}]), + InBody1 = <<"... contents of file1.txt ...">>, + InH2 = lists:sort([{<<"content-disposition">>, <<"file; filename=\"file2.gif\"">>}, + {<<"content-type">>, <<"image/gif">>}, + {<<"content-transfer-encoding">>, <<"binary">>}]), + InBody2 = <<"...contents of file2.gif...">>, + {ok, H1, Rest} = parse_headers(?TEST2_MIME, ?TEST2_BOUNDARY), + {done, Body1, Rest2} = parse_body(Rest, ?TEST2_BOUNDARY), + done = parse_body(Rest2, ?TEST2_BOUNDARY), + {ok, H2Unsorted, Rest3} = parse_headers(Rest2, ?TEST2_BOUNDARY), + H2 = lists:sort(H2Unsorted), + {_, ContentType} = lists:keyfind(<<"content-type">>, 1, H2), + {<<"multipart">>, <<"mixed">>, [{<<"boundary">>, InBoundary}]} + = parse_content_type(ContentType), + {ok, InH1Unsorted, InRest} = parse_headers(Rest3, InBoundary), + InH1 = lists:sort(InH1Unsorted), + {done, InBody1, InRest2} = parse_body(InRest, InBoundary), + done = parse_body(InRest2, InBoundary), + {ok, InH2Unsorted, InRest3} = parse_headers(InRest2, InBoundary), + InH2 = lists:sort(InH2Unsorted), + {done, InBody2, InRest4} = parse_body(InRest3, InBoundary), + done = parse_body(InRest4, InBoundary), + {done, Rest4} = parse_headers(InRest4, InBoundary), + {done, <<>>} = parse_headers(Rest4, ?TEST2_BOUNDARY), + ok. + +parse_epilogue_test() -> + H1 = [{<<"content-type">>, <<"text/plain">>}], + Body1 = <<"This is the body of the message.">>, + Epilogue = <<"\r\nThis is the epilogue. Here it includes leading CRLF">>, + {ok, H1, Rest} = parse_headers(?TEST3_MIME, ?TEST3_BOUNDARY), + {done, Body1, Rest2} = parse_body(Rest, ?TEST3_BOUNDARY), + done = parse_body(Rest2, ?TEST3_BOUNDARY), + {done, Epilogue} = parse_headers(Rest2, ?TEST3_BOUNDARY), + ok. + +parse_epilogue_crlf_test() -> + H1 = [{<<"content-type">>, <<"text/plain">>}], + Body1 = <<"This is the body of the message.">>, + Epilogue = <<"\r\n">>, + {ok, H1, Rest} = parse_headers(?TEST4_MIME, ?TEST4_BOUNDARY), + {done, Body1, Rest2} = parse_body(Rest, ?TEST4_BOUNDARY), + done = parse_body(Rest2, ?TEST4_BOUNDARY), + {done, Epilogue} = parse_headers(Rest2, ?TEST4_BOUNDARY), + ok. + +parse_rfc2046_test() -> + %% The following is an example included in RFC 2046, Section 5.1.1. 
+ Body1 = <<"This is implicitly typed plain US-ASCII text.\r\n" + "It does NOT end with a linebreak.">>, + Body2 = <<"This is explicitly typed plain US-ASCII text.\r\n" + "It DOES end with a linebreak.\r\n">>, + H2 = [{<<"content-type">>, <<"text/plain; charset=us-ascii">>}], + Epilogue = <<"\r\n\r\nThis is the epilogue. It is also to be ignored.">>, + {ok, [], Rest} = parse_headers(?TEST5_MIME, ?TEST5_BOUNDARY), + {done, Body1, Rest2} = parse_body(Rest, ?TEST5_BOUNDARY), + {ok, H2, Rest3} = parse_headers(Rest2, ?TEST5_BOUNDARY), + {done, Body2, Rest4} = parse_body(Rest3, ?TEST5_BOUNDARY), + {done, Epilogue} = parse_headers(Rest4, ?TEST5_BOUNDARY), + ok. + +parse_partial_test() -> + {ok, <<0:8000, "abcdef">>, <<"\rghij">>} + = parse_body(<<0:8000, "abcdef\rghij">>, <<"boundary">>), + {ok, <<"abcdef">>, <<"\rghij">>} + = parse_body(<<"abcdef\rghij">>, <<"boundary">>), + {ok, <<"abc">>, <<"\rdef">>} + = parse_body(<<"abc\rdef">>, <<"boundaryboundary">>), + {ok, <<0:8000, "abcdef">>, <<"\r\nghij">>} + = parse_body(<<0:8000, "abcdef\r\nghij">>, <<"boundary">>), + {ok, <<"abcdef">>, <<"\r\nghij">>} + = parse_body(<<"abcdef\r\nghij">>, <<"boundary">>), + {ok, <<"abc">>, <<"\r\ndef">>} + = parse_body(<<"abc\r\ndef">>, <<"boundaryboundary">>), + {ok, <<"boundary">>, <<"\r">>} + = parse_body(<<"boundary\r">>, <<"boundary">>), + {ok, <<"boundary">>, <<"\r\n">>} + = parse_body(<<"boundary\r\n">>, <<"boundary">>), + {ok, <<"boundary">>, <<"\r\n-">>} + = parse_body(<<"boundary\r\n-">>, <<"boundary">>), + {ok, <<"boundary">>, <<"\r\n--">>} + = parse_body(<<"boundary\r\n--">>, <<"boundary">>), + ok. + +perf_parse_multipart(Stream, Boundary) -> + case parse_headers(Stream, Boundary) of + {ok, _, Rest} -> + {_, _, Rest2} = parse_body(Rest, Boundary), + perf_parse_multipart(Rest2, Boundary); + {done, _} -> + ok + end. + +horse_parse() -> + horse:repeat(50000, + perf_parse_multipart(?TEST1_MIME, ?TEST1_BOUNDARY) + ). +-endif. + +%% Building. + +%% @doc Generate a new random boundary. +%% +%% The boundary generated has a low probability of ever appearing +%% in the data. + +-spec boundary() -> binary(). +boundary() -> + cow_base64url:encode(crypto:strong_rand_bytes(48), #{padding => false}). + +%% @doc Return the first part's head. +%% +%% This works exactly like the part/2 function except there is +%% no leading \r\n. It's not required to use this function, +%% just makes the output a little smaller and prettier. + +-spec first_part(binary(), headers()) -> iodata(). +first_part(Boundary, Headers) -> + [<<"--">>, Boundary, <<"\r\n">>, headers_to_iolist(Headers, [])]. + +%% @doc Return a part's head. + +-spec part(binary(), headers()) -> iodata(). +part(Boundary, Headers) -> + [<<"\r\n--">>, Boundary, <<"\r\n">>, headers_to_iolist(Headers, [])]. + +headers_to_iolist([], Acc) -> + lists:reverse([<<"\r\n">>|Acc]); +headers_to_iolist([{N, V}|Tail], Acc) -> + %% We don't want to create a sublist so we list the + %% values in reverse order so that it gets reversed properly. + headers_to_iolist(Tail, [<<"\r\n">>, V, <<": ">>, N|Acc]). + +%% @doc Return the closing delimiter of the multipart message. + +-spec close(binary()) -> iodata(). +close(Boundary) -> + [<<"\r\n--">>, Boundary, <<"--">>]. + +-ifdef(TEST). 
+build_test() -> + Result = string:to_lower(binary_to_list(?TEST1_MIME)), + Result = string:to_lower(binary_to_list(iolist_to_binary([ + <<"This is a message with multiple parts in MIME format.\r\n">>, + first_part(?TEST1_BOUNDARY, [{<<"content-type">>, <<"text/plain">>}]), + <<"This is the body of the message.">>, + part(?TEST1_BOUNDARY, [ + {<<"content-type">>, <<"application/octet-stream">>}, + {<<"content-transfer-encoding">>, <<"base64">>}]), + <<"PGh0bWw+CiAgPGhlYWQ+CiAgPC9oZWFkPgogIDxib2R5PgogICAgPHA+VGhpcyBpcyB0aGUg\r\n" + "Ym9keSBvZiB0aGUgbWVzc2FnZS48L3A+CiAgPC9ib2R5Pgo8L2h0bWw+Cg==">>, + close(?TEST1_BOUNDARY) + ]))), + ok. + +identity_test() -> + B = boundary(), + Preamble = <<"This is a message with multiple parts in MIME format.">>, + H1 = [{<<"content-type">>, <<"text/plain">>}], + Body1 = <<"This is the body of the message.">>, + H2 = lists:sort([{<<"content-type">>, <<"application/octet-stream">>}, + {<<"content-transfer-encoding">>, <<"base64">>}]), + Body2 = <<"PGh0bWw+CiAgPGhlYWQ+CiAgPC9oZWFkPgogIDxib2R5PgogICAgPHA+VGhpcyBpcyB0aGUg\r\n" + "Ym9keSBvZiB0aGUgbWVzc2FnZS48L3A+CiAgPC9ib2R5Pgo8L2h0bWw+Cg==">>, + Epilogue = <<"Gotta go fast!">>, + M = iolist_to_binary([ + Preamble, + part(B, H1), Body1, + part(B, H2), Body2, + close(B), + Epilogue + ]), + {done, Preamble, M2} = parse_body(M, B), + {ok, H1, M3} = parse_headers(M2, B), + {done, Body1, M4} = parse_body(M3, B), + {ok, H2Unsorted, M5} = parse_headers(M4, B), + H2 = lists:sort(H2Unsorted), + {done, Body2, M6} = parse_body(M5, B), + {done, Epilogue} = parse_headers(M6, B), + ok. + +perf_build_multipart() -> + B = boundary(), + [ + <<"preamble\r\n">>, + first_part(B, [{<<"content-type">>, <<"text/plain">>}]), + <<"This is the body of the message.">>, + part(B, [ + {<<"content-type">>, <<"application/octet-stream">>}, + {<<"content-transfer-encoding">>, <<"base64">>}]), + <<"PGh0bWw+CiAgPGhlYWQ+CiAgPC9oZWFkPgogIDxib2R5PgogICAgPHA+VGhpcyBpcyB0aGUg\r\n" + "Ym9keSBvZiB0aGUgbWVzc2FnZS48L3A+CiAgPC9ib2R5Pgo8L2h0bWw+Cg==">>, + close(B), + <<"epilogue">> + ]. + +horse_build() -> + horse:repeat(50000, + perf_build_multipart() + ). +-endif. + +%% Headers. + +%% @doc Convenience function for extracting information from headers +%% when parsing a multipart/form-data stream. + +-spec form_data(headers() | #{binary() => binary()}) + -> {data, binary()} + | {file, binary(), binary(), binary()}. +form_data(Headers) when is_map(Headers) -> + form_data(maps:to_list(Headers)); +form_data(Headers) -> + {_, DispositionBin} = lists:keyfind(<<"content-disposition">>, 1, Headers), + {<<"form-data">>, Params} = parse_content_disposition(DispositionBin), + {_, FieldName} = lists:keyfind(<<"name">>, 1, Params), + case lists:keyfind(<<"filename">>, 1, Params) of + false -> + {data, FieldName}; + {_, Filename} -> + Type = case lists:keyfind(<<"content-type">>, 1, Headers) of + false -> <<"text/plain">>; + {_, T} -> T + end, + {file, FieldName, Filename, Type} + end. + +-ifdef(TEST). +form_data_test_() -> + Tests = [ + {[{<<"content-disposition">>, <<"form-data; name=\"submit-name\"">>}], + {data, <<"submit-name">>}}, + {[{<<"content-disposition">>, + <<"form-data; name=\"files\"; filename=\"file1.txt\"">>}, + {<<"content-type">>, <<"text/x-plain">>}], + {file, <<"files">>, <<"file1.txt">>, <<"text/x-plain">>}} + ], + [{lists:flatten(io_lib:format("~p", [V])), + fun() -> R = form_data(V) end} || {V, R} <- Tests]. +-endif. + +%% @todo parse_content_description +%% @todo parse_content_id + +%% @doc Parse an RFC 2183 content-disposition value. 
+%% @todo Support RFC 2231. + +-spec parse_content_disposition(binary()) + -> {binary(), [{binary(), binary()}]}. +parse_content_disposition(Bin) -> + parse_cd_type(Bin, <<>>). + +parse_cd_type(<<>>, Acc) -> + {Acc, []}; +parse_cd_type(<< C, Rest/bits >>, Acc) -> + case C of + $; -> {Acc, parse_before_param(Rest, [])}; + $\s -> {Acc, parse_before_param(Rest, [])}; + $\t -> {Acc, parse_before_param(Rest, [])}; + _ -> ?LOWER(parse_cd_type, Rest, Acc) + end. + +-ifdef(TEST). +parse_content_disposition_test_() -> + Tests = [ + {<<"inline">>, {<<"inline">>, []}}, + {<<"attachment">>, {<<"attachment">>, []}}, + {<<"attachment; filename=genome.jpeg;" + " modification-date=\"Wed, 12 Feb 1997 16:29:51 -0500\";">>, + {<<"attachment">>, [ + {<<"filename">>, <<"genome.jpeg">>}, + {<<"modification-date">>, <<"Wed, 12 Feb 1997 16:29:51 -0500">>} + ]}}, + {<<"form-data; name=\"user\"">>, + {<<"form-data">>, [{<<"name">>, <<"user">>}]}}, + {<<"form-data; NAME=\"submit-name\"">>, + {<<"form-data">>, [{<<"name">>, <<"submit-name">>}]}}, + {<<"form-data; name=\"files\"; filename=\"file1.txt\"">>, + {<<"form-data">>, [ + {<<"name">>, <<"files">>}, + {<<"filename">>, <<"file1.txt">>} + ]}}, + {<<"file; filename=\"file1.txt\"">>, + {<<"file">>, [{<<"filename">>, <<"file1.txt">>}]}}, + {<<"file; filename=\"file2.gif\"">>, + {<<"file">>, [{<<"filename">>, <<"file2.gif">>}]}} + ], + [{V, fun() -> R = parse_content_disposition(V) end} || {V, R} <- Tests]. + +horse_parse_content_disposition_attachment() -> + horse:repeat(100000, + parse_content_disposition(<<"attachment; filename=genome.jpeg;" + " modification-date=\"Wed, 12 Feb 1997 16:29:51 -0500\";">>) + ). + +horse_parse_content_disposition_form_data() -> + horse:repeat(100000, + parse_content_disposition( + <<"form-data; name=\"files\"; filename=\"file1.txt\"">>) + ). + +horse_parse_content_disposition_inline() -> + horse:repeat(100000, + parse_content_disposition(<<"inline">>) + ). +-endif. + +%% @doc Parse an RFC 2045 content-transfer-encoding header. + +-spec parse_content_transfer_encoding(binary()) -> binary(). +parse_content_transfer_encoding(Bin) -> + ?LOWER(Bin). + +-ifdef(TEST). +parse_content_transfer_encoding_test_() -> + Tests = [ + {<<"7bit">>, <<"7bit">>}, + {<<"7BIT">>, <<"7bit">>}, + {<<"8bit">>, <<"8bit">>}, + {<<"binary">>, <<"binary">>}, + {<<"quoted-printable">>, <<"quoted-printable">>}, + {<<"base64">>, <<"base64">>}, + {<<"Base64">>, <<"base64">>}, + {<<"BASE64">>, <<"base64">>}, + {<<"bAsE64">>, <<"base64">>} + ], + [{V, fun() -> R = parse_content_transfer_encoding(V) end} + || {V, R} <- Tests]. + +horse_parse_content_transfer_encoding() -> + horse:repeat(100000, + parse_content_transfer_encoding(<<"QUOTED-PRINTABLE">>) + ). +-endif. + +%% @doc Parse an RFC 2045 content-type header. + +-spec parse_content_type(binary()) + -> {binary(), binary(), [{binary(), binary()}]}. +parse_content_type(Bin) -> + parse_ct_type(Bin, <<>>). + +parse_ct_type(<< C, Rest/bits >>, Acc) -> + case C of + $/ -> parse_ct_subtype(Rest, Acc, <<>>); + _ -> ?LOWER(parse_ct_type, Rest, Acc) + end. + +parse_ct_subtype(<<>>, Type, Subtype) when Subtype =/= <<>> -> + {Type, Subtype, []}; +parse_ct_subtype(<< C, Rest/bits >>, Type, Acc) -> + case C of + $; -> {Type, Acc, parse_before_param(Rest, [])}; + $\s -> {Type, Acc, parse_before_param(Rest, [])}; + $\t -> {Type, Acc, parse_before_param(Rest, [])}; + _ -> ?LOWER(parse_ct_subtype, Rest, Type, Acc) + end. + +-ifdef(TEST). 
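Putting the header helpers together: the lowercased headers returned by parse_headers/2 can be handed to form_data/1 to decide whether a part is a plain field or an uploaded file. A sketch with invented header values, shaped like the ?TEST2_MIME fixture above:

    Headers = [
        {<<"content-disposition">>,
            <<"form-data; name=\"avatar\"; filename=\"me.png\"">>},
        {<<"content-type">>, <<"image/png">>}
    ],
    {file, <<"avatar">>, <<"me.png">>, <<"image/png">>} =
        cow_multipart:form_data(Headers),
    %% Without a filename parameter the same call tags the part as plain data:
    {data, <<"nick">>} = cow_multipart:form_data(
        [{<<"content-disposition">>, <<"form-data; name=\"nick\"">>}]).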
+parse_content_type_test_() -> + Tests = [ + {<<"image/gif">>, + {<<"image">>, <<"gif">>, []}}, + {<<"text/plain">>, + {<<"text">>, <<"plain">>, []}}, + {<<"text/plain; charset=us-ascii">>, + {<<"text">>, <<"plain">>, [{<<"charset">>, <<"us-ascii">>}]}}, + {<<"text/plain; charset=\"us-ascii\"">>, + {<<"text">>, <<"plain">>, [{<<"charset">>, <<"us-ascii">>}]}}, + {<<"multipart/form-data; boundary=AaB03x">>, + {<<"multipart">>, <<"form-data">>, + [{<<"boundary">>, <<"AaB03x">>}]}}, + {<<"multipart/mixed; boundary=BbC04y">>, + {<<"multipart">>, <<"mixed">>, [{<<"boundary">>, <<"BbC04y">>}]}}, + {<<"multipart/mixed; boundary=--------">>, + {<<"multipart">>, <<"mixed">>, [{<<"boundary">>, <<"--------">>}]}}, + {<<"application/x-horse; filename=genome.jpeg;" + " some-date=\"Wed, 12 Feb 1997 16:29:51 -0500\";" + " charset=us-ascii; empty=; number=12345">>, + {<<"application">>, <<"x-horse">>, [ + {<<"filename">>, <<"genome.jpeg">>}, + {<<"some-date">>, <<"Wed, 12 Feb 1997 16:29:51 -0500">>}, + {<<"charset">>, <<"us-ascii">>}, + {<<"empty">>, <<>>}, + {<<"number">>, <<"12345">>} + ]}} + ], + [{V, fun() -> R = parse_content_type(V) end} + || {V, R} <- Tests]. + +horse_parse_content_type_zero() -> + horse:repeat(100000, + parse_content_type(<<"text/plain">>) + ). + +horse_parse_content_type_one() -> + horse:repeat(100000, + parse_content_type(<<"text/plain; charset=\"us-ascii\"">>) + ). + +horse_parse_content_type_five() -> + horse:repeat(100000, + parse_content_type(<<"application/x-horse; filename=genome.jpeg;" + " some-date=\"Wed, 12 Feb 1997 16:29:51 -0500\";" + " charset=us-ascii; empty=; number=12345">>) + ). +-endif. + +%% @doc Parse RFC 2045 parameters. + +parse_before_param(<<>>, Params) -> + lists:reverse(Params); +parse_before_param(<< C, Rest/bits >>, Params) -> + case C of + $; -> parse_before_param(Rest, Params); + $\s -> parse_before_param(Rest, Params); + $\t -> parse_before_param(Rest, Params); + _ -> ?LOWER(parse_param_name, Rest, Params, <<>>) + end. + +parse_param_name(<<>>, Params, Acc) -> + lists:reverse([{Acc, <<>>}|Params]); +parse_param_name(<< C, Rest/bits >>, Params, Acc) -> + case C of + $= -> parse_param_value(Rest, Params, Acc); + _ -> ?LOWER(parse_param_name, Rest, Params, Acc) + end. + +parse_param_value(<<>>, Params, Name) -> + lists:reverse([{Name, <<>>}|Params]); +parse_param_value(<< C, Rest/bits >>, Params, Name) -> + case C of + $" -> parse_param_quoted_value(Rest, Params, Name, <<>>); + $; -> parse_before_param(Rest, [{Name, <<>>}|Params]); + $\s -> parse_before_param(Rest, [{Name, <<>>}|Params]); + $\t -> parse_before_param(Rest, [{Name, <<>>}|Params]); + C -> parse_param_value(Rest, Params, Name, << C >>) + end. + +parse_param_value(<<>>, Params, Name, Acc) -> + lists:reverse([{Name, Acc}|Params]); +parse_param_value(<< C, Rest/bits >>, Params, Name, Acc) -> + case C of + $; -> parse_before_param(Rest, [{Name, Acc}|Params]); + $\s -> parse_before_param(Rest, [{Name, Acc}|Params]); + $\t -> parse_before_param(Rest, [{Name, Acc}|Params]); + C -> parse_param_value(Rest, Params, Name, << Acc/binary, C >>) + end. + +%% We expect a final $" so no need to test for <<>>. 
+parse_param_quoted_value(<< $\\, C, Rest/bits >>, Params, Name, Acc) -> + parse_param_quoted_value(Rest, Params, Name, << Acc/binary, C >>); +parse_param_quoted_value(<< $", Rest/bits >>, Params, Name, Acc) -> + parse_before_param(Rest, [{Name, Acc}|Params]); +parse_param_quoted_value(<< C, Rest/bits >>, Params, Name, Acc) + when C =/= $\r -> + parse_param_quoted_value(Rest, Params, Name, << Acc/binary, C >>). diff --git a/deps/cowlib/src/cow_qs.erl b/deps/cowlib/src/cow_qs.erl new file mode 100644 index 0000000..d812e39 --- /dev/null +++ b/deps/cowlib/src/cow_qs.erl @@ -0,0 +1,563 @@ +%% Copyright (c) 2013-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cow_qs). + +-export([parse_qs/1]). +-export([qs/1]). +-export([urldecode/1]). +-export([urlencode/1]). + +-type qs_vals() :: [{binary(), binary() | true}]. + +%% @doc Parse an application/x-www-form-urlencoded string. +%% +%% The percent decoding is inlined to greatly improve the performance +%% by avoiding copying binaries twice (once for extracting, once for +%% decoding) instead of just extracting the proper representation. + +-spec parse_qs(binary()) -> qs_vals(). +parse_qs(B) -> + parse_qs_name(B, [], <<>>). + +parse_qs_name(<< $%, H, L, Rest/bits >>, Acc, Name) -> + C = (unhex(H) bsl 4 bor unhex(L)), + parse_qs_name(Rest, Acc, << Name/bits, C >>); +parse_qs_name(<< $+, Rest/bits >>, Acc, Name) -> + parse_qs_name(Rest, Acc, << Name/bits, " " >>); +parse_qs_name(<< $=, Rest/bits >>, Acc, Name) when Name =/= <<>> -> + parse_qs_value(Rest, Acc, Name, <<>>); +parse_qs_name(<< $&, Rest/bits >>, Acc, Name) -> + case Name of + <<>> -> parse_qs_name(Rest, Acc, <<>>); + _ -> parse_qs_name(Rest, [{Name, true}|Acc], <<>>) + end; +parse_qs_name(<< C, Rest/bits >>, Acc, Name) when C =/= $%, C =/= $= -> + parse_qs_name(Rest, Acc, << Name/bits, C >>); +parse_qs_name(<<>>, Acc, Name) -> + case Name of + <<>> -> lists:reverse(Acc); + _ -> lists:reverse([{Name, true}|Acc]) + end. + +parse_qs_value(<< $%, H, L, Rest/bits >>, Acc, Name, Value) -> + C = (unhex(H) bsl 4 bor unhex(L)), + parse_qs_value(Rest, Acc, Name, << Value/bits, C >>); +parse_qs_value(<< $+, Rest/bits >>, Acc, Name, Value) -> + parse_qs_value(Rest, Acc, Name, << Value/bits, " " >>); +parse_qs_value(<< $&, Rest/bits >>, Acc, Name, Value) -> + parse_qs_name(Rest, [{Name, Value}|Acc], <<>>); +parse_qs_value(<< C, Rest/bits >>, Acc, Name, Value) when C =/= $% -> + parse_qs_value(Rest, Acc, Name, << Value/bits, C >>); +parse_qs_value(<<>>, Acc, Name, Value) -> + lists:reverse([{Name, Value}|Acc]). + +-ifdef(TEST). 
+parse_qs_test_() -> + Tests = [ + {<<>>, []}, + {<<"&">>, []}, + {<<"a">>, [{<<"a">>, true}]}, + {<<"a&">>, [{<<"a">>, true}]}, + {<<"&a">>, [{<<"a">>, true}]}, + {<<"a&b">>, [{<<"a">>, true}, {<<"b">>, true}]}, + {<<"a&&b">>, [{<<"a">>, true}, {<<"b">>, true}]}, + {<<"a&b&">>, [{<<"a">>, true}, {<<"b">>, true}]}, + {<<"=">>, error}, + {<<"=b">>, error}, + {<<"a=">>, [{<<"a">>, <<>>}]}, + {<<"a=b">>, [{<<"a">>, <<"b">>}]}, + {<<"a=&b=">>, [{<<"a">>, <<>>}, {<<"b">>, <<>>}]}, + {<<"a=b&c&d=e">>, [{<<"a">>, <<"b">>}, + {<<"c">>, true}, {<<"d">>, <<"e">>}]}, + {<<"a=b=c&d=e=f&g=h=i">>, [{<<"a">>, <<"b=c">>}, + {<<"d">>, <<"e=f">>}, {<<"g">>, <<"h=i">>}]}, + {<<"+">>, [{<<" ">>, true}]}, + {<<"+=+">>, [{<<" ">>, <<" ">>}]}, + {<<"a+b=c+d">>, [{<<"a b">>, <<"c d">>}]}, + {<<"+a+=+b+&+c+=+d+">>, [{<<" a ">>, <<" b ">>}, + {<<" c ">>, <<" d ">>}]}, + {<<"a%20b=c%20d">>, [{<<"a b">>, <<"c d">>}]}, + {<<"%25%26%3D=%25%26%3D&_-.=.-_">>, [{<<"%&=">>, <<"%&=">>}, + {<<"_-.">>, <<".-_">>}]}, + {<<"for=extend%2Franch">>, [{<<"for">>, <<"extend/ranch">>}]} + ], + [{Qs, fun() -> + E = try parse_qs(Qs) of + R -> R + catch _:_ -> + error + end + end} || {Qs, E} <- Tests]. + +parse_qs_identity_test_() -> + Tests = [ + <<"+">>, + <<"hl=en&q=erlang+cowboy">>, + <<"direction=desc&for=extend%2Franch&sort=updated&state=open">>, + <<"i=EWiIXmPj5gl6&v=QowBp0oDLQXdd4x_GwiywA&ip=98.20.31.81&" + "la=en&pg=New8.undertonebrandsafe.com%2F698a2525065ee2" + "60c0b2f2aaad89ab82&re=&sz=1&fc=1&fr=140&br=3&bv=11.0." + "696.16&os=3&ov=&rs=vpl&k=cookies%7Csale%7Cbrowser%7Cm" + "ore%7Cprivacy%7Cstatistics%7Cactivities%7Cauction%7Ce" + "mail%7Cfree%7Cin...&t=112373&xt=5%7C61%7C0&tz=-1&ev=x" + "&tk=&za=1&ortb-za=1&zu=&zl=&ax=U&ay=U&ortb-pid=536454" + ".55&ortb-sid=112373.8&seats=999&ortb-xt=IAB24&ortb-ugc=">>, + <<"i=9pQNskA&v=0ySQQd1F&ev=12345678&t=12345&sz=3&ip=67.58." + "236.89&la=en&pg=http%3A%2F%2Fwww.yahoo.com%2Fpage1.ht" + "m&re=http%3A%2F%2Fsearch.google.com&fc=1&fr=1&br=2&bv" + "=3.0.14&os=1&ov=XP&k=cars%2Cford&rs=js&xt=5%7C22%7C23" + "4&tz=%2B180&tk=key1%3Dvalue1%7Ckey2%3Dvalue2&zl=4%2C5" + "%2C6&za=4&zu=competitor.com&ua=Mozilla%2F5.0+%28Windo" + "ws%3B+U%3B+Windows+NT+6.1%3B+en-US%29+AppleWebKit%2F5" + "34.13+%28KHTML%2C+like+Gecko%29+Chrome%2F9.0.597.98+S" + "afari%2F534.13&ortb-za=1%2C6%2C13&ortb-pid=521732&ort" + "b-sid=521732&ortb-xt=IAB3&ortb-ugc=">> + ], + [{V, fun() -> V = qs(parse_qs(V)) end} || V <- Tests]. + +horse_parse_qs_shorter() -> + horse:repeat(20000, + parse_qs(<<"hl=en&q=erlang%20cowboy">>) + ). + +horse_parse_qs_short() -> + horse:repeat(20000, + parse_qs( + <<"direction=desc&for=extend%2Franch&sort=updated&state=open">>) + ). + +horse_parse_qs_long() -> + horse:repeat(20000, + parse_qs(<<"i=EWiIXmPj5gl6&v=QowBp0oDLQXdd4x_GwiywA&ip=98.20.31.81&" + "la=en&pg=New8.undertonebrandsafe.com%2F698a2525065ee260c0b2f2a" + "aad89ab82&re=&sz=1&fc=1&fr=140&br=3&bv=11.0.696.16&os=3&ov=&rs" + "=vpl&k=cookies%7Csale%7Cbrowser%7Cmore%7Cprivacy%7Cstatistics%" + "7Cactivities%7Cauction%7Cemail%7Cfree%7Cin...&t=112373&xt=5%7C" + "61%7C0&tz=-1&ev=x&tk=&za=1&ortb-za=1&zu=&zl=&ax=U&ay=U&ortb-pi" + "d=536454.55&ortb-sid=112373.8&seats=999&ortb-xt=IAB24&ortb-ugc" + "=">>) + ). + +horse_parse_qs_longer() -> + horse:repeat(20000, + parse_qs(<<"i=9pQNskA&v=0ySQQd1F&ev=12345678&t=12345&sz=3&ip=67.58." 
+ "236.89&la=en&pg=http%3A%2F%2Fwww.yahoo.com%2Fpage1.htm&re=http" + "%3A%2F%2Fsearch.google.com&fc=1&fr=1&br=2&bv=3.0.14&os=1&ov=XP" + "&k=cars%2cford&rs=js&xt=5%7c22%7c234&tz=%2b180&tk=key1%3Dvalue" + "1%7Ckey2%3Dvalue2&zl=4,5,6&za=4&zu=competitor.com&ua=Mozilla%2" + "F5.0%20(Windows%3B%20U%3B%20Windows%20NT%206.1%3B%20en-US)%20A" + "ppleWebKit%2F534.13%20(KHTML%2C%20like%20Gecko)%20Chrome%2F9.0" + ".597.98%20Safari%2F534.13&ortb-za=1%2C6%2C13&ortb-pid=521732&o" + "rtb-sid=521732&ortb-xt=IAB3&ortb-ugc=">>) + ). +-endif. + +%% @doc Build an application/x-www-form-urlencoded string. + +-spec qs(qs_vals()) -> binary(). +qs([]) -> + <<>>; +qs(L) -> + qs(L, <<>>). + +qs([], Acc) -> + << $&, Qs/bits >> = Acc, + Qs; +qs([{Name, true}|Tail], Acc) -> + Acc2 = urlencode(Name, << Acc/bits, $& >>), + qs(Tail, Acc2); +qs([{Name, Value}|Tail], Acc) -> + Acc2 = urlencode(Name, << Acc/bits, $& >>), + Acc3 = urlencode(Value, << Acc2/bits, $= >>), + qs(Tail, Acc3). + +-define(QS_SHORTER, [ + {<<"hl">>, <<"en">>}, + {<<"q">>, <<"erlang cowboy">>} +]). + +-define(QS_SHORT, [ + {<<"direction">>, <<"desc">>}, + {<<"for">>, <<"extend/ranch">>}, + {<<"sort">>, <<"updated">>}, + {<<"state">>, <<"open">>} +]). + +-define(QS_LONG, [ + {<<"i">>, <<"EWiIXmPj5gl6">>}, + {<<"v">>, <<"QowBp0oDLQXdd4x_GwiywA">>}, + {<<"ip">>, <<"98.20.31.81">>}, + {<<"la">>, <<"en">>}, + {<<"pg">>, <<"New8.undertonebrandsafe.com/" + "698a2525065ee260c0b2f2aaad89ab82">>}, + {<<"re">>, <<>>}, + {<<"sz">>, <<"1">>}, + {<<"fc">>, <<"1">>}, + {<<"fr">>, <<"140">>}, + {<<"br">>, <<"3">>}, + {<<"bv">>, <<"11.0.696.16">>}, + {<<"os">>, <<"3">>}, + {<<"ov">>, <<>>}, + {<<"rs">>, <<"vpl">>}, + {<<"k">>, <<"cookies|sale|browser|more|privacy|statistics|" + "activities|auction|email|free|in...">>}, + {<<"t">>, <<"112373">>}, + {<<"xt">>, <<"5|61|0">>}, + {<<"tz">>, <<"-1">>}, + {<<"ev">>, <<"x">>}, + {<<"tk">>, <<>>}, + {<<"za">>, <<"1">>}, + {<<"ortb-za">>, <<"1">>}, + {<<"zu">>, <<>>}, + {<<"zl">>, <<>>}, + {<<"ax">>, <<"U">>}, + {<<"ay">>, <<"U">>}, + {<<"ortb-pid">>, <<"536454.55">>}, + {<<"ortb-sid">>, <<"112373.8">>}, + {<<"seats">>, <<"999">>}, + {<<"ortb-xt">>, <<"IAB24">>}, + {<<"ortb-ugc">>, <<>>} +]). + +-define(QS_LONGER, [ + {<<"i">>, <<"9pQNskA">>}, + {<<"v">>, <<"0ySQQd1F">>}, + {<<"ev">>, <<"12345678">>}, + {<<"t">>, <<"12345">>}, + {<<"sz">>, <<"3">>}, + {<<"ip">>, <<"67.58.236.89">>}, + {<<"la">>, <<"en">>}, + {<<"pg">>, <<"http://www.yahoo.com/page1.htm">>}, + {<<"re">>, <<"http://search.google.com">>}, + {<<"fc">>, <<"1">>}, + {<<"fr">>, <<"1">>}, + {<<"br">>, <<"2">>}, + {<<"bv">>, <<"3.0.14">>}, + {<<"os">>, <<"1">>}, + {<<"ov">>, <<"XP">>}, + {<<"k">>, <<"cars,ford">>}, + {<<"rs">>, <<"js">>}, + {<<"xt">>, <<"5|22|234">>}, + {<<"tz">>, <<"+180">>}, + {<<"tk">>, <<"key1=value1|key2=value2">>}, + {<<"zl">>, <<"4,5,6">>}, + {<<"za">>, <<"4">>}, + {<<"zu">>, <<"competitor.com">>}, + {<<"ua">>, <<"Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) " + "AppleWebKit/534.13 (KHTML, like Gecko) Chrome/9.0.597.98 " + "Safari/534.13">>}, + {<<"ortb-za">>, <<"1,6,13">>}, + {<<"ortb-pid">>, <<"521732">>}, + {<<"ortb-sid">>, <<"521732">>}, + {<<"ortb-xt">>, <<"IAB3">>}, + {<<"ortb-ugc">>, <<>>} +]). + +-ifdef(TEST). 
+qs_test_() -> + Tests = [ + {[<<"a">>], error}, + {[{<<"a">>, <<"b">>, <<"c">>}], error}, + {[], <<>>}, + {[{<<"a">>, true}], <<"a">>}, + {[{<<"a">>, true}, {<<"b">>, true}], <<"a&b">>}, + {[{<<"a">>, <<>>}], <<"a=">>}, + {[{<<"a">>, <<"b">>}], <<"a=b">>}, + {[{<<"a">>, <<>>}, {<<"b">>, <<>>}], <<"a=&b=">>}, + {[{<<"a">>, <<"b">>}, {<<"c">>, true}, {<<"d">>, <<"e">>}], + <<"a=b&c&d=e">>}, + {[{<<"a">>, <<"b=c">>}, {<<"d">>, <<"e=f">>}, {<<"g">>, <<"h=i">>}], + <<"a=b%3Dc&d=e%3Df&g=h%3Di">>}, + {[{<<" ">>, true}], <<"+">>}, + {[{<<" ">>, <<" ">>}], <<"+=+">>}, + {[{<<"a b">>, <<"c d">>}], <<"a+b=c+d">>}, + {[{<<" a ">>, <<" b ">>}, {<<" c ">>, <<" d ">>}], + <<"+a+=+b+&+c+=+d+">>}, + {[{<<"%&=">>, <<"%&=">>}, {<<"_-.">>, <<".-_">>}], + <<"%25%26%3D=%25%26%3D&_-.=.-_">>}, + {[{<<"for">>, <<"extend/ranch">>}], <<"for=extend%2Franch">>} + ], + [{lists:flatten(io_lib:format("~p", [Vals])), fun() -> + E = try qs(Vals) of + R -> R + catch _:_ -> + error + end + end} || {Vals, E} <- Tests]. + +qs_identity_test_() -> + Tests = [ + [{<<"+">>, true}], + ?QS_SHORTER, + ?QS_SHORT, + ?QS_LONG, + ?QS_LONGER + ], + [{lists:flatten(io_lib:format("~p", [V])), fun() -> + V = parse_qs(qs(V)) + end} || V <- Tests]. + +horse_qs_shorter() -> + horse:repeat(20000, qs(?QS_SHORTER)). + +horse_qs_short() -> + horse:repeat(20000, qs(?QS_SHORT)). + +horse_qs_long() -> + horse:repeat(20000, qs(?QS_LONG)). + +horse_qs_longer() -> + horse:repeat(20000, qs(?QS_LONGER)). +-endif. + +%% @doc Decode a percent encoded string (x-www-form-urlencoded rules). + +-spec urldecode(B) -> B when B::binary(). +urldecode(B) -> + urldecode(B, <<>>). + +urldecode(<< $%, H, L, Rest/bits >>, Acc) -> + C = (unhex(H) bsl 4 bor unhex(L)), + urldecode(Rest, << Acc/bits, C >>); +urldecode(<< $+, Rest/bits >>, Acc) -> + urldecode(Rest, << Acc/bits, " " >>); +urldecode(<< C, Rest/bits >>, Acc) when C =/= $% -> + urldecode(Rest, << Acc/bits, C >>); +urldecode(<<>>, Acc) -> + Acc. + +unhex($0) -> 0; +unhex($1) -> 1; +unhex($2) -> 2; +unhex($3) -> 3; +unhex($4) -> 4; +unhex($5) -> 5; +unhex($6) -> 6; +unhex($7) -> 7; +unhex($8) -> 8; +unhex($9) -> 9; +unhex($A) -> 10; +unhex($B) -> 11; +unhex($C) -> 12; +unhex($D) -> 13; +unhex($E) -> 14; +unhex($F) -> 15; +unhex($a) -> 10; +unhex($b) -> 11; +unhex($c) -> 12; +unhex($d) -> 13; +unhex($e) -> 14; +unhex($f) -> 15. + +-ifdef(TEST). +urldecode_test_() -> + Tests = [ + {<<"%20">>, <<" ">>}, + {<<"+">>, <<" ">>}, + {<<"%00">>, <<0>>}, + {<<"%fF">>, <<255>>}, + {<<"123">>, <<"123">>}, + {<<"%i5">>, error}, + {<<"%5">>, error} + ], + [{Qs, fun() -> + E = try urldecode(Qs) of + R -> R + catch _:_ -> + error + end + end} || {Qs, E} <- Tests]. + +urldecode_identity_test_() -> + Tests = [ + <<"+">>, + <<"nothingnothingnothingnothing">>, + <<"Small+fast+modular+HTTP+server">>, + <<"Small%2C+fast%2C+modular+HTTP+server.">>, + <<"%E3%83%84%E3%82%A4%E3%83%B3%E3%82%BD%E3%82%A6%E3%83" + "%AB%E3%80%9C%E8%BC%AA%E5%BB%BB%E3%81%99%E3%82%8B%E6%97%8B%E5" + "%BE%8B%E3%80%9C">> + ], + [{V, fun() -> V = urlencode(urldecode(V)) end} || V <- Tests]. + +horse_urldecode() -> + horse:repeat(100000, + urldecode(<<"nothingnothingnothingnothing">>) + ). + +horse_urldecode_plus() -> + horse:repeat(100000, + urldecode(<<"Small+fast+modular+HTTP+server">>) + ). + +horse_urldecode_hex() -> + horse:repeat(100000, + urldecode(<<"Small%2C%20fast%2C%20modular%20HTTP%20server.">>) + ). 
+ +horse_urldecode_jp_hex() -> + horse:repeat(100000, + urldecode(<<"%E3%83%84%E3%82%A4%E3%83%B3%E3%82%BD%E3%82%A6%E3%83" + "%AB%E3%80%9C%E8%BC%AA%E5%BB%BB%E3%81%99%E3%82%8B%E6%97%8B%E5" + "%BE%8B%E3%80%9C">>) + ). + +horse_urldecode_mix() -> + horse:repeat(100000, + urldecode(<<"Small%2C+fast%2C+modular+HTTP+server.">>) + ). +-endif. + +%% @doc Percent encode a string (x-www-form-urlencoded rules). + +-spec urlencode(B) -> B when B::binary(). +urlencode(B) -> + urlencode(B, <<>>). + +urlencode(<< $\s, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $+ >>); +urlencode(<< $-, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $- >>); +urlencode(<< $., Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $. >>); +urlencode(<< $0, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $0 >>); +urlencode(<< $1, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $1 >>); +urlencode(<< $2, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $2 >>); +urlencode(<< $3, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $3 >>); +urlencode(<< $4, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $4 >>); +urlencode(<< $5, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $5 >>); +urlencode(<< $6, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $6 >>); +urlencode(<< $7, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $7 >>); +urlencode(<< $8, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $8 >>); +urlencode(<< $9, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $9 >>); +urlencode(<< $A, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $A >>); +urlencode(<< $B, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $B >>); +urlencode(<< $C, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $C >>); +urlencode(<< $D, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $D >>); +urlencode(<< $E, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $E >>); +urlencode(<< $F, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $F >>); +urlencode(<< $G, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $G >>); +urlencode(<< $H, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $H >>); +urlencode(<< $I, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $I >>); +urlencode(<< $J, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $J >>); +urlencode(<< $K, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $K >>); +urlencode(<< $L, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $L >>); +urlencode(<< $M, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $M >>); +urlencode(<< $N, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $N >>); +urlencode(<< $O, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $O >>); +urlencode(<< $P, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $P >>); +urlencode(<< $Q, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $Q >>); +urlencode(<< $R, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $R >>); +urlencode(<< $S, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $S >>); +urlencode(<< $T, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $T >>); +urlencode(<< $U, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $U >>); +urlencode(<< $V, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $V >>); +urlencode(<< $W, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $W >>); +urlencode(<< $X, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $X >>); +urlencode(<< $Y, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $Y >>); +urlencode(<< $Z, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $Z >>); +urlencode(<< $_, Rest/bits >>, Acc) -> urlencode(Rest, << 
Acc/bits, $_ >>); +urlencode(<< $a, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $a >>); +urlencode(<< $b, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $b >>); +urlencode(<< $c, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $c >>); +urlencode(<< $d, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $d >>); +urlencode(<< $e, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $e >>); +urlencode(<< $f, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $f >>); +urlencode(<< $g, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $g >>); +urlencode(<< $h, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $h >>); +urlencode(<< $i, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $i >>); +urlencode(<< $j, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $j >>); +urlencode(<< $k, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $k >>); +urlencode(<< $l, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $l >>); +urlencode(<< $m, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $m >>); +urlencode(<< $n, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $n >>); +urlencode(<< $o, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $o >>); +urlencode(<< $p, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $p >>); +urlencode(<< $q, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $q >>); +urlencode(<< $r, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $r >>); +urlencode(<< $s, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $s >>); +urlencode(<< $t, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $t >>); +urlencode(<< $u, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $u >>); +urlencode(<< $v, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $v >>); +urlencode(<< $w, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $w >>); +urlencode(<< $x, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $x >>); +urlencode(<< $y, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $y >>); +urlencode(<< $z, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $z >>); +urlencode(<< C, Rest/bits >>, Acc) -> + H = hex(C bsr 4), + L = hex(C band 16#0f), + urlencode(Rest, << Acc/bits, $%, H, L >>); +urlencode(<<>>, Acc) -> + Acc. + +hex( 0) -> $0; +hex( 1) -> $1; +hex( 2) -> $2; +hex( 3) -> $3; +hex( 4) -> $4; +hex( 5) -> $5; +hex( 6) -> $6; +hex( 7) -> $7; +hex( 8) -> $8; +hex( 9) -> $9; +hex(10) -> $A; +hex(11) -> $B; +hex(12) -> $C; +hex(13) -> $D; +hex(14) -> $E; +hex(15) -> $F. + +-ifdef(TEST). +urlencode_test_() -> + Tests = [ + {<<255, 0>>, <<"%FF%00">>}, + {<<255, " ">>, <<"%FF+">>}, + {<<" ">>, <<"+">>}, + {<<"aBc123">>, <<"aBc123">>}, + {<<".-_">>, <<".-_">>} + ], + [{V, fun() -> E = urlencode(V) end} || {V, E} <- Tests]. + +urlencode_identity_test_() -> + Tests = [ + <<"+">>, + <<"nothingnothingnothingnothing">>, + <<"Small fast modular HTTP server">>, + <<"Small, fast, modular HTTP server.">>, + <<227,131,132,227,130,164,227,131,179,227,130,189,227, + 130,166,227,131,171,227,128,156,232,188,170,229,187,187,227, + 129,153,227,130,139,230,151,139,229,190,139,227,128,156>> + ], + [{V, fun() -> V = urldecode(urlencode(V)) end} || V <- Tests]. + +horse_urlencode() -> + horse:repeat(100000, + urlencode(<<"nothingnothingnothingnothing">>) + ). + +horse_urlencode_plus() -> + horse:repeat(100000, + urlencode(<<"Small fast modular HTTP server">>) + ). 
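And the low-level pair on its own, with an invented input: everything outside the unreserved set is percent-encoded byte by byte with uppercase hex, while spaces become +:

    <<"50%25+off%21">> = cow_qs:urlencode(<<"50% off!">>),
    <<"50% off!">> = cow_qs:urldecode(<<"50%25+off%21">>).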
+ +horse_urlencode_jp() -> + horse:repeat(100000, + urlencode(<<227,131,132,227,130,164,227,131,179,227,130,189,227, + 130,166,227,131,171,227,128,156,232,188,170,229,187,187,227, + 129,153,227,130,139,230,151,139,229,190,139,227,128,156>>) + ). + +horse_urlencode_mix() -> + horse:repeat(100000, + urlencode(<<"Small, fast, modular HTTP server.">>) + ). +-endif. diff --git a/deps/cowlib/src/cow_spdy.erl b/deps/cowlib/src/cow_spdy.erl new file mode 100644 index 0000000..8bda45b --- /dev/null +++ b/deps/cowlib/src/cow_spdy.erl @@ -0,0 +1,313 @@ +%% Copyright (c) 2013-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cow_spdy). + +%% Zstream. +-export([deflate_init/0]). +-export([inflate_init/0]). + +%% Parse. +-export([split/1]). +-export([parse/2]). + +%% Build. +-export([data/3]). +-export([syn_stream/12]). +-export([syn_reply/6]). +-export([rst_stream/2]). +-export([settings/2]). +-export([ping/1]). +-export([goaway/2]). +%% @todo headers +%% @todo window_update + +-include("cow_spdy.hrl"). + +%% Zstream. + +deflate_init() -> + Zdef = zlib:open(), + ok = zlib:deflateInit(Zdef), + _ = zlib:deflateSetDictionary(Zdef, ?ZDICT), + Zdef. + +inflate_init() -> + Zinf = zlib:open(), + ok = zlib:inflateInit(Zinf), + Zinf. + +%% Parse. + +split(Data = << _:40, Length:24, _/bits >>) + when byte_size(Data) >= Length + 8 -> + Length2 = Length + 8, + << Frame:Length2/binary, Rest/bits >> = Data, + {true, Frame, Rest}; +split(_) -> + false. 
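+
+%% Editor's note: added illustration, not part of upstream cowlib.
+%% split/1 only returns a frame once the full length announced in the
+%% 24-bit length field has been buffered, so callers can accumulate TCP
+%% data and call it repeatedly. ping/1 further below builds a 12-byte frame.
+-ifdef(TEST).
+split_buffer_example_test() ->
+	Frame = ping(42),
+	12 = byte_size(Frame),
+	{true, Frame, <<"rest">>} = split(<<Frame/binary, "rest">>),
+	false = split(binary:part(Frame, 0, 11)),
+	ok.
+-endif.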
+ +parse(<< 0:1, StreamID:31, 0:7, IsFinFlag:1, _:24, Data/bits >>, _) -> + {data, StreamID, from_flag(IsFinFlag), Data}; +parse(<< 1:1, 3:15, 1:16, 0:6, IsUnidirectionalFlag:1, IsFinFlag:1, + _:25, StreamID:31, _:1, AssocToStreamID:31, Priority:3, _:5, + 0:8, Rest/bits >>, Zinf) -> + case parse_headers(Rest, Zinf) of + {ok, Headers, [{<<":host">>, Host}, {<<":method">>, Method}, + {<<":path">>, Path}, {<<":scheme">>, Scheme}, + {<<":version">>, Version}]} -> + {syn_stream, StreamID, AssocToStreamID, from_flag(IsFinFlag), + from_flag(IsUnidirectionalFlag), Priority, Method, + Scheme, Host, Path, Version, Headers}; + _ -> + {error, badprotocol} + end; +parse(<< 1:1, 3:15, 2:16, 0:7, IsFinFlag:1, _:25, + StreamID:31, Rest/bits >>, Zinf) -> + case parse_headers(Rest, Zinf) of + {ok, Headers, [{<<":status">>, Status}, {<<":version">>, Version}]} -> + {syn_reply, StreamID, from_flag(IsFinFlag), + Status, Version, Headers}; + _ -> + {error, badprotocol} + end; +parse(<< 1:1, 3:15, 3:16, 0:8, _:56, StatusCode:32 >>, _) + when StatusCode =:= 0; StatusCode > 11 -> + {error, badprotocol}; +parse(<< 1:1, 3:15, 3:16, 0:8, _:25, StreamID:31, StatusCode:32 >>, _) -> + Status = case StatusCode of + 1 -> protocol_error; + 2 -> invalid_stream; + 3 -> refused_stream; + 4 -> unsupported_version; + 5 -> cancel; + 6 -> internal_error; + 7 -> flow_control_error; + 8 -> stream_in_use; + 9 -> stream_already_closed; + 10 -> invalid_credentials; + 11 -> frame_too_large + end, + {rst_stream, StreamID, Status}; +parse(<< 1:1, 3:15, 4:16, 0:7, ClearSettingsFlag:1, _:24, + NbEntries:32, Rest/bits >>, _) -> + try + Settings = [begin + Is0 = 0, + Key = case ID of + 1 -> upload_bandwidth; + 2 -> download_bandwidth; + 3 -> round_trip_time; + 4 -> max_concurrent_streams; + 5 -> current_cwnd; + 6 -> download_retrans_rate; + 7 -> initial_window_size; + 8 -> client_certificate_vector_size + end, + {Key, Value, from_flag(PersistFlag), from_flag(WasPersistedFlag)} + end || << Is0:6, WasPersistedFlag:1, PersistFlag:1, + ID:24, Value:32 >> <= Rest], + NbEntries = length(Settings), + {settings, from_flag(ClearSettingsFlag), Settings} + catch _:_ -> + {error, badprotocol} + end; +parse(<< 1:1, 3:15, 6:16, 0:8, _:24, PingID:32 >>, _) -> + {ping, PingID}; +parse(<< 1:1, 3:15, 7:16, 0:8, _:56, StatusCode:32 >>, _) + when StatusCode > 2 -> + {error, badprotocol}; +parse(<< 1:1, 3:15, 7:16, 0:8, _:25, LastGoodStreamID:31, + StatusCode:32 >>, _) -> + Status = case StatusCode of + 0 -> ok; + 1 -> protocol_error; + 2 -> internal_error + end, + {goaway, LastGoodStreamID, Status}; +parse(<< 1:1, 3:15, 8:16, 0:7, IsFinFlag:1, _:25, StreamID:31, + Rest/bits >>, Zinf) -> + case parse_headers(Rest, Zinf) of + {ok, Headers, []} -> + {headers, StreamID, from_flag(IsFinFlag), Headers}; + _ -> + {error, badprotocol} + end; +parse(<< 1:1, 3:15, 9:16, 0:8, _:57, 0:31 >>, _) -> + {error, badprotocol}; +parse(<< 1:1, 3:15, 9:16, 0:8, _:25, StreamID:31, + _:1, DeltaWindowSize:31 >>, _) -> + {window_update, StreamID, DeltaWindowSize}; +parse(_, _) -> + {error, badprotocol}. + +parse_headers(Data, Zinf) -> + [<< NbHeaders:32, Rest/bits >>] = inflate(Zinf, Data), + parse_headers(Rest, NbHeaders, [], []). 
+ +parse_headers(<<>>, 0, Headers, SpHeaders) -> + {ok, lists:reverse(Headers), lists:sort(SpHeaders)}; +parse_headers(<<>>, _, _, _) -> + error; +parse_headers(_, 0, _, _) -> + error; +parse_headers(<< 0:32, _/bits >>, _, _, _) -> + error; +parse_headers(<< L1:32, Key:L1/binary, L2:32, Value:L2/binary, Rest/bits >>, + NbHeaders, Acc, SpAcc) -> + case Key of + << $:, _/bits >> -> + parse_headers(Rest, NbHeaders - 1, Acc, + lists:keystore(Key, 1, SpAcc, {Key, Value})); + _ -> + parse_headers(Rest, NbHeaders - 1, [{Key, Value}|Acc], SpAcc) + end. + +inflate(Zinf, Data) -> + try + zlib:inflate(Zinf, Data) + catch _:_ -> + ok = zlib:inflateSetDictionary(Zinf, ?ZDICT), + zlib:inflate(Zinf, <<>>) + end. + +from_flag(0) -> false; +from_flag(1) -> true. + +%% Build. + +data(StreamID, IsFin, Data) -> + IsFinFlag = to_flag(IsFin), + Length = iolist_size(Data), + [<< 0:1, StreamID:31, 0:7, IsFinFlag:1, Length:24 >>, Data]. + +syn_stream(Zdef, StreamID, AssocToStreamID, IsFin, IsUnidirectional, + Priority, Method, Scheme, Host, Path, Version, Headers) -> + IsFinFlag = to_flag(IsFin), + IsUnidirectionalFlag = to_flag(IsUnidirectional), + HeaderBlock = build_headers(Zdef, [ + {<<":method">>, Method}, + {<<":scheme">>, Scheme}, + {<<":host">>, Host}, + {<<":path">>, Path}, + {<<":version">>, Version} + |Headers]), + Length = 10 + iolist_size(HeaderBlock), + [<< 1:1, 3:15, 1:16, 0:6, IsUnidirectionalFlag:1, IsFinFlag:1, + Length:24, 0:1, StreamID:31, 0:1, AssocToStreamID:31, + Priority:3, 0:5, 0:8 >>, HeaderBlock]. + +syn_reply(Zdef, StreamID, IsFin, Status, Version, Headers) -> + IsFinFlag = to_flag(IsFin), + HeaderBlock = build_headers(Zdef, [ + {<<":status">>, Status}, + {<<":version">>, Version} + |Headers]), + Length = 4 + iolist_size(HeaderBlock), + [<< 1:1, 3:15, 2:16, 0:7, IsFinFlag:1, Length:24, + 0:1, StreamID:31 >>, HeaderBlock]. + +rst_stream(StreamID, Status) -> + StatusCode = case Status of + protocol_error -> 1; + invalid_stream -> 2; + refused_stream -> 3; + unsupported_version -> 4; + cancel -> 5; + internal_error -> 6; + flow_control_error -> 7; + stream_in_use -> 8; + stream_already_closed -> 9; + invalid_credentials -> 10; + frame_too_large -> 11 + end, + << 1:1, 3:15, 3:16, 0:8, 8:24, + 0:1, StreamID:31, StatusCode:32 >>. + +settings(ClearSettingsFlag, Settings) -> + IsClearSettingsFlag = to_flag(ClearSettingsFlag), + NbEntries = length(Settings), + Entries = [begin + IsWasPersistedFlag = to_flag(WasPersistedFlag), + IsPersistFlag = to_flag(PersistFlag), + ID = case Key of + upload_bandwidth -> 1; + download_bandwidth -> 2; + round_trip_time -> 3; + max_concurrent_streams -> 4; + current_cwnd -> 5; + download_retrans_rate -> 6; + initial_window_size -> 7; + client_certificate_vector_size -> 8 + end, + << 0:6, IsWasPersistedFlag:1, IsPersistFlag:1, ID:24, Value:32 >> + end || {Key, Value, WasPersistedFlag, PersistFlag} <- Settings], + Length = 4 + iolist_size(Entries), + [<< 1:1, 3:15, 4:16, 0:7, IsClearSettingsFlag:1, Length:24, + NbEntries:32 >>, Entries]. + +-ifdef(TEST). +settings_frame_test() -> + ClearSettingsFlag = false, + Settings = [{max_concurrent_streams,1000,false,false}, + {initial_window_size,10485760,false,false}], + Bin = list_to_binary(cow_spdy:settings(ClearSettingsFlag, Settings)), + P = cow_spdy:parse(Bin, undefined), + P = {settings, ClearSettingsFlag, Settings}, + ok. +-endif. + +ping(PingID) -> + << 1:1, 3:15, 6:16, 0:8, 4:24, PingID:32 >>. 
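+
+%% Editor's note: added illustration, not part of upstream cowlib.
+%% Control frames built by this module parse back to the equivalent
+%% tuples; no zlib context is needed for PING or RST_STREAM.
+-ifdef(TEST).
+build_parse_roundtrip_example_test() ->
+	{ping, 123} = parse(ping(123), undefined),
+	{rst_stream, 1, cancel} = parse(rst_stream(1, cancel), undefined),
+	ok.
+-endif.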
+ +goaway(LastGoodStreamID, Status) -> + StatusCode = case Status of + ok -> 0; + protocol_error -> 1; + internal_error -> 2 + end, + << 1:1, 3:15, 7:16, 0:8, 8:24, + 0:1, LastGoodStreamID:31, StatusCode:32 >>. + +%% @todo headers +%% @todo window_update + +build_headers(Zdef, Headers) -> + Headers1 = merge_headers(lists:sort(Headers), []), + NbHeaders = length(Headers1), + Headers2 = [begin + L1 = iolist_size(Key), + L2 = iolist_size(Value), + [<< L1:32 >>, Key, << L2:32 >>, Value] + end || {Key, Value} <- Headers1], + zlib:deflate(Zdef, [<< NbHeaders:32 >>, Headers2], full). + +merge_headers([], Acc) -> + lists:reverse(Acc); +merge_headers([{Name, Value1}, {Name, Value2}|Tail], Acc) -> + merge_headers([{Name, [Value1, 0, Value2]}|Tail], Acc); +merge_headers([Head|Tail], Acc) -> + merge_headers(Tail, [Head|Acc]). + +-ifdef(TEST). +merge_headers_test_() -> + Tests = [ + {[{<<"set-cookie">>, <<"session=123">>}, {<<"set-cookie">>, <<"other=456">>}, {<<"content-type">>, <<"text/html">>}], + [{<<"set-cookie">>, [<<"session=123">>, 0, <<"other=456">>]}, {<<"content-type">>, <<"text/html">>}]} + ], + [fun() -> D = merge_headers(R, []) end || {R, D} <- Tests]. +-endif. + +to_flag(false) -> 0; +to_flag(true) -> 1. diff --git a/deps/cowlib/src/cow_spdy.hrl b/deps/cowlib/src/cow_spdy.hrl new file mode 100644 index 0000000..9637b1c --- /dev/null +++ b/deps/cowlib/src/cow_spdy.hrl @@ -0,0 +1,181 @@ +%% Zlib dictionary. + +-define(ZDICT, << + 16#00, 16#00, 16#00, 16#07, 16#6f, 16#70, 16#74, 16#69, + 16#6f, 16#6e, 16#73, 16#00, 16#00, 16#00, 16#04, 16#68, + 16#65, 16#61, 16#64, 16#00, 16#00, 16#00, 16#04, 16#70, + 16#6f, 16#73, 16#74, 16#00, 16#00, 16#00, 16#03, 16#70, + 16#75, 16#74, 16#00, 16#00, 16#00, 16#06, 16#64, 16#65, + 16#6c, 16#65, 16#74, 16#65, 16#00, 16#00, 16#00, 16#05, + 16#74, 16#72, 16#61, 16#63, 16#65, 16#00, 16#00, 16#00, + 16#06, 16#61, 16#63, 16#63, 16#65, 16#70, 16#74, 16#00, + 16#00, 16#00, 16#0e, 16#61, 16#63, 16#63, 16#65, 16#70, + 16#74, 16#2d, 16#63, 16#68, 16#61, 16#72, 16#73, 16#65, + 16#74, 16#00, 16#00, 16#00, 16#0f, 16#61, 16#63, 16#63, + 16#65, 16#70, 16#74, 16#2d, 16#65, 16#6e, 16#63, 16#6f, + 16#64, 16#69, 16#6e, 16#67, 16#00, 16#00, 16#00, 16#0f, + 16#61, 16#63, 16#63, 16#65, 16#70, 16#74, 16#2d, 16#6c, + 16#61, 16#6e, 16#67, 16#75, 16#61, 16#67, 16#65, 16#00, + 16#00, 16#00, 16#0d, 16#61, 16#63, 16#63, 16#65, 16#70, + 16#74, 16#2d, 16#72, 16#61, 16#6e, 16#67, 16#65, 16#73, + 16#00, 16#00, 16#00, 16#03, 16#61, 16#67, 16#65, 16#00, + 16#00, 16#00, 16#05, 16#61, 16#6c, 16#6c, 16#6f, 16#77, + 16#00, 16#00, 16#00, 16#0d, 16#61, 16#75, 16#74, 16#68, + 16#6f, 16#72, 16#69, 16#7a, 16#61, 16#74, 16#69, 16#6f, + 16#6e, 16#00, 16#00, 16#00, 16#0d, 16#63, 16#61, 16#63, + 16#68, 16#65, 16#2d, 16#63, 16#6f, 16#6e, 16#74, 16#72, + 16#6f, 16#6c, 16#00, 16#00, 16#00, 16#0a, 16#63, 16#6f, + 16#6e, 16#6e, 16#65, 16#63, 16#74, 16#69, 16#6f, 16#6e, + 16#00, 16#00, 16#00, 16#0c, 16#63, 16#6f, 16#6e, 16#74, + 16#65, 16#6e, 16#74, 16#2d, 16#62, 16#61, 16#73, 16#65, + 16#00, 16#00, 16#00, 16#10, 16#63, 16#6f, 16#6e, 16#74, + 16#65, 16#6e, 16#74, 16#2d, 16#65, 16#6e, 16#63, 16#6f, + 16#64, 16#69, 16#6e, 16#67, 16#00, 16#00, 16#00, 16#10, + 16#63, 16#6f, 16#6e, 16#74, 16#65, 16#6e, 16#74, 16#2d, + 16#6c, 16#61, 16#6e, 16#67, 16#75, 16#61, 16#67, 16#65, + 16#00, 16#00, 16#00, 16#0e, 16#63, 16#6f, 16#6e, 16#74, + 16#65, 16#6e, 16#74, 16#2d, 16#6c, 16#65, 16#6e, 16#67, + 16#74, 16#68, 16#00, 16#00, 16#00, 16#10, 16#63, 16#6f, + 16#6e, 16#74, 16#65, 16#6e, 16#74, 16#2d, 16#6c, 16#6f, + 16#63, 
16#61, 16#74, 16#69, 16#6f, 16#6e, 16#00, 16#00, + 16#00, 16#0b, 16#63, 16#6f, 16#6e, 16#74, 16#65, 16#6e, + 16#74, 16#2d, 16#6d, 16#64, 16#35, 16#00, 16#00, 16#00, + 16#0d, 16#63, 16#6f, 16#6e, 16#74, 16#65, 16#6e, 16#74, + 16#2d, 16#72, 16#61, 16#6e, 16#67, 16#65, 16#00, 16#00, + 16#00, 16#0c, 16#63, 16#6f, 16#6e, 16#74, 16#65, 16#6e, + 16#74, 16#2d, 16#74, 16#79, 16#70, 16#65, 16#00, 16#00, + 16#00, 16#04, 16#64, 16#61, 16#74, 16#65, 16#00, 16#00, + 16#00, 16#04, 16#65, 16#74, 16#61, 16#67, 16#00, 16#00, + 16#00, 16#06, 16#65, 16#78, 16#70, 16#65, 16#63, 16#74, + 16#00, 16#00, 16#00, 16#07, 16#65, 16#78, 16#70, 16#69, + 16#72, 16#65, 16#73, 16#00, 16#00, 16#00, 16#04, 16#66, + 16#72, 16#6f, 16#6d, 16#00, 16#00, 16#00, 16#04, 16#68, + 16#6f, 16#73, 16#74, 16#00, 16#00, 16#00, 16#08, 16#69, + 16#66, 16#2d, 16#6d, 16#61, 16#74, 16#63, 16#68, 16#00, + 16#00, 16#00, 16#11, 16#69, 16#66, 16#2d, 16#6d, 16#6f, + 16#64, 16#69, 16#66, 16#69, 16#65, 16#64, 16#2d, 16#73, + 16#69, 16#6e, 16#63, 16#65, 16#00, 16#00, 16#00, 16#0d, + 16#69, 16#66, 16#2d, 16#6e, 16#6f, 16#6e, 16#65, 16#2d, + 16#6d, 16#61, 16#74, 16#63, 16#68, 16#00, 16#00, 16#00, + 16#08, 16#69, 16#66, 16#2d, 16#72, 16#61, 16#6e, 16#67, + 16#65, 16#00, 16#00, 16#00, 16#13, 16#69, 16#66, 16#2d, + 16#75, 16#6e, 16#6d, 16#6f, 16#64, 16#69, 16#66, 16#69, + 16#65, 16#64, 16#2d, 16#73, 16#69, 16#6e, 16#63, 16#65, + 16#00, 16#00, 16#00, 16#0d, 16#6c, 16#61, 16#73, 16#74, + 16#2d, 16#6d, 16#6f, 16#64, 16#69, 16#66, 16#69, 16#65, + 16#64, 16#00, 16#00, 16#00, 16#08, 16#6c, 16#6f, 16#63, + 16#61, 16#74, 16#69, 16#6f, 16#6e, 16#00, 16#00, 16#00, + 16#0c, 16#6d, 16#61, 16#78, 16#2d, 16#66, 16#6f, 16#72, + 16#77, 16#61, 16#72, 16#64, 16#73, 16#00, 16#00, 16#00, + 16#06, 16#70, 16#72, 16#61, 16#67, 16#6d, 16#61, 16#00, + 16#00, 16#00, 16#12, 16#70, 16#72, 16#6f, 16#78, 16#79, + 16#2d, 16#61, 16#75, 16#74, 16#68, 16#65, 16#6e, 16#74, + 16#69, 16#63, 16#61, 16#74, 16#65, 16#00, 16#00, 16#00, + 16#13, 16#70, 16#72, 16#6f, 16#78, 16#79, 16#2d, 16#61, + 16#75, 16#74, 16#68, 16#6f, 16#72, 16#69, 16#7a, 16#61, + 16#74, 16#69, 16#6f, 16#6e, 16#00, 16#00, 16#00, 16#05, + 16#72, 16#61, 16#6e, 16#67, 16#65, 16#00, 16#00, 16#00, + 16#07, 16#72, 16#65, 16#66, 16#65, 16#72, 16#65, 16#72, + 16#00, 16#00, 16#00, 16#0b, 16#72, 16#65, 16#74, 16#72, + 16#79, 16#2d, 16#61, 16#66, 16#74, 16#65, 16#72, 16#00, + 16#00, 16#00, 16#06, 16#73, 16#65, 16#72, 16#76, 16#65, + 16#72, 16#00, 16#00, 16#00, 16#02, 16#74, 16#65, 16#00, + 16#00, 16#00, 16#07, 16#74, 16#72, 16#61, 16#69, 16#6c, + 16#65, 16#72, 16#00, 16#00, 16#00, 16#11, 16#74, 16#72, + 16#61, 16#6e, 16#73, 16#66, 16#65, 16#72, 16#2d, 16#65, + 16#6e, 16#63, 16#6f, 16#64, 16#69, 16#6e, 16#67, 16#00, + 16#00, 16#00, 16#07, 16#75, 16#70, 16#67, 16#72, 16#61, + 16#64, 16#65, 16#00, 16#00, 16#00, 16#0a, 16#75, 16#73, + 16#65, 16#72, 16#2d, 16#61, 16#67, 16#65, 16#6e, 16#74, + 16#00, 16#00, 16#00, 16#04, 16#76, 16#61, 16#72, 16#79, + 16#00, 16#00, 16#00, 16#03, 16#76, 16#69, 16#61, 16#00, + 16#00, 16#00, 16#07, 16#77, 16#61, 16#72, 16#6e, 16#69, + 16#6e, 16#67, 16#00, 16#00, 16#00, 16#10, 16#77, 16#77, + 16#77, 16#2d, 16#61, 16#75, 16#74, 16#68, 16#65, 16#6e, + 16#74, 16#69, 16#63, 16#61, 16#74, 16#65, 16#00, 16#00, + 16#00, 16#06, 16#6d, 16#65, 16#74, 16#68, 16#6f, 16#64, + 16#00, 16#00, 16#00, 16#03, 16#67, 16#65, 16#74, 16#00, + 16#00, 16#00, 16#06, 16#73, 16#74, 16#61, 16#74, 16#75, + 16#73, 16#00, 16#00, 16#00, 16#06, 16#32, 16#30, 16#30, + 16#20, 16#4f, 16#4b, 16#00, 16#00, 16#00, 16#07, 16#76, + 16#65, 16#72, 16#73, 
16#69, 16#6f, 16#6e, 16#00, 16#00, + 16#00, 16#08, 16#48, 16#54, 16#54, 16#50, 16#2f, 16#31, + 16#2e, 16#31, 16#00, 16#00, 16#00, 16#03, 16#75, 16#72, + 16#6c, 16#00, 16#00, 16#00, 16#06, 16#70, 16#75, 16#62, + 16#6c, 16#69, 16#63, 16#00, 16#00, 16#00, 16#0a, 16#73, + 16#65, 16#74, 16#2d, 16#63, 16#6f, 16#6f, 16#6b, 16#69, + 16#65, 16#00, 16#00, 16#00, 16#0a, 16#6b, 16#65, 16#65, + 16#70, 16#2d, 16#61, 16#6c, 16#69, 16#76, 16#65, 16#00, + 16#00, 16#00, 16#06, 16#6f, 16#72, 16#69, 16#67, 16#69, + 16#6e, 16#31, 16#30, 16#30, 16#31, 16#30, 16#31, 16#32, + 16#30, 16#31, 16#32, 16#30, 16#32, 16#32, 16#30, 16#35, + 16#32, 16#30, 16#36, 16#33, 16#30, 16#30, 16#33, 16#30, + 16#32, 16#33, 16#30, 16#33, 16#33, 16#30, 16#34, 16#33, + 16#30, 16#35, 16#33, 16#30, 16#36, 16#33, 16#30, 16#37, + 16#34, 16#30, 16#32, 16#34, 16#30, 16#35, 16#34, 16#30, + 16#36, 16#34, 16#30, 16#37, 16#34, 16#30, 16#38, 16#34, + 16#30, 16#39, 16#34, 16#31, 16#30, 16#34, 16#31, 16#31, + 16#34, 16#31, 16#32, 16#34, 16#31, 16#33, 16#34, 16#31, + 16#34, 16#34, 16#31, 16#35, 16#34, 16#31, 16#36, 16#34, + 16#31, 16#37, 16#35, 16#30, 16#32, 16#35, 16#30, 16#34, + 16#35, 16#30, 16#35, 16#32, 16#30, 16#33, 16#20, 16#4e, + 16#6f, 16#6e, 16#2d, 16#41, 16#75, 16#74, 16#68, 16#6f, + 16#72, 16#69, 16#74, 16#61, 16#74, 16#69, 16#76, 16#65, + 16#20, 16#49, 16#6e, 16#66, 16#6f, 16#72, 16#6d, 16#61, + 16#74, 16#69, 16#6f, 16#6e, 16#32, 16#30, 16#34, 16#20, + 16#4e, 16#6f, 16#20, 16#43, 16#6f, 16#6e, 16#74, 16#65, + 16#6e, 16#74, 16#33, 16#30, 16#31, 16#20, 16#4d, 16#6f, + 16#76, 16#65, 16#64, 16#20, 16#50, 16#65, 16#72, 16#6d, + 16#61, 16#6e, 16#65, 16#6e, 16#74, 16#6c, 16#79, 16#34, + 16#30, 16#30, 16#20, 16#42, 16#61, 16#64, 16#20, 16#52, + 16#65, 16#71, 16#75, 16#65, 16#73, 16#74, 16#34, 16#30, + 16#31, 16#20, 16#55, 16#6e, 16#61, 16#75, 16#74, 16#68, + 16#6f, 16#72, 16#69, 16#7a, 16#65, 16#64, 16#34, 16#30, + 16#33, 16#20, 16#46, 16#6f, 16#72, 16#62, 16#69, 16#64, + 16#64, 16#65, 16#6e, 16#34, 16#30, 16#34, 16#20, 16#4e, + 16#6f, 16#74, 16#20, 16#46, 16#6f, 16#75, 16#6e, 16#64, + 16#35, 16#30, 16#30, 16#20, 16#49, 16#6e, 16#74, 16#65, + 16#72, 16#6e, 16#61, 16#6c, 16#20, 16#53, 16#65, 16#72, + 16#76, 16#65, 16#72, 16#20, 16#45, 16#72, 16#72, 16#6f, + 16#72, 16#35, 16#30, 16#31, 16#20, 16#4e, 16#6f, 16#74, + 16#20, 16#49, 16#6d, 16#70, 16#6c, 16#65, 16#6d, 16#65, + 16#6e, 16#74, 16#65, 16#64, 16#35, 16#30, 16#33, 16#20, + 16#53, 16#65, 16#72, 16#76, 16#69, 16#63, 16#65, 16#20, + 16#55, 16#6e, 16#61, 16#76, 16#61, 16#69, 16#6c, 16#61, + 16#62, 16#6c, 16#65, 16#4a, 16#61, 16#6e, 16#20, 16#46, + 16#65, 16#62, 16#20, 16#4d, 16#61, 16#72, 16#20, 16#41, + 16#70, 16#72, 16#20, 16#4d, 16#61, 16#79, 16#20, 16#4a, + 16#75, 16#6e, 16#20, 16#4a, 16#75, 16#6c, 16#20, 16#41, + 16#75, 16#67, 16#20, 16#53, 16#65, 16#70, 16#74, 16#20, + 16#4f, 16#63, 16#74, 16#20, 16#4e, 16#6f, 16#76, 16#20, + 16#44, 16#65, 16#63, 16#20, 16#30, 16#30, 16#3a, 16#30, + 16#30, 16#3a, 16#30, 16#30, 16#20, 16#4d, 16#6f, 16#6e, + 16#2c, 16#20, 16#54, 16#75, 16#65, 16#2c, 16#20, 16#57, + 16#65, 16#64, 16#2c, 16#20, 16#54, 16#68, 16#75, 16#2c, + 16#20, 16#46, 16#72, 16#69, 16#2c, 16#20, 16#53, 16#61, + 16#74, 16#2c, 16#20, 16#53, 16#75, 16#6e, 16#2c, 16#20, + 16#47, 16#4d, 16#54, 16#63, 16#68, 16#75, 16#6e, 16#6b, + 16#65, 16#64, 16#2c, 16#74, 16#65, 16#78, 16#74, 16#2f, + 16#68, 16#74, 16#6d, 16#6c, 16#2c, 16#69, 16#6d, 16#61, + 16#67, 16#65, 16#2f, 16#70, 16#6e, 16#67, 16#2c, 16#69, + 16#6d, 16#61, 16#67, 16#65, 16#2f, 16#6a, 16#70, 16#67, + 16#2c, 16#69, 16#6d, 16#61, 16#67, 
16#65, 16#2f, 16#67, + 16#69, 16#66, 16#2c, 16#61, 16#70, 16#70, 16#6c, 16#69, + 16#63, 16#61, 16#74, 16#69, 16#6f, 16#6e, 16#2f, 16#78, + 16#6d, 16#6c, 16#2c, 16#61, 16#70, 16#70, 16#6c, 16#69, + 16#63, 16#61, 16#74, 16#69, 16#6f, 16#6e, 16#2f, 16#78, + 16#68, 16#74, 16#6d, 16#6c, 16#2b, 16#78, 16#6d, 16#6c, + 16#2c, 16#74, 16#65, 16#78, 16#74, 16#2f, 16#70, 16#6c, + 16#61, 16#69, 16#6e, 16#2c, 16#74, 16#65, 16#78, 16#74, + 16#2f, 16#6a, 16#61, 16#76, 16#61, 16#73, 16#63, 16#72, + 16#69, 16#70, 16#74, 16#2c, 16#70, 16#75, 16#62, 16#6c, + 16#69, 16#63, 16#70, 16#72, 16#69, 16#76, 16#61, 16#74, + 16#65, 16#6d, 16#61, 16#78, 16#2d, 16#61, 16#67, 16#65, + 16#3d, 16#67, 16#7a, 16#69, 16#70, 16#2c, 16#64, 16#65, + 16#66, 16#6c, 16#61, 16#74, 16#65, 16#2c, 16#73, 16#64, + 16#63, 16#68, 16#63, 16#68, 16#61, 16#72, 16#73, 16#65, + 16#74, 16#3d, 16#75, 16#74, 16#66, 16#2d, 16#38, 16#63, + 16#68, 16#61, 16#72, 16#73, 16#65, 16#74, 16#3d, 16#69, + 16#73, 16#6f, 16#2d, 16#38, 16#38, 16#35, 16#39, 16#2d, + 16#31, 16#2c, 16#75, 16#74, 16#66, 16#2d, 16#2c, 16#2a, + 16#2c, 16#65, 16#6e, 16#71, 16#3d, 16#30, 16#2e >>). diff --git a/deps/cowlib/src/cow_sse.erl b/deps/cowlib/src/cow_sse.erl new file mode 100644 index 0000000..7aa98ce --- /dev/null +++ b/deps/cowlib/src/cow_sse.erl @@ -0,0 +1,348 @@ +%% Copyright (c) 2017-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cow_sse). + +-export([init/0]). +-export([parse/2]). +-export([events/1]). +-export([event/1]). + +-record(state, { + state_name = bom :: bom | events, + buffer = <<>> :: binary(), + last_event_id = <<>> :: binary(), + last_event_id_set = false :: boolean(), + event_type = <<>> :: binary(), + data = [] :: iolist(), + retry = undefined :: undefined | non_neg_integer() +}). +-type state() :: #state{}. +-export_type([state/0]). + +-type parsed_event() :: #{ + last_event_id := binary(), + event_type := binary(), + data := iolist() +}. + +-type event() :: #{ + comment => iodata(), + data => iodata(), + event => iodata() | atom(), + id => iodata(), + retry => non_neg_integer() +}. +-export_type([event/0]). + +-spec init() -> state(). +init() -> + #state{}. + +%% @todo Add a function to retrieve the retry value from the state. + +-spec parse(binary(), state()) + -> {event, parsed_event(), State} | {more, State}. +parse(Data0, State=#state{state_name=bom, buffer=Buffer}) -> + Data1 = case Buffer of + <<>> -> Data0; + _ -> << Buffer/binary, Data0/binary >> + end, + case Data1 of + %% Skip the BOM. + << 16#fe, 16#ff, Data/bits >> -> + parse_event(Data, State#state{state_name=events, buffer= <<>>}); + %% Not enough data to know wether we have a BOM. + << 16#fe >> -> + {more, State#state{buffer=Data1}}; + <<>> -> + {more, State}; + %% No BOM. 
+ _ -> + parse_event(Data1, State#state{state_name=events, buffer= <<>>}) + end; +%% Try to process data from the buffer if there is no new input. +parse(<<>>, State=#state{buffer=Buffer}) -> + parse_event(Buffer, State#state{buffer= <<>>}); +%% Otherwise process the input data as-is. +parse(Data0, State=#state{buffer=Buffer}) -> + Data = case Buffer of + <<>> -> Data0; + _ -> << Buffer/binary, Data0/binary >> + end, + parse_event(Data, State). + +parse_event(Data, State0) -> + case binary:split(Data, [<<"\r\n">>, <<"\r">>, <<"\n">>]) of + [Line, Rest] -> + case parse_line(Line, State0) of + {ok, State} -> + parse_event(Rest, State); + {event, Event, State} -> + {event, Event, State#state{buffer=Rest}} + end; + [_] -> + {more, State0#state{buffer=Data}} + end. + +%% Dispatch events on empty line. +parse_line(<<>>, State) -> + dispatch_event(State); +%% Ignore comments. +parse_line(<< $:, _/bits >>, State) -> + {ok, State}; +%% Normal line. +parse_line(Line, State) -> + case binary:split(Line, [<<":\s">>, <<":">>]) of + [Field, Value] -> + process_field(Field, Value, State); + [Field] -> + process_field(Field, <<>>, State) + end. + +process_field(<<"event">>, Value, State) -> + {ok, State#state{event_type=Value}}; +process_field(<<"data">>, Value, State=#state{data=Data}) -> + {ok, State#state{data=[<<$\n>>, Value|Data]}}; +process_field(<<"id">>, Value, State) -> + {ok, State#state{last_event_id=Value, last_event_id_set=true}}; +process_field(<<"retry">>, Value, State) -> + try + {ok, State#state{retry=binary_to_integer(Value)}} + catch _:_ -> + {ok, State} + end; +process_field(_, _, State) -> + {ok, State}. + +%% Data is an empty string; abort. +dispatch_event(State=#state{last_event_id_set=false, data=[]}) -> + {ok, State#state{event_type= <<>>}}; +%% Data is an empty string but we have a last_event_id: +%% propagate it on its own so that the caller knows the +%% most recent ID. +dispatch_event(State=#state{last_event_id=LastEventID, data=[]}) -> + {event, #{ + last_event_id => LastEventID + }, State#state{last_event_id_set=false, event_type= <<>>}}; +%% Dispatch the event. +%% +%% Always remove the last linebreak from the data. +dispatch_event(State=#state{last_event_id=LastEventID, + event_type=EventType, data=[_|Data]}) -> + {event, #{ + last_event_id => LastEventID, + event_type => case EventType of + <<>> -> <<"message">>; + _ -> EventType + end, + data => lists:reverse(Data) + }, State#state{last_event_id_set=false, event_type= <<>>, data=[]}}. + +-ifdef(TEST). +parse_example1_test() -> + {event, #{ + event_type := <<"message">>, + last_event_id := <<>>, + data := Data + }, State} = parse(<< + "data: YHOO\n" + "data: +2\n" + "data: 10\n" + "\n">>, init()), + <<"YHOO\n+2\n10">> = iolist_to_binary(Data), + {more, _} = parse(<<>>, State), + ok. + +parse_example2_test() -> + {event, #{ + event_type := <<"message">>, + last_event_id := <<"1">>, + data := Data1 + }, State0} = parse(<< + ": test stream\n" + "\n" + "data: first event\n" + "id: 1\n" + "\n" + "data:second event\n" + "id\n" + "\n" + "data: third event\n" + "\n">>, init()), + <<"first event">> = iolist_to_binary(Data1), + {event, #{ + event_type := <<"message">>, + last_event_id := <<>>, + data := Data2 + }, State1} = parse(<<>>, State0), + <<"second event">> = iolist_to_binary(Data2), + {event, #{ + event_type := <<"message">>, + last_event_id := <<>>, + data := Data3 + }, State} = parse(<<>>, State1), + <<" third event">> = iolist_to_binary(Data3), + {more, _} = parse(<<>>, State), + ok. 
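+
+%% Editor's note: added illustration, not part of upstream cowlib.
+%% Events may arrive split across packets; feeding the returned state
+%% into the next call continues parsing where the previous one stopped.
+parse_incremental_example_test() ->
+	{more, State0} = parse(<<"data: hel">>, init()),
+	{event, #{data := Data}, _} = parse(<<"lo\n\n">>, State0),
+	<<"hello">> = iolist_to_binary(Data),
+	ok.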
+ +parse_example3_test() -> + {event, #{ + event_type := <<"message">>, + last_event_id := <<>>, + data := Data1 + }, State0} = parse(<< + "data\n" + "\n" + "data\n" + "data\n" + "\n" + "data:\n">>, init()), + <<>> = iolist_to_binary(Data1), + {event, #{ + event_type := <<"message">>, + last_event_id := <<>>, + data := Data2 + }, State} = parse(<<>>, State0), + <<"\n">> = iolist_to_binary(Data2), + {more, _} = parse(<<>>, State), + ok. + +parse_example4_test() -> + {event, Event, State0} = parse(<< + "data:test\n" + "\n" + "data: test\n" + "\n">>, init()), + {event, Event, State} = parse(<<>>, State0), + {more, _} = parse(<<>>, State), + ok. + +parse_id_without_data_test() -> + {event, Event1, State0} = parse(<< + "id: 1\n" + "\n" + "data: data\n" + "\n" + "id: 2\n" + "\n">>, init()), + 1 = maps:size(Event1), + #{last_event_id := <<"1">>} = Event1, + {event, #{ + event_type := <<"message">>, + last_event_id := <<"1">>, + data := Data + }, State1} = parse(<<>>, State0), + <<"data">> = iolist_to_binary(Data), + {event, Event2, State} = parse(<<>>, State1), + 1 = maps:size(Event2), + #{last_event_id := <<"2">>} = Event2, + {more, _} = parse(<<>>, State), + ok. + +parse_repeated_id_without_data_test() -> + {event, Event1, State0} = parse(<< + "id: 1\n" + "\n" + "event: message\n" %% This will be ignored since there's no data. + "\n" + "id: 1\n" + "\n" + "id: 2\n" + "\n">>, init()), + {event, Event1, State1} = parse(<<>>, State0), + 1 = maps:size(Event1), + #{last_event_id := <<"1">>} = Event1, + {event, Event2, State} = parse(<<>>, State1), + 1 = maps:size(Event2), + #{last_event_id := <<"2">>} = Event2, + {more, _} = parse(<<>>, State), + ok. + +parse_split_event_test() -> + {more, State} = parse(<< + "data: AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA" + "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA">>, init()), + {event, _, _} = parse(<<"==\n\n">>, State), + ok. +-endif. + +-spec events([event()]) -> iolist(). +events(Events) -> + [event(Event) || Event <- Events]. + +-spec event(event()) -> iolist(). +event(Event) -> + [ + event_comment(Event), + event_id(Event), + event_name(Event), + event_data(Event), + event_retry(Event), + $\n + ]. + +event_comment(#{comment := Comment}) -> + prefix_lines(Comment, <<>>); +event_comment(_) -> + []. + +event_id(#{id := ID}) -> + nomatch = binary:match(iolist_to_binary(ID), <<"\n">>), + [<<"id: ">>, ID, $\n]; +event_id(_) -> + []. + +event_name(#{event := Name0}) -> + Name = if + is_atom(Name0) -> atom_to_binary(Name0, utf8); + true -> iolist_to_binary(Name0) + end, + nomatch = binary:match(Name, <<"\n">>), + [<<"event: ">>, Name, $\n]; +event_name(_) -> + []. + +event_data(#{data := Data}) -> + prefix_lines(Data, <<"data">>); +event_data(_) -> + []. + +event_retry(#{retry := Retry}) -> + [<<"retry: ">>, integer_to_binary(Retry), $\n]; +event_retry(_) -> + []. + +prefix_lines(IoData, Prefix) -> + Lines = binary:split(iolist_to_binary(IoData), <<"\n">>, [global]), + [[Prefix, <<": ">>, Line, $\n] || Line <- Lines]. + +-ifdef(TEST). 
+event_test() -> + _ = event(#{}), + _ = event(#{comment => "test"}), + _ = event(#{data => "test"}), + _ = event(#{data => "test\ntest\ntest"}), + _ = event(#{data => "test\ntest\ntest\n"}), + _ = event(#{data => <<"test\ntest\ntest">>}), + _ = event(#{data => [<<"test">>, $\n, <<"test">>, [$\n, "test"]]}), + _ = event(#{event => test}), + _ = event(#{event => "test"}), + _ = event(#{id => "test"}), + _ = event(#{retry => 5000}), + _ = event(#{event => "test", data => "test"}), + _ = event(#{id => "test", event => "test", data => "test"}), + ok. +-endif. diff --git a/deps/cowlib/src/cow_uri.erl b/deps/cowlib/src/cow_uri.erl new file mode 100644 index 0000000..c0d9903 --- /dev/null +++ b/deps/cowlib/src/cow_uri.erl @@ -0,0 +1,339 @@ +%% Copyright (c) 2016-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cow_uri). + +-export([urldecode/1]). +-export([urlencode/1]). + +%% @doc Decode a percent encoded string. (RFC3986 2.1) + +-spec urldecode(B) -> B when B::binary(). +urldecode(B) -> + urldecode(B, <<>>). + +urldecode(<< $%, H, L, Rest/bits >>, Acc) -> + C = (unhex(H) bsl 4 bor unhex(L)), + urldecode(Rest, << Acc/bits, C >>); +urldecode(<< $!, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $! >>); +urldecode(<< $$, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $$ >>); +urldecode(<< $&, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $& >>); +urldecode(<< $', Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $' >>); +urldecode(<< $(, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $( >>); +urldecode(<< $), Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $) >>); +urldecode(<< $*, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $* >>); +urldecode(<< $+, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $+ >>); +urldecode(<< $,, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $, >>); +urldecode(<< $-, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $- >>); +urldecode(<< $., Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $. 
>>); +urldecode(<< $0, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $0 >>); +urldecode(<< $1, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $1 >>); +urldecode(<< $2, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $2 >>); +urldecode(<< $3, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $3 >>); +urldecode(<< $4, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $4 >>); +urldecode(<< $5, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $5 >>); +urldecode(<< $6, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $6 >>); +urldecode(<< $7, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $7 >>); +urldecode(<< $8, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $8 >>); +urldecode(<< $9, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $9 >>); +urldecode(<< $:, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $: >>); +urldecode(<< $;, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $; >>); +urldecode(<< $=, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $= >>); +urldecode(<< $@, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $@ >>); +urldecode(<< $A, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $A >>); +urldecode(<< $B, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $B >>); +urldecode(<< $C, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $C >>); +urldecode(<< $D, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $D >>); +urldecode(<< $E, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $E >>); +urldecode(<< $F, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $F >>); +urldecode(<< $G, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $G >>); +urldecode(<< $H, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $H >>); +urldecode(<< $I, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $I >>); +urldecode(<< $J, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $J >>); +urldecode(<< $K, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $K >>); +urldecode(<< $L, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $L >>); +urldecode(<< $M, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $M >>); +urldecode(<< $N, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $N >>); +urldecode(<< $O, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $O >>); +urldecode(<< $P, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $P >>); +urldecode(<< $Q, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $Q >>); +urldecode(<< $R, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $R >>); +urldecode(<< $S, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $S >>); +urldecode(<< $T, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $T >>); +urldecode(<< $U, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $U >>); +urldecode(<< $V, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $V >>); +urldecode(<< $W, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $W >>); +urldecode(<< $X, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $X >>); +urldecode(<< $Y, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $Y >>); +urldecode(<< $Z, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $Z >>); +urldecode(<< $_, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $_ >>); +urldecode(<< $a, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $a >>); +urldecode(<< $b, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $b >>); +urldecode(<< $c, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $c >>); +urldecode(<< $d, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $d >>); +urldecode(<< $e, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $e >>); 
+urldecode(<< $f, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $f >>); +urldecode(<< $g, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $g >>); +urldecode(<< $h, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $h >>); +urldecode(<< $i, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $i >>); +urldecode(<< $j, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $j >>); +urldecode(<< $k, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $k >>); +urldecode(<< $l, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $l >>); +urldecode(<< $m, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $m >>); +urldecode(<< $n, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $n >>); +urldecode(<< $o, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $o >>); +urldecode(<< $p, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $p >>); +urldecode(<< $q, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $q >>); +urldecode(<< $r, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $r >>); +urldecode(<< $s, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $s >>); +urldecode(<< $t, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $t >>); +urldecode(<< $u, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $u >>); +urldecode(<< $v, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $v >>); +urldecode(<< $w, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $w >>); +urldecode(<< $x, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $x >>); +urldecode(<< $y, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $y >>); +urldecode(<< $z, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $z >>); +urldecode(<< $~, Rest/bits >>, Acc) -> urldecode(Rest, << Acc/bits, $~ >>); +urldecode(<<>>, Acc) -> Acc. + +unhex($0) -> 0; +unhex($1) -> 1; +unhex($2) -> 2; +unhex($3) -> 3; +unhex($4) -> 4; +unhex($5) -> 5; +unhex($6) -> 6; +unhex($7) -> 7; +unhex($8) -> 8; +unhex($9) -> 9; +unhex($A) -> 10; +unhex($B) -> 11; +unhex($C) -> 12; +unhex($D) -> 13; +unhex($E) -> 14; +unhex($F) -> 15; +unhex($a) -> 10; +unhex($b) -> 11; +unhex($c) -> 12; +unhex($d) -> 13; +unhex($e) -> 14; +unhex($f) -> 15. + +-ifdef(TEST). +urldecode_test_() -> + Tests = [ + {<<"%20">>, <<" ">>}, + {<<"+">>, <<"+">>}, + {<<"%00">>, <<0>>}, + {<<"%fF">>, <<255>>}, + {<<"123">>, <<"123">>}, + {<<"%i5">>, error}, + {<<"%5">>, error} + ], + [{Qs, fun() -> + E = try urldecode(Qs) of + R -> R + catch _:_ -> + error + end + end} || {Qs, E} <- Tests]. + +urldecode_identity_test_() -> + Tests = [ + <<"%20">>, + <<"+">>, + <<"nothingnothingnothingnothing">>, + <<"Small+fast+modular+HTTP+server">>, + <<"Small%20fast%20modular%20HTTP%20server">>, + <<"Small%2F+fast%2F+modular+HTTP+server.">>, + <<"%E3%83%84%E3%82%A4%E3%83%B3%E3%82%BD%E3%82%A6%E3%83" + "%AB%E3%80%9C%E8%BC%AA%E5%BB%BB%E3%81%99%E3%82%8B%E6%97%8B%E5" + "%BE%8B%E3%80%9C">> + ], + [{V, fun() -> V = urlencode(urldecode(V)) end} || V <- Tests]. + +horse_urldecode() -> + horse:repeat(100000, + urldecode(<<"nothingnothingnothingnothing">>) + ). + +horse_urldecode_hex() -> + horse:repeat(100000, + urldecode(<<"Small%2C%20fast%2C%20modular%20HTTP%20server.">>) + ). + +horse_urldecode_jp_hex() -> + horse:repeat(100000, + urldecode(<<"%E3%83%84%E3%82%A4%E3%83%B3%E3%82%BD%E3%82%A6%E3%83" + "%AB%E3%80%9C%E8%BC%AA%E5%BB%BB%E3%81%99%E3%82%8B%E6%97%8B%E5" + "%BE%8B%E3%80%9C">>) + ). +-endif. + +%% @doc Percent encode a string. (RFC3986 2.1) +%% +%% This function is meant to be used for path components. + +-spec urlencode(B) -> B when B::binary(). +urlencode(B) -> + urlencode(B, <<>>). 
+ +urlencode(<< $!, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $! >>); +urlencode(<< $$, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $$ >>); +urlencode(<< $&, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $& >>); +urlencode(<< $', Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $' >>); +urlencode(<< $(, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $( >>); +urlencode(<< $), Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $) >>); +urlencode(<< $*, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $* >>); +urlencode(<< $+, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $+ >>); +urlencode(<< $,, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $, >>); +urlencode(<< $-, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $- >>); +urlencode(<< $., Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $. >>); +urlencode(<< $0, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $0 >>); +urlencode(<< $1, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $1 >>); +urlencode(<< $2, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $2 >>); +urlencode(<< $3, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $3 >>); +urlencode(<< $4, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $4 >>); +urlencode(<< $5, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $5 >>); +urlencode(<< $6, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $6 >>); +urlencode(<< $7, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $7 >>); +urlencode(<< $8, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $8 >>); +urlencode(<< $9, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $9 >>); +urlencode(<< $:, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $: >>); +urlencode(<< $;, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $; >>); +urlencode(<< $=, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $= >>); +urlencode(<< $@, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $@ >>); +urlencode(<< $A, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $A >>); +urlencode(<< $B, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $B >>); +urlencode(<< $C, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $C >>); +urlencode(<< $D, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $D >>); +urlencode(<< $E, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $E >>); +urlencode(<< $F, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $F >>); +urlencode(<< $G, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $G >>); +urlencode(<< $H, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $H >>); +urlencode(<< $I, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $I >>); +urlencode(<< $J, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $J >>); +urlencode(<< $K, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $K >>); +urlencode(<< $L, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $L >>); +urlencode(<< $M, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $M >>); +urlencode(<< $N, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $N >>); +urlencode(<< $O, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $O >>); +urlencode(<< $P, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $P >>); +urlencode(<< $Q, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $Q >>); +urlencode(<< $R, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $R >>); +urlencode(<< $S, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $S >>); +urlencode(<< $T, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $T >>); +urlencode(<< $U, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $U >>); 
+urlencode(<< $V, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $V >>); +urlencode(<< $W, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $W >>); +urlencode(<< $X, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $X >>); +urlencode(<< $Y, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $Y >>); +urlencode(<< $Z, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $Z >>); +urlencode(<< $_, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $_ >>); +urlencode(<< $a, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $a >>); +urlencode(<< $b, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $b >>); +urlencode(<< $c, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $c >>); +urlencode(<< $d, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $d >>); +urlencode(<< $e, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $e >>); +urlencode(<< $f, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $f >>); +urlencode(<< $g, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $g >>); +urlencode(<< $h, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $h >>); +urlencode(<< $i, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $i >>); +urlencode(<< $j, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $j >>); +urlencode(<< $k, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $k >>); +urlencode(<< $l, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $l >>); +urlencode(<< $m, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $m >>); +urlencode(<< $n, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $n >>); +urlencode(<< $o, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $o >>); +urlencode(<< $p, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $p >>); +urlencode(<< $q, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $q >>); +urlencode(<< $r, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $r >>); +urlencode(<< $s, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $s >>); +urlencode(<< $t, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $t >>); +urlencode(<< $u, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $u >>); +urlencode(<< $v, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $v >>); +urlencode(<< $w, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $w >>); +urlencode(<< $x, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $x >>); +urlencode(<< $y, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $y >>); +urlencode(<< $z, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $z >>); +urlencode(<< $~, Rest/bits >>, Acc) -> urlencode(Rest, << Acc/bits, $~ >>); +urlencode(<< C, Rest/bits >>, Acc) -> + H = hex(C bsr 4), + L = hex(C band 16#0f), + urlencode(Rest, << Acc/bits, $%, H, L >>); +urlencode(<<>>, Acc) -> + Acc. + +hex( 0) -> $0; +hex( 1) -> $1; +hex( 2) -> $2; +hex( 3) -> $3; +hex( 4) -> $4; +hex( 5) -> $5; +hex( 6) -> $6; +hex( 7) -> $7; +hex( 8) -> $8; +hex( 9) -> $9; +hex(10) -> $A; +hex(11) -> $B; +hex(12) -> $C; +hex(13) -> $D; +hex(14) -> $E; +hex(15) -> $F. + +-ifdef(TEST). +urlencode_test_() -> + Tests = [ + {<<255, 0>>, <<"%FF%00">>}, + {<<255, " ">>, <<"%FF%20">>}, + {<<"+">>, <<"+">>}, + {<<"aBc123">>, <<"aBc123">>}, + {<<"!$&'()*+,:;=@-._~">>, <<"!$&'()*+,:;=@-._~">>} + ], + [{V, fun() -> E = urlencode(V) end} || {V, E} <- Tests]. 
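+
+%% Editor's note: added illustration, not part of upstream cowlib.
+%% Unlike the x-www-form-urlencoded encoder earlier in this patch, this
+%% RFC3986 path-component encoder escapes spaces as %20 and leaves "+" alone.
+urlencode_path_example_test() ->
+	<<"a%20b+c">> = urlencode(<<"a b+c">>),
+	<<"a b+c">> = urldecode(<<"a%20b+c">>),
+	ok.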
+
+urlencode_identity_test_() ->
+	Tests = [
+		<<"+">>,
+		<<"nothingnothingnothingnothing">>,
+		<<"Small fast modular HTTP server">>,
+		<<"Small, fast, modular HTTP server.">>,
+		<<227,131,132,227,130,164,227,131,179,227,130,189,227,
+			130,166,227,131,171,227,128,156,232,188,170,229,187,187,227,
+			129,153,227,130,139,230,151,139,229,190,139,227,128,156>>
+	],
+	[{V, fun() -> V = urldecode(urlencode(V)) end} || V <- Tests].
+
+horse_urlencode() ->
+	horse:repeat(100000,
+		urlencode(<<"nothingnothingnothingnothing">>)
+	).
+
+horse_urlencode_spaces() ->
+	horse:repeat(100000,
+		urlencode(<<"Small fast modular HTTP server">>)
+	).
+
+horse_urlencode_jp() ->
+	horse:repeat(100000,
+		urlencode(<<227,131,132,227,130,164,227,131,179,227,130,189,227,
+			130,166,227,131,171,227,128,156,232,188,170,229,187,187,227,
+			129,153,227,130,139,230,151,139,229,190,139,227,128,156>>)
+	).
+
+horse_urlencode_mix() ->
+	horse:repeat(100000,
+		urlencode(<<"Small, fast, modular HTTP server.">>)
+	).
+-endif.
diff --git a/deps/cowlib/src/cow_uri_template.erl b/deps/cowlib/src/cow_uri_template.erl
new file mode 100644
index 0000000..eac784f
--- /dev/null
+++ b/deps/cowlib/src/cow_uri_template.erl
@@ -0,0 +1,356 @@
+%% Copyright (c) 2019, Loïc Hoguin <essen@ninenines.eu>
+%%
+%% Permission to use, copy, modify, and/or distribute this software for any
+%% purpose with or without fee is hereby granted, provided that the above
+%% copyright notice and this permission notice appear in all copies.
+%%
+%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+
+%% This is a full level 4 implementation of URI Templates
+%% as defined by RFC6570.
+
+-module(cow_uri_template).
+
+-export([parse/1]).
+-export([expand/2]).
+
+-type op() :: simple_string_expansion
+	| reserved_expansion
+	| fragment_expansion
+	| label_expansion_with_dot_prefix
+	| path_segment_expansion
+	| path_style_parameter_expansion
+	| form_style_query_expansion
+	| form_style_query_continuation.
+
+-type var_list() :: [
+	{no_modifier, binary()}
+	| {{prefix_modifier, pos_integer()}, binary()}
+	| {explode_modifier, binary()}
+].
+
+-type uri_template() :: [
+	binary() | {expr, op(), var_list()}
+].
+-export_type([uri_template/0]).
+
+-type variables() :: #{
+	binary() => binary()
+		| integer()
+		| float()
+		| [binary()]
+		| #{binary() => binary()}
+}.
+
+-include("cow_inline.hrl").
+-include("cow_parse.hrl").
+
+%% Parse a URI template.
+
+-spec parse(binary()) -> uri_template().
+parse(URITemplate) ->
+	parse(URITemplate, <<>>).
+
+parse(<<>>, <<>>) ->
+	[];
+parse(<<>>, Acc) ->
+	[Acc];
+parse(<<${,R/bits>>, <<>>) ->
+	parse_expr(R);
+parse(<<${,R/bits>>, Acc) ->
+	[Acc|parse_expr(R)];
+%% @todo Probably should reject unallowed characters so that
+%% we don't produce invalid URIs.
+parse(<<C,R/bits>>, Acc) when C =/= $} ->
+	parse(R, <<Acc/binary, C>>).
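+
+%% Editor's note: added illustration, not part of upstream cowlib.
+%% A template is parsed into literal binaries interleaved with
+%% {expr, Operator, VarList} tuples, which expand/2 later substitutes.
+-ifdef(TEST).
+parse_structure_example_test() ->
+	[<<"/repos/">>, {expr, simple_string_expansion, [{no_modifier, <<"owner">>}]}]
+		= parse(<<"/repos/{owner}">>),
+	ok.
+-endif.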
+
+parse_expr(<<$+,R/bits>>) ->
+	parse_var_list(R, reserved_expansion, []);
+parse_expr(<<$#,R/bits>>) ->
+	parse_var_list(R, fragment_expansion, []);
+parse_expr(<<$.,R/bits>>) ->
+	parse_var_list(R, label_expansion_with_dot_prefix, []);
+parse_expr(<<$/,R/bits>>) ->
+	parse_var_list(R, path_segment_expansion, []);
+parse_expr(<<$;,R/bits>>) ->
+	parse_var_list(R, path_style_parameter_expansion, []);
+parse_expr(<<$?,R/bits>>) ->
+	parse_var_list(R, form_style_query_expansion, []);
+parse_expr(<<$&,R/bits>>) ->
+	parse_var_list(R, form_style_query_continuation, []);
+parse_expr(R) ->
+	parse_var_list(R, simple_string_expansion, []).
+
+parse_var_list(<<C,R/bits>>, Op, List)
+		when ?IS_ALPHANUM(C) or (C =:= $_) ->
+	parse_varname(R, Op, List, <<C>>).
+
+parse_varname(<<C,R/bits>>, Op, List, Name)
+		when ?IS_ALPHANUM(C) or (C =:= $_) or (C =:= $.) or (C =:= $%) ->
+	parse_varname(R, Op, List, <<Name/binary, C>>);
+parse_varname(<<$:,C,R/bits>>, Op, List, Name)
+		when (C =:= $1) or (C =:= $2) or (C =:= $3) or (C =:= $4) or (C =:= $5)
+			or (C =:= $6) or (C =:= $7) or (C =:= $8) or (C =:= $9) ->
+	parse_prefix_modifier(R, Op, List, Name, <<C>>);
+parse_varname(<<$*,$,,R/bits>>, Op, List, Name) ->
+	parse_var_list(R, Op, [{explode_modifier, Name}|List]);
+parse_varname(<<$*,$},R/bits>>, Op, List, Name) ->
+	[{expr, Op, lists:reverse([{explode_modifier, Name}|List])}|parse(R, <<>>)];
+parse_varname(<<$,,R/bits>>, Op, List, Name) ->
+	parse_var_list(R, Op, [{no_modifier, Name}|List]);
+parse_varname(<<$},R/bits>>, Op, List, Name) ->
+	[{expr, Op, lists:reverse([{no_modifier, Name}|List])}|parse(R, <<>>)].
+
+parse_prefix_modifier(<<C,R/bits>>, Op, List, Name, Acc)
+		when ?IS_DIGIT(C), byte_size(Acc) < 4 ->
+	parse_prefix_modifier(R, Op, List, Name, <<Acc/binary, C>>);
+parse_prefix_modifier(<<$,,R/bits>>, Op, List, Name, Acc) ->
+	parse_var_list(R, Op, [{{prefix_modifier, binary_to_integer(Acc)}, Name}|List]);
+parse_prefix_modifier(<<$},R/bits>>, Op, List, Name, Acc) ->
+	[{expr, Op, lists:reverse([{{prefix_modifier, binary_to_integer(Acc)}, Name}|List])}|parse(R, <<>>)].
+
+%% Expand a URI template (after parsing it if necessary).
+
+-spec expand(binary() | uri_template(), variables()) -> iodata().
+expand(URITemplate, Vars) when is_binary(URITemplate) ->
+	expand(parse(URITemplate), Vars);
+expand(URITemplate, Vars) ->
+	expand1(URITemplate, Vars).
+
+expand1([], _) ->
+	[];
+expand1([Literal|Tail], Vars) when is_binary(Literal) ->
+	[Literal|expand1(Tail, Vars)];
+expand1([{expr, simple_string_expansion, VarList}|Tail], Vars) ->
+	[simple_string_expansion(VarList, Vars)|expand1(Tail, Vars)];
+expand1([{expr, reserved_expansion, VarList}|Tail], Vars) ->
+	[reserved_expansion(VarList, Vars)|expand1(Tail, Vars)];
+expand1([{expr, fragment_expansion, VarList}|Tail], Vars) ->
+	[fragment_expansion(VarList, Vars)|expand1(Tail, Vars)];
+expand1([{expr, label_expansion_with_dot_prefix, VarList}|Tail], Vars) ->
+	[label_expansion_with_dot_prefix(VarList, Vars)|expand1(Tail, Vars)];
+expand1([{expr, path_segment_expansion, VarList}|Tail], Vars) ->
+	[path_segment_expansion(VarList, Vars)|expand1(Tail, Vars)];
+expand1([{expr, path_style_parameter_expansion, VarList}|Tail], Vars) ->
+	[path_style_parameter_expansion(VarList, Vars)|expand1(Tail, Vars)];
+expand1([{expr, form_style_query_expansion, VarList}|Tail], Vars) ->
+	[form_style_query_expansion(VarList, Vars)|expand1(Tail, Vars)];
+expand1([{expr, form_style_query_continuation, VarList}|Tail], Vars) ->
+	[form_style_query_continuation(VarList, Vars)|expand1(Tail, Vars)].
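+
+%% Editor's note: added illustration, not part of upstream cowlib.
+%% Each operator selects one of the expansion functions below and with
+%% it the set of characters that may appear unescaped.
+-ifdef(TEST).
+operator_example_test() ->
+	Vars = #{<<"x">> => <<"a/b c">>},
+	<<"a%2Fb%20c">> = iolist_to_binary(expand(<<"{x}">>, Vars)),
+	<<"a/b%20c">> = iolist_to_binary(expand(<<"{+x}">>, Vars)),
+	<<"#a/b%20c">> = iolist_to_binary(expand(<<"{#x}">>, Vars)),
+	ok.
+-endif.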
+ +simple_string_expansion(VarList, Vars) -> + lists:join($,, [ + apply_modifier(Modifier, unreserved, $,, Value) + || {Modifier, _Name, Value} <- lookup_variables(VarList, Vars)]). + +reserved_expansion(VarList, Vars) -> + lists:join($,, [ + apply_modifier(Modifier, reserved, $,, Value) + || {Modifier, _Name, Value} <- lookup_variables(VarList, Vars)]). + +fragment_expansion(VarList, Vars) -> + case reserved_expansion(VarList, Vars) of + [] -> []; + Expanded -> [$#, Expanded] + end. + +label_expansion_with_dot_prefix(VarList, Vars) -> + segment_expansion(VarList, Vars, $.). + +path_segment_expansion(VarList, Vars) -> + segment_expansion(VarList, Vars, $/). + +segment_expansion(VarList, Vars, Sep) -> + Expanded = lists:join(Sep, [ + apply_modifier(Modifier, unreserved, Sep, Value) + || {Modifier, _Name, Value} <- lookup_variables(VarList, Vars)]), + case Expanded of + [] -> []; + [[]] -> []; + _ -> [Sep, Expanded] + end. + +path_style_parameter_expansion(VarList, Vars) -> + parameter_expansion(VarList, Vars, $;, $;, trim). + +form_style_query_expansion(VarList, Vars) -> + parameter_expansion(VarList, Vars, $?, $&, no_trim). + +form_style_query_continuation(VarList, Vars) -> + parameter_expansion(VarList, Vars, $&, $&, no_trim). + +parameter_expansion(VarList, Vars, LeadingSep, Sep, Trim) -> + Expanded = lists:join(Sep, [ + apply_parameter_modifier(Modifier, unreserved, Sep, Trim, Name, Value) + || {Modifier, Name, Value} <- lookup_variables(VarList, Vars)]), + case Expanded of + [] -> []; + [[]] -> []; + _ -> [LeadingSep, Expanded] + end. + +lookup_variables([], _) -> + []; +lookup_variables([{Modifier, Name}|Tail], Vars) -> + case Vars of + #{Name := Value} -> [{Modifier, Name, Value}|lookup_variables(Tail, Vars)]; + _ -> lookup_variables(Tail, Vars) + end. + +apply_modifier(no_modifier, AllowedChars, _, List) when is_list(List) -> + lists:join($,, [urlencode(Value, AllowedChars) || Value <- List]); +apply_modifier(explode_modifier, AllowedChars, ExplodeSep, List) when is_list(List) -> + lists:join(ExplodeSep, [urlencode(Value, AllowedChars) || Value <- List]); +apply_modifier(Modifier, AllowedChars, ExplodeSep, Map) when is_map(Map) -> + {JoinSep, KVSep} = case Modifier of + no_modifier -> {$,, $,}; + explode_modifier -> {ExplodeSep, $=} + end, + lists:reverse(lists:join(JoinSep, + maps:fold(fun(Key, Value, Acc) -> + [[ + urlencode(Key, AllowedChars), + KVSep, + urlencode(Value, AllowedChars) + ]|Acc] + end, [], Map) + )); +apply_modifier({prefix_modifier, MaxLen}, AllowedChars, _, Value) -> + urlencode(string:slice(binarize(Value), 0, MaxLen), AllowedChars); +apply_modifier(_, AllowedChars, _, Value) -> + urlencode(binarize(Value), AllowedChars). 
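+
+%% Editor's note: added illustration, not part of upstream cowlib.
+%% Value modifiers as applied above: ":N" truncates the value before
+%% encoding and "*" explodes a list using the expression's separator.
+-ifdef(TEST).
+modifier_example_test() ->
+	<<"val">> = iolist_to_binary(expand(<<"{var:3}">>, #{<<"var">> => <<"value">>})),
+	<<"red,green">> = iolist_to_binary(expand(<<"{list*}">>, #{<<"list">> => [<<"red">>, <<"green">>]})),
+	ok.
+-endif.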
+
+apply_parameter_modifier(_, _, _, _, _, []) ->
+	[];
+apply_parameter_modifier(_, _, _, _, _, Map) when Map =:= #{} ->
+	[];
+apply_parameter_modifier(no_modifier, AllowedChars, _, _, Name, List) when is_list(List) ->
+	[
+		Name,
+		$=,
+		lists:join($,, [urlencode(Value, AllowedChars) || Value <- List])
+	];
+apply_parameter_modifier(explode_modifier, AllowedChars, ExplodeSep, _, Name, List) when is_list(List) ->
+	lists:join(ExplodeSep, [[
+		Name,
+		$=,
+		urlencode(Value, AllowedChars)
+	] || Value <- List]);
+apply_parameter_modifier(Modifier, AllowedChars, ExplodeSep, _, Name, Map) when is_map(Map) ->
+	{JoinSep, KVSep} = case Modifier of
+		no_modifier -> {$,, $,};
+		explode_modifier -> {ExplodeSep, $=}
+	end,
+	[
+		case Modifier of
+			no_modifier ->
+				[
+					Name,
+					$=
+				];
+			explode_modifier ->
+				[]
+		end,
+		lists:reverse(lists:join(JoinSep,
+			maps:fold(fun(Key, Value, Acc) ->
+				[[
+					urlencode(Key, AllowedChars),
+					KVSep,
+					urlencode(Value, AllowedChars)
+				]|Acc]
+			end, [], Map)
+		))
+	];
+apply_parameter_modifier(Modifier, AllowedChars, _, Trim, Name, Value0) ->
+	Value1 = binarize(Value0),
+	Value = case Modifier of
+		{prefix_modifier, MaxLen} ->
+			string:slice(Value1, 0, MaxLen);
+		no_modifier ->
+			Value1
+	end,
+	[
+		Name,
+		case Value of
+			<<>> when Trim =:= trim ->
+				[];
+			<<>> when Trim =:= no_trim ->
+				$=;
+			_ ->
+				[
+					$=,
+					urlencode(Value, AllowedChars)
+				]
+		end
+	].
+
+binarize(Value) when is_integer(Value) ->
+	integer_to_binary(Value);
+binarize(Value) when is_float(Value) ->
+	float_to_binary(Value, [{decimals, 10}, compact]);
+binarize(Value) ->
+	Value.
+
+urlencode(Value, unreserved) ->
+	urlencode_unreserved(Value, <<>>);
+urlencode(Value, reserved) ->
+	urlencode_reserved(Value, <<>>).
+
+urlencode_unreserved(<<C,R/bits>>, Acc)
+		when ?IS_URI_UNRESERVED(C) ->
+	urlencode_unreserved(R, <<Acc/binary, C>>);
+urlencode_unreserved(<<C,R/bits>>, Acc) ->
+	H = hex(C bsr 4),
+	L = hex(C band 16#0f),
+	urlencode_unreserved(R, <<Acc/binary, $%, H, L>>);
+urlencode_unreserved(<<>>, Acc) ->
+	Acc.
+
+urlencode_reserved(<<C,R/bits>>, Acc)
+		when ?IS_URI_UNRESERVED(C) or ?IS_URI_GEN_DELIMS(C) or ?IS_URI_SUB_DELIMS(C) ->
+	urlencode_reserved(R, <<Acc/binary, C>>);
+urlencode_reserved(<<C,R/bits>>, Acc) ->
+	H = hex(C bsr 4),
+	L = hex(C band 16#0f),
+	urlencode_reserved(R, <<Acc/binary, $%, H, L>>);
+urlencode_reserved(<<>>, Acc) ->
+	Acc.
+
+hex(D) when D < 10 -> $0 + D;
+hex(D) -> $A + D - 10.
+
+-ifdef(TEST).
+expand_uritemplate_test_() ->
+	Files = filelib:wildcard("deps/uritemplate-tests/*.json"),
+	lists:flatten([begin
+		{ok, JSON} = file:read_file(File),
+		Tests = jsx:decode(JSON, [return_maps]),
+		[begin
+			%% Erlang doesn't have a NULL value.
+			Vars = maps:remove(<<"undef">>, Vars0),
+			[
+				{iolist_to_binary(io_lib:format("~s - ~s: ~s => ~s",
+					[filename:basename(File), Section, URITemplate,
+						if
+							is_list(Expected) -> lists:join(<<" OR ">>, Expected);
+							true -> Expected
+						end
+					])),
+				fun() ->
+					case Expected of
+						false ->
+							{'EXIT', _} = (catch expand(URITemplate, Vars));
+						[_|_] ->
+							Result = iolist_to_binary(expand(URITemplate, Vars)),
+							io:format("~p", [Result]),
+							true = lists:member(Result, Expected);
+						_ ->
+							Expected = iolist_to_binary(expand(URITemplate, Vars))
+					end
+				end}
+			|| [URITemplate, Expected] <- Cases]
+		end || {Section, #{
+			<<"variables">> := Vars0,
+			<<"testcases">> := Cases
+		}} <- maps:to_list(Tests)]
+	end || File <- Files]).
+-endif.
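+
+%% Editor's note: added illustration, not part of upstream cowlib.
+%% A typical expand/2 call mixing a simple expression with a form-style
+%% query expression; the path shape is made up for the example.
+-ifdef(TEST).
+expand_example_test() ->
+	URI = expand(<<"/users/{name}{?page,tags*}">>, #{
+		<<"name">> => <<"jane doe">>,
+		<<"page">> => 2,
+		<<"tags">> => [<<"erlang">>, <<"otp">>]}),
+	<<"/users/jane%20doe?page=2&tags=erlang&tags=otp">> = iolist_to_binary(URI),
+	ok.
+-endif.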
diff --git a/deps/cowlib/src/cow_ws.erl b/deps/cowlib/src/cow_ws.erl new file mode 100644 index 0000000..3bb46c5 --- /dev/null +++ b/deps/cowlib/src/cow_ws.erl @@ -0,0 +1,741 @@ +%% Copyright (c) 2015-2018, Loรฏc Hoguin +%% +%% Permission to use, copy, modify, and/or distribute this software for any +%% purpose with or without fee is hereby granted, provided that the above +%% copyright notice and this permission notice appear in all copies. +%% +%% THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES +%% WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF +%% MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR +%% ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES +%% WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN +%% ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF +%% OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + +-module(cow_ws). + +-export([key/0]). +-export([encode_key/1]). + +-export([negotiate_permessage_deflate/3]). +-export([negotiate_x_webkit_deflate_frame/3]). + +-export([validate_permessage_deflate/3]). + +-export([parse_header/3]). +-export([parse_payload/9]). +-export([make_frame/4]). + +-export([frame/2]). +-export([masked_frame/2]). + +-type close_code() :: 1000..1003 | 1006..1011 | 3000..4999. +-export_type([close_code/0]). + +-type extensions() :: map(). +-export_type([extensions/0]). + +-type deflate_opts() :: #{ + %% Compression parameters. + level => zlib:zlevel(), + mem_level => zlib:zmemlevel(), + strategy => zlib:zstrategy(), + + %% Whether the compression context will carry over between frames. + server_context_takeover => takeover | no_takeover, + client_context_takeover => takeover | no_takeover, + + %% LZ77 sliding window size limits. + server_max_window_bits => 8..15, + client_max_window_bits => 8..15 +}. +-export_type([deflate_opts/0]). + +-type frag_state() :: undefined | {fin | nofin, text | binary, rsv()}. +-export_type([frag_state/0]). + +-type frame() :: close | ping | pong + | {text | binary | close | ping | pong, iodata()} + | {close, close_code(), iodata()} + | {fragment, fin | nofin, text | binary | continuation, iodata()}. +-export_type([frame/0]). + +-type frame_type() :: fragment | text | binary | close | ping | pong. +-export_type([frame_type/0]). + +-type mask_key() :: undefined | 0..16#ffffffff. +-export_type([mask_key/0]). + +-type rsv() :: <<_:3>>. +-export_type([rsv/0]). + +-type utf8_state() :: 0..8 | undefined. +-export_type([utf8_state/0]). + +%% @doc Generate a key for the Websocket handshake request. + +-spec key() -> binary(). +key() -> + base64:encode(crypto:strong_rand_bytes(16)). + +%% @doc Encode the key into the accept value for the Websocket handshake response. + +-spec encode_key(binary()) -> binary(). +encode_key(Key) -> + base64:encode(crypto:hash(sha, [Key, "258EAFA5-E914-47DA-95CA-C5AB0DC85B11"])). + +%% @doc Negotiate the permessage-deflate extension. + +-spec negotiate_permessage_deflate( + [binary() | {binary(), binary()}], Exts, deflate_opts()) + -> ignore | {ok, iolist(), Exts} when Exts::extensions(). +%% Ignore if deflate already negotiated. +negotiate_permessage_deflate(_, #{deflate := _}, _) -> + ignore; +negotiate_permessage_deflate(Params, Extensions, Opts) -> + case lists:usort(Params) of + %% Ignore if multiple parameters with the same name. 
+ Params2 when length(Params) =/= length(Params2) -> + ignore; + Params2 -> + negotiate_permessage_deflate1(Params2, Extensions, Opts) + end. + +negotiate_permessage_deflate1(Params, Extensions, Opts) -> + %% We are allowed to send back no_takeover even if the client + %% accepts takeover. Therefore we use no_takeover if any of + %% the inputs have it. + ServerTakeover = maps:get(server_context_takeover, Opts, takeover), + ClientTakeover = maps:get(client_context_takeover, Opts, takeover), + %% We can send back window bits smaller than or equal to what + %% the client sends us. + ServerMaxWindowBits = maps:get(server_max_window_bits, Opts, 15), + ClientMaxWindowBits = maps:get(client_max_window_bits, Opts, 15), + %% We may need to send back no_context_takeover depending on configuration. + RespParams0 = case ServerTakeover of + takeover -> []; + no_takeover -> [<<"; server_no_context_takeover">>] + end, + RespParams1 = case ClientTakeover of + takeover -> RespParams0; + no_takeover -> [<<"; client_no_context_takeover">>|RespParams0] + end, + Negotiated0 = #{ + server_context_takeover => ServerTakeover, + client_context_takeover => ClientTakeover, + server_max_window_bits => ServerMaxWindowBits, + client_max_window_bits => ClientMaxWindowBits + }, + case negotiate_params(Params, Negotiated0, RespParams1) of + ignore -> + ignore; + {#{server_max_window_bits := SB}, _} when SB > ServerMaxWindowBits -> + ignore; + {#{client_max_window_bits := CB}, _} when CB > ClientMaxWindowBits -> + ignore; + {Negotiated, RespParams2} -> + %% We add the configured max window bits if necessary. + RespParams = case Negotiated of + #{server_max_window_bits_set := true} -> RespParams2; + _ when ServerMaxWindowBits =:= 15 -> RespParams2; + _ -> [<<"; server_max_window_bits=">>, + integer_to_binary(ServerMaxWindowBits)|RespParams2] + end, + {Inflate, Deflate} = init_permessage_deflate( + maps:get(client_max_window_bits, Negotiated), + maps:get(server_max_window_bits, Negotiated), Opts), + {ok, [<<"permessage-deflate">>, RespParams], Extensions#{ + deflate => Deflate, + deflate_takeover => maps:get(server_context_takeover, Negotiated), + inflate => Inflate, + inflate_takeover => maps:get(client_context_takeover, Negotiated)}} + end. + +negotiate_params([], Negotiated, RespParams) -> + {Negotiated, RespParams}; +%% We must only send the client_max_window_bits parameter if the +%% request explicitly indicated the client supports it. +negotiate_params([<<"client_max_window_bits">>|Tail], Negotiated, RespParams) -> + CB = maps:get(client_max_window_bits, Negotiated), + negotiate_params(Tail, Negotiated#{client_max_window_bits_set => true}, + [<<"; client_max_window_bits=">>, integer_to_binary(CB)|RespParams]); +negotiate_params([{<<"client_max_window_bits">>, Max}|Tail], Negotiated, RespParams) -> + CB0 = maps:get(client_max_window_bits, Negotiated, undefined), + case parse_max_window_bits(Max) of + error -> + ignore; + CB when CB =< CB0 -> + negotiate_params(Tail, Negotiated#{client_max_window_bits => CB}, + [<<"; client_max_window_bits=">>, Max|RespParams]); + %% When the client sends window bits larger than the server wants + %% to use, we use what the server defined. 
+ _ -> + negotiate_params(Tail, Negotiated, + [<<"; client_max_window_bits=">>, integer_to_binary(CB0)|RespParams]) + end; +negotiate_params([{<<"server_max_window_bits">>, Max}|Tail], Negotiated, RespParams) -> + SB0 = maps:get(server_max_window_bits, Negotiated, undefined), + case parse_max_window_bits(Max) of + error -> + ignore; + SB when SB =< SB0 -> + negotiate_params(Tail, Negotiated#{ + server_max_window_bits => SB, + server_max_window_bits_set => true}, + [<<"; server_max_window_bits=">>, Max|RespParams]); + %% When the client sends window bits larger than the server wants + %% to use, we use what the server defined. The parameter will be + %% set only when this function returns. + _ -> + negotiate_params(Tail, Negotiated, RespParams) + end; +%% We only need to send the no_context_takeover parameter back +%% here if we didn't already define it via configuration. +negotiate_params([<<"client_no_context_takeover">>|Tail], Negotiated, RespParams) -> + case maps:get(client_context_takeover, Negotiated) of + no_takeover -> + negotiate_params(Tail, Negotiated, RespParams); + takeover -> + negotiate_params(Tail, Negotiated#{client_context_takeover => no_takeover}, + [<<"; client_no_context_takeover">>|RespParams]) + end; +negotiate_params([<<"server_no_context_takeover">>|Tail], Negotiated, RespParams) -> + case maps:get(server_context_takeover, Negotiated) of + no_takeover -> + negotiate_params(Tail, Negotiated, RespParams); + takeover -> + negotiate_params(Tail, Negotiated#{server_context_takeover => no_takeover}, + [<<"; server_no_context_takeover">>|RespParams]) + end; +%% Ignore if unknown parameter; ignore if parameter with invalid or missing value. +negotiate_params(_, _, _) -> + ignore. + +parse_max_window_bits(<<"8">>) -> 8; +parse_max_window_bits(<<"9">>) -> 9; +parse_max_window_bits(<<"10">>) -> 10; +parse_max_window_bits(<<"11">>) -> 11; +parse_max_window_bits(<<"12">>) -> 12; +parse_max_window_bits(<<"13">>) -> 13; +parse_max_window_bits(<<"14">>) -> 14; +parse_max_window_bits(<<"15">>) -> 15; +parse_max_window_bits(_) -> error. + +%% A negative WindowBits value indicates that zlib headers are not used. +init_permessage_deflate(InflateWindowBits, DeflateWindowBits, Opts) -> + Inflate = zlib:open(), + ok = zlib:inflateInit(Inflate, -InflateWindowBits), + Deflate = zlib:open(), + %% zlib 1.2.11+ now rejects -8. It used to transform it to -9. + %% We need to use 9 when 8 is requested for interoperability. + DeflateWindowBits2 = case DeflateWindowBits of + 8 -> 9; + _ -> DeflateWindowBits + end, + ok = zlib:deflateInit(Deflate, + maps:get(level, Opts, best_compression), + deflated, + -DeflateWindowBits2, + maps:get(mem_level, Opts, 8), + maps:get(strategy, Opts, default)), + %% Set the owner pid of the zlib contexts if requested. + case Opts of + #{owner := Pid} -> set_owner(Pid, Inflate, Deflate); + _ -> ok + end, + {Inflate, Deflate}. + +-ifdef(OTP_RELEASE). +%% Using is_port/1 on a zlib context results in a Dialyzer warning in OTP 21. +%% This function helps silence that warning while staying compatible +%% with all supported versions. + +set_owner(Pid, Inflate, Deflate) -> + zlib:set_controlling_process(Inflate, Pid), + zlib:set_controlling_process(Deflate, Pid). +-else. +%% The zlib port became a reference in OTP 20.1+. There +%% was however no way to change the controlling process +%% until the OTP 20.1.3 patch version. Since we can't +%% enable compression for 20.1, 20.1.1 and 20.1.2 we +%% explicitly crash. The caller should ignore this extension. 
+ +set_owner(Pid, Inflate, Deflate) when is_port(Inflate) -> + true = erlang:port_connect(Inflate, Pid), + true = unlink(Inflate), + true = erlang:port_connect(Deflate, Pid), + true = unlink(Deflate), + ok; +set_owner(Pid, Inflate, Deflate) -> + case erlang:function_exported(zlib, set_controlling_process, 2) of + true -> + zlib:set_controlling_process(Inflate, Pid), + zlib:set_controlling_process(Deflate, Pid); + false -> + exit({error, incompatible_zlib_version, + 'OTP 20.1, 20.1.1 and 20.1.2 are missing required functionality.'}) + end. +-endif. + +%% @doc Negotiate the x-webkit-deflate-frame extension. +%% +%% The implementation is very basic and none of the parameters +%% are currently supported. + +-spec negotiate_x_webkit_deflate_frame( + [binary() | {binary(), binary()}], Exts, deflate_opts()) + -> ignore | {ok, binary(), Exts} when Exts::extensions(). +negotiate_x_webkit_deflate_frame(_, #{deflate := _}, _) -> + ignore; +negotiate_x_webkit_deflate_frame(_Params, Extensions, Opts) -> + % Since we are negotiating an unconstrained deflate-frame + % then we must be willing to accept frames using the + % maximum window size which is 2^15. + {Inflate, Deflate} = init_permessage_deflate(15, 15, Opts), + {ok, <<"x-webkit-deflate-frame">>, + Extensions#{ + deflate => Deflate, + deflate_takeover => takeover, + inflate => Inflate, + inflate_takeover => takeover}}. + +%% @doc Validate the negotiated permessage-deflate extension. + +%% Error when more than one deflate extension was negotiated. +validate_permessage_deflate(_, #{deflate := _}, _) -> + error; +validate_permessage_deflate(Params, Extensions, Opts) -> + case lists:usort(Params) of + %% Error if multiple parameters with the same name. + Params2 when length(Params) =/= length(Params2) -> + error; + Params2 -> + case parse_response_permessage_deflate_params(Params2, 15, takeover, 15, takeover) of + error -> + error; + {ClientWindowBits, ClientTakeOver, ServerWindowBits, ServerTakeOver} -> + {Inflate, Deflate} = init_permessage_deflate(ServerWindowBits, ClientWindowBits, Opts), + {ok, Extensions#{ + deflate => Deflate, + deflate_takeover => ClientTakeOver, + inflate => Inflate, + inflate_takeover => ServerTakeOver}} + end + end. + +parse_response_permessage_deflate_params([], CB, CTO, SB, STO) -> + {CB, CTO, SB, STO}; +parse_response_permessage_deflate_params([{<<"client_max_window_bits">>, Max}|Tail], _, CTO, SB, STO) -> + case parse_max_window_bits(Max) of + error -> error; + CB -> parse_response_permessage_deflate_params(Tail, CB, CTO, SB, STO) + end; +parse_response_permessage_deflate_params([<<"client_no_context_takeover">>|Tail], CB, _, SB, STO) -> + parse_response_permessage_deflate_params(Tail, CB, no_takeover, SB, STO); +parse_response_permessage_deflate_params([{<<"server_max_window_bits">>, Max}|Tail], CB, CTO, _, STO) -> + case parse_max_window_bits(Max) of + error -> error; + SB -> parse_response_permessage_deflate_params(Tail, CB, CTO, SB, STO) + end; +parse_response_permessage_deflate_params([<<"server_no_context_takeover">>|Tail], CB, CTO, SB, _) -> + parse_response_permessage_deflate_params(Tail, CB, CTO, SB, no_takeover); +%% Error if unknown parameter; error if parameter with invalid or missing value. +parse_response_permessage_deflate_params(_, _, _, _, _) -> + error. + +%% @doc Parse and validate the Websocket frame header. +%% +%% This function also updates the fragmentation state according to +%% information found in the frame's header. 
+ +-spec parse_header(binary(), extensions(), frag_state()) + -> error | more | {frame_type(), frag_state(), rsv(), non_neg_integer(), mask_key(), binary()}. +%% RSV bits MUST be 0 unless an extension is negotiated +%% that defines meanings for non-zero values. +parse_header(<< _:1, Rsv:3, _/bits >>, Extensions, _) when Extensions =:= #{}, Rsv =/= 0 -> error; +%% Last 2 RSV bits MUST be 0 if deflate-frame extension is used. +parse_header(<< _:2, 1:1, _/bits >>, #{deflate := _}, _) -> error; +parse_header(<< _:3, 1:1, _/bits >>, #{deflate := _}, _) -> error; +%% Invalid opcode. Note that these opcodes may be used by extensions. +parse_header(<< _:4, 3:4, _/bits >>, _, _) -> error; +parse_header(<< _:4, 4:4, _/bits >>, _, _) -> error; +parse_header(<< _:4, 5:4, _/bits >>, _, _) -> error; +parse_header(<< _:4, 6:4, _/bits >>, _, _) -> error; +parse_header(<< _:4, 7:4, _/bits >>, _, _) -> error; +parse_header(<< _:4, 11:4, _/bits >>, _, _) -> error; +parse_header(<< _:4, 12:4, _/bits >>, _, _) -> error; +parse_header(<< _:4, 13:4, _/bits >>, _, _) -> error; +parse_header(<< _:4, 14:4, _/bits >>, _, _) -> error; +parse_header(<< _:4, 15:4, _/bits >>, _, _) -> error; +%% Control frames MUST NOT be fragmented. +parse_header(<< 0:1, _:3, Opcode:4, _/bits >>, _, _) when Opcode >= 8 -> error; +%% A frame MUST NOT use the zero opcode unless fragmentation was initiated. +parse_header(<< _:4, 0:4, _/bits >>, _, undefined) -> error; +%% Non-control opcode when expecting control message or next fragment. +parse_header(<< _:4, 1:4, _/bits >>, _, {_, _, _}) -> error; +parse_header(<< _:4, 2:4, _/bits >>, _, {_, _, _}) -> error; +parse_header(<< _:4, 3:4, _/bits >>, _, {_, _, _}) -> error; +parse_header(<< _:4, 4:4, _/bits >>, _, {_, _, _}) -> error; +parse_header(<< _:4, 5:4, _/bits >>, _, {_, _, _}) -> error; +parse_header(<< _:4, 6:4, _/bits >>, _, {_, _, _}) -> error; +parse_header(<< _:4, 7:4, _/bits >>, _, {_, _, _}) -> error; +%% Close control frame length MUST be 0 or >= 2. +parse_header(<< _:4, 8:4, _:1, 1:7, _/bits >>, _, _) -> error; +%% Close control frame with incomplete close code. Need more data. +parse_header(Data = << _:4, 8:4, 0:1, Len:7, _/bits >>, _, _) when Len > 1, byte_size(Data) < 4 -> more; +parse_header(Data = << _:4, 8:4, 1:1, Len:7, _/bits >>, _, _) when Len > 1, byte_size(Data) < 8 -> more; +%% 7 bits payload length. +parse_header(<< Fin:1, Rsv:3/bits, Opcode:4, 0:1, Len:7, Rest/bits >>, _, FragState) when Len < 126 -> + parse_header(Opcode, Fin, FragState, Rsv, Len, undefined, Rest); +parse_header(<< Fin:1, Rsv:3/bits, Opcode:4, 1:1, Len:7, MaskKey:32, Rest/bits >>, _, FragState) when Len < 126 -> + parse_header(Opcode, Fin, FragState, Rsv, Len, MaskKey, Rest); +%% 16 bits payload length. +parse_header(<< Fin:1, Rsv:3/bits, Opcode:4, 0:1, 126:7, Len:16, Rest/bits >>, _, FragState) when Len > 125, Opcode < 8 -> + parse_header(Opcode, Fin, FragState, Rsv, Len, undefined, Rest); +parse_header(<< Fin:1, Rsv:3/bits, Opcode:4, 1:1, 126:7, Len:16, MaskKey:32, Rest/bits >>, _, FragState) when Len > 125, Opcode < 8 -> + parse_header(Opcode, Fin, FragState, Rsv, Len, MaskKey, Rest); +%% 63 bits payload length. 
+parse_header(<< Fin:1, Rsv:3/bits, Opcode:4, 0:1, 127:7, 0:1, Len:63, Rest/bits >>, _, FragState) when Len > 16#ffff, Opcode < 8 -> + parse_header(Opcode, Fin, FragState, Rsv, Len, undefined, Rest); +parse_header(<< Fin:1, Rsv:3/bits, Opcode:4, 1:1, 127:7, 0:1, Len:63, MaskKey:32, Rest/bits >>, _, FragState) when Len > 16#ffff, Opcode < 8 -> + parse_header(Opcode, Fin, FragState, Rsv, Len, MaskKey, Rest); +%% When payload length is over 63 bits, the most significant bit MUST be 0. +parse_header(<< _:9, 127:7, 1:1, _/bits >>, _, _) -> error; +%% For the next two clauses, it can be one of the following: +%% +%% * The minimal number of bytes MUST be used to encode the length +%% * All control frames MUST have a payload length of 125 bytes or less +parse_header(<< _:8, 0:1, 126:7, _:16, _/bits >>, _, _) -> error; +parse_header(<< _:8, 1:1, 126:7, _:48, _/bits >>, _, _) -> error; +parse_header(<< _:8, 0:1, 127:7, _:64, _/bits >>, _, _) -> error; +parse_header(<< _:8, 1:1, 127:7, _:96, _/bits >>, _, _) -> error; +%% Need more data. +parse_header(_, _, _) -> more. + +parse_header(Opcode, Fin, FragState, Rsv, Len, MaskKey, Rest) -> + Type = opcode_to_frame_type(Opcode), + Type2 = case Fin of + 0 -> fragment; + 1 -> Type + end, + {Type2, frag_state(Type, Fin, Rsv, FragState), Rsv, Len, MaskKey, Rest}. + +opcode_to_frame_type(0) -> fragment; +opcode_to_frame_type(1) -> text; +opcode_to_frame_type(2) -> binary; +opcode_to_frame_type(8) -> close; +opcode_to_frame_type(9) -> ping; +opcode_to_frame_type(10) -> pong. + +frag_state(Type, 0, Rsv, undefined) -> {nofin, Type, Rsv}; +frag_state(fragment, 0, _, FragState = {nofin, _, _}) -> FragState; +frag_state(fragment, 1, _, {nofin, Type, Rsv}) -> {fin, Type, Rsv}; +frag_state(_, 1, _, FragState) -> FragState. + +%% @doc Parse and validate the frame's payload. +%% +%% Validation is only required for text and close frames which feature +%% a UTF-8 payload. + +-spec parse_payload(binary(), mask_key(), utf8_state(), non_neg_integer(), + frame_type(), non_neg_integer(), frag_state(), extensions(), rsv()) + -> {ok, binary(), utf8_state(), binary()} + | {ok, close_code(), binary(), utf8_state(), binary()} + | {more, binary(), utf8_state()} + | {more, close_code(), binary(), utf8_state()} + | {error, badframe | badencoding}. +%% Empty last frame of compressed message. +parse_payload(Data, _, Utf8State, _, _, 0, {fin, _, << 1:1, 0:2 >>}, + #{inflate := Inflate, inflate_takeover := TakeOver}, _) -> + _ = zlib:inflate(Inflate, << 0, 0, 255, 255 >>), + case TakeOver of + no_takeover -> zlib:inflateReset(Inflate); + takeover -> ok + end, + {ok, <<>>, Utf8State, Data}; +%% Compressed fragmented frame. +parse_payload(Data, MaskKey, Utf8State, ParsedLen, Type, Len, FragState = {_, _, << 1:1, 0:2 >>}, + #{inflate := Inflate, inflate_takeover := TakeOver}, _) -> + {Data2, Rest, Eof} = split_payload(Data, Len), + Payload = inflate_frame(unmask(Data2, MaskKey, ParsedLen), Inflate, TakeOver, FragState, Eof), + validate_payload(Payload, Rest, Utf8State, ParsedLen, Type, FragState, Eof); +%% Compressed frame. +parse_payload(Data, MaskKey, Utf8State, ParsedLen, Type, Len, FragState, + #{inflate := Inflate, inflate_takeover := TakeOver}, << 1:1, 0:2 >>) when Type =:= text; Type =:= binary -> + {Data2, Rest, Eof} = split_payload(Data, Len), + Payload = inflate_frame(unmask(Data2, MaskKey, ParsedLen), Inflate, TakeOver, FragState, Eof), + validate_payload(Payload, Rest, Utf8State, ParsedLen, Type, FragState, Eof); +%% Empty frame. 
+parse_payload(Data, _, Utf8State, 0, _, 0, _, _, _) + when Utf8State =:= 0; Utf8State =:= undefined -> + {ok, <<>>, Utf8State, Data}; +%% Start of close frame. +parse_payload(Data, MaskKey, Utf8State, 0, Type = close, Len, FragState, _, << 0:3 >>) -> + {<< MaskedCode:2/binary, Data2/bits >>, Rest, Eof} = split_payload(Data, Len), + << CloseCode:16 >> = unmask(MaskedCode, MaskKey, 0), + case validate_close_code(CloseCode) of + ok -> + Payload = unmask(Data2, MaskKey, 2), + case validate_payload(Payload, Rest, Utf8State, 2, Type, FragState, Eof) of + {ok, _, Utf8State2, _} -> {ok, CloseCode, Payload, Utf8State2, Rest}; + {more, _, Utf8State2} -> {more, CloseCode, Payload, Utf8State2}; + Error -> Error + end; + error -> + {error, badframe} + end; +%% Normal frame. +parse_payload(Data, MaskKey, Utf8State, ParsedLen, Type, Len, FragState, _, << 0:3 >>) -> + {Data2, Rest, Eof} = split_payload(Data, Len), + Payload = unmask(Data2, MaskKey, ParsedLen), + validate_payload(Payload, Rest, Utf8State, ParsedLen, Type, FragState, Eof). + +split_payload(Data, Len) -> + case byte_size(Data) of + Len -> + {Data, <<>>, true}; + DataLen when DataLen < Len -> + {Data, <<>>, false}; + _ -> + << Data2:Len/binary, Rest/bits >> = Data, + {Data2, Rest, true} + end. + +validate_close_code(Code) -> + if + Code < 1000 -> error; + Code =:= 1004 -> error; + Code =:= 1005 -> error; + Code =:= 1006 -> error; + Code > 1011, Code < 3000 -> error; + Code > 4999 -> error; + true -> ok + end. + +unmask(Data, undefined, _) -> + Data; +unmask(Data, MaskKey, 0) -> + mask(Data, MaskKey, <<>>); +%% We unmask on the fly so we need to continue from the right mask byte. +unmask(Data, MaskKey, UnmaskedLen) -> + Left = UnmaskedLen rem 4, + Right = 4 - Left, + MaskKey2 = (MaskKey bsl (Left * 8)) + (MaskKey bsr (Right * 8)), + mask(Data, MaskKey2, <<>>). + +mask(<<>>, _, Unmasked) -> + Unmasked; +mask(<< O:32, Rest/bits >>, MaskKey, Acc) -> + T = O bxor MaskKey, + mask(Rest, MaskKey, << Acc/binary, T:32 >>); +mask(<< O:24 >>, MaskKey, Acc) -> + << MaskKey2:24, _:8 >> = << MaskKey:32 >>, + T = O bxor MaskKey2, + << Acc/binary, T:24 >>; +mask(<< O:16 >>, MaskKey, Acc) -> + << MaskKey2:16, _:16 >> = << MaskKey:32 >>, + T = O bxor MaskKey2, + << Acc/binary, T:16 >>; +mask(<< O:8 >>, MaskKey, Acc) -> + << MaskKey2:8, _:24 >> = << MaskKey:32 >>, + T = O bxor MaskKey2, + << Acc/binary, T:8 >>. + +inflate_frame(Data, Inflate, TakeOver, FragState, true) + when FragState =:= undefined; element(1, FragState) =:= fin -> + Data2 = zlib:inflate(Inflate, << Data/binary, 0, 0, 255, 255 >>), + case TakeOver of + no_takeover -> zlib:inflateReset(Inflate); + takeover -> ok + end, + iolist_to_binary(Data2); +inflate_frame(Data, Inflate, _T, _F, _E) -> + iolist_to_binary(zlib:inflate(Inflate, Data)). + +%% The Utf8State variable can be set to 'undefined' to disable the validation. +validate_payload(Payload, _, undefined, _, _, _, false) -> + {more, Payload, undefined}; +validate_payload(Payload, Rest, undefined, _, _, _, true) -> + {ok, Payload, undefined, Rest}; +%% Text frames and close control frames MUST have a payload that is valid UTF-8. 
+validate_payload(Payload, Rest, Utf8State, _, Type, _, Eof) when Type =:= text; Type =:= close -> + case validate_utf8(Payload, Utf8State) of + 1 -> {error, badencoding}; + Utf8State2 when not Eof -> {more, Payload, Utf8State2}; + 0 when Eof -> {ok, Payload, 0, Rest}; + _ -> {error, badencoding} + end; +validate_payload(Payload, Rest, Utf8State, _, fragment, {Fin, text, _}, Eof) -> + case validate_utf8(Payload, Utf8State) of + 1 -> {error, badencoding}; + 0 when Eof -> {ok, Payload, 0, Rest}; + Utf8State2 when Eof, Fin =:= nofin -> {ok, Payload, Utf8State2, Rest}; + Utf8State2 when not Eof -> {more, Payload, Utf8State2}; + _ -> {error, badencoding} + end; +validate_payload(Payload, _, Utf8State, _, _, _, false) -> + {more, Payload, Utf8State}; +validate_payload(Payload, Rest, Utf8State, _, _, _, true) -> + {ok, Payload, Utf8State, Rest}. + +%% Based on the Flexible and Economical UTF-8 Decoder algorithm by +%% Bjoern Hoehrmann (http://bjoern.hoehrmann.de/utf-8/decoder/dfa/). +%% +%% The original algorithm has been unrolled into all combinations of values for C and State +%% each with a clause. The common clauses were then grouped together. +%% +%% This function returns 0 on success, 1 on error, and 2..8 on incomplete data. +validate_utf8(<<>>, State) -> State; +validate_utf8(<< C, Rest/bits >>, 0) when C < 128 -> validate_utf8(Rest, 0); +validate_utf8(<< C, Rest/bits >>, 2) when C >= 128, C < 144 -> validate_utf8(Rest, 0); +validate_utf8(<< C, Rest/bits >>, 3) when C >= 128, C < 144 -> validate_utf8(Rest, 2); +validate_utf8(<< C, Rest/bits >>, 5) when C >= 128, C < 144 -> validate_utf8(Rest, 2); +validate_utf8(<< C, Rest/bits >>, 7) when C >= 128, C < 144 -> validate_utf8(Rest, 3); +validate_utf8(<< C, Rest/bits >>, 8) when C >= 128, C < 144 -> validate_utf8(Rest, 3); +validate_utf8(<< C, Rest/bits >>, 2) when C >= 144, C < 160 -> validate_utf8(Rest, 0); +validate_utf8(<< C, Rest/bits >>, 3) when C >= 144, C < 160 -> validate_utf8(Rest, 2); +validate_utf8(<< C, Rest/bits >>, 5) when C >= 144, C < 160 -> validate_utf8(Rest, 2); +validate_utf8(<< C, Rest/bits >>, 6) when C >= 144, C < 160 -> validate_utf8(Rest, 3); +validate_utf8(<< C, Rest/bits >>, 7) when C >= 144, C < 160 -> validate_utf8(Rest, 3); +validate_utf8(<< C, Rest/bits >>, 2) when C >= 160, C < 192 -> validate_utf8(Rest, 0); +validate_utf8(<< C, Rest/bits >>, 3) when C >= 160, C < 192 -> validate_utf8(Rest, 2); +validate_utf8(<< C, Rest/bits >>, 4) when C >= 160, C < 192 -> validate_utf8(Rest, 2); +validate_utf8(<< C, Rest/bits >>, 6) when C >= 160, C < 192 -> validate_utf8(Rest, 3); +validate_utf8(<< C, Rest/bits >>, 7) when C >= 160, C < 192 -> validate_utf8(Rest, 3); +validate_utf8(<< C, Rest/bits >>, 0) when C >= 194, C < 224 -> validate_utf8(Rest, 2); +validate_utf8(<< 224, Rest/bits >>, 0) -> validate_utf8(Rest, 4); +validate_utf8(<< C, Rest/bits >>, 0) when C >= 225, C < 237 -> validate_utf8(Rest, 3); +validate_utf8(<< 237, Rest/bits >>, 0) -> validate_utf8(Rest, 5); +validate_utf8(<< C, Rest/bits >>, 0) when C =:= 238; C =:= 239 -> validate_utf8(Rest, 3); +validate_utf8(<< 240, Rest/bits >>, 0) -> validate_utf8(Rest, 6); +validate_utf8(<< C, Rest/bits >>, 0) when C =:= 241; C =:= 242; C =:= 243 -> validate_utf8(Rest, 7); +validate_utf8(<< 244, Rest/bits >>, 0) -> validate_utf8(Rest, 8); +validate_utf8(_, _) -> 1. + +%% @doc Return a frame tuple from parsed state and data. + +-spec make_frame(frame_type(), binary(), close_code(), frag_state()) -> frame(). +%% Fragmented frame. 
+make_frame(fragment, Payload, _, {Fin, Type, _}) -> {fragment, Fin, Type, Payload}; +make_frame(text, Payload, _, _) -> {text, Payload}; +make_frame(binary, Payload, _, _) -> {binary, Payload}; +make_frame(close, <<>>, undefined, _) -> close; +make_frame(close, Payload, CloseCode, _) -> {close, CloseCode, Payload}; +make_frame(ping, <<>>, _, _) -> ping; +make_frame(ping, Payload, _, _) -> {ping, Payload}; +make_frame(pong, <<>>, _, _) -> pong; +make_frame(pong, Payload, _, _) -> {pong, Payload}. + +%% @doc Construct an unmasked Websocket frame. + +-spec frame(frame(), extensions()) -> iodata(). +%% Control frames. Control packets must not be > 125 in length. +frame(close, _) -> + << 1:1, 0:3, 8:4, 0:8 >>; +frame(ping, _) -> + << 1:1, 0:3, 9:4, 0:8 >>; +frame(pong, _) -> + << 1:1, 0:3, 10:4, 0:8 >>; +frame({close, Payload}, Extensions) -> + frame({close, 1000, Payload}, Extensions); +frame({close, StatusCode, Payload}, _) -> + Len = 2 + iolist_size(Payload), + true = Len =< 125, + [<< 1:1, 0:3, 8:4, 0:1, Len:7, StatusCode:16 >>, Payload]; +frame({ping, Payload}, _) -> + Len = iolist_size(Payload), + true = Len =< 125, + [<< 1:1, 0:3, 9:4, 0:1, Len:7 >>, Payload]; +frame({pong, Payload}, _) -> + Len = iolist_size(Payload), + true = Len =< 125, + [<< 1:1, 0:3, 10:4, 0:1, Len:7 >>, Payload]; +%% Data frames, deflate-frame extension. +frame({text, Payload}, #{deflate := Deflate, deflate_takeover := TakeOver}) + when Deflate =/= false -> + Payload2 = deflate_frame(Payload, Deflate, TakeOver), + Len = payload_length(Payload2), + [<< 1:1, 1:1, 0:2, 1:4, 0:1, Len/bits >>, Payload2]; +frame({binary, Payload}, #{deflate := Deflate, deflate_takeover := TakeOver}) + when Deflate =/= false -> + Payload2 = deflate_frame(Payload, Deflate, TakeOver), + Len = payload_length(Payload2), + [<< 1:1, 1:1, 0:2, 2:4, 0:1, Len/bits >>, Payload2]; +%% Data frames. +frame({text, Payload}, _) -> + Len = payload_length(Payload), + [<< 1:1, 0:3, 1:4, 0:1, Len/bits >>, Payload]; +frame({binary, Payload}, _) -> + Len = payload_length(Payload), + [<< 1:1, 0:3, 2:4, 0:1, Len/bits >>, Payload]. + +%% @doc Construct a masked Websocket frame. +%% +%% We use a mask key of 0 if there is no payload for close, ping and pong frames. + +-spec masked_frame(frame(), extensions()) -> iodata(). +%% Control frames. Control packets must not be > 125 in length. +masked_frame(close, _) -> + << 1:1, 0:3, 8:4, 1:1, 0:39 >>; +masked_frame(ping, _) -> + << 1:1, 0:3, 9:4, 1:1, 0:39 >>; +masked_frame(pong, _) -> + << 1:1, 0:3, 10:4, 1:1, 0:39 >>; +masked_frame({close, Payload}, Extensions) -> + frame({close, 1000, Payload}, Extensions); +masked_frame({close, StatusCode, Payload}, _) -> + Len = 2 + iolist_size(Payload), + true = Len =< 125, + MaskKeyBin = << MaskKey:32 >> = crypto:strong_rand_bytes(4), + [<< 1:1, 0:3, 8:4, 1:1, Len:7 >>, MaskKeyBin, mask(iolist_to_binary([<< StatusCode:16 >>, Payload]), MaskKey, <<>>)]; +masked_frame({ping, Payload}, _) -> + Len = iolist_size(Payload), + true = Len =< 125, + MaskKeyBin = << MaskKey:32 >> = crypto:strong_rand_bytes(4), + [<< 1:1, 0:3, 9:4, 1:1, Len:7 >>, MaskKeyBin, mask(iolist_to_binary(Payload), MaskKey, <<>>)]; +masked_frame({pong, Payload}, _) -> + Len = iolist_size(Payload), + true = Len =< 125, + MaskKeyBin = << MaskKey:32 >> = crypto:strong_rand_bytes(4), + [<< 1:1, 0:3, 10:4, 1:1, Len:7 >>, MaskKeyBin, mask(iolist_to_binary(Payload), MaskKey, <<>>)]; +%% Data frames, deflate-frame extension. 
+masked_frame({text, Payload}, #{deflate := Deflate, deflate_takeover := TakeOver}) + when Deflate =/= false -> + MaskKeyBin = << MaskKey:32 >> = crypto:strong_rand_bytes(4), + Payload2 = mask(deflate_frame(Payload, Deflate, TakeOver), MaskKey, <<>>), + Len = payload_length(Payload2), + [<< 1:1, 1:1, 0:2, 1:4, 1:1, Len/bits >>, MaskKeyBin, Payload2]; +masked_frame({binary, Payload}, #{deflate := Deflate, deflate_takeover := TakeOver}) + when Deflate =/= false -> + MaskKeyBin = << MaskKey:32 >> = crypto:strong_rand_bytes(4), + Payload2 = mask(deflate_frame(Payload, Deflate, TakeOver), MaskKey, <<>>), + Len = payload_length(Payload2), + [<< 1:1, 1:1, 0:2, 2:4, 1:1, Len/bits >>, MaskKeyBin, Payload2]; +%% Data frames. +masked_frame({text, Payload}, _) -> + MaskKeyBin = << MaskKey:32 >> = crypto:strong_rand_bytes(4), + Len = payload_length(Payload), + [<< 1:1, 0:3, 1:4, 1:1, Len/bits >>, MaskKeyBin, mask(iolist_to_binary(Payload), MaskKey, <<>>)]; +masked_frame({binary, Payload}, _) -> + MaskKeyBin = << MaskKey:32 >> = crypto:strong_rand_bytes(4), + Len = payload_length(Payload), + [<< 1:1, 0:3, 2:4, 1:1, Len/bits >>, MaskKeyBin, mask(iolist_to_binary(Payload), MaskKey, <<>>)]. + +payload_length(Payload) -> + case iolist_size(Payload) of + N when N =< 125 -> << N:7 >>; + N when N =< 16#ffff -> << 126:7, N:16 >>; + N when N =< 16#7fffffffffffffff -> << 127:7, N:64 >> + end. + +deflate_frame(Payload, Deflate, TakeOver) -> + Deflated = iolist_to_binary(zlib:deflate(Deflate, Payload, sync)), + case TakeOver of + no_takeover -> zlib:deflateReset(Deflate); + takeover -> ok + end, + Len = byte_size(Deflated) - 4, + case Deflated of + << Body:Len/binary, 0:8, 0:8, 255:8, 255:8 >> -> Body; + _ -> Deflated + end. diff --git a/deps/db_connection/.fetch b/deps/db_connection/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/db_connection/.formatter.exs b/deps/db_connection/.formatter.exs new file mode 100644 index 0000000..d2cda26 --- /dev/null +++ b/deps/db_connection/.formatter.exs @@ -0,0 +1,4 @@ +# Used by "mix format" +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] +] diff --git a/deps/db_connection/.hex b/deps/db_connection/.hex new file mode 100644 index 0000000000000000000000000000000000000000..99efe2ef66d06afb8ee22215c766d32727bb22f1 GIT binary patch literal 277 zcmZ9{%T5C^3h#X*`AhHJDca`iM>Y4I8sCVQnvZ2sxoq>&wZ(GT{>w|D|F(&0!J}o!yL4Q8AYM#Eel7fF39Q*-(0kCUK AW&i*H literal 0 HcmV?d00001 diff --git a/deps/db_connection/CHANGELOG.md b/deps/db_connection/CHANGELOG.md new file mode 100644 index 0000000..67609ba --- /dev/null +++ b/deps/db_connection/CHANGELOG.md @@ -0,0 +1,64 @@ +# Changelog + +## v2.4.2 (2022-03-03) + +* Enhancements + * Add `DBConnection.connection_module/1` + +## v2.4.1 (2021-10-14) + +* Enhancements + * Add `DBConnection.disconnect_all/2` + +## v2.4.0 (2021-04-02) + +* Enhancements + * Add telemetry events for connection errors + * Use `:rand` default algorithm + * Allow decentralized lookups on DBConnection.Ownership + +## v2.3.1 (2020-11-25) + +* Enhancements + * Add `:connection_listeners` to `DBConnection.start_link/2` + * Allow connection `~> 1.0` + +## v2.3.0 (2020-10-14) + +This release requires Elixir v1.7+. 
+ +* Bug fixes + * Fix deprecation warnings related to the use of `System.stacktrace()` + +## v2.2.2 (2020-04-22) + +* Bug fixes + * Make sure all idle connections in the pool are pinged on each idle interval + +## v2.2.1 (2020-02-04) + +* Enhancements + * Remove warnings + +## v2.2.0 (2019-12-11) + +* Enhancements + * Add `:idle_time` to `DBConnection.LogEntry` + * Ping all stale connections on idle interval + * Add `crash_reason` to relevant Logger error reports + * Ping all stale connections on idle interval. One possible downside of this approach is that we may shut down all connections at once and if there is a request around this time, the response time will be higher. However, this is likely better than the current approach, where we ping only the first one, which means we can have a pool of stale connections. The current behaviour is the same as in v1.0 + +## v2.1.1 (2019-07-17) + +* Enhancements + * Reduce severity in client exits to info + * Improve error message on redirect checkout + +* Bug fixes + * Make sure ownership timeout is respected on automatic checkouts + +## v2.1.0 (2019-06-07) + +* Enhancements + * Require Elixir v1.6+ + * Include client stacktrace on check out timeouts diff --git a/deps/db_connection/README.md b/deps/db_connection/README.md new file mode 100644 index 0000000..0cd937d --- /dev/null +++ b/deps/db_connection/README.md @@ -0,0 +1,100 @@ +# DBConnection + +Database connection behaviour and database connection pool designed for +handling transaction, prepare/execute, cursors and client process +describe/encode/decode. + +Examples of using the `DBConnection` behaviour are available in +`./examples/db_agent/` and `./examples/tcp_connection/`. + +There is also [a series of articles on building database adapters](http://blog.plataformatec.com.br/2018/11/building-a-new-mysql-adapter-for-ecto-part-i-hello-world/). It includes articles covering both DBConnection and Ecto integrations. + +## Contributing + +Run unit tests with: + + $ mix test + +To run the integration tests (for each available pool): + + $ mix test.pools + +To run all tests: + + $ mix test.all + +## Design + +This library is made of four main modules: + + * `DBConnection` - this is the code running on the client + and the specification of the DBConnection API + + * `DBConnection.Connection` - this is the process that + establishes the database connection + + * `DBConnection.ConnectionPool` - this is the connection + pool. A client asks the connection pool for a connection. + There is also an ownership pool, used mostly during tests, + which we won't discuss here. + + * `DBConnection.Holder` - the holder is responsible for + keeping the connection and checkout state. It is modelled + by using an ETS table. + +Once a connection is created, it creates a holder and +assigns the connection pool as the heir. Then the holder +is promptly given away to the pool. The connection itself +is mostly a dummy. It is there to handle connections and pings. +The state itself (such as the socket) is all in the holder. + +Once there is a checkout, the pool gives the holder to the +client process and stores all relevant information in the +holder table itself. If the client terminates without +checking in, then the holder is given back to the pool via +the heir mechanism. The pool will then discard the connection. + +One important design detail in DBConnection is that it avoids +copying data. 
Other database libraries would send a request +to the connection process, perform the query in the connection +process, and then send it back to the client. This means a lot of +data copying in Elixir. DBConnection keeps the socket in the +holder and works on it directly. + +DBConnection also takes all of the care necessary to handle +failures, and it shuts down the connection and the socket +whenever the client does not check in the connection to avoid +recycling sockets/connections in a corrupted state (such as a socket +that is stuck inside a transaction). + +### Deadlines + +When a checkout happens, a deadline is started by the client +to send a message to the pool after a time interval. If the +deadline is reached and the connection is still checked out, +the holder is deleted and the connection is terminated. If the +client tries to use a terminated connection, an error will +be raised (see `Holder.handle/4`). + +### Pool + +The queuing algorithm used by the pool is [CoDel](https://queue.acm.org/appendices/codel.html) +which allows us to plan for overloads and reject requests +without clogging the pool once checkouts do not read a certain +target. + +## License + +Copyright 2015 James Fish + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/deps/db_connection/hex_metadata.config b/deps/db_connection/hex_metadata.config new file mode 100644 index 0000000..63871ed --- /dev/null +++ b/deps/db_connection/hex_metadata.config @@ -0,0 +1,35 @@ +{<<"app">>,<<"db_connection">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>, + <<"Database connection behaviour for database transactions and connection pooling">>}. +{<<"elixir">>,<<"~> 1.7">>}. +{<<"files">>, + [<<"lib">>,<<"lib/db_connection">>,<<"lib/db_connection/backoff.ex">>, + <<"lib/db_connection/log_entry.ex">>,<<"lib/db_connection/ownership.ex">>, + <<"lib/db_connection/connection_pool">>, + <<"lib/db_connection/connection_pool/pool.ex">>, + <<"lib/db_connection/task.ex">>,<<"lib/db_connection/query.ex">>, + <<"lib/db_connection/holder.ex">>,<<"lib/db_connection/ownership">>, + <<"lib/db_connection/ownership/proxy.ex">>, + <<"lib/db_connection/ownership/manager.ex">>, + <<"lib/db_connection/watcher.ex">>, + <<"lib/db_connection/connection_pool.ex">>, + <<"lib/db_connection/connection.ex">>,<<"lib/db_connection/app.ex">>, + <<"lib/db_connection.ex">>,<<".formatter.exs">>,<<"mix.exs">>, + <<"README.md">>,<<"CHANGELOG.md">>]}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"links">>, + [{<<"GitHub">>,<<"https://github.com/elixir-ecto/db_connection">>}]}. +{<<"name">>,<<"db_connection">>}. +{<<"requirements">>, + [[{<<"app">>,<<"connection">>}, + {<<"name">>,<<"connection">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 1.0">>}], + [{<<"app">>,<<"telemetry">>}, + {<<"name">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 0.4 or ~> 1.0">>}]]}. +{<<"version">>,<<"2.4.2">>}. 
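The metadata above pins db_connection 2.4.2 and declares its two runtime requirements. A hedged sketch of how the same constraints would look if declared directly in a `mix.exs`; in a freshly generated Phoenix app these packages usually arrive transitively (for example via postgrex), so an explicit entry is optional:

    # Hypothetical mix.exs excerpt mirroring the hex_metadata.config requirements.
    defp deps do
      [
        {:db_connection, "~> 2.4"},
        {:connection, "~> 1.0"},
        {:telemetry, "~> 0.4 or ~> 1.0"}
      ]
    end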
diff --git a/deps/db_connection/lib/db_connection.ex b/deps/db_connection/lib/db_connection.ex new file mode 100644 index 0000000..d783944 --- /dev/null +++ b/deps/db_connection/lib/db_connection.ex @@ -0,0 +1,1804 @@ +defmodule DBConnection.Stream do + defstruct [:conn, :query, :params, :opts] + + @type t :: %__MODULE__{conn: DBConnection.conn(), query: any, params: any, opts: Keyword.t()} +end + +defimpl Enumerable, for: DBConnection.Stream do + def count(_), do: {:error, __MODULE__} + + def member?(_, _), do: {:error, __MODULE__} + + def slice(_), do: {:error, __MODULE__} + + def reduce(stream, acc, fun), do: DBConnection.reduce(stream, acc, fun) +end + +defmodule DBConnection.PrepareStream do + defstruct [:conn, :query, :params, :opts] + + @type t :: %__MODULE__{conn: DBConnection.conn(), query: any, params: any, opts: Keyword.t()} +end + +defimpl Enumerable, for: DBConnection.PrepareStream do + def count(_), do: {:error, __MODULE__} + + def member?(_, _), do: {:error, __MODULE__} + + def slice(_), do: {:error, __MODULE__} + + def reduce(stream, acc, fun), do: DBConnection.reduce(stream, acc, fun) +end + +defmodule DBConnection do + @moduledoc """ + A behaviour module for implementing efficient database connection + client processes, pools and transactions. + + `DBConnection` handles callbacks differently to most behaviours. Some + callbacks will be called in the calling process, with the state + copied to and from the calling process. This is useful when the data + for a request is large and means that a calling process can interact + with a socket directly. + + A side effect of this is that query handling can be written in a + simple blocking fashion, while the connection process itself will + remain responsive to OTP messages and can enqueue and cancel queued + requests. + + If a request or series of requests takes too long to handle in the + client process a timeout will trigger and the socket can be cleanly + disconnected by the connection process. + + If a calling process waits too long to start its request it will + timeout and its request will be cancelled. This prevents requests + building up when the database can not keep up. + + If no requests are received for an idle interval, the pool will + ping all stale connections which can then ping the database to keep + the connection alive. + + Should the connection be lost, attempts will be made to reconnect with + (configurable) exponential random backoff to reconnect. All state is + lost when a connection disconnects but the process is reused. + + The `DBConnection.Query` protocol provide utility functions so that + queries can be encoded and decoded without blocking the connection or pool. + """ + require Logger + + alias DBConnection.Holder + + require Holder + + defstruct [:pool_ref, :conn_ref, :conn_mode] + + defmodule EncodeError do + defexception [:message] + end + + defmodule TransactionError do + defexception [:status, :message] + + def exception(:idle), + do: %__MODULE__{status: :idle, message: "transaction is not started"} + + def exception(:transaction), + do: %__MODULE__{status: :transaction, message: "transaction is already started"} + + def exception(:error), + do: %__MODULE__{status: :error, message: "transaction is aborted"} + end + + @typedoc """ + Run or transaction connection reference. 
+ """ + @type t :: %__MODULE__{pool_ref: any, conn_ref: reference} + @type conn :: GenServer.server() | t + @type query :: DBConnection.Query.t() + @type params :: any + @type result :: any + @type cursor :: any + @type status :: :idle | :transaction | :error + + @type start_option :: + {:after_connect, (t -> any) | {module, atom, [any]} | nil} + | {:after_connect_timeout, timeout} + | {:connection_listeners, list(Process.dest()) | nil} + | {:backoff_max, non_neg_integer} + | {:backoff_min, non_neg_integer} + | {:backoff_type, :stop | :exp | :rand | :rand_exp} + | {:configure, (keyword -> keyword) | {module, atom, [any]} | nil} + | {:idle_interval, non_neg_integer} + | {:max_restarts, non_neg_integer} + | {:max_seconds, pos_integer} + | {:name, GenServer.name()} + | {:pool, module} + | {:pool_size, pos_integer} + | {:queue_interval, non_neg_integer} + | {:queue_target, non_neg_integer} + | {:show_sensitive_data_on_connection_error, boolean} + + @type option :: + {:log, (DBConnection.LogEntry.t() -> any) | {module, atom, [any]} | nil} + | {:queue, boolean} + | {:timeout, timeout} + | {:deadline, integer | nil} + + @doc """ + Connect to the database. Return `{:ok, state}` on success or + `{:error, exception}` on failure. + + If an error is returned it will be logged and another + connection attempt will be made after a backoff interval. + + This callback is called in the connection process. + """ + @callback connect(opts :: Keyword.t()) :: + {:ok, state :: any} | {:error, Exception.t()} + + @doc """ + Checkouts the state from the connection process. Return `{:ok, state}` + to allow the checkout or `{:disconnect, exception, state}` to disconnect. + + This callback is called immediately after the connection is established + and the state is never effetively checked in again. That's because + DBConnection keeps the connection state in an ETS table that is moved + between the different clients checking out connections. There is no + `checkin` callback. The state is only handed back to the connection + process during pings and (re)connects. + + This callback is called in the connection process. + """ + @callback checkout(state :: any) :: + {:ok, new_state :: any} | {:disconnect, Exception.t(), new_state :: any} + + @doc """ + Called when the connection has been idle for a period of time. Return + `{:ok, state}` to continue or `{:disconnect, exception, state}` to + disconnect. + + This callback is called if no callbacks have been called after the + idle timeout and a client process is not using the state. The idle + timeout can be configured by the `:idle_interval` option. This function + can be called whether the connection is checked in or checked out. + + This callback is called in the connection process. + """ + @callback ping(state :: any) :: + {:ok, new_state :: any} | {:disconnect, Exception.t(), new_state :: any} + + @doc """ + Handle the beginning of a transaction. + + Return `{:ok, result, state}` to continue, `{status, state}` to notify caller + that the transaction can not begin due to the transaction status `status`, + `{:error, exception, state}` (deprecated) to error without beginning the + transaction, or `{:disconnect, exception, state}` to error and disconnect. + + A callback implementation should only return `status` if it + can determine the database's transaction status without side effect. + + This callback is called in the client process. 
+ """ + @callback handle_begin(opts :: Keyword.t(), state :: any) :: + {:ok, result, new_state :: any} + | {status, new_state :: any} + | {:disconnect, Exception.t(), new_state :: any} + + @doc """ + Handle committing a transaction. Return `{:ok, result, state}` on successfully + committing transaction, `{status, state}` to notify caller that the + transaction can not commit due to the transaction status `status`, + `{:error, exception, state}` (deprecated) to error and no longer be inside + transaction, or `{:disconnect, exception, state}` to error and disconnect. + + A callback implementation should only return `status` if it + can determine the database's transaction status without side effect. + + This callback is called in the client process. + """ + @callback handle_commit(opts :: Keyword.t(), state :: any) :: + {:ok, result, new_state :: any} + | {status, new_state :: any} + | {:disconnect, Exception.t(), new_state :: any} + + @doc """ + Handle rolling back a transaction. Return `{:ok, result, state}` on successfully + rolling back transaction, `{status, state}` to notify caller that the + transaction can not rollback due to the transaction status `status`, + `{:error, exception, state}` (deprecated) to + error and no longer be inside transaction, or + `{:disconnect, exception, state}` to error and disconnect. + + A callback implementation should only return `status` if it + can determine the database' transaction status without side effect. + + This callback is called in the client and connection process. + """ + @callback handle_rollback(opts :: Keyword.t(), state :: any) :: + {:ok, result, new_state :: any} + | {status, new_state :: any} + | {:disconnect, Exception.t(), new_state :: any} + + @doc """ + Handle getting the transaction status. Return `{:idle, state}` if outside a + transaction, `{:transaction, state}` if inside a transaction, + `{:error, state}` if inside an aborted transaction, or + `{:disconnect, exception, state}` to error and disconnect. + + If the callback returns a `:disconnect` tuples then `status/2` will return + `:error`. + """ + @callback handle_status(opts :: Keyword.t(), state :: any) :: + {status, new_state :: any} + | {:disconnect, Exception.t(), new_state :: any} + + @doc """ + Prepare a query with the database. Return `{:ok, query, state}` where + `query` is a query to pass to `execute/4` or `close/3`, + `{:error, exception, state}` to return an error and continue or + `{:disconnect, exception, state}` to return an error and disconnect. + + This callback is intended for cases where the state of a connection is + needed to prepare a query and/or the query can be saved in the + database to call later. + + This callback is called in the client process. + """ + @callback handle_prepare(query, opts :: Keyword.t(), state :: any) :: + {:ok, query, new_state :: any} + | {:error | :disconnect, Exception.t(), new_state :: any} + + @doc """ + Execute a query prepared by `c:handle_prepare/3`. Return + `{:ok, query, result, state}` to return altered query `query` and result + `result` and continue, `{:error, exception, state}` to return an error and + continue or `{:disconnect, exception, state}` to return an error and + disconnect. + + This callback is called in the client process. + """ + @callback handle_execute(query, params, opts :: Keyword.t(), state :: any) :: + {:ok, query, result, new_state :: any} + | {:error | :disconnect, Exception.t(), new_state :: any} + + @doc """ + Close a query prepared by `c:handle_prepare/3` with the database. 
Return + `{:ok, result, state}` on success and to continue, + `{:error, exception, state}` to return an error and continue, or + `{:disconnect, exception, state}` to return an error and disconnect. + + This callback is called in the client process. + """ + @callback handle_close(query, opts :: Keyword.t(), state :: any) :: + {:ok, result, new_state :: any} + | {:error | :disconnect, Exception.t(), new_state :: any} + + @doc """ + Declare a cursor using a query prepared by `c:handle_prepare/3`. Return + `{:ok, query, cursor, state}` to return altered query `query` and cursor + `cursor` for a stream and continue, `{:error, exception, state}` to return an + error and continue or `{:disconnect, exception, state}` to return an error + and disconnect. + + This callback is called in the client process. + """ + @callback handle_declare(query, params, opts :: Keyword.t(), state :: any) :: + {:ok, query, cursor, new_state :: any} + | {:error | :disconnect, Exception.t(), new_state :: any} + + @doc """ + Fetch the next result from a cursor declared by `c:handle_declare/4`. Return + `{:cont, result, state}` to return the result `result` and continue using + cursor, `{:halt, result, state}` to return the result `result` and close the + cursor, `{:error, exception, state}` to return an error and close the + cursor, `{:disconnect, exception, state}` to return an error and disconnect. + + This callback is called in the client process. + """ + @callback handle_fetch(query, cursor, opts :: Keyword.t(), state :: any) :: + {:cont | :halt, result, new_state :: any} + | {:error | :disconnect, Exception.t(), new_state :: any} + + @doc """ + Deallocate a cursor declared by `c:handle_declare/4` with the database. Return + `{:ok, result, state}` on success and to continue, + `{:error, exception, state}` to return an error and continue, or + `{:disconnect, exception, state}` to return an error and disconnect. + + This callback is called in the client process. + """ + @callback handle_deallocate(query, cursor, opts :: Keyword.t(), state :: any) :: + {:ok, result, new_state :: any} + | {:error | :disconnect, Exception.t(), new_state :: any} + + @doc """ + Disconnect from the database. Return `:ok`. + + The exception as first argument is the exception from a `:disconnect` + 3-tuple returned by a previous callback. + + If the state is controlled by a client and it exits or takes too long + to process a request the state will be last known state. In these + cases the exception will be a `DBConnection.ConnectionError`. + + This callback is called in the connection process. + """ + @callback disconnect(err :: Exception.t(), state :: any) :: :ok + + @connection_module_key :connection_module + + @doc """ + Use `DBConnection` to set the behaviour. + """ + defmacro __using__(_) do + quote location: :keep do + @behaviour DBConnection + end + end + + @doc """ + Starts and links to a database connection process. + + By default the `DBConnection` starts a pool with a single connection. + The size of the pool can be increased with `:pool_size`. A separate + pool can be given with the `:pool` option. 
+ + ### Options + + * `:backoff_min` - The minimum backoff interval (default: `1_000`) + * `:backoff_max` - The maximum backoff interval (default: `30_000`) + * `:backoff_type` - The backoff strategy, `:stop` for no backoff and + to stop, `:exp` for exponential, `:rand` for random and `:rand_exp` for + random exponential (default: `:rand_exp`) + * `:configure` - A function to run before every connect attempt to + dynamically configure the options, either a 1-arity fun, + `{module, function, args}` with options prepended to `args` or `nil` where + only returned options are passed to connect callback (default: `nil`) + * `:after_connect` - A function to run on connect using `run/3`, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.t/0` prepended + to `args` or `nil` (default: `nil`) + * `:after_connect_timeout` - The maximum time allowed to perform + function specified by `:after_connect` option (default: `15_000`) + * `:connection_listeners` - A list of process destinations to send + notification messages whenever a connection is connected or disconnected. + See "Connection listeners" below + * `:name` - A name to register the started process (see the `:name` option + in `GenServer.start_link/3`) + * `:pool` - Chooses the pool to be started (default: `DBConnection.ConnectionPool`) + * `:pool_size` - Chooses the size of the pool + * `:idle_interval` - Controls the frequency we check for idle connections + in the pool. We then notify each idle connection to ping the database. + In practice, the ping happens within `idle_interval <= ping < 2 * idle_interval`. + Defaults to 1000ms. + * `:queue_target` and `:queue_interval` - See "Queue config" below + * `:max_restarts` and `:max_seconds` - Configures the `:max_restarts` and + `:max_seconds` for the connection pool supervisor (see the `Supervisor` docs). + Typically speaking the connection process doesn't terminate, except due to + faults in DBConnection. However, if backoff has been disabled, then they + also terminate whenever a connection is disconnected (for instance, due to + client or server errors) + * `:show_sensitive_data_on_connection_error` - By default, `DBConnection` + hides all information during connection errors to avoid leaking credentials + or other sensitive information. You can set this option if you wish to + see complete errors and stacktraces during connection errors + + ### Example + + {:ok, conn} = DBConnection.start_link(mod, [idle_interval: 5_000]) + + ## Queue config + + Handling requests is done through a queue. When DBConnection is + started, there are two relevant options to control the queue: + + * `:queue_target` in milliseconds, defaults to 50ms + * `:queue_interval` in milliseconds, defaults to 1000ms + + Our goal is to wait at most `:queue_target` for a connection. + If all connections checked out during a `:queue_interval` takes + more than `:queue_target`, then we double the `:queue_target`. + If checking out connections take longer than the new target, + then we start dropping messages. + + For example, by default our target is 50ms. If all connections + checkouts take longer than 50ms for a whole second, we double + the target to 100ms and we start dropping messages if the + time to checkout goes above the new limit. + + This allows us to better plan for overloads as we can refuse + requests before they are sent to the database, which would + otherwise increase the burden on the database, making the + overload worse. 
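To make the back-pressure rule above concrete, here is a purely illustrative Elixir sketch of the target-doubling decision. It is not DBConnection's implementation (the real logic lives in `DBConnection.ConnectionPool`), and the module, function and variable names are invented for the example:

    # Illustrative only: the rule from the "Queue config" section above.
    # checkout_times_ms are the checkout durations observed during one
    # :queue_interval; target_ms is the current :queue_target.
    defmodule QueueTargetSketch do
      def next_target(checkout_times_ms, target_ms) do
        if checkout_times_ms != [] and Enum.all?(checkout_times_ms, &(&1 > target_ms)) do
          # Every checkout exceeded the target for the whole interval:
          # double the target and start dropping requests above the new limit.
          {:drop_above, target_ms * 2}
        else
          {:keep, target_ms}
        end
      end
    end

    QueueTargetSketch.next_target([60, 70, 80], 50)
    #=> {:drop_above, 100}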
+ + ## Connection listeners + + The `:connection_listeners` option allows one or more processes to be notified + whenever a connection is connected or disconnected. A listener may be a remote + or local PID, a locally registered name, or a tuple in the form of + `{registered_name, node}` for a registered name at another node. + + Each listener process may receive the following messages where `pid` + identifies the connection process: + + * `{:connected, pid}` + * `{:disconnected, pid}` + + ## Telemetry + + A `[:db_connection, :connection_error]` event is published whenever a connection checkout + receives a `%DBConnection.ConnectionError{}`. + + Measurements: + + * `:count` - A fixed-value measurement which always measures 1. + + Metadata + + * `:error` - The `DBConnection.ConnectionError` struct which triggered the event. + + * `:opts` - All options given to the pool operation + + """ + @spec start_link(module, opts :: Keyword.t()) :: GenServer.on_start() + def start_link(conn_mod, opts) do + case child_spec(conn_mod, opts) do + {_, {m, f, args}, _, _, _, _} -> apply(m, f, args) + %{start: {m, f, args}} -> apply(m, f, args) + end + end + + @doc """ + Creates a supervisor child specification for a pool of connections. + + See `start_link/2` for options. + """ + @spec child_spec(module, opts :: Keyword.t()) :: :supervisor.child_spec() + def child_spec(conn_mod, opts) do + pool = Keyword.get(opts, :pool, DBConnection.ConnectionPool) + pool.child_spec({conn_mod, opts}) + end + + @doc """ + Forces all connections in the pool to disconnect within the given interval. + + Once this function is called, the pool will disconnect all of its connections + as they are checked in or as they are pinged. Checked in connections will be + randomly disconnected within the given time interval. Pinged connections are + immediately disconnected - as they are idle (according to `:idle_interval`). + + If the connection has a backoff configured (which is the case by default), + disconnecting means an attempt at a new connection will be done immediately + after, without starting a new process for each connection. However, if backoff + has been disabled, the connection process will terminate. In such cases, + disconnecting all connections may cause the pool supervisor to restart + depending on the max_restarts/max_seconds configuration of the pool, + so you will want to set those carefully. + """ + @spec disconnect_all(conn, non_neg_integer, opts :: Keyword.t()) :: :ok + def disconnect_all(conn, interval, opts \\ []) when interval >= 0 do + pool = Keyword.get(opts, :pool, DBConnection.ConnectionPool) + interval = System.convert_time_unit(interval, :millisecond, :native) + pool.disconnect_all(conn, interval, opts) + end + + @doc """ + Prepare a query with a database connection for later execution. + + It returns `{:ok, query}` on success or `{:error, exception}` if there was + an error. + + The returned `query` can then be passed to `execute/4` and/or `close/3` + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. 
+ See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. All options + are passed to `c:handle_prepare/3`. + + ### Example + + DBConnection.transaction(pool, fn conn -> + query = %Query{statement: "SELECT * FROM table"} + query = DBConnection.prepare!(conn, query) + try do + DBConnection.execute!(conn, query, []) + after + DBConnection.close(conn, query) + end + end) + + """ + @spec prepare(conn, query, opts :: Keyword.t()) :: + {:ok, query} | {:error, Exception.t()} + def prepare(conn, query, opts \\ []) do + meter = meter(opts) + + result = + with {:ok, query, meter} <- parse(query, meter, opts) do + run(conn, &run_prepare/4, query, meter, opts) + end + + log(result, :prepare, query, nil) + end + + @doc """ + Prepare a query with a database connection and return the prepared + query. An exception is raised on error. + + See `prepare/3`. + """ + @spec prepare!(conn, query, opts :: Keyword.t()) :: query + def prepare!(conn, query, opts \\ []) do + case prepare(conn, query, opts) do + {:ok, result} -> result + {:error, err} -> raise err + end + end + + @doc """ + Prepare a query and execute it with a database connection and return both the + prepared query and the result, `{:ok, query, result}` on success or + `{:error, exception}` if there was an error. + + The returned `query` can be passed to `execute/4` and `close/3`. + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + ### Example + + query = %Query{statement: "SELECT id FROM table WHERE id=$1"} + {:ok, query, result} = DBConnection.prepare_execute(conn, query, [1]) + {:ok, result2} = DBConnection.execute(conn, query, [2]) + :ok = DBConnection.close(conn, query) + """ + @spec prepare_execute(conn, query, params, Keyword.t()) :: + {:ok, query, result} + | {:error, Exception.t()} + def prepare_execute(conn, query, params, opts \\ []) do + result = + with {:ok, query, meter} <- parse(query, meter(opts), opts) do + parsed_prepare_execute(conn, query, params, meter, opts) + end + + log(result, :prepare_execute, query, params) + end + + defp parsed_prepare_execute(conn, query, params, meter, opts) do + with {:ok, query, result, meter} <- + run(conn, &run_prepare_execute/5, query, params, meter, opts), + {:ok, result, meter} <- decode(query, result, meter, opts) do + {:ok, query, result, meter} + end + end + + @doc """ + Prepare a query and execute it with a database connection and return both the + prepared query and result. An exception is raised on error. + + See `prepare_execute/4`. 
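For example, the bang variant returns the prepared query and the result directly, raising on error (the query struct is illustrative, as in the examples above):

    query = %Query{statement: "SELECT id FROM table WHERE id=$1"}
    {query, result} = DBConnection.prepare_execute!(conn, query, [1])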
+ """ + @spec prepare_execute!(conn, query, Keyword.t()) :: {query, result} + def prepare_execute!(conn, query, params, opts \\ []) do + case prepare_execute(conn, query, params, opts) do + {:ok, query, result} -> {query, result} + {:error, err} -> raise err + end + end + + @doc """ + Execute a prepared query with a database connection and return + `{:ok, query, result}` on success or `{:error, exception}` if there was an error. + + If the query is not prepared on the connection an attempt may be made to + prepare it and then execute again. + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. All options + are passed to `handle_execute/4`. + + See `prepare/3`. + """ + @spec execute(conn, query, params, opts :: Keyword.t()) :: + {:ok, query, result} | {:error, Exception.t()} + def execute(conn, query, params, opts \\ []) do + result = + case maybe_encode(query, params, meter(opts), opts) do + {:prepare, meter} -> + parsed_prepare_execute(conn, query, params, meter, opts) + + {:ok, params, meter} -> + with {:ok, query, result, meter} <- + run(conn, &run_execute/5, query, params, meter, opts), + {:ok, result, meter} <- decode(query, result, meter, opts) do + {:ok, query, result, meter} + end + + {_, _, _, _} = error -> + error + end + + log(result, :execute, query, params) + end + + @doc """ + Execute a prepared query with a database connection and return the + result. Raises an exception on error. + + See `execute/4` + """ + @spec execute!(conn, query, params, opts :: Keyword.t()) :: result + def execute!(conn, query, params, opts \\ []) do + case execute(conn, query, params, opts) do + {:ok, _query, result} -> result + {:error, err} -> raise err + end + end + + @doc """ + Close a prepared query on a database connection and return `{:ok, result}` on + success or `{:error, exception}` on error. + + This function should be used to free resources held by the connection + process and/or the database server. + + ## Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. 
All options + are passed to `c:handle_close/3`. + + See `prepare/3`. + """ + @spec close(conn, query, opts :: Keyword.t()) :: + {:ok, result} | {:error, Exception.t()} + def close(conn, query, opts \\ []) do + conn + |> run_cleanup(&run_close/4, [query], meter(opts), opts) + |> log(:close, query, nil) + end + + @doc """ + Close a prepared query on a database connection and return the result. Raises + an exception on error. + + See `close/3`. + """ + @spec close!(conn, query, opts :: Keyword.t()) :: result + def close!(conn, query, opts \\ []) do + case close(conn, query, opts) do + {:ok, result} -> result + {:error, err} -> raise err + end + end + + @doc """ + Acquire a lock on a connection and run a series of requests on it. + + The return value of this function is the return value of `fun`. + + To use the locked connection call the request with the connection + reference passed as the single argument to the `fun`. If the + connection disconnects all future calls using that connection + reference will fail. + + `run/3` and `transaction/3` can be nested multiple times but a + `transaction/3` call inside another `transaction/3` will be treated + the same as `run/3`. + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + + The pool may support other options. + + ### Example + + {:ok, res} = DBConnection.run(conn, fn conn -> + DBConnection.execute!(conn, query, []) + end) + + """ + @spec run(conn, (t -> result), opts :: Keyword.t()) :: result when result: var + def run(conn, fun, opts \\ []) + + def run(%DBConnection{} = conn, fun, _) do + fun.(conn) + end + + def run(pool, fun, opts) do + case checkout(pool, nil, opts) do + {:ok, conn, _} -> + old_status = status(conn, opts) + + try do + result = fun.(conn) + {result, run(conn, &run_status/3, nil, opts)} + catch + kind, error -> + checkin(conn) + :erlang.raise(kind, error, __STACKTRACE__) + else + {result, {:error, _, _}} -> + checkin(conn) + result + + {result, {^old_status, _meter}} -> + checkin(conn) + result + + {_result, {new_status, _meter}} -> + err = + DBConnection.ConnectionError.exception( + "connection was checked out with status #{inspect(old_status)} " <> + "but it was checked in with status #{inspect(new_status)}" + ) + + disconnect(conn, err) + raise err + + {_result, {kind, reason, stack, _meter}} -> + :erlang.raise(kind, reason, stack) + end + + {:error, err, _} -> + raise err + + {kind, reason, stack, _} -> + :erlang.raise(kind, reason, stack) + end + end + + @doc """ + Acquire a lock on a connection and run a series of requests inside a + transaction. The result of the transaction fun is return inside an `:ok` + tuple: `{:ok, result}`. + + To use the locked connection call the request with the connection + reference passed as the single argument to the `fun`. If the + connection disconnects all future calls using that connection + reference will fail. + + `run/3` and `transaction/3` can be nested multiple times. If a transaction is + rolled back or a nested transaction `fun` raises the transaction is marked as + failed. 
All calls except `run/3`, `transaction/3`, `rollback/2`, `close/3` and + `close!/3` will raise an exception inside a failed transaction until the outer + transaction call returns. All `transaction/3` calls will return + `{:error, :rollback}` if the transaction failed or connection closed and + `rollback/2` is not called for that `transaction/3`. + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about begin, commit and rollback + calls made as part of the transaction, either a 1-arity fun, + `{module, function, args}` with `t:DBConnection.LogEntry.t/0` prepended to + `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. All options + are passed to `c:handle_begin/2`, `c:handle_commit/2` and + `c:handle_rollback/2`. + + ### Example + + {:ok, res} = DBConnection.transaction(conn, fn conn -> + DBConnection.execute!(conn, query, []) + end) + """ + @spec transaction(conn, (t -> result), opts :: Keyword.t()) :: + {:ok, result} | {:error, reason :: any} + when result: var + def transaction(conn, fun, opts \\ []) + + def transaction(%DBConnection{conn_mode: :transaction} = conn, fun, _opts) do + %DBConnection{conn_ref: conn_ref} = conn + + try do + result = fun.(conn) + conclude(conn, result) + catch + :throw, {__MODULE__, ^conn_ref, reason} -> + fail(conn) + {:error, reason} + + kind, reason -> + stack = __STACKTRACE__ + fail(conn) + :erlang.raise(kind, reason, stack) + else + result -> + {:ok, result} + end + end + + def transaction(%DBConnection{} = conn, fun, opts) do + case begin(conn, &run/4, opts) do + {:ok, _} -> + run_transaction(conn, fun, &run/4, opts) + + {:error, %DBConnection.TransactionError{}} -> + {:error, :rollback} + + {:error, err} -> + raise err + end + end + + def transaction(pool, fun, opts) do + case begin(pool, &checkout/4, opts) do + {:ok, conn, _} -> + run_transaction(conn, fun, &checkin/4, opts) + + {:error, %DBConnection.TransactionError{}} -> + {:error, :rollback} + + {:error, err} -> + raise err + end + end + + @doc """ + Rollback a database transaction and release lock on connection. + + When inside of a `transaction/3` call does a non-local return, using a + `throw/1` to cause the transaction to enter a failed state and the + `transaction/3` call returns `{:error, reason}`. If `transaction/3` calls are + nested the connection is marked as failed until the outermost transaction call + does the database rollback. + + ### Example + + {:error, :oops} = DBConnection.transaction(pool, fun(conn) -> + DBConnection.rollback(conn, :oops) + end) + """ + @spec rollback(t, reason :: any) :: no_return + def rollback(conn, reason) + + def rollback(%DBConnection{conn_mode: :transaction} = conn, reason) do + %DBConnection{conn_ref: conn_ref} = conn + throw({__MODULE__, conn_ref, reason}) + end + + def rollback(%DBConnection{} = _conn, _reason) do + raise "not inside transaction" + end + + @doc """ + Return the transaction status of a connection. 
+ + The callback implementation should return the transaction status according to + the database, and not make assumptions based on client-side state. + + This function will raise a `DBConnection.ConnectionError` when called inside a + deprecated `transaction/3`. + + ### Options + + See module documentation. The pool and connection module may support other + options. All options are passed to `c:handle_status/2`. + + ### Example + + # outside of the transaction, the status is `:idle` + DBConnection.status(conn) #=> :idle + + DBConnection.transaction(conn, fn conn -> + DBConnection.status(conn) #=> :transaction + + # run a query that will cause the transaction to rollback, e.g. + # uniqueness constraint violation + DBConnection.execute(conn, bad_query, []) + + DBConnection.status(conn) #=> :error + end) + + DBConnection.status(conn) #=> :idle + """ + @spec status(conn, opts :: Keyword.t()) :: status + def status(conn, opts \\ []) do + case run(conn, &run_status/3, nil, opts) do + {status, _meter} -> + status + + {:error, _err, _meter} -> + :error + + {kind, reason, stack, _meter} -> + :erlang.raise(kind, reason, stack) + end + end + + @doc """ + Create a stream that will prepare a query, execute it and stream results + using a cursor. + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. All options + are passed to `c:handle_prepare/3`, `c:handle_close/3`, `c:handle_declare/4`, + and `c:handle_deallocate/4`. + + ### Example + + {:ok, results} = DBConnection.transaction(conn, fn conn -> + query = %Query{statement: "SELECT id FROM table"} + stream = DBConnection.prepare_stream(conn, query, []) + Enum.to_list(stream) + end) + """ + @spec prepare_stream(t, query, params, opts :: Keyword.t()) :: + DBConnection.PrepareStream.t() + def prepare_stream(%DBConnection{} = conn, query, params, opts \\ []) do + %DBConnection.PrepareStream{conn: conn, query: query, params: params, opts: opts} + end + + @doc """ + Create a stream that will execute a prepared query and stream results using a + cursor. + + ### Options + + * `:queue` - Whether to block waiting in an internal queue for the + connection's state (boolean, default: `true`). See "Queue config" in + `start_link/2` docs + * `:timeout` - The maximum time that the caller is allowed to perform + this operation (default: `15_000`) + * `:deadline` - If set, overrides `:timeout` option and specifies absolute + monotonic time in milliseconds by which caller must perform operation. + See `System` module documentation for more information on monotonic time + (default: `nil`) + * `:log` - A function to log information about a call, either + a 1-arity fun, `{module, function, args}` with `t:DBConnection.LogEntry.t/0` + prepended to `args` or `nil`. 
See `DBConnection.LogEntry` (default: `nil`) + + The pool and connection module may support other options. All options + are passed to `c:handle_declare/4` and `c:handle_deallocate/4`. + + ### Example + + DBConnection.transaction(pool, fn conn -> + query = %Query{statement: "SELECT id FROM table"} + query = DBConnection.prepare!(conn, query) + try do + stream = DBConnection.stream(conn, query, []) + Enum.to_list(stream) + after + # Make sure query is closed! + DBConnection.close(conn, query) + end + end) + """ + @spec stream(t, query, params, opts :: Keyword.t()) :: DBConnection.Stream.t() + def stream(%DBConnection{} = conn, query, params, opts \\ []) do + %DBConnection.Stream{conn: conn, query: query, params: params, opts: opts} + end + + @doc """ + Reduces a previously built stream or prepared stream. + """ + def reduce(%DBConnection.PrepareStream{} = stream, acc, fun) do + %DBConnection.PrepareStream{conn: conn, query: query, params: params, opts: opts} = stream + + declare = fn conn, opts -> + {query, cursor} = prepare_declare!(conn, query, params, opts) + {:cont, query, cursor} + end + + enum = resource(conn, declare, &stream_fetch/3, &stream_deallocate/3, opts) + enum.(acc, fun) + end + + def reduce(%DBConnection.Stream{} = stream, acc, fun) do + %DBConnection.Stream{conn: conn, query: query, params: params, opts: opts} = stream + + declare = fn conn, opts -> + case declare(conn, query, params, opts) do + {:ok, query, cursor} -> + {:cont, query, cursor} + + {:ok, cursor} -> + {:cont, query, cursor} + + {:error, err} -> + raise err + end + end + + enum = resource(conn, declare, &stream_fetch/3, &stream_deallocate/3, opts) + enum.(acc, fun) + end + + @doc false + def register_as_pool(conn_module) do + Process.put(@connection_module_key, conn_module) + end + + @doc """ + Returns connection module used by the given connection pool. + + When given a process that is not a connection pool, returns an `:error`. 
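For example, assuming a pool started with a hypothetical `MyApp.Connection` module:

    {:ok, MyApp.Connection} = DBConnection.connection_module(pool)
    :error = DBConnection.connection_module(self())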
+ """ + @spec connection_module(conn) :: {:ok, module} | :error + def connection_module(conn) do + with pid when pid != nil <- pool_pid(conn), + {:dictionary, dictionary} <- Process.info(pid, :dictionary), + {:ok, module} <- fetch_from_dictionary(dictionary, @connection_module_key), + do: {:ok, module}, + else: (_ -> :error) + end + + defp pool_pid(%DBConnection{pool_ref: Holder.pool_ref(pool: pid)}), do: pid + defp pool_pid(conn), do: GenServer.whereis(conn) + + defp fetch_from_dictionary(dictionary, key) do + Enum.find_value(dictionary, :error, fn + {^key, value} -> {:ok, value} + _pair -> nil + end) + end + + ## Helpers + + defp checkout(pool, meter, opts) do + checkout = System.monotonic_time() + pool_mod = Keyword.get(opts, :pool, DBConnection.ConnectionPool) + + caller = Keyword.get(opts, :caller, self()) + callers = [caller | Process.get(:"$callers") || []] + + try do + pool_mod.checkout(pool, callers, opts) + catch + kind, reason -> + stack = __STACKTRACE__ + {kind, reason, stack, past_event(meter, :checkout, checkout)} + else + {:ok, pool_ref, _conn_mod, checkin, _conn_state} -> + conn = %DBConnection{pool_ref: pool_ref, conn_ref: make_ref()} + meter = meter |> past_event(:checkin, checkin) |> past_event(:checkout, checkout) + {:ok, conn, meter} + + {:error, err} -> + {:error, err, past_event(meter, :checkout, checkout)} + end + end + + defp checkout(%DBConnection{} = conn, fun, meter, opts) do + with {:ok, result, meter} <- fun.(conn, meter, opts) do + {:ok, conn, result, meter} + end + end + + defp checkout(pool, fun, meter, opts) do + with {:ok, conn, meter} <- checkout(pool, meter, opts) do + case fun.(conn, meter, opts) do + {:ok, result, meter} -> + {:ok, conn, result, meter} + + error -> + checkin(conn) + error + end + end + end + + defp checkin(%DBConnection{pool_ref: pool_ref}) do + Holder.checkin(pool_ref) + end + + defp checkin(%DBConnection{} = conn, fun, meter, opts) do + return = fun.(conn, meter, opts) + checkin(conn) + return + end + + defp checkin(pool, fun, meter, opts) do + run(pool, fun, meter, opts) + end + + defp disconnect(%DBConnection{pool_ref: pool_ref}, err) do + _ = Holder.disconnect(pool_ref, err) + :ok + end + + defp stop(%DBConnection{pool_ref: pool_ref}, kind, reason, stack) do + msg = "client #{inspect(self())} stopped: " <> Exception.format(kind, reason, stack) + exception = DBConnection.ConnectionError.exception(msg) + _ = Holder.stop(pool_ref, exception) + :ok + end + + defp handle_common_result(return, conn, meter) do + case return do + {:ok, result, _conn_state} -> + {:ok, result, meter} + + {:error, err, _conn_state} -> + {:error, err, meter} + + {:disconnect, err, _conn_state} -> + disconnect(conn, err) + {:error, err, meter} + + {:catch, kind, reason, stack} -> + stop(conn, kind, reason, stack) + {kind, reason, stack, meter} + + other -> + bad_return!(other, conn, meter) + end + end + + @compile {:inline, bad_return!: 3} + + defp bad_return!(other, conn, meter) do + try do + raise DBConnection.ConnectionError, "bad return value: #{inspect(other)}" + catch + :error, reason -> + stack = __STACKTRACE__ + stop(conn, :error, reason, stack) + {:error, reason, stack, meter} + end + end + + defp parse(query, meter, opts) do + try do + DBConnection.Query.parse(query, opts) + catch + kind, reason -> + stack = __STACKTRACE__ + {kind, reason, stack, meter} + else + query -> + {:ok, query, meter} + end + end + + defp describe(conn, query, meter, opts) do + try do + DBConnection.Query.describe(query, opts) + catch + kind, reason -> + stack = 
__STACKTRACE__ + raised_close(conn, query, meter, opts, kind, reason, stack) + else + query -> + {:ok, query, meter} + end + end + + defp encode(conn, query, params, meter, opts) do + try do + DBConnection.Query.encode(query, params, opts) + catch + kind, reason -> + stack = __STACKTRACE__ + raised_close(conn, query, meter, opts, kind, reason, stack) + else + params -> + {:ok, params, meter} + end + end + + defp maybe_encode(query, params, meter, opts) do + try do + DBConnection.Query.encode(query, params, opts) + rescue + DBConnection.EncodeError -> {:prepare, meter} + catch + kind, reason -> + stack = __STACKTRACE__ + {kind, reason, stack, meter} + else + params -> + {:ok, params, meter} + end + end + + defp decode(query, result, meter, opts) do + meter = event(meter, :decode) + + try do + DBConnection.Query.decode(query, result, opts) + catch + kind, reason -> + stack = __STACKTRACE__ + {kind, reason, stack, meter} + else + result -> + {:ok, result, meter} + end + end + + defp prepare_declare(conn, query, params, opts) do + result = + with {:ok, query, meter} <- parse(query, meter(opts), opts) do + parsed_prepare_declare(conn, query, params, meter, opts) + end + + log(result, :prepare_declare, query, params) + end + + defp parsed_prepare_declare(conn, query, params, meter, opts) do + run(conn, &run_prepare_declare/5, query, params, meter, opts) + end + + defp prepare_declare!(conn, query, params, opts) do + case prepare_declare(conn, query, params, opts) do + {:ok, query, cursor} -> + {query, cursor} + + {:error, err} -> + raise err + end + end + + defp declare(conn, query, params, opts) do + result = + case maybe_encode(query, params, meter(opts), opts) do + {:prepare, meter} -> + parsed_prepare_declare(conn, query, params, meter, opts) + + {:ok, params, meter} -> + run(conn, &run_declare/5, query, params, meter, opts) + + {_, _, _, _} = error -> + error + end + + log(result, :declare, query, params) + end + + defp deallocate(conn, query, cursor, opts) do + conn + |> run_cleanup(&run_deallocate/4, [query, cursor], meter(opts), opts) + |> log(:deallocate, query, cursor) + end + + defp run_prepare(conn, query, meter, opts) do + with {:ok, query, meter} <- prepare(conn, query, meter, opts) do + describe(conn, query, meter, opts) + end + end + + defp prepare(%DBConnection{pool_ref: pool_ref} = conn, query, meter, opts) do + pool_ref + |> Holder.handle(:handle_prepare, [query], opts) + |> handle_common_result(conn, event(meter, :prepare)) + end + + defp run_prepare_execute(conn, query, params, meter, opts) do + with {:ok, query, meter} <- run_prepare(conn, query, meter, opts), + {:ok, params, meter} <- encode(conn, query, params, meter, opts) do + run_execute(conn, query, params, meter, opts) + end + end + + defp run_execute(conn, query, params, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + meter = event(meter, :execute) + + case Holder.handle(pool_ref, :handle_execute, [query, params], opts) do + {:ok, query, result, _conn_state} -> + {:ok, query, result, meter} + + {:ok, _, _} = other -> + bad_return!(other, conn, meter) + + other -> + handle_common_result(other, conn, meter) + end + end + + defp raised_close(conn, query, meter, opts, kind, reason, stack) do + with {:ok, _, meter} <- run_close(conn, [query], meter, opts) do + {kind, reason, stack, meter} + end + end + + defp run_close(conn, args, meter, opts) do + meter = event(meter, :close) + cleanup(conn, :handle_close, args, meter, opts) + end + + defp run_cleanup(%DBConnection{} = conn, fun, args, meter, opts) do + 
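    # Already holding a checked-out connection: run the cleanup fun on it
    # directly. The pool clause below checks a connection out first and
    # always checks it back in afterwards.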
fun.(conn, args, meter, opts) + end + + defp run_cleanup(pool, fun, args, meter, opts) do + with {:ok, conn, meter} <- checkout(pool, meter, opts) do + try do + fun.(conn, args, meter, opts) + after + checkin(conn) + end + end + end + + defp cleanup(conn, fun, args, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + + case Holder.cleanup(pool_ref, fun, args, opts) do + {:ok, result, _conn_state} -> + {:ok, result, meter} + + {:error, err, _conn_state} -> + {:error, err, meter} + + {:disconnect, err, _conn_state} -> + disconnect(conn, err) + {:error, err, meter} + + {:catch, kind, reason, stack} -> + stop(conn, kind, reason, stack) + {kind, reason, stack, meter} + + other -> + bad_return!(other, conn, meter) + end + end + + defp run(%DBConnection{} = conn, fun, meter, opts) do + fun.(conn, meter, opts) + end + + defp run(pool, fun, meter, opts) do + with {:ok, conn, meter} <- checkout(pool, meter, opts) do + try do + fun.(conn, meter, opts) + after + checkin(conn) + end + end + end + + defp run(%DBConnection{} = conn, fun, arg, meter, opts) do + fun.(conn, arg, meter, opts) + end + + defp run(pool, fun, arg, meter, opts) do + with {:ok, conn, meter} <- checkout(pool, meter, opts) do + try do + fun.(conn, arg, meter, opts) + after + checkin(conn) + end + end + end + + defp run(%DBConnection{} = conn, fun, arg1, arg2, meter, opts) do + fun.(conn, arg1, arg2, meter, opts) + end + + defp run(pool, fun, arg1, arg2, meter, opts) do + with {:ok, conn, meter} <- checkout(pool, meter, opts) do + try do + fun.(conn, arg1, arg2, meter, opts) + after + checkin(conn) + end + end + end + + defp meter(opts) do + case Keyword.get(opts, :log) do + nil -> nil + log -> {log, []} + end + end + + defp event(nil, _), + do: nil + + defp event({log, events}, event), + do: {log, [{event, System.monotonic_time()} | events]} + + defp past_event(nil, _, _), + do: nil + + defp past_event(log_events, _, nil), + do: log_events + + defp past_event({log, events}, event, time), + do: {log, [{event, time} | events]} + + defp log({:ok, res, meter}, call, query, params), + do: log(meter, call, query, params, {:ok, res}) + + defp log({:ok, res1, res2, meter}, call, query, params), + do: log(meter, call, query, params, {:ok, res1, res2}) + + defp log({ok, res, meter}, call, query, cursor) when ok in [:cont, :halt], + do: log(meter, call, query, cursor, {ok, res}) + + defp log({:error, err, meter}, call, query, params), + do: log(meter, call, query, params, {:error, err}) + + defp log({kind, reason, stack, meter}, call, query, params), + do: log(meter, call, query, params, {kind, reason, stack}) + + defp log(nil, _, _, _, result), + do: log_result(result) + + defp log({log, times}, call, query, params, result) do + entry = DBConnection.LogEntry.new(call, query, params, times, entry_result(result)) + + try do + log(log, entry) + catch + kind, reason -> + stack = __STACKTRACE__ + log_raised(entry, kind, reason, stack) + end + + log_result(result) + end + + defp entry_result({kind, reason, stack}) + when kind in [:error, :exit, :throw] do + msg = "an exception was raised: " <> Exception.format(kind, reason, stack) + {:error, %DBConnection.ConnectionError{message: msg}} + end + + defp entry_result({ok, res}) when ok in [:cont, :halt], + do: {:ok, res} + + defp entry_result(other), do: other + + defp log({mod, fun, args}, entry), do: apply(mod, fun, [entry | args]) + defp log(fun, entry), do: fun.(entry) + + defp log_result({kind, reason, stack}) when kind in [:error, :exit, :throw] do + :erlang.raise(kind, reason, stack) + 
end + + defp log_result(other), do: other + + defp log_raised(entry, kind, reason, stack) do + reason = Exception.normalize(kind, reason, stack) + + Logger.error( + fn -> + "an exception was raised logging #{inspect(entry)}: " <> + Exception.format(kind, reason, stack) + end, + crash_reason: {crash_reason(kind, reason), stack} + ) + catch + _, _ -> + :ok + end + + defp crash_reason(:throw, value), do: {:nocatch, value} + defp crash_reason(_, value), do: value + + defp run_transaction(conn, fun, run, opts) do + %DBConnection{conn_ref: conn_ref} = conn + + try do + result = fun.(%{conn | conn_mode: :transaction}) + conclude(conn, result) + catch + :throw, {__MODULE__, ^conn_ref, reason} -> + reset(conn) + + case rollback(conn, run, opts) do + {:ok, _} -> + {:error, reason} + + {:error, %DBConnection.TransactionError{}} -> + {:error, reason} + + {:error, %DBConnection.ConnectionError{}} -> + {:error, reason} + + {:error, err} -> + raise err + end + + kind, reason -> + stack = __STACKTRACE__ + reset(conn) + _ = rollback(conn, run, opts) + :erlang.raise(kind, reason, stack) + else + result -> + case commit(conn, run, opts) do + {:ok, _} -> + {:ok, result} + + {:error, %DBConnection.TransactionError{}} -> + {:error, :rollback} + + {:error, err} -> + raise err + end + after + reset(conn) + end + end + + defp fail(%DBConnection{pool_ref: pool_ref}) do + case Holder.status?(pool_ref, :ok) do + true -> Holder.put_status(pool_ref, :aborted) + false -> :ok + end + end + + defp conclude(%DBConnection{pool_ref: pool_ref, conn_ref: conn_ref}, result) do + case Holder.status?(pool_ref, :ok) do + true -> result + false -> throw({__MODULE__, conn_ref, :rollback}) + end + end + + defp reset(%DBConnection{pool_ref: pool_ref}) do + case Holder.status?(pool_ref, :aborted) do + true -> Holder.put_status(pool_ref, :ok) + false -> :ok + end + end + + defp begin(conn, run, opts) do + conn + |> run.(&run_begin/3, meter(opts), opts) + |> log(:begin, :begin, nil) + end + + defp run_begin(conn, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + meter = event(meter, :begin) + + case Holder.handle(pool_ref, :handle_begin, [], opts) do + {status, _conn_state} when status in [:idle, :transaction, :error] -> + status_disconnect(conn, status, meter) + + other -> + handle_common_result(other, conn, meter) + end + end + + defp rollback(conn, run, opts) do + conn + |> run.(&run_rollback/3, meter(opts), opts) + |> log(:rollback, :rollback, nil) + end + + defp run_rollback(conn, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + meter = event(meter, :rollback) + + case Holder.handle(pool_ref, :handle_rollback, [], opts) do + {status, _conn_state} when status in [:idle, :transaction, :error] -> + status_disconnect(conn, status, meter) + + other -> + handle_common_result(other, conn, meter) + end + end + + defp commit(conn, run, opts) do + case run.(conn, &run_commit/3, meter(opts), opts) do + {:rollback, {:ok, result, meter}} -> + log(meter, :commit, :rollback, nil, {:ok, result}) + err = DBConnection.TransactionError.exception(:error) + {:error, err} + + {query, other} -> + log(other, :commit, query, nil) + + {:error, err, meter} -> + log(meter, :commit, :commit, nil, {:error, err}) + + {kind, reason, stack, meter} -> + log(meter, :commit, :commit, nil, {kind, reason, stack}) + end + end + + defp run_commit(conn, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + meter = event(meter, :commit) + + case Holder.handle(pool_ref, :handle_commit, [], opts) do + {:error, _conn_state} -> + {:rollback, 
run_rollback(conn, meter, opts)} + + {status, _conn_state} when status in [:idle, :transaction] -> + {:commit, status_disconnect(conn, status, meter)} + + other -> + {:commit, handle_common_result(other, conn, meter)} + end + end + + defp status_disconnect(conn, status, meter) do + err = DBConnection.TransactionError.exception(status) + disconnect(conn, err) + {:error, err, meter} + end + + defp run_status(conn, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + + case Holder.handle(pool_ref, :handle_status, [], opts) do + {status, _conn_state} when status in [:idle, :transaction, :error] -> + {status, meter} + + {:disconnect, err, _conn_state} -> + disconnect(conn, err) + {:error, err, meter} + + {:catch, kind, reason, stack} -> + stop(conn, kind, reason, stack) + {kind, reason, stack, meter} + + other -> + bad_return!(other, conn, meter) + end + end + + defp run_prepare_declare(conn, query, params, meter, opts) do + with {:ok, query, meter} <- prepare(conn, query, meter, opts), + {:ok, query, meter} <- describe(conn, query, meter, opts), + {:ok, params, meter} <- encode(conn, query, params, meter, opts), + {:ok, query, cursor, meter} <- run_declare(conn, query, params, meter, opts) do + {:ok, query, cursor, meter} + end + end + + defp run_declare(conn, query, params, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + meter = event(meter, :declare) + + case Holder.handle(pool_ref, :handle_declare, [query, params], opts) do + {:ok, query, result, _conn_state} -> + {:ok, query, result, meter} + + {:ok, _, _} = other -> + bad_return!(other, conn, meter) + + other -> + handle_common_result(other, conn, meter) + end + end + + defp stream_fetch(conn, {:cont, query, cursor}, opts) do + conn + |> run(&run_stream_fetch/4, [query, cursor], meter(opts), opts) + |> log(:fetch, query, cursor) + |> case do + {ok, result} when ok in [:cont, :halt] -> + {[result], {ok, query, cursor}} + + {:error, err} -> + raise err + end + end + + defp stream_fetch(_, {:halt, _, _} = state, _) do + {:halt, state} + end + + defp run_stream_fetch(conn, args, meter, opts) do + [query, _] = args + + with {ok, result, meter} when ok in [:cont, :halt] <- run_fetch(conn, args, meter, opts), + {:ok, result, meter} <- decode(query, result, meter, opts) do + {ok, result, meter} + end + end + + defp run_fetch(conn, args, meter, opts) do + %DBConnection{pool_ref: pool_ref} = conn + meter = event(meter, :fetch) + + case Holder.handle(pool_ref, :handle_fetch, args, opts) do + {:cont, result, _conn_state} -> + {:cont, result, meter} + + {:halt, result, _conn_state} -> + {:halt, result, meter} + + other -> + handle_common_result(other, conn, meter) + end + end + + defp stream_deallocate(conn, {_status, query, cursor}, opts), + do: deallocate(conn, query, cursor, opts) + + defp run_deallocate(conn, args, meter, opts) do + meter = event(meter, :deallocate) + cleanup(conn, :handle_deallocate, args, meter, opts) + end + + defp resource(%DBConnection{} = conn, start, next, stop, opts) do + start = fn -> start.(conn, opts) end + next = fn state -> next.(conn, state, opts) end + stop = fn state -> stop.(conn, state, opts) end + Stream.resource(start, next, stop) + end +end diff --git a/deps/db_connection/lib/db_connection/app.ex b/deps/db_connection/lib/db_connection/app.ex new file mode 100644 index 0000000..2104c6f --- /dev/null +++ b/deps/db_connection/lib/db_connection/app.ex @@ -0,0 +1,22 @@ +defmodule DBConnection.App do + @moduledoc false + use Application + + def start(_, _) do + children = [ + {Task.Supervisor, 
name: DBConnection.Task}, + dynamic_supervisor(DBConnection.Ownership.Supervisor), + dynamic_supervisor(DBConnection.ConnectionPool.Supervisor), + DBConnection.Watcher + ] + + Supervisor.start_link(children, strategy: :one_for_all, name: __MODULE__) + end + + defp dynamic_supervisor(name) do + Supervisor.child_spec( + {DynamicSupervisor, name: name, strategy: :one_for_one}, + id: name + ) + end +end diff --git a/deps/db_connection/lib/db_connection/backoff.ex b/deps/db_connection/lib/db_connection/backoff.ex new file mode 100644 index 0000000..1cc5dc7 --- /dev/null +++ b/deps/db_connection/lib/db_connection/backoff.ex @@ -0,0 +1,96 @@ +defmodule DBConnection.Backoff do + @moduledoc false + @compile :nowarn_deprecated_function + + alias DBConnection.Backoff + + @default_type :rand_exp + @min 1_000 + @max 30_000 + + defstruct [:type, :min, :max, :state] + + def new(opts) do + case Keyword.get(opts, :backoff_type, @default_type) do + :stop -> + nil + + type -> + {min, max} = min_max(opts) + new(type, min, max) + end + end + + def backoff(%Backoff{type: :rand, min: min, max: max} = s) do + {rand(min, max), s} + end + + def backoff(%Backoff{type: :exp, min: min, state: nil} = s) do + {min, %Backoff{s | state: min}} + end + + def backoff(%Backoff{type: :exp, max: max, state: prev} = s) do + require Bitwise + next = min(Bitwise.<<<(prev, 1), max) + {next, %Backoff{s | state: next}} + end + + def backoff(%Backoff{type: :rand_exp, max: max, state: state} = s) do + {prev, lower} = state + next_min = min(prev, lower) + next_max = min(prev * 3, max) + next = rand(next_min, next_max) + {next, %Backoff{s | state: {next, lower}}} + end + + def reset(%Backoff{type: :rand} = s), do: s + def reset(%Backoff{type: :exp} = s), do: %Backoff{s | state: nil} + + def reset(%Backoff{type: :rand_exp, min: min, state: {_, lower}} = s) do + %Backoff{s | state: {min, lower}} + end + + ## Internal + + defp min_max(opts) do + case {opts[:backoff_min], opts[:backoff_max]} do + {nil, nil} -> {@min, @max} + {nil, max} -> {min(@min, max), max} + {min, nil} -> {min, max(min, @max)} + {min, max} -> {min, max} + end + end + + defp new(_, min, _) when not (is_integer(min) and min >= 0) do + raise ArgumentError, "minimum #{inspect(min)} not 0 or a positive integer" + end + + defp new(_, _, max) when not (is_integer(max) and max >= 0) do + raise ArgumentError, "maximum #{inspect(max)} not 0 or a positive integer" + end + + defp new(_, min, max) when min > max do + raise ArgumentError, "minimum #{min} is greater than maximum #{max}" + end + + defp new(:rand, min, max) do + %Backoff{type: :rand, min: min, max: max, state: nil} + end + + defp new(:exp, min, max) do + %Backoff{type: :exp, min: min, max: max, state: nil} + end + + defp new(:rand_exp, min, max) do + lower = max(min, div(max, 3)) + %Backoff{type: :rand_exp, min: min, max: max, state: {min, lower}} + end + + defp new(type, _, _) do + raise ArgumentError, "unknown type #{inspect(type)}" + end + + defp rand(min, max) do + :rand.uniform(max - min + 1) + min - 1 + end +end diff --git a/deps/db_connection/lib/db_connection/connection.ex b/deps/db_connection/lib/db_connection/connection.ex new file mode 100644 index 0000000..b3d8c91 --- /dev/null +++ b/deps/db_connection/lib/db_connection/connection.ex @@ -0,0 +1,451 @@ +defmodule DBConnection.ConnectionError do + defexception [:message, severity: :error, reason: :error] + + @moduledoc """ + The raised exception might include the reason which would be useful + to programmatically determine what was causing the error. 
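For example, a caller might branch on the struct's fields (a sketch only; the particular `reason` values observed depend on how the error was raised):

    case DBConnection.execute(conn, query, params) do
      {:ok, _query, result} ->
        {:ok, result}

      {:error, %DBConnection.ConnectionError{reason: reason} = err} ->
        # e.g. distinguish pool/queue problems from plain statement errors
        {:error, {reason, err}}

      {:error, err} ->
        {:error, err}
    end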
+ """ + + @doc false + def exception(message, reason) do + message + |> exception() + |> Map.replace!(:reason, reason) + end +end + +defmodule DBConnection.Connection do + @moduledoc false + + use Connection + require Logger + alias DBConnection.Backoff + alias DBConnection.Holder + + @timeout 15_000 + + @doc false + def start_link(mod, opts, pool, tag) do + start_opts = Keyword.take(opts, [:debug, :spawn_opt]) + Connection.start_link(__MODULE__, {mod, opts, pool, tag}, start_opts) + end + + @doc false + def child_spec(mod, opts, pool, tag, child_opts) do + Supervisor.child_spec( + %{id: __MODULE__, start: {__MODULE__, :start_link, [mod, opts, pool, tag]}}, + child_opts + ) + end + + @doc false + def disconnect({pid, ref}, err, state) do + Connection.cast(pid, {:disconnect, ref, err, state}) + end + + @doc false + def stop({pid, ref}, err, state) do + Connection.cast(pid, {:stop, ref, err, state}) + end + + @doc false + def ping({pid, ref}, state) do + Connection.cast(pid, {:ping, ref, state}) + end + + ## Connection API + + @doc false + def init({mod, opts, pool, tag}) do + s = %{ + mod: mod, + opts: opts, + state: nil, + client: :closed, + pool: pool, + tag: tag, + timer: nil, + backoff: Backoff.new(opts), + connection_listeners: Keyword.get(opts, :connection_listeners, []), + after_connect: Keyword.get(opts, :after_connect), + after_connect_timeout: Keyword.get(opts, :after_connect_timeout, @timeout) + } + + {:connect, :init, s} + end + + @doc false + def connect(_, s) do + %{mod: mod, opts: opts, backoff: backoff, after_connect: after_connect} = s + + try do + apply(mod, :connect, [connect_opts(opts)]) + rescue + e -> + {e, stack} = maybe_sanitize_exception(e, __STACKTRACE__, opts) + reraise e, stack + else + {:ok, state} when after_connect != nil -> + ref = make_ref() + Connection.cast(self(), {:after_connect, ref}) + {:ok, %{s | state: state, client: {ref, :connect}}} + + {:ok, state} -> + backoff = backoff && Backoff.reset(backoff) + ref = make_ref() + Connection.cast(self(), {:connected, ref}) + {:ok, %{s | state: state, client: {ref, :connect}, backoff: backoff}} + + {:error, err} when is_nil(backoff) -> + raise err + + {:error, err} -> + Logger.error( + fn -> + [ + inspect(mod), + ?\s, + ?(, + inspect(self()), + ") failed to connect: " + | Exception.format_banner(:error, err, []) + ] + end, + crash_reason: {err, []} + ) + + {timeout, backoff} = Backoff.backoff(backoff) + {:backoff, timeout, %{s | backoff: backoff}} + end + end + + defp maybe_sanitize_exception(e, stack, opts) do + if Keyword.get(opts, :show_sensitive_data_on_connection_error, false) do + {e, stack} + else + message = + "connect raised #{inspect(e.__struct__)} exception#{sanitized_message(e)}. " <> + "The exception details are hidden, as they may contain sensitive data such as " <> + "database credentials. 
You may set :show_sensitive_data_on_connection_error " <> + "to true when starting your connection if you wish to see all of the details" + + {RuntimeError.exception(message), cleanup_stacktrace(stack)} + end + end + + defp sanitized_message(%KeyError{} = e), do: ": #{Exception.message(%{e | term: nil})}" + defp sanitized_message(_), do: "" + + @doc false + def disconnect({log, err}, %{mod: mod} = s) do + if log == :log do + severity = + case err do + %DBConnection.ConnectionError{severity: severity} -> severity + _ -> :error + end + + Logger.log(severity, fn -> + [ + inspect(mod), + ?\s, + ?(, + inspect(self()), + ") disconnected: " | Exception.format_banner(:error, err, []) + ] + end) + + :ok + end + + %{state: state, client: client, timer: timer, backoff: backoff} = s + demonitor(client) + cancel_timer(timer) + :ok = apply(mod, :disconnect, [err, state]) + s = %{s | state: nil, client: :closed, timer: nil} + + notify_connection_listeners({:disconnected, self()}, s) + + case client do + _ when backoff == nil -> + {:stop, {:shutdown, err}, s} + + {_, :after_connect} -> + {timeout, backoff} = Backoff.backoff(backoff) + {:backoff, timeout, %{s | backoff: backoff}} + + _ -> + {:connect, :disconnect, s} + end + end + + @doc false + def handle_cast({:ping, ref, state}, %{client: {ref, :pool}, mod: mod} = s) do + case apply(mod, :ping, [state]) do + {:ok, state} -> + pool_update(state, s) + + {:disconnect, err, state} -> + {:disconnect, {:log, err}, %{s | state: state}} + end + end + + def handle_cast({:disconnect, ref, err, state}, %{client: {ref, _}} = s) do + {:disconnect, {:log, err}, %{s | state: state}} + end + + def handle_cast({:stop, ref, err, state}, %{client: {ref, _}} = s) do + {_, stack} = :erlang.process_info(self(), :current_stacktrace) + {:stop, {err, stack}, %{s | state: state}} + end + + def handle_cast({tag, _, _, _}, s) when tag in [:disconnect, :stop] do + handle_timeout(s) + end + + def handle_cast({:after_connect, ref}, %{client: {ref, :connect}} = s) do + %{ + mod: mod, + state: state, + after_connect: after_connect, + after_connect_timeout: timeout, + opts: opts + } = s + + notify_connection_listeners({:connected, self()}, s) + + case apply(mod, :checkout, [state]) do + {:ok, state} -> + opts = [timeout: timeout] ++ opts + {pid, ref} = DBConnection.Task.run_child(mod, state, after_connect, opts) + timer = start_timer(pid, timeout) + s = %{s | client: {ref, :after_connect}, timer: timer, state: state} + {:noreply, s} + + {:disconnect, err, state} -> + {:disconnect, {:log, err}, %{s | state: state}} + end + end + + def handle_cast({:after_connect, _}, s) do + {:noreply, s} + end + + def handle_cast({:connected, ref}, %{client: {ref, :connect}} = s) do + %{mod: mod, state: state} = s + + notify_connection_listeners({:connected, self()}, s) + + case apply(mod, :checkout, [state]) do + {:ok, state} -> + pool_update(state, s) + + {:disconnect, err, state} -> + {:disconnect, {:log, err}, %{s | state: state}} + end + end + + def handle_cast({:connected, _}, s) do + {:noreply, s} + end + + @doc false + def handle_info({:DOWN, ref, _, pid, reason}, %{client: {ref, :after_connect}} = s) do + message = "client #{inspect(pid)} exited: " <> Exception.format_exit(reason) + err = DBConnection.ConnectionError.exception(message) + {:disconnect, {down_log(reason), err}, %{s | client: {nil, :after_connect}}} + end + + def handle_info({:DOWN, mon, _, pid, reason}, %{client: {ref, mon}} = s) do + message = "client #{inspect(pid)} exited: " <> Exception.format_exit(reason) + err = 
DBConnection.ConnectionError.exception(message) + {:disconnect, {down_log(reason), err}, %{s | client: {ref, nil}}} + end + + def handle_info({:timeout, timer, {__MODULE__, pid, timeout}}, %{timer: timer} = s) + when is_reference(timer) do + message = + "client #{inspect(pid)} timed out because it checked out " <> + "the connection for longer than #{timeout}ms" + + exc = + case Process.info(pid, :current_stacktrace) do + {:current_stacktrace, stacktrace} -> + message <> + "\n\n#{inspect(pid)} was at location:\n\n" <> + Exception.format_stacktrace(stacktrace) + + _ -> + message + end + |> DBConnection.ConnectionError.exception() + + {:disconnect, {:log, exc}, %{s | timer: nil}} + end + + def handle_info( + {:"ETS-TRANSFER", holder, _pid, {msg, ref, extra}}, + %{client: {ref, :after_connect}, timer: timer} = s + ) do + {_, state} = Holder.delete(holder) + cancel_timer(timer) + s = %{s | timer: nil} + + case msg do + :checkin -> handle_checkin(state, s) + :disconnect -> handle_cast({:disconnect, ref, extra, state}, s) + :stop -> handle_cast({:stop, ref, extra, state}, s) + end + end + + def handle_info(msg, %{mod: mod} = s) do + Logger.info(fn -> + [inspect(mod), ?\s, ?(, inspect(self()), ") missed message: " | inspect(msg)] + end) + + handle_timeout(s) + end + + @doc false + def format_status(info, [_, %{client: :closed, mod: mod}]) do + case info do + :normal -> [{:data, [{'Module', mod}]}] + :terminate -> mod + end + end + + def format_status(info, [pdict, %{mod: mod, state: state}]) do + case function_exported?(mod, :format_status, 2) do + true when info == :normal -> + normal_status(mod, pdict, state) + + false when info == :normal -> + normal_status_default(mod, state) + + true when info == :terminate -> + {mod, terminate_status(mod, pdict, state)} + + false when info == :terminate -> + {mod, state} + end + end + + ## Helpers + + defp connect_opts(opts) do + case Keyword.get(opts, :configure) do + {mod, fun, args} -> + apply(mod, fun, [opts | args]) + + fun when is_function(fun, 1) -> + fun.(opts) + + nil -> + opts + end + end + + defp down_log(:normal), do: :nolog + defp down_log(:shutdown), do: :nolog + defp down_log({:shutdown, _}), do: :nolog + defp down_log(_), do: :log + + defp handle_timeout(s), do: {:noreply, s} + + defp demonitor({_, mon}) when is_reference(mon) do + Process.demonitor(mon, [:flush]) + end + + defp demonitor({mon, :after_connect}) when is_reference(mon) do + Process.demonitor(mon, [:flush]) + end + + defp demonitor({_, _}), do: true + defp demonitor(nil), do: true + + defp start_timer(_, :infinity), do: nil + + defp start_timer(pid, timeout) do + :erlang.start_timer(timeout, self(), {__MODULE__, pid, timeout}) + end + + defp cancel_timer(nil), do: :ok + + defp cancel_timer(timer) do + case :erlang.cancel_timer(timer) do + false -> flush_timer(timer) + _ -> :ok + end + end + + defp flush_timer(timer) do + receive do + {:timeout, ^timer, {__MODULE__, _, _}} -> + :ok + after + 0 -> + raise ArgumentError, "timer #{inspect(timer)} does not exist" + end + end + + defp handle_checkin(state, s) do + %{backoff: backoff, client: client} = s + backoff = backoff && Backoff.reset(backoff) + demonitor(client) + pool_update(state, %{s | client: nil, backoff: backoff}) + end + + defp pool_update(state, %{pool: pool, tag: tag, mod: mod} = s) do + case Holder.update(pool, tag, mod, state) do + {:ok, ref} -> + {:noreply, %{s | client: {ref, :pool}, state: state}, :hibernate} + + :error -> + {:stop, {:shutdown, :no_more_pool}, s} + end + end + + defp normal_status(mod, pdict, 
state) do + try do + mod.format_status(:normal, [pdict, state]) + catch + _, _ -> + normal_status_default(mod, state) + else + status -> + status + end + end + + defp normal_status_default(mod, state) do + [{:data, [{'Module', mod}, {'State', state}]}] + end + + defp terminate_status(mod, pdict, state) do + try do + mod.format_status(:terminate, [pdict, state]) + catch + _, _ -> + state + else + status -> + status + end + end + + defp cleanup_stacktrace(stack) do + case stack do + [{_, _, arity, _} | _rest] = stacktrace when is_integer(arity) -> + stacktrace + + [{mod, fun, args, info} | rest] when is_list(args) -> + [{mod, fun, length(args), info} | rest] + end + end + + defp notify_connection_listeners(message, %{} = state) do + %{connection_listeners: connection_listeners} = state + + Enum.each(connection_listeners, &send(&1, message)) + end +end diff --git a/deps/db_connection/lib/db_connection/connection_pool.ex b/deps/db_connection/lib/db_connection/connection_pool.ex new file mode 100644 index 0000000..31988d7 --- /dev/null +++ b/deps/db_connection/lib/db_connection/connection_pool.ex @@ -0,0 +1,353 @@ +defmodule DBConnection.ConnectionPool do + @moduledoc """ + The default connection pool. + + The queueing algorithm is based on [CoDel](https://queue.acm.org/appendices/codel.html). + """ + + use GenServer + alias DBConnection.Holder + + @queue_target 50 + @queue_interval 1000 + @idle_interval 1000 + @time_unit 1000 + + @doc false + def start_link({mod, opts}) do + GenServer.start_link(__MODULE__, {mod, opts}, start_opts(opts)) + end + + @doc false + def child_spec(opts) do + super(opts) + end + + @doc false + def checkout(pool, callers, opts) do + Holder.checkout(pool, callers, opts) + end + + @doc false + def disconnect_all(pool, interval, _opts) do + GenServer.call(pool, {:disconnect_all, interval}, :infinity) + end + + ## GenServer api + + @impl true + def init({mod, opts}) do + DBConnection.register_as_pool(mod) + + queue = :ets.new(__MODULE__.Queue, [:protected, :ordered_set]) + ts = {System.monotonic_time(), 0} + {:ok, _} = DBConnection.ConnectionPool.Pool.start_supervised(queue, mod, opts) + target = Keyword.get(opts, :queue_target, @queue_target) + interval = Keyword.get(opts, :queue_interval, @queue_interval) + idle_interval = Keyword.get(opts, :idle_interval, @idle_interval) + now_in_native = System.monotonic_time() + now_in_ms = System.convert_time_unit(now_in_native, :native, @time_unit) + + codel = %{ + target: target, + interval: interval, + delay: 0, + slow: false, + next: now_in_ms, + poll: nil, + idle_interval: idle_interval, + idle: nil + } + + codel = start_idle(now_in_native, start_poll(now_in_ms, now_in_ms, codel)) + {:ok, {:busy, queue, codel, ts}} + end + + @impl true + def handle_call({:disconnect_all, interval}, _from, {type, queue, codel, _ts}) do + ts = {System.monotonic_time(), interval} + {:reply, :ok, {type, queue, codel, ts}} + end + + @impl true + def handle_info( + {:db_connection, from, {:checkout, _caller, now, queue?}}, + {:busy, queue, _, _} = busy + ) do + case queue? 
do + true -> + :ets.insert(queue, {{now, System.unique_integer(), from}}) + {:noreply, busy} + + false -> + message = "connection not available and queuing is disabled" + err = DBConnection.ConnectionError.exception(message) + Holder.reply_error(from, err) + {:noreply, busy} + end + end + + def handle_info( + {:db_connection, from, {:checkout, _caller, _now, _queue?}} = checkout, + {:ready, queue, _codel, _ts} = ready + ) do + case :ets.first(queue) do + {queued_in_native, holder} = key -> + Holder.handle_checkout(holder, from, queue, queued_in_native) and :ets.delete(queue, key) + {:noreply, ready} + + :"$end_of_table" -> + handle_info(checkout, put_elem(ready, 0, :busy)) + end + end + + def handle_info({:"ETS-TRANSFER", holder, pid, queue}, {_, queue, _, _} = data) do + message = "client #{inspect(pid)} exited" + err = DBConnection.ConnectionError.exception(message: message, severity: :info) + Holder.handle_disconnect(holder, err) + {:noreply, data} + end + + def handle_info({:"ETS-TRANSFER", holder, _, {msg, queue, extra}}, {_, queue, _, ts} = data) do + case msg do + :checkin -> + owner = self() + + case :ets.info(holder, :owner) do + ^owner -> + {time, interval} = ts + + if Holder.maybe_disconnect(holder, time, interval) do + {:noreply, data} + else + handle_checkin(holder, extra, data) + end + + :undefined -> + {:noreply, data} + end + + :disconnect -> + Holder.handle_disconnect(holder, extra) + {:noreply, data} + + :stop -> + Holder.handle_stop(holder, extra) + {:noreply, data} + end + end + + def handle_info({:timeout, deadline, {queue, holder, pid, len}}, {_, queue, _, _} = data) do + # Check that timeout refers to current holder (and not previous) + if Holder.handle_deadline(holder, deadline) do + message = + "client #{inspect(pid)} timed out because " <> + "it queued and checked out the connection for longer than #{len}ms" + + exc = + case Process.info(pid, :current_stacktrace) do + {:current_stacktrace, stacktrace} -> + message <> + "\n\n#{inspect(pid)} was at location:\n\n" <> + Exception.format_stacktrace(stacktrace) + + _ -> + message + end + |> DBConnection.ConnectionError.exception() + + Holder.handle_disconnect(holder, exc) + end + + {:noreply, data} + end + + def handle_info({:timeout, poll, {time, last_sent}}, {_, _, %{poll: poll}, _} = data) do + {status, queue, codel, ts} = data + + # If no queue progress since last poll check queue + case :ets.first(queue) do + {sent, _, _} when sent <= last_sent and status == :busy -> + delay = time - sent + timeout(delay, time, queue, start_poll(time, sent, codel), ts) + + {sent, _, _} -> + {:noreply, {status, queue, start_poll(time, sent, codel), ts}} + + _ -> + {:noreply, {status, queue, start_poll(time, time, codel), ts}} + end + end + + def handle_info({:timeout, idle, past_in_native}, {_, _, %{idle: idle}, _} = data) do + {status, queue, codel, ts} = data + drop_idle(past_in_native, status, queue, codel, ts) + end + + defp drop_idle(past_in_native, status, queue, codel, ts) do + # If no queue progress since last idle check oldest connection + case :ets.first(queue) do + {queued_in_native, holder} = key + when queued_in_native <= past_in_native and status == :ready -> + :ets.delete(queue, key) + Holder.maybe_disconnect(holder, elem(ts, 0), 0) or Holder.handle_ping(holder) + drop_idle(past_in_native, status, queue, codel, ts) + + _ -> + {:noreply, {status, queue, start_idle(System.monotonic_time(), codel), ts}} + end + end + + defp timeout(delay, time, queue, codel, ts) do + case codel do + %{delay: min_delay, next: next, 
target: target, interval: interval} + when time >= next and min_delay > target -> + codel = %{codel | slow: true, delay: delay, next: time + interval} + drop_slow(time, target * 2, queue) + {:noreply, {:busy, queue, codel, ts}} + + %{next: next, interval: interval} when time >= next -> + codel = %{codel | slow: false, delay: delay, next: time + interval} + {:noreply, {:busy, queue, codel, ts}} + + _ -> + {:noreply, {:busy, queue, codel, ts}} + end + end + + defp drop_slow(time, timeout, queue) do + min_sent = time - timeout + match = {{:"$1", :_, :"$2"}} + guards = [{:<, :"$1", min_sent}] + select_slow = [{match, guards, [{{:"$1", :"$2"}}]}] + + for {sent, from} <- :ets.select(queue, select_slow) do + drop(time - sent, from) + end + + :ets.select_delete(queue, [{match, guards, [true]}]) + end + + defp handle_checkin(holder, now_in_native, {:ready, queue, _, _} = data) do + :ets.insert(queue, {{now_in_native, holder}}) + {:noreply, data} + end + + defp handle_checkin(holder, now_in_native, {:busy, queue, codel, ts}) do + now_in_ms = System.convert_time_unit(now_in_native, :native, @time_unit) + + case dequeue(now_in_ms, holder, queue, codel, ts) do + {:busy, _, _, _} = busy -> + {:noreply, busy} + + {:ready, _, _, _} = ready -> + :ets.insert(queue, {{now_in_native, holder}}) + {:noreply, ready} + end + end + + defp dequeue(time, holder, queue, codel, ts) do + case codel do + %{next: next, delay: delay, target: target} when time >= next -> + dequeue_first(time, delay > target, holder, queue, codel, ts) + + %{slow: false} -> + dequeue_fast(time, holder, queue, codel, ts) + + %{slow: true, target: target} -> + dequeue_slow(time, target * 2, holder, queue, codel, ts) + end + end + + defp dequeue_first(time, slow?, holder, queue, codel, ts) do + %{interval: interval} = codel + next = time + interval + + case :ets.first(queue) do + {sent, _, from} = key -> + :ets.delete(queue, key) + delay = time - sent + codel = %{codel | next: next, delay: delay, slow: slow?} + go(delay, from, time, holder, queue, codel, ts) + + :"$end_of_table" -> + codel = %{codel | next: next, delay: 0, slow: slow?} + {:ready, queue, codel, ts} + end + end + + defp dequeue_fast(time, holder, queue, codel, ts) do + case :ets.first(queue) do + {sent, _, from} = key -> + :ets.delete(queue, key) + go(time - sent, from, time, holder, queue, codel, ts) + + :"$end_of_table" -> + {:ready, queue, %{codel | delay: 0}, ts} + end + end + + defp dequeue_slow(time, timeout, holder, queue, codel, ts) do + case :ets.first(queue) do + {sent, _, from} = key when time - sent > timeout -> + :ets.delete(queue, key) + drop(time - sent, from) + dequeue_slow(time, timeout, holder, queue, codel, ts) + + {sent, _, from} = key -> + :ets.delete(queue, key) + go(time - sent, from, time, holder, queue, codel, ts) + + :"$end_of_table" -> + {:ready, queue, %{codel | delay: 0}, ts} + end + end + + defp go(delay, from, time, holder, queue, %{delay: min} = codel, ts) do + case Holder.handle_checkout(holder, from, queue, 0) do + true when delay < min -> + {:busy, queue, %{codel | delay: delay}, ts} + + true -> + {:busy, queue, codel, ts} + + false -> + dequeue(time, holder, queue, codel, ts) + end + end + + defp drop(delay, from) do + message = """ + connection not available and request was dropped from queue after #{delay}ms. \ + This means requests are coming in and your connection pool cannot serve them fast enough. \ + You can address this by: + + 1. Ensuring your database is available and that you can connect to it + 2. 
Tracking down slow queries and making sure they are running fast enough + 3. Increasing the pool_size (although this increases resource consumption) + 4. Allowing requests to wait longer by increasing :queue_target and :queue_interval + + See DBConnection.start_link/2 for more information + """ + + err = DBConnection.ConnectionError.exception(message, :queue_timeout) + + Holder.reply_error(from, err) + end + + defp start_opts(opts) do + Keyword.take(opts, [:name, :spawn_opt]) + end + + defp start_poll(now, last_sent, %{interval: interval} = codel) do + timeout = now + interval + poll = :erlang.start_timer(timeout, self(), {timeout, last_sent}, abs: true) + %{codel | poll: poll} + end + + defp start_idle(now_in_native, %{idle_interval: interval} = codel) do + timeout = System.convert_time_unit(now_in_native, :native, :millisecond) + interval + idle = :erlang.start_timer(timeout, self(), now_in_native, abs: true) + %{codel | idle: idle} + end +end diff --git a/deps/db_connection/lib/db_connection/connection_pool/pool.ex b/deps/db_connection/lib/db_connection/connection_pool/pool.ex new file mode 100644 index 0000000..8990193 --- /dev/null +++ b/deps/db_connection/lib/db_connection/connection_pool/pool.ex @@ -0,0 +1,29 @@ +defmodule DBConnection.ConnectionPool.Pool do + @moduledoc false + use Supervisor, restart: :temporary + + def start_supervised(tag, mod, opts) do + DBConnection.Watcher.watch( + DBConnection.ConnectionPool.Supervisor, + {DBConnection.ConnectionPool.Pool, {self(), tag, mod, opts}} + ) + end + + def start_link(arg) do + Supervisor.start_link(__MODULE__, arg) + end + + def init({owner, tag, mod, opts}) do + size = Keyword.get(opts, :pool_size, 1) + children = for id <- 1..size, do: conn(owner, tag, id, mod, opts) + sup_opts = [strategy: :one_for_one] ++ Keyword.take(opts, [:max_restarts, :max_seconds]) + Supervisor.init(children, sup_opts) + end + + ## Helpers + + defp conn(owner, tag, id, mod, opts) do + child_opts = [id: {mod, owner, id}] ++ Keyword.take(opts, [:shutdown]) + DBConnection.Connection.child_spec(mod, [pool_index: id] ++ opts, owner, tag, child_opts) + end +end diff --git a/deps/db_connection/lib/db_connection/holder.ex b/deps/db_connection/lib/db_connection/holder.ex new file mode 100644 index 0000000..b27a202 --- /dev/null +++ b/deps/db_connection/lib/db_connection/holder.ex @@ -0,0 +1,441 @@ +defmodule DBConnection.Holder do + @moduledoc false + require Record + + @queue true + @timeout 15000 + @time_unit 1000 + + Record.defrecord(:conn, [:connection, :module, :state, :lock, :ts, deadline: nil, status: :ok]) + Record.defrecord(:pool_ref, [:pool, :reference, :deadline, :holder, :lock]) + + @type t :: :ets.tid() + @type checkin_time :: non_neg_integer() | nil + + ## Holder API + + @spec new(pid, reference, module, term) :: t + def new(pool, ref, mod, state) do + # Insert before setting heir so that pool can't receive empty table + holder = :ets.new(__MODULE__, [:public, :ordered_set]) + + conn = conn(connection: self(), module: mod, state: state, ts: System.monotonic_time()) + true = :ets.insert_new(holder, conn) + + :ets.setopts(holder, {:heir, pool, ref}) + holder + end + + @spec update(pid, reference, module, term) :: {:ok, t} | :error + def update(pool, ref, mod, state) do + holder = new(pool, ref, mod, state) + + try do + :ets.give_away(holder, pool, {:checkin, ref, System.monotonic_time()}) + {:ok, holder} + rescue + ArgumentError -> :error + end + end + + @spec delete(t) :: {module, term} + def delete(holder) do + [conn(module: module, state: state)] = 
:ets.lookup(holder, :conn) + :ets.delete(holder) + {module, state} + end + + ## Pool API (invoked by caller) + + @callback checkout(pool :: GenServer.server(), [pid], opts :: Keyword.t()) :: + {:ok, pool_ref :: any, module, checkin_time, state :: any} + | {:error, Exception.t()} + def checkout(pool, callers, opts) do + queue? = Keyword.get(opts, :queue, @queue) + now = System.monotonic_time(@time_unit) + timeout = abs_timeout(now, opts) + + case checkout(pool, callers, queue?, now, timeout) do + {:ok, _, _, _, _} = ok -> + ok + + {:error, %DBConnection.ConnectionError{} = connection_error} = error -> + :telemetry.execute( + [:db_connection, :connection_error], + %{count: 1}, + %{ + error: connection_error, + opts: opts + } + ) + + error + + {:error, _} = error -> + error + + {:redirect, caller, proxy} -> + case checkout(proxy, [caller], opts) do + {:ok, _, _, _, _} = ok -> + ok + + {:error, %DBConnection.ConnectionError{message: message} = exception} -> + {:error, + %{ + exception + | message: + "could not checkout the connection owned by #{inspect(caller)}. " <> + "When using the sandbox, connections are shared, so this may imply " <> + "another process is using a connection. Reason: #{message}" + }} + + {:error, _} = error -> + error + end + + {:exit, reason} -> + exit({reason, {__MODULE__, :checkout, [pool, opts]}}) + end + end + + @spec checkin(pool_ref :: any) :: :ok + def checkin(pool_ref) do + # Note we may call checkin after a disconnect/stop. For this reason, we choose + # to not change the status on checkin but strictly speaking nobody can access + # the holder after disconnect/stop unless they store a copy of %DBConnection{}. + # Note status can't be :aborted as aborted is always reverted at the end of a + # transaction. + done(pool_ref, [{conn(:lock) + 1, nil}], :checkin, System.monotonic_time()) + end + + @spec disconnect(pool_ref :: any, err :: Exception.t()) :: :ok + def disconnect(pool_ref, err) do + done(pool_ref, [{conn(:status) + 1, :error}], :disconnect, err) + end + + @spec stop(pool_ref :: any, err :: Exception.t()) :: :ok + def stop(pool_ref, err) do + done(pool_ref, [{conn(:status) + 1, :error}], :stop, err) + end + + @spec handle(pool_ref :: any, fun :: atom, args :: [term], Keyword.t()) :: tuple + def handle(pool_ref, fun, args, opts) do + handle_or_cleanup(:handle, pool_ref, fun, args, opts) + end + + @spec cleanup(pool_ref :: any, fun :: atom, args :: [term], Keyword.t()) :: tuple + def cleanup(pool_ref, fun, args, opts) do + handle_or_cleanup(:cleanup, pool_ref, fun, args, opts) + end + + defp handle_or_cleanup(type, pool_ref, fun, args, opts) do + pool_ref(holder: holder, lock: lock) = pool_ref + + try do + :ets.lookup(holder, :conn) + rescue + ArgumentError -> + msg = "connection is closed because of an error, disconnect or timeout" + {:disconnect, DBConnection.ConnectionError.exception(msg), _state = :unused} + else + [conn(lock: conn_lock)] when conn_lock != lock -> + raise "an outdated connection has been given to DBConnection on #{fun}/#{length(args) + 2}" + + [conn(status: :error)] -> + msg = "connection is closed because of an error, disconnect or timeout" + {:disconnect, DBConnection.ConnectionError.exception(msg), _state = :unused} + + [conn(status: :aborted)] when type != :cleanup -> + msg = "transaction rolling back" + {:disconnect, DBConnection.ConnectionError.exception(msg), _state = :unused} + + [conn(module: module, state: state)] -> + holder_apply(holder, module, fun, args ++ [opts, state]) + end + end + + ## Pool state helpers API (invoked by 
callers) + + @spec put_state(pool_ref :: any, term) :: :ok + def put_state(pool_ref(holder: sink_holder), state) do + :ets.update_element(sink_holder, :conn, [{conn(:state) + 1, state}]) + :ok + end + + @spec status?(pool_ref :: any, :ok | :aborted) :: boolean() + def status?(pool_ref(holder: holder), status) do + try do + :ets.lookup_element(holder, :conn, conn(:status) + 1) == status + rescue + ArgumentError -> false + end + end + + @spec put_status(pool_ref :: any, :ok | :aborted) :: boolean() + def put_status(pool_ref(holder: holder), status) do + try do + :ets.update_element(holder, :conn, [{conn(:status) + 1, status}]) + rescue + ArgumentError -> false + end + end + + ## Pool callbacks (invoked by pools) + + @spec reply_redirect({pid, reference}, pid | :shared | :auto, GenServer.server()) :: :ok + def reply_redirect(from, caller, redirect) do + GenServer.reply(from, {:redirect, caller, redirect}) + :ok + end + + @spec reply_error({pid, reference}, Exception.t()) :: :ok + def reply_error(from, exception) do + GenServer.reply(from, {:error, exception}) + :ok + end + + @spec handle_checkout(t, {pid, reference}, reference, checkin_time) :: boolean + def handle_checkout(holder, {pid, mref}, ref, checkin_time) do + :ets.give_away(holder, pid, {mref, ref, checkin_time}) + rescue + ArgumentError -> + if Process.alive?(pid) or :ets.info(holder, :owner) != self() do + raise ArgumentError, no_holder(holder, pid) + else + false + end + end + + @spec handle_deadline(t, reference) :: boolean + def handle_deadline(holder, deadline) do + :ets.lookup_element(holder, :conn, conn(:deadline) + 1) + rescue + ArgumentError -> false + else + ^deadline -> true + _ -> false + end + + @spec handle_ping(t) :: true + def handle_ping(holder) do + :ets.lookup(holder, :conn) + rescue + ArgumentError -> + raise ArgumentError, no_holder(holder, nil) + else + [conn(connection: conn, state: state)] -> + DBConnection.Connection.ping({conn, holder}, state) + :ets.delete(holder) + true + end + + @spec handle_disconnect(t, Exception.t()) :: boolean + def handle_disconnect(holder, err) do + handle_done(holder, &DBConnection.Connection.disconnect/3, err) + end + + @spec handle_stop(t, term) :: boolean + def handle_stop(holder, err) do + handle_done(holder, &DBConnection.Connection.stop/3, err) + end + + @spec maybe_disconnect(t, integer, non_neg_integer) :: boolean() + def maybe_disconnect(holder, start, interval) do + ts = :ets.lookup_element(holder, :conn, conn(:ts) + 1) + + cond do + ts >= start -> + false + + interval == 0 -> + true + + true -> + pid = :ets.lookup_element(holder, :conn, conn(:connection) + 1) + System.monotonic_time() > :erlang.phash2(pid, interval) + start + end + rescue + _ -> false + else + true -> + opts = [message: "disconnect_all requested", severity: :info] + handle_disconnect(holder, DBConnection.ConnectionError.exception(opts)) + + false -> + false + end + + ## Private + + defp checkout(pool, callers, queue?, start, timeout) do + case GenServer.whereis(pool) do + pid when node(pid) == node() -> + checkout_call(pid, callers, queue?, start, timeout) + + pid when node(pid) != node() -> + {:exit, {:badnode, node(pid)}} + + {_name, node} -> + {:exit, {:badnode, node}} + + nil -> + {:exit, :noproc} + end + end + + defp checkout_call(pid, callers, queue?, start, timeout) do + lock = Process.monitor(pid) + send(pid, {:db_connection, {self(), lock}, {:checkout, callers, start, queue?}}) + + receive do + {:"ETS-TRANSFER", holder, pool, {^lock, ref, checkin_time}} -> + Process.demonitor(lock, [:flush]) + 
{deadline, ops} = start_deadline(timeout, pool, ref, holder, start) + :ets.update_element(holder, :conn, [{conn(:lock) + 1, lock} | ops]) + + pool_ref = + pool_ref(pool: pool, reference: ref, deadline: deadline, holder: holder, lock: lock) + + checkout_result(holder, pool_ref, checkin_time) + + {^lock, reply} -> + Process.demonitor(lock, [:flush]) + reply + + {:DOWN, ^lock, _, _, reason} -> + {:exit, reason} + end + end + + defp checkout_result(holder, pool_ref, checkin_time) do + try do + :ets.lookup(holder, :conn) + rescue + ArgumentError -> + # Deadline could hit and be handled pool before using connection + msg = "connection not available because deadline reached while in queue" + {:error, DBConnection.ConnectionError.exception(msg)} + else + [conn(module: mod, state: state)] -> + {:ok, pool_ref, mod, checkin_time, state} + end + end + + defp no_holder(holder, maybe_pid) do + reason = + case :ets.info(holder, :owner) do + :undefined -> "does not exist" + ^maybe_pid -> "is being given to its current owner" + owner when owner != self() -> "does not belong to the giving process" + _ -> "could not be given away" + end + + call_reason = + if maybe_pid do + "Error happened when attempting to transfer to #{inspect(maybe_pid)} " <> + "(alive: #{Process.alive?(maybe_pid)})" + else + "Error happened when looking up connection" + end + + """ + #{inspect(__MODULE__)} #{inspect(holder)} #{reason}, pool inconsistent. + #{call_reason}. + + SELF: #{inspect(self())} + ETS INFO: #{inspect(:ets.info(holder))} + + Please report at https://github.com/elixir-ecto/db_connection/issues" + """ + end + + defp holder_apply(holder, module, fun, args) do + try do + apply(module, fun, args) + catch + kind, reason -> + {:catch, kind, reason, __STACKTRACE__} + else + result when is_tuple(result) -> + state = :erlang.element(:erlang.tuple_size(result), result) + + try do + :ets.update_element(holder, :conn, {conn(:state) + 1, state}) + result + rescue + ArgumentError -> + augment_disconnect(result) + end + + # If it is not a tuple, we just return it as is so we raise bad return. 
+ result -> + result + end + end + + defp augment_disconnect({:disconnect, %DBConnection.ConnectionError{} = err, state}) do + %{message: message} = err + + message = + message <> + " (the connection was closed by the pool, " <> + "possibly due to a timeout or because the pool has been terminated)" + + {:disconnect, %{err | message: message}, state} + end + + defp augment_disconnect(result), do: result + + defp done(pool_ref, ops, tag, info) do + pool_ref(pool: pool, reference: ref, deadline: deadline, holder: holder) = pool_ref + cancel_deadline(deadline) + + try do + :ets.update_element(holder, :conn, [{conn(:deadline) + 1, nil} | ops]) + :ets.give_away(holder, pool, {tag, ref, info}) + rescue + ArgumentError -> :ok + else + true -> :ok + end + end + + defp handle_done(holder, stop, err) do + :ets.lookup(holder, :conn) + rescue + ArgumentError -> + false + else + [conn(connection: pid, deadline: deadline, state: state)] -> + cancel_deadline(deadline) + :ets.delete(holder) + stop.({pid, holder}, err, state) + true + end + + defp abs_timeout(now, opts) do + case Keyword.get(opts, :timeout, @timeout) do + :infinity -> Keyword.get(opts, :deadline) + timeout -> min(now + timeout, Keyword.get(opts, :deadline)) + end + end + + defp start_deadline(nil, _, _, _, _) do + {nil, []} + end + + defp start_deadline(timeout, pid, ref, holder, start) do + deadline = + :erlang.start_timer(timeout, pid, {ref, holder, self(), timeout - start}, abs: true) + + {deadline, [{conn(:deadline) + 1, deadline}]} + end + + defp cancel_deadline(nil) do + :ok + end + + defp cancel_deadline(deadline) do + :erlang.cancel_timer(deadline, async: true, info: false) + end +end diff --git a/deps/db_connection/lib/db_connection/log_entry.ex b/deps/db_connection/lib/db_connection/log_entry.ex new file mode 100644 index 0000000..6e2c39b --- /dev/null +++ b/deps/db_connection/lib/db_connection/log_entry.ex @@ -0,0 +1,81 @@ +defmodule DBConnection.LogEntry do + @moduledoc """ + Struct containing log entry information. + """ + + defstruct [ + :call, + :query, + :params, + :result, + :pool_time, + :connection_time, + :decode_time, + :idle_time + ] + + @typedoc """ + Log entry information. + + * `:call` - The `DBConnection` function called + * `:query` - The query used by the function + * `:params` - The params passed to the function (if any) + * `:result` - The result of the call + * `:pool_time` - The length of time awaiting a connection from the pool (if + the connection was not already checked out) + * `:connection_time` - The length of time using the connection (if a + connection was used) + * `:decode_time` - The length of time decoding the result (if decoded the + result using `DBConnection.Query.decode/3`) + * `:idle_time` - The amount of time the connection was idle before use + + All times are in the native time units of the VM, see + `System.monotonic_time/0`. 
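+
+  For example (an editor's illustration, not part of the upstream docs), a
+  `:log` callback that receives such an entry might convert the native-unit
+  fields to milliseconds with `System.convert_time_unit/3`, guarding against
+  fields that are `nil`:
+
+      # `entry` is assumed to be a %DBConnection.LogEntry{} passed to a :log callback
+      to_ms = fn
+        nil -> nil
+        native -> System.convert_time_unit(native, :native, :millisecond)
+      end
+
+      to_ms.(entry.pool_time)
+      to_ms.(entry.connection_time)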
+ """ + @type t :: %__MODULE__{ + call: atom, + query: any, + params: any, + result: {:ok, any} | {:ok, any, any} | {:error, Exception.t()}, + pool_time: non_neg_integer | nil, + connection_time: non_neg_integer | nil, + idle_time: non_neg_integer | nil, + decode_time: non_neg_integer | nil + } + + @doc false + def new(call, query, params, times, result) do + entry = %__MODULE__{call: call, query: query, params: params, result: result} + parse_times(times, entry) + end + + ## Helpers + + defp parse_times([], entry), do: entry + + defp parse_times(times, entry) do + stop = :erlang.monotonic_time() + {_, entry} = Enum.reduce(times, {stop, entry}, &parse_time/2) + entry + end + + defp parse_time({:decode, start}, {stop, entry}) do + {start, %{entry | decode_time: stop - start}} + end + + defp parse_time({:checkout, start}, {stop, entry}) do + {start, %{entry | pool_time: stop - start}} + end + + defp parse_time({:checkin, start}, {stop, entry}) do + # The checkin time was most likely before checkout but it is + # not guaranteed as they are tracked by different processes. + # There should be no further measurements after checkin. + {stop, %{entry | idle_time: max(stop - start, 0)}} + end + + defp parse_time({_, start}, {stop, entry}) do + %{connection_time: connection_time} = entry + {start, %{entry | connection_time: (connection_time || 0) + (stop - start)}} + end +end diff --git a/deps/db_connection/lib/db_connection/ownership.ex b/deps/db_connection/lib/db_connection/ownership.ex new file mode 100644 index 0000000..bf35a52 --- /dev/null +++ b/deps/db_connection/lib/db_connection/ownership.ex @@ -0,0 +1,139 @@ +defmodule DBConnection.OwnershipError do + defexception [:message] + + def exception(message), do: %DBConnection.OwnershipError{message: message} +end + +defmodule DBConnection.Ownership do + @moduledoc """ + A DBConnection pool that requires explicit checkout and checkin + as a mechanism to coordinate between processes. + + ## Options + + * `:ownership_mode` - When mode is `:manual`, all connections must + be explicitly checked out before by using `ownership_checkout/2`. + Otherwise, mode is `:auto` and connections are checked out + implicitly. `{:shared, owner}` mode is also supported so + processes are allowed on demand. On all cases, checkins are + explicit via `ownership_checkin/2`. Defaults to `:auto`. + * `:ownership_timeout` - The maximum time that a process is allowed to own + a connection, default `120_000`. This timeout exists mostly for sanity + checking purposes and can be increased at will, since DBConnection + automatically checks in connections whenever there is a mode change. + * `:ownership_log` - The `Logger.level` to log ownership changes, or `nil` + not to log, default `nil`. + + There are also two experimental options, `:post_checkout` and `:pre_checkin` + which allows a developer to configure what happens when a connection is + checked out and checked in. Those options are meant to be used during tests, + and have the following behaviour: + + * `:post_checkout` - it must be an anonymous function that receives the + connection module, the connection state and it must return either + `{:ok, connection_module, connection_state}` or + `{:disconnect, err, connection_module, connection_state}`. This allows + the developer to change the connection module on post checkout. However, + in case of disconnects, the return `connection_module` must be the same + as the `connection_module` given. Defaults to simply returning the given + connection module and state. 
+ + * `:pre_checkin` - it must be an anonymous function that receives the + checkin reason (`:checkin`, `{:disconnect, err}` or `{:stop, err}`), + the connection module and the connection state returned by `post_checkout`. + It must return either `{:ok, connection_module, connection_state}` or + `{:disconnect, err, connection_module, connection_state}` where the connection + module is the module given to `:post_checkout` Defaults to simply returning + the given connection module and state. + + ## Callers lookup + + When checking out, the ownership pool first looks if there is a connection + assigned to the current process and then checks if there is a connection + assigned to any of the processes listed under the `$callers` process + dictionary entry. The `$callers` entry is set by default for tasks from + Elixir v1.8. + + You can also pass the `:caller` option on checkout with a pid and that + pid will be looked up first, instead of `self()`, and then we fall back + to `$callers`. + """ + + alias DBConnection.Ownership.Manager + alias DBConnection.Holder + + @doc false + defdelegate child_spec(args), to: Manager + + @doc false + defdelegate disconnect_all(pool, interval, opts), to: Manager + + @doc false + def checkout(pool, callers, opts) do + case Manager.proxy_for(callers, opts) do + {caller, pool} -> Holder.checkout(pool, [caller], opts) + nil -> Holder.checkout(pool, callers, opts) + end + end + + @doc """ + Explicitly checks a connection out from the ownership manager. + + It may return `:ok` if the connection is checked out. + `{:already, :owner | :allowed}` if the caller process already + has a connection, `:error` if it could be not checked out or + raise if there was an error. + """ + @spec ownership_checkout(GenServer.server(), Keyword.t()) :: + :ok | {:already, :owner | :allowed} + def ownership_checkout(manager, opts) do + with {:ok, pid} <- Manager.checkout(manager, opts) do + case Holder.checkout(pid, [self()], opts) do + {:ok, pool_ref, _module, _idle_time, _state} -> + Holder.checkin(pool_ref) + + {:error, err} -> + raise err + end + end + end + + @doc """ + Changes the ownership mode. + + `mode` may be `:auto`, `:manual` or `{:shared, owner}`. + + The operation will always succeed when setting the mode to + `:auto` or `:manual`. It may fail with reason `:not_owner` + or `:not_found` when setting `{:shared, pid}` and the + given pid does not own any connection. May return + `:already_shared` if another process set the ownership + mode to `{:shared, _}` and is still alive. + """ + @spec ownership_mode(GenServer.server(), :auto | :manual | {:shared, pid}, Keyword.t()) :: + :ok | :already_shared | :not_owner | :not_found + defdelegate ownership_mode(manager, mode, opts), to: Manager, as: :mode + + @doc """ + Checks a connection back in. + + A connection can only be checked back in by its owner. + """ + @spec ownership_checkin(GenServer.server(), Keyword.t()) :: + :ok | :not_owner | :not_found + defdelegate ownership_checkin(manager, opts), to: Manager, as: :checkin + + @doc """ + Allows the process given by `allow` to use the connection checked out + by `owner_or_allowed`. + + It may return `:ok` if the connection is checked out. + `{:already, :owner | :allowed}` if the `allow` process already + has a connection. `owner_or_allowed` may either be the owner or any + other allowed process. Returns `:not_found` if the given process + does not have any connection checked out. 
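+
+  For example (an editor's sketch, not taken from the upstream docs; `manager`
+  is assumed to be a running ownership pool and `other_pid` an already spawned
+  process):
+
+      :ok = DBConnection.Ownership.ownership_checkout(manager, [])
+      # other_pid may now use the connection checked out by self()
+      :ok = DBConnection.Ownership.ownership_allow(manager, self(), other_pid, [])
+      # and the owner eventually returns the connection to the pool
+      :ok = DBConnection.Ownership.ownership_checkin(manager, [])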
+ """ + @spec ownership_allow(GenServer.server(), owner_or_allowed :: pid, allow :: pid, Keyword.t()) :: + :ok | {:already, :owner | :allowed} | :not_found + defdelegate ownership_allow(manager, owner, allow, opts), to: Manager, as: :allow +end diff --git a/deps/db_connection/lib/db_connection/ownership/manager.ex b/deps/db_connection/lib/db_connection/ownership/manager.ex new file mode 100644 index 0000000..1f0c2ec --- /dev/null +++ b/deps/db_connection/lib/db_connection/ownership/manager.ex @@ -0,0 +1,336 @@ +defmodule DBConnection.Ownership.Manager do + @moduledoc false + use GenServer + require Logger + alias DBConnection.Ownership.Proxy + + @timeout 5_000 + + @callback start_link({module, opts :: Keyword.t()}) :: + GenServer.on_start() + def start_link({module, opts}) do + {owner_opts, pool_opts} = Keyword.split(opts, [:name]) + GenServer.start_link(__MODULE__, {module, owner_opts, pool_opts}, owner_opts) + end + + @callback disconnect_all(GenServer.server(), non_neg_integer, Keyword.t()) :: :ok + def disconnect_all(pool, interval, opts) do + inner_pool = GenServer.call(pool, :pool, :infinity) + DBConnection.ConnectionPool.disconnect_all(inner_pool, interval, opts) + end + + @spec proxy_for(callers :: [pid], Keyword.t()) :: {caller :: pid, proxy :: pid} | nil + def proxy_for(callers, opts) do + case Keyword.fetch(opts, :name) do + {:ok, name} -> + Enum.find_value(callers, &List.first(:ets.lookup(name, &1))) + + :error -> + nil + end + end + + @spec checkout(GenServer.server(), Keyword.t()) :: + {:ok, pid} | {:already, :owner | :allowed} + def checkout(manager, opts) do + GenServer.call(manager, {:checkout, opts}, :infinity) + end + + @spec checkin(GenServer.server(), Keyword.t()) :: + :ok | :not_owner | :not_found + def checkin(manager, opts) do + timeout = Keyword.get(opts, :timeout, @timeout) + GenServer.call(manager, :checkin, timeout) + end + + @spec mode(GenServer.server(), :auto | :manual | {:shared, pid}, Keyword.t()) :: + :ok | :already_shared | :not_owner | :not_found + def mode(manager, mode, opts) + when mode in [:auto, :manual] + when elem(mode, 0) == :shared and is_pid(elem(mode, 1)) do + timeout = Keyword.get(opts, :timeout, @timeout) + GenServer.call(manager, {:mode, mode}, timeout) + end + + @spec allow(GenServer.server(), parent :: pid, allow :: pid, Keyword.t()) :: + :ok | {:already, :owner | :allowed} | :not_found + def allow(manager, parent, allow, opts) do + timeout = Keyword.get(opts, :timeout, @timeout) + GenServer.call(manager, {:allow, parent, allow}, timeout) + end + + ## Callbacks + + def init({module, owner_opts, pool_opts}) do + DBConnection.register_as_pool(module) + + ets = + case Keyword.fetch(owner_opts, :name) do + {:ok, name} when is_atom(name) -> + :ets.new(name, [:set, :named_table, :protected, read_concurrency: true]) + + _ -> + nil + end + + # We can only start the connection pool directly because + # neither the pool's GenServer nor the manager trap exits. + # Otherwise we would need a supervisor plus a watcher process. 
+ pool_opts = Keyword.delete(pool_opts, :pool) + {:ok, pool} = DBConnection.start_link(module, pool_opts) + + log = Keyword.get(pool_opts, :ownership_log, nil) + mode = Keyword.get(pool_opts, :ownership_mode, :auto) + checkout_opts = Keyword.take(pool_opts, [:ownership_timeout, :queue_target, :queue_interval]) + + {:ok, + %{ + pool: pool, + checkouts: %{}, + owners: %{}, + checkout_opts: checkout_opts, + mode: mode, + mode_ref: nil, + ets: ets, + log: log + }} + end + + def handle_call(:pool, _from, %{pool: pool} = state) do + {:reply, pool, state} + end + + def handle_call({:mode, {:shared, shared}}, {caller, _}, %{mode: {:shared, current}} = state) do + cond do + shared == current -> + {:reply, :ok, state} + + Process.alive?(current) -> + {:reply, :already_shared, state} + + true -> + share_and_reply(state, shared, caller) + end + end + + def handle_call({:mode, {:shared, shared}}, {caller, _}, state) do + share_and_reply(state, shared, caller) + end + + def handle_call({:mode, mode}, _from, %{mode: mode} = state) do + {:reply, :ok, state} + end + + def handle_call({:mode, mode}, {caller, _}, state) do + state = proxy_checkin_all_except(state, [], caller) + {:reply, :ok, %{state | mode: mode, mode_ref: nil}} + end + + def handle_call(:checkin, {caller, _}, state) do + {reply, state} = proxy_checkin(state, caller, caller) + {:reply, reply, state} + end + + def handle_call({:allow, caller, allow}, _from, %{checkouts: checkouts} = state) do + if kind = already_checked_out(checkouts, allow) do + {:reply, {:already, kind}, state} + else + case Map.get(checkouts, caller, :not_found) do + {:owner, ref, proxy} -> + {:reply, :ok, owner_allow(state, allow, ref, proxy)} + + {:allowed, ref, proxy} -> + {:reply, :ok, owner_allow(state, allow, ref, proxy)} + + :not_found -> + {:reply, :not_found, state} + end + end + end + + def handle_call({:checkout, opts}, {caller, _}, %{checkouts: checkouts} = state) do + if kind = already_checked_out(checkouts, caller) do + {:reply, {:already, kind}, state} + else + {proxy, state} = proxy_checkout(state, caller, opts) + {:reply, {:ok, proxy}, state} + end + end + + def handle_info({:db_connection, from, {:checkout, callers, _now, queue?}}, state) do + %{checkouts: checkouts, mode: mode, checkout_opts: checkout_opts} = state + caller = find_caller(callers, checkouts, mode) + + case Map.get(checkouts, caller, :not_found) do + {status, _ref, proxy} when status in [:owner, :allowed] -> + DBConnection.Holder.reply_redirect(from, caller, proxy) + {:noreply, state} + + :not_found when mode == :auto -> + {proxy, state} = proxy_checkout(state, caller, [queue: queue?] 
++ checkout_opts) + DBConnection.Holder.reply_redirect(from, caller, proxy) + {:noreply, state} + + :not_found when mode == :manual -> + not_found(from) + {:noreply, state} + + :not_found -> + {:shared, shared} = mode + {:owner, _ref, proxy} = Map.fetch!(checkouts, shared) + DBConnection.Holder.reply_redirect(from, shared, proxy) + {:noreply, state} + end + end + + def handle_info({:DOWN, ref, _, _, _}, state) do + {:noreply, state |> owner_down(ref) |> unshare(ref)} + end + + def handle_info(_msg, state) do + {:noreply, state} + end + + defp already_checked_out(checkouts, pid) do + case Map.get(checkouts, pid, :not_found) do + {:owner, _, _} -> :owner + {:allowed, _, _} -> :allowed + :not_found -> nil + end + end + + defp proxy_checkout(state, caller, opts) do + %{pool: pool, checkouts: checkouts, owners: owners, ets: ets, log: log} = state + + {:ok, proxy} = + DynamicSupervisor.start_child( + DBConnection.Ownership.Supervisor, + {DBConnection.Ownership.Proxy, {caller, pool, opts}} + ) + + log && Logger.log(log, fn -> [inspect(caller), " owns proxy " | inspect(proxy)] end) + ref = Process.monitor(proxy) + checkouts = Map.put(checkouts, caller, {:owner, ref, proxy}) + owners = Map.put(owners, ref, {proxy, caller, []}) + ets && :ets.insert(ets, {caller, proxy}) + {proxy, %{state | checkouts: checkouts, owners: owners}} + end + + defp proxy_checkin(state, maybe_owner, caller) do + case get_and_update_in(state.checkouts, &Map.pop(&1, maybe_owner, :not_found)) do + {{:owner, ref, proxy}, state} -> + Proxy.stop(proxy, caller) + {:ok, state |> owner_down(ref) |> unshare(ref)} + + {{:allowed, _, _}, _} -> + {:not_owner, state} + + {:not_found, _} -> + {:not_found, state} + end + end + + defp proxy_checkin_all_except(state, except, caller) do + Enum.reduce(state.checkouts, state, fn {pid, _}, state -> + if pid in except do + state + else + {_, state} = proxy_checkin(state, pid, caller) + state + end + end) + end + + defp owner_allow(%{ets: ets, log: log} = state, allow, ref, proxy) do + log && Logger.log(log, fn -> [inspect(allow), " allowed on proxy " | inspect(proxy)] end) + state = put_in(state.checkouts[allow], {:allowed, ref, proxy}) + + state = + update_in(state.owners[ref], fn {proxy, caller, allowed} -> + {proxy, caller, [allow | List.delete(allowed, allow)]} + end) + + ets && :ets.insert(ets, {allow, proxy}) + state + end + + defp owner_down(%{ets: ets, log: log} = state, ref) do + case get_and_update_in(state.owners, &Map.pop(&1, ref)) do + {{proxy, caller, allowed}, state} -> + Process.demonitor(ref, [:flush]) + entries = [caller | allowed] + + log && + Logger.log(log, fn -> + [Enum.map_join(entries, ", ", &inspect/1), " lose proxy " | inspect(proxy)] + end) + + ets && Enum.each(entries, &:ets.delete(ets, &1)) + update_in(state.checkouts, &Map.drop(&1, entries)) + + {nil, state} -> + state + end + end + + defp share_and_reply(%{checkouts: checkouts} = state, shared, caller) do + case Map.get(checkouts, shared, :not_found) do + {:owner, ref, _} -> + state = proxy_checkin_all_except(state, [shared], caller) + {:reply, :ok, %{state | mode: {:shared, shared}, mode_ref: ref}} + + {:allowed, _, _} -> + {:reply, :not_owner, state} + + :not_found -> + {:reply, :not_found, state} + end + end + + defp unshare(%{mode_ref: ref} = state, ref) do + %{state | mode: :manual, mode_ref: nil} + end + + defp unshare(state, _ref) do + state + end + + defp find_caller(callers, checkouts, :manual) do + Enum.find(callers, &Map.has_key?(checkouts, &1)) || hd(callers) + end + + defp find_caller([caller | _], 
_checkouts, _mode) do + caller + end + + defp not_found({pid, _} = from) do + msg = """ + cannot find ownership process for #{inspect(pid)}. + + When using ownership, you must manage connections in one + of the four ways: + + * By explicitly checking out a connection + * By explicitly allowing a spawned process + * By running the pool in shared mode + * By using :caller option with allowed process + + The first two options require every new process to explicitly + check a connection out or be allowed by calling checkout or + allow respectively. + + The third option requires a {:shared, pid} mode to be set. + If using shared mode in tests, make sure your tests are not + async. + + The fourth option requires [caller: pid] to be used when + checking out a connection from the pool. The caller process + should already be allowed on a connection. + + If you are reading this error, it means you have not done one + of the steps above or that the owner process has crashed. + """ + + DBConnection.Holder.reply_error(from, DBConnection.OwnershipError.exception(msg)) + end +end diff --git a/deps/db_connection/lib/db_connection/ownership/proxy.ex b/deps/db_connection/lib/db_connection/ownership/proxy.ex new file mode 100644 index 0000000..7ec3486 --- /dev/null +++ b/deps/db_connection/lib/db_connection/ownership/proxy.ex @@ -0,0 +1,306 @@ +defmodule DBConnection.Ownership.Proxy do + @moduledoc false + + alias DBConnection.Holder + use GenServer, restart: :temporary + + @time_unit 1000 + @ownership_timeout 120_000 + @queue_target 50 + @queue_interval 1000 + + def start_link({caller, pool, pool_opts}) do + GenServer.start_link(__MODULE__, {caller, pool, pool_opts}, []) + end + + def stop(proxy, caller) do + GenServer.cast(proxy, {:stop, caller}) + end + + # Callbacks + + def init({caller, pool, pool_opts}) do + pool_opts = + pool_opts + |> Keyword.put(:timeout, :infinity) + |> Keyword.delete(:deadline) + + owner_ref = Process.monitor(caller) + ownership_timeout = Keyword.get(pool_opts, :ownership_timeout, @ownership_timeout) + timeout = Keyword.get(pool_opts, :queue_target, @queue_target) * 2 + interval = Keyword.get(pool_opts, :queue_interval, @queue_interval) + + pre_checkin = Keyword.get(pool_opts, :pre_checkin, fn _, mod, state -> {:ok, mod, state} end) + post_checkout = Keyword.get(pool_opts, :post_checkout, &{:ok, &1, &2}) + + state = %{ + client: nil, + timer: nil, + holder: nil, + timeout: timeout, + interval: interval, + poll: nil, + owner: {caller, owner_ref}, + pool: pool, + pool_ref: nil, + pool_opts: pool_opts, + queue: :queue.new(), + mod: nil, + pre_checkin: pre_checkin, + post_checkout: post_checkout, + ownership_timer: start_timer(caller, ownership_timeout) + } + + now = System.monotonic_time(@time_unit) + {:ok, start_poll(now, state)} + end + + def handle_info({:DOWN, ref, _, pid, _reason}, %{owner: {_, ref}} = state) do + down("owner #{inspect(pid)} exited", state) + end + + def handle_info({:timeout, deadline, {_ref, holder, pid, len}}, %{holder: holder} = state) do + if Holder.handle_deadline(holder, deadline) do + message = + "client #{inspect(pid)} timed out because " <> + "it queued and checked out the connection for longer than #{len}ms" + + down(message, state) + else + {:noreply, state} + end + end + + def handle_info( + {:timeout, timer, {__MODULE__, pid, timeout}}, + %{ownership_timer: timer} = state + ) do + message = + "owner #{inspect(pid)} timed out because " <> + "it owned the connection for longer than #{timeout}ms (set via the :ownership_timeout option)" + + # We don't 
invoke down because this is always a disconnect, even if there is no client. + # On the other hand, those timeouts are unlikely to trigger, as it defaults to 2 mins. + pool_disconnect(DBConnection.ConnectionError.exception(message), state) + end + + def handle_info({:timeout, poll, time}, %{poll: poll} = state) do + state = timeout(time, state) + {:noreply, start_poll(time, state)} + end + + def handle_info( + {:db_connection, from, {:checkout, _caller, _now, _queue?}}, + %{holder: nil} = state + ) do + %{pool: pool, pool_opts: pool_opts, owner: {_, owner_ref}, post_checkout: post_checkout} = + state + + case Holder.checkout(pool, [self()], pool_opts) do + {:ok, pool_ref, original_mod, _idle_time, conn_state} -> + case post_checkout.(original_mod, conn_state) do + {:ok, conn_mod, conn_state} -> + holder = Holder.new(self(), owner_ref, conn_mod, conn_state) + state = %{state | pool_ref: pool_ref, holder: holder, mod: original_mod} + checkout(from, state) + + {:disconnect, err, ^original_mod, _conn_state} -> + Holder.disconnect(pool_ref, err) + Holder.reply_error(from, err) + {:stop, {:shutdown, err}, state} + end + + {:error, err} -> + Holder.reply_error(from, err) + {:stop, {:shutdown, err}, state} + end + end + + def handle_info( + {:db_connection, from, {:checkout, _caller, _now, _queue?}}, + %{client: nil} = state + ) do + checkout(from, state) + end + + def handle_info({:db_connection, from, {:checkout, _caller, now, queue?}}, state) do + if queue? do + %{queue: queue} = state + queue = :queue.in({now, from}, queue) + {:noreply, %{state | queue: queue}} + else + message = "connection not available and queuing is disabled" + err = DBConnection.ConnectionError.exception(message) + Holder.reply_error(from, err) + {:noreply, state} + end + end + + def handle_info( + {:"ETS-TRANSFER", holder, _, {msg, ref, extra}}, + %{holder: holder, client: {_, ref, _}} = state + ) do + case msg do + :checkin -> checkin(state) + :disconnect -> pool_disconnect(extra, state) + :stop -> pool_stop(extra, state) + end + end + + def handle_info({:"ETS-TRANSFER", holder, pid, ref}, %{holder: holder, owner: {_, ref}} = state) do + down("client #{inspect(pid)} exited", state) + end + + def handle_cast({:stop, caller}, %{owner: {owner, _}} = state) do + message = "#{inspect(caller)} checked in the connection owned by #{inspect(owner)}" + + message = + case pruned_stacktrace(caller) do + [] -> + message + + current_stack -> + message <> + "\n\n#{inspect(caller)} triggered the checkin at location:\n\n" <> + Exception.format_stacktrace(current_stack) + end + + down(message, state) + end + + defp checkout({pid, ref} = from, %{holder: holder} = state) do + if Holder.handle_checkout(holder, from, ref, nil) do + {:noreply, %{state | client: {pid, ref, pruned_stacktrace(pid)}}} + else + next(state) + end + end + + defp checkin(state) do + next(%{state | client: nil}) + end + + defp next(%{queue: queue} = state) do + case :queue.out(queue) do + {{:value, {_, from}}, queue} -> + checkout(from, %{state | queue: queue}) + + {:empty, queue} -> + {:noreply, %{state | queue: queue}} + end + end + + defp start_timer(_, :infinity), do: nil + + defp start_timer(pid, timeout) do + :erlang.start_timer(timeout, self(), {__MODULE__, pid, timeout}) + end + + # It is down but never checked out from pool + defp down(reason, %{holder: nil} = state) do + {:stop, {:shutdown, reason}, state} + end + + # If it is down but it has no client, checkin + defp down(reason, %{client: nil} = state) do + pool_checkin(reason, state) + end + + # If it is 
down but it has a client, disconnect + defp down(reason, %{client: {client, _, checkout_stack}} = state) do + reason = + case pruned_stacktrace(client) do + [] -> + reason + + current_stack -> + reason <> + """ + \n\nClient #{inspect(client)} is still using a connection from owner at location: + + #{Exception.format_stacktrace(current_stack)} + The connection itself was checked out by #{inspect(client)} at location: + + #{Exception.format_stacktrace(checkout_stack)} + """ + end + + err = DBConnection.ConnectionError.exception(reason) + pool_disconnect(err, state) + end + + ## Helpers + + defp pool_checkin(reason, state) do + pool_done(reason, state, :checkin, fn pool_ref, _ -> Holder.checkin(pool_ref) end) + end + + defp pool_disconnect(err, state) do + pool_done(err, state, {:disconnect, err}, &Holder.disconnect/2) + end + + defp pool_stop(err, state) do + pool_done(err, state, {:stop, err}, &Holder.stop/2, &Holder.stop/2) + end + + defp pool_done(err, state, op, done, stop_or_disconnect \\ &Holder.disconnect/2) do + %{holder: holder, pool_ref: pool_ref, pre_checkin: pre_checkin, mod: original_mod} = state + + if holder do + {conn_mod, conn_state} = Holder.delete(holder) + + case pre_checkin.(op, conn_mod, conn_state) do + {:ok, ^original_mod, conn_state} -> + Holder.put_state(pool_ref, conn_state) + done.(pool_ref, err) + {:stop, {:shutdown, err}, state} + + {:disconnect, err, ^original_mod, conn_state} -> + Holder.put_state(pool_ref, conn_state) + stop_or_disconnect.(pool_ref, err) + {:stop, {:shutdown, err}, state} + end + else + {:stop, {:shutdown, err}, state} + end + end + + defp start_poll(now, %{interval: interval} = state) do + timeout = now + interval + poll = :erlang.start_timer(timeout, self(), timeout, abs: true) + %{state | poll: poll} + end + + defp timeout(time, %{queue: queue, timeout: timeout} = state) do + case :queue.out(queue) do + {{:value, {sent, from}}, queue} when sent + timeout < time -> + drop(time - sent, from) + timeout(time, %{state | queue: queue}) + + {_, _} -> + state + end + end + + defp drop(delay, from) do + message = + "connection not available and request was dropped from queue after #{delay}ms. " <> + "You can configure how long requests wait in the queue using :queue_target and " <> + ":queue_interval. See DBConnection.start_link/2 for more information" + + err = DBConnection.ConnectionError.exception(message, :queue_timeout) + Holder.reply_error(from, err) + end + + @prune_modules [:gen, GenServer, DBConnection, DBConnection.Holder, DBConnection.Ownership] + + defp pruned_stacktrace(pid) do + case Process.info(pid, :current_stacktrace) do + {:current_stacktrace, stacktrace} -> + Enum.drop_while(stacktrace, &match?({mod, _, _, _} when mod in @prune_modules, &1)) + + _ -> + [] + end + end +end diff --git a/deps/db_connection/lib/db_connection/query.ex b/deps/db_connection/lib/db_connection/query.ex new file mode 100644 index 0000000..734cee7 --- /dev/null +++ b/deps/db_connection/lib/db_connection/query.ex @@ -0,0 +1,57 @@ +defprotocol DBConnection.Query do + @moduledoc """ + The `DBConnection.Query` protocol is responsible for preparing and + encoding queries. + + All `DBConnection.Query` functions are executed in the caller process which + means it's safe to, for example, raise exceptions or do blocking calls as + they won't affect the connection process. + """ + + @doc """ + Parse a query. + + This function is called to parse a query term before it is prepared using a + connection callback module. + + See `DBConnection.prepare/3`. 
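+
+  As an illustration only (an editor's sketch, not from the upstream docs), a
+  pass-through implementation of this protocol for a hypothetical raw-SQL query
+  struct could look like:
+
+      defimpl DBConnection.Query, for: MyApp.RawQuery do
+        def parse(query, _opts), do: query
+        def describe(query, _opts), do: query
+        def encode(_query, params, _opts), do: params
+        def decode(_query, result, _opts), do: result
+      end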
+ """ + @spec parse(any, Keyword.t()) :: any + def parse(query, opts) + + @doc """ + Describe a query. + + This function is called to describe a query after it is prepared using a + connection callback module. + + See `DBConnection.prepare/3`. + """ + @spec describe(any, Keyword.t()) :: any + def describe(query, opts) + + @doc """ + Encode parameters using a query. + + This function is called to encode a query before it is executed using a + connection callback module. + + If this function raises `DBConnection.EncodeError`, then the query is + prepared once again. + + See `DBConnection.execute/3`. + """ + @spec encode(any, any, Keyword.t()) :: any + def encode(query, params, opts) + + @doc """ + Decode a result using a query. + + This function is called to decode a result after it is returned by a + connection callback module. + + See `DBConnection.execute/3`. + """ + @spec decode(any, any, Keyword.t()) :: any + def decode(query, result, opts) +end diff --git a/deps/db_connection/lib/db_connection/task.ex b/deps/db_connection/lib/db_connection/task.ex new file mode 100644 index 0000000..60844c5 --- /dev/null +++ b/deps/db_connection/lib/db_connection/task.ex @@ -0,0 +1,47 @@ +defmodule DBConnection.Task do + @moduledoc false + @name __MODULE__ + + require DBConnection.Holder + + def run_child(mod, state, fun, opts) do + arg = [fun, self(), opts] + {:ok, pid} = Task.Supervisor.start_child(@name, __MODULE__, :init, arg) + ref = Process.monitor(pid) + _ = DBConnection.Holder.update(pid, ref, mod, state) + {pid, ref} + end + + def init(fun, parent, opts) do + try do + Process.link(parent) + catch + :error, :noproc -> + exit({:shutdown, :noproc}) + end + + receive do + {:"ETS-TRANSFER", holder, ^parent, {:checkin, ref, _extra}} -> + Process.unlink(parent) + pool_ref = DBConnection.Holder.pool_ref(pool: parent, reference: ref, holder: holder) + checkout = {:via, __MODULE__, pool_ref} + _ = DBConnection.run(checkout, make_fun(fun), [pool: __MODULE__] ++ opts) + exit(:normal) + end + end + + def checkout({:via, __MODULE__, pool_ref}, _callers, _opts) do + {:ok, pool_ref, _mod = :unused, _idle_time = nil, _state = :unused} + end + + defp make_fun(fun) when is_function(fun, 1) do + fun + end + + defp make_fun(mfargs) do + fn conn -> + {mod, fun, args} = mfargs + apply(mod, fun, [conn | args]) + end + end +end diff --git a/deps/db_connection/lib/db_connection/watcher.ex b/deps/db_connection/lib/db_connection/watcher.ex new file mode 100644 index 0000000..12a4e4c --- /dev/null +++ b/deps/db_connection/lib/db_connection/watcher.ex @@ -0,0 +1,61 @@ +defmodule DBConnection.Watcher do + @moduledoc false + @name __MODULE__ + + use GenServer + + def start_link(_) do + GenServer.start_link(__MODULE__, :ok, name: @name) + end + + def watch(supervisor, args) do + GenServer.call(@name, {:watch, supervisor, args}, :infinity) + end + + def init(:ok) do + Process.flag(:trap_exit, true) + {:ok, {%{}, %{}}} + end + + def handle_call({:watch, supervisor, args}, {caller_pid, _ref}, {caller_refs, started_refs}) do + case DynamicSupervisor.start_child(supervisor, args) do + {:ok, started_pid} -> + Process.link(caller_pid) + caller_ref = Process.monitor(caller_pid) + started_ref = Process.monitor(started_pid) + caller_refs = Map.put(caller_refs, caller_ref, {supervisor, started_pid, started_ref}) + started_refs = Map.put(started_refs, started_ref, {caller_pid, caller_ref}) + {:reply, {:ok, started_pid}, {caller_refs, started_refs}} + + other -> + {:reply, other, {caller_refs, started_refs}} + end + end + + def 
handle_info({:DOWN, ref, _, _, _}, {caller_refs, started_refs}) do + case caller_refs do + %{^ref => {supervisor, started_pid, started_ref}} -> + Process.demonitor(started_ref, [:flush]) + DynamicSupervisor.terminate_child(supervisor, started_pid) + {:noreply, {Map.delete(caller_refs, ref), Map.delete(started_refs, started_ref)}} + + %{} -> + %{^ref => {caller_pid, caller_ref}} = started_refs + Process.demonitor(caller_ref, [:flush]) + Process.exit(caller_pid, :kill) + {:noreply, {Map.delete(caller_refs, caller_ref), Map.delete(started_refs, ref)}} + end + end + + def handle_info({:EXIT, _, _}, state) do + {:noreply, state} + end + + def terminate(_, {_, started_refs}) do + for {_, {caller_pid, _}} <- started_refs do + Process.exit(caller_pid, :kill) + end + + :ok + end +end diff --git a/deps/db_connection/mix.exs b/deps/db_connection/mix.exs new file mode 100644 index 0000000..e74249e --- /dev/null +++ b/deps/db_connection/mix.exs @@ -0,0 +1,85 @@ +defmodule DBConnection.Mixfile do + use Mix.Project + + @source_url "https://github.com/elixir-ecto/db_connection" + @pools [:connection_pool, :ownership] + @version "2.4.2" + + def project do + [ + app: :db_connection, + version: @version, + elixir: "~> 1.7", + deps: deps(), + docs: docs(), + description: description(), + package: package(), + build_per_environment: false, + consolidate_protocols: false, + test_paths: test_paths(Mix.env()), + aliases: ["test.all": ["test", "test.pools"], "test.pools": &test_pools/1], + preferred_cli_env: ["test.all": :test] + ] + end + + def application do + [ + extra_applications: [:logger], + mod: {DBConnection.App, []} + ] + end + + defp deps do + [ + {:connection, "~> 1.0"}, + {:ex_doc, ">= 0.0.0", only: :dev, runtime: false}, + {:telemetry, "~> 0.4 or ~> 1.0"} + ] + end + + defp docs do + [ + source_url: @source_url, + source_ref: "v#{@version}", + main: DBConnection, + extras: ["CHANGELOG.md"] + ] + end + + defp description do + """ + Database connection behaviour for database transactions and connection pooling + """ + end + + defp package do + %{ + licenses: ["Apache-2.0"], + maintainers: ["James Fish"], + links: %{"GitHub" => @source_url} + } + end + + defp test_paths(pool) when pool in @pools, do: ["integration_test/#{pool}"] + defp test_paths(_), do: ["test"] + + defp test_pools(args) do + for env <- @pools, do: env_run(env, args) + end + + defp env_run(env, args) do + args = if IO.ANSI.enabled?(), do: ["--color" | args], else: ["--no-color" | args] + + IO.puts("==> Running tests for MIX_ENV=#{env} mix test") + + {_, res} = + System.cmd("mix", ["test" | args], + into: IO.binstream(:stdio, :line), + env: [{"MIX_ENV", to_string(env)}] + ) + + if res > 0 do + System.at_exit(fn _ -> exit({:shutdown, 1}) end) + end + end +end diff --git a/deps/decimal/.fetch b/deps/decimal/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/decimal/.formatter.exs b/deps/decimal/.formatter.exs new file mode 100644 index 0000000..ca8e354 --- /dev/null +++ b/deps/decimal/.formatter.exs @@ -0,0 +1,7 @@ +[ + inputs: [ + "*.exs", + "lib/**/*.ex", + "test/**/*.{ex,exs}" + ] +] diff --git a/deps/decimal/.hex b/deps/decimal/.hex new file mode 100644 index 0000000000000000000000000000000000000000..f311004bc38b45bb1801812dac9394022456de2b GIT binary patch literal 271 zcmZ9{JyHWP42EHo1VY78GX9aJRmV*juO+h^VsB=XFzLBC4khhZ=y~-sZKkc5?WFg# zN#<&dc@gtAx2Eeyo-}`5ZngIHJ<;G96&;PXL=niCQ;C$x0R$I-*!faMCw2g@%!4wq zA0vVnWD@gQQ%j$^UQYeqQ%CE3KZ@D4RP{mZMe|(Ksr$?A`ri+Lh{`!e(KI;gf)WYK yK!I#g%p-bs; 2.0"}] +end +``` + +After you are 
done, run `mix deps.get` in your shell to fetch and compile Decimal. Start an interactive Elixir shell with `iex -S mix`.
+
+```elixir
+iex> alias Decimal, as: D
+iex> D.add(6, 7)
+#Decimal<13>
+iex> D.div(1, 3)
+#Decimal<0.333333333>
+iex> D.new("0.33")
+#Decimal<0.33>
+```
+
+## Examples
+
+### Using the context
+
+The context specifies the maximum precision of the result of calculations and
+the rounding algorithm if the result has a higher precision than the specified
+maximum. It also holds the list of set trap enablers and the currently set
+flags.
+
+The context is stored in the process dictionary, which means that you don't have
+to pass the context around explicitly and the flags will be updated
+automatically.
+
+The context is accessed with `Decimal.Context.get/0` and set with
+`Decimal.Context.set/1`. It can also be temporarily set with
+`Decimal.Context.with/2`.
+
+```elixir
+iex> D.Context.get()
+%Decimal.Context{flags: [:rounded, :inexact], precision: 9, rounding: :half_up,
+ traps: [:invalid_operation, :division_by_zero]}
+iex> D.Context.with(%D.Context{precision: 2}, fn -> IO.inspect D.Context.get() end)
+%Decimal.Context{flags: [], precision: 2, rounding: :half_up,
+ traps: [:invalid_operation, :division_by_zero]}
+%Decimal.Context{flags: [], precision: 2, rounding: :half_up,
+ traps: [:invalid_operation, :division_by_zero]}
+iex> D.Context.set(%D.Context{D.Context.get() | traps: []})
+:ok
+iex> D.Context.get()
+%Decimal.Context{flags: [:rounded, :inexact], precision: 9, rounding: :half_up,
+ traps: []}
+```
+
+### Precision and rounding
+
+The precision is used to limit the amount of decimal digits in the coefficient:
+
+```elixir
+iex> D.Context.set(%D.Context{D.Context.get() | precision: 9})
+:ok
+iex> D.div(100, 3)
+#Decimal<33.3333333>
+iex> D.Context.set(%D.Context{D.Context.get() | precision: 2})
+:ok
+iex> D.div(100, 3)
+#Decimal<33>
+```
+
+The rounding algorithm specifies how the result of an operation shall be rounded
+when it cannot be represented with the current precision:
+
+```elixir
+iex> D.Context.set(%D.Context{D.Context.get() | rounding: :half_up})
+:ok
+iex> D.div(31, 2)
+#Decimal<16>
+iex> D.Context.set(%D.Context{D.Context.get() | rounding: :floor})
+:ok
+iex> D.div(31, 2)
+#Decimal<15>
+```
+
+### Comparisons
+
+Using compare operators (`<`, `=`, `>`) directly with two decimals may not return
+the correct result. Instead use comparison functions.
+
+```elixir
+iex> D.compare(-1, 0)
+:lt
+iex> D.compare(0, -1)
+:gt
+iex> D.compare(0, 0)
+:eq
+
+iex> D.equal?(-1, 0)
+false
+iex> D.equal?(0, "0.0")
+true
+```
+
+### Flags and trap enablers
+
+When an exceptional condition is signalled its flag is set in the context and
+if the trap enabler is set `Decimal.Error` will be raised.
+
+```elixir
+iex> D.Context.set(%D.Context{D.Context.get() | rounding: :floor, precision: 2})
+:ok
+iex> D.Context.get().traps
+[:invalid_operation, :division_by_zero]
+iex> D.Context.get().flags
+[]
+iex> D.div(31, 2)
+#Decimal<15>
+iex> D.Context.get().flags
+[:inexact, :rounded]
+```
+
+`:inexact` and `:rounded` were signaled above because the result of the
+operation was inexact given the context's precision and had to be rounded to fit
+the precision. `Decimal.Error` was not raised because the signals' trap enablers
+weren't set. We can, however, set the trap enabler if we want this condition to
+raise.
+ +```elixir +iex> D.Context.set(%D.Context{D.Context.get() | traps: D.Context.get().traps ++ [:inexact]}) +:ok +iex> D.div(31, 2) +** (Decimal.Error) +``` + +The default trap enablers, such as `:division_by_zero` can be unset: + +```elixir +iex> D.Context.get().traps +[:invalid_operation, :division_by_zero] +iex> D.div(42, 0) +** (Decimal.Error) +iex> D.Context.set(%D.Context{D.Context.get() | traps: [], flags: []}) +:ok +iex> D.div(42, 0) +#Decimal +iex> D.Context.get().flags +[:division_by_zero] +``` + +### Mitigating rounding errors + +TODO + +## License + + Copyright 2013 Eric Meadows-Jรถnsson + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/deps/decimal/hex_metadata.config b/deps/decimal/hex_metadata.config new file mode 100644 index 0000000..b0e1981 --- /dev/null +++ b/deps/decimal/hex_metadata.config @@ -0,0 +1,14 @@ +{<<"app">>,<<"decimal">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>,<<"Arbitrary precision decimal arithmetic.">>}. +{<<"elixir">>,<<"~> 1.2">>}. +{<<"files">>, + [<<"lib">>,<<"lib/decimal">>,<<"lib/decimal/error.ex">>, + <<"lib/decimal/context.ex">>,<<"lib/decimal/macros.ex">>, + <<"lib/decimal.ex">>,<<".formatter.exs">>,<<"mix.exs">>,<<"README.md">>, + <<"LICENSE.txt">>,<<"CHANGELOG.md">>]}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"links">>,[{<<"GitHub">>,<<"https://github.com/ericmj/decimal">>}]}. +{<<"name">>,<<"decimal">>}. +{<<"requirements">>,[]}. +{<<"version">>,<<"2.0.0">>}. diff --git a/deps/decimal/lib/decimal.ex b/deps/decimal/lib/decimal.ex new file mode 100644 index 0000000..10cf59a --- /dev/null +++ b/deps/decimal/lib/decimal.ex @@ -0,0 +1,1785 @@ +defmodule Decimal do + @moduledoc """ + Decimal arithmetic on arbitrary precision floating-point numbers. + + A number is represented by a signed coefficient and exponent such that: `sign + * coefficient * 10 ^ exponent`. All numbers are represented and calculated + exactly, but the result of an operation may be rounded depending on the + context the operation is performed with, see: `Decimal.Context`. Trailing + zeros in the coefficient are never truncated to preserve the number of + significant digits unless explicitly done so. + + There are also special values such as NaN (not a number) and ยฑInfinity. + -0 and +0 are two distinct values. + Some operation results are not defined and will return NaN. + This kind of NaN is quiet, any operation returning a number will return + NaN when given a quiet NaN (the NaN value will flow through all operations). + + Exceptional conditions are grouped into signals, each signal has a flag and a + trap enabler in the context. Whenever a signal is triggered it's flag is set + in the context and will be set until explicitly cleared. If the signal is trap + enabled `Decimal.Error` will be raised. 
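+
+  As an illustrative sketch (assuming the default context, where
+  `:division_by_zero` is trapped), removing the traps makes such an operation
+  return a result and only set the corresponding flag:
+
+      iex> Decimal.Context.update(fn ctx -> %{ctx | traps: []} end)
+      :ok
+      iex> Decimal.div(1, 0)
+      #Decimal<Infinity>
+      iex> :division_by_zero in Decimal.Context.get().flags
+      true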
+ + ## Specifications + + * [IBM's General Decimal Arithmetic Specification](http://speleotrove.com/decimal/decarith.html) + * [IEEE standard 854-1987](http://web.archive.org/web/20150908012941/http://754r.ucbtest.org/standards/854.pdf) + + This library follows the above specifications for reference of arithmetic + operation implementations, but the public APIs may differ to provide a + more idiomatic Elixir interface. + + The specification models the sign of the number as 1, for a negative number, + and 0 for a positive number. Internally this implementation models the sign as + 1 or -1 such that the complete number will be `sign * coefficient * + 10 ^ exponent` and will refer to the sign in documentation as either *positive* + or *negative*. + + There is currently no maximum or minimum values for the exponent. Because of + that all numbers are "normal". This means that when an operation should, + according to the specification, return a number that "underflows" 0 is returned + instead of Etiny. This may happen when dividing a number with infinity. + Additionally, overflow, underflow and clamped may never be signalled. + """ + + import Bitwise + import Kernel, except: [abs: 1, div: 2, max: 2, min: 2, rem: 2, round: 1] + import Decimal.Macros + alias Decimal.Context + alias Decimal.Error + + @power_of_2_to_52 4_503_599_627_370_496 + + @typedoc """ + The coefficient of the power of `10`. Non-negative because the sign is stored separately in `sign`. + + * `non_neg_integer` - when the `t` represents a number, instead of one of the special values below. + * `:NaN` - Not a Number. + * `:inf` - Infinity. + + """ + @type coefficient :: non_neg_integer | :NaN | :inf + + @typedoc """ + The exponent to which `10` is raised. + """ + @type exponent :: integer + + @typedoc """ + + * `1` for positive + * `-1` for negative + + """ + @type sign :: 1 | -1 + + @type signal :: + :invalid_operation + | :division_by_zero + | :rounded + | :inexact + + @typedoc """ + Rounding algorithm. + + See `Decimal.Context` for more information. + """ + @type rounding :: + :down + | :half_up + | :half_even + | :ceiling + | :floor + | :half_down + | :up + + @typedoc """ + This implementation models the `sign` as `1` or `-1` such that the complete number will be: `sign * coef * 10 ^ exp`. + + * `coef` - the coefficient of the power of `10`. + * `exp` - the exponent of the power of `10`. + * `sign` - `1` for positive, `-1` for negative. + + """ + @type t :: %__MODULE__{ + sign: sign, + coef: coefficient, + exp: exponent + } + + @type decimal :: t | integer | String.t() + + defstruct sign: 1, coef: 0, exp: 0 + + defmacrop error(flags, reason, result, context \\ nil) do + quote bind_quoted: binding() do + case handle_error(flags, reason, result, context) do + {:ok, result} -> result + {:error, error} -> raise Error, error + end + end + end + + @doc """ + Returns `true` if number is NaN, otherwise `false`. + """ + @spec nan?(t) :: boolean + def nan?(%Decimal{coef: :NaN}), do: true + def nan?(%Decimal{}), do: false + + @doc """ + Returns `true` if number is ยฑInfinity, otherwise `false`. + """ + @spec inf?(t) :: boolean + def inf?(%Decimal{coef: :inf}), do: true + def inf?(%Decimal{}), do: false + + @doc """ + Returns `true` if argument is a decimal number, otherwise `false`. + + ## Examples + + iex> Decimal.is_decimal(Decimal.new(42)) + true + + iex> Decimal.is_decimal(42) + false + + Allowed in guard tests on OTP 21+. 
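+
+  A minimal usage sketch (the `to_decimal/1` helper below is hypothetical):
+
+      require Decimal
+
+      def to_decimal(value) when Decimal.is_decimal(value), do: value
+      def to_decimal(value), do: Decimal.new(value)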
+ """ + doc_since("1.9.0") + defmacro is_decimal(term) + + if function_exported?(:erlang, :is_map_key, 2) do + defmacro is_decimal(term) do + case __CALLER__.context do + nil -> + quote do + case unquote(term) do + %Decimal{} -> true + _ -> false + end + end + + :match -> + raise ArgumentError, + "invalid expression in match, is_decimal is not allowed in patterns " <> + "such as function clauses, case clauses or on the left side of the = operator" + + :guard -> + quote do + is_map(unquote(term)) and :erlang.is_map_key(:__struct__, unquote(term)) and + :erlang.map_get(:__struct__, unquote(term)) == Decimal + end + end + end + else + # TODO: remove when we require Elixir v1.10 + defmacro is_decimal(term) do + quote do + case unquote(term) do + %Decimal{} -> true + _ -> false + end + end + end + end + + @doc """ + The absolute value of given number. Sets the number's sign to positive. + """ + @spec abs(t) :: t + def abs(%Decimal{coef: :NaN} = num), do: %{num | sign: 1} + def abs(%Decimal{} = num), do: context(%{num | sign: 1}) + + @doc """ + Adds two numbers together. + + ## Exceptional conditions + + * If one number is -Infinity and the other +Infinity, `:invalid_operation` will + be signalled. + + ## Examples + + iex> Decimal.add(1, "1.1") + #Decimal<2.1> + + iex> Decimal.add(1, "Inf") + #Decimal + + """ + @spec add(decimal, decimal) :: t + def add(%Decimal{coef: :NaN} = num1, %Decimal{}), do: num1 + + def add(%Decimal{}, %Decimal{coef: :NaN} = num2), do: num2 + + def add(%Decimal{coef: :inf, sign: sign} = num1, %Decimal{coef: :inf, sign: sign} = num2) do + if num1.exp > num2.exp do + num1 + else + num2 + end + end + + def add(%Decimal{coef: :inf}, %Decimal{coef: :inf}), + do: error(:invalid_operation, "adding +Infinity and -Infinity", %Decimal{coef: :NaN}) + + def add(%Decimal{coef: :inf} = num1, %Decimal{}), do: num1 + + def add(%Decimal{}, %Decimal{coef: :inf} = num2), do: num2 + + def add(%Decimal{} = num1, %Decimal{} = num2) do + %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1 + %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2 + + {coef1, coef2} = add_align(coef1, exp1, coef2, exp2) + coef = sign1 * coef1 + sign2 * coef2 + exp = Kernel.min(exp1, exp2) + sign = add_sign(sign1, sign2, coef) + context(%Decimal{sign: sign, coef: Kernel.abs(coef), exp: exp}) + end + + def add(num1, num2), do: add(decimal(num1), decimal(num2)) + + @doc """ + Subtracts second number from the first. Equivalent to `Decimal.add/2` when the + second number's sign is negated. + + ## Exceptional conditions + + * If one number is -Infinity and the other +Infinity `:invalid_operation` will + be signalled. + + ## Examples + + iex> Decimal.sub(1, "0.1") + #Decimal<0.9> + + iex> Decimal.sub(1, "Inf") + #Decimal<-Infinity> + + """ + @spec sub(decimal, decimal) :: t + def sub(%Decimal{} = num1, %Decimal{sign: sign} = num2) do + add(num1, %{num2 | sign: -sign}) + end + + def sub(num1, num2) do + sub(decimal(num1), decimal(num2)) + end + + @doc """ + Compares two numbers numerically. If the first number is greater than the second + `:gt` is returned, if less than `:lt` is returned, if both numbers are equal + `:eq` is returned. + + Neither number can be a NaN. 
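+
+  Because `compare/2` returns `:lt`, `:eq` or `:gt`, the `Decimal` module can
+  also be used as a sorter; a small sketch (assuming Elixir v1.10+):
+
+      iex> Enum.sort([Decimal.new("2"), Decimal.new("1.5")], Decimal)
+      [#Decimal<1.5>, #Decimal<2>]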
+ + ## Examples + + iex> Decimal.compare("1.0", 1) + :eq + + iex> Decimal.compare("Inf", -1) + :gt + + """ + @spec compare(decimal, decimal) :: :lt | :gt | :eq + def compare(%Decimal{coef: :inf, sign: sign}, %Decimal{coef: :inf, sign: sign}), + do: :eq + + def compare(%Decimal{coef: :inf, sign: sign1}, %Decimal{coef: :inf, sign: sign2}) + when sign1 < sign2, + do: :lt + + def compare(%Decimal{coef: :inf, sign: sign1}, %Decimal{coef: :inf, sign: sign2}) + when sign1 > sign2, + do: :gt + + def compare(%Decimal{coef: :inf, sign: 1}, _num2), do: :gt + def compare(%Decimal{coef: :inf, sign: -1}, _num2), do: :lt + + def compare(_num1, %Decimal{coef: :inf, sign: 1}), do: :lt + def compare(_num1, %Decimal{coef: :inf, sign: -1}), do: :gt + + def compare(%Decimal{coef: :NaN} = num1, _num2), + do: error(:invalid_operation, "operation on NaN", num1) + + def compare(_num1, %Decimal{coef: :NaN} = num2), + do: error(:invalid_operation, "operation on NaN", num2) + + def compare(%Decimal{} = num1, %Decimal{} = num2) do + case sub(num1, num2) do + %Decimal{coef: 0} -> :eq + %Decimal{sign: 1} -> :gt + %Decimal{sign: -1} -> :lt + end + end + + def compare(num1, num2) do + compare(decimal(num1), decimal(num2)) + end + + @deprecated "Use compare/2 instead" + @spec cmp(decimal, decimal) :: :lt | :eq | :gt + def cmp(num1, num2) do + compare(num1, num2) + end + + @doc """ + Compares two numbers numerically and returns `true` if they are equal, + otherwise `false`. If one of the operands is a quiet NaN this operation + will always return `false`. + + ## Examples + + iex> Decimal.equal?("1.0", 1) + true + + iex> Decimal.equal?(1, -1) + false + + """ + @spec equal?(decimal, decimal) :: boolean + def equal?(num1, num2) do + eq?(num1, num2) + end + + @doc """ + Compares two numbers numerically and returns `true` if they are equal, + otherwise `false`. If one of the operands is a quiet NaN this operation + will always return `false`. + + ## Examples + + iex> Decimal.eq?("1.0", 1) + true + + iex> Decimal.eq?(1, -1) + false + + """ + doc_since("1.8.0") + @spec eq?(decimal, decimal) :: boolean + def eq?(%Decimal{coef: :NaN}, _num2), do: false + def eq?(_num1, %Decimal{coef: :NaN}), do: false + def eq?(num1, num2), do: compare(num1, num2) == :eq + + @doc """ + Compares two numbers numerically and returns `true` if the the first argument + is greater than the second, otherwise `false`. If one the operands is a + quiet NaN this operation will always return `false`. + + ## Examples + + iex> Decimal.gt?("1.3", "1.2") + true + + iex> Decimal.gt?("1.2", "1.3") + false + + """ + doc_since("1.8.0") + @spec gt?(decimal, decimal) :: boolean + def gt?(%Decimal{coef: :NaN}, _num2), do: false + def gt?(_num1, %Decimal{coef: :NaN}), do: false + def gt?(num1, num2), do: compare(num1, num2) == :gt + + @doc """ + Compares two numbers numerically and returns `true` if the the first number is + less than the second number, otherwise `false`. If one of the operands is a + quiet NaN this operation will always return `false`. + + ## Examples + + iex> Decimal.lt?("1.1", "1.2") + true + + iex> Decimal.lt?("1.4", "1.2") + false + + """ + doc_since("1.8.0") + @spec lt?(decimal, decimal) :: boolean + def lt?(%Decimal{coef: :NaN}, _num2), do: false + def lt?(_num1, %Decimal{coef: :NaN}), do: false + def lt?(num1, num2), do: compare(num1, num2) == :lt + + @doc """ + Divides two numbers. + + ## Exceptional conditions + + * If both numbers are ยฑInfinity `:invalid_operation` is signalled. + * If both numbers are ยฑ0 `:invalid_operation` is signalled. 
+ * If second number (denominator) is ยฑ0 `:division_by_zero` is signalled. + + ## Examples + + iex> Decimal.div(3, 4) + #Decimal<0.75> + + iex> Decimal.div("Inf", -1) + #Decimal<-Infinity> + + """ + @spec div(decimal, decimal) :: t + def div(%Decimal{coef: :NaN} = num1, %Decimal{}), do: num1 + + def div(%Decimal{}, %Decimal{coef: :NaN} = num2), do: num2 + + def div(%Decimal{coef: :inf}, %Decimal{coef: :inf}), + do: error(:invalid_operation, "ยฑInfinity / ยฑInfinity", %Decimal{coef: :NaN}) + + def div(%Decimal{sign: sign1, coef: :inf} = num1, %Decimal{sign: sign2}) do + sign = if sign1 == sign2, do: 1, else: -1 + %{num1 | sign: sign} + end + + def div(%Decimal{sign: sign1, exp: exp1}, %Decimal{sign: sign2, coef: :inf, exp: exp2}) do + sign = if sign1 == sign2, do: 1, else: -1 + # TODO: Subnormal + # exponent? + %Decimal{sign: sign, coef: 0, exp: exp1 - exp2} + end + + def div(%Decimal{coef: 0}, %Decimal{coef: 0}), + do: error(:invalid_operation, "0 / 0", %Decimal{coef: :NaN}) + + def div(%Decimal{sign: sign1}, %Decimal{sign: sign2, coef: 0}) do + sign = if sign1 == sign2, do: 1, else: -1 + error(:division_by_zero, nil, %Decimal{sign: sign, coef: :inf}) + end + + def div(%Decimal{} = num1, %Decimal{} = num2) do + %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1 + %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2 + sign = if sign1 == sign2, do: 1, else: -1 + + if coef1 == 0 do + context(%Decimal{sign: sign, coef: 0, exp: exp1 - exp2}, []) + else + prec10 = pow10(Context.get().precision) + {coef1, coef2, adjust} = div_adjust(coef1, coef2, 0) + {coef, adjust, _rem, signals} = div_calc(coef1, coef2, 0, adjust, prec10) + + context(%Decimal{sign: sign, coef: coef, exp: exp1 - exp2 - adjust}, signals) + end + end + + def div(num1, num2) do + div(decimal(num1), decimal(num2)) + end + + @doc """ + Divides two numbers and returns the integer part. + + ## Exceptional conditions + + * If both numbers are ยฑInfinity `:invalid_operation` is signalled. + * If both numbers are ยฑ0 `:invalid_operation` is signalled. + * If second number (denominator) is ยฑ0 `:division_by_zero` is signalled. + + ## Examples + + iex> Decimal.div_int(5, 2) + #Decimal<2> + + iex> Decimal.div_int("Inf", -1) + #Decimal<-Infinity> + + """ + @spec div_int(decimal, decimal) :: t + def div_int(%Decimal{coef: :NaN} = num1, %Decimal{}), do: num1 + + def div_int(%Decimal{}, %Decimal{coef: :NaN} = num2), do: num2 + + def div_int(%Decimal{coef: :inf}, %Decimal{coef: :inf}), + do: error(:invalid_operation, "ยฑInfinity / ยฑInfinity", %Decimal{coef: :NaN}) + + def div_int(%Decimal{sign: sign1, coef: :inf} = num1, %Decimal{sign: sign2}) do + sign = if sign1 == sign2, do: 1, else: -1 + %{num1 | sign: sign} + end + + def div_int(%Decimal{sign: sign1, exp: exp1}, %Decimal{sign: sign2, coef: :inf, exp: exp2}) do + sign = if sign1 == sign2, do: 1, else: -1 + # TODO: Subnormal + # exponent? 
+ %Decimal{sign: sign, coef: 0, exp: exp1 - exp2} + end + + def div_int(%Decimal{coef: 0}, %Decimal{coef: 0}), + do: error(:invalid_operation, "0 / 0", %Decimal{coef: :NaN}) + + def div_int(%Decimal{sign: sign1}, %Decimal{sign: sign2, coef: 0}) do + div_sign = if sign1 == sign2, do: 1, else: -1 + error(:division_by_zero, nil, %Decimal{sign: div_sign, coef: :inf}) + end + + def div_int(%Decimal{} = num1, %Decimal{} = num2) do + %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1 + %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2 + div_sign = if sign1 == sign2, do: 1, else: -1 + + cond do + compare(%{num1 | sign: 1}, %{num2 | sign: 1}) == :lt -> + %Decimal{sign: div_sign, coef: 0, exp: exp1 - exp2} + + coef1 == 0 -> + context(%{num1 | sign: div_sign}) + + true -> + case integer_division(div_sign, coef1, exp1, coef2, exp2) do + {:ok, result} -> + result + + {:error, error, reason, num} -> + error(error, reason, num) + end + end + end + + def div_int(num1, num2) do + div_int(decimal(num1), decimal(num2)) + end + + @doc """ + Remainder of integer division of two numbers. The result will have the sign of + the first number. + + ## Exceptional conditions + + * If both numbers are ยฑInfinity `:invalid_operation` is signalled. + * If both numbers are ยฑ0 `:invalid_operation` is signalled. + * If second number (denominator) is ยฑ0 `:division_by_zero` is signalled. + + ## Examples + + iex> Decimal.rem(5, 2) + #Decimal<1> + + """ + @spec rem(decimal, decimal) :: t + def rem(%Decimal{coef: :NaN} = num1, %Decimal{}), do: num1 + + def rem(%Decimal{}, %Decimal{coef: :NaN} = num2), do: num2 + + def rem(%Decimal{coef: :inf}, %Decimal{coef: :inf}), + do: error(:invalid_operation, "ยฑInfinity / ยฑInfinity", %Decimal{coef: :NaN}) + + def rem(%Decimal{sign: sign1, coef: :inf}, %Decimal{}), do: %Decimal{sign: sign1, coef: 0} + + def rem(%Decimal{sign: sign1}, %Decimal{coef: :inf} = num2) do + # TODO: Subnormal + # exponent? + %{num2 | sign: sign1} + end + + def rem(%Decimal{coef: 0}, %Decimal{coef: 0}), + do: error(:invalid_operation, "0 / 0", %Decimal{coef: :NaN}) + + def rem(%Decimal{sign: sign1}, %Decimal{coef: 0}), + do: error(:division_by_zero, nil, %Decimal{sign: sign1, coef: 0}) + + def rem(%Decimal{} = num1, %Decimal{} = num2) do + %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1 + %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2 + + cond do + compare(%{num1 | sign: 1}, %{num2 | sign: 1}) == :lt -> + %{num1 | sign: sign1} + + coef1 == 0 -> + context(%{num2 | sign: sign1}) + + true -> + div_sign = if sign1 == sign2, do: 1, else: -1 + + case integer_division(div_sign, coef1, exp1, coef2, exp2) do + {:ok, result} -> + sub(num1, mult(num2, result)) + + {:error, error, reason, num} -> + error(error, reason, num) + end + end + end + + def rem(num1, num2) do + rem(decimal(num1), decimal(num2)) + end + + @doc """ + Integer division of two numbers and the remainder. Should be used when both + `div_int/2` and `rem/2` is needed. Equivalent to: `{Decimal.div_int(x, y), + Decimal.rem(x, y)}`. + + ## Exceptional conditions + + * If both numbers are ยฑInfinity `:invalid_operation` is signalled. + * If both numbers are ยฑ0 `:invalid_operation` is signalled. + * If second number (denominator) is ยฑ0 `:division_by_zero` is signalled. 
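+
+  Within the context precision the two results satisfy
+  `x == div_int(x, y) * y + rem(x, y)`; an illustrative check (see also the
+  examples below):
+
+      iex> {q, r} = Decimal.div_rem(7, 3)
+      iex> Decimal.add(Decimal.mult(q, 3), r)
+      #Decimal<7>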
+ + ## Examples + + iex> Decimal.div_rem(5, 2) + {Decimal.new(2), Decimal.new(1)} + + """ + @spec div_rem(decimal, decimal) :: {t, t} + def div_rem(%Decimal{coef: :NaN} = num1, %Decimal{}), do: {num1, num1} + + def div_rem(%Decimal{}, %Decimal{coef: :NaN} = num2), do: {num2, num2} + + def div_rem(%Decimal{coef: :inf}, %Decimal{coef: :inf}) do + numbers = {%Decimal{coef: :NaN}, %Decimal{coef: :NaN}} + error(:invalid_operation, "ยฑInfinity / ยฑInfinity", numbers) + end + + def div_rem(%Decimal{sign: sign1, coef: :inf} = num1, %Decimal{sign: sign2}) do + sign = if sign1 == sign2, do: 1, else: -1 + {%{num1 | sign: sign}, %Decimal{sign: sign1, coef: 0}} + end + + def div_rem(%Decimal{} = num1, %Decimal{coef: :inf} = num2) do + %Decimal{sign: sign1, exp: exp1} = num1 + %Decimal{sign: sign2, exp: exp2} = num2 + + sign = if sign1 == sign2, do: 1, else: -1 + # TODO: Subnormal + # exponent? + {%Decimal{sign: sign, coef: 0, exp: exp1 - exp2}, %{num2 | sign: sign1}} + end + + def div_rem(%Decimal{coef: 0}, %Decimal{coef: 0}) do + error = error(:invalid_operation, "0 / 0", %Decimal{coef: :NaN}) + {error, error} + end + + def div_rem(%Decimal{sign: sign1}, %Decimal{sign: sign2, coef: 0}) do + div_sign = if sign1 == sign2, do: 1, else: -1 + div_error = error(:division_by_zero, nil, %Decimal{sign: div_sign, coef: :inf}) + rem_error = error(:division_by_zero, nil, %Decimal{sign: sign1, coef: 0}) + {div_error, rem_error} + end + + def div_rem(%Decimal{} = num1, %Decimal{} = num2) do + %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1 + %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2 + div_sign = if sign1 == sign2, do: 1, else: -1 + + cond do + compare(%{num1 | sign: 1}, %{num2 | sign: 1}) == :lt -> + {%Decimal{sign: div_sign, coef: 0, exp: exp1 - exp2}, %{num1 | sign: sign1}} + + coef1 == 0 -> + {context(%{num1 | sign: div_sign}), context(%{num2 | sign: sign1})} + + true -> + case integer_division(div_sign, coef1, exp1, coef2, exp2) do + {:ok, result} -> + {result, sub(num1, mult(num2, result))} + + {:error, error, reason, num} -> + error(error, reason, {num, num}) + end + end + end + + def div_rem(num1, num2) do + div_rem(decimal(num1), decimal(num2)) + end + + @doc """ + Compares two values numerically and returns the maximum. Unlike most other + functions in `Decimal` if a number is NaN the result will be the other number. + Only if both numbers are NaN will NaN be returned. + + ## Examples + + iex> Decimal.max(1, "2.0") + #Decimal<2.0> + + iex> Decimal.max(1, "NaN") + #Decimal<1> + + iex> Decimal.max("NaN", "NaN") + #Decimal + + """ + @spec max(decimal, decimal) :: t + def max(%Decimal{coef: :NaN}, %Decimal{} = num2), do: num2 + + def max(%Decimal{} = num1, %Decimal{coef: :NaN}), do: num1 + + def max(%Decimal{sign: sign1, exp: exp1} = num1, %Decimal{sign: sign2, exp: exp2} = num2) do + case compare(num1, num2) do + :lt -> + num2 + + :gt -> + num1 + + :eq -> + cond do + sign1 != sign2 -> + if sign1 == 1, do: num1, else: num2 + + sign1 == 1 -> + if exp1 > exp2, do: num1, else: num2 + + sign1 == -1 -> + if exp1 < exp2, do: num1, else: num2 + end + end + |> context() + end + + def max(num1, num2) do + max(decimal(num1), decimal(num2)) + end + + @doc """ + Compares two values numerically and returns the minimum. Unlike most other + functions in `Decimal` if a number is NaN the result will be the other number. + Only if both numbers are NaN will NaN be returned. 
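+
+  Together with `max/2` this can be used to clamp a value to a range; an
+  illustrative sketch (the bounds below are arbitrary):
+
+      iex> Decimal.new(42) |> Decimal.min(10) |> Decimal.max(0)
+      #Decimal<10>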
+ + ## Examples + + iex> Decimal.min(1, "2.0") + #Decimal<1> + + iex> Decimal.min(1, "NaN") + #Decimal<1> + + iex> Decimal.min("NaN", "NaN") + #Decimal + + """ + @spec min(decimal, decimal) :: t + def min(%Decimal{coef: :NaN}, %Decimal{} = num2), do: num2 + + def min(%Decimal{} = num1, %Decimal{coef: :NaN}), do: num1 + + def min(%Decimal{sign: sign1, exp: exp1} = num1, %Decimal{sign: sign2, exp: exp2} = num2) do + case compare(num1, num2) do + :lt -> + num1 + + :gt -> + num2 + + :eq -> + cond do + sign1 != sign2 -> + if sign1 == -1, do: num1, else: num2 + + sign1 == 1 -> + if exp1 < exp2, do: num1, else: num2 + + sign1 == -1 -> + if exp1 > exp2, do: num1, else: num2 + end + end + |> context() + end + + def min(num1, num2) do + min(decimal(num1), decimal(num2)) + end + + @doc """ + Negates the given number. + + ## Examples + + iex> Decimal.negate(1) + #Decimal<-1> + + iex> Decimal.negate("-Inf") + #Decimal + + """ + doc_since("1.9.0") + @spec negate(decimal) :: t + def negate(%Decimal{coef: :NaN} = num), do: num + def negate(%Decimal{sign: sign} = num), do: context(%{num | sign: -sign}) + def negate(num), do: negate(decimal(num)) + + @doc """ + Applies the context to the given number rounding it to specified precision. + """ + doc_since("1.9.0") + @spec apply_context(t) :: t + def apply_context(%Decimal{} = num), do: context(num) + + @doc """ + Check if given number is positive + """ + doc_since("1.5.0") + @spec positive?(t) :: boolean + def positive?(%Decimal{coef: :NaN}), do: false + def positive?(%Decimal{coef: 0}), do: false + def positive?(%Decimal{sign: -1}), do: false + def positive?(%Decimal{sign: 1}), do: true + + @doc """ + Check if given number is negative + """ + doc_since("1.5.0") + @spec negative?(t) :: boolean + def negative?(%Decimal{coef: :NaN}), do: false + def negative?(%Decimal{coef: 0}), do: false + def negative?(%Decimal{sign: 1}), do: false + def negative?(%Decimal{sign: -1}), do: true + + @doc """ + Multiplies two numbers. + + ## Exceptional conditions + + * If one number is ยฑ0 and the other is ยฑInfinity `:invalid_operation` is + signalled. + + ## Examples + + iex> Decimal.mult("0.5", 3) + #Decimal<1.5> + + iex> Decimal.mult("Inf", -1) + #Decimal<-Infinity> + + """ + @spec mult(decimal, decimal) :: t + def mult(%Decimal{coef: :NaN} = num1, %Decimal{}), do: num1 + + def mult(%Decimal{}, %Decimal{coef: :NaN} = num2), do: num2 + + def mult(%Decimal{coef: 0}, %Decimal{coef: :inf}), + do: error(:invalid_operation, "0 * ยฑInfinity", %Decimal{coef: :NaN}) + + def mult(%Decimal{coef: :inf}, %Decimal{coef: 0}), + do: error(:invalid_operation, "0 * ยฑInfinity", %Decimal{coef: :NaN}) + + def mult(%Decimal{sign: sign1, coef: :inf, exp: exp1}, %Decimal{sign: sign2, exp: exp2}) do + sign = if sign1 == sign2, do: 1, else: -1 + # exponent? + %Decimal{sign: sign, coef: :inf, exp: exp1 + exp2} + end + + def mult(%Decimal{sign: sign1, exp: exp1}, %Decimal{sign: sign2, coef: :inf, exp: exp2}) do + sign = if sign1 == sign2, do: 1, else: -1 + # exponent? 
+ %Decimal{sign: sign, coef: :inf, exp: exp1 + exp2} + end + + def mult(%Decimal{} = num1, %Decimal{} = num2) do + %Decimal{sign: sign1, coef: coef1, exp: exp1} = num1 + %Decimal{sign: sign2, coef: coef2, exp: exp2} = num2 + sign = if sign1 == sign2, do: 1, else: -1 + %Decimal{sign: sign, coef: coef1 * coef2, exp: exp1 + exp2} |> context() + end + + def mult(num1, num2) do + mult(decimal(num1), decimal(num2)) + end + + @doc """ + Normalizes the given decimal: removes trailing zeros from coefficient while + keeping the number numerically equivalent by increasing the exponent. + + ## Examples + + iex> Decimal.normalize(Decimal.new("1.00")) + #Decimal<1> + + iex> Decimal.normalize(Decimal.new("1.01")) + #Decimal<1.01> + + """ + doc_since("1.9.0") + @spec normalize(t) :: t + def normalize(%Decimal{coef: :NaN} = num), do: num + + def normalize(%Decimal{coef: :inf} = num) do + # exponent? + %{num | exp: 0} + end + + def normalize(%Decimal{sign: sign, coef: coef, exp: exp}) do + if coef == 0 do + %Decimal{sign: sign, coef: 0, exp: 0} + else + %{do_normalize(coef, exp) | sign: sign} |> context + end + end + + @doc """ + Rounds the given number to specified decimal places with the given strategy + (default is to round to nearest one). If places is negative, at least that + many digits to the left of the decimal point will be zero. + + See `Decimal.Context` for more information about rounding algorithms. + + ## Examples + + iex> Decimal.round("1.234") + #Decimal<1> + + iex> Decimal.round("1.234", 1) + #Decimal<1.2> + + """ + @spec round(decimal, integer, rounding) :: t + def round(num, places \\ 0, mode \\ :half_up) + + def round(%Decimal{coef: :NaN} = num, _, _), do: num + + def round(%Decimal{coef: :inf} = num, _, _), do: num + + def round(%Decimal{} = num, n, mode) do + %Decimal{sign: sign, coef: coef, exp: exp} = normalize(num) + digits = :erlang.integer_to_list(coef) + target_exp = -n + value = do_round(sign, digits, exp, target_exp, mode) + context(value, []) + end + + def round(num, n, mode) do + round(decimal(num), n, mode) + end + + @doc """ + Finds the square root. + + ## Examples + + iex> Decimal.sqrt("100") + #Decimal<10> + + """ + doc_since("1.7.0") + @spec sqrt(decimal) :: t + def sqrt(%Decimal{coef: :NaN} = num), + do: error(:invalid_operation, "operation on NaN", num) + + def sqrt(%Decimal{coef: 0, exp: exp} = num), + do: %{num | exp: exp >>> 1} + + def sqrt(%Decimal{sign: -1} = num), + do: error(:invalid_operation, "less than zero", num) + + def sqrt(%Decimal{sign: 1, coef: :inf} = num), + do: num + + def sqrt(%Decimal{sign: 1, coef: coef, exp: exp}) do + precision = Context.get().precision + 1 + digits = :erlang.integer_to_list(coef) + num_digits = length(digits) + + # Since the root is calculated from integer operations only, it must be + # large enough to contain the desired precision. Calculate the amount of + # `shift` required (powers of 10). + case exp &&& 1 do + 0 -> + # To get the desired `shift`, subtract the precision of `coef`'s square + # root from the desired precision. + # + # If `coef` is 10_000, the root is 100 (3 digits of precision). + # If `coef` is 100, the root is 10 (2 digits of precision). + shift = precision - ((num_digits + 1) >>> 1) + sqrt(coef, shift, exp) + + _ -> + # If `exp` is odd, multiply `coef` by 10 and reduce shift by 1/2. `exp` + # must be even so the root's exponent is an integer. 
+ shift = precision - ((num_digits >>> 1) + 1) + sqrt(coef * 10, shift, exp) + end + end + + def sqrt(num) do + sqrt(decimal(num)) + end + + defp sqrt(coef, shift, exp) do + if shift >= 0 do + # shift `coef` up by `shift * 2` digits + sqrt(coef * pow10(shift <<< 1), shift, exp, true) + else + # shift `coef` down by `shift * 2` digits + operand = pow10(-shift <<< 1) + sqrt(Kernel.div(coef, operand), shift, exp, Kernel.rem(coef, operand) === 0) + end + end + + defp sqrt(shifted_coef, shift, exp, exact) do + # the preferred exponent is `exp / 2` as per IEEE 754 + exp = exp >>> 1 + # guess a root 10x higher than desired precision + guess = pow10(Context.get().precision + 1) + root = sqrt_loop(shifted_coef, guess) + + if exact and root * root === shifted_coef do + # if the root is exact, use preferred `exp` and shift `coef` to match + coef = + if shift >= 0, + do: Kernel.div(root, pow10(shift)), + else: root * pow10(-shift) + + context(%Decimal{sign: 1, coef: coef, exp: exp}) + else + # otherwise the calculated root is inexact (but still meets precision), + # so use the root as `coef` and get the final exponent by shifting `exp` + context(%Decimal{sign: 1, coef: root, exp: exp - shift}) + end + end + + # Babylonion method + defp sqrt_loop(coef, guess) do + quotient = Kernel.div(coef, guess) + + if guess <= quotient do + guess + else + sqrt_loop(coef, (guess + quotient) >>> 1) + end + end + + @doc """ + Creates a new decimal number from an integer or a string representation. + + A decimal number will always be created exactly as specified with all digits + kept - it will not be rounded with the context. + + ## Backusโ€“Naur form + + sign ::= "+" | "-" + digit ::= "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9" + indicator ::= "e" | "E" + digits ::= digit [digit]... + decimal-part ::= digits "." [digits] | ["."] digits + exponent-part ::= indicator [sign] digits + infinity ::= "Infinity" | "Inf" + nan ::= "NaN" [digits] + numeric-value ::= decimal-part [exponent-part] | infinity + numeric-string ::= [sign] numeric-value | [sign] nan + + ## Floats + + See also `from_float/1`. + + ## Examples + + iex> Decimal.new(1) + #Decimal<1> + + iex> Decimal.new("3.14") + #Decimal<3.14> + """ + @spec new(decimal) :: t + def new(%Decimal{sign: sign, coef: coef, exp: exp} = num) + when sign in [1, -1] and ((is_integer(coef) and coef >= 0) or coef in [:NaN, :inf]) and + is_integer(exp), + do: num + + def new(int) when is_integer(int), + do: %Decimal{sign: if(int < 0, do: -1, else: 1), coef: Kernel.abs(int)} + + def new(binary) when is_binary(binary) do + case parse(binary) do + {decimal, ""} -> decimal + _ -> raise Error, reason: "number parsing syntax: #{inspect(binary)}" + end + end + + @doc """ + Creates a new decimal number from the sign, coefficient and exponent such that + the number will be: `sign * coefficient * 10 ^ exponent`. + + A decimal number will always be created exactly as specified with all digits + kept - it will not be rounded with the context. + """ + @spec new(1 | -1, non_neg_integer | :NaN | :inf, integer) :: t + def new(sign, coef, exp) + when sign in [1, -1] and ((is_integer(coef) and coef >= 0) or coef in [:NaN, :inf]) and + is_integer(exp), + do: %Decimal{sign: sign, coef: coef, exp: exp} + + @doc """ + Creates a new decimal number from a floating point number. + + Floating point numbers use a fixed number of binary digits to represent + a decimal number which has inherent inaccuracy as some decimal numbers cannot + be represented exactly in limited precision binary. 
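+
+  For instance, the float `0.1` is stored as a binary value slightly above
+  `0.1`, which a high-precision conversion makes visible (illustrative):
+
+      iex> :erlang.float_to_binary(0.1, decimals: 20)
+      "0.10000000000000000555"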
+ + Floating point numbers will be converted to decimal numbers with + `:io_lib_format.fwrite_g/1`. Since this conversion is not exact and + because of inherent inaccuracy mentioned above, we may run into counter-intuitive results: + + iex> Enum.reduce([0.1, 0.1, 0.1], &+/2) + 0.30000000000000004 + + iex> Enum.reduce([Decimal.new("0.1"), Decimal.new("0.1"), Decimal.new("0.1")], &Decimal.add/2) + #Decimal<0.3> + + For this reason, it's recommended to build decimals with `new/1`, which is always precise, instead. + + ## Examples + + iex> Decimal.from_float(3.14) + #Decimal<3.14> + + """ + doc_since("1.5.0") + @spec from_float(float) :: t + def from_float(float) when is_float(float) do + float + |> :io_lib_format.fwrite_g() + |> fix_float_exp() + |> IO.iodata_to_binary() + |> new() + end + + @doc """ + Creates a new decimal number from an integer, string, float, or existing decimal number. + + Because conversion from a floating point number is not exact, it's recommended + to instead use `new/1` or `from_float/1` when the argument's type is certain. + See `from_float/1`. + + ## Examples + + iex> {:ok, decimal} = Decimal.cast(3) + iex> decimal + #Decimal<3> + + iex> Decimal.cast("bad") + :error + + """ + @spec cast(term) :: {:ok, t} | :error + def cast(integer) when is_integer(integer), do: {:ok, Decimal.new(integer)} + def cast(%Decimal{} = decimal), do: {:ok, decimal} + def cast(float) when is_float(float), do: {:ok, from_float(float)} + + def cast(binary) when is_binary(binary) do + case parse(binary) do + {decimal, ""} -> {:ok, decimal} + _ -> :error + end + end + + def cast(_), do: :error + + @doc """ + Parses a binary into a decimal. + + If successful, returns a tuple in the form of `{decimal, remainder_of_binary}`, + otherwise `:error`. + + ## Examples + + iex> Decimal.parse("3.14") + {%Decimal{coef: 314, exp: -2, sign: 1}, ""} + + iex> Decimal.parse("3.14.15") + {%Decimal{coef: 314, exp: -2, sign: 1}, ".15"} + + iex> Decimal.parse("-1.1e3") + {%Decimal{coef: 11, exp: 2, sign: -1}, ""} + + iex> Decimal.parse("bad") + :error + + """ + @spec parse(binary()) :: {t(), binary()} | :error + def parse("+" <> rest) do + parse_unsign(rest) + end + + def parse("-" <> rest) do + case parse_unsign(rest) do + {%Decimal{} = num, rest} -> {%{num | sign: -1}, rest} + :error -> :error + end + end + + def parse(binary) when is_binary(binary) do + parse_unsign(binary) + end + + @doc """ + Converts given number to its string representation. + + ## Options + + * `:scientific` - number converted to scientific notation. + * `:normal` - number converted without a exponent. + * `:xsd` - number converted to the [canonical XSD representation](https://www.w3.org/TR/xmlschema-2/#decimal). + * `:raw` - number converted to its raw, internal format. + + """ + @spec to_string(t, :scientific | :normal | :xsd | :raw) :: String.t() + def to_string(num, type \\ :scientific) + + def to_string(%Decimal{sign: sign, coef: :NaN}, _type) do + if sign == 1, do: "NaN", else: "-NaN" + end + + def to_string(%Decimal{sign: sign, coef: :inf}, _type) do + if sign == 1, do: "Infinity", else: "-Infinity" + end + + def to_string(%Decimal{sign: sign, coef: coef, exp: exp}, :normal) do + list = integer_to_charlist(coef) + + list = + if exp >= 0 do + list ++ :lists.duplicate(exp, ?0) + else + diff = length(list) + exp + + if diff > 0 do + List.insert_at(list, diff, ?.) + else + '0.' 
++ :lists.duplicate(-diff, ?0) ++ list + end + end + + list = if sign == -1, do: [?- | list], else: list + IO.iodata_to_binary(list) + end + + def to_string(%Decimal{sign: sign, coef: coef, exp: exp}, :scientific) do + list = integer_to_charlist(coef) + length = length(list) + adjusted = exp + length - 1 + + list = + cond do + exp == 0 -> + list + + exp < 0 and adjusted >= -6 -> + abs_exp = Kernel.abs(exp) + diff = -length + abs_exp + 1 + + if diff > 0 do + list = :lists.duplicate(diff, ?0) ++ list + List.insert_at(list, 1, ?.) + else + List.insert_at(list, exp - 1, ?.) + end + + true -> + list = if length > 1, do: List.insert_at(list, 1, ?.), else: list + list = list ++ 'E' + list = if exp >= 0, do: list ++ '+', else: list + list ++ integer_to_charlist(adjusted) + end + + list = if sign == -1, do: [?- | list], else: list + IO.iodata_to_binary(list) + end + + def to_string(%Decimal{sign: sign, coef: coef, exp: exp}, :raw) do + str = Integer.to_string(coef) + str = if sign == -1, do: [?- | str], else: str + str = if exp != 0, do: [str, "E", Integer.to_string(exp)], else: str + + IO.iodata_to_binary(str) + end + + def to_string(%Decimal{} = decimal, :xsd) do + decimal |> canonical_xsd() |> to_string(:normal) + end + + defp canonical_xsd(%Decimal{coef: 0} = decimal), do: %{decimal | exp: -1} + + defp canonical_xsd(%Decimal{coef: coef, exp: 0} = decimal), + do: %{decimal | coef: coef * 10, exp: -1} + + defp canonical_xsd(%Decimal{coef: coef, exp: exp} = decimal) + when exp > 0, + do: canonical_xsd(%{decimal | coef: coef * 10, exp: exp - 1}) + + defp canonical_xsd(%Decimal{coef: coef} = decimal) + when Kernel.rem(coef, 10) != 0, + do: decimal + + defp canonical_xsd(%Decimal{coef: coef, exp: exp} = decimal), + do: canonical_xsd(%{decimal | coef: Kernel.div(coef, 10), exp: exp + 1}) + + @doc """ + Returns the decimal represented as an integer. + + Fails when loss of precision will occur. + """ + @spec to_integer(t) :: integer + def to_integer(%Decimal{sign: sign, coef: coef, exp: 0}) + when is_integer(coef), + do: sign * coef + + def to_integer(%Decimal{sign: sign, coef: coef, exp: exp}) + when is_integer(coef) and exp > 0, + do: to_integer(%Decimal{sign: sign, coef: coef * 10, exp: exp - 1}) + + def to_integer(%Decimal{sign: sign, coef: coef, exp: exp}) + when is_integer(coef) and exp < 0 and Kernel.rem(coef, 10) == 0, + do: to_integer(%Decimal{sign: sign, coef: Kernel.div(coef, 10), exp: exp + 1}) + + @doc """ + Returns the decimal converted to a float. + + The returned float may have lower precision than the decimal. Fails if + the decimal cannot be converted to a float. 
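+
+  An illustrative example:
+
+      iex> Decimal.to_float(Decimal.new("1.5"))
+      1.5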
+ """ + @spec to_float(t) :: float + def to_float(%Decimal{sign: sign, coef: coef, exp: exp}) when is_integer(coef) do + # Convert back to float without loss + # http://www.exploringbinary.com/correct-decimal-to-floating-point-using-big-integers/ + {num, den} = ratio(coef, exp) + + boundary = den <<< 52 + + cond do + num == 0 -> + 0.0 + + num >= boundary -> + {den, exp} = scale_down(num, boundary, 52) + decimal_to_float(sign, num, den, exp) + + true -> + {num, exp} = scale_up(num, boundary, 52) + decimal_to_float(sign, num, den, exp) + end + end + + defp scale_up(num, den, exp) when num >= den, do: {num, exp} + defp scale_up(num, den, exp), do: scale_up(num <<< 1, den, exp - 1) + + defp scale_down(num, den, exp) do + new_den = den <<< 1 + + if num < new_den do + {den >>> 52, exp} + else + scale_down(num, new_den, exp + 1) + end + end + + defp decimal_to_float(sign, num, den, exp) do + quo = Kernel.div(num, den) + rem = num - quo * den + + tmp = + case den >>> 1 do + den when rem > den -> quo + 1 + den when rem < den -> quo + _ when (quo &&& 1) === 1 -> quo + 1 + _ -> quo + end + + sign = if sign == -1, do: 1, else: 0 + tmp = tmp - @power_of_2_to_52 + exp = if tmp < @power_of_2_to_52, do: exp, else: exp + 1 + <> = <> + tmp + end + + @doc """ + Returns `true` when the given `decimal` has no significant digits after the decimal point. + + ## Examples + + iex> Decimal.integer?("1.00") + true + + iex> Decimal.integer?("1.10") + false + """ + doc_since("2.0.0") + @spec integer?(t) :: boolean + def integer?(%Decimal{coef: :NaN}), do: false + def integer?(%Decimal{coef: :inf}), do: false + def integer?(%Decimal{coef: coef, exp: exp}), do: exp >= 0 or zero_after_dot?(coef, exp) + def integer?(num), do: integer?(decimal(num)) + + defp zero_after_dot?(coef, exp) when coef >= 10 and exp < 0, + do: Kernel.rem(coef, 10) == 0 and zero_after_dot?(Kernel.div(coef, 10), exp + 1) + + defp zero_after_dot?(_coef, exp), + do: exp == 0 + + ## ARITHMETIC ## + + defp add_align(coef1, exp1, coef2, exp2) when exp1 == exp2, do: {coef1, coef2} + + defp add_align(coef1, exp1, coef2, exp2) when exp1 > exp2, + do: {coef1 * pow10(exp1 - exp2), coef2} + + defp add_align(coef1, exp1, coef2, exp2) when exp1 < exp2, + do: {coef1, coef2 * pow10(exp2 - exp1)} + + defp add_sign(sign1, sign2, coef) do + cond do + coef > 0 -> 1 + coef < 0 -> -1 + sign1 == -1 and sign2 == -1 -> -1 + sign1 != sign2 and Context.get().rounding == :floor -> -1 + true -> 1 + end + end + + defp div_adjust(coef1, coef2, adjust) when coef1 < coef2, + do: div_adjust(coef1 * 10, coef2, adjust + 1) + + defp div_adjust(coef1, coef2, adjust) when coef1 >= coef2 * 10, + do: div_adjust(coef1, coef2 * 10, adjust - 1) + + defp div_adjust(coef1, coef2, adjust), do: {coef1, coef2, adjust} + + defp div_calc(coef1, coef2, coef, adjust, prec10) do + cond do + coef1 >= coef2 -> + div_calc(coef1 - coef2, coef2, coef + 1, adjust, prec10) + + coef1 == 0 and adjust >= 0 -> + {coef, adjust, coef1, []} + + coef >= prec10 -> + signals = [:rounded] + signals = if base10?(coef1), do: signals, else: [:inexact | signals] + {coef, adjust, coef1, signals} + + true -> + div_calc(coef1 * 10, coef2, coef * 10, adjust + 1, prec10) + end + end + + defp div_int_calc(coef1, coef2, coef, adjust, precision) do + cond do + coef1 >= coef2 -> + div_int_calc(coef1 - coef2, coef2, coef + 1, adjust, precision) + + adjust != precision -> + div_int_calc(coef1 * 10, coef2, coef * 10, adjust + 1, precision) + + true -> + {coef, coef1} + end + end + + defp integer_division(div_sign, coef1, exp1, coef2, 
exp2) do + precision = exp1 - exp2 + {coef1, coef2, adjust} = div_adjust(coef1, coef2, 0) + + {coef, _rem} = div_int_calc(coef1, coef2, 0, adjust, precision) + + prec10 = pow10(Context.get().precision) + + if coef > prec10 do + { + :error, + :invalid_operation, + "integer division impossible, quotient too large", + %Decimal{coef: :NaN} + } + else + {:ok, %Decimal{sign: div_sign, coef: coef, exp: 0}} + end + end + + defp do_normalize(coef, exp) do + if Kernel.rem(coef, 10) == 0 do + do_normalize(Kernel.div(coef, 10), exp + 1) + else + %Decimal{coef: coef, exp: exp} + end + end + + defp ratio(coef, exp) when exp >= 0, do: {coef * pow10(exp), 1} + defp ratio(coef, exp) when exp < 0, do: {coef, pow10(-exp)} + + pow10_max = + Enum.reduce(0..104, 1, fn int, acc -> + defp pow10(unquote(int)), do: unquote(acc) + defp base10?(unquote(acc)), do: true + acc * 10 + end) + + defp pow10(num) when num > 104, do: pow10(104) * pow10(num - 104) + + defp base10?(num) when num >= unquote(pow10_max) do + if Kernel.rem(num, unquote(pow10_max)) == 0 do + base10?(Kernel.div(num, unquote(pow10_max))) + else + false + end + end + + defp base10?(_num), do: false + + ## ROUNDING ## + + defp do_round(sign, digits, exp, target_exp, rounding) do + num_digits = length(digits) + precision = num_digits - (target_exp - exp) + + cond do + exp == target_exp -> + %Decimal{sign: sign, coef: digits_to_integer(digits), exp: exp} + + exp < target_exp and precision < 0 -> + zeros = :lists.duplicate(target_exp - exp, ?0) + digits = zeros ++ digits + {signif, remain} = :lists.split(1, digits) + + signif = + if increment?(rounding, sign, signif, remain), + do: digits_increment(signif), + else: signif + + coef = digits_to_integer(signif) + %Decimal{sign: sign, coef: coef, exp: target_exp} + + exp < target_exp and precision >= 0 -> + {signif, remain} = :lists.split(precision, digits) + + signif = + if increment?(rounding, sign, signif, remain), + do: digits_increment(signif), + else: signif + + coef = digits_to_integer(signif) + %Decimal{sign: sign, coef: coef, exp: target_exp} + + exp > target_exp -> + digits = digits ++ Enum.map(1..(exp - target_exp), fn _ -> ?0 end) + coef = digits_to_integer(digits) + %Decimal{sign: sign, coef: coef, exp: target_exp} + end + end + + defp digits_to_integer([]), do: 0 + defp digits_to_integer(digits), do: :erlang.list_to_integer(digits) + + defp precision(%Decimal{coef: :NaN} = num, _precision, _rounding) do + {num, []} + end + + defp precision(%Decimal{coef: :inf} = num, _precision, _rounding) do + {num, []} + end + + defp precision(%Decimal{sign: sign, coef: coef, exp: exp} = num, precision, rounding) do + digits = :erlang.integer_to_list(coef) + num_digits = length(digits) + + if num_digits > precision do + do_precision(sign, digits, num_digits, exp, precision, rounding) + else + {num, []} + end + end + + defp do_precision(sign, digits, num_digits, exp, precision, rounding) do + precision = Kernel.min(num_digits, precision) + {signif, remain} = :lists.split(precision, digits) + + signif = + if increment?(rounding, sign, signif, remain), do: digits_increment(signif), else: signif + + signals = if any_nonzero(remain), do: [:inexact, :rounded], else: [:rounded] + + exp = exp + length(remain) + coef = digits_to_integer(signif) + dec = %Decimal{sign: sign, coef: coef, exp: exp} + {dec, signals} + end + + defp increment?(_, _, _, []), do: false + + defp increment?(:down, _, _, _), do: false + + defp increment?(:up, _, _, _), do: true + + defp increment?(:ceiling, sign, _, remain), do: sign == 1 and 
any_nonzero(remain) + + defp increment?(:floor, sign, _, remain), do: sign == -1 and any_nonzero(remain) + + defp increment?(:half_up, _, _, [digit | _]), do: digit >= ?5 + + defp increment?(:half_even, _, [], [?5 | rest]), do: any_nonzero(rest) + + defp increment?(:half_even, _, signif, [?5 | rest]), + do: any_nonzero(rest) or Kernel.rem(:lists.last(signif), 2) == 1 + + defp increment?(:half_even, _, _, [digit | _]), do: digit > ?5 + + defp increment?(:half_down, _, _, [digit | rest]), + do: digit > ?5 or (digit == ?5 and any_nonzero(rest)) + + defp any_nonzero(digits), do: :lists.any(fn digit -> digit != ?0 end, digits) + + defp digits_increment(digits), do: digits_increment(:lists.reverse(digits), []) + + defp digits_increment([?9 | rest], acc), do: digits_increment(rest, [?0 | acc]) + + defp digits_increment([head | rest], acc), do: :lists.reverse(rest, [head + 1 | acc]) + + defp digits_increment([], acc), do: [?1 | acc] + + ## CONTEXT ## + + defp context(num, signals \\ []) do + context = Context.get() + {result, prec_signals} = precision(num, context.precision, context.rounding) + error(put_uniq(signals, prec_signals), nil, result, context) + end + + defp put_uniq(list, elems) when is_list(elems) do + Enum.reduce(elems, list, &put_uniq(&2, &1)) + end + + defp put_uniq(list, elem) do + if elem in list, do: list, else: [elem | list] + end + + ## PARSING ## + + defp parse_unsign(<>) when first in [?i, ?I] do + if String.downcase(remainder) == "nfinity" do + {%Decimal{coef: :inf}, rest} + else + :error + end + end + + defp parse_unsign(<>) when first in [?i, ?I] do + if String.downcase(remainder) == "nf" do + {%Decimal{coef: :inf}, rest} + else + :error + end + end + + defp parse_unsign(<>) when first in [?n, ?N] do + if String.downcase(remainder) == "an" do + {%Decimal{coef: :NaN}, rest} + else + :error + end + end + + defp parse_unsign(bin) do + {int, rest} = parse_digits(bin) + {float, rest} = parse_float(rest) + {exp, rest} = parse_exp(rest) + + if int == [] and float == [] do + :error + else + int = if int == [], do: '0', else: int + exp = if exp == [], do: '0', else: exp + + number = %Decimal{ + coef: List.to_integer(int ++ float), + exp: List.to_integer(exp) - length(float) + } + + {number, rest} + end + end + + defp parse_float("." <> rest), do: parse_digits(rest) + defp parse_float(bin), do: {[], bin} + + defp parse_exp(<>) when e in [?e, ?E] do + case rest do + <> when sign in [?+, ?-] -> + {digits, rest} = parse_digits(rest) + {[sign | digits], rest} + + _ -> + parse_digits(rest) + end + end + + defp parse_exp(bin) do + {[], bin} + end + + defp parse_digits(bin), do: parse_digits(bin, []) + + defp parse_digits(<>, acc) when digit in ?0..?9 do + parse_digits(rest, [digit | acc]) + end + + defp parse_digits(rest, acc) do + {:lists.reverse(acc), rest} + end + + # Util + + defp decimal(%Decimal{} = num), do: num + defp decimal(num) when is_integer(num), do: new(num) + defp decimal(num) when is_binary(num), do: new(num) + + defp decimal(other) when is_float(other) do + raise ArgumentError, + "implicit conversion of #{inspect(other)} to Decimal is not allowed. 
Use Decimal.from_float/1" + end + + defp handle_error(signals, reason, result, context) do + context = context || Context.get() + signals = List.wrap(signals) + + flags = Enum.reduce(signals, context.flags, &put_uniq(&2, &1)) + Context.set(%{context | flags: flags}) + error_signal = Enum.find(signals, &(&1 in context.traps)) + + if error_signal do + error = [signal: error_signal, reason: reason] + {:error, error} + else + {:ok, result} + end + end + + defp fix_float_exp(digits) do + fix_float_exp(digits, []) + end + + defp fix_float_exp([?e | rest], [?0 | [?. | result]]) do + fix_float_exp(rest, [?e | result]) + end + + defp fix_float_exp([digit | rest], result) do + fix_float_exp(rest, [digit | result]) + end + + defp fix_float_exp([], result), do: :lists.reverse(result) + + if Version.compare(System.version(), "1.3.0") == :lt do + defp integer_to_charlist(string), do: Integer.to_char_list(string) + else + defp integer_to_charlist(string), do: Integer.to_charlist(string) + end +end + +defimpl Inspect, for: Decimal do + def inspect(dec, _opts) do + "#Decimal<" <> Decimal.to_string(dec) <> ">" + end +end + +defimpl String.Chars, for: Decimal do + def to_string(dec) do + Decimal.to_string(dec) + end +end diff --git a/deps/decimal/lib/decimal/context.ex b/deps/decimal/lib/decimal/context.ex new file mode 100644 index 0000000..0201eb2 --- /dev/null +++ b/deps/decimal/lib/decimal/context.ex @@ -0,0 +1,125 @@ +defmodule Decimal.Context do + import Decimal.Macros + alias Decimal.Context + + @moduledoc """ + The context is kept in the process dictionary. It can be accessed with + `get/0` and `set/1`. + + The default context has a precision of 28, the rounding algorithm is + `:half_up`. The set trap enablers are `:invalid_operation` and + `:division_by_zero`. + + ## Fields + + * `precision` - maximum number of decimal digits in the coefficient. If an + operation result has more digits it will be rounded to `precision` + digits with the rounding algorithm in `rounding`. + * `rounding` - the rounding algorithm used when the coefficient's number of + exceeds `precision`. Strategies explained below. + * `flags` - a list of signals that for which the flag is sent. When an + exceptional condition is signalled its flag is set. The flags are sticky + and will be set until explicitly cleared. + * `traps` - a list of set trap enablers for signals. When a signal's trap + enabler is set the condition causes `Decimal.Error` to be raised. + + ## Rounding algorithms + + * `:down` - round toward zero (truncate). Discarded digits are ignored, + result is unchanged. + * `:half_up` - if the discarded digits is greater than or equal to half of + the value of a one in the next left position then the coefficient will be + incremented by one (rounded up). Otherwise (the discarded digits are less + than half) the discarded digits will be ignored. + * `:half_even` - also known as "round to nearest" or "banker's rounding". If + the discarded digits is greater than half of the value of a one in the + next left position then the coefficient will be incremented by one + (rounded up). If they represent less than half discarded digits will be + ignored. Otherwise (exactly half), the coefficient is not altered if it's + even, or incremented by one (rounded up) if it's odd (to make an even + number). + * `:ceiling` - round toward +Infinity. If all of the discarded digits are + zero or the sign is negative the result is unchanged. Otherwise, the + coefficient will be incremented by one (rounded up). 
+ * `:floor` - round toward -Infinity. If all of the discarded digits are zero + or the sign is positive the result is unchanged. Otherwise, the sign is + negative and coefficient will be incremented by one. + * `:half_down` - if the discarded digits is greater than half of the value + of a one in the next left position then the coefficient will be + incremented by one (rounded up). Otherwise (the discarded digits are half + or less) the discarded digits are ignored. + * `:up` - round away from zero. If all discarded digits are zero the + coefficient is not changed, otherwise it is incremented by one (rounded + up). + + This table shows the results of rounding operations for all the rounding + algorithms: + + Rounding algorithm | 5.5 | 2.5 | 1.6 | 1.1 | 1.0 | -1.0 | -1.1 | -1.6 | -2.5 | -5.5 + :----------------- | :-- | :-- | :-- | :-- | :-- | :--- | :--- | :--- | :--- | :--- + `:up` | 6 | 3 | 2 | 2 | 1 | -1 | -2 | -2 | -3 | -6 + `:down` | 5 | 2 | 1 | 1 | 1 | -1 | -1 | -1 | -2 | -5 + `:ceiling` | 6 | 3 | 2 | 2 | 1 | -1 | -1 | -1 | -2 | -5 + `:floor` | 5 | 2 | 1 | 1 | 1 | -1 | -2 | -2 | -3 | -6 + `:half_up` | 6 | 3 | 2 | 1 | 1 | -1 | -1 | -2 | -3 | -6 + `:half_down` | 5 | 2 | 2 | 1 | 1 | -1 | -1 | -2 | -2 | -5 + `:half_even` | 6 | 2 | 2 | 1 | 1 | -1 | -1 | -2 | -2 | -6 + + """ + @type t :: %__MODULE__{ + precision: pos_integer, + rounding: Decimal.rounding(), + flags: [Decimal.signal()], + traps: [Decimal.signal()] + } + + defstruct precision: 28, + rounding: :half_up, + flags: [], + traps: [:invalid_operation, :division_by_zero] + + @context_key :"$decimal_context" + + @doc """ + Runs function with given context. + """ + doc_since("1.9.0") + @spec with(t(), (() -> x)) :: x when x: var + def with(%Context{} = context, fun) when is_function(fun, 0) do + old = Process.put(@context_key, context) + + try do + fun.() + after + set(old || %Context{}) + end + end + + @doc """ + Gets the process' context. + """ + doc_since("1.9.0") + @spec get() :: t() + def get() do + Process.get(@context_key, %Context{}) + end + + @doc """ + Set the process' context. + """ + doc_since("1.9.0") + @spec set(t()) :: :ok + def set(%Context{} = context) do + Process.put(@context_key, context) + :ok + end + + @doc """ + Update the process' context. + """ + doc_since("1.9.0") + @spec update((t() -> t())) :: :ok + def update(fun) when is_function(fun, 1) do + get() |> fun.() |> set() + end +end diff --git a/deps/decimal/lib/decimal/error.ex b/deps/decimal/lib/decimal/error.ex new file mode 100644 index 0000000..1f3b08f --- /dev/null +++ b/deps/decimal/lib/decimal/error.ex @@ -0,0 +1,24 @@ +defmodule Decimal.Error do + @moduledoc """ + The exception that all decimal operations may raise. + + ## Fields + + * `signal` - the signalled error, additional signalled errors will be found + in the context. + * `reason` - the reason for the error. + + Rescuing the error to access the result or the other fields of the error is + discouraged and should only be done for exceptional conditions. It is more + pragmatic to set the appropriate traps on the context and check the flags + after the operation if the result needs to be inspected. 
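+
+  For example (an illustrative sketch, not an API contract):
+
+      Decimal.Context.update(fn ctx -> %{ctx | traps: [], flags: []} end)
+      result = Decimal.div(1, 3)
+
+      if :inexact in Decimal.Context.get().flags do
+        IO.puts("rounded to the context precision: " <> to_string(result))
+      end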
+ """ + + defexception [:signal, :reason] + + @impl true + def message(%{signal: signal, reason: reason}) do + reason = reason && ": " <> reason + "#{signal}#{reason}" + end +end diff --git a/deps/decimal/lib/decimal/macros.ex b/deps/decimal/lib/decimal/macros.ex new file mode 100644 index 0000000..f8fca78 --- /dev/null +++ b/deps/decimal/lib/decimal/macros.ex @@ -0,0 +1,11 @@ +defmodule Decimal.Macros do + @moduledoc false + + defmacro doc_since(version) do + if Version.match?(System.version(), ">= 1.7.0") do + quote do + @doc since: unquote(version) + end + end + end +end diff --git a/deps/decimal/mix.exs b/deps/decimal/mix.exs new file mode 100644 index 0000000..aeca126 --- /dev/null +++ b/deps/decimal/mix.exs @@ -0,0 +1,42 @@ +defmodule Decimal.Mixfile do + use Mix.Project + + @version "2.0.0" + @source_url "https://github.com/ericmj/decimal" + + def project() do + [ + app: :decimal, + version: @version, + elixir: "~> 1.2", + deps: deps(), + name: "Decimal", + source_url: @source_url, + docs: [source_ref: "v#{@version}", main: "readme", extras: ["README.md"]], + description: description(), + package: package() + ] + end + + def application() do + [] + end + + defp deps() do + [ + {:ex_doc, ">= 0.0.0", only: :dev} + ] + end + + defp description() do + "Arbitrary precision decimal arithmetic." + end + + defp package() do + [ + maintainers: ["Eric Meadows-Jรถnsson"], + licenses: ["Apache-2.0"], + links: %{"GitHub" => @source_url} + ] + end +end diff --git a/deps/ecto/.fetch b/deps/ecto/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/ecto/.formatter.exs b/deps/ecto/.formatter.exs new file mode 100644 index 0000000..00b386f --- /dev/null +++ b/deps/ecto/.formatter.exs @@ -0,0 +1,31 @@ +locals_without_parens = [ + # Query + from: 2, + + # Schema + field: 1, + field: 2, + field: 3, + timestamps: 1, + belongs_to: 2, + belongs_to: 3, + has_one: 2, + has_one: 3, + has_many: 2, + has_many: 3, + many_to_many: 2, + many_to_many: 3, + embeds_one: 2, + embeds_one: 3, + embeds_one: 4, + embeds_many: 2, + embeds_many: 3, + embeds_many: 4 +] + +[ + locals_without_parens: locals_without_parens, + export: [ + locals_without_parens: locals_without_parens + ] +] diff --git a/deps/ecto/.hex b/deps/ecto/.hex new file mode 100644 index 0000000000000000000000000000000000000000..1b6b49c255b0f0b847f03ee0a53779ac96af3a93 GIT binary patch literal 268 zcmZ9{J95J?3z<8IuO+K+rs zyQEg7)C;NisWo0sW#sa8z17;&57s`%90PmE5QLKnLLlNb8W(_wjJ9?VGQN@vHOHT8MQMGy-ixj vB36(!JC8XJ1#l3KAq&LhDQpK2c4gtqyeZH9)k`Jy`qP(8bNkEeF&^;&nf^&l literal 0 HcmV?d00001 diff --git a/deps/ecto/CHANGELOG.md b/deps/ecto/CHANGELOG.md new file mode 100644 index 0000000..e5f244f --- /dev/null +++ b/deps/ecto/CHANGELOG.md @@ -0,0 +1,735 @@ +# Changelog for v3.x + +## v3.8.4 (2022-06-04) + +### Enhancements + + * [Ecto.Multi] Add `one/2` and `all/2` functions + * [Ecto.Query] Support `literal(...)` in `fragment` + +### Bug fix + + * [Ecto.Schema] Make sure fields are inspected in the correct order in Elixir v1.14+ + +## v3.8.3 (2022-05-11) + +### Bug fix + + * [Ecto.Query] Allow source aliases to be used in `type/2` + * [Ecto.Schema] Avoid "undefined behaviour/struct" warnings and errors during compilation + +## v3.8.2 (2022-05-05) + +### Bug fix + + * [Ecto.Adapter] Do not require adapter metadata to be raw maps + * [Ecto.Association] Respect `join_where` in many to many `on_replace` deletes + * [Ecto.Changeset] Check if list is in `empty_values` before nested validations + +## v3.8.1 (2022-04-27) + +### Bug fix + + * [Ecto.Query] Fix 
regression where a join's on parameter on `update_all` was out of order + +## v3.8.0 (2022-04-26) + +Ecto v3.8 requires Elixir v1.10+. + +### Enhancements + + * [Ecto] Add new Embedded chapter to Introductory guides + * [Ecto.Changeset] Allow custom `:error_key` in unique_constraint + * [Ecto.Changeset] Add `:match` option to all constraint functions + * [Ecto.Query] Support dynamic aliases + * [Ecto.Query] Allow using `type/2` with virtual fields + * [Ecto.Query] Suggest alternatives to inexistent fields in queries + * [Ecto.Query] Support passing queries using subqueries to `insert_all` + * [Ecto.Repo] Allow `stacktrace: true` so stacktraces are included in telemetry events and logs + * [Ecto.Schema] Validate options given to schema fields + +### Bug fixes + + * [Ecto.Changeset] Address regression on `validate_subset` no longer working with custom array types + * [Ecto.Changeset] **Potentially breaking change**: Detect `empty_values` inside lists when casting. This may cause issues if you were relying on the casting of empty values (by default, only `""`). + * [Ecto.Query] Handle atom list sigils in `select` + * [Ecto.Query] Improve tracking of `select_merge` inside subqueries + * [Ecto.Repo] Properly handle literals in queries given to `insert_all` + * [Ecto.Repo] Don't surface persisted data as changes on embed updates + * [Ecto.Schema] Preserve parent prefix on join tables + +## v3.7.2 (2022-03-13) + +### Enhancements + + * [Ecto.Schema] Add option to skip validations for default values + * [Ecto.Query] Allow coalesce in `type/2` + * [Ecto.Query] Support parameterized types in type/2 + * [Ecto.Query] Allow arbitrary parentheses in query expressions + +## v3.7.1 (2021-08-27) + +### Enhancements + + * [Ecto.Embedded] Make `Ecto.Embedded` public and describe struct fields + +### Bug fixes + + * [Ecto.Repo] Make sure parent changeset is included in changes for `insert`/`update`/`delete` when there are errors processing the parent itself + +## v3.7.0 (2021-08-19) + +### Enhancements + + * [Ecto.Changeset] Add `Ecto.Changeset.traverse_validations/2` + * [Ecto.Enum] Add `Ecto.Enum.mappings/2` and `Ecto.Enum.dump_values/2` + * [Ecto.Query] Add support for dynamic `as(^as)` and `parent_as(^as)` + * [Ecto.Repo] Add stale changeset to `Ecto.StaleEntryError` fields + * [Ecto.Schema] Add support for `@schema_context` to set context metadata on schema definition + +### Bug fixes + + * [Ecto.Changeset] Fix changeset inspection not redacting when embedded + * [Ecto.Changeset] Use semantic comparison on `validate_inclusion`, `validate_exclusion`, and `validate_subset` + * [Ecto.Enum] Raise on duplicate values in `Ecto.Enum` + * [Ecto.Query] Make sure `hints` are included in the query cache + * [Ecto.Repo] Support placeholders in `insert_all` without schemas + * [Ecto.Repo] Wrap in a subquery when query given to `Repo.aggregate` has combination + * [Ecto.Repo] Fix CTE subqueries not finding parent bindings + * [Ecto.Repo] Return changeset with assocs if any of the assocs are invalid + +## v3.6.2 (2021-05-28) + +### Enhancements + + * [Ecto.Query] Support macros in `with_cte` + * [Ecto.Repo] Add `Ecto.Repo.all_running/0` to list all running repos + +### Bug fixes + + * [Ecto.Query] Do not omit nil fields in a subquery select + * [Ecto.Query] Allow `parent_as` to look for an alias all the way up across subqueries + * [Ecto.Query] Raise if a nil value is given to a query from a nested map parameter + * [Ecto.Query] Fix `insert_all` when using both `:on_conflict` and `:placeholders` + * [mix ecto.load] Do 
not pass `--force` to underlying compile task + +## v3.6.1 (2021-04-12) + +### Enhancements + + * [Ecto.Changeset] Allow the `:query` option in `unsafe_validate_unique` + +### Bug fixes + + * [Ecto.Changeset] Add the relation id in `apply_changes` if the relation key exists (instead of hardcoding it to `id`) + +## v3.6.0 (2021-04-03) + +### Enhancements + + * [Ecto.Changeset] Support `:repo_opts` in `unsafe_validate_unique` + * [Ecto.Changeset] Add a validation error if trying to cast a cardinality one embed/assoc with anything other than a map or keyword list + * [Ecto.Enum] Allow enums to map to custom values + * [Ecto.Multi] Add `Ecto.Multi.put/3` for directly storing values + * [Ecto.Query] **Potentially breaking change**: optimize `many_to_many` queries so it no longer load intermediary tables in more occasions. This may cause issues if you are using `Ecto.assoc/2` to load `many_to_many` associations and then trying to access intermediate bindings (which is discouraged but it was possible) + * [Ecto.Repo] Allow `insert_all` to be called with a query instead of rows + * [Ecto.Repo] Add `:placeholders` support to `insert_all` to avoid sending the same value multiple times + * [Ecto.Schema] Support `:preload_order` on `has_many` and `many_to_many` associations + * [Ecto.UUID] Add bang UUID conversion methods + * [Ecto.Query] The `:hints` option now accepts dynamic values when supplied as tuples + * [Ecto.Query] Support `select: map(source, fields)` where `source` is a fragment + * [Ecto.Query] Allow referring to the parent query in a join's subquery select via `parent_as` + * [mix ecto] Support file and line interpolation on `ECTO_EDITOR` + +### Bug fixes + + * [Ecto.Changeset] Change `apply_changes/1` to add the relation to the `struct.relation_id` if relation struct is persisted + * [Ecto.Query] Remove unnecessary INNER JOIN in many to many association query + * [Ecto.Query] Allow parametric types to be interpolated in queries + * [Ecto.Schema] Raise `ArgumentError` when default has invalid type + +## v3.5.8 (2021-02-21) + +### Enhancements + + * [Ecto.Query] Support map/2 on fragments and subqueries + +## v3.5.7 (2021-02-07) + +### Bug fixes + + * [Ecto.Query] Fixes param ordering issue on dynamic queries with subqueries + +## v3.5.6 (2021-01-20) + +### Enhancements + + * [Ecto.Schema] Support `on_replace: :delete_if_exists` on associations + +### Bug fixes + + * [Ecto.Query] Allow unary minus operator in query expressions + * [Ecto.Schema] Allow nil values on typed maps + +## v3.5.5 (2020-11-12) + +### Enhancements + + * [Ecto.Query] Add support for subqueries operators: `all`, `any`, and `exists` + +### Bug fixes + + * [Ecto.Changeset] Use association source on `put_assoc` with maps/keywords + * [Ecto.Enum] Add `cast` clause for nil values on `Ecto.Enum` + * [Ecto.Schema] Allow nested type `:any` for non-virtual fields + +## v3.5.4 (2020-10-28) + +### Enhancements + + * [mix ecto.drop] Provide `--force-drop` for databases that may support it + * [guides] Add new "Multi tenancy with foreign keys" guide + +### Bug fixes + + * [Ecto.Changeset] Make keys optional in specs + * [Ecto.Enum] Make sure `values/2` works for virtual fields + * [Ecto.Query] Fix missing type on CTE queries that select a single field + +## v3.5.3 (2020-10-21) + +### Bug fixes + + * [Ecto.Query] Do not reset parameter counter for nested CTEs + * [Ecto.Type] Fix regression where array type with nils could no longer be cast/load/dump + * [Ecto.Type] Fix CaseClauseError when casting a decimal with a binary remainder 
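
Two of the entries above are easiest to see together in one schema: v3.6.0's "Allow enums to map to custom values" and v3.7.0's `Ecto.Enum.mappings/2`. A minimal sketch, assuming a hypothetical `MyApp.Post` schema (not part of this changelog):

```elixir
defmodule MyApp.Post do
  use Ecto.Schema

  schema "posts" do
    # v3.6.0: enum atoms may map to custom database values (here integers)
    field :status, Ecto.Enum, values: [draft: 1, published: 2, archived: 3]
  end
end

# v3.7.0: inspect the atom-to-database-value mapping at runtime
Ecto.Enum.mappings(MyApp.Post, :status)
#=> [draft: 1, published: 2, archived: 3]
```
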
+ +## v3.5.2 (2020-10-12) + +### Enhancements + + * [Ecto.Repo] Add Repo.reload/2 and Repo.reload!/2 + +### Bug fixes + + * [Ecto.Changeset] Fix "__schema__/1 is undefined or private" error while inspecting a schemaless changeset + * [Ecto.Repo] Invoke `c:Ecto.Repo.default_options/1` per entry-point operation + +## v3.5.1 (2020-10-08) + +### Enhancements + + * [Ecto.Changeset] Warn if there are duplicate IDs in the parent schema for `cast_assoc/3`/`cast_embed/3` + * [Ecto.Schema] Allow `belongs_to` to accept options for parameterized types + +### Bug fixes + + * [Ecto.Query] Keep field types when using a subquery with source + +## v3.5.0 (2020-10-03) + +v3.5 requires Elixir v1.8+. + +### Bug fixes + + * [Ecto.Changeset] Ensure `:empty_values` in `cast/4` does not automatically propagate to following cast calls. If you want a given set of `:empty_values` to apply to all `cast/4` calls, change the value stored in `changeset.empty_values` instead + * [Ecto.Changeset] **Potentially breaking change**: Do not force repository updates to happen when using `optimistic_lock`. The lock field will only be incremented if the record has other changes. If no changes, nothing happens. + * [Ecto.Changeset] Do not automatically share empty values across `cast/3` calls + * [Ecto.Query] Consider query prefix in cte/combination query cache + * [Ecto.Query] Allow the entry to be marked as nil when using left join with subqueries + * [Ecto.Query] Support subqueries inside dynamic expressions + * [Ecto.Repo] Fix preloading when using dynamic repos and the sandbox in automatic mode + * [Ecto.Repo] Do not duplicate collections when associations are preloaded for repeated elements + +### Enhancements + + * [Ecto.Enum] Add `Ecto.Enum` as a custom parameterized type + * [Ecto.Query] Allow `:prefix` in `from` to be set to nil + * [Ecto.Query] Do not restrict subqueries in `where` to map/struct types + * [Ecto.Query] Allow atoms in query without interpolation in order to support Ecto.Enum + * [Ecto.Schema] Do not validate uniqueness if there is a prior error on the field + * [Ecto.Schema] Allow `redact: true` in `field` + * [Ecto.Schema] Support parameterized types via `Ecto.ParameterizedType` + * [Ecto.Schema] Rewrite embeds and assocs as parameterized types. This means `__schema__(:type, assoc_or_embed)` now returns a parameterized type. 
To check if something is an association, use `__schema__(:assocs)` or `__schema__(:embeds)` instead + +## v3.4.6 (2020-08-07) + +### Enhancements + + * [Ecto.Query] Allow `count/0` on `type/2` + * [Ecto.Multi] Support anonymous functions in multiple functions + +### Bug fixes + + * [Ecto.Query] Consider booleans as literals in unions, subqueries, ctes, etc + * [Ecto.Schema] Generate IDs for nested embeds + +## v3.4.5 (2020-06-14) + +### Enhancements + + * [Ecto.Changeset] Allow custom error key in `unsafe_validate_unique` + * [Ecto.Changeset] Improve performance when casting large params maps + +### Bug fixes + + * [Ecto.Changeset] Improve error message for invalid `cast_assoc` + * [Ecto.Query] Fix inspecting query with fragment CTE + * [Ecto.Query] Fix inspecting dynamics with aliased bindings + * [Ecto.Query] Improve error message when selecting a single atom + * [Ecto.Repo] Reduce data-copying when preloading multiple associations + * [Ecto.Schema] Do not define a compile-time dependency for schema in `:join_through` + +## v3.4.4 (2020-05-11) + +### Enhancements + + * [Ecto.Schema] Add `join_where` support to `many_to_many` + +## v3.4.3 (2020-04-27) + +### Enhancements + + * [Ecto.Query] Support `as/1` and `parent_as/1` for lazy named bindings and to allow parent references from subqueries + * [Ecto.Query] Support `x in subquery(query)` + +### Bug fixes + + * [Ecto.Query] Do not raise for missing assocs if :force is given to preload + * [Ecto.Repo] Return error from `Repo.delete` on invalid changeset from `prepare_changeset` + +## v3.4.2 (2020-04-10) + +### Enhancements + + * [Ecto.Changeset] Support multiple fields in `unique_constraint/3` + +## v3.4.1 (2020-04-08) + +### Enhancements + + * [Ecto] Add `Ecto.embedded_load/3` and `Ecto.embedded_dump/2` + * [Ecto.Query] Improve error message on invalid JSON expressions + * [Ecto.Repo] Emit `[:ecto, :repo, :init]` telemetry event upon Repo init + +### Bug fixes + + * [Ecto.Query] Do not support JSON selectors on `type/2` + +### Deprecations + + * [Ecto.Repo] Deprecate `conflict_target: {:constraint, _}`. It is a discouraged approach and `{:unsafe_fragment, _}` is still available if someone definitely needs it + +## v3.4.0 (2020-03-24) + +v3.4 requires Elixir v1.7+. + +### Enhancements + + * [Ecto.Query] Allow dynamic queries in CTE and improve error message + * [Ecto.Query] Add `Ecto.Query.API.json_extract_path/2` and JSON path support to query syntax. For example, `posts.metadata["tags"][0]["name"]` will return the name of the first tag stored in the `:map` metadata field + * [Ecto.Repo] Add new `default_options/1` callback to repository + * [Ecto.Repo] Support passing `:telemetry_options` to repository operations + +### Bug fixes + + * [Ecto.Changeset] Properly add validation annotation to `validate_acceptance` + * [Ecto.Query] Raise if there is loaded non-empty association data without related key when preloading. This typically means not all fields have been loaded in a query + * [Ecto.Schema] Show meaningful error in case `schema` is invoked twice in an `Ecto.Schema` + +## v3.3.4 (2020-02-27) + +### Bug fixes + + * [mix ecto] Do not rely on map ordering when parsing repos + * [mix ecto.gen.repo] Improve error message when a repo is not given + +## v3.3.3 (2020-02-14) + +### Enhancements + + * [Ecto.Query] Support fragments in `lock` + * [Ecto.Query] Handle `nil` in `select_merge` with similar semantics to SQL databases (i.e. 
it simply returns `nil` itself) + +## v3.3.2 (2020-01-28) + +### Enhancements + + * [Ecto.Changeset] Only bump optimistic lock in case of success + * [Ecto.Query] Allow macros in Ecto window expressions + * [Ecto.Schema] Support `:join_defaults` on `many_to_many` associations + * [Ecto.Schema] Allow MFargs to be given to association `:defaults` + * [Ecto.Type] Add `Ecto.Type.embedded_load` and `Ecto.Type.embedded_dump` + +### Bug fixes + + * [Ecto.Repo] Ignore empty hostname when parsing database url (Elixir v1.10 support) + * [Ecto.Repo] Rewrite combinations on Repo.exists? queries + * [Ecto.Schema] Respect child `@schema_prefix` in `cast_assoc` + * [mix ecto.gen.repo] Use `config_path` when writing new config in `mix ecto.gen.repo` + +## v3.3.1 (2019-12-27) + +### Enhancements + + * [Ecto.Query.WindowAPI] Support `filter/2` + +### Bug fixes + + * [Ecto.Query.API] Fix `coalesce/2` usage with mixed types + +## v3.3.0 (2019-12-11) + +### Enhancements + + * [Ecto.Adapter] Add `storage_status/1` callback to `Ecto.Adapters.Storage` behaviour + * [Ecto.Changeset] Add `Ecto.Changeset.apply_action!/2` + * [Ecto.Changeset] Remove actions restriction in `Ecto.Changeset.apply_action/2` + * [Ecto.Repo] Introduce `c:Ecto.Repo.aggregate/2` + * [Ecto.Repo] Support `{:replace_all_except, fields}` in `:on_conflict` + +### Bug fixes + + * [Ecto.Query] Make sure the `:prefix` option in `:from`/`:join` also cascades to subqueries + * [Ecto.Query] Make sure the `:prefix` option in `:join` also cascades to queries + * [Ecto.Query] Use database returned values for literals. Previous Ecto versions knew literals from queries should not be discarded for combinations but, even if they were not discarded, we would ignore the values returned by the database + * [Ecto.Repo] Do not wrap schema operations in a transaction if already inside a transaction. 
We have also removed the **private** option called `:skip_transaction` + +### Deprecations + + * [Ecto.Repo] `:replace_all_except_primary_keys` is deprecated in favor of `{:replace_all_except, fields}` in `:on_conflict` + +## v3.2.5 (2019-11-03) + +### Bug fixes + + * [Ecto.Query] Fix a bug where executing some queries would leak the `{:maybe, ...}` type + +## v3.2.4 (2019-11-02) + +### Bug fixes + + * [Ecto.Query] Improve error message on invalid join binding + * [Ecto.Query] Make sure the `:prefix` option in `:join` also applies to through associations + * [Ecto.Query] Invoke custom type when loading aggregations from the database (but fallback to database value if it can't be cast) + * [mix ecto.gen.repo] Support Elixir v1.9 style configs + +## v3.2.3 (2019-10-17) + +### Bug fixes + + * [Ecto.Changeset] Do not convert enums given to `validate_inclusion` to a list + +### Enhancements + + * [Ecto.Changeset] Improve error message on non-atom keys to change/put_change + * [Ecto.Changeset] Allow :with to be given as a `{module, function, args}` tuple on `cast_association/cast_embed` + * [Ecto.Changeset] Add `fetch_change!/2` and `fetch_field!/2` + +## v3.2.2 (2019-10-01) + +### Bug fixes + + * [Ecto.Query] Fix keyword arguments given to `:on` when a bind is not given to join + * [Ecto.Repo] Make sure a preload given to an already preloaded has_many :through is loaded + +## v3.2.1 (2019-09-17) + +### Enhancements + + * [Ecto.Changeset] Add rollover logic for default incrementer in `optimistic_lock` + * [Ecto.Query] Also expand macros when used inside `type/2` + +### Bug fixes + + * [Ecto.Query] Ensure queries with non-cacheable queries in CTEs/combinations are also not-cacheable + +## v3.2.0 (2019-09-07) + +v3.2 requires Elixir v1.6+. + +### Enhancements + + * [Ecto.Query] Add common table expressions support `with_cte/3` and `recursive_ctes/2` + * [Ecto.Query] Allow `dynamic/3` to be used in `order_by`, `distinct`, `group_by`, as well as in `partition_by`, `order_by`, and `frame` inside `windows` + * [Ecto.Query] Allow filters in `type/2` expressions + * [Ecto.Repo] Merge options given to the repository into the changeset `repo_opts` and assign it back to make it available down the chain + * [Ecto.Repo] Add `prepare_query/3` callback that is invoked before query operations + * [Ecto.Repo] Support `:returning` option in `Ecto.Repo.update/2` + * [Ecto.Repo] Support passing a one arity function to `Ecto.Repo.transaction/2`, where the argument is the current repo + * [Ecto.Type] Add a new `embed_as/1` callback to `Ecto.Type` that allows adapters to control embedding behaviour + * [Ecto.Type] Add `use Ecto.Type` for convenience that implements the new required callbacks + +### Bug fixes + + * [Ecto.Association] Ensure we delete an association before inserting when replacing on `has_one` + * [Ecto.Query] Do not allow interpolated `nil` in literal keyword list when building query + * [Ecto.Query] Do not remove literals from combinations, otherwise UNION/INTERSECTION queries may not match the number of values in `select` + * [Ecto.Query] Do not attempt to merge at compile-time non-keyword lists given to `select_merge` + * [Ecto.Repo] Do not override `:through` associations on preload unless forcing + * [Ecto.Repo] Make sure prefix option cascades to combinations and recursive queries + * [Ecto.Schema] Use OS time without drift when generating timestamps + * [Ecto.Type] Allow any datetime in `datetime_add` + +## v3.1.7 (2019-06-27) + +### Bug fixes + + * [Ecto.Changeset] Make sure `put_assoc` with 
empty changeset propagates on insert + +## v3.1.6 (2019-06-19) + +### Enhancements + + * [Ecto.Repo] Add `:read_only` repositories + * [Ecto.Schema] Also validate options given to `:through` associations + +### Bug fixes + + * [Ecto.Changeset] Do not mark `put_assoc` from `[]` to `[]` or from `nil` to `nil` as change + * [Ecto.Query] Remove named binding when excluding joins + * [mix ecto.gen.repo] Use `:config_path` instead of hardcoding to `config/config.exs` + +## v3.1.5 (2019-06-06) + +### Enhancements + + * [Ecto.Repo] Allow `:default_dynamic_repo` option on `use Ecto.Repo` + * [Ecto.Schema] Support `{:fragment, ...}` in the `:where` option for associations + +### Bug fixes + + * [Ecto.Query] Fix handling of literals in combinators (union, except, intersection) + +## v3.1.4 (2019-05-07) + +### Bug fixes + + * [Ecto.Changeset] Convert validation enums to lists before adding them as validation metadata + * [Ecto.Schema] Properly propagate prefix to join_through source in many_to_many associations + +## v3.1.3 (2019-04-30) + +### Enhancements + + * [Ecto.Changeset] Expose the enum that was validated against in errors from enum-based validations + +## v3.1.2 (2019-04-24) + +### Enhancements + + * [Ecto.Query] Add support for `type+over` + * [Ecto.Schema] Allow schema fields to be excluded from queries + +### Bug fixes + + * [Ecto.Changeset] Do not list a field as changed if it is updated to its original value + * [Ecto.Query] Keep literal numbers and bitstring in subqueries and unions + * [Ecto.Query] Improve error message for invalid `type/2` expression + * [Ecto.Query] Properly count interpolations in `select_merge/2` + +## v3.1.1 (2019-04-04) + +### Bug fixes + + * [Ecto] Do not require Jason (i.e. it should continue to be an optional dependency) + * [Ecto.Repo] Make sure `many_to_many` and `Ecto.Multi` work with dynamic repos + +## v3.1.0 (2019-04-02) + +v3.1 requires Elixir v1.5+. 
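
The v3.2.0 entry above adds a one-arity variant of `Ecto.Repo.transaction/2` whose argument is the repository running the transaction, which composes naturally with the dynamic repositories introduced in v3.1.0 below. A hedged sketch, assuming hypothetical `MyApp.Repo`, `MyApp.Post`, and `MyApp.Comment` modules:

```elixir
MyApp.Repo.transaction(fn repo ->
  # `repo` is whichever repository (static or dynamic) is executing
  # this transaction, so the code works unchanged under both setups
  post = repo.insert!(%MyApp.Post{title: "hello"})
  repo.insert!(%MyApp.Comment{post_id: post.id, text: "first!"})
end)
```
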
+ +### Enhancements + + * [Ecto.Changeset] Add `not_equal_to` option for `validate_number` + * [Ecto.Query] Improve error message for missing `fragment` arguments + * [Ecto.Query] Improve error message on missing struct key for structs built in `select` + * [Ecto.Query] Allow dynamic named bindings + * [Ecto.Repo] Add dynamic repository support with `Ecto.Repo.put_dynamic_repo/1` and `Ecto.Repo.get_dynamic_repo/0` (experimental) + * [Ecto.Type] Cast naive_datetime/utc_datetime strings without seconds + +### Bug fixes + + * [Ecto.Changeset] Do not run `unsafe_validate_unique` query unless relevant fields were changed + * [Ecto.Changeset] Raise if an unknown field is given on `Ecto.Changeset.change/2` + * [Ecto.Changeset] Expose the type that was validated in errors generated by `validate_length/3` + * [Ecto.Query] Add support for `field/2` as first element of `type/2` and alias as second element of `type/2` + * [Ecto.Query] Do not attempt to assert types of named bindings that are not known at compile time + * [Ecto.Query] Properly cast boolean expressions in select + * [Mix.Ecto] Load applications during repo lookup so their app environment is available + +### Deprecations + + * [Ecto.LogEntry] Fully deprecate previously soft deprecated API + +## v3.0.7 (2019-02-06) + +### Bug fixes + + * [Ecto.Query] `reverse_order` reverses by primary key if no order is given + +## v3.0.6 (2018-12-31) + +### Enhancements + + * [Ecto.Query] Add `reverse_order/1` + +### Bug fixes + + * [Ecto.Multi] Raise better error message on accidental rollback inside `Ecto.Multi` + * [Ecto.Query] Properly merge deeply nested preloaded joins + * [Ecto.Query] Raise better error message on missing select on schemaless queries + * [Ecto.Schema] Fix parameter ordering in assoc `:where` + +## v3.0.5 (2018-12-08) + +### Backwards incompatible changes + + * [Ecto.Schema] The `:where` option added in Ecto 3.0.0 had a major flaw and it has been reworked in this version. This means a tuple of three elements can no longer be passed to `:where`, instead a keyword list must be given. Check the "Filtering associations" section in `has_many/3` docs for more information + +### Bug fixes + + * [Ecto.Query] Do not raise on lists of tuples that are not keywords. 
Instead, let custom Ecto.Type handle them + * [Ecto.Query] Allow `prefix: nil` to be given to subqueries + * [Ecto.Query] Use different cache keys for unions/intersections/excepts + * [Ecto.Repo] Fix support for upserts with `:replace` without a schema + * [Ecto.Type] Do not lose precision when casting `utc_datetime_usec` with a time zone different than Etc/UTC + +## v3.0.4 (2018-11-29) + +### Enhancements + + * [Decimal] Bump decimal dependency + * [Ecto.Repo] Remove unused `:pool_timeout` + +## v3.0.3 (2018-11-20) + +### Enhancements + + * [Ecto.Changeset] Add `count: :bytes` option in `validate_length/3` + * [Ecto.Query] Support passing `Ecto.Query` in `Ecto.Repo.insert_all` + +### Bug fixes + + * [Ecto.Type] Respect adapter types when loading/dumping arrays and maps + * [Ecto.Query] Ensure no bindings in order_by when using combinations in `Ecto.Query` + * [Ecto.Repo] Ensure adapter is compiled (instead of only loaded) before invoking it + * [Ecto.Repo] Support new style child spec from adapters + +## v3.0.2 (2018-11-17) + +### Bug fixes + + * [Ecto.LogEntry] Bring old Ecto.LogEntry APIs back for compatibility + * [Ecto.Repo] Consider non-joined fields when merging preloaded assocs only at root + * [Ecto.Repo] Take field sources into account in :replace_all_fields upsert option + * [Ecto.Type] Convert `:utc_datetime` to `DateTime` when sending it to adapters + +## v3.0.1 (2018-11-03) + +### Bug fixes + + * [Ecto.Query] Ensure parameter order is preserved when using more than 32 parameters + * [Ecto.Query] Consider query prefix when planning association joins + * [Ecto.Repo] Consider non-joined fields as unique parameters when merging preloaded query assocs + +## v3.0.0 (2018-10-29) + +Note this version includes changes from `ecto` and `ecto_sql` but in future releases all `ecto_sql` entries will be listed in their own CHANGELOG. + +### Enhancements + + * [Ecto.Adapters.MySQL] Add ability to specify cli_protocol for `ecto.create` and `ecto.drop` commands + * [Ecto.Adapters.PostgreSQL] Add ability to specify maintenance database name for PostgreSQL adapter for `ecto.create` and `ecto.drop` commands + * [Ecto.Changeset] Store constraint name in error metadata for constraints + * [Ecto.Changeset] Add `validations/1` and `constraints/1` instead of allowing direct access on the struct fields + * [Ecto.Changeset] Add `:force_update` option when casting relations, to force an update even if there are no changes + * [Ecto.Migration] Migrations now lock the migrations table in order to avoid concurrent migrations in a cluster. 
The type of lock can be configured via the `:migration_lock` repository configuration and defaults to "FOR UPDATE" or disabled if set to nil + * [Ecto.Migration] Add `:migration_default_prefix` repository configuration + * [Ecto.Migration] Add reversible version of `remove/2` subcommand + * [Ecto.Migration] Add support for non-empty arrays as defaults in migrations + * [Ecto.Migration] Add support for logging notices/alerts/warnings when running migrations (only supported by Postgres currently) + * [Ecto.Migrator] Warn when migrating and there is a higher version already migrated in the database + * [Ecto.Multi] Add support for anonymous functions in `insert/4`, `update/4`, `insert_or_update/4`, and `delete/4` + * [Ecto.Query] Support tuples in `where` and `having`, allowing queries such as `where: {p.foo, p.bar} > {^foo, ^bar}` + * [Ecto.Query] Support arithmetic operators in queries as a thin layer around the DB functionality + * [Ecto.Query] Allow joins in queries to be named via `:as` and allow named bindings + * [Ecto.Query] Support excluding specific join types in `exclude/2` + * [Ecto.Query] Allow virtual field update in subqueries + * [Ecto.Query] Support `coalesce/2` in queries, such as `select: coalesce(p.title, p.old_title)` + * [Ecto.Query] Support `filter/2` in queries, such as `select: filter(count(p.id), p.public == true)` + * [Ecto.Query] The `:prefix` and `:hints` options are now supported on both `from` and `join` expressions + * [Ecto.Query] Support `:asc_nulls_last`, `:asc_nulls_first`, `:desc_nulls_last`, and `:desc_nulls_first` in `order_by` + * [Ecto.Query] Allow variables (sources) to be given in queries, for example, useful for invoking functions, such as `fragment("some_function(?)", p)` + * [Ecto.Query] Add support for `union`, `union_all`, `intersection`, `intersection_all`, `except` and `except_all` + * [Ecto.Query] Add support for `windows` and `over` + * [Ecto.Query] Raise when comparing a string with a charlist during planning + * [Ecto.Repo] Only start transactions if an association or embed has changed, this reduces the overhead during repository operations + * [Ecto.Repo] Support `:replace_all_except_primary_key` as `:on_conflict` strategy + * [Ecto.Repo] Support `{:replace, fields}` as `:on_conflict` strategy + * [Ecto.Repo] Support `:unsafe_fragment` as `:conflict_target` + * [Ecto.Repo] Support `select` in queries given to `update_all` and `delete_all` + * [Ecto.Repo] Add `Repo.exists?/2` + * [Ecto.Repo] Add `Repo.checkout/2` - useful when performing multiple operations in short-time to interval, allowing the pool to be bypassed + * [Ecto.Repo] Add `:stale_error_field` to `Repo.insert/update/delete` that converts `Ecto.StaleEntryError` into a changeset error. 
The message can also be set with `:stale_error_message` + * [Ecto.Repo] Preloading now only sorts results by the relationship key instead of sorting by the whole struct + * [Ecto.Schema] Allow `:where` option to be given to `has_many`/`has_one`/`belongs_to`/`many_to_many` + +### Bug fixes + + * [Ecto.Inspect] Do not fail when inspecting query expressions which have a number of bindings more than bindings available + * [Ecto.Migration] Keep double underscores on autogenerated index names to be consistent with changesets + * [Ecto.Query] Fix `Ecto.Query.API.map/2` for single nil column with join + * [Ecto.Migration] Ensure `create_if_not_exists` is properly reversible + * [Ecto.Repo] Allow many_to_many associations to be preloaded via a function (before the behaviour was erratic) + * [Ecto.Schema] Make autogen ID loading work with custom type + * [Ecto.Schema] Make `updated_at` have the same value as `inserted_at` + * [Ecto.Schema] Ensure all fields are replaced with `on_conflict: :replace_all/:replace_all_except_primary_key` and not only the fields sent as changes + * [Ecto.Type] Return `:error` when casting NaN or infinite decimals + * [mix ecto.migrate] Properly run migrations after ECTO_EDITOR changes + * [mix ecto.migrations] List migrated versions even if the migration file is deleted + * [mix ecto.load] The task now fails on SQL errors on Postgres + +### Deprecations + +Although Ecto 3.0 is a major bump version, the functionality below emits deprecation warnings to ease the migration process. The functionality below will be removed in future Ecto 3.1+ releases. + + * [Ecto.Changeset] Passing a list of binaries to `cast/3` is deprecated, please pass a list of atoms instead + * [Ecto.Multi] `Ecto.Multi.run/3` now receives the repo in which the transaction is executing as the first argument to functions, and the changes so far as the second argument + * [Ecto.Query] `join/5` now expects `on: expr` as last argument instead of simply `expr`. This was done in order to properly support the `:as`, `:hints` and `:prefix` options + * [Ecto.Repo] The `:returning` option for `update_all` and `delete_all` has been deprecated as those statements now support `select` clauses + * [Ecto.Repo] Passing `:adapter` via config is deprecated in favor of passing it on `use Ecto.Repo` + * [Ecto.Repo] The `:loggers` configuration is deprecated in favor of "Telemetry Events" + +### Backwards incompatible changes + + * [Ecto.DateTime] `Ecto.Date`, `Ecto.Time` and `Ecto.DateTime` were previously deprecated and have now been removed + * [Ecto.DataType] `Ecto.DataType` protocol has been removed + * [Ecto.Migration] Automatically inferred index names may differ in Ecto v3.0 for indexes on complex column names + * [Ecto.Multi] `Ecto.Multi.run/5` now receives the repo in which the transaction is executing as the first argument to functions, and the changes so far as the second argument + * [Ecto.Query] A `join` no longer wraps `fragment` in parentheses. In some cases, such as common table expressions, you will have to explicitly wrap the fragment in parens. + * [Ecto.Repo] The `on_conflict: :replace_all` option now will also send fields with default values to the database. 
If you prefer the old behaviour that only sends the changes in the changeset, you can set it to `on_conflict: {:replace, Map.keys(changeset.changes)}` (this change is also listed as a bug fix) + * [Ecto.Repo] The repository operations are no longer called from association callbacks - this behaviour was not guaranteed in previous versions but we are listing as backwards incompatible changes to help with users relying on this behaviour + * [Ecto.Repo] `:pool_timeout` is no longer supported in favor of a new queue system described in `DBConnection.start_link/2` under "Queue config". For most users, configuring `:timeout` is enough, as it now includes both queue and query time + * [Ecto.Schema] `:time`, `:naive_datetime` and `:utc_datetime` no longer keep microseconds information. If you want to keep microseconds, use `:time_usec`, `:naive_datetime_usec`, `:utc_datetime_usec` + * [Ecto.Schema] The `@schema_prefix` option now only affects the `from`/`join` of where the schema is used and no longer the whole query + * [Ecto.Schema.Metadata] The `source` key no longer returns a tuple of the schema_prefix and the table/collection name. It now returns just the table/collection string. You can now access the schema_prefix via the `prefix` key. + * [Mix.Ecto] `Mix.Ecto.ensure_started/2` has been removed. However, in Ecto 2.2 the `Mix.Ecto` module was not considered part of the public API and should not have been used but we are listing this for guidance. + +### Adapter changes + + * [Ecto.Adapter] Split `Ecto.Adapter` into `Ecto.Adapter.Queryable` and `Ecto.Adapter.Schema` to provide more granular repository APIs + * [Ecto.Adapter] The `:sources` field in `query_meta` now contains three elements tuples with `{source, schema, prefix}` in order to support `from`/`join` prefixes (#2572) + * [Ecto.Adapter] The database types `time`, `utc_datetime` and `naive_datetime` should translate to types with seconds precision while the database types `time_usec`, `utc_datetime_usec` and `naive_datetime_usec` should have microseconds precision (#2291) + * [Ecto.Adapter] The `on_conflict` argument for `insert` and `insert_all` no longer receives a `{:replace_all, list(), atom()}` tuple. Instead, it receives a `{fields :: [atom()], list(), atom()}` where `fields` is a list of atoms of the fields to be replaced (#2181) + * [Ecto.Adapter] `insert`/`update`/`delete` now receive both `:source` and `:prefix` fields instead of a single `:source` field with both `source` and `prefix` in it (#2490) + * [Ecto.Adapter.Migration] A new `lock_for_migration/4` callback has been added. It is implemented by default by `Ecto.Adapters.SQL` (#2215) + * [Ecto.Adapter.Migration] The `execute_ddl` should now return `{:ok, []}` to make space for returning notices/hints/warnings in the future (adapters leveraging `Ecto.Adapters.SQL` do not have to perform any change) + * [Ecto.Query] The `from` field in `Ecto.Query` now returns a `Ecto.Query.FromExpr` with the `:source` field, unifying the behaviour in `from` and `join` expressions (#2497) + * [Ecto.Query] Tuple expressions are now supported in queries. For example, `where: {p.foo, p.bar} > {p.bar, p.baz}` should translate to `WHERE (p.foo, p.bar) > (p.bar, p.baz)` in SQL databases. 
Adapters should be changed to handle `{:{}, meta, exprs}` in the query AST (#2344) + * [Ecto.Query] Adapters should support the following arithmetic operators in queries `+`, `-`, `*` and `/` (#2400) + * [Ecto.Query] Adapters should support `filter/2` in queries, as in `select: filter(count(p.id), p.public == true)` (#2487) + +## Previous versions + + * See the CHANGELOG.md [in the v2.2 branch](https://github.com/elixir-ecto/ecto/blob/v2.2/CHANGELOG.md) diff --git a/deps/ecto/README.md b/deps/ecto/README.md new file mode 100644 index 0000000..d8b88ce --- /dev/null +++ b/deps/ecto/README.md @@ -0,0 +1,205 @@ +Ecto + +--- + +[![Build Status](https://github.com/elixir-ecto/ecto/workflows/CI/badge.svg)](https://github.com/elixir-ecto/ecto/actions) [![Hex.pm](https://img.shields.io/hexpm/v/ecto.svg)](https://hex.pm/packages/ecto) [![Documentation](https://img.shields.io/badge/documentation-gray)](https://hexdocs.pm/ecto/) + +## Installation + +Add `:ecto` to the list of dependencies in `mix.exs`: + +```elixir +def deps do + [ + {:ecto, "~> 3.8"} + ] +end +``` + +## About + +Ecto is a toolkit for data mapping and language integrated query for Elixir. Here is an example: + +```elixir +# In your config/config.exs file +config :my_app, ecto_repos: [Sample.Repo] + +config :my_app, Sample.Repo, + database: "ecto_simple", + username: "postgres", + password: "postgres", + hostname: "localhost", + port: "5432" + +# In your application code +defmodule Sample.Repo do + use Ecto.Repo, + otp_app: :my_app, + adapter: Ecto.Adapters.Postgres +end + +defmodule Sample.Weather do + use Ecto.Schema + + schema "weather" do + field :city # Defaults to type :string + field :temp_lo, :integer + field :temp_hi, :integer + field :prcp, :float, default: 0.0 + end +end + +defmodule Sample.App do + import Ecto.Query + alias Sample.{Weather, Repo} + + def keyword_query do + query = + from w in Weather, + where: w.prcp > 0 or is_nil(w.prcp), + select: w + + Repo.all(query) + end + + def pipe_query do + Weather + |> where(city: "Krakรณw") + |> order_by(:temp_lo) + |> limit(10) + |> Repo.all + end +end +``` + +Ecto is commonly used to interact with databases, such as Postgres and MySQL via [Ecto.Adapters.SQL](https://hexdocs.pm/ecto_sql) ([source code](https://github.com/elixir-ecto/ecto_sql)). Ecto is also commonly used to map data from any source into Elixir structs, whether they are backed by a database or not. + +See the [getting started guide](https://hexdocs.pm/ecto/getting-started.html) and the [online documentation](https://hexdocs.pm/ecto) for more information. Other resources available are: + + * [Programming Ecto](https://pragprog.com/book/wmecto/programming-ecto), by Darin Wilson and Eric Meadows-Jรถnsson, which guides you from fundamentals up to advanced concepts + + * [The Little Ecto Cookbook](https://dashbit.co/ebooks/the-little-ecto-cookbook), a free ebook by Dashbit, which is a curation of the existing Ecto guides with some extra contents + +## Usage + +You need to add both Ecto and the database adapter as a dependency to your `mix.exs` file. 
The supported databases and their adapters are: + +| Database | Ecto Adapter | Dependencies | +| :--------- | :--------------------- | :------------------------------------------------------------------------ | +| PostgreSQL | Ecto.Adapters.Postgres | [ecto_sql][ecto_sql] (requires Ecto v3.0+) + [postgrex][postgrex] | +| MySQL | Ecto.Adapters.MyXQL | [ecto_sql][ecto_sql] (requires Ecto v3.3+) + [myxql][myxql] | +| MSSQL | Ecto.Adapters.Tds | [ecto_sql][ecto_sql] (requires Ecto v3.4+) + [tds][tds] | +| SQLite3 | Ecto.Adapters.SQLite3 | [ecto_sql][ecto_sql] (requires Ecto v3.5+) + [ecto_sqlite3][ecto_sqlite3] | +| ETS ย  ย  | Etso ย  | [ecto][ecto] + [etso][etso] | + +[ecto]: https://github.com/elixir-ecto/ecto +[ecto_sql]: https://github.com/elixir-ecto/ecto_sql +[postgrex]: https://github.com/elixir-ecto/postgrex +[myxql]: https://github.com/elixir-ecto/myxql +[tds]: https://github.com/livehelpnow/tds +[ecto_sqlite3]: https://github.com/elixir-sqlite/ecto_sqlite3 +[etso]: https://github.com/evadne/etso + +For example, if you want to use PostgreSQL, add to your `mix.exs` file: + +```elixir +defp deps do + [ + {:ecto_sql, "~> 3.0"}, + {:postgrex, ">= 0.0.0"} + ] +end +``` + +Then run `mix deps.get` in your shell to fetch the dependencies. If you want to use another database, just choose the proper dependency from the table above. + +Finally, in the repository definition, you will need to specify the `adapter:` respective to the chosen dependency. For PostgreSQL it is: + +```elixir +defmodule MyApp.Repo do + use Ecto.Repo, + otp_app: :my_app, + adapter: Ecto.Adapters.Postgres, + ... +``` + +## Supported Versions + +| Branch | Support | +| ------ | ------------------------ | +| v3.7 | Bug fixes | +| v3.6 | Security patches only | +| v3.5 | Security patches only | +| v3.4 | Security patches only | +| v3.3 | Security patches only | +| v3.2 | Unsupported from 02/2022 | +| v3.1 | Unsupported from 02/2020 | +| v3.0 | Unsupported from 02/2020 | +| v2.2 | Unsupported from 02/2022 | +| v2.1 | Unsupported from 10/2018 | +| v2.0 | Unsupported from 08/2017 | +| v1.1 | Unsupported from 03/2018 | +| v1.0 | Unsupported from 05/2017 | + +With version 3.0, Ecto API has become stable. Our main focus is on providing +bug fixes and incremental changes. + +## Important links + + * [Documentation](https://hexdocs.pm/ecto) + * [Mailing list](https://groups.google.com/forum/#!forum/elixir-ecto) + * [Examples](https://github.com/elixir-ecto/ecto/tree/master/examples) + +## Running tests + +Clone the repo and fetch its dependencies: + + $ git clone https://github.com/elixir-ecto/ecto.git + $ cd ecto + $ mix deps.get + $ mix test + +Note that `mix test` does not run the tests in the `integration_test` folder. To run integration tests, you can clone `ecto_sql` in a sibling directory and then run its integration tests with the `ECTO_PATH` environment variable pointing to your Ecto checkout: + + $ cd .. 
+ $ git clone https://github.com/elixir-ecto/ecto_sql.git + $ cd ecto_sql + $ mix deps.get + $ ECTO_PATH=../ecto mix test.all + +### Running containerized tests + +It is also possible to run the integration tests under a containerized environment using [earthly](https://earthly.dev/get-earthly): + + $ earthly -P +all + +You can also use this to interactively debug any failing integration tests using: + + $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-21.3.8.21-alpine-3.13.1 +integration-test + +Then once you enter the containerized shell, you can inspect the underlying databases with the respective commands: + + PGPASSWORD=postgres psql -h 127.0.0.1 -U postgres -d postgres ecto_test + MYSQL_PASSWORD=root mysql -h 127.0.0.1 -uroot -proot ecto_test + sqlcmd -U sa -P 'some!Password' + +## Logo + +"Ecto" and the Ecto logo are Copyright (c) 2020 Dashbit. + +The Ecto logo was designed by [Dane Wesolko](https://www.danewesolko.com). + +## License + +Copyright (c) 2013 Plataformatec \ +Copyright (c) 2020 Dashbit + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at [https://www.apache.org/licenses/LICENSE-2.0](https://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/deps/ecto/hex_metadata.config b/deps/ecto/hex_metadata.config new file mode 100644 index 0000000..1f89237 --- /dev/null +++ b/deps/ecto/hex_metadata.config @@ -0,0 +1,71 @@ +{<<"app">>,<<"ecto">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>, + <<"A toolkit for data mapping and language integrated query for Elixir">>}. +{<<"elixir">>,<<"~> 1.10">>}. 
+{<<"files">>, + [<<".formatter.exs">>,<<"mix.exs">>,<<"README.md">>,<<"CHANGELOG.md">>, + <<"lib">>,<<"lib/ecto">>,<<"lib/ecto/adapter.ex">>,<<"lib/ecto/multi.ex">>, + <<"lib/ecto/queryable.ex">>,<<"lib/ecto/type.ex">>, + <<"lib/ecto/changeset.ex">>,<<"lib/ecto/parameterized_type.ex">>, + <<"lib/ecto/changeset">>,<<"lib/ecto/changeset/relation.ex">>, + <<"lib/ecto/adapter">>,<<"lib/ecto/adapter/transaction.ex">>, + <<"lib/ecto/adapter/queryable.ex">>,<<"lib/ecto/adapter/schema.ex">>, + <<"lib/ecto/adapter/storage.ex">>,<<"lib/ecto/query.ex">>, + <<"lib/ecto/enum.ex">>,<<"lib/ecto/schema">>, + <<"lib/ecto/schema/metadata.ex">>,<<"lib/ecto/schema/loader.ex">>, + <<"lib/ecto/uuid.ex">>,<<"lib/ecto/json.ex">>,<<"lib/ecto/embedded.ex">>, + <<"lib/ecto/schema.ex">>,<<"lib/ecto/association.ex">>, + <<"lib/ecto/application.ex">>,<<"lib/ecto/query">>, + <<"lib/ecto/query/planner.ex">>,<<"lib/ecto/query/window_api.ex">>, + <<"lib/ecto/query/api.ex">>,<<"lib/ecto/query/inspect.ex">>, + <<"lib/ecto/query/builder.ex">>,<<"lib/ecto/query/builder">>, + <<"lib/ecto/query/builder/preload.ex">>, + <<"lib/ecto/query/builder/group_by.ex">>, + <<"lib/ecto/query/builder/join.ex">>,<<"lib/ecto/query/builder/update.ex">>, + <<"lib/ecto/query/builder/distinct.ex">>, + <<"lib/ecto/query/builder/limit_offset.ex">>, + <<"lib/ecto/query/builder/select.ex">>, + <<"lib/ecto/query/builder/windows.ex">>, + <<"lib/ecto/query/builder/from.ex">>,<<"lib/ecto/query/builder/cte.ex">>, + <<"lib/ecto/query/builder/combination.ex">>, + <<"lib/ecto/query/builder/dynamic.ex">>, + <<"lib/ecto/query/builder/order_by.ex">>, + <<"lib/ecto/query/builder/lock.ex">>,<<"lib/ecto/query/builder/filter.ex">>, + <<"lib/ecto/repo.ex">>,<<"lib/ecto/repo">>, + <<"lib/ecto/repo/transaction.ex">>,<<"lib/ecto/repo/supervisor.ex">>, + <<"lib/ecto/repo/registry.ex">>,<<"lib/ecto/repo/queryable.ex">>, + <<"lib/ecto/repo/preloader.ex">>,<<"lib/ecto/repo/schema.ex">>, + <<"lib/ecto/repo/assoc.ex">>,<<"lib/ecto/exceptions.ex">>,<<"lib/mix">>, + <<"lib/mix/tasks">>,<<"lib/mix/tasks/ecto.create.ex">>, + <<"lib/mix/tasks/ecto.ex">>,<<"lib/mix/tasks/ecto.gen.repo.ex">>, + <<"lib/mix/tasks/ecto.drop.ex">>,<<"lib/mix/ecto.ex">>,<<"lib/ecto.ex">>, + <<"integration_test/cases">>,<<"integration_test/cases/type.exs">>, + <<"integration_test/cases/interval.exs">>, + <<"integration_test/cases/preload.exs">>, + <<"integration_test/cases/assoc.exs">>, + <<"integration_test/cases/joins.exs">>, + <<"integration_test/cases/windows.exs">>, + <<"integration_test/cases/repo.exs">>,<<"integration_test/support">>, + <<"integration_test/support/schemas.exs">>, + <<"integration_test/support/types.exs">>]}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"links">>,[{<<"GitHub">>,<<"https://github.com/elixir-ecto/ecto">>}]}. +{<<"name">>,<<"ecto">>}. +{<<"requirements">>, + [[{<<"app">>,<<"telemetry">>}, + {<<"name">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 0.4 or ~> 1.0">>}], + [{<<"app">>,<<"decimal">>}, + {<<"name">>,<<"decimal">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 1.6 or ~> 2.0">>}], + [{<<"app">>,<<"jason">>}, + {<<"name">>,<<"jason">>}, + {<<"optional">>,true}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 1.0">>}]]}. +{<<"version">>,<<"3.8.4">>}. 
diff --git a/deps/ecto/integration_test/cases/assoc.exs b/deps/ecto/integration_test/cases/assoc.exs new file mode 100644 index 0000000..fe2eeb1 --- /dev/null +++ b/deps/ecto/integration_test/cases/assoc.exs @@ -0,0 +1,865 @@ +defmodule Ecto.Integration.AssocTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + import Ecto.Query + + alias Ecto.Integration.Custom + alias Ecto.Integration.Post + alias Ecto.Integration.User + alias Ecto.Integration.PostUser + alias Ecto.Integration.Comment + alias Ecto.Integration.Permalink + + test "has_many assoc" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + [c1, c2] = TestRepo.all Ecto.assoc(p1, :comments) + assert c1.id == cid1 + assert c2.id == cid2 + + [c1, c2, c3] = TestRepo.all Ecto.assoc([p1, p2], :comments) + assert c1.id == cid1 + assert c2.id == cid2 + assert c3.id == cid3 + end + + test "has_one assoc" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + %Permalink{id: lid1} = TestRepo.insert!(%Permalink{url: "1", post_id: p1.id}) + %Permalink{} = TestRepo.insert!(%Permalink{url: "2"}) + %Permalink{id: lid3} = TestRepo.insert!(%Permalink{url: "3", post_id: p2.id}) + + [l1, l3] = TestRepo.all Ecto.assoc([p1, p2], :permalink) + assert l1.id == lid1 + assert l3.id == lid3 + end + + test "belongs_to assoc" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + + l1 = TestRepo.insert!(%Permalink{url: "1", post_id: pid1}) + l2 = TestRepo.insert!(%Permalink{url: "2"}) + l3 = TestRepo.insert!(%Permalink{url: "3", post_id: pid2}) + + assert [p1, p2] = TestRepo.all Ecto.assoc([l1, l2, l3], :post) + assert p1.id == pid1 + assert p2.id == pid2 + end + + test "has_many through assoc" do + p1 = TestRepo.insert!(%Post{}) + p2 = TestRepo.insert!(%Post{}) + + u1 = TestRepo.insert!(%User{name: "zzz"}) + u2 = TestRepo.insert!(%User{name: "aaa"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u2.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p2.id, author_id: u2.id}) + + query = Ecto.assoc([p1, p2], :comments_authors) |> order_by([a], a.name) + assert [^u2, ^u1] = TestRepo.all(query) + + # Dynamic through + query = Ecto.assoc([p1, p2], [:comments, :author]) |> order_by([a], a.name) + assert [^u2, ^u1] = TestRepo.all(query) + end + + @tag :on_replace_nilify + test "has_many through-through assoc leading" do + p1 = TestRepo.insert!(%Post{}) + p2 = TestRepo.insert!(%Post{}) + + u1 = TestRepo.insert!(%User{}) + u2 = TestRepo.insert!(%User{}) + + pl1 = TestRepo.insert!(%Permalink{user_id: u1.id, url: "zzz"}) + pl2 = TestRepo.insert!(%Permalink{user_id: u2.id, url: "aaa"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u2.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p2.id, author_id: u2.id}) + + query = Ecto.assoc([p1, p2], 
:comments_authors_permalinks) |> order_by([p], p.url) + assert [^pl2, ^pl1] = TestRepo.all(query) + + # Dynamic through + query = Ecto.assoc([p1, p2], [:comments, :author, :permalink]) |> order_by([p], p.url) + assert [^pl2, ^pl1] = TestRepo.all(query) + end + + test "has_many through-through assoc trailing" do + p1 = TestRepo.insert!(%Post{}) + u1 = TestRepo.insert!(%User{}) + pl1 = TestRepo.insert!(%Permalink{user_id: u1.id, post_id: p1.id}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + + query = Ecto.assoc([pl1], :post_comments_authors) + assert [^u1] = TestRepo.all(query) + + # Dynamic through + query = Ecto.assoc([pl1], [:post, :comments, :author]) + assert [^u1] = TestRepo.all(query) + end + + test "has_many through has_many, many_to_many and has_many" do + user1 = %User{id: uid1} = TestRepo.insert!(%User{name: "Gabriel"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "Isadora"}) + %User{id: uid3} = TestRepo.insert!(%User{name: "Joey Mush"}) + + p1 = TestRepo.insert!(%Post{title: "p1", author_id: uid1}) + p2 = TestRepo.insert!(%Post{title: "p2", author_id: uid2}) + p3 = TestRepo.insert!(%Post{title: "p3", author_id: uid2}) + TestRepo.insert!(%Post{title: "p4", author_id: uid3}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: uid1], + [post_id: p1.id, user_id: uid2], + [post_id: p2.id, user_id: uid3]] + + [pid1, pid2, pid3] = + Ecto.assoc(user1, :related_2nd_order_posts) + |> TestRepo.all() + |> Enum.map(fn %Post{id: id} -> id end) + |> Enum.sort() + + assert p1.id == pid1 + assert p2.id == pid2 + assert p3.id == pid3 + end + + test "has_many through has_many, belongs_to and a nested has through" do + user1 = TestRepo.insert!(%User{name: "Gabriel"}) + user2 = TestRepo.insert!(%User{name: "Isadora"}) + user3 = TestRepo.insert!(%User{name: "Joey"}) + + post1 = TestRepo.insert!(%Post{title: "p1"}) + post2 = TestRepo.insert!(%Post{title: "p2"}) + + TestRepo.insert!(%Comment{author_id: user1.id, text: "c1", post_id: post1.id}) + TestRepo.insert!(%Comment{author_id: user2.id, text: "c2", post_id: post1.id}) + TestRepo.insert!(%Comment{author_id: user3.id, text: "c3", post_id: post2.id}) + + [u1_id, u2_id] = + Ecto.assoc(user1, :co_commenters) + |> TestRepo.all() + |> Enum.map(fn %User{id: id} -> id end) + |> Enum.sort() + + assert u1_id == user1.id + assert u2_id == user2.id + end + + test "has_many through two many_to_many associations" do + user1 = %User{id: uid1} = TestRepo.insert!(%User{name: "Gabriel"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "Isadora"}) + %User{id: uid3} = TestRepo.insert!(%User{name: "Joey Mush"}) + + p1 = TestRepo.insert!(%Post{title: "p1", author_id: uid1}) + TestRepo.insert!(%Post{title: "p2", author_id: uid2}) + p3 = TestRepo.insert!(%Post{title: "p3", author_id: uid2}) + p4 = TestRepo.insert!(%Post{title: "p4", author_id: uid3}) + + TestRepo.insert_all "posts_users", [[post_id: p3.id, user_id: uid1], + [post_id: p3.id, user_id: uid2], + [post_id: p1.id, user_id: uid3]] + + TestRepo.insert!(%PostUser{post_id: p1.id, user_id: uid2}) + TestRepo.insert!(%PostUser{post_id: p3.id, user_id: uid1}) + TestRepo.insert!(%PostUser{post_id: p3.id, user_id: uid2}) + TestRepo.insert!(%PostUser{post_id: p4.id, user_id: uid3}) + + [u1, u2] = + Ecto.assoc(user1, :users_through_schema_posts) + |> TestRepo.all() + |> Enum.map(fn %User{id: id} -> id end) + |> Enum.sort() + + assert uid1 == u1 + assert uid2 == u2 + end + + test "has_many through with where" do + post1 = TestRepo.insert!(%Post{title: "p1"}) + post2 = 
TestRepo.insert!(%Post{title: "p2"}) + post3 = TestRepo.insert!(%Post{title: "p3"}) + + author = TestRepo.insert!(%User{name: "john"}) + + TestRepo.insert!(%Comment{text: "1", lock_version: 1, post_id: post1.id, author_id: author.id}) + TestRepo.insert!(%Comment{text: "2", lock_version: 2, post_id: post2.id, author_id: author.id}) + TestRepo.insert!(%Comment{text: "3", lock_version: 2, post_id: post3.id, author_id: author.id}) + + [p2, p3] = Ecto.assoc(author, :v2_comments_posts) |> TestRepo.all() |> Enum.sort_by(&(&1.id)) + assert p2.id == post2.id + assert p3.id == post3.id + end + + test "many_to_many assoc" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "john"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "mary"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: uid1], + [post_id: p1.id, user_id: uid2], + [post_id: p2.id, user_id: uid2]] + + [u1, u2] = TestRepo.all Ecto.assoc([p1], :users) + assert u1.id == uid1 + assert u2.id == uid2 + + [u2] = TestRepo.all Ecto.assoc([p2], :users) + assert u2.id == uid2 + [] = TestRepo.all Ecto.assoc([p3], :users) + + [u1, u2, u2] = TestRepo.all Ecto.assoc([p1, p2, p3], :users) + assert u1.id == uid1 + assert u2.id == uid2 + end + + ## Changesets + + test "has_one changeset assoc (on_replace: :delete)" do + # Insert new + changeset = + %Post{title: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, %Permalink{url: "1"}) + post = TestRepo.insert!(changeset) + assert post.permalink.id + assert post.permalink.post_id == post.id + assert post.permalink.url == "1" + post = TestRepo.get!(from(Post, preload: [:permalink]), post.id) + assert post.permalink.url == "1" + + # Replace with new + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, %Permalink{url: "2"}) + post = TestRepo.update!(changeset) + assert post.permalink.id + assert post.permalink.post_id == post.id + assert post.permalink.url == "2" + post = TestRepo.get!(from(Post, preload: [:permalink]), post.id) + assert post.permalink.url == "2" + + # Replacing with existing + existing = TestRepo.insert!(%Permalink{url: "3"}) + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, existing) + post = TestRepo.update!(changeset) + assert post.permalink.id + assert post.permalink.post_id == post.id + assert post.permalink.url == "3" + post = TestRepo.get!(from(Post, preload: [:permalink]), post.id) + assert post.permalink.url == "3" + + # Replacing with nil (on_replace: :delete) + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, nil) + post = TestRepo.update!(changeset) + refute post.permalink + post = TestRepo.get!(from(Post, preload: [:permalink]), post.id) + refute post.permalink + + assert [0] == TestRepo.all(from(p in Permalink, select: count(p.id))) + end + + test "has_one changeset assoc (on_replace: :delete_if_exists)" do + permalink = TestRepo.insert!(%Permalink{url: "1"}) + post = TestRepo.insert!(%Post{title: "1", permalink: permalink, force_permalink: permalink}) + TestRepo.delete!(permalink) + + assert_raise Ecto.StaleEntryError, fn -> + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:permalink, nil) + |> TestRepo.update!() + end + + post = + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:force_permalink, nil) + |> TestRepo.update!() + + assert post.force_permalink == nil 
+ end + + @tag :on_replace_nilify + test "has_one changeset assoc (on_replace: :nilify)" do + # Insert new + changeset = + %User{name: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, %Permalink{url: "1"}) + user = TestRepo.insert!(changeset) + assert user.permalink.id + assert user.permalink.user_id == user.id + assert user.permalink.url == "1" + user = TestRepo.get!(from(User, preload: [:permalink]), user.id) + assert user.permalink.url == "1" + + # Replace with new + changeset = + user + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, %Permalink{url: "2"}) + user = TestRepo.update!(changeset) + assert user.permalink.id + assert user.permalink.user_id == user.id + assert user.permalink.url == "2" + user = TestRepo.get!(from(User, preload: [:permalink]), user.id) + assert user.permalink.url == "2" + + # Replacing with nil (on_replace: :nilify) + changeset = + user + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:permalink, nil) + user = TestRepo.update!(changeset) + refute user.permalink + user = TestRepo.get!(from(User, preload: [:permalink]), user.id) + refute user.permalink + + assert [2] == TestRepo.all(from(p in Permalink, select: count(p.id))) + end + + @tag :on_replace_update + test "has_one changeset assoc (on_replace: :update)" do + # Insert new + changeset = + %Post{title: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:update_permalink, %Permalink{url: "1"}) + post = TestRepo.insert!(changeset) + assert post.update_permalink.id + assert post.update_permalink.post_id == post.id + assert post.update_permalink.url == "1" + post = TestRepo.get!(from(Post, preload: [:update_permalink]), post.id) + assert post.update_permalink.url == "1" + + perma = post.update_permalink + + # Put on update + changeset = + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:update_permalink, %{url: "2"}) + post = TestRepo.update!(changeset) + assert post.update_permalink.id == perma.id + assert post.update_permalink.post_id == post.id + assert post.update_permalink.url == "2" + post = TestRepo.get!(from(Post, preload: [:update_permalink]), post.id) + assert post.update_permalink.url == "2" + + # Cast on update + changeset = + post + |> Ecto.Changeset.cast(%{update_permalink: %{url: "3"}}, []) + |> Ecto.Changeset.cast_assoc(:update_permalink) + post = TestRepo.update!(changeset) + assert post.update_permalink.id == perma.id + assert post.update_permalink.post_id == post.id + assert post.update_permalink.url == "3" + post = TestRepo.get!(from(Post, preload: [:update_permalink]), post.id) + assert post.update_permalink.url == "3" + + # Replace with new struct + assert_raise RuntimeError, ~r"you are only allowed\sto update the existing entry", fn -> + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:update_permalink, %Permalink{url: "4"}) + end + + # Replace with existing struct + assert_raise RuntimeError, ~r"you are only allowed\sto update the existing entry", fn -> + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:update_permalink, TestRepo.insert!(%Permalink{url: "5"})) + end + + # Replacing with nil (on_replace: :update) + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:update_permalink, nil) + post = TestRepo.update!(changeset) + refute post.update_permalink + post = TestRepo.get!(from(Post, preload: [:update_permalink]), post.id) + refute post.update_permalink + + assert [2] == TestRepo.all(from(p in Permalink, select: count(p.id))) + end + + test "has_many 
changeset assoc (on_replace: :delete)" do + c1 = TestRepo.insert! %Comment{text: "1"} + c2 = %Comment{text: "2"} + + # Inserting + changeset = + %Post{title: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, [c2]) + post = TestRepo.insert!(changeset) + [c2] = post.comments + assert c2.id + assert c2.post_id == post.id + post = TestRepo.get!(from(Post, preload: [:comments]), post.id) + [c2] = post.comments + assert c2.text == "2" + + # Updating + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, [Ecto.Changeset.change(c1, text: "11"), + Ecto.Changeset.change(c2, text: "22")]) + post = TestRepo.update!(changeset) + [c1, _c2] = post.comments |> Enum.sort_by(&(&1.id)) + assert c1.id + assert c1.post_id == post.id + post = TestRepo.get!(from(Post, preload: [:comments]), post.id) + [c1, c2] = post.comments |> Enum.sort_by(&(&1.id)) + assert c1.text == "11" + assert c2.text == "22" + + # Replacing (on_replace: :delete) + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, []) + post = TestRepo.update!(changeset) + assert post.comments == [] + post = TestRepo.get!(from(Post, preload: [:comments]), post.id) + assert post.comments == [] + + assert [0] == TestRepo.all(from(c in Comment, select: count(c.id))) + end + + test "has_many changeset assoc (on_replace: :delete_if_exists)" do + comment = TestRepo.insert!(%Comment{text: "1"}) + post = TestRepo.insert!(%Post{title: "1", comments: [comment], force_comments: [comment]}) + + TestRepo.delete!(comment) + + assert_raise Ecto.StaleEntryError, fn -> + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:comments, []) + |> TestRepo.update!() + end + + post = + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:force_comments, []) + |> TestRepo.update!() + + assert post.force_comments == [] + end + + test "has_many changeset assoc (on_replace: :nilify)" do + c1 = TestRepo.insert! %Comment{text: "1"} + c2 = %Comment{text: "2"} + + # Inserting + changeset = + %User{name: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, [c1, c2]) + user = TestRepo.insert!(changeset) + [c1, c2] = user.comments + assert c1.id + assert c1.author_id == user.id + assert c2.id + assert c2.author_id == user.id + user = TestRepo.get!(from(User, preload: [:comments]), user.id) + [c1, c2] = user.comments + assert c1.text == "1" + assert c2.text == "2" + + # Replacing (on_replace: :nilify) + changeset = + user + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, []) + user = TestRepo.update!(changeset) + assert user.comments == [] + user = TestRepo.get!(from(User, preload: [:comments]), user.id) + assert user.comments == [] + + assert [2] == TestRepo.all(from(c in Comment, select: count(c.id))) + end + + test "many_to_many changeset assoc" do + u1 = TestRepo.insert! 
%User{name: "1"} + u2 = %User{name: "2"} + + # Inserting + changeset = + %Post{title: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:users, [u2]) + post = TestRepo.insert!(changeset) + [u2] = post.users + assert u2.id + post = TestRepo.get!(from(Post, preload: [:users]), post.id) + [u2] = post.users + assert u2.name == "2" + + assert [1] == TestRepo.all(from(j in "posts_users", select: count(j.post_id))) + + # Updating + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:users, [Ecto.Changeset.change(u1, name: "11"), + Ecto.Changeset.change(u2, name: "22")]) + post = TestRepo.update!(changeset) + [u1, _u2] = post.users |> Enum.sort_by(&(&1.id)) + assert u1.id + post = TestRepo.get!(from(Post, preload: [:users]), post.id) + [u1, u2] = post.users |> Enum.sort_by(&(&1.id)) + assert u1.name == "11" + assert u2.name == "22" + + assert [2] == TestRepo.all(from(j in "posts_users", select: count(j.post_id))) + + # Replacing (on_replace: :delete) + changeset = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:users, []) + post = TestRepo.update!(changeset) + assert post.users == [] + post = TestRepo.get!(from(Post, preload: [:users]), post.id) + assert post.users == [] + + assert [0] == TestRepo.all(from(j in "posts_users", select: count(j.post_id))) + assert [2] == TestRepo.all(from(c in User, select: count(c.id))) + end + + test "many_to_many changeset assoc with schema" do + p1 = TestRepo.insert! %Post{title: "1"} + p2 = %Post{title: "2"} + + # Inserting + changeset = + %User{name: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:schema_posts, [p2]) + user = TestRepo.insert!(changeset) + [p2] = user.schema_posts + assert p2.id + user = TestRepo.get!(from(User, preload: [:schema_posts]), user.id) + [p2] = user.schema_posts + assert p2.title == "2" + + [up2] = TestRepo.all(PostUser) |> Enum.sort_by(&(&1.id)) + assert up2.post_id == p2.id + assert up2.user_id == user.id + assert up2.inserted_at + assert up2.updated_at + + # Updating + changeset = + user + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:schema_posts, [Ecto.Changeset.change(p1, title: "11"), + Ecto.Changeset.change(p2, title: "22")]) + user = TestRepo.update!(changeset) + [p1, _p2] = user.schema_posts |> Enum.sort_by(&(&1.id)) + assert p1.id + user = TestRepo.get!(from(User, preload: [:schema_posts]), user.id) + [p1, p2] = user.schema_posts |> Enum.sort_by(&(&1.id)) + assert p1.title == "11" + assert p2.title == "22" + + [_up2, up1] = TestRepo.all(PostUser) |> Enum.sort_by(&(&1.id)) + assert up1.post_id == p1.id + assert up1.user_id == user.id + assert up1.inserted_at + assert up1.updated_at + end + + test "many_to_many changeset assoc with self-referential binary_id" do + assoc_custom = TestRepo.insert!(%Custom{uuid: Ecto.UUID.generate()}) + custom = TestRepo.insert!(%Custom{customs: [assoc_custom]}) + + custom = Custom |> TestRepo.get!(custom.bid) |> TestRepo.preload(:customs) + assert [_] = custom.customs + + custom = + custom + |> Ecto.Changeset.change(%{}) + |> Ecto.Changeset.put_assoc(:customs, []) + |> TestRepo.update! 
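+ # Replacing a many_to_many association with [] only deletes the join-table rows; the previously associated Custom records themselves are not removed.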
+ assert [] = custom.customs + + custom = Custom |> TestRepo.get!(custom.bid) |> TestRepo.preload(:customs) + assert [] = custom.customs + end + + @tag :unique_constraint + test "has_many changeset assoc with constraints" do + author = TestRepo.insert!(%User{name: "john doe"}) + p1 = TestRepo.insert!(%Post{title: "hello", author_id: author.id}) + TestRepo.insert!(%Post{title: "world", author_id: author.id}) + + # Asserts that `unique_constraint` for `uuid` exists + assert_raise Ecto.ConstraintError, fn -> + TestRepo.insert!(%Post{title: "another", author_id: author.id, uuid: p1.uuid}) + end + + author = TestRepo.preload author, [:posts] + posts_params = Enum.map author.posts, fn %Post{uuid: u} -> + %{uuid: u, title: "fresh"} + end + + # This will only work if we delete before performing inserts + changeset = + author + |> Ecto.Changeset.cast(%{"posts" => posts_params}, ~w()) + |> Ecto.Changeset.cast_assoc(:posts) + author = TestRepo.update! changeset + assert Enum.map(author.posts, &(&1.title)) == ["fresh", "fresh"] + end + + test "belongs_to changeset assoc" do + # Insert new + changeset = + %Permalink{url: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:post, %Post{title: "1"}) + perma = TestRepo.insert!(changeset) + post = perma.post + assert perma.post_id + assert perma.post_id == post.id + assert perma.post.title == "1" + + # Replace with new + changeset = + perma + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:post, %Post{title: "2"}) + perma = TestRepo.update!(changeset) + assert perma.post.id != post.id + post = perma.post + assert perma.post_id + assert perma.post_id == post.id + assert perma.post.title == "2" + + # Replace with existing + existing = TestRepo.insert!(%Post{title: "3"}) + changeset = + perma + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:post, existing) + perma = TestRepo.update!(changeset) + post = perma.post + assert perma.post_id == post.id + assert perma.post_id == existing.id + assert perma.post.title == "3" + + # Replace with nil + changeset = + perma + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:post, nil) + perma = TestRepo.update!(changeset) + assert perma.post == nil + assert perma.post_id == nil + end + + test "belongs_to changeset assoc (on_replace: :update)" do + # Insert new + changeset = + %Permalink{url: "1"} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:update_post, %Post{title: "1"}) + perma = TestRepo.insert!(changeset) + post = perma.update_post + assert perma.post_id + assert perma.post_id == post.id + assert perma.update_post.title == "1" + + # Casting on update + changeset = + perma + |> Ecto.Changeset.cast(%{update_post: %{title: "2"}}, []) + |> Ecto.Changeset.cast_assoc(:update_post) + perma = TestRepo.update!(changeset) + assert perma.update_post.id == post.id + post = perma.update_post + assert perma.post_id + assert perma.post_id == post.id + assert perma.update_post.title == "2" + + # Replace with nil + changeset = + perma + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:update_post, nil) + perma = TestRepo.update!(changeset) + assert perma.update_post == nil + assert perma.post_id == nil + end + + test "inserting struct with associations" do + tree = %Permalink{ + url: "root", + post: %Post{ + title: "belongs_to", + comments: [ + %Comment{text: "child 1"}, + %Comment{text: "child 2"}, + ] + } + } + + tree = TestRepo.insert!(tree) + assert tree.id + assert tree.post.id + assert length(tree.post.comments) == 2 + assert Enum.all?(tree.post.comments, & &1.id) + + tree = 
TestRepo.get!(from(Permalink, preload: [post: :comments]), tree.id) + assert tree.id + assert tree.post.id + assert length(tree.post.comments) == 2 + assert Enum.all?(tree.post.comments, & &1.id) + end + + test "inserting struct with empty associations" do + permalink = TestRepo.insert!(%Permalink{url: "root", post: nil}) + assert permalink.post == nil + + post = TestRepo.insert!(%Post{title: "empty", comments: []}) + assert post.comments == [] + end + + test "inserting changeset with empty cast associations" do + changeset = + %Permalink{} + |> Ecto.Changeset.cast(%{url: "root", post: nil}, [:url]) + |> Ecto.Changeset.cast_assoc(:post) + permalink = TestRepo.insert!(changeset) + assert permalink.post == nil + + changeset = + %Post{} + |> Ecto.Changeset.cast(%{title: "root", comments: []}, [:title]) + |> Ecto.Changeset.cast_assoc(:comments) + post = TestRepo.insert!(changeset) + assert post.comments == [] + end + + test "inserting changeset with empty put associations" do + changeset = + %Permalink{} + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:post, nil) + permalink = TestRepo.insert!(changeset) + assert permalink.post == nil + + changeset = + %Post{} + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:comments, []) + post = TestRepo.insert!(changeset) + assert post.comments == [] + end + + test "updating changeset with empty cast associations" do + post = TestRepo.insert!(%Post{}) + c1 = TestRepo.insert!(%Comment{post_id: post.id}) + c2 = TestRepo.insert!(%Comment{post_id: post.id}) + + assert TestRepo.all(Comment) == [c1, c2] + + post = TestRepo.get!(from(Post, preload: [:comments]), post.id) + + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:comments, []) + |> TestRepo.update!() + + assert TestRepo.all(Comment) == [] + end + + ## Dependent + + test "has_many assoc on delete deletes all" do + post = TestRepo.insert!(%Post{}) + TestRepo.insert!(%Comment{post_id: post.id}) + TestRepo.insert!(%Comment{post_id: post.id}) + TestRepo.delete!(post) + + assert TestRepo.all(Comment) == [] + refute Process.get(Comment) + end + + test "has_many assoc on delete nilifies all" do + user = TestRepo.insert!(%User{}) + TestRepo.insert!(%Comment{author_id: user.id}) + TestRepo.insert!(%Comment{author_id: user.id}) + TestRepo.delete!(user) + + author_ids = Comment |> TestRepo.all() |> Enum.map(fn(comment) -> comment.author_id end) + + assert author_ids == [nil, nil] + refute Process.get(Comment) + end + + test "has_many assoc on delete does nothing" do + user = TestRepo.insert!(%User{}) + TestRepo.insert!(%Post{author_id: user.id}) + + TestRepo.delete!(user) + assert Enum.count(TestRepo.all(Post)) == 1 + end + + test "many_to_many assoc on delete deletes all" do + p1 = TestRepo.insert!(%Post{title: "1", visits: 1}) + p2 = TestRepo.insert!(%Post{title: "2", visits: 2}) + + u1 = TestRepo.insert!(%User{name: "john"}) + u2 = TestRepo.insert!(%User{name: "mary"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: u1.id], + [post_id: p1.id, user_id: u1.id], + [post_id: p2.id, user_id: u2.id]] + TestRepo.delete!(p1) + + [pid2] = TestRepo.all from(p in Post, select: p.id) + assert pid2 == p2.id + + [[pid2, uid2]] = TestRepo.all from(j in "posts_users", select: [j.post_id, j.user_id]) + assert pid2 == p2.id + assert uid2 == u2.id + + [uid1, uid2] = TestRepo.all from(u in User, select: u.id) + assert uid1 == u1.id + assert uid2 == u2.id + end +end diff --git a/deps/ecto/integration_test/cases/interval.exs b/deps/ecto/integration_test/cases/interval.exs new file 
mode 100644 index 0000000..0954bc9 --- /dev/null +++ b/deps/ecto/integration_test/cases/interval.exs @@ -0,0 +1,419 @@ +defmodule Ecto.Integration.IntervalTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.{Post, User, Usec} + alias Ecto.Integration.TestRepo + import Ecto.Query + + @posted ~D[2014-01-01] + @inserted_at ~N[2014-01-01 02:00:00] + + setup do + TestRepo.insert!(%Post{posted: @posted, inserted_at: @inserted_at}) + :ok + end + + test "date_add with year" do + dec = Decimal.new(1) + assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 1, "year")) + assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 1.0, "year")) + assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^1, "year")) + assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^1.0, "year")) + assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "year")) + end + + test "date_add with month" do + dec = Decimal.new(3) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3, "month")) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3.0, "month")) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3, "month")) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3.0, "month")) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "month")) + end + + test "date_add with week" do + dec = Decimal.new(3) + assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3, "week")) + assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, 3.0, "week")) + assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3, "week")) + assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^3.0, "week")) + assert [~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "week")) + end + + test "date_add with day" do + dec = Decimal.new(5) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, 5, "day")) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, 5.0, "day")) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^5, "day")) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^5.0, "day")) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "day")) + end + + test "date_add with hour" do + dec = Decimal.new(48) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, 48, "hour")) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, 48.0, "hour")) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^48, "hour")) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^48.0, "hour")) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "hour")) + end + + test "date_add with dynamic" do + posted = @posted + assert [~D[2015-01-01]] = TestRepo.all(from p in Post, select: date_add(^posted, ^1, ^"year")) + assert [~D[2014-04-01]] = TestRepo.all(from p in Post, select: date_add(^posted, ^3, ^"month")) + assert 
[~D[2014-01-22]] = TestRepo.all(from p in Post, select: date_add(^posted, ^3, ^"week")) + assert [~D[2014-01-06]] = TestRepo.all(from p in Post, select: date_add(^posted, ^5, ^"day")) + assert [~D[2014-01-03]] = TestRepo.all(from p in Post, select: date_add(^posted, ^48, ^"hour")) + end + + test "date_add with negative interval" do + dec = Decimal.new(-1) + assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, -1, "year")) + assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, -1.0, "year")) + assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^-1, "year")) + assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^-1.0, "year")) + assert [~D[2013-01-01]] = TestRepo.all(from p in Post, select: date_add(p.posted, ^dec, "year")) + end + + test "datetime_add with year" do + dec = Decimal.new(1) + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1, "year")) + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1.0, "year")) + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1, "year")) + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1.0, "year")) + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "year")) + end + + test "datetime_add with month" do + dec = Decimal.new(3) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3, "month")) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3.0, "month")) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3, "month")) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3.0, "month")) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "month")) + end + + test "datetime_add with week" do + dec = Decimal.new(3) + assert [~N[2014-01-22 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3, "week")) + assert [~N[2014-01-22 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 3.0, "week")) + assert [~N[2014-01-22 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3, "week")) + assert [~N[2014-01-22 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^3.0, "week")) + assert [~N[2014-01-22 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "week")) + end + + test "datetime_add with day" do + dec = Decimal.new(5) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 5, "day")) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 5.0, "day")) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^5, "day")) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^5.0, "day")) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "day")) + end + + test "datetime_add with hour" do + dec = Decimal.new(60) + assert 
[~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 60, "hour")) + assert [~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 60.0, "hour")) + assert [~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^60, "hour")) + assert [~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^60.0, "hour")) + assert [~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "hour")) + end + + test "datetime_add with minute" do + dec = Decimal.new(90) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90, "minute")) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90.0, "minute")) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^90, "minute")) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^90.0, "minute")) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "minute")) + end + + test "datetime_add with second" do + dec = Decimal.new(90) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90, "second")) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 90.0, "second")) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^90, "second")) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^90.0, "second")) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "second")) + end + + @tag :uses_msec + test "datetime_add with millisecond" do + dec = Decimal.new(1500) + assert [~N[2014-01-01 02:00:01]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500, "millisecond")) + assert [~N[2014-01-01 02:00:01]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500.0, "millisecond")) + assert [~N[2014-01-01 02:00:01]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500, "millisecond")) + assert [~N[2014-01-01 02:00:01]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500.0, "millisecond")) + assert [~N[2014-01-01 02:00:01]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "millisecond")) + end + + @tag :microsecond_precision + @tag :uses_usec + test "datetime_add with microsecond" do + dec = Decimal.new(1500) + assert [~N[2014-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500, "microsecond")) + assert [~N[2014-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, 1500.0, "microsecond")) + assert [~N[2014-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500, "microsecond")) + assert [~N[2014-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^1500.0, "microsecond")) + assert [~N[2014-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "microsecond")) + end + + test "datetime_add with dynamic" do + inserted_at = @inserted_at + assert [~N[2015-01-01 02:00:00]] = + TestRepo.all(from p in 
Post, select: datetime_add(^inserted_at, ^1, ^"year")) + assert [~N[2014-04-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^3, ^"month")) + assert [~N[2014-01-22 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^3, ^"week")) + assert [~N[2014-01-06 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^5, ^"day")) + assert [~N[2014-01-03 14:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^60, ^"hour")) + assert [~N[2014-01-01 03:30:00]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^90, ^"minute")) + assert [~N[2014-01-01 02:01:30]] = + TestRepo.all(from p in Post, select: datetime_add(^inserted_at, ^90, ^"second")) + end + + test "datetime_add with dynamic in filters" do + inserted_at = @inserted_at + field = :inserted_at + assert [_] = + TestRepo.all(from p in Post, where: p.inserted_at > datetime_add(^inserted_at, ^-1, "year")) + assert [_] = + TestRepo.all(from p in Post, where: p.inserted_at > datetime_add(^inserted_at, -3, "month")) + assert [_] = + TestRepo.all(from p in Post, where: field(p, ^field) > datetime_add(^inserted_at, ^-3, ^"week")) + assert [_] = + TestRepo.all(from p in Post, where: field(p, ^field) > datetime_add(^inserted_at, -5, ^"day")) + end + + test "datetime_add with negative interval" do + dec = Decimal.new(-1) + assert [~N[2013-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, -1, "year")) + assert [~N[2013-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, -1.0, "year")) + assert [~N[2013-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^-1, "year")) + assert [~N[2013-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^-1.0, "year")) + assert [~N[2013-01-01 02:00:00]] = + TestRepo.all(from p in Post, select: datetime_add(p.inserted_at, ^dec, "year")) + end + + test "from_now" do + current = DateTime.utc_now().year + dec = Decimal.new(5) + assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(5, "year")) + assert year > current + assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(5.0, "year")) + assert year > current + assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(^5, "year")) + assert year > current + assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(^5.0, "year")) + assert year > current + assert [%{year: year}] = TestRepo.all(from p in Post, select: from_now(^dec, "year")) + assert year > current + end + + test "ago" do + current = DateTime.utc_now().year + dec = Decimal.new(5) + assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(5, "year")) + assert year < current + assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(5.0, "year")) + assert year < current + assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(^5, "year")) + assert year < current + assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(^5.0, "year")) + assert year < current + assert [%{year: year}] = TestRepo.all(from p in Post, select: ago(^dec, "year")) + assert year < current + end + + test "datetime_add with utc_datetime" do + {:ok, datetime} = DateTime.from_naive(@inserted_at, "Etc/UTC") + TestRepo.insert!(%User{inserted_at: datetime}) + + {:ok, datetime} = DateTime.from_naive(~N[2015-01-01 02:00:00], "Etc/UTC") + dec = Decimal.new(1) + + assert [^datetime] = + 
TestRepo.all(from p in User, select: datetime_add(type(^datetime, :utc_datetime), 0, "year")) + assert [^datetime] = + TestRepo.all(from p in User, select: datetime_add(p.inserted_at, 1, "year")) + assert [^datetime] = + TestRepo.all(from p in User, select: datetime_add(p.inserted_at, 1.0, "year")) + assert [^datetime] = + TestRepo.all(from p in User, select: datetime_add(p.inserted_at, ^1, "year")) + assert [^datetime] = + TestRepo.all(from p in User, select: datetime_add(p.inserted_at, ^1.0, "year")) + assert [^datetime] = + TestRepo.all(from p in User, select: datetime_add(p.inserted_at, ^dec, "year")) + end + + @tag :microsecond_precision + test "datetime_add with naive_datetime_usec" do + TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.000001]}) + datetime = ~N[2014-01-01 02:00:00.001501] + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :naive_datetime_usec), 0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 1500, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 1500.0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^1500, "microsecond")) + end + + @tag :microsecond_precision + @tag :decimal_precision + test "datetime_add with naive_datetime_usec and decimal increment" do + TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.000001]}) + dec = Decimal.new(1500) + datetime = ~N[2014-01-01 02:00:00.001501] + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^1500.0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^dec, "microsecond")) + end + + @tag :microsecond_precision + test "datetime_add with utc_datetime_usec" do + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.000001], "Etc/UTC") + TestRepo.insert!(%Usec{utc_datetime_usec: datetime}) + + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001501], "Etc/UTC") + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :utc_datetime_usec), 0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 1500, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 1500.0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^1500, "microsecond")) + end + + @tag :microsecond_precision + @tag :decimal_precision + test "datetime_add uses utc_datetime_usec with decimal increment" do + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.000001], "Etc/UTC") + TestRepo.insert!(%Usec{utc_datetime_usec: datetime}) + + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001501], "Etc/UTC") + dec = Decimal.new(1500) + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^1500.0, "microsecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^dec, "microsecond")) + end + + test "datetime_add with utc_datetime_usec in milliseconds" do + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001000], "Etc/UTC") + TestRepo.insert!(%Usec{utc_datetime_usec: datetime}) + + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 
02:00:00.151000], "Etc/UTC") + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :utc_datetime_usec), 0, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 150, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, 150.0, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^150, "millisecond")) + end + + @tag :decimal_precision + test "datetime_add uses utc_datetime_usec with decimal increment in milliseconds" do + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.001000], "Etc/UTC") + TestRepo.insert!(%Usec{utc_datetime_usec: datetime}) + + {:ok, datetime} = DateTime.from_naive(~N[2014-01-01 02:00:00.151000], "Etc/UTC") + dec = Decimal.new(150) + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^150.0, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.utc_datetime_usec, ^dec, "millisecond")) + end + + test "datetime_add with naive_datetime_usec in milliseconds" do + TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.001000]}) + datetime = ~N[2014-01-01 02:00:00.151000] + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(type(^datetime, :naive_datetime_usec), 0, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 150, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, 150.0, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^150, "millisecond")) + end + + @tag :decimal_precision + test "datetime_add with naive_datetime_usec and decimal increment in milliseconds" do + TestRepo.insert!(%Usec{naive_datetime_usec: ~N[2014-01-01 02:00:00.001000]}) + dec = Decimal.new(150) + datetime = ~N[2014-01-01 02:00:00.151000] + + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^150.0, "millisecond")) + assert [^datetime] = + TestRepo.all(from u in Usec, select: datetime_add(u.naive_datetime_usec, ^dec, "millisecond")) + end +end diff --git a/deps/ecto/integration_test/cases/joins.exs b/deps/ecto/integration_test/cases/joins.exs new file mode 100644 index 0000000..543cf81 --- /dev/null +++ b/deps/ecto/integration_test/cases/joins.exs @@ -0,0 +1,592 @@ +defmodule Ecto.Integration.JoinsTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + import Ecto.Query + + alias Ecto.Integration.Post + alias Ecto.Integration.Comment + alias Ecto.Integration.Permalink + alias Ecto.Integration.User + alias Ecto.Integration.PostUserCompositePk + + @tag :update_with_join + test "update all with joins" do + user = TestRepo.insert!(%User{name: "Tester"}) + post = TestRepo.insert!(%Post{title: "foo"}) + comment = TestRepo.insert!(%Comment{text: "hey", author_id: user.id, post_id: post.id}) + + another_post = TestRepo.insert!(%Post{title: "bar"}) + another_comment = TestRepo.insert!(%Comment{text: "another", author_id: user.id, post_id: another_post.id}) + + query = from(c in Comment, join: u in User, on: u.id == c.author_id, + where: c.post_id in ^[post.id]) + + assert {1, nil} = TestRepo.update_all(query, set: [text: "hoo"]) + assert %Comment{text:
"hoo"} = TestRepo.get(Comment, comment.id) + assert %Comment{text: "another"} = TestRepo.get(Comment, another_comment.id) + end + + @tag :delete_with_join + test "delete all with joins" do + user = TestRepo.insert!(%User{name: "Tester"}) + post = TestRepo.insert!(%Post{title: "foo"}) + TestRepo.insert!(%Comment{text: "hey", author_id: user.id, post_id: post.id}) + TestRepo.insert!(%Comment{text: "foo", author_id: user.id, post_id: post.id}) + TestRepo.insert!(%Comment{text: "bar", author_id: user.id}) + + query = from(c in Comment, join: u in User, on: u.id == c.author_id, + where: is_nil(c.post_id)) + assert {1, nil} = TestRepo.delete_all(query) + assert [%Comment{}, %Comment{}] = TestRepo.all(Comment) + + query = from(c in Comment, join: u in assoc(c, :author), + join: p in assoc(c, :post), + where: p.id in ^[post.id]) + assert {2, nil} = TestRepo.delete_all(query) + assert [] = TestRepo.all(Comment) + end + + test "joins" do + _p = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) + + query = from(p in Post, join: c in assoc(p, :permalink), order_by: p.id, select: {p, c}) + assert [{^p2, ^c1}] = TestRepo.all(query) + + query = from(p in Post, join: c in assoc(p, :permalink), on: c.id == ^c1.id, select: {p, c}) + assert [{^p2, ^c1}] = TestRepo.all(query) + end + + test "joins with queries" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) + + # Joined query without parameter + permalink = from c in Permalink, where: c.url == "1" + + query = from(p in Post, join: c in ^permalink, on: c.post_id == p.id, select: {p, c}) + assert [{^p2, ^c1}] = TestRepo.all(query) + + # Joined query with parameter + permalink = from c in Permalink, where: c.url == "1" + + query = from(p in Post, join: c in ^permalink, on: c.id == ^c1.id, order_by: p.title, select: {p, c}) + assert [{^p1, ^c1}, {^p2, ^c1}] = TestRepo.all(query) + end + + test "named joins" do + _p = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) + + query = + from(p in Post, join: c in assoc(p, :permalink), as: :permalink, order_by: p.id) + |> select([p, permalink: c], {p, c}) + + assert [{^p2, ^c1}] = TestRepo.all(query) + end + + test "joins with dynamic in :on" do + p = TestRepo.insert!(%Post{title: "1"}) + c = TestRepo.insert!(%Permalink{url: "1", post_id: p.id}) + + join_on = dynamic([p, ..., c], c.id == ^c.id) + + query = + from(p in Post, join: c in Permalink, on: ^join_on) + |> select([p, c], {p, c}) + + assert [{^p, ^c}] = TestRepo.all(query) + + join_on = dynamic([p, permalink: c], c.id == ^c.id) + + query = + from(p in Post, join: c in Permalink, as: :permalink, on: ^join_on) + |> select([p, c], {p, c}) + + assert [{^p, ^c}] = TestRepo.all(query) + end + + @tag :cross_join + test "cross joins with missing entries" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) + + query = from(p in Post, cross_join: c in Permalink, order_by: p.id, select: {p, c}) + assert [{^p1, ^c1}, {^p2, ^c1}] = TestRepo.all(query) + end + + @tag :left_join + test "left joins with missing entries" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) + + query = from(p in Post, 
left_join: c in assoc(p, :permalink), order_by: p.id, select: {p, c}) + assert [{^p1, nil}, {^p2, ^c1}] = TestRepo.all(query) + end + + @tag :left_join + test "left join with missing entries from subquery" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + c1 = TestRepo.insert!(%Permalink{url: "1", post_id: p2.id}) + + query = from(p in Post, left_join: c in subquery(Permalink), on: p.id == c.post_id, order_by: p.id, select: {p, c}) + assert [{^p1, nil}, {^p2, ^c1}] = TestRepo.all(query) + end + + @tag :right_join + test "right joins with missing entries" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + + %Permalink{id: plid1} = TestRepo.insert!(%Permalink{url: "1", post_id: pid2}) + + TestRepo.insert!(%Comment{text: "1", post_id: pid1}) + TestRepo.insert!(%Comment{text: "2", post_id: pid2}) + TestRepo.insert!(%Comment{text: "3", post_id: nil}) + + query = from(p in Post, right_join: c in assoc(p, :comments), + preload: :permalink, order_by: c.id) + assert [p1, p2, p3] = TestRepo.all(query) + assert p1.id == pid1 + assert p2.id == pid2 + assert is_nil(p3.id) + + assert p1.permalink == nil + assert p2.permalink.id == plid1 + end + + ## Associations joins + + test "has_many association join" do + post = TestRepo.insert!(%Post{title: "1"}) + c1 = TestRepo.insert!(%Comment{text: "hey", post_id: post.id}) + c2 = TestRepo.insert!(%Comment{text: "heya", post_id: post.id}) + + query = from(p in Post, join: c in assoc(p, :comments), select: {p, c}, order_by: p.id) + [{^post, ^c1}, {^post, ^c2}] = TestRepo.all(query) + end + + test "has_one association join" do + post1 = TestRepo.insert!(%Post{title: "1"}) + post2 = TestRepo.insert!(%Post{title: "1"}) + user = TestRepo.insert!(%User{}) + p1 = TestRepo.insert!(%Permalink{url: "hey", user_id: user.id, post_id: post1.id}) + p2 = TestRepo.insert!(%Permalink{url: "heya", user_id: user.id, post_id: post2.id}) + + query = from(p in User, join: c in assoc(p, :permalink), select: {p, c}, order_by: c.id) + [{^user, ^p1}, {^user, ^p2}] = TestRepo.all(query) + end + + test "belongs_to association join" do + post1 = TestRepo.insert!(%Post{title: "1"}) + post2 = TestRepo.insert!(%Post{title: "1"}) + user = TestRepo.insert!(%User{}) + p1 = TestRepo.insert!(%Permalink{url: "hey", user_id: user.id, post_id: post1.id}) + p2 = TestRepo.insert!(%Permalink{url: "heya", user_id: user.id, post_id: post2.id}) + + query = from(p in Permalink, join: c in assoc(p, :user), select: {p, c}, order_by: p.id) + [{^p1, ^user}, {^p2, ^user}] = TestRepo.all(query) + end + + test "has_many through association join" do + p1 = TestRepo.insert!(%Post{}) + p2 = TestRepo.insert!(%Post{}) + + u1 = TestRepo.insert!(%User{name: "zzz"}) + u2 = TestRepo.insert!(%User{name: "aaa"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p1.id, author_id: u2.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: p2.id, author_id: u2.id}) + + query = from p in Post, join: a in assoc(p, :comments_authors), select: {p, a}, order_by: [p.id, a.name] + assert [{^p1, ^u2}, {^p1, ^u1}, {^p1, ^u1}, {^p2, ^u2}] = TestRepo.all(query) + end + + test "has_many through nested association joins" do + u1 = TestRepo.insert!(%User{name: "Alice"}) + u2 = TestRepo.insert!(%User{name: "John"}) + + p1 = TestRepo.insert!(%Post{title: "p1", author_id: u1.id}) + p2 = 
TestRepo.insert!(%Post{title: "p2", author_id: u1.id}) + + TestRepo.insert!(%Comment{text: "c1", author_id: u1.id, post_id: p1.id}) + TestRepo.insert!(%Comment{text: "c2", author_id: u2.id, post_id: p1.id}) + TestRepo.insert!(%Comment{text: "c3", author_id: u2.id, post_id: p2.id}) + TestRepo.insert!(%Comment{text: "c4", post_id: p2.id}) + TestRepo.insert!(%Comment{text: "c5", author_id: u1.id, post_id: p2.id}) + + assert %{ + comments: [ + %{text: "c1"}, + %{text: "c5"} + ], + posts: [ + %{title: "p1"} = p1, + %{title: "p2"} = p2 + ] + } = + from(u in User) + |> join(:left, [u], p in assoc(u, :posts)) + |> join(:left, [u], c in assoc(u, :comments)) + |> join(:left, [_, p], c in assoc(p, :comments)) + |> preload( + [user, posts, comments, post_comments], + comments: comments, + posts: {posts, comments: {post_comments, :author}} + ) + |> TestRepo.get(u1.id) + + assert [ + %{text: "c1", author: %{name: "Alice"}}, + %{text: "c2", author: %{name: "John"}} + ] = Enum.sort_by(p1.comments, & &1.text) + + assert [ + %{text: "c3", author: %{name: "John"}}, + %{text: "c4", author: nil}, + %{text: "c5", author: %{name: "Alice"}} + ] = Enum.sort_by(p2.comments, & &1.text) + end + + test "many_to_many association join" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + _p = TestRepo.insert!(%Post{title: "3"}) + u1 = TestRepo.insert!(%User{name: "john"}) + u2 = TestRepo.insert!(%User{name: "mary"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: u1.id], + [post_id: p1.id, user_id: u2.id], + [post_id: p2.id, user_id: u2.id]] + + query = from(p in Post, join: u in assoc(p, :users), select: {p, u}, order_by: p.id) + [{^p1, ^u1}, {^p1, ^u2}, {^p2, ^u2}] = TestRepo.all(query) + end + + ## Association preload + + test "has_many assoc selector" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + c1 = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + c2 = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + c3 = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + # Without on + query = from(p in Post, join: c in assoc(p, :comments), preload: [comments: c]) + [p1, p2] = TestRepo.all(query) + assert p1.comments == [c1, c2] + assert p2.comments == [c3] + + # With on + query = from(p in Post, left_join: c in assoc(p, :comments), + on: p.title == c.text, preload: [comments: c]) + [p1, p2] = TestRepo.all(query) + assert p1.comments == [c1] + assert p2.comments == [] + end + + test "has_one assoc selector" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + pl1 = TestRepo.insert!(%Permalink{url: "1", post_id: p1.id}) + _pl = TestRepo.insert!(%Permalink{url: "2"}) + pl3 = TestRepo.insert!(%Permalink{url: "3", post_id: p2.id}) + + query = from(p in Post, join: pl in assoc(p, :permalink), preload: [permalink: pl]) + assert [post1, post3] = TestRepo.all(query) + + assert post1.permalink == pl1 + assert post3.permalink == pl3 + end + + test "belongs_to assoc selector" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + TestRepo.insert!(%Permalink{url: "1", post_id: p1.id}) + TestRepo.insert!(%Permalink{url: "2"}) + TestRepo.insert!(%Permalink{url: "3", post_id: p2.id}) + + query = from(pl in Permalink, left_join: p in assoc(pl, :post), preload: [post: p], order_by: pl.id) + assert [pl1, pl2, pl3] = TestRepo.all(query) + + assert pl1.post == p1 + refute pl2.post + assert pl3.post == p2 + end + + test "many_to_many assoc selector" do 
+ p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + _p = TestRepo.insert!(%Post{title: "3"}) + u1 = TestRepo.insert!(%User{name: "1"}) + u2 = TestRepo.insert!(%User{name: "2"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: u1.id], + [post_id: p1.id, user_id: u2.id], + [post_id: p2.id, user_id: u2.id]] + + # Without on + query = from(p in Post, left_join: u in assoc(p, :users), preload: [users: u], order_by: p.id) + [p1, p2, p3] = TestRepo.all(query) + assert Enum.sort_by(p1.users, & &1.name) == [u1, u2] + assert p2.users == [u2] + assert p3.users == [] + + # With on + query = from(p in Post, left_join: u in assoc(p, :users), on: p.title == u.name, + preload: [users: u], order_by: p.id) + [p1, p2, p3] = TestRepo.all(query) + assert p1.users == [u1] + assert p2.users == [u2] + assert p3.users == [] + end + + test "has_many through assoc selector" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + u1 = TestRepo.insert!(%User{name: "1"}) + u2 = TestRepo.insert!(%User{name: "2"}) + + TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + TestRepo.insert!(%Comment{post_id: p1.id, author_id: u1.id}) + TestRepo.insert!(%Comment{post_id: p1.id, author_id: u2.id}) + TestRepo.insert!(%Comment{post_id: p2.id, author_id: u2.id}) + + # Without on + query = from(p in Post, left_join: ca in assoc(p, :comments_authors), + preload: [comments_authors: ca]) + [p1, p2] = TestRepo.all(query) + assert p1.comments_authors == [u1, u2] + assert p2.comments_authors == [u2] + + # With on + query = from(p in Post, left_join: ca in assoc(p, :comments_authors), + on: ca.name == p.title, preload: [comments_authors: ca]) + [p1, p2] = TestRepo.all(query) + assert p1.comments_authors == [u1] + assert p2.comments_authors == [u2] + end + + test "has_many through-through assoc selector" do + %Post{id: pid1} = TestRepo.insert!(%Post{}) + %Post{id: pid2} = TestRepo.insert!(%Post{}) + + %Permalink{} = TestRepo.insert!(%Permalink{post_id: pid1, url: "1"}) + %Permalink{} = TestRepo.insert!(%Permalink{post_id: pid2, url: "2"}) + + %User{id: uid1} = TestRepo.insert!(%User{}) + %User{id: uid2} = TestRepo.insert!(%User{}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid2}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid2, author_id: uid2}) + + query = from(p in Permalink, left_join: ca in assoc(p, :post_comments_authors), + preload: [post_comments_authors: ca], order_by: ca.id) + + [l1, l2] = TestRepo.all(query) + [u1, u2] = l1.post_comments_authors + assert u1.id == uid1 + assert u2.id == uid2 + + [u2] = l2.post_comments_authors + assert u2.id == uid2 + + # Insert some intermediary joins to check indexes won't be shuffled + query = from(p in Permalink, + left_join: assoc(p, :post), + left_join: ca in assoc(p, :post_comments_authors), + left_join: assoc(p, :post), + left_join: assoc(p, :post), + preload: [post_comments_authors: ca], order_by: ca.id) + + [l1, l2] = TestRepo.all(query) + [u1, u2] = l1.post_comments_authors + assert u1.id == uid1 + assert u2.id == uid2 + + [u2] = l2.post_comments_authors + assert u2.id == uid2 + end + + ## Nested + + test "nested assoc" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + %User{id: uid2} = 
TestRepo.insert!(%User{name: "2"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: pid1, author_id: uid1}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: pid1, author_id: uid2}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "3", post_id: pid2, author_id: uid2}) + + # use multiple associations to force parallel preloader + query = from p in Post, + left_join: c in assoc(p, :comments), + left_join: u in assoc(c, :author), + order_by: [p.id, c.id, u.id], + preload: [:permalink, comments: {c, author: {u, [:comments, :custom]}}], + select: {0, [p], 1, 2} + + posts = TestRepo.all(query) + assert [p1, p2] = Enum.map(posts, fn {0, [p], 1, 2} -> p end) + assert p1.id == pid1 + assert p2.id == pid2 + + assert [c1, c2] = p1.comments + assert [c3] = p2.comments + assert c1.id == cid1 + assert c2.id == cid2 + assert c3.id == cid3 + + assert c1.author.id == uid1 + assert c2.author.id == uid2 + assert c3.author.id == uid2 + end + + test "nested assoc with missing entries" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + %Post{id: pid3} = TestRepo.insert!(%Post{title: "2"}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "2"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: pid1, author_id: uid1}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: pid1, author_id: nil}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "3", post_id: pid3, author_id: uid2}) + + query = from p in Post, + left_join: c in assoc(p, :comments), + left_join: u in assoc(c, :author), + order_by: [p.id, c.id, u.id], + preload: [comments: {c, author: u}] + + assert [p1, p2, p3] = TestRepo.all(query) + assert p1.id == pid1 + assert p2.id == pid2 + assert p3.id == pid3 + + assert [c1, c2] = p1.comments + assert [] = p2.comments + assert [c3] = p3.comments + assert c1.id == cid1 + assert c2.id == cid2 + assert c3.id == cid3 + + assert c1.author.id == uid1 + assert c2.author == nil + assert c3.author.id == uid2 + end + + test "nested assoc with child preload" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "2"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: pid1, author_id: uid1}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: pid1, author_id: uid2}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "3", post_id: pid2, author_id: uid2}) + + query = from p in Post, + left_join: c in assoc(p, :comments), + order_by: [p.id, c.id], + preload: [comments: {c, :author}], + select: p + + assert [p1, p2] = TestRepo.all(query) + assert p1.id == pid1 + assert p2.id == pid2 + + assert [c1, c2] = p1.comments + assert [c3] = p2.comments + assert c1.id == cid1 + assert c2.id == cid2 + assert c3.id == cid3 + + assert c1.author.id == uid1 + assert c2.author.id == uid2 + assert c3.author.id == uid2 + end + + test "nested assoc with sibling preload" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + + %Permalink{id: plid1} = TestRepo.insert!(%Permalink{url: "1", post_id: pid2}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: pid1}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: pid2}) + %Comment{id: _} = 
TestRepo.insert!(%Comment{text: "3", post_id: pid2}) + + query = from p in Post, + left_join: c in assoc(p, :comments), + where: c.text in ~w(1 2), + preload: [:permalink, comments: c], + select: {0, [p], 1, 2} + + posts = TestRepo.all(query) + assert [p1, p2] = Enum.map(posts, fn {0, [p], 1, 2} -> p end) + assert p1.id == pid1 + assert p2.id == pid2 + + assert p2.permalink.id == plid1 + + assert [c1] = p1.comments + assert [c2] = p2.comments + assert c1.id == cid1 + assert c2.id == cid2 + end + + test "mixing regular join and assoc selector" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + c1 = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + c2 = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + c3 = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + pl1 = TestRepo.insert!(%Permalink{url: "1", post_id: p1.id}) + _pl = TestRepo.insert!(%Permalink{url: "2"}) + pl3 = TestRepo.insert!(%Permalink{url: "3", post_id: p2.id}) + + # Without on + query = from(p in Post, join: pl in assoc(p, :permalink), + join: c in assoc(p, :comments), + preload: [permalink: pl], + select: {p, c}) + [{p1, ^c1}, {p1, ^c2}, {p2, ^c3}] = TestRepo.all(query) + assert p1.permalink == pl1 + assert p2.permalink == pl3 + end + + test "association with composite pk join" do + post = TestRepo.insert!(%Post{title: "1"}) + user = TestRepo.insert!(%User{name: "1"}) + TestRepo.insert!(%PostUserCompositePk{post_id: post.id, user_id: user.id}) + + query = from(p in Post, join: a in assoc(p, :post_user_composite_pk), + preload: [post_user_composite_pk: a], select: p) + assert [post] = TestRepo.all(query) + assert post.post_user_composite_pk + end +end diff --git a/deps/ecto/integration_test/cases/preload.exs b/deps/ecto/integration_test/cases/preload.exs new file mode 100644 index 0000000..dffbf21 --- /dev/null +++ b/deps/ecto/integration_test/cases/preload.exs @@ -0,0 +1,714 @@ +defmodule Ecto.Integration.PreloadTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + import Ecto.Query + + alias Ecto.Integration.Post + alias Ecto.Integration.Comment + alias Ecto.Integration.Item + alias Ecto.Integration.Permalink + alias Ecto.Integration.User + alias Ecto.Integration.Custom + + test "preload with parameter from select_merge" do + p1 = TestRepo.insert!(%Post{title: "p1"}) + TestRepo.insert!(%Comment{text: "c1", post: p1}) + + comments = + from(c in Comment, select: struct(c, [:text])) + |> select_merge([c], %{post_id: c.post_id}) + |> preload(:post) + |> TestRepo.all() + + assert [%{text: "c1", post: %{title: "p1"}}] = comments + end + + test "preload has_many" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + # We use the same text to expose bugs in preload sorting + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "2", post_id: p2.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + %Comment{id: cid4} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + assert %Ecto.Association.NotLoaded{} = p1.comments + + [p3, p1, p2] = TestRepo.preload([p3, p1, p2], :comments) + assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = p1.comments |> sort_by_id() + assert [%Comment{id: ^cid3}, %Comment{id: ^cid4}] = p2.comments |> sort_by_id() + assert [] = p3.comments + end + + test "preload 
has_many multiple times" do + p1 = TestRepo.insert!(%Post{title: "1"}) + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + + [p1, p1] = TestRepo.preload([p1, p1], :comments) + assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = p1.comments |> sort_by_id() + + [p1, p1] = TestRepo.preload([p1, p1], :comments) + assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = p1.comments |> sort_by_id() + end + + test "preload has_one" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + %Permalink{id: pid1} = TestRepo.insert!(%Permalink{url: "1", post_id: p1.id}) + %Permalink{} = TestRepo.insert!(%Permalink{url: "2", post_id: nil}) + %Permalink{id: pid3} = TestRepo.insert!(%Permalink{url: "3", post_id: p3.id}) + + assert %Ecto.Association.NotLoaded{} = p1.permalink + assert %Ecto.Association.NotLoaded{} = p2.permalink + + [p3, p1, p2] = TestRepo.preload([p3, p1, p2], :permalink) + assert %Permalink{id: ^pid1} = p1.permalink + refute p2.permalink + assert %Permalink{id: ^pid3} = p3.permalink + end + + test "preload belongs_to" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + TestRepo.insert!(%Post{title: "2"}) + %Post{id: pid3} = TestRepo.insert!(%Post{title: "3"}) + + pl1 = TestRepo.insert!(%Permalink{url: "1", post_id: pid1}) + pl2 = TestRepo.insert!(%Permalink{url: "2", post_id: nil}) + pl3 = TestRepo.insert!(%Permalink{url: "3", post_id: pid3}) + assert %Ecto.Association.NotLoaded{} = pl1.post + + [pl3, pl1, pl2] = TestRepo.preload([pl3, pl1, pl2], :post) + assert %Post{id: ^pid1} = pl1.post + refute pl2.post + assert %Post{id: ^pid3} = pl3.post + end + + test "preload multiple belongs_to" do + %User{id: uid} = TestRepo.insert!(%User{name: "foo"}) + %Post{id: pid} = TestRepo.insert!(%Post{title: "1"}) + %Comment{id: cid} = TestRepo.insert!(%Comment{post_id: pid, author_id: uid}) + + comment = TestRepo.get!(Comment, cid) + comment = TestRepo.preload(comment, [:author, :post]) + assert comment.author.id == uid + assert comment.post.id == pid + end + + test "preload belongs_to with shared parent" do + %Post{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %Post{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + + c1 = TestRepo.insert!(%Comment{text: "1", post_id: pid1}) + c2 = TestRepo.insert!(%Comment{text: "2", post_id: pid1}) + c3 = TestRepo.insert!(%Comment{text: "3", post_id: pid2}) + + [c3, c1, c2] = TestRepo.preload([c3, c1, c2], :post) + assert %Post{id: ^pid1} = c1.post + assert %Post{id: ^pid1} = c2.post + assert %Post{id: ^pid2} = c3.post + end + + test "preload many_to_many" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + # We use the same name to expose bugs in preload sorting + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + %User{id: uid3} = TestRepo.insert!(%User{name: "2"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "2"}) + %User{id: uid4} = TestRepo.insert!(%User{name: "3"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: uid1], + [post_id: p1.id, user_id: uid2], + [post_id: p2.id, user_id: uid3], + [post_id: p2.id, user_id: uid4], + [post_id: p3.id, user_id: uid1], + [post_id: p3.id, user_id: uid4]] + + assert %Ecto.Association.NotLoaded{} = p1.users + + [p1, p2, p3] = TestRepo.preload([p1, p2, p3], :users) + assert [%User{id: ^uid1}, %User{id: ^uid2}] = p1.users |> 
sort_by_id + assert [%User{id: ^uid3}, %User{id: ^uid4}] = p2.users |> sort_by_id + assert [%User{id: ^uid1}, %User{id: ^uid4}] = p3.users |> sort_by_id + end + + test "preload has_many through" do + %Post{id: pid1} = p1 = TestRepo.insert!(%Post{}) + %Post{id: pid2} = p2 = TestRepo.insert!(%Post{}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "foo"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "bar"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid2}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid2, author_id: uid2}) + + [p1, p2] = TestRepo.preload([p1, p2], :comments_authors) + + # Through was preloaded + [u1, u2] = p1.comments_authors |> sort_by_id + assert u1.id == uid1 + assert u2.id == uid2 + + [u2] = p2.comments_authors + assert u2.id == uid2 + + # But we also preloaded everything along the way + assert [c1, c2, c3] = p1.comments |> sort_by_id + assert c1.author.id == uid1 + assert c2.author.id == uid1 + assert c3.author.id == uid2 + + assert [c4] = p2.comments + assert c4.author.id == uid2 + end + + test "preload has_one through" do + %Post{id: pid1} = TestRepo.insert!(%Post{}) + %Post{id: pid2} = TestRepo.insert!(%Post{}) + + %Permalink{id: lid1} = TestRepo.insert!(%Permalink{post_id: pid1, url: "1"}) + %Permalink{id: lid2} = TestRepo.insert!(%Permalink{post_id: pid2, url: "2"}) + + %Comment{} = c1 = TestRepo.insert!(%Comment{post_id: pid1}) + %Comment{} = c2 = TestRepo.insert!(%Comment{post_id: pid1}) + %Comment{} = c3 = TestRepo.insert!(%Comment{post_id: pid2}) + + [c1, c2, c3] = TestRepo.preload([c1, c2, c3], :post_permalink) + + # Through was preloaded + assert c1.post.id == pid1 + assert c1.post.permalink.id == lid1 + assert c1.post_permalink.id == lid1 + + assert c2.post.id == pid1 + assert c2.post.permalink.id == lid1 + assert c2.post_permalink.id == lid1 + + assert c3.post.id == pid2 + assert c3.post.permalink.id == lid2 + assert c3.post_permalink.id == lid2 + end + + test "preload through with nil association" do + %Comment{} = c = TestRepo.insert!(%Comment{post_id: nil}) + + c = TestRepo.preload(c, [:post, :post_permalink]) + assert c.post == nil + assert c.post_permalink == nil + + c = TestRepo.preload(c, [:post, :post_permalink]) + assert c.post == nil + assert c.post_permalink == nil + end + + test "preload has_many through-through" do + %Post{id: pid1} = TestRepo.insert!(%Post{}) + %Post{id: pid2} = TestRepo.insert!(%Post{}) + + %Permalink{} = l1 = TestRepo.insert!(%Permalink{post_id: pid1, url: "1"}) + %Permalink{} = l2 = TestRepo.insert!(%Permalink{post_id: pid2, url: "2"}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "foo"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "bar"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid2}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid2, author_id: uid2}) + + # With assoc query + [l1, l2] = TestRepo.preload([l1, l2], :post_comments_authors) + + # Through was preloaded + [u1, u2] = l1.post_comments_authors |> sort_by_id + assert u1.id == uid1 + assert u2.id == uid2 + + [u2] = l2.post_comments_authors + assert u2.id == uid2 + + # But we also preloaded everything along the way + assert l1.post.id == pid1 + assert l1.post.comments != [] + + assert l2.post.id 
== pid2 + assert l2.post.comments != [] + end + + test "preload has_many through many_to_many" do + %Post{} = p1 = TestRepo.insert!(%Post{}) + %Post{} = p2 = TestRepo.insert!(%Post{}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "foo"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "bar"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: uid1], + [post_id: p1.id, user_id: uid2], + [post_id: p2.id, user_id: uid2]] + + %Comment{id: cid1} = TestRepo.insert!(%Comment{author_id: uid1}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{author_id: uid1}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{author_id: uid2}) + %Comment{id: cid4} = TestRepo.insert!(%Comment{author_id: uid2}) + + [p1, p2] = TestRepo.preload([p1, p2], :users_comments) + + # Through was preloaded + [c1, c2, c3, c4] = p1.users_comments |> sort_by_id + assert c1.id == cid1 + assert c2.id == cid2 + assert c3.id == cid3 + assert c4.id == cid4 + + [c3, c4] = p2.users_comments |> sort_by_id + assert c3.id == cid3 + assert c4.id == cid4 + + # But we also preloaded everything along the way + assert [u1, u2] = p1.users |> sort_by_id + assert u1.id == uid1 + assert u2.id == uid2 + + assert [u2] = p2.users + assert u2.id == uid2 + end + + ## Empties + + test "preload empty" do + assert TestRepo.preload([], :anything_goes) == [] + end + + test "preload has_many with no associated entries" do + p = TestRepo.insert!(%Post{title: "1"}) + p = TestRepo.preload(p, :comments) + + assert p.title == "1" + assert p.comments == [] + end + + test "preload has_one with no associated entries" do + p = TestRepo.insert!(%Post{title: "1"}) + p = TestRepo.preload(p, :permalink) + + assert p.title == "1" + assert p.permalink == nil + end + + test "preload belongs_to with no associated entry" do + c = TestRepo.insert!(%Comment{text: "1"}) + c = TestRepo.preload(c, :post) + + assert c.text == "1" + assert c.post == nil + end + + test "preload many_to_many with no associated entries" do + p = TestRepo.insert!(%Post{title: "1"}) + p = TestRepo.preload(p, :users) + + assert p.title == "1" + assert p.users == [] + end + + ## With queries + + test "preload with function" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + # We use the same text to expose bugs in preload sorting + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "2", post_id: p2.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + %Comment{id: cid4} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + assert [pe3, pe1, pe2] = TestRepo.preload([p3, p1, p2], + comments: fn _ -> TestRepo.all(Comment) end) + assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = pe1.comments + assert [%Comment{id: ^cid3}, %Comment{id: ^cid4}] = pe2.comments + assert [] = pe3.comments + end + + test "preload many_to_many with function" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + # We use the same name to expose bugs in preload sorting + %User{id: uid1} = TestRepo.insert!(%User{name: "1"}) + %User{id: uid3} = TestRepo.insert!(%User{name: "2"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "2"}) + %User{id: uid4} = TestRepo.insert!(%User{name: "3"}) + + TestRepo.insert_all "posts_users", [[post_id: p1.id, user_id: uid1], + [post_id: p1.id, user_id: uid2], + [post_id: p2.id, user_id: uid3], + [post_id: 
p2.id, user_id: uid4], + [post_id: p3.id, user_id: uid1], + [post_id: p3.id, user_id: uid4]] + + wrong_preloader = fn post_ids -> + TestRepo.all( + from u in User, + join: pu in "posts_users", + where: pu.post_id in ^post_ids and pu.user_id == u.id, + order_by: u.id, + select: map(u, [:id]) + ) + end + + assert_raise RuntimeError, ~r/invalid custom preload for `users` on `Ecto.Integration.Post`/, fn -> + TestRepo.preload([p1, p2, p3], users: wrong_preloader) + end + + right_preloader = fn post_ids -> + TestRepo.all( + from u in User, + join: pu in "posts_users", + where: pu.post_id in ^post_ids and pu.user_id == u.id, + order_by: u.id, + select: {pu.post_id, map(u, [:id])} + ) + end + + [p1, p2, p3] = TestRepo.preload([p1, p2, p3], users: right_preloader) + assert p1.users == [%{id: uid1}, %{id: uid2}] + assert p2.users == [%{id: uid3}, %{id: uid4}] + assert p3.users == [%{id: uid1}, %{id: uid4}] + end + + test "preload with query" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + p3 = TestRepo.insert!(%Post{title: "3"}) + + # We use the same text to expose bugs in preload sorting + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "2", post_id: p2.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + %Comment{id: cid4} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + assert %Ecto.Association.NotLoaded{} = p1.comments + + # With empty query + assert [pe3, pe1, pe2] = TestRepo.preload([p3, p1, p2], + comments: from(c in Comment, where: false)) + assert [] = pe1.comments + assert [] = pe2.comments + assert [] = pe3.comments + + # With custom select + assert [pe3, pe1, pe2] = TestRepo.preload([p3, p1, p2], + comments: from(c in Comment, select: c.id, order_by: c.id)) + assert [^cid1, ^cid2] = pe1.comments + assert [^cid3, ^cid4] = pe2.comments + assert [] = pe3.comments + + # With custom ordered query + assert [pe3, pe1, pe2] = TestRepo.preload([p3, p1, p2], + comments: from(c in Comment, order_by: [desc: c.text])) + assert [%Comment{id: ^cid2}, %Comment{id: ^cid1}] = pe1.comments + assert [%Comment{id: ^cid4}, %Comment{id: ^cid3}] = pe2.comments + assert [] = pe3.comments + + # With custom ordered query with preload + assert [pe3, pe1, pe2] = TestRepo.preload([p3, p1, p2], + comments: {from(c in Comment, order_by: [desc: c.text]), :post}) + assert [%Comment{id: ^cid2} = c2, %Comment{id: ^cid1} = c1] = pe1.comments + assert [%Comment{id: ^cid4} = c4, %Comment{id: ^cid3} = c3] = pe2.comments + assert [] = pe3.comments + + assert c1.post.title == "1" + assert c2.post.title == "1" + assert c3.post.title == "2" + assert c4.post.title == "2" + end + + test "preload through with query" do + %Post{id: pid1} = p1 = TestRepo.insert!(%Post{}) + + u1 = TestRepo.insert!(%User{name: "foo"}) + u2 = TestRepo.insert!(%User{name: "bar"}) + u3 = TestRepo.insert!(%User{name: "baz"}) + u4 = TestRepo.insert!(%User{name: "norf"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: u1.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: u2.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: u3.id}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: u4.id}) + + np1 = TestRepo.preload(p1, comments_authors: from(u in User, where: u.name == "foo")) + assert np1.comments_authors == [u1] + + assert_raise 
ArgumentError, ~r/Ecto expected a map\/struct with the key `id` but got: \d+/, fn -> + TestRepo.preload(p1, comments_authors: from(u in User, order_by: u.name, select: u.id)) + end + + # The subpreload order does not matter because the result is dictated by comments + np1 = TestRepo.preload(p1, comments_authors: from(u in User, order_by: u.name, select: %{id: u.id})) + assert np1.comments_authors == + [%{id: u1.id}, %{id: u2.id}, %{id: u3.id}, %{id: u4.id}] + end + + ## With take + + test "preload with take" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + _p = TestRepo.insert!(%Post{title: "3"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "2", post_id: p2.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + %Comment{id: cid4} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + + assert %Ecto.Association.NotLoaded{} = p1.comments + + posts = TestRepo.all(from Post, preload: [:comments], select: [:id, comments: [:id, :post_id]]) + [p1, p2, p3] = sort_by_id(posts) + assert p1.title == nil + assert p2.title == nil + assert p3.title == nil + + assert [%{id: ^cid1, text: nil}, %{id: ^cid2, text: nil}] = sort_by_id(p1.comments) + assert [%{id: ^cid3, text: nil}, %{id: ^cid4, text: nil}] = sort_by_id(p2.comments) + assert [] = sort_by_id(p3.comments) + end + + test "preload through with take" do + %Post{id: pid1} = TestRepo.insert!(%Post{}) + + %User{id: uid1} = TestRepo.insert!(%User{name: "foo"}) + %User{id: uid2} = TestRepo.insert!(%User{name: "bar"}) + + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid1}) + %Comment{} = TestRepo.insert!(%Comment{post_id: pid1, author_id: uid2}) + + [p1] = TestRepo.all from Post, preload: [:comments_authors], select: [:id, comments_authors: :id] + [%{id: ^uid1, name: nil}, %{id: ^uid2, name: nil}] = p1.comments_authors |> sort_by_id + end + + ## Nested + + test "preload many assocs" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + assert [p2, p1] = TestRepo.preload([p2, p1], [:comments, :users]) + assert p1.comments == [] + assert p2.comments == [] + assert p1.users == [] + assert p2.users == [] + end + + test "preload nested" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + TestRepo.insert!(%Comment{text: "4", post_id: p2.id}) + + assert [p2, p1] = TestRepo.preload([p2, p1], [comments: :post]) + assert [c1, c2] = p1.comments + assert [c3, c4] = p2.comments + assert p1.id == c1.post.id + assert p1.id == c2.post.id + assert p2.id == c3.post.id + assert p2.id == c4.post.id + end + + test "preload nested via custom query" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + TestRepo.insert!(%Comment{text: "3", post_id: p2.id}) + TestRepo.insert!(%Comment{text: "4", post_id: p2.id}) + + query = from(c in Comment, preload: :post, order_by: [desc: c.text]) + assert [p2, p1] = TestRepo.preload([p2, p1], comments: query) + assert [c2, c1] = p1.comments + assert [c4, c3] = p2.comments + assert p1.id == 
c1.post.id + assert p1.id == c2.post.id + assert p2.id == c3.post.id + assert p2.id == c4.post.id + end + + test "custom preload_order" do + post = TestRepo.insert!(%Post{users: [%User{name: "bar"}, %User{name: "foo"}], title: "1"}) + + TestRepo.insert!(%Comment{text: "2", post_id: post.id}) + TestRepo.insert!(%Comment{text: "1", post_id: post.id}) + + post = TestRepo.preload(post, [:ordered_comments, :ordered_users]) + + # asc + assert [%{text: "1"}, %{text: "2"}] = post.ordered_comments + + # desc + assert [%{name: "foo"}, %{name: "bar"}] = post.ordered_users + end + + ## Others + + @tag :invalid_prefix + test "preload custom prefix from schema" do + p = TestRepo.insert!(%Post{title: "1"}) + p = Ecto.put_meta(p, prefix: "this_surely_does_not_exist") + # This preload should fail because it points to a prefix that does not exist + assert catch_error(TestRepo.preload(p, [:comments])) + end + + @tag :invalid_prefix + test "preload custom prefix from options" do + p = TestRepo.insert!(%Post{title: "1"}) + # This preload should fail because it points to a prefix that does not exist + assert catch_error(TestRepo.preload(p, [:comments], prefix: "this_surely_does_not_exist")) + end + + test "preload with binary_id" do + c = TestRepo.insert!(%Custom{}) + u = TestRepo.insert!(%User{custom_id: c.bid}) + + u = TestRepo.preload(u, :custom) + assert u.custom.bid == c.bid + end + + test "preload raises with association set but without id" do + c1 = TestRepo.insert!(%Comment{text: "1"}) + u1 = TestRepo.insert!(%User{name: "name"}) + updated = %{c1 | author: u1, author_id: nil} + + assert ExUnit.CaptureLog.capture_log(fn -> + assert TestRepo.preload(updated, [:author]).author == u1 + end) =~ ~r/its association key `author_id` is nil/ + + assert TestRepo.preload(updated, [:author], force: true).author == nil + end + + test "preload skips already loaded for cardinality one" do + %Post{id: pid} = TestRepo.insert!(%Post{title: "1"}) + + c1 = %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: pid}) + c2 = %Comment{id: _cid} = TestRepo.insert!(%Comment{text: "2", post_id: nil}) + + [c1, c2] = TestRepo.preload([c1, c2], :post) + assert %Post{id: ^pid} = c1.post + assert c2.post == nil + + [c1, c2] = TestRepo.preload([c1, c2], post: :comments) + assert [%Comment{id: ^cid1}] = c1.post.comments + + TestRepo.update_all Post, set: [title: "0"] + TestRepo.update_all Comment, set: [post_id: pid] + + # Preloading once again shouldn't change the result + [c1, c2] = TestRepo.preload([c1, c2], :post) + assert %Post{id: ^pid, title: "1", comments: [_|_]} = c1.post + assert c2.post == nil + + [c1, c2] = TestRepo.preload([c1, %{c2 | post_id: pid}], :post, force: true) + assert %Post{id: ^pid, title: "0", comments: %Ecto.Association.NotLoaded{}} = c1.post + assert %Post{id: ^pid, title: "0", comments: %Ecto.Association.NotLoaded{}} = c2.post + end + + test "preload skips already loaded for cardinality many" do + p1 = TestRepo.insert!(%Post{title: "1"}) + p2 = TestRepo.insert!(%Post{title: "2"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p2.id}) + + [p1, p2] = TestRepo.preload([p1, p2], :comments) + assert [%Comment{id: ^cid1}] = p1.comments + assert [%Comment{id: ^cid2}] = p2.comments + + [p1, p2] = TestRepo.preload([p1, p2], comments: :post) + assert hd(p1.comments).post.id == p1.id + assert hd(p2.comments).post.id == p2.id + + TestRepo.update_all Comment, set: [text: "0"] + + # Preloading once again 
shouldn't change the result
+    [p1, p2] = TestRepo.preload([p1, p2], :comments)
+    assert [%Comment{id: ^cid1, text: "1", post: %Post{}}] = p1.comments
+    assert [%Comment{id: ^cid2, text: "2", post: %Post{}}] = p2.comments
+
+    [p1, p2] = TestRepo.preload([p1, p2], :comments, force: true)
+    assert [%Comment{id: ^cid1, text: "0", post: %Ecto.Association.NotLoaded{}}] = p1.comments
+    assert [%Comment{id: ^cid2, text: "0", post: %Ecto.Association.NotLoaded{}}] = p2.comments
+  end
+
+  test "preload keyword query" do
+    p1 = TestRepo.insert!(%Post{title: "1"})
+    p2 = TestRepo.insert!(%Post{title: "2"})
+    TestRepo.insert!(%Post{title: "3"})
+
+    %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id})
+    %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id})
+    %Comment{id: cid3} = TestRepo.insert!(%Comment{text: "3", post_id: p2.id})
+    %Comment{id: cid4} = TestRepo.insert!(%Comment{text: "4", post_id: p2.id})
+
+    # Regular query
+    query = from(p in Post, preload: [:comments], select: p)
+
+    assert [p1, p2, p3] = TestRepo.all(query) |> sort_by_id
+    assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = p1.comments |> sort_by_id
+    assert [%Comment{id: ^cid3}, %Comment{id: ^cid4}] = p2.comments |> sort_by_id
+    assert [] = p3.comments
+
+    # Query with interpolated preload query
+    query = from(p in Post, preload: [comments: ^from(c in Comment, where: false)], select: p)
+
+    assert [p1, p2, p3] = TestRepo.all(query)
+    assert [] = p1.comments
+    assert [] = p2.comments
+    assert [] = p3.comments
+
+    # Now let's use an interpolated preload too
+    comments = [:comments]
+    query = from(p in Post, preload: ^comments, select: {0, [p], 1, 2})
+
+    posts = TestRepo.all(query)
+    [p1, p2, p3] = Enum.map(posts, fn {0, [p], 1, 2} -> p end) |> sort_by_id
+
+    assert [%Comment{id: ^cid1}, %Comment{id: ^cid2}] = p1.comments |> sort_by_id
+    assert [%Comment{id: ^cid3}, %Comment{id: ^cid4}] = p2.comments |> sort_by_id
+    assert [] = p3.comments
+  end
+
+
+  test "preload belongs_to in embedded_schema" do
+    %User{id: uid1} = TestRepo.insert!(%User{name: "1"})
+    item = %Item{user_id: uid1}
+
+    # Starts as not loaded
+    assert %Ecto.Association.NotLoaded{} = item.user
+
+    # Now we preload it
+    item = TestRepo.preload(item, :user)
+    assert %User{id: ^uid1} = item.user
+  end
+
+  defp sort_by_id(values) do
+    Enum.sort_by(values, &(&1.id))
+  end
+end
diff --git a/deps/ecto/integration_test/cases/repo.exs b/deps/ecto/integration_test/cases/repo.exs
new file mode 100644
index 0000000..8c5923e
--- /dev/null
+++ b/deps/ecto/integration_test/cases/repo.exs
@@ -0,0 +1,2038 @@
+defmodule Ecto.Integration.RepoTest do
+  use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true)
+
+  alias Ecto.Integration.TestRepo
+  import Ecto.Query
+
+  alias Ecto.Integration.Post
+  alias Ecto.Integration.Order
+  alias Ecto.Integration.User
+  alias Ecto.Integration.Comment
+  alias Ecto.Integration.Permalink
+  alias Ecto.Integration.Custom
+  alias Ecto.Integration.Barebone
+  alias Ecto.Integration.CompositePk
+  alias Ecto.Integration.PostUserCompositePk
+
+  test "returns already started for started repos" do
+    assert {:error, {:already_started, _}} = TestRepo.start_link()
+  end
+
+  test "supports unnamed repos" do
+    assert {:ok, pid} = TestRepo.start_link(name: nil)
+    assert Ecto.Repo.Queryable.all(pid, Post, Ecto.Repo.Supervisor.tuplet(pid, [])) == []
+  end
+
+  test "all empty" do
+    assert TestRepo.all(Post) == []
+    assert TestRepo.all(from p in Post) == []
+  end
+
+  test "all with in" do
+
TestRepo.insert!(%Post{title: "hello"}) + + # Works without the query cache. + assert_raise Ecto.Query.CastError, fn -> + TestRepo.all(from p in Post, where: p.title in ^nil) + end + + assert [] = TestRepo.all from p in Post, where: p.title in [] + assert [] = TestRepo.all from p in Post, where: p.title in ["1", "2", "3"] + assert [] = TestRepo.all from p in Post, where: p.title in ^[] + + assert [_] = TestRepo.all from p in Post, where: p.title not in [] + assert [_] = TestRepo.all from p in Post, where: p.title in ["1", "hello", "3"] + assert [_] = TestRepo.all from p in Post, where: p.title in ["1", ^"hello", "3"] + assert [_] = TestRepo.all from p in Post, where: p.title in ^["1", "hello", "3"] + + # Still doesn't work after the query cache. + assert_raise Ecto.Query.CastError, fn -> + TestRepo.all(from p in Post, where: p.title in ^nil) + end + end + + test "all using named from" do + TestRepo.insert!(%Post{title: "hello"}) + + query = + from(p in Post, as: :post) + |> where([post: p], p.title == "hello") + + assert [_] = TestRepo.all query + end + + test "all without schema" do + %Post{} = TestRepo.insert!(%Post{title: "title1"}) + %Post{} = TestRepo.insert!(%Post{title: "title2"}) + + assert ["title1", "title2"] = + TestRepo.all(from(p in "posts", order_by: p.title, select: p.title)) + + assert [_] = + TestRepo.all(from(p in "posts", where: p.title == "title1", select: p.id)) + end + + test "all shares metadata" do + TestRepo.insert!(%Post{title: "title1"}) + TestRepo.insert!(%Post{title: "title2"}) + + [post1, post2] = TestRepo.all(Post) + assert :erts_debug.same(post1.__meta__, post2.__meta__) + + [new_post1, new_post2] = TestRepo.all(Post) + assert :erts_debug.same(post1.__meta__, new_post1.__meta__) + assert :erts_debug.same(post2.__meta__, new_post2.__meta__) + end + + @tag :invalid_prefix + test "all with invalid prefix" do + assert catch_error(TestRepo.all("posts", prefix: "oops")) + end + + test "insert, update and delete" do + post = %Post{title: "insert, update, delete", visits: 1} + meta = post.__meta__ + + assert %Post{} = inserted = TestRepo.insert!(post) + assert %Post{} = updated = TestRepo.update!(Ecto.Changeset.change(inserted, visits: 2)) + + deleted_meta = put_in meta.state, :deleted + assert %Post{__meta__: ^deleted_meta} = TestRepo.delete!(updated) + + loaded_meta = put_in meta.state, :loaded + assert %Post{__meta__: ^loaded_meta} = TestRepo.insert!(post) + + post = TestRepo.one(Post) + assert post.__meta__.state == :loaded + assert post.inserted_at + end + + test "insert, update and delete with field source" do + permalink = %Permalink{url: "url"} + assert %Permalink{url: "url"} = inserted = + TestRepo.insert!(permalink) + assert %Permalink{url: "new"} = updated = + TestRepo.update!(Ecto.Changeset.change(inserted, url: "new")) + assert %Permalink{url: "new"} = + TestRepo.delete!(updated) + end + + @tag :composite_pk + test "insert, update and delete with composite pk" do + c1 = TestRepo.insert!(%CompositePk{a: 1, b: 2, name: "first"}) + c2 = TestRepo.insert!(%CompositePk{a: 1, b: 3, name: "second"}) + + assert CompositePk |> first |> TestRepo.one == c1 + assert CompositePk |> last |> TestRepo.one == c2 + + changeset = Ecto.Changeset.cast(c1, %{name: "first change"}, ~w(name)a) + c1 = TestRepo.update!(changeset) + assert TestRepo.get_by!(CompositePk, %{a: 1, b: 2}) == c1 + + TestRepo.delete!(c2) + assert TestRepo.all(CompositePk) == [c1] + + assert_raise ArgumentError, ~r"to have exactly one primary key", fn -> + TestRepo.get(CompositePk, []) + end + + 
assert_raise ArgumentError, ~r"to have exactly one primary key", fn -> + TestRepo.get!(CompositePk, [1, 2]) + end + end + + @tag :composite_pk + test "insert, update and delete with associated composite pk" do + user = TestRepo.insert!(%User{}) + post = TestRepo.insert!(%Post{title: "post title"}) + + user_post = TestRepo.insert!(%PostUserCompositePk{user_id: user.id, post_id: post.id}) + assert TestRepo.get_by!(PostUserCompositePk, [user_id: user.id, post_id: post.id]) == user_post + TestRepo.delete!(user_post) + assert TestRepo.all(PostUserCompositePk) == [] + end + + @tag :invalid_prefix + test "insert, update and delete with invalid prefix" do + post = TestRepo.insert!(%Post{}) + changeset = Ecto.Changeset.change(post, title: "foo") + assert catch_error(TestRepo.insert(%Post{}, prefix: "oops")) + assert catch_error(TestRepo.update(changeset, prefix: "oops")) + assert catch_error(TestRepo.delete(changeset, prefix: "oops")) + + # Check we can still insert the post after the invalid prefix attempt + assert %Post{id: _} = TestRepo.insert!(%Post{}) + end + + test "insert and update with changeset" do + # On insert we merge the fields and changes + changeset = Ecto.Changeset.cast(%Post{visits: 13, title: "wrong"}, + %{"title" => "hello", "temp" => "unknown"}, ~w(title temp)a) + + post = TestRepo.insert!(changeset) + assert %Post{visits: 13, title: "hello", temp: "unknown"} = post + assert %Post{visits: 13, title: "hello", temp: "temp"} = TestRepo.get!(Post, post.id) + + # On update we merge only fields, direct schema changes are discarded + changeset = Ecto.Changeset.cast(%{post | visits: 17}, + %{"title" => "world", "temp" => "unknown"}, ~w(title temp)a) + + assert %Post{visits: 17, title: "world", temp: "unknown"} = TestRepo.update!(changeset) + assert %Post{visits: 13, title: "world", temp: "temp"} = TestRepo.get!(Post, post.id) + end + + test "insert and update with empty changeset" do + # On insert we merge the fields and changes + changeset = Ecto.Changeset.cast(%Permalink{}, %{}, ~w()) + assert %Permalink{} = permalink = TestRepo.insert!(changeset) + + # Assert we can update the same value twice, + # without changes, without triggering stale errors. + changeset = Ecto.Changeset.cast(permalink, %{}, ~w()) + assert TestRepo.update!(changeset) == permalink + assert TestRepo.update!(changeset) == permalink + end + + @tag :no_primary_key + test "insert with no primary key" do + assert %Barebone{num: nil} = TestRepo.insert!(%Barebone{}) + assert %Barebone{num: 13} = TestRepo.insert!(%Barebone{num: 13}) + end + + @tag :read_after_writes + test "insert and update with changeset read after writes" do + defmodule RAW do + use Ecto.Schema + + schema "comments" do + field :text, :string + field :lock_version, :integer, read_after_writes: true + end + end + + changeset = Ecto.Changeset.cast(struct(RAW, %{}), %{}, ~w()) + + # If the field is nil, we will not send it + # and read the value back from the database. 
+ assert %{id: cid, lock_version: 1} = raw = TestRepo.insert!(changeset) + + # Set the counter to 11, so we can read it soon + TestRepo.update_all from(u in RAW, where: u.id == ^cid), set: [lock_version: 11] + + # We will read back on update too + changeset = Ecto.Changeset.cast(raw, %{"text" => "0"}, ~w(text)a) + assert %{id: ^cid, lock_version: 11, text: "0"} = TestRepo.update!(changeset) + end + + test "insert autogenerates for custom type" do + post = TestRepo.insert!(%Post{uuid: nil}) + assert byte_size(post.uuid) == 36 + assert TestRepo.get_by(Post, uuid: post.uuid) == post + end + + @tag :id_type + test "insert autogenerates for custom id type" do + defmodule ID do + use Ecto.Schema + + @primary_key {:id, CustomPermalink, autogenerate: true} + schema "posts" do + end + end + + id = TestRepo.insert!(struct(ID, id: nil)) + assert id.id + assert TestRepo.get_by(ID, id: "#{id.id}-hello") == id + end + + @tag :id_type + @tag :assigns_id_type + test "insert with user-assigned primary key" do + assert %Post{id: 1} = TestRepo.insert!(%Post{id: 1}) + end + + @tag :id_type + @tag :assigns_id_type + test "insert and update with user-assigned primary key in changeset" do + changeset = Ecto.Changeset.cast(%Post{id: 11}, %{"id" => "13"}, ~w(id)a) + assert %Post{id: 13} = post = TestRepo.insert!(changeset) + + changeset = Ecto.Changeset.cast(post, %{"id" => "15"}, ~w(id)a) + assert %Post{id: 15} = TestRepo.update!(changeset) + end + + test "insert and fetch a schema with utc timestamps" do + datetime = DateTime.from_unix!(System.os_time(:second), :second) + TestRepo.insert!(%User{inserted_at: datetime}) + assert [%{inserted_at: ^datetime}] = TestRepo.all(User) + end + + test "optimistic locking in update/delete operations" do + import Ecto.Changeset, only: [cast: 3, optimistic_lock: 2] + base_comment = TestRepo.insert!(%Comment{}) + + changeset_ok = + base_comment + |> cast(%{"text" => "foo.bar"}, ~w(text)a) + |> optimistic_lock(:lock_version) + TestRepo.update!(changeset_ok) + + changeset_stale = + base_comment + |> cast(%{"text" => "foo.bat"}, ~w(text)a) + |> optimistic_lock(:lock_version) + + assert_raise Ecto.StaleEntryError, fn -> TestRepo.update!(changeset_stale) end + assert_raise Ecto.StaleEntryError, fn -> TestRepo.delete!(changeset_stale) end + end + + test "optimistic locking in update operation with nil field" do + import Ecto.Changeset, only: [cast: 3, optimistic_lock: 3] + + base_comment = + %Comment{} + |> cast(%{lock_version: nil}, [:lock_version]) + |> TestRepo.insert!() + + incrementer = + fn + nil -> 1 + old_value -> old_value + 1 + end + + changeset_ok = + base_comment + |> cast(%{"text" => "foo.bar"}, ~w(text)a) + |> optimistic_lock(:lock_version, incrementer) + + updated = TestRepo.update!(changeset_ok) + assert updated.text == "foo.bar" + assert updated.lock_version == 1 + end + + test "optimistic locking in delete operation with nil field" do + import Ecto.Changeset, only: [cast: 3, optimistic_lock: 3] + + base_comment = + %Comment{} + |> cast(%{lock_version: nil}, [:lock_version]) + |> TestRepo.insert!() + + incrementer = + fn + nil -> 1 + old_value -> old_value + 1 + end + + changeset_ok = optimistic_lock(base_comment, :lock_version, incrementer) + TestRepo.delete!(changeset_ok) + + refute TestRepo.get(Comment, base_comment.id) + end + + @tag :unique_constraint + test "unique constraint" do + changeset = Ecto.Changeset.change(%Post{}, uuid: Ecto.UUID.generate()) + {:ok, _} = TestRepo.insert(changeset) + + exception = + assert_raise Ecto.ConstraintError, ~r/constraint 
error when attempting to insert struct/, fn -> + changeset + |> TestRepo.insert() + end + + assert exception.message =~ "posts_uuid_index (unique_constraint)" + assert exception.message =~ "The changeset has not defined any constraint." + assert exception.message =~ "call `unique_constraint/3`" + + message = ~r/constraint error when attempting to insert struct/ + exception = + assert_raise Ecto.ConstraintError, message, fn -> + changeset + |> Ecto.Changeset.unique_constraint(:uuid, name: :posts_email_changeset) + |> TestRepo.insert() + end + + assert exception.message =~ "posts_email_changeset (unique_constraint)" + + {:error, changeset} = + changeset + |> Ecto.Changeset.unique_constraint(:uuid) + |> TestRepo.insert() + assert changeset.errors == [uuid: {"has already been taken", [constraint: :unique, constraint_name: "posts_uuid_index"]}] + assert changeset.data.__meta__.state == :built + end + + @tag :unique_constraint + test "unique constraint from association" do + uuid = Ecto.UUID.generate() + post = & %Post{} |> Ecto.Changeset.change(uuid: &1) |> Ecto.Changeset.unique_constraint(:uuid) + + {:error, changeset} = + TestRepo.insert %User{ + comments: [%Comment{}], + permalink: %Permalink{}, + posts: [post.(uuid), post.(uuid), post.(Ecto.UUID.generate())] + } + + [_, p2, _] = changeset.changes.posts + assert p2.errors == [uuid: {"has already been taken", [constraint: :unique, constraint_name: "posts_uuid_index"]}] + end + + @tag :id_type + @tag :unique_constraint + test "unique constraint with binary_id" do + changeset = Ecto.Changeset.change(%Custom{}, uuid: Ecto.UUID.generate()) + {:ok, _} = TestRepo.insert(changeset) + + {:error, changeset} = + changeset + |> Ecto.Changeset.unique_constraint(:uuid) + |> TestRepo.insert() + assert changeset.errors == [uuid: {"has already been taken", [constraint: :unique, constraint_name: "customs_uuid_index"]}] + assert changeset.data.__meta__.state == :built + end + + test "unique pseudo-constraint violation error message with join table at the repository" do + post = + TestRepo.insert!(%Post{title: "some post"}) + |> TestRepo.preload(:unique_users) + + user = + TestRepo.insert!(%User{name: "some user"}) + + # Violate the unique composite index + {:error, changeset} = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:unique_users, [user, user]) + |> TestRepo.update + + errors = Ecto.Changeset.traverse_errors(changeset, fn {msg, _opts} -> msg end) + assert errors == %{unique_users: [%{}, %{id: ["has already been taken"]}]} + refute changeset.valid? + end + + @tag :join + @tag :unique_constraint + test "unique constraint violation error message with join table in single changeset" do + post = + TestRepo.insert!(%Post{title: "some post"}) + |> TestRepo.preload(:constraint_users) + + user = + TestRepo.insert!(%User{name: "some user"}) + + # Violate the unique composite index + {:error, changeset} = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:constraint_users, [user, user]) + |> Ecto.Changeset.unique_constraint(:user, + name: :posts_users_composite_pk_post_id_user_id_index, + message: "has already been assigned") + |> TestRepo.update + + errors = Ecto.Changeset.traverse_errors(changeset, fn {msg, _opts} -> msg end) + assert errors == %{constraint_users: [%{}, %{user: ["has already been assigned"]}]} + + refute changeset.valid? 
+ end + + @tag :join + @tag :unique_constraint + test "unique constraint violation error message with join table and separate changesets" do + post = + TestRepo.insert!(%Post{title: "some post"}) + |> TestRepo.preload(:constraint_users) + + user = TestRepo.insert!(%User{name: "some user"}) + + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:constraint_users, [user]) + |> TestRepo.update + + # Violate the unique composite index + {:error, changeset} = + post + |> Ecto.Changeset.change + |> Ecto.Changeset.put_assoc(:constraint_users, [user]) + |> Ecto.Changeset.unique_constraint(:user, + name: :posts_users_composite_pk_post_id_user_id_index, + message: "has already been assigned") + |> TestRepo.update + + errors = Ecto.Changeset.traverse_errors(changeset, fn {msg, _opts} -> msg end) + assert errors == %{constraint_users: [%{user: ["has already been assigned"]}]} + + refute changeset.valid? + end + + @tag :foreign_key_constraint + test "foreign key constraint" do + changeset = Ecto.Changeset.change(%Comment{post_id: 0}) + + exception = + assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> + changeset + |> TestRepo.insert() + end + + assert exception.message =~ "comments_post_id_fkey (foreign_key_constraint)" + assert exception.message =~ "The changeset has not defined any constraint." + assert exception.message =~ "call `foreign_key_constraint/3`" + + message = ~r/constraint error when attempting to insert struct/ + exception = + assert_raise Ecto.ConstraintError, message, fn -> + changeset + |> Ecto.Changeset.foreign_key_constraint(:post_id, name: :comments_post_id_other) + |> TestRepo.insert() + end + + assert exception.message =~ "comments_post_id_other (foreign_key_constraint)" + + {:error, changeset} = + changeset + |> Ecto.Changeset.foreign_key_constraint(:post_id) + |> TestRepo.insert() + assert changeset.errors == [post_id: {"does not exist", [constraint: :foreign, constraint_name: "comments_post_id_fkey"]}] + end + + @tag :foreign_key_constraint + test "assoc constraint" do + changeset = Ecto.Changeset.change(%Comment{post_id: 0}) + + exception = + assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to insert struct/, fn -> + changeset + |> TestRepo.insert() + end + + assert exception.message =~ "comments_post_id_fkey (foreign_key_constraint)" + assert exception.message =~ "The changeset has not defined any constraint." + + message = ~r/constraint error when attempting to insert struct/ + exception = + assert_raise Ecto.ConstraintError, message, fn -> + changeset + |> Ecto.Changeset.assoc_constraint(:post, name: :comments_post_id_other) + |> TestRepo.insert() + end + + assert exception.message =~ "comments_post_id_other (foreign_key_constraint)" + + {:error, changeset} = + changeset + |> Ecto.Changeset.assoc_constraint(:post) + |> TestRepo.insert() + assert changeset.errors == [post: {"does not exist", [constraint: :assoc, constraint_name: "comments_post_id_fkey"]}] + end + + @tag :foreign_key_constraint + test "no assoc constraint error" do + user = TestRepo.insert!(%User{}) + TestRepo.insert!(%Permalink{user_id: user.id}) + + exception = + assert_raise Ecto.ConstraintError, ~r/constraint error when attempting to delete struct/, fn -> + TestRepo.delete!(user) + end + + assert exception.message =~ "permalinks_user_id_fkey (foreign_key_constraint)" + assert exception.message =~ "The changeset has not defined any constraint." 
+ end + + @tag :foreign_key_constraint + test "no assoc constraint with changeset mismatch" do + user = TestRepo.insert!(%User{}) + TestRepo.insert!(%Permalink{user_id: user.id}) + + message = ~r/constraint error when attempting to delete struct/ + exception = + assert_raise Ecto.ConstraintError, message, fn -> + user + |> Ecto.Changeset.change + |> Ecto.Changeset.no_assoc_constraint(:permalink, name: :permalinks_user_id_pther) + |> TestRepo.delete() + end + + assert exception.message =~ "permalinks_user_id_pther (foreign_key_constraint)" + end + + @tag :foreign_key_constraint + test "no assoc constraint with changeset match" do + user = TestRepo.insert!(%User{}) + TestRepo.insert!(%Permalink{user_id: user.id}) + + {:error, changeset} = + user + |> Ecto.Changeset.change + |> Ecto.Changeset.no_assoc_constraint(:permalink) + |> TestRepo.delete() + assert changeset.errors == [permalink: {"is still associated with this entry", [constraint: :no_assoc, constraint_name: "permalinks_user_id_fkey"]}] + end + + @tag :foreign_key_constraint + test "insert and update with embeds during failing child foreign key" do + changeset = + Order + |> struct(%{}) + |> order_changeset(%{item: %{price: 10}, permalink: %{post_id: 0}}) + + {:error, changeset} = TestRepo.insert(changeset) + assert %Ecto.Changeset{} = changeset.changes.item + + order = + Order + |> struct(%{}) + |> order_changeset(%{}) + |> TestRepo.insert!() + |> TestRepo.preload([:permalink]) + + changeset = order_changeset(order, %{item: %{price: 10}, permalink: %{post_id: 0}}) + assert %Ecto.Changeset{} = changeset.changes.item + + {:error, changeset} = TestRepo.update(changeset) + assert %Ecto.Changeset{} = changeset.changes.item + end + + def order_changeset(order, params) do + order + |> Ecto.Changeset.cast(params, [:permalink_id]) + |> Ecto.Changeset.cast_embed(:item, with: &item_changeset/2) + |> Ecto.Changeset.cast_assoc(:permalink, with: &permalink_changeset/2) + end + + def item_changeset(item, params) do + item + |> Ecto.Changeset.cast(params, [:price]) + end + + def permalink_changeset(comment, params) do + comment + |> Ecto.Changeset.cast(params, [:post_id]) + |> Ecto.Changeset.assoc_constraint(:post) + end + + test "unsafe_validate_unique/3" do + {:ok, inserted_post} = TestRepo.insert(%Post{title: "Greetings", visits: 13}) + new_post_changeset = Post.changeset(%Post{}, %{title: "Greetings", visits: 17}) + + changeset = Ecto.Changeset.unsafe_validate_unique(new_post_changeset, [:title], TestRepo) + assert changeset.errors[:title] == + {"has already been taken", validation: :unsafe_unique, fields: [:title]} + + changeset = Ecto.Changeset.unsafe_validate_unique(new_post_changeset, [:title, :text], TestRepo) + assert changeset.errors[:title] == nil + + update_changeset = Post.changeset(inserted_post, %{visits: 17}) + changeset = Ecto.Changeset.unsafe_validate_unique(update_changeset, [:title], TestRepo) + assert changeset.errors[:title] == nil # cannot conflict with itself + end + + test "unsafe_validate_unique/3 with composite keys" do + {:ok, inserted_post} = TestRepo.insert(%CompositePk{a: 123, b: 456, name: "UniqueName"}) + + different_pk = CompositePk.changeset(%CompositePk{}, %{name: "UniqueName", a: 789, b: 321}) + changeset = Ecto.Changeset.unsafe_validate_unique(different_pk, [:name], TestRepo) + assert changeset.errors[:name] == + {"has already been taken", validation: :unsafe_unique, fields: [:name]} + + partial_pk = CompositePk.changeset(%CompositePk{}, %{name: "UniqueName", a: 789, b: 456}) + changeset = 
Ecto.Changeset.unsafe_validate_unique(partial_pk, [:name], TestRepo) + assert changeset.errors[:name] == + {"has already been taken", validation: :unsafe_unique, fields: [:name]} + + update_changeset = CompositePk.changeset(inserted_post, %{name: "NewName"}) + changeset = Ecto.Changeset.unsafe_validate_unique(update_changeset, [:name], TestRepo) + assert changeset.valid? + assert changeset.errors[:name] == nil # cannot conflict with itself + end + + test "get(!)" do + post1 = TestRepo.insert!(%Post{title: "1"}) + post2 = TestRepo.insert!(%Post{title: "2"}) + + assert post1 == TestRepo.get(Post, post1.id) + assert post2 == TestRepo.get(Post, to_string post2.id) # With casting + + assert post1 == TestRepo.get!(Post, post1.id) + assert post2 == TestRepo.get!(Post, to_string post2.id) # With casting + + TestRepo.delete!(post1) + + assert TestRepo.get(Post, post1.id) == nil + assert_raise Ecto.NoResultsError, fn -> + TestRepo.get!(Post, post1.id) + end + end + + test "get(!) with custom source" do + custom = Ecto.put_meta(%Custom{}, source: "posts") + custom = TestRepo.insert!(custom) + bid = custom.bid + assert %Custom{bid: ^bid, __meta__: %{source: "posts"}} = + TestRepo.get(from(c in {"posts", Custom}), bid) + end + + test "get(!) with binary_id" do + custom = TestRepo.insert!(%Custom{}) + bid = custom.bid + assert %Custom{bid: ^bid} = TestRepo.get(Custom, bid) + end + + test "get_by(!)" do + post1 = TestRepo.insert!(%Post{title: "1", visits: 1}) + post2 = TestRepo.insert!(%Post{title: "2", visits: 2}) + + assert post1 == TestRepo.get_by(Post, id: post1.id) + assert post1 == TestRepo.get_by(Post, title: post1.title) + assert post1 == TestRepo.get_by(Post, id: post1.id, title: post1.title) + assert post2 == TestRepo.get_by(Post, id: to_string(post2.id)) # With casting + assert nil == TestRepo.get_by(Post, title: "hey") + assert nil == TestRepo.get_by(Post, id: post2.id, visits: 3) + + assert post1 == TestRepo.get_by!(Post, id: post1.id) + assert post1 == TestRepo.get_by!(Post, title: post1.title) + assert post1 == TestRepo.get_by!(Post, id: post1.id, visits: 1) + assert post2 == TestRepo.get_by!(Post, id: to_string(post2.id)) # With casting + + assert post1 == TestRepo.get_by!(Post, %{id: post1.id}) + + assert_raise Ecto.NoResultsError, fn -> + TestRepo.get_by!(Post, id: post2.id, title: "hey") + end + end + + test "reload" do + post1 = TestRepo.insert!(%Post{title: "1", visits: 1}) + post2 = TestRepo.insert!(%Post{title: "2", visits: 2}) + + assert post1 == TestRepo.reload(post1) + assert [post1, post2] == TestRepo.reload([post1, post2]) + assert [post1, post2, nil] == TestRepo.reload([post1, post2, %Post{id: 0}]) + assert nil == TestRepo.reload(%Post{id: 0}) + + # keeps order as received in the params + assert [post2, post1] == TestRepo.reload([post2, post1]) + + TestRepo.update_all(Post, inc: [visits: 1]) + + assert [%{visits: 2}, %{visits: 3}] = TestRepo.reload([post1, post2]) + end + + test "reload ignores preloads" do + post = TestRepo.insert!(%Post{title: "1", visits: 1}) |> TestRepo.preload(:comments) + + assert %{comments: %Ecto.Association.NotLoaded{}} = TestRepo.reload(post) + end + + test "reload!" 
do + post1 = TestRepo.insert!(%Post{title: "1", visits: 1}) + post2 = TestRepo.insert!(%Post{title: "2", visits: 2}) + + assert post1 == TestRepo.reload!(post1) + assert [post1, post2] == TestRepo.reload!([post1, post2]) + + assert_raise RuntimeError, ~r"could not reload", fn -> + TestRepo.reload!([post1, post2, %Post{id: -1}]) + end + + assert_raise Ecto.NoResultsError, fn -> + TestRepo.reload!(%Post{id: -1}) + end + + assert [post2, post1] == TestRepo.reload([post2, post1]) + + TestRepo.update_all(Post, inc: [visits: 1]) + + assert [%{visits: 2}, %{visits: 3}] = TestRepo.reload!([post1, post2]) + end + + test "first, last and one(!)" do + post1 = TestRepo.insert!(%Post{title: "1"}) + post2 = TestRepo.insert!(%Post{title: "2"}) + + assert post1 == Post |> first |> TestRepo.one + assert post2 == Post |> last |> TestRepo.one + + query = from p in Post, order_by: p.title + assert post1 == query |> first |> TestRepo.one + assert post2 == query |> last |> TestRepo.one + + query = from p in Post, order_by: [desc: p.title], limit: 10 + assert post2 == query |> first |> TestRepo.one + assert post1 == query |> last |> TestRepo.one + + query = from p in Post, where: is_nil(p.id) + refute query |> first |> TestRepo.one + refute query |> last |> TestRepo.one + assert_raise Ecto.NoResultsError, fn -> query |> first |> TestRepo.one! end + assert_raise Ecto.NoResultsError, fn -> query |> last |> TestRepo.one! end + end + + test "exists?" do + TestRepo.insert!(%Post{title: "1", visits: 2}) + TestRepo.insert!(%Post{title: "2", visits: 1}) + + query = from p in Post, where: not is_nil(p.title), limit: 2 + assert query |> TestRepo.exists? == true + + query = from p in Post, where: p.title == "1", select: p.title + assert query |> TestRepo.exists? == true + + query = from p in Post, where: is_nil(p.id) + assert query |> TestRepo.exists? == false + + query = from p in Post, where: is_nil(p.id) + assert query |> TestRepo.exists? == false + + query = from(p in Post, select: {p.visits, avg(p.visits)}, group_by: p.visits, having: avg(p.visits) > 1) + assert query |> TestRepo.exists? 
== true + end + + test "aggregate" do + assert TestRepo.aggregate(Post, :max, :visits) == nil + + TestRepo.insert!(%Post{visits: 10}) + TestRepo.insert!(%Post{visits: 12}) + TestRepo.insert!(%Post{visits: 14}) + TestRepo.insert!(%Post{visits: 14}) + + # Barebones + assert TestRepo.aggregate(Post, :max, :visits) == 14 + assert TestRepo.aggregate(Post, :min, :visits) == 10 + assert TestRepo.aggregate(Post, :count, :visits) == 4 + assert "50" = to_string(TestRepo.aggregate(Post, :sum, :visits)) + + # With order_by + query = from Post, order_by: [asc: :visits] + assert TestRepo.aggregate(query, :max, :visits) == 14 + + # With order_by and limit + query = from Post, order_by: [asc: :visits], limit: 2 + assert TestRepo.aggregate(query, :max, :visits) == 12 + end + + @tag :decimal_precision + test "aggregate avg" do + TestRepo.insert!(%Post{visits: 10}) + TestRepo.insert!(%Post{visits: 12}) + TestRepo.insert!(%Post{visits: 14}) + TestRepo.insert!(%Post{visits: 14}) + + assert "12.5" <> _ = to_string(TestRepo.aggregate(Post, :avg, :visits)) + end + + @tag :inline_order_by + test "aggregate with distinct" do + TestRepo.insert!(%Post{visits: 10}) + TestRepo.insert!(%Post{visits: 12}) + TestRepo.insert!(%Post{visits: 14}) + TestRepo.insert!(%Post{visits: 14}) + + query = from Post, order_by: [asc: :visits], distinct: true + assert TestRepo.aggregate(query, :count, :visits) == 3 + end + + @tag :insert_cell_wise_defaults + test "insert all" do + assert {2, nil} = TestRepo.insert_all("comments", [[text: "1"], %{text: "2", lock_version: 2}]) + assert {2, nil} = TestRepo.insert_all({"comments", Comment}, [[text: "3"], %{text: "4", lock_version: 2}]) + assert [%Comment{text: "1", lock_version: 1}, + %Comment{text: "2", lock_version: 2}, + %Comment{text: "3", lock_version: 1}, + %Comment{text: "4", lock_version: 2}] = TestRepo.all(Comment) + + assert {2, nil} = TestRepo.insert_all(Post, [[], []]) + assert [%Post{}, %Post{}] = TestRepo.all(Post) + + assert {0, nil} = TestRepo.insert_all("posts", []) + assert {0, nil} = TestRepo.insert_all({"posts", Post}, []) + end + + @tag :insert_select + test "insert all with query for single fields" do + comment = TestRepo.insert!(%Comment{text: "1", lock_version: 1}) + + text_query = from(c in Comment, select: c.text, where: [id: ^comment.id, lock_version: 1]) + + lock_version_query = from(c in Comment, select: c.lock_version, where: [id: ^comment.id]) + + rows = [ + [text: "2", lock_version: lock_version_query], + [lock_version: lock_version_query, text: "3"], + [text: text_query], + [text: text_query, lock_version: lock_version_query], + [lock_version: 6, text: "6"] + ] + assert {5, nil} = TestRepo.insert_all(Comment, rows, []) + + inserted_rows = Comment + |> where([c], c.id != ^comment.id) + |> TestRepo.all() + + assert [%Comment{text: "2", lock_version: 1}, + %Comment{text: "3", lock_version: 1}, + %Comment{text: "1"}, + %Comment{text: "1", lock_version: 1}, + %Comment{text: "6", lock_version: 6}] = inserted_rows + end + + describe "insert_all with source query" do + @tag :upsert_all + @tag :with_conflict_target + @tag :concat + test "insert_all with query and conflict target" do + {:ok, %Post{id: id}} = TestRepo.insert(%Post{ + title: "A generic title" + }) + + source = from p in Post, + select: %{ + title: fragment("concat(?, ?, ?)", p.title, type(^" suffix ", :string), p.id) + } + + assert {1, _} = TestRepo.insert_all(Post, source, conflict_target: [:id], on_conflict: :replace_all) + + expected_id = id + 1 + expected_title = "A generic title suffix #{id}" + + 
assert %Post{title: ^expected_title} = TestRepo.get(Post, expected_id) + end + + @tag :returning + @tag :concat + test "insert_all with query and returning" do + {:ok, %Post{id: id}} = TestRepo.insert(%Post{ + title: "A generic title" + }) + + source = from p in Post, + select: %{ + title: fragment("concat(?, ?, ?)", p.title, type(^" suffix ", :string), p.id) + } + + assert {1, returns} = TestRepo.insert_all(Post, source, returning: [:id, :title]) + + expected_id = id + 1 + expected_title = "A generic title suffix #{id}" + assert [%Post{id: ^expected_id, title: ^expected_title}] = returns + end + + @tag :upsert_all + @tag :without_conflict_target + @tag :concat + test "insert_all with query and on_conflict" do + {:ok, %Post{id: id}} = TestRepo.insert(%Post{ + title: "A generic title" + }) + + source = from p in Post, + select: %{ + title: fragment("concat(?, ?, ?)", p.title, type(^" suffix ", :string), p.id) + } + + assert {1, _} = TestRepo.insert_all(Post, source, on_conflict: :replace_all) + + expected_id = id + 1 + expected_title = "A generic title suffix #{id}" + + assert %Post{title: ^expected_title} = TestRepo.get(Post, expected_id) + end + + @tag :concat + test "insert_all with query" do + {:ok, %Post{id: id}} = TestRepo.insert(%Post{ + title: "A generic title" + }) + + source = from p in Post, + select: %{ + title: fragment("concat(?, ?, ?)", p.title, type(^" suffix ", :string), p.id) + } + + assert {1, _} = TestRepo.insert_all(Post, source) + + expected_id = id + 1 + expected_title = "A generic title suffix #{id}" + + assert %Post{title: ^expected_title} = TestRepo.get(Post, expected_id) + end + end + + @tag :invalid_prefix + @tag :insert_cell_wise_defaults + test "insert all with invalid prefix" do + assert catch_error(TestRepo.insert_all(Post, [[], []], prefix: "oops")) + end + + @tag :returning + @tag :insert_cell_wise_defaults + test "insert all with returning with schema" do + assert {0, []} = TestRepo.insert_all(Comment, [], returning: true) + assert {0, nil} = TestRepo.insert_all(Comment, [], returning: false) + + {2, [c1, c2]} = TestRepo.insert_all(Comment, [[text: "1"], [text: "2"]], returning: [:id, :text]) + assert %Comment{text: "1", __meta__: %{state: :loaded}} = c1 + assert %Comment{text: "2", __meta__: %{state: :loaded}} = c2 + + {2, [c1, c2]} = TestRepo.insert_all(Comment, [[text: "3"], [text: "4"]], returning: true) + assert %Comment{text: "3", __meta__: %{state: :loaded}} = c1 + assert %Comment{text: "4", __meta__: %{state: :loaded}} = c2 + end + + @tag :returning + @tag :insert_cell_wise_defaults + test "insert all with returning with schema with field source" do + assert {0, []} = TestRepo.insert_all(Permalink, [], returning: true) + assert {0, nil} = TestRepo.insert_all(Permalink, [], returning: false) + + {2, [c1, c2]} = TestRepo.insert_all(Permalink, [[url: "1"], [url: "2"]], returning: [:id, :url]) + assert %Permalink{url: "1", __meta__: %{state: :loaded}} = c1 + assert %Permalink{url: "2", __meta__: %{state: :loaded}} = c2 + + {2, [c1, c2]} = TestRepo.insert_all(Permalink, [[url: "3"], [url: "4"]], returning: true) + assert %Permalink{url: "3", __meta__: %{state: :loaded}} = c1 + assert %Permalink{url: "4", __meta__: %{state: :loaded}} = c2 + end + + @tag :returning + @tag :insert_cell_wise_defaults + test "insert all with returning without schema" do + {2, [c1, c2]} = TestRepo.insert_all("comments", [[text: "1"], [text: "2"]], returning: [:id, :text]) + assert %{id: _, text: "1"} = c1 + assert %{id: _, text: "2"} = c2 + + assert_raise ArgumentError, fn -> 
+ TestRepo.insert_all("comments", [[text: "1"], [text: "2"]], returning: true) + end + end + + @tag :insert_cell_wise_defaults + test "insert all with dumping" do + uuid = Ecto.UUID.generate() + assert {1, nil} = TestRepo.insert_all(Post, [%{uuid: uuid}]) + assert [%Post{uuid: ^uuid, title: nil}] = TestRepo.all(Post) + end + + @tag :insert_cell_wise_defaults + test "insert all autogenerates for binary_id type" do + custom = TestRepo.insert!(%Custom{bid: nil}) + assert custom.bid + assert TestRepo.get(Custom, custom.bid) + assert TestRepo.delete!(custom) + refute TestRepo.get(Custom, custom.bid) + + uuid = Ecto.UUID.generate() + assert {2, nil} = TestRepo.insert_all(Custom, [%{uuid: uuid}, %{bid: custom.bid}]) + assert [%Custom{bid: bid2, uuid: nil}, + %Custom{bid: bid1, uuid: ^uuid}] = Enum.sort_by(TestRepo.all(Custom), & &1.uuid) + assert bid1 && bid2 + assert custom.bid != bid1 + assert custom.bid == bid2 + end + + describe "placeholders" do + @describetag :placeholders + + test "Repo.insert_all fills in placeholders" do + placeholders = %{foo: 100, bar: "test"} + bar_ph = {:placeholder, :bar} + foo_ph = {:placeholder, :foo} + + entries = [ + %{intensity: 1.0, title: bar_ph, posted: ~D[2020-12-21], visits: foo_ph}, + %{intensity: 2.0, title: bar_ph, posted: ~D[2000-12-21], visits: foo_ph} + ] |> Enum.map(&Map.put(&1, :uuid, Ecto.UUID.generate)) + + TestRepo.insert_all(Post, entries, placeholders: placeholders) + + query = from(p in Post, select: {p.intensity, p.title, p.visits}) + assert [{1.0, "test", 100}, {2.0, "test", 100}] == TestRepo.all(query) + end + + test "Repo.insert_all accepts non-atom placeholder keys" do + placeholders = %{10 => "integer key", {:foo, :bar} => "tuple key"} + entries = [%{text: {:placeholder, 10}}, %{text: {:placeholder, {:foo, :bar}}}] + TestRepo.insert_all(Comment, entries, placeholders: placeholders) + + query = from(c in Comment, select: c.text) + assert ["integer key", "tuple key"] == TestRepo.all(query) + end + + test "Repo.insert_all fills in placeholders with keyword list entries" do + TestRepo.insert_all(Barebone, [[num: {:placeholder, :foo}]], placeholders: %{foo: 100}) + + query = from(b in Barebone, select: b.num) + assert [100] == TestRepo.all(query) + end + + @tag :upsert_all + @tag :with_conflict_target + test "Repo.insert_all upserts and fills in placeholders with conditioned on_conflict query" do + do_not_update_title = "don't touch me" + + on_conflict = + from p in Post, update: [set: [title: "updated"]], where: p.title != ^do_not_update_title + + placeholders = %{posted: Date.utc_today(), title: "title"} + + post1 = [ + title: {:placeholder, :title}, + uuid: Ecto.UUID.generate(), + posted: {:placeholder, :posted} + ] + + post2 = [ + title: do_not_update_title, + uuid: Ecto.UUID.generate(), + posted: {:placeholder, :posted} + ] + + assert TestRepo.insert_all(Post, [post1, post2], + placeholders: placeholders, + on_conflict: on_conflict, + conflict_target: [:uuid] + ) == + {2, nil} + + # only update first post + assert TestRepo.insert_all(Post, [post1, post2], + placeholders: placeholders, + on_conflict: on_conflict, + conflict_target: [:uuid] + ) == + {1, nil} + + assert TestRepo.aggregate(where(Post, title: "updated"), :count) == 1 + end + end + + test "update all" do + assert post1 = TestRepo.insert!(%Post{title: "1"}) + assert post2 = TestRepo.insert!(%Post{title: "2"}) + assert post3 = TestRepo.insert!(%Post{title: "3"}) + + assert {3, nil} = TestRepo.update_all(Post, set: [title: "x"]) + + assert %Post{title: "x"} = 
TestRepo.reload(post1) + assert %Post{title: "x"} = TestRepo.reload(post2) + assert %Post{title: "x"} = TestRepo.reload(post3) + + assert {3, nil} = TestRepo.update_all("posts", [set: [title: nil]]) + + assert %Post{title: nil} = TestRepo.reload(post1) + assert %Post{title: nil} = TestRepo.reload(post2) + assert %Post{title: nil} = TestRepo.reload(post3) + end + + @tag :invalid_prefix + test "update all with invalid prefix" do + assert catch_error(TestRepo.update_all(Post, [set: [title: "x"]], prefix: "oops")) + end + + @tag :returning + test "update all with returning with schema" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + assert {3, posts} = TestRepo.update_all(select(Post, [p], p), [set: [title: "x"]]) + + [p1, p2, p3] = Enum.sort_by(posts, & &1.id) + assert %Post{id: ^id1, title: "x"} = p1 + assert %Post{id: ^id2, title: "x"} = p2 + assert %Post{id: ^id3, title: "x"} = p3 + + assert {3, posts} = TestRepo.update_all(select(Post, [:id, :visits]), [set: [visits: 11]]) + + [p1, p2, p3] = Enum.sort_by(posts, & &1.id) + assert %Post{id: ^id1, title: nil, visits: 11} = p1 + assert %Post{id: ^id2, title: nil, visits: 11} = p2 + assert %Post{id: ^id3, title: nil, visits: 11} = p3 + end + + @tag :returning + test "update all with returning without schema" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + assert {3, posts} = TestRepo.update_all(select("posts", [:id, :title]), [set: [title: "x"]]) + + [p1, p2, p3] = Enum.sort_by(posts, & &1.id) + assert p1 == %{id: id1, title: "x"} + assert p2 == %{id: id2, title: "x"} + assert p3 == %{id: id3, title: "x"} + end + + test "update all with filter" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + query = from(p in Post, where: p.title == "1" or p.title == "2", + update: [set: [visits: ^17]]) + assert {2, nil} = TestRepo.update_all(query, set: [title: "x"]) + + assert %Post{title: "x", visits: 17} = TestRepo.get(Post, id1) + assert %Post{title: "x", visits: 17} = TestRepo.get(Post, id2) + assert %Post{title: "3", visits: nil} = TestRepo.get(Post, id3) + end + + test "update all no entries" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + query = from(p in Post, where: p.title == "4") + assert {0, nil} = TestRepo.update_all(query, set: [title: "x"]) + + assert %Post{title: "1"} = TestRepo.get(Post, id1) + assert %Post{title: "2"} = TestRepo.get(Post, id2) + assert %Post{title: "3"} = TestRepo.get(Post, id3) + end + + test "update all increment syntax" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1", visits: 0}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2", visits: 1}) + + # Positive + query = from p in Post, where: not is_nil(p.id), update: [inc: [visits: 2]] + assert {2, nil} = TestRepo.update_all(query, []) + + assert %Post{visits: 2} = TestRepo.get(Post, id1) + assert %Post{visits: 3} = TestRepo.get(Post, id2) + + # Negative + query = from p in Post, where: not is_nil(p.id), update: [inc: [visits: -1]] + assert {2, nil} = TestRepo.update_all(query, 
[]) + + assert %Post{visits: 1} = TestRepo.get(Post, id1) + assert %Post{visits: 2} = TestRepo.get(Post, id2) + end + + @tag :id_type + test "update all with casting and dumping on id type field" do + assert %Post{id: id1} = TestRepo.insert!(%Post{}) + assert {1, nil} = TestRepo.update_all(Post, set: [counter: to_string(id1)]) + assert %Post{counter: ^id1} = TestRepo.get(Post, id1) + end + + test "update all with casting and dumping" do + visits = 13 + datetime = ~N[2014-01-16 20:26:51] + assert %Post{id: id} = TestRepo.insert!(%Post{}) + + assert {1, nil} = TestRepo.update_all(Post, set: [visits: visits, inserted_at: datetime]) + assert %Post{visits: 13, inserted_at: ^datetime} = TestRepo.get(Post, id) + end + + test "delete all" do + assert %Post{} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{} = TestRepo.insert!(%Post{title: "3"}) + + assert {3, nil} = TestRepo.delete_all(Post) + assert [] = TestRepo.all(Post) + end + + @tag :invalid_prefix + test "delete all with invalid prefix" do + assert catch_error(TestRepo.delete_all(Post, prefix: "oops")) + end + + @tag :returning + test "delete all with returning with schema" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + assert {3, posts} = TestRepo.delete_all(select(Post, [p], p)) + + [p1, p2, p3] = Enum.sort_by(posts, & &1.id) + assert %Post{id: ^id1, title: "1"} = p1 + assert %Post{id: ^id2, title: "2"} = p2 + assert %Post{id: ^id3, title: "3"} = p3 + end + + @tag :returning + test "delete all with returning without schema" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + assert {3, posts} = TestRepo.delete_all(select("posts", [:id, :title])) + + [p1, p2, p3] = Enum.sort_by(posts, & &1.id) + assert p1 == %{id: id1, title: "1"} + assert p2 == %{id: id2, title: "2"} + assert p3 == %{id: id3, title: "3"} + end + + test "delete all with filter" do + assert %Post{} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{} = TestRepo.insert!(%Post{title: "3"}) + + query = from(p in Post, where: p.title == "1" or p.title == "2") + assert {2, nil} = TestRepo.delete_all(query) + assert [%Post{}] = TestRepo.all(Post) + end + + test "delete all no entries" do + assert %Post{id: id1} = TestRepo.insert!(%Post{title: "1"}) + assert %Post{id: id2} = TestRepo.insert!(%Post{title: "2"}) + assert %Post{id: id3} = TestRepo.insert!(%Post{title: "3"}) + + query = from(p in Post, where: p.title == "4") + assert {0, nil} = TestRepo.delete_all(query) + assert %Post{title: "1"} = TestRepo.get(Post, id1) + assert %Post{title: "2"} = TestRepo.get(Post, id2) + assert %Post{title: "3"} = TestRepo.get(Post, id3) + end + + test "virtual field" do + assert %Post{id: id} = TestRepo.insert!(%Post{title: "1"}) + assert TestRepo.get(Post, id).temp == "temp" + end + + ## Query syntax + + defmodule Foo do + defstruct [:title] + end + + describe "query select" do + test "expressions" do + %Post{} = TestRepo.insert!(%Post{title: "1", visits: 13}) + + assert [{"1", 13}] == + TestRepo.all(from p in Post, select: {p.title, p.visits}) + + assert [["1", 13]] == + TestRepo.all(from p in Post, select: [p.title, p.visits]) + + assert [%{:title => "1", 3 => 13, "visits" => 13}] == + 
TestRepo.all(from p in Post, select: %{ + :title => p.title, + "visits" => p.visits, + 3 => p.visits + }) + + assert [%{:title => "1", "1" => 13, "visits" => 13}] == + TestRepo.all(from p in Post, select: %{ + :title => p.title, + p.title => p.visits, + "visits" => p.visits + }) + + assert [%Foo{title: "1"}] == + TestRepo.all(from p in Post, select: %Foo{title: p.title}) + end + + test "map update" do + %Post{} = TestRepo.insert!(%Post{title: "1", visits: 13}) + + assert [%Post{:title => "new title", visits: 13}] = + TestRepo.all(from p in Post, select: %{p | title: "new title"}) + + assert [%Post{title: "new title", visits: 13}] = + TestRepo.all(from p in Post, select: %Post{p | title: "new title"}) + + assert_raise KeyError, fn -> + TestRepo.all(from p in Post, select: %{p | unknown: "new title"}) + end + + assert_raise BadMapError, fn -> + TestRepo.all(from p in Post, select: %{p.title | title: "new title"}) + end + + assert_raise BadStructError, fn -> + TestRepo.all(from p in Post, select: %Foo{p | title: p.title}) + end + end + + test "take with structs" do + %{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + %{id: pid3} = TestRepo.insert!(%Post{title: "3"}) + + [p1, p2, p3] = Post |> select([p], struct(p, [:title])) |> order_by([:title]) |> TestRepo.all + refute p1.id + assert p1.title == "1" + assert match?(%Post{}, p1) + refute p2.id + assert p2.title == "2" + assert match?(%Post{}, p2) + refute p3.id + assert p3.title == "3" + assert match?(%Post{}, p3) + + [p1, p2, p3] = Post |> select([:id]) |> order_by([:id]) |> TestRepo.all + assert %Post{id: ^pid1} = p1 + assert %Post{id: ^pid2} = p2 + assert %Post{id: ^pid3} = p3 + end + + test "take with maps" do + %{id: pid1} = TestRepo.insert!(%Post{title: "1"}) + %{id: pid2} = TestRepo.insert!(%Post{title: "2"}) + %{id: pid3} = TestRepo.insert!(%Post{title: "3"}) + + [p1, p2, p3] = "posts" |> select([p], map(p, [:title])) |> order_by([:title]) |> TestRepo.all + assert p1 == %{title: "1"} + assert p2 == %{title: "2"} + assert p3 == %{title: "3"} + + [p1, p2, p3] = "posts" |> select([:id]) |> order_by([:id]) |> TestRepo.all + assert p1 == %{id: pid1} + assert p2 == %{id: pid2} + assert p3 == %{id: pid3} + end + + test "take with preload assocs" do + %{id: pid} = TestRepo.insert!(%Post{title: "post"}) + TestRepo.insert!(%Comment{post_id: pid, text: "comment"}) + fields = [:id, :title, comments: [:text, :post_id]] + + [p] = Post |> preload(:comments) |> select([p], ^fields) |> TestRepo.all + assert %Post{title: "post"} = p + assert [%Comment{text: "comment"}] = p.comments + + [p] = Post |> preload(:comments) |> select([p], struct(p, ^fields)) |> TestRepo.all + assert %Post{title: "post"} = p + assert [%Comment{text: "comment"}] = p.comments + + [p] = Post |> preload(:comments) |> select([p], map(p, ^fields)) |> TestRepo.all + assert p == %{id: pid, title: "post", comments: [%{text: "comment", post_id: pid}]} + end + + test "take with nil preload assoc" do + %{id: cid} = TestRepo.insert!(%Comment{text: "comment"}) + fields = [:id, :text, post: [:title]] + + [c] = Comment |> preload(:post) |> select([c], ^fields) |> TestRepo.all + assert %Comment{id: ^cid, text: "comment", post: nil} = c + + [c] = Comment |> preload(:post) |> select([c], struct(c, ^fields)) |> TestRepo.all + assert %Comment{id: ^cid, text: "comment", post: nil} = c + + [c] = Comment |> preload(:post) |> select([c], map(c, ^fields)) |> TestRepo.all + assert c == %{id: cid, text: "comment", post: nil} + end + + test "take with 
join assocs" do + %{id: pid} = TestRepo.insert!(%Post{title: "post"}) + %{id: cid} = TestRepo.insert!(%Comment{post_id: pid, text: "comment"}) + fields = [:id, :title, comments: [:text, :post_id, :id]] + query = from p in Post, where: p.id == ^pid, join: c in assoc(p, :comments), preload: [comments: c] + + p = TestRepo.one(from q in query, select: ^fields) + assert %Post{title: "post"} = p + assert [%Comment{text: "comment"}] = p.comments + + p = TestRepo.one(from q in query, select: struct(q, ^fields)) + assert %Post{title: "post"} = p + assert [%Comment{text: "comment"}] = p.comments + + p = TestRepo.one(from q in query, select: map(q, ^fields)) + assert p == %{id: pid, title: "post", comments: [%{text: "comment", post_id: pid, id: cid}]} + end + + test "take with single nil column" do + %Post{} = TestRepo.insert!(%Post{title: "1", counter: nil}) + assert %{counter: nil} = + TestRepo.one(from p in Post, where: p.title == "1", select: [:counter]) + end + + test "take with join assocs and single nil column" do + %{id: post_id} = TestRepo.insert!(%Post{title: "1"}, counter: nil) + TestRepo.insert!(%Comment{post_id: post_id, text: "comment"}) + assert %{counter: nil} == + TestRepo.one(from p in Post, join: c in assoc(p, :comments), where: p.title == "1", select: map(p, [:counter])) + end + + test "field source" do + TestRepo.insert!(%Permalink{url: "url"}) + assert ["url"] = Permalink |> select([p], p.url) |> TestRepo.all() + assert [1] = Permalink |> select([p], count(p.url)) |> TestRepo.all() + end + + test "merge" do + date = Date.utc_today() + %Post{id: post_id} = TestRepo.insert!(%Post{title: "1", counter: nil, posted: date, public: false}) + + # Merge on source + assert [%Post{title: "2"}] = + Post |> select([p], merge(p, %{title: "2"})) |> TestRepo.all() + assert [%Post{title: "2"}] = + Post |> select([p], p) |> select_merge([p], %{title: "2"}) |> TestRepo.all() + + # Merge on struct + assert [%Post{title: "2"}] = + Post |> select([p], merge(%Post{title: p.title}, %{title: "2"})) |> TestRepo.all() + assert [%Post{title: "2"}] = + Post |> select([p], %Post{title: p.title}) |> select_merge([p], %{title: "2"}) |> TestRepo.all() + + # Merge on map + assert [%{title: "2"}] = + Post |> select([p], merge(%{title: p.title}, %{title: "2"})) |> TestRepo.all() + assert [%{title: "2"}] = + Post |> select([p], %{title: p.title}) |> select_merge([p], %{title: "2"}) |> TestRepo.all() + + # Merge on outer join with map + %Permalink{} = TestRepo.insert!(%Permalink{post_id: post_id, url: "Q", title: "Z"}) + + # left join record is present + assert [%{url: "Q", title: "1", posted: _date}] = + Permalink + |> join(:left, [l], p in Post, on: l.post_id == p.id) + |> select([l, p], merge(l, map(p, ^~w(title posted)a))) + |> TestRepo.all() + + assert [%{url: "Q", title: "1", posted: _date}] = + Permalink + |> join(:left, [l], p in Post, on: l.post_id == p.id) + |> select_merge([_l, p], map(p, ^~w(title posted)a)) + |> TestRepo.all() + + # left join record is not present + assert [%{url: "Q", title: "Z", posted: nil}] = + Permalink + |> join(:left, [l], p in Post, on: l.post_id == p.id and p.public == true) + |> select([l, p], merge(l, map(p, ^~w(title posted)a))) + |> TestRepo.all() + + assert [%{url: "Q", title: "Z", posted: nil}] = + Permalink + |> join(:left, [l], p in Post, on: l.post_id == p.id and p.public == true) + |> select_merge([_l, p], map(p, ^~w(title posted)a)) + |> TestRepo.all() + end + + test "merge with update on self" do + %Post{} = TestRepo.insert!(%Post{title: "1", counter: 1}) + + assert 
[%Post{title: "1", counter: 2}] = + Post |> select([p], merge(p, %{p | counter: 2})) |> TestRepo.all() + assert [%Post{title: "1", counter: 2}] = + Post |> select([p], p) |> select_merge([p], %{p | counter: 2}) |> TestRepo.all() + end + + test "merge within subquery" do + %Post{} = TestRepo.insert!(%Post{title: "1", counter: 1}) + + subquery = + Post + |> select_merge([p], %{p | counter: 2}) + |> subquery() + + assert [%Post{title: "1", counter: 2}] = TestRepo.all(subquery) + end + end + + test "query count distinct" do + TestRepo.insert!(%Post{title: "1"}) + TestRepo.insert!(%Post{title: "1"}) + TestRepo.insert!(%Post{title: "2"}) + + assert [3] == Post |> select([p], count(p.title)) |> TestRepo.all + assert [2] == Post |> select([p], count(p.title, :distinct)) |> TestRepo.all + end + + test "query where interpolation" do + post1 = TestRepo.insert!(%Post{title: "hello"}) + post2 = TestRepo.insert!(%Post{title: "goodbye"}) + + assert [post1, post2] == Post |> where([], []) |> TestRepo.all |> Enum.sort_by(& &1.id) + assert [post1] == Post |> where([], [title: "hello"]) |> TestRepo.all + assert [post1] == Post |> where([], [title: "hello", id: ^post1.id]) |> TestRepo.all + + params0 = [] + params1 = [title: "hello"] + params2 = [title: "hello", id: post1.id] + assert [post1, post2] == (from Post, where: ^params0) |> TestRepo.all |> Enum.sort_by(& &1.id) + assert [post1] == (from Post, where: ^params1) |> TestRepo.all + assert [post1] == (from Post, where: ^params2) |> TestRepo.all + + post3 = TestRepo.insert!(%Post{title: "goodbye", uuid: nil}) + params3 = [title: "goodbye", uuid: post3.uuid] + assert [post3] == (from Post, where: ^params3) |> TestRepo.all + end + + describe "upsert via insert" do + @describetag :upsert + + test "on conflict raise" do + {:ok, inserted} = TestRepo.insert(%Post{title: "first"}, on_conflict: :raise) + assert catch_error(TestRepo.insert(%Post{id: inserted.id, title: "second"}, on_conflict: :raise)) + end + + test "on conflict ignore" do + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: :nothing) + assert inserted.id + assert inserted.__meta__.state == :loaded + + {:ok, not_inserted} = TestRepo.insert(post, on_conflict: :nothing) + assert not_inserted.id == nil + assert not_inserted.__meta__.state == :loaded + end + + @tag :with_conflict_target + test "on conflict and associations" do + on_conflict = [set: [title: "second"]] + post = %Post{uuid: Ecto.UUID.generate(), + title: "first", comments: [%Comment{}]} + {:ok, inserted} = TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:uuid]) + assert inserted.id + end + + @tag :with_conflict_target + test "on conflict with inc" do + uuid = "6fa459ea-ee8a-3ca4-894e-db77e160355e" + post = %Post{title: "first", uuid: uuid} + {:ok, _} = TestRepo.insert(post) + post = %{title: "upsert", uuid: uuid} + TestRepo.insert_all(Post, [post], on_conflict: [inc: [visits: 1]], conflict_target: :uuid) + end + + @tag :with_conflict_target + test "on conflict ignore and conflict target" do + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: :nothing, conflict_target: [:uuid]) + assert inserted.id + + # Error on non-conflict target + assert catch_error(TestRepo.insert(post, on_conflict: :nothing, conflict_target: [:id])) + + # Error on conflict target + {:ok, not_inserted} = TestRepo.insert(post, on_conflict: :nothing, conflict_target: [:uuid]) + assert not_inserted.id == nil + end + + @tag 
:without_conflict_target + test "on conflict keyword list" do + on_conflict = [set: [title: "second"]] + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: on_conflict) + assert inserted.id + + {:ok, updated} = TestRepo.insert(post, on_conflict: on_conflict) + assert updated.id == inserted.id + assert updated.title != "second" + assert TestRepo.get!(Post, inserted.id).title == "second" + end + + @tag :with_conflict_target + test "on conflict keyword list and conflict target" do + on_conflict = [set: [title: "second"]] + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:uuid]) + assert inserted.id + + # Error on non-conflict target + assert catch_error(TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:id])) + + {:ok, updated} = TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:uuid]) + assert updated.id == inserted.id + assert updated.title != "second" + assert TestRepo.get!(Post, inserted.id).title == "second" + end + + @tag :returning + @tag :with_conflict_target + test "on conflict keyword list and conflict target and returning" do + {:ok, c1} = TestRepo.insert(%Post{}) + {:ok, c2} = TestRepo.insert(%Post{id: c1.id}, on_conflict: [set: [id: c1.id]], conflict_target: [:id], returning: [:id, :uuid]) + {:ok, c3} = TestRepo.insert(%Post{id: c1.id}, on_conflict: [set: [id: c1.id]], conflict_target: [:id], returning: true) + {:ok, c4} = TestRepo.insert(%Post{id: c1.id}, on_conflict: [set: [id: c1.id]], conflict_target: [:id], returning: false) + + assert c2.uuid == c1.uuid + assert c3.uuid == c1.uuid + assert c4.uuid != c1.uuid + end + + @tag :returning + @tag :with_conflict_target + test "on conflict keyword list and conflict target and returning and field source" do + TestRepo.insert!(%Permalink{url: "old"}) + {:ok, c1} = TestRepo.insert(%Permalink{url: "old"}, + on_conflict: [set: [url: "new1"]], + conflict_target: [:url], + returning: [:url]) + + TestRepo.insert!(%Permalink{url: "old"}) + {:ok, c2} = TestRepo.insert(%Permalink{url: "old"}, + on_conflict: [set: [url: "new2"]], + conflict_target: [:url], + returning: true) + + assert c1.url == "new1" + assert c2.url == "new2" + end + + @tag :returning + @tag :with_conflict_target + test "on conflict ignore and returning" do + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: :nothing, conflict_target: [:uuid]) + assert inserted.id + + {:ok, not_inserted} = TestRepo.insert(post, on_conflict: :nothing, conflict_target: [:uuid], returning: true) + assert not_inserted.id == nil + end + + @tag :without_conflict_target + test "on conflict query" do + on_conflict = from Post, update: [set: [title: "second"]] + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: on_conflict) + assert inserted.id + + {:ok, updated} = TestRepo.insert(post, on_conflict: on_conflict) + assert updated.id == inserted.id + assert updated.title != "second" + assert TestRepo.get!(Post, inserted.id).title == "second" + end + + @tag :with_conflict_target + test "on conflict query and conflict target" do + on_conflict = from Post, update: [set: [title: "second"]] + post = %Post{title: "first", uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:uuid]) + assert inserted.id + + # Error on non-conflict target + 
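+      # (presumably the duplicate insert violates the unique index on :uuid, which an
+      # ON CONFLICT target of [:id] does not cover, so the conflict goes unhandled and raises)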
assert catch_error(TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:id])) + + {:ok, updated} = TestRepo.insert(post, on_conflict: on_conflict, conflict_target: [:uuid]) + assert updated.id == inserted.id + assert updated.title != "second" + assert TestRepo.get!(Post, inserted.id).title == "second" + end + + @tag :with_conflict_target + test "on conflict query having condition" do + post = %Post{title: "first", counter: 1, uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post) + + on_conflict = from Post, where: [counter: 2], update: [set: [title: "second"]] + + insert_options = [ + on_conflict: on_conflict, + conflict_target: [:uuid], + stale_error_field: :counter + ] + + assert {:error, changeset} = TestRepo.insert(post, insert_options) + assert changeset.errors == [counter: {"is stale", [stale: true]}] + + assert TestRepo.get!(Post, inserted.id).title == "first" + end + + @tag :without_conflict_target + test "on conflict replace_all" do + post = %Post{title: "first", visits: 13, uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: :replace_all) + assert inserted.id + + post = %Post{title: "updated", visits: 17, uuid: post.uuid} + post = TestRepo.insert!(post, on_conflict: :replace_all) + assert post.id != inserted.id + assert post.title == "updated" + assert post.visits == 17 + + assert TestRepo.all(from p in Post, select: {p.id, p.title, p.visits}) == + [{post.id, "updated", 17}] + assert TestRepo.all(from p in Post, select: count(p.id)) == [1] + end + + @tag :with_conflict_target + test "on conflict replace_all and conflict target" do + post = %Post{title: "first", visits: 13, uuid: Ecto.UUID.generate()} + {:ok, inserted} = TestRepo.insert(post, on_conflict: :replace_all, conflict_target: :uuid) + assert inserted.id + + post = %Post{title: "updated", visits: 17, uuid: post.uuid} + post = TestRepo.insert!(post, on_conflict: :replace_all, conflict_target: :uuid) + assert post.id != inserted.id + assert post.title == "updated" + assert post.visits == 17 + + assert TestRepo.all(from p in Post, select: {p.id, p.title, p.visits}) == + [{post.id, "updated", 17}] + assert TestRepo.all(from p in Post, select: count(p.id)) == [1] + end + end + + describe "upsert via insert_all" do + @describetag :upsert_all + + test "on conflict raise" do + post = [title: "first", uuid: Ecto.UUID.generate()] + {1, nil} = TestRepo.insert_all(Post, [post], on_conflict: :raise) + assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: :raise)) + end + + test "on conflict ignore" do + post = [title: "first", uuid: Ecto.UUID.generate()] + assert TestRepo.insert_all(Post, [post], on_conflict: :nothing) == {1, nil} + + # PG returns 0, MySQL returns 1 + {entries, nil} = TestRepo.insert_all(Post, [post], on_conflict: :nothing) + assert entries == 0 or entries == 1 + + assert length(TestRepo.all(Post)) == 1 + end + + @tag :with_conflict_target + test "on conflict ignore and conflict target" do + post = [title: "first", uuid: Ecto.UUID.generate()] + assert TestRepo.insert_all(Post, [post], on_conflict: :nothing, conflict_target: [:uuid]) == + {1, nil} + + # Error on non-conflict target + assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: :nothing, conflict_target: [:id])) + + # Error on conflict target + assert TestRepo.insert_all(Post, [post], on_conflict: :nothing, conflict_target: [:uuid]) == + {0, nil} + end + + @tag :with_conflict_target + test "on conflict keyword list and conflict target" do + on_conflict = [set: [title: 
"second"]] + post = [title: "first", uuid: Ecto.UUID.generate()] + {1, nil} = TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) + + # Error on non-conflict target + assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:id])) + + # Error on conflict target + assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + assert TestRepo.all(from p in Post, select: p.title) == ["second"] + end + + @tag :with_conflict_target + @tag :returning + test "on conflict keyword list and conflict target and returning and source field" do + on_conflict = [set: [url: "new"]] + permalink = [url: "old"] + + assert {1, [%Permalink{url: "old"}]} = + TestRepo.insert_all(Permalink, [permalink], + on_conflict: on_conflict, conflict_target: [:url], returning: [:url]) + + assert {1, [%Permalink{url: "new"}]} = + TestRepo.insert_all(Permalink, [permalink], + on_conflict: on_conflict, conflict_target: [:url], returning: [:url]) + end + + @tag :with_conflict_target + test "on conflict query and conflict target" do + on_conflict = from Post, update: [set: [title: "second"]] + post = [title: "first", uuid: Ecto.UUID.generate()] + assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + + # Error on non-conflict target + assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:id])) + + # Error on conflict target + assert TestRepo.insert_all(Post, [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + assert TestRepo.all(from p in Post, select: p.title) == ["second"] + end + + @tag :returning + @tag :with_conflict_target + test "on conflict query and conflict target and returning" do + on_conflict = from Post, update: [set: [title: "second"]] + post = [title: "first", uuid: Ecto.UUID.generate()] + {1, [%{id: id}]} = TestRepo.insert_all(Post, [post], on_conflict: on_conflict, + conflict_target: [:uuid], returning: [:id]) + + # Error on non-conflict target + assert catch_error(TestRepo.insert_all(Post, [post], on_conflict: on_conflict, + conflict_target: [:id], returning: [:id])) + + # Error on conflict target + {1, [%Post{id: ^id, title: "second"}]} = + TestRepo.insert_all(Post, [post], on_conflict: on_conflict, + conflict_target: [:uuid], returning: [:id, :title]) + end + + @tag :with_conflict_target + test "source (without an Ecto schema) on conflict query and conflict target" do + on_conflict = [set: [title: "second"]] + {:ok, uuid} = Ecto.UUID.dump(Ecto.UUID.generate()) + post = [title: "first", uuid: uuid] + assert TestRepo.insert_all("posts", [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + + # Error on non-conflict target + assert catch_error(TestRepo.insert_all("posts", [post], on_conflict: on_conflict, conflict_target: [:id])) + + # Error on conflict target + assert TestRepo.insert_all("posts", [post], on_conflict: on_conflict, conflict_target: [:uuid]) == + {1, nil} + assert TestRepo.all(from p in Post, select: p.title) == ["second"] + end + + @tag :without_conflict_target + test "on conflict replace_all" do + post_first = %Post{title: "first", public: true, uuid: Ecto.UUID.generate()} + post_second = %Post{title: "second", public: false, uuid: Ecto.UUID.generate()} + + {:ok, post_first} = TestRepo.insert(post_first, on_conflict: :replace_all) + {:ok, post_second} = TestRepo.insert(post_second, on_conflict: :replace_all) + + assert post_first.id + 
assert post_second.id + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + # Multiple record change value: note IDS are also replaced + changes = [%{id: post_first.id + 2, title: "first_updated", + visits: 1, uuid: post_first.uuid}, + %{id: post_second.id + 2, title: "second_updated", + visits: 2, uuid: post_second.uuid}] + + TestRepo.insert_all(Post, changes, on_conflict: :replace_all) + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + updated_first = TestRepo.get(Post, post_first.id + 2) + assert updated_first.title == "first_updated" + assert updated_first.visits == 1 + + updated_second = TestRepo.get(Post, post_second.id + 2) + assert updated_second.title == "second_updated" + assert updated_second.visits == 2 + end + + @tag :with_conflict_target + test "on conflict replace_all and conflict_target" do + post_first = %Post{title: "first", public: true, uuid: Ecto.UUID.generate()} + post_second = %Post{title: "second", public: false, uuid: Ecto.UUID.generate()} + + {:ok, post_first} = TestRepo.insert(post_first, on_conflict: :replace_all, conflict_target: :uuid) + {:ok, post_second} = TestRepo.insert(post_second, on_conflict: :replace_all, conflict_target: :uuid) + + assert post_first.id + assert post_second.id + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + # Multiple record change value: note IDS are also replaced + changes = [%{id: post_second.id + 1, title: "first_updated", + visits: 1, uuid: post_first.uuid}, + %{id: post_second.id + 2, title: "second_updated", + visits: 2, uuid: post_second.uuid}] + + TestRepo.insert_all(Post, changes, on_conflict: :replace_all, conflict_target: :uuid) + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + updated_first = TestRepo.get(Post, post_second.id + 1) + assert updated_first.title == "first_updated" + assert updated_first.visits == 1 + + updated_second = TestRepo.get(Post, post_second.id + 2) + assert updated_second.title == "second_updated" + assert updated_second.visits == 2 + end + + @tag :without_conflict_target + test "on conflict replace_all_except" do + post_first = %Post{title: "first", public: true, uuid: Ecto.UUID.generate()} + post_second = %Post{title: "second", public: false, uuid: Ecto.UUID.generate()} + + {:ok, post_first} = TestRepo.insert(post_first, on_conflict: {:replace_all_except, [:id]}) + {:ok, post_second} = TestRepo.insert(post_second, on_conflict: {:replace_all_except, [:id]}) + + assert post_first.id + assert post_second.id + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + # Multiple record change value: note IDS are not replaced + changes = [%{id: post_first.id + 2, title: "first_updated", + visits: 1, uuid: post_first.uuid}, + %{id: post_second.id + 2, title: "second_updated", + visits: 2, uuid: post_second.uuid}] + + TestRepo.insert_all(Post, changes, on_conflict: {:replace_all_except, [:id]}) + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + updated_first = TestRepo.get(Post, post_first.id) + assert updated_first.title == "first_updated" + assert updated_first.visits == 1 + + updated_second = TestRepo.get(Post, post_second.id) + assert updated_second.title == "second_updated" + assert updated_second.visits == 2 + end + + @tag :with_conflict_target + test "on conflict replace_all_except and conflict_target" do + post_first = %Post{title: "first", public: true, uuid: Ecto.UUID.generate()} + post_second = %Post{title: "second", public: false, uuid: Ecto.UUID.generate()} + + {:ok, post_first} = 
TestRepo.insert(post_first, on_conflict: {:replace_all_except, [:id]}, conflict_target: :uuid) + {:ok, post_second} = TestRepo.insert(post_second, on_conflict: {:replace_all_except, [:id]}, conflict_target: :uuid) + + assert post_first.id + assert post_second.id + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + # Multiple record change value: note IDS are not replaced + changes = [%{id: post_first.id + 2, title: "first_updated", + visits: 1, uuid: post_first.uuid}, + %{id: post_second.id + 2, title: "second_updated", + visits: 2, uuid: post_second.uuid}] + + TestRepo.insert_all(Post, changes, on_conflict: {:replace_all_except, [:id]}, conflict_target: :uuid) + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + updated_first = TestRepo.get(Post, post_first.id) + assert updated_first.title == "first_updated" + assert updated_first.visits == 1 + + updated_second = TestRepo.get(Post, post_second.id) + assert updated_second.title == "second_updated" + assert updated_second.visits == 2 + end + + @tag :with_conflict_target + test "on conflict replace and conflict_target" do + post_first = %Post{title: "first", visits: 10, public: true, uuid: Ecto.UUID.generate()} + post_second = %Post{title: "second", visits: 20, public: false, uuid: Ecto.UUID.generate()} + + {:ok, post_first} = TestRepo.insert(post_first, on_conflict: {:replace, [:title, :visits]}, conflict_target: :uuid) + {:ok, post_second} = TestRepo.insert(post_second, on_conflict: {:replace, [:title, :visits]}, conflict_target: :uuid) + + assert post_first.id + assert post_second.id + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + # Multiple record change value: note `public` field is not changed + changes = [%{id: post_first.id, title: "first_updated", visits: 11, public: false, uuid: post_first.uuid}, + %{id: post_second.id, title: "second_updated", visits: 21, public: true, uuid: post_second.uuid}] + + TestRepo.insert_all(Post, changes, on_conflict: {:replace, [:title, :visits]}, conflict_target: :uuid) + assert TestRepo.all(from p in Post, select: count(p.id)) == [2] + + updated_first = TestRepo.get(Post, post_first.id) + assert updated_first.title == "first_updated" + assert updated_first.visits == 11 + assert updated_first.public == true + + updated_second = TestRepo.get(Post, post_second.id) + assert updated_second.title == "second_updated" + assert updated_second.visits == 21 + assert updated_second.public == false + end + end +end diff --git a/deps/ecto/integration_test/cases/type.exs b/deps/ecto/integration_test/cases/type.exs new file mode 100644 index 0000000..8eea0c5 --- /dev/null +++ b/deps/ecto/integration_test/cases/type.exs @@ -0,0 +1,527 @@ +defmodule Ecto.Integration.TypeTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.{Custom, Item, ItemColor, Order, Post, User, Tag, Usec} + alias Ecto.Integration.TestRepo + import Ecto.Query + + @parameterized_type Ecto.ParameterizedType.init(Ecto.Enum, values: [:a, :b]) + + test "primitive types" do + integer = 1 + float = 0.1 + blob = <<0, 1>> + uuid = "00010203-0405-4607-8809-0a0b0c0d0e0f" + datetime = ~N[2014-01-16 20:26:51] + + TestRepo.insert!(%Post{blob: blob, public: true, visits: integer, uuid: uuid, + counter: integer, inserted_at: datetime, intensity: float}) + + # nil + assert [nil] = TestRepo.all(from Post, select: nil) + + # ID + assert [1] = TestRepo.all(from p in Post, where: p.counter == ^integer, select: p.counter) + + # 
Integers + assert [1] = TestRepo.all(from p in Post, where: p.visits == ^integer, select: p.visits) + assert [1] = TestRepo.all(from p in Post, where: p.visits == 1, select: p.visits) + assert [3] = TestRepo.all(from p in Post, select: p.visits + 2) + + # Floats + assert [0.1] = TestRepo.all(from p in Post, where: p.intensity == ^float, select: p.intensity) + assert [0.1] = TestRepo.all(from p in Post, where: p.intensity == 0.1, select: p.intensity) + assert [1500.0] = TestRepo.all(from p in Post, select: 1500.0) + assert [0.5] = TestRepo.all(from p in Post, select: p.intensity * 5) + + # Booleans + assert [true] = TestRepo.all(from p in Post, where: p.public == ^true, select: p.public) + assert [true] = TestRepo.all(from p in Post, where: p.public == true, select: p.public) + + # Binaries + assert [^blob] = TestRepo.all(from p in Post, where: p.blob == <<0, 1>>, select: p.blob) + assert [^blob] = TestRepo.all(from p in Post, where: p.blob == ^blob, select: p.blob) + + # UUID + assert [^uuid] = TestRepo.all(from p in Post, where: p.uuid == ^uuid, select: p.uuid) + + # NaiveDatetime + assert [^datetime] = TestRepo.all(from p in Post, where: p.inserted_at == ^datetime, select: p.inserted_at) + + # Datetime + datetime = DateTime.from_unix!(System.os_time(:second), :second) + TestRepo.insert!(%User{inserted_at: datetime}) + assert [^datetime] = TestRepo.all(from u in User, where: u.inserted_at == ^datetime, select: u.inserted_at) + + # usec + naive_datetime = ~N[2014-01-16 20:26:51.000000] + datetime = DateTime.from_naive!(~N[2014-01-16 20:26:51.000000], "Etc/UTC") + TestRepo.insert!(%Usec{naive_datetime_usec: naive_datetime, utc_datetime_usec: datetime}) + assert [^naive_datetime] = TestRepo.all(from u in Usec, where: u.naive_datetime_usec == ^naive_datetime, select: u.naive_datetime_usec) + assert [^datetime] = TestRepo.all(from u in Usec, where: u.utc_datetime_usec == ^datetime, select: u.utc_datetime_usec) + + naive_datetime = ~N[2014-01-16 20:26:51.123000] + datetime = DateTime.from_naive!(~N[2014-01-16 20:26:51.123000], "Etc/UTC") + TestRepo.insert!(%Usec{naive_datetime_usec: naive_datetime, utc_datetime_usec: datetime}) + assert [^naive_datetime] = TestRepo.all(from u in Usec, where: u.naive_datetime_usec == ^naive_datetime, select: u.naive_datetime_usec) + assert [^datetime] = TestRepo.all(from u in Usec, where: u.utc_datetime_usec == ^datetime, select: u.utc_datetime_usec) + end + + @tag :select_not + test "primitive types boolean negate" do + TestRepo.insert!(%Post{public: true}) + assert [false] = TestRepo.all(from p in Post, where: p.public == true, select: not p.public) + assert [true] = TestRepo.all(from p in Post, where: p.public == true, select: not not p.public) + end + + test "aggregate types" do + datetime = ~N[2014-01-16 20:26:51] + TestRepo.insert!(%Post{inserted_at: datetime}) + query = from p in Post, select: max(p.inserted_at) + assert [^datetime] = TestRepo.all(query) + end + + # We don't specifically assert on the tuple content because + # some databases would return integer, others decimal. + # The important is that the type has been invoked for wrapping. 
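+  # (WrappedInteger is a custom Ecto.Type from the test support's types.exs; its load/1
+  # presumably re-wraps the raw database value as {:int, n}, which the match below checks.)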
+ test "aggregate custom types" do + TestRepo.insert!(%Post{wrapped_visits: {:int, 10}}) + query = from p in Post, select: sum(p.wrapped_visits) + assert [{:int, _}] = TestRepo.all(query) + end + + @tag :aggregate_filters + test "aggregate filter types" do + datetime = ~N[2014-01-16 20:26:51] + TestRepo.insert!(%Post{inserted_at: datetime}) + query = from p in Post, select: filter(max(p.inserted_at), p.public == ^true) + assert [^datetime] = TestRepo.all(query) + end + + test "coalesce text type when default" do + TestRepo.insert!(%Post{blob: nil}) + blob = <<0, 1>> + query = from p in Post, select: coalesce(p.blob, ^blob) + assert [^blob] = TestRepo.all(query) + end + + test "coalesce text type when value" do + blob = <<0, 2>> + default_blob = <<0, 1>> + TestRepo.insert!(%Post{blob: blob}) + query = from p in Post, select: coalesce(p.blob, ^default_blob) + assert [^blob] = TestRepo.all(query) + end + + test "tagged types" do + TestRepo.insert!(%Post{visits: 12}) + + # Numbers + assert [1] = TestRepo.all(from Post, select: type(^"1", :integer)) + assert [1.0] = TestRepo.all(from Post, select: type(^1.0, :float)) + assert [1] = TestRepo.all(from p in Post, select: type(^"1", p.visits)) + assert [1.0] = TestRepo.all(from p in Post, select: type(^"1", p.intensity)) + + # Custom wrappers + assert [1] = TestRepo.all(from Post, select: type(^"1", CustomPermalink)) + + # Custom types + uuid = Ecto.UUID.generate() + assert [^uuid] = TestRepo.all(from Post, select: type(^uuid, Ecto.UUID)) + + # Parameterized types + assert [:a] = TestRepo.all(from Post, select: type(^"a", ^@parameterized_type)) + + # Math operations + assert [4] = TestRepo.all(from Post, select: type(2 + ^"2", :integer)) + assert [4.0] = TestRepo.all(from Post, select: type(2.0 + ^"2", :float)) + assert [4] = TestRepo.all(from p in Post, select: type(2 + ^"2", p.visits)) + assert [4.0] = TestRepo.all(from p in Post, select: type(2.0 + ^"2", p.intensity)) + + # Comparison expression + assert [12] = TestRepo.all(from p in Post, select: type(coalesce(p.visits, 0), :integer)) + assert [1.0] = TestRepo.all(from p in Post, select: type(coalesce(p.intensity, 1.0), :float)) + end + + test "binary id type" do + assert %Custom{} = custom = TestRepo.insert!(%Custom{}) + bid = custom.bid + assert [^bid] = TestRepo.all(from c in Custom, select: c.bid) + assert [^bid] = TestRepo.all(from c in Custom, select: type(^bid, :binary_id)) + end + + @tag :like_match_blob + test "text type as blob" do + assert %Post{} = post = TestRepo.insert!(%Post{blob: <<0, 1, 2>>}) + id = post.id + assert post.blob == <<0, 1, 2>> + assert [^id] = TestRepo.all(from p in Post, where: like(p.blob, ^<<0, 1, 2>>), select: p.id) + end + + @tag :like_match_blob + @tag :text_type_as_string + test "text type as string" do + assert %Post{} = post = TestRepo.insert!(%Post{blob: "hello"}) + id = post.id + assert post.blob == "hello" + assert [^id] = TestRepo.all(from p in Post, where: like(p.blob, ^"hello"), select: p.id) + end + + @tag :array_type + test "array type" do + ints = [1, 2, 3] + tag = TestRepo.insert!(%Tag{ints: ints}) + + assert TestRepo.all(from t in Tag, where: t.ints == ^[], select: t.ints) == [] + assert TestRepo.all(from t in Tag, where: t.ints == ^[1, 2, 3], select: t.ints) == [ints] + + # Both sides interpolation + assert TestRepo.all(from t in Tag, where: ^"b" in ^["a", "b", "c"], select: t.ints) == [ints] + assert TestRepo.all(from t in Tag, where: ^"b" in [^"a", ^"b", ^"c"], select: t.ints) == [ints] + + # Querying + assert TestRepo.all(from t in Tag, 
where: t.ints == [1, 2, 3], select: t.ints) == [ints] + assert TestRepo.all(from t in Tag, where: 0 in t.ints, select: t.ints) == [] + assert TestRepo.all(from t in Tag, where: 1 in t.ints, select: t.ints) == [ints] + + # Update + tag = TestRepo.update!(Ecto.Changeset.change tag, ints: nil) + assert TestRepo.get!(Tag, tag.id).ints == nil + + tag = TestRepo.update!(Ecto.Changeset.change tag, ints: [3, 2, 1]) + assert TestRepo.get!(Tag, tag.id).ints == [3, 2, 1] + + # Update all + {1, _} = TestRepo.update_all(Tag, push: [ints: 0]) + assert TestRepo.get!(Tag, tag.id).ints == [3, 2, 1, 0] + + {1, _} = TestRepo.update_all(Tag, pull: [ints: 2]) + assert TestRepo.get!(Tag, tag.id).ints == [3, 1, 0] + + {1, _} = TestRepo.update_all(Tag, set: [ints: nil]) + assert TestRepo.get!(Tag, tag.id).ints == nil + end + + @tag :array_type + test "array type with custom types" do + uuids = ["51fcfbdd-ad60-4ccb-8bf9-47aabd66d075"] + TestRepo.insert!(%Tag{uuids: ["51fcfbdd-ad60-4ccb-8bf9-47aabd66d075"]}) + + assert TestRepo.all(from t in Tag, where: t.uuids == ^[], select: t.uuids) == [] + assert TestRepo.all(from t in Tag, where: t.uuids == ^["51fcfbdd-ad60-4ccb-8bf9-47aabd66d075"], + select: t.uuids) == [uuids] + + {1, _} = TestRepo.update_all(Tag, set: [uuids: nil]) + assert TestRepo.all(from t in Tag, select: t.uuids) == [nil] + end + + @tag :array_type + test "array type with nil in array" do + tag = TestRepo.insert!(%Tag{ints: [1, nil, 3]}) + assert tag.ints == [1, nil, 3] + end + + @tag :map_type + test "untyped map" do + post1 = TestRepo.insert!(%Post{meta: %{"foo" => "bar", "baz" => "bat"}}) + post2 = TestRepo.insert!(%Post{meta: %{foo: "bar", baz: "bat"}}) + + assert TestRepo.all(from p in Post, where: p.id == ^post1.id, select: p.meta) == + [%{"foo" => "bar", "baz" => "bat"}] + assert TestRepo.all(from p in Post, where: p.id == ^post2.id, select: p.meta) == + [%{"foo" => "bar", "baz" => "bat"}] + end + + @tag :map_type + test "typed string map" do + post1 = TestRepo.insert!(%Post{links: %{"foo" => "http://foo.com", "bar" => "http://bar.com"}}) + post2 = TestRepo.insert!(%Post{links: %{foo: "http://foo.com", bar: "http://bar.com"}}) + + assert TestRepo.all(from p in Post, where: p.id == ^post1.id, select: p.links) == + [%{"foo" => "http://foo.com", "bar" => "http://bar.com"}] + assert TestRepo.all(from p in Post, where: p.id == ^post2.id, select: p.links) == + [%{"foo" => "http://foo.com", "bar" => "http://bar.com"}] + end + + @tag :map_type + test "typed float map" do + post = TestRepo.insert!(%Post{intensities: %{"foo" => 1.0, "bar" => 416500.0}}) + + # Note we are using === since we want to check integer vs float + assert TestRepo.all(from p in Post, where: p.id == ^post.id, select: p.intensities) === + [%{"foo" => 1.0, "bar" => 416500.0}] + end + + @tag :map_type + test "map type on update" do + post = TestRepo.insert!(%Post{meta: %{"world" => "hello"}}) + assert TestRepo.get!(Post, post.id).meta == %{"world" => "hello"} + + post = TestRepo.update!(Ecto.Changeset.change post, meta: %{hello: "world"}) + assert TestRepo.get!(Post, post.id).meta == %{"hello" => "world"} + + query = from(p in Post, where: p.id == ^post.id) + TestRepo.update_all(query, set: [meta: %{world: "hello"}]) + assert TestRepo.get!(Post, post.id).meta == %{"world" => "hello"} + end + + @tag :map_type + test "embeds one" do + item = %Item{price: 123, valid_at: ~D[2014-01-16]} + + order = + %Order{} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_embed(:item, item) + |> TestRepo.insert!() + + dbitem = TestRepo.get!(Order, 
order.id).item + assert item.reference == dbitem.reference + assert item.price == dbitem.price + assert item.valid_at == dbitem.valid_at + assert dbitem.id + + [dbitem] = TestRepo.all(from o in Order, select: o.item) + assert item.reference == dbitem.reference + assert item.price == dbitem.price + assert item.valid_at == dbitem.valid_at + assert dbitem.id + + {1, _} = TestRepo.update_all(Order, set: [item: %{dbitem | price: 456}]) + assert TestRepo.get!(Order, order.id).item.price == 456 + end + + @tag :map_type + @tag :json_extract_path + test "json_extract_path with primitive values" do + order = %Order{meta: + %{ + :id => 123, + :time => ~T[09:00:00], + "code" => "good", + "'single quoted'" => "bar", + "\"double quoted\"" => "baz", + "enabled" => true, + "extra" => [%{"enabled" => false}] + } + } + + order = TestRepo.insert!(order) + + assert TestRepo.one(from o in Order, select: o.meta["id"]) == 123 + assert TestRepo.one(from o in Order, select: o.meta["bad"]) == nil + assert TestRepo.one(from o in Order, select: o.meta["bad"]["bad"]) == nil + + field = "id" + assert TestRepo.one(from o in Order, select: o.meta[^field]) == 123 + assert TestRepo.one(from o in Order, select: o.meta["time"]) == "09:00:00" + assert TestRepo.one(from o in Order, select: o.meta["'single quoted'"]) == "bar" + assert TestRepo.one(from o in Order, select: o.meta["';"]) == nil + assert TestRepo.one(from o in Order, select: o.meta["\"double quoted\""]) == "baz" + assert TestRepo.one(from o in Order, select: o.meta["enabled"]) == true + assert TestRepo.one(from o in Order, select: o.meta["extra"][0]["enabled"]) == false + + # where + assert TestRepo.one(from o in Order, where: o.meta["id"] == 123, select: o.id) == order.id + assert TestRepo.one(from o in Order, where: o.meta["id"] == 456, select: o.id) == nil + assert TestRepo.one(from o in Order, where: o.meta["code"] == "good", select: o.id) == order.id + assert TestRepo.one(from o in Order, where: o.meta["code"] == "bad", select: o.id) == nil + assert TestRepo.one(from o in Order, where: o.meta["enabled"] == true, select: o.id) == order.id + assert TestRepo.one(from o in Order, where: o.meta["extra"][0]["enabled"] == false, select: o.id) == order.id + end + + @tag :map_type + @tag :json_extract_path + test "json_extract_path with arrays and objects" do + order = %Order{meta: %{tags: [%{name: "red"}, %{name: "green"}]}} + order = TestRepo.insert!(order) + + assert TestRepo.one(from o in Order, select: o.meta["tags"][0]["name"]) == "red" + assert TestRepo.one(from o in Order, select: o.meta["tags"][99]["name"]) == nil + + index = 1 + assert TestRepo.one(from o in Order, select: o.meta["tags"][^index]["name"]) == "green" + + # where + assert TestRepo.one(from o in Order, where: o.meta["tags"][0]["name"] == "red", select: o.id) == order.id + assert TestRepo.one(from o in Order, where: o.meta["tags"][0]["name"] == "blue", select: o.id) == nil + assert TestRepo.one(from o in Order, where: o.meta["tags"][99]["name"] == "red", select: o.id) == nil + end + + @tag :map_type + @tag :json_extract_path + test "json_extract_path with embeds" do + order = %Order{items: [%{valid_at: ~D[2020-01-01]}]} + TestRepo.insert!(order) + + assert TestRepo.one(from o in Order, select: o.items[0]["valid_at"]) == "2020-01-01" + end + + @tag :map_type + @tag :map_type_schemaless + test "embeds one with custom type" do + item = %Item{price: 123, reference: "PREFIX-EXAMPLE"} + + order = + %Order{} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_embed(:item, item) + |> TestRepo.insert!() + 
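+    # PrefixedString (defined in the test support's types.exs) presumably strips the
+    # "PREFIX-" on dump and re-adds it on load, which is why the schema read below sees
+    # "PREFIX-EXAMPLE" while the raw "orders" select only returns "EXAMPLE".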
+ dbitem = TestRepo.get!(Order, order.id).item + assert dbitem.reference == "PREFIX-EXAMPLE" + assert [%{"reference" => "EXAMPLE"}] = TestRepo.all(from o in "orders", select: o.item) + end + + @tag :map_type + test "empty embeds one" do + order = TestRepo.insert!(%Order{}) + assert order.item == nil + assert TestRepo.get!(Order, order.id).item == nil + end + + @tag :map_type + @tag :array_type + test "embeds many" do + item = %Item{price: 123, valid_at: ~D[2014-01-16]} + tag = + %Tag{} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_embed(:items, [item]) + tag = TestRepo.insert!(tag) + + [dbitem] = TestRepo.get!(Tag, tag.id).items + assert item.price == dbitem.price + assert item.valid_at == dbitem.valid_at + assert dbitem.id + + [[dbitem]] = TestRepo.all(from t in Tag, select: t.items) + assert item.price == dbitem.price + assert item.valid_at == dbitem.valid_at + assert dbitem.id + + {1, _} = TestRepo.update_all(Tag, set: [items: [%{dbitem | price: 456}]]) + assert (TestRepo.get!(Tag, tag.id).items |> hd).price == 456 + end + + @tag :map_type + @tag :array_type + test "empty embeds many" do + tag = TestRepo.insert!(%Tag{}) + assert tag.items == [] + assert TestRepo.get!(Tag, tag.id).items == [] + end + + @tag :map_type + @tag :array_type + test "nested embeds" do + red = %ItemColor{name: "red"} + blue = %ItemColor{name: "blue"} + item = %Item{ + primary_color: red, + secondary_colors: [blue] + } + + order = + %Order{} + |> Ecto.Changeset.change + |> Ecto.Changeset.put_embed(:item, item) + order = TestRepo.insert!(order) + + dbitem = TestRepo.get!(Order, order.id).item + assert dbitem.primary_color.name == "red" + assert Enum.map(dbitem.secondary_colors, & &1.name) == ["blue"] + assert dbitem.id + assert dbitem.primary_color.id + + [dbitem] = TestRepo.all(from o in Order, select: o.item) + assert dbitem.primary_color.name == "red" + assert Enum.map(dbitem.secondary_colors, & &1.name) == ["blue"] + assert dbitem.id + assert dbitem.primary_color.id + end + + @tag :decimal_type + test "decimal type" do + decimal = Decimal.new("1.0") + TestRepo.insert!(%Post{cost: decimal}) + + [cost] = TestRepo.all(from p in Post, where: p.cost == ^decimal, select: p.cost) + assert Decimal.equal?(decimal, cost) + [cost] = TestRepo.all(from p in Post, where: p.cost == ^1.0, select: p.cost) + assert Decimal.equal?(decimal, cost) + [cost] = TestRepo.all(from p in Post, where: p.cost == ^1, select: p.cost) + assert Decimal.equal?(decimal, cost) + [cost] = TestRepo.all(from p in Post, where: p.cost == 1.0, select: p.cost) + assert Decimal.equal?(decimal, cost) + [cost] = TestRepo.all(from p in Post, where: p.cost == 1, select: p.cost) + assert Decimal.equal?(decimal, cost) + [cost] = TestRepo.all(from p in Post, select: p.cost * 2) + assert Decimal.equal?(Decimal.new("2.0"), cost) + [cost] = TestRepo.all(from p in Post, select: p.cost - p.cost) + assert Decimal.equal?(Decimal.new("0.0"), cost) + end + + @tag :decimal_type + @tag :decimal_precision + test "decimal typed aggregations" do + decimal = Decimal.new("1.0") + TestRepo.insert!(%Post{cost: decimal}) + + assert [1] = TestRepo.all(from p in Post, select: type(sum(p.cost), :integer)) + assert [1.0] = TestRepo.all(from p in Post, select: type(sum(p.cost), :float)) + [cost] = TestRepo.all(from p in Post, select: type(sum(p.cost), :decimal)) + assert Decimal.equal?(decimal, cost) + end + + @tag :decimal_type + test "on coalesce with mixed types" do + decimal = Decimal.new("1.0") + TestRepo.insert!(%Post{cost: decimal}) + [cost] = TestRepo.all(from p in Post, 
select: coalesce(p.cost, 0)) + assert Decimal.equal?(decimal, cost) + end + + @tag :union_with_literals + test "unions with literals" do + TestRepo.insert!(%Post{}) + TestRepo.insert!(%Post{}) + + query1 = from(p in Post, select: %{n: 1}) + query2 = from(p in Post, select: %{n: 2}) + + assert TestRepo.all(union_all(query1, ^query2)) == + [%{n: 1}, %{n: 1}, %{n: 2}, %{n: 2}] + + query1 = from(p in Post, select: %{n: 1.0}) + query2 = from(p in Post, select: %{n: 2.0}) + + assert TestRepo.all(union_all(query1, ^query2)) == + [%{n: 1.0}, %{n: 1.0}, %{n: 2.0}, %{n: 2.0}] + + query1 = from(p in Post, select: %{n: "foo"}) + query2 = from(p in Post, select: %{n: "bar"}) + + assert TestRepo.all(union_all(query1, ^query2)) == + [%{n: "foo"}, %{n: "foo"}, %{n: "bar"}, %{n: "bar"}] + end + + test "schemaless types" do + TestRepo.insert!(%Post{visits: 123}) + assert [123] = TestRepo.all(from p in "posts", select: type(p.visits, :integer)) + end + + test "schemaless calendar types" do + datetime = ~N[2014-01-16 20:26:51] + assert {1, _} = + TestRepo.insert_all("posts", [[inserted_at: datetime]]) + assert {1, _} = + TestRepo.update_all("posts", set: [inserted_at: datetime]) + assert [_] = + TestRepo.all(from p in "posts", where: p.inserted_at >= ^datetime, select: p.inserted_at) + assert [_] = + TestRepo.all(from p in "posts", where: p.inserted_at in [^datetime], select: p.inserted_at) + assert [_] = + TestRepo.all(from p in "posts", where: p.inserted_at in ^[datetime], select: p.inserted_at) + end +end diff --git a/deps/ecto/integration_test/cases/windows.exs b/deps/ecto/integration_test/cases/windows.exs new file mode 100644 index 0000000..a52eda2 --- /dev/null +++ b/deps/ecto/integration_test/cases/windows.exs @@ -0,0 +1,53 @@ +defmodule Ecto.Integration.WindowsTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + import Ecto.Query + + alias Ecto.Integration.{Comment, User, Post} + + test "over" do + u1 = TestRepo.insert!(%User{name: "Tester"}) + u2 = TestRepo.insert!(%User{name: "Developer"}) + c1 = TestRepo.insert!(%Comment{text: "1", author_id: u1.id}) + c2 = TestRepo.insert!(%Comment{text: "2", author_id: u1.id}) + c3 = TestRepo.insert!(%Comment{text: "3", author_id: u1.id}) + c4 = TestRepo.insert!(%Comment{text: "4", author_id: u2.id}) + + # Over nothing + query = from(c in Comment, select: [c, count(c.id) |> over()]) + assert [[^c1, 4], [^c2, 4], [^c3, 4], [^c4, 4]] = TestRepo.all(query) + + # Over partition + query = from(c in Comment, select: [c, count(c.id) |> over(partition_by: c.author_id)]) + assert [[^c1, 3], [^c2, 3], [^c3, 3], [^c4, 1]] = TestRepo.all(query) + + # Over window + query = from(c in Comment, windows: [w: [partition_by: c.author_id]], select: [c, count(c.id) |> over(:w)]) + assert [[^c1, 3], [^c2, 3], [^c3, 3], [^c4, 1]] = TestRepo.all(query) + end + + test "frame" do + posts = Enum.map(0..6, &%{counter: &1, visits: round(:math.pow(2, &1))}) + TestRepo.insert_all(Post, posts) + + n = 1 + query = from(p in Post, + windows: [w: [order_by: p.counter, frame: fragment("ROWS BETWEEN ? PRECEDING AND ? 
FOLLOWING", ^n, ^n)]], + select: [p.counter, sum(p.visits) |> over(:w)] + ) + assert [[0, 3], [1, 7], [2, 14], [3, 28], [4, 56], [5, 112], [6, 96]] = TestRepo.all(query) + + query = from(p in Post, + windows: [w: [order_by: p.counter, frame: fragment("ROWS BETWEEN 1 FOLLOWING AND UNBOUNDED FOLLOWING")]], + select: [p.counter, sum(p.visits) |> over(:w)] + ) + assert [[0, 126], [1, 124], [2, 120], [3, 112], [4, 96], [5, 64], [6, nil]] = TestRepo.all(query) + + query = from(p in Post, + windows: [w: [order_by: p.counter, frame: fragment("ROWS CURRENT ROW")]], + select: [p.counter, sum(p.visits) |> over(:w)] + ) + assert [[0, 1], [1, 2], [2, 4], [3, 8], [4, 16], [5, 32], [6, 64]] = TestRepo.all(query) + end +end diff --git a/deps/ecto/integration_test/support/schemas.exs b/deps/ecto/integration_test/support/schemas.exs new file mode 100644 index 0000000..90e4de0 --- /dev/null +++ b/deps/ecto/integration_test/support/schemas.exs @@ -0,0 +1,345 @@ +Code.require_file("types.exs", __DIR__) + +defmodule Ecto.Integration.Schema do + defmacro __using__(_) do + quote do + use Ecto.Schema + + type = + Application.compile_env(:ecto, :primary_key_type) || + raise ":primary_key_type not set in :ecto application" + + @primary_key {:id, type, autogenerate: true} + @foreign_key_type type + end + end +end + +defmodule Ecto.Integration.Post do + @moduledoc """ + This module is used to test: + + * Overall functionality + * Overall types + * Non-null timestamps + * Relationships + * Dependent callbacks + + """ + use Ecto.Integration.Schema + import Ecto.Changeset + + schema "posts" do + field :counter, :id # Same as integer + field :title, :string + field :blob, :binary + field :temp, :string, default: "temp", virtual: true + field :public, :boolean, default: true + field :cost, :decimal + field :visits, :integer + field :wrapped_visits, WrappedInteger + field :intensity, :float + field :bid, :binary_id + field :uuid, Ecto.Integration.TestRepo.uuid(), autogenerate: true + field :meta, :map + field :links, {:map, :string} + field :intensities, {:map, :float} + field :posted, :date + has_many :comments, Ecto.Integration.Comment, on_delete: :delete_all, on_replace: :delete + has_many :force_comments, Ecto.Integration.Comment, on_replace: :delete_if_exists + has_many :ordered_comments, Ecto.Integration.Comment, preload_order: [:text] + # The post<->permalink relationship should be marked as uniq + has_one :permalink, Ecto.Integration.Permalink, on_delete: :delete_all, on_replace: :delete + has_one :force_permalink, Ecto.Integration.Permalink, on_replace: :delete_if_exists + has_one :update_permalink, Ecto.Integration.Permalink, foreign_key: :post_id, on_delete: :delete_all, on_replace: :update + has_many :comments_authors, through: [:comments, :author] + belongs_to :author, Ecto.Integration.User + many_to_many :users, Ecto.Integration.User, + join_through: "posts_users", on_delete: :delete_all, on_replace: :delete + many_to_many :ordered_users, Ecto.Integration.User, join_through: "posts_users", preload_order: [desc: :name] + many_to_many :unique_users, Ecto.Integration.User, + join_through: "posts_users", unique: true + many_to_many :constraint_users, Ecto.Integration.User, + join_through: Ecto.Integration.PostUserCompositePk + has_many :users_comments, through: [:users, :comments] + has_many :comments_authors_permalinks, through: [:comments_authors, :permalink] + has_one :post_user_composite_pk, Ecto.Integration.PostUserCompositePk + timestamps() + end + + def changeset(schema, params) do + cast(schema, params, 
~w(counter title blob temp public cost visits + intensity bid uuid meta posted)a) + end +end + +defmodule Ecto.Integration.Comment do + @moduledoc """ + This module is used to test: + + * Optimistic lock + * Relationships + * Dependent callbacks + + """ + use Ecto.Integration.Schema + + schema "comments" do + field :text, :string + field :lock_version, :integer, default: 1 + belongs_to :post, Ecto.Integration.Post + belongs_to :author, Ecto.Integration.User + has_one :post_permalink, through: [:post, :permalink] + end + + def changeset(schema, params) do + Ecto.Changeset.cast(schema, params, [:text]) + end +end + +defmodule Ecto.Integration.Permalink do + @moduledoc """ + This module is used to test: + + * Field sources + * Relationships + * Dependent callbacks + + """ + use Ecto.Integration.Schema + + schema "permalinks" do + field :url, :string, source: :uniform_resource_locator + field :title, :string + field :posted, :date, virtual: true + belongs_to :post, Ecto.Integration.Post, on_replace: :nilify + belongs_to :update_post, Ecto.Integration.Post, on_replace: :update, foreign_key: :post_id, define_field: false + belongs_to :user, Ecto.Integration.User + has_many :post_comments_authors, through: [:post, :comments_authors] + end + + def changeset(schema, params) do + Ecto.Changeset.cast(schema, params, [:url, :title]) + end +end + +defmodule Ecto.Integration.PostUser do + @moduledoc """ + This module is used to test: + + * Many to many associations join_through with schema + + """ + use Ecto.Integration.Schema + + schema "posts_users_pk" do + belongs_to :user, Ecto.Integration.User + belongs_to :post, Ecto.Integration.Post + timestamps() + end +end + +defmodule Ecto.Integration.User do + @moduledoc """ + This module is used to test: + + * UTC Timestamps + * Relationships + * Dependent callbacks + + """ + use Ecto.Integration.Schema + + schema "users" do + field :name, :string + has_many :comments, Ecto.Integration.Comment, foreign_key: :author_id, on_delete: :nilify_all, on_replace: :nilify + has_one :permalink, Ecto.Integration.Permalink, on_replace: :nilify + has_many :posts, Ecto.Integration.Post, foreign_key: :author_id, on_delete: :nothing, on_replace: :delete + belongs_to :custom, Ecto.Integration.Custom, references: :bid, type: :binary_id + many_to_many :schema_posts, Ecto.Integration.Post, join_through: Ecto.Integration.PostUser + many_to_many :unique_posts, Ecto.Integration.Post, join_through: Ecto.Integration.PostUserCompositePk + + has_many :related_2nd_order_posts, through: [:posts, :users, :posts] + has_many :users_through_schema_posts, through: [:schema_posts, :users] + + has_many :v2_comments, Ecto.Integration.Comment, foreign_key: :author_id, where: [lock_version: 2] + has_many :v2_comments_posts, through: [:v2_comments, :post] + has_many :co_commenters, through: [:comments, :post, :comments_authors] + + timestamps(type: :utc_datetime) + end +end + +defmodule Ecto.Integration.Custom do + @moduledoc """ + This module is used to test: + + * binary_id primary key + * Tying another schemas to an existing schema + + Due to the second item, it must be a subset of posts. 
+ """ + use Ecto.Integration.Schema + + @primary_key {:bid, :binary_id, autogenerate: true} + schema "customs" do + field :uuid, Ecto.Integration.TestRepo.uuid() + many_to_many :customs, Ecto.Integration.Custom, + join_through: "customs_customs", join_keys: [custom_id1: :bid, custom_id2: :bid], + on_delete: :delete_all, on_replace: :delete + end +end + +defmodule Ecto.Integration.Barebone do + @moduledoc """ + This module is used to test: + + * A schema without primary keys + + """ + use Ecto.Integration.Schema + + @primary_key false + schema "barebones" do + field :num, :integer + end +end + +defmodule Ecto.Integration.Tag do + @moduledoc """ + This module is used to test: + + * The array type + * Embedding many schemas (uses array) + + """ + use Ecto.Integration.Schema + + schema "tags" do + field :ints, {:array, :integer} + field :uuids, {:array, Ecto.Integration.TestRepo.uuid()} + embeds_many :items, Ecto.Integration.Item + end +end + +defmodule Ecto.Integration.Item do + @moduledoc """ + This module is used to test: + + * Embedding + * Preloading associations in embedded schemas + + """ + use Ecto.Schema + + embedded_schema do + field :reference, PrefixedString + field :price, :integer + field :valid_at, :date + + embeds_one :primary_color, Ecto.Integration.ItemColor + embeds_many :secondary_colors, Ecto.Integration.ItemColor + + belongs_to :user, Ecto.Integration.User + end +end + +defmodule Ecto.Integration.ItemColor do + @moduledoc """ + This module is used to test: + + * Nested embeds + + """ + use Ecto.Schema + + embedded_schema do + field :name, :string + end +end + +defmodule Ecto.Integration.Order do + @moduledoc """ + This module is used to test: + + * Text columns + * Embedding one schema + + """ + use Ecto.Integration.Schema + + schema "orders" do + field :meta, :map + embeds_one :item, Ecto.Integration.Item + embeds_many :items, Ecto.Integration.Item + belongs_to :permalink, Ecto.Integration.Permalink + end +end + +defmodule Ecto.Integration.CompositePk do + @moduledoc """ + This module is used to test: + + * Composite primary keys + + """ + use Ecto.Integration.Schema + import Ecto.Changeset + + @primary_key false + schema "composite_pk" do + field :a, :integer, primary_key: true + field :b, :integer, primary_key: true + field :name, :string + end + def changeset(schema, params) do + cast(schema, params, ~w(a b name)a) + end +end + +defmodule Ecto.Integration.CorruptedPk do + @moduledoc """ + This module is used to test: + + * Primary keys that is not unique on a DB side + + """ + use Ecto.Integration.Schema + + @primary_key false + schema "corrupted_pk" do + field :a, :string, primary_key: true + end +end + +defmodule Ecto.Integration.PostUserCompositePk do + @moduledoc """ + This module is used to test: + + * Composite primary keys for 2 belongs_to fields + + """ + use Ecto.Integration.Schema + + @primary_key false + schema "posts_users_composite_pk" do + belongs_to :user, Ecto.Integration.User, primary_key: true + belongs_to :post, Ecto.Integration.Post, primary_key: true + timestamps() + end +end + +defmodule Ecto.Integration.Usec do + @moduledoc """ + This module is used to test: + + * usec datetime types + + """ + use Ecto.Integration.Schema + + schema "usecs" do + field :naive_datetime_usec, :naive_datetime_usec + field :utc_datetime_usec, :utc_datetime_usec + end +end diff --git a/deps/ecto/integration_test/support/types.exs b/deps/ecto/integration_test/support/types.exs new file mode 100644 index 0000000..196012f --- /dev/null +++ 
b/deps/ecto/integration_test/support/types.exs @@ -0,0 +1,53 @@ +defmodule CustomPermalink do + def type, do: :id + + def cast(string) when is_binary(string) do + case Integer.parse(string) do + {int, _} -> {:ok, int} + :error -> :error + end + end + + def cast(integer) when is_integer(integer), do: {:ok, integer} + def cast(_), do: :error + + def load(integer) when is_integer(integer), do: {:ok, integer} + def dump(integer) when is_integer(integer), do: {:ok, integer} +end + +defmodule PrefixedString do + use Ecto.Type + def type(), do: :string + def cast(string), do: {:ok, string} + def load(string), do: {:ok, "PREFIX-" <> string} + def dump("PREFIX-" <> string), do: {:ok, string} + def dump(_string), do: :error + def embed_as(_), do: :dump +end + +defmodule WrappedInteger do + use Ecto.Type + def type(), do: :integer + def cast(integer), do: {:ok, {:int, integer}} + def load(integer), do: {:ok, {:int, integer}} + def dump({:int, integer}), do: {:ok, integer} +end + +defmodule ParameterizedPrefixedString do + use Ecto.ParameterizedType + def init(opts), do: Enum.into(opts, %{}) + def type(_), do: :string + + def cast(data, %{prefix: prefix}) do + if String.starts_with?(data, [prefix <> "-"]) do + {:ok, data} + else + {:ok, prefix <> "-" <> data} + end + end + + def load(string, _, %{prefix: prefix}), do: {:ok, prefix <> "-" <> string} + def dump(nil, _, _), do: {:ok, nil} + def dump(data, _, %{prefix: _prefix}), do: {:ok, data |> String.split("-") |> List.last()} + def embed_as(_, _), do: :dump +end diff --git a/deps/ecto/lib/ecto.ex b/deps/ecto/lib/ecto.ex new file mode 100644 index 0000000..2dbe0f2 --- /dev/null +++ b/deps/ecto/lib/ecto.ex @@ -0,0 +1,699 @@ +defmodule Ecto do + @moduledoc ~S""" + Ecto is split into 4 main components: + + * `Ecto.Repo` - repositories are wrappers around the data store. + Via the repository, we can create, update, destroy and query + existing entries. A repository needs an adapter and credentials + to communicate to the database + + * `Ecto.Schema` - schemas are used to map external data into Elixir + structs. We often use them to map database tables to Elixir data but + they have many other use cases + + * `Ecto.Query` - written in Elixir syntax, queries are used to retrieve + information from a given repository. Ecto queries are secure and composable + + * `Ecto.Changeset` - changesets provide a way for track and validate changes + before they are applied to the data + + In summary: + + * `Ecto.Repo` - **where** the data is + * `Ecto.Schema` - **what** the data is + * `Ecto.Query` - **how to read** the data + * `Ecto.Changeset` - **how to change** the data + + Besides the four components above, most developers use Ecto to interact + with SQL databases, such as Postgres and MySQL via the + [`ecto_sql`](https://hexdocs.pm/ecto_sql) project. `ecto_sql` provides many + conveniences for working with SQL databases as well as the ability to version + how your database changes through time via + [database migrations](https://hexdocs.pm/ecto_sql/Ecto.Adapters.SQL.html#module-migrations). + + If you want to quickly check a sample application using Ecto, please check + the [getting started guide](https://hexdocs.pm/ecto/getting-started.html) and + the accompanying sample application. [Ecto's README](https://github.com/elixir-ecto/ecto) + also links to other resources. + + In the following sections, we will provide an overview of those components and + how they interact with each other. 
Feel free to access their respective module + documentation for more specific examples, options and configuration. + + ## Repositories + + `Ecto.Repo` is a wrapper around the database. We can define a + repository as follows: + + defmodule Repo do + use Ecto.Repo, + otp_app: :my_app, + adapter: Ecto.Adapters.Postgres + end + + Where the configuration for the Repo must be in your application + environment, usually defined in your `config/config.exs`: + + config :my_app, Repo, + database: "ecto_simple", + username: "postgres", + password: "postgres", + hostname: "localhost", + # OR use a URL to connect instead + url: "postgres://postgres:postgres@localhost/ecto_simple" + + Each repository in Ecto defines a `start_link/0` function that needs to be invoked + before using the repository. In general, this function is not called directly, + but used as part of your application supervision tree. + + If your application was generated with a supervisor (by passing `--sup` to `mix new`) + you will have a `lib/my_app/application.ex` file containing the application start + callback that defines and starts your supervisor. You just need to edit the `start/2` + function to start the repo as a supervisor on your application's supervisor: + + def start(_type, _args) do + children = [ + MyApp.Repo, + ] + + opts = [strategy: :one_for_one, name: MyApp.Supervisor] + Supervisor.start_link(children, opts) + end + + ## Schema + + Schemas allow developers to define the shape of their data. + Let's see an example: + + defmodule Weather do + use Ecto.Schema + + # weather is the DB table + schema "weather" do + field :city, :string + field :temp_lo, :integer + field :temp_hi, :integer + field :prcp, :float, default: 0.0 + end + end + + By defining a schema, Ecto automatically defines a struct with + the schema fields: + + iex> weather = %Weather{temp_lo: 30} + iex> weather.temp_lo + 30 + + The schema also allows us to interact with a repository: + + iex> weather = %Weather{temp_lo: 0, temp_hi: 23} + iex> Repo.insert!(weather) + %Weather{...} + + After persisting `weather` to the database, it will return a new copy of + `%Weather{}` with the primary key (the `id`) set. We can use this value + to read a struct back from the repository: + + # Get the struct back + iex> weather = Repo.get Weather, 1 + %Weather{id: 1, ...} + + # Delete it + iex> Repo.delete!(weather) + %Weather{...} + + > NOTE: by using `Ecto.Schema`, an `:id` field with type `:id` (:id means :integer) is + > generated by default, which is the primary key of the Schema. If you want + > to use a different primary key, you can declare custom `@primary_key` + > before the `schema/2` call. Consult the `Ecto.Schema` documentation + > for more information. + + Notice how the storage (repository) and the data are decoupled. This provides + two main benefits: + + * By having structs as data, we guarantee they are light-weight, + serializable structures. 
In many languages, the data is often represented + by large, complex objects, with entwined state transactions, which makes + serialization, maintenance and understanding hard; + + * You do not need to define schemas in order to interact with repositories, + operations like `all`, `insert_all` and so on allow developers to directly + access and modify the data, keeping the database at your fingertips when + necessary; + + ## Changesets + + Although in the example above we have directly inserted and deleted the + struct in the repository, operations on top of schemas are done through + changesets so Ecto can efficiently track changes. + + Changesets allow developers to filter, cast, and validate changes before + we apply them to the data. Imagine the given schema: + + defmodule User do + use Ecto.Schema + + import Ecto.Changeset + + schema "users" do + field :name + field :email + field :age, :integer + end + + def changeset(user, params \\ %{}) do + user + |> cast(params, [:name, :email, :age]) + |> validate_required([:name, :email]) + |> validate_format(:email, ~r/@/) + |> validate_inclusion(:age, 18..100) + end + end + + The `changeset/2` function first invokes `Ecto.Changeset.cast/4` with + the struct, the parameters and a list of allowed fields; this returns a changeset. + The parameters is a map with binary keys and values that will be cast based + on the type defined on the schema. + + Any parameter that was not explicitly listed in the fields list will be ignored. + + After casting, the changeset is given to many `Ecto.Changeset.validate_*` + functions that validate only the **changed fields**. In other words: + if a field was not given as a parameter, it won't be validated at all. + For example, if the params map contain only the "name" and "email" keys, + the "age" validation won't run. + + Once a changeset is built, it can be given to functions like `insert` and + `update` in the repository that will return an `:ok` or `:error` tuple: + + case Repo.update(changeset) do + {:ok, user} -> + # user updated + {:error, changeset} -> + # an error occurred + end + + The benefit of having explicit changesets is that we can easily provide + different changesets for different use cases. For example, one + could easily provide specific changesets for registering and updating + users: + + def registration_changeset(user, params) do + # Changeset on create + end + + def update_changeset(user, params) do + # Changeset on update + end + + Changesets are also capable of transforming database constraints, + like unique indexes and foreign key checks, into errors. Allowing + developers to keep their database consistent while still providing + proper feedback to end users. Check `Ecto.Changeset.unique_constraint/3` + for some examples as well as the other `_constraint` functions. + + ## Query + + Last but not least, Ecto allows you to write queries in Elixir and send + them to the repository, which translates them to the underlying database. + Let's see an example: + + import Ecto.Query, only: [from: 2] + + query = from u in User, + where: u.age > 18 or is_nil(u.email), + select: u + + # Returns %User{} structs matching the query + Repo.all(query) + + In the example above we relied on our schema but queries can also be + made directly against a table by giving the table name as a string. 
In + such cases, the data to be fetched must be explicitly outlined: + + query = from u in "users", + where: u.age > 18 or is_nil(u.email), + select: %{name: u.name, age: u.age} + + # Returns maps as defined in select + Repo.all(query) + + Queries are defined and extended with the `from` macro. The supported + keywords are: + + * `:distinct` + * `:where` + * `:order_by` + * `:offset` + * `:limit` + * `:lock` + * `:group_by` + * `:having` + * `:join` + * `:select` + * `:preload` + + Examples and detailed documentation for each of those are available + in the `Ecto.Query` module. Functions supported in queries are listed + in `Ecto.Query.API`. + + When writing a query, you are inside Ecto's query syntax. In order to + access params values or invoke Elixir functions, you need to use the `^` + operator, which is overloaded by Ecto: + + def min_age(min) do + from u in User, where: u.age > ^min + end + + Besides `Repo.all/1` which returns all entries, repositories also + provide `Repo.one/1` which returns one entry or nil, `Repo.one!/1` + which returns one entry or raises, `Repo.get/2` which fetches + entries for a particular ID and more. + + Finally, if you need an escape hatch, Ecto provides fragments + (see `Ecto.Query.API.fragment/1`) to inject SQL (and non-SQL) + fragments into queries. Also, most adapters provide direct + APIs for queries, like `Ecto.Adapters.SQL.query/4`, allowing + developers to completely bypass Ecto queries. + + ## Other topics + + ### Associations + + Ecto supports defining associations on schemas: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + has_many :comments, Comment + end + end + + defmodule Comment do + use Ecto.Schema + + schema "comments" do + field :title, :string + belongs_to :post, Post + end + end + + When an association is defined, Ecto also defines a field in the schema + with the association name. By default, associations are not loaded into + this field: + + iex> post = Repo.get(Post, 42) + iex> post.comments + #Ecto.Association.NotLoaded<...> + + However, developers can use the preload functionality in queries to + automatically pre-populate the field: + + Repo.all from p in Post, preload: [:comments] + + Preloading can also be done with a pre-defined join value: + + Repo.all from p in Post, + join: c in assoc(p, :comments), + where: c.votes > p.votes, + preload: [comments: c] + + Finally, for the simple cases, preloading can also be done after + a collection was fetched: + + posts = Repo.all(Post) |> Repo.preload(:comments) + + The `Ecto` module also provides conveniences for working + with associations. For example, `Ecto.assoc/2` returns a query + with all associated data to a given struct: + + import Ecto + + # Get all comments for the given post + Repo.all assoc(post, :comments) + + # Or build a query on top of the associated comments + query = from c in assoc(post, :comments), where: not is_nil(c.title) + Repo.all(query) + + Another function in `Ecto` is `build_assoc/3`, which allows + someone to build an associated struct with the proper fields: + + Repo.transaction fn -> + post = Repo.insert!(%Post{title: "Hello", body: "world"}) + + # Build a comment from post + comment = Ecto.build_assoc(post, :comments, body: "Excellent!") + + Repo.insert!(comment) + end + + In the example above, `Ecto.build_assoc/3` is equivalent to: + + %Comment{post_id: post.id, body: "Excellent!"} + + You can find more information about defining associations and each + respective association module in `Ecto.Schema` docs. 
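+
+  Schemas related through a join table can also be expressed with
+  `many_to_many`. A minimal sketch, assuming a `Tag` schema and a
+  "posts_tags" join table (both names are illustrative):
+
+      defmodule Post do
+        use Ecto.Schema
+
+        schema "posts" do
+          many_to_many :tags, Tag, join_through: "posts_tags"
+        end
+      end
+
+  Such associations can be preloaded and queried with `Ecto.assoc/2`
+  just like the `has_many` examples above.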
+ + > NOTE: Ecto does not lazy load associations. While lazily loading + > associations may sound convenient at first, in the long run it + > becomes a source of confusion and performance issues. + + ### Embeds + + Ecto also supports embeds. While associations keep parent and child + entries in different tables, embeds stores the child along side the + parent. + + Databases like MongoDB have native support for embeds. Databases + like PostgreSQL uses a mixture of JSONB (`embeds_one/3`) and ARRAY + columns to provide this functionality. + + Check `Ecto.Schema.embeds_one/3` and `Ecto.Schema.embeds_many/3` + for more information. + + ### Mix tasks and generators + + Ecto provides many tasks to help your workflow as well as code generators. + You can find all available tasks by typing `mix help` inside a project + with Ecto listed as a dependency. + + Ecto generators will automatically open the generated files if you have + `ECTO_EDITOR` set in your environment variable. + + #### Repo resolution + + Ecto requires developers to specify the key `:ecto_repos` in their + application configuration before using tasks like `ecto.create` and + `ecto.migrate`. For example: + + config :my_app, :ecto_repos, [MyApp.Repo] + + config :my_app, MyApp.Repo, + database: "ecto_simple", + username: "postgres", + password: "postgres", + hostname: "localhost" + + """ + + @doc """ + Returns the schema primary keys as a keyword list. + """ + @spec primary_key(Ecto.Schema.t) :: Keyword.t + def primary_key(%{__struct__: schema} = struct) do + Enum.map schema.__schema__(:primary_key), fn(field) -> + {field, Map.fetch!(struct, field)} + end + end + + @doc """ + Returns the schema primary keys as a keyword list. + + Raises `Ecto.NoPrimaryKeyFieldError` if the schema has no + primary key field. + """ + @spec primary_key!(Ecto.Schema.t) :: Keyword.t + def primary_key!(%{__struct__: schema} = struct) do + case primary_key(struct) do + [] -> raise Ecto.NoPrimaryKeyFieldError, schema: schema + pk -> pk + end + end + + @doc """ + Builds a struct from the given `assoc` in `struct`. + + ## Examples + + If the relationship is a `has_one` or `has_many` and + the primary key is set in the parent struct, the key will + automatically be set in the built association: + + iex> post = Repo.get(Post, 13) + %Post{id: 13} + iex> build_assoc(post, :comments) + %Comment{id: nil, post_id: 13} + + Note though it doesn't happen with `belongs_to` cases, as the + key is often the primary key and such is usually generated + dynamically: + + iex> comment = Repo.get(Comment, 13) + %Comment{id: 13, post_id: 25} + iex> build_assoc(comment, :post) + %Post{id: nil} + + You can also pass the attributes, which can be a map or + a keyword list, to set the struct's fields except the + association key. + + iex> build_assoc(post, :comments, text: "cool") + %Comment{id: nil, post_id: 13, text: "cool"} + + iex> build_assoc(post, :comments, %{text: "cool"}) + %Comment{id: nil, post_id: 13, text: "cool"} + + iex> build_assoc(post, :comments, post_id: 1) + %Comment{id: nil, post_id: 13} + + The given attributes are expected to be structured data. 
+ If you want to build an association with external data, + such as a request parameters, you can use `Ecto.Changeset.cast/3` + after `build_assoc/3`: + + parent + |> Ecto.build_assoc(:child) + |> Ecto.Changeset.cast(params, [:field1, :field2]) + + """ + def build_assoc(%{__struct__: schema} = struct, assoc, attributes \\ %{}) do + assoc = Ecto.Association.association_from_schema!(schema, assoc) + assoc.__struct__.build(assoc, struct, drop_meta(attributes)) + end + + defp drop_meta(%{} = attrs), do: Map.drop(attrs, [:__struct__, :__meta__]) + defp drop_meta([_|_] = attrs), do: Keyword.drop(attrs, [:__struct__, :__meta__]) + + @doc """ + Builds a query for the association in the given struct or structs. + + ## Examples + + In the example below, we get all comments associated to the given + post: + + post = Repo.get Post, 1 + Repo.all Ecto.assoc(post, :comments) + + `assoc/2` can also receive a list of posts, as long as the posts are + not empty: + + posts = Repo.all from p in Post, where: is_nil(p.published_at) + Repo.all Ecto.assoc(posts, :comments) + + This function can also be used to dynamically load through associations + by giving it a list. For example, to get all authors for all comments for + the given posts, do: + + posts = Repo.all from p in Post, where: is_nil(p.published_at) + Repo.all Ecto.assoc(posts, [:comments, :author]) + + ## Options + + * `:prefix` - the prefix to fetch assocs from. By default, queries + will use the same prefix as the first struct in the given collection. + This option allows the prefix to be changed. + + """ + def assoc(struct_or_structs, assocs, opts \\ []) do + [assoc | assocs] = List.wrap(assocs) + + structs = + case struct_or_structs do + nil -> raise ArgumentError, "cannot retrieve association #{inspect(assoc)} for nil" + [] -> raise ArgumentError, "cannot retrieve association #{inspect(assoc)} for empty list" + struct_or_structs -> List.wrap(struct_or_structs) + end + + sample = hd(structs) + prefix = assoc_prefix(sample, opts) + schema = sample.__struct__ + refl = %{owner_key: owner_key} = Ecto.Association.association_from_schema!(schema, assoc) + + values = + Enum.uniq for(struct <- structs, + assert_struct!(schema, struct), + key = Map.fetch!(struct, owner_key), + do: key) + + case assocs do + [] -> + %module{} = refl + %{module.assoc_query(refl, nil, values) | prefix: prefix} + + assocs -> + %{Ecto.Association.filter_through_chain(schema, [assoc | assocs], values) | prefix: prefix} + end + end + + defp assoc_prefix(sample, opts) do + case Keyword.fetch(opts, :prefix) do + {:ok, prefix} -> + prefix + + :error -> + case sample do + %{__meta__: %{prefix: prefix}} -> prefix + # Must be an embedded schema + _ -> nil + end + end + end + + @doc """ + Checks if an association is loaded. + + ## Examples + + iex> post = Repo.get(Post, 1) + iex> Ecto.assoc_loaded?(post.comments) + false + iex> post = post |> Repo.preload(:comments) + iex> Ecto.assoc_loaded?(post.comments) + true + + """ + def assoc_loaded?(%Ecto.Association.NotLoaded{}), do: false + def assoc_loaded?(list) when is_list(list), do: true + def assoc_loaded?(%_{}), do: true + def assoc_loaded?(nil), do: true + + @doc """ + Gets the metadata from the given struct. + """ + def get_meta(struct, :context), + do: struct.__meta__.context + def get_meta(struct, :state), + do: struct.__meta__.state + def get_meta(struct, :source), + do: struct.__meta__.source + def get_meta(struct, :prefix), + do: struct.__meta__.prefix + + @doc """ + Returns a new struct with updated metadata. 
+ + It is possible to set: + + * `:source` - changes the struct query source + * `:prefix` - changes the struct query prefix + * `:context` - changes the struct meta context + * `:state` - changes the struct state + + Please refer to the `Ecto.Schema.Metadata` module for more information. + """ + @spec put_meta(Ecto.Schema.schema, meta) :: Ecto.Schema.schema + when meta: [source: Ecto.Schema.source, prefix: Ecto.Schema.prefix, + context: Ecto.Schema.Metadata.context, state: Ecto.Schema.Metadata.state] + def put_meta(%{__meta__: meta} = struct, opts) do + case put_or_noop_meta(opts, meta, false) do + :noop -> struct + meta -> %{struct | __meta__: meta} + end + end + + defp put_or_noop_meta([{key, value}|t], meta, updated?) do + case meta do + %{^key => ^value} -> put_or_noop_meta(t, meta, updated?) + _ -> put_or_noop_meta(t, put_meta(meta, key, value), true) + end + end + + defp put_or_noop_meta([], meta, true), do: meta + defp put_or_noop_meta([], _meta, false), do: :noop + + defp put_meta(meta, :state, state) do + if state in [:built, :loaded, :deleted] do + %{meta | state: state} + else + raise ArgumentError, "invalid state #{inspect state}" + end + end + + defp put_meta(meta, :source, source) do + %{meta | source: source} + end + + defp put_meta(meta, :prefix, prefix) do + %{meta | prefix: prefix} + end + + defp put_meta(meta, :context, context) do + %{meta | context: context} + end + + defp put_meta(_meta, key, _value) do + raise ArgumentError, "unknown meta key #{inspect key}" + end + + defp assert_struct!(module, %{__struct__: struct}) do + if struct != module do + raise ArgumentError, "expected a homogeneous list containing the same struct, " <> + "got: #{inspect module} and #{inspect struct}" + else + true + end + end + + @doc """ + Loads previously dumped `data` in the given `format` into a schema. + + The first argument can be a an embedded schema module, or a map (of types) and + determines the return value: a struct or a map, respectively. + + The second argument `data` specifies fields and values that are to be loaded. + It can be a map, a keyword list, or a `{fields, values}` tuple. Fields can be + atoms or strings. + + The third argument `format` is the format the data has been dumped as. For + example, databases may dump embedded to `:json`, this function allows such + dumped data to be put back into the schemas. + + Fields that are not present in the schema (or `types` map) are ignored. + If any of the values has invalid type, an error is raised. + + Note that if you want to load data into a non-embedded schema that was + directly persisted into a given repository, then use `c:Ecto.Repo.load/2`. + + ## Examples + + iex> result = Ecto.Adapters.SQL.query!(MyRepo, "SELECT users.settings FROM users", []) + iex> Enum.map(result.rows, fn [settings] -> Ecto.embedded_load(Setting, Jason.decode!(settings), :json) end) + [%Setting{...}, ...] + """ + @spec embedded_load( + module_or_map :: module | map(), + data :: map(), + format :: atom() + ) :: Ecto.Schema.t() | map() + def embedded_load(schema_or_types, data, format) do + Ecto.Schema.Loader.unsafe_load(schema_or_types, data, &Ecto.Type.embedded_load(&1, &2, format)) + end + + @doc """ + Dumps the given struct defined by an embedded schema. + + This converts the given embedded schema to a map to be serialized + with the given format. 
For example: + + iex> Ecto.embedded_dump(%Post{}, :json) + %{title: "hello"} + + """ + @spec embedded_dump(Ecto.Schema.t(), format :: atom()) :: map() + def embedded_dump(%schema{} = data, format) do + Ecto.Schema.Loader.safe_dump(data, schema.__schema__(:dump), &Ecto.Type.embedded_dump(&1, &2, format)) + end +end diff --git a/deps/ecto/lib/ecto/adapter.ex b/deps/ecto/lib/ecto/adapter.ex new file mode 100644 index 0000000..d97599f --- /dev/null +++ b/deps/ecto/lib/ecto/adapter.ex @@ -0,0 +1,139 @@ +defmodule Ecto.Adapter do + @moduledoc """ + Specifies the minimal API required from adapters. + """ + + @type t :: module + + @typedoc """ + The metadata returned by the adapter `c:init/1`. + + It must be a map and Ecto itself will always inject + two keys into the meta: + + * the `:cache` key, which as ETS table that can be used as a cache (if available) + * the `:pid` key, which is the PID returned by the child spec returned in `c:init/1` + + """ + @type adapter_meta :: %{optional(:stacktrace) => boolean(), optional(any()) => any()} + + @doc """ + The callback invoked in case the adapter needs to inject code. + """ + @macrocallback __before_compile__(env :: Macro.Env.t()) :: Macro.t() + + @doc """ + Ensure all applications necessary to run the adapter are started. + """ + @callback ensure_all_started(config :: Keyword.t(), type :: :permanent | :transient | :temporary) :: + {:ok, [atom]} | {:error, atom} + + @doc """ + Initializes the adapter supervision tree by returning the children and adapter metadata. + """ + @callback init(config :: Keyword.t()) :: {:ok, :supervisor.child_spec(), adapter_meta} + + @doc """ + Checks out a connection for the duration of the given function. + + In case the adapter provides a pool, this guarantees all of the code + inside the given `fun` runs against the same connection, which + might improve performance by for instance allowing multiple related + calls to the datastore to share cache information: + + Repo.checkout(fn -> + for _ <- 100 do + Repo.insert!(%Post{}) + end + end) + + If the adapter does not provide a pool, just calling the passed function + and returning its result are enough. + + If the adapter provides a pool, it is supposed to "check out" one of the + pool connections for the duration of the function call. Which connection + is checked out is not passed to the calling function, so it should be done + using a stateful method like using the current process' dictionary, process + tracking, or some kind of other lookup method. Make sure that this stored + connection is then used in the other callbacks implementations, such as + `Ecto.Adapter.Queryable` and `Ecto.Adapter.Schema`. + """ + @callback checkout(adapter_meta, config :: Keyword.t(), (() -> result)) :: result when result: var + + @doc """ + Returns true if a connection has been checked out. + """ + @callback checked_out?(adapter_meta) :: boolean + + @doc """ + Returns the loaders for a given type. + + It receives the primitive type and the Ecto type (which may be + primitive as well). It returns a list of loaders with the given + type usually at the end. + + This allows developers to properly translate values coming from + the adapters into Ecto ones. 
For example, if the database does not + support booleans but instead returns 0 and 1 for them, you could + add: + + def loaders(:boolean, type), do: [&bool_decode/1, type] + def loaders(_primitive, type), do: [type] + + defp bool_decode(0), do: {:ok, false} + defp bool_decode(1), do: {:ok, true} + + All adapters are required to implement a clause for `:binary_id` types, + since they are adapter specific. If your adapter does not provide binary + ids, you may simply use `Ecto.UUID`: + + def loaders(:binary_id, type), do: [Ecto.UUID, type] + def loaders(_primitive, type), do: [type] + + """ + @callback loaders(primitive_type :: Ecto.Type.primitive(), ecto_type :: Ecto.Type.t()) :: + [(term -> {:ok, term} | :error) | Ecto.Type.t()] + + @doc """ + Returns the dumpers for a given type. + + It receives the primitive type and the Ecto type (which may be + primitive as well). It returns a list of dumpers with the given + type usually at the beginning. + + This allows developers to properly translate values coming from + the Ecto into adapter ones. For example, if the database does not + support booleans but instead returns 0 and 1 for them, you could + add: + + def dumpers(:boolean, type), do: [type, &bool_encode/1] + def dumpers(_primitive, type), do: [type] + + defp bool_encode(false), do: {:ok, 0} + defp bool_encode(true), do: {:ok, 1} + + All adapters are required to implement a clause for :binary_id types, + since they are adapter specific. If your adapter does not provide + binary ids, you may simply use `Ecto.UUID`: + + def dumpers(:binary_id, type), do: [type, Ecto.UUID] + def dumpers(_primitive, type), do: [type] + + """ + @callback dumpers(primitive_type :: Ecto.Type.primitive(), ecto_type :: Ecto.Type.t()) :: + [(term -> {:ok, term} | :error) | Ecto.Type.t()] + + @doc """ + Returns the adapter metadata from its `c:init/1` callback. + + It expects a process name of a repository. The name is either + an atom or a PID. For a given repository, you often want to + call this function based on the repository dynamic repo: + + Ecto.Adapter.lookup_meta(repo.get_dynamic_repo()) + + """ + def lookup_meta(repo_name_or_pid) do + Ecto.Repo.Registry.lookup(repo_name_or_pid) + end +end diff --git a/deps/ecto/lib/ecto/adapter/queryable.ex b/deps/ecto/lib/ecto/adapter/queryable.ex new file mode 100644 index 0000000..84aa2cb --- /dev/null +++ b/deps/ecto/lib/ecto/adapter/queryable.ex @@ -0,0 +1,126 @@ +defmodule Ecto.Adapter.Queryable do + @moduledoc """ + Specifies the query API required from adapters. + + If your adapter is only able to respond to one or a couple of the query functions, + add custom implementations of those functions directly to the Repo + by using `c:Ecto.Adapter.__before_compile__/1` instead. + """ + + @typedoc "Proxy type to the adapter meta" + @type adapter_meta :: Ecto.Adapter.adapter_meta() + + @typedoc "Ecto.Query metadata fields (stored in cache)" + @type query_meta :: %{sources: tuple, preloads: term, select: map} + + @typedoc """ + Cache query metadata that is passed to `c:execute/5`. + + The cache can be in 3 states, documented below. + + If `{:nocache, prepared}` is given, it means the query was + not and cannot be cached. The `prepared` value is the value + returned by `c:prepare/2`. + + If `{:cache, cache_function, prepared}` is given, it means + the query can be cached and it must be cached by calling + the `cache_function` function with the cache entry of your + choice. 
Once `cache_function` is called, the next time the + same query is given to `c:execute/5`, it will receive the + `:cached` tuple. + + If `{:cached, update_function, reset_function, cached}` is + given, it means the query has been cached. You may call + `update_function/1` if you want to update the cached result. + Or you may call `reset_function/1`, with a new prepared query, + to force the query to be cached again. If `reset_function/1` + is called, the next time the same query is given to + `c:execute/5`, it will receive the `:cache` tuple. + """ + @type query_cache :: {:nocache, prepared} + | {:cache, cache_function :: (cached -> :ok), prepared} + | {:cached, update_function :: (cached -> :ok), reset_function :: (prepared -> :ok), cached} + + @type prepared :: term + @type cached :: term + @type options :: Keyword.t() + @type selected :: term + + @doc """ + Commands invoked to prepare a query. + + It is used on `c:Ecto.Repo.all/2`, `c:Ecto.Repo.update_all/3`, + and `c:Ecto.Repo.delete_all/2`. If returns a tuple, saying if + this query can be cached or not, and the `prepared` query. + The `prepared` query is any term that will be passed to the + adapter's `c:execute/5`. + """ + @callback prepare(atom :: :all | :update_all | :delete_all, query :: Ecto.Query.t()) :: + {:cache, prepared} | {:nocache, prepared} + + @doc """ + Executes a previously prepared query. + + The `query_meta` field is a map containing some of the fields + found in the `Ecto.Query` struct, after they have been normalized. + For example, the values `selected` by the query, which then have + to be returned, can be found in `query_meta`. + + The `query_cache` and its state is documented in `t:query_cache/0`. + + The `params` is the list of query parameters. For example, for + a query such as `from Post, where: [id: ^123]`, `params` will be + `[123]`. + + Finally, `options` is a keyword list of options given to the + `Repo` operation that triggered the adapter call. Any option is + allowed, as this is a mechanism to allow users of Ecto to customize + how the adapter behaves per operation. + + It must return a tuple containing the number of entries and + the result set as a list of lists. The entries in the actual + list will depend on what has been selected by the query. The + result set may also be `nil`, if no value is being selected. + """ + @callback execute(adapter_meta, query_meta, query_cache, params :: list(), options) :: + {non_neg_integer, [[selected]] | nil} + + @doc """ + Streams a previously prepared query. + + See `c:execute/5` for a description of arguments. + + It returns a stream of values. + """ + @callback stream(adapter_meta, query_meta, query_cache, params :: list(), options) :: + Enumerable.t + + @doc """ + Plans and prepares a query for the given repo, leveraging its query cache. + + This operation uses the query cache if one is available. + """ + def prepare_query(operation, repo_name_or_pid, queryable) do + %{adapter: adapter, cache: cache} = Ecto.Repo.Registry.lookup(repo_name_or_pid) + + {_meta, prepared, params} = + queryable + |> Ecto.Queryable.to_query() + |> Ecto.Query.Planner.ensure_select(operation == :all) + |> Ecto.Query.Planner.query(operation, cache, adapter, 0) + + {prepared, params} + end + + @doc """ + Plans a query using the given adapter. + + This does not expect the repository and therefore does not leverage the cache. 
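+
+  A rough usage sketch (the `Post` schema, the query and the Postgres
+  adapter below are illustrative, and `Ecto.Query` is assumed to be
+  imported):
+
+      query = from p in Post, where: p.public == true
+      {planned, params} =
+        Ecto.Adapter.Queryable.plan_query(:all, Ecto.Adapters.Postgres, query)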
+ """ + def plan_query(operation, adapter, queryable) do + query = Ecto.Queryable.to_query(queryable) + {query, params, _key} = Ecto.Query.Planner.plan(query, operation, adapter) + {query, _} = Ecto.Query.Planner.normalize(query, operation, adapter, 0) + {query, params} + end +end diff --git a/deps/ecto/lib/ecto/adapter/schema.ex b/deps/ecto/lib/ecto/adapter/schema.ex new file mode 100644 index 0000000..09f6639 --- /dev/null +++ b/deps/ecto/lib/ecto/adapter/schema.ex @@ -0,0 +1,92 @@ +defmodule Ecto.Adapter.Schema do + @moduledoc """ + Specifies the schema API required from adapters. + """ + + @typedoc "Proxy type to the adapter meta" + @type adapter_meta :: Ecto.Adapter.adapter_meta() + + @typedoc "Ecto.Schema metadata fields" + @type schema_meta :: %{ + autogenerate_id: {schema_field :: atom, source_field :: atom, Ecto.Type.t()}, + context: term, + prefix: binary | nil, + schema: atom, + source: binary + } + + @type fields :: Keyword.t() + @type filters :: Keyword.t() + @type constraints :: Keyword.t() + @type returning :: [atom] + @type placeholders :: [term] + @type options :: Keyword.t() + + @type on_conflict :: + {:raise, list(), []} + | {:nothing, list(), [atom]} + | {[atom], list(), [atom]} + | {Ecto.Query.t(), list(), [atom]} + + @doc """ + Called to autogenerate a value for id/embed_id/binary_id. + + Returns the autogenerated value, or nil if it must be + autogenerated inside the storage or raise if not supported. + """ + @callback autogenerate(field_type :: :id | :binary_id | :embed_id) :: term | nil + + @doc """ + Inserts multiple entries into the data store. + + In case an `Ecto.Query` given as any of the field values by the user, + it will be sent to the adapter as a tuple with in the shape of + `{query, params}`. + """ + @callback insert_all( + adapter_meta, + schema_meta, + header :: [atom], + [[{atom, term | {Ecto.Query.t(), list()}}]], + on_conflict, + returning, + placeholders, + options + ) :: {non_neg_integer, [[term]] | nil} + + @doc """ + Inserts a single new struct in the data store. + + ## Autogenerate + + The primary key will be automatically included in `returning` if the + field has type `:id` or `:binary_id` and no value was set by the + developer or none was autogenerated by the adapter. + """ + @callback insert(adapter_meta, schema_meta, fields, on_conflict, returning, options) :: + {:ok, fields} | {:invalid, constraints} + + @doc """ + Updates a single struct with the given filters. + + While `filters` can be any record column, it is expected that + at least the primary key (or any other key that uniquely + identifies an existing record) be given as a filter. Therefore, + in case there is no record matching the given filters, + `{:error, :stale}` is returned. + """ + @callback update(adapter_meta, schema_meta, fields, filters, returning, options) :: + {:ok, fields} | {:invalid, constraints} | {:error, :stale} + + @doc """ + Deletes a single struct with the given filters. + + While `filters` can be any record column, it is expected that + at least the primary key (or any other key that uniquely + identifies an existing record) be given as a filter. Therefore, + in case there is no record matching the given filters, + `{:error, :stale}` is returned. 
+ """ + @callback delete(adapter_meta, schema_meta, filters, options) :: + {:ok, fields} | {:invalid, constraints} | {:error, :stale} +end diff --git a/deps/ecto/lib/ecto/adapter/storage.ex b/deps/ecto/lib/ecto/adapter/storage.ex new file mode 100644 index 0000000..7590da2 --- /dev/null +++ b/deps/ecto/lib/ecto/adapter/storage.ex @@ -0,0 +1,53 @@ +defmodule Ecto.Adapter.Storage do + @moduledoc """ + Specifies the adapter storage API. + """ + + @doc """ + Creates the storage given by options. + + Returns `:ok` if it was created successfully. + + Returns `{:error, :already_up}` if the storage has already been created or + `{:error, term}` in case anything else goes wrong. + + ## Examples + + storage_up(username: "postgres", + database: "ecto_test", + hostname: "localhost") + + """ + @callback storage_up(options :: Keyword.t) :: :ok | {:error, :already_up} | {:error, term} + + @doc """ + Drops the storage given by options. + + Returns `:ok` if it was dropped successfully. + + Returns `{:error, :already_down}` if the storage has already been dropped or + `{:error, term}` in case anything else goes wrong. + + ## Examples + + storage_down(username: "postgres", + database: "ecto_test", + hostname: "localhost") + + """ + @callback storage_down(options :: Keyword.t) :: :ok | {:error, :already_down} | {:error, term} + + @doc """ + Returns the status of a storage given by options. + + Can return `:up`, `:down` or `{:error, term}` in case anything goes wrong. + + ## Examples + + storage_status(username: "postgres", + database: "ecto_test", + hostname: "localhost") + + """ + @callback storage_status(options :: Keyword.t()) :: :up | :down | {:error, term()} +end diff --git a/deps/ecto/lib/ecto/adapter/transaction.ex b/deps/ecto/lib/ecto/adapter/transaction.ex new file mode 100644 index 0000000..3676e2e --- /dev/null +++ b/deps/ecto/lib/ecto/adapter/transaction.ex @@ -0,0 +1,31 @@ +defmodule Ecto.Adapter.Transaction do + @moduledoc """ + Specifies the adapter transactions API. + """ + + @type adapter_meta :: Ecto.Adapter.adapter_meta() + + @doc """ + Runs the given function inside a transaction. + + Returns `{:ok, value}` if the transaction was successful where `value` + is the value return by the function or `{:error, value}` if the transaction + was rolled back where `value` is the value given to `rollback/1`. + """ + @callback transaction(adapter_meta, options :: Keyword.t(), function :: fun) :: + {:ok, any} | {:error, any} + + @doc """ + Returns true if the given process is inside a transaction. + """ + @callback in_transaction?(adapter_meta) :: boolean + + @doc """ + Rolls back the current transaction. + + The transaction will return the value given as `{:error, value}`. + + See `c:Ecto.Repo.rollback/1`. 
+ """ + @callback rollback(adapter_meta, value :: any) :: no_return +end diff --git a/deps/ecto/lib/ecto/application.ex b/deps/ecto/lib/ecto/application.ex new file mode 100644 index 0000000..af8e9bb --- /dev/null +++ b/deps/ecto/lib/ecto/application.ex @@ -0,0 +1,13 @@ +defmodule Ecto.Application do + @moduledoc false + use Application + + def start(_type, _args) do + children = [ + Ecto.Repo.Registry, + ] + + opts = [strategy: :one_for_one, name: Ecto.Supervisor] + Supervisor.start_link(children, opts) + end +end diff --git a/deps/ecto/lib/ecto/association.ex b/deps/ecto/lib/ecto/association.ex new file mode 100644 index 0000000..ad5ec7d --- /dev/null +++ b/deps/ecto/lib/ecto/association.ex @@ -0,0 +1,1440 @@ +import Ecto.Query, only: [from: 1, from: 2, join: 4, join: 5, distinct: 3, where: 3] + +defmodule Ecto.Association.NotLoaded do + @moduledoc """ + Struct returned by associations when they are not loaded. + + The fields are: + + * `__field__` - the association field in `owner` + * `__owner__` - the schema that owns the association + * `__cardinality__` - the cardinality of the association + """ + + @type t :: %__MODULE__{ + __field__: atom(), + __owner__: any(), + __cardinality__: atom() + } + + defstruct [:__field__, :__owner__, :__cardinality__] + + defimpl Inspect do + def inspect(not_loaded, _opts) do + msg = "association #{inspect not_loaded.__field__} is not loaded" + ~s(#Ecto.Association.NotLoaded<#{msg}>) + end + end +end + +defmodule Ecto.Association do + @moduledoc false + + @type t :: %{required(:__struct__) => atom, + required(:on_cast) => nil | fun, + required(:cardinality) => :one | :many, + required(:relationship) => :parent | :child, + required(:owner) => atom, + required(:owner_key) => atom, + required(:field) => atom, + required(:unique) => boolean, + optional(atom) => any} + + alias Ecto.Query.Builder.OrderBy + + @doc """ + Helper to check if a queryable is compiled. + """ + def ensure_compiled(queryable, env) do + if not is_atom(queryable) or queryable in env.context_modules do + :skip + else + case Code.ensure_compiled(queryable) do + {:module, _} -> :compiled + {:error, :unavailable} -> :skip + {:error, _} -> :not_found + end + end + end + + @doc """ + Builds the association struct. + + The struct must be defined in the module that implements the + callback and it must contain at least the following keys: + + * `:cardinality` - tells if the association is one to one + or one/many to many + + * `:field` - tells the field in the owner struct where the + association should be stored + + * `:owner` - the owner module of the association + + * `:owner_key` - the key in the owner with the association value + + * `:relationship` - if the relationship to the specified schema is + of a `:child` or a `:parent` + + """ + @callback struct(module, field :: atom, opts :: Keyword.t) :: t + + @doc """ + Invoked after the schema is compiled to validate associations. + + Useful for checking if associated modules exist without running + into deadlocks. + """ + @callback after_compile_validation(t, Macro.Env.t) :: :ok | {:error, String.t} + + @doc """ + Builds a struct for the given association. + + The struct to build from is given as argument in case default values + should be set in the struct. + + Invoked by `Ecto.build_assoc/3`. + """ + @callback build(t, owner :: Ecto.Schema.t, %{atom => term} | [Keyword.t]) :: Ecto.Schema.t + + @doc """ + Returns an association join query. 
+ + This callback receives the association struct and it must return + a query that retrieves all associated entries using joins up to + the owner association. + + For example, a `has_many :comments` inside a `Post` module would + return: + + from c in Comment, join: p in Post, on: c.post_id == p.id + + Note all the logic must be expressed inside joins, as fields like + `where` and `order_by` won't be used by the caller. + + This callback is invoked when `join: assoc(p, :comments)` is used + inside queries. + """ + @callback joins_query(t) :: Ecto.Query.t + + @doc """ + Returns the association query on top of the given query. + + If the query is `nil`, the association target must be used. + + This callback receives the association struct and it must return + a query that retrieves all associated entries with the given + values for the owner key. + + This callback is used by `Ecto.assoc/2` and when preloading. + """ + @callback assoc_query(t, Ecto.Query.t | nil, values :: [term]) :: Ecto.Query.t + + @doc """ + Returns information used by the preloader. + """ + @callback preload_info(t) :: + {:assoc, t, {integer, atom} | {integer, atom, Ecto.Type.t()}} | {:through, t, [atom]} + + @doc """ + Performs the repository change on the association. + + Receives the parent changeset, the current changesets + and the repository action options. Must return the + persisted struct (or nil) or the changeset error. + """ + @callback on_repo_change(t, parent :: Ecto.Changeset.t, changeset :: Ecto.Changeset.t, Ecto.Adapter.t, Keyword.t) :: + {:ok, Ecto.Schema.t | nil} | {:error, Ecto.Changeset.t} + + @doc """ + Retrieves the association from the given schema. + """ + def association_from_schema!(schema, assoc) do + schema.__schema__(:association, assoc) || + raise ArgumentError, "schema #{inspect schema} does not have association #{inspect assoc}" + end + + @doc """ + Returns the association key for the given module with the given suffix. + + ## Examples + + iex> Ecto.Association.association_key(Hello.World, :id) + :world_id + + iex> Ecto.Association.association_key(Hello.HTTP, :id) + :http_id + + iex> Ecto.Association.association_key(Hello.HTTPServer, :id) + :http_server_id + + """ + def association_key(module, suffix) do + prefix = module |> Module.split |> List.last |> Macro.underscore + :"#{prefix}_#{suffix}" + end + + @doc """ + Build an association query through the given associations from the specified owner table + and through the given associations. Finally filter by the provided values of the owner_key of + the first relationship in the chain. Used in Ecto.assoc/2. + """ + def filter_through_chain(owner, through, values) do + chain_through(owner, through, nil, values) + |> distinct([x], true) + end + + @doc """ + Join the target table given a list of associations to go through starting from the owner table. + """ + def join_through_chain(owner, through, query) do + chain_through(owner, through, query, nil) + end + + # This function is used by both join_through_chain/3 and filter_through_chain/3 since the algorithm for both + # is nearly identical barring a few differences. + defp chain_through(owner, through, join_to, values) do + # Flatten the chain of throughs. If any of the associations is a HasThrough this allows us to expand it so we have + # a list of atomic associations to join through. + {_, through} = flatten_through_chain(owner, through, []) + + # If we're joining then we're going forward from the owner table to the destination table. 
+ # Otherwise we're going backward from the destination table then filtering by values. + chain_direction = if(join_to != nil, do: :forward, else: :backward) + + # This stage produces a list of joins represented as a keyword list with the following structure: + # [ + # [schema: (The Schema), in_key: (The key used to join into the table), out_key: (The key used to join with the next), where: (The condition KW list)] + # ] + relation_list = resolve_through_tables(owner, through, chain_direction) + + # Filter out the joins which are redundant + filtered_list = Enum.with_index(relation_list) + |> Enum.filter(fn + # We always keep the first table in the chain since it's our source table for the query + {_, 0} -> true + + {rel, _} -> + # If the condition is not empty we need to join to the table. Otherwise if the in_key and out_key is the same + # then this join is redundant since we can just join to the next table in the chain. + rel.in_key != rel.out_key or rel.where != [] + end) + |> Enum.map(&elem(&1, 0)) + + # If we're preloading we don't need the last table since it is the owner table. + filtered_list = if(join_to == nil, do: Enum.slice(filtered_list, 0..-2), else: filtered_list) + + [source | joins] = filtered_list + + source_schema = source.schema + query = join_to || from(s in source_schema) + + counter = Ecto.Query.Builder.count_binds(query) - 1 + + # We need to create the query by joining all the tables, and also we need the out_key of the final table to use + # for the final WHERE clause with values. + {_, query, _, dest_out_key} = Enum.reduce(joins, {source, query, counter, source.out_key}, fn curr_rel, {prev_rel, query, counter, _} -> + related_queryable = curr_rel.schema + + next = join(query, :inner, [{src, counter}], dest in ^related_queryable, on: field(src, ^prev_rel.out_key) == field(dest, ^curr_rel.in_key)) + |> combine_joins_query(curr_rel.where, counter + 1) + + {curr_rel, next, counter + 1, curr_rel.out_key} + end) + + final_bind = Ecto.Query.Builder.count_binds(query) - 1 + + values = List.wrap(values) + query = case {join_to, values} do + {nil, [single_value]} -> + query + |> where([{dest, final_bind}], field(dest, ^dest_out_key) == ^single_value) + + {nil, values} -> + query + |> where([{dest, final_bind}], field(dest, ^dest_out_key) in ^values) + + {_, _} -> + query + end + + combine_assoc_query(query, source.where || []) + end + + defp flatten_through_chain(owner, [], acc), do: {owner, acc} + defp flatten_through_chain(owner, [assoc | tl], acc) do + refl = association_from_schema!(owner, assoc) + case refl do + %{through: nested_throughs} -> + {owner, acc} = flatten_through_chain(owner, nested_throughs, acc) + flatten_through_chain(owner, tl, acc) + + _ -> + flatten_through_chain(refl.related, tl, acc ++ [assoc]) + end + end + + defp resolve_through_tables(owner, through, :backward) do + # This step generates a list of maps with the following keys: + # [ + # %{schema: ..., out_key: ..., in_key: ..., where: ...} + # ] + # This is a list of all tables that we will need to join to follow the chain of throughs and which key is used + # to join in and out of the table, along with the where condition for that table. The final table of the chain will + # be "owner", and the first table of the chain will be the final destination table of all the throughs. 
+ initial_owner_map = %{schema: owner, out_key: nil, in_key: nil, where: nil} + + Enum.reduce(through, {owner, [initial_owner_map]}, fn assoc, {owner, table_list} -> + refl = association_from_schema!(owner, assoc) + [owner_map | table_list] = table_list + + table_list = case refl do + %{join_through: join_through, join_keys: join_keys, join_where: join_where, where: where} -> + [{owner_join_key, owner_key}, {related_join_key, related_key}] = join_keys + + owner_map = %{owner_map | in_key: owner_key} + join_map = %{schema: join_through, out_key: owner_join_key, in_key: related_join_key, where: join_where} + related_map = %{schema: refl.related, out_key: related_key, in_key: nil, where: where} + + [related_map, join_map, owner_map | table_list] + + _ -> + owner_map = %{owner_map | in_key: refl.owner_key} + related_map = %{schema: refl.related, out_key: refl.related_key, in_key: nil, where: refl.where} + + [related_map, owner_map | table_list] + end + + {refl.related, table_list} + end) + |> elem(1) + end + + defp resolve_through_tables(owner, through, :forward) do + # In the forward case (joining) we need to reverse the list and swap the in_key for the out_key + # since we've changed directions. + resolve_through_tables(owner, through, :backward) + |> Enum.reverse() + |> Enum.map(fn %{out_key: out_key, in_key: in_key} = join -> + %{join | out_key: in_key, in_key: out_key} + end) + end + + @doc """ + Add the default assoc query where clauses to a join. + + This handles only `where` and converts it to a `join`, + as that is the only information propagate in join queries. + """ + def combine_joins_query(query, [], _binding), do: query + + def combine_joins_query(%{joins: joins} = query, [_ | _] = conditions, binding) do + {joins, [join_expr]} = Enum.split(joins, -1) + %{on: %{params: params, expr: expr} = join_on} = join_expr + {expr, params} = expand_where(conditions, expr, Enum.reverse(params), length(params), binding) + %{query | joins: joins ++ [%{join_expr | on: %{join_on | expr: expr, params: params}}]} + end + + @doc """ + Add the default assoc query where clauses a provided query. 
+ """ + def combine_assoc_query(query, []), do: query + def combine_assoc_query(%{wheres: []} = query, conditions) do + {expr, params} = expand_where(conditions, true, [], 0, 0) + %{query | wheres: [%Ecto.Query.BooleanExpr{op: :and, expr: expr, params: params, line: __ENV__.line, file: __ENV__.file}]} + end + def combine_assoc_query(%{wheres: wheres} = query, conditions) do + {wheres, [where_expr]} = Enum.split(wheres, -1) + %{params: params, expr: expr} = where_expr + {expr, params} = expand_where(conditions, expr, Enum.reverse(params), length(params), 0) + %{query | wheres: wheres ++ [%{where_expr | expr: expr, params: params}]} + end + + defp expand_where(conditions, expr, params, counter, binding) do + conjoin_exprs = fn + true, r -> r + l, r-> {:and, [], [l, r]} + end + + {expr, params, _counter} = + Enum.reduce(conditions, {expr, params, counter}, fn + {key, nil}, {expr, params, counter} -> + expr = conjoin_exprs.(expr, {:is_nil, [], [to_field(binding, key)]}) + {expr, params, counter} + + {key, {:not, nil}}, {expr, params, counter} -> + expr = conjoin_exprs.(expr, {:not, [], [{:is_nil, [], [to_field(binding, key)]}]}) + {expr, params, counter} + + {key, {:fragment, frag}}, {expr, params, counter} when is_binary(frag) -> + pieces = Ecto.Query.Builder.fragment_pieces(frag, [to_field(binding, key)]) + expr = conjoin_exprs.(expr, {:fragment, [], pieces}) + {expr, params, counter} + + {key, {:in, value}}, {expr, params, counter} when is_list(value) -> + expr = conjoin_exprs.(expr, {:in, [], [to_field(binding, key), {:^, [], [counter]}]}) + {expr, [{value, {:in, {binding, key}}} | params], counter + 1} + + {key, value}, {expr, params, counter} -> + expr = conjoin_exprs.(expr, {:==, [], [to_field(binding, key), {:^, [], [counter]}]}) + {expr, [{value, {binding, key}} | params], counter + 1} + end) + + {expr, Enum.reverse(params)} + end + + defp to_field(binding, field), + do: {{:., [], [{:&, [], [binding]}, field]}, [], []} + + @doc """ + Build a join query with the given `through` associations starting at `counter`. + """ + def joins_query(query, through, counter) do + Enum.reduce(through, {query, counter}, fn current, {acc, counter} -> + query = join(acc, :inner, [{x, counter}], assoc(x, ^current)) + {query, counter + 1} + end) |> elem(0) + end + + @doc """ + Retrieves related module from queryable. + + ## Examples + + iex> Ecto.Association.related_from_query({"custom_source", Schema}, :comments_v1) + Schema + + iex> Ecto.Association.related_from_query(Schema, :comments_v1) + Schema + + iex> Ecto.Association.related_from_query("wrong", :comments_v1) + ** (ArgumentError) association :comments_v1 queryable must be a schema or a {source, schema}. got: "wrong" + """ + def related_from_query(atom, _name) when is_atom(atom), do: atom + def related_from_query({source, schema}, _name) when is_binary(source) and is_atom(schema), do: schema + def related_from_query(queryable, name) do + raise ArgumentError, "association #{inspect name} queryable must be a schema or " <> + "a {source, schema}. got: #{inspect queryable}" + end + + @doc """ + Applies default values into the struct. + """ + def apply_defaults(struct, defaults, _owner) when is_list(defaults) do + struct(struct, defaults) + end + + def apply_defaults(struct, {mod, fun, args}, owner) do + apply(mod, fun, [struct.__struct__, owner | args]) + end + + @doc """ + Validates `defaults` for association named `name`. 
+ """ + def validate_defaults!(_module, _name, {mod, fun, args} = defaults) + when is_atom(mod) and is_atom(fun) and is_list(args), + do: defaults + + def validate_defaults!(module, _name, fun) when is_atom(fun), + do: {module, fun, []} + + def validate_defaults!(_module, _name, defaults) when is_list(defaults), + do: defaults + + def validate_defaults!(_module, name, defaults), + do: raise ArgumentError, + "expected defaults for #{inspect name} to be a keyword list " <> + "or a {module, fun, args} tuple, got: `#{inspect defaults}`" + + @doc """ + Validates `preload_order` for association named `name`. + """ + def validate_preload_order!(name, preload_order) when is_list(preload_order) do + Enum.map(preload_order, fn + field when is_atom(field) -> + field + + {direction, field} when is_atom(direction) and is_atom(field) -> + unless OrderBy.valid_direction?(direction) do + raise ArgumentError, + "expected `:preload_order` for #{inspect name} to be a keyword list or a list of atoms/fields, " <> + "got: `#{inspect preload_order}`, " <> + "`#{inspect direction}` is not a valid direction" + end + + {direction, field} + + item -> + raise ArgumentError, + "expected `:preload_order` for #{inspect name} to be a keyword list or a list of atoms/fields, " <> + "got: `#{inspect preload_order}`, " <> + "`#{inspect item}` is not valid" + end) + end + + def validate_preload_order!(name, preload_order) do + raise ArgumentError, + "expected `:preload_order` for #{inspect name} to be a keyword list or a list of atoms/fields, " <> + "got: `#{inspect preload_order}`" + end + + @doc """ + Merges source from query into to the given schema. + + In case the query does not have a source, returns + the schema unchanged. + """ + def merge_source(schema, query) + + def merge_source(%{__meta__: %{source: source}} = struct, {source, _}) do + struct + end + + def merge_source(struct, {source, _}) do + Ecto.put_meta(struct, source: source) + end + + def merge_source(struct, _query) do + struct + end + + @doc """ + Updates the prefix of a changeset based on the metadata. + """ + def update_parent_prefix( + %{data: %{__meta__: %{prefix: prefix}}} = changeset, + %{__meta__: %{prefix: prefix}} + ), + do: changeset + + def update_parent_prefix( + %{data: %{__meta__: %{prefix: nil}}} = changeset, + %{__meta__: %{prefix: prefix}} + ), + do: update_in(changeset.data, &Ecto.put_meta(&1, prefix: prefix)) + + + def update_parent_prefix(changeset, _), + do: changeset + + @doc """ + Performs the repository action in the related changeset, + returning `{:ok, data}` or `{:error, changes}`. + """ + def on_repo_change(%{data: struct}, [], _adapter, _opts) do + {:ok, struct} + end + + def on_repo_change(changeset, assocs, adapter, opts) do + %{data: struct, changes: changes, action: action} = changeset + + {struct, changes, _halt, valid?} = + Enum.reduce(assocs, {struct, changes, false, true}, fn {refl, value}, acc -> + on_repo_change(refl, value, changeset, action, adapter, opts, acc) + end) + + case valid? 
do + true -> {:ok, struct} + false -> {:error, changes} + end + end + + defp on_repo_change(%{cardinality: :one, field: field} = meta, nil, parent_changeset, + _repo_action, adapter, opts, {parent, changes, halt, valid?}) do + if not halt, do: maybe_replace_one!(meta, nil, parent, parent_changeset, adapter, opts) + {Map.put(parent, field, nil), Map.put(changes, field, nil), halt, valid?} + end + + defp on_repo_change(%{cardinality: :one, field: field, __struct__: mod} = meta, + %{action: action, data: current} = changeset, parent_changeset, + repo_action, adapter, opts, {parent, changes, halt, valid?}) do + check_action!(meta, action, repo_action) + if not halt, do: maybe_replace_one!(meta, current, parent, parent_changeset, adapter, opts) + + case on_repo_change_unless_halted(halt, mod, meta, parent_changeset, changeset, adapter, opts) do + {:ok, struct} -> + {Map.put(parent, field, struct), Map.put(changes, field, changeset), halt, valid?} + + {:error, error_changeset} -> + {parent, Map.put(changes, field, error_changeset), + halted?(halt, changeset, error_changeset), false} + end + end + + defp on_repo_change(%{cardinality: :many, field: field, __struct__: mod} = meta, + changesets, parent_changeset, repo_action, adapter, opts, + {parent, changes, halt, all_valid?}) do + {changesets, structs, halt, valid?} = + Enum.reduce(changesets, {[], [], halt, true}, fn + %{action: action} = changeset, {changesets, structs, halt, valid?} -> + check_action!(meta, action, repo_action) + + case on_repo_change_unless_halted(halt, mod, meta, parent_changeset, changeset, adapter, opts) do + {:ok, nil} -> + {[changeset | changesets], structs, halt, valid?} + + {:ok, struct} -> + {[changeset | changesets], [struct | structs], halt, valid?} + + {:error, error_changeset} -> + {[error_changeset | changesets], structs, halted?(halt, changeset, error_changeset), false} + end + end) + + if valid? 
do + {Map.put(parent, field, Enum.reverse(structs)), + Map.put(changes, field, Enum.reverse(changesets)), + halt, all_valid?} + else + {parent, + Map.put(changes, field, Enum.reverse(changesets)), + halt, false} + end + end + + defp check_action!(%{related: schema}, :delete, :insert), + do: raise(ArgumentError, "got action :delete in changeset for associated #{inspect schema} while inserting") + defp check_action!(_, _, _), do: :ok + + defp halted?(true, _, _), do: true + defp halted?(_, %{valid?: true}, %{valid?: false}), do: true + defp halted?(_, _, _), do: false + + defp on_repo_change_unless_halted(true, _mod, _meta, _parent, changeset, _adapter, _opts) do + {:error, changeset} + end + defp on_repo_change_unless_halted(false, mod, meta, parent, changeset, adapter, opts) do + mod.on_repo_change(meta, parent, changeset, adapter, opts) + end + + defp maybe_replace_one!(%{field: field, __struct__: mod} = meta, current, parent, + parent_changeset, adapter, opts) do + previous = Map.get(parent, field) + if replaceable?(previous) and primary_key!(previous) != primary_key!(current) do + changeset = %{Ecto.Changeset.change(previous) | action: :replace} + + case mod.on_repo_change(meta, parent_changeset, changeset, adapter, opts) do + {:ok, _} -> + :ok + {:error, changeset} -> + raise Ecto.InvalidChangesetError, + action: changeset.action, changeset: changeset + end + end + end + + defp maybe_replace_one!(_, _, _, _, _, _), do: :ok + + defp replaceable?(nil), do: false + defp replaceable?(%Ecto.Association.NotLoaded{}), do: false + defp replaceable?(%{__meta__: %{state: :built}}), do: false + defp replaceable?(_), do: true + + defp primary_key!(nil), do: [] + defp primary_key!(struct), do: Ecto.primary_key!(struct) +end + +defmodule Ecto.Association.Has do + @moduledoc """ + The association struct for `has_one` and `has_many` associations. 
+ + Its fields are: + + * `cardinality` - The association cardinality + * `field` - The name of the association field on the schema + * `owner` - The schema where the association was defined + * `related` - The schema that is associated + * `owner_key` - The key on the `owner` schema used for the association + * `related_key` - The key on the `related` schema used for the association + * `queryable` - The real query to use for querying association + * `on_delete` - The action taken on associations when schema is deleted + * `on_replace` - The action taken on associations when schema is replaced + * `defaults` - Default fields used when building the association + * `relationship` - The relationship to the specified schema, default is `:child` + * `preload_order` - Default `order_by` of the association, used only by preload + """ + + @behaviour Ecto.Association + @on_delete_opts [:nothing, :nilify_all, :delete_all] + @on_replace_opts [:raise, :mark_as_invalid, :delete, :delete_if_exists, :nilify] + @has_one_on_replace_opts @on_replace_opts ++ [:update] + defstruct [:cardinality, :field, :owner, :related, :owner_key, :related_key, :on_cast, + :queryable, :on_delete, :on_replace, where: [], unique: true, defaults: [], + relationship: :child, ordered: false, preload_order: []] + + @impl true + def after_compile_validation(%{queryable: queryable, related_key: related_key}, env) do + compiled = Ecto.Association.ensure_compiled(queryable, env) + + cond do + compiled == :skip -> + :ok + compiled == :not_found -> + {:error, "associated schema #{inspect queryable} does not exist"} + not function_exported?(queryable, :__schema__, 2) -> + {:error, "associated module #{inspect queryable} is not an Ecto schema"} + is_nil queryable.__schema__(:type, related_key) -> + {:error, "associated schema #{inspect queryable} does not have field `#{related_key}`"} + true -> + :ok + end + end + + @impl true + def struct(module, name, opts) do + queryable = Keyword.fetch!(opts, :queryable) + cardinality = Keyword.fetch!(opts, :cardinality) + related = Ecto.Association.related_from_query(queryable, name) + + ref = + module + |> Module.get_attribute(:primary_key) + |> get_ref(opts[:references], name) + + unless Module.get_attribute(module, :ecto_fields)[ref] do + raise ArgumentError, "schema does not have the field #{inspect ref} used by " <> + "association #{inspect name}, please set the :references option accordingly" + end + + if opts[:through] do + raise ArgumentError, "invalid association #{inspect name}. When using the :through " <> + "option, the schema should not be passed as second argument" + end + + on_delete = Keyword.get(opts, :on_delete, :nothing) + unless on_delete in @on_delete_opts do + raise ArgumentError, "invalid :on_delete option for #{inspect name}. " <> + "The only valid options are: " <> + Enum.map_join(@on_delete_opts, ", ", &"`#{inspect &1}`") + end + + on_replace = Keyword.get(opts, :on_replace, :raise) + on_replace_opts = if cardinality == :one, do: @has_one_on_replace_opts, else: @on_replace_opts + + unless on_replace in on_replace_opts do + raise ArgumentError, "invalid `:on_replace` option for #{inspect name}. 
" <> + "The only valid options are: " <> + Enum.map_join(@on_replace_opts, ", ", &"`#{inspect &1}`") + end + + defaults = Ecto.Association.validate_defaults!(module, name, opts[:defaults] || []) + preload_order = Ecto.Association.validate_preload_order!(name, opts[:preload_order] || []) + where = opts[:where] || [] + + unless is_list(where) do + raise ArgumentError, "expected `:where` for #{inspect name} to be a keyword list, got: `#{inspect where}`" + end + + %__MODULE__{ + field: name, + cardinality: cardinality, + owner: module, + related: related, + owner_key: ref, + related_key: opts[:foreign_key] || Ecto.Association.association_key(module, ref), + queryable: queryable, + on_delete: on_delete, + on_replace: on_replace, + defaults: defaults, + where: where, + preload_order: preload_order + } + end + + defp get_ref(primary_key, nil, name) when primary_key in [nil, false] do + raise ArgumentError, "need to set :references option for " <> + "association #{inspect name} when schema has no primary key" + end + defp get_ref(primary_key, nil, _name), do: elem(primary_key, 0) + defp get_ref(_primary_key, references, _name), do: references + + @impl true + def build(%{owner_key: owner_key, related_key: related_key} = refl, owner, attributes) do + data = refl |> build(owner) |> struct(attributes) + %{data | related_key => Map.get(owner, owner_key)} + end + + @impl true + def joins_query(%{related_key: related_key, owner: owner, owner_key: owner_key, queryable: queryable} = assoc) do + from(o in owner, join: q in ^queryable, on: field(q, ^related_key) == field(o, ^owner_key)) + |> Ecto.Association.combine_joins_query(assoc.where, 1) + end + + @impl true + def assoc_query(%{related_key: related_key, queryable: queryable} = assoc, query, [value]) do + from(x in (query || queryable), where: field(x, ^related_key) == ^value) + |> Ecto.Association.combine_assoc_query(assoc.where) + end + + @impl true + def assoc_query(%{related_key: related_key, queryable: queryable} = assoc, query, values) do + from(x in (query || queryable), where: field(x, ^related_key) in ^values) + |> Ecto.Association.combine_assoc_query(assoc.where) + end + + @impl true + def preload_info(%{related_key: related_key} = refl) do + {:assoc, refl, {0, related_key}} + end + + @impl true + def on_repo_change(%{on_replace: :delete_if_exists} = refl, parent_changeset, + %{action: :replace} = changeset, adapter, opts) do + try do + on_repo_change(%{refl | on_replace: :delete}, parent_changeset, changeset, adapter, opts) + rescue + Ecto.StaleEntryError -> {:ok, nil} + end + end + + def on_repo_change(%{on_replace: on_replace} = refl, %{data: parent} = parent_changeset, + %{action: :replace} = changeset, adapter, opts) do + changeset = case on_replace do + :nilify -> %{changeset | action: :update} + :update -> %{changeset | action: :update} + :delete -> %{changeset | action: :delete} + end + + changeset = Ecto.Association.update_parent_prefix(changeset, parent) + + case on_repo_change(refl, %{parent_changeset | data: nil}, changeset, adapter, opts) do + {:ok, _} -> {:ok, nil} + {:error, changeset} -> {:error, changeset} + end + end + + def on_repo_change(assoc, parent_changeset, changeset, _adapter, opts) do + %{data: parent, repo: repo} = parent_changeset + %{action: action, changes: changes} = changeset + + {key, value} = parent_key(assoc, parent) + changeset = update_parent_key(changeset, action, key, value) + changeset = Ecto.Association.update_parent_prefix(changeset, parent) + + case apply(repo, action, [changeset, opts]) do + {:ok, 
_} = ok -> + if action == :delete, do: {:ok, nil}, else: ok + {:error, changeset} -> + original = Map.get(changes, key) + {:error, put_in(changeset.changes[key], original)} + end + end + + defp update_parent_key(changeset, :delete, _key, _value), + do: changeset + defp update_parent_key(changeset, _action, key, value), + do: Ecto.Changeset.put_change(changeset, key, value) + + defp parent_key(%{related_key: related_key}, nil) do + {related_key, nil} + end + defp parent_key(%{owner_key: owner_key, related_key: related_key}, owner) do + {related_key, Map.get(owner, owner_key)} + end + + ## Relation callbacks + @behaviour Ecto.Changeset.Relation + + @impl true + def build(%{related: related, queryable: queryable, defaults: defaults}, owner) do + related + |> Ecto.Association.apply_defaults(defaults, owner) + |> Ecto.Association.merge_source(queryable) + end + + ## On delete callbacks + + @doc false + def delete_all(refl, parent, repo_name, opts) do + if query = on_delete_query(refl, parent) do + Ecto.Repo.Queryable.delete_all repo_name, query, opts + end + end + + @doc false + def nilify_all(%{related_key: related_key} = refl, parent, repo_name, opts) do + if query = on_delete_query(refl, parent) do + Ecto.Repo.Queryable.update_all repo_name, query, [set: [{related_key, nil}]], opts + end + end + + defp on_delete_query(%{owner_key: owner_key, related_key: related_key, + queryable: queryable}, parent) do + if value = Map.get(parent, owner_key) do + from x in queryable, where: field(x, ^related_key) == ^value + end + end +end + +defmodule Ecto.Association.HasThrough do + @moduledoc """ + The association struct for `has_one` and `has_many` through associations. + + Its fields are: + + * `cardinality` - The association cardinality + * `field` - The name of the association field on the schema + * `owner` - The schema where the association was defined + * `owner_key` - The key on the `owner` schema used for the association + * `through` - The through associations + * `relationship` - The relationship to the specified schema, default `:child` + """ + + @behaviour Ecto.Association + defstruct [:cardinality, :field, :owner, :owner_key, :through, :on_cast, + relationship: :child, unique: true, ordered: false] + + @impl true + def after_compile_validation(_, _) do + :ok + end + + @impl true + def struct(module, name, opts) do + through = Keyword.fetch!(opts, :through) + + refl = + case through do + [h,_|_] -> + Module.get_attribute(module, :ecto_assocs)[h] + _ -> + raise ArgumentError, ":through expects a list with at least two entries: " <> + "the association in the current module and one step through, got: #{inspect through}" + end + + unless refl do + raise ArgumentError, "schema does not have the association #{inspect hd(through)} " <> + "used by association #{inspect name}, please ensure the association exists and " <> + "is defined before the :through one" + end + + %__MODULE__{ + field: name, + cardinality: Keyword.fetch!(opts, :cardinality), + through: through, + owner: module, + owner_key: refl.owner_key, + } + end + + @impl true + def build(%{field: name}, %{__struct__: owner}, _attributes) do + raise ArgumentError, + "cannot build through association `#{inspect name}` for #{inspect owner}. " <> + "Instead build the intermediate steps explicitly." 
+ end + + @impl true + def preload_info(%{through: through} = refl) do + {:through, refl, through} + end + + @impl true + def on_repo_change(%{field: name}, _, _, _, _) do + raise ArgumentError, + "cannot insert/update/delete through associations `#{inspect name}` via the repository. " <> + "Instead build the intermediate steps explicitly." + end + + @impl true + def joins_query(%{owner: owner, through: through}) do + Ecto.Association.join_through_chain(owner, through, from(x in owner)) + end + + @impl true + def assoc_query(%{owner: owner, through: through}, _, values) do + Ecto.Association.filter_through_chain(owner, through, values) + end +end + +defmodule Ecto.Association.BelongsTo do + @moduledoc """ + The association struct for a `belongs_to` association. + + Its fields are: + + * `cardinality` - The association cardinality + * `field` - The name of the association field on the schema + * `owner` - The schema where the association was defined + * `owner_key` - The key on the `owner` schema used for the association + * `related` - The schema that is associated + * `related_key` - The key on the `related` schema used for the association + * `queryable` - The real query to use for querying association + * `defaults` - Default fields used when building the association + * `relationship` - The relationship to the specified schema, default `:parent` + * `on_replace` - The action taken on associations when schema is replaced + """ + + @behaviour Ecto.Association + @on_replace_opts [:raise, :mark_as_invalid, :delete, :delete_if_exists, :nilify, :update] + defstruct [:field, :owner, :related, :owner_key, :related_key, :queryable, :on_cast, + :on_replace, where: [], defaults: [], cardinality: :one, relationship: :parent, + unique: true, ordered: false] + + @impl true + def after_compile_validation(%{queryable: queryable, related_key: related_key}, env) do + compiled = Ecto.Association.ensure_compiled(queryable, env) + + cond do + compiled == :skip -> + :ok + compiled == :not_found -> + {:error, "associated schema #{inspect queryable} does not exist"} + not function_exported?(queryable, :__schema__, 2) -> + {:error, "associated module #{inspect queryable} is not an Ecto schema"} + is_nil queryable.__schema__(:type, related_key) -> + {:error, "associated schema #{inspect queryable} does not have field `#{related_key}`"} + true -> + :ok + end + end + + @impl true + def struct(module, name, opts) do + ref = if ref = opts[:references], do: ref, else: :id + queryable = Keyword.fetch!(opts, :queryable) + related = Ecto.Association.related_from_query(queryable, name) + on_replace = Keyword.get(opts, :on_replace, :raise) + + unless on_replace in @on_replace_opts do + raise ArgumentError, "invalid `:on_replace` option for #{inspect name}. 
" <> + "The only valid options are: " <> + Enum.map_join(@on_replace_opts, ", ", &"`#{inspect &1}`") + end + + defaults = Ecto.Association.validate_defaults!(module, name, opts[:defaults] || []) + where = opts[:where] || [] + + unless is_list(where) do + raise ArgumentError, "expected `:where` for #{inspect name} to be a keyword list, got: `#{inspect where}`" + end + + %__MODULE__{ + field: name, + owner: module, + related: related, + owner_key: Keyword.fetch!(opts, :foreign_key), + related_key: ref, + queryable: queryable, + on_replace: on_replace, + defaults: defaults, + where: where + } + end + + @impl true + def build(refl, owner, attributes) do + refl + |> build(owner) + |> struct(attributes) + end + + @impl true + def joins_query(%{related_key: related_key, owner: owner, owner_key: owner_key, queryable: queryable} = assoc) do + from(o in owner, join: q in ^queryable, on: field(q, ^related_key) == field(o, ^owner_key)) + |> Ecto.Association.combine_joins_query(assoc.where, 1) + end + + @impl true + def assoc_query(%{related_key: related_key, queryable: queryable} = assoc, query, [value]) do + from(x in (query || queryable), where: field(x, ^related_key) == ^value) + |> Ecto.Association.combine_assoc_query(assoc.where) + end + + @impl true + def assoc_query(%{related_key: related_key, queryable: queryable} = assoc, query, values) do + from(x in (query || queryable), where: field(x, ^related_key) in ^values) + |> Ecto.Association.combine_assoc_query(assoc.where) + end + + @impl true + def preload_info(%{related_key: related_key} = refl) do + {:assoc, refl, {0, related_key}} + end + + @impl true + def on_repo_change(%{on_replace: :nilify}, _, %{action: :replace}, _adapter, _opts) do + {:ok, nil} + end + + def on_repo_change(%{on_replace: :delete_if_exists} = refl, parent_changeset, + %{action: :replace} = changeset, adapter, opts) do + try do + on_repo_change(%{refl | on_replace: :delete}, parent_changeset, changeset, adapter, opts) + rescue + Ecto.StaleEntryError -> {:ok, nil} + end + end + + def on_repo_change(%{on_replace: on_replace} = refl, parent_changeset, + %{action: :replace} = changeset, adapter, opts) do + changeset = + case on_replace do + :delete -> %{changeset | action: :delete} + :update -> %{changeset | action: :update} + end + + on_repo_change(refl, parent_changeset, changeset, adapter, opts) + end + + def on_repo_change(_refl, %{data: parent, repo: repo}, %{action: action} = changeset, _adapter, opts) do + changeset = Ecto.Association.update_parent_prefix(changeset, parent) + + case apply(repo, action, [changeset, opts]) do + {:ok, _} = ok -> + if action == :delete, do: {:ok, nil}, else: ok + {:error, changeset} -> + {:error, changeset} + end + end + + ## Relation callbacks + @behaviour Ecto.Changeset.Relation + + @impl true + def build(%{related: related, queryable: queryable, defaults: defaults}, owner) do + related + |> Ecto.Association.apply_defaults(defaults, owner) + |> Ecto.Association.merge_source(queryable) + end +end + +defmodule Ecto.Association.ManyToMany do + @moduledoc """ + The association struct for `many_to_many` associations. 
+ + Its fields are: + + * `cardinality` - The association cardinality + * `field` - The name of the association field on the schema + * `owner` - The schema where the association was defined + * `related` - The schema that is associated + * `owner_key` - The key on the `owner` schema used for the association + * `queryable` - The real query to use for querying association + * `on_delete` - The action taken on associations when schema is deleted + * `on_replace` - The action taken on associations when schema is replaced + * `defaults` - Default fields used when building the association + * `relationship` - The relationship to the specified schema, default `:child` + * `join_keys` - The keyword list with many to many join keys + * `join_through` - Atom (representing a schema) or a string (representing a table) + for many to many associations + * `join_defaults` - A list of defaults for join associations + * `preload_order` - Default `order_by` of the association, used only by preload + """ + + @behaviour Ecto.Association + @on_delete_opts [:nothing, :delete_all] + @on_replace_opts [:raise, :mark_as_invalid, :delete] + defstruct [:field, :owner, :related, :owner_key, :queryable, :on_delete, + :on_replace, :join_keys, :join_through, :on_cast, where: [], + join_where: [], defaults: [], join_defaults: [], relationship: :child, + cardinality: :many, unique: false, ordered: false, preload_order: []] + + @impl true + def after_compile_validation(%{queryable: queryable, join_through: join_through}, env) do + compiled = Ecto.Association.ensure_compiled(queryable, env) + join_compiled = Ecto.Association.ensure_compiled(join_through, env) + + cond do + compiled == :skip -> + :ok + compiled == :not_found -> + {:error, "associated schema #{inspect queryable} does not exist"} + not function_exported?(queryable, :__schema__, 2) -> + {:error, "associated module #{inspect queryable} is not an Ecto schema"} + join_compiled == :skip -> + :ok + join_compiled == :not_found -> + {:error, ":join_through schema #{inspect join_through} does not exist"} + not function_exported?(join_through, :__schema__, 2) -> + {:error, ":join_through module #{inspect join_through} is not an Ecto schema"} + true -> + :ok + end + end + + @impl true + def struct(module, name, opts) do + queryable = Keyword.fetch!(opts, :queryable) + related = Ecto.Association.related_from_query(queryable, name) + + join_keys = opts[:join_keys] + join_through = opts[:join_through] + validate_join_through(name, join_through) + + {owner_key, join_keys} = + case join_keys do + [{join_owner_key, owner_key}, {join_related_key, related_key}] + when is_atom(join_owner_key) and is_atom(owner_key) and + is_atom(join_related_key) and is_atom(related_key) -> + {owner_key, join_keys} + nil -> + {:id, default_join_keys(module, related)} + _ -> + raise ArgumentError, + "many_to_many #{inspect name} expect :join_keys to be a keyword list " <> + "with two entries, the first being how the join table should reach " <> + "the current schema and the second how the join table should reach " <> + "the associated schema. 
For example: #{inspect default_join_keys(module, related)}" + end + + unless Module.get_attribute(module, :ecto_fields)[owner_key] do + raise ArgumentError, "schema does not have the field #{inspect owner_key} used by " <> + "association #{inspect name}, please set the :join_keys option accordingly" + end + + on_delete = Keyword.get(opts, :on_delete, :nothing) + on_replace = Keyword.get(opts, :on_replace, :raise) + + unless on_delete in @on_delete_opts do + raise ArgumentError, "invalid :on_delete option for #{inspect name}. " <> + "The only valid options are: " <> + Enum.map_join(@on_delete_opts, ", ", &"`#{inspect &1}`") + end + + unless on_replace in @on_replace_opts do + raise ArgumentError, "invalid `:on_replace` option for #{inspect name}. " <> + "The only valid options are: " <> + Enum.map_join(@on_replace_opts, ", ", &"`#{inspect &1}`") + end + + where = opts[:where] || [] + join_where = opts[:join_where] || [] + defaults = Ecto.Association.validate_defaults!(module, name, opts[:defaults] || []) + join_defaults = Ecto.Association.validate_defaults!(module, name, opts[:join_defaults] || []) + preload_order = Ecto.Association.validate_preload_order!(name, opts[:preload_order] || []) + + unless is_list(where) do + raise ArgumentError, "expected `:where` for #{inspect name} to be a keyword list, got: `#{inspect where}`" + end + + unless is_list(join_where) do + raise ArgumentError, "expected `:join_where` for #{inspect name} to be a keyword list, got: `#{inspect join_where}`" + end + + if opts[:join_defaults] && is_binary(join_through) do + raise ArgumentError, ":join_defaults has no effect for a :join_through without a schema" + end + + %__MODULE__{ + field: name, + cardinality: Keyword.fetch!(opts, :cardinality), + owner: module, + related: related, + owner_key: owner_key, + join_keys: join_keys, + join_where: join_where, + join_through: join_through, + join_defaults: join_defaults, + queryable: queryable, + on_delete: on_delete, + on_replace: on_replace, + unique: Keyword.get(opts, :unique, false), + defaults: defaults, + where: where, + preload_order: preload_order + } + end + + defp default_join_keys(module, related) do + [{Ecto.Association.association_key(module, :id), :id}, + {Ecto.Association.association_key(related, :id), :id}] + end + + @impl true + def joins_query(%{owner: owner, queryable: queryable, + join_through: join_through, join_keys: join_keys} = assoc) do + [{join_owner_key, owner_key}, {join_related_key, related_key}] = join_keys + + from(o in owner, + join: j in ^join_through, on: field(j, ^join_owner_key) == field(o, ^owner_key), + join: q in ^queryable, on: field(j, ^join_related_key) == field(q, ^related_key)) + |> Ecto.Association.combine_joins_query(assoc.where, 2) + |> Ecto.Association.combine_joins_query(assoc.join_where, 1) + end + + def assoc_query(%{queryable: queryable} = refl, values) do + assoc_query(refl, queryable, values) + end + + @impl true + def assoc_query(assoc, query, values) do + %{queryable: queryable, join_through: join_through, join_keys: join_keys, owner: owner} = assoc + [{join_owner_key, owner_key}, {join_related_key, related_key}] = join_keys + + owner_key_type = owner.__schema__(:type, owner_key) + + # We only need to join in the "join table". Preload and Ecto.assoc expressions can then filter + # by &1.join_owner_key in ^... to filter down to the associated entries in the related table. 
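+    #
+    # Hypothetical illustration (a Post/Tag pair joined through a "posts_tags"
+    # table with the default join keys): for values = [1, 2] this builds roughly
+    #
+    #   from t in Tag,
+    #     join: j in "posts_tags", on: t.id == j.tag_id,
+    #     where: j.post_id in type(^[1, 2], {:in, :id})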
+ from(q in (query || queryable), + join: j in ^join_through, on: field(q, ^related_key) == field(j, ^join_related_key), + where: field(j, ^join_owner_key) in type(^values, {:in, ^owner_key_type}) + ) + |> Ecto.Association.combine_assoc_query(assoc.where) + |> Ecto.Association.combine_joins_query(assoc.join_where, 1) + end + + @impl true + def build(refl, owner, attributes) do + refl + |> build(owner) + |> struct(attributes) + end + + @impl true + def preload_info(%{join_keys: [{join_owner_key, owner_key}, {_, _}], owner: owner} = refl) do + owner_key_type = owner.__schema__(:type, owner_key) + + # When preloading use the last bound table (which is the join table) and the join_owner_key + # to filter out related entities to the owner structs we're preloading with. + {:assoc, refl, {-1, join_owner_key, owner_key_type}} + end + + @impl true + def on_repo_change(%{on_replace: :delete} = refl, parent_changeset, + %{action: :replace} = changeset, adapter, opts) do + on_repo_change(refl, parent_changeset, %{changeset | action: :delete}, adapter, opts) + end + + def on_repo_change(%{join_keys: join_keys, join_through: join_through, join_where: join_where}, + %{repo: repo, data: owner}, %{action: :delete, data: related}, adapter, opts) do + [{join_owner_key, owner_key}, {join_related_key, related_key}] = join_keys + owner_value = dump! :delete, join_through, owner, owner_key, adapter + related_value = dump! :delete, join_through, related, related_key, adapter + + query = + join_through + |> where([j], field(j, ^join_owner_key) == ^owner_value) + |> where([j], field(j, ^join_related_key) == ^related_value) + |> Ecto.Association.combine_assoc_query(join_where) + + query = %{query | prefix: owner.__meta__.prefix} + repo.delete_all(query, opts) + {:ok, nil} + end + + def on_repo_change(%{field: field, join_through: join_through, join_keys: join_keys} = refl, + %{repo: repo, data: owner} = parent_changeset, + %{action: action} = changeset, adapter, opts) do + changeset = Ecto.Association.update_parent_prefix(changeset, owner) + + case apply(repo, action, [changeset, opts]) do + {:ok, related} -> + [{join_owner_key, owner_key}, {join_related_key, related_key}] = join_keys + + if insert_join?(parent_changeset, changeset, field, related_key) do + owner_value = dump! :insert, join_through, owner, owner_key, adapter + related_value = dump! :insert, join_through, related, related_key, adapter + data = %{join_owner_key => owner_value, join_related_key => related_value} + + case insert_join(join_through, refl, parent_changeset, data, opts) do + {:error, join_changeset} -> + {:error, %{changeset | errors: join_changeset.errors ++ changeset.errors, + valid?: join_changeset.valid? 
and changeset.valid?}} + _ -> + {:ok, related} + end + else + {:ok, related} + end + + {:error, changeset} -> + {:error, changeset} + end + end + + defp validate_join_through(name, nil) do + raise ArgumentError, "many_to_many #{inspect name} associations require the :join_through option to be given" + end + defp validate_join_through(_, join_through) when is_atom(join_through) or is_binary(join_through) do + :ok + end + defp validate_join_through(name, _join_through) do + raise ArgumentError, + "many_to_many #{inspect name} associations require the :join_through option to be " <> + "an atom (representing a schema) or a string (representing a table)" + end + + defp insert_join?(%{action: :insert}, _, _field, _related_key), do: true + defp insert_join?(_, %{action: :insert}, _field, _related_key), do: true + defp insert_join?(%{data: owner}, %{data: related}, field, related_key) do + current_key = Map.fetch!(related, related_key) + not Enum.any? Map.fetch!(owner, field), fn child -> + Map.get(child, related_key) == current_key + end + end + + defp insert_join(join_through, _refl, %{repo: repo, data: owner}, data, opts) when is_binary(join_through) do + opts = Keyword.put_new(opts, :prefix, owner.__meta__.prefix) + repo.insert_all(join_through, [data], opts) + end + + defp insert_join(join_through, refl, parent_changeset, data, opts) when is_atom(join_through) do + %{repo: repo, constraints: constraints, data: owner} = parent_changeset + + changeset = + join_through + |> Ecto.Association.apply_defaults(refl.join_defaults, owner) + |> Map.merge(data) + |> Ecto.Changeset.change() + |> Map.put(:constraints, constraints) + |> put_new_prefix(owner.__meta__.prefix) + + repo.insert(changeset, opts) + end + + defp put_new_prefix(%{data: %{__meta__: %{prefix: prefix}}} = changeset, prefix), + do: changeset + + defp put_new_prefix(%{data: %{__meta__: %{prefix: nil}}} = changeset, prefix), + do: update_in(changeset.data, &Ecto.put_meta(&1, prefix: prefix)) + + defp put_new_prefix(changeset, _), + do: changeset + + defp field!(op, struct, field) do + Map.get(struct, field) || raise "could not #{op} join entry because `#{field}` is nil in #{inspect struct}" + end + + defp dump!(action, join_through, struct, field, adapter) when is_binary(join_through) do + value = field!(action, struct, field) + type = struct.__struct__.__schema__(:type, field) + + case Ecto.Type.adapter_dump(adapter, type, value) do + {:ok, value} -> + value + :error -> + raise Ecto.ChangeError, + "value `#{inspect value}` for `#{inspect struct.__struct__}.#{field}` " <> + "in `#{action}` does not match type #{inspect type}" + end + end + + defp dump!(action, join_through, struct, field, _) when is_atom(join_through) do + field!(action, struct, field) + end + + ## Relation callbacks + @behaviour Ecto.Changeset.Relation + + @impl true + def build(%{related: related, queryable: queryable, defaults: defaults}, owner) do + related + |> Ecto.Association.apply_defaults(defaults, owner) + |> Ecto.Association.merge_source(queryable) + end + + ## On delete callbacks + + @doc false + def delete_all(refl, parent, repo_name, opts) do + %{join_through: join_through, join_keys: join_keys, owner: owner} = refl + [{join_owner_key, owner_key}, {_, _}] = join_keys + + if value = Map.get(parent, owner_key) do + owner_type = owner.__schema__(:type, owner_key) + query = from j in join_through, where: field(j, ^join_owner_key) == type(^value, ^owner_type) + Ecto.Repo.Queryable.delete_all repo_name, query, opts + end + end +end diff --git 
a/deps/ecto/lib/ecto/changeset.ex b/deps/ecto/lib/ecto/changeset.ex new file mode 100644 index 0000000..3ae7205 --- /dev/null +++ b/deps/ecto/lib/ecto/changeset.ex @@ -0,0 +1,3173 @@ +defmodule Ecto.Changeset do + @moduledoc ~S""" + Changesets allow filtering, casting, validation and + definition of constraints when manipulating structs. + + There is an example of working with changesets in the introductory + documentation in the `Ecto` module. The functions `cast/4` and + `change/2` are the usual entry points for creating changesets. + The first one is used to cast and validate external parameters, + such as parameters sent through a form, API, command line, etc. + The second one is used to change data directly from your application. + + The remaining functions in this module, such as validations, + constraints, association handling, are about manipulating + changesets. Let's discuss some of this extra functionality. + + ## External vs internal data + + Changesets allow working with both kinds of data: + + * internal to the application - for example programmatically generated, + or coming from other subsystems. This use case is primarily covered + by the `change/2` and `put_change/3` functions. + + * external to the application - for example data provided by the user in + a form that needs to be type-converted and properly validated. This + use case is primarily covered by the `cast/4` function. + + ## Validations and constraints + + Ecto changesets provide both validations and constraints which + are ultimately turned into errors in case something goes wrong. + + The difference between them is that most validations can be + executed without a need to interact with the database and, therefore, + are always executed before attempting to insert or update the entry + in the database. Validations run immediately when a validation function + is called on the data that is contained in the changeset at that time. + + Some validations may happen against the database but + they are inherently unsafe. Those validations start with a `unsafe_` + prefix, such as `unsafe_validate_unique/3`. + + On the other hand, constraints rely on the database and are always safe. + As a consequence, validations are always checked before constraints. + Constraints won't even be checked in case validations failed. + + Let's see an example: + + defmodule User do + use Ecto.Schema + import Ecto.Changeset + + schema "users" do + field :name + field :email + field :age, :integer + end + + def changeset(user, params \\ %{}) do + user + |> cast(params, [:name, :email, :age]) + |> validate_required([:name, :email]) + |> validate_format(:email, ~r/@/) + |> validate_inclusion(:age, 18..100) + |> unique_constraint(:email) + end + end + + In the `changeset/2` function above, we define three validations. + They check that `name` and `email` fields are present in the + changeset, the e-mail is of the specified format, and the age is + between 18 and 100 - as well as a unique constraint in the email + field. + + Let's suppose the e-mail is given but the age is invalid. The + changeset would have the following errors: + + changeset = User.changeset(%User{}, %{age: 0, email: "mary@example.com"}) + {:error, changeset} = Repo.insert(changeset) + changeset.errors #=> [age: {"is invalid", []}, name: {"can't be blank", []}] + + In this case, we haven't checked the unique constraint in the + e-mail field because the data did not validate. 
Let's fix the + age and the name, and assume that the e-mail already exists in the + database: + + changeset = User.changeset(%User{}, %{age: 42, name: "Mary", email: "mary@example.com"}) + {:error, changeset} = Repo.insert(changeset) + changeset.errors #=> [email: {"has already been taken", []}] + + Validations and constraints define an explicit boundary when the check + happens. By moving constraints to the database, we also provide a safe, + correct and data-race free means of checking the user input. + + ### Deferred constraints + + Some databases support deferred constraints, i.e., constraints which are + checked at the end of the transaction rather than at the end of each statement. + + Changesets do not support this type of constraints. When working with deferred + constraints, a violation while invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2` won't + return `{:error, changeset}`, but rather raise an error at the end of the + transaction. + + ## Empty values + + Many times, the data given on cast needs to be further pruned, specially + regarding empty values. For example, if you are gathering data to be + cast from the command line or through an HTML form or any other text-based + format, it is likely those means cannot express nil values. For + those reasons, changesets include the concept of empty values, which are + values that will be automatically converted to the field's default value + on `cast/4`. Those values are stored in the changeset `empty_values` field + and default to `[""]`. You can also pass the `:empty_values` option to + `cast/4` in case you want to change how a particular `cast/4` work. + + ## Associations, embeds and on replace + + Using changesets you can work with associations as well as with embedded + structs. There are two primary APIs: + + * `cast_assoc/3` and `cast_embed/3` - those functions are used when + working with external data. In particular, they allow you to change + associations and embeds alongside the parent struct, all at once. + + * `put_assoc/4` and `put_embed/4` - it allows you to replace the + association or embed as a whole. This can be used to move associated + data from one entry to another, to completely remove or replace + existing entries. + + See the documentation for those functions for more information. + + ### The `:on_replace` option + + When using any of those APIs, you may run into situations where Ecto sees + data is being replaced. For example, imagine a Post has many Comments where + the comments have IDs 1, 2 and 3. If you call `cast_assoc/3` passing only + the IDs 1 and 2, Ecto will consider 3 is being "replaced" and it will raise + by default. Such behaviour can be changed when defining the relation by + setting `:on_replace` option when defining your association/embed according + to the values below: + + * `:raise` (default) - do not allow removing association or embedded + data via parent changesets + * `:mark_as_invalid` - if attempting to remove the association or + embedded data via parent changeset - an error will be added to the parent + changeset, and it will be marked as invalid + * `:nilify` - sets owner reference column to `nil` (available only for + associations). Use this on a `belongs_to` column to allow the association + to be cleared out so that it can be set to a new value. Will set `action` + on associated changesets to `:replace` + * `:update` - updates the association, available only for `has_one`, `belongs_to` + and `embeds_one`. 
This option will update all the fields given to the changeset
+      including the id for the association
+    * `:delete` - removes the association or related data from the database.
+      This option has to be used carefully (see below). Will set `action` on associated
+      changesets to `:replace`
+    * `:delete_if_exists` - like `:delete` except that it ignores any stale entry
+      error. For instance, if you set `on_replace: :delete` but the replaced
+      resource was already deleted by a separate request, it will raise an
+      `Ecto.StaleEntryError`. `:delete_if_exists` makes it so it will only delete
+      if the entry still exists
+
+  The `:delete` and `:delete_if_exists` options must be used carefully as they allow
+  users to delete any associated data by simply not sending the associated data.
+  If you need deletion, it is often preferred to add a separate boolean virtual field
+  in the schema and manually mark the changeset for deletion if the `:delete` field is
+  set in the params, as in the example below. Note that we don't call `cast/4` in this
+  case because we don't want to prevent deletion if a change is invalid (changes are
+  irrelevant if the entity needs to be deleted).
+
+      defmodule Comment do
+        use Ecto.Schema
+        import Ecto.Changeset
+
+        schema "comments" do
+          field :body, :string
+          field :delete, :boolean, virtual: true
+        end
+
+        def changeset(comment, %{"delete" => "true"}) do
+          %{Ecto.Changeset.change(comment, delete: true) | action: :delete}
+        end
+
+        def changeset(comment, params) do
+          cast(comment, params, [:body])
+        end
+      end
+
+  ## Schemaless changesets
+
+  In the changeset examples so far, we have always used changesets to validate
+  and cast data contained in a struct defined by an Ecto schema, such as the `%User{}`
+  struct defined by the `User` module.
+
+  However, changesets can also be used with "regular" structs by passing a tuple
+  with the data and its types:
+
+      user = %User{}
+      types = %{first_name: :string, last_name: :string, email: :string}
+      changeset =
+        {user, types}
+        |> Ecto.Changeset.cast(params, Map.keys(types))
+        |> Ecto.Changeset.validate_required(...)
+        |> Ecto.Changeset.validate_length(...)
+
+  where the user struct refers to the definition in the following module:
+
+      defmodule User do
+        defstruct [:first_name, :last_name, :email]
+      end
+
+  Changesets can also be used with data in a plain map, by following the same API:
+
+      data = %{}
+      types = %{name: :string}
+      params = %{name: "Callum"}
+      changeset =
+        {data, types}
+        |> Ecto.Changeset.cast(params, Map.keys(types))
+        |> Ecto.Changeset.validate_required(...)
+        |> Ecto.Changeset.validate_length(...)
+
+  Such functionality makes Ecto extremely useful to cast, validate and prune data even
+  if it is not meant to be persisted to the database.
+
+  ### Changeset actions
+
+  Changesets have an action field which is usually set by `Ecto.Repo`
+  whenever one of the operations such as `insert` or `update` is called:
+
+      changeset = User.changeset(%User{}, %{age: 42, email: "mary@example.com"})
+      {:error, changeset} = Repo.insert(changeset)
+      changeset.action
+      #=> :insert
+
+  This means that when working with changesets that are not meant to be
+  persisted to the database, such as schemaless changesets, you may need
+  to explicitly set the action to one specific value. Frameworks such as
+  Phoenix use the action value to define how HTML forms should act.
+
+  Instead of setting the action manually, you may use `apply_action/2` that
+  emulates operations such as `c:Ecto.Repo.insert`.
`apply_action/2` will return
+  `{:ok, changes}` if the changeset is valid or `{:error, changeset}`, with
+  the given `action` set in the changeset in case of errors.
+
+  ## The Ecto.Changeset struct
+
+  The public fields are:
+
+    * `valid?` - Stores if the changeset is valid
+    * `data` - The changeset source data, for example, a struct
+    * `params` - The parameters as given on changeset creation
+    * `changes` - The `changes` from parameters that were approved in casting
+    * `errors` - All errors from validations
+    * `required` - All required fields as a list of atoms
+    * `action` - The action to be performed with the changeset
+    * `types` - Cache of the data's field types
+    * `empty_values` - A list of values to be considered empty
+    * `repo` - The repository applying the changeset (only set after a Repo function is called)
+    * `repo_opts` - A keyword list of options given to the underlying repository operation
+
+  The following fields are private and must not be accessed directly.
+
+    * `validations`
+    * `constraints`
+    * `filters`
+    * `prepare`
+
+  ### Redacting fields in inspect
+
+  To hide a field's value from the inspect protocol of `Ecto.Changeset`, mark
+  the field as `redact: true` in the schema, and it will display with the
+  value `**redacted**`.
+  """
+
+  require Ecto.Query
+  alias __MODULE__
+  alias Ecto.Changeset.Relation
+
+  @empty_values [""]
+
+  # If a new field is added here, def merge must be adapted
+  defstruct valid?: false, data: nil, params: nil, changes: %{},
+            errors: [], validations: [], required: [], prepare: [],
+            constraints: [], filters: %{}, action: nil, types: nil,
+            empty_values: @empty_values, repo: nil, repo_opts: []
+
+  @type t(data_type) :: %Changeset{
+          valid?: boolean(),
+          repo: atom | nil,
+          repo_opts: Keyword.t(),
+          data: data_type,
+          params: %{optional(String.t()) => term} | nil,
+          changes: %{optional(atom) => term},
+          required: [atom],
+          prepare: [(t -> t)],
+          errors: [{atom, error}],
+          constraints: [constraint],
+          validations: [{atom, term}],
+          filters: %{optional(atom) => term},
+          action: action,
+          types: nil | %{atom => Ecto.Type.t() | {:assoc, term()} | {:embed, term()}}
+        }
+
+  @type t :: t(Ecto.Schema.t | map | nil)
+  @type error :: {String.t, Keyword.t}
+  @type action :: nil | :insert | :update | :delete | :replace | :ignore | atom
+  @type constraint :: %{type: :check | :exclusion | :foreign_key | :unique,
+                        constraint: String.t, match: :exact | :suffix | :prefix,
+                        field: atom, error_message: String.t, error_type: atom}
+  @type data :: map()
+  @type types :: map()
+
+  @number_validators %{
+    less_than: {&</2, "must be less than %{number}"},
+    greater_than: {&>/2, "must be greater than %{number}"},
+    less_than_or_equal_to: {&<=/2, "must be less than or equal to %{number}"},
+    greater_than_or_equal_to: {&>=/2, "must be greater than or equal to %{number}"},
+    equal_to: {&==/2, "must be equal to %{number}"},
+    not_equal_to: {&!=/2, "must be not equal to %{number}"}
+  }
+
+  @relations [:embed, :assoc]
+  @match_types [:exact, :suffix, :prefix]
+
+  @doc """
+  Wraps the given data in a changeset or adds changes to a changeset.
+
+  `changes` is a map or keyword where the key is an atom representing a
+  field, association or embed and the value is a term. Note the `value` is
+  directly stored in the changeset with no validation whatsoever. For this
+  reason, this function is meant for working with data internal to the
+  application.
+
+  When changing embeds and associations, see `put_assoc/4` for a complete
+  reference on the accepted values.
+ + This function is useful for: + + * wrapping a struct inside a changeset + * directly changing a struct without performing castings nor validations + * directly bulk-adding changes to a changeset + + Changed attributes will only be added if the change does not have the + same value as the field in the data. + + When a changeset is passed as the first argument, the changes passed as the + second argument are merged over the changes already in the changeset if they + differ from the values in the struct. + + When a `{data, types}` is passed as the first argument, a changeset is + created with the given data and types and marked as valid. + + See `cast/4` if you'd prefer to cast and validate external parameters. + + ## Examples + + iex> changeset = change(%Post{}) + %Ecto.Changeset{...} + iex> changeset.valid? + true + iex> changeset.changes + %{} + + iex> changeset = change(%Post{author: "bar"}, title: "title") + iex> changeset.changes + %{title: "title"} + + iex> changeset = change(%Post{title: "title"}, title: "title") + iex> changeset.changes + %{} + + iex> changeset = change(changeset, %{title: "new title", body: "body"}) + iex> changeset.changes.title + "new title" + iex> changeset.changes.body + "body" + + """ + @spec change(Ecto.Schema.t | t | {data, types}, %{atom => term} | Keyword.t) :: t + def change(data, changes \\ %{}) + + def change({data, types}, changes) when is_map(data) do + change(%Changeset{data: data, types: Enum.into(types, %{}), valid?: true}, changes) + end + + def change(%Changeset{types: nil}, _changes) do + raise ArgumentError, "changeset does not have types information" + end + + def change(%Changeset{changes: changes, types: types} = changeset, new_changes) + when is_map(new_changes) or is_list(new_changes) do + {changes, errors, valid?} = + get_changed(changeset.data, types, changes, new_changes, + changeset.errors, changeset.valid?) + %{changeset | changes: changes, errors: errors, valid?: valid?} + end + + def change(%{__struct__: struct} = data, changes) when is_map(changes) or is_list(changes) do + types = struct.__changeset__() + {changes, errors, valid?} = get_changed(data, types, %{}, changes, [], true) + %Changeset{valid?: valid?, data: data, changes: changes, + errors: errors, types: types} + end + + defp get_changed(data, types, old_changes, new_changes, errors, valid?) do + Enum.reduce(new_changes, {old_changes, errors, valid?}, fn + {key, value}, {changes, errors, valid?} -> + put_change(data, changes, errors, valid?, key, value, Map.get(types, key)) + _, _ -> + raise ArgumentError, + "invalid changes being applied to changeset. " <> + "Expected a keyword list or a map, got: #{inspect(new_changes)}" + end) + end + + @doc """ + Applies the given `params` as changes on the `data` according to + the set of `permitted` keys. Returns a changeset. + + `data` may be either a changeset, a schema struct or a `{data, types}` + tuple. The second argument is a map of `params` that are cast according + to the type information from `data`. `params` is a map with string keys + or a map with atom keys, containing potentially invalid data. Mixed keys + are not allowed. + + During casting, all `permitted` parameters whose values match the specified + type information will have their key name converted to an atom and stored + together with the value as a change in the `:changes` field of the changeset. + All parameters that are not explicitly permitted are ignored. + + If casting of all fields is successful, the changeset is returned as valid. 
+ + Note that `cast/4` validates the types in the `params`, but not in the given + `data`. + + ## Options + + * `:empty_values` - a list of values to be considered as empty when casting. + Empty values are always replaced by the default value of the respective key. Defaults to `[""]` + + ## Examples + + iex> changeset = cast(post, params, [:title]) + iex> if changeset.valid? do + ...> Repo.update!(changeset) + ...> end + + Passing a changeset as the first argument: + + iex> changeset = cast(post, %{title: "Hello"}, [:title]) + iex> new_changeset = cast(changeset, %{title: "Foo", body: "World"}, [:body]) + iex> new_changeset.params + %{"title" => "Hello", "body" => "World"} + + Or creating a changeset from a simple map with types: + + iex> data = %{title: "hello"} + iex> types = %{title: :string} + iex> changeset = cast({data, types}, %{title: "world"}, [:title]) + iex> apply_changes(changeset) + %{title: "world"} + + ## Composing casts + + `cast/4` also accepts a changeset as its first argument. In such cases, all + the effects caused by the call to `cast/4` (additional errors and changes) + are simply added to the ones already present in the argument changeset. + Parameters are merged (**not deep-merged**) and the ones passed to `cast/4` + take precedence over the ones already in the changeset. + """ + @spec cast(Ecto.Schema.t | t | {data, types}, + %{binary => term} | %{atom => term} | :invalid, + [atom], + Keyword.t) :: t + def cast(data, params, permitted, opts \\ []) + + def cast(_data, %{__struct__: _} = params, _permitted, _opts) do + raise Ecto.CastError, type: :map, value: params, + message: "expected params to be a :map, got: `#{inspect(params)}`" + end + + def cast({data, types}, params, permitted, opts) when is_map(data) do + cast(data, types, %{}, params, permitted, opts) + end + + def cast(%Changeset{types: nil}, _params, _permitted, _opts) do + raise ArgumentError, "changeset does not have types information" + end + + def cast(%Changeset{changes: changes, data: data, types: types, empty_values: empty_values} = changeset, + params, permitted, opts) do + opts = Keyword.put_new(opts, :empty_values, empty_values) + new_changeset = cast(data, types, changes, params, permitted, opts) + cast_merge(changeset, new_changeset) + end + + def cast(%{__struct__: module} = data, params, permitted, opts) do + cast(data, module.__changeset__(), %{}, params, permitted, opts) + end + + defp cast(%{} = data, %{} = types, %{} = changes, :invalid, permitted, _opts) when is_list(permitted) do + _ = Enum.each(permitted, &cast_key/1) + %Changeset{params: nil, data: data, valid?: false, errors: [], + changes: changes, types: types} + end + + defp cast(%{} = data, %{} = types, %{} = changes, %{} = params, permitted, opts) when is_list(permitted) do + empty_values = Keyword.get(opts, :empty_values, @empty_values) + params = convert_params(params) + + defaults = case data do + %{__struct__: struct} -> struct.__struct__() + %{} -> %{} + end + + {changes, errors, valid?} = + Enum.reduce(permitted, {changes, [], true}, + &process_param(&1, params, types, data, empty_values, defaults, &2)) + + %Changeset{params: params, data: data, valid?: valid?, + errors: Enum.reverse(errors), changes: changes, types: types} + end + + defp cast(%{}, %{}, %{}, params, permitted, _opts) when is_list(permitted) do + raise Ecto.CastError, type: :map, value: params, + message: "expected params to be a :map, got: `#{inspect params}`" + end + + defp process_param(key, params, types, data, empty_values, defaults, {changes, 
errors, valid?}) do + {key, param_key} = cast_key(key) + type = cast_type!(types, key) + + current = + case changes do + %{^key => value} -> value + _ -> Map.get(data, key) + end + + case cast_field(key, param_key, type, params, current, empty_values, defaults, valid?) do + {:ok, value, valid?} -> + {Map.put(changes, key, value), errors, valid?} + :missing -> + {changes, errors, valid?} + {:invalid, custom_errors} -> + {message, new_errors} = + custom_errors + |> Keyword.put_new(:validation, :cast) + |> Keyword.put(:type, type) + |> Keyword.pop(:message, "is invalid") + {changes, [{key, {message, new_errors}} | errors], false} + end + end + + defp cast_type!(types, key) do + case types do + %{^key => {tag, _}} when tag in @relations -> + raise "casting #{tag}s with cast/4 for #{inspect key} field is not supported, use cast_#{tag}/3 instead" + %{^key => type} -> + type + _ -> + known_fields = types |> Map.keys() |> Enum.map_join(", ", &inspect/1) + raise ArgumentError, + "unknown field `#{inspect(key)}` given to cast. Either the field does not exist or it is a " <> + ":through association (which are read-only). The known fields are: #{known_fields}" + end + end + + defp cast_key(key) when is_atom(key), + do: {key, Atom.to_string(key)} + + defp cast_key(key), + do: raise ArgumentError, "cast/3 expects a list of atom keys, got key: `#{inspect key}`" + + defp cast_field(key, param_key, type, params, current, empty_values, defaults, valid?) do + case params do + %{^param_key => value} -> + value = filter_empty_values(type, value, empty_values, defaults, key) + case Ecto.Type.cast(type, value) do + {:ok, value} -> + if Ecto.Type.equal?(type, current, value) do + :missing + else + {:ok, value, valid?} + end + + :error -> + {:invalid, []} + + {:error, custom_errors} when is_list(custom_errors) -> + {:invalid, custom_errors} + end + + _ -> + :missing + end + end + + defp filter_empty_values(type, value, empty_values, defaults, key) do + case Ecto.Type.filter_empty_values(type, value, empty_values) do + :empty -> Map.get(defaults, key) + {:ok, value} -> value + end + end + + # We only look at the first element because traversing the whole map + # can be expensive and it was showing up during profiling. This means + # we won't always raise, but the check only exists for user convenience + # anyway, and it is not a guarantee. + defp convert_params(params) do + case :maps.next(:maps.iterator(params)) do + {key, _, _} when is_atom(key) -> + for {key, value} <- params, into: %{} do + if is_atom(key) do + {Atom.to_string(key), value} + else + raise Ecto.CastError, type: :map, value: params, + message: "expected params to be a map with atoms or string keys, " <> + "got a map with mixed keys: #{inspect params}" + end + end + + _ -> + params + end + end + + ## Casting related + + @doc """ + Casts the given association with the changeset parameters. + + This function should be used when working with the entire association at + once (and not a single element of a many-style association) and receiving + data external to the application. + + `cast_assoc/3` works matching the records extracted from the database + and compares it with the parameters received from an external source. + Therefore, it is expected that the data in the changeset has explicitly + preloaded the association being cast and that all of the IDs exist and + are unique. 
+ + For example, imagine a user has many addresses relationship where + post data is sent as follows + + %{"name" => "john doe", "addresses" => [ + %{"street" => "somewhere", "country" => "brazil", "id" => 1}, + %{"street" => "elsewhere", "country" => "poland"}, + ]} + + and then + + User + |> Repo.get!(id) + |> Repo.preload(:addresses) # Only required when updating data + |> Ecto.Changeset.cast(params, []) + |> Ecto.Changeset.cast_assoc(:addresses, with: &MyApp.Address.changeset/2) + + The parameters for the given association will be retrieved + from `changeset.params`. Those parameters are expected to be + a map with attributes, similar to the ones passed to `cast/4`. + Once parameters are retrieved, `cast_assoc/3` will match those + parameters with the associations already in the changeset record. + + Once `cast_assoc/3` is called, Ecto will compare each parameter + with the user's already preloaded addresses and act as follows: + + * If the parameter does not contain an ID, the parameter data + will be passed to `MyApp.Address.changeset/2` with a new struct + and become an insert operation + * If the parameter contains an ID and there is no associated child + with such ID, the parameter data will be passed to + `MyApp.Address.changeset/2` with a new struct and become an insert + operation + * If the parameter contains an ID and there is an associated child + with such ID, the parameter data will be passed to + `MyApp.Address.changeset/2` with the existing struct and become an + update operation + * If there is an associated child with an ID and its ID is not given + as parameter, the `:on_replace` callback for that association will + be invoked (see the "On replace" section on the module documentation) + + Every time the `MyApp.Address.changeset/2` function is invoked, it must + return a changeset. Once the parent changeset is given to an `Ecto.Repo` + function, all entries will be inserted/updated/deleted within the same + transaction. + + Note developers are allowed to explicitly set the `:action` field of a + changeset to instruct Ecto how to act in certain situations. Let's suppose + that, if one of the associations has only empty fields, you want to ignore + the entry altogether instead of showing an error. The changeset function could + be written like this: + + def changeset(struct, params) do + struct + |> cast(params, [:title, :body]) + |> validate_required([:title, :body]) + |> case do + %{valid?: false, changes: changes} = changeset when changes == %{} -> + # If the changeset is invalid and has no changes, it is + # because all required fields are missing, so we ignore it. + %{changeset | action: :ignore} + changeset -> + changeset + end + end + + ## Partial changes for many-style associations + + By preloading an association using a custom query you can confine the behavior + of `cast_assoc/3`. This opens up the possibility to work on a subset of the data, + instead of all associations in the database. + + Taking the initial example of users having addresses imagine those addresses + are set up to belong to a country. If you want to allow users to bulk edit all + addresses that belong to a single country, you can do so by changing the preload + query: + + query = from MyApp.Address, where: [country: ^edit_country] + + User + |> Repo.get!(id) + |> Repo.preload(addresses: query) + |> Ecto.Changeset.cast(params, []) + |> Ecto.Changeset.cast_assoc(:addresses) + + This will allow you to cast and update only the association for the given country. 
+ The important point for partial changes is that any addresses, which were not + preloaded won't be changed. + + ## Options + + * `:required` - if the association is a required field + * `:required_message` - the message on failure, defaults to "can't be blank" + * `:invalid_message` - the message on failure, defaults to "is invalid" + * `:force_update_on_change` - force the parent record to be updated in the + repository if there is a change, defaults to `true` + * `:with` - the function to build the changeset from params. Defaults to the + `changeset/2` function of the associated module. It can be changed by passing + an anonymous function or an MFA tuple. If using an MFA, the default changeset + and parameters arguments will be prepended to the given args. For example, + using `with: {Author, :special_changeset, ["hello"]}` will be invoked as + `Author.special_changeset(changeset, params, "hello")` + + """ + def cast_assoc(changeset, name, opts \\ []) when is_atom(name) do + cast_relation(:assoc, changeset, name, opts) + end + + @doc """ + Casts the given embed with the changeset parameters. + + The parameters for the given embed will be retrieved + from `changeset.params`. Those parameters are expected to be + a map with attributes, similar to the ones passed to `cast/4`. + Once parameters are retrieved, `cast_embed/3` will match those + parameters with the embeds already in the changeset record. + See `cast_assoc/3` for an example of working with casts and + associations which would also apply for embeds. + + The changeset must have been previously `cast` using + `cast/4` before this function is invoked. + + ## Options + + * `:required` - if the embed is a required field + * `:required_message` - the message on failure, defaults to "can't be blank" + * `:invalid_message` - the message on failure, defaults to "is invalid" + * `:force_update_on_change` - force the parent record to be updated in the + repository if there is a change, defaults to `true` + * `:with` - the function to build the changeset from params. Defaults to the + `changeset/2` function of the embedded module. It can be changed by passing + an anonymous function or an MFA tuple. If using an MFA, the default changeset + and parameters arguments will be prepended to the given args. For example, + using `with: {Author, :special_changeset, ["hello"]}` will be invoked as + `Author.special_changeset(changeset, params, "hello")` + """ + def cast_embed(changeset, name, opts \\ []) when is_atom(name) do + cast_relation(:embed, changeset, name, opts) + end + + defp cast_relation(type, %Changeset{data: data, types: types}, _name, _opts) + when data == nil or types == nil do + raise ArgumentError, "cast_#{type}/3 expects the changeset to be cast. 
" <> + "Please call cast/4 before calling cast_#{type}/3" + end + + defp cast_relation(type, %Changeset{} = changeset, key, opts) do + {key, param_key} = cast_key(key) + %{data: data, types: types, params: params, changes: changes} = changeset + %{related: related} = relation = relation!(:cast, type, key, Map.get(types, key)) + params = params || %{} + + {changeset, required?} = + if opts[:required] do + {update_in(changeset.required, &[key|&1]), true} + else + {changeset, false} + end + + on_cast = Keyword.get_lazy(opts, :with, fn -> on_cast_default(type, related) end) + original = Map.get(data, key) + + changeset = + case Map.fetch(params, param_key) do + {:ok, value} -> + current = Relation.load!(data, original) + case Relation.cast(relation, data, value, current, on_cast) do + {:ok, change, relation_valid?} when change != original -> + valid? = changeset.valid? and relation_valid? + changes = Map.put(changes, key, change) + changeset = %{force_update(changeset, opts) | changes: changes, valid?: valid?} + missing_relation(changeset, key, current, required?, relation, opts) + + {:error, {message, meta}} -> + meta = [validation: type] ++ meta + error = {key, {message(opts, :invalid_message, message), meta}} + %{changeset | errors: [error | changeset.errors], valid?: false} + + # ignore or ok with change == original + _ -> + missing_relation(changeset, key, current, required?, relation, opts) + end + + :error -> + missing_relation(changeset, key, original, required?, relation, opts) + end + + update_in changeset.types[key], fn {type, relation} -> + {type, %{relation | on_cast: on_cast}} + end + end + + defp on_cast_default(type, module) do + fn struct, params -> + try do + module.changeset(struct, params) + rescue + e in UndefinedFunctionError -> + case __STACKTRACE__ do + [{^module, :changeset, args_or_arity, _}] when args_or_arity == 2 + when length(args_or_arity) == 2 -> + raise ArgumentError, """ + the module #{inspect module} does not define a changeset/2 function, + which is used by cast_#{type}/3. You need to either: + + 1. implement the #{type}.changeset/2 function + 2. pass the :with option to cast_#{type}/3 with an anonymous + function that expects 2 args or an MFA tuple + + When using an inline embed, the :with option must be given + """ + stacktrace -> + reraise e, stacktrace + end + end + end + end + + defp missing_relation(%{changes: changes, errors: errors} = changeset, + name, current, required?, relation, opts) do + current_changes = Map.get(changes, name, current) + if required? and Relation.empty?(relation, current_changes) do + errors = [{name, {message(opts, :required_message, "can't be blank"), [validation: :required]}} | errors] + %{changeset | errors: errors, valid?: false} + else + changeset + end + end + + defp relation!(_op, type, _name, {type, relation}), + do: relation + defp relation!(op, :assoc, name, nil), + do: raise(ArgumentError, "cannot #{op} assoc `#{name}`, assoc `#{name}` not found. Make sure it is spelled correctly and that the association type is not read-only") + defp relation!(op, type, name, nil), + do: raise(ArgumentError, "cannot #{op} #{type} `#{name}`, #{type} `#{name}` not found. 
Make sure that it exists and is spelled correctly") + defp relation!(op, type, name, {other, _}) when other in @relations, + do: raise(ArgumentError, "expected `#{name}` to be an #{type} in `#{op}_#{type}`, got: `#{other}`") + defp relation!(op, type, name, schema_type), + do: raise(ArgumentError, "expected `#{name}` to be an #{type} in `#{op}_#{type}`, got: `#{inspect schema_type}`") + + defp force_update(changeset, opts) do + if Keyword.get(opts, :force_update_on_change, true) do + put_in(changeset.repo_opts[:force], true) + else + changeset + end + end + + ## Working with changesets + + @doc """ + Merges two changesets. + + This function merges two changesets provided they have been applied to the + same data (their `:data` field is equal); if the data differs, an + `ArgumentError` exception is raised. If one of the changesets has a `:repo` + field which is not `nil`, then the value of that field is used as the `:repo` + field of the resulting changeset; if both changesets have a non-`nil` and + different `:repo` field, an `ArgumentError` exception is raised. + + The other fields are merged with the following criteria: + + * `params` - params are merged (not deep-merged) giving precedence to the + params of `changeset2` in case of a conflict. If both changesets have their + `:params` fields set to `nil`, the resulting changeset will have its params + set to `nil` too. + * `changes` - changes are merged giving precedence to the `changeset2` + changes. + * `errors` and `validations` - they are simply concatenated. + * `required` - required fields are merged; all the fields that appear + in the required list of both changesets are moved to the required + list of the resulting changeset. + + ## Examples + + iex> changeset1 = cast(%Post{}, %{title: "Title"}, [:title]) + iex> changeset2 = cast(%Post{}, %{title: "New title", body: "Body"}, [:title, :body]) + iex> changeset = merge(changeset1, changeset2) + iex> changeset.changes + %{body: "Body", title: "New title"} + + iex> changeset1 = cast(%Post{body: "Body"}, %{title: "Title"}, [:title]) + iex> changeset2 = cast(%Post{}, %{title: "New title"}, [:title]) + iex> merge(changeset1, changeset2) + ** (ArgumentError) different :data when merging changesets + + """ + @spec merge(t, t) :: t + def merge(changeset1, changeset2) + + def merge(%Changeset{data: data} = cs1, %Changeset{data: data} = cs2) do + new_repo = merge_identical(cs1.repo, cs2.repo, "repos") + new_repo_opts = Keyword.merge(cs1.repo_opts, cs2.repo_opts) + new_action = merge_identical(cs1.action, cs2.action, "actions") + new_filters = Map.merge(cs1.filters, cs2.filters) + new_validations = cs1.validations ++ cs2.validations + new_constraints = cs1.constraints ++ cs2.constraints + + cast_merge %{cs1 | repo: new_repo, repo_opts: new_repo_opts, filters: new_filters, + action: new_action, validations: new_validations, + constraints: new_constraints}, cs2 + end + + def merge(%Changeset{}, %Changeset{}) do + raise ArgumentError, message: "different :data when merging changesets" + end + + defp cast_merge(cs1, cs2) do + new_params = (cs1.params || cs2.params) && Map.merge(cs1.params || %{}, cs2.params || %{}) + new_changes = Map.merge(cs1.changes, cs2.changes) + new_errors = Enum.uniq(cs1.errors ++ cs2.errors) + new_required = Enum.uniq(cs1.required ++ cs2.required) + new_types = cs1.types || cs2.types + new_valid? = cs1.valid? and cs2.valid? 
+ + %{cs1 | params: new_params, valid?: new_valid?, errors: new_errors, types: new_types, + changes: new_changes, required: new_required} + end + + defp merge_identical(object, nil, _thing), do: object + defp merge_identical(nil, object, _thing), do: object + defp merge_identical(object, object, _thing), do: object + defp merge_identical(lhs, rhs, thing) do + raise ArgumentError, "different #{thing} (`#{inspect lhs}` and " <> + "`#{inspect rhs}`) when merging changesets" + end + + @doc """ + Fetches the given field from changes or from the data. + + While `fetch_change/2` only looks at the current `changes` + to retrieve a value, this function looks at the changes and + then falls back on the data, finally returning `:error` if + no value is available. + + For relations, these functions will return the changeset + original data with changes applied. To retrieve raw changesets, + please use `fetch_change/2`. + + ## Examples + + iex> post = %Post{title: "Foo", body: "Bar baz bong"} + iex> changeset = change(post, %{title: "New title"}) + iex> fetch_field(changeset, :title) + {:changes, "New title"} + iex> fetch_field(changeset, :body) + {:data, "Bar baz bong"} + iex> fetch_field(changeset, :not_a_field) + :error + + """ + @spec fetch_field(t, atom) :: {:changes, term} | {:data, term} | :error + def fetch_field(%Changeset{changes: changes, data: data, types: types}, key) when is_atom(key) do + case Map.fetch(changes, key) do + {:ok, value} -> + {:changes, change_as_field(types, key, value)} + :error -> + case Map.fetch(data, key) do + {:ok, value} -> {:data, data_as_field(data, types, key, value)} + :error -> :error + end + end + end + + @doc """ + Same as `fetch_field/2` but returns the value or raises if the given key was not found. + + ## Examples + + iex> post = %Post{title: "Foo", body: "Bar baz bong"} + iex> changeset = change(post, %{title: "New title"}) + iex> fetch_field!(changeset, :title) + "New title" + iex> fetch_field!(changeset, :other) + ** (KeyError) key :other not found in: %Post{...} + """ + @spec fetch_field!(t, atom) :: term + def fetch_field!(changeset, key) do + case fetch_field(changeset, key) do + {_, value} -> + value + + :error -> + raise KeyError, key: key, term: changeset.data + end + end + + @doc """ + Gets a field from changes or from the data. + + While `get_change/3` only looks at the current `changes` + to retrieve a value, this function looks at the changes and + then falls back on the data, finally returning `default` if + no value is available. + + For relations, these functions will return the changeset data + with changes applied. To retrieve raw changesets, please use `get_change/3`. + + iex> post = %Post{title: "A title", body: "My body is a cage"} + iex> changeset = change(post, %{title: "A new title"}) + iex> get_field(changeset, :title) + "A new title" + iex> get_field(changeset, :not_a_field, "Told you, not a field!") + "Told you, not a field!" 
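+
+  For relation fields (a brief sketch, assuming `Post` also declares
+  `has_many :comments` backed by a hypothetical `Comment` schema), the value
+  comes back with the changes applied rather than as raw changesets:
+
+      iex> changeset = put_assoc(change(%Post{}), :comments, [%{body: "hi"}])
+      iex> get_field(changeset, :comments)
+      [%Comment{body: "hi", ...}]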
+ + """ + @spec get_field(t, atom, term) :: term + def get_field(%Changeset{changes: changes, data: data, types: types}, key, default \\ nil) do + case Map.fetch(changes, key) do + {:ok, value} -> + change_as_field(types, key, value) + :error -> + case Map.fetch(data, key) do + {:ok, value} -> data_as_field(data, types, key, value) + :error -> default + end + end + end + + defp change_as_field(types, key, value) do + case Map.get(types, key) do + {tag, relation} when tag in @relations -> + Relation.apply_changes(relation, value) + _other -> + value + end + end + + defp data_as_field(data, types, key, value) do + case Map.get(types, key) do + {tag, _relation} when tag in @relations -> + Relation.load!(data, value) + _other -> + value + end + end + + @doc """ + Fetches a change from the given changeset. + + This function only looks at the `:changes` field of the given `changeset` and + returns `{:ok, value}` if the change is present or `:error` if it's not. + + ## Examples + + iex> changeset = change(%Post{body: "foo"}, %{title: "bar"}) + iex> fetch_change(changeset, :title) + {:ok, "bar"} + iex> fetch_change(changeset, :body) + :error + + """ + @spec fetch_change(t, atom) :: {:ok, term} | :error + def fetch_change(%Changeset{changes: changes} = _changeset, key) when is_atom(key) do + Map.fetch(changes, key) + end + + @doc """ + Same as `fetch_change/2` but returns the value or raises if the given key was not found. + + ## Examples + + iex> changeset = change(%Post{body: "foo"}, %{title: "bar"}) + iex> fetch_change!(changeset, :title) + "bar" + iex> fetch_change!(changeset, :body) + ** (KeyError) key :body not found in: %{title: "bar"} + """ + @spec fetch_change!(t, atom) :: term + def fetch_change!(changeset, key) do + case fetch_change(changeset, key) do + {:ok, value} -> + value + + :error -> + raise KeyError, key: key, term: changeset.changes + end + end + + @doc """ + Gets a change or returns a default value. + + ## Examples + + iex> changeset = change(%Post{body: "foo"}, %{title: "bar"}) + iex> get_change(changeset, :title) + "bar" + iex> get_change(changeset, :body) + nil + + """ + @spec get_change(t, atom, term) :: term + def get_change(%Changeset{changes: changes} = _changeset, key, default \\ nil) when is_atom(key) do + Map.get(changes, key, default) + end + + @doc """ + Updates a change. + + The given `function` is invoked with the change value only if there + is a change for `key`. Note that the value of the change + can still be `nil` (unless the field was marked as required on `validate_required/3`). + + ## Examples + + iex> changeset = change(%Post{}, %{impressions: 1}) + iex> changeset = update_change(changeset, :impressions, &(&1 + 1)) + iex> changeset.changes.impressions + 2 + + """ + @spec update_change(t, atom, (term -> term)) :: t + def update_change(%Changeset{changes: changes} = changeset, key, function) when is_atom(key) do + case Map.fetch(changes, key) do + {:ok, value} -> + put_change(changeset, key, function.(value)) + :error -> + changeset + end + end + + @doc """ + Puts a change on the given `key` with `value`. + + `key` is an atom that represents any field, embed or + association in the changeset. Note the `value` is directly + stored in the changeset with no validation whatsoever. + For this reason, this function is meant for working with + data internal to the application. + + If the change is already present, it is overridden with + the new value. If the change has the same value as in the + changeset data, it is not added to the list of changes. 
+ + When changing embeds and associations, see `put_assoc/4` + for a complete reference on the accepted values. + + ## Examples + + iex> changeset = change(%Post{}, %{title: "foo"}) + iex> changeset = put_change(changeset, :title, "bar") + iex> changeset.changes + %{title: "bar"} + + iex> changeset = change(%Post{title: "foo"}) + iex> changeset = put_change(changeset, :title, "foo") + iex> changeset.changes + %{} + + """ + @spec put_change(t, atom, term) :: t + def put_change(%Changeset{types: nil}, _key, _value) do + raise ArgumentError, "changeset does not have types information" + end + + def put_change(%Changeset{data: data, types: types} = changeset, key, value) do + type = Map.get(types, key) + {changes, errors, valid?} = + put_change(data, changeset.changes, changeset.errors, changeset.valid?, key, value, type) + %{changeset | changes: changes, errors: errors, valid?: valid?} + end + + defp put_change(data, changes, errors, valid?, key, value, {tag, relation}) + when tag in @relations do + original = Map.get(data, key) + current = Relation.load!(data, original) + + case Relation.change(relation, value, current) do + {:ok, change, relation_valid?} when change != original -> + {Map.put(changes, key, change), errors, valid? and relation_valid?} + {:error, error} -> + {changes, [{key, error} | errors], false} + # ignore or ok with change == original + _ -> + {Map.delete(changes, key), errors, valid?} + end + end + + defp put_change(data, _changes, _errors, _valid?, key, _value, nil) when is_atom(key) do + raise ArgumentError, "unknown field `#{inspect(key)}` in #{inspect(data)}" + end + + defp put_change(_data, _changes, _errors, _valid?, key, _value, nil) when not is_atom(key) do + raise ArgumentError, "field names given to change/put_change must be atoms, got: `#{inspect(key)}`" + end + + defp put_change(data, changes, errors, valid?, key, value, type) do + if not Ecto.Type.equal?(type, Map.get(data, key), value) do + {Map.put(changes, key, value), errors, valid?} + else + {Map.delete(changes, key), errors, valid?} + end + end + + @doc """ + Puts the given association entry or entries as a change in the changeset. + + This function is used to work with associations as a whole. For example, + if a Post has many Comments, it allows you to add, remove or change all + comments at once. If your goal is to simply add a new comment to a post, + then it is preferred to do so manually, as we will describe later in the + "Example: Adding a comment to a post" section. + + This function requires the associated data to have been preloaded, except + when the parent changeset has been newly built and not yet persisted. + Missing data will invoke the `:on_replace` behaviour defined on the + association. + + For associations with cardinality one, `nil` can be used to remove the existing + entry. For associations with many entries, an empty list may be given instead. + + If the association has no changes, it will be skipped. If the association is + invalid, the changeset will be marked as invalid. If the given value is not any + of values below, it will raise. + + The associated data may be given in different formats: + + * a map or a keyword list representing changes to be applied to the + associated data. A map or keyword list can be given to update the + associated data as long as they have matching primary keys. + For example, `put_assoc(changeset, :comments, [%{id: 1, title: "changed"}])` + will locate the comment with `:id` of 1 and update its title. 
+ If no comment with such id exists, one is created on the fly. + Since only a single comment was given, any other associated comment + will be replaced. On all cases, it is expected the keys to be atoms. + Opposite to `cast_assoc` and `embed_assoc`, the given map (or struct) + is not validated in any way and will be inserted as is. + This API is mostly used in scripts and tests, to make it straight- + forward to create schemas with associations at once, such as: + + Ecto.Changeset.change( + %Post{}, + title: "foo", + comments: [ + %{body: "first"}, + %{body: "second"} + ] + ) + + * changesets - when changesets are given, they are treated as the canonical + data and the associated data currently stored in the association is either + updated or replaced. For example, if you call + `put_assoc(post_changeset, :comments, [list_of_comments_changesets])`, + all comments with matching IDs will be updated according to the changesets. + New comments or comments not associated to any post will be correctly + associated. Currently associated comments that do not have a matching ID + in the list of changesets will act according to the `:on_replace` association + configuration (you can chose to raise, ignore the operation, update or delete + them). If there are changes in any of the changesets, they will be + persisted too. + + * structs - when structs are given, they are treated as the canonical data + and the associated data currently stored in the association is replaced. + For example, if you call + `put_assoc(post_changeset, :comments, [list_of_comments_structs])`, + all comments with matching IDs will be replaced by the new structs. + New comments or comments not associated to any post will be correctly + associated. Currently associated comments that do not have a matching ID + in the list of changesets will act according to the `:on_replace` + association configuration (you can chose to raise, ignore the operation, + update or delete them). Different to passing changesets, structs are not + change tracked in any fashion. In other words, if you change a comment + struct and give it to `put_assoc/4`, the updates in the struct won't be + persisted. You must use changesets instead. `put_assoc/4` with structs + only takes care of guaranteeing that the comments and the parent data + are associated. This is extremely useful when associating existing data, + as we will see in the "Example: Adding tags to a post" section. + + Once the parent changeset is given to an `Ecto.Repo` function, all entries + will be inserted/updated/deleted within the same transaction. + + ## Example: Adding a comment to a post + + Imagine a relationship where Post has many comments and you want to add a + new comment to an existing post. While it is possible to use `put_assoc/4` + for this, it would be unnecessarily complex. Let's see an example. + + First, let's fetch the post with all existing comments: + + post = Post |> Repo.get!(1) |> Repo.preload(:comments) + + The following approach is **wrong**: + + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:comments, [%Comment{body: "bad example!"}]) + |> Repo.update!() + + The reason why the example above is wrong is because `put_assoc/4` always + works with the **full data**. So the example above will effectively **erase + all previous comments** and only keep the comment you are currently adding. 
+ Instead, you could try: + + post + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:comments, [%Comment{body: "so-so example!"} | post.comments]) + |> Repo.update!() + + In this example, we prepend the new comment to the list of existing comments. + Ecto will diff the list of comments currently in `post` with the list of comments + given, and correctly insert the new comment to the database. Note, however, + Ecto is doing a lot of work just to figure out something we knew since the + beginning, which is that there is only one new comment. + + In cases like above, when you want to work only on a single entry, it is + much easier to simply work on the associated directly. For example, we + could instead set the `post` association in the comment: + + %Comment{body: "better example"} + |> Ecto.Changeset.change() + |> Ecto.Changeset.put_assoc(:post, post) + |> Repo.insert!() + + Alternatively, we can make sure that when we create a comment, it is already + associated to the post: + + Ecto.build_assoc(post, :comments) + |> Ecto.Changeset.change(body: "great example!") + |> Repo.insert!() + + Or we can simply set the post_id in the comment itself: + + %Comment{body: "better example", post_id: post.id} + |> Repo.insert!() + + In other words, when you find yourself wanting to work only with a subset + of the data, then using `put_assoc/4` is most likely unnecessary. Instead, + you want to work on the other side of the association. + + Let's see an example where using `put_assoc/4` is a good fit. + + ## Example: Adding tags to a post + + Imagine you are receiving a set of tags you want to associate to a post. + Let's imagine that those tags exist upfront and are all persisted to the + database. Imagine we get the data in this format: + + params = %{"title" => "new post", "tags" => ["learner"]} + + Now, since the tags already exist, we will bring all of them from the + database and put them directly in the post: + + tags = Repo.all(from t in Tag, where: t.name in ^params["tags"]) + + post + |> Repo.preload(:tags) + |> Ecto.Changeset.cast(params, [:title]) # No need to allow :tags as we put them directly + |> Ecto.Changeset.put_assoc(:tags, tags) # Explicitly set the tags + + Since in this case we always require the user to pass all tags + directly, using `put_assoc/4` is a great fit. It will automatically + remove any tag not given and properly associate all of the given + tags with the post. + + Furthermore, since the tag information is given as structs read directly + from the database, Ecto will treat the data as correct and only do the + minimum necessary to guarantee that posts and tags are associated, + without trying to update or diff any of the fields in the tag struct. + + Although it accepts an `opts` argument, there are no options currently + supported by `put_assoc/4`. + """ + def put_assoc(%Changeset{} = changeset, name, value, opts \\ []) do + put_relation(:assoc, changeset, name, value, opts) + end + + @doc """ + Puts the given embed entry or entries as a change in the changeset. + + This function is used to work with embeds as a whole. For embeds with + cardinality one, `nil` can be used to remove the existing entry. For + embeds with many entries, an empty list may be given instead. + + If the embed has no changes, it will be skipped. If the embed is + invalid, the changeset will be marked as invalid. + + The list of supported values and their behaviour is described in + `put_assoc/4`. If the given value is not any of values listed there, + it will raise. 
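+
+  As a brief illustration (a hedged sketch, assuming a hypothetical `User`
+  schema with `embeds_one :address, Address`), the given map is wrapped as a
+  change on the embed and applied like any other change:
+
+      iex> changeset = change(%User{})
+      iex> changeset = put_embed(changeset, :address, %{city: "Lisbon"})
+      iex> apply_changes(changeset).address
+      %Address{city: "Lisbon", ...}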
+ + Although this function accepts an `opts` argument, there are no options + currently supported by `put_embed/4`. + """ + def put_embed(%Changeset{} = changeset, name, value, opts \\ []) do + put_relation(:embed, changeset, name, value, opts) + end + + defp put_relation(_tag, %{types: nil}, _name, _value, _opts) do + raise ArgumentError, "changeset does not have types information" + end + + defp put_relation(tag, changeset, name, value, _opts) do + %{data: data, types: types, changes: changes, errors: errors, valid?: valid?} = changeset + relation = relation!(:put, tag, name, Map.get(types, name)) + {changes, errors, valid?} = + put_change(data, changes, errors, valid?, name, value, {tag, relation}) + %{changeset | changes: changes, errors: errors, valid?: valid?} + end + + @doc """ + Forces a change on the given `key` with `value`. + + If the change is already present, it is overridden with + the new value. + + ## Examples + + iex> changeset = change(%Post{author: "bar"}, %{title: "foo"}) + iex> changeset = force_change(changeset, :title, "bar") + iex> changeset.changes + %{title: "bar"} + + iex> changeset = force_change(changeset, :author, "bar") + iex> changeset.changes + %{title: "bar", author: "bar"} + + """ + @spec force_change(t, atom, term) :: t + def force_change(%Changeset{types: nil}, _key, _value) do + raise ArgumentError, "changeset does not have types information" + end + + def force_change(%Changeset{types: types} = changeset, key, value) do + case Map.get(types, key) do + {tag, _} when tag in @relations -> + raise "changing #{tag}s with force_change/3 is not supported, " <> + "please use put_#{tag}/4 instead" + nil -> + raise ArgumentError, "unknown field `#{inspect(key)}` in #{inspect(changeset.data)}" + _ -> + put_in changeset.changes[key], value + end + end + + @doc """ + Deletes a change with the given key. + + ## Examples + + iex> changeset = change(%Post{}, %{title: "foo"}) + iex> changeset = delete_change(changeset, :title) + iex> get_change(changeset, :title) + nil + + """ + @spec delete_change(t, atom) :: t + def delete_change(%Changeset{} = changeset, key) when is_atom(key) do + update_in changeset.changes, &Map.delete(&1, key) + end + + + @doc """ + Applies the changeset changes to the changeset data. + + This operation will return the underlying data with changes + regardless if the changeset is valid or not. See `apply_action/2` + for a similar function that ensures the changeset is valid. + + ## Examples + + iex> changeset = change(%Post{author: "bar"}, %{title: "foo"}) + iex> apply_changes(changeset) + %Post{author: "bar", title: "foo"} + + """ + @spec apply_changes(t) :: Ecto.Schema.t | data + def apply_changes(%Changeset{changes: changes, data: data}) when changes == %{} do + data + end + + def apply_changes(%Changeset{changes: changes, data: data, types: types}) do + Enum.reduce(changes, data, fn {key, value}, acc -> + case Map.fetch(types, key) do + {:ok, {tag, relation}} when tag in @relations -> + apply_relation_changes(acc, key, relation, value) + + {:ok, _} -> + Map.put(acc, key, value) + :error -> + acc + end + end) + end + + @doc """ + Applies the changeset action only if the changes are valid. + + If the changes are valid, all changes are applied to the changeset data. + If the changes are invalid, no changes are applied, and an error tuple + is returned with the changeset containing the action that was attempted + to be applied. + + The action may be any atom. 
+ + ## Examples + + iex> {:ok, data} = apply_action(changeset, :update) + + iex> {:error, changeset} = apply_action(changeset, :update) + %Ecto.Changeset{action: :update} + """ + @spec apply_action(t, atom) :: {:ok, Ecto.Schema.t() | data} | {:error, t} + def apply_action(%Changeset{} = changeset, action) when is_atom(action) do + if changeset.valid? do + {:ok, apply_changes(changeset)} + else + {:error, %Changeset{changeset | action: action}} + end + end + + def apply_action(%Changeset{}, action) do + raise ArgumentError, "expected action to be an atom, got: #{inspect action}" + end + + @doc """ + Applies the changeset action if the changes are valid or raises an error. + + ## Examples + + iex> changeset = change(%Post{author: "bar"}, %{title: "foo"}) + iex> apply_action!(changeset, :update) + %Post{author: "bar", title: "foo"} + + iex> changeset = change(%Post{author: "bar"}, %{title: :bad}) + iex> apply_action!(changeset, :update) + ** (Ecto.InvalidChangesetError) could not perform update because changeset is invalid. + + See `apply_action/2` for more information. + """ + @spec apply_action!(t, atom) :: Ecto.Schema.t() | data + def apply_action!(%Changeset{} = changeset, action) do + case apply_action(changeset, action) do + {:ok, data} -> + data + + {:error, changeset} -> + raise Ecto.InvalidChangesetError, action: action, changeset: changeset + end + end + + ## Validations + + @doc ~S""" + Returns a keyword list of the validations for this changeset. + + The keys in the list are the names of fields, and the values are a + validation associated with the field. A field may occur multiple + times in the list. + + ## Example + + %Post{} + |> change() + |> validate_format(:title, ~r/^\w+:\s/, message: "must start with a topic") + |> validate_length(:title, max: 100) + |> validations() + #=> [ + title: {:length, [ max: 100 ]}, + title: {:format, ~r/^\w+:\s/} + ] + + The following validations may be included in the result. The list is + not necessarily exhaustive. For example, custom validations written + by the developer will also appear in our return value. + + This first group contains validations that hold a keyword list of validators. + This list may also include a `:message` key. + + * `{:length, [option]}` + + * `min: n` + * `max: n` + * `is: n` + * `count: :graphemes | :codepoints` + + * `{:number, [option]}` + + * `equal_to: n` + * `greater_than: n` + * `greater_than_or_equal_to: n` + * `less_than: n` + * `less_than_or_equal_to: n` + + The other validators simply take a value: + + * `{:exclusion, Enum.t}` + * `{:format, ~r/pattern/}` + * `{:inclusion, Enum.t}` + * `{:subset, Enum.t}` + + Note that calling `validate_required/3` does not store the validation under the + `changeset.validations` key (and so won't be included in the result of this + function). The required fields are stored under the `changeset.required` key. + """ + @spec validations(t) :: [{atom, term}] + def validations(%Changeset{validations: validations}) do + validations + end + + @doc """ + Adds an error to the changeset. + + An additional keyword list `keys` can be passed to provide additional + contextual information for the error. This is useful when using + `traverse_errors/2` and when translating errors with `Gettext` + + ## Examples + + iex> changeset = change(%Post{}, %{title: ""}) + iex> changeset = add_error(changeset, :title, "empty") + iex> changeset.errors + [title: {"empty", []}] + iex> changeset.valid? 
+ false + + iex> changeset = change(%Post{}, %{title: ""}) + iex> changeset = add_error(changeset, :title, "empty", additional: "info") + iex> changeset.errors + [title: {"empty", [additional: "info"]}] + iex> changeset.valid? + false + + iex> changeset = change(%Post{}, %{tags: ["ecto", "elixir", "x"]}) + iex> changeset = add_error(changeset, :tags, "tag '%{val}' is too short", val: "x") + iex> changeset.errors + [tags: {"tag '%{val}' is too short", [val: "x"]}] + iex> changeset.valid? + false + """ + @spec add_error(t, atom, String.t, Keyword.t) :: t + def add_error(%Changeset{errors: errors} = changeset, key, message, keys \\ []) when is_binary(message) do + %{changeset | errors: [{key, {message, keys}}|errors], valid?: false} + end + + @doc """ + Validates the given `field` change. + + It invokes the `validator` function to perform the validation + only if a change for the given `field` exists and the change + value is not `nil`. The function must return a list of errors + (with an empty list meaning no errors). + + In case there's at least one error, the list of errors will be appended to the + `:errors` field of the changeset and the `:valid?` flag will be set to + `false`. + + ## Examples + + iex> changeset = change(%Post{}, %{title: "foo"}) + iex> changeset = validate_change changeset, :title, fn :title, title -> + ...> # Value must not be "foo"! + ...> if title == "foo" do + ...> [title: "cannot be foo"] + ...> else + ...> [] + ...> end + ...> end + iex> changeset.errors + [title: {"cannot be foo", []}] + + """ + @spec validate_change(t, atom, (atom, term -> [{atom, String.t} | {atom, {String.t, Keyword.t}}])) :: t + def validate_change(%Changeset{} = changeset, field, validator) when is_atom(field) do + %{changes: changes, types: types, errors: errors} = changeset + ensure_field_exists!(changeset, types, field) + + value = Map.get(changes, field) + new = if is_nil(value), do: [], else: validator.(field, value) + new = + Enum.map(new, fn + {key, val} when is_atom(key) and is_binary(val) -> + {key, {val, []}} + {key, {val, opts}} when is_atom(key) and is_binary(val) and is_list(opts) -> + {key, {val, opts}} + end) + + case new do + [] -> changeset + [_|_] -> %{changeset | errors: new ++ errors, valid?: false} + end + end + + @doc """ + Stores the validation `metadata` and validates the given `field` change. + + Similar to `validate_change/3` but stores the validation metadata + into the changeset validators. The validator metadata is often used + as a reflection mechanism, to automatically generate code based on + the available validations. + + ## Examples + + iex> changeset = change(%Post{}, %{title: "foo"}) + iex> changeset = validate_change changeset, :title, :useless_validator, fn + ...> _, _ -> [] + ...> end + iex> changeset.validations + [title: :useless_validator] + + """ + @spec validate_change(t, atom, term, (atom, term -> [{atom, String.t} | {atom, {String.t, Keyword.t}}])) :: t + def validate_change(%Changeset{validations: validations} = changeset, + field, metadata, validator) do + changeset = %{changeset | validations: [{field, metadata}|validations]} + validate_change(changeset, field, validator) + end + + @doc """ + Validates that one or more fields are present in the changeset. + + You can pass a single field name or a list of field names that + are required. + + If the value of a field is `nil` or a string made only of whitespace, + the changeset is marked as invalid, the field is removed from the + changeset's changes, and an error is added. 
An error won't be added if + the field already has an error. + + If a field is given to `validate_required/3` but it has not been passed + as parameter during `cast/3` (i.e. it has not been changed), then + `validate_required/3` will check for its current value in the data. + If the data contains an non-empty value for the field, then no error is + added. This allows developers to use `validate_required/3` to perform + partial updates. For example, on `insert` all fields would be required, + because their default values on the data are all `nil`, but on `update`, + if you don't want to change a field that has been previously set, + you are not required to pass it as a parameter, since `validate_required/3` + won't add an error for missing changes as long as the value in the + data given to the `changeset` is not empty. + + Do not use this function to validate associations that are required, + instead pass the `:required` option to `cast_assoc/3` or `cast_embed/3`. + + Opposite to other validations, calling this function does not store + the validation under the `changeset.validations` key. Instead, it + stores all required fields under `changeset.required`. + + ## Options + + * `:message` - the message on failure, defaults to "can't be blank" + * `:trim` - a boolean that sets whether whitespaces are removed before + running the validation on binaries/strings, defaults to true + + ## Examples + + validate_required(changeset, :title) + validate_required(changeset, [:title, :body]) + + """ + @spec validate_required(t, list | atom, Keyword.t) :: t + def validate_required(%Changeset{} = changeset, fields, opts \\ []) when not is_nil(fields) do + %{required: required, errors: errors, changes: changes, types: types} = changeset + trim = Keyword.get(opts, :trim, true) + fields = List.wrap(fields) + + fields_with_errors = + for field <- fields, + ensure_field_not_many!(types, field), + missing?(changeset, field, trim), + ensure_field_exists!(changeset, types, field), + is_nil(errors[field]), + do: field + + case fields_with_errors do + [] -> + %{changeset | required: fields ++ required} + + _ -> + message = message(opts, "can't be blank") + new_errors = Enum.map(fields_with_errors, &{&1, {message, [validation: :required]}}) + changes = Map.drop(changes, fields_with_errors) + %{changeset | changes: changes, required: fields ++ required, errors: new_errors ++ errors, valid?: false} + end + end + + @doc """ + Validates that no existing record with a different primary key + has the same values for these fields. + + This function exists to provide quick feedback to users of your + application. It should not be relied on for any data guarantee as it + has race conditions and is inherently unsafe. For example, if this + check happens twice in the same time interval (because the user + submitted a form twice), both checks may pass and you may end-up with + duplicate entries in the database. Therefore, a `unique_constraint/3` + should also be used to ensure your data won't get corrupted. + + However, because constraints are only checked if all validations + succeed, this function can be used as an early check to provide + early feedback to users, since most conflicting data will have been + inserted prior to the current validation phase. + + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "has already been taken". + + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact` or `:suffix`. Defaults to `:exact`. 
+ `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. + + * `:error_key` - the key to which changeset error will be added when + check fails, defaults to the first field name of the given list of + fields. + + * `:prefix` - the prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). See `Ecto.Repo` documentation + for more information. + + * `:repo_opts` - the options to pass to the `Ecto.Repo` call. + + * `:query` - the base query to use for the check. Defaults to the schema of + the changeset. If the primary key is set, a clause will be added to exclude + the changeset row itself from the check. + + ## Examples + + unsafe_validate_unique(changeset, :city_name, repo) + unsafe_validate_unique(changeset, [:city_name, :state_name], repo) + unsafe_validate_unique(changeset, [:city_name, :state_name], repo, message: "city must be unique within state") + unsafe_validate_unique(changeset, [:city_name, :state_name], repo, prefix: "public") + unsafe_validate_unique(changeset, [:city_name, :state_name], repo, query: from(c in City, where: is_nil(c.deleted_at))) + + """ + @spec unsafe_validate_unique(t, atom | [atom, ...], Ecto.Repo.t, Keyword.t) :: t + def unsafe_validate_unique(changeset, fields, repo, opts \\ []) when is_list(opts) do + fields = List.wrap(fields) + {repo_opts, opts} = Keyword.pop(opts, :repo_opts, []) + {validations, schema} = + case changeset do + %Ecto.Changeset{validations: validations, data: %schema{}} -> + {validations, schema} + %Ecto.Changeset{} -> + raise ArgumentError, "unsafe_validate_unique/4 does not work with schemaless changesets" + end + changeset = %{changeset | validations: [{hd(fields), {:unsafe_unique, fields: fields}} | validations]} + + where_clause = for field <- fields do + {field, get_field(changeset, field)} + end + + # No need to query if there is a prior error for the fields + any_prior_errors_for_fields? = Enum.any?(changeset.errors, &(elem(&1, 0) in fields)) + + # No need to query if we haven't changed any of the fields in question + unrelated_changes? = Enum.all?(fields, ¬ Map.has_key?(changeset.changes, &1)) + + # If we don't have values for all fields, we can't query for uniqueness + any_nil_values_for_fields? = Enum.any?(where_clause, &(&1 |> elem(1) |> is_nil())) + + if unrelated_changes? || any_nil_values_for_fields? || any_prior_errors_for_fields? 
do + changeset + else + query = + Keyword.get(opts, :query, schema) + |> maybe_exclude_itself(schema, changeset) + |> Ecto.Query.where(^where_clause) + + query = + if prefix = opts[:prefix] do + Ecto.Query.put_query_prefix(query, prefix) + else + query + end + + if repo.exists?(query, repo_opts) do + error_key = Keyword.get(opts, :error_key, hd(fields)) + + add_error(changeset, error_key, message(opts, "has already been taken"), + validation: :unsafe_unique, fields: fields) + else + changeset + end + end + end + + defp maybe_exclude_itself(base_query, schema, changeset) do + :primary_key + |> schema.__schema__() + |> Enum.map(&{&1, get_field(changeset, &1)}) + |> case do + [{_pk_field, nil} | _remaining_pks] -> + base_query + + [{pk_field, value} | remaining_pks] -> + # generate a clean query (one that does not start with 'TRUE OR ...') + first_expr = Ecto.Query.dynamic([q], field(q, ^pk_field) == ^value) + + Enum.reduce_while(remaining_pks, first_expr, fn + {_pk_field, nil}, _expr -> + {:halt, nil} + + {pk_field, value}, expr -> + {:cont, Ecto.Query.dynamic([q], ^expr and field(q, ^pk_field) == ^value)} + end) + |> case do + nil -> + base_query + + matches_pk -> + Ecto.Query.where(base_query, ^Ecto.Query.dynamic(not (^matches_pk))) + end + + [] -> + base_query + end + end + + defp ensure_field_exists!(changeset = %Changeset{}, types, field) do + unless Map.has_key?(types, field) do + raise ArgumentError, "unknown field #{inspect(field)} in #{inspect(changeset.data)}" + end + true + end + + defp ensure_field_not_many!(types, field) do + case types do + %{^field => {:assoc, %Ecto.Association.Has{cardinality: :many}}} -> + IO.warn("attempting to validate has_many association #{inspect(field)} " <> + "with validate_required/3 which has no effect. You can pass the " <> + ":required option to Ecto.Changeset.cast_assoc/3 to achieve this.") + + %{^field => {:embed, %Ecto.Embedded{cardinality: :many}}} -> + IO.warn("attempting to validate embed_many field #{inspect(field)} " <> + "with validate_required/3 which has no effect. You can pass the " <> + ":required option to Ecto.Changeset.cast_embed/3 to achieve this.") + + _ -> + true + end + end + + defp missing?(changeset, field, trim) when is_atom(field) do + case get_field(changeset, field) do + %{__struct__: Ecto.Association.NotLoaded} -> + raise ArgumentError, "attempting to validate association `#{field}` " <> + "that was not loaded. Please preload your associations " <> + "before calling validate_required/3 or pass the :required " <> + "option to Ecto.Changeset.cast_assoc/3" + value when is_binary(value) and trim -> String.trim_leading(value) == "" + value when is_binary(value) -> value == "" + nil -> true + _ -> false + end + end + + defp missing?(_changeset, field, _trim) do + raise ArgumentError, "validate_required/3 expects field names to be atoms, got: `#{inspect field}`" + end + + @doc """ + Validates a change has the given format. + + The format has to be expressed as a regular expression. + + ## Options + + * `:message` - the message on failure, defaults to "has invalid format" + + ## Examples + + validate_format(changeset, :email, ~r/@/) + + """ + @spec validate_format(t, atom, Regex.t, Keyword.t) :: t + def validate_format(changeset, field, format, opts \\ []) do + validate_change changeset, field, {:format, format}, fn _, value -> + if value =~ format, do: [], else: [{field, {message(opts, "has invalid format"), [validation: :format]}}] + end + end + + @doc """ + Validates a change is included in the given enumerable. 
+ + ## Options + + * `:message` - the message on failure, defaults to "is invalid" + + ## Examples + + validate_inclusion(changeset, :cardinal_direction, ["north", "east", "south", "west"]) + validate_inclusion(changeset, :age, 0..99) + + """ + @spec validate_inclusion(t, atom, Enum.t, Keyword.t) :: t + def validate_inclusion(changeset, field, data, opts \\ []) do + validate_change changeset, field, {:inclusion, data}, fn _, value -> + type = Map.fetch!(changeset.types, field) + + if Ecto.Type.include?(type, value, data), + do: [], + else: [{field, {message(opts, "is invalid"), [validation: :inclusion, enum: data]}}] + end + end + + @doc ~S""" + Validates a change, of type enum, is a subset of the given enumerable. + + This validates if a list of values belongs to the given enumerable. + If you need to validate if a single value is inside the given enumerable, + you should use `validate_inclusion/4` instead. + + Type of the field must be array. + + ## Options + + * `:message` - the message on failure, defaults to "has an invalid entry" + + ## Examples + + validate_subset(changeset, :pets, ["cat", "dog", "parrot"]) + validate_subset(changeset, :lottery_numbers, 0..99) + + """ + @spec validate_subset(t, atom, Enum.t, Keyword.t) :: t + def validate_subset(changeset, field, data, opts \\ []) do + validate_change changeset, field, {:subset, data}, fn _, value -> + element_type = + case Map.fetch!(changeset.types, field) do + {:array, element_type} -> + element_type + + type -> + # backwards compatibility: custom types use underlying type + {:array, element_type} = Ecto.Type.type(type) + element_type + end + + case Enum.any?(value, fn element -> not Ecto.Type.include?(element_type, element, data) end) do + true -> [{field, {message(opts, "has an invalid entry"), [validation: :subset, enum: data]}}] + false -> [] + end + end + end + + @doc """ + Validates a change is not included in the given enumerable. + + ## Options + + * `:message` - the message on failure, defaults to "is reserved" + + ## Examples + + validate_exclusion(changeset, :name, ~w(admin superadmin)) + + """ + @spec validate_exclusion(t, atom, Enum.t, Keyword.t) :: t + def validate_exclusion(changeset, field, data, opts \\ []) do + validate_change changeset, field, {:exclusion, data}, fn _, value -> + type = Map.fetch!(changeset.types, field) + + if Ecto.Type.include?(type, value, data), do: + [{field, {message(opts, "is reserved"), [validation: :exclusion, enum: data]}}], else: [] + end + end + + @doc """ + Validates a change is a string or list of the given length. + + Note that the length of a string is counted in graphemes by default. If using + this validation to match a character limit of a database backend, + it's likely that the limit ignores graphemes and limits the number + of unicode characters. Then consider using the `:count` option to + limit the number of codepoints (`:codepoints`), or limit the number of bytes (`:bytes`). 
+ + ## Options + + * `:is` - the length must be exactly this value + * `:min` - the length must be greater than or equal to this value + * `:max` - the length must be less than or equal to this value + * `:count` - what length to count for string, `:graphemes` (default), `:codepoints` or `:bytes` + * `:message` - the message on failure, depending on the validation, is one of: + * for strings: + * "should be %{count} character(s)" + * "should be at least %{count} character(s)" + * "should be at most %{count} character(s)" + * for binary: + * "should be %{count} byte(s)" + * "should be at least %{count} byte(s)" + * "should be at most %{count} byte(s)" + * for lists: + * "should have %{count} item(s)" + * "should have at least %{count} item(s)" + * "should have at most %{count} item(s)" + + ## Examples + + validate_length(changeset, :title, min: 3) + validate_length(changeset, :title, max: 100) + validate_length(changeset, :title, min: 3, max: 100) + validate_length(changeset, :code, is: 9) + validate_length(changeset, :topics, is: 2) + validate_length(changeset, :icon, count: :bytes, max: 1024 * 16) + + """ + @spec validate_length(t, atom, Keyword.t) :: t + def validate_length(changeset, field, opts) when is_list(opts) do + validate_change changeset, field, {:length, opts}, fn + _, value -> + count_type = opts[:count] || :graphemes + {type, length} = case {value, count_type} do + {value, :codepoints} when is_binary(value) -> + {:string, codepoints_length(value, 0)} + {value, :graphemes} when is_binary(value) -> + {:string, String.length(value)} + {value, :bytes} when is_binary(value) -> + {:binary, byte_size(value)} + {value, _} when is_list(value) -> + {:list, list_length(changeset, field, value)} + end + + error = ((is = opts[:is]) && wrong_length(type, length, is, opts)) || + ((min = opts[:min]) && too_short(type, length, min, opts)) || + ((max = opts[:max]) && too_long(type, length, max, opts)) + + if error, do: [{field, error}], else: [] + end + end + + defp codepoints_length(<<_::utf8, rest::binary>>, acc), do: codepoints_length(rest, acc + 1) + defp codepoints_length(<<_, rest::binary>>, acc), do: codepoints_length(rest, acc + 1) + defp codepoints_length(<<>>, acc), do: acc + + defp list_length(%{types: types}, field, value) do + case Map.fetch(types, field) do + {:ok, {tag, _association}} when tag in [:embed, :assoc] -> + length(Relation.filter_empty(value)) + _ -> + length(value) + end + end + + defp wrong_length(_type, value, value, _opts), do: nil + defp wrong_length(:string, _length, value, opts), do: + {message(opts, "should be %{count} character(s)"), count: value, validation: :length, kind: :is, type: :string} + defp wrong_length(:binary, _length, value, opts), do: + {message(opts, "should be %{count} byte(s)"), count: value, validation: :length, kind: :is, type: :binary} + defp wrong_length(:list, _length, value, opts), do: + {message(opts, "should have %{count} item(s)"), count: value, validation: :length, kind: :is, type: :list} + + defp too_short(_type, length, value, _opts) when length >= value, do: nil + defp too_short(:string, _length, value, opts), do: + {message(opts, "should be at least %{count} character(s)"), count: value, validation: :length, kind: :min, type: :string} + defp too_short(:binary, _length, value, opts), do: + {message(opts, "should be at least %{count} byte(s)"), count: value, validation: :length, kind: :min, type: :binary} + defp too_short(:list, _length, value, opts), do: + {message(opts, "should have at least %{count} item(s)"), count: value, 
validation: :length, kind: :min, type: :list} + + defp too_long(_type, length, value, _opts) when length <= value, do: nil + defp too_long(:string, _length, value, opts), do: + {message(opts, "should be at most %{count} character(s)"), count: value, validation: :length, kind: :max, type: :string} + defp too_long(:binary, _length, value, opts), do: + {message(opts, "should be at most %{count} byte(s)"), count: value, validation: :length, kind: :max, type: :binary} + defp too_long(:list, _length, value, opts), do: + {message(opts, "should have at most %{count} item(s)"), count: value, validation: :length, kind: :max, type: :list} + + @doc """ + Validates the properties of a number. + + ## Options + + * `:less_than` + * `:greater_than` + * `:less_than_or_equal_to` + * `:greater_than_or_equal_to` + * `:equal_to` + * `:not_equal_to` + * `:message` - the message on failure, defaults to one of: + * "must be less than %{number}" + * "must be greater than %{number}" + * "must be less than or equal to %{number}" + * "must be greater than or equal to %{number}" + * "must be equal to %{number}" + * "must be not equal to %{number}" + + ## Examples + + validate_number(changeset, :count, less_than: 3) + validate_number(changeset, :pi, greater_than: 3, less_than: 4) + validate_number(changeset, :the_answer_to_life_the_universe_and_everything, equal_to: 42) + + """ + @spec validate_number(t, atom, Keyword.t) :: t + def validate_number(changeset, field, opts) do + validate_change changeset, field, {:number, opts}, fn + field, value -> + {message, opts} = Keyword.pop(opts, :message) + + unless valid_number?(value) do + raise ArgumentError, "expected field `#{field}` to be a decimal, integer, or float, got: #{inspect(value)}" + end + + Enum.find_value opts, [], fn {spec_key, target_value} -> + case Map.fetch(@number_validators, spec_key) do + {:ok, {spec_function, default_message}} -> + unless valid_number?(target_value) do + raise ArgumentError, "expected option `#{spec_key}` to be a decimal, integer, or float, got: #{inspect(target_value)}" + end + + compare_numbers(field, value, message || default_message, + spec_key, spec_function, target_value) + + :error -> + supported_options = @number_validators |> Map.keys() |> Enum.map_join("\n", &" * #{inspect(&1)}") + + raise ArgumentError, """ + unknown option #{inspect spec_key} given to validate_number/3 + + The supported options are: + + #{supported_options} + """ + end + end + end + end + + defp valid_number?(%Decimal{}), do: true + defp valid_number?(other), do: is_number(other) + + defp compare_numbers(field, %Decimal{} = value, message, spec_key, _spec_function, %Decimal{} = target_value) do + result = Decimal.compare(value, target_value) |> normalize_compare() + case decimal_compare(result, spec_key) do + true -> nil + false -> [{field, {message, validation: :number, kind: spec_key, number: target_value}}] + end + end + + defp compare_numbers(field, value, message, spec_key, spec_function, %Decimal{} = target_value) do + compare_numbers(field, decimal_new(value), message, spec_key, spec_function, target_value) + end + + defp compare_numbers(field, %Decimal{} = value, message, spec_key, spec_function, target_value) do + compare_numbers(field, value, message, spec_key, spec_function, decimal_new(target_value)) + end + + defp compare_numbers(field, value, message, spec_key, spec_function, target_value) do + case apply(spec_function, [value, target_value]) do + true -> nil + false -> [{field, {message, validation: :number, kind: spec_key, number: 
target_value}}] + end + end + + # TODO: Remove me once we support Decimal 2.0 only + # Support mismatch between API for Decimal.compare/2 for versions 1.6 and 2.0 + defp normalize_compare(result) do + case result do + %Decimal{coef: 1, sign: -1} -> :lt + %Decimal{coef: 0} -> :eq + %Decimal{coef: 1, sign: 1} -> :gt + _ -> result + end + end + + defp decimal_new(term) when is_float(term), do: Decimal.from_float(term) + defp decimal_new(term), do: Decimal.new(term) + + defp decimal_compare(:lt, spec), do: spec in [:less_than, :less_than_or_equal_to, :not_equal_to] + defp decimal_compare(:gt, spec), do: spec in [:greater_than, :greater_than_or_equal_to, :not_equal_to] + defp decimal_compare(:eq, spec), do: spec in [:equal_to, :less_than_or_equal_to, :greater_than_or_equal_to] + + @doc """ + Validates that the given parameter matches its confirmation. + + By calling `validate_confirmation(changeset, :email)`, this + validation will check if both "email" and "email_confirmation" + in the parameter map match. Note this validation only looks + at the parameters themselves, never the fields in the schema. + As such, the "email_confirmation" field does not need to be + added as a virtual field in your schema. + + Note that if the confirmation field is nil or missing, this does + not add a validation error. You can specify that the confirmation + parameter is required in the options (see below). + + ## Options + + * `:message` - the message on failure, defaults to "does not match confirmation" + * `:required` - boolean, sets whether existence of confirmation parameter + is required for addition of error. Defaults to false + + ## Examples + + validate_confirmation(changeset, :email) + validate_confirmation(changeset, :password, message: "does not match password") + + cast(data, params, [:password]) + |> validate_confirmation(:password, message: "does not match password") + + """ + @spec validate_confirmation(t, atom, Keyword.t) :: t + def validate_confirmation(changeset, field, opts \\ []) + def validate_confirmation(%{params: params} = changeset, field, opts) when is_map(params) do + param = Atom.to_string(field) + error_param = "#{param}_confirmation" + error_field = String.to_atom(error_param) + value = Map.get(params, param) + + errors = + case Map.fetch(params, error_param) do + {:ok, ^value} -> + [] + {:ok, _} -> + [{error_field, + {message(opts, "does not match confirmation"), [validation: :confirmation]}}] + :error -> + confirmation_missing(opts, error_field) + end + + %{changeset | validations: [{field, {:confirmation, opts}} | changeset.validations], + errors: errors ++ changeset.errors, + valid?: changeset.valid? and errors == []} + end + def validate_confirmation(%{params: nil} = changeset, _, _) do + changeset + end + + defp confirmation_missing(opts, error_field) do + required = Keyword.get(opts, :required, false) + if required, do: [{error_field, {message(opts, "can't be blank"), [validation: :required]}}], else: [] + end + + defp message(opts, key \\ :message, default) do + Keyword.get(opts, key, default) + end + + @doc """ + Validates the given parameter is true. + + Note this validation only checks the parameter itself is true, never + the field in the schema. That's because acceptance parameters do not need + to be persisted, as by definition they would always be stored as `true`.
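+
+  Because the value is not persisted, the field is typically declared as a
+  virtual boolean in the schema. A minimal sketch (the `:terms_of_service`
+  field name is illustrative):
+
+      field :terms_of_service, :boolean, virtual: true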
+ + ## Options + + * `:message` - the message on failure, defaults to "must be accepted" + + ## Examples + + validate_acceptance(changeset, :terms_of_service) + validate_acceptance(changeset, :rules, message: "please accept rules") + + """ + @spec validate_acceptance(t, atom, Keyword.t) :: t + def validate_acceptance(changeset, field, opts \\ []) + def validate_acceptance(%{params: params} = changeset, field, opts) do + errors = validate_acceptance_errors(params, field, opts) + + %{changeset | validations: [{field, {:acceptance, opts}} | changeset.validations], + errors: errors ++ changeset.errors, + valid?: changeset.valid? and errors == []} + end + + defp validate_acceptance_errors(nil, _field, _opts), do: [] + + defp validate_acceptance_errors(params, field, opts) do + param = Atom.to_string(field) + value = Map.get(params, param) + + case Ecto.Type.cast(:boolean, value) do + {:ok, true} -> [] + _ -> [{field, {message(opts, "must be accepted"), validation: :acceptance}}] + end + end + + ## Optimistic lock + + @doc ~S""" + Applies optimistic locking to the changeset. + + [Optimistic + locking](https://en.wikipedia.org/wiki/Optimistic_concurrency_control) (or + *optimistic concurrency control*) is a technique that allows concurrent edits + on a single record. While pessimistic locking works by locking a resource for + an entire transaction, optimistic locking only checks if the resource changed + before updating it. + + This is done by regularly fetching the record from the database, then checking + whether another user has made changes to the record *only when updating the + record*. This behaviour is ideal in situations where the chances of concurrent + updates to the same record are low; if they're not, pessimistic locking or + other concurrency patterns may be more suited. + + ## Usage + + Optimistic locking works by keeping a "version" counter for each record; this + counter gets incremented each time a modification is made to a record. Hence, + in order to use optimistic locking, a field must exist in your schema for + versioning purpose. Such field is usually an integer but other types are + supported. + + ## Examples + + Assuming we have a `Post` schema (stored in the `posts` table), the first step + is to add a version column to the `posts` table: + + alter table(:posts) do + add :lock_version, :integer, default: 1 + end + + The column name is arbitrary and doesn't need to be `:lock_version`. Now add + a field to the schema too: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + field :title, :string + field :lock_version, :integer, default: 1 + end + + def changeset(:update, struct, params \\ %{}) do + struct + |> Ecto.Changeset.cast(params, [:title]) + |> Ecto.Changeset.optimistic_lock(:lock_version) + end + end + + Now let's take optimistic locking for a spin: + + iex> post = Repo.insert!(%Post{title: "foo"}) + %Post{id: 1, title: "foo", lock_version: 1} + iex> valid_change = Post.changeset(:update, post, %{title: "bar"}) + iex> stale_change = Post.changeset(:update, post, %{title: "baz"}) + iex> Repo.update!(valid_change) + %Post{id: 1, title: "bar", lock_version: 2} + iex> Repo.update!(stale_change) + ** (Ecto.StaleEntryError) attempted to update a stale entry: + + %Post{id: 1, title: "baz", lock_version: 1} + + When a conflict happens (a record which has been previously fetched is + being updated, but that same record has been modified since it was + fetched), an `Ecto.StaleEntryError` exception is raised. 
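+
+  How you recover from such a conflict is application specific; a minimal
+  sketch (the return value below is only illustrative) is to rescue the
+  error and report the conflict to the caller:
+
+      try do
+        Repo.update!(stale_change)
+      rescue
+        Ecto.StaleEntryError ->
+          # reload the record and ask the user to retry with fresh data
+          {:error, :stale_entry}
+      end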
+ + Optimistic locking also works with delete operations. Just call the + `optimistic_lock/3` function with the data before delete: + + iex> changeset = Ecto.Changeset.optimistic_lock(post, :lock_version) + iex> Repo.delete(changeset) + + `optimistic_lock/3` by default assumes the field + being used as a lock is an integer. If you want to use another type, + you need to pass the third argument customizing how the next value + is generated: + + iex> Ecto.Changeset.optimistic_lock(post, :lock_uuid, fn _ -> Ecto.UUID.generate end) + + """ + @spec optimistic_lock(Ecto.Schema.t | t, atom, (term -> term)) :: t + def optimistic_lock(data_or_changeset, field, incrementer \\ &increment_with_rollover/1) do + changeset = change(data_or_changeset, %{}) + current = get_field(changeset, field) + + # Apply these changes only inside the repo because we + # don't want to permanently track the lock change. + changeset = prepare_changes(changeset, fn changeset -> + put_in(changeset.changes[field], incrementer.(current)) + end) + + changeset = put_in(changeset.filters[field], current) + changeset + end + + # increment_with_rollover expect to be used with lock_version set as :integer in db schema + # 2_147_483_647 is upper limit for signed integer for both PostgreSQL and MySQL + defp increment_with_rollover(val) when val >= 2_147_483_647 do + 1 + end + + defp increment_with_rollover(val) when is_integer(val) do + val + 1 + end + + @doc """ + Provides a function executed by the repository on insert/update/delete. + + If the changeset given to the repository is valid, the function given to + `prepare_changes/2` will be called with the changeset and must return a + changeset, allowing developers to do final adjustments to the changeset or + to issue data consistency commands. The repository itself can be accessed + inside the function under the `repo` field in the changeset. If the + changeset given to the repository is invalid, the function will not be + invoked. + + The given function is guaranteed to run inside the same transaction + as the changeset operation for databases that do support transactions. + + ## Example + + A common use case is updating a counter cache, in this case updating a post's + comment count when a comment is created: + + def create_comment(comment, params) do + comment + |> cast(params, [:body, :post_id]) + |> prepare_changes(fn changeset -> + if post_id = get_change(changeset, :post_id) do + query = from Post, where: [id: ^post_id] + changeset.repo.update_all(query, inc: [comment_count: 1]) + end + changeset + end) + end + + We retrieve the repo from the comment changeset itself and use + update_all to update the counter cache in one query. Finally, the original + changeset must be returned. + """ + @spec prepare_changes(t, (t -> t)) :: t + def prepare_changes(%Changeset{prepare: prepare} = changeset, function) when is_function(function, 1) do + %{changeset | prepare: [function | prepare]} + end + + ## Constraints + + @doc """ + Returns all constraints in a changeset. + + A constraint is a map with the following fields: + + * `:type` - the type of the constraint that will be checked in the database, + such as `:check`, `:unique`, etc + * `:constraint` - the database constraint name as a string + * `:match` - the type of match Ecto will perform on a violated constraint + against the `:constraint` value. 
It is `:exact`, `:suffix` or `:prefix` + * `:field` - the field a violated constraint will apply the error to + * `:error_message` - the error message in case of violated constraints + * `:error_type` - the type of error that identifies the error message + + """ + @spec constraints(t) :: [constraint] + def constraints(%Changeset{constraints: constraints}) do + constraints + end + + @doc """ + Checks for a check constraint in the given field. + + The check constraint works by relying on the database to check + if the check constraint has been violated or not and, if so, + Ecto converts it into a changeset error. + + In order to use the check constraint, the first step is + to define the check constraint in a migration: + + create constraint("users", :age_must_be_positive, check: "age > 0") + + Now that a constraint exists, when modifying users, we could + annotate the changeset with a check constraint so Ecto knows + how to convert it into an error message: + + cast(user, params, [:age]) + |> check_constraint(:age, name: :age_must_be_positive) + + Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, if the + age is not positive, it will be converted into an error and + `{:error, changeset}` returned by the repository. Note that the error + will occur only after hitting the database so it will not be visible + until all other validations pass. + + ## Options + + * `:message` - the message in case the constraint check fails. + Defaults to "is invalid" + * `:name` - the name of the constraint. Required. + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. `:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. + + """ + def check_constraint(changeset, field, opts \\ []) do + constraint = opts[:name] || raise ArgumentError, "must supply the name of the constraint" + message = message(opts, "is invalid") + match_type = Keyword.get(opts, :match, :exact) + add_constraint(changeset, :check, to_string(constraint), match_type, field, message) + end + + @doc """ + Checks for a unique constraint in the given field or list of fields. + + The unique constraint works by relying on the database to check + if the unique constraint has been violated or not and, if so, + Ecto converts it into a changeset error. + + In order to use the uniqueness constraint, the first step is + to define the unique index in a migration: + + create unique_index(:users, [:email]) + + Now that a constraint exists, when modifying users, we could + annotate the changeset with a unique constraint so Ecto knows + how to convert it into an error message: + + cast(user, params, [:email]) + |> unique_constraint(:email) + + Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, if the + email already exists, it will be converted into an error and + `{:error, changeset}` returned by the repository. Note that the error + will occur only after hitting the database so it will not be visible + until all other validations pass. + + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "has already been taken" + + * `:name` - the constraint name. By default, the constraint + name is inferred from the table + field(s). 
May be required + explicitly for complex cases + + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. `:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. + + * `:error_key` - the key to which changeset error will be added when + check fails, defaults to the first field name of the given list of + fields. + + ## Complex constraints + + Because the constraint logic is in the database, we can leverage + all the database functionality when defining them. For example, + let's suppose the e-mails are scoped by company id: + + # In migration + create unique_index(:users, [:email, :company_id]) + + # In the changeset function + cast(user, params, [:email]) + |> unique_constraint([:email, :company_id]) + + The first field name, `:email` in this case, will be used as the error + key to the changeset errors keyword list. For example, the above + `unique_constraint/3` would generate something like: + + Repo.insert!(%User{email: "john@elixir.org", company_id: 1}) + changeset = User.changeset(%User{}, %{email: "john@elixir.org", company_id: 1}) + {:error, changeset} = Repo.insert(changeset) + changeset.errors #=> [email: {"has already been taken", []}] + + In complex cases, instead of relying on name inference, it may be best + to set the constraint name explicitly: + + # In the migration + create unique_index(:users, [:email, :company_id], name: :users_email_company_id_index) + + # In the changeset function + cast(user, params, [:email]) + |> unique_constraint(:email, name: :users_email_company_id_index) + + ### Partitioning + + If your table is partitioned, then your unique index might look different + per partition, e.g. Postgres adds a partition number to the middle of your key, like: + + users_p0_email_key + users_p1_email_key + ... + users_p99_email_key + + In this case you can use the name and suffix options together to match on + these dynamic indexes, like: + + cast(user, params, [:email]) + |> unique_constraint(:email, name: :email_key, match: :suffix) + + ## Case sensitivity + + Unfortunately, different databases provide different guarantees + when it comes to case sensitivity. For example, in MySQL, comparisons + are case-insensitive by default. In Postgres, users can define a case-insensitive + column by using the `:citext` type/extension. In your migration: + + execute "CREATE EXTENSION IF NOT EXISTS citext" + create table(:users) do + ... + add :email, :citext + ...
+ end + + If for some reason your database does not support case insensitive columns, + you can explicitly downcase values before inserting/updating them: + + cast(data, params, [:email]) + |> update_change(:email, &String.downcase/1) + |> unique_constraint(:email) + + """ + @spec unique_constraint(t, atom | [atom, ...], Keyword.t) :: t + def unique_constraint(changeset, field_or_fields, opts \\ []) + + def unique_constraint(changeset, field, opts) when is_atom(field) do + unique_constraint(changeset, [field], opts) + end + + def unique_constraint(changeset, [first_field | _] = fields, opts) do + constraint = opts[:name] || unique_index_name(changeset, fields) + message = message(opts, "has already been taken") + match_type = Keyword.get(opts, :match, :exact) + error_key = Keyword.get(opts, :error_key, first_field) + add_constraint(changeset, :unique, to_string(constraint), match_type, error_key, message) + end + + defp unique_index_name(changeset, fields) do + field_names = Enum.map(fields, &get_field_source(changeset, &1)) + Enum.join([get_source(changeset)] ++ field_names ++ ["index"], "_") + end + + @doc """ + Checks for foreign key constraint in the given field. + + The foreign key constraint works by relying on the database to + check if the associated data exists or not. This is useful to + guarantee that a child will only be created if the parent exists + in the database too. + + In order to use the foreign key constraint the first step is + to define the foreign key in a migration. This is often done + with references. For example, imagine you are creating a + comments table that belongs to posts. One would have: + + create table(:comments) do + add :post_id, references(:posts) + end + + By default, Ecto will generate a foreign key constraint with + name "comments_post_id_fkey" (the name is configurable). + + Now that a constraint exists, when creating comments, we could + annotate the changeset with foreign key constraint so Ecto knows + how to convert it into an error message: + + cast(comment, params, [:post_id]) + |> foreign_key_constraint(:post_id) + + Now, when invoking `c:Ecto.Repo.insert/2` or `c:Ecto.Repo.update/2`, if the + associated post does not exist, it will be converted into an + error and `{:error, changeset}` returned by the repository. + + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "does not exist" + * `:name` - the constraint name. By default, the constraint + name is inferred from the table + field. May be required + explicitly for complex cases + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. `:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. + + """ + @spec foreign_key_constraint(t, atom, Keyword.t) :: t + def foreign_key_constraint(changeset, field, opts \\ []) do + constraint = opts[:name] || "#{get_source(changeset)}_#{get_field_source(changeset, field)}_fkey" + match_type = Keyword.get(opts, :match, :exact) + message = message(opts, "does not exist") + add_constraint(changeset, :foreign_key, to_string(constraint), match_type, field, message, :foreign) + end + + @doc """ + Checks the associated field exists. + + This is similar to `foreign_key_constraint/3` except that the + field is inferred from the association definition. 
This is useful + to guarantee that a child will only be created if the parent exists + in the database too. Therefore, it only applies to `belongs_to` + associations. + + As the name says, a constraint is required in the database for + this function to work. Such constraint is often added as a + reference to the child table: + + create table(:comments) do + add :post_id, references(:posts) + end + + Now, when inserting a comment, it is possible to forbid any + comment to be added if the associated post does not exist: + + comment + |> Ecto.Changeset.cast(params, [:post_id]) + |> Ecto.Changeset.assoc_constraint(:post) + |> Repo.insert + + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "does not exist" + * `:name` - the constraint name. By default, the constraint + name is inferred from the table + association field. + May be required explicitly for complex cases + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. `:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. + """ + @spec assoc_constraint(t, atom, Keyword.t) :: t + def assoc_constraint(changeset, assoc, opts \\ []) do + constraint = opts[:name] || + case get_assoc(changeset, assoc) do + %Ecto.Association.BelongsTo{owner_key: owner_key} -> + "#{get_source(changeset)}_#{owner_key}_fkey" + other -> + raise ArgumentError, + "assoc_constraint can only be added to belongs to associations, got: #{inspect other}" + end + + match_type = Keyword.get(opts, :match, :exact) + message = message(opts, "does not exist") + add_constraint(changeset, :foreign_key, to_string(constraint), match_type, assoc, message, :assoc) + end + + @doc """ + Checks the associated field does not exist. + + This is similar to `foreign_key_constraint/3` except that the + field is inferred from the association definition. This is useful + to guarantee that parent can only be deleted (or have its primary + key changed) if no child exists in the database. Therefore, it only + applies to `has_*` associations. + + As the name says, a constraint is required in the database for + this function to work. Such constraint is often added as a + reference to the child table: + + create table(:comments) do + add :post_id, references(:posts) + end + + Now, when deleting the post, it is possible to forbid any post to + be deleted if they still have comments attached to it: + + post + |> Ecto.Changeset.change + |> Ecto.Changeset.no_assoc_constraint(:comments) + |> Repo.delete + + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "is still associated with this entry" (for `has_one`) + and "are still associated with this entry" (for `has_many`) + * `:name` - the constraint name. By default, the constraint + name is inferred from the association table + association + field. May be required explicitly for complex cases + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. `:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. 
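+
+  For example (the constraint name below is hypothetical), the name can be
+  passed explicitly when it cannot be inferred from the association:
+
+      no_assoc_constraint(changeset, :comments, name: :post_comments_fkey)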
+ + """ + @spec no_assoc_constraint(t, atom, Keyword.t) :: t + def no_assoc_constraint(changeset, assoc, opts \\ []) do + {constraint, message} = + case get_assoc(changeset, assoc) do + %Ecto.Association.Has{cardinality: cardinality, + related_key: related_key, related: related} -> + {opts[:name] || "#{related.__schema__(:source)}_#{related_key}_fkey", + message(opts, no_assoc_message(cardinality))} + other -> + raise ArgumentError, + "no_assoc_constraint can only be added to has one/many associations, got: #{inspect other}" + end + + match_type = Keyword.get(opts, :match, :exact) + add_constraint(changeset, :foreign_key, to_string(constraint), match_type, assoc, message, :no_assoc) + end + + @doc """ + Checks for an exclusion constraint in the given field. + + The exclusion constraint works by relying on the database to check + if the exclusion constraint has been violated or not and, if so, + Ecto converts it into a changeset error. + + ## Options + + * `:message` - the message in case the constraint check fails, + defaults to "violates an exclusion constraint" + * `:name` - the constraint name. By default, the constraint + name is inferred from the table + field. May be required + explicitly for complex cases + * `:match` - how the changeset constraint name is matched against the + repo constraint, may be `:exact`, `:suffix` or `:prefix`. Defaults to + `:exact`. `:suffix` matches any repo constraint which `ends_with?` `:name` + to this changeset constraint. `:prefix` matches any repo constraint which + `starts_with?` `:name` to this changeset constraint. + + """ + def exclusion_constraint(changeset, field, opts \\ []) do + constraint = opts[:name] || "#{get_source(changeset)}_#{get_field_source(changeset, field)}_exclusion" + message = message(opts, "violates an exclusion constraint") + match_type = Keyword.get(opts, :match, :exact) + add_constraint(changeset, :exclusion, to_string(constraint), match_type, field, message, :exclusion) + end + + defp no_assoc_message(:one), do: "is still associated with this entry" + defp no_assoc_message(:many), do: "are still associated with this entry" + + defp add_constraint(changeset, type, constraint, match, field, message) do + add_constraint(changeset, type, constraint, match, field, message, type) + end + + defp add_constraint(%Changeset{constraints: constraints} = changeset, + type, constraint, match, field, error_message, error_type) + when is_binary(constraint) and is_atom(field) and is_binary(error_message) do + unless match in @match_types do + raise ArgumentError, "invalid match type: #{inspect match}. 
Allowed match types: #{inspect @match_types}" + end + + constraint = %{ + constraint: constraint, + error_message: error_message, + error_type: error_type, + field: field, + match: match, + type: type + } + + %{changeset | constraints: [constraint | constraints]} + end + + defp get_source(%{data: %{__meta__: %{source: source}}}) when is_binary(source), + do: source + defp get_source(%{data: data}), do: + raise ArgumentError, "cannot add constraint to changeset because it does not have a source, got: #{inspect data}" + defp get_source(item), do: + raise ArgumentError, "cannot add constraint because a changeset was not supplied, got: #{inspect item}" + + defp get_assoc(%{types: types}, assoc) do + case Map.fetch(types, assoc) do + {:ok, {:assoc, association}} -> + association + _ -> + raise_invalid_assoc(types, assoc) + end + end + + defp raise_invalid_assoc(types, assoc) do + associations = for {_key, {:assoc, %{field: field}}} <- types, do: field + one_of = if match?([_], associations), do: "", else: "one of " + + raise ArgumentError, + "cannot add constraint to changeset because association `#{assoc}` does not exist. " <> + "Did you mean #{one_of}`#{Enum.join(associations, "`, `")}`?" + end + + defp get_field_source(%{data: %{__struct__: schema}}, field) when is_atom(schema), + do: schema.__schema__(:field_source, field) || field + defp get_field_source(%{}, field), + do: field + + @doc ~S""" + Traverses changeset errors and applies the given function to error messages. + + This function is particularly useful when associations and embeds + are cast in the changeset as it will traverse all associations and + embeds and place all errors in a series of nested maps. + + A changeset is supplied along with a function to apply to each + error message as the changeset is traversed. The error message + function receives an error tuple `{msg, opts}`, for example: + + {"should be at least %{count} characters", [count: 3, validation: :length, min: 3]} + + ## Examples + + iex> traverse_errors(changeset, fn {msg, opts} -> + ...> Regex.replace(~r"%{(\w+)}", msg, fn _, key -> + ...> opts |> Keyword.get(String.to_existing_atom(key), key) |> to_string() + ...> end) + ...> end) + %{title: ["should be at least 3 characters"]} + + Optionally function can accept three arguments: `changeset`, `field` + and error tuple `{msg, opts}`. It is useful whenever you want to extract + validations rules from `changeset.validations` to build detailed error + description. 
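+
+  A sketch of the three-arity form (the `:title` field and its length
+  validation are assumed only for illustration):
+
+      iex> traverse_errors(changeset, fn _changeset, _field, {msg, opts} ->
+      ...>   Enum.reduce(opts, msg, fn {key, value}, acc ->
+      ...>     String.replace(acc, "%{#{key}}", to_string(value))
+      ...>   end)
+      ...> end)
+      %{title: ["should be at least 3 characters"]}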
+ """ + @spec traverse_errors(t, (error -> String.t) | (Changeset.t, atom, error -> String.t)) :: %{atom => [term]} + def traverse_errors(%Changeset{errors: errors, changes: changes, types: types} = changeset, msg_func) + when is_function(msg_func, 1) or is_function(msg_func, 3) do + errors + |> Enum.reverse() + |> merge_keyword_keys(msg_func, changeset) + |> merge_related_keys(changes, types, msg_func, &traverse_errors/2) + end + + defp merge_keyword_keys(keyword_list, msg_func, _) when is_function(msg_func, 1) do + Enum.reduce(keyword_list, %{}, fn({key, val}, acc) -> + val = msg_func.(val) + Map.update(acc, key, [val], &[val|&1]) + end) + end + + defp merge_keyword_keys(keyword_list, msg_func, changeset) when is_function(msg_func, 3) do + Enum.reduce(keyword_list, %{}, fn({key, val}, acc) -> + val = msg_func.(changeset, key, val) + Map.update(acc, key, [val], &[val|&1]) + end) + end + + defp merge_related_keys(_, _, nil, _, _) do + raise ArgumentError, "changeset does not have types information" + end + + defp merge_related_keys(map, changes, types, msg_func, traverse_function) do + Enum.reduce types, map, fn + {field, {tag, %{cardinality: :many}}}, acc when tag in @relations -> + if changesets = Map.get(changes, field) do + {child, all_empty?} = + Enum.map_reduce(changesets, true, fn changeset, all_empty? -> + child = traverse_function.(changeset, msg_func) + {child, all_empty? and child == %{}} + end) + + case all_empty? do + true -> acc + false -> Map.put(acc, field, child) + end + else + acc + end + {field, {tag, %{cardinality: :one}}}, acc when tag in @relations -> + if changeset = Map.get(changes, field) do + case traverse_function.(changeset, msg_func) do + child when child == %{} -> acc + child -> Map.put(acc, field, child) + end + else + acc + end + {_, _}, acc -> + acc + end + end + + defp apply_relation_changes(acc, key, relation, value) do + relation_changed = Relation.apply_changes(relation, value) + + acc = Map.put(acc, key, relation_changed) + + with %Ecto.Association.BelongsTo{related_key: related_key} <- relation, + %{^related_key => id} <- relation_changed do + Map.put(acc, relation.owner_key, id) + else + _ -> acc + end + end + + @doc ~S""" + Traverses changeset validations and applies the given function to validations. + + This behaves the same as `traverse_errors/2`, but operates on changeset + validations instead of errors. + + ## Examples + + iex> traverse_validations(changeset, &(&1)) + %{title: [format: ~r/pattern/, length: [min: 1, max: 20]]} + + iex> traverse_validations(changeset, fn + ...> {:length, opts} -> {:length, "#{Keyword.get(opts, :min, 0)}-#{Keyword.get(opts, :max, 32)}"} + ...> {:format, %Regex{source: source}} -> {:format, "/#{source}/"} + ...> {other, opts} -> {other, inspect(opts)} + ...> end) + %{title: [format: "/pattern/", length: "1-20"]} + """ + @spec traverse_validations(t, (error -> String.t) | (Changeset.t, atom, error -> String.t)) :: %{atom => [term]} + def traverse_validations(%Changeset{validations: validations, changes: changes, types: types} = changeset, msg_func) + when is_function(msg_func, 1) or is_function(msg_func, 3) do + validations + |> Enum.reverse() + |> merge_keyword_keys(msg_func, changeset) + |> merge_related_keys(changes, types, msg_func, &traverse_validations/2) + end +end + +defimpl Inspect, for: Ecto.Changeset do + import Inspect.Algebra + + def inspect(%Ecto.Changeset{data: data} = changeset, opts) do + list = for attr <- [:action, :changes, :errors, :data, :valid?] 
do + {attr, Map.get(changeset, attr)} + end + + redacted_fields = case data do + %type{} -> + if function_exported?(type, :__schema__, 1) do + type.__schema__(:redact_fields) + else + [] + end + _ -> [] + end + + container_doc("#Ecto.Changeset<", list, ">", opts, fn + {:action, action}, opts -> concat("action: ", to_doc(action, opts)) + {:changes, changes}, opts -> concat("changes: ", changes |> filter(redacted_fields) |> to_doc(opts)) + {:data, data}, _opts -> concat("data: ", to_struct(data, opts)) + {:errors, errors}, opts -> concat("errors: ", to_doc(errors, opts)) + {:valid?, valid?}, opts -> concat("valid?: ", to_doc(valid?, opts)) + end) + end + + defp to_struct(%{__struct__: struct}, _opts), do: "#" <> Kernel.inspect(struct) <> "<>" + defp to_struct(other, opts), do: to_doc(other, opts) + + defp filter(changes, redacted_fields) do + Enum.reduce(redacted_fields, changes, fn redacted_field, changes -> + if Map.has_key?(changes, redacted_field) do + Map.put(changes, redacted_field, "**redacted**") + else + changes + end + end) + end +end diff --git a/deps/ecto/lib/ecto/changeset/relation.ex b/deps/ecto/lib/ecto/changeset/relation.ex new file mode 100644 index 0000000..b60182a --- /dev/null +++ b/deps/ecto/lib/ecto/changeset/relation.ex @@ -0,0 +1,565 @@ +defmodule Ecto.Changeset.Relation do + @moduledoc false + + require Logger + alias Ecto.Changeset + alias Ecto.Association.NotLoaded + + @type t :: %{required(:__struct__) => atom(), + required(:cardinality) => :one | :many, + required(:on_replace) => :raise | :mark_as_invalid | atom, + required(:relationship) => :parent | :child, + required(:ordered) => boolean, + required(:owner) => atom, + required(:related) => atom, + required(:field) => atom, + optional(atom()) => any()} + + @doc """ + Builds the related data. + """ + @callback build(t, owner :: Ecto.Schema.t) :: Ecto.Schema.t + + @doc """ + Returns empty container for relation. + """ + def empty(%{cardinality: cardinality}), do: cardinality_to_empty(cardinality) + + defp cardinality_to_empty(:one), do: nil + defp cardinality_to_empty(:many), do: [] + + @doc """ + Checks if the container can be considered empty. + """ + def empty?(%{cardinality: _}, %NotLoaded{}), do: true + def empty?(%{cardinality: :many}, []), do: true + def empty?(%{cardinality: :many}, changes), do: filter_empty(changes) == [] + def empty?(%{cardinality: :one}, nil), do: true + def empty?(%{}, _), do: false + + @doc """ + Filter empty changes + """ + def filter_empty(changes) do + Enum.filter(changes, fn + %Changeset{action: action} when action in [:replace, :delete] -> false + _ -> true + end) + end + + @doc """ + Applies related changeset changes + """ + def apply_changes(%{cardinality: :one}, nil) do + nil + end + + def apply_changes(%{cardinality: :one}, changeset) do + apply_changes(changeset) + end + + def apply_changes(%{cardinality: :many}, changesets) do + for changeset <- changesets, + struct = apply_changes(changeset), + do: struct + end + + defp apply_changes(%Changeset{action: :delete}), do: nil + defp apply_changes(%Changeset{action: :replace}), do: nil + defp apply_changes(changeset), do: Changeset.apply_changes(changeset) + + @doc """ + Loads the relation with the given struct. + + Loading will fail if the association is not loaded but the struct is. 
+ """ + def load!(%{__meta__: %{state: :built}}, %NotLoaded{__cardinality__: cardinality}) do + cardinality_to_empty(cardinality) + end + + def load!(struct, %NotLoaded{__field__: field}) do + raise "attempting to cast or change association `#{field}` " <> + "from `#{inspect struct.__struct__}` that was not loaded. Please preload your " <> + "associations before manipulating them through changesets" + end + + def load!(_struct, loaded), do: loaded + + @doc """ + Casts related according to the `on_cast` function. + """ + def cast(%{cardinality: :one} = relation, _owner, nil, current, _on_cast) do + case current && on_replace(relation, current) do + :error -> {:error, {"is invalid", [type: expected_type(relation)]}} + _ -> {:ok, nil, true} + end + end + + def cast(%{cardinality: :one} = relation, owner, params, current, on_cast) when is_list(params) do + if Keyword.keyword?(params) do + cast(relation, owner, Map.new(params), current, on_cast) + else + {:error, {"is invalid", [type: expected_type(relation)]}} + end + end + + def cast(%{cardinality: :many} = relation, owner, params, current, on_cast) when is_map(params) do + params = + params + |> Enum.map(&key_as_int/1) + |> Enum.sort + |> Enum.map(&elem(&1, 1)) + cast(relation, owner, params, current, on_cast) + end + + def cast(%{related: mod} = relation, owner, params, current, on_cast) do + pks = mod.__schema__(:primary_key) + fun = &do_cast(relation, owner, &1, &2, &3, on_cast) + data_pk = data_pk(pks) + param_pk = param_pk(mod, pks) + + with :error <- cast_or_change(relation, params, current, data_pk, param_pk, fun) do + {:error, {"is invalid", [type: expected_type(relation)]}} + end + end + + defp do_cast(meta, owner, params, struct, allowed_actions, {module, fun, args}) + when is_atom(module) and is_atom(fun) and is_list(args) do + on_cast = fn changeset, attrs -> + apply(module, fun, [changeset, attrs | args]) + end + + do_cast(meta, owner, params, struct, allowed_actions, on_cast) + end + + defp do_cast(meta, owner, params, nil = _struct, allowed_actions, on_cast) do + {:ok, + on_cast.(meta.__struct__.build(meta, owner), params) + |> put_new_action(:insert) + |> check_action!(allowed_actions)} + end + + defp do_cast(relation, _owner, nil = _params, current, _allowed_actions, _on_cast) do + on_replace(relation, current) + end + + defp do_cast(_meta, _owner, params, struct, allowed_actions, on_cast) do + {:ok, + on_cast.(struct, params) + |> put_new_action(:update) + |> check_action!(allowed_actions)} + end + + @doc """ + Wraps related structs in changesets. + """ + def change(%{cardinality: :one} = relation, nil, current) do + case current && on_replace(relation, current) do + :error -> {:error, {"is invalid", [type: expected_type(relation)]}} + _ -> {:ok, nil, true} + end + end + + def change(%{related: mod} = relation, value, current) do + get_pks = data_pk(mod.__schema__(:primary_key)) + with :error <- cast_or_change(relation, value, current, get_pks, get_pks, + &do_change(relation, &1, &2, &3)) do + {:error, {"is invalid", [type: expected_type(relation)]}} + end + end + + # This may be an insert or an update, get all fields. 
+ defp do_change(relation, %{__struct__: _} = changeset_or_struct, nil, _allowed_actions) do + changeset = Changeset.change(changeset_or_struct) + {:ok, + changeset + |> assert_changeset_struct!(relation) + |> put_new_action(action_from_changeset(changeset, nil))} + end + + defp do_change(relation, nil, current, _allowed_actions) do + on_replace(relation, current) + end + + defp do_change(relation, %Changeset{} = changeset, _current, allowed_actions) do + {:ok, + changeset + |> assert_changeset_struct!(relation) + |> put_new_action(:update) + |> check_action!(allowed_actions)} + end + + defp do_change(_relation, %{__struct__: _} = struct, _current, allowed_actions) do + {:ok, + struct + |> Ecto.Changeset.change + |> put_new_action(:update) + |> check_action!(allowed_actions)} + end + + defp do_change(relation, changes, current, allowed_actions) + when is_list(changes) or is_map(changes) do + changeset = Ecto.Changeset.change(current || relation.__struct__.build(relation, nil), changes) + changeset = put_new_action(changeset, action_from_changeset(changeset, current)) + do_change(relation, changeset, current, allowed_actions) + end + + defp action_from_changeset(%{data: %{__meta__: %{state: state}}}, _current) do + case state do + :built -> :insert + :loaded -> :update + :deleted -> :delete + end + end + + defp action_from_changeset(_, nil) do + :insert + end + + defp action_from_changeset(_, _current) do + :update + end + + defp assert_changeset_struct!(%{data: %{__struct__: mod}} = changeset, %{related: mod}) do + changeset + end + defp assert_changeset_struct!(%{data: data}, %{related: mod}) do + raise ArgumentError, "expected changeset data to be a #{mod} struct, got: #{inspect data}" + end + + @doc """ + Handles the changeset or struct when being replaced. + """ + def on_replace(%{on_replace: :mark_as_invalid}, _changeset_or_struct) do + :error + end + + def on_replace(%{on_replace: :raise, field: name, owner: owner}, _) do + raise """ + you are attempting to change relation #{inspect name} of + #{inspect owner} but the `:on_replace` option of this relation + is set to `:raise`. + + By default it is not possible to replace or delete embeds and + associations during `cast`. Therefore Ecto requires the parameters + given to `cast` to have IDs matching the data currently associated + to #{inspect owner}. Failing to do so results in this error message. + + If you want to replace data or automatically delete any data + not sent to `cast`, please set the appropriate `:on_replace` + option when defining the relation. The docs for `Ecto.Changeset` + covers the supported options in the "Associations, embeds and on + replace" section. + + However, if you don't want to allow data to be replaced or + deleted, only updated, make sure that: + + * If you are attempting to update an existing entry, you + are including the entry primary key (ID) in the data. + + * If you have a relationship with many children, all children + must be given on update. + + """ + end + + def on_replace(_relation, changeset_or_struct) do + {:ok, Changeset.change(changeset_or_struct) |> put_new_action(:replace)} + end + + defp raise_if_updating_with_struct!(%{field: name, owner: owner}, %{__struct__: _} = new) do + raise """ + you have set that the relation #{inspect name} of #{inspect owner} + has `:on_replace` set to `:update` but you are giving it a struct/ + changeset to put_assoc/put_change. 
+ + Since you have set `:on_replace` to `:update`, you are only allowed + to update the existing entry by giving updated fields as a map or + keyword list or set it to nil. + + If you indeed want to replace the existing #{inspect name}, you have + to change the foreign key field directly. + + Got: #{inspect new} + """ + end + + defp raise_if_updating_with_struct!(_, _) do + true + end + + defp cast_or_change(%{cardinality: :one} = relation, value, current, current_pks_fun, new_pks_fun, fun) + when is_map(value) or is_list(value) or is_nil(value) do + single_change(relation, value, current_pks_fun, new_pks_fun, fun, current) + end + + defp cast_or_change(%{cardinality: :many}, [], [], _current_pks, _new_pks, _fun) do + {:ok, [], true} + end + + defp cast_or_change(%{cardinality: :many} = relation, value, current, current_pks_fun, new_pks_fun, fun) + when is_list(value) do + {current_pks, current_map} = process_current(current, current_pks_fun, relation) + %{unique: unique, ordered: ordered} = relation + ordered = if ordered, do: current_pks, else: [] + map_changes(value, new_pks_fun, fun, current_map, [], true, true, unique && %{}, ordered) + end + + defp cast_or_change(_, _, _, _, _, _), do: :error + + # single change + + defp single_change(_relation, nil, _current_pks_fun, _new_pks_fun, fun, current) do + single_change(nil, current, fun, [:update, :delete], false) + end + + defp single_change(_relation, new, _current_pks_fun, _new_pks_fun, fun, nil) do + single_change(new, nil, fun, [:insert], false) + end + + defp single_change(%{on_replace: on_replace} = relation, new, current_pks_fun, new_pks_fun, fun, current) do + pk_values = new_pks_fun.(new) + + if (pk_values == current_pks_fun.(current) and pk_values != []) or + (on_replace == :update and raise_if_updating_with_struct!(relation, new)) do + single_change(new, current, fun, allowed_actions(pk_values), true) + else + case on_replace(relation, current) do + {:ok, _changeset} -> single_change(new, nil, fun, [:insert], false) + :error -> :error + end + end + end + + defp single_change(new, current, fun, allowed_actions, skippable?) do + case fun.(new, current, allowed_actions) do + {:ok, %{action: :ignore}} -> + :ignore + {:ok, changeset} -> + if skippable? and skip?(changeset) do + :ignore + else + {:ok, changeset, changeset.valid?} + end + :error -> + :error + end + end + + # map changes + + defp map_changes([changes | rest], new_pks, fun, current, acc, valid?, skip?, unique, ordered) + when is_map(changes) or is_list(changes) do + pk_values = new_pks.(changes) + {struct, current, allowed_actions} = pop_current(current, pk_values) + + case fun.(changes, struct, allowed_actions) do + {:ok, %{action: :ignore}} -> + ordered = pop_ordered(pk_values, ordered) + map_changes(rest, new_pks, fun, current, acc, valid?, skip?, unique, ordered) + {:ok, changeset} -> + changeset = maybe_add_error_on_pk(changeset, pk_values, unique) + acc = [changeset | acc] + valid? = valid? and changeset.valid? + skip? = (struct != nil) and skip? and skip?(changeset) + unique = unique && Map.put(unique, pk_values, true) + ordered = pop_ordered(pk_values, ordered) + map_changes(rest, new_pks, fun, current, acc, valid?, skip?, unique, ordered) + :error -> + :error + end + end + + defp map_changes([], _new_pks, fun, current, acc, valid?, skip?, _unique, ordered) do + current_structs = Enum.map(current, &elem(&1, 1)) + skip? = skip? and ordered == [] + reduce_delete_changesets(current_structs, fun, Enum.reverse(acc), valid?, skip?) 
+ end + + defp map_changes(_params, _new_pks, _fun, _current, _acc, _valid?, _skip?, _unique, _ordered) do + :error + end + + defp pop_ordered(pk_values, [pk_values | tail]), do: tail + defp pop_ordered(_pk_values, tail), do: tail + + defp maybe_add_error_on_pk(%{data: %{__struct__: schema}} = changeset, pk_values, unique) do + if is_map(unique) and not missing_pks?(pk_values) and Map.has_key?(unique, pk_values) do + Enum.reduce(schema.__schema__(:primary_key), changeset, fn pk, acc -> + Changeset.add_error(acc, pk, "has already been taken") + end) + else + changeset + end + end + + defp missing_pks?(pk_values) do + pk_values == [] or Enum.any?(pk_values, &is_nil/1) + end + + defp allowed_actions(pk_values) do + if Enum.all?(pk_values, &is_nil/1) do + [:insert, :update, :delete] + else + [:update, :delete] + end + end + + defp reduce_delete_changesets([struct | rest], fun, acc, valid?, _skip?) do + case fun.(nil, struct, [:update, :delete]) do + {:ok, changeset} -> + valid? = valid? and changeset.valid? + reduce_delete_changesets(rest, fun, [changeset | acc], valid?, false) + + :error -> + :error + end + end + + defp reduce_delete_changesets([], _fun, _acc, _valid?, true), do: :ignore + defp reduce_delete_changesets([], _fun, acc, valid?, false), do: {:ok, acc, valid?} + + # helpers + + defp check_action!(changeset, allowed_actions) do + action = changeset.action + + cond do + action in allowed_actions -> + changeset + + action == :ignore -> + changeset + + action == :insert -> + raise "cannot insert related #{inspect changeset.data} " <> + "because it is already associated with the given struct" + + action == :replace -> + raise "cannot replace related #{inspect changeset.data}. " <> + "This typically happens when you are calling put_assoc/put_embed " <> + "with the results of a previous put_assoc/put_embed/cast_assoc/cast_embed " <> + "operation, which is not supported. You must call such operations only once " <> + "per embed/assoc, in order for Ecto to track changes efficiently" + + true -> + raise "cannot #{action} related #{inspect changeset.data} because " <> + "it already exists and it is not currently associated with the " <> + "given struct. Ecto forbids casting existing records through " <> + "the association field for security reasons. Instead, set " <> + "the foreign key value accordingly" + end + end + + defp key_as_int({key, val}) when is_binary(key) do + case Integer.parse(key) do + {key, ""} -> {key, val} + _ -> {key, val} + end + end + defp key_as_int(key_val), do: key_val + + defp process_current(nil, _get_pks, _relation), + do: {[], %{}} + defp process_current(current, get_pks, relation) do + {pks, {map, counter}} = + Enum.map_reduce(current, {%{}, 0}, fn struct, {acc, counter} -> + pks = get_pks.(struct) + key = if pks == [], do: map_size(acc), else: pks + {pks, {Map.put(acc, key, struct), counter+ 1}} + end) + + if map_size(map) != counter do + Logger.warn """ + found duplicate primary keys for association/embed `#{inspect(relation.field)}` \ + in `#{inspect(relation.owner)}`. In case of duplicate IDs, only the last entry \ + with the same ID will be kept. 
Make sure that all entries in `#{inspect(relation.field)}` \ + have an ID and the IDs are unique between them + """ + end + + {pks, map} + end + + defp pop_current(current, pk_values) do + case Map.pop(current, pk_values) do + {nil, current} -> {nil, current, [:insert]} + {struct, current} -> {struct, current, allowed_actions(pk_values)} + end + end + + defp data_pk(pks) do + fn + %Changeset{data: data} -> Enum.map(pks, &Map.get(data, &1)) + map when is_map(map) -> Enum.map(pks, &Map.get(map, &1)) + list when is_list(list) -> Enum.map(pks, &Keyword.get(list, &1)) + end + end + + defp param_pk(mod, pks) do + pks = Enum.map(pks, &{&1, Atom.to_string(&1), mod.__schema__(:type, &1)}) + fn params -> + Enum.map pks, fn {atom_key, string_key, type} -> + original = Map.get(params, string_key) || Map.get(params, atom_key) + case Ecto.Type.cast(type, original) do + {:ok, value} -> value + _ -> original + end + end + end + end + + defp put_new_action(%{action: action} = changeset, new_action) when is_nil(action), + do: Map.put(changeset, :action, new_action) + defp put_new_action(changeset, _new_action), + do: changeset + + defp skip?(%{valid?: true, changes: empty, action: :update}) when empty == %{}, + do: true + defp skip?(_changeset), + do: false + + defp expected_type(%{cardinality: :one}), do: :map + defp expected_type(%{cardinality: :many}), do: {:array, :map} + + ## Surface changes on insert + + def surface_changes(%{changes: changes, types: types} = changeset, struct, fields) do + {changes, errors} = + Enum.reduce fields, {changes, []}, fn field, {changes, errors} -> + case {struct, changes, types} do + # User has explicitly changed it + {_, %{^field => _}, _} -> + {changes, errors} + + # Handle associations specially + {_, _, %{^field => {tag, embed_or_assoc}}} when tag in [:assoc, :embed] -> + # This is partly reimplementing the logic behind put_relation + # in Ecto.Changeset but we need to do it in a way where we have + # control over the current value. + value = load!(struct, Map.get(struct, field)) + empty = empty(embed_or_assoc) + case change(embed_or_assoc, value, empty) do + {:ok, change, _} when change != empty -> + {Map.put(changes, field, change), errors} + {:error, error} -> + {changes, [{field, error}]} + _ -> # :ignore or ok with change == empty + {changes, errors} + end + + # Struct has a non nil value + {%{^field => value}, _, %{^field => _}} when value != nil -> + {Map.put(changes, field, value), errors} + + {_, _, _} -> + {changes, errors} + end + end + + case errors do + [] -> %{changeset | changes: changes} + _ -> %{changeset | errors: errors ++ changeset.errors, valid?: false, changes: changes} + end + end +end diff --git a/deps/ecto/lib/ecto/embedded.ex b/deps/ecto/lib/ecto/embedded.ex new file mode 100644 index 0000000..bfd7103 --- /dev/null +++ b/deps/ecto/lib/ecto/embedded.ex @@ -0,0 +1,286 @@ +defmodule Ecto.Embedded do + @moduledoc """ + The embedding struct for `embeds_one` and `embeds_many`. 
+ + Its fields are: + + * `cardinality` - The association cardinality + * `field` - The name of the association field on the schema + * `owner` - The schema where the association was defined + * `related` - The schema that is embedded + * `on_cast` - Function name to call by default when casting embeds + * `on_replace` - The action taken on associations when schema is replaced + + """ + alias __MODULE__ + alias Ecto.Changeset + alias Ecto.Changeset.Relation + + use Ecto.ParameterizedType + + @behaviour Relation + @on_replace_opts [:raise, :mark_as_invalid, :delete] + @embeds_one_on_replace_opts @on_replace_opts ++ [:update] + + defstruct [ + :cardinality, + :field, + :owner, + :related, + :on_cast, + on_replace: :raise, + unique: true, + ordered: true + ] + + ## Parameterized API + + # We treat even embed_many as maps, as that's often the + # most efficient format to encode them in the database. + @impl Ecto.ParameterizedType + def type(_), do: {:map, :any} + + @impl Ecto.ParameterizedType + def init(opts) do + opts = Keyword.put_new(opts, :on_replace, :raise) + cardinality = Keyword.fetch!(opts, :cardinality) + + on_replace_opts = + if cardinality == :one, do: @embeds_one_on_replace_opts, else: @on_replace_opts + + unless opts[:on_replace] in on_replace_opts do + raise ArgumentError, "invalid `:on_replace` option for #{inspect Keyword.fetch!(opts, :field)}. " <> + "The only valid options are: " <> + Enum.map_join(on_replace_opts, ", ", &"`#{inspect &1}`") + end + + struct(%Embedded{}, opts) + end + + @impl Ecto.ParameterizedType + def load(nil, _fun, %{cardinality: :one}), do: {:ok, nil} + + def load(value, fun, %{cardinality: :one, related: schema, field: field}) when is_map(value) do + {:ok, load_field(field, schema, value, fun)} + end + + def load(nil, _fun, %{cardinality: :many}), do: {:ok, []} + + def load(value, fun, %{cardinality: :many, related: schema, field: field}) when is_list(value) do + {:ok, Enum.map(value, &load_field(field, schema, &1, fun))} + end + + def load(_value, _fun, _embed) do + :error + end + + defp load_field(_field, schema, value, loader) when is_map(value) do + Ecto.Schema.Loader.unsafe_load(schema, value, loader) + end + + defp load_field(field, _schema, value, _fun) do + raise ArgumentError, "cannot load embed `#{field}`, expected a map but got: #{inspect value}" + end + + @impl Ecto.ParameterizedType + def dump(nil, _, _), do: {:ok, nil} + + def dump(value, fun, %{cardinality: :one, related: schema, field: field}) when is_map(value) do + {:ok, dump_field(field, schema, value, schema.__schema__(:dump), fun, _one_embed? = true)} + end + + def dump(value, fun, %{cardinality: :many, related: schema, field: field}) when is_list(value) do + types = schema.__schema__(:dump) + {:ok, Enum.map(value, &dump_field(field, schema, &1, types, fun, _one_embed? = false))} + end + + def dump(_value, _fun, _embed) do + :error + end + + defp dump_field(_field, schema, %{__struct__: schema} = struct, types, dumper, _one_embed?) do + Ecto.Schema.Loader.safe_dump(struct, types, dumper) + end + + defp dump_field(field, schema, value, _types, _dumper, one_embed?) 
do + one_or_many = + if one_embed?, + do: "a struct #{inspect schema} value", + else: "a list of #{inspect schema} struct values" + + raise ArgumentError, + "cannot dump embed `#{field}`, expected #{one_or_many} but got: #{inspect value}" + end + + @impl Ecto.ParameterizedType + def cast(nil, %{cardinality: :one}), do: {:ok, nil} + def cast(%{__struct__: schema} = struct, %{cardinality: :one, related: schema}) do + {:ok, struct} + end + + def cast(nil, %{cardinality: :many}), do: {:ok, []} + def cast(value, %{cardinality: :many, related: schema}) when is_list(value) do + if Enum.all?(value, &Kernel.match?(%{__struct__: ^schema}, &1)) do + {:ok, value} + else + :error + end + end + + def cast(_value, _embed) do + :error + end + + @impl Ecto.ParameterizedType + def embed_as(_, _), do: :dump + + ## End of parameterized API + + # Callback invoked by repository to prepare embeds. + # + # It replaces the changesets for embeds inside changes + # by actual structs so it can be dumped by adapters and + # loaded into the schema struct afterwards. + @doc false + def prepare(changeset, embeds, adapter, repo_action) do + %{changes: changes, types: types, repo: repo} = changeset + prepare(Map.take(changes, embeds), types, adapter, repo, repo_action) + end + + defp prepare(embeds, _types, _adapter, _repo, _repo_action) when embeds == %{} do + embeds + end + + defp prepare(embeds, types, adapter, repo, repo_action) do + Enum.reduce embeds, embeds, fn {name, changeset_or_changesets}, acc -> + {:embed, embed} = Map.get(types, name) + Map.put(acc, name, prepare_each(embed, changeset_or_changesets, adapter, repo, repo_action)) + end + end + + defp prepare_each(%{cardinality: :one}, nil, _adapter, _repo, _repo_action) do + nil + end + + defp prepare_each(%{cardinality: :one} = embed, changeset, adapter, repo, repo_action) do + action = check_action!(changeset.action, repo_action, embed) + changeset = run_prepare(changeset, repo) + to_struct(changeset, action, embed, adapter) + end + + defp prepare_each(%{cardinality: :many} = embed, changesets, adapter, repo, repo_action) do + for changeset <- changesets, + action = check_action!(changeset.action, repo_action, embed), + changeset = run_prepare(changeset, repo), + prepared = to_struct(changeset, action, embed, adapter), + do: prepared + end + + defp to_struct(%Changeset{valid?: false}, _action, + %{related: schema}, _adapter) do + raise ArgumentError, "changeset for embedded #{inspect schema} is invalid, " <> + "but the parent changeset was not marked as invalid" + end + + defp to_struct(%Changeset{data: %{__struct__: actual}}, _action, + %{related: expected}, _adapter) when actual != expected do + raise ArgumentError, "expected changeset for embedded schema `#{inspect expected}`, " <> + "got: #{inspect actual}" + end + + defp to_struct(%Changeset{changes: changes, data: schema}, :update, + _embed, _adapter) when changes == %{} do + schema + end + + defp to_struct(%Changeset{}, :delete, _embed, _adapter) do + nil + end + + defp to_struct(%Changeset{data: data} = changeset, action, %{related: schema}, adapter) do + %{data: struct, changes: changes} = changeset = + maybe_surface_changes(changeset, data, schema, action) + + embeds = prepare(changeset, schema.__schema__(:embeds), adapter, action) + + changes + |> Map.merge(embeds) + |> autogenerate_id(struct, action, schema, adapter) + |> autogenerate(action, schema) + |> apply_embeds(struct) + end + + defp maybe_surface_changes(changeset, data, schema, :insert) do + Relation.surface_changes(changeset, data, 
schema.__schema__(:fields)) + end + + defp maybe_surface_changes(changeset, _data, _schema, _action) do + changeset + end + + defp run_prepare(changeset, repo) do + changeset = %{changeset | repo: repo} + + Enum.reduce(Enum.reverse(changeset.prepare), changeset, fn fun, acc -> + case fun.(acc) do + %Ecto.Changeset{} = acc -> acc + other -> + raise "expected function #{inspect fun} given to Ecto.Changeset.prepare_changes/2 " <> + "to return an Ecto.Changeset, got: `#{inspect other}`" + end + end) + end + + defp apply_embeds(changes, struct) do + struct(struct, changes) + end + + defp check_action!(:replace, action, %{on_replace: :delete} = embed), + do: check_action!(:delete, action, embed) + defp check_action!(:update, :insert, %{related: schema}), + do: raise(ArgumentError, "got action :update in changeset for embedded #{inspect schema} while inserting") + defp check_action!(action, _, _), do: action + + defp autogenerate_id(changes, _struct, :insert, schema, adapter) do + case schema.__schema__(:autogenerate_id) do + {key, _source, :binary_id} -> + Map.put_new_lazy(changes, key, fn -> adapter.autogenerate(:embed_id) end) + {_key, :id} -> + raise ArgumentError, "embedded schema `#{inspect schema}` cannot autogenerate `:id` primary keys, " <> + "those are typically used for auto-incrementing constraints. " <> + "Maybe you meant to use `:binary_id` instead?" + nil -> + changes + end + end + + defp autogenerate_id(changes, struct, :update, _schema, _adapter) do + for {_, nil} <- Ecto.primary_key(struct) do + raise Ecto.NoPrimaryKeyValueError, struct: struct + end + changes + end + + defp autogenerate(changes, action, schema) do + autogen_fields = action |> action_to_auto() |> schema.__schema__() + + Enum.reduce(autogen_fields, changes, fn {fields, {mod, fun, args}}, acc -> + case Enum.reject(fields, &Map.has_key?(changes, &1)) do + [] -> + acc + + fields -> + generated = apply(mod, fun, args) + Enum.reduce(fields, acc, &Map.put(&2, &1, generated)) + end + end) + end + + defp action_to_auto(:insert), do: :autogenerate + defp action_to_auto(:update), do: :autoupdate + + @impl Relation + def build(%Embedded{related: related}, _owner) do + related.__struct__ + end +end diff --git a/deps/ecto/lib/ecto/enum.ex b/deps/ecto/lib/ecto/enum.ex new file mode 100644 index 0000000..3fd7d8f --- /dev/null +++ b/deps/ecto/lib/ecto/enum.ex @@ -0,0 +1,198 @@ +defmodule Ecto.Enum do + @moduledoc """ + A custom type that maps atoms to strings or integers. + + `Ecto.Enum` must be used whenever you want to keep atom values in a field. + Since atoms cannot be persisted to the database, `Ecto.Enum` converts them + to a string or an integer when writing to the database and converts them back + to atoms when loading data. It can be used in your schemas as follows: + + # Stored as strings + field :status, Ecto.Enum, values: [:foo, :bar, :baz] + + or + + # Stored as integers + field :status, Ecto.Enum, values: [foo: 1, bar: 2, baz: 5] + + Therefore, the type to be used in your migrations for enum fields depend + on the choice above. For the cases above, one would do, respectively: + + add :status, :string + + or + + add :status, :integer + + Some databases also support enum types, which you could use in combination + with the above. + + Composite types, such as `:array`, are also supported which allow selecting + multiple values per record: + + field :roles, {:array, Ecto.Enum}, values: [:author, :editor, :admin] + + Overall, `:values` must be a list of atoms or a keyword list. 
Values will be + cast to atoms safely and only if the atom exists in the list (otherwise an + error will be raised). Attempting to load any string/integer not represented + by an atom in the list will be invalid. + + The helper function `mappings/2` returns the mappings for a given schema and + field, which can be used in places like form drop-downs. For example, given + the following schema: + + defmodule EnumSchema do + use Ecto.Schema + + schema "my_schema" do + field :my_enum, Ecto.Enum, values: [:foo, :bar, :baz] + end + end + + you can call `mappings/2` like this: + + Ecto.Enum.mappings(EnumSchema, :my_enum) + #=> [foo: "foo", bar: "bar", baz: "baz"] + + If you want the values only, you can use `Ecto.Enum.values/2`, and if you want + the dump values only, you can use `Ecto.Enum.dump_values/2`. + """ + + use Ecto.ParameterizedType + + @impl true + def type(params), do: params.type + + @impl true + def init(opts) do + values = opts[:values] + + {type, mappings} = + cond do + is_list(values) and Enum.all?(values, &is_atom/1) -> + validate_unique!(values) + {:string, Enum.map(values, fn atom -> {atom, to_string(atom)} end)} + + type = Keyword.keyword?(values) and infer_type(Keyword.values(values)) -> + validate_unique!(Keyword.keys(values)) + validate_unique!(Keyword.values(values)) + {type, values} + + true -> + raise ArgumentError, """ + Ecto.Enum types must have a values option specified as a list of atoms or a + keyword list with a mapping from atoms to either integer or string values. + + For example: + + field :my_field, Ecto.Enum, values: [:foo, :bar] + + or + + field :my_field, Ecto.Enum, values: [foo: 1, bar: 2, baz: 5] + """ + end + + on_load = Map.new(mappings, fn {key, val} -> {val, key} end) + on_dump = Map.new(mappings) + on_cast = Map.new(mappings, fn {key, _} -> {Atom.to_string(key), key} end) + + %{on_load: on_load, on_dump: on_dump, on_cast: on_cast, mappings: mappings, type: type} + end + + defp validate_unique!(values) do + if length(Enum.uniq(values)) != length(values) do + raise ArgumentError, """ + Ecto.Enum type values must be unique. 
+ + For example: + + field :my_field, Ecto.Enum, values: [:foo, :bar, :foo] + + is invalid, while + + field :my_field, Ecto.Enum, values: [:foo, :bar, :baz] + + is valid + """ + end + end + + defp infer_type(values) do + cond do + Enum.all?(values, &is_integer/1) -> :integer + Enum.all?(values, &is_binary/1) -> :string + true -> nil + end + end + + @impl true + def cast(nil, _params), do: {:ok, nil} + + def cast(data, params) do + case params do + %{on_load: %{^data => as_atom}} -> {:ok, as_atom} + %{on_dump: %{^data => _}} -> {:ok, data} + %{on_cast: %{^data => as_atom}} -> {:ok, as_atom} + _ -> :error + end + end + + @impl true + def load(nil, _, _), do: {:ok, nil} + + def load(data, _loader, %{on_load: on_load}) do + case on_load do + %{^data => as_atom} -> {:ok, as_atom} + _ -> :error + end + end + + @impl true + def dump(nil, _, _), do: {:ok, nil} + + def dump(data, _dumper, %{on_dump: on_dump}) do + case on_dump do + %{^data => as_string} -> {:ok, as_string} + _ -> :error + end + end + + @impl true + def equal?(a, b, _params), do: a == b + + @impl true + def embed_as(_, _), do: :self + + @doc "Returns the possible values for a given schema and field" + @spec values(module, atom) :: [atom()] + def values(schema, field) do + schema + |> mappings(field) + |> Keyword.keys() + end + + @doc "Returns the possible dump values for a given schema and field" + @spec dump_values(module, atom) :: [String.t()] | [integer()] + def dump_values(schema, field) do + schema + |> mappings(field) + |> Keyword.values() + end + + @doc "Returns the mappings for a given schema and field" + @spec mappings(module, atom) :: Keyword.t() + def mappings(schema, field) do + try do + schema.__changeset__() + rescue + _ in UndefinedFunctionError -> + raise ArgumentError, "#{inspect(schema)} is not an Ecto schema" + else + %{^field => {:parameterized, Ecto.Enum, %{mappings: mappings}}} -> mappings + %{^field => {_, {:parameterized, Ecto.Enum, %{mappings: mappings}}}} -> mappings + %{^field => _} -> raise ArgumentError, "#{field} is not an Ecto.Enum field" + %{} -> raise ArgumentError, "#{field} does not exist" + end + end +end diff --git a/deps/ecto/lib/ecto/exceptions.ex b/deps/ecto/lib/ecto/exceptions.ex new file mode 100644 index 0000000..c2cf301 --- /dev/null +++ b/deps/ecto/lib/ecto/exceptions.ex @@ -0,0 +1,307 @@ +defmodule Ecto.Query.CompileError do + @moduledoc """ + Raised at compilation time when the query cannot be compiled. + """ + defexception [:message] +end + +defmodule Ecto.Query.CastError do + @moduledoc """ + Raised at runtime when a value cannot be cast. + """ + defexception [:type, :value, :message] + + def exception(opts) do + value = Keyword.fetch!(opts, :value) + type = Keyword.fetch!(opts, :type) + msg = Keyword.fetch!(opts, :message) + %__MODULE__{value: value, type: type, message: msg} + end +end + +defmodule Ecto.QueryError do + @moduledoc """ + Raised at runtime when the query is invalid. 
+ """ + defexception [:message] + + def exception(opts) do + message = Keyword.fetch!(opts, :message) + query = Keyword.fetch!(opts, :query) + hint = Keyword.get(opts, :hint) + + message = """ + #{message} in query: + + #{Inspect.Ecto.Query.to_string(query)} + """ + + file = opts[:file] + line = opts[:line] + + message = + if file && line do + relative = Path.relative_to_cwd(file) + Exception.format_file_line(relative, line) <> " " <> message + else + message + end + + message = + if hint do + message <> "\n" <> hint <> "\n" + else + message + end + + %__MODULE__{message: message} + end +end + +defmodule Ecto.SubQueryError do + @moduledoc """ + Raised at runtime when a subquery is invalid. + """ + defexception [:message, :exception] + + def exception(opts) do + exception = Keyword.fetch!(opts, :exception) + query = Keyword.fetch!(opts, :query) + + message = """ + the following exception happened when compiling a subquery. + + #{Exception.format(:error, exception, []) |> String.replace("\n", "\n ")} + + The subquery originated from the following query: + + #{Inspect.Ecto.Query.to_string(query)} + """ + + %__MODULE__{message: message, exception: exception} + end +end + +defmodule Ecto.InvalidChangesetError do + @moduledoc """ + Raised when we cannot perform an action because the + changeset is invalid. + """ + defexception [:action, :changeset] + + def message(%{action: action, changeset: changeset}) do + changes = extract_changes(changeset) + errors = Ecto.Changeset.traverse_errors(changeset, & &1) + + """ + could not perform #{action} because changeset is invalid. + + Errors + + #{pretty errors} + + Applied changes + + #{pretty changes} + + Params + + #{pretty changeset.params} + + Changeset + + #{pretty changeset} + """ + end + + defp pretty(term) do + inspect(term, pretty: true) + |> String.split("\n") + |> Enum.map_join("\n", &" " <> &1) + end + + defp extract_changes(%Ecto.Changeset{changes: changes}) do + Enum.reduce(changes, %{}, fn({key, value}, acc) -> + case value do + %Ecto.Changeset{action: :delete} -> acc + _ -> Map.put(acc, key, extract_changes(value)) + end + end) + end + defp extract_changes([%Ecto.Changeset{action: :delete} | tail]), + do: extract_changes(tail) + defp extract_changes([%Ecto.Changeset{} = changeset | tail]), + do: [extract_changes(changeset) | extract_changes(tail)] + defp extract_changes(other), + do: other +end + +defmodule Ecto.CastError do + @moduledoc """ + Raised when a changeset can't cast a value. + """ + defexception [:message, :type, :value] + + def exception(opts) do + type = Keyword.fetch!(opts, :type) + value = Keyword.fetch!(opts, :value) + msg = opts[:message] || "cannot cast #{inspect value} to #{inspect type}" + %__MODULE__{message: msg, type: type, value: value} + end +end + +defmodule Ecto.InvalidURLError do + defexception [:message, :url] + + def exception(opts) do + url = Keyword.fetch!(opts, :url) + msg = Keyword.fetch!(opts, :message) + msg = "invalid url #{url}, #{msg}. 
The parsed URL is: #{inspect(URI.parse(url))}" + %__MODULE__{message: msg, url: url} + end +end + +defmodule Ecto.NoPrimaryKeyFieldError do + @moduledoc """ + Raised at runtime when an operation that requires a primary key is invoked + with a schema that does not define a primary key by using `@primary_key false` + """ + defexception [:message, :schema] + + def exception(opts) do + schema = Keyword.fetch!(opts, :schema) + message = "schema `#{inspect schema}` has no primary key" + %__MODULE__{message: message, schema: schema} + end +end + +defmodule Ecto.NoPrimaryKeyValueError do + @moduledoc """ + Raised at runtime when an operation that requires a primary key is invoked + with a schema missing value for its primary key + """ + defexception [:message, :struct] + + def exception(opts) do + struct = Keyword.fetch!(opts, :struct) + message = "struct `#{inspect struct}` is missing primary key value" + %__MODULE__{message: message, struct: struct} + end +end + +defmodule Ecto.ChangeError do + defexception [:message] +end + +defmodule Ecto.NoResultsError do + defexception [:message] + + def exception(opts) do + query = Keyword.fetch!(opts, :queryable) |> Ecto.Queryable.to_query + + msg = """ + expected at least one result but got none in query: + + #{Inspect.Ecto.Query.to_string(query)} + """ + + %__MODULE__{message: msg} + end +end + +defmodule Ecto.MultipleResultsError do + defexception [:message] + + def exception(opts) do + query = Keyword.fetch!(opts, :queryable) |> Ecto.Queryable.to_query + count = Keyword.fetch!(opts, :count) + + msg = """ + expected at most one result but got #{count} in query: + + #{Inspect.Ecto.Query.to_string(query)} + """ + + %__MODULE__{message: msg} + end +end + +defmodule Ecto.MultiplePrimaryKeyError do + defexception [:message] + + def exception(opts) do + operation = Keyword.fetch!(opts, :operation) + source = Keyword.fetch!(opts, :source) + params = Keyword.fetch!(opts, :params) + count = Keyword.fetch!(opts, :count) + + msg = """ + expected #{operation} on #{source} to return at most one entry but got #{count} entries. + + This typically means the field(s) set as primary_key in your schema/source + are not enough to uniquely identify entries in the repository. + + Those are the parameters sent to the repository: + + #{inspect params} + """ + + %__MODULE__{message: msg} + end +end + +defmodule Ecto.MigrationError do + defexception [:message] +end + +defmodule Ecto.StaleEntryError do + defexception [:message, :changeset] + + def exception(opts) do + action = Keyword.fetch!(opts, :action) + changeset = Keyword.fetch!(opts, :changeset) + + msg = """ + attempted to #{action} a stale struct: + + #{inspect changeset.data} + """ + + %__MODULE__{message: msg, changeset: changeset} + end +end + +defmodule Ecto.ConstraintError do + defexception [:type, :constraint, :message] + + def exception(opts) do + type = Keyword.fetch!(opts, :type) + constraint = Keyword.fetch!(opts, :constraint) + changeset = Keyword.fetch!(opts, :changeset) + action = Keyword.fetch!(opts, :action) + + constraints = + case changeset.constraints do + [] -> + "The changeset has not defined any constraint." 
+ constraints -> + "The changeset defined the following constraints:\n\n" <> + Enum.map_join(constraints, "\n", &" * #{&1.constraint} (#{&1.type}_constraint)") + end + + msg = """ + constraint error when attempting to #{action} struct: + + * #{constraint} (#{type}_constraint) + + If you would like to stop this constraint violation from raising an + exception and instead add it as an error to your changeset, please + call `#{type}_constraint/3` on your changeset with the constraint + `:name` as an option. + + #{constraints} + """ + + %__MODULE__{message: msg, type: type, constraint: constraint} + end +end diff --git a/deps/ecto/lib/ecto/json.ex b/deps/ecto/lib/ecto/json.ex new file mode 100644 index 0000000..8318abb --- /dev/null +++ b/deps/ecto/lib/ecto/json.ex @@ -0,0 +1,42 @@ +if Code.ensure_loaded?(Jason.Encoder) do + defimpl Jason.Encoder, for: Ecto.Association.NotLoaded do + def encode(%{__owner__: owner, __field__: field}, _) do + raise """ + cannot encode association #{inspect(field)} from #{inspect(owner)} to \ + JSON because the association was not loaded. + + You can either preload the association: + + Repo.preload(#{inspect(owner)}, #{inspect(field)}) + + Or choose to not encode the association when converting the struct \ + to JSON by explicitly listing the JSON fields in your schema: + + defmodule #{inspect(owner)} do + # ... + + @derive {Jason.Encoder, only: [:name, :title, ...]} + schema ... do + """ + end + end + + defimpl Jason.Encoder, for: Ecto.Schema.Metadata do + def encode(%{schema: schema}, _) do + raise """ + cannot encode metadata from the :__meta__ field for #{inspect(schema)} \ + to JSON. This metadata is used internally by Ecto and should never be \ + exposed externally. + + You can either map the schemas to remove the :__meta__ field before \ + encoding to JSON, or explicit list the JSON fields in your schema: + + defmodule #{inspect(schema)} do + # ... + + @derive {Jason.Encoder, only: [:name, :title, ...]} + schema ... do + """ + end + end +end diff --git a/deps/ecto/lib/ecto/multi.ex b/deps/ecto/lib/ecto/multi.ex new file mode 100644 index 0000000..35fa969 --- /dev/null +++ b/deps/ecto/lib/ecto/multi.ex @@ -0,0 +1,877 @@ +defmodule Ecto.Multi do + @moduledoc """ + `Ecto.Multi` is a data structure for grouping multiple Repo operations. + + `Ecto.Multi` makes it possible to pack operations that should be + performed in a single database transaction and gives a way to introspect + the queued operations without actually performing them. Each operation + is given a name that is unique and will identify its result in case of + success or failure. + + If a multi is valid (i.e. all the changesets in it are valid), + all operations will be executed in the order they were added. + + The `Ecto.Multi` structure should be considered opaque. You can use + `%Ecto.Multi{}` to pattern match the type, but accessing fields or + directly modifying them is not advised. + + `Ecto.Multi.to_list/1` returns a canonical representation of the + structure that can be used for introspection. + + ## Changesets + + If multi contains operations that accept changesets (like `insert/4`, + `update/4` or `delete/4`) they will be checked before starting the + transaction. If any changeset has errors, the transaction won't even + be started and the error will be immediately returned. + + Note: `insert/4`, `update/4`, `insert_or_update/4`, and `delete/4` + variants that accept a function are not performing such checks since + the functions are executed after the transaction has started. 
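+
+  As a sketch of the check described above (assuming `invalid_changeset`
+  is a changeset that failed validation), the transaction is never started
+  and the error is returned right away:
+
+      {:error, :user, %Ecto.Changeset{valid?: false}, %{}} =
+        Ecto.Multi.new()
+        |> Ecto.Multi.insert(:user, invalid_changeset)
+        |> Repo.transaction()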
+ + ## Run + + Multi allows you to run arbitrary functions as part of your transaction + via `run/3` and `run/5`. This is especially useful when an operation + depends on the value of a previous operation. For this reason, the + function given as a callback to `run/3` and `run/5` will receive the repo + as the first argument, and all changes performed by the multi so far as a + map for the second argument. + + The function given to `run` must return `{:ok, value}` or `{:error, value}` + as its result. Returning an error will abort any further operations + and make the whole multi fail. + + ## Example + + Let's look at an example definition and usage. The use case we'll be + looking into is resetting a password. We need to update the account + with proper information, log the request and remove all current sessions: + + defmodule PasswordManager do + alias Ecto.Multi + + def reset(account, params) do + Multi.new() + |> Multi.update(:account, Account.password_reset_changeset(account, params)) + |> Multi.insert(:log, Log.password_reset_changeset(account, params)) + |> Multi.delete_all(:sessions, Ecto.assoc(account, :sessions)) + end + end + + We can later execute it in the integration layer using Repo: + + Repo.transaction(PasswordManager.reset(account, params)) + + By pattern matching on the result we can differentiate different conditions: + + case result do + {:ok, %{account: account, log: log, sessions: sessions}} -> + # Operation was successful, we can access results (exactly the same + # we would get from running corresponding Repo functions) under keys + # we used for naming the operations. + {:error, failed_operation, failed_value, changes_so_far} -> + # One of the operations failed. We can access the operation's failure + # value (like changeset for operations on changesets) to prepare a + # proper response. We also get access to the results of any operations + # that succeeded before the indicated operation failed. However, any + # successful operations would have been rolled back. + end + + We can also easily unit test our transaction without actually running it. + Since changesets can use in-memory-data, we can use an account that is + constructed in memory as well (without persisting it to the database): + + test "dry run password reset" do + account = %Account{password: "letmein"} + multi = PasswordManager.reset(account, params) + + assert [ + {:account, {:update, account_changeset, []}}, + {:log, {:insert, log_changeset, []}}, + {:sessions, {:delete_all, query, []}} + ] = Ecto.Multi.to_list(multi) + + # We can introspect changesets and query to see if everything + # is as expected, for example: + assert account_changeset.valid? + assert log_changeset.valid? + assert inspect(query) == "#Ecto.Query" + end + + The name of each operation does not have to be an atom. 
This can be particularly + useful when you wish to update a collection of changesets at once, and track their + errors individually: + + accounts = [%Account{id: 1}, %Account{id: 2}] + + Enum.reduce(accounts, Multi.new(), fn account, multi -> + Multi.update( + multi, + {:account, account.id}, + Account.password_reset_changeset(account, params) + ) + end) + """ + + alias __MODULE__ + alias Ecto.Changeset + + defstruct operations: [], names: MapSet.new() + + @type changes :: map + @type run :: ((Ecto.Repo.t, changes) -> {:ok | :error, any}) | {module, atom, [any]} + @type fun(result) :: (changes -> result) + @type merge :: (changes -> t) | {module, atom, [any]} + @typep schema_or_source :: binary | {binary, module} | module + @typep operation :: {:changeset, Changeset.t, Keyword.t} | + {:run, run} | + {:put, any} | + {:inspect, Keyword.t} | + {:merge, merge} | + {:update_all, Ecto.Query.t, Keyword.t} | + {:delete_all, Ecto.Query.t, Keyword.t} | + {:insert_all, schema_or_source, [map | Keyword.t], Keyword.t} + @typep operations :: [{name, operation}] + @typep names :: MapSet.t + @type name :: any + @type t :: %__MODULE__{operations: operations, names: names} + + @doc """ + Returns an empty `Ecto.Multi` struct. + + ## Example + + iex> Ecto.Multi.new() |> Ecto.Multi.to_list() + [] + + """ + @spec new :: t + def new do + %Multi{} + end + + @doc """ + Appends the second multi to the first one. + + All names must be unique between both structures. + + ## Example + + iex> lhs = Ecto.Multi.new() |> Ecto.Multi.run(:left, fn _, changes -> {:ok, changes} end) + iex> rhs = Ecto.Multi.new() |> Ecto.Multi.run(:right, fn _, changes -> {:error, changes} end) + iex> Ecto.Multi.append(lhs, rhs) |> Ecto.Multi.to_list |> Keyword.keys + [:left, :right] + + """ + @spec append(t, t) :: t + def append(lhs, rhs) do + merge_structs(lhs, rhs, &(&2 ++ &1)) + end + + @doc """ + Prepends the second multi to the first one. + + All names must be unique between both structures. + + ## Example + + iex> lhs = Ecto.Multi.new() |> Ecto.Multi.run(:left, fn _, changes -> {:ok, changes} end) + iex> rhs = Ecto.Multi.new() |> Ecto.Multi.run(:right, fn _, changes -> {:error, changes} end) + iex> Ecto.Multi.prepend(lhs, rhs) |> Ecto.Multi.to_list |> Keyword.keys + [:right, :left] + + """ + @spec prepend(t, t) :: t + def prepend(lhs, rhs) do + merge_structs(lhs, rhs, &(&1 ++ &2)) + end + + defp merge_structs(%Multi{} = lhs, %Multi{} = rhs, joiner) do + %{names: lhs_names, operations: lhs_ops} = lhs + %{names: rhs_names, operations: rhs_ops} = rhs + case MapSet.intersection(lhs_names, rhs_names) |> MapSet.to_list do + [] -> + %Multi{names: MapSet.union(lhs_names, rhs_names), + operations: joiner.(lhs_ops, rhs_ops)} + common -> + raise ArgumentError, """ + error when merging the following Ecto.Multi structs: + + #{Kernel.inspect lhs} + + #{Kernel.inspect rhs} + + both declared operations: #{Kernel.inspect common} + """ + end + end + + @doc """ + Merges a multi returned dynamically by an anonymous function. + + This function is useful when the multi to be merged requires information + from the original multi. Hence the second argument is an anonymous function + that receives the multi changes so far. The anonymous function must return + another multi. + + If you would prefer to simply merge two multis together, see `append/2` or + `prepend/2`. + + Duplicated operations are not allowed. 
+ + ## Example + + multi = + Ecto.Multi.new() + |> Ecto.Multi.insert(:post, %Post{title: "first"}) + + multi + |> Ecto.Multi.merge(fn %{post: post} -> + Ecto.Multi.new() + |> Ecto.Multi.insert(:comment, Ecto.build_assoc(post, :comments)) + end) + |> MyApp.Repo.transaction() + """ + @spec merge(t, (changes -> t)) :: t + def merge(%Multi{} = multi, merge) when is_function(merge, 1) do + Map.update!(multi, :operations, &[{:merge, {:merge, merge}} | &1]) + end + + @doc """ + Merges a multi returned dynamically by calling `module` and `function` with `args`. + + Similar to `merge/2`, but allows to pass module name, function and arguments. + The function should return an `Ecto.Multi`, and receives changes so far + as the first argument (prepended to those passed in the call to the function). + + Duplicated operations are not allowed. + """ + @spec merge(t, module, function, args) :: t when function: atom, args: [any] + def merge(%Multi{} = multi, mod, fun, args) + when is_atom(mod) and is_atom(fun) and is_list(args) do + Map.update!(multi, :operations, &[{:merge, {:merge, {mod, fun, args}}} | &1]) + end + + @doc """ + Adds an insert operation to the multi. + + Accepts the same arguments and options as `c:Ecto.Repo.insert/2` does. + + ## Example + + Ecto.Multi.new() + |> Ecto.Multi.insert(:insert, %Post{title: "first"}) + |> MyApp.Repo.transaction() + + Ecto.Multi.new() + |> Ecto.Multi.insert(:post, %Post{title: "first"}) + |> Ecto.Multi.insert(:comment, fn %{post: post} -> + Ecto.build_assoc(post, :comments) + end) + |> MyApp.Repo.transaction() + + """ + @spec insert(t, name, Changeset.t | Ecto.Schema.t | fun(Changeset.t | Ecto.Schema.t), Keyword.t) :: t + def insert(multi, name, changeset_or_struct_or_fun, opts \\ []) + + def insert(multi, name, %Changeset{} = changeset, opts) do + add_changeset(multi, :insert, name, changeset, opts) + end + + def insert(multi, name, %_{} = struct, opts) do + insert(multi, name, Changeset.change(struct), opts) + end + + def insert(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:insert, fun}, opts)) + end + + @doc """ + Adds an update operation to the multi. + + Accepts the same arguments and options as `c:Ecto.Repo.update/2` does. + + ## Example + + post = MyApp.Repo.get!(Post, 1) + changeset = Ecto.Changeset.change(post, title: "New title") + Ecto.Multi.new() + |> Ecto.Multi.update(:update, changeset) + |> MyApp.Repo.transaction() + + Ecto.Multi.new() + |> Ecto.Multi.insert(:post, %Post{title: "first"}) + |> Ecto.Multi.update(:fun, fn %{post: post} -> + Ecto.Changeset.change(post, title: "New title") + end) + |> MyApp.Repo.transaction() + + """ + @spec update(t, name, Changeset.t | fun(Changeset.t), Keyword.t) :: t + def update(multi, name, changeset_or_fun, opts \\ []) + + def update(multi, name, %Changeset{} = changeset, opts) do + add_changeset(multi, :update, name, changeset, opts) + end + + def update(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:update, fun}, opts)) + end + + @doc """ + Inserts or updates a changeset depending on whether the changeset was persisted or not. + + Accepts the same arguments and options as `c:Ecto.Repo.insert_or_update/2` does. 
+ + ## Example + + changeset = Post.changeset(%Post{}, %{title: "New title"}) + Ecto.Multi.new() + |> Ecto.Multi.insert_or_update(:insert_or_update, changeset) + |> MyApp.Repo.transaction() + + Ecto.Multi.new() + |> Ecto.Multi.run(:post, fn repo, _changes -> + {:ok, repo.get(Post, 1) || %Post{}} + end) + |> Ecto.Multi.insert_or_update(:update, fn %{post: post} -> + Ecto.Changeset.change(post, title: "New title") + end) + |> MyApp.Repo.transaction() + + """ + @spec insert_or_update(t, name, Changeset.t | fun(Changeset.t), Keyword.t) :: t + def insert_or_update(multi, name, changeset_or_fun, opts \\ []) + + def insert_or_update(multi, name, %Changeset{data: %{__meta__: %{state: :loaded}}} = changeset, opts) do + add_changeset(multi, :update, name, changeset, opts) + end + + def insert_or_update(multi, name, %Changeset{} = changeset, opts) do + add_changeset(multi, :insert, name, changeset, opts) + end + + def insert_or_update(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:insert_or_update, fun}, opts)) + end + + @doc """ + Adds a delete operation to the multi. + + Accepts the same arguments and options as `c:Ecto.Repo.delete/2` does. + + ## Example + + post = MyApp.Repo.get!(Post, 1) + Ecto.Multi.new() + |> Ecto.Multi.delete(:delete, post) + |> MyApp.Repo.transaction() + + Ecto.Multi.new() + |> Ecto.Multi.run(:post, fn repo, _changes -> + case repo.get(Post, 1) do + nil -> {:error, :not_found} + post -> {:ok, post} + end + end) + |> Ecto.Multi.delete(:delete, fn %{post: post} -> + # Others validations + post + end) + |> MyApp.Repo.transaction() + + """ + @spec delete(t, name, Changeset.t | Ecto.Schema.t | fun(Changeset.t | Ecto.Schema.t), Keyword.t) :: t + def delete(multi, name, changeset_or_struct_fun, opts \\ []) + + def delete(multi, name, %Changeset{} = changeset, opts) do + add_changeset(multi, :delete, name, changeset, opts) + end + + def delete(multi, name, %_{} = struct, opts) do + delete(multi, name, Changeset.change(struct), opts) + end + + def delete(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:delete, fun}, opts)) + end + + @doc """ + Runs a query expecting one result and stores it in the multi. + + Accepts the same arguments and options as `c:Ecto.Repo.one/2`. + + ## Example + + Ecto.Multi.new() + |> Ecto.Multi.one(:post, Post) + |> MyApp.Repo.transaction() + """ + @spec one( + t, + name, + queryable :: Ecto.Queryable.t | (any -> Ecto.Queryable.t), + opts :: Keyword.t + ) :: t + def one(multi, name, queryable_or_fun, opts \\ []) + + def one(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:one, fun}, opts)) + end + + def one(multi, name, queryable, opts) do + run(multi, name, operation_fun({:one, fn _ -> queryable end}, opts)) + end + + @doc """ + Runs a query and stores all entries in the multi. + + Accepts the same arguments and options as `c:Ecto.Repo.all/2` does. 
+ + ## Example + + Ecto.Multi.new() + |> Ecto.Multi.all(:all, Post) + |> MyApp.Repo.transaction() + """ + @spec all( + t, + name, + queryable :: Ecto.Queryable.t | (any -> Ecto.Queryable.t), + opts :: Keyword.t + ) :: t + def all(multi, name, queryable_or_fun, opts \\ []) + + def all(multi, name, fun, opts) when is_function(fun, 1) do + run(multi, name, operation_fun({:all, fun}, opts)) + end + + def all(multi, name, queryable, opts) do + run(multi, name, operation_fun({:all, fn _ -> queryable end}, opts)) + end + + defp add_changeset(multi, action, name, changeset, opts) when is_list(opts) do + add_operation(multi, name, {:changeset, put_action(changeset, action), opts}) + end + + defp put_action(%{action: nil} = changeset, action) do + %{changeset | action: action} + end + + defp put_action(%{action: action} = changeset, action) do + changeset + end + + defp put_action(%{action: original}, action) do + raise ArgumentError, "you provided a changeset with an action already set " <> + "to #{Kernel.inspect original} when trying to #{action} it" + end + + @doc """ + Causes the multi to fail with the given value. + + Running the multi in a transaction will execute + no previous steps and returns the value of the first + error added. + """ + @spec error(t, name, error :: term) :: t + def error(multi, name, value) do + add_operation(multi, name, {:error, value}) + end + + @doc """ + Adds a function to run as part of the multi. + + The function should return either `{:ok, value}` or `{:error, value}`, + and receives the repo as the first argument, and the changes so far + as the second argument. + + ## Example + + Ecto.Multi.run(multi, :write, fn _repo, %{image: image} -> + with :ok <- File.write(image.name, image.contents) do + {:ok, nil} + end + end) + """ + @spec run(t, name, run) :: t + def run(multi, name, run) when is_function(run, 2) do + add_operation(multi, name, {:run, run}) + end + + @doc """ + Adds a function to run as part of the multi. + + Similar to `run/3`, but allows to pass module name, function and arguments. + The function should return either `{:ok, value}` or `{:error, value}`, and + receives the repo as the first argument, and the changes so far as the + second argument (prepended to those passed in the call to the function). + """ + @spec run(t, name, module, function, args) :: t when function: atom, args: [any] + def run(multi, name, mod, fun, args) + when is_atom(mod) and is_atom(fun) and is_list(args) do + add_operation(multi, name, {:run, {mod, fun, args}}) + end + + @doc """ + Adds an insert_all operation to the multi. + + Accepts the same arguments and options as `c:Ecto.Repo.insert_all/3` does. 
+ + ## Example + + posts = [%{title: "My first post"}, %{title: "My second post"}] + Ecto.Multi.new() + |> Ecto.Multi.insert_all(:insert_all, Post, posts) + |> MyApp.Repo.transaction() + + Ecto.Multi.new() + |> Ecto.Multi.run(:post, fn repo, _changes -> + case repo.get(Post, 1) do + nil -> {:error, :not_found} + post -> {:ok, post} + end + end) + |> Ecto.Multi.insert_all(:insert_all, Comment, fn %{post: post} -> + # Others validations + + entries + |> Enum.map(fn comment -> + Map.put(comment, :post_id, post.id) + end) + end) + |> MyApp.Repo.transaction() + + """ + @spec insert_all( + t, + name, + schema_or_source, + entries_or_query_or_fun :: [map | Keyword.t()] | fun([map | Keyword.t()]) | Ecto.Query.t(), + Keyword.t() + ) :: t + def insert_all(multi, name, schema_or_source, entries_or_query_or_fun, opts \\ []) + + def insert_all(multi, name, schema_or_source, entries_fun, opts) + when is_function(entries_fun, 1) and is_list(opts) do + run(multi, name, operation_fun({:insert_all, schema_or_source, entries_fun}, opts)) + end + + def insert_all(multi, name, schema_or_source, entries_or_query, opts) when is_list(opts) do + add_operation(multi, name, {:insert_all, schema_or_source, entries_or_query, opts}) + end + + @doc """ + Adds an update_all operation to the multi. + + Accepts the same arguments and options as `c:Ecto.Repo.update_all/3` does. + + ## Example + + Ecto.Multi.new() + |> Ecto.Multi.update_all(:update_all, Post, set: [title: "New title"]) + |> MyApp.Repo.transaction() + + Ecto.Multi.new() + |> Ecto.Multi.run(:post, fn repo, _changes -> + case repo.get(Post, 1) do + nil -> {:error, :not_found} + post -> {:ok, post} + end + end) + |> Ecto.Multi.update_all(:update_all, fn %{post: post} -> + # Others validations + from(c in Comment, where: c.post_id == ^post.id, update: [set: [title: "New title"]]) + end, []) + |> MyApp.Repo.transaction() + + """ + @spec update_all(t, name, Ecto.Queryable.t | fun(Ecto.Queryable.t), Keyword.t, Keyword.t) :: t + def update_all(multi, name, queryable_or_fun, updates, opts \\ []) + + def update_all(multi, name, queryable_fun, updates, opts) when is_function(queryable_fun, 1) and is_list(opts) do + run(multi, name, operation_fun({:update_all, queryable_fun, updates}, opts)) + end + + def update_all(multi, name, queryable, updates, opts) when is_list(opts) do + query = Ecto.Queryable.to_query(queryable) + add_operation(multi, name, {:update_all, query, updates, opts}) + end + + @doc """ + Adds a delete_all operation to the multi. + + Accepts the same arguments and options as `c:Ecto.Repo.delete_all/2` does. 
+ + ## Example + + queryable = from(p in Post, where: p.id < 5) + Ecto.Multi.new() + |> Ecto.Multi.delete_all(:delete_all, queryable) + |> MyApp.Repo.transaction() + + Ecto.Multi.new() + |> Ecto.Multi.run(:post, fn repo, _changes -> + case repo.get(Post, 1) do + nil -> {:error, :not_found} + post -> {:ok, post} + end + end) + |> Ecto.Multi.delete_all(:delete_all, fn %{post: post} -> + # Others validations + from(c in Comment, where: c.post_id == ^post.id) + end) + |> MyApp.Repo.transaction() + + """ + @spec delete_all(t, name, Ecto.Queryable.t | fun(Ecto.Queryable.t), Keyword.t) :: t + def delete_all(multi, name, queryable_or_fun, opts \\ []) + + def delete_all(multi, name, fun, opts) when is_function(fun, 1) and is_list(opts) do + run(multi, name, operation_fun({:delete_all, fun}, opts)) + end + + def delete_all(multi, name, queryable, opts) when is_list(opts) do + query = Ecto.Queryable.to_query(queryable) + add_operation(multi, name, {:delete_all, query, opts}) + end + + defp add_operation(%Multi{} = multi, name, operation) do + %{operations: operations, names: names} = multi + if MapSet.member?(names, name) do + raise "#{Kernel.inspect name} is already a member of the Ecto.Multi: \n#{Kernel.inspect multi}" + else + %{multi | operations: [{name, operation} | operations], + names: MapSet.put(names, name)} + end + end + + @doc """ + Returns the list of operations stored in `multi`. + + Always use this function when you need to access the operations you + have defined in `Ecto.Multi`. Inspecting the `Ecto.Multi` struct internals + directly is discouraged. + """ + @spec to_list(t) :: [{name, term}] + def to_list(%Multi{operations: operations}) do + operations + |> Enum.reverse + |> Enum.map(&format_operation/1) + end + + defp format_operation({name, {:changeset, changeset, opts}}), + do: {name, {changeset.action, changeset, opts}} + defp format_operation(other), + do: other + + @doc """ + Adds a value to the changes so far under the given name. + + The given `value` is added to the multi before the transaction starts. + If you would like to run arbitrary functions as part of your transaction, + see `run/3` or `run/5`. + + ## Example + + Imagine there is an existing company schema that you retrieved from + the database. You can insert it as a change in the multi using `put/3`: + + Ecto.Multi.new() + |> Ecto.Multi.put(:company, company) + |> Ecto.Multi.insert(:user, fn changes -> User.changeset(changes.company) end) + |> Ecto.Multi.insert(:person, fn changes -> Person.changeset(changes.user, changes.company) end) + |> MyApp.Repo.transaction() + + In the example above there isn't a large benefit in putting the + `company` in the multi, because you could also access the + `company` variable directly inside the anonymous function. + + However, the benefit of `put/3` is when composing `Ecto.Multi`s. + If the insert operations above were defined in another module, + you could use `put(:company, company)` to inject changes that + will be accessed by other functions down the chain, removing + the need to pass both `multi` and `company` values around. + """ + @spec put(t, name, any) :: t + def put(multi, name, value) do + add_operation(multi, name, {:put, value}) + end + + @doc """ + Inspects results from a Multi + + By default, the name is shown as a label to the inspect, custom labels are + supported through the `IO.inspect/2` `label` option. 
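+
+  For instance, a sketch of passing a custom label through to `IO.inspect/2`
+  (reusing the `changeset` from the examples below):
+
+      Ecto.Multi.new()
+      |> Ecto.Multi.insert(:person_a, changeset)
+      |> Ecto.Multi.inspect(label: "changes so far")
+      |> MyApp.Repo.transaction()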
+ + ## Options + + All options for IO.inspect/2 are supported, it also support the following ones: + + * `:only` - A field or a list of fields to inspect, will print the entire + map by default. + + ## Examples + + Ecto.Multi.new() + |> Ecto.Multi.insert(:person_a, changeset) + |> Ecto.Multi.insert(:person_b, changeset) + |> Ecto.Multi.inspect() + |> MyApp.Repo.transaction() + + Prints: + %{person_a: %Person{...}, person_b: %Person{...}} + + We can use the `:only` option to limit which fields will be printed: + + Ecto.Multi.new() + |> Ecto.Multi.insert(:person_a, changeset) + |> Ecto.Multi.insert(:person_b, changeset) + |> Ecto.Multi.inspect(only: :person_a) + |> MyApp.Repo.transaction() + + Prints: + %{person_a: %Person{...}} + + """ + @spec inspect(t, Keyword.t) :: t + def inspect(multi, opts \\ []) do + Map.update!(multi, :operations, &[{:inspect, {:inspect, opts}} | &1]) + end + + @doc false + @spec __apply__(t, Ecto.Repo.t, fun, (term -> no_return)) :: {:ok, term} | {:error, term} + def __apply__(%Multi{} = multi, repo, wrap, return) do + operations = Enum.reverse(multi.operations) + + with {:ok, operations} <- check_operations_valid(operations) do + apply_operations(operations, multi.names, repo, wrap, return) + end + end + + defp check_operations_valid(operations) do + Enum.find_value(operations, &invalid_operation/1) || {:ok, operations} + end + + defp invalid_operation({name, {:changeset, %{valid?: false} = changeset, _}}), + do: {:error, {name, changeset, %{}}} + defp invalid_operation({name, {:error, value}}), + do: {:error, {name, value, %{}}} + defp invalid_operation(_operation), + do: nil + + defp apply_operations([], _names, _repo, _wrap, _return), do: {:ok, %{}} + defp apply_operations(operations, names, repo, wrap, return) do + wrap.(fn -> + operations + |> Enum.reduce({%{}, names}, &apply_operation(&1, repo, wrap, return, &2)) + |> elem(0) + end) + end + + defp apply_operation({_, {:merge, merge}}, repo, wrap, return, {acc, names}) do + case __apply__(apply_merge_fun(merge, acc), repo, wrap, return) do + {:ok, value} -> + merge_results(acc, value, names) + {:error, {name, value, nested_acc}} -> + {acc, _names} = merge_results(acc, nested_acc, names) + return.({name, value, acc}) + end + end + + defp apply_operation({_name, {:inspect, opts}}, _repo, _wrap_, _return, {acc, names}) do + if opts[:only] do + acc |> Map.take(List.wrap(opts[:only])) |> IO.inspect(opts) + else + IO.inspect(acc, opts) + end + + {acc, names} + end + + defp apply_operation({name, operation}, repo, wrap, return, {acc, names}) do + case apply_operation(operation, acc, {wrap, return}, repo) do + {:ok, value} -> + {Map.put(acc, name, value), names} + {:error, value} -> + return.({name, value, acc}) + other -> + raise "expected Ecto.Multi callback named `#{Kernel.inspect name}` to return either {:ok, value} or {:error, value}, got: #{Kernel.inspect other}" + end + end + + defp apply_operation({:changeset, changeset, opts}, _acc, _apply_args, repo), + do: apply(repo, changeset.action, [changeset, opts]) + defp apply_operation({:run, run}, acc, _apply_args, repo), + do: apply_run_fun(run, repo, acc) + defp apply_operation({:error, value}, _acc, _apply_args, _repo), + do: {:error, value} + defp apply_operation({:insert_all, source, entries, opts}, _acc, _apply_args, repo), + do: {:ok, repo.insert_all(source, entries, opts)} + defp apply_operation({:update_all, query, updates, opts}, _acc, _apply_args, repo), + do: {:ok, repo.update_all(query, updates, opts)} + defp apply_operation({:delete_all, query, 
opts}, _acc, _apply_args, repo), + do: {:ok, repo.delete_all(query, opts)} + defp apply_operation({:put, value}, _acc, _apply_args, _repo), + do: {:ok, value} + + defp apply_merge_fun({mod, fun, args}, acc), do: apply(mod, fun, [acc | args]) + defp apply_merge_fun(fun, acc), do: apply(fun, [acc]) + + defp apply_run_fun({mod, fun, args}, repo, acc), do: apply(mod, fun, [repo, acc | args]) + defp apply_run_fun(fun, repo, acc), do: apply(fun, [repo, acc]) + + defp merge_results(changes, new_changes, names) do + new_names = new_changes |> Map.keys |> MapSet.new() + case MapSet.intersection(names, new_names) |> MapSet.to_list do + [] -> + {Map.merge(changes, new_changes), MapSet.union(names, new_names)} + common -> + raise "cannot merge multi, the following operations were found in " <> + "both Ecto.Multi: #{Kernel.inspect common}" + end + end + + defp operation_fun({:update_all, queryable_fun, updates}, opts) do + fn repo, changes -> + {:ok, repo.update_all(queryable_fun.(changes), updates, opts)} + end + end + + defp operation_fun({:insert_all, schema_or_source, entries_fun}, opts) do + fn repo, changes -> + {:ok, repo.insert_all(schema_or_source, entries_fun.(changes), opts)} + end + end + + defp operation_fun({:delete_all, fun}, opts) do + fn repo, changes -> + {:ok, repo.delete_all(fun.(changes), opts)} + end + end + + defp operation_fun({:one, fun}, opts) do + fn repo, changes -> + {:ok, repo.one(fun.(changes), opts)} + end + end + + defp operation_fun({:one!, fun}, opts) do + fn repo, changes -> + {:ok, repo.one!(fun.(changes), opts)} + end + end + + defp operation_fun({:all, fun}, opts) do + fn repo, changes -> + {:ok, repo.all(fun.(changes), opts)} + end + end + + defp operation_fun({operation, fun}, opts) do + fn repo, changes -> + apply(repo, operation, [fun.(changes), opts]) + end + end +end diff --git a/deps/ecto/lib/ecto/parameterized_type.ex b/deps/ecto/lib/ecto/parameterized_type.ex new file mode 100644 index 0000000..79bbee7 --- /dev/null +++ b/deps/ecto/lib/ecto/parameterized_type.ex @@ -0,0 +1,197 @@ +defmodule Ecto.ParameterizedType do + @moduledoc """ + Parameterized types are Ecto types that can be customized per field. + + Parameterized types allow a set of options to be specified in the schema + which are initialized on compilation and passed to the callback functions + as the last argument. + + For example, `field :foo, :string` behaves the same for every field. + On the other hand, `field :foo, Ecto.Enum, values: [:foo, :bar, :baz]` + will likely have a different set of values per field. + + Note that options are specified as a keyword, but it is idiomatic to + convert them to maps inside `c:init/1` for easier pattern matching in + other callbacks. + + Parameterized types are a superset of regular types. In other words, + with parameterized types you can do everything a regular type does, + and more. For example, parameterized types can handle `nil` values + in both `load` and `dump` callbacks, they can customize `cast` behavior + per query and per changeset, and also control how values are embedded. + + However, parameterized types are also more complex. Therefore, if + everything you need to achieve can be done with basic types, they + should be preferred to parameterized ones. + + ## Examples + + To create a parameterized type, create a module as shown below: + + defmodule MyApp.MyType do + use Ecto.ParameterizedType + + def type(_params), do: :string + + def init(opts) do + validate_opts(opts) + Enum.into(opts, %{}) + end + + def cast(data, params) do + ... 
+ cast_data + end + + def load(data, _loader, params) do + ... + {:ok, loaded_data} + end + + def dump(data, dumper, params) do + ... + {:ok, dumped_data} + end + + def equal?(a, b, _params) do + a == b + end + end + + To use this type in a schema field, specify the type and parameters like this: + + schema "foo" do + field :bar, MyApp.MyType, opt1: :baz, opt2: :boo + end + + To use this type in places where you need it to be initialized (for example, + schemaless changesets), you can use `init/2`. + """ + + @typedoc """ + The keyword options passed from the Schema's field macro into `c:init/1` + """ + @type opts :: keyword() + + @typedoc """ + The parameters for the ParameterizedType + + This is the value passed back from `c:init/1` and subsequently passed + as the last argument to all callbacks. Idiomatically it is a map. + """ + @type params :: term() + + @doc """ + Callback to convert the options specified in the field macro into parameters + to be used in other callbacks. + + This function is called at compile time, and should raise if invalid values are + specified. It is idiomatic that the parameters returned from this are a map. + `field` and `schema` will be injected into the options automatically. + + For example, this schema specification + + schema "my_table" do + field :my_field, MyParameterizedType, opt1: :foo, opt2: nil + end + + will result in the call: + + MyParameterizedType.init([schema: "my_table", field: :my_field, opt1: :foo, opt2: nil]) + + """ + @callback init(opts :: opts()) :: params() + + @doc """ + Casts the given input to the ParameterizedType with the given parameters. + + If the parameterized type is also a composite type, + the inner type can be cast by calling `Ecto.Type.cast/2` + directly. + + For more information on casting, see `c:Ecto.Type.cast/1`. + """ + @callback cast(data :: term, params()) :: + {:ok, term} | :error | {:error, keyword()} + + @doc """ + Loads the given term into a ParameterizedType. + + It receives a `loader` function in case the parameterized + type is also a composite type. In order to load the inner + type, the `loader` must be called with the inner type and + the inner value as argument. + + For more information on loading, see `c:Ecto.Type.load/1`. + Note that this callback *will* be called when loading a `nil` + value, unlike `c:Ecto.Type.load/1`. + """ + @callback load(value :: any(), loader :: function(), params()) :: {:ok, value :: any()} | :error + + @doc """ + Dumps the given term into an Ecto native type. + + It receives a `dumper` function in case the parameterized + type is also a composite type. In order to dump the inner + type, the `dumper` must be called with the inner type and + the inner value as argument. + + For more information on dumping, see `c:Ecto.Type.dump/1`. + Note that this callback *will* be called when dumping a `nil` + value, unlike `c:Ecto.Type.dump/1`. + """ + @callback dump(value :: any(), dumper :: function(), params()) :: {:ok, value :: any()} | :error + + @doc """ + Returns the underlying schema type for the ParameterizedType. + + For more information on schema types, see `c:Ecto.Type.type/0` + """ + @callback type(params()) :: Ecto.Type.t() + + @doc """ + Checks if two terms are semantically equal. + """ + @callback equal?(value1 :: any(), value2 :: any(), params()) :: boolean() + + @doc """ + Dictates how the type should be treated inside embeds. 
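+
+  For example, a type may keep its cast representation inside embeds for one
+  format and use the dumped value otherwise (a sketch only, assuming the
+  adapter passes the `:json` format):
+
+      def embed_as(:json, _params), do: :self
+      def embed_as(_format, _params), do: :dump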
+ + For more information on embedding, see `c:Ecto.Type.embed_as/1` + """ + @callback embed_as(format :: atom(), params()) :: :self | :dump + + @doc """ + Generates a loaded version of the data. + + This is callback is invoked when a parameterized type is given + to `field` with the `:autogenerate` flag. + """ + @callback autogenerate(params()) :: term() + + @optional_callbacks autogenerate: 1 + + @doc """ + Inits a parameterized type given by `type` with `opts`. + + Useful when manually initializing a type for schemaless changesets. + """ + def init(type, opts) do + {:parameterized, type, type.init(opts)} + end + + @doc false + defmacro __using__(_) do + quote location: :keep do + @behaviour Ecto.ParameterizedType + + @doc false + def embed_as(_, _), do: :self + + @doc false + def equal?(term1, term2, _params), do: term1 == term2 + + defoverridable embed_as: 2, equal?: 3 + end + end +end diff --git a/deps/ecto/lib/ecto/query.ex b/deps/ecto/lib/ecto/query.ex new file mode 100644 index 0000000..7c22a14 --- /dev/null +++ b/deps/ecto/lib/ecto/query.ex @@ -0,0 +1,2152 @@ +defmodule Ecto.SubQuery do + @moduledoc """ + A struct representing subqueries. + + See `Ecto.Query.subquery/2` for more information. + """ + defstruct [:query, :params, :select, :cache] + + @type t :: %__MODULE__{} +end + +defmodule Ecto.Query do + @moduledoc ~S""" + Provides the Query DSL. + + Queries are used to retrieve and manipulate data from a repository + (see `Ecto.Repo`). Ecto queries come in two flavors: keyword-based + and macro-based. Most examples will use the keyword-based syntax, + the macro one will be explored in later sections. + + Let's see a sample query: + + # Imports only from/2 of Ecto.Query + import Ecto.Query, only: [from: 2] + + # Create a query + query = from u in "users", + where: u.age > 18, + select: u.name + + # Send the query to the repository + Repo.all(query) + + In the example above, we are directly querying the "users" table + from the database. + + ## Query expressions + + Ecto allows a limited set of expressions inside queries. In the + query below, for example, we use `u.age` to access a field, the + `>` comparison operator and the literal `0`: + + query = from u in "users", where: u.age > 0, select: u.name + + You can find the full list of operations in `Ecto.Query.API`. + Besides the operations listed there, the following literals are + supported in queries: + + * Integers: `1`, `2`, `3` + * Floats: `1.0`, `2.0`, `3.0` + * Booleans: `true`, `false` + * Binaries: `<<1, 2, 3>>` + * Strings: `"foo bar"`, `~s(this is a string)` + * Atoms (other than booleans and `nil`): `:foo`, `:bar` + * Arrays: `[1, 2, 3]`, `~w(interpolate words)` + + All other types and dynamic values must be passed as a parameter using + interpolation as explained below. + + ## Interpolation and casting + + External values and Elixir expressions can be injected into a query + expression with `^`: + + def with_minimum(age, height_ft) do + from u in "users", + where: u.age > ^age and u.height > ^(height_ft * 3.28), + select: u.name + end + + with_minimum(18, 5.0) + + When interpolating values, you may want to explicitly tell Ecto + what is the expected type of the value being interpolated: + + age = "18" + Repo.all(from u in "users", + where: u.age > type(^age, :integer), + select: u.name) + + In the example above, Ecto will cast the age to type integer. When + a value cannot be cast, `Ecto.Query.CastError` is raised. + + To avoid the repetition of always specifying the types, you may define + an `Ecto.Schema`. 
In such cases, Ecto will analyze your queries and + automatically cast the interpolated "age" when compared to the `u.age` + field, as long as the age field is defined with type `:integer` in + your schema: + + age = "18" + Repo.all(from u in User, where: u.age > ^age, select: u.name) + + Another advantage of using schemas is that we no longer need to specify + the select option in queries, as by default Ecto will retrieve all + fields specified in the schema: + + age = "18" + Repo.all(from u in User, where: u.age > ^age) + + For this reason, we will use schemas on the remaining examples but + remember Ecto does not require them in order to write queries. + + ## `nil` comparison + + `nil` comparison in filters, such as where and having, is forbidden + and it will raise an error: + + # Raises if age is nil + from u in User, where: u.age == ^age + + This is done as a security measure to avoid attacks that attempt + to traverse entries with nil columns. To check that value is `nil`, + use `is_nil/1` instead: + + from u in User, where: is_nil(u.age) + + ## Composition + + Ecto queries are composable. For example, the query above can + actually be defined in two parts: + + # Create a query + query = from u in User, where: u.age > 18 + + # Extend the query + query = from u in query, select: u.name + + Composing queries uses the same syntax as creating a query. + The difference is that, instead of passing a schema like `User` + on the right-hand side of `in`, we passed the query itself. + + Any value can be used on the right-hand side of `in` as long as it implements + the `Ecto.Queryable` protocol. For now, we know the protocol is + implemented for both atoms (like `User`) and strings (like "users"). + + In any case, regardless if a schema has been given or not, Ecto + queries are always composable thanks to its binding system. + + ### Positional bindings + + On the left-hand side of `in` we specify the query bindings. This is + done inside `from` and `join` clauses. In the query below `u` is a + binding and `u.age` is a field access using this binding. + + query = from u in User, where: u.age > 18 + + Bindings are not exposed from the query. When composing queries, you + must specify bindings again for each refinement query. For example, + to further narrow down the above query, we again need to tell Ecto what + bindings to expect: + + query = from u in query, select: u.city + + Bindings in Ecto are positional, and the names do not have to be + consistent between input and refinement queries. For example, the + query above could also be written as: + + query = from q in query, select: q.city + + It would make no difference to Ecto. This is important because + it allows developers to compose queries without caring about + the bindings used in the initial query. + + When using joins, the bindings should be matched in the order they + are specified: + + # Create a query + query = from p in Post, + join: c in Comment, on: c.post_id == p.id + + # Extend the query + query = from [p, c] in query, + select: {p.title, c.body} + + You are not required to specify all bindings when composing. + For example, if we would like to order the results above by + post insertion date, we could further extend it as: + + query = from q in query, order_by: q.inserted_at + + The example above will work if the input query has 1 or 10 + bindings. As long as the number of bindings is less than the + number of `from`s + `join`s, Ecto will match only what you have + specified. 
The first binding always matches the source given + in `from`. + + Similarly, if you are interested only in the last binding + (or the last bindings) in a query, you can use `...` to + specify "all bindings before" and match on the last one. + + For instance, imagine you wrote: + + posts_with_comments = + from p in query, join: c in Comment, on: c.post_id == p.id + + And now we want to make sure to return both the post title + and the comment body. Although we may not know how many + bindings there are in the query, we are sure posts is the + first binding and comments are the last one, so we can write: + + from [p, ..., c] in posts_with_comments, select: {p.title, c.body} + + In other words, `...` will include all the bindings between the + first and the last, which may be one, many or no bindings at all. + + ### Named bindings + + Another option for flexibly building queries with joins are named + bindings. Coming back to the previous example, we can use the + `as: :comment` option to bind the comments join to a concrete name: + + posts_with_comments = + from p in Post, + join: c in Comment, as: :comment, on: c.post_id == p.id + + Now we can refer to it using the following form of a bindings list: + + from [p, comment: c] in posts_with_comments, select: {p.title, c.body} + + This approach lets us not worry about keeping track of the position + of the bindings when composing the query. The `:as` option can be + given both on joins and on `from`: + + from p in Post, as: :post + + Only atoms are accepted for binding names. Named binding references + must always be placed at the end of the bindings list: + + [positional_binding_1, positional_binding_2, named_1: binding, named_2: binding] + + Named bindings can also be used for late binding with the `as/1` + construct, allowing you to refer to a binding that has not been + defined yet: + + from c in Comment, where: as(:posts).id == c.post_id + + This is especially useful when working with subqueries, where you + may need to refer to a parent binding with `parent_as`, which is + not known when writing the subquery: + + child_query = from c in Comment, where: parent_as(:posts).id == c.post_id + from p in Post, as: :posts, inner_lateral_join: c in subquery(child_query) + + You can also match on a specific binding when building queries. For + example, let's suppose you want to create a generic sort function + that will order by a given `field` with a given `as` in `query`: + + # Knowing the name of the binding + def sort(query, as, field) do + from [{^as, x}] in query, order_by: field(x, ^field) + end + + ### Bindingless operations + + Although bindings are extremely useful when working with joins, + they are not necessary when the query has only the `from` clause. + For such cases, Ecto supports a way for building queries + without specifying the binding: + + from Post, + where: [category: "fresh and new"], + order_by: [desc: :published_at], + select: [:id, :title, :body] + + The query above will select all posts with category "fresh and new", + order by the most recently published, and return Post structs with + only the id, title and body fields set. It is equivalent to: + + from p in Post, + where: p.category == "fresh and new", + order_by: [desc: p.published_at], + select: struct(p, [:id, :title, :body]) + + One advantage of bindingless queries is that they are data-driven + and therefore useful for dynamically building queries. 
For example, + the query above could also be written as: + + where = [category: "fresh and new"] + order_by = [desc: :published_at] + select = [:id, :title, :body] + from Post, where: ^where, order_by: ^order_by, select: ^select + + This feature is very useful when queries need to be built based + on some user input, like web search forms, CLIs and so on. + + ## Fragments + + If you need an escape hatch, Ecto provides fragments + (see `Ecto.Query.API.fragment/1`) to inject SQL (and non-SQL) + fragments into queries. + + For example, to get all posts while running the "lower(?)" + function in the database where `p.title` is interpolated + in place of `?`, one can write: + + from p in Post, + where: is_nil(p.published_at) and + fragment("lower(?)", p.title) == ^title + + Also, most adapters provide direct APIs for queries, like + `Ecto.Adapters.SQL.query/4`, allowing developers to + completely bypass Ecto queries. + + ## Macro API + + In all examples so far we have used the **keywords query syntax** to + create a query: + + import Ecto.Query + from u in "users", where: u.age > 18, select: u.name + + Due to the prevalence of the pipe operator in Elixir, Ecto also supports + a pipe-based syntax: + + "users" + |> where([u], u.age > 18) + |> select([u], u.name) + + The keyword-based and pipe-based examples are equivalent. The downside + of using macros is that the binding must be specified for every operation. + However, since keyword-based and pipe-based examples are equivalent, the + bindingless syntax also works for macros: + + "users" + |> where([u], u.age > 18) + |> select([:name]) + + Such a syntax allows developers to write queries using bindings only in more + complex query expressions. + + This module documents each of those macros, providing examples in + both the keywords query and pipe expression formats. + + ## Query prefix + + It is possible to set a prefix for the queries. For Postgres users, + this will specify the schema where the table is located, while for + MySQL users this will specify the database where the table is + located. When no prefix is set, Postgres queries are assumed to be + in the public schema, while MySQL queries are assumed to be in the + database set in the config for the repo. + + The query prefix may be set either for the whole query or on each + individual `from` and `join` expression. If a `prefix` is not given + to a `from` or a `join`, the prefix of the schema given to the `from` + or `join` is used. The query prefix is used only if none of the above + are declared. + + Let's see some examples. To see the query prefix globally, the simplest + mechanism is to pass an option to the repository operation: + + results = Repo.all(query, prefix: "accounts") + + You may also set the prefix for the whole query by setting the prefix field: + + results = + query # May be User or an Ecto.Query itself + |> Ecto.Query.put_query_prefix("accounts") + |> Repo.all() + + Setting the prefix in the query changes the default prefix of all `from` + and `join` expressions. You can override the query prefix by either setting + the `@schema_prefix` in your schema definitions or by passing the prefix + option: + + from u in User, + prefix: "accounts", + join: p in assoc(u, :posts), + prefix: "public" + + Overall, here is the prefix lookup precedence: + + 1. The `:prefix` option given to `from`/`join` has the highest precedence + 2. Then it falls back to the `@schema_prefix` attribute declared in the schema + given to `from`/`join` + 3. 
Then it falls back to the query prefix + + The prefixes set in the query will be preserved when loading data. + """ + + defstruct [prefix: nil, sources: nil, from: nil, joins: [], aliases: %{}, wheres: [], select: nil, + order_bys: [], limit: nil, offset: nil, group_bys: [], combinations: [], updates: [], + havings: [], preloads: [], assocs: [], distinct: nil, lock: nil, windows: [], + with_ctes: nil] + + defmodule FromExpr do + @moduledoc false + defstruct [:source, :as, :prefix, hints: []] + end + + defmodule DynamicExpr do + @moduledoc false + defstruct [:fun, :binding, :file, :line] + end + + defmodule QueryExpr do + @moduledoc false + defstruct [:expr, :file, :line, params: []] + end + + defmodule BooleanExpr do + @moduledoc false + defstruct [:op, :expr, :file, :line, params: [], subqueries: []] + end + + defmodule SelectExpr do + @moduledoc false + defstruct [:expr, :file, :line, :fields, params: [], take: %{}] + end + + defmodule JoinExpr do + @moduledoc false + defstruct [:qual, :source, :on, :file, :line, :assoc, :as, :ix, :prefix, params: [], hints: []] + end + + defmodule WithExpr do + @moduledoc false + defstruct [recursive: false, queries: []] + end + + defmodule Tagged do + @moduledoc false + # * value is the tagged value + # * tag is the directly tagged value, like Ecto.UUID + # * type is the underlying tag type, like :string + defstruct [:value, :tag, :type] + end + + @type t :: %__MODULE__{} + @opaque dynamic :: %DynamicExpr{} + + alias Ecto.Query.Builder + + @doc """ + Builds a dynamic query expression. + + Dynamic query expressions allow developers to compose query + expressions bit by bit, so that they can be interpolated into + parts of a query or another dynamic expression later on. + + ## Examples + + Imagine you have a set of conditions you want to build your query on: + + conditions = false + + conditions = + if params["is_public"] do + dynamic([p], p.is_public or ^conditions) + else + conditions + end + + conditions = + if params["allow_reviewers"] do + dynamic([p, a], a.reviewer == true or ^conditions) + else + conditions + end + + from query, where: ^conditions + + In the example above, we were able to build the query expressions + bit by bit, using different bindings, and later interpolate it all + at once into the actual query. + + A dynamic expression can always be interpolated inside another dynamic + expression and into the constructs described below. + + ## `where`, `having` and a `join`'s `on` + + The `dynamic` macro can be interpolated at the root of a `where`, + `having` or a `join`'s `on`. + + For example, assuming the `conditions` variable defined in the + previous section, the following is forbidden because it is not + at the root of a `where`: + + from q in query, where: q.some_condition and ^conditions + + Fortunately that's easily solved by simply rewriting it to: + + conditions = dynamic([q], q.some_condition and ^conditions) + from query, where: ^conditions + + ## `order_by` + + Dynamics can be interpolated inside keyword lists at the root of + `order_by`. For example, you can write: + + order_by = [ + asc: :some_field, + desc: dynamic([p], fragment("?>>?", p.another_field, "json_key")) + ] + + from query, order_by: ^order_by + + Dynamics are also supported in `order_by/2` clauses inside `windows/2`. + + As with `where` and friends, it is not possible to pass dynamics + outside of a root. 
For example, this won't work: + + from query, order_by: [asc: ^dynamic(...)] + + But this will: + + from query, order_by: ^[asc: dynamic(...)] + + ## `group_by` + + Dynamics can be interpolated inside keyword lists at the root of + `group_by`. For example, you can write: + + group_by = [ + :some_field, + dynamic([p], fragment("?>>?", p.another_field, "json_key")) + ] + + from query, group_by: ^group_by + + Dynamics are also supported in `partition_by/2` clauses inside `windows/2`. + + As with `where` and friends, it is not possible to pass dynamics + outside of a root. For example, this won't work: + + from query, group_by: [:some_field, ^dynamic(...)] + + But this will: + + from query, group_by: ^[:some_field, dynamic(...)] + + ## Updates + + A `dynamic` is also supported inside updates, for example: + + updates = [ + set: [average: dynamic([p], p.sum / p.count)] + ] + + from query, update: ^updates + """ + defmacro dynamic(binding \\ [], expr) do + Builder.Dynamic.build(binding, expr, __CALLER__) + end + + @doc """ + Defines windows which can be used with `Ecto.Query.WindowAPI`. + + Receives a keyword list where keys are names of the windows + and values are a keyword list with window expressions. + + ## Examples + + # Compare each employee's salary with the average salary in his or her department + from e in Employee, + select: {e.depname, e.empno, e.salary, over(avg(e.salary), :department)}, + windows: [department: [partition_by: e.depname]] + + In the example above, we get the average salary per department. + `:department` is the window name, partitioned by `e.depname` + and `avg/1` is the window function. For more information + on windows functions, see `Ecto.Query.WindowAPI`. + + ## Window expressions + + The following keys are allowed when specifying a window. + + ### :partition_by + + A list of fields to partition the window by, for example: + + windows: [department: [partition_by: e.depname]] + + A list of atoms can also be interpolated for dynamic partitioning: + + fields = [:depname, :year] + windows: [dynamic_window: [partition_by: ^fields]] + + ### :order_by + + A list of fields to order the window by, for example: + + windows: [ordered_names: [order_by: e.name]] + + It works exactly as the keyword query version of `order_by/3`. + + ### :frame + + A fragment which defines the frame for window functions. + + ## Examples + + # Compare each employee's salary for each month with his average salary for previous 3 months + from p in Payroll, + select: {p.empno, p.date, p.salary, over(avg(p.salary), :prev_months)}, + windows: [prev_months: [partition_by: p.empno, order_by: p.date, frame: fragment("ROWS 3 PRECEDING EXCLUDE CURRENT ROW")]] + + """ + defmacro windows(query, binding \\ [], expr) do + Builder.Windows.build(query, binding, expr, __CALLER__) + end + + @doc """ + Converts a query into a subquery. + + If a subquery is given, returns the subquery itself. + If any other value is given, it is converted to a query via + `Ecto.Queryable` and wrapped in the `Ecto.SubQuery` struct. + + `subquery` is supported in `from`, `join`, and `where`, in the + form `p.x in subquery(q)`. 
+ + ## Examples + + # Get the average salary of the top 10 highest salaries + query = from Employee, order_by: [desc: :salary], limit: 10 + from e in subquery(query), select: avg(e.salary) + + A prefix can be specified for a subquery, similar to standard repo operations: + + query = from Employee, order_by: [desc: :salary], limit: 10 + from e in subquery(query, prefix: "my_prefix"), select: avg(e.salary) + + + Subquery can also be used in a `join` expression. + + UPDATE posts + SET sync_started_at = $1 + WHERE id IN ( + SELECT id FROM posts + WHERE synced = false AND (sync_started_at IS NULL OR sync_started_at < $1) + LIMIT $2 + ) + + We can write it as a join expression: + + subset = from(p in Post, + where: p.synced == false and + (is_nil(p.sync_started_at) or p.sync_started_at < ^min_sync_started_at), + limit: ^batch_size + ) + + Repo.update_all( + from(p in Post, join: s in subquery(subset), on: s.id == p.id), + set: [sync_started_at: NaiveDateTime.utc_now()] + ) + + Or as a `where` condition: + + subset_ids = from(p in subset, select: p.id) + Repo.update_all( + from(p in Post, where: p.id in subquery(subset_ids)), + set: [sync_started_at: NaiveDateTime.utc_now()] + ) + + If you need to refer to a parent binding which is not known when writing the subquery, + you can use `parent_as` as shown in the examples under "Named bindings" in this module doc. + """ + def subquery(query, opts \\ []) do + subquery = wrap_in_subquery(query) + case Keyword.fetch(opts, :prefix) do + {:ok, prefix} when is_binary(prefix) or is_nil(prefix) -> put_in(subquery.query.prefix, prefix) + :error -> subquery + end + end + + defp wrap_in_subquery(%Ecto.SubQuery{} = subquery), do: subquery + defp wrap_in_subquery(%Ecto.Query{} = query), do: %Ecto.SubQuery{query: query} + defp wrap_in_subquery(queryable), do: %Ecto.SubQuery{query: Ecto.Queryable.to_query(queryable)} + + @joins [:join, :inner_join, :cross_join, :left_join, :right_join, :full_join, + :inner_lateral_join, :left_lateral_join] + + @doc """ + Puts the given prefix in a query. + """ + def put_query_prefix(%Ecto.Query{} = query, prefix) when is_binary(prefix) do + %{query | prefix: prefix} + end + + def put_query_prefix(other, prefix) when is_binary(prefix) do + other |> Ecto.Queryable.to_query() |> put_query_prefix(prefix) + end + + @doc """ + Resets a previously set field on a query. + + It can reset many fields except the query source (`from`). When excluding + a `:join`, it will remove *all* types of joins. If you prefer to remove a + single type of join, please see paragraph below. 
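+
+  For instance, one common use (sketched here assuming `import Ecto.Query` and a
+  `query` with a single binding and an `id` primary key built elsewhere) is to
+  reuse a listing query for counting by excluding the clauses that do not apply
+  to a count:
+
+      query
+      |> exclude(:order_by)
+      |> exclude(:select)
+      |> select([q], count(q.id))
+      |> Repo.one()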
+ + ## Examples + + Ecto.Query.exclude(query, :join) + Ecto.Query.exclude(query, :where) + Ecto.Query.exclude(query, :order_by) + Ecto.Query.exclude(query, :group_by) + Ecto.Query.exclude(query, :having) + Ecto.Query.exclude(query, :distinct) + Ecto.Query.exclude(query, :select) + Ecto.Query.exclude(query, :combinations) + Ecto.Query.exclude(query, :with_ctes) + Ecto.Query.exclude(query, :limit) + Ecto.Query.exclude(query, :offset) + Ecto.Query.exclude(query, :lock) + Ecto.Query.exclude(query, :preload) + + You can also remove specific joins as well such as `left_join` and + `inner_join`: + + Ecto.Query.exclude(query, :inner_join) + Ecto.Query.exclude(query, :cross_join) + Ecto.Query.exclude(query, :left_join) + Ecto.Query.exclude(query, :right_join) + Ecto.Query.exclude(query, :full_join) + Ecto.Query.exclude(query, :inner_lateral_join) + Ecto.Query.exclude(query, :left_lateral_join) + + However, keep in mind that if a join is removed and its bindings + were referenced elsewhere, the bindings won't be removed, leading + to a query that won't compile. + """ + def exclude(%Ecto.Query{} = query, field), do: do_exclude(query, field) + def exclude(query, field), do: do_exclude(Ecto.Queryable.to_query(query), field) + + defp do_exclude(%Ecto.Query{} = query, :join) do + %{query | joins: [], aliases: Map.take(query.aliases, [query.from.as])} + end + defp do_exclude(%Ecto.Query{} = query, join_keyword) when join_keyword in @joins do + qual = join_qual(join_keyword) + {excluded, remaining} = Enum.split_with(query.joins, &(&1.qual == qual)) + aliases = Map.drop(query.aliases, Enum.map(excluded, & &1.as)) + %{query | joins: remaining, aliases: aliases} + end + defp do_exclude(%Ecto.Query{} = query, :where), do: %{query | wheres: []} + defp do_exclude(%Ecto.Query{} = query, :order_by), do: %{query | order_bys: []} + defp do_exclude(%Ecto.Query{} = query, :group_by), do: %{query | group_bys: []} + defp do_exclude(%Ecto.Query{} = query, :combinations), do: %{query | combinations: []} + defp do_exclude(%Ecto.Query{} = query, :with_ctes), do: %{query | with_ctes: nil} + defp do_exclude(%Ecto.Query{} = query, :having), do: %{query | havings: []} + defp do_exclude(%Ecto.Query{} = query, :distinct), do: %{query | distinct: nil} + defp do_exclude(%Ecto.Query{} = query, :select), do: %{query | select: nil} + defp do_exclude(%Ecto.Query{} = query, :limit), do: %{query | limit: nil} + defp do_exclude(%Ecto.Query{} = query, :offset), do: %{query | offset: nil} + defp do_exclude(%Ecto.Query{} = query, :lock), do: %{query | lock: nil} + defp do_exclude(%Ecto.Query{} = query, :preload), do: %{query | preloads: [], assocs: []} + + @doc """ + Creates a query. + + It can either be a keyword query or a query expression. + + If it is a keyword query the first argument must be + either an `in` expression, or a value that implements + the `Ecto.Queryable` protocol. If the query needs a + reference to the data source in any other part of the + expression, then an `in` must be used to create a reference + variable. The second argument should be a keyword query + where the keys are expression types and the values are + expressions. + + If it is a query expression the first argument must be + a value that implements the `Ecto.Queryable` protocol + and the second argument the expression. 
+ + ## Keywords example + + from(c in City, select: c) + + ## Expressions example + + City |> select([c], c) + + ## Examples + + def paginate(query, page, size) do + from query, + limit: ^size, + offset: ^((page-1) * size) + end + + The example above does not use `in` because `limit` and `offset` + do not require a reference to the data source. However, extending + the query with a where expression would require the use of `in`: + + def published(query) do + from p in query, where: not(is_nil(p.published_at)) + end + + Notice we have created a `p` variable to reference the query's + original data source. This assumes that the original query + only had one source. When the given query has more than one source, + positional or named bindings may be used to access the additional sources. + + def published_multi(query) do + from [p,o] in query, + where: not(is_nil(p.published_at)) and not(is_nil(o.published_at)) + end + + Note that the variables `p` and `o` can be named whatever you like + as they have no importance in the query sent to the database. + """ + defmacro from(expr, kw \\ []) do + unless Keyword.keyword?(kw) do + raise ArgumentError, "second argument to `from` must be a compile time keyword list" + end + + {kw, as, prefix, hints} = collect_as_and_prefix_and_hints(kw, nil, nil, nil) + {quoted, binds, count_bind} = Builder.From.build(expr, __CALLER__, as, prefix, hints) + from(kw, __CALLER__, count_bind, quoted, to_query_binds(binds)) + end + + @from_join_opts [:as, :prefix, :hints] + @no_binds [:union, :union_all, :except, :except_all, :intersect, :intersect_all] + @binds [:lock, :where, :or_where, :select, :distinct, :order_by, :group_by, :windows] ++ + [:having, :or_having, :limit, :offset, :preload, :update, :select_merge, :with_ctes] + + defp from([{type, expr}|t], env, count_bind, quoted, binds) when type in @binds do + # If all bindings are integer indexes keep AST Macro expandable to %Query{}, + # otherwise ensure that quoted code is evaluated before macro call + quoted = + if Enum.all?(binds, fn {_, value} -> is_integer(value) end) do + quote do + Ecto.Query.unquote(type)(unquote(quoted), unquote(binds), unquote(expr)) + end + else + quote do + query = unquote(quoted) + Ecto.Query.unquote(type)(query, unquote(binds), unquote(expr)) + end + end + + from(t, env, count_bind, quoted, binds) + end + + defp from([{type, expr}|t], env, count_bind, quoted, binds) when type in @no_binds do + quoted = + quote do + Ecto.Query.unquote(type)(unquote(quoted), unquote(expr)) + end + + from(t, env, count_bind, quoted, binds) + end + + defp from([{join, expr}|t], env, count_bind, quoted, binds) when join in @joins do + qual = join_qual(join) + {t, on, as, prefix, hints} = collect_on(t, nil, nil, nil, nil) + + {quoted, binds, count_bind} = + Builder.Join.build(quoted, qual, binds, expr, count_bind, on, as, prefix, hints, env) + + from(t, env, count_bind, quoted, to_query_binds(binds)) + end + + defp from([{:on, _value}|_], _env, _count_bind, _quoted, _binds) do + Builder.error! "`on` keyword must immediately follow a join" + end + + defp from([{key, _value}|_], _env, _count_bind, _quoted, _binds) when key in @from_join_opts do + Builder.error! "`#{key}` keyword must immediately follow a from/join" + end + + defp from([{key, _value}|_], _env, _count_bind, _quoted, _binds) do + Builder.error! 
"unsupported #{inspect key} in keyword query expression" + end + + defp from([], _env, _count_bind, quoted, _binds) do + quoted + end + + defp to_query_binds(binds) do + for {k, v} <- binds, do: {{k, [], nil}, v} + end + + defp join_qual(:join), do: :inner + defp join_qual(:full_join), do: :full + defp join_qual(:left_join), do: :left + defp join_qual(:right_join), do: :right + defp join_qual(:inner_join), do: :inner + defp join_qual(:cross_join), do: :cross + defp join_qual(:left_lateral_join), do: :left_lateral + defp join_qual(:inner_lateral_join), do: :inner_lateral + + defp collect_on([{key, _} | _] = t, on, as, prefix, hints) when key in @from_join_opts do + {t, as, prefix, hints} = collect_as_and_prefix_and_hints(t, as, prefix, hints) + collect_on(t, on, as, prefix, hints) + end + + defp collect_on([{:on, on} | t], nil, as, prefix, hints), + do: collect_on(t, on, as, prefix, hints) + defp collect_on([{:on, expr} | t], on, as, prefix, hints), + do: collect_on(t, {:and, [], [on, expr]}, as, prefix, hints) + defp collect_on(t, on, as, prefix, hints), + do: {t, on, as, prefix, hints} + + defp collect_as_and_prefix_and_hints([{:as, as} | t], nil, prefix, hints), + do: collect_as_and_prefix_and_hints(t, as, prefix, hints) + defp collect_as_and_prefix_and_hints([{:as, _} | _], _, _, _), + do: Builder.error! "`as` keyword was given more than once to the same from/join" + defp collect_as_and_prefix_and_hints([{:prefix, prefix} | t], as, nil, hints), + do: collect_as_and_prefix_and_hints(t, as, {:ok, prefix}, hints) + defp collect_as_and_prefix_and_hints([{:prefix, _} | _], _, _, _), + do: Builder.error! "`prefix` keyword was given more than once to the same from/join" + defp collect_as_and_prefix_and_hints([{:hints, hints} | t], as, prefix, nil), + do: collect_as_and_prefix_and_hints(t, as, prefix, hints) + defp collect_as_and_prefix_and_hints([{:hints, _} | _], _, _, _), + do: Builder.error! "`hints` keyword was given more than once to the same from/join" + defp collect_as_and_prefix_and_hints(t, as, prefix, hints), + do: {t, as, prefix, hints} + + @doc """ + A join query expression. + + Receives a source that is to be joined to the query and a condition for + the join. The join condition can be any expression that evaluates + to a boolean value. The qualifier must be one of `:inner`, `:left`, + `:right`, `:cross`, `:full`, `:inner_lateral` or `:left_lateral`. + + For a keyword query the `:join` keyword can be changed to `:inner_join`, + `:left_join`, `:right_join`, `:cross_join`, `:full_join`, `:inner_lateral_join` + or `:left_lateral_join`. `:join` is equivalent to `:inner_join`. + + Currently it is possible to join on: + + * an `Ecto.Schema`, such as `p in Post` + * an interpolated Ecto query with zero or more `where` clauses, + such as `c in ^(from "posts", where: [public: true])` + * an association, such as `c in assoc(post, :comments)` + * a subquery, such as `c in subquery(another_query)` + * a query fragment, such as `c in fragment("SOME COMPLEX QUERY")`, + see "Joining with fragments" below. + + ## Options + + Each join accepts the following options: + + * `:on` - a query expression or keyword list to filter the join + * `:as` - a named binding for the join + * `:prefix` - the prefix to be used for the join when issuing a database query + * `:hints` - a string or a list of strings to be used as database hints + + In the keyword query syntax, those options must be given immediately + after the join. In the expression syntax, the options are given as + the fifth argument. 
+ + ## Keywords examples + + from c in Comment, + join: p in Post, + on: p.id == c.post_id, + select: {p.title, c.text} + + from p in Post, + left_join: c in assoc(p, :comments), + select: {p, c} + + Keywords can also be given or interpolated as part of `on`: + + from c in Comment, + join: p in Post, + on: [id: c.post_id], + select: {p.title, c.text} + + Any key in `on` will apply to the currently joined expression. + + It is also possible to interpolate an Ecto query on the right-hand side + of `in`. For example, the query above can also be written as: + + posts = Post + from c in Comment, + join: p in ^posts, + on: [id: c.post_id], + select: {p.title, c.text} + + The above is specially useful to dynamically join on existing + queries, for example, to dynamically choose a source, or by + choosing between public posts or posts that have been recently + published: + + posts = + if params["drafts"] do + from p in Post, where: [drafts: true] + else + from p in Post, where: [public: true] + end + + from c in Comment, + join: p in ^posts, on: [id: c.post_id], + select: {p.title, c.text} + + Only simple queries with `where` expressions can be interpolated + in a join. + + ## Expressions examples + + Comment + |> join(:inner, [c], p in Post, on: c.post_id == p.id) + |> select([c, p], {p.title, c.text}) + + Post + |> join(:left, [p], c in assoc(p, :comments)) + |> select([p, c], {p, c}) + + Post + |> join(:left, [p], c in Comment, on: c.post_id == p.id and c.is_visible == true) + |> select([p, c], {p, c}) + + ## Joining with fragments + + When you need to join on a complex query, Ecto supports fragments in joins: + + Comment + |> join(:inner, [c], p in fragment("SOME COMPLEX QUERY", c.id, ^some_param)) + + Although using fragments in joins is discouraged in favor of Ecto + Query syntax, they are necessary when writing lateral joins as + lateral joins require a subquery that refer to previous bindings: + + Game + |> join(:inner_lateral, [g], gs in fragment("SELECT * FROM games_sold AS gs WHERE gs.game_id = ? ORDER BY gs.sold_on LIMIT 2", g.id)) + |> select([g, gs], {g.name, gs.sold_on}) + + Note that the `join` does not automatically wrap the fragment in + parentheses, since some expressions require parens and others + require no parens. Therefore, in cases such as common table + expressions, you will have to explicitly wrap the fragment content + in parens. + + ## Hints + + `from` and `join` also support index hints, as found in databases such as + [MySQL](https://dev.mysql.com/doc/refman/8.0/en/index-hints.html), + [MSSQL](https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-table?view=sql-server-2017) and + [Clickhouse](https://clickhouse.tech/docs/en/sql-reference/statements/select/sample/). + + For example, a developer using MySQL may write: + + from p in Post, + join: c in Comment, + hints: ["USE INDEX FOO", "USE INDEX BAR"], + where: p.id == c.post_id, + select: c + + Keep in mind you want to use hints rarely, so don't forget to read the database + disclaimers about such functionality. + + Hints must be static compile-time strings when they are specified as (list of) strings. 
+ Certain Ecto adapters may also accept dynamic hints using the tuple form: + + from e in Event, + hints: [sample: sample_threshold()], + select: e + + """ + @join_opts [:on | @from_join_opts] + + defmacro join(query, qual, binding \\ [], expr, opts \\ []) + defmacro join(query, qual, binding, expr, opts) + when is_list(binding) and is_list(opts) do + {t, on, as, prefix, hints} = collect_on(opts, nil, nil, nil, nil) + + with [{key, _} | _] <- t do + raise ArgumentError, "invalid option `#{key}` passed to Ecto.Query.join/5, " <> + "valid options are: #{inspect(@join_opts)}" + end + + query + |> Builder.Join.build(qual, binding, expr, nil, on, as, prefix, hints, __CALLER__) + |> elem(0) + end + + defmacro join(_query, _qual, binding, _expr, opts) when is_list(opts) do + raise ArgumentError, "invalid binding passed to Ecto.Query.join/5, should be " <> + "list of variables, got: #{Macro.to_string(binding)}" + end + + defmacro join(_query, _qual, _binding, _expr, opts) do + raise ArgumentError, "invalid opts passed to Ecto.Query.join/5, should be " <> + "list, got: #{Macro.to_string(opts)}" + end + + @doc ~S''' + A common table expression (CTE) also known as WITH expression. + + `name` must be a compile-time literal string that is being used + as the table name to join the CTE in the main query or in the + recursive CTE. + + **IMPORTANT!** Beware of using CTEs. In raw SQL, CTEs can be + used as a mechanism to organize queries, but said mechanism + has no purpose in Ecto since Ecto queries are composable by + definition. In other words, if you need to break a large query + into parts, use all of the functionality in Elixir and in this + module to structure your code. Furthermore, breaking a query + into CTEs can negatively impact performance, as the database + may not optimize efficiently across CTEs. The main use case + for CTEs in Ecto is to provide recursive definitions, which + we outline in the following section. Non-recursive CTEs can + often be written as joins or subqueries, which provide better + performance. + + ## Options + + * `:as` - the CTE query itself or a fragment + + ## Recursive CTEs + + Use `recursive_ctes/2` to enable recursive mode for CTEs. + + In the CTE query itself use the same table name to leverage + recursion that has been passed to the `name` argument. Make sure + to write a stop condition to avoid an infinite recursion loop. + Generally speaking, you should only use CTEs in Ecto for + writing recursive queries. 
+ + ## Expression examples + + Products and their category names for breadcrumbs: + + category_tree_initial_query = + Category + |> where([c], is_nil(c.parent_id)) + + category_tree_recursion_query = + Category + |> join(:inner, [c], ct in "category_tree", on: c.parent_id == ct.id) + + category_tree_query = + category_tree_initial_query + |> union_all(^category_tree_recursion_query) + + Product + |> recursive_ctes(true) + |> with_cte("category_tree", as: ^category_tree_query) + |> join(:left, [p], c in "category_tree", on: c.id == p.category_id) + |> group_by([p], p.id) + |> select([p, c], %{p | category_names: fragment("ARRAY_AGG(?)", c.name)}) + + It's also possible to pass a raw SQL fragment: + + @raw_sql_category_tree """ + SELECT * FROM categories WHERE c.parent_id IS NULL + UNION ALL + SELECT * FROM categories AS c, category_tree AS ct WHERE ct.id = c.parent_id + """ + + Product + |> recursive_ctes(true) + |> with_cte("category_tree", as: fragment(@raw_sql_category_tree)) + |> join(:inner, [p], c in "category_tree", on: c.id == p.category_id) + + If you don't have any Ecto schema pointing to the CTE table, you can pass a + tuple with the CTE table name as the first element and an Ecto schema as the second + element. This will cast the result rows to Ecto structs as long as the Ecto + schema maps to the same fields in the CTE table: + + {"category_tree", Category} + |> recursive_ctes(true) + |> with_cte("category_tree", as: ^category_tree_query) + |> join(:left, [c], p in assoc(c, :products)) + |> group_by([c], c.id) + |> select([c, p], %{c | products_count: count(p.id)}) + + Keyword syntax is not supported for this feature. + + ## Limitation: CTEs on schemas with source fields + + Ecto allows developers to say that a table in their Ecto schema + maps to a different column in their database: + + field :group_id, :integer, source: :iGroupId + + At the moment, using a schema with source fields in CTE may emit + invalid queries. If you are running into such scenarios, your best + option is to use a fragment as your CTE. + ''' + defmacro with_cte(query, name, as: with_query) do + Builder.CTE.build(query, name, with_query, __CALLER__) + end + + @doc """ + Enables or disables recursive mode for CTEs. + + According to the SQL standard it affects all CTEs in the query, not individual ones. + + See `with_cte/3` on example of how to build a query with a recursive CTE. + """ + def recursive_ctes(%__MODULE__{with_ctes: with_expr} = query, value) when is_boolean(value) do + with_expr = with_expr || %WithExpr{} + with_expr = %{with_expr | recursive: value} + %{query | with_ctes: with_expr} + end + + def recursive_ctes(queryable, value) do + recursive_ctes(Ecto.Queryable.to_query(queryable), value) + end + + @doc """ + A select query expression. + + Selects which fields will be selected from the schema and any transformations + that should be performed on the fields. Any expression that is accepted in a + query can be a select field. + + Select also allows each expression to be wrapped in lists, tuples or maps as + shown in the examples below. A full schema can also be selected. + + There can only be one select expression in a query, if the select expression + is omitted, the query will by default select the full schema. If `select` is + given more than once, an error is raised. Use `exclude/2` if you would like + to remove a previous select for overriding or see `select_merge/3` for a + limited version of `select` that is composable and can be called multiple + times. 
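+
+  As a short sketch (assuming `query` already carries a select and has a single
+  binding), an existing select can be replaced by excluding it first:
+
+      query
+      |> exclude(:select)
+      |> select([p], p.id)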
+ + `select` also accepts a list of atoms where each atom refers to a field in + the source to be selected. + + ## Keywords examples + + from(c in City, select: c) # returns the schema as a struct + from(c in City, select: {c.name, c.population}) + from(c in City, select: [c.name, c.county]) + from(c in City, select: %{n: c.name, answer: 42}) + from(c in City, select: %{c | alternative_name: c.name}) + from(c in City, select: %Data{name: c.name}) + + It is also possible to select a struct and limit the returned + fields at the same time: + + from(City, select: [:name]) + + The syntax above is equivalent to: + + from(city in City, select: struct(city, [:name])) + + You can also write: + + from(city in City, select: map(city, [:name])) + + If you want a map with only the selected fields to be returned. + + For more information, read the docs for `Ecto.Query.API.struct/2` + and `Ecto.Query.API.map/2`. + + ## Expressions examples + + City |> select([c], c) + City |> select([c], {c.name, c.country}) + City |> select([c], %{"name" => c.name}) + City |> select([:name]) + City |> select([c], struct(c, [:name])) + City |> select([c], map(c, [:name])) + + """ + defmacro select(query, binding \\ [], expr) do + Builder.Select.build(:select, query, binding, expr, __CALLER__) + end + + @doc """ + Mergeable select query expression. + + This macro is similar to `select/3` except it may be specified + multiple times as long as every entry is a map. This is useful + for merging and composing selects. For example: + + query = from p in Post, select: %{} + + query = + if include_title? do + from p in query, select_merge: %{title: p.title} + else + query + end + + query = + if include_visits? do + from p in query, select_merge: %{visits: p.visits} + else + query + end + + In the example above, the query is built little by little by merging + into a final map. If both conditions above are true, the final query + would be equivalent to: + + from p in Post, select: %{title: p.title, visits: p.visits} + + If `:select_merge` is called and there is no value selected previously, + it will default to the source, `p` in the example above. + + The argument given to `:select_merge` must always be a map. The value + being merged on must be a struct or a map. If it is a struct, the fields + merged later on must be part of the struct, otherwise an error is raised. + + If the argument to `:select_merge` is a constructed struct + (`Ecto.Query.API.struct/2`) or map (`Ecto.Query.API.map/2`) where the source + to struct or map may be a `nil` value (as in an outer join), the source will + be returned unmodified. + + query = + Post + |> join(:left, [p], t in Post.Translation, + on: t.post_id == p.id and t.locale == ^"en" + ) + |> select_merge([_p, t], map(t, ^~w(title summary)a)) + + If there is no English translation for the post, the untranslated post + `title` will be returned and `summary` will be `nil`. If there is, both + `title` and `summary` will be the value from `Post.Translation`. + + `select_merge` cannot be used to set fields in associations, as + associations are always loaded later, overriding any previous value. + """ + defmacro select_merge(query, binding \\ [], expr) do + Builder.Select.build(:merge, query, binding, expr, __CALLER__) + end + + @doc """ + A distinct query expression. + + When true, only keeps distinct values from the resulting + select expression. + + If supported by your database, you can also pass query expressions + to distinct and it will generate a query with DISTINCT ON. 
In such + cases, `distinct` accepts exactly the same expressions as `order_by` + and any `distinct` expression will be automatically prepended to the + `order_by` expressions in case there is any `order_by` expression. + + ## Keywords examples + + # Returns the list of different categories in the Post schema + from(p in Post, distinct: true, select: p.category) + + # If your database supports DISTINCT ON(), + # you can pass expressions to distinct too + from(p in Post, + distinct: p.category, + order_by: [p.date]) + + # The DISTINCT ON() also supports ordering similar to ORDER BY. + from(p in Post, + distinct: [desc: p.category], + order_by: [p.date]) + + # Using atoms + from(p in Post, distinct: :category, order_by: :date) + + ## Expressions example + + Post + |> distinct(true) + |> order_by([p], [p.category, p.author]) + + """ + defmacro distinct(query, binding \\ [], expr) do + Builder.Distinct.build(query, binding, expr, __CALLER__) + end + + @doc """ + An AND where query expression. + + `where` expressions are used to filter the result set. If there is more + than one where expression, they are combined with an `and` operator. All + where expressions have to evaluate to a boolean value. + + `where` also accepts a keyword list where the field given as key is going to + be compared with the given value. The fields will always refer to the source + given in `from`. + + ## Keywords example + + from(c in City, where: c.country == "Sweden") + from(c in City, where: [country: "Sweden"]) + + It is also possible to interpolate the whole keyword list, allowing you to + dynamically filter the source: + + filters = [country: "Sweden"] + from(c in City, where: ^filters) + + ## Expressions examples + + City |> where([c], c.country == "Sweden") + City |> where(country: "Sweden") + + """ + defmacro where(query, binding \\ [], expr) do + Builder.Filter.build(:where, :and, query, binding, expr, __CALLER__) + end + + @doc """ + An OR where query expression. + + Behaves exactly the same as `where` except it combines with any previous + expression by using an `OR`. All expressions have to evaluate to a boolean + value. + + `or_where` also accepts a keyword list where each key is a field to be + compared with the given value. Each key-value pair will be combined + using `AND`, exactly as in `where`. + + ## Keywords example + + from(c in City, where: [country: "Sweden"], or_where: [country: "Brazil"]) + + If interpolating keyword lists, the keyword list entries are combined + using ANDs and joined to any existing expression with an OR: + + filters = [country: "USA", name: "New York"] + from(c in City, where: [country: "Sweden"], or_where: ^filters) + + is equivalent to: + + from c in City, where: (c.country == "Sweden") or + (c.country == "USA" and c.name == "New York") + + The behaviour above is by design to keep the changes between `where` + and `or_where` minimal. 
Plus, if you have a keyword list and you + would like each pair to be combined using `or`, it can be easily done + with `Enum.reduce/3`: + + filters = [country: "USA", is_tax_exempt: true] + Enum.reduce(filters, City, fn {key, value}, query -> + from q in query, or_where: field(q, ^key) == ^value + end) + + which will be equivalent to: + + from c in City, or_where: (c.country == "USA"), or_where: c.is_tax_exempt == true + + ## Expressions example + + City |> where([c], c.country == "Sweden") |> or_where([c], c.country == "Brazil") + + """ + defmacro or_where(query, binding \\ [], expr) do + Builder.Filter.build(:where, :or, query, binding, expr, __CALLER__) + end + + @doc """ + An order by query expression. + + Orders the fields based on one or more fields. It accepts a single field + or a list of fields. The default direction is ascending (`:asc`) and can be + customized in a keyword list as one of the following: + + * `:asc` + * `:asc_nulls_last` + * `:asc_nulls_first` + * `:desc` + * `:desc_nulls_last` + * `:desc_nulls_first` + + The `*_nulls_first` and `*_nulls_last` variants are not supported by all + databases. While all databases default to ascending order, the choice of + "nulls first" or "nulls last" is specific to each database implementation. + + `order_by` may be invoked or listed in a query many times. New expressions + are always appended to the previous ones. + + `order_by` also accepts a list of atoms where each atom refers to a field in + source or a keyword list where the direction is given as key and the field + to order as value. + + ## Keywords examples + + from(c in City, order_by: c.name, order_by: c.population) + from(c in City, order_by: [c.name, c.population]) + from(c in City, order_by: [asc: c.name, desc: c.population]) + + from(c in City, order_by: [:name, :population]) + from(c in City, order_by: [asc: :name, desc_nulls_first: :population]) + + A keyword list can also be interpolated: + + values = [asc: :name, desc_nulls_first: :population] + from(c in City, order_by: ^values) + + A fragment can also be used: + + from c in City, order_by: [ + # A deterministic shuffled order + fragment("? % ? DESC", c.id, ^modulus), + desc: c.id, + ] + + It's also possible to order by an aliased or calculated column: + + from(c in City, + select: %{ + name: c.name, + total_population: + fragment( + "COALESCE(?, ?) + ? AS total_population", + c.animal_population, + 0, + c.human_population + ) + }, + order_by: [ + # based on `AS total_population` in the previous fragment + {:desc, fragment("total_population")} + ] + ) + + ## Expressions examples + + City |> order_by([c], asc: c.name, desc: c.population) + City |> order_by(asc: :name) # Sorts by the cities name + + """ + defmacro order_by(query, binding \\ [], expr) do + Builder.OrderBy.build(query, binding, expr, __CALLER__) + end + + @doc """ + A union query expression. + + Combines result sets of multiple queries. The `select` of each query + must be exactly the same, with the same types in the same order. + + Union expression returns only unique rows as if each query returned + distinct results. This may cause a performance penalty. If you need + to combine multiple result sets without removing duplicate rows + consider using `union_all/2`. + + Note that the operations `order_by`, `limit` and `offset` of the + current `query` apply to the result of the union. 
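+
+  For instance, in the sketch below the `limit` constrains the combined set of
+  customer and supplier cities, not only the `Customer` rows:
+
+      supplier_query = from s in Supplier, select: s.city
+      from c in Customer, select: c.city, union: ^supplier_query, limit: 10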
+ + ## Keywords example + + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, union: ^supplier_query + + ## Expressions example + + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> union(^supplier_query) + + """ + defmacro union(query, other_query) do + Builder.Combination.build(:union, query, other_query, __CALLER__) + end + + @doc """ + A union all query expression. + + Combines result sets of multiple queries. The `select` of each query + must be exactly the same, with the same types in the same order. + + Note that the operations `order_by`, `limit` and `offset` of the + current `query` apply to the result of the union. + + ## Keywords example + + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, union_all: ^supplier_query + + ## Expressions example + + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> union_all(^supplier_query) + """ + defmacro union_all(query, other_query) do + Builder.Combination.build(:union_all, query, other_query, __CALLER__) + end + + @doc """ + An except (set difference) query expression. + + Takes the difference of the result sets of multiple queries. The + `select` of each query must be exactly the same, with the same + types in the same order. + + Except expression returns only unique rows as if each query returned + distinct results. This may cause a performance penalty. If you need + to take the difference of multiple result sets without + removing duplicate rows consider using `except_all/2`. + + Note that the operations `order_by`, `limit` and `offset` of the + current `query` apply to the result of the set difference. + + ## Keywords example + + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, except: ^supplier_query + + ## Expressions example + + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> except(^supplier_query) + """ + defmacro except(query, other_query) do + Builder.Combination.build(:except, query, other_query, __CALLER__) + end + + @doc """ + An except (set difference) query expression. + + Takes the difference of the result sets of multiple queries. The + `select` of each query must be exactly the same, with the same + types in the same order. + + Note that the operations `order_by`, `limit` and `offset` of the + current `query` apply to the result of the set difference. + + ## Keywords example + + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, except_all: ^supplier_query + + ## Expressions example + + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> except_all(^supplier_query) + """ + defmacro except_all(query, other_query) do + Builder.Combination.build(:except_all, query, other_query, __CALLER__) + end + + @doc """ + An intersect query expression. + + Takes the overlap of the result sets of multiple queries. The + `select` of each query must be exactly the same, with the same + types in the same order. + + Intersect expression returns only unique rows as if each query returned + distinct results. This may cause a performance penalty. If you need + to take the intersection of multiple result sets without + removing duplicate rows consider using `intersect_all/2`. + + Note that the operations `order_by`, `limit` and `offset` of the + current `query` apply to the result of the set difference. 
+ + ## Keywords example + + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, intersect: ^supplier_query + + ## Expressions example + + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> intersect(^supplier_query) + """ + defmacro intersect(query, other_query) do + Builder.Combination.build(:intersect, query, other_query, __CALLER__) + end + + @doc """ + An intersect query expression. + + Takes the overlap of the result sets of multiple queries. The + `select` of each query must be exactly the same, with the same + types in the same order. + + Note that the operations `order_by`, `limit` and `offset` of the + current `query` apply to the result of the set difference. + + ## Keywords example + + supplier_query = from s in Supplier, select: s.city + from c in Customer, select: c.city, intersect_all: ^supplier_query + + ## Expressions example + + supplier_query = Supplier |> select([s], s.city) + Customer |> select([c], c.city) |> intersect_all(^supplier_query) + """ + defmacro intersect_all(query, other_query) do + Builder.Combination.build(:intersect_all, query, other_query, __CALLER__) + end + + @doc """ + A limit query expression. + + Limits the number of rows returned from the result. Can be any expression but + has to evaluate to an integer value and it can't include any field. + + If `limit` is given twice, it overrides the previous value. + + ## Keywords example + + from(u in User, where: u.id == ^current_user, limit: 1) + + ## Expressions example + + User |> where([u], u.id == ^current_user) |> limit(1) + + """ + defmacro limit(query, binding \\ [], expr) do + Builder.LimitOffset.build(:limit, query, binding, expr, __CALLER__) + end + + @doc """ + An offset query expression. + + Offsets the number of rows selected from the result. Can be any expression + but it must evaluate to an integer value and it can't include any field. + + If `offset` is given twice, it overrides the previous value. + + ## Keywords example + + # Get all posts on page 4 + from(p in Post, limit: 10, offset: 30) + + ## Expressions example + + Post |> limit(10) |> offset(30) + + """ + defmacro offset(query, binding \\ [], expr) do + Builder.LimitOffset.build(:offset, query, binding, expr, __CALLER__) + end + + @doc ~S""" + A lock query expression. + + Provides support for row-level pessimistic locking using + `SELECT ... FOR UPDATE` or other, database-specific, locking clauses. + `expr` can be any expression but has to evaluate to a boolean value or to a + string and it can't include any fields. + + If `lock` is used more than once, the last one used takes precedence. + + Ecto also supports [optimistic + locking](https://en.wikipedia.org/wiki/Optimistic_concurrency_control) but not + through queries. For more information on optimistic locking, have a look at + the `Ecto.Changeset.optimistic_lock/3` function. + + ## Keywords example + + from(u in User, where: u.id == ^current_user, lock: "FOR SHARE NOWAIT") + + ## Expressions example + + User |> where([u], u.id == ^current_user) |> lock("FOR SHARE NOWAIT") + + """ + defmacro lock(query, binding \\ [], expr) do + Builder.Lock.build(query, binding, expr, __CALLER__) + end + + @doc ~S""" + An update query expression. + + Updates are used to update the filtered entries. In order for + updates to be applied, `c:Ecto.Repo.update_all/3` must be invoked. 
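+
+  For instance, an update query can be applied as follows (a sketch, assuming a
+  `Repo` as in the earlier examples and a `user_id` bound beforehand):
+
+      from(u in User, where: u.id == ^user_id, update: [set: [name: "new name"]])
+      |> Repo.update_all([])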
+ + ## Keywords example + + from(u in User, update: [set: [name: "new name"]]) + + ## Expressions examples + + User |> update([u], set: [name: "new name"]) + User |> update(set: [name: "new name"]) + + ## Interpolation + + new_name = "new name" + from(u in User, update: [set: [name: ^new_name]]) + + new_name = "new name" + from(u in User, update: [set: [name: fragment("upper(?)", ^new_name)]]) + + ## Operators + + The update expression in Ecto supports the following operators: + + * `set` - sets the given field in the table to the given value + + from(u in User, update: [set: [name: "new name"]]) + + * `inc` - increments (or decrements if the value is negative) the given field in the table by the given value + + from(u in User, update: [inc: [accesses: 1]]) + + * `push` - pushes (appends) the given value to the end of the array field + + from(u in User, update: [push: [tags: "cool"]]) + + * `pull` - pulls (removes) the given value from the array field + + from(u in User, update: [pull: [tags: "not cool"]]) + + """ + defmacro update(query, binding \\ [], expr) do + Builder.Update.build(query, binding, expr, __CALLER__) + end + + @doc """ + A group by query expression. + + Groups together rows from the schema that have the same values in the given + fields. Using `group_by` "groups" the query giving it different semantics + in the `select` expression. If a query is grouped, only fields that were + referenced in the `group_by` can be used in the `select` or if the field + is given as an argument to an aggregate function. + + `group_by` also accepts a list of atoms where each atom refers to + a field in source. For more complicated queries you can access fields + directly instead of atoms. + + ## Keywords examples + + # Returns the number of posts in each category + from(p in Post, + group_by: p.category, + select: {p.category, count(p.id)}) + + # Using atoms + from(p in Post, group_by: :category, select: {p.category, count(p.id)}) + + # Using direct fields access + from(p in Post, + join: c in assoc(p, :category), + group_by: [p.id, c.name]) + + ## Expressions example + + Post |> group_by([p], p.category) |> select([p], count(p.id)) + + """ + defmacro group_by(query, binding \\ [], expr) do + Builder.GroupBy.build(query, binding, expr, __CALLER__) + end + + @doc """ + An AND having query expression. + + Like `where`, `having` filters rows from the schema, but after the grouping is + performed giving it the same semantics as `select` for a grouped query + (see `group_by/3`). `having` groups the query even if the query has no + `group_by` expression. + + ## Keywords example + + # Returns the number of posts in each category where the + # average number of comments is above ten + from(p in Post, + group_by: p.category, + having: avg(p.num_comments) > 10, + select: {p.category, count(p.id)}) + + ## Expressions example + + Post + |> group_by([p], p.category) + |> having([p], avg(p.num_comments) > 10) + |> select([p], count(p.id)) + """ + defmacro having(query, binding \\ [], expr) do + Builder.Filter.build(:having, :and, query, binding, expr, __CALLER__) + end + + @doc """ + An OR having query expression. + + Like `having` but combines with the previous expression by using + `OR`. `or_having` behaves for `having` the same way `or_where` + behaves for `where`. + + ## Keywords example + + # Augment a previous group_by with a having condition. + from(p in query, or_having: avg(p.num_comments) > 10) + + ## Expressions example + + # Augment a previous group_by with a having condition. 
+ Post |> or_having([p], avg(p.num_comments) > 10) + + """ + defmacro or_having(query, binding \\ [], expr) do + Builder.Filter.build(:having, :or, query, binding, expr, __CALLER__) + end + + @doc """ + Preloads the associations into the result set. + + Imagine you have a schema `Post` with a `has_many :comments` + association and you execute the following query: + + Repo.all from p in Post, preload: [:comments] + + The example above will fetch all posts from the database and then do + a separate query returning all comments associated with the given posts. + The comments are then processed and associated to each returned `post` + under the `comments` field. + + Often times, you may want posts and comments to be selected and + filtered in the same query. For such cases, you can explicitly tell + an existing join to be preloaded into the result set: + + Repo.all from p in Post, + join: c in assoc(p, :comments), + where: c.published_at > p.updated_at, + preload: [comments: c] + + In the example above, instead of issuing a separate query to fetch + comments, Ecto will fetch posts and comments in a single query and + then do a separate pass associating each comment to its parent post. + Therefore, instead of returning `number_of_posts * number_of_comments` + results, like a `join` would, it returns only posts with the `comments` + fields properly filled in. + + Nested associations can also be preloaded in both formats: + + Repo.all from p in Post, + preload: [comments: :likes] + + Repo.all from p in Post, + join: c in assoc(p, :comments), + join: l in assoc(c, :likes), + where: l.inserted_at > c.updated_at, + preload: [comments: {c, likes: l}] + + Applying a limit to the association can be achieved with `inner_lateral_join`: + + Repo.all from p in Post, as: :post, + join: c in assoc(p, :comments), + inner_lateral_join: top_five in subquery( + from Comment, + where: [post_id: parent_as(:post).id], + order_by: :popularity, + limit: 5, + select: [:id] + ), on: top_five.id == c.id, + preload: [comments: c] + + ## Preload queries + + Preload also allows queries to be given, allowing you to filter or + customize how the preloads are fetched: + + comments_query = from c in Comment, order_by: c.published_at + Repo.all from p in Post, preload: [comments: ^comments_query] + + The example above will issue two queries, one for loading posts and + then another for loading the comments associated with the posts. + Comments will be ordered by `published_at`. + + When specifying a preload query, you can still preload the associations of + those records. For instance, you could preload an author's published posts and + the comments on those posts: + + posts_query = from p in Post, where: p.state == :published + Repo.all from a in Author, preload: [posts: ^{posts_query, [:comments]}] + + Note: keep in mind operations like limit and offset in the preload + query will affect the whole result set and not each association. For + example, the query below: + + comments_query = from c in Comment, order_by: c.popularity, limit: 5 + Repo.all from p in Post, preload: [comments: ^comments_query] + + won't bring the top of comments per post. Rather, it will only bring + the 5 top comments across all posts. 
Instead, use a window: + + ranking_query = + from c in Comment, + select: %{id: c.id, row_number: over(row_number(), :posts_partition)}, + windows: [posts_partition: [partition_by: :post_id, order_by: :popularity]] + + comments_query = + from c in Comment, + join: r in subquery(ranking_query), + on: c.id == r.id and r.row_number <= 5 + + Repo.all from p in Post, preload: [comments: ^comments_query] + + ## Preload functions + + Preload also allows functions to be given. In such cases, the function + receives the IDs of the parent association and it must return the associated + data. Ecto then will map this data and sort it by the relationship key: + + comment_preloader = fn post_ids -> fetch_comments_by_post_ids(post_ids) end + Repo.all from p in Post, preload: [comments: ^comment_preloader] + + This is useful when the whole dataset was already loaded or must be + explicitly fetched from elsewhere. The IDs received by the preloading + function and the result returned depends on the association type: + + * For `has_many` and `belongs_to` - the function receives the IDs of + the parent association and it must return a list of maps or structs + with the associated entries. The associated map/struct must contain + the "foreign_key" field. For example, if a post has many comments, + when preloading the comments with a custom function, the function + will receive a list of "post_ids" as the argument and it must return + maps or structs representing the comments. The maps/structs must + include the `:post_id` field + + * For `has_many :through` - it behaves similarly to a regular `has_many` + but note that the IDs received are of the last association. Imagine, + for example, a post has many comments and each comment has an author. + Therefore, a post may have many comments_authors, written as + `has_many :comments_authors, through: [:comments, :author]`. When + preloading authors with a custom function via `:comments_authors`, + the function will receive the IDs of the authors as the last step + + * For `many_to_many` - the function receives the IDs of the parent + association and it must return a tuple with the parent id as the first + element and the association map or struct as the second. For example, + if a post has many tags, when preloading the tags with a custom + function, the function will receive a list of "post_ids" as the argument + and it must return a tuple in the format of `{post_id, tag}` + + ## Keywords example + + # Returns all posts, their associated comments, and the associated + # likes for those comments. + from(p in Post, + preload: [comments: :likes], + select: p + ) + + ## Expressions examples + + Post |> preload(:comments) |> select([p], p) + + Post + |> join(:left, [p], c in assoc(p, :comments)) + |> preload([p, c], [:user, comments: c]) + |> select([p], p) + + """ + defmacro preload(query, bindings \\ [], expr) do + Builder.Preload.build(query, bindings, expr, __CALLER__) + end + + @doc """ + Restricts the query to return the first result ordered by primary key. + + The query will be automatically ordered by the primary key + unless `order_by` is given or `order_by` is set in the query. + Limit is always set to 1. 
+ + ## Examples + + Post |> first |> Repo.one + query |> first(:inserted_at) |> Repo.one + """ + def first(queryable, order_by \\ nil) + + def first(%Ecto.Query{} = query, nil) do + query = %{query | limit: limit()} + case query do + %{order_bys: []} -> + %{query | order_bys: [order_by_pk(query, :asc)]} + %{} -> + query + end + end + def first(queryable, nil), do: first(Ecto.Queryable.to_query(queryable), nil) + def first(queryable, key), do: first(order_by(queryable, ^key), nil) + + @doc """ + Restricts the query to return the last result ordered by primary key. + + The query ordering will be automatically reversed, with ASC + columns becoming DESC columns (and vice-versa) and limit is set + to 1. If there is no ordering, the query will be automatically + ordered decreasingly by primary key. + + ## Examples + + Post |> last |> Repo.one + query |> last(:inserted_at) |> Repo.one + """ + def last(queryable, order_by \\ nil) + def last(queryable, nil), do: %{reverse_order(queryable) | limit: limit()} + def last(queryable, key), do: last(order_by(queryable, ^key), nil) + + defp limit do + %QueryExpr{expr: 1, params: [], file: __ENV__.file, line: __ENV__.line} + end + + defp field(ix, field) when is_integer(ix) and is_atom(field) do + {{:., [], [{:&, [], [ix]}, field]}, [], []} + end + + defp order_by_pk(query, dir) do + schema = assert_schema!(query) + pks = schema.__schema__(:primary_key) + expr = for pk <- pks, do: {dir, field(0, pk)} + %QueryExpr{expr: expr, file: __ENV__.file, line: __ENV__.line} + end + + defp assert_schema!(%{from: %Ecto.Query.FromExpr{source: {_source, schema}}}) when schema != nil, do: schema + defp assert_schema!(query) do + raise Ecto.QueryError, query: query, message: "expected a from expression with a schema" + end + + @doc """ + Returns `true` if the query has a binding with the given name, otherwise `false`. + + For more information on named bindings see "Named bindings" in this module doc. + """ + def has_named_binding?(%Ecto.Query{aliases: aliases}, key) do + Map.has_key?(aliases, key) + end + + def has_named_binding?(queryable, _key) + when is_atom(queryable) or is_binary(queryable) or is_tuple(queryable) do + false + end + + def has_named_binding?(queryable, key) do + has_named_binding?(Ecto.Queryable.to_query(queryable), key) + end + + @doc """ + Reverses the ordering of the query. + + ASC columns become DESC columns (and vice-versa). If the query + has no `order_by`s, it orders by the inverse of the primary key. 
+ + ## Examples + + query |> reverse_order() |> Repo.one() + Post |> order(asc: :id) |> reverse_order() == Post |> order(desc: :id) + """ + def reverse_order(%Ecto.Query{} = query) do + update_in(query.order_bys, fn + [] -> [order_by_pk(query, :desc)] + order_bys -> Enum.map(order_bys, &reverse_order_by/1) + end) + end + + def reverse_order(queryable) do + reverse_order(Ecto.Queryable.to_query(queryable)) + end + + defp reverse_order_by(%{expr: expr} = order_by) do + %{ + order_by + | expr: + Enum.map(expr, fn + {:desc, ast} -> {:asc, ast} + {:desc_nulls_last, ast} -> {:asc_nulls_first, ast} + {:desc_nulls_first, ast} -> {:asc_nulls_last, ast} + {:asc, ast} -> {:desc, ast} + {:asc_nulls_last, ast} -> {:desc_nulls_first, ast} + {:asc_nulls_first, ast} -> {:desc_nulls_last, ast} + end) + } + end +end diff --git a/deps/ecto/lib/ecto/query/api.ex b/deps/ecto/lib/ecto/query/api.ex new file mode 100644 index 0000000..7093569 --- /dev/null +++ b/deps/ecto/lib/ecto/query/api.ex @@ -0,0 +1,689 @@ +defmodule Ecto.Query.API do + @moduledoc """ + Lists all functions allowed in the query API. + + * Comparison operators: `==`, `!=`, `<=`, `>=`, `<`, `>` + * Arithmetic operators: `+`, `-`, `*`, `/` + * Boolean operators: `and`, `or`, `not` + * Inclusion operator: `in/2` + * Subquery operators: `any`, `all` and `exists` + * Search functions: `like/2` and `ilike/2` + * Null check functions: `is_nil/1` + * Aggregates: `count/0`, `count/1`, `avg/1`, `sum/1`, `min/1`, `max/1` + * Date/time intervals: `datetime_add/3`, `date_add/3`, `from_now/2`, `ago/2` + * Inside select: `struct/2`, `map/2`, `merge/2` and literals (map, tuples, lists, etc) + * General: `fragment/1`, `field/2`, `type/2`, `as/1`, `parent_as/1` + + Note the functions in this module exist for documentation + purposes and one should never need to invoke them directly. + Furthermore, it is possible to define your own macros and + use them in Ecto queries (see docs for `fragment/1`). + + ## Intervals + + Ecto supports following values for `interval` option: `"year"`, `"month"`, + `"week"`, `"day"`, `"hour"`, `"minute"`, `"second"`, `"millisecond"`, and + `"microsecond"`. + + `Date`/`Time` functions like `datetime_add/3`, `date_add/3`, `from_now/2`, + `ago/2` take `interval` as an argument. + + ## Window API + + Ecto also supports many of the windows functions found + in SQL databases. See `Ecto.Query.WindowAPI` for more + information. + + ## About the arithmetic operators + + The Ecto implementation of these operators provide only + a thin layer above the adapters. So if your adapter allows you + to use them in a certain way (like adding a date and an + interval in PostgreSQL), it should work just fine in Ecto + queries. + """ + + @dialyzer :no_return + + @doc """ + Binary `==` operation. + """ + def left == right, do: doc! [left, right] + + @doc """ + Binary `!=` operation. + """ + def left != right, do: doc! [left, right] + + @doc """ + Binary `<=` operation. + """ + def left <= right, do: doc! [left, right] + + @doc """ + Binary `>=` operation. + """ + def left >= right, do: doc! [left, right] + + @doc """ + Binary `<` operation. + """ + def left < right, do: doc! [left, right] + + @doc """ + Binary `>` operation. + """ + def left > right, do: doc! [left, right] + + @doc """ + Binary `+` operation. + """ + def left + right, do: doc! [left, right] + + @doc """ + Binary `-` operation. + """ + def left - right, do: doc! [left, right] + + @doc """ + Binary `*` operation. + """ + def left * right, do: doc! 
[left, right] + + @doc """ + Binary `/` operation. + """ + def left / right, do: doc! [left, right] + + @doc """ + Binary `and` operation. + """ + def left and right, do: doc! [left, right] + + @doc """ + Binary `or` operation. + """ + def left or right, do: doc! [left, right] + + @doc """ + Unary `not` operation. + + It is used to negate values in `:where`. It is also used to match + the assert the opposite of `in/2`, `is_nil/1`, and `exists/1`. + For example: + + from p in Post, where: p.id not in [1, 2, 3] + + from p in Post, where: not is_nil(p.title) + + # Retrieve all the posts that doesn't have comments. + from p in Post, + as: :post, + where: + not exists( + from( + c in Comment, + where: parent_as(:post).id == c.post_id + ) + ) + + """ + def not(value), do: doc! [value] + + @doc """ + Checks if the left-value is included in the right one. + + from p in Post, where: p.id in [1, 2, 3] + + The right side may either be a list, a literal list + or even a column in the database with array type: + + from p in Post, where: "elixir" in p.tags + + Additionally, the right side may also be a subquery: + + from c in Comment, where: c.post_id in subquery( + from(p in Post, where: p.created_at > ^since) + ) + """ + def left in right, do: doc! [left, right] + + @doc """ + Evaluates to true if the provided subquery returns 1 or more rows. + + from p in Post, + as: :post, + where: + exists( + from( + c in Comment, + where: parent_as(:post).id == c.post_id and c.replies_count > 5, + select: 1 + ) + ) + + This is best used in conjunction with `parent_as` to correlate the subquery + with the parent query to test some condition on related rows in a different table. + In the above example the query returns posts which have at least one comment that + has more than 5 replies. + """ + def exists(subquery), do: doc! [subquery] + + @doc """ + Tests whether one or more values returned from the provided subquery match in a comparison operation. + + from p in Product, where: p.id == any( + from(li in LineItem, select: [li.product_id], where: li.created_at > ^since and li.qty >= 10) + ) + + A product matches in the above example if a line item was created since the provided date where the customer purchased + at least 10 units. + + Both `any` and `all` must be given a subquery as an argument, and they must be used on the right hand side of a comparison. + Both can be used with every comparison operator: `==`, `!=`, `>`, `>=`, `<`, `<=`. + """ + def any(subquery), do: doc! [subquery] + + @doc """ + Evaluates whether all values returned from the provided subquery match in a comparison operation. + + from p in Post, where: p.visits >= all( + from(p in Post, select: avg(p.visits), group_by: [p.category_id]) + ) + + For a post to match in the above example it must be visited at least as much as the average post in all categories. + + from p in Post, where: p.visits == all( + from(p in Post, select: max(p.visits)) + ) + + The above example matches all the posts which are tied for being the most visited. + + Both `any` and `all` must be given a subquery as an argument, and they must be used on the right hand side of a comparison. + Both can be used with every comparison operator: `==`, `!=`, `>`, `>=`, `<`, `<=`. + """ + def all(subquery), do: doc! [subquery] + + @doc """ + Searches for `search` in `string`. + + from p in Post, where: like(p.body, "Chapter%") + + Translates to the underlying SQL LIKE query, therefore + its behaviour is dependent on the database. 
In particular, + PostgreSQL will do a case-sensitive operation, while the + majority of other databases will be case-insensitive. For + performing a case-insensitive `like` in PostgreSQL, see `ilike/2`. + + You should be very careful when allowing user sent data to be used + as part of LIKE query, since they allow to perform + [LIKE-injections](https://githubengineering.com/like-injection/). + """ + def like(string, search), do: doc! [string, search] + + @doc """ + Searches for `search` in `string` in a case insensitive fashion. + + from p in Post, where: ilike(p.body, "Chapter%") + + Translates to the underlying SQL ILIKE query. This operation is + only available on PostgreSQL. + """ + def ilike(string, search), do: doc! [string, search] + + @doc """ + Checks if the given value is nil. + + from p in Post, where: is_nil(p.published_at) + + To check if a given value is not nil use: + + from p in Post, where: not is_nil(p.published_at) + """ + def is_nil(value), do: doc! [value] + + @doc """ + Counts the entries in the table. + + from p in Post, select: count() + """ + def count, do: doc! [] + + @doc """ + Counts the given entry. + + from p in Post, select: count(p.id) + """ + def count(value), do: doc! [value] + + @doc """ + Counts the distinct values in given entry. + + from p in Post, select: count(p.id, :distinct) + """ + def count(value, :distinct), do: doc! [value, :distinct] + + @doc """ + Takes whichever value is not null, or null if they both are. + + In SQL, COALESCE takes any number of arguments, but in ecto + it only takes two, so it must be chained to achieve the same + effect. + + from p in Payment, select: p.value |> coalesce(p.backup_value) |> coalesce(0) + """ + def coalesce(value, expr), do: doc! [value, expr] + + @doc """ + Applies the given expression as a FILTER clause against an + aggregate. This is currently only supported by Postgres. + + from p in Payment, select: filter(avg(p.value), p.value > 0 and p.value < 100) + + from p in Payment, select: avg(p.value) |> filter(p.value < 0) + """ + def filter(value, filter), do: doc! [value, filter] + + @doc """ + Calculates the average for the given entry. + + from p in Payment, select: avg(p.value) + """ + def avg(value), do: doc! [value] + + @doc """ + Calculates the sum for the given entry. + + from p in Payment, select: sum(p.value) + """ + def sum(value), do: doc! [value] + + @doc """ + Calculates the minimum for the given entry. + + from p in Payment, select: min(p.value) + """ + def min(value), do: doc! [value] + + @doc """ + Calculates the maximum for the given entry. + + from p in Payment, select: max(p.value) + """ + def max(value), do: doc! [value] + + @doc """ + Adds a given interval to a datetime. + + The first argument is a `datetime`, the second one is the count + for the interval, which may be either positive or negative and + the interval value: + + # Get all items published since the last month + from p in Post, where: p.published_at > + datetime_add(^NaiveDateTime.utc_now(), -1, "month") + + In the example above, we used `datetime_add/3` to subtract one month + from the current datetime and compared it with the `p.published_at`. + If you want to perform operations on date, `date_add/3` could be used. + + See [Intervals](#module-intervals) for supported `interval` values. + """ + def datetime_add(datetime, count, interval), do: doc! [datetime, count, interval] + + @doc """ + Adds a given interval to a date. + + See `datetime_add/3` for more information. 
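+
+  For example, a sketch assuming a `Post` schema with a `:published_on`
+  date field:
+
+      from p in Post, where: p.published_on > date_add(^Date.utc_today(), -7, "day")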
+ + See [Intervals](#module-intervals) for supported `interval` values. + """ + def date_add(date, count, interval), do: doc! [date, count, interval] + + @doc """ + Adds the given interval to the current time in UTC. + + The current time in UTC is retrieved from Elixir and + not from the database. + + See [Intervals](#module-intervals) for supported `interval` values. + + ## Examples + + from a in Account, where: a.expires_at < from_now(3, "month") + + """ + def from_now(count, interval), do: doc! [count, interval] + + @doc """ + Subtracts the given interval from the current time in UTC. + + The current time in UTC is retrieved from Elixir and + not from the database. + + See [Intervals](#module-intervals) for supported `interval` values. + + ## Examples + + from p in Post, where: p.published_at > ago(3, "month") + """ + def ago(count, interval), do: doc! [count, interval] + + @doc """ + Send fragments directly to the database. + + It is not possible to represent all possible database queries using + Ecto's query syntax. When such is required, it is possible to use + fragments to send any expression to the database: + + def unpublished_by_title(title) do + from p in Post, + where: is_nil(p.published_at) and + fragment("lower(?)", p.title) == ^title + end + + Every occurrence of the `?` character will be interpreted as a place + for parameters, which must be given as additional arguments to + `fragment`. If the literal character `?` is required as part of the + fragment, it can be escaped with `\\\\?` (one escape for strings, + another for fragment). + + In the example above, we are using the lower procedure in the + database to downcase the title column. + + It is very important to keep in mind that Ecto is unable to do any + type casting when fragments are used. Therefore it may be necessary + to explicitly cast parameters via `type/2`: + + fragment("lower(?)", p.title) == type(^title, :string) + + ## Literals + + Sometimes you need to interpolate a literal value into a fragment, + instead of a parameter. For example, you may need to pass a table + name or a collation, such as: + + collation = "es_ES" + fragment("? COLLATE ?", ^name, ^collation) + + The example above won't work because `collation` will be passed + as a parameter, while it has to be a literal part of the query. + + You can address this by telling Ecto that variable is a literal: + + fragment("? COLLATE ?", ^name, literal(^collation)) + + Ecto will then escape it and make it part of the query. + + > #### Literals and query caching {: .warning} + > + > Because literals are made part of the query, each interpolated + > literal will generate a separate query, with its own cache. + + ## Defining custom functions using macros and fragment + + You can add a custom Ecto query function using macros. For example + to expose SQL's coalesce function you can define this macro: + + defmodule CustomFunctions do + defmacro coalesce(left, right) do + quote do + fragment("coalesce(?, ?)", unquote(left), unquote(right)) + end + end + end + + To have coalesce/2 available, just import the module that defines it. + + import CustomFunctions + + The only downside is that it will show up as a fragment when + inspecting the Elixir query. Other than that, it should be + equivalent to a built-in Ecto query function. 
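+
+  With the macro imported, a usage sketch (assuming a `Post` schema with
+  `:subtitle` and `:title` fields) could be:
+
+      from p in Post, select: coalesce(p.subtitle, p.title)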
+ + ## Keyword fragments + + In order to support databases that do not have string-based + queries, like MongoDB, fragments also allow keywords to be given: + + from p in Post, + where: fragment(title: ["$eq": ^some_value]) + + """ + def fragment(fragments), do: doc! [fragments] + + @doc """ + Allows a field to be dynamically accessed. + + def at_least_four(doors_or_tires) do + from c in Car, + where: field(c, ^doors_or_tires) >= 4 + end + + In the example above, both `at_least_four(:doors)` and `at_least_four(:tires)` + would be valid calls as the field is dynamically generated. + """ + def field(source, field), do: doc! [source, field] + + @doc """ + Used in `select` to specify which struct fields should be returned. + + For example, if you don't need all fields to be returned + as part of a struct, you can filter it to include only certain + fields by using `struct/2`: + + from p in Post, + select: struct(p, [:title, :body]) + + `struct/2` can also be used to dynamically select fields: + + fields = [:title, :body] + from p in Post, select: struct(p, ^fields) + + As a convenience, `select` allows developers to take fields + without an explicit call to `struct/2`: + + from p in Post, select: [:title, :body] + + Or even dynamically: + + fields = [:title, :body] + from p in Post, select: ^fields + + For preloads, the selected fields may be specified from the parent: + + from(city in City, preload: :country, + select: struct(city, [:country_id, :name, country: [:id, :population]])) + + If the same source is selected multiple times with a `struct`, + the fields are merged in order to avoid fetching multiple copies + from the database. In other words, the expression below: + + from(city in City, preload: :country, + select: {struct(city, [:country_id]), struct(city, [:name])}) + + is expanded to: + + from(city in City, preload: :country, + select: {struct(city, [:country_id, :name]), struct(city, [:country_id, :name])}) + + **IMPORTANT**: When filtering fields for associations, you + MUST include the foreign keys used in the relationship, + otherwise Ecto will be unable to find associated records. + """ + def struct(source, fields), do: doc! [source, fields] + + @doc """ + Used in `select` to specify which fields should be returned as a map. + + For example, if you don't need all fields to be returned or + neither need a struct, you can use `map/2` to achieve both: + + from p in Post, + select: map(p, [:title, :body]) + + `map/2` can also be used to dynamically select fields: + + fields = [:title, :body] + from p in Post, select: map(p, ^fields) + + If the same source is selected multiple times with a `map`, + the fields are merged in order to avoid fetching multiple copies + from the database. 
In other words, the expression below: + + from(city in City, preload: :country, + select: {map(city, [:country_id]), map(city, [:name])}) + + is expanded to: + + from(city in City, preload: :country, + select: {map(city, [:country_id, :name]), map(city, [:country_id, :name])}) + + For preloads, the selected fields may be specified from the parent: + + from(city in City, preload: :country, + select: map(city, [:country_id, :name, country: [:id, :population]])) + + It's also possible to select a struct from one source but only a subset of + fields from one of its associations: + + from(city in City, preload: :country, + select: %{city | country: map(country: [:id, :population])}) + + **IMPORTANT**: When filtering fields for associations, you + MUST include the foreign keys used in the relationship, + otherwise Ecto will be unable to find associated records. + """ + def map(source, fields), do: doc! [source, fields] + + @doc """ + Merges the map on the right over the map on the left. + + If the map on the left side is a struct, Ecto will check + all of the field on the right previously exist on the left + before merging. + + from(city in City, select: merge(city, %{virtual_field: "some_value"})) + + This function is primarily used by `Ecto.Query.select_merge/3` + to merge different select clauses. + """ + def merge(left_map, right_map), do: doc! [left_map, right_map] + + @doc """ + Returns value from the `json_field` pointed to by `path`. + + from(post in Post, select: json_extract_path(post.meta, ["author", "name"])) + + The query can be also rewritten as: + + from(post in Post, select: post.meta["author"]["name"]) + + Path elements can be integers to access values in JSON arrays: + + from(post in Post, select: post.meta["tags"][0]["name"]) + + Any element of the path can be dynamic: + + field = "name" + from(post in Post, select: post.meta["author"][^field]) + + ## Warning: indexes on PostgreSQL + + PostgreSQL supports indexing on jsonb columns via GIN indexes. + Whenever comparing the value of a jsonb field against a string + or integer, Ecto will use the containement operator @> which + is optimized. You can even use the more efficient `jsonb_path_ops` + GIN index variant. For more information, consult PostgreSQL's docs + on [JSON indexing](https://www.postgresql.org/docs/current/datatype-json.html#JSON-INDEXING). + + ## Warning: return types + + The underlying data in the JSON column is returned without any + additional decoding. This means "null" JSON values are not the + same as SQL's "null". For example, the `Repo.all` operation below + returns an empty list because `p.meta["author"]` returns JSON's + null and therefore `is_nil` does not succeed: + + Repo.insert!(%Post{meta: %{author: nil}}) + Repo.all(from(post in Post, where: is_nil(p.meta["author"]))) + + Similarly, other types, such as datetimes, are returned as strings. + This means conditions like `post.meta["published_at"] > from_now(-1, "day")` + may return incorrect results or fail as the underlying database + tries to compare incompatible types. You can, however, use `type/2` + to force the types on the database level. + """ + def json_extract_path(json_field, path), do: doc! [json_field, path] + + @doc """ + Casts the given value to the given type at the database level. + + Most of the times, Ecto is able to proper cast interpolated + values due to its type checking mechanism. 
In some situations + though, you may want to tell Ecto that a parameter has some + particular type: + + type(^title, :string) + + It is also possible to say the type must match the same of a column: + + type(^title, p.title) + + Or a parameterized type, which must be previously initialized + with `Ecto.ParameterizedType.init/2`: + + @my_enum Ecto.ParameterizedType.init(Ecto.Enum, values: [:foo, :bar, :baz]) + type(^title, ^@my_enum) + + Ecto will ensure `^title` is cast to the given type and enforce such + type at the database level. If the value is returned in a `select`, + Ecto will also enforce the proper type throughout. + + When performing arithmetic operations, `type/2` can be used to cast + all the parameters in the operation to the same type: + + from p in Post, + select: type(p.visits + ^a_float + ^a_integer, :decimal) + + Inside `select`, `type/2` can also be used to cast fragments: + + type(fragment("NOW"), :naive_datetime) + + Or to type fields from schemaless queries: + + from p in "posts", select: type(p.cost, :decimal) + + Or to type aggregation results: + + from p in Post, select: type(avg(p.cost), :integer) + from p in Post, select: type(filter(avg(p.cost), p.cost > 0), :integer) + + Or to type comparison expression results: + + from p in Post, select: type(coalesce(p.cost, 0), :integer) + + """ + def type(interpolated_value, type), do: doc! [interpolated_value, type] + + @doc """ + Refer to a named atom binding. + + See the "Named binding" section in `Ecto.Query` for more information. + """ + def as(binding), do: doc! [binding] + + @doc """ + Refer to a named atom binding in the parent query. + + This is available only inside subqueries. + + See the "Named binding" section in `Ecto.Query` for more information. + """ + def parent_as(binding), do: doc! [binding] + + defp doc!(_) do + raise "the functions in Ecto.Query.API should not be invoked directly, " <> + "they serve for documentation purposes only" + end +end diff --git a/deps/ecto/lib/ecto/query/builder.ex b/deps/ecto/lib/ecto/query/builder.ex new file mode 100644 index 0000000..32c0ffb --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder.ex @@ -0,0 +1,1250 @@ +defmodule Ecto.Query.Builder do + @moduledoc false + + alias Ecto.Query + + @comparisons [ + is_nil: 1, + ==: 2, + !=: 2, + <: 2, + >: 2, + <=: 2, + >=: 2 + ] + + @dynamic_aggregates [ + max: 1, + min: 1, + first_value: 1, + last_value: 1, + nth_value: 2, + lag: 3, + lead: 3, + lag: 2, + lead: 2, + lag: 1, + lead: 1 + ] + + @static_aggregates [ + count: {0, :integer}, + count: {1, :integer}, + count: {2, :integer}, + avg: {1, :any}, + sum: {1, :any}, + row_number: {0, :integer}, + rank: {0, :integer}, + dense_rank: {0, :integer}, + percent_rank: {0, :any}, + cume_dist: {0, :any}, + ntile: {1, :integer} + ] + + @typedoc """ + Quoted types store primitive types and types in the format + {source, quoted}. The latter are handled directly in the planner, + never forwarded to Ecto.Type. + + The Ecto.Type module concerns itself only with runtime types, + which include all primitive types and custom user types. Also + note custom user types do not show up during compilation time. + """ + @type quoted_type :: Ecto.Type.primitive | {non_neg_integer, atom | Macro.t} + + @doc """ + Smart escapes a query expression and extracts interpolated values in + a map. 
+ + Everything that is a query expression will be escaped, interpolated + expressions (`^foo`) will be moved to a map unescaped and replaced + with `^index` in the query where index is a number indexing into the + map. + """ + @spec escape(Macro.t, quoted_type | {:in, quoted_type} | {:out, quoted_type}, {list, term}, + Keyword.t, Macro.Env.t | {Macro.Env.t, fun}) :: {Macro.t, {list, term}} + def escape(expr, type, params_acc, vars, env) + + # var.x - where var is bound + def escape({{:., _, [callee, field]}, _, []}, _type, params_acc, vars, _env) when is_atom(field) do + {escape_field!(callee, field, vars), params_acc} + end + + # field macro + def escape({:field, _, [callee, field]}, _type, params_acc, vars, _env) do + {escape_field!(callee, field, vars), params_acc} + end + + # param interpolation + def escape({:^, _, [arg]}, type, {params, acc}, _vars, _env) do + expr = {:{}, [], [:^, [], [length(params)]]} + params = [{arg, type} | params] + {expr, {params, acc}} + end + + # tagged types + def escape({:type, _, [{:^, _, [arg]}, type]}, _type, {params, acc}, vars, env) do + type = validate_type!(type, vars, env) + expr = {:{}, [], [:type, [], [{:{}, [], [:^, [], [length(params)]]}, type]]} + params = [{arg, type} | params] + {expr, {params, acc}} + end + + def escape({:type, _, [{{:., _, [{var, _, context}, field]}, _, []} = expr, type]}, _type, params_acc, vars, env) + when is_atom(var) and is_atom(context) and is_atom(field) do + escape_with_type(expr, type, params_acc, vars, env) + end + + def escape({:type, _, [{:coalesce, _, [_ | _]} = expr, type]}, _type, params_acc, vars, env) do + escape_with_type(expr, type, params_acc, vars, env) + end + + def escape({:type, _, [{:field, _, [_ | _]} = expr, type]}, _type, params_acc, vars, env) do + escape_with_type(expr, type, params_acc, vars, env) + end + + def escape({:type, _, [{math_op, _, [_, _]} = op_expr, type]}, _type, params_acc, vars, env) + when math_op in ~w(+ - * /)a do + escape_with_type(op_expr, type, params_acc, vars, env) + end + + def escape({:type, _, [{fun, _, args} = expr, type]}, _type, params_acc, vars, env) + when is_list(args) and fun in ~w(fragment avg count max min sum over filter)a do + escape_with_type(expr, type, params_acc, vars, env) + end + + def escape({:type, _, [{:json_extract_path, _, [_ | _]} = expr, type]}, _type, params_acc, vars, env) do + escape_with_type(expr, type, params_acc, vars, env) + end + + def escape({:type, _, [{{:., _, [Access, :get]}, _, _} = access_expr, type]}, _type, params_acc, vars, env) do + escape_with_type(access_expr, type, params_acc, vars, env) + end + + def escape({:type, meta, [expr, type]}, given_type, params_acc, vars, env) do + case Macro.expand_once(expr, get_env(env)) do + ^expr -> + error! 
""" + the first argument of type/2 must be one of: + + * interpolations, such as ^value + * fields, such as p.foo or field(p, :foo) + * fragments, such as fragment("foo(?)", value) + * an arithmetic expression (+, -, *, /) + * an aggregation or window expression (avg, count, min, max, sum, over, filter) + * a conditional expression (coalesce) + * access/json paths (p.column[0].field) + + Got: #{Macro.to_string(expr)} + """ + + expanded -> + escape({:type, meta, [expanded, type]}, given_type, params_acc, vars, env) + end + end + + # fragments + def escape({:fragment, _, [query]}, _type, params_acc, vars, env) when is_list(query) do + {escaped, params_acc} = + Enum.map_reduce(query, params_acc, &escape_kw_fragment(&1, &2, vars, env)) + {{:{}, [], [:fragment, [], [escaped]]}, params_acc} + end + + def escape({:fragment, _, [{:^, _, [var]} = _expr]}, _type, params_acc, _vars, _env) do + expr = quote do: Ecto.Query.Builder.fragment!(unquote(var)) + {{:{}, [], [:fragment, [], [expr]]}, params_acc} + end + + def escape({:fragment, _, [query | frags]}, _type, params_acc, vars, env) do + pieces = expand_and_split_fragment(query, env) + + if length(pieces) != length(frags) + 1 do + error! "fragment(...) expects extra arguments in the same amount of question marks in string. " <> + "It received #{length(frags)} extra argument(s) but expected #{length(pieces) - 1}" + end + + {frags, params_acc} = Enum.map_reduce(frags, params_acc, &escape_fragment(&1, &2, vars, env)) + {{:{}, [], [:fragment, [], merge_fragments(pieces, frags)]}, params_acc} + end + + # subqueries + def escape({:subquery, _, [expr]}, _, {params, subqueries}, _vars, _env) do + subquery = quote(do: Ecto.Query.subquery(unquote(expr))) + index = length(subqueries) + expr = {:subquery, index} # used both in ast and in parameters, as a placeholder. 
+ {expr, {[expr | params], [subquery | subqueries]}} + end + + # interval + + def escape({:from_now, meta, [count, interval]}, type, params_acc, vars, env) do + utc = quote do: ^DateTime.utc_now() + escape({:datetime_add, meta, [utc, count, interval]}, type, params_acc, vars, env) + end + + def escape({:ago, meta, [count, interval]}, type, params_acc, vars, env) do + utc = quote do: ^DateTime.utc_now() + count = + case count do + {:^, meta, [value]} -> + negate = quote do: Ecto.Query.Builder.negate!(unquote(value)) + {:^, meta, [negate]} + value -> + {:-, [], [value]} + end + escape({:datetime_add, meta, [utc, count, interval]}, type, params_acc, vars, env) + end + + def escape({:datetime_add, _, [datetime, count, interval]} = expr, type, params_acc, vars, env) do + assert_type!(expr, type, {:param, :any_datetime}) + {datetime, params_acc} = escape(datetime, {:param, :any_datetime}, params_acc, vars, env) + {count, interval, params_acc} = escape_interval(count, interval, params_acc, vars, env) + {{:{}, [], [:datetime_add, [], [datetime, count, interval]]}, params_acc} + end + + def escape({:date_add, _, [date, count, interval]} = expr, type, params_acc, vars, env) do + assert_type!(expr, type, :date) + {date, params_acc} = escape(date, :date, params_acc, vars, env) + {count, interval, params_acc} = escape_interval(count, interval, params_acc, vars, env) + {{:{}, [], [:date_add, [], [date, count, interval]]}, params_acc} + end + + # json + def escape({:json_extract_path, _, [field, path]} = expr, type, params_acc, vars, env) do + case field do + {{:., _, _}, _, _} -> + path = escape_json_path(path) + {field, params_acc} = escape(field, type, params_acc, vars, env) + {{:{}, [], [:json_extract_path, [], [field, path]]}, params_acc} + + _ -> + error!("`#{Macro.to_string(expr)}` is not a valid query expression") + end + end + + def escape({{:., meta, [Access, :get]}, _, [left, _]} = expr, type, params_acc, vars, env) do + case left do + {{:., _, _}, _, _} -> + {expr, path} = parse_access_get(expr, []) + escape({:json_extract_path, meta, [expr, path]}, type, params_acc, vars, env) + + _ -> + error!("`#{Macro.to_string(expr)}` is not a valid query expression") + end + end + + # sigils + def escape({name, _, [_, []]} = sigil, type, params_acc, vars, _env) + when name in ~w(sigil_s sigil_S sigil_w sigil_W)a do + {literal(sigil, type, vars), params_acc} + end + + # lists + def escape(list, type, params_acc, vars, env) when is_list(list) do + if Enum.all?(list, &is_binary(&1) or is_number(&1) or is_boolean(&1)) do + {literal(list, type, vars), params_acc} + else + fun = + case type do + {:array, inner_type} -> + &escape(&1, inner_type, &2, vars, env) + + _ -> + # In case we don't have an array nor a literal at compile-time, + # such as p.links == [^value], we don't do any casting nor validation. + # We may want to tackle this if the expression above is ever used. + &escape(&1, :any, &2, vars, env) + end + + Enum.map_reduce(list, params_acc, fun) + end + end + + # literals + def escape({:<<>>, _, args} = expr, type, params_acc, vars, _env) do + valid? = Enum.all?(args, fn + {:"::", _, [left, _]} -> is_integer(left) or is_binary(left) + left -> is_integer(left) or is_binary(left) + end) + + unless valid? do + error! "`#{Macro.to_string(expr)}` is not a valid query expression. 
" <> + "Only literal binaries and strings are allowed, " <> + "dynamic values need to be explicitly interpolated in queries with ^" + end + + {literal(expr, type, vars), params_acc} + end + + def escape({:-, _, [number]}, type, params_acc, vars, _env) when is_number(number), + do: {literal(-number, type, vars), params_acc} + def escape(number, type, params_acc, vars, _env) when is_number(number), + do: {literal(number, type, vars), params_acc} + def escape(binary, type, params_acc, vars, _env) when is_binary(binary), + do: {literal(binary, type, vars), params_acc} + def escape(nil, _type, params_acc, _vars, _env), + do: {nil, params_acc} + def escape(atom, type, params_acc, vars, _env) when is_atom(atom), + do: {literal(atom, type, vars), params_acc} + + # negate any expression + def escape({:-, meta, arg}, type, params_acc, vars, env) do + {escaped_arg, params_acc} = escape(arg, type, params_acc, vars, env) + expr = {:{}, [], [:-, meta, escaped_arg]} + {expr, params_acc} + end + + # comparison operators + def escape({comp_op, _, [left, right]} = expr, type, params_acc, vars, env) + when comp_op in ~w(== != < > <= >=)a do + assert_type!(expr, type, :boolean) + + if is_nil(left) or is_nil(right) do + error! "comparison with nil is forbidden as it is unsafe. " <> + "If you want to check if a value is nil, use is_nil/1 instead" + end + + ltype = quoted_type(right, vars) + rtype = quoted_type(left, vars) + + {left, params_acc} = escape(left, ltype, params_acc, vars, env) + {right, params_acc} = escape(right, rtype, params_acc, vars, env) + + {params, acc} = params_acc + {{:{}, [], [comp_op, [], [left, right]]}, + {params |> wrap_nil(left) |> wrap_nil(right), acc}} + end + + # mathematical operators + def escape({math_op, _, [left, right]}, type, params_acc, vars, env) + when math_op in ~w(+ - * /)a do + {left, params_acc} = escape(left, type, params_acc, vars, env) + {right, params_acc} = escape(right, type, params_acc, vars, env) + + {{:{}, [], [math_op, [], [left, right]]}, params_acc} + end + + # in operator + def escape({:in, _, [left, right]} = expr, type, params_acc, vars, env) + when is_list(right) + when is_tuple(right) and elem(right, 0) in ~w(sigil_w sigil_W)a do + assert_type!(expr, type, :boolean) + + {:array, ltype} = quoted_type(right, vars) + rtype = {:array, quoted_type(left, vars)} + + {left, params_acc} = escape(left, ltype, params_acc, vars, env) + {right, params_acc} = escape(right, rtype, params_acc, vars, env) + {{:{}, [], [:in, [], [left, right]]}, params_acc} + end + + def escape({:in, _, [left, right]} = expr, type, params_acc, vars, env) do + assert_type!(expr, type, :boolean) + + ltype = {:out, quoted_type(right, vars)} + rtype = {:in, quoted_type(left, vars)} + + {left, params_acc} = escape(left, ltype, params_acc, vars, env) + {right, params_acc} = escape(right, rtype, params_acc, vars, env) + + # Remove any type wrapper from the right side + right = + case right do + {:{}, [], [:type, [], [right, _]]} -> right + _ -> right + end + + {{:{}, [], [:in, [], [left, right]]}, params_acc} + end + + def escape({:count, _, [arg, :distinct]}, type, params_acc, vars, env) do + {arg, params_acc} = escape(arg, type, params_acc, vars, env) + expr = {:{}, [], [:count, [], [arg, :distinct]]} + {expr, params_acc} + end + + def escape({:filter, _, [aggregate]}, type, params_acc, vars, env) do + escape(aggregate, type, params_acc, vars, env) + end + + def escape({:filter, _, [aggregate, filter_expr]}, type, params_acc, vars, env) do + {aggregate, params_acc} = escape(aggregate, 
type, params_acc, vars, env) + {filter_expr, params_acc} = escape(filter_expr, :boolean, params_acc, vars, env) + {{:{}, [], [:filter, [], [aggregate, filter_expr]]}, params_acc} + end + + def escape({:coalesce, _, [left, right]}, type, params_acc, vars, env) do + {left, params_acc} = escape(left, type, params_acc, vars, env) + {right, params_acc} = escape(right, type, params_acc, vars, env) + {{:{}, [], [:coalesce, [], [left, right]]}, params_acc} + end + + def escape({:over, _, [{agg_name, _, agg_args} | over_args]}, type, params_acc, vars, env) do + aggregate = {agg_name, [], agg_args || []} + {aggregate, params_acc} = escape_window_function(aggregate, type, params_acc, vars, env) + {window, params_acc} = escape_window_description(over_args, params_acc, vars, env) + {{:{}, [], [:over, [], [aggregate, window]]}, params_acc} + end + + def escape({quantifier, meta, [subquery]}, type, params_acc, vars, env) when quantifier in [:all, :any, :exists] do + {subquery, params_acc} = escape({:subquery, meta, [subquery]}, type, params_acc, vars, env) + {{:{}, [], [quantifier, [], [subquery]]}, params_acc} + end + + def escape({:=, _, _} = expr, _type, _params_acc, _vars, _env) do + error! "`#{Macro.to_string(expr)}` is not a valid query expression. " <> + "The match operator is not supported: `=`. " <> + "Did you mean to use `==` instead?" + end + + def escape({op, _, _}, _type, _params_acc, _vars, _env) when op in ~w(|| && !)a do + error! "short-circuit operators are not supported: `#{op}`. " <> + "Instead use boolean operators: `and`, `or`, and `not`" + end + + # Tuple + def escape({left, right}, type, params_acc, vars, env) do + escape({:{}, [], [left, right]}, type, params_acc, vars, env) + end + + # Tuple + def escape({:{}, _, list}, {:tuple, types}, params_acc, vars, env) do + if Enum.count(list) == Enum.count(types) do + {list, params_acc} = + list + |> Enum.zip(types) + |> Enum.map_reduce(params_acc, fn {expr, type}, params_acc -> + escape(expr, type, params_acc, vars, env) + end) + expr = {:{}, [], [:{}, [], list]} + {expr, params_acc} + else + escape({:{}, [], list}, :any, params_acc, vars, env) + end + end + + # Tuple + def escape({:{}, _, _}, _, _, _, _) do + error! "Tuples can only be used in comparisons with literal tuples of the same size" + end + + # Unnecessary parentheses around an expression + def escape({:__block__, _, [expr]}, type, params_acc, vars, env) do + escape(expr, type, params_acc, vars, env) + end + + # Other functions - no type casting + def escape({name, _, args} = expr, type, params_acc, vars, env) when is_atom(name) and is_list(args) do + case call_type(name, length(args)) do + {in_type, out_type} -> + assert_type!(expr, type, out_type) + escape_call(expr, in_type, params_acc, vars, env) + nil -> + try_expansion(expr, type, params_acc, vars, env) + end + end + + # Finally handle vars + def escape({var, _, context}, _type, params_acc, vars, _env) when is_atom(var) and is_atom(context) do + {escape_var!(var, vars), params_acc} + end + + # Raise nice error messages for fun calls. + def escape({fun, _, args} = other, _type, _params_acc, _vars, _env) + when is_atom(fun) and is_list(args) do + error! """ + `#{Macro.to_string(other)}` is not a valid query expression. \ + If you are trying to invoke a function that is not supported by Ecto, \ + you can use fragments: + + fragment("some_function(?, ?, ?)", m.some_field, 1) + + See Ecto.Query.API to learn more about the supported functions and \ + Ecto.Query.API.fragment/1 to learn more about fragments. 
+ """ + end + + # Raise nice error message for remote calls + def escape({{:., _, [_, fun]}, _, _} = other, type, params_acc, vars, env) + when is_atom(fun) do + try_expansion(other, type, params_acc, vars, env) + end + + # For everything else we raise + def escape(other, _type, _params_acc, _vars, _env) do + error! "`#{Macro.to_string(other)}` is not a valid query expression" + end + + defp escape_with_type(expr, {:^, _, [type]}, params_acc, vars, env) do + {expr, params_acc} = escape(expr, :any, params_acc, vars, env) + {{:{}, [], [:type, [], [expr, type]]}, params_acc} + end + + defp escape_with_type(expr, type, params_acc, vars, env) do + type = validate_type!(type, vars, env) + {expr, params_acc} = escape(expr, type, params_acc, vars, env) + {{:{}, [], [:type, [], [expr, escape_type(type)]]}, params_acc} + end + + defp escape_type({:parameterized, _, _} = param), do: Macro.escape(param) + defp escape_type(type), do: type + + defp wrap_nil(params, {:{}, _, [:^, _, [ix]]}), do: wrap_nil(params, length(params) - ix - 1, []) + defp wrap_nil(params, _other), do: params + + defp wrap_nil([{val, type} | params], 0, acc) do + val = quote do: Ecto.Query.Builder.not_nil!(unquote(val)) + Enum.reverse(acc, [{val, type} | params]) + end + + defp wrap_nil([pair | params], i, acc) do + wrap_nil(params, i - 1, [pair | acc]) + end + + defp expand_and_split_fragment(query, env) do + case Macro.expand(query, get_env(env)) do + binary when is_binary(binary) -> + split_fragment(binary, "") + + _ -> + error! bad_fragment_message(Macro.to_string(query)) + end + end + + defp bad_fragment_message(arg) do + "to prevent SQL injection attacks, fragment(...) does not allow strings " <> + "to be interpolated as the first argument via the `^` operator, got: `#{arg}`" + end + + defp split_fragment(<<>>, consumed), + do: [consumed] + defp split_fragment(<>, consumed), + do: [consumed | split_fragment(rest, "")] + defp split_fragment(<>, consumed), + do: split_fragment(rest, consumed <> <>) + defp split_fragment(<>, consumed), + do: split_fragment(rest, consumed <> <>) + + @doc "Returns fragment pieces, given a fragment string and arguments." + def fragment_pieces(frag, args) do + frag + |> split_fragment("") + |> merge_fragments(args) + end + + defp escape_window_description([], params_acc, _vars, _env), + do: {[], params_acc} + defp escape_window_description([window_name], params_acc, _vars, _env) when is_atom(window_name), + do: {window_name, params_acc} + defp escape_window_description([kw], params_acc, vars, env) do + case Ecto.Query.Builder.Windows.escape(kw, params_acc, vars, env) do + {runtime, [], params_acc} -> + {runtime, params_acc} + + {_, [{key, _} | _], _} -> + error! "windows definitions given to over/2 do not allow interpolations at the root of " <> + "`#{key}`. Please use Ecto.Query.windows/3 to explicitly define a window instead" + end + end + + defp escape_window_function(expr, type, params_acc, vars, env) do + expr + |> validate_window_function!(env) + |> escape(type, params_acc, vars, env) + end + + defp validate_window_function!({:fragment, _, _} = expr, _env), do: expr + + defp validate_window_function!({agg, _, args} = expr, env) + when is_atom(agg) and is_list(args) do + if Code.ensure_loaded?(Ecto.Query.WindowAPI) and + function_exported?(Ecto.Query.WindowAPI, agg, length(args)) do + expr + else + case Macro.expand_once(expr, get_env(env)) do + ^expr -> + error! "unknown window function #{agg}/#{length(args)}. 
" <> + "See Ecto.Query.WindowAPI for all available functions" + expr -> + validate_window_function!(expr, env) + end + end + end + + defp validate_window_function!(expr, _), do: expr + + defp escape_call({name, _, args}, type, params_acc, vars, env) do + {args, params_acc} = Enum.map_reduce(args, params_acc, &escape(&1, type, &2, vars, env)) + expr = {:{}, [], [name, [], args]} + {expr, params_acc} + end + + defp escape_field!({var, _, context}, field, vars) + when is_atom(var) and is_atom(context) do + var = escape_var!(var, vars) + field = quoted_atom!(field, "field/2") + dot = {:{}, [], [:., [], [var, field]]} + {:{}, [], [dot, [], []]} + end + + defp escape_field!({kind, _, [value]}, field, _vars) + when kind in [:as, :parent_as] do + value = + case value do + {:^, _, [value]} -> + value + + other -> + quoted_atom!(other, "#{kind}/1") + end + as = {:{}, [], [kind, [], [value]]} + field = quoted_atom!(field, "field/2") + dot = {:{}, [], [:., [], [as, field]]} + {:{}, [], [dot, [], []]} + end + + defp escape_field!(expr, field, _vars) do + error!(""" + cannot fetch field `#{field}` from `#{Macro.to_string(expr)}`. Can only fetch fields from: + + * sources, such as `p` in `from p in Post` + * named bindings, such as `as(:post)` in `from Post, as: :post` + * parent named bindings, such as `parent_as(:post)` in a subquery + """) + end + + defp escape_interval(count, interval, params_acc, vars, env) do + type = + cond do + is_float(count) -> :float + is_integer(count) -> :integer + true -> :decimal + end + + {count, params_acc} = escape(count, type, params_acc, vars, env) + {count, quoted_interval!(interval), params_acc} + end + + defp escape_kw_fragment({key, [{_, _}|_] = exprs}, params_acc, vars, env) when is_atom(key) do + {escaped, params_acc} = Enum.map_reduce(exprs, params_acc, &escape_kw_fragment(&1, &2, vars, env)) + {{key, escaped}, params_acc} + end + + defp escape_kw_fragment({key, expr}, params_acc, vars, env) when is_atom(key) do + {escaped, params_acc} = escape(expr, :any, params_acc, vars, env) + {{key, escaped}, params_acc} + end + + defp escape_kw_fragment({key, _expr}, _params_acc, _vars, _env) do + error! "fragment(...) with keywords accepts only atoms as keys, got `#{Macro.to_string(key)}`" + end + + defp escape_fragment({:literal, _meta, [expr]}, params_acc, _vars, _env) do + case expr do + {:^, _, [expr]} -> + checked = quote do: Ecto.Query.Builder.literal!(unquote(expr)) + escaped = {:{}, [], [:literal, [], [checked]]} + {escaped, params_acc} + + _ -> + error! 
"literal/1 in fragment expects an interpolated value, such as literal(^value), got `#{Macro.to_string(expr)}`" + end + end + + defp escape_fragment(expr, params_acc, vars, env) do + escape(expr, :any, params_acc, vars, env) + end + + defp merge_fragments([h1|t1], [h2|t2]), + do: [{:raw, h1}, {:expr, h2} | merge_fragments(t1, t2)] + + defp merge_fragments([h1], []), + do: [{:raw, h1}] + + for {agg, arity} <- @dynamic_aggregates do + defp call_type(unquote(agg), unquote(arity)), do: {:any, :any} + end + + for {agg, {arity, return}} <- @static_aggregates do + defp call_type(unquote(agg), unquote(arity)), do: {:any, unquote(return)} + end + + for {comp, arity} <- @comparisons do + defp call_type(unquote(comp), unquote(arity)), do: {:any, :boolean} + end + + defp call_type(:or, 2), do: {:boolean, :boolean} + defp call_type(:and, 2), do: {:boolean, :boolean} + defp call_type(:not, 1), do: {:boolean, :boolean} + defp call_type(:like, 2), do: {:string, :boolean} + defp call_type(:ilike, 2), do: {:string, :boolean} + defp call_type(_, _), do: nil + + defp assert_type!(expr, type, actual) do + cond do + not is_atom(type) and not Ecto.Type.primitive?(type) -> + :ok + + Ecto.Type.match?(type, actual) -> + :ok + + true -> + error! "expression `#{Macro.to_string(expr)}` does not type check. " <> + "It returns a value of type #{inspect actual} but a value of " <> + "type #{inspect type} is expected" + end + end + + @doc """ + Validates the type with the given vars. + """ + def validate_type!({composite, type}, vars, env), + do: {composite, validate_type!(type, vars, env)} + def validate_type!({:^, _, [type]}, _vars, _env), + do: type + def validate_type!({:__aliases__, _, _} = type, _vars, env), + do: Macro.expand(type, get_env(env)) + def validate_type!({:parameterized, _, _} = type, _vars, _env), + do: type + def validate_type!(type, _vars, _env) when is_atom(type), + do: type + def validate_type!({{:., _, [{var, _, context}, field]}, _, []}, vars, _env) + when is_atom(var) and is_atom(context) and is_atom(field), + do: {find_var!(var, vars), field} + def validate_type!({:field, _, [{var, _, context}, field]}, vars, _env) + when is_atom(var) and is_atom(context) and is_atom(field), + do: {find_var!(var, vars), field} + + def validate_type!(type, _vars, _env) do + error! "type/2 expects an alias, atom, initialized parameterized type or " <> + "source.field as second argument, got: `#{Macro.to_string(type)}`" + end + + @always_tagged [:binary] + + defp literal(value, expected, vars), + do: do_literal(value, expected, quoted_type(value, vars)) + + defp do_literal(value, _, current) when current in @always_tagged, + do: {:%, [], [Ecto.Query.Tagged, {:%{}, [], [value: value, type: current]}]} + defp do_literal(value, :any, _current), + do: value + defp do_literal(value, expected, expected), + do: value + defp do_literal(value, expected, _current), + do: {:%, [], [Ecto.Query.Tagged, {:%{}, [], [value: value, type: expected]}]} + + @doc """ + Escape the params entries list. + """ + @spec escape_params(list()) :: list() + def escape_params(list), do: Enum.reverse(list) + + @doc """ + Escapes a variable according to the given binds. + + A escaped variable is represented internally as + `&0`, `&1` and so on. + """ + @spec escape_var!(atom, Keyword.t) :: Macro.t + def escape_var!(var, vars) do + {:{}, [], [:&, [], [find_var!(var, vars)]]} + end + + @doc """ + Escapes a list of bindings as a list of atoms. 
+ + Only variables or `{:atom, value}` tuples are allowed in the `bindings` list, + otherwise an `Ecto.Query.CompileError` is raised. + + ## Examples + + iex> escape_binding(%Ecto.Query{}, quote(do: [x, y, z]), __ENV__) + {%Ecto.Query{}, [x: 0, y: 1, z: 2]} + + iex> escape_binding(%Ecto.Query{}, quote(do: [{x, 0}, {z, 2}]), __ENV__) + {%Ecto.Query{}, [x: 0, z: 2]} + + iex> escape_binding(%Ecto.Query{}, quote(do: [x, y, x]), __ENV__) + ** (Ecto.Query.CompileError) variable `x` is bound twice + + iex> escape_binding(%Ecto.Query{}, quote(do: [a, b, :foo]), __ENV__) + ** (Ecto.Query.CompileError) binding list should contain only variables or `{as, var}` tuples, got: :foo + + """ + @spec escape_binding(Macro.t, list, Macro.Env.t) :: {Macro.t, Keyword.t} + def escape_binding(query, binding, _env) when is_list(binding) do + vars = binding |> Enum.with_index |> Enum.map(&escape_bind/1) + assert_no_duplicate_binding!(vars) + + {positional_vars, named_vars} = Enum.split_while(vars, ¬ named_bind?(&1)) + assert_named_binds_in_tail!(named_vars, binding) + + {query, positional_binds} = calculate_positional_binds(query, positional_vars) + {query, named_binds} = calculate_named_binds(query, named_vars) + {query, positional_binds ++ named_binds} + end + def escape_binding(_query, bind, _env) do + error! "binding should be list of variables and `{as, var}` tuples " <> + "at the end, got: #{Macro.to_string(bind)}" + end + + defp named_bind?({kind, _, _}), do: kind == :named + + defp assert_named_binds_in_tail!(named_vars, binding) do + if Enum.all?(named_vars, &named_bind?/1) do + :ok + else + error! "named binds in the form of `{as, var}` tuples must be at the end " <> + "of the binding list, got: #{Macro.to_string(binding)}" + end + end + + defp assert_no_duplicate_binding!(vars) do + bound_vars = for {_, var, _} <- vars, var != :_, do: var + + case bound_vars -- Enum.uniq(bound_vars) do + [] -> :ok + [var | _] -> error! "variable `#{var}` is bound twice" + end + end + + defp calculate_positional_binds(query, vars) do + case Enum.split_while(vars, &elem(&1, 1) != :...) do + {vars, []} -> + vars = for {:pos, var, count} <- vars, do: {var, count} + {query, vars} + {vars, [_ | tail]} -> + query = + quote do + query = Ecto.Queryable.to_query(unquote(query)) + escape_count = Ecto.Query.Builder.count_binds(query) + query + end + + tail = + tail + |> Enum.with_index(-length(tail)) + |> Enum.map(fn {{:pos, k, _}, count} -> {k, quote(do: escape_count + unquote(count))} end) + + vars = for {:pos, var, count} <- vars, do: {var, count} + {query, vars ++ tail} + end + end + + defp calculate_named_binds(query, []), do: {query, []} + defp calculate_named_binds(query, vars) do + assignments = + for {:named, key, name} <- vars do + quote do + unquote({key, [], __MODULE__}) = unquote(__MODULE__).count_alias!(query, unquote(name)) + end + end + + query = + quote do + query = Ecto.Queryable.to_query(unquote(query)) + unquote_splicing(assignments) + query + end + + pairs = + for {:named, key, _name} <- vars do + {key, {key, [], __MODULE__}} + end + + {query, pairs} + end + + @doc """ + Count the alias for the given query. 
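+
+  A sketch of the expected behaviour:
+
+      query = from p in Post, as: :post
+      Ecto.Query.Builder.count_alias!(query, :post)
+      #=> 0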
+ """ + def count_alias!(%{aliases: aliases} = query, name) do + case aliases do + %{^name => ix} -> + ix + + %{} -> + raise Ecto.QueryError, message: "unknown bind name `#{inspect name}`", query: query + end + end + + defp escape_bind({{{var, _, context}, ix}, _}) when is_atom(var) and is_atom(context), + do: {:pos, var, ix} + defp escape_bind({{var, _, context}, ix}) when is_atom(var) and is_atom(context), + do: {:pos, var, ix} + defp escape_bind({{name, {var, _, context}}, _ix}) when is_atom(name) and is_atom(var) and is_atom(context), + do: {:named, var, name} + defp escape_bind({{{:^, _, [expr]}, {var, _, context}}, _ix}) when is_atom(var) and is_atom(context), + do: {:named, var, expr} + defp escape_bind({bind, _ix}), + do: error!("binding list should contain only variables or " <> + "`{as, var}` tuples, got: #{Macro.to_string(bind)}") + + defp try_expansion(expr, type, params, vars, %Macro.Env{} = env) do + try_expansion(expr, type, params, vars, {env, &escape/5}) + end + + defp try_expansion(expr, type, params, vars, {env, fun}) do + case Macro.expand_once(expr, env) do + ^expr -> + error! """ + `#{Macro.to_string(expr)}` is not a valid query expression. + + * If you intended to call an Elixir function or introduce a value, + you need to explicitly interpolate it with ^ + + * If you intended to call a database function, please check the documentation + for Ecto.Query.API to see the supported database expressions + + * If you intended to extend Ecto's query DSL, make sure that you have required + the module or imported the relevant function. Note that you need macros to + extend Ecto's querying capabilities + """ + + expanded -> + fun.(expanded, type, params, vars, env) + end + end + + @doc """ + Finds the index value for the given var in vars or raises. + """ + def find_var!(var, vars) do + vars[var] || error! "unbound variable `#{var}` in query. If you are attempting to interpolate a value, use ^var" + end + + @doc """ + Checks if the field is an atom at compilation time or + delegate the check to runtime for interpolation. + """ + def quoted_atom!({:^, _, [expr]}, used_ref), + do: quote(do: Ecto.Query.Builder.atom!(unquote(expr), unquote(used_ref))) + + def quoted_atom!(atom, _used_ref) when is_atom(atom), + do: atom + + def quoted_atom!(other, used_ref), + do: + error!( + "expected literal atom or interpolated value in #{used_ref}, got: " <> + "`#{Macro.to_string(other)}`" + ) + + @doc """ + Called by escaper at runtime to verify that value is an atom. + """ + def atom!(atom, _used_ref) when is_atom(atom), + do: atom + + def atom!(other, used_ref), + do: error!("expected atom in #{used_ref}, got: `#{inspect other}`") + + defp escape_json_path(path) when is_list(path) do + Enum.map(path, &quoted_json_path_element!/1) + end + + defp escape_json_path(other) do + error!("expected JSON path to be compile-time list, got: `#{Macro.to_string(other)}`") + end + + defp quoted_json_path_element!({:^, _, [expr]}), + do: quote(do: Ecto.Query.Builder.json_path_element!(unquote(expr))) + + defp quoted_json_path_element!(binary) when is_binary(binary), + do: binary + + defp quoted_json_path_element!(integer) when is_integer(integer), + do: integer + + defp quoted_json_path_element!(other), + do: + error!( + "expected JSON path to contain literal strings, literal integers, or interpolated values, got: " <> + "`#{Macro.to_string(other)}`" + ) + + @doc """ + Called by escaper at runtime to verify that value is a string or an integer.
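+ + For example, an illustrative sketch (the path elements are hypothetical): + + iex> json_path_element!("name") + "name" + + iex> json_path_element!(0) + 0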
+ """ + def json_path_element!(binary) when is_binary(binary), + do: binary + def json_path_element!(integer) when is_integer(integer), + do: integer + def json_path_element!(other), + do: error!("expected string or integer in json_extract_path/2, got: `#{inspect other}`") + + @doc """ + Called by escaper at runtime to verify that a value is not nil. + """ + def not_nil!(nil) do + raise ArgumentError, "comparison with nil is forbidden as it is unsafe. " <> + "If you want to check if a value is nil, use is_nil/1 instead" + end + def not_nil!(not_nil) do + not_nil + end + + @doc """ + Checks if the field is a valid interval at compilation time or + delegate the check to runtime for interpolation. + """ + def quoted_interval!({:^, _, [expr]}), + do: quote(do: Ecto.Query.Builder.interval!(unquote(expr))) + def quoted_interval!(other), + do: interval!(other) + + @doc """ + Called by escaper at runtime to verify fragment keywords. + """ + def fragment!(kw) do + if Keyword.keyword?(kw) do + kw + else + raise ArgumentError, bad_fragment_message(inspect(kw)) + end + end + + @doc """ + Called by escaper at runtime to verify literal in fragments. + """ + def literal!(literal) do + if is_binary(literal) do + literal + else + raise ArgumentError, + "literal(^value) expects `value` to be a string, got `#{inspect(literal)}`" + end + end + + @doc """ + Called by escaper at runtime to verify that value is a valid interval. + """ + @interval ~w(year month week day hour minute second millisecond microsecond) + def interval!(interval) when interval in @interval, + do: interval + def interval!(other_string) when is_binary(other_string), + do: error!("invalid interval: `#{inspect other_string}` (expected one of #{Enum.join(@interval, ", ")})") + def interval!(not_string), + do: error!("invalid interval: `#{inspect not_string}` (expected a string)") + + @doc """ + Negates the given number. + """ + # TODO: Remove check when we depend on decimal v2.0 + if Code.ensure_loaded?(Decimal) and function_exported?(Decimal, :negate, 1) do + def negate!(%Decimal{} = decimal), do: Decimal.negate(decimal) + else + def negate!(%Decimal{} = decimal), do: Decimal.minus(decimal) + end + + def negate!(number) when is_number(number), do: -number + + @doc """ + Returns the type of an expression at build time. + """ + @spec quoted_type(Macro.t, Keyword.t) :: quoted_type + + # Fields + def quoted_type({{:., _, [{var, _, context}, field]}, _, []}, vars) + when is_atom(var) and is_atom(context) and is_atom(field), + do: {find_var!(var, vars), field} + + def quoted_type({:field, _, [{var, _, context}, field]}, vars) + when is_atom(var) and is_atom(context) and is_atom(field), + do: {find_var!(var, vars), field} + + # Unquoting code here means the second argument of field will + # always be unquoted twice, one by the type checking and another + # in the query itself. We are assuming this is not an issue + # as the solution is somewhat complicated. 
+ def quoted_type({:field, _, [{var, _, context}, {:^, _, [code]}]}, vars) + when is_atom(var) and is_atom(context), + do: {find_var!(var, vars), code} + + # Interval + def quoted_type({:datetime_add, _, [_, _, _]}, _vars), do: :naive_datetime + def quoted_type({:date_add, _, [_, _, _]}, _vars), do: :date + + # Tagged + def quoted_type({:<<>>, _, _}, _vars), do: :binary + def quoted_type({:type, _, [_, type]}, _vars), do: type + + # Sigils + def quoted_type({sigil, _, [_, []]}, _vars) when sigil in ~w(sigil_s sigil_S)a, do: :string + def quoted_type({sigil, _, [_, []]}, _vars) when sigil in ~w(sigil_w sigil_W)a, do: {:array, :string} + + # Lists + def quoted_type(list, vars) when is_list(list) do + case list |> Enum.map(&quoted_type(&1, vars)) |> Enum.uniq() do + [type] -> {:array, type} + _ -> {:array, :any} + end + end + + # Negative numbers + def quoted_type({:-, _, [number]}, _vars) when is_integer(number), do: :integer + def quoted_type({:-, _, [number]}, _vars) when is_float(number), do: :float + + # Dynamic aggregates + for {agg, arity} <- @dynamic_aggregates do + args = 1..arity |> Enum.map(fn _ -> Macro.var(:_, __MODULE__) end) |> tl() + + def quoted_type({unquote(agg), _, [expr, unquote_splicing(args)]}, vars) do + quoted_type(expr, vars) + end + end + + # Literals + def quoted_type(literal, _vars) when is_float(literal), do: :float + def quoted_type(literal, _vars) when is_binary(literal), do: :string + def quoted_type(literal, _vars) when is_boolean(literal), do: :boolean + def quoted_type(literal, _vars) when is_atom(literal) and not is_nil(literal), do: :atom + def quoted_type(literal, _vars) when is_integer(literal), do: :integer + + # Tuples + def quoted_type({left, right}, vars), do: quoted_type({:{}, [], [left, right]}, vars) + def quoted_type({:{}, _, elems}, vars), do: {:tuple, Enum.map(elems, &quoted_type(&1, vars))} + + def quoted_type({name, _, args}, _vars) when is_atom(name) and is_list(args) do + case call_type(name, length(args)) do + {_in, out} -> out + nil -> :any + end + end + + def quoted_type(_, _vars), do: :any + + defp get_env({env, _}), do: env + defp get_env(env), do: env + + @doc """ + Raises a query building error. + """ + def error!(message) when is_binary(message) do + {:current_stacktrace, [_|t]} = Process.info(self(), :current_stacktrace) + + t = Enum.drop_while t, fn + {mod, _, _, _} -> + String.starts_with?(Atom.to_string(mod), ["Elixir.Ecto.Query.", "Elixir.Enum"]) + _ -> + false + end + + reraise Ecto.Query.CompileError, [message: message], t + end + + @doc """ + Counts the bindings in a query expression. + + ## Examples + + iex> count_binds(%Ecto.Query{joins: [1,2,3]}) + 4 + + """ + @spec count_binds(Ecto.Query.t) :: non_neg_integer + def count_binds(%Query{joins: joins}) do + 1 + length(joins) + end + + @doc """ + Bump interpolations by the length of parameters. + """ + def bump_interpolations(expr, []), do: expr + + def bump_interpolations(expr, params) do + len = length(params) + + Macro.prewalk(expr, fn + {:^, meta, [counter]} when is_integer(counter) -> {:^, meta, [len + counter]} + other -> other + end) + end + + @doc """ + Applies a query at compilation time or at runtime. + + This function is responsible for checking if a given query is an + `Ecto.Query` struct at compile time. If it is not it will act + accordingly. + + If a query is available, it invokes the `apply` function in the + given `module`, otherwise, it delegates the call to runtime. + + It is important to keep in mind the complexities introduced + by this function.
In particular, a %Query{} is a mixture of escaped + and unescaped expressions which makes it impossible for this + function to properly escape or unescape it at compile/runtime. + For this reason, the apply function should be ready to handle + arguments in both escaped and unescaped form. + + For example, take into account the `Builder.OrderBy`: + + select = %Ecto.Query.QueryExpr{expr: expr, file: env.file, line: env.line} + Builder.apply_query(query, __MODULE__, [order_by], env) + + `expr` is already an escaped expression and we must not escape + it again. However, it is wrapped in an Ecto.Query.QueryExpr, + which must be escaped! Furthermore, the `apply/2` function + in `Builder.OrderBy` very likely will inject the QueryExpr inside + Query, which again, is a mixture of escaped and unescaped expressions. + + That said, you need to obey the following rules: + + 1. In order to call this function, the arguments must be escapable + values supported by the `escape/1` function below; + + 2. The apply function may not manipulate the given arguments, + with exception to the query. + + In particular, when invoked at compilation time, all arguments + (except the query) will be escaped, so they can be injected into + the query properly, but they will be in their runtime form + when invoked at runtime. + """ + @spec apply_query(Macro.t, Macro.t, Macro.t, Macro.Env.t) :: Macro.t + def apply_query(query, module, args, env) do + case Macro.expand(query, env) |> unescape_query() do + %Query{} = compiletime_query -> + apply(module, :apply, [compiletime_query | args]) + |> escape_query() + + runtime_query -> + quote do + # Unquote the query before `module.apply()` for any binding variable. + query = unquote(runtime_query) + unquote(module).apply(query, unquote_splicing(args)) + end + end + end + + # Unescapes an `Ecto.Query` struct. + @spec unescape_query(Macro.t) :: Query.t | Macro.t + defp unescape_query({:%, _, [Query, {:%{}, _, list}]}) do + struct(Query, list) + end + defp unescape_query({:%{}, _, list} = ast) do + if List.keyfind(list, :__struct__, 0) == {:__struct__, Query} do + Map.new(list) + else + ast + end + end + defp unescape_query(other) do + other + end + + # Escapes an `Ecto.Query` and associated structs. + @spec escape_query(Query.t) :: Macro.t + defp escape_query(%Query{} = query), do: {:%{}, [], Map.to_list(query)} + + defp parse_access_get({{:., _, [Access, :get]}, _, [left, right]}, acc) do + parse_access_get(left, [right | acc]) + end + + defp parse_access_get({{:., _, [{var, _, context}, field]}, _, []} = expr, acc) + when is_atom(var) and is_atom(context) and is_atom(field) do + {expr, acc} + end +end diff --git a/deps/ecto/lib/ecto/query/builder/combination.ex b/deps/ecto/lib/ecto/query/builder/combination.ex new file mode 100644 index 0000000..00f1306 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/combination.ex @@ -0,0 +1,36 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Combination do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(atom, Macro.t, Macro.t, Macro.Env.t) :: Macro.t + def build(kind, query, {:^, _, [expr]}, env) do + expr = quote do: Ecto.Queryable.to_query(unquote(expr)) + Builder.apply_query(query, __MODULE__, [[{kind, expr}]], env) + end + + def build(kind, _query, other, _env) do + Builder.error! 
"`#{Macro.to_string(other)}` is not a valid #{kind}. " <> + "#{kind} must always be an interpolated query, such as ^existing_query" + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t + def apply(%Ecto.Query{combinations: combinations} = query, value) do + %{query | combinations: combinations ++ value} + end + def apply(query, value) do + apply(Ecto.Queryable.to_query(query), value) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/cte.ex b/deps/ecto/lib/ecto/query/builder/cte.ex new file mode 100644 index 0000000..3879b08 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/cte.ex @@ -0,0 +1,87 @@ +import Kernel, except: [apply: 3] + +defmodule Ecto.Query.Builder.CTE do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes the CTE name. + + iex> escape(quote(do: "FOO"), __ENV__) + "FOO" + + """ + @spec escape(Macro.t, Macro.Env.t) :: Macro.t + def escape(name, _env) when is_bitstring(name), do: name + + def escape({:^, _, [expr]}, _env), do: expr + + def escape(expr, env) do + case Macro.expand_once(expr, env) do + ^expr -> + Builder.error! "`#{Macro.to_string(expr)}` is not a valid CTE name. " <> + "It must be a literal string or an interpolated variable." + + expr -> + escape(expr, env) + end + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(Macro.t, Macro.t, Macro.t, Macro.Env.t) :: Macro.t + def build(query, name, cte, env) do + Builder.apply_query(query, __MODULE__, [escape(name, env), build_cte(name, cte, env)], env) + end + + @spec build_cte(Macro.t, Macro.t, Macro.Env.t) :: Macro.t + def build_cte(_name, {:^, _, [expr]}, _env) do + quote do: Ecto.Queryable.to_query(unquote(expr)) + end + + def build_cte(_name, {:fragment, _, _} = fragment, env) do + {expr, {params, :acc}} = Builder.escape(fragment, :any, {[], :acc}, [], env) + params = Builder.escape_params(params) + + quote do + %Ecto.Query.QueryExpr{ + expr: unquote(expr), + params: unquote(params), + file: unquote(env.file), + line: unquote(env.line) + } + end + end + + def build_cte(name, cte, env) do + case Macro.expand_once(cte, env) do + ^cte -> + Builder.error! "`#{Macro.to_string(cte)}` is not a valid CTE (named: #{Macro.to_string(name)}). " <> + "The CTE must be an interpolated query, such as ^existing_query or a fragment." + + cte -> + build_cte(name, cte, env) + end + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, bitstring, Ecto.Queryable.t) :: Ecto.Query.t + def apply(%Ecto.Query{with_ctes: with_expr} = query, name, with_query) do + with_expr = with_expr || %Ecto.Query.WithExpr{} + queries = List.keystore(with_expr.queries, name, 0, {name, with_query}) + with_expr = %{with_expr | queries: queries} + %{query | with_ctes: with_expr} + end + + def apply(query, name, with_query) do + apply(Ecto.Queryable.to_query(query), name, with_query) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/distinct.ex b/deps/ecto/lib/ecto/query/builder/distinct.ex new file mode 100644 index 0000000..e3286fa --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/distinct.ex @@ -0,0 +1,81 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Distinct do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes a list of quoted expressions. 
+ + iex> escape(quote do true end, {[], :acc}, [], __ENV__) + {true, {[], :acc}} + + iex> escape(quote do [x.x, 13] end, {[], :acc}, [x: 0], __ENV__) + {[asc: {:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [0]]}, :x]]}, [], []]}, + asc: 13], + {[], :acc}} + + """ + @spec escape(Macro.t, {list, term}, Keyword.t, Macro.Env.t) :: {Macro.t, {list, term}} + def escape(expr, params_acc, _vars, _env) when is_boolean(expr) do + {expr, params_acc} + end + + def escape(expr, params_acc, vars, env) do + Builder.OrderBy.escape(:distinct, expr, params_acc, vars, env) + end + + @doc """ + Called at runtime to verify distinct. + """ + def distinct!(query, distinct, file, line) when is_boolean(distinct) do + apply(query, %Ecto.Query.QueryExpr{expr: distinct, params: [], line: line, file: file}) + end + def distinct!(query, distinct, file, line) do + {expr, params} = Builder.OrderBy.order_by_or_distinct!(:distinct, query, distinct, []) + expr = %Ecto.Query.QueryExpr{expr: expr, params: Enum.reverse(params), line: line, file: file} + apply(query, expr) + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(query, _binding, {:^, _, [var]}, env) do + quote do + Ecto.Query.Builder.Distinct.distinct!(unquote(query), unquote(var), unquote(env.file), unquote(env.line)) + end + end + + def build(query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {expr, {params, _}} = escape(expr, {[], :acc}, binding, env) + params = Builder.escape_params(params) + + distinct = quote do: %Ecto.Query.QueryExpr{ + expr: unquote(expr), + params: unquote(params), + file: unquote(env.file), + line: unquote(env.line)} + Builder.apply_query(query, __MODULE__, [distinct], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t + def apply(%Ecto.Query{distinct: nil} = query, expr) do + %{query | distinct: expr} + end + def apply(%Ecto.Query{}, _expr) do + Builder.error! "only one distinct expression is allowed in query" + end + def apply(query, expr) do + apply(Ecto.Queryable.to_query(query), expr) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/dynamic.ex b/deps/ecto/lib/ecto/query/builder/dynamic.ex new file mode 100644 index 0000000..723a1bf --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/dynamic.ex @@ -0,0 +1,76 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Dynamic do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Builds a dynamic expression. + """ + @spec build([Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(binding, expr, env) do + {query, vars} = Builder.escape_binding(quote(do: query), binding, env) + {expr, {params, subqueries}} = Builder.escape(expr, :any, {[], []}, vars, env) + params = Builder.escape_params(params) + + quote do + %Ecto.Query.DynamicExpr{fun: fn query -> + _ = unquote(query) + {unquote(expr), unquote(params), unquote(subqueries)} + end, + binding: unquote(Macro.escape(binding)), + file: unquote(env.file), + line: unquote(env.line)} + end + end + + @doc """ + Expands a dynamic expression for insertion into the given query. 
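+ + It returns `{expr, binding, params, subqueries, file, line}`, with the accumulated params and subqueries reversed back into source order (a descriptive note based on the implementation below).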
+ """ + def fully_expand(query, %{file: file, line: line, binding: binding} = dynamic) do + {expr, {binding, params, subqueries, _count}} = expand(query, dynamic, {binding, [], [], 0}) + {expr, binding, Enum.reverse(params), Enum.reverse(subqueries), file, line} + end + + @doc """ + Expands a dynamic expression as part of an existing expression. + + Any dynamic expression parameter is prepended and the parameters + list is not reversed. This is useful when the dynamic expression + is given in the middle of an expression. + """ + def partially_expand(kind, query, %{binding: binding} = dynamic, params, count) do + {expr, {_binding, params, subqueries, count}} = expand(query, dynamic, {binding, params, [], count}) + + if subqueries != [] do + raise ArgumentError, "subqueries are not allowed in `#{kind}` expressions" + end + + {expr, params, count} + end + + defp expand(query, %{fun: fun}, {binding, params, subqueries, count}) do + {dynamic_expr, dynamic_params, dynamic_subqueries} = fun.(query) + + Macro.postwalk(dynamic_expr, {binding, params, subqueries, count}, fn + {:^, meta, [ix]}, {binding, params, subqueries, count} -> + case Enum.fetch!(dynamic_params, ix) do + {%Ecto.Query.DynamicExpr{binding: new_binding} = dynamic, _} -> + binding = if length(new_binding) > length(binding), do: new_binding, else: binding + expand(query, dynamic, {binding, params, subqueries, count}) + + param -> + {{:^, meta, [count]}, {binding, [param | params], subqueries, count + 1}} + end + + {:subquery, i}, {binding, params, subqueries, count} -> + subquery = Enum.fetch!(dynamic_subqueries, i) + ix = length(subqueries) + {{:subquery, ix}, {binding, [{:subquery, ix} | params], [subquery | subqueries], count + 1}} + + expr, acc -> + {expr, acc} + end) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/filter.ex b/deps/ecto/lib/ecto/query/builder/filter.ex new file mode 100644 index 0000000..5f88dcd --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/filter.ex @@ -0,0 +1,196 @@ +import Kernel, except: [apply: 3] + +defmodule Ecto.Query.Builder.Filter do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes a where or having clause. + + It allows query expressions that evaluate to a boolean + or a keyword list of field names and values. In a keyword + list multiple key value pairs will be joined with "and". + + Returned is `{expression, {params, subqueries}}` which is + a valid escaped expression, see `Macro.escape/2`. Both params + and subqueries are reversed. + """ + @spec escape(:where | :having | :on, Macro.t, non_neg_integer, Keyword.t, Macro.Env.t) :: {Macro.t, {list, list}} + def escape(_kind, [], _binding, _vars, _env) do + {true, {[], []}} + end + + def escape(kind, expr, binding, vars, env) when is_list(expr) do + {parts, params_subqueries} = + Enum.map_reduce(expr, {[], []}, fn + {field, nil}, _params_subqueries -> + Builder.error! "nil given for `#{field}`. Comparison with nil is forbidden as it is unsafe. " <> + "Instead write a query with is_nil/1, for example: is_nil(s.#{field})" + + {field, value}, params_subqueries when is_atom(field) -> + value = check_for_nils(value, field) + {value, params_subqueries} = Builder.escape(value, {binding, field}, params_subqueries, vars, env) + {{:{}, [], [:==, [], [to_escaped_field(binding, field), value]]}, params_subqueries} + + _, _params_subqueries -> + Builder.error! "expected a keyword list at compile time in #{kind}, " <> + "got: `#{Macro.to_string expr}`. 
If you would like to " <> + "pass a list dynamically, please interpolate the whole list with ^" + end) + + expr = Enum.reduce parts, &{:{}, [], [:and, [], [&2, &1]]} + {expr, params_subqueries} + end + + def escape(_kind, expr, _binding, vars, env) do + Builder.escape(expr, :boolean, {[], []}, vars, env) + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(:where | :having, :and | :or, Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(kind, op, query, _binding, {:^, _, [var]}, env) do + quote do + Ecto.Query.Builder.Filter.filter!(unquote(kind), unquote(op), unquote(query), + unquote(var), 0, unquote(env.file), unquote(env.line)) + end + end + + def build(kind, op, query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {expr, {params, subqueries}} = escape(kind, expr, 0, binding, env) + + params = Builder.escape_params(params) + subqueries = Enum.reverse(subqueries) + + expr = quote do: %Ecto.Query.BooleanExpr{ + expr: unquote(expr), + op: unquote(op), + params: unquote(params), + subqueries: unquote(subqueries), + file: unquote(env.file), + line: unquote(env.line)} + Builder.apply_query(query, __MODULE__, [kind, expr], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, :where | :having, term) :: Ecto.Query.t + def apply(query, _, %{expr: true}) do + query + end + def apply(%Ecto.Query{wheres: wheres} = query, :where, expr) do + %{query | wheres: wheres ++ [expr]} + end + def apply(%Ecto.Query{havings: havings} = query, :having, expr) do + %{query | havings: havings ++ [expr]} + end + def apply(query, kind, expr) do + apply(Ecto.Queryable.to_query(query), kind, expr) + end + + @doc """ + Builds a filter based on the given arguments. + + This is shared by having, where and join's on expressions. + """ + def filter!(kind, query, %Ecto.Query.DynamicExpr{} = dynamic, _binding, _file, _line) do + {expr, _binding, params, subqueries, file, line} = + Ecto.Query.Builder.Dynamic.fully_expand(query, dynamic) + + if subqueries != [] do + raise ArgumentError, "subqueries are not allowed in `#{kind}` expressions" + end + + {expr, params, file, line} + end + + def filter!(_kind, _query, bool, _binding, file, line) when is_boolean(bool) do + {bool, [], file, line} + end + + def filter!(kind, _query, kw, binding, file, line) when is_list(kw) do + {expr, params} = kw!(kind, kw, binding) + {expr, params, file, line} + end + + def filter!(kind, _query, other, _binding, _file, _line) do + raise ArgumentError, "expected a keyword list or dynamic expression in `#{kind}`, got: `#{inspect other}`" + end + + @doc """ + Builds the filter and applies it to the given query as boolean operator. 
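+ + As an illustrative sketch (the `:title` field and value are hypothetical), `filter!(:where, :and, query, [title: "hello"], 0, file, line)` escapes the keyword list into a `title == ^"hello"` comparison and appends it to `query.wheres` as an `%Ecto.Query.BooleanExpr{op: :and}`.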
+ """ + def filter!(:where, op, query, %Ecto.Query.DynamicExpr{} = dynamic, _binding, _file, _line) do + {expr, _binding, params, subqueries, file, line} = + Ecto.Query.Builder.Dynamic.fully_expand(query, dynamic) + + boolean = %Ecto.Query.BooleanExpr{ + expr: expr, + params: params, + line: line, + file: file, + op: op, + subqueries: subqueries + } + + apply(query, :where, boolean) + end + + def filter!(kind, op, query, expr, binding, file, line) do + {expr, params, file, line} = filter!(kind, query, expr, binding, file, line) + boolean = %Ecto.Query.BooleanExpr{expr: expr, params: params, line: line, file: file, op: op} + apply(query, kind, boolean) + end + + defp kw!(kind, kw, binding) do + case kw!(kw, binding, 0, [], [], kind, kw) do + {[], params} -> {true, params} + {parts, params} -> {Enum.reduce(parts, &{:and, [], [&2, &1]}), params} + end + end + + defp kw!([{field, nil}|_], _binding, _counter, _exprs, _params, _kind, _original) when is_atom(field) do + raise ArgumentError, "nil given for #{inspect field}. Comparison with nil is forbidden as it is unsafe. " <> + "Instead write a query with is_nil/1, for example: is_nil(s.#{field})" + end + defp kw!([{field, value}|t], binding, counter, exprs, params, kind, original) when is_atom(field) do + kw!(t, binding, counter + 1, + [{:==, [], [to_field(binding, field), {:^, [], [counter]}]}|exprs], + [{value, {binding, field}}|params], + kind, original) + end + defp kw!([], _binding, _counter, exprs, params, _kind, _original) do + {Enum.reverse(exprs), Enum.reverse(params)} + end + defp kw!(_, _binding, _counter, _exprs, _params, kind, original) do + raise ArgumentError, "expected a keyword list in `#{kind}`, got: `#{inspect original}`" + end + + defp to_field(binding, field), + do: {{:., [], [{:&, [], [binding]}, field]}, [], []} + defp to_escaped_field(binding, field), + do: {:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [binding]]}, field]]}, [], []]} + + defp check_for_nils({:^, _, [var]}, field) do + quote do + ^Ecto.Query.Builder.Filter.not_nil!(unquote(var), unquote(field)) + end + end + + defp check_for_nils(value, _field), do: value + + def not_nil!(nil, field) do + raise ArgumentError, "nil given for `#{field}`. comparison with nil is forbidden as it is unsafe. " <> + "Instead write a query with is_nil/1, for example: is_nil(s.#{field})" + end + + def not_nil!(other, _field), do: other +end diff --git a/deps/ecto/lib/ecto/query/builder/from.ex b/deps/ecto/lib/ecto/query/builder/from.ex new file mode 100644 index 0000000..e3f891f --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/from.ex @@ -0,0 +1,185 @@ +defmodule Ecto.Query.Builder.From do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Handles from expressions. + + The expressions may either contain an `in` expression or not. + The right side is always expected to Queryable. 
+ + ## Examples + + iex> escape(quote(do: MySchema), __ENV__) + {quote(do: MySchema), []} + + iex> escape(quote(do: p in posts), __ENV__) + {quote(do: posts), [p: 0]} + + iex> escape(quote(do: p in {"posts", MySchema}), __ENV__) + {quote(do: {"posts", MySchema}), [p: 0]} + + iex> escape(quote(do: [p, q] in posts), __ENV__) + {quote(do: posts), [p: 0, q: 1]} + + iex> escape(quote(do: [_, _] in abc), __ENV__) + {quote(do: abc), [_: 0, _: 1]} + + iex> escape(quote(do: other), __ENV__) + {quote(do: other), []} + + iex> escape(quote(do: x() in other), __ENV__) + ** (Ecto.Query.CompileError) binding list should contain only variables or `{as, var}` tuples, got: x() + + """ + @spec escape(Macro.t(), Macro.Env.t()) :: {Macro.t(), Keyword.t()} + def escape({:in, _, [var, query]}, env) do + Builder.escape_binding(query, List.wrap(var), env) + end + + def escape(query, _env) do + {query, []} + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(Macro.t(), Macro.Env.t(), atom, String.t | nil, nil | {:ok, String.t | nil} | [String.t]) :: + {Macro.t(), Keyword.t(), non_neg_integer | nil} + def build(query, env, as, prefix, maybe_hints) do + hints = List.wrap(maybe_hints) + + unless Enum.all?(hints, &is_valid_hint/1) do + Builder.error!( + "`hints` must be a compile time string, list of strings, or a tuple " <> + "got: `#{Macro.to_string(maybe_hints)}`" + ) + end + + case prefix do + nil -> :ok + {:ok, prefix} when is_binary(prefix) or is_nil(prefix) -> :ok + _ -> Builder.error!("`prefix` must be a compile time string, got: `#{Macro.to_string(prefix)}`") + end + + as = case as do + {:^, _, [as]} -> as + as when is_atom(as) -> as + as -> Builder.error!("`as` must be a compile time atom or an interpolated value using ^, got: #{Macro.to_string(as)}") + end + + {query, binds} = escape(query, env) + + case expand_from(query, env) do + schema when is_atom(schema) -> + # Get the source at runtime so no unnecessary compile time + # dependencies between modules are added + source = quote(do: unquote(schema).__schema__(:source)) + {:ok, prefix} = prefix || {:ok, quote(do: unquote(schema).__schema__(:prefix))} + {query(prefix, source, schema, as, hints), binds, 1} + + source when is_binary(source) -> + {:ok, prefix} = prefix || {:ok, nil} + # When a binary is used, there is no schema + {query(prefix, source, nil, as, hints), binds, 1} + + {source, schema} when is_binary(source) and is_atom(schema) -> + {:ok, prefix} = prefix || {:ok, quote(do: unquote(schema).__schema__(:prefix))} + {query(prefix, source, schema, as, hints), binds, 1} + + _other -> + quoted = quote do + Ecto.Query.Builder.From.apply(unquote(query), unquote(length(binds)), unquote(as), unquote(prefix), unquote(hints)) + end + + {quoted, binds, nil} + end + end + + defp query(prefix, source, schema, as, hints) do + aliases = if as, do: [{as, 0}], else: [] + from_fields = [source: {source, schema}, as: as, prefix: prefix, hints: hints] + + query_fields = [ + from: {:%, [], [Ecto.Query.FromExpr, {:%{}, [], from_fields}]}, + aliases: {:%{}, [], aliases} + ] + + {:%, [], [Ecto.Query, {:%{}, [], query_fields}]} + end + + defp expand_from({left, right}, env) do + {left, Macro.expand(right, env)} + end + + defp expand_from(other, env) do + Macro.expand(other, env) + end + + @doc """ + The callback applied by `build/2` to build the query. 
+ """ + @spec apply(Ecto.Queryable.t(), non_neg_integer, Macro.t(), {:ok, String.t} | nil, [String.t]) :: Ecto.Query.t() + def apply(query, binds, as, prefix, hints) do + query = + query + |> Ecto.Queryable.to_query() + |> maybe_apply_as(as) + |> maybe_apply_prefix(prefix) + |> maybe_apply_hints(hints) + + check_binds(query, binds) + query + end + + defp maybe_apply_as(query, nil), do: query + + defp maybe_apply_as(%{from: %{as: from_as}}, as) when not is_nil(from_as) do + Builder.error!( + "can't apply alias `#{inspect(as)}`, binding in `from` is already aliased to `#{inspect(from_as)}`" + ) + end + + defp maybe_apply_as(%{from: from, aliases: aliases} = query, as) do + if Map.has_key?(aliases, as) do + Builder.error!("alias `#{inspect(as)}` already exists") + else + %{query | aliases: Map.put(aliases, as, 0), from: %{from | as: as}} + end + end + + defp maybe_apply_prefix(query, nil), do: query + + defp maybe_apply_prefix(query, {:ok, prefix}) do + update_in query.from.prefix, fn + nil -> + prefix + + from_prefix -> + Builder.error!( + "can't apply prefix `#{inspect(prefix)}`, `from` is already prefixed to `#{inspect(from_prefix)}`" + ) + end + end + + defp maybe_apply_hints(query, []), do: query + defp maybe_apply_hints(query, hints), do: update_in(query.from.hints, &(&1 ++ hints)) + + defp is_valid_hint(hint) when is_binary(hint), do: true + defp is_valid_hint({_key, _val}), do: true + defp is_valid_hint(_), do: false + + defp check_binds(query, count) do + if count > 1 and count > Builder.count_binds(query) do + Builder.error!( + "`from` in query expression specified #{count} " <> + "binds but query contains #{Builder.count_binds(query)} binds" + ) + end + end +end diff --git a/deps/ecto/lib/ecto/query/builder/group_by.ex b/deps/ecto/lib/ecto/query/builder/group_by.ex new file mode 100644 index 0000000..eaf258b --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/group_by.ex @@ -0,0 +1,117 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.GroupBy do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes a list of quoted expressions. + + See `Ecto.Builder.escape/2`. + + iex> escape(:group_by, quote do [x.x, 13] end, {[], :acc}, [x: 0], __ENV__) + {[{:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [0]]}, :x]]}, [], []]}, + 13], + {[], :acc}} + """ + @spec escape(:group_by | :partition_by, Macro.t, {list, term}, Keyword.t, Macro.Env.t) :: + {Macro.t, {list, term}} + def escape(kind, expr, params_acc, vars, env) do + expr + |> List.wrap + |> Enum.map_reduce(params_acc, &do_escape(&1, &2, kind, vars, env)) + end + + defp do_escape({:^, _, [expr]}, params_acc, kind, _vars, _env) do + {quote(do: Ecto.Query.Builder.GroupBy.field!(unquote(kind), unquote(expr))), params_acc} + end + + defp do_escape(field, params_acc, _kind, _vars, _env) when is_atom(field) do + {Macro.escape(to_field(field)), params_acc} + end + + defp do_escape(expr, params_acc, _kind, vars, env) do + Builder.escape(expr, :any, params_acc, vars, env) + end + + @doc """ + Called at runtime to verify a field. + """ + def field!(_kind, field) when is_atom(field), + do: to_field(field) + def field!(kind, other) do + raise ArgumentError, + "expected a field as an atom in `#{kind}`, got: `#{inspect other}`" + end + + @doc """ + Shared between group_by and partition_by. 
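+ + An illustrative sketch with a literal field (`:title` is a hypothetical field): + + iex> group_or_partition_by!(:group_by, %Ecto.Query{}, [:title], []) + {[{{:., [], [{:&, [], [0]}, :title]}, [], []}], []}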
+ """ + def group_or_partition_by!(kind, query, exprs, params) do + {expr, {params, _}} = + Enum.map_reduce(List.wrap(exprs), {params, length(params)}, fn + field, params_count when is_atom(field) -> + {to_field(field), params_count} + + %Ecto.Query.DynamicExpr{} = dynamic, {params, count} -> + {expr, params, count} = Builder.Dynamic.partially_expand(kind, query, dynamic, params, count) + {expr, {params, count}} + + other, _params_count -> + raise ArgumentError, + "expected a list of fields and dynamics in `#{kind}`, got: `#{inspect other}`" + end) + + {expr, params} + end + + defp to_field(field), do: {{:., [], [{:&, [], [0]}, field]}, [], []} + + @doc """ + Called at runtime to assemble group_by. + """ + def group_by!(query, group_by, file, line) do + {expr, params} = group_or_partition_by!(:group_by, query, group_by, []) + expr = %Ecto.Query.QueryExpr{expr: expr, params: Enum.reverse(params), line: line, file: file} + apply(query, expr) + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(query, _binding, {:^, _, [var]}, env) do + quote do + Ecto.Query.Builder.GroupBy.group_by!(unquote(query), unquote(var), unquote(env.file), unquote(env.line)) + end + end + + def build(query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {expr, {params, _}} = escape(:group_by, expr, {[], :acc}, binding, env) + params = Builder.escape_params(params) + + group_by = quote do: %Ecto.Query.QueryExpr{ + expr: unquote(expr), + params: unquote(params), + file: unquote(env.file), + line: unquote(env.line)} + Builder.apply_query(query, __MODULE__, [group_by], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t + def apply(%Ecto.Query{group_bys: group_bys} = query, expr) do + %{query | group_bys: group_bys ++ [expr]} + end + def apply(query, expr) do + apply(Ecto.Queryable.to_query(query), expr) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/join.ex b/deps/ecto/lib/ecto/query/builder/join.ex new file mode 100644 index 0000000..1db4f97 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/join.ex @@ -0,0 +1,318 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Join do + @moduledoc false + + alias Ecto.Query.Builder + alias Ecto.Query.{JoinExpr, QueryExpr} + + @doc """ + Escapes a join expression (not including the `on` expression). + + It returns a tuple containing the binds, the on expression (if available) + and the association expression. 
+ + ## Examples + + iex> escape(quote(do: x in "foo"), [], __ENV__) + {:x, {"foo", nil}, nil, []} + + iex> escape(quote(do: "foo"), [], __ENV__) + {:_, {"foo", nil}, nil, []} + + iex> escape(quote(do: x in Sample), [], __ENV__) + {:x, {nil, Sample}, nil, []} + + iex> escape(quote(do: x in __MODULE__), [], __ENV__) + {:x, {nil, __MODULE__}, nil, []} + + iex> escape(quote(do: x in {"foo", :sample}), [], __ENV__) + {:x, {"foo", :sample}, nil, []} + + iex> escape(quote(do: x in {"foo", Sample}), [], __ENV__) + {:x, {"foo", Sample}, nil, []} + + iex> escape(quote(do: x in {"foo", __MODULE__}), [], __ENV__) + {:x, {"foo", __MODULE__}, nil, []} + + iex> escape(quote(do: c in assoc(p, :comments)), [p: 0], __ENV__) + {:c, nil, {0, :comments}, []} + + iex> escape(quote(do: x in fragment("foo")), [], __ENV__) + {:x, {:{}, [], [:fragment, [], [raw: "foo"]]}, nil, []} + + """ + @spec escape(Macro.t, Keyword.t, Macro.Env.t) :: {atom, Macro.t | nil, Macro.t | nil, list} + def escape({:in, _, [{var, _, context}, expr]}, vars, env) + when is_atom(var) and is_atom(context) do + {_, expr, assoc, params} = escape(expr, vars, env) + {var, expr, assoc, params} + end + + def escape({:subquery, _, [expr]}, _vars, _env) do + {:_, quote(do: Ecto.Query.subquery(unquote(expr))), nil, []} + end + + def escape({:subquery, _, [expr, opts]}, _vars, _env) do + {:_, quote(do: Ecto.Query.subquery(unquote(expr), unquote(opts))), nil, []} + end + + def escape({:fragment, _, [_ | _]} = expr, vars, env) do + {expr, {params, :acc}} = Builder.escape(expr, :any, {[], :acc}, vars, env) + {:_, expr, nil, params} + end + + def escape({string, schema} = join, _vars, env) when is_binary(string) do + case Macro.expand(schema, env) do + schema when is_atom(schema) -> + {:_, {string, schema}, nil, []} + + _ -> + Builder.error! "malformed join `#{Macro.to_string(join)}` in query expression" + end + end + + def escape({:assoc, _, [{var, _, context}, field]}, vars, _env) + when is_atom(var) and is_atom(context) do + ensure_field!(field) + var = Builder.find_var!(var, vars) + field = Builder.quoted_atom!(field, "field/2") + {:_, nil, {var, field}, []} + end + + def escape({:^, _, [expr]}, _vars, _env) do + {:_, quote(do: Ecto.Query.Builder.Join.join!(unquote(expr))), nil, []} + end + + def escape(string, _vars, _env) when is_binary(string) do + {:_, {string, nil}, nil, []} + end + + def escape(schema, _vars, _env) when is_atom(schema) do + {:_, {nil, schema}, nil, []} + end + + def escape(join, vars, env) do + case Macro.expand(join, env) do + ^join -> + Builder.error! "malformed join `#{Macro.to_string(join)}` in query expression" + join -> + escape(join, vars, env) + end + end + + @doc """ + Called at runtime to check dynamic joins. + """ + def join!(expr) when is_atom(expr), + do: {nil, expr} + def join!(expr) when is_binary(expr), + do: {expr, nil} + def join!({source, module}) when is_binary(source) and is_atom(module), + do: {source, module} + def join!(expr), + do: Ecto.Queryable.to_query(expr) + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. 
+ """ + @spec build(Macro.t, atom, [Macro.t], Macro.t, Macro.t, Macro.t, atom, nil | {:ok, String.t | nil}, nil | String.t | [String.t], Macro.Env.t) :: + {Macro.t, Keyword.t, non_neg_integer | nil} + def build(query, qual, binding, expr, count_bind, on, as, prefix, maybe_hints, env) do + {:ok, prefix} = prefix || {:ok, nil} + hints = List.wrap(maybe_hints) + + unless Enum.all?(hints, &is_binary/1) do + Builder.error!( + "`hints` must be a compile time string or list of strings, " <> + "got: `#{Macro.to_string(maybe_hints)}`" + ) + end + + unless is_binary(prefix) or is_nil(prefix) do + Builder.error! "`prefix` must be a compile time string, got: `#{Macro.to_string(prefix)}`" + end + + as = case as do + {:^, _, [as]} -> as + as when is_atom(as) -> as + as -> Builder.error!("`as` must be a compile time atom or an interpolated value using ^, got: #{Macro.to_string(as)}") + end + + {query, binding} = Builder.escape_binding(query, binding, env) + {join_bind, join_source, join_assoc, join_params} = escape(expr, binding, env) + join_params = Builder.escape_params(join_params) + + join_qual = validate_qual(qual) + validate_bind(join_bind, binding) + + {count_bind, query} = + if is_nil(count_bind) do + query = + quote do + query = Ecto.Queryable.to_query(unquote(query)) + join_count = Builder.count_binds(query) + query + end + {quote(do: join_count), query} + else + {count_bind, query} + end + + binding = binding ++ [{join_bind, count_bind}] + + next_bind = + if is_integer(count_bind) do + count_bind + 1 + else + quote(do: unquote(count_bind) + 1) + end + + join = [ + as: as, + assoc: join_assoc, + file: env.file, + line: env.line, + params: join_params, + prefix: prefix, + qual: join_qual, + source: join_source, + hints: hints + ] + + query = build_on(on || true, join, as, query, binding, count_bind, env) + {query, binding, next_bind} + end + + def build_on({:^, _, [var]}, join, as, query, _binding, count_bind, env) do + quote do + query = unquote(query) + + Ecto.Query.Builder.Join.join!( + query, + %JoinExpr{unquote_splicing(join), on: %QueryExpr{}}, + unquote(var), + unquote(as), + unquote(count_bind), + unquote(env.file), + unquote(env.line) + ) + end + end + + def build_on(on, join, as, query, binding, count_bind, env) do + case Ecto.Query.Builder.Filter.escape(:on, on, count_bind, binding, env) do + {on_expr, {on_params, []}} -> + on_params = Builder.escape_params(on_params) + + join = + quote do + %JoinExpr{ + unquote_splicing(join), + on: %QueryExpr{ + expr: unquote(on_expr), + params: unquote(on_params), + line: unquote(env.line), + file: unquote(env.file) + } + } + end + + Builder.apply_query(query, __MODULE__, [join, as, count_bind], env) + + _pattern -> + raise ArgumentError, "invalid expression for join `:on`, subqueries aren't supported" + end + end + + @doc """ + Applies the join expression to the query. + """ + def apply(%Ecto.Query{joins: joins} = query, expr, nil, _count_bind) do + %{query | joins: joins ++ [expr]} + end + def apply(%Ecto.Query{joins: joins, aliases: aliases} = query, expr, as, count_bind) do + aliases = + case aliases do + %{} -> runtime_aliases(aliases, as, count_bind) + _ -> compile_aliases(aliases, as, count_bind) + end + + %{query | joins: joins ++ [expr], aliases: aliases} + end + def apply(query, expr, as, count_bind) do + apply(Ecto.Queryable.to_query(query), expr, as, count_bind) + end + + @doc """ + Called at runtime to build aliases. 
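+ + An illustrative sketch (the alias names are hypothetical): + + iex> runtime_aliases(%{posts: 0}, :comments, 1) + %{comments: 1, posts: 0} + + iex> runtime_aliases(%{posts: 0}, nil, 1) + %{posts: 0}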
+ """ + def runtime_aliases(aliases, nil, _), do: aliases + + def runtime_aliases(aliases, name, join_count) when is_integer(join_count) do + if Map.has_key?(aliases, name) do + Builder.error! "alias `#{inspect name}` already exists" + else + Map.put(aliases, name, join_count) + end + end + + defp compile_aliases({:%{}, meta, aliases}, name, join_count) + when is_atom(name) and is_integer(join_count) do + {:%{}, meta, aliases |> Map.new |> runtime_aliases(name, join_count) |> Map.to_list} + end + + defp compile_aliases(aliases, name, join_count) do + quote do + Ecto.Query.Builder.Join.runtime_aliases(unquote(aliases), unquote(name), unquote(join_count)) + end + end + + @doc """ + Called at runtime to build a join. + """ + def join!(query, join, expr, as, count_bind, file, line) do + # join without expanded :on is built and applied to the query, + # so that expansion of dynamic :on accounts for the new binding + {on_expr, on_params, on_file, on_line} = + Ecto.Query.Builder.Filter.filter!(:on, apply(query, join, as, count_bind), expr, count_bind, file, line) + + join = %{join | on: %QueryExpr{expr: on_expr, params: on_params, line: on_line, file: on_file}} + apply(query, join, as, count_bind) + end + + defp validate_qual(qual) when is_atom(qual) do + qual!(qual) + end + + defp validate_qual(qual) do + quote(do: Ecto.Query.Builder.Join.qual!(unquote(qual))) + end + + defp validate_bind(bind, all) do + if bind != :_ and bind in all do + Builder.error! "variable `#{bind}` is already defined in query" + end + end + + @qualifiers [:inner, :inner_lateral, :left, :left_lateral, :right, :full, :cross] + + @doc """ + Called at runtime to check dynamic qualifier. + """ + def qual!(qual) when qual in @qualifiers, do: qual + def qual!(qual) do + raise ArgumentError, + "invalid join qualifier `#{inspect qual}`, accepted qualifiers are: " <> + Enum.map_join(@qualifiers, ", ", &"`#{inspect &1}`") + end + + defp ensure_field!({var, _, _}) when var != :^ do + Builder.error! "you passed the variable `#{var}` to `assoc/2`. Did you mean to pass the atom `:#{var}`?" + end + defp ensure_field!(_), do: true +end diff --git a/deps/ecto/lib/ecto/query/builder/limit_offset.ex b/deps/ecto/lib/ecto/query/builder/limit_offset.ex new file mode 100644 index 0000000..03325ec --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/limit_offset.ex @@ -0,0 +1,56 @@ +import Kernel, except: [apply: 3] + +defmodule Ecto.Query.Builder.LimitOffset do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(:limit | :offset, Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(type, query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {expr, {params, :acc}} = Builder.escape(expr, :integer, {[], :acc}, binding, env) + params = Builder.escape_params(params) + + if contains_variable?(expr) do + Builder.error! 
"query variables are not allowed in #{type} expression" + end + + limoff = quote do: %Ecto.Query.QueryExpr{ + expr: unquote(expr), + params: unquote(params), + file: unquote(env.file), + line: unquote(env.line)} + + Builder.apply_query(query, __MODULE__, [type, limoff], env) + end + + defp contains_variable?(ast) do + ast + |> Macro.prewalk(false, fn + {:&, _, [_]} = expr, _ -> {expr, true} + expr, acc -> {expr, acc} + end) + |> elem(1) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, :limit | :offset, term) :: Ecto.Query.t + def apply(%Ecto.Query{} = query, :limit, expr) do + %{query | limit: expr} + end + def apply(%Ecto.Query{} = query, :offset, expr) do + %{query | offset: expr} + end + def apply(query, kind, expr) do + apply(Ecto.Queryable.to_query(query), kind, expr) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/lock.ex b/deps/ecto/lib/ecto/query/builder/lock.ex new file mode 100644 index 0000000..0a93412 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/lock.ex @@ -0,0 +1,59 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Lock do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes the lock code. + + iex> escape(quote(do: "FOO"), [], __ENV__) + "FOO" + + """ + @spec escape(Macro.t(), Keyword.t, Macro.Env.t) :: Macro.t() + def escape(lock, _vars, _env) when is_binary(lock), do: lock + + def escape({:fragment, _, [_ | _]} = expr, vars, env) do + {expr, {params, :acc}} = Builder.escape(expr, :any, {[], :acc}, vars, env) + + if params != [] do + Builder.error!("value interpolation is not allowed in :lock") + end + + expr + end + + def escape(other, _, _) do + Builder.error!( + "`#{Macro.to_string(other)}` is not a valid lock. " <> + "For security reasons, a lock must always be a literal string or a fragment" + ) + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(Macro.t(), Macro.t(), Macro.t(), Macro.Env.t()) :: Macro.t() + def build(query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + Builder.apply_query(query, __MODULE__, [escape(expr, binding, env)], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t(), term) :: Ecto.Query.t() + def apply(%Ecto.Query{} = query, value) do + %{query | lock: value} + end + + def apply(query, value) do + apply(Ecto.Queryable.to_query(query), value) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/order_by.ex b/deps/ecto/lib/ecto/query/builder/order_by.ex new file mode 100644 index 0000000..5a1d350 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/order_by.ex @@ -0,0 +1,209 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.OrderBy do + @moduledoc false + + alias Ecto.Query.Builder + + @directions [ + :asc, + :asc_nulls_last, + :asc_nulls_first, + :desc, + :desc_nulls_last, + :desc_nulls_first + ] + + @doc """ + Returns `true` if term is a valid order_by direction; otherwise returns `false`. + + ## Examples + + iex> valid_direction?(:asc) + true + + iex> valid_direction?(:desc) + true + + iex> valid_direction?(:invalid) + false + + """ + def valid_direction?(term), do: term in @directions + + @doc """ + Escapes an order by query. + + The query is escaped to a list of `{direction, expression}` + pairs at runtime. 
Escaping also validates direction is one of + `:asc`, `:asc_nulls_last`, `:asc_nulls_first`, `:desc`, + `:desc_nulls_last` or `:desc_nulls_first`. + + ## Examples + + iex> escape(:order_by, quote do [x.x, desc: 13] end, {[], :acc}, [x: 0], __ENV__) + {[asc: {:{}, [], [{:{}, [], [:., [], [{:{}, [], [:&, [], [0]]}, :x]]}, [], []]}, + desc: 13], + {[], :acc}} + + """ + @spec escape(:order_by | :distinct, Macro.t, {list, term}, Keyword.t, Macro.Env.t) :: + {Macro.t, {list, term}} + def escape(kind, expr, params_acc, vars, env) do + expr + |> List.wrap + |> Enum.map_reduce(params_acc, &do_escape(&1, &2, kind, vars, env)) + end + + defp do_escape({dir, {:^, _, [expr]}}, params_acc, kind, _vars, _env) do + {{quoted_dir!(kind, dir), quote(do: Ecto.Query.Builder.OrderBy.field!(unquote(kind), unquote(expr)))}, params_acc} + end + + defp do_escape({:^, _, [expr]}, params_acc, kind, _vars, _env) do + {{:asc, quote(do: Ecto.Query.Builder.OrderBy.field!(unquote(kind), unquote(expr)))}, params_acc} + end + + defp do_escape({dir, field}, params_acc, kind, _vars, _env) when is_atom(field) do + {{quoted_dir!(kind, dir), Macro.escape(to_field(field))}, params_acc} + end + + defp do_escape(field, params_acc, _kind, _vars, _env) when is_atom(field) do + {{:asc, Macro.escape(to_field(field))}, params_acc} + end + + defp do_escape({dir, expr}, params_acc, kind, vars, env) do + {ast, params_acc} = Builder.escape(expr, :any, params_acc, vars, env) + {{quoted_dir!(kind, dir), ast}, params_acc} + end + + defp do_escape(expr, params_acc, _kind, vars, env) do + {ast, params_acc} = Builder.escape(expr, :any, params_acc, vars, env) + {{:asc, ast}, params_acc} + end + + @doc """ + Checks the variable is a quoted direction at compilation time or + delegate the check to runtime for interpolation. + """ + def quoted_dir!(kind, {:^, _, [expr]}), + do: quote(do: Ecto.Query.Builder.OrderBy.dir!(unquote(kind), unquote(expr))) + def quoted_dir!(_kind, dir) when dir in @directions, + do: dir + def quoted_dir!(kind, other) do + Builder.error!( + "expected #{Enum.map_join(@directions, ", ", &inspect/1)} or interpolated value " <> + "in `#{kind}`, got: `#{inspect other}`" + ) + end + + @doc """ + Called by at runtime to verify the direction. + """ + def dir!(_kind, dir) when dir in @directions, + do: dir + + def dir!(kind, other) do + raise ArgumentError, + "expected one of #{Enum.map_join(@directions, ", ", &inspect/1)} " <> + "in `#{kind}`, got: `#{inspect other}`" + end + + @doc """ + Called at runtime to verify a field. + """ + def field!(_kind, field) when is_atom(field) do + to_field(field) + end + + def field!(kind, %Ecto.Query.DynamicExpr{} = dynamic_expression) do + raise ArgumentError, + "expected a field as an atom in `#{kind}`, got: `#{inspect dynamic_expression}`. " <> + "To use dynamic expressions, you need to interpolate at root level, as in: " <> + "`^[asc: dynamic, desc: :id]`" + end + + def field!(kind, other) do + raise ArgumentError, "expected a field as an atom in `#{kind}`, got: `#{inspect other}`" + end + + defp to_field(field), do: {{:., [], [{:&, [], [0]}, field]}, [], []} + + @doc """ + Shared between order_by and distinct. 
+ """ + def order_by_or_distinct!(kind, query, exprs, params) do + {expr, {params, _}} = + Enum.map_reduce(List.wrap(exprs), {params, length(params)}, fn + {dir, expr}, params_count when dir in @directions -> + {expr, params} = dynamic_or_field!(kind, expr, query, params_count) + {{dir, expr}, params} + expr, params_count -> + {expr, params} = dynamic_or_field!(kind, expr, query, params_count) + {{:asc, expr}, params} + end) + + {expr, params} + end + + @doc """ + Called at runtime to assemble order_by. + """ + def order_by!(query, exprs, file, line) do + {expr, params} = order_by_or_distinct!(:order_by, query, exprs, []) + expr = %Ecto.Query.QueryExpr{expr: expr, params: Enum.reverse(params), line: line, file: file} + apply(query, expr) + end + + defp dynamic_or_field!(kind, %Ecto.Query.DynamicExpr{} = dynamic, query, {params, count}) do + {expr, params, count} = Builder.Dynamic.partially_expand(kind, query, dynamic, params, count) + {expr, {params, count}} + end + + defp dynamic_or_field!(_kind, field, _query, params_count) when is_atom(field) do + {to_field(field), params_count} + end + + defp dynamic_or_field!(kind, other, _query, _params_count) do + raise ArgumentError, + "`#{kind}` interpolated on root expects a field or a keyword list " <> + "with the direction as keys and fields or dynamics as values, got: `#{inspect other}`" + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(query, _binding, {:^, _, [var]}, env) do + quote do + Ecto.Query.Builder.OrderBy.order_by!(unquote(query), unquote(var), unquote(env.file), unquote(env.line)) + end + end + + def build(query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {expr, {params, _}} = escape(:order_by, expr, {[], :acc}, binding, env) + params = Builder.escape_params(params) + + order_by = quote do: %Ecto.Query.QueryExpr{ + expr: unquote(expr), + params: unquote(params), + file: unquote(env.file), + line: unquote(env.line)} + Builder.apply_query(query, __MODULE__, [order_by], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t + def apply(%Ecto.Query{order_bys: order_bys} = query, expr) do + %{query | order_bys: order_bys ++ [expr]} + end + def apply(query, expr) do + apply(Ecto.Queryable.to_query(query), expr) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/preload.ex b/deps/ecto/lib/ecto/query/builder/preload.ex new file mode 100644 index 0000000..924864c --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/preload.ex @@ -0,0 +1,159 @@ +import Kernel, except: [apply: 3] + +defmodule Ecto.Query.Builder.Preload do + @moduledoc false + alias Ecto.Query.Builder + + @doc """ + Escapes a preload. + + A preload may be an atom, a list of atoms or a keyword list + nested as a rose tree. 
+ + iex> escape(:foo, []) + {[:foo], []} + + iex> escape([foo: :bar], []) + {[foo: [:bar]], []} + + iex> escape([:foo, :bar], []) + {[:foo, :bar], []} + + iex> escape([foo: [:bar, bar: :bat]], []) + {[foo: [:bar, bar: [:bat]]], []} + + iex> escape([foo: {:^, [], ["external"]}], []) + {[foo: "external"], []} + + iex> escape([foo: [:bar, {:^, [], ["external"]}], baz: :bat], []) + {[foo: [:bar, "external"], baz: [:bat]], []} + + iex> escape([foo: {:c, [], nil}], [c: 1]) + {[], [foo: {1, []}]} + + iex> escape([foo: {{:c, [], nil}, bar: {:l, [], nil}}], [c: 1, l: 2]) + {[], [foo: {1, [bar: {2, []}]}]} + + iex> escape([foo: {:c, [], nil}, bar: {:l, [], nil}], [c: 1, l: 2]) + {[], [foo: {1, []}, bar: {2, []}]} + + iex> escape([foo: {{:c, [], nil}, :bar}], [c: 1]) + {[foo: [:bar]], [foo: {1, []}]} + + iex> escape([foo: [bar: {:c, [], nil}]], [c: 1]) + ** (Ecto.Query.CompileError) cannot preload join association `:bar` with binding `c` because parent preload is not a join association + + """ + @spec escape(Macro.t, Keyword.t) :: {[Macro.t], [Macro.t]} + def escape(preloads, vars) do + {preloads, assocs} = escape(preloads, :both, [], [], vars) + {Enum.reverse(preloads), Enum.reverse(assocs)} + end + + defp escape(atom, _mode, preloads, assocs, _vars) when is_atom(atom) do + {[atom|preloads], assocs} + end + + defp escape(list, mode, preloads, assocs, vars) when is_list(list) do + Enum.reduce list, {preloads, assocs}, fn item, acc -> + escape_each(item, mode, acc, vars) + end + end + + defp escape({:^, _, [inner]}, _mode, preloads, assocs, _vars) do + {[inner|preloads], assocs} + end + + defp escape(other, _mode, _preloads, _assocs, _vars) do + Builder.error! "`#{Macro.to_string other}` is not a valid preload expression. " <> + "preload expects an atom, a list of atoms or a keyword list with " <> + "more preloads as values. Use ^ on the outermost preload to interpolate a value" + end + + defp escape_each({key, {:^, _, [inner]}}, _mode, {preloads, assocs}, _vars) do + key = escape_key(key) + {[{key, inner}|preloads], assocs} + end + + defp escape_each({key, {var, _, context}}, mode, {preloads, assocs}, vars) when is_atom(context) do + assert_assoc!(mode, key, var) + key = escape_key(key) + idx = Builder.find_var!(var, vars) + {preloads, [{key, {idx, []}}|assocs]} + end + + defp escape_each({key, {{var, _, context}, list}}, mode, {preloads, assocs}, vars) when is_atom(context) do + assert_assoc!(mode, key, var) + key = escape_key(key) + idx = Builder.find_var!(var, vars) + {inner_preloads, inner_assocs} = escape(list, :assoc, [], [], vars) + assocs = [{key, {idx, Enum.reverse(inner_assocs)}}|assocs] + case inner_preloads do + [] -> {preloads, assocs} + _ -> {[{key, Enum.reverse(inner_preloads)}|preloads], assocs} + end + end + + defp escape_each({key, list}, _mode, {preloads, assocs}, vars) do + key = escape_key(key) + {inner_preloads, []} = escape(list, :preload, [], [], vars) + {[{key, Enum.reverse(inner_preloads)}|preloads], assocs} + end + + defp escape_each(other, mode, {preloads, assocs}, vars) do + escape(other, mode, preloads, assocs, vars) + end + + defp escape_key(atom) when is_atom(atom) do + atom + end + + defp escape_key({:^, _, [expr]}) do + quote(do: Ecto.Query.Builder.Preload.key!(unquote(expr))) + end + + defp escape_key(other) do + Builder.error! "malformed key in preload `#{Macro.to_string(other)}` in query expression" + end + + defp assert_assoc!(mode, _atom, _var) when mode in [:both, :assoc], do: :ok + defp assert_assoc!(_mode, atom, var) do + Builder.error! 
"cannot preload join association `#{Macro.to_string atom}` with binding `#{var}` " <> + "because parent preload is not a join association" + end + + @doc """ + Called at runtime to check dynamic preload keys. + """ + def key!(key) when is_atom(key), + do: key + def key!(key) do + raise ArgumentError, + "expected key in preload to be an atom, got: `#{inspect key}`" + end + + @doc """ + Applies the preloaded value into the query. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {preloads, assocs} = escape(expr, binding) + Builder.apply_query(query, __MODULE__, [Enum.reverse(preloads), Enum.reverse(assocs)], env) + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, term, term) :: Ecto.Query.t + def apply(%Ecto.Query{preloads: p, assocs: a} = query, preloads, assocs) do + %{query | preloads: p ++ preloads, assocs: a ++ assocs} + end + def apply(query, preloads, assocs) do + apply(Ecto.Queryable.to_query(query), preloads, assocs) + end +end diff --git a/deps/ecto/lib/ecto/query/builder/select.ex b/deps/ecto/lib/ecto/query/builder/select.ex new file mode 100644 index 0000000..d6e6d55 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/select.ex @@ -0,0 +1,396 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Select do + @moduledoc false + + alias Ecto.Query.Builder + + @doc """ + Escapes a select. + + It allows tuples, lists and variables at the top level. Inside the + tuples and lists query expressions are allowed. + + ## Examples + + iex> escape({1, 2}, [], __ENV__) + {{:{}, [], [:{}, [], [1, 2]]}, {[], %{}}} + + iex> escape([1, 2], [], __ENV__) + {[1, 2], {[], %{}}} + + iex> escape(quote(do: x), [x: 0], __ENV__) + {{:{}, [], [:&, [], [0]]}, {[], %{}}} + + """ + @spec escape(Macro.t, Keyword.t, Macro.Env.t) :: {Macro.t, {list, %{}}} + def escape(atom, _vars, _env) + when is_atom(atom) and not is_boolean(atom) and atom != nil do + Builder.error! """ + #{inspect(atom)} is not a valid query expression, :select expects a query expression or a list of fields + """ + end + + def escape(other, vars, env) do + cond do + take?(other) -> + {{:{}, [], [:&, [], [0]]}, {[], %{0 => {:any, Macro.expand(other, env)}}}} + + maybe_take?(other) -> + Builder.error! """ + Cannot mix fields with interpolations, such as: `select: [:foo, ^:bar, :baz]`. \ + Instead interpolate all fields at once, such as: `select: ^[:foo, :bar, :baz]`. \ + Got: #{Macro.to_string(other)}. 
+ """ + + true -> + escape(other, {[], %{}}, vars, env) + end + end + + # Tuple + defp escape({left, right}, params_take, vars, env) do + escape({:{}, [], [left, right]}, params_take, vars, env) + end + + # Tuple + defp escape({:{}, _, list}, params_take, vars, env) do + {list, params_take} = Enum.map_reduce(list, params_take, &escape(&1, &2, vars, env)) + expr = {:{}, [], [:{}, [], list]} + {expr, params_take} + end + + # Struct + defp escape({:%, _, [name, map]}, params_take, vars, env) do + name = Macro.expand(name, env) + {escaped_map, params_take} = escape(map, params_take, vars, env) + {{:{}, [], [:%, [], [name, escaped_map]]}, params_take} + end + + # Map + defp escape({:%{}, _, [{:|, _, [data, pairs]}]}, params_take, vars, env) do + {data, params_take} = escape(data, params_take, vars, env) + {pairs, params_take} = escape_pairs(pairs, params_take, vars, env) + {{:{}, [], [:%{}, [], [{:{}, [], [:|, [], [data, pairs]]}]]}, params_take} + end + + # Merge + defp escape({:merge, _, [left, {kind, _, _} = right]}, params_take, vars, env) + when kind in [:%{}, :map] do + {left, params_take} = escape(left, params_take, vars, env) + {right, params_take} = escape(right, params_take, vars, env) + {{:{}, [], [:merge, [], [left, right]]}, params_take} + end + + defp escape({:merge, _, [_left, right]}, _params_take, _vars, _env) do + Builder.error! "expected the second argument of merge/2 in select to be a map, got: `#{Macro.to_string(right)}`" + end + + # Map + defp escape({:%{}, _, pairs}, params_take, vars, env) do + {pairs, params_take} = escape_pairs(pairs, params_take, vars, env) + {{:{}, [], [:%{}, [], pairs]}, params_take} + end + + # List + defp escape(list, params_take, vars, env) when is_list(list) do + Enum.map_reduce(list, params_take, &escape(&1, &2, vars, env)) + end + + # map/struct(var, [:foo, :bar]) + defp escape({tag, _, [{var, _, context}, fields]}, {params, take}, vars, env) + when tag in [:map, :struct] and is_atom(var) and is_atom(context) do + taken = escape_fields(fields, tag, env) + expr = Builder.escape_var!(var, vars) + take = add_take(take, Builder.find_var!(var, vars), {tag, taken}) + {expr, {params, take}} + end + + defp escape(expr, params_take, vars, env) do + Builder.escape(expr, :any, params_take, vars, {env, &escape_expansion/5}) + end + + defp escape_expansion(expr, _type, params_take, vars, env) do + escape(expr, params_take, vars, env) + end + + defp escape_pairs(pairs, params_take, vars, env) do + Enum.map_reduce pairs, params_take, fn({k, v}, acc) -> + {k, acc} = escape_key(k, acc, vars, env) + {v, acc} = escape(v, acc, vars, env) + {{k, v}, acc} + end + end + + defp escape_key(k, params_take, _vars, _env) when is_atom(k) do + {k, params_take} + end + defp escape_key(k, params_take, vars, env) do + escape(k, params_take, vars, env) + end + + defp escape_fields({:^, _, [interpolated]}, tag, _env) do + quote do + Ecto.Query.Builder.Select.fields!(unquote(tag), unquote(interpolated)) + end + end + defp escape_fields(expr, tag, env) do + case Macro.expand(expr, env) do + fields when is_list(fields) -> + fields + _ -> + Builder.error! "`#{tag}/2` in `select` expects either a literal or " <> + "an interpolated list of atom fields" + end + end + + @doc """ + Called at runtime to verify a field. 
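+
+  For instance, `fields!(:map, [:id, :title])` simply returns
+  `[:id, :title]`, while a non-list value (or a list containing anything
+  other than atoms and keyword lists of atoms) raises `ArgumentError`.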
+ """ + def fields!(tag, fields) do + if take?(fields) do + fields + else + raise ArgumentError, + "expected a list of fields in `#{tag}/2` inside `select`, got: `#{inspect fields}`" + end + end + + # atom list sigils + defp take?({name, _, [_, modifiers]}) when name in ~w(sigil_w sigil_W)a do + ?a in modifiers + end + + defp take?(fields) do + is_list(fields) and Enum.all?(fields, fn + {k, v} when is_atom(k) -> take?(List.wrap(v)) + k when is_atom(k) -> true + _ -> false + end) + end + + defp maybe_take?(fields) do + is_list(fields) and Enum.any?(fields, fn + {k, v} when is_atom(k) -> maybe_take?(List.wrap(v)) + k when is_atom(k) -> true + _ -> false + end) + end + + @doc """ + Called at runtime for interpolated/dynamic selects. + """ + def select!(kind, query, fields, file, line) do + take = %{0 => {:any, fields!(:select, fields)}} + expr = %Ecto.Query.SelectExpr{expr: {:&, [], [0]}, take: take, file: file, line: line} + if kind == :select do + apply(query, expr) + else + merge(query, expr) + end + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. + """ + @spec build(:select | :merge, Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + + def build(kind, query, _binding, {:^, _, [var]}, env) do + quote do + Ecto.Query.Builder.Select.select!(unquote(kind), unquote(query), unquote(var), + unquote(env.file), unquote(env.line)) + end + end + + def build(kind, query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {expr, {params, take}} = escape(expr, binding, env) + params = Builder.escape_params(params) + take = {:%{}, [], Map.to_list(take)} + + select = quote do: %Ecto.Query.SelectExpr{ + expr: unquote(expr), + params: unquote(params), + file: unquote(env.file), + line: unquote(env.line), + take: unquote(take)} + + if kind == :select do + Builder.apply_query(query, __MODULE__, [select], env) + else + quote do + query = unquote(query) + Builder.Select.merge(query, unquote(select)) + end + end + end + + @doc """ + The callback applied by `build/5` to build the query. + """ + @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t + def apply(%Ecto.Query{select: nil} = query, expr) do + %{query | select: expr} + end + def apply(%Ecto.Query{}, _expr) do + Builder.error! "only one select expression is allowed in query" + end + def apply(query, expr) do + apply(Ecto.Queryable.to_query(query), expr) + end + + @doc """ + The callback applied by `build/5` when merging. 
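+
+  Broadly, when the existing select carries no parameters and both sides
+  are literal keyword maps (or a struct and a map), the fields are merged
+  at compile time; otherwise a `:merge` node is kept in the select
+  expression, and shapes that cannot be merged raise `Ecto.QueryError`.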
+ """ + def merge(%Ecto.Query{select: nil} = query, new_select) do + merge(query, new_select, {:&, [], [0]}, [], %{}, new_select) + end + def merge(%Ecto.Query{select: old_select} = query, new_select) do + %{expr: old_expr, params: old_params, take: old_take} = old_select + merge(query, old_select, old_expr, old_params, old_take, new_select) + end + def merge(query, expr) do + merge(Ecto.Queryable.to_query(query), expr) + end + + defp merge(query, select, old_expr, old_params, old_take, new_select) do + %{expr: new_expr, params: new_params, take: new_take} = new_select + new_expr = Ecto.Query.Builder.bump_interpolations(new_expr, old_params) + + expr = + case {classify_merge(old_expr, old_take), classify_merge(new_expr, new_take)} do + {_, _} when old_expr == new_expr -> + new_expr + + {{:source, meta, ix}, {:source, _, ix}} -> + {:&, meta, [ix]} + + {{:struct, meta, name, old_fields}, {:map, _, new_fields}} when old_params == [] -> + cond do + new_fields == [] -> + old_expr + + Keyword.keyword?(old_fields) and Keyword.keyword?(new_fields) -> + {:%, meta, [name, {:%{}, meta, Keyword.merge(old_fields, new_fields)}]} + + true -> + {:merge, [], [old_expr, new_expr]} + end + + {{:map, meta, old_fields}, {:map, _, new_fields}} when old_params == [] -> + cond do + old_fields == [] -> + new_expr + + new_fields == [] -> + old_expr + + Keyword.keyword?(old_fields) and Keyword.keyword?(new_fields) -> + {:%{}, meta, Keyword.merge(old_fields, new_fields)} + + true -> + {:merge, [], [old_expr, new_expr]} + end + + {_, {:map, _, _}} -> + {:merge, [], [old_expr, new_expr]} + + {_, _} -> + message = """ + cannot select_merge #{merge_argument_to_error(new_expr, query)} into \ + #{merge_argument_to_error(old_expr, query)}, those select expressions \ + are incompatible. 
You can only select_merge: + + * a source (such as post) with another source (of the same type) + * a source (such as post) with a map + * a struct with a map + * a map with a map + + Incompatible merge found + """ + + raise Ecto.QueryError, query: query, message: message + end + + select = %{ + select | expr: expr, + params: old_params ++ new_params, + take: merge_take(old_expr, old_take, new_take) + } + + %{query | select: select} + end + + defp classify_merge({:&, meta, [ix]}, take) when is_integer(ix) do + case take do + %{^ix => {:map, _}} -> {:map, meta, :runtime} + _ -> {:source, meta, ix} + end + end + + defp classify_merge({:%, meta, [name, {:%{}, _, fields}]}, _take) + when fields == [] or tuple_size(hd(fields)) == 2 do + {:struct, meta, name, fields} + end + + defp classify_merge({:%{}, meta, fields}, _take) + when fields == [] or tuple_size(hd(fields)) == 2 do + {:map, meta, fields} + end + + defp classify_merge({:%{}, meta, _}, _take) do + {:map, meta, :runtime} + end + + defp classify_merge(_, _take) do + :error + end + + defp merge_argument_to_error({:&, _, [0]}, %{from: %{source: {source, alias}}}) do + "source #{inspect(source || alias)}" + end + + defp merge_argument_to_error({:&, _, [ix]}, _query) do + "join (at position #{ix})" + end + + defp merge_argument_to_error(other, _query) do + Macro.to_string(other) + end + + defp add_take(take, key, value) do + Map.update(take, key, value, &merge_take_kind_and_fields(key, &1, value)) + end + + defp merge_take(old_expr, %{} = old_take, %{} = new_take) do + Enum.reduce(new_take, old_take, fn {binding, new_value}, acc -> + case acc do + %{^binding => old_value} -> + Map.put(acc, binding, merge_take_kind_and_fields(binding, old_value, new_value)) + + %{} -> + # If the binding is a not filtered source, merge shouldn't restrict it + case old_expr do + {:&, _, [^binding]} -> acc + _ -> Map.put(acc, binding, new_value) + end + end + end) + end + + defp merge_take_kind_and_fields(binding, {old_kind, old_fields}, {new_kind, new_fields}) do + {merge_take_kind(binding, old_kind, new_kind), Enum.uniq(old_fields ++ new_fields)} + end + + defp merge_take_kind(_, kind, kind), do: kind + defp merge_take_kind(_, :any, kind), do: kind + defp merge_take_kind(_, kind, :any), do: kind + defp merge_take_kind(binding, old, new) do + Builder.error! "cannot select_merge because the binding at position #{binding} " <> + "was previously specified as a `#{old}` and later as `#{new}`" + end +end diff --git a/deps/ecto/lib/ecto/query/builder/update.ex b/deps/ecto/lib/ecto/query/builder/update.ex new file mode 100644 index 0000000..8e970de --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/update.ex @@ -0,0 +1,200 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Update do + @moduledoc false + + @keys [:set, :inc, :push, :pull] + alias Ecto.Query.Builder + + @doc """ + Escapes a list of quoted expressions. 
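+
+  The result is a triple, as the examples below show: operations fully
+  known at compile time, operations interpolated with `^` that must be
+  resolved at runtime, and the accumulated parameters.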
+ + iex> escape([], [], __ENV__) + {[], [], []} + + iex> escape([set: []], [], __ENV__) + {[], [], []} + + iex> escape(quote(do: ^[set: []]), [], __ENV__) + {[], [set: []], []} + + iex> escape(quote(do: [set: ^[foo: 1]]), [], __ENV__) + {[], [set: [foo: 1]], []} + + iex> escape(quote(do: [set: [foo: ^1]]), [], __ENV__) + {[], [set: [foo: 1]], []} + + """ + @spec escape(Macro.t, Keyword.t, Macro.Env.t) :: {Macro.t, Macro.t, list} + def escape(expr, vars, env) when is_list(expr) do + escape_op(expr, [], [], [], vars, env) + end + + def escape({:^, _, [v]}, _vars, _env) do + {[], v, []} + end + + def escape(expr, _vars, _env) do + compile_error!(expr) + end + + defp escape_op([{k, v}|t], compile, runtime, params, vars, env) when is_atom(k) and is_list(v) do + validate_op!(k) + {compile_values, runtime_values, params} = escape_kw(k, v, params, vars, env) + compile = + if compile_values == [], do: compile, else: [{k, Enum.reverse(compile_values)} | compile] + runtime = + if runtime_values == [], do: runtime, else: [{k, Enum.reverse(runtime_values)} | runtime] + escape_op(t, compile, runtime, params, vars, env) + end + + defp escape_op([{k, {:^, _, [v]}}|t], compile, runtime, params, vars, env) when is_atom(k) do + validate_op!(k) + escape_op(t, compile, [{k, v}|runtime], params, vars, env) + end + + defp escape_op([], compile, runtime, params, _vars, _env) do + {Enum.reverse(compile), Enum.reverse(runtime), params} + end + + defp escape_op(expr, _compile, _runtime, _params, _vars, _env) do + compile_error!(expr) + end + + defp escape_kw(op, kw, params, vars, env) do + Enum.reduce kw, {[], [], params}, fn + {k, {:^, _, [v]}}, {compile, runtime, params} when is_atom(k) -> + {compile, [{k, v} | runtime], params} + {k, v}, {compile, runtime, params} -> + k = escape_field!(k) + {v, {params, :acc}} = Builder.escape(v, type_for_key(op, {0, k}), {params, :acc}, vars, env) + {[{k, v} | compile], runtime, params} + _, _acc -> + Builder.error! "malformed #{inspect op} in update `#{Macro.to_string(kw)}`, " <> + "expected a keyword list" + end + end + + defp escape_field!({:^, _, [k]}), do: quote(do: Ecto.Query.Builder.Update.field!(unquote(k))) + defp escape_field!(k) when is_atom(k), do: k + + defp escape_field!(k) do + Builder.error!( + "expected an atom field or an interpolated field in `update`, got `#{inspect(k)}`" + ) + end + + def field!(field) when is_atom(field), do: field + + def field!(other) do + raise ArgumentError, "expected a field as an atom in `update`, got: `#{inspect other}`" + end + + defp compile_error!(expr) do + Builder.error! "malformed update `#{Macro.to_string(expr)}` in query expression, " <> + "expected a keyword list with set/push/pop as keys with field-value " <> + "pairs as values" + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. 
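+
+  Update operations known at compile time are attached to the query via
+  `Builder.apply_query/4`, while interpolated operations are delegated to
+  `update!/4` and expanded when the query is built at runtime.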
+ """ + @spec build(Macro.t, [Macro.t], Macro.t, Macro.Env.t) :: Macro.t + def build(query, binding, expr, env) do + {query, binding} = Builder.escape_binding(query, binding, env) + {compile, runtime, params} = escape(expr, binding, env) + + query = + if compile == [] do + query + else + params = Builder.escape_params(params) + + update = quote do + %Ecto.Query.QueryExpr{expr: unquote(compile), params: unquote(params), + file: unquote(env.file), line: unquote(env.line)} + end + + Builder.apply_query(query, __MODULE__, [update], env) + end + + if runtime == [] do + query + else + quote do + Ecto.Query.Builder.Update.update!(unquote(query), unquote(runtime), + unquote(env.file), unquote(env.line)) + end + end + end + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, term) :: Ecto.Query.t + def apply(%Ecto.Query{updates: updates} = query, expr) do + %{query | updates: updates ++ [expr]} + end + def apply(query, expr) do + apply(Ecto.Queryable.to_query(query), expr) + end + + @doc """ + If there are interpolated updates at compile time, + we need to handle them at runtime. We do such in + this callback. + """ + def update!(query, runtime, file, line) when is_list(runtime) do + {runtime, {params, _count}} = + Enum.map_reduce runtime, {[], 0}, fn + {k, v}, acc when is_atom(k) and is_list(v) -> + validate_op!(k) + {v, params} = runtime_field!(query, k, v, acc) + {{k, v}, params} + _, _ -> + runtime_error!(runtime) + end + + expr = %Ecto.Query.QueryExpr{expr: runtime, params: Enum.reverse(params), + file: file, line: line} + + apply(query, expr) + end + + def update!(_query, runtime, _file, _line) do + runtime_error!(runtime) + end + + defp runtime_field!(query, key, kw, acc) do + Enum.map_reduce kw, acc, fn + {k, %Ecto.Query.DynamicExpr{} = v}, {params, count} when is_atom(k) -> + {v, params, count} = Ecto.Query.Builder.Dynamic.partially_expand(:update, query, v, params, count) + {{k, v}, {params, count}} + {k, v}, {params, count} when is_atom(k) -> + params = [{v, type_for_key(key, {0, k})} | params] + {{k, {:^, [], [count]}}, {params, count + 1}} + _, _acc -> + raise ArgumentError, "malformed #{inspect key} in update `#{inspect(kw)}`, " <> + "expected a keyword list" + end + end + + defp runtime_error!(value) do + raise ArgumentError, + "malformed update `#{inspect(value)}` in query expression, " <> + "expected a keyword list with set/push/pop as keys with field-value pairs as values" + end + + defp validate_op!(key) when key in @keys, do: :ok + defp validate_op!(key), do: Builder.error! "unknown key `#{inspect(key)}` in update" + + # Out means the given type must be taken out of an array + # It is the opposite of "left in right" in the query API. + defp type_for_key(:push, type), do: {:out, type} + defp type_for_key(:pull, type), do: {:out, type} + defp type_for_key(_, type), do: type +end diff --git a/deps/ecto/lib/ecto/query/builder/windows.ex b/deps/ecto/lib/ecto/query/builder/windows.ex new file mode 100644 index 0000000..13f1891 --- /dev/null +++ b/deps/ecto/lib/ecto/query/builder/windows.ex @@ -0,0 +1,201 @@ +import Kernel, except: [apply: 2] + +defmodule Ecto.Query.Builder.Windows do + @moduledoc false + + alias Ecto.Query.Builder + alias Ecto.Query.Builder.{GroupBy, OrderBy} + @sort_order [:partition_by, :order_by, :frame] + + @doc """ + Escapes a window params. 
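+
+  Only `:partition_by`, `:order_by` and `:frame` are accepted as keys. The
+  definition is split into a part that can be escaped at compile time and
+  a part that was interpolated with `^` and must be handled at runtime.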
+ + ## Examples + + iex> escape(quote do [order_by: [desc: 13]] end, {[], :acc}, [x: 0], __ENV__) + {[order_by: [desc: 13]], [], {[], :acc}} + + """ + @spec escape([Macro.t], {list, term}, Keyword.t, Macro.Env.t | {Macro.Env.t, fun}) + :: {Macro.t, [{atom, term}], {list, term}} + def escape(kw, params_acc, vars, env) when is_list(kw) do + {compile, runtime} = sort(@sort_order, kw, :compile, [], []) + {compile, params_acc} = Enum.map_reduce(compile, params_acc, &escape_compile(&1, &2, vars, env)) + {compile, runtime, params_acc} + end + + def escape(kw, _params_acc, _vars, _env) do + error!(kw) + end + + defp sort([key | keys], kw, mode, compile, runtime) do + case Keyword.pop(kw, key) do + {nil, kw} -> + sort(keys, kw, mode, compile, runtime) + + {{:^, _, [var]}, kw} -> + sort(keys, kw, :runtime, compile, [{key, var} | runtime]) + + {_, _} when mode == :runtime -> + [{runtime_key, _} | _] = runtime + raise ArgumentError, "window has an interpolated value under `#{runtime_key}` " <> + "and therefore `#{key}` must also be interpolated" + + {expr, kw} -> + sort(keys, kw, mode, [{key, expr} | compile], runtime) + end + end + + defp sort([], [], _mode, compile, runtime) do + {Enum.reverse(compile), Enum.reverse(runtime)} + end + + defp sort([], kw, _mode, _compile, _runtime) do + error!(kw) + end + + defp escape_compile({:partition_by, fields}, params_acc, vars, env) do + {fields, params_acc} = GroupBy.escape(:partition_by, fields, params_acc, vars, env) + {{:partition_by, fields}, params_acc} + end + + defp escape_compile({:order_by, fields}, params_acc, vars, env) do + {fields, params_acc} = OrderBy.escape(:order_by, fields, params_acc, vars, env) + {{:order_by, fields}, params_acc} + end + + defp escape_compile({:frame, frame_clause}, params_acc, vars, env) do + {frame_clause, params_acc} = escape_frame(frame_clause, params_acc, vars, env) + {{:frame, frame_clause}, params_acc} + end + + defp escape_frame({:fragment, _, _} = fragment, params_acc, vars, env) do + Builder.escape(fragment, :any, params_acc, vars, env) + end + defp escape_frame(other, _, _, _) do + Builder.error!("expected a dynamic or fragment in `:frame`, got: `#{inspect other}`") + end + + defp error!(other) do + Builder.error!( + "expected window definition to be a keyword list " <> + "with partition_by, order_by or frame as keys, got: `#{inspect other}`" + ) + end + + @doc """ + Builds a quoted expression. + + The quoted expression should evaluate to a query at runtime. + If possible, it does all calculations at compile time to avoid + runtime work. 
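+
+  Windows whose definitions are fully known at compile time are attached
+  directly via `Builder.apply_query/4`; windows with interpolated parts
+  are expanded by `runtime!/4` when the query is built.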
+ """ + @spec build(Macro.t, [Macro.t], Keyword.t, Macro.Env.t) :: Macro.t + def build(query, binding, windows, env) when is_list(windows) do + {query, binding} = Builder.escape_binding(query, binding, env) + + {compile, runtime} = + windows + |> Enum.map(&escape_window(binding, &1, env)) + |> Enum.split_with(&elem(&1, 2) == []) + + compile = Enum.map(compile, &build_compile_window(&1, env)) + runtime = Enum.map(runtime, &build_runtime_window(&1, env)) + query = Builder.apply_query(query, __MODULE__, [compile], env) + + if runtime == [] do + query + else + quote do + Ecto.Query.Builder.Windows.runtime!( + unquote(query), + unquote(runtime), + unquote(env.file), + unquote(env.line) + ) + end + end + end + + def build(_, _, windows, _) do + Builder.error!( + "expected window definitions to be a keyword list with window names as keys and " <> + "a keyword list with the window definition as value, got: `#{inspect windows}`" + ) + end + + defp escape_window(vars, {name, expr}, env) do + {compile_acc, runtime_acc, {params, _}} = escape(expr, {[], :acc}, vars, env) + {name, compile_acc, runtime_acc, Builder.escape_params(params)} + end + + defp build_compile_window({name, compile_acc, _, params}, env) do + {name, + quote do + %Ecto.Query.QueryExpr{ + expr: unquote(compile_acc), + params: unquote(params), + file: unquote(env.file), + line: unquote(env.line) + } + end} + end + + defp build_runtime_window({name, compile_acc, runtime_acc, params}, _env) do + {:{}, [], [name, Enum.reverse(compile_acc), runtime_acc, Enum.reverse(params)]} + end + + @doc """ + Invoked for runtime windows. + """ + def runtime!(query, runtime, file, line) do + windows = + Enum.map(runtime, fn {name, compile_acc, runtime_acc, params} -> + {acc, params} = do_runtime_window!(runtime_acc, query, compile_acc, params) + expr = %Ecto.Query.QueryExpr{expr: Enum.reverse(acc), params: Enum.reverse(params), file: file, line: line} + {name, expr} + end) + + apply(query, windows) + end + + defp do_runtime_window!([{:order_by, order_by} | kw], query, acc, params) do + {order_by, params} = OrderBy.order_by_or_distinct!(:order_by, query, order_by, params) + do_runtime_window!(kw, query, [{:order_by, order_by} | acc], params) + end + + defp do_runtime_window!([{:partition_by, partition_by} | kw], query, acc, params) do + {partition_by, params} = GroupBy.group_or_partition_by!(:partition_by, query, partition_by, params) + do_runtime_window!(kw, query, [{:partition_by, partition_by} | acc], params) + end + + defp do_runtime_window!([{:frame, frame} | kw], query, acc, params) do + case frame do + %Ecto.Query.DynamicExpr{} -> + {frame, params, _count} = Builder.Dynamic.partially_expand(:windows, query, frame, params, length(params)) + do_runtime_window!(kw, query, [{:frame, frame} | acc], params) + + _ -> + raise ArgumentError, + "expected a dynamic or fragment in `:frame`, got: `#{inspect frame}`" + end + end + + defp do_runtime_window!([], _query, acc, params), do: {acc, params} + + @doc """ + The callback applied by `build/4` to build the query. + """ + @spec apply(Ecto.Queryable.t, Keyword.t) :: Ecto.Query.t + def apply(%Ecto.Query{windows: windows} = query, definitions) do + merged = Keyword.merge(windows, definitions, fn name, _, _ -> + Builder.error! 
"window with name #{name} is already defined" + end) + + %{query | windows: merged} + end + + def apply(query, definitions) do + apply(Ecto.Queryable.to_query(query), definitions) + end +end diff --git a/deps/ecto/lib/ecto/query/inspect.ex b/deps/ecto/lib/ecto/query/inspect.ex new file mode 100644 index 0000000..1c261ee --- /dev/null +++ b/deps/ecto/lib/ecto/query/inspect.ex @@ -0,0 +1,423 @@ +import Inspect.Algebra +import Kernel, except: [to_string: 1] + +alias Ecto.Query.{DynamicExpr, JoinExpr, QueryExpr, WithExpr} + +defimpl Inspect, for: Ecto.Query.DynamicExpr do + def inspect(%DynamicExpr{binding: binding} = dynamic, opts) do + joins = + binding + |> Enum.drop(1) + |> Enum.with_index() + |> Enum.map(&%JoinExpr{ix: &1}) + + aliases = + for({as, _} when is_atom(as) <- binding, do: as) + |> Enum.with_index() + |> Map.new + + query = %Ecto.Query{joins: joins, aliases: aliases} + + {expr, binding, params, subqueries, _, _} = + Ecto.Query.Builder.Dynamic.fully_expand(query, dynamic) + + names = + Enum.map(binding, fn + {_, {name, _, _}} -> name + {name, _, _} -> name + end) + + query_expr = %{expr: expr, params: params, subqueries: subqueries} + inspected = Inspect.Ecto.Query.expr(expr, List.to_tuple(names), query_expr) + + container_doc("dynamic(", [Macro.to_string(binding), inspected], ")", opts, fn str, _ -> + str + end) + end +end + +defimpl Inspect, for: Ecto.Query do + @doc false + def inspect(query, opts) do + list = + Enum.map(to_list(query), fn + {key, string} -> + concat(Atom.to_string(key) <> ": ", string) + + string -> + string + end) + + result = container_doc("#Ecto.Query<", list, ">", opts, fn str, _ -> str end) + + case query.with_ctes do + %WithExpr{recursive: recursive, queries: [_ | _] = queries} -> + with_ctes = + Enum.map(queries, fn {name, query} -> + cte = case query do + %Ecto.Query{} -> __MODULE__.inspect(query, opts) + %Ecto.Query.QueryExpr{} -> expr(query, {}) + end + + concat(["|> with_cte(\"" <> name <> "\", as: ", cte, ")"]) + end) + + result = if recursive, do: glue(result, "\n", "|> recursive_ctes(true)"), else: result + [result | with_ctes] |> Enum.intersperse(break("\n")) |> concat() + + _ -> + result + end + end + + @doc false + def to_string(query) do + Enum.map_join(to_list(query), ",\n ", fn + {key, string} -> + Atom.to_string(key) <> ": " <> string + + string -> + string + end) + end + + defp to_list(query) do + names = + query + |> collect_sources() + |> generate_letters() + |> generate_names() + |> List.to_tuple() + + from = bound_from(query.from, elem(names, 0)) + joins = joins(query.joins, names) + preloads = preloads(query.preloads) + assocs = assocs(query.assocs, names) + windows = windows(query.windows, names) + combinations = combinations(query.combinations) + + wheres = bool_exprs(%{and: :where, or: :or_where}, query.wheres, names) + group_bys = kw_exprs(:group_by, query.group_bys, names) + havings = bool_exprs(%{and: :having, or: :or_having}, query.havings, names) + order_bys = kw_exprs(:order_by, query.order_bys, names) + updates = kw_exprs(:update, query.updates, names) + + lock = kw_inspect(:lock, query.lock) + limit = kw_expr(:limit, query.limit, names) + offset = kw_expr(:offset, query.offset, names) + select = kw_expr(:select, query.select, names) + distinct = kw_expr(:distinct, query.distinct, names) + + Enum.concat([ + from, + joins, + wheres, + group_bys, + havings, + windows, + combinations, + order_bys, + limit, + offset, + lock, + distinct, + updates, + select, + preloads, + assocs + ]) + end + + defp bound_from(nil, name), do: 
["from #{name} in query"] + + defp bound_from(%{source: source} = from, name) do + ["from #{name} in #{inspect_source(source)}"] ++ kw_as_and_prefix(from) + end + + defp inspect_source(%Ecto.Query{} = query), do: "^" <> inspect(query) + defp inspect_source(%Ecto.SubQuery{query: query}), do: "subquery(#{to_string(query)})" + defp inspect_source({source, nil}), do: inspect(source) + defp inspect_source({nil, schema}), do: inspect(schema) + + defp inspect_source({source, schema} = from) do + inspect(if source == schema.__schema__(:source), do: schema, else: from) + end + + defp joins(joins, names) do + joins + |> Enum.with_index() + |> Enum.flat_map(fn {expr, ix} -> join(expr, elem(names, expr.ix || ix + 1), names) end) + end + + defp join(%JoinExpr{qual: qual, assoc: {ix, right}, on: on} = join, name, names) do + string = "#{name} in assoc(#{elem(names, ix)}, #{inspect(right)})" + [{join_qual(qual), string}] ++ kw_as_and_prefix(join) ++ maybe_on(on, names) + end + + defp join( + %JoinExpr{qual: qual, source: {:fragment, _, _} = source, on: on} = join = part, + name, + names + ) do + string = "#{name} in #{expr(source, names, part)}" + [{join_qual(qual), string}] ++ kw_as_and_prefix(join) ++ [on: expr(on, names)] + end + + defp join(%JoinExpr{qual: qual, source: source, on: on} = join, name, names) do + string = "#{name} in #{inspect_source(source)}" + [{join_qual(qual), string}] ++ kw_as_and_prefix(join) ++ [on: expr(on, names)] + end + + defp maybe_on(%QueryExpr{expr: true}, _names), do: [] + defp maybe_on(%QueryExpr{} = on, names), do: [on: expr(on, names)] + + defp preloads([]), do: [] + defp preloads(preloads), do: [preload: inspect(preloads)] + + defp assocs([], _names), do: [] + defp assocs(assocs, names), do: [preload: expr(assocs(assocs), names, %{})] + + defp assocs(assocs) do + Enum.map(assocs, fn + {field, {idx, []}} -> + {field, {:&, [], [idx]}} + + {field, {idx, children}} -> + {field, {{:&, [], [idx]}, assocs(children)}} + end) + end + + defp windows(windows, names) do + Enum.map(windows, &window(&1, names)) + end + + defp window({name, %{expr: definition} = part}, names) do + {:windows, "[#{name}: " <> expr(definition, names, part) <> "]"} + end + + defp combinations(combinations) do + Enum.map(combinations, fn {key, val} -> {key, "(" <> to_string(val) <> ")"} end) + end + + defp bool_exprs(keys, exprs, names) do + Enum.map(exprs, fn %{expr: expr, op: op} = part -> + {Map.fetch!(keys, op), expr(expr, names, part)} + end) + end + + defp kw_exprs(key, exprs, names) do + Enum.map(exprs, &{key, expr(&1, names)}) + end + + defp kw_expr(_key, nil, _names), do: [] + defp kw_expr(key, expr, names), do: [{key, expr(expr, names)}] + + defp kw_inspect(_key, nil), do: [] + defp kw_inspect(key, val), do: [{key, inspect(val)}] + + defp kw_as_and_prefix(%{as: as, prefix: prefix}) do + kw_inspect(:as, as) ++ kw_inspect(:prefix, prefix) + end + + defp expr(%{expr: expr} = part, names) do + expr(expr, names, part) + end + + @doc false + def expr(expr, names, part) do + expr + |> Macro.traverse(:ok, &{prewalk(&1), &2}, &{postwalk(&1, names, part), &2}) + |> elem(0) + |> macro_to_string() + end + + if Version.match?(System.version(), ">= 1.11.0") do + defp macro_to_string(expr), do: Macro.to_string(expr) + else + defp macro_to_string(expr) do + Macro.to_string(expr, fn + {{:., _, [_, _]}, _, []}, string -> String.replace_suffix(string, "()", "") + _other, string -> string + end) + end + end + + # Tagged values + defp prewalk(%Ecto.Query.Tagged{value: value, tag: nil}) do + value + end + + defp 
prewalk(%Ecto.Query.Tagged{value: value, tag: {:parameterized, type, opts}}) do + {:type, [], [value, {:{}, [], [:parameterized, type, opts]}]} + end + + defp prewalk(%Ecto.Query.Tagged{value: value, tag: tag}) do + {:type, [], [value, tag]} + end + + defp prewalk({:type, _, [value, {:parameterized, type, opts}]}) do + {:type, [], [value, {:{}, [], [:parameterized, type, opts]}]} + end + + defp prewalk(node) do + node + end + + # Convert variables to proper names + defp postwalk({:&, _, [ix]}, names, part) do + binding_to_expr(ix, names, part) + end + + # Remove parens from field calls + defp postwalk({{:., _, [_, _]} = dot, meta, []}, _names, _part) do + {dot, [no_parens: true] ++ meta, []} + end + + # Interpolated unknown value + defp postwalk({:^, _, [_ix, _len]}, _names, _part) do + {:^, [], [{:..., [], nil}]} + end + + # Interpolated known value + defp postwalk({:^, _, [ix]}, _, %{params: params}) do + value = + case Enum.at(params || [], ix) do + # Wrap the head in a block so it is not treated as a charlist + {[head | tail], _type} -> [{:__block__, [], [head]} | tail] + {value, _type} -> value + _ -> {:..., [], nil} + end + + {:^, [], [value]} + end + + # Types need to be converted back to AST for fields + defp postwalk({:type, meta, [expr, type]}, names, part) do + {:type, meta, [expr, type_to_expr(type, names, part)]} + end + + # For keyword and interpolated fragments use normal escaping + defp postwalk({:fragment, _, [{_, _} | _] = parts}, _names, _part) do + {:fragment, [], unmerge_fragments(parts, "", [])} + end + + # Subqueries + defp postwalk({:subquery, i}, _names, %{subqueries: subqueries}) do + {:subquery, [], [Enum.fetch!(subqueries, i).query]} + end + + # Jason + defp postwalk({:json_extract_path, _, [expr, path]}, _names, _part) do + Enum.reduce(path, expr, fn element, acc -> + {{:., [], [Access, :get]}, [], [acc, element]} + end) + end + + defp postwalk(node, _names, _part) do + node + end + + defp binding_to_expr(ix, names, part) do + case part do + %{take: %{^ix => {:any, fields}}} when ix == 0 -> + fields + + %{take: %{^ix => {tag, fields}}} -> + {tag, [], [binding(names, ix), fields]} + + _ -> + binding(names, ix) + end + end + + defp type_to_expr({:parameterized, type, opts}, _names, _part) do + {:{}, [], [:parameterized, type, opts]} + end + + defp type_to_expr({ix, type}, names, part) when is_integer(ix) do + {{:., [], [binding_to_expr(ix, names, part), type]}, [no_parens: true], []} + end + + defp type_to_expr({composite, type}, names, part) when is_atom(composite) do + {composite, type_to_expr(type, names, part)} + end + + defp type_to_expr(type, _names, _part) do + type + end + + defp unmerge_fragments([{:raw, s}, {:expr, v} | t], frag, args) do + unmerge_fragments(t, frag <> s <> "?", [v | args]) + end + + defp unmerge_fragments([{:raw, s}], frag, args) do + [frag <> s | Enum.reverse(args)] + end + + defp join_qual(:inner), do: :join + defp join_qual(:inner_lateral), do: :join_lateral + defp join_qual(:left), do: :left_join + defp join_qual(:left_lateral), do: :left_join_lateral + defp join_qual(:right), do: :right_join + defp join_qual(:full), do: :full_join + defp join_qual(:cross), do: :cross_join + + defp collect_sources(%{from: nil, joins: joins}) do + ["query" | join_sources(joins)] + end + + defp collect_sources(%{from: %{source: source}, joins: joins}) do + [from_sources(source) | join_sources(joins)] + end + + defp from_sources(%Ecto.SubQuery{query: query}), do: from_sources(query.from.source) + defp from_sources({source, schema}), do: schema || 
source + defp from_sources(nil), do: "query" + + defp join_sources(joins) do + joins + |> Enum.sort_by(& &1.ix) + |> Enum.map(fn + %JoinExpr{assoc: {_var, assoc}} -> + assoc + + %JoinExpr{source: {:fragment, _, _}} -> + "fragment" + + %JoinExpr{source: %Ecto.Query{from: from}} -> + from_sources(from.source) + + %JoinExpr{source: source} -> + from_sources(source) + end) + end + + defp generate_letters(sources) do + Enum.map(sources, fn source -> + source + |> Kernel.to_string() + |> normalize_source() + |> String.first() + |> String.downcase() + end) + end + + defp generate_names(letters) do + {names, _} = Enum.map_reduce(letters, 0, &{:"#{&1}#{&2}", &2 + 1}) + names + end + + defp binding(names, pos) do + try do + {elem(names, pos), [], nil} + rescue + ArgumentError -> {:"unknown_binding_#{pos}!", [], nil} + end + end + + defp normalize_source("Elixir." <> _ = source), + do: source |> Module.split() |> List.last() + + defp normalize_source(source), + do: source +end diff --git a/deps/ecto/lib/ecto/query/planner.ex b/deps/ecto/lib/ecto/query/planner.ex new file mode 100644 index 0000000..38b40a0 --- /dev/null +++ b/deps/ecto/lib/ecto/query/planner.ex @@ -0,0 +1,1884 @@ +defmodule Ecto.Query.Planner do + # Normalizes a query and its parameters. + @moduledoc false + + alias Ecto.Query.{BooleanExpr, DynamicExpr, FromExpr, JoinExpr, QueryExpr, SelectExpr} + + if map_size(%Ecto.Query{}) != 21 do + raise "Ecto.Query match out of date in builder" + end + + @parent_as __MODULE__ + @aggs ~w(count avg min max sum row_number rank dense_rank percent_rank cume_dist ntile lag lead first_value last_value nth_value)a + + @doc """ + Converts a query to a list of joins. + + The from is moved as last join with the where conditions as its "on" + in order to keep proper binding order. + """ + def query_to_joins(qual, source, %{wheres: wheres, joins: joins}, position) do + on = %QueryExpr{file: __ENV__.file, line: __ENV__.line, expr: true, params: []} + + on = + Enum.reduce(wheres, on, fn %BooleanExpr{op: op, expr: expr, params: params}, acc -> + merge_expr_and_params(op, acc, expr, params) + end) + + join = %JoinExpr{qual: qual, source: source, file: __ENV__.file, line: __ENV__.line, on: on} + last = length(joins) + position + + mapping = fn + 0 -> last + ix -> ix + position - 1 + end + + for {%{on: on} = join, ix} <- Enum.with_index(joins ++ [join]) do + %{join | on: rewrite_sources(on, mapping), ix: ix + position} + end + end + + defp merge_expr_and_params(op, %QueryExpr{expr: left_expr, params: left_params} = struct, + right_expr, right_params) do + right_expr = Ecto.Query.Builder.bump_interpolations(right_expr, left_params) + %{struct | expr: merge_expr(op, left_expr, right_expr), params: left_params ++ right_params} + end + + defp merge_expr(_op, left, true), do: left + defp merge_expr(_op, true, right), do: right + defp merge_expr(op, left, right), do: {op, [], [left, right]} + + @doc """ + Rewrites the given query expression sources using the given mapping. 
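+
+  The mapping is a function from the old binding index to the new one. For
+  example, `query_to_joins/4` above remaps index 0 (the original `from`)
+  to the last join position and shifts the remaining bindings by the given
+  position offset.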
+ """ + def rewrite_sources(%{expr: expr, params: params} = part, mapping) do + expr = + Macro.prewalk expr, fn + %Ecto.Query.Tagged{type: type, tag: tag} = tagged -> + %{tagged | type: rewrite_type(type, mapping), tag: rewrite_type(tag, mapping)} + {:&, meta, [ix]} -> + {:&, meta, [mapping.(ix)]} + other -> + other + end + + params = + Enum.map params, fn + {val, type} -> + {val, rewrite_type(type, mapping)} + val -> + val + end + + %{part | expr: expr, params: params} + end + + defp rewrite_type({composite, {ix, field}}, mapping) when is_integer(ix) do + {composite, {mapping.(ix), field}} + end + + defp rewrite_type({ix, field}, mapping) when is_integer(ix) do + {mapping.(ix), field} + end + + defp rewrite_type(other, _mapping) do + other + end + + @doc """ + Define the query cache table. + """ + def new_query_cache(atom_name) do + :ets.new(atom_name || __MODULE__, [:set, :public, read_concurrency: true]) + end + + @doc """ + Plans the query for execution. + + Planning happens in multiple steps: + + 1. First the query is planned by retrieving + its cache key, casting and merging parameters + + 2. Then a cache lookup is done, if the query is + cached, we are done + + 3. If there is no cache, we need to actually + normalize and validate the query, asking the + adapter to prepare it + + 4. The query is sent to the adapter to be generated + + ## Cache + + All entries in the query, except the preload and sources + field, should be part of the cache key. + + The cache value is the compiled query by the adapter + along-side the select expression. + """ + def query(query, operation, cache, adapter, counter) do + {query, params, key} = plan(query, operation, adapter) + query_with_cache(key, query, operation, cache, adapter, counter, params) + end + + defp query_with_cache(key, query, operation, cache, adapter, counter, params) do + case query_lookup(key, query, operation, cache, adapter, counter) do + {_, select, prepared} -> + {build_meta(query, select), {:nocache, prepared}, params} + {_key, :cached, select, cached} -> + update = &cache_update(cache, key, &1) + reset = &cache_reset(cache, key, &1) + {build_meta(query, select), {:cached, update, reset, cached}, params} + {_key, :cache, select, prepared} -> + update = &cache_update(cache, key, &1) + {build_meta(query, select), {:cache, update, prepared}, params} + end + end + + defp query_lookup(:nocache, query, operation, _cache, adapter, counter) do + query_without_cache(query, operation, adapter, counter) + end + + defp query_lookup(key, query, operation, cache, adapter, counter) do + case :ets.lookup(cache, key) do + [term] -> term + [] -> query_prepare(query, operation, adapter, counter, cache, key) + end + end + + defp query_prepare(query, operation, adapter, counter, cache, key) do + case query_without_cache(query, operation, adapter, counter) do + {:cache, select, prepared} -> + cache_insert(cache, key, {key, :cache, select, prepared}) + {:nocache, _, _} = nocache -> + nocache + end + end + + defp cache_insert(cache, key, elem) do + case :ets.insert_new(cache, elem) do + true -> + elem + false -> + [elem] = :ets.lookup(cache, key) + elem + end + end + + defp cache_update(cache, key, cached) do + _ = :ets.update_element(cache, key, [{2, :cached}, {4, cached}]) + :ok + end + + defp cache_reset(cache, key, prepared) do + _ = :ets.update_element(cache, key, [{2, :cache}, {4, prepared}]) + :ok + end + + defp query_without_cache(query, operation, adapter, counter) do + {query, select} = normalize(query, operation, adapter, counter) + {cache, 
prepared} = adapter.prepare(operation, query) + {cache, select, prepared} + end + + defp build_meta(%{sources: sources, preloads: preloads}, select) do + %{select: select, preloads: preloads, sources: sources} + end + + @doc """ + Prepares the query for cache. + + This means all the parameters from query expressions are + merged into a single value and their entries are pruned + from the query. + + This function is called by the backend before invoking + any cache mechanism. + """ + @spec plan(Ecto.Query.t, atom, module) :: {planned_query :: Ecto.Query.t, parameters :: list, cache_key :: any} + def plan(query, operation, adapter) do + query + |> plan_sources(adapter) + |> plan_assocs() + |> plan_combinations(adapter) + |> plan_ctes(adapter) + |> plan_wheres(adapter) + |> plan_cache(operation, adapter) + rescue + e -> + # Reraise errors so we ignore the planner inner stacktrace + filter_and_reraise e, __STACKTRACE__ + end + + @doc """ + Prepare all sources, by traversing and expanding from, joins, subqueries. + """ + def plan_sources(query, adapter) do + {from, source} = plan_from(query, adapter) + + # Set up the initial source so we can refer + # to the parent in subqueries in joins + query = %{query | sources: {source}} + + {joins, sources, tail_sources} = plan_joins(query, [source], length(query.joins), adapter) + + %{query | from: from, + joins: joins |> Enum.reverse, + sources: (tail_sources ++ sources) |> Enum.reverse |> List.to_tuple()} + end + + defp plan_from(%{from: nil} = query, _adapter) do + error!(query, "query must have a from expression") + end + + defp plan_from(%{from: from} = query, adapter) do + plan_source(query, from, adapter) + end + + defp plan_source(query, %{source: %Ecto.SubQuery{} = subquery, prefix: prefix} = expr, adapter) do + subquery = plan_subquery(subquery, query, prefix, adapter, true) + {%{expr | source: subquery}, subquery} + end + + defp plan_source(query, %{source: {nil, schema}} = expr, _adapter) + when is_atom(schema) and schema != nil do + source = schema.__schema__(:source) + prefix = plan_source_schema_prefix(expr, schema) || query.prefix + {%{expr | source: {source, schema}}, {source, schema, prefix}} + end + + defp plan_source(query, %{source: {source, schema}, prefix: prefix} = expr, _adapter) + when is_binary(source) and is_atom(schema), + do: {expr, {source, schema, prefix || query.prefix}} + + defp plan_source(_query, %{source: {:fragment, _, _} = source, prefix: nil} = expr, _adapter), + do: {expr, source} + + defp plan_source(query, %{source: {:fragment, _, _}, prefix: prefix} = expr, _adapter), + do: error!(query, expr, "cannot set prefix: #{inspect(prefix)} option for fragment joins") + + defp plan_subquery(subquery, query, prefix, adapter, source?) do + %{query: inner_query} = subquery + + inner_query = %{ + inner_query + | prefix: prefix || subquery.query.prefix || query.prefix, + aliases: Map.put(inner_query.aliases, @parent_as, query) + } + + {inner_query, params, key} = plan(inner_query, :all, adapter) + assert_no_subquery_assocs!(inner_query) + + {inner_query, select} = + inner_query + |> ensure_select(true) + |> normalize_subquery_select(adapter, source?) 
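+    # The subquery must expose a select so the parent query knows which
+    # fields (and types) it returns; normalize_subquery_select/3 reduces
+    # the select to a map, struct or source shape for that purpose.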
+ + {_, inner_query} = pop_in(inner_query.aliases[@parent_as]) + %{subquery | query: inner_query, params: params, cache: key, select: select} + rescue + e -> raise Ecto.SubQueryError, query: query, exception: e + end + + # The prefix for form are computed upfront, but not for joins + defp plan_source_schema_prefix(%FromExpr{prefix: prefix}, _schema), + do: prefix + + defp plan_source_schema_prefix(%JoinExpr{prefix: prefix}, schema), + do: prefix || schema.__schema__(:prefix) + + defp assert_no_subquery_assocs!(%{assocs: assocs, preloads: preloads} = query) + when assocs != [] or preloads != [] do + error!(query, "cannot preload associations in subquery") + end + + defp assert_no_subquery_assocs!(query) do + query + end + + defp normalize_subquery_select(query, adapter, source?) do + {schema_or_source, expr, %{select: select} = query} = rewrite_subquery_select_expr(query, source?) + {expr, _} = prewalk(expr, :select, query, select, 0, adapter) + {{:map, types}, _fields, _from} = collect_fields(expr, [], :never, query, select.take, true) + {query, subquery_source(schema_or_source, types)} + end + + defp subquery_source(nil, types), do: {:map, types} + defp subquery_source(name, types) when is_atom(name), do: {:struct, name, types} + defp subquery_source({:source, schema, prefix, types}, only) do + types = Enum.map(only, fn {field, _} -> {field, Keyword.get(types, field, :any)} end) + {:source, schema, prefix, types} + end + + defp rewrite_subquery_select_expr(%{select: select} = query, source?) do + %{expr: expr, take: take} = select + + case subquery_select(expr, take, query) do + {schema_or_source, fields} -> + expr = {:%{}, [], fields} + {schema_or_source, expr, put_in(query.select.expr, expr)} + + :error when source? -> + error!(query, "subquery/cte must select a source (t), a field (t.field) or a map, got: `#{Macro.to_string(expr)}`") + + :error -> + expr = {:%{}, [], [result: expr]} + {nil, expr, put_in(query.select.expr, expr)} + end + end + + defp subquery_select({:merge, _, [left, right]}, take, query) do + {left_struct, left_fields} = subquery_select(left, take, query) + {right_struct, right_fields} = subquery_select(right, take, query) + {left_struct || right_struct, Keyword.merge(left_fields, right_fields)} + end + defp subquery_select({:%, _, [name, map]}, take, query) do + {_, fields} = subquery_select(map, take, query) + {name, fields} + end + defp subquery_select({:%{}, _, [{:|, _, [{:&, [], [ix]}, pairs]}]} = expr, take, query) do + assert_subquery_fields!(query, expr, pairs) + {source, _} = source_take!(:select, query, take, ix, ix) + + # In case of map updates, we need to remove duplicated fields + # at query time because we use the field names as aliases and + # duplicate aliases will lead to invalid queries. 
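+    # As an illustration (hypothetical schema and field), a subquery select
+    # such as `%{p | title: ^new_title}` keeps every other source field
+    # (computed as `kept_keys` below) and appends the overriding `:title`
+    # pair from the update.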
+ kept_keys = subquery_source_fields(source) -- Keyword.keys(pairs) + {keep_source_or_struct(source), subquery_fields(kept_keys, ix) ++ pairs} + end + defp subquery_select({:%{}, _, pairs} = expr, _take, query) do + assert_subquery_fields!(query, expr, pairs) + {nil, pairs} + end + defp subquery_select({:&, _, [ix]}, take, query) do + {source, _} = source_take!(:select, query, take, ix, ix) + fields = subquery_source_fields(source) + {keep_source_or_struct(source), subquery_fields(fields, ix)} + end + defp subquery_select({{:., _, [{:&, _, [_]}, field]}, _, []} = expr, _take, _query) do + {nil, [{field, expr}]} + end + defp subquery_select(_expr, _take, _query) do + :error + end + + defp subquery_fields(fields, ix) do + for field <- fields do + {field, {{:., [], [{:&, [], [ix]}, field]}, [], []}} + end + end + + defp keep_source_or_struct({:source, _, _, _} = source), do: source + defp keep_source_or_struct({:struct, name, _}), do: name + defp keep_source_or_struct(_), do: nil + + defp subquery_source_fields({:source, _, _, types}), do: Keyword.keys(types) + defp subquery_source_fields({:struct, _, types}), do: Keyword.keys(types) + defp subquery_source_fields({:map, types}), do: Keyword.keys(types) + + defp subquery_type_for({:source, _, _, fields}, field), do: Keyword.fetch(fields, field) + defp subquery_type_for({:struct, _name, types}, field), do: subquery_type_for_value(types, field) + defp subquery_type_for({:map, types}, field), do: subquery_type_for_value(types, field) + + defp subquery_type_for_value(types, field) do + case Keyword.fetch(types, field) do + {:ok, {:value, type}} -> {:ok, type} + {:ok, _} -> {:ok, :any} + :error -> :error + end + end + + defp assert_subquery_fields!(query, expr, pairs) do + Enum.each(pairs, fn + {key, _} when not is_atom(key) -> + error!(query, "only atom keys are allowed when selecting a map in subquery, got: `#{Macro.to_string(expr)}`") + + {key, value} -> + if valid_subquery_value?(value) do + {key, value} + else + error!(query, "atoms, maps, lists, tuples and sources are not allowed as map values in subquery, got: `#{Macro.to_string(expr)}`") + end + end) + end + + defp valid_subquery_value?({_, _}), do: false + defp valid_subquery_value?(args) when is_list(args), do: false + defp valid_subquery_value?({container, _, args}) + when container in [:{}, :%{}, :&] and is_list(args), do: false + defp valid_subquery_value?(nil), do: true + defp valid_subquery_value?(arg) when is_atom(arg), do: is_boolean(arg) + defp valid_subquery_value?(_), do: true + + defp plan_joins(query, sources, offset, adapter) do + plan_joins(query.joins, query, [], sources, [], 1, offset, adapter) + end + + defp plan_joins([%JoinExpr{assoc: {ix, assoc}, qual: qual, on: on, prefix: prefix} = join|t], + query, joins, sources, tail_sources, counter, offset, adapter) do + source = fetch_source!(sources, ix) + schema = schema_for_association_join!(query, join, source) + refl = schema.__schema__(:association, assoc) + + unless refl do + error! query, join, "could not find association `#{assoc}` on schema #{inspect schema}" + end + + # If we have the following join: + # + # from p in Post, + # join: p in assoc(p, :comments) + # + # The callback below will return a query that contains only + # joins in a way it starts with the Post and ends in the + # Comment. + # + # This means we need to rewrite the joins below to properly + # shift the &... 
identifier in a way that: + # + # &0 -> becomes assoc ix + # &LAST_JOIN -> becomes counter + # + # All values in the middle should be shifted by offset, + # all values after join are already correct. + child = refl.__struct__.joins_query(refl) + + # Rewrite prefixes: + # 1. the child query has the parent query prefix + # (note the child query should NEVER have a prefix) + # 2. from and joins can have their prefixes explicitly + # overwritten by the join prefix + child = rewrite_prefix(child, query.prefix) + child = update_in child.from, &rewrite_prefix(&1, prefix) + child = update_in child.joins, &Enum.map(&1, fn join -> rewrite_prefix(join, prefix) end) + + last_ix = length(child.joins) + source_ix = counter + + {_, child_from_source} = plan_source(child, child.from, adapter) + + {child_joins, child_sources, child_tail} = + plan_joins(child, [child_from_source], offset + last_ix - 1, adapter) + + # Rewrite joins indexes as mentioned above + child_joins = Enum.map(child_joins, &rewrite_join(&1, qual, ix, last_ix, source_ix, offset)) + + # Drop the last resource which is the association owner (it is reversed) + child_sources = Enum.drop(child_sources, -1) + + [current_source|child_sources] = child_sources + child_sources = child_tail ++ child_sources + + plan_joins(t, query, attach_on(child_joins, on) ++ joins, [current_source|sources], + child_sources ++ tail_sources, counter + 1, offset + length(child_sources), adapter) + end + + defp plan_joins([%JoinExpr{source: %Ecto.Query{} = join_query, qual: qual, on: on, prefix: prefix} = join|t], + query, joins, sources, tail_sources, counter, offset, adapter) do + case join_query do + %{order_bys: [], limit: nil, offset: nil, group_bys: [], joins: [], + havings: [], preloads: [], assocs: [], distinct: nil, lock: nil} -> + join_query = rewrite_prefix(join_query, query.prefix) + from = rewrite_prefix(join_query.from, prefix) + {from, source} = plan_source(join_query, from, adapter) + [join] = attach_on(query_to_joins(qual, from.source, join_query, counter), on) + plan_joins(t, query, [join|joins], [source|sources], tail_sources, counter + 1, offset, adapter) + _ -> + error! query, join, """ + invalid query was interpolated in a join. + If you want to pass a query to a join, you must either: + + 1. Make sure the query only has `where` conditions (which will be converted to ON clauses) + 2. 
Or wrap the query in a subquery by calling subquery(query) + """ + end + end + + defp plan_joins([%JoinExpr{} = join|t], + query, joins, sources, tail_sources, counter, offset, adapter) do + {join, source} = plan_source(query, %{join | ix: counter}, adapter) + plan_joins(t, query, [join|joins], [source|sources], tail_sources, counter + 1, offset, adapter) + end + + defp plan_joins([], _query, joins, sources, tail_sources, _counter, _offset, _adapter) do + {joins, sources, tail_sources} + end + + defp attach_on([%{on: on} = h | t], %{expr: expr, params: params}) do + [%{h | on: merge_expr_and_params(:and, on, expr, params)} | t] + end + + defp rewrite_prefix(expr, nil), do: expr + defp rewrite_prefix(%{prefix: nil} = expr, prefix), do: %{expr | prefix: prefix} + defp rewrite_prefix(expr, _prefix), do: expr + + defp rewrite_join(%{on: on, ix: join_ix} = join, qual, ix, last_ix, source_ix, inc_ix) do + expr = Macro.prewalk on.expr, fn + {:&, meta, [join_ix]} -> + {:&, meta, [rewrite_ix(join_ix, ix, last_ix, source_ix, inc_ix)]} + expr = %Ecto.Query.Tagged{type: {type_ix, type}} when is_integer(type_ix) -> + %{expr | type: {rewrite_ix(type_ix, ix, last_ix, source_ix, inc_ix), type}} + other -> + other + end + + params = Enum.map(on.params, &rewrite_param_ix(&1, ix, last_ix, source_ix, inc_ix)) + + %{join | on: %{on | expr: expr, params: params}, qual: qual, + ix: rewrite_ix(join_ix, ix, last_ix, source_ix, inc_ix)} + end + + # We need to replace the source by the one from the assoc + defp rewrite_ix(0, ix, _last_ix, _source_ix, _inc_x), do: ix + + # The last entry will have the current source index + defp rewrite_ix(last_ix, _ix, last_ix, source_ix, _inc_x), do: source_ix + + # All above last are already correct + defp rewrite_ix(join_ix, _ix, last_ix, _source_ix, _inc_ix) when join_ix > last_ix, do: join_ix + + # All others need to be incremented by the offset sources + defp rewrite_ix(join_ix, _ix, _last_ix, _source_ix, inc_ix), do: join_ix + inc_ix + + defp rewrite_param_ix({value, {upper, {type_ix, field}}}, ix, last_ix, source_ix, inc_ix) when is_integer(type_ix) do + {value, {upper, {rewrite_ix(type_ix, ix, last_ix, source_ix, inc_ix), field}}} + end + + defp rewrite_param_ix({value, {type_ix, field}}, ix, last_ix, source_ix, inc_ix) when is_integer(type_ix) do + {value, {rewrite_ix(type_ix, ix, last_ix, source_ix, inc_ix), field}} + end + + defp rewrite_param_ix(param, _, _, _, _), do: param + + defp fetch_source!(sources, ix) when is_integer(ix) do + case Enum.reverse(sources) |> Enum.fetch(ix) do + {:ok, source} -> source + :error -> raise ArgumentError, "could not find a source with index `#{ix}` in `#{inspect sources}" + end + end + + defp fetch_source!(_, ix) do + raise ArgumentError, "invalid binding index: `#{inspect ix}` (check if you're binding using a valid :as atom)" + end + + defp schema_for_association_join!(query, join, source) do + case source do + {:fragment, _, _} -> + error! query, join, "cannot perform association joins on fragment sources" + + {source, nil, _} -> + error! query, join, "cannot perform association join on #{inspect source} " <> + "because it does not have a schema" + + {_, schema, _} -> + schema + + %Ecto.SubQuery{select: {:source, {_, schema}, _, _}} -> + schema + + %Ecto.SubQuery{select: {:struct, schema, _}} -> + schema + + %Ecto.SubQuery{} -> + error! query, join, "can only perform association joins on subqueries " <> + "that return a source with schema in select" + + _ -> + error! 
query, join, "can only perform association joins on sources with a schema" + end + end + + @spec plan_wheres(Ecto.Query.t, module) :: Ecto.Query.t + defp plan_wheres(query, adapter) do + wheres = + Enum.map(query.wheres, fn + %{subqueries: []} = where -> + where + + %{subqueries: subqueries} = where -> + %{where | subqueries: Enum.map(subqueries, &plan_subquery(&1, query, nil, adapter, false))} + end) + + %{query | wheres: wheres} + end + + @doc """ + Prepare the parameters by merging and casting them according to sources. + """ + def plan_cache(query, operation, adapter) do + {query, params, cache} = traverse_cache(query, operation, {[], []}, adapter) + {query, Enum.reverse(params), cache} + end + + defp traverse_cache(query, operation, cache_params, adapter) do + fun = &{&3, merge_cache(&1, &2, &3, &4, operation, adapter)} + {query, {cache, params}} = traverse_exprs(query, operation, cache_params, fun) + {query, params, finalize_cache(query, operation, cache)} + end + + defp merge_cache(:from, _query, from, {cache, params}, _operation, _adapter) do + {key, params} = source_cache(from, params) + {merge_cache({:from, key, from.hints}, cache, key != :nocache), params} + end + + defp merge_cache(kind, query, expr, {cache, params}, _operation, adapter) + when kind in ~w(select distinct limit offset)a do + if expr do + {params, cacheable?} = cast_and_merge_params(kind, query, expr, params, adapter) + {merge_cache({kind, expr_to_cache(expr)}, cache, cacheable?), params} + else + {cache, params} + end + end + + defp merge_cache(kind, query, exprs, {cache, params}, _operation, adapter) + when kind in ~w(where update group_by having order_by)a do + {expr_cache, {params, cacheable?}} = + Enum.map_reduce exprs, {params, true}, fn expr, {params, cacheable?} -> + {params, current_cacheable?} = cast_and_merge_params(kind, query, expr, params, adapter) + {expr_to_cache(expr), {params, cacheable? and current_cacheable?}} + end + + case expr_cache do + [] -> {cache, params} + _ -> {merge_cache({kind, expr_cache}, cache, cacheable?), params} + end + end + + defp merge_cache(:join, query, exprs, {cache, params}, _operation, adapter) do + {expr_cache, {params, cacheable?}} = + Enum.map_reduce exprs, {params, true}, fn + %JoinExpr{on: on, qual: qual, hints: hints} = join, {params, cacheable?} -> + {key, params} = source_cache(join, params) + {params, join_cacheable?} = cast_and_merge_params(:join, query, join, params, adapter) + {params, on_cacheable?} = cast_and_merge_params(:join, query, on, params, adapter) + {{qual, key, on.expr, hints}, + {params, cacheable? and join_cacheable? and on_cacheable? and key != :nocache}} + end + + case expr_cache do + [] -> {cache, params} + _ -> {merge_cache({:join, expr_cache}, cache, cacheable?), params} + end + end + + defp merge_cache(:windows, query, exprs, {cache, params}, _operation, adapter) do + {expr_cache, {params, cacheable?}} = + Enum.map_reduce exprs, {params, true}, fn {key, expr}, {params, cacheable?} -> + {params, current_cacheable?} = cast_and_merge_params(:windows, query, expr, params, adapter) + {{key, expr_to_cache(expr)}, {params, cacheable? and current_cacheable?}} + end + + case expr_cache do + [] -> {cache, params} + _ -> {merge_cache({:windows, expr_cache}, cache, cacheable?), params} + end + end + + defp merge_cache(:combination, _query, combinations, cache_and_params, operation, adapter) do + # In here we add each combination as its own entry in the cache key. 
+ # We could group them to avoid multiple keys, but since they are uncommon, we keep it simple. + Enum.reduce combinations, cache_and_params, fn {modifier, query}, {cache, params} -> + {_, params, inner_cache} = traverse_cache(query, operation, {[], params}, adapter) + {merge_cache({modifier, inner_cache}, cache, inner_cache != :nocache), params} + end + end + + defp merge_cache(:with_cte, _query, nil, cache_and_params, _operation, _adapter) do + cache_and_params + end + + defp merge_cache(:with_cte, query, with_expr, cache_and_params, _operation, adapter) do + %{queries: queries, recursive: recursive} = with_expr + key = if recursive, do: :recursive_cte, else: :non_recursive_cte + + # In here we add each cte as its own entry in the cache key. + # We could group them to avoid multiple keys, but since they are uncommon, we keep it simple. + Enum.reduce queries, cache_and_params, fn + {name, %Ecto.Query{} = query}, {cache, params} -> + {_, params, inner_cache} = traverse_cache(query, :all, {[], params}, adapter) + {merge_cache({key, name, inner_cache}, cache, inner_cache != :nocache), params} + + {name, %Ecto.Query.QueryExpr{} = query_expr}, {cache, params} -> + {params, cacheable?} = cast_and_merge_params(:with_cte, query, query_expr, params, adapter) + {merge_cache({key, name, expr_to_cache(query_expr)}, cache, cacheable?), params} + end + end + + defp expr_to_cache(%QueryExpr{expr: expr}), do: expr + defp expr_to_cache(%SelectExpr{expr: expr}), do: expr + defp expr_to_cache(%BooleanExpr{op: op, expr: expr, subqueries: []}), do: {op, expr} + defp expr_to_cache(%BooleanExpr{op: op, expr: expr, subqueries: subqueries}) do + # Alternate implementation could be replace {:subquery, i} expression in expr. + # Current strategy appends [{:subquery, i, cache}], where cache is the cache key for this subquery. + {op, expr, Enum.map(subqueries, fn %{cache: cache} -> {:subquery, cache} end)} + end + + @spec cast_and_merge_params(atom, Ecto.Query.t, any, list, module) :: {params :: list, cacheable? :: boolean} + defp cast_and_merge_params(kind, query, expr, params, adapter) do + Enum.reduce expr.params, {params, true}, fn + {:subquery, i}, {acc, cacheable?} -> + # This is the place holder to intersperse subquery parameters. + %Ecto.SubQuery{params: subparams, cache: cache} = Enum.fetch!(expr.subqueries, i) + {Enum.reverse(subparams, acc), cacheable? 
and cache != :nocache} + + {v, type}, {acc, cacheable?} -> + case cast_param(kind, query, expr, v, type, adapter) do + {:in, v} -> {Enum.reverse(v, acc), false} + v -> {[v | acc], cacheable?} + end + end + end + + defp merge_cache(_left, _right, false), do: :nocache + defp merge_cache(_left, :nocache, true), do: :nocache + defp merge_cache(left, right, true), do: [left|right] + + defp finalize_cache(_query, _operation, :nocache) do + :nocache + end + + defp finalize_cache(query, operation, cache) do + %{assocs: assocs, prefix: prefix, lock: lock, select: select, aliases: aliases} = query + aliases = Map.delete(aliases, @parent_as) + + cache = + case select do + %{take: take} when take != %{} -> + [take: take] ++ cache + _ -> + cache + end + + cache = + cache + |> prepend_if(assocs != [], [assocs: assocs]) + |> prepend_if(prefix != nil, [prefix: prefix]) + |> prepend_if(lock != nil, [lock: lock]) + |> prepend_if(aliases != %{}, [aliases: aliases]) + + [operation | cache] + end + + defp prepend_if(cache, true, prepend), do: prepend ++ cache + defp prepend_if(cache, false, _prepend), do: cache + + defp source_cache(%{source: {_, nil} = source, prefix: prefix}, params), + do: {{source, prefix}, params} + defp source_cache(%{source: {bin, schema}, prefix: prefix}, params), + do: {{bin, schema, schema.__schema__(:hash), prefix}, params} + defp source_cache(%{source: {:fragment, _, _} = source, prefix: prefix}, params), + do: {{source, prefix}, params} + defp source_cache(%{source: %Ecto.SubQuery{params: inner, cache: key}}, params), + do: {key, Enum.reverse(inner, params)} + + defp cast_param(_kind, query, expr, %DynamicExpr{}, _type, _value) do + error! query, expr, "invalid dynamic expression", + "dynamic expressions can only be interpolated at the top level of where, having, group_by, order_by, update or a join's on" + end + defp cast_param(_kind, query, expr, [{key, _} | _], _type, _value) when is_atom(key) do + error! query, expr, "invalid keyword list", + "keyword lists are only allowed at the top level of where, having, distinct, order_by, update or a join's on" + end + defp cast_param(_kind, query, expr, %x{}, {:in, _type}, _value) when x in [Ecto.Query, Ecto.SubQuery] do + error! query, expr, "an #{inspect(x)} struct is not supported as right-side value of `in` operator", + "Did you mean to write `expr in subquery(query)` instead?" + end + defp cast_param(kind, query, expr, v, type, adapter) do + type = field_type!(kind, query, expr, type) + + try do + case cast_param(kind, type, v, adapter) do + {:ok, v} -> v + {:error, error} -> error! query, expr, error + end + catch + :error, %Ecto.QueryError{} = e -> + raise Ecto.Query.CastError, value: v, type: type, message: Exception.message(e) + end + end + + defp cast_param(kind, type, v, adapter) do + with {:ok, type} <- normalize_param(kind, type, v), + {:ok, v} <- cast_param(kind, type, v), + do: dump_param(adapter, type, v) + end + + @doc """ + Prepare association fields found in the query. + """ + def plan_assocs(query) do + plan_assocs(query, 0, query.assocs) + query + end + + defp plan_assocs(_query, _ix, []), do: :ok + defp plan_assocs(query, ix, assocs) do + # We validate the schema exists when preparing joins above + {_, parent_schema, _} = get_preload_source!(query, ix) + + Enum.each assocs, fn {assoc, {child_ix, child_assocs}} -> + refl = parent_schema.__schema__(:association, assoc) + + unless refl do + error! 
query, "field `#{inspect parent_schema}.#{assoc}` " <> + "in preload is not an association" + end + + case find_source_expr(query, child_ix) do + %JoinExpr{qual: qual} when qual in [:inner, :left, :inner_lateral, :left_lateral] -> + :ok + %JoinExpr{qual: qual} -> + error! query, "association `#{inspect parent_schema}.#{assoc}` " <> + "in preload requires an inner, left or lateral join, got #{qual} join" + _ -> + :ok + end + + plan_assocs(query, child_ix, child_assocs) + end + end + + defp plan_combinations(query, adapter) do + combinations = + Enum.map query.combinations, fn {type, combination_query} -> + {prepared_query, _params, _key} = combination_query |> attach_prefix(query) |> plan(:all, adapter) + prepared_query = prepared_query |> ensure_select(true) + {type, prepared_query} + end + + %{query | combinations: combinations} + end + + defp plan_ctes(%Ecto.Query{with_ctes: nil} = query, _adapter), do: query + defp plan_ctes(%Ecto.Query{with_ctes: %{queries: queries}} = query, adapter) do + queries = + Enum.map queries, fn + {name, %Ecto.Query{} = cte_query} -> + {planned_query, _params, _key} = cte_query |> attach_prefix(query) |> plan(:all, adapter) + planned_query = planned_query |> ensure_select(true) + {name, planned_query} + + {name, other} -> + {name, other} + end + + put_in(query.with_ctes.queries, queries) + end + + defp find_source_expr(query, 0) do + query.from + end + + defp find_source_expr(query, ix) do + Enum.find(query.joins, & &1.ix == ix) + end + + @doc """ + Used for customizing the query returning result. + """ + def ensure_select(%{select: select} = query, _fields) when select != nil do + query + end + def ensure_select(%{select: nil}, []) do + raise ArgumentError, ":returning expects at least one field to be given, got an empty list" + end + def ensure_select(%{select: nil} = query, fields) when is_list(fields) do + %{query | select: %SelectExpr{expr: {:&, [], [0]}, take: %{0 => {:any, fields}}, + line: __ENV__.line, file: __ENV__.file}} + end + def ensure_select(%{select: nil, from: %{source: {_, nil}}} = query, true) do + error! query, "queries that do not have a schema need to explicitly pass a :select clause" + end + def ensure_select(%{select: nil} = query, true) do + %{query | select: %SelectExpr{expr: {:&, [], [0]}, line: __ENV__.line, file: __ENV__.file}} + end + def ensure_select(%{select: nil} = query, false) do + query + end + + @doc """ + Normalizes and validates the query. + + After the query was planned and there is no cache + entry, we need to update its interpolations and check + its fields and associations exist and are valid. 
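+
+  As a simplified illustration (assuming the counter starts at zero and these
+  are the only interpolated values in the query), normalization renumbers `^`
+  pins sequentially in the traversal order used by this module, so:
+
+      where: p.visits > ^min, limit: ^n
+
+  ends up with parameter placeholders `0` (for `^min`) and `1` (for `^n`).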
+ """ + def normalize(query, operation, adapter, counter) do + query + |> normalize_query(operation, adapter, counter) + |> elem(0) + |> normalize_select(keep_literals?(operation, query)) + rescue + e -> + # Reraise errors so we ignore the planner inner stacktrace + filter_and_reraise e, __STACKTRACE__ + end + + defp keep_literals?(:insert_all, _), do: true + defp keep_literals?(_, %{combinations: combinations}), do: combinations != [] + + defp normalize_query(query, operation, adapter, counter) do + case operation do + :all -> + assert_no_update!(query, operation) + :insert_all -> + assert_no_update!(query, operation) + :update_all -> + assert_update!(query, operation) + assert_only_filter_expressions!(query, operation) + :delete_all -> + assert_no_update!(query, operation) + assert_only_filter_expressions!(query, operation) + end + + traverse_exprs(query, operation, counter, + &validate_and_increment(&1, &2, &3, &4, operation, adapter)) + end + + defp validate_and_increment(:from, query, %{source: %Ecto.SubQuery{}}, _counter, kind, _adapter) when kind not in ~w(all insert_all)a do + error! query, "`#{kind}` does not allow subqueries in `from`" + end + defp validate_and_increment(:from, query, %{source: source} = expr, counter, _kind, adapter) do + {source, acc} = prewalk_source(source, :from, query, expr, counter, adapter) + {%{expr | source: source}, acc} + end + + defp validate_and_increment(kind, query, expr, counter, _operation, adapter) + when kind in ~w(select distinct limit offset)a do + if expr do + prewalk(kind, query, expr, counter, adapter) + else + {nil, counter} + end + end + + defp validate_and_increment(kind, query, exprs, counter, _operation, adapter) + when kind in ~w(where group_by having order_by update)a do + + {exprs, counter} = + Enum.reduce(exprs, {[], counter}, fn + %{expr: []}, {list, acc} -> + {list, acc} + expr, {list, acc} -> + {expr, acc} = prewalk(kind, query, expr, acc, adapter) + {[expr|list], acc} + end) + {Enum.reverse(exprs), counter} + end + + defp validate_and_increment(:with_cte, _query, nil, counter, _operation, _adapter) do + {nil, counter} + end + + defp validate_and_increment(:with_cte, query, with_expr, counter, _operation, adapter) do + fun = &validate_and_increment(&1, &2, &3, &4, :all, adapter) + + {queries, counter} = + Enum.reduce with_expr.queries, {[], counter}, fn + {name, %Ecto.Query{} = inner_query}, {queries, counter} -> + inner_query = put_in(inner_query.aliases[@parent_as], query) + + # We don't want to use normalize_subquery_select because we are + # going to prepare the whole query ourselves next. + {_, _, inner_query} = rewrite_subquery_select_expr(inner_query, true) + {inner_query, counter} = traverse_exprs(inner_query, :all, counter, fun) + + # Now compute the fields as keyword lists so we emit AS in Ecto query. 
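+          # As an illustration (hypothetical CTE), a query selecting
+          # `%{id: p.id, total: count(c.id)}` produces `fields` such as
+          # `[id: <p.id expr>, total: <count(c.id) expr>]`, which the adapter
+          # can render as `SELECT ... AS id, ... AS total`.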
+ %{select: %{expr: expr, take: take}} = inner_query + {{:map, types}, fields, _from} = collect_fields(expr, [], :never, inner_query, take, true) + fields = Enum.zip(Keyword.keys(types), Enum.reverse(fields)) + inner_query = put_in(inner_query.select.fields, fields) + {_, inner_query} = pop_in(inner_query.aliases[@parent_as]) + + {[{name, inner_query} | queries], counter} + + {name, %QueryExpr{expr: {:fragment, _, _} = fragment} = query_expr}, {queries, counter} -> + {fragment, counter} = prewalk_source(fragment, :with_cte, query, with_expr, counter, adapter) + query_expr = %{query_expr | expr: fragment} + {[{name, query_expr} | queries], counter} + end + + {%{with_expr | queries: Enum.reverse(queries)}, counter} + end + + defp validate_and_increment(:join, query, exprs, counter, _operation, adapter) do + Enum.map_reduce exprs, counter, fn join, acc -> + {source, acc} = prewalk_source(join.source, :join, query, join, acc, adapter) + {on, acc} = prewalk(:join, query, join.on, acc, adapter) + {%{join | on: on, source: source, params: nil}, acc} + end + end + + defp validate_and_increment(:windows, query, exprs, counter, _operation, adapter) do + {exprs, counter} = + Enum.reduce(exprs, {[], counter}, fn {name, expr}, {list, acc} -> + {expr, acc} = prewalk(:windows, query, expr, acc, adapter) + {[{name, expr}|list], acc} + end) + + {Enum.reverse(exprs), counter} + end + + defp validate_and_increment(:combination, _query, combinations, counter, operation, adapter) do + fun = &validate_and_increment(&1, &2, &3, &4, operation, adapter) + + {combinations, counter} = + Enum.reduce combinations, {[], counter}, fn {type, combination_query}, {combinations, counter} -> + {combination_query, counter} = traverse_exprs(combination_query, operation, counter, fun) + {combination_query, _} = combination_query |> normalize_select(true) + {[{type, combination_query} | combinations], counter} + end + + {Enum.reverse(combinations), counter} + end + + defp validate_json_path!([path_field | rest], field, embed) do + case embed do + %{related: related, cardinality: :one} -> + unless Enum.any?(related.__schema__(:fields), &Atom.to_string(&1) == path_field) do + raise "field `#{path_field}` does not exist in #{inspect(related)}" + end + + path_embed = related.__schema__(:embed, String.to_atom(path_field)) + validate_json_path!(rest, path_field, path_embed) + + %{related: _, cardinality: :many} -> + unless is_integer(path_field) do + raise "cannot use `#{path_field}` to refer to an item in `embeds_many`" + end + + validate_json_path!(rest, path_field, %{embed | cardinality: :one}) + + other -> + raise "expected field `#{field}` to be of type embed, got: `#{inspect(other)}`" + end + end + + defp validate_json_path!([], _field, _type) do + :ok + end + + defp prewalk_source({:fragment, meta, fragments}, kind, query, expr, acc, adapter) do + {fragments, acc} = prewalk(fragments, kind, query, expr, acc, adapter) + {{:fragment, meta, fragments}, acc} + end + defp prewalk_source(%Ecto.SubQuery{query: inner_query} = subquery, kind, query, _expr, counter, adapter) do + try do + inner_query = put_in inner_query.aliases[@parent_as], query + {inner_query, counter} = normalize_query(inner_query, :all, adapter, counter) + {inner_query, _} = normalize_select(inner_query, true) + {_, inner_query} = pop_in(inner_query.aliases[@parent_as]) + + inner_query = + # If the subquery comes from a select, we are not really interested on the fields + if kind == :where do + inner_query + else + update_in(inner_query.select.fields, fn fields -> + 
subquery.select |> subquery_source_fields() |> Enum.zip(fields) + end) + end + + {%{subquery | query: inner_query}, counter} + rescue + e -> raise Ecto.SubQueryError, query: query, exception: e + end + end + defp prewalk_source(source, _kind, _query, _expr, acc, _adapter) do + {source, acc} + end + + defp prewalk(:update, query, expr, counter, adapter) do + source = get_source!(:update, query, 0) + + {inner, acc} = + Enum.map_reduce expr.expr, counter, fn {op, kw}, counter -> + {kw, acc} = + Enum.map_reduce kw, counter, fn {field, value}, counter -> + {value, acc} = prewalk(value, :update, query, expr, counter, adapter) + {{field_source(source, field), value}, acc} + end + {{op, kw}, acc} + end + + {%{expr | expr: inner, params: nil}, acc} + end + defp prewalk(kind, query, expr, counter, adapter) do + {inner, acc} = prewalk(expr.expr, kind, query, expr, counter, adapter) + {%{expr | expr: inner, params: nil}, acc} + end + + defp prewalk({:subquery, i}, kind, query, expr, acc, adapter) do + prewalk_source(Enum.fetch!(expr.subqueries, i), kind, query, expr, acc, adapter) + end + + defp prewalk({:in, in_meta, [left, {:^, meta, [param]}]}, kind, query, expr, acc, adapter) do + {left, acc} = prewalk(left, kind, query, expr, acc, adapter) + {right, acc} = validate_in(meta, expr, param, acc, adapter) + {{:in, in_meta, [left, right]}, acc} + end + + defp prewalk({:in, in_meta, [left, {:subquery, _} = right]}, kind, query, expr, acc, adapter) do + {left, acc} = prewalk(left, kind, query, expr, acc, adapter) + {right, acc} = prewalk(right, kind, query, expr, acc, adapter) + + case right.query.select.fields do + [_] -> :ok + _ -> error!(query, "subquery must return a single field in order to be used on the right-side of `in`") + end + + {{:in, in_meta, [left, right]}, acc} + end + + defp prewalk({quantifier, meta, [{:subquery, _} = subquery]}, kind, query, expr, acc, adapter) when quantifier in [:exists, :any, :all] do + {subquery, acc} = prewalk(subquery, kind, query, expr, acc, adapter) + + case {quantifier, subquery.query.select.fields} do + {:exists, _} -> + :ok + + {_, [_]} -> + :ok + + _ -> + error!( + query, + "subquery must return a single field in order to be used with #{quantifier}" + ) + end + + {{quantifier, meta, [subquery]}, acc} + end + + defp prewalk({{:., dot_meta, [left, field]}, meta, []}, + kind, query, expr, acc, _adapter) do + {ix, ix_expr, ix_query} = get_ix!(left, kind, query) + extra = if kind == :select, do: [type: type!(kind, ix_query, expr, ix, field)], else: [] + field = field_source(get_source!(kind, ix_query, ix), field) + {{{:., extra ++ dot_meta, [ix_expr, field]}, meta, []}, acc} + end + + defp prewalk({:^, meta, [ix]}, _kind, _query, _expr, acc, _adapter) when is_integer(ix) do + {{:^, meta, [acc]}, acc + 1} + end + + defp prewalk({:type, _, [arg, type]}, kind, query, expr, acc, adapter) do + {arg, acc} = prewalk(arg, kind, query, expr, acc, adapter) + type = field_type!(kind, query, expr, type, true) + {%Ecto.Query.Tagged{value: arg, tag: type, type: Ecto.Type.type(type)}, acc} + end + + defp prewalk({:json_extract_path, meta, [json_field, path]}, kind, query, expr, acc, _adapter) do + {{:., _, [{:&, _, [ix]}, field]}, _, []} = json_field + + case type!(kind, query, expr, ix, field) do + {:parameterized, Ecto.Embedded, embed} -> + validate_json_path!(path, field, embed) + + type -> + case Ecto.Type.type(type) do + :any -> + :ok + + :map -> + :ok + + {:map, _} -> + :ok + + _ -> + raise "expected field `#{field}` to be an embed or a map, got: `#{inspect(type)}`" + 
end + end + + {{:json_extract_path, meta, [json_field, path]}, acc} + end + + defp prewalk(%Ecto.Query.Tagged{value: v, type: type} = tagged, kind, query, expr, acc, adapter) do + if Ecto.Type.base?(type) do + {tagged, acc} + else + {dump_param(kind, query, expr, v, type, adapter), acc} + end + end + + defp prewalk({left, right}, kind, query, expr, acc, adapter) do + {left, acc} = prewalk(left, kind, query, expr, acc, adapter) + {right, acc} = prewalk(right, kind, query, expr, acc, adapter) + {{left, right}, acc} + end + + defp prewalk({left, meta, args}, kind, query, expr, acc, adapter) do + {left, acc} = prewalk(left, kind, query, expr, acc, adapter) + {args, acc} = prewalk(args, kind, query, expr, acc, adapter) + {{left, meta, args}, acc} + end + + defp prewalk(list, kind, query, expr, acc, adapter) when is_list(list) do + Enum.map_reduce(list, acc, &prewalk(&1, kind, query, expr, &2, adapter)) + end + + defp prewalk(other, _kind, _query, _expr, acc, _adapter) do + {other, acc} + end + + defp dump_param(kind, query, expr, v, type, adapter) do + type = field_type!(kind, query, expr, type) + + case dump_param(kind, type, v, adapter) do + {:ok, v} -> + v + {:error, error} -> + error = error <> ". Or the value is incompatible or it must be " <> + "interpolated (using ^) so it may be cast accordingly" + error! query, expr, error + end + end + + defp dump_param(kind, type, v, adapter) do + with {:ok, type} <- normalize_param(kind, type, v), + do: dump_param(adapter, type, v) + end + + defp validate_in(meta, expr, param, acc, adapter) do + {v, t} = Enum.fetch!(expr.params, param) + length = length(v) + + case adapter.dumpers(t, t) do + [{:in, _} | _] -> {{:^, meta, [acc, length]}, acc + length} + _ -> {{:^, meta, [acc, length]}, acc + 1} + end + end + + defp normalize_select(%{select: nil} = query, _keep_literals?) do + {query, nil} + end + + defp normalize_select(query, keep_literals?) do + %{assocs: assocs, preloads: preloads, select: select} = query + %{take: take, expr: expr} = select + {tag, from_take} = Map.get(take, 0, {:any, []}) + source = get_source!(:select, query, 0) + assocs = merge_assocs(assocs, query) + + # In from, if there is a schema and we have a map tag with preloads, + # it needs to be converted to a map in a later pass. + {take, from_tag} = + case source do + {source, schema, _} + when tag == :map and preloads != [] and is_binary(source) and schema != nil -> + {Map.put(take, 0, {:struct, from_take}), :map} + + _ -> + {take, :any} + end + + {postprocess, fields, from} = + collect_fields(expr, [], :none, query, take, keep_literals?) + + {fields, preprocess, from} = + case from do + {:ok, from_pre, from_expr, from_taken} -> + {assoc_exprs, assoc_fields} = collect_assocs([], [], query, tag, from_take, assocs) + fields = from_taken ++ Enum.reverse(assoc_fields, Enum.reverse(fields)) + preprocess = [from_pre | Enum.reverse(assoc_exprs)] + {fields, preprocess, {from_tag, from_expr}} + + :none when preloads != [] or assocs != [] -> + error! query, "the binding used in `from` must be selected in `select` when using `preload`" + + :none -> + {Enum.reverse(fields), [], :none} + end + + select = %{ + preprocess: preprocess, + postprocess: postprocess, + take: from_take, + assocs: assocs, + from: from + } + + {put_in(query.select.fields, fields), select} + end + + # Handling of source + + defp collect_fields({:merge, _, [{:&, _, [0]}, right]}, fields, :none, query, take, keep_literals?) 
do + {expr, taken} = source_take!(:select, query, take, 0, 0) + from = {:ok, {:source, :from}, expr, taken} + + {right, right_fields, _from} = collect_fields(right, [], from, query, take, keep_literals?) + from = {:ok, {:merge, {:source, :from}, right}, expr, taken ++ Enum.reverse(right_fields)} + + {{:source, :from}, fields, from} + end + + defp collect_fields({:&, _, [0]}, fields, :none, query, take, _keep_literals?) do + {expr, taken} = source_take!(:select, query, take, 0, 0) + {{:source, :from}, fields, {:ok, {:source, :from}, expr, taken}} + end + + defp collect_fields({:&, _, [0]}, fields, from, _query, _take, _keep_literals?) + when from != :never do + {{:source, :from}, fields, from} + end + + defp collect_fields({:&, _, [ix]}, fields, from, query, take, _keep_literals?) do + {expr, taken} = source_take!(:select, query, take, ix, ix) + {expr, Enum.reverse(taken, fields), from} + end + + # Expression handling + + defp collect_fields({agg, _, [{{:., dot_meta, [{:&, _, [_]}, _]}, _, []} | _]} = expr, + fields, from, _query, _take, _keep_literals?) + when agg in @aggs do + type = + case agg do + :count -> :integer + :row_number -> :integer + :rank -> :integer + :dense_rank -> :integer + :ntile -> :integer + # If it is possible to upcast, we do it, otherwise keep the DB value. + # For example, an average of integers will return a decimal, which can't be cast + # as an integer. But an average of "moneys" should be upcast. + _ -> {:maybe, Keyword.fetch!(dot_meta, :type)} + end + + {{:value, type}, [expr | fields], from} + end + + defp collect_fields({:filter, _, [call, _]} = expr, fields, from, query, take, keep_literals?) do + case call do + {agg, _, _} when agg in @aggs -> :ok + {:fragment, _, [_ | _]} -> :ok + _ -> error!(query, "filter(...) expects the first argument to be an aggregate expression, got: `#{Macro.to_string(expr)}`") + end + + {type, _, _} = collect_fields(call, fields, from, query, take, keep_literals?) + {type, [expr | fields], from} + end + + defp collect_fields({:coalesce, _, [left, right]} = expr, fields, from, query, take, _keep_literals?) do + {left_type, _, _} = collect_fields(left, fields, from, query, take, true) + {right_type, _, _} = collect_fields(right, fields, from, query, take, true) + + type = if left_type == right_type, do: left_type, else: {:value, :any} + {type, [expr | fields], from} + end + + defp collect_fields({:over, _, [call, window]} = expr, fields, from, query, take, keep_literals?) do + if is_atom(window) and not Keyword.has_key?(query.windows, window) do + error!(query, "unknown window #{inspect window} given to over/2") + end + + {type, _, _} = collect_fields(call, fields, from, query, take, keep_literals?) + {type, [expr | fields], from} + end + + defp collect_fields({{:., dot_meta, [{:&, _, [_]}, _]}, _, []} = expr, + fields, from, _query, _take, _keep_literals?) do + {{:value, Keyword.fetch!(dot_meta, :type)}, [expr | fields], from} + end + + defp collect_fields({left, right}, fields, from, query, take, keep_literals?) do + {args, fields, from} = collect_args([left, right], fields, from, query, take, keep_literals?, []) + {{:tuple, args}, fields, from} + end + + defp collect_fields({:{}, _, args}, fields, from, query, take, keep_literals?) do + {args, fields, from} = collect_args(args, fields, from, query, take, keep_literals?, []) + {{:tuple, args}, fields, from} + end + + defp collect_fields({:%{}, _, [{:|, _, [data, args]}]}, fields, from, query, take, keep_literals?) 
do + {data, fields, from} = collect_fields(data, fields, from, query, take, keep_literals?) + {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) + {{:map, data, args}, fields, from} + end + + defp collect_fields({:%{}, _, args}, fields, from, query, take, keep_literals?) do + {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) + {{:map, args}, fields, from} + end + + defp collect_fields({:%, _, [name, {:%{}, _, [{:|, _, [data, args]}]}]}, + fields, from, query, take, keep_literals?) do + {data, fields, from} = collect_fields(data, fields, from, query, take, keep_literals?) + {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) + struct!(name, args) + {{:struct, name, data, args}, fields, from} + end + + defp collect_fields({:%, _, [name, {:%{}, _, args}]}, fields, from, query, take, keep_literals?) do + {args, fields, from} = collect_kv(args, fields, from, query, take, keep_literals?, []) + struct!(name, args) + {{:struct, name, args}, fields, from} + end + + defp collect_fields({:merge, _, args}, fields, from, query, take, keep_literals?) do + {[left, right], fields, from} = collect_args(args, fields, from, query, take, keep_literals?, []) + {{:merge, left, right}, fields, from} + end + + defp collect_fields({:date_add, _, [arg | _]} = expr, fields, from, query, take, keep_literals?) do + case collect_fields(arg, fields, from, query, take, keep_literals?) do + {{:value, :any}, _, _} -> {{:value, :date}, [expr | fields], from} + {type, _, _} -> {type, [expr | fields], from} + end + end + + defp collect_fields({:datetime_add, _, [arg | _]} = expr, fields, from, query, take, keep_literals?) do + case collect_fields(arg, fields, from, query, take, keep_literals?) do + {{:value, :any}, _, _} -> {{:value, :naive_datetime}, [expr | fields], from} + {type, _, _} -> {type, [expr | fields], from} + end + end + + defp collect_fields(args, fields, from, query, take, keep_literals?) when is_list(args) do + {args, fields, from} = collect_args(args, fields, from, query, take, keep_literals?, []) + {{:list, args}, fields, from} + end + + defp collect_fields(expr, fields, from, _query, _take, true) when is_binary(expr) do + {{:value, :binary}, [expr | fields], from} + end + + defp collect_fields(expr, fields, from, _query, _take, true) when is_integer(expr) do + {{:value, :integer}, [expr | fields], from} + end + + defp collect_fields(expr, fields, from, _query, _take, true) when is_float(expr) do + {{:value, :float}, [expr | fields], from} + end + + defp collect_fields(expr, fields, from, _query, _take, true) when is_boolean(expr) do + {{:value, :boolean}, [expr | fields], from} + end + + defp collect_fields(nil, fields, from, _query, _take, true) do + {{:value, :any}, [nil | fields], from} + end + + defp collect_fields(expr, fields, from, _query, _take, _keep_literals?) when is_atom(expr) do + {expr, fields, from} + end + + defp collect_fields(expr, fields, from, _query, _take, false) + when is_binary(expr) or is_number(expr) do + {expr, fields, from} + end + + defp collect_fields(%Ecto.Query.Tagged{tag: tag} = expr, fields, from, _query, _take, _keep_literals?) do + {{:value, tag}, [expr | fields], from} + end + + defp collect_fields({op, _, [_]} = expr, fields, from, _query, _take, _keep_literals?) + when op in ~w(not is_nil)a do + {{:value, :boolean}, [expr | fields], from} + end + + defp collect_fields({op, _, [_, _]} = expr, fields, from, _query, _take, _keep_literals?) 
+ when op in ~w(< > <= >= == != and or like ilike)a do + {{:value, :boolean}, [expr | fields], from} + end + + defp collect_fields(expr, fields, from, _query, _take, _keep_literals?) do + {{:value, :any}, [expr | fields], from} + end + + defp collect_kv([{key, value} | elems], fields, from, query, take, keep_literals?, acc) do + {key, fields, from} = collect_fields(key, fields, from, query, take, keep_literals?) + {value, fields, from} = collect_fields(value, fields, from, query, take, keep_literals?) + collect_kv(elems, fields, from, query, take, keep_literals?, [{key, value} | acc]) + end + defp collect_kv([], fields, from, _query, _take, _keep_literals?, acc) do + {Enum.reverse(acc), fields, from} + end + + defp collect_args([elem | elems], fields, from, query, take, keep_literals?, acc) do + {elem, fields, from} = collect_fields(elem, fields, from, query, take, keep_literals?) + collect_args(elems, fields, from, query, take, keep_literals?, [elem | acc]) + end + defp collect_args([], fields, from, _query, _take, _keep_literals?, acc) do + {Enum.reverse(acc), fields, from} + end + + defp merge_assocs(assocs, query) do + assocs + |> Enum.reduce(%{}, fn {field, {index, children}}, acc -> + children = merge_assocs(children, query) + + Map.update(acc, field, {index, children}, fn + {^index, current_children} -> + {index, merge_assocs(children ++ current_children, query)} + {other_index, _} -> + error! query, "association `#{field}` is being set to binding at position #{index} " <> + "and at position #{other_index} at the same time" + end) + end) + |> Map.to_list() + end + + defp collect_assocs(exprs, fields, query, tag, take, [{assoc, {ix, children}}|tail]) do + to_take = get_preload_source!(query, ix) + {fetch, take_children} = fetch_assoc(tag, take, assoc) + {expr, taken} = take!(to_take, query, fetch, assoc, ix) + exprs = [expr | exprs] + fields = Enum.reverse(taken, fields) + {exprs, fields} = collect_assocs(exprs, fields, query, tag, take_children, children) + {exprs, fields} = collect_assocs(exprs, fields, query, tag, take, tail) + {exprs, fields} + end + defp collect_assocs(exprs, fields, _query, _tag, _take, []) do + {exprs, fields} + end + + defp fetch_assoc(tag, take, assoc) do + case Access.fetch(take, assoc) do + {:ok, value} -> {{:ok, {tag, value}}, value} + :error -> {:error, []} + end + end + + defp source_take!(kind, query, take, field, ix) do + source = get_source!(kind, query, ix) + take!(source, query, Access.fetch(take, field), field, ix) + end + + defp take!(source, query, fetched, field, ix) do + case {fetched, source} do + {{:ok, {:struct, _}}, {:fragment, _, _}} -> + error! query, "it is not possible to return a struct subset of a fragment" + + {{:ok, {:struct, _}}, %Ecto.SubQuery{}} -> + error! query, "it is not possible to return a struct subset of a subquery" + + {{:ok, {_, []}}, {_, _, _}} -> + error! query, "at least one field must be selected for binding `#{field}`, got an empty list" + + {{:ok, {:struct, _}}, {_, nil, _}} -> + error! 
query, "struct/2 in select expects a source with a schema" + + {{:ok, {kind, fields}}, {source, schema, prefix}} when is_binary(source) -> + dumper = if schema, do: schema.__schema__(:dump), else: %{} + schema = if kind == :map, do: nil, else: schema + {types, fields} = select_dump(List.wrap(fields), dumper, ix) + {{:source, {source, schema}, prefix || query.prefix, types}, fields} + + {{:ok, {_, fields}}, _} -> + {{:map, Enum.map(fields, &{&1, {:value, :any}})}, Enum.map(fields, &select_field(&1, ix))} + + {:error, {:fragment, _, _}} -> + {{:value, :map}, [{:&, [], [ix]}]} + + {:error, {_, nil, _}} -> + {{:value, :map}, [{:&, [], [ix]}]} + + {:error, {source, schema, prefix}} -> + {types, fields} = select_dump(schema.__schema__(:query_fields), schema.__schema__(:dump), ix) + {{:source, {source, schema}, prefix || query.prefix, types}, fields} + + {:error, %Ecto.SubQuery{select: select}} -> + fields = subquery_source_fields(select) + {select, Enum.map(fields, &select_field(&1, ix))} + end + end + + defp select_dump(fields, dumper, ix) do + fields + |> Enum.reverse + |> Enum.reduce({[], []}, fn + field, {types, exprs} when is_atom(field) -> + {source, type} = Map.get(dumper, field, {field, :any}) + {[{field, type} | types], [select_field(source, ix) | exprs]} + _field, acc -> + acc + end) + end + + defp select_field(field, ix) do + {{:., [], [{:&, [], [ix]}, field]}, [], []} + end + + defp get_ix!({:&, _, [ix]} = expr, _kind, query) do + {ix, expr, query} + end + + defp get_ix!({:as, meta, [as]}, _kind, query) do + case query.aliases do + %{^as => ix} -> {ix, {:&, meta, [ix]}, query} + %{} -> error!(query, "could not find named binding `as(#{inspect(as)})`") + end + end + + defp get_ix!({:parent_as, meta, [as]}, kind, query) do + case query.aliases[@parent_as] do + %{aliases: %{^as => ix}, sources: sources} = query -> + if kind == :select and not (ix < tuple_size(sources)) do + error!(query, "the parent_as in a subquery select used as a join can only access the `from` binding") + else + {ix, {:parent_as, [], [as]}, query} + end + + %{} = parent -> + get_ix!({:parent_as, meta, [as]}, kind, parent) + + nil -> + error!(query, "could not find named binding `parent_as(#{inspect(as)})`") + end + end + + defp get_source!(where, %{sources: sources} = query, ix) do + elem(sources, ix) + rescue + ArgumentError -> + error! query, "invalid query has specified more bindings than bindings available " <> + "in `#{where}` (look for `unknown_binding!` in the printed query below)" + end + + defp get_preload_source!(query, ix) do + case get_source!(:preload, query, ix) do + {source, schema, _} = all when is_binary(source) and schema != nil -> + all + _ -> + error! query, "can only preload sources with a schema " <> + "(fragments, binary and subqueries are not supported)" + end + end + + @doc """ + Puts the prefix given via `opts` into the given query, if available. 
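+
+  A minimal illustrative sketch (values assumed): the prefix is only picked up
+  when the query does not have one yet, whether it is given as a keyword list
+  of options or as another struct carrying a `prefix`; otherwise the query is
+  returned unchanged:
+
+      attach_prefix(%Ecto.Query{prefix: nil}, prefix: "tenant_a")
+      #=> %Ecto.Query{prefix: "tenant_a", ...}
+
+      attach_prefix(%Ecto.Query{prefix: "public"}, prefix: "tenant_a")
+      #=> %Ecto.Query{prefix: "public", ...}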
+ """ + def attach_prefix(%{prefix: nil} = query, opts) when is_list(opts) do + case Keyword.fetch(opts, :prefix) do + {:ok, prefix} -> %{query | prefix: prefix} + :error -> query + end + end + + def attach_prefix(%{prefix: nil} = query, %{prefix: prefix}) do + %{query | prefix: prefix} + end + + def attach_prefix(query, _), do: query + + ## Helpers + + @all_exprs [with_cte: :with_ctes, distinct: :distinct, select: :select, from: :from, join: :joins, + where: :wheres, group_by: :group_bys, having: :havings, windows: :windows, + combination: :combinations, order_by: :order_bys, limit: :limit, offset: :offset] + + # Although joins come before updates in the actual query, + # the on fields are moved to where, so they effectively + # need to come later for MySQL. This means subqueries + # with parameters are not supported as a join on MySQL. + # The only way to address it is by splitting how join + # and their on expressions are processed. + @update_all_exprs [with_cte: :with_ctes, from: :from, update: :updates, + join: :joins, where: :wheres, select: :select] + + @delete_all_exprs [with_cte: :with_ctes, from: :from, join: :joins, + where: :wheres, select: :select] + + # Traverse all query components with expressions. + # Therefore from, preload, assocs and lock are not traversed. + defp traverse_exprs(query, operation, acc, fun) do + exprs = + case operation do + :all -> @all_exprs + :insert_all -> @all_exprs + :update_all -> @update_all_exprs + :delete_all -> @delete_all_exprs + end + + Enum.reduce exprs, {query, acc}, fn {kind, key}, {query, acc} -> + {traversed, acc} = fun.(kind, query, Map.fetch!(query, key), acc) + {%{query | key => traversed}, acc} + end + end + + defp field_type!(kind, query, expr, type, allow_virtuals? \\ false) + + defp field_type!(kind, query, expr, {composite, {ix, field}}, allow_virtuals?) when is_integer(ix) do + {composite, type!(kind, query, expr, ix, field, allow_virtuals?)} + end + + defp field_type!(kind, query, expr, {ix, field}, allow_virtuals?) when is_integer(ix) do + type!(kind, query, expr, ix, field, allow_virtuals?) + end + + defp field_type!(_kind, _query, _expr, type, _) do + type + end + + defp type!(kind, query, expr, schema, field, allow_virtuals? \\ false) + + defp type!(_kind, _query, _expr, nil, _field, _allow_virtuals?), do: :any + + defp type!(kind, query, expr, ix, field, allow_virtuals?) when is_integer(ix) do + case get_source!(kind, query, ix) do + {:fragment, _, _} -> + :any + + {_, schema, _} -> + type!(kind, query, expr, schema, field, allow_virtuals?) + + %Ecto.SubQuery{select: select} -> + case subquery_type_for(select, field) do + {:ok, type} -> type + :error -> error!(query, expr, "field `#{field}` does not exist in subquery") + end + end + end + + defp type!(kind, query, expr, schema, field, allow_virtuals?) when is_atom(schema) do + cond do + type = schema.__schema__(:type, field) -> + type + + type = allow_virtuals? && schema.__schema__(:virtual_type, field) -> + type + + Map.has_key?(schema.__struct__(), field) -> + case schema.__schema__(:association, field) do + %Ecto.Association.BelongsTo{owner_key: owner_key} -> + error! query, expr, "field `#{field}` in `#{kind}` is an association in schema #{inspect schema}. " <> + "Did you mean to use `#{owner_key}`?" + %_{} -> + error! query, expr, "field `#{field}` in `#{kind}` is an association in schema #{inspect schema}" + + _ -> + error! 
query, expr, "field `#{field}` in `#{kind}` is a virtual field in schema #{inspect schema}" + end + + true -> + hint = closest_fields_hint(field, schema) + error! query, expr, "field `#{field}` in `#{kind}` does not exist in schema #{inspect schema}", hint + end + end + + defp closest_fields_hint(input, schema) do + input_string = Atom.to_string(input) + + schema.__schema__(:fields) + |> Enum.map(fn field -> {field, String.jaro_distance(input_string, Atom.to_string(field))} end) + |> Enum.filter(fn {_field, score} -> score >= 0.77 end) + |> Enum.sort(& elem(&1, 0) >= elem(&2, 0)) + |> Enum.take(5) + |> Enum.map(&elem(&1, 0)) + |> case do + [] -> + nil + + [suggestion] -> + "Did you mean `#{suggestion}`?" + + suggestions -> + Enum.reduce(suggestions, "Did you mean one of: \n", fn suggestion, acc -> + acc <> "\n * `#{suggestion}`" + end) + end + end + + defp normalize_param(_kind, {:out, {:array, type}}, _value) do + {:ok, type} + end + defp normalize_param(_kind, {:out, :any}, _value) do + {:ok, :any} + end + defp normalize_param(kind, {:out, other}, value) do + {:error, "value `#{inspect value}` in `#{kind}` expected to be part of an array " <> + "but matched type is #{inspect other}"} + end + defp normalize_param(_kind, type, _value) do + {:ok, type} + end + + defp cast_param(kind, type, v) do + case Ecto.Type.cast(type, v) do + {:ok, v} -> + {:ok, v} + _ -> + {:error, "value `#{inspect v}` in `#{kind}` cannot be cast to type #{inspect type}"} + end + end + + defp dump_param(adapter, type, v) do + case Ecto.Type.adapter_dump(adapter, type, v) do + {:ok, v} -> + {:ok, v} + :error -> + {:error, "value `#{inspect v}` cannot be dumped to type #{inspect type}"} + end + end + + defp field_source({source, schema, _}, field) when is_binary(source) and schema != nil do + # If the field is not found we return the field itself + # which will be checked and raise later. + schema.__schema__(:field_source, field) || field + end + defp field_source(_, field) do + field + end + + defp assert_update!(%Ecto.Query{updates: updates} = query, operation) do + changes = + Enum.reduce(updates, %{}, fn update, acc -> + Enum.reduce(update.expr, acc, fn {_op, kw}, acc -> + Enum.reduce(kw, acc, fn {k, v}, acc -> + if Map.has_key?(acc, k) do + error! query, "duplicate field `#{k}` for `#{operation}`" + else + Map.put(acc, k, v) + end + end) + end) + end) + + if changes == %{} do + error! query, "`#{operation}` requires at least one field to be updated" + end + end + + defp assert_no_update!(query, operation) do + case query do + %Ecto.Query{updates: []} -> query + _ -> + error! query, "`#{operation}` does not allow `update` expressions" + end + end + + defp assert_only_filter_expressions!(query, operation) do + case query do + %Ecto.Query{order_bys: [], limit: nil, offset: nil, group_bys: [], + havings: [], preloads: [], assocs: [], distinct: nil, lock: nil, + windows: [], combinations: []} -> + query + _ -> + error! query, "`#{operation}` allows only `with_cte`, `where` and `join` expressions. " <> + "You can exclude unwanted expressions from a query by using " <> + "Ecto.Query.exclude/2. 
Error found" + end + end + + defp filter_and_reraise(exception, stacktrace) do + reraise exception, Enum.reject(stacktrace, &match?({__MODULE__, _, _, _}, &1)) + end + + defp error!(query, message) do + raise Ecto.QueryError, message: message, query: query + end + + defp error!(query, expr, message) do + raise Ecto.QueryError, message: message, query: query, file: expr.file, line: expr.line + end + + defp error!(query, expr, message, hint) do + raise Ecto.QueryError, message: message, query: query, file: expr.file, line: expr.line, hint: hint + end +end diff --git a/deps/ecto/lib/ecto/query/window_api.ex b/deps/ecto/lib/ecto/query/window_api.ex new file mode 100644 index 0000000..6e6a900 --- /dev/null +++ b/deps/ecto/lib/ecto/query/window_api.ex @@ -0,0 +1,232 @@ +defmodule Ecto.Query.WindowAPI do + @moduledoc """ + Lists all windows functions. + + Windows functions must always be used as the first argument + of `over/2` where the second argument is the name of a window: + + from e in Employee, + select: {e.depname, e.empno, e.salary, over(avg(e.salary), :department)}, + windows: [department: [partition_by: e.depname]] + + In the example above, we get the average salary per department. + `:department` is the window name, partitioned by `e.depname` + and `avg/1` is the window function. + + However, note that defining a window is not necessary, as the + window definition can be given as the second argument to `over`: + + from e in Employee, + select: {e.depname, e.empno, e.salary, over(avg(e.salary), partition_by: e.depname)} + + Both queries are equivalent. However, if you are using the same + partitioning over and over again, defining a window will reduce + the query size. See `Ecto.Query.windows/3` for all possible window + expressions, such as `:partition_by` and `:order_by`. + """ + + @dialyzer :no_return + + @doc """ + Counts the entries in the table. + + from p in Post, select: count() + """ + def count, do: doc! [] + + @doc """ + Counts the given entry. + + from p in Post, select: count(p.id) + """ + def count(value), do: doc! [value] + + @doc """ + Calculates the average for the given entry. + + from p in Payment, select: avg(p.value) + """ + def avg(value), do: doc! [value] + + @doc """ + Calculates the sum for the given entry. + + from p in Payment, select: sum(p.value) + """ + def sum(value), do: doc! [value] + + @doc """ + Calculates the minimum for the given entry. + + from p in Payment, select: min(p.value) + """ + def min(value), do: doc! [value] + + @doc """ + Calculates the maximum for the given entry. + + from p in Payment, select: max(p.value) + """ + def max(value), do: doc! [value] + + @doc """ + Defines a value based on the function and the window. See moduledoc for more information. + + from e in Employee, select: over(avg(e.salary), partition_by: e.depname) + """ + def over(window_function, window_name), do: doc! [window_function, window_name] + + @doc """ + Returns number of the current row within its partition, counting from 1. + + from p in Post, + select: row_number() |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def row_number(), do: doc! [] + + @doc """ + Returns rank of the current row with gaps; same as `row_number/0` of its first peer. + + from p in Post, + select: rank() |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def rank(), do: doc! 
[] + + @doc """ + Returns rank of the current row without gaps; this function counts peer groups. + + from p in Post, + select: dense_rank() |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def dense_rank(), do: doc! [] + + @doc """ + Returns relative rank of the current row: (rank - 1) / (total rows - 1). + + from p in Post, + select: percent_rank() |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def percent_rank(), do: doc! [] + + @doc """ + Returns relative rank of the current row: + (number of rows preceding or peer with current row) / (total rows). + + from p in Post, + select: cume_dist() |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def cume_dist(), do: doc! [] + + @doc """ + Returns integer ranging from 1 to the argument value, dividing the partition as equally as possible. + + from p in Post, + select: ntile(10) |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def ntile(num_buckets), do: doc! [num_buckets] + + @doc """ + Returns value evaluated at the row that is the first row of the window frame. + + from p in Post, + select: first_value(p.id) |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def first_value(value), do: doc! [value] + + @doc """ + Returns value evaluated at the row that is the last row of the window frame. + + from p in Post, + select: last_value(p.id) |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def last_value(value), do: doc! [value] + + + @doc """ + Applies the given expression as a FILTER clause against an + aggregate. This is currently only supported by Postgres. + + from p in Post, + select: avg(p.value) + |> filter(p.value > 0 and p.value < 100) + |> over(partition_by: p.category_id, order_by: p.date) + """ + + def filter(value, filter), do: doc! [value, filter] + + @doc """ + Returns value evaluated at the row that is the nth row of the window + frame (counting from 1); `nil` if no such row. + + from p in Post, + select: nth_value(p.id, 4) |> over(partition_by: p.category_id, order_by: p.date) + + Note that this function must be invoked using window function syntax. + """ + def nth_value(value, nth), do: doc! [value, nth] + + @doc """ + Returns value evaluated at the row that is offset rows before + the current row within the partition. + + If there is no such row, instead return default (which must be of the + same type as value). Both offset and default are evaluated with respect + to the current row. If omitted, offset defaults to 1 and default to `nil`. + + from e in Events, + windows: [w: [partition_by: e.name, order_by: e.tick]], + select: { + e.tick, + e.action, + e.name, + lag(e.action) |> over(:w), # previous_action + lead(e.action) |> over(:w) # next_action + } + + Note that this function must be invoked using window function syntax. + """ + def lag(value, offset \\ 1, default \\ nil), do: doc! [value, offset, default] + + @doc """ + Returns value evaluated at the row that is offset rows after + the current row within the partition. 
+ + If there is no such row, instead return default (which must be of the + same type as value). Both offset and default are evaluated with respect + to the current row. If omitted, offset defaults to 1 and default to `nil`. + + from e in Events, + windows: [w: [partition_by: e.name, order_by: e.tick]], + select: { + e.tick, + e.action, + e.name, + lag(e.action) |> over(:w), # previous_action + lead(e.action) |> over(:w) # next_action + } + + Note that this function must be invoked using window function syntax. + """ + def lead(value, offset \\ 1, default \\ nil), do: doc! [value, offset, default] + + defp doc!(_) do + raise "the functions in Ecto.Query.WindowAPI should not be invoked directly, " <> + "they serve for documentation purposes only" + end +end diff --git a/deps/ecto/lib/ecto/queryable.ex b/deps/ecto/lib/ecto/queryable.ex new file mode 100644 index 0000000..d9e7c4b --- /dev/null +++ b/deps/ecto/lib/ecto/queryable.ex @@ -0,0 +1,51 @@ +defprotocol Ecto.Queryable do + @moduledoc """ + Converts a data structure into an `Ecto.Query`. + """ + + @doc """ + Converts the given `data` into an `Ecto.Query`. + """ + def to_query(data) +end + +defimpl Ecto.Queryable, for: Ecto.Query do + def to_query(query), do: query +end + +defimpl Ecto.Queryable, for: Ecto.SubQuery do + def to_query(subquery) do + %Ecto.Query{from: %Ecto.Query.FromExpr{source: subquery}} + end +end + +defimpl Ecto.Queryable, for: BitString do + def to_query(source) when is_binary(source) do + %Ecto.Query{from: %Ecto.Query.FromExpr{source: {source, nil}}} + end +end + +defimpl Ecto.Queryable, for: Atom do + def to_query(module) do + try do + module.__schema__(:query) + rescue + UndefinedFunctionError -> + message = if :code.is_loaded(module) do + "the given module does not provide a schema" + else + "the given module does not exist" + end + + raise Protocol.UndefinedError, + protocol: @protocol, value: module, description: message + end + end +end + +defimpl Ecto.Queryable, for: Tuple do + def to_query({source, schema} = from) + when is_binary(source) and is_atom(schema) and not is_nil(schema) do + %Ecto.Query{from: %Ecto.Query.FromExpr{source: from, prefix: schema.__schema__(:prefix)}} + end +end diff --git a/deps/ecto/lib/ecto/repo.ex b/deps/ecto/lib/ecto/repo.ex new file mode 100644 index 0000000..21c902a --- /dev/null +++ b/deps/ecto/lib/ecto/repo.ex @@ -0,0 +1,1875 @@ +defmodule Ecto.Repo do + @moduledoc """ + Defines a repository. + + A repository maps to an underlying data store, controlled by the + adapter. For example, Ecto ships with a Postgres adapter that + stores data into a PostgreSQL database. + + When used, the repository expects the `:otp_app` and `:adapter` as + option. The `:otp_app` should point to an OTP application that has + the repository configuration. For example, the repository: + + defmodule Repo do + use Ecto.Repo, + otp_app: :my_app, + adapter: Ecto.Adapters.Postgres + end + + Could be configured with: + + config :my_app, Repo, + database: "ecto_simple", + username: "postgres", + password: "postgres", + hostname: "localhost" + + Most of the configuration that goes into the `config` is specific + to the adapter. For this particular example, you can check + [`Ecto.Adapters.Postgres`](https://hexdocs.pm/ecto_sql/Ecto.Adapters.Postgres.html) + for more information. 
In spite of this, the following configuration values + are shared across all adapters: + + * `:name`- The name of the Repo supervisor process + + * `:priv` - the directory where to keep repository data, like + migrations, schema and more. Defaults to "priv/YOUR_REPO". + It must always point to a subdirectory inside the priv directory + + * `:url` - an URL that specifies storage information. Read below + for more information + + * `:log` - the log level used when logging the query with Elixir's + Logger. If false, disables logging for that repository. + Defaults to `:debug` + + * `:pool_size` - the size of the pool used by the connection module. + Defaults to `10` + + * `:telemetry_prefix` - we recommend adapters to publish events + using the `Telemetry` library. By default, the telemetry prefix + is based on the module name, so if your module is called + `MyApp.Repo`, the prefix will be `[:my_app, :repo]`. See the + "Telemetry Events" section to see which events we recommend + adapters to publish. Note that if you have multiple databases, you + should keep the `:telemetry_prefix` consistent for each repo and + use the `:repo` property in the event metadata for distinguishing + between repos. + + * `:stacktrace`- when true, publishes the stacktrace in telemetry events + and allows more advanced logging. + + ## URLs + + Repositories by default support URLs. For example, the configuration + above could be rewritten to: + + config :my_app, Repo, + url: "ecto://postgres:postgres@localhost/ecto_simple" + + The schema can be of any value. The path represents the database name + while options are simply merged in. + + URL can include query parameters to override shared and adapter-specific + options, like `ssl`, `timeout` and `pool_size`. The following example + shows how to pass these configuration values: + + config :my_app, Repo, + url: "ecto://postgres:postgres@localhost/ecto_simple?ssl=true&pool_size=10" + + In case the URL needs to be dynamically configured, for example by + reading a system environment variable, such can be done via the + `c:init/2` repository callback: + + def init(_type, config) do + {:ok, Keyword.put(config, :url, System.get_env("DATABASE_URL"))} + end + + ## Shared options + + Almost all of the repository functions outlined in this module accept the following + options: + + * `:timeout` - The time in milliseconds (as an integer) to wait for the query call to + finish. `:infinity` will wait indefinitely (default: `15_000`) + * `:log` - When false, does not log the query + * `:telemetry_event` - The telemetry event name to dispatch the event under. + See the next section for more information + * `:telemetry_options` - Extra options to attach to telemetry event name. + See the next section for more information + + ## Telemetry events + + There are two types of telemetry events. The ones emitted by Ecto and the + ones that are adapter specific. + + ### Ecto telemetry events + + The following events are emitted by all Ecto repositories: + + * `[:ecto, :repo, :init]` - it is invoked whenever a repository starts. + The measurement is a single `system_time` entry in native unit. The + metadata is the `:repo` and all initialization options under `:opts`. + + ### Adapter-specific events + + We recommend adapters to publish certain `Telemetry` events listed below. + Those events will use the `:telemetry_prefix` outlined above which defaults + to `[:my_app, :repo]`. 
+ + For instance, to receive all query events published by a repository called + `MyApp.Repo`, one would define a module: + + defmodule MyApp.Telemetry do + def handle_event([:my_app, :repo, :query], measurements, metadata, config) do + IO.inspect binding() + end + end + + Then, in the `Application.start/2` callback, attach the handler to this event using + a unique handler id: + + :ok = :telemetry.attach("my-app-handler-id", [:my_app, :repo, :query], &MyApp.Telemetry.handle_event/4, %{}) + + For details, see [the telemetry documentation](https://hexdocs.pm/telemetry/). + + Below we list all events developers should expect from Ecto. All examples + below consider a repository named `MyApp.Repo`: + + #### `[:my_app, :repo, :query]` + + This event should be invoked on every query sent to the adapter, including + queries related to transaction management. + + The `:measurements` map will include the following, all given in the + `:native` time unit: + + * `:idle_time` - the time the connection spent waiting before being checked out for the query + * `:queue_time` - the time spent waiting to check out a database connection + * `:query_time` - the time spent executing the query + * `:decode_time` - the time spent decoding the data received from the database + * `:total_time` - the sum of (`queue_time`, `query_time`, and `decode_time`) + + You can read more about the `:native` time unit + in the docs for `System.convert_time_unit/3`. + + The telemetry `:metadata` map includes the following fields, though each database + adapter may emit different information here. For Ecto.SQL databases, it + will look like this: + + * `:type` - the type of the Ecto query. For example, for Ecto.SQL + databases, it would be `:ecto_sql_query` + * `:repo` - the Ecto repository + * `:result` - the query result + * `:params` - the query parameters + * `:query` - the query sent to the database as a string + * `:source` - the source the query was made on (may be nil) + * `:options` - extra options given to the repo operation under + `:telemetry_options` + + ## Read-only repositories + + You can mark a repository as read-only by passing the `:read_only` + flag on `use`: + + use Ecto.Repo, otp_app: ..., adapter: ..., read_only: true + + By passing the `:read_only` option, none of the functions that perform + write operations, such as `c:insert/2`, `c:insert_all/3`, `c:update_all/3`, + and friends, will be defined. + """ + + @type t :: module + + @doc """ + Returns all running Ecto repositories. + + The list is returned in no particular order. The list + contains either atoms, for named Ecto repositories, or + PIDs.
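+ + A minimal sketch, assuming a single named repository `MyApp.Repo` has been started: + + iex> Ecto.Repo.all_running() + [MyApp.Repo]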
+ """ + @spec all_running() :: [atom() | pid()] + defdelegate all_running(), to: Ecto.Repo.Registry + + @doc false + defmacro __using__(opts) do + quote bind_quoted: [opts: opts] do + @behaviour Ecto.Repo + + {otp_app, adapter, behaviours} = + Ecto.Repo.Supervisor.compile_config(__MODULE__, opts) + + @otp_app otp_app + @adapter adapter + @default_dynamic_repo opts[:default_dynamic_repo] || __MODULE__ + @read_only opts[:read_only] || false + @before_compile adapter + @aggregates [:count, :avg, :max, :min, :sum] + + def config do + {:ok, config} = Ecto.Repo.Supervisor.runtime_config(:runtime, __MODULE__, @otp_app, []) + config + end + + def __adapter__ do + @adapter + end + + def child_spec(opts) do + %{ + id: __MODULE__, + start: {__MODULE__, :start_link, [opts]}, + type: :supervisor + } + end + + def start_link(opts \\ []) do + Ecto.Repo.Supervisor.start_link(__MODULE__, @otp_app, @adapter, opts) + end + + def stop(timeout \\ 5000) do + Supervisor.stop(get_dynamic_repo(), :normal, timeout) + end + + def load(schema_or_types, data) do + Ecto.Repo.Schema.load(@adapter, schema_or_types, data) + end + + def checkout(fun, opts \\ []) when is_function(fun) do + %{adapter: adapter} = meta = Ecto.Repo.Registry.lookup(get_dynamic_repo()) + adapter.checkout(meta, opts, fun) + end + + def checked_out? do + %{adapter: adapter} = meta = Ecto.Repo.Registry.lookup(get_dynamic_repo()) + adapter.checked_out?(meta) + end + + @compile {:inline, get_dynamic_repo: 0, prepare_opts: 2} + + def get_dynamic_repo() do + Process.get({__MODULE__, :dynamic_repo}, @default_dynamic_repo) + end + + def put_dynamic_repo(dynamic) when is_atom(dynamic) or is_pid(dynamic) do + Process.put({__MODULE__, :dynamic_repo}, dynamic) || @default_dynamic_repo + end + + def default_options(_operation), do: [] + defoverridable default_options: 1 + + defp prepare_opts(operation_name, opts) do + operation_name + |> default_options() + |> Keyword.merge(opts) + end + + ## Transactions + + if Ecto.Adapter.Transaction in behaviours do + def transaction(fun_or_multi, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Transaction.transaction(__MODULE__, repo, fun_or_multi, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:transaction, opts))) + end + + def in_transaction? 
do + Ecto.Repo.Transaction.in_transaction?(get_dynamic_repo()) + end + + @spec rollback(term) :: no_return + def rollback(value) do + Ecto.Repo.Transaction.rollback(get_dynamic_repo(), value) + end + end + + ## Schemas + + if Ecto.Adapter.Schema in behaviours and not @read_only do + def insert(struct, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Schema.insert(__MODULE__, repo, struct, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert, opts))) + end + + def update(struct, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Schema.update(__MODULE__, get_dynamic_repo(), struct, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:update, opts))) + end + + def insert_or_update(changeset, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Schema.insert_or_update(__MODULE__, get_dynamic_repo(), changeset, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert_or_update, opts))) + end + + def delete(struct, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Schema.delete(__MODULE__, get_dynamic_repo(), struct, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:delete, opts))) + end + + def insert!(struct, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Schema.insert!(__MODULE__, get_dynamic_repo(), struct, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert, opts))) + end + + def update!(struct, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Schema.update!(__MODULE__, get_dynamic_repo(), struct, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:update, opts))) + end + + def insert_or_update!(changeset, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Schema.insert_or_update!(__MODULE__, get_dynamic_repo(), changeset, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert_or_update, opts))) + end + + def delete!(struct, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Schema.delete!(__MODULE__, get_dynamic_repo(), struct, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:delete, opts))) + end + + def insert_all(schema_or_source, entries, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Schema.insert_all(__MODULE__, get_dynamic_repo(), schema_or_source, entries, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:insert_all, opts))) + end + end + + ## Queryable + + if Ecto.Adapter.Queryable in behaviours do + if not @read_only do + def update_all(queryable, updates, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.update_all(get_dynamic_repo(), queryable, updates, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:update_all, opts))) + end + + def delete_all(queryable, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.delete_all(get_dynamic_repo(), queryable, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:delete_all, opts))) + end + end + + def all(queryable, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.all(get_dynamic_repo(), queryable, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts))) + end + + def stream(queryable, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.stream(get_dynamic_repo(), queryable, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:stream, opts))) + end + + def get(queryable, id, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.get(get_dynamic_repo(), queryable, id, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts))) + end + + def get!(queryable, id, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.get!(get_dynamic_repo(), queryable, id, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts))) + end + + def 
get_by(queryable, clauses, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.get_by(get_dynamic_repo(), queryable, clauses, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts))) + end + + def get_by!(queryable, clauses, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.get_by!(get_dynamic_repo(), queryable, clauses, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts))) + end + + def reload(queryable, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.reload(get_dynamic_repo(), queryable, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:reload, opts))) + end + + def reload!(queryable, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.reload!(get_dynamic_repo(), queryable, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:reload, opts))) + end + + def one(queryable, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.one(get_dynamic_repo(), queryable, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts))) + end + + def one!(queryable, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.one!(get_dynamic_repo(), queryable, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts))) + end + + def aggregate(queryable, aggregate, opts \\ []) + + def aggregate(queryable, aggregate, opts) + when aggregate in [:count] and is_list(opts) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.aggregate(get_dynamic_repo(), queryable, aggregate, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts))) + end + + def aggregate(queryable, aggregate, field) + when aggregate in @aggregates and is_atom(field) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.aggregate(get_dynamic_repo(), queryable, aggregate, field, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, []))) + end + + def aggregate(queryable, aggregate, field, opts) + when aggregate in @aggregates and is_atom(field) and is_list(opts) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.aggregate(get_dynamic_repo(), queryable, aggregate, field, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts))) + end + + def exists?(queryable, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Queryable.exists?(get_dynamic_repo(), queryable, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:all, opts))) + end + + def preload(struct_or_structs_or_nil, preloads, opts \\ []) do + repo = get_dynamic_repo() + Ecto.Repo.Preloader.preload(struct_or_structs_or_nil, get_dynamic_repo(), preloads, Ecto.Repo.Supervisor.tuplet(repo, prepare_opts(:preload, opts))) + end + + def prepare_query(operation, query, opts), do: {query, opts} + defoverridable prepare_query: 3 + end + end + end + + ## User callbacks + + @optional_callbacks init: 2 + + @doc """ + A callback executed when the repo starts or when configuration is read. + + The first argument is the context the callback is being invoked. If it + is called because the Repo supervisor is starting, it will be `:supervisor`. + It will be `:runtime` if it is called for reading configuration without + actually starting a process. + + The second argument is the repository configuration as stored in the + application environment. It must return `{:ok, keyword}` with the updated + list of configuration or `:ignore` (only in the `:supervisor` case). + """ + @doc group: "User callbacks" + @callback init(context :: :supervisor | :runtime, config :: Keyword.t()) :: + {:ok, Keyword.t()} | :ignore + + ## Ecto.Adapter + + @doc """ + Returns the adapter tied to the repository. 
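+ + A minimal sketch, assuming a `MyApp.Repo` configured with `Ecto.Adapters.Postgres` + as in the moduledoc example: + + iex> MyApp.Repo.__adapter__() + Ecto.Adapters.Postgres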
+ """ + @doc group: "Runtime API" + @callback __adapter__ :: Ecto.Adapter.t() + + @doc """ + Returns the adapter configuration stored in the `:otp_app` environment. + + If the `c:init/2` callback is implemented in the repository, + it will be invoked with the first argument set to `:runtime`. + """ + @doc group: "Runtime API" + @callback config() :: Keyword.t() + + @doc """ + Starts any connection pooling or supervision and return `{:ok, pid}` + or just `:ok` if nothing needs to be done. + + Returns `{:error, {:already_started, pid}}` if the repo is already + started or `{:error, term}` in case anything else goes wrong. + + ## Options + + See the configuration in the moduledoc for options shared between adapters, + for adapter-specific configuration see the adapter's documentation. + """ + @doc group: "Runtime API" + @callback start_link(opts :: Keyword.t()) :: + {:ok, pid} + | {:error, {:already_started, pid}} + | {:error, term} + + @doc """ + Shuts down the repository. + """ + @doc group: "Runtime API" + @callback stop(timeout) :: :ok + + @doc """ + Checks out a connection for the duration of the function. + + It returns the result of the function. This is useful when + you need to perform multiple operations against the repository + in a row and you want to avoid checking out the connection + multiple times. + + `checkout/2` and `transaction/2` can be combined and nested + multiple times. If `checkout/2` is called inside the function + of another `checkout/2` call, the function is simply executed, + without checking out a new connection. + + ## Options + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + """ + @doc group: "Transaction API" + @callback checkout((() -> result), opts :: Keyword.t()) :: result when result: var + + @doc """ + Returns true if a connection has been checked out. + + This is true if inside a `c:Ecto.Repo.checkout/2` or + `c:Ecto.Repo.transaction/2`. + + ## Examples + + MyRepo.checked_out? + #=> false + + MyRepo.transaction(fn -> + MyRepo.checked_out? #=> true + end) + + MyRepo.checkout(fn -> + MyRepo.checked_out? #=> true + end) + + """ + @doc group: "Transaction API" + @callback checked_out?() :: boolean + + @doc """ + Loads `data` into a schema or a map. + + The first argument can be a a schema module or a map (of types). + The first argument determines the return value: a struct or a map, + respectively. + + The second argument `data` specifies fields and values that are to be loaded. + It can be a map, a keyword list, or a `{fields, values}` tuple. + Fields can be atoms or strings. + + Fields that are not present in the schema (or `types` map) are ignored. + If any of the values has invalid type, an error is raised. + + To load data from non-database sources, use `Ecto.embedded_load/3`. 
+ + ## Examples + + iex> MyRepo.load(User, %{name: "Alice", age: 25}) + %User{name: "Alice", age: 25} + + iex> MyRepo.load(User, [name: "Alice", age: 25]) + %User{name: "Alice", age: 25} + + `data` can also take form of `{fields, values}`: + + iex> MyRepo.load(User, {[:name, :age], ["Alice", 25]}) + %User{name: "Alice", age: 25, ...} + + The first argument can also be a `types` map: + + iex> types = %{name: :string, age: :integer} + iex> MyRepo.load(types, %{name: "Alice", age: 25}) + %{name: "Alice", age: 25} + + This function is especially useful when parsing raw query results: + + iex> result = Ecto.Adapters.SQL.query!(MyRepo, "SELECT * FROM users", []) + iex> Enum.map(result.rows, &MyRepo.load(User, {result.columns, &1})) + [%User{...}, ...] + + """ + @doc group: "Schema API" + @callback load( + schema_or_map :: module | map(), + data :: map() | Keyword.t() | {list, list} + ) :: Ecto.Schema.t() | map() + + @doc """ + Returns the atom name or pid of the current repository. + + See `c:put_dynamic_repo/1` for more information. + """ + @doc group: "Runtime API" + @callback get_dynamic_repo() :: atom() | pid() + + @doc """ + Sets the dynamic repository to be used in further interactions. + + Sometimes you may want a single Ecto repository to talk to + many different database instances. By default, when you call + `MyApp.Repo.start_link/1`, it will start a repository with + name `MyApp.Repo`. But if you want to start multiple repositories, + you can give each of them a different name: + + MyApp.Repo.start_link(name: :tenant_foo, hostname: "foo.example.com") + MyApp.Repo.start_link(name: :tenant_bar, hostname: "bar.example.com") + + You can also start repositories without names by explicitly + setting the name to nil: + + MyApp.Repo.start_link(name: nil, hostname: "temp.example.com") + + However, once the repository is started, you can't directly interact with + it, since all operations in `MyApp.Repo` are sent by default to the repository + named `MyApp.Repo`. You can change the default repo at compile time with: + + use Ecto.Repo, default_dynamic_repo: :name_of_repo + + Or you can change it anytime at runtime by calling `put_dynamic_repo/1`: + + MyApp.Repo.put_dynamic_repo(:tenant_foo) + + From this moment on, all future queries done by the current process will + run on `:tenant_foo`. + """ + @doc group: "Runtime API" + @callback put_dynamic_repo(name_or_pid :: atom() | pid()) :: atom() | pid() + + ## Ecto.Adapter.Queryable + + @optional_callbacks get: 3, get!: 3, get_by: 3, get_by!: 3, reload: 2, reload!: 2, aggregate: 3, + aggregate: 4, exists?: 2, one: 2, one!: 2, preload: 3, all: 2, stream: 2, + update_all: 3, delete_all: 2 + + @doc """ + Fetches a single struct from the data store where the primary key matches the + given id. + + Returns `nil` if no result was found. If the struct in the queryable + has no or more than one primary key, it will raise an argument error. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the "Query Prefix" section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
+ + ## Example + + MyRepo.get(Post, 42) + + MyRepo.get(Post, 42, prefix: "public") + + """ + @doc group: "Query API" + @callback get(queryable :: Ecto.Queryable.t(), id :: term, opts :: Keyword.t()) :: + Ecto.Schema.t() | nil + + @doc """ + Similar to `c:get/3` but raises `Ecto.NoResultsError` if no record was found. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the "Query Prefix" section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + MyRepo.get!(Post, 42) + + MyRepo.get!(Post, 42, prefix: "public") + + """ + @doc group: "Query API" + @callback get!(queryable :: Ecto.Queryable.t(), id :: term, opts :: Keyword.t()) :: + Ecto.Schema.t() + + @doc """ + Fetches a single result from the query. + + Returns `nil` if no result was found. Raises if more than one entry. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the "Query Prefix" section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + MyRepo.get_by(Post, title: "My post") + + MyRepo.get_by(Post, [title: "My post"], prefix: "public") + + """ + @doc group: "Query API" + @callback get_by( + queryable :: Ecto.Queryable.t(), + clauses :: Keyword.t() | map, + opts :: Keyword.t() + ) :: Ecto.Schema.t() | nil + + @doc """ + Similar to `c:get_by/3` but raises `Ecto.NoResultsError` if no record was found. + + Raises if more than one entry. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the "Query Prefix" section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + MyRepo.get_by!(Post, title: "My post") + + MyRepo.get_by!(Post, [title: "My post"], prefix: "public") + + """ + @doc group: "Query API" + @callback get_by!( + queryable :: Ecto.Queryable.t(), + clauses :: Keyword.t() | map, + opts :: Keyword.t() + ) :: Ecto.Schema.t() + + @doc """ + Reloads a given schema or schema list from the database. + + When using with lists, it is expected that all of the structs in the list belong + to the same schema. Ordering is guaranteed to be kept. Results not found in + the database will be returned as `nil`. 
+ + ## Example + + MyRepo.reload(post) + %Post{} + + MyRepo.reload([post1, post2]) + [%Post{}, %Post{}] + + MyRepo.reload([deleted_post, post1]) + [nil, %Post{}] + """ + @doc group: "Schema API" + @callback reload( + struct_or_structs :: Ecto.Schema.t() | [Ecto.Schema.t()], + opts :: Keyword.t() + ) :: Ecto.Schema.t() | [Ecto.Schema.t() | nil] | nil + + @doc """ + Similar to `c:reload/2`, but raises when something is not found. + + When using with lists, ordering is guaranteed to be kept. + + ## Example + + MyRepo.reload!(post) + %Post{} + + MyRepo.reload!([post1, post2]) + [%Post{}, %Post{}] + """ + @doc group: "Schema API" + @callback reload!(struct_or_structs, opts :: Keyword.t()) :: struct_or_structs + when struct_or_structs: Ecto.Schema.t() | [Ecto.Schema.t()] + + @doc """ + Calculate the given `aggregate`. + + If the query has a limit, offset, distinct or combination set, it will be + automatically wrapped in a subquery in order to return the + proper result. + + Any preload or select in the query will be ignored in favor of + the column being aggregated. + + The aggregation will fail if any `group_by` field is set. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the "Query Prefix" section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Examples + + # Returns the number of blog posts + Repo.aggregate(Post, :count) + + # Returns the number of blog posts in the "private" schema path + # (in Postgres) or database (in MySQL) + Repo.aggregate(Post, :count, prefix: "private") + + """ + @doc group: "Query API" + @callback aggregate( + queryable :: Ecto.Queryable.t(), + aggregate :: :count, + opts :: Keyword.t() + ) :: term | nil + + @doc """ + Calculate the given `aggregate` over the given `field`. + + See `c:aggregate/3` for general considerations and options. + + ## Examples + + # Returns the number of visits per blog post + Repo.aggregate(Post, :count, :visits) + + # Returns the number of visits per blog post in the "private" schema path + # (in Postgres) or database (in MySQL) + Repo.aggregate(Post, :count, :visits, prefix: "private") + + # Returns the average number of visits for the top 10 + query = from Post, limit: 10 + Repo.aggregate(query, :avg, :visits) + """ + @doc group: "Query API" + @callback aggregate( + queryable :: Ecto.Queryable.t(), + aggregate :: :avg | :count | :max | :min | :sum, + field :: atom, + opts :: Keyword.t() + ) :: term | nil + + @doc """ + Checks if there exists an entry that matches the given query. + + Returns a boolean. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the "Query Prefix" section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
+ + ## Examples + + # checks if any posts exist + Repo.exists?(Post) + + # checks if any posts exist in the "private" schema path (in Postgres) or + # database (in MySQL) + Repo.exists?(Post, schema: "private") + + # checks if any post with a like count greater than 10 exists + query = from p in Post, where: p.like_count > 10 + Repo.exists?(query) + """ + @doc group: "Query API" + @callback exists?(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: boolean() + + @doc """ + Fetches a single result from the query. + + Returns `nil` if no result was found. Raises if more than one entry. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the "Query Prefix" section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Examples + + Repo.one(from p in Post, join: c in assoc(p, :comments), where: p.id == ^post_id) + + query = from p in Post, join: c in assoc(p, :comments), where: p.id == ^post_id + Repo.one(query, prefix: "private") + """ + @doc group: "Query API" + @callback one(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: + Ecto.Schema.t() | nil + + @doc """ + Similar to `c:one/2` but raises `Ecto.NoResultsError` if no record was found. + + Raises if more than one entry. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the "Query Prefix" section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + """ + @doc group: "Query API" + @callback one!(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: + Ecto.Schema.t() + + @doc """ + Preloads all associations on the given struct or structs. + + This is similar to `Ecto.Query.preload/3` except it allows + you to preload structs after they have been fetched from the + database. + + In case the association was already loaded, preload won't attempt + to reload it. + + ## Options + + * `:force` - By default, Ecto won't preload associations that + are already loaded. By setting this option to true, any existing + association will be discarded and reloaded. + * `:in_parallel` - If the preloads must be done in parallel. It can + only be performed when we have more than one preload and the + repository is not in a transaction. Defaults to `true`. + * `:prefix` - the prefix to fetch preloads from. By default, queries + will use the same prefix as the first struct in the given collection. + This option allows the prefix to be changed. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. 
+ + ## Examples + + # Use a single atom to preload an association + posts = Repo.preload posts, :comments + + # Use a list of atoms to preload multiple associations + posts = Repo.preload posts, [:comments, :authors] + + # Use a keyword list to preload nested associations as well + posts = Repo.preload posts, [comments: [:replies, :likes], authors: []] + + # You can mix atoms and keywords, but the atoms must come first + posts = Repo.preload posts, [:authors, comments: [:likes, replies: [:reactions]]] + + # Use a keyword list to customize how associations are queried + posts = Repo.preload posts, [comments: from(c in Comment, order_by: c.published_at)] + + # Use a two-element tuple for a custom query and nested association definition + query = from c in Comment, order_by: c.published_at + posts = Repo.preload posts, [comments: {query, [:replies, :likes]}] + + The query given to preload may also preload its own associations. + """ + @doc group: "Schema API" + @callback preload(structs_or_struct_or_nil, preloads :: term, opts :: Keyword.t()) :: + structs_or_struct_or_nil + when structs_or_struct_or_nil: [Ecto.Schema.t()] | Ecto.Schema.t() | nil + + @doc """ + A user customizable callback invoked for query-based operations. + + This callback can be used to further modify the query and options + before it is transformed and sent to the database. + + This callback is invoked for all query APIs, including the `stream` + functions. It is also invoked for `insert_all` if a source query is + given. It is not invoked for any of the other schema functions. + + ## Examples + + Let's say you want to filter out records that were "soft-deleted" + (have `deleted_at` column set) from all operations unless an admin + is running the query; you can define the callback like this: + + @impl true + def prepare_query(_operation, query, opts) do + if opts[:admin] do + {query, opts} + else + query = from(x in query, where: is_nil(x.deleted_at)) + {query, opts} + end + end + + And then execute the query: + + Repo.all(query) # only non-deleted records are returned + Repo.all(query, admin: true) # all records are returned + + The callback will be invoked for all queries, including queries + made from associations and preloads. It is not invoked for each + individual join inside a query. + """ + @doc group: "User callbacks" + @callback prepare_query(operation, query :: Ecto.Query.t(), opts :: Keyword.t()) :: + {Ecto.Query.t(), Keyword.t()} + when operation: :all | :update_all | :delete_all | :stream | :insert_all + + @doc """ + A user customizable callback invoked to retrieve default options + for operations. + + This can be used to provide default values per operation that + have higher precedence than the values given on configuration + or when starting the repository. It can also be used to set + query specific options, such as `:prefix`. + + This callback is invoked as the entry point for all repository + operations. For example, if you are executing a query with preloads, + this callback will be invoked once at the beginning, but the + options returned here will be passed to all following operations. + """ + @doc group: "User callbacks" + @callback default_options(operation) :: Keyword.t() + when operation: :all | :insert_all | :update_all | :delete_all | :stream | + :transaction | :insert | :update | :delete | :insert_or_update + + @doc """ + Fetches all entries from the data store matching the given query. + + May raise `Ecto.QueryError` if query validation fails. 
+ + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the "Query Prefix" section of the + `Ecto.Query` documentation. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + # Fetch all post titles + query = from p in Post, + select: p.title + MyRepo.all(query) + """ + @doc group: "Query API" + @callback all(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: [Ecto.Schema.t()] + + @doc """ + Returns a lazy enumerable that emits all entries from the data store + matching the given query. + + SQL adapters, such as Postgres and MySQL, can only enumerate a stream + inside a transaction. + + May raise `Ecto.QueryError` if query validation fails. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This will be applied to all `from` + and `join`s in the query that did not have a prefix previously given + either via the `:prefix` option on `join`/`from` or via `@schema_prefix` + in the schema. For more information see the "Query Prefix" section of the + `Ecto.Query` documentation. + + * `:max_rows` - The number of rows to load from the database as we stream. + It is supported at least by Postgres and MySQL and defaults to 500. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + # Fetch all post titles + query = from p in Post, + select: p.title + stream = MyRepo.stream(query) + MyRepo.transaction(fn -> + Enum.to_list(stream) + end) + """ + @doc group: "Query API" + @callback stream(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: Enum.t() + + @doc """ + Updates all entries matching the given query with the given values. + + It returns a tuple containing the number of entries and any returned + result as second element. The second element is `nil` by default + unless a `select` is supplied in the update query. Note, however, + not all databases support returning data from UPDATEs. + + Keep in mind this `update_all` will not update autogenerated + fields like the `updated_at` columns. + + See `Ecto.Query.update/3` for update operations that can be + performed on fields. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This overrides the prefix set + in the query and any `@schema_prefix` set in the schema. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for remaining options. 
+ + ## Examples + + MyRepo.update_all(Post, set: [title: "New title"]) + + MyRepo.update_all(Post, inc: [visits: 1]) + + from(p in Post, where: p.id < 10, select: p.visits) + |> MyRepo.update_all(set: [title: "New title"]) + + from(p in Post, where: p.id < 10, update: [set: [title: "New title"]]) + |> MyRepo.update_all([]) + + from(p in Post, where: p.id < 10, update: [set: [title: ^new_title]]) + |> MyRepo.update_all([]) + + from(p in Post, where: p.id < 10, update: [set: [title: fragment("upper(?)", ^new_title)]]) + |> MyRepo.update_all([]) + + """ + @doc group: "Query API" + @callback update_all( + queryable :: Ecto.Queryable.t(), + updates :: Keyword.t(), + opts :: Keyword.t() + ) :: {non_neg_integer, nil | [term]} + + @doc """ + Deletes all entries matching the given query. + + It returns a tuple containing the number of entries and any returned + result as second element. The second element is `nil` by default + unless a `select` is supplied in the delete query. Note, however, + not all databases support returning data from DELETEs. + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This overrides the prefix set + in the query and any `@schema_prefix` set in the schema. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for remaining options. + + ## Examples + + MyRepo.delete_all(Post) + + from(p in Post, where: p.id < 10) |> MyRepo.delete_all + """ + @doc group: "Query API" + @callback delete_all(queryable :: Ecto.Queryable.t(), opts :: Keyword.t()) :: + {non_neg_integer, nil | [term]} + + ## Ecto.Adapter.Schema + + @optional_callbacks insert_all: 3, insert: 2, insert!: 2, update: 2, update!: 2, + delete: 2, delete!: 2, insert_or_update: 2, insert_or_update!: 2, + prepare_query: 3 + + @doc """ + Inserts all entries into the repository. + + It expects a schema module (`MyApp.User`) or a source (`"users"`) or + both (`{"users", MyApp.User}`) as the first argument. The second + argument is a list of entries to be inserted, either as keyword + lists or as maps. The keys of the entries are the field names as + atoms and the value should be the respective value for the field + type or, optionally, an `Ecto.Query` that returns a single entry + with a single value. + + It returns a tuple containing the number of entries + and any returned result as second element. If the database + does not support RETURNING in INSERT statements or no + return result was selected, the second element will be `nil`. + + When a schema module is given, the entries given will be properly dumped + before being sent to the database. If the schema primary key has type + `:id` or `:binary_id`, it will be handled either at the adapter + or the storage layer. However any other primary key type or autogenerated + value, like `Ecto.UUID` and timestamps, won't be autogenerated when + using `c:insert_all/3`. You must set those fields explicitly. This is by + design as this function aims to be a more direct way to insert data into + the database without the conveniences of `c:insert/2`. This is also + consistent with `c:update_all/3` that does not handle auto generated + values as well. + + It is also not possible to use `insert_all` to insert across multiple + tables, therefore associations are not supported. + + If a source is given, without a schema module, the given fields are passed + as is to the adapter. + + ## Options + + * `:returning` - selects which fields to return. 
When `true`, + returns all fields in the given schema. May be a list of + fields, where a struct is still returned but only with the + given fields. Or `false`, where nothing is returned (the default). + This option is not supported by all databases. + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This overrides the prefix set + in the query and any `@schema_prefix` set in the schema. + + * `:on_conflict` - It may be one of `:raise` (the default), `:nothing`, + `:replace_all`, `{:replace_all_except, fields}`, `{:replace, fields}`, + a keyword list of update instructions or an `Ecto.Query` + query for updates. See the "Upserts" section for more information. + + * `:conflict_target` - A list of column names to verify for conflicts. + It is expected those columns to have unique indexes on them that may conflict. + If none is specified, the conflict target is left up to the database. + It may also be `{:unsafe_fragment, binary_fragment}` to pass any + expression to the database without any sanitization, this is useful + for partial index or index with expressions, such as + `{:unsafe_fragment, "(coalesce(firstname, ""), coalesce(lastname, "")) WHERE middlename IS NULL"}` for + `ON CONFLICT (coalesce(firstname, ""), coalesce(lastname, "")) WHERE middlename IS NULL` SQL query. + + * `:placeholders` - A map with placeholders. This feature is not supported + by all databases. See the "Placeholders" section for more information. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for remaining options. + + ## Source query + + A query can be given instead of a list with entries. This query needs to select + into a map containing only keys that are available as writeable columns in the + schema. + + ## Examples + + MyRepo.insert_all(Post, [[title: "My first post"], [title: "My second post"]]) + + MyRepo.insert_all(Post, [%{title: "My first post"}, %{title: "My second post"}]) + + query = from p in Post, + join: c in assoc(p, :comments), + select: %{ + author_id: p.author_id, + posts: count(p.id, :distinct), + interactions: sum(p.likes) + count(c.id) + }, + group_by: p.author_id + MyRepo.insert_all(AuthorStats, query) + + ## Upserts + + `c:insert_all/3` provides upserts (update or inserts) via the `:on_conflict` + option. The `:on_conflict` option supports the following values: + + * `:raise` - raises if there is a conflicting primary key or unique index + + * `:nothing` - ignores the error in case of conflicts + + * `:replace_all` - replace **all** values on the existing row with the values + in the schema/changeset, including fields not explicitly set in the changeset, + such as IDs and autogenerated timestamps (`inserted_at` and `updated_at`). + Do not use this option if you have auto-incrementing primary keys, as they + will also be replaced. You most likely want to use `{:replace_all_except, [:id]}` + or `{:replace, fields}` explicitly instead. This option requires a schema + + * `{:replace_all_except, fields}` - same as above except the given fields + are not replaced. This option requires a schema + + * `{:replace, fields}` - replace only specific columns. 
This option requires + `:conflict_target` + + * a keyword list of update instructions - such as the one given to + `c:update_all/3`, for example: `[set: [title: "new title"]]` + + * an `Ecto.Query` that will act as an `UPDATE` statement, such as the + one given to `c:update_all/3` + + Upserts map to "ON CONFLICT" on databases like Postgres and "ON DUPLICATE KEY" + on databases such as MySQL. + + ## Return values + + By default, both Postgres and MySQL will return the number of entries + inserted on `c:insert_all/3`. However, when the `:on_conflict` option + is specified, Postgres and MySQL will return different results. + + Postgres will only count a row if it was affected and will + return 0 if no new entry was added. + + MySQL will return, at a minimum, the number of entries attempted. For example, + if `:on_conflict` is set to `:nothing`, MySQL will return + the number of entries attempted to be inserted, even when no entry + was added. + + Also note that if `:on_conflict` is a query, MySQL will return + the number of attempted entries plus the number of entries modified + by the UPDATE query. + + ## Placeholders + + Passing in a map for the `:placeholders` allows you to send less + data over the wire when you have many entries with the same value + for a field. To use a placeholder, replace its value in each of your + entries with `{:placeholder, key}`, where `key` is the key you + are using in the `:placeholders` option map. For example: + + placeholders = %{blob: large_blob_of_text(...)} + + entries = [ + %{title: "v1", body: {:placeholder, :blob}}, + %{title: "v2", body: {:placeholder, :blob}} + ] + + Repo.insert_all(Post, entries, placeholders: placeholders) + + Keep in mind that: + + * placeholders cannot be nested in other values. For example, you + cannot put a placeholder inside an array. Instead, the whole + array has to be the placeholder + + * a placeholder key can only be used with columns of the same type + + * placeholders require a database that supports index parameters, + so they are not currently compatible with MySQL + + """ + @doc group: "Schema API" + @callback insert_all( + schema_or_source :: binary | {binary, module} | module, + entries_or_query :: [%{atom => value} | Keyword.t(value)] | Ecto.Query.t, + opts :: Keyword.t() + ) :: {non_neg_integer, nil | [term]} when value: term | Ecto.Query.t() + + @doc """ + Inserts a struct defined via `Ecto.Schema` or a changeset. + + In case a struct is given, the struct is converted into a changeset + with all non-nil fields as part of the changeset. + + In case a changeset is given, the changes in the changeset are + merged with the struct fields, and all of them are sent to the + database. If more than one database operation is required, they're + automatically wrapped in a transaction. + + It returns `{:ok, struct}` if the struct has been successfully + inserted or `{:error, changeset}` if there was a validation + or a known constraint error. + + ## Options + + * `:returning` - selects which fields to return. It accepts a list + of fields to be returned from the database. When `true`, returns + all fields. When `false`, no extra fields are returned. It will + always include all fields in `read_after_writes` as well as any + autogenerated id. Not all databases support this option and it + may not be available during upserts. See the "Upserts" section + for more information. + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). 
This overrides the prefix set + in the query and any `@schema_prefix` set any schemas. Also, the + `@schema_prefix` for the parent record will override all default + `@schema_prefix`s set in any child schemas for associations. + + * `:on_conflict` - It may be one of `:raise` (the default), `:nothing`, + `:replace_all`, `{:replace_all_except, fields}`, `{:replace, fields}`, + a keyword list of update instructions or an `Ecto.Query` query for updates. + See the "Upserts" section for more information. + + * `:conflict_target` - A list of column names to verify for conflicts. + It is expected those columns to have unique indexes on them that may conflict. + If none is specified, the conflict target is left up to the database. + It may also be `{:unsafe_fragment, binary_fragment}` to pass any + expression to the database without any sanitization, this is useful + for partial index or index with expressions, such as + `{:unsafe_fragment, "(coalesce(firstname, ""), coalesce(lastname, "")) WHERE middlename IS NULL"}` for + `ON CONFLICT (coalesce(firstname, ""), coalesce(lastname, "")) WHERE middlename IS NULL` SQL query. + + * `:stale_error_field` - The field where stale errors will be added in + the returning changeset. This option can be used to avoid raising + `Ecto.StaleEntryError`. + + * `:stale_error_message` - The message to add to the configured + `:stale_error_field` when stale errors happen, defaults to "is stale". + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Examples + + A typical example is calling `MyRepo.insert/1` with a struct + and acting on the return value: + + case MyRepo.insert %Post{title: "Ecto is great"} do + {:ok, struct} -> # Inserted with success + {:error, changeset} -> # Something went wrong + end + + ## Upserts + + `c:insert/2` provides upserts (update or inserts) via the `:on_conflict` + option. The `:on_conflict` option supports the following values: + + * `:raise` - raises if there is a conflicting primary key or unique index + + * `:nothing` - ignores the error in case of conflicts + + * `:replace_all` - replace **all** values on the existing row with the values + in the schema/changeset, including fields not explicitly set in the changeset, + such as IDs and autogenerated timestamps (`inserted_at` and `updated_at`). + Do not use this option if you have auto-incrementing primary keys, as they + will also be replaced. You most likely want to use `{:replace_all_except, [:id]}` + or `{:replace, fields}` explicitly instead. This option requires a schema + + * `{:replace_all_except, fields}` - same as above except the given fields are + not replaced. This option requires a schema + + * `{:replace, fields}` - replace only specific columns. This option requires + `:conflict_target` + + * a keyword list of update instructions - such as the one given to + `c:update_all/3`, for example: `[set: [title: "new title"]]` + + * an `Ecto.Query` that will act as an `UPDATE` statement, such as the + one given to `c:update_all/3`. Similarly to `c:update_all/3`, auto + generated values, such as timestamps are not automatically updated. + If the struct cannot be found, `Ecto.StaleEntryError` will be raised. + + Upserts map to "ON CONFLICT" on databases like Postgres and "ON DUPLICATE KEY" + on databases such as MySQL. 
+ + As an example, imagine `:title` is marked as a unique column in + the database: + + {:ok, inserted} = MyRepo.insert(%Post{title: "this is unique"}) + + Now we can insert with the same title but do nothing on conflicts: + + {:ok, ignored} = MyRepo.insert(%Post{title: "this is unique"}, on_conflict: :nothing) + + Because we used `on_conflict: :nothing`, instead of getting an error, + we got `{:ok, struct}`. However the returned struct does not reflect + the data in the database. If the primary key is auto-generated by the + database, the primary key in the `ignored` record will be nil if there + was no insertion. For example, if you use the default primary key + (which has name `:id` and a type of `:id`), then `ignored.id` above + will be nil if there was no insertion. + + If your id is generated by your application (typically the case for + `:binary_id`) or if you pass another value for `:on_conflict`, detecting + if an insert or update happened is slightly more complex, as the database + does not actually inform us what happened. Let's insert a post with the + same title but use a query to update the body column in case of conflicts: + + # In Postgres (it requires the conflict target for updates): + on_conflict = [set: [body: "updated"]] + {:ok, updated} = MyRepo.insert(%Post{title: "this is unique"}, + on_conflict: on_conflict, conflict_target: :title) + + # In MySQL (conflict target is not supported): + on_conflict = [set: [title: "updated"]] + {:ok, updated} = MyRepo.insert(%Post{id: inserted.id, title: "updated"}, + on_conflict: on_conflict) + + In the examples above, even though it returned `:ok`, we do not know + if we inserted new data or if we updated only the `:on_conflict` fields. + In case an update happened, the data in the struct most likely does + not match the data in the database. For example, autogenerated fields + such as `inserted_at` will point to now rather than the time the + struct was actually inserted. + + If you need to guarantee the data in the returned struct mirrors the + database, you have three options: + + * Use `on_conflict: :replace_all`, although that will replace all + fields in the database with the ones in the struct/changeset, + including autogenerated fields such as `inserted_at` and `updated_at`: + + MyRepo.insert(%Post{title: "this is unique"}, + on_conflict: :replace_all, conflict_target: :title) + + * Specify `read_after_writes: true` in your schema for choosing + fields that are read from the database after every operation. + Or pass `returning: true` to `insert` to read all fields back: + + MyRepo.insert(%Post{title: "this is unique"}, returning: true, + on_conflict: on_conflict, conflict_target: :title) + + * Alternatively, read the data again from the database in a separate + query. This option requires the primary key to be generated by the + database: + + {:ok, updated} = MyRepo.insert(%Post{title: "this is unique"}, on_conflict: on_conflict) + Repo.get(Post, updated.id) + + Because of the inability to know if the struct is up to date or not, + inserting a struct with associations and using the `:on_conflict` option + at the same time is not recommended, as Ecto will be unable to actually + track the proper status of the association. + """ + @doc group: "Schema API" + @callback insert( + struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(), + opts :: Keyword.t() + ) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()} + + @doc """ + Updates a changeset using its primary key. 
+ + A changeset is required as it is the only mechanism for + tracking dirty changes. Only the fields present in the `changes` part + of the changeset are sent to the database. Any other, in-memory + changes done to the schema are ignored. If more than one database + operation is required, they're automatically wrapped in a transaction. + + If the struct has no primary key, `Ecto.NoPrimaryKeyFieldError` + will be raised. + + If the struct cannot be found, `Ecto.StaleEntryError` will be raised. + + It returns `{:ok, struct}` if the struct has been successfully + updated or `{:error, changeset}` if there was a validation + or a known constraint error. + + ## Options + + * `:returning` - selects which fields to return. It accepts a list + of fields to be returned from the database. When `true`, returns + all fields. When `false`, no extra fields are returned. It will + always include all fields in `read_after_writes`. Not all + databases support this option. + + * `:force` - By default, if there are no changes in the changeset, + `c:update/2` is a no-op. By setting this option to true, update + callbacks will always be executed, even if there are no changes + (including timestamps). + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This overrides the prefix set + in the query and any `@schema_prefix` set any schemas. Also, the + `@schema_prefix` for the parent record will override all default + `@schema_prefix`s set in any child schemas for associations. + + * `:stale_error_field` - The field where stale errors will be added in + the returning changeset. This option can be used to avoid raising + `Ecto.StaleEntryError`. + + * `:stale_error_message` - The message to add to the configured + `:stale_error_field` when stale errors happen, defaults to "is stale". + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + post = MyRepo.get!(Post, 42) + post = Ecto.Changeset.change post, title: "New title" + case MyRepo.update post do + {:ok, struct} -> # Updated with success + {:error, changeset} -> # Something went wrong + end + """ + @doc group: "Schema API" + @callback update(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) :: + {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()} + + @doc """ + Inserts or updates a changeset depending on whether the struct is persisted + or not. + + The distinction whether to insert or update will be made on the + `Ecto.Schema.Metadata` field `:state`. The `:state` is automatically set by + Ecto when loading or building a schema. + + Please note that for this to work, you will have to load existing structs from + the database. So even if the struct exists, this won't work: + + struct = %Post{id: "existing_id", ...} + MyRepo.insert_or_update changeset + # => {:error, changeset} # id already exists + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This overrides the prefix set + in the query and any `@schema_prefix` set any schemas. Also, the + `@schema_prefix` for the parent record will override all default + `@schema_prefix`s set in any child schemas for associations. + * `:stale_error_field` - The field where stale errors will be added in + the returning changeset. This option can be used to avoid raising + `Ecto.StaleEntryError`. Only applies to updates. 
+ * `:stale_error_message` - The message to add to the configured + `:stale_error_field` when stale errors happen, defaults to "is stale". + Only applies to updates. + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + result = + case MyRepo.get(Post, id) do + nil -> %Post{id: id} # Post not found, we build one + post -> post # Post exists, let's use it + end + |> Post.changeset(changes) + |> MyRepo.insert_or_update + + case result do + {:ok, struct} -> # Inserted or updated with success + {:error, changeset} -> # Something went wrong + end + """ + @doc group: "Schema API" + @callback insert_or_update(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) :: + {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()} + + @doc """ + Deletes a struct using its primary key. + + If the struct has no primary key, `Ecto.NoPrimaryKeyFieldError` + will be raised. If the struct has been removed prior to the call, + `Ecto.StaleEntryError` will be raised. If more than one database + operation is required, they're automatically wrapped in a transaction. + + It returns `{:ok, struct}` if the struct has been successfully + deleted or `{:error, changeset}` if there was a validation + or a known constraint error. By default, constraint errors will + raise the `Ecto.ConstraintError` exception, unless a changeset is + given as the first argument with the relevant constraints declared + in it (see `Ecto.Changeset`). + + ## Options + + * `:prefix` - The prefix to run the query on (such as the schema path + in Postgres or the database in MySQL). This overrides the prefix set + in the query and any `@schema_prefix` set in the schema. + + * `:stale_error_field` - The field where stale errors will be added in + the returning changeset. This option can be used to avoid raising + `Ecto.StaleEntryError`. + + * `:stale_error_message` - The message to add to the configured + `:stale_error_field` when stale errors happen, defaults to "is stale". + + See the ["Shared options"](#module-shared-options) section at the module + documentation for more options. + + ## Example + + post = MyRepo.get!(Post, 42) + case MyRepo.delete post do + {:ok, struct} -> # Deleted with success + {:error, changeset} -> # Something went wrong + end + + """ + @doc group: "Schema API" + @callback delete( + struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(), + opts :: Keyword.t() + ) :: {:ok, Ecto.Schema.t()} | {:error, Ecto.Changeset.t()} + + @doc """ + Same as `c:insert/2` but returns the struct or raises if the changeset is invalid. + """ + @doc group: "Schema API" + @callback insert!( + struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(), + opts :: Keyword.t() + ) :: Ecto.Schema.t() + + @doc """ + Same as `c:update/2` but returns the struct or raises if the changeset is invalid. + """ + @doc group: "Schema API" + @callback update!(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) :: + Ecto.Schema.t() + + @doc """ + Same as `c:insert_or_update/2` but returns the struct or raises if the changeset + is invalid. + """ + @doc group: "Schema API" + @callback insert_or_update!(changeset :: Ecto.Changeset.t(), opts :: Keyword.t()) :: + Ecto.Schema.t() + + @doc """ + Same as `c:delete/2` but returns the struct or raises if the changeset is invalid. 
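+
+  For illustration, a minimal sketch following the `c:delete/2` example above:
+
+      post = MyRepo.get!(Post, 42)
+      struct = MyRepo.delete!(post)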
+ """ + @doc group: "Schema API" + @callback delete!( + struct_or_changeset :: Ecto.Schema.t() | Ecto.Changeset.t(), + opts :: Keyword.t() + ) :: Ecto.Schema.t() + + ## Ecto.Adapter.Transaction + + @optional_callbacks transaction: 2, in_transaction?: 0, rollback: 1 + + @doc """ + Runs the given function or `Ecto.Multi` inside a transaction. + + ## Use with function + + `c:transaction/2` can be called with both a function of arity + zero or one. The arity zero function will just be executed as is: + + import Ecto.Changeset, only: [change: 2] + + MyRepo.transaction(fn -> + MyRepo.update!(change(alice, balance: alice.balance - 10)) + MyRepo.update!(change(bob, balance: bob.balance + 10)) + end) + + While the arity one function will receive the repo of the transaction + as its first argument: + + MyRepo.transaction(fn repo -> + repo.insert!(%Post{}) + end) + + If an unhandled error occurs the transaction will be rolled back + and the error will bubble up from the transaction function. + If no error occurred the transaction will be committed when the + function returns. A transaction can be explicitly rolled back + by calling `c:rollback/1`, this will immediately leave the function + and return the value given to `rollback` as `{:error, value}`. + + A successful transaction returns the value returned by the function + wrapped in a tuple as `{:ok, value}`. + + If `c:transaction/2` is called inside another transaction, the function + is simply executed, without wrapping the new transaction call in any + way. If there is an error in the inner transaction and the error is + rescued, or the inner transaction is rolled back, the whole outer + transaction is marked as tainted, guaranteeing nothing will be committed. + + Below is an example of how rollbacks work with nested transactions: + + {:error, :rollback} = + MyRepo.transaction(fn -> + {:error, :posting_not_allowed} = + MyRepo.transaction(fn -> + # This function call causes the following to happen: + # + # * the transaction is rolled back in the database, + # * code execution is stopped within the current function, + # * and the value, passed to `rollback/1` is returned from + # `MyRepo.transaction/1` as the second element in the error + # tuple. + # + MyRepo.rollback(:posting_not_allowed) + + # `rollback/1` stops execution, so code here won't be run + end) + + # When the inner transaction was rolled back, execution in this outer + # transaction is also stopped immediately. When this occurs, the + # outer transaction(s) return `{:error, :rollback}`. + end) + + ## Use with Ecto.Multi + + Besides functions, transactions can be used with an `Ecto.Multi` struct. + A transaction will be started, all operations applied and in case of + success committed returning `{:ok, changes}`: + + # With Ecto.Multi + Ecto.Multi.new() + |> Ecto.Multi.insert(:post, %Post{}) + |> MyRepo.transaction + + In case of any errors the transaction will be rolled back and + `{:error, failed_operation, failed_value, changes_so_far}` will be + returned. + + You can read more about using transactions with `Ecto.Multi` as well as + see some examples in the `Ecto.Multi` documentation. + + ## Working with processes + + The transaction is per process. A separate process started inside a + transaction won't be part of the same transaction and will use a separate + connection altogether. 
+
+  When using the `Ecto.Adapters.SQL.Sandbox` in tests, while it may be
+  possible to share the connection between processes, the parent process
+  will typically hold the connection until the transaction completes. This
+  may lead to a deadlock if the child process attempts to use the same connection.
+  See the docs for
+  [`Ecto.Adapters.SQL.Sandbox`](https://hexdocs.pm/ecto_sql/Ecto.Adapters.SQL.Sandbox.html)
+  for more information.
+
+  ## Options
+
+  See the ["Shared options"](#module-shared-options) section at the module
+  documentation for more options.
+  """
+  @doc group: "Transaction API"
+  @callback transaction(fun_or_multi :: fun | Ecto.Multi.t(), opts :: Keyword.t()) ::
+              {:ok, any}
+              | {:error, any}
+              | {:error, Ecto.Multi.name(), any, %{Ecto.Multi.name() => any}}
+
+  @doc """
+  Returns true if the current process is inside a transaction.
+
+  If you are using the `Ecto.Adapters.SQL.Sandbox` in tests, note that even
+  though each test is inside a transaction, `in_transaction?/0` will only
+  return true inside transactions explicitly created with `transaction/2`. This
+  is done so the test environment mimics dev and prod.
+
+  ## Examples
+
+      MyRepo.in_transaction?
+      #=> false
+
+      MyRepo.transaction(fn ->
+        MyRepo.in_transaction? #=> true
+      end)
+
+  """
+  @doc group: "Transaction API"
+  @callback in_transaction?() :: boolean
+
+  @doc """
+  Rolls back the current transaction.
+
+  The transaction will return the value given as `{:error, value}`.
+
+  Note that calling `rollback` causes the code in the transaction to stop executing.
+  """
+  @doc group: "Transaction API"
+  @callback rollback(value :: any) :: no_return
+end
diff --git a/deps/ecto/lib/ecto/repo/assoc.ex b/deps/ecto/lib/ecto/repo/assoc.ex
new file mode 100644
index 0000000..09f5226
--- /dev/null
+++ b/deps/ecto/lib/ecto/repo/assoc.ex
@@ -0,0 +1,120 @@
+defmodule Ecto.Repo.Assoc do
+  # The module invoked by repo modules
+  # for association related functionality.
+  @moduledoc false
+
+  @doc """
+  Transforms a result set based on query assocs, loading
+  the associations onto their parent schema.
+  """
+  @spec query([list], list, tuple, (list -> list)) :: [Ecto.Schema.t]
+  def query(rows, assocs, sources, fun)
+
+  def query([], _assocs, _sources, _fun), do: []
+  def query(rows, [], _sources, fun), do: Enum.map(rows, fun)
+
+  def query(rows, assocs, sources, fun) do
+    # Create rose tree of accumulator dicts in the same
+    # structure as the fields tree
+    accs = create_accs(0, assocs, sources, [])
+
+    # Populate tree of dicts of associated entities from the result set
+    {_keys, _cache, rows, sub_dicts} = Enum.reduce(rows, accs, fn row, acc ->
+      merge(fun.(row), acc, 0) |> elem(0)
+    end)
+
+    # Create the reflections that will be loaded into memory.
+    refls = create_refls(0, assocs, sub_dicts, sources)
+
+    # Retrieve and load the assocs from cached dictionaries recursively
+    for {item, sub_structs} <- Enum.reverse(rows) do
+      [load_assocs(item, refls)|sub_structs]
+    end
+  end
+
+  defp merge([struct|sub_structs], {primary_keys, cache, dict, sub_dicts}, parent_key) do
+    child_key =
+      if struct do
+        for primary_key <- primary_keys do
+          case Map.get(struct, primary_key) do
+            nil -> raise Ecto.NoPrimaryKeyValueError, struct: struct
+            value -> value
+          end
+        end
+      end
+
+    # Traverse sub_structs adding one by one to the tree.
+    # Note we need to traverse even if we don't have a child_key
+    # due to nested associations.
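+    # Each accumulator node is a {primary_keys, cache, dict, sub_dicts} tuple:
+    # `dict` accumulates the merged children (keyed by parent key for nested
+    # levels) and `cache` ensures the same parent/child pair is stored once.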
+ {sub_dicts, sub_structs} = Enum.map_reduce(sub_dicts, sub_structs, &merge(&2, &1, child_key)) + + cache_key = cache_key(parent_key, child_key, sub_structs, dict) + + if struct && parent_key && not Map.get(cache, cache_key, false) do + cache = Map.put(cache, cache_key, true) + item = {child_key, struct} + + # If we have a list, we are at the root, so we also store the sub structs + dict = update_dict(dict, parent_key, item, sub_structs) + + {{primary_keys, cache, dict, sub_dicts}, sub_structs} + else + {{primary_keys, cache, dict, sub_dicts}, sub_structs} + end + end + + defp cache_key(parent_key, child_key, sub_structs, dict) when is_list(dict) do + {parent_key, child_key, sub_structs} + end + + defp cache_key(parent_key, child_key, _sub_structs, dict) when is_map(dict) do + {parent_key, child_key} + end + + defp update_dict(dict, _parent_key, item, sub_structs) when is_list(dict) do + [{item, sub_structs} | dict] + end + + defp update_dict(dict, parent_key, item, _sub_structs) when is_map(dict) do + Map.update(dict, parent_key, [item], &[item | &1]) + end + + defp load_assocs({child_key, struct}, refls) do + Enum.reduce refls, struct, fn {dict, refl, sub_refls}, acc -> + %{field: field, cardinality: cardinality} = refl + loaded = + dict + |> Map.get(child_key, []) + |> Enum.reverse() + |> Enum.map(&load_assocs(&1, sub_refls)) + |> maybe_first(cardinality) + Map.put(acc, field, loaded) + end + end + + defp maybe_first(list, :one), do: List.first(list) + defp maybe_first(list, _), do: list + + defp create_refls(idx, fields, dicts, sources) do + {_source, schema, _prefix} = elem(sources, idx) + + Enum.map(:lists.zip(dicts, fields), fn + {{_primary_keys, _cache, dict, sub_dicts}, {field, {child_idx, child_fields}}} -> + sub_refls = create_refls(child_idx, child_fields, sub_dicts, sources) + {dict, schema.__schema__(:association, field), sub_refls} + end) + end + + defp create_accs(idx, fields, sources, initial_dict) do + acc = Enum.map(fields, fn {_field, {child_idx, child_fields}} -> + create_accs(child_idx, child_fields, sources, %{}) + end) + + {_source, schema, _prefix} = elem(sources, idx) + + case schema.__schema__(:primary_key) do + [] -> raise Ecto.NoPrimaryKeyFieldError, schema: schema + pk -> {pk, %{}, initial_dict, acc} + end + end +end diff --git a/deps/ecto/lib/ecto/repo/preloader.ex b/deps/ecto/lib/ecto/repo/preloader.ex new file mode 100644 index 0000000..13b6a7f --- /dev/null +++ b/deps/ecto/lib/ecto/repo/preloader.ex @@ -0,0 +1,548 @@ +defmodule Ecto.Repo.Preloader do + # The module invoked by user defined repo_names + # for preload related functionality. + @moduledoc false + + require Ecto.Query + require Logger + + @doc """ + Transforms a result set based on query preloads, loading + the associations onto their parent schema. 
+ """ + @spec query([list], Ecto.Repo.t, list, Access.t, fun, {adapter_meta :: map, opts :: Keyword.t}) :: [list] + def query([], _repo_name, _preloads, _take, _fun, _tuplet), do: [] + def query(rows, _repo_name, [], _take, fun, _tuplet), do: Enum.map(rows, fun) + + def query(rows, repo_name, preloads, take, fun, tuplet) do + rows + |> extract() + |> normalize_and_preload_each(repo_name, preloads, take, tuplet) + |> unextract(rows, fun) + end + + defp extract([[nil|_]|t2]), do: extract(t2) + defp extract([[h|_]|t2]), do: [h|extract(t2)] + defp extract([]), do: [] + + defp unextract(structs, [[nil|_] = h2|t2], fun), do: [fun.(h2)|unextract(structs, t2, fun)] + defp unextract([h1|structs], [[_|t1]|t2], fun), do: [fun.([h1|t1])|unextract(structs, t2, fun)] + defp unextract([], [], _fun), do: [] + + @doc """ + Implementation for `Ecto.Repo.preload/2`. + """ + @spec preload(structs, atom, atom | list, {adapter_meta :: map, opts :: Keyword.t}) :: + structs when structs: [Ecto.Schema.t] | Ecto.Schema.t | nil + def preload(nil, _repo_name, _preloads, _tuplet) do + nil + end + + def preload(structs, repo_name, preloads, {_adapter_meta, opts} = tuplet) when is_list(structs) do + normalize_and_preload_each(structs, repo_name, preloads, opts[:take], tuplet) + end + + def preload(struct, repo_name, preloads, {_adapter_meta, opts} = tuplet) when is_map(struct) do + normalize_and_preload_each([struct], repo_name, preloads, opts[:take], tuplet) |> hd() + end + + defp normalize_and_preload_each(structs, repo_name, preloads, take, tuplet) do + preloads = normalize(preloads, take, preloads) + preload_each(structs, repo_name, preloads, tuplet) + rescue + e -> + # Reraise errors so we ignore the preload inner stacktrace + filter_and_reraise e, __STACKTRACE__ + end + + ## Preloading + + defp preload_each(structs, _repo_name, [], _tuplet), do: structs + defp preload_each([], _repo_name, _preloads, _tuplet), do: [] + defp preload_each(structs, repo_name, preloads, tuplet) do + if sample = Enum.find(structs, & &1) do + module = sample.__struct__ + prefix = preload_prefix(tuplet, sample) + {assocs, throughs} = expand(module, preloads, {%{}, %{}}) + + {fetched_assocs, to_fetch_queries} = + prepare_queries(structs, module, assocs, prefix, repo_name, tuplet) + + fetched_queries = maybe_pmap(to_fetch_queries, repo_name, tuplet) + assocs = preload_assocs(fetched_assocs, fetched_queries, repo_name, tuplet) + throughs = Map.values(throughs) + + for struct <- structs do + struct = Enum.reduce assocs, struct, &load_assoc/2 + struct = Enum.reduce throughs, struct, &load_through/2 + struct + end + else + structs + end + end + + defp preload_prefix({_adapter_meta, opts}, sample) do + case Keyword.fetch(opts, :prefix) do + {:ok, prefix} -> + prefix + + :error -> + case sample do + %{__meta__: %{prefix: prefix}} -> prefix + # Must be an embedded schema + _ -> nil + end + end + end + + ## Association preloading + + # First we traverse all assocs and find which queries we need to run. 
+ defp prepare_queries(structs, module, assocs, prefix, repo_name, tuplet) do + Enum.reduce(assocs, {[], []}, fn + {_key, {{:assoc, assoc, related_key}, take, query, preloads}}, {assocs, queries} -> + {fetch_ids, loaded_ids, loaded_structs} = fetch_ids(structs, module, assoc, tuplet) + + queries = + if fetch_ids != [] do + [ + fn tuplet -> + fetch_query(fetch_ids, assoc, repo_name, query, prefix, related_key, take, tuplet) + end + | queries + ] + else + queries + end + + {[{assoc, fetch_ids != [], loaded_ids, loaded_structs, preloads} | assocs], queries} + end) + end + + # Then we execute queries in parallel + defp maybe_pmap(preloaders, _repo_name, {adapter_meta, opts}) do + if match?([_,_|_], preloaders) and not adapter_meta.adapter.checked_out?(adapter_meta) and + Keyword.get(opts, :in_parallel, true) do + # We pass caller: self() so the ownership pool knows where + # to fetch the connection from and set the proper timeouts. + # Note while the ownership pool uses '$callers' from pdict, + # it does not do so in automatic mode, hence this line is + # still necessary. + opts = Keyword.put_new(opts, :caller, self()) + + preloaders + |> Task.async_stream(&(&1.({adapter_meta, opts})), timeout: :infinity) + |> Enum.map(fn {:ok, assoc} -> assoc end) + else + Enum.map(preloaders, &(&1.({adapter_meta, opts}))) + end + end + + # Then we unpack the query results, merge them, and preload recursively + defp preload_assocs( + [{assoc, query?, loaded_ids, loaded_structs, preloads} | assocs], + queries, + repo_name, + tuplet + ) do + {fetch_ids, fetch_structs, queries} = maybe_unpack_query(query?, queries) + all = preload_each(Enum.reverse(loaded_structs, fetch_structs), repo_name, preloads, tuplet) + entry = {:assoc, assoc, assoc_map(assoc.cardinality, Enum.reverse(loaded_ids, fetch_ids), all)} + [entry | preload_assocs(assocs, queries, repo_name, tuplet)] + end + + defp preload_assocs([], [], _repo_name, _tuplet), do: [] + + defp maybe_unpack_query(false, queries), do: {[], [], queries} + defp maybe_unpack_query(true, [{ids, structs} | queries]), do: {ids, structs, queries} + + defp fetch_ids(structs, module, assoc, {_adapter_meta, opts}) do + %{field: field, owner_key: owner_key, cardinality: card} = assoc + force? = Keyword.get(opts, :force, false) + + Enum.reduce structs, {[], [], []}, fn + nil, acc -> + acc + struct, {fetch_ids, loaded_ids, loaded_structs} -> + assert_struct!(module, struct) + %{^owner_key => id, ^field => value} = struct + loaded? = Ecto.assoc_loaded?(value) and not force? + + if loaded? and is_nil(id) and not Ecto.Changeset.Relation.empty?(assoc, value) do + Logger.warn """ + association `#{field}` for `#{inspect(module)}` has a loaded value but \ + its association key `#{owner_key}` is nil. This usually means one of: + + * `#{owner_key}` was not selected in a query + * the struct was set with default values for `#{field}` which now you want to override + + If this is intentional, set force: true to disable this warning + """ + end + + cond do + card == :one and loaded? -> + {fetch_ids, [id | loaded_ids], [value | loaded_structs]} + card == :many and loaded? -> + {fetch_ids, [{id, length(value)} | loaded_ids], value ++ loaded_structs} + is_nil(id) -> + {fetch_ids, loaded_ids, loaded_structs} + true -> + {[id | fetch_ids], loaded_ids, loaded_structs} + end + end + end + + defp fetch_query(ids, assoc, _repo_name, query, _prefix, related_key, _take, _tuplet) when is_function(query, 1) do + # Note we use an explicit sort because we don't want + # to reorder based on the struct. 
Only the ID. + ids + |> Enum.uniq + |> query.() + |> fetched_records_to_tuple_ids(assoc, related_key) + |> Enum.sort(fn {id1, _}, {id2, _} -> id1 <= id2 end) + |> unzip_ids([], []) + end + + defp fetch_query(ids, %{cardinality: card} = assoc, repo_name, query, prefix, related_key, take, tuplet) do + query = assoc.__struct__.assoc_query(assoc, query, Enum.uniq(ids)) + field = related_key_to_field(query, related_key) + + # Normalize query + query = %{Ecto.Query.Planner.ensure_select(query, take || true) | prefix: prefix} + + # Add the related key to the query results + query = update_in query.select.expr, &{:{}, [], [field, &1]} + + # If we are returning many results, we must sort by the key too + query = + case card do + :many -> + update_in query.order_bys, fn order_bys -> + [%Ecto.Query.QueryExpr{expr: preload_order(assoc, query, field), params: [], + file: __ENV__.file, line: __ENV__.line}|order_bys] + end + :one -> + query + end + + unzip_ids Ecto.Repo.Queryable.all(repo_name, query, tuplet), [], [] + end + + defp fetched_records_to_tuple_ids([], _assoc, _related_key), + do: [] + + defp fetched_records_to_tuple_ids([%{} | _] = entries, _assoc, {0, key}), + do: Enum.map(entries, &{Map.fetch!(&1, key), &1}) + + defp fetched_records_to_tuple_ids([{_, %{}} | _] = entries, _assoc, _related_key), + do: entries + + defp fetched_records_to_tuple_ids([entry | _], assoc, _), + do: raise """ + invalid custom preload for `#{assoc.field}` on `#{inspect assoc.owner}`. + + For many_to_many associations, the custom function given to preload should \ + return a tuple with the associated key as first element and the record as \ + second element. + + For example, imagine posts has many to many tags through a posts_tags table. \ + When preloading the tags, you may write: + + custom_tags = fn post_ids -> + Repo.all( + from t in Tag, + join: pt in "posts_tags", + where: t.custom and pt.post_id in ^post_ids and pt.tag_id == t.id + ) + end + + from Post, preload: [tags: ^custom_tags] + + Unfortunately the query above is not enough because Ecto won't know how to \ + associate the posts with the tags. In those cases, you need to return a tuple \ + with the `post_id` as first element and the tag record as second. 
The new query \ + will have a select field as follows: + + from t in Tag, + join: pt in "posts_tags", + where: t.custom and pt.post_id in ^post_ids and pt.tag_id == t.id, + select: {pt.post_id, t} + + We expected a tuple but we got: #{inspect(entry)} + """ + + defp preload_order(assoc, query, related_field) do + custom_order_by = Enum.map(assoc.preload_order, fn + {direction, field} -> + {direction, related_key_to_field(query, {0, field})} + field -> + {:asc, related_key_to_field(query, {0, field})} + end) + + [{:asc, related_field} | custom_order_by] + end + + defp related_key_to_field(query, {pos, key, field_type}) do + field_ast = related_key_to_field(query, {pos, key}) + + {:type, [], [field_ast, field_type]} + end + + defp related_key_to_field(query, {pos, key}) do + {{:., [], [{:&, [], [related_key_pos(query, pos)]}, key]}, [], []} + end + + defp related_key_pos(_query, pos) when pos >= 0, do: pos + defp related_key_pos(query, pos), do: Ecto.Query.Builder.count_binds(query) + pos + + defp unzip_ids([{k, v}|t], acc1, acc2), do: unzip_ids(t, [k|acc1], [v|acc2]) + defp unzip_ids([], acc1, acc2), do: {acc1, acc2} + + defp assert_struct!(mod, %{__struct__: mod}), do: true + defp assert_struct!(mod, %{__struct__: struct}) do + raise ArgumentError, "expected a homogeneous list containing the same struct, " <> + "got: #{inspect mod} and #{inspect struct}" + end + + defp assoc_map(:one, ids, structs) do + one_assoc_map(ids, structs, %{}) + end + defp assoc_map(:many, ids, structs) do + many_assoc_map(ids, structs, %{}) + end + + defp one_assoc_map([id|ids], [struct|structs], map) do + one_assoc_map(ids, structs, Map.put(map, id, struct)) + end + defp one_assoc_map([], [], map) do + map + end + + defp many_assoc_map([{id, n}|ids], structs, map) do + {acc, structs} = split_n(structs, n, []) + many_assoc_map(ids, structs, Map.put(map, id, acc)) + end + defp many_assoc_map([id|ids], [struct|structs], map) do + {ids, structs, acc} = split_while(ids, structs, id, [struct]) + many_assoc_map(ids, structs, Map.put(map, id, acc)) + end + defp many_assoc_map([], [], map) do + map + end + + defp split_n(structs, 0, acc), do: {acc, structs} + defp split_n([struct | structs], n, acc), do: split_n(structs, n - 1, [struct | acc]) + + defp split_while([id|ids], [struct|structs], id, acc), + do: split_while(ids, structs, id, [struct|acc]) + defp split_while(ids, structs, _id, acc), + do: {ids, structs, acc} + + ## Load preloaded data + + defp load_assoc({:assoc, _assoc, _ids}, nil) do + nil + end + + defp load_assoc({:assoc, assoc, ids}, struct) do + %{field: field, owner_key: owner_key, cardinality: cardinality} = assoc + key = Map.fetch!(struct, owner_key) + + loaded = + case ids do + %{^key => value} -> value + _ when cardinality == :many -> [] + _ -> nil + end + + Map.put(struct, field, loaded) + end + + defp load_through({:through, assoc, throughs}, struct) do + %{cardinality: cardinality, field: field, owner: owner} = assoc + {loaded, _} = Enum.reduce(throughs, {[struct], owner}, &recur_through/2) + Map.put(struct, field, maybe_first(loaded, cardinality)) + end + + defp maybe_first(list, :one), do: List.first(list) + defp maybe_first(list, _), do: list + + defp recur_through(field, {structs, owner}) do + assoc = owner.__schema__(:association, field) + case assoc.__struct__.preload_info(assoc) do + {:assoc, %{related: related}, _} -> + pk_fields = + related.__schema__(:primary_key) + |> validate_has_pk_field!(related, assoc) + + {children, _} = + Enum.reduce(structs, {[], %{}}, fn struct, acc -> + struct 
+ |> Map.fetch!(field) + |> List.wrap() + |> Enum.reduce(acc, fn child, {fresh, set} -> + pk_values = + child + |> through_pks(pk_fields, assoc) + |> validate_non_null_pk!(child, pk_fields, assoc) + + case set do + %{^pk_values => true} -> + {fresh, set} + _ -> + {[child|fresh], Map.put(set, pk_values, true)} + end + end) + end) + + {Enum.reverse(children), related} + + {:through, _, through} -> + Enum.reduce(through, {structs, owner}, &recur_through/2) + end + end + + defp validate_has_pk_field!([], related, assoc) do + raise ArgumentError, + "cannot preload through association `#{assoc.field}` on " <> + "`#{inspect assoc.owner}`. Ecto expected the #{inspect related} schema " <> + "to have at least one primary key field" + end + + defp validate_has_pk_field!(pk_fields, _related, _assoc), do: pk_fields + + defp through_pks(map, pks, assoc) do + Enum.map(pks, fn pk -> + case map do + %{^pk => value} -> + value + + _ -> + raise ArgumentError, + "cannot preload through association `#{assoc.field}` on " <> + "`#{inspect assoc.owner}`. Ecto expected a map/struct with " <> + "the key `#{pk}` but got: #{inspect map}" + end + end) + end + + defp validate_non_null_pk!(values, map, pks, assoc) do + case values do + [nil | _] -> + raise ArgumentError, + "cannot preload through association `#{assoc.field}` on " <> + "`#{inspect assoc.owner}` because the primary key `#{hd(pks)}` " <> + "is nil for map/struct: #{inspect map}" + + _ -> + values + end + end + + ## Normalizer + + def normalize(preload, take, original) do + normalize_each(wrap(preload, original), [], take, original) + end + + defp normalize_each({atom, {query, list}}, acc, take, original) + when is_atom(atom) and (is_map(query) or is_function(query, 1)) do + fields = take(take, atom) + [{atom, {fields, query!(query), normalize_each(wrap(list, original), [], fields, original)}}|acc] + end + + defp normalize_each({atom, query}, acc, take, _original) + when is_atom(atom) and (is_map(query) or is_function(query, 1)) do + [{atom, {take(take, atom), query!(query), []}}|acc] + end + + defp normalize_each({atom, list}, acc, take, original) when is_atom(atom) do + fields = take(take, atom) + [{atom, {fields, nil, normalize_each(wrap(list, original), [], fields, original)}}|acc] + end + + defp normalize_each(atom, acc, take, _original) when is_atom(atom) do + [{atom, {take(take, atom), nil, []}}|acc] + end + + defp normalize_each(other, acc, take, original) do + Enum.reduce(wrap(other, original), acc, &normalize_each(&1, &2, take, original)) + end + + defp query!(query) when is_function(query, 1), do: query + defp query!(%Ecto.Query{} = query), do: query + + defp take(take, field) do + case Access.fetch(take, field) do + {:ok, fields} -> List.wrap(fields) + :error -> nil + end + end + + defp wrap(list, _original) when is_list(list), + do: list + defp wrap(atom, _original) when is_atom(atom), + do: atom + defp wrap(other, original) do + raise ArgumentError, "invalid preload `#{inspect other}` in `#{inspect original}`. 
" <> + "preload expects an atom, a (nested) keyword or a (nested) list of atoms" + end + + ## Expand + + def expand(schema, preloads, acc) do + Enum.reduce(preloads, acc, fn {preload, {fields, query, sub_preloads}}, {assocs, throughs} -> + assoc = association_from_schema!(schema, preload) + info = assoc.__struct__.preload_info(assoc) + + case info do + {:assoc, _, _} -> + value = {info, fields, query, sub_preloads} + assocs = Map.update(assocs, preload, value, &merge_preloads(preload, value, &1)) + {assocs, throughs} + {:through, _, through} -> + through = + through + |> Enum.reverse() + |> Enum.reduce({fields, query, sub_preloads}, &{nil, nil, [{&1, &2}]}) + |> elem(2) + expand(schema, through, {assocs, Map.put(throughs, preload, info)}) + end + end) + end + + defp merge_preloads(_preload, {info, _, nil, left}, {info, take, query, right}), + do: {info, take, query, left ++ right} + defp merge_preloads(_preload, {info, take, query, left}, {info, _, nil, right}), + do: {info, take, query, left ++ right} + defp merge_preloads(preload, {info, _, left, _}, {info, _, right, _}) do + raise ArgumentError, "cannot preload `#{preload}` as it has been supplied more than once " <> + "with different queries: #{inspect left} and #{inspect right}" + end + + # Since there is some ambiguity between assoc and queries. + # We reimplement this function here for nice error messages. + defp association_from_schema!(schema, assoc) do + schema.__schema__(:association, assoc) || + raise ArgumentError, + "schema #{inspect schema} does not have association #{inspect assoc}#{maybe_module(assoc)}" + end + + defp maybe_module(assoc) do + case Atom.to_string(assoc) do + "Elixir." <> _ -> + " (if you were trying to pass a schema as a query to preload, " <> + "you have to explicitly convert it to a query by doing `from x in #{inspect assoc}` " <> + "or by calling Ecto.Queryable.to_query/1)" + + _ -> + "" + end + end + + defp filter_and_reraise(exception, stacktrace) do + reraise exception, Enum.reject(stacktrace, &match?({__MODULE__, _, _, _}, &1)) + end +end diff --git a/deps/ecto/lib/ecto/repo/queryable.ex b/deps/ecto/lib/ecto/repo/queryable.ex new file mode 100644 index 0000000..b060a6c --- /dev/null +++ b/deps/ecto/lib/ecto/repo/queryable.ex @@ -0,0 +1,558 @@ +defmodule Ecto.Repo.Queryable do + @moduledoc false + + alias Ecto.Queryable + alias Ecto.Query + alias Ecto.Query.Planner + alias Ecto.Query.SelectExpr + + import Ecto.Query.Planner, only: [attach_prefix: 2] + + require Ecto.Query + + def all(name, queryable, tuplet) do + query = + queryable + |> Ecto.Queryable.to_query() + |> Ecto.Query.Planner.ensure_select(true) + + execute(:all, name, query, tuplet) |> elem(1) + end + + def stream(_name, queryable, {adapter_meta, opts}) do + %{adapter: adapter, cache: cache, repo: repo} = adapter_meta + + query = + queryable + |> Ecto.Queryable.to_query() + |> Ecto.Query.Planner.ensure_select(true) + + {query, opts} = repo.prepare_query(:stream, query, opts) + query = attach_prefix(query, opts) + {query_meta, prepared, params} = Planner.query(query, :all, cache, adapter, 0) + + case query_meta do + %{select: nil} -> + adapter_meta + |> adapter.stream(query_meta, prepared, params, opts) + |> Stream.flat_map(fn {_, nil} -> [] end) + + %{select: select, preloads: preloads} -> + %{ + assocs: assocs, + preprocess: preprocess, + postprocess: postprocess, + take: take, + from: from + } = select + + if preloads != [] or assocs != [] do + raise Ecto.QueryError, query: query, message: "preloads are not supported on streams" + end + 
+ preprocessor = preprocessor(from, preprocess, adapter) + stream = adapter.stream(adapter_meta, query_meta, prepared, params, opts) + postprocessor = postprocessor(from, postprocess, take, adapter) + + stream + |> Stream.flat_map(fn {_, rows} -> rows end) + |> Stream.map(preprocessor) + |> Stream.map(postprocessor) + end + end + + def get(name, queryable, id, opts) do + one(name, query_for_get(queryable, id), opts) + end + + def get!(name, queryable, id, opts) do + one!(name, query_for_get(queryable, id), opts) + end + + def get_by(name, queryable, clauses, opts) do + one(name, query_for_get_by(queryable, clauses), opts) + end + + def get_by!(name, queryable, clauses, opts) do + one!(name, query_for_get_by(queryable, clauses), opts) + end + + def reload(name, [head | _] = structs, opts) when is_list(structs) do + results = all(name, query_for_reload(structs), opts) + + [pk] = head.__struct__.__schema__(:primary_key) + + for struct <- structs do + struct_pk = Map.fetch!(struct, pk) + Enum.find(results, &Map.fetch!(&1, pk) == struct_pk) + end + end + + def reload(name, struct, opts) do + one(name, query_for_reload([struct]), opts) + end + + def reload!(name, [head | _] = structs, opts) when is_list(structs) do + query = query_for_reload(structs) + results = all(name, query, opts) + + [pk] = head.__struct__.__schema__(:primary_key) + + for struct <- structs do + struct_pk = Map.fetch!(struct, pk) + Enum.find(results, &Map.fetch!(&1, pk) == struct_pk) || raise "could not reload #{inspect(struct)}, maybe it doesn't exist or was deleted" + end + end + + def reload!(name, struct, opts) do + query = query_for_reload([struct]) + one!(name, query, opts) + end + + def aggregate(name, queryable, aggregate, opts) do + one!(name, query_for_aggregate(queryable, aggregate), opts) + end + + def aggregate(name, queryable, aggregate, field, opts) do + one!(name, query_for_aggregate(queryable, aggregate, field), opts) + end + + def exists?(name, queryable, opts) do + queryable = + Query.exclude(queryable, :select) + |> Query.exclude(:preload) + |> Query.exclude(:order_by) + |> Query.exclude(:distinct) + |> Query.select(1) + |> Query.limit(1) + |> rewrite_combinations() + + case all(name, queryable, opts) do + [1] -> true + [] -> false + end + end + + defp rewrite_combinations(%{combinations: []} = query), do: query + + defp rewrite_combinations(%{combinations: combinations} = query) do + combinations = Enum.map(combinations, fn {type, query} -> + {type, query |> Query.exclude(:select) |> Query.select(1)} + end) + + %{query | combinations: combinations} + end + + def one(name, queryable, tuplet) do + case all(name, queryable, tuplet) do + [one] -> one + [] -> nil + other -> raise Ecto.MultipleResultsError, queryable: queryable, count: length(other) + end + end + + def one!(name, queryable, tuplet) do + case all(name, queryable, tuplet) do + [one] -> one + [] -> raise Ecto.NoResultsError, queryable: queryable + other -> raise Ecto.MultipleResultsError, queryable: queryable, count: length(other) + end + end + + def update_all(name, queryable, [], tuplet) do + update_all(name, queryable, tuplet) + end + + def update_all(name, queryable, updates, tuplet) do + query = Query.from(queryable, update: ^updates) + update_all(name, query, tuplet) + end + + defp update_all(name, queryable, tuplet) do + query = Ecto.Queryable.to_query(queryable) + execute(:update_all, name, query, tuplet) + end + + def delete_all(name, queryable, tuplet) do + query = Ecto.Queryable.to_query(queryable) + execute(:delete_all, name, query, 
tuplet) + end + + @doc """ + Load structs from query. + """ + def struct_load!([{field, type} | types], [value | values], acc, all_nil?, struct, adapter) do + all_nil? = all_nil? and value == nil + value = load!(type, value, field, struct, adapter) + struct_load!(types, values, [{field, value} | acc], all_nil?, struct, adapter) + end + + def struct_load!([], values, _acc, true, _struct, _adapter) do + {nil, values} + end + + def struct_load!([], values, acc, false, struct, _adapter) do + {Map.merge(struct, Map.new(acc)), values} + end + + ## Helpers + + defp execute(operation, name, query, {adapter_meta, opts} = tuplet) do + %{adapter: adapter, cache: cache, repo: repo} = adapter_meta + + {query, opts} = repo.prepare_query(operation, query, opts) + query = attach_prefix(query, opts) + {query_meta, prepared, params} = Planner.query(query, operation, cache, adapter, 0) + + case query_meta do + %{select: nil} -> + adapter.execute(adapter_meta, query_meta, prepared, params, opts) + + %{select: select, sources: sources, preloads: preloads} -> + %{ + preprocess: preprocess, + postprocess: postprocess, + take: take, + assocs: assocs, + from: from + } = select + + preprocessor = preprocessor(from, preprocess, adapter) + {count, rows} = adapter.execute(adapter_meta, query_meta, prepared, params, opts) + postprocessor = postprocessor(from, postprocess, take, adapter) + + {count, + rows + |> Ecto.Repo.Assoc.query(assocs, sources, preprocessor) + |> Ecto.Repo.Preloader.query(name, preloads, take, postprocessor, tuplet)} + end + end + + defp preprocessor({_, {:source, {source, schema}, prefix, types}}, preprocess, adapter) do + struct = Ecto.Schema.Loader.load_struct(schema, prefix, source) + + fn row -> + {entry, rest} = struct_load!(types, row, [], false, struct, adapter) + preprocess(rest, preprocess, entry, adapter) + end + end + + defp preprocessor({_, from}, preprocess, adapter) do + fn row -> + {entry, rest} = process(row, from, nil, adapter) + preprocess(rest, preprocess, entry, adapter) + end + end + + defp preprocessor(:none, preprocess, adapter) do + fn row -> + preprocess(row, preprocess, nil, adapter) + end + end + + defp preprocess(row, [], _from, _adapter) do + row + end + + defp preprocess(row, [source | sources], from, adapter) do + {entry, rest} = process(row, source, from, adapter) + [entry | preprocess(rest, sources, from, adapter)] + end + + defp postprocessor({:any, _}, postprocess, _take, adapter) do + fn [from | row] -> + row |> process(postprocess, from, adapter) |> elem(0) + end + end + + defp postprocessor({:map, _}, postprocess, take, adapter) do + fn [from | row] -> + row |> process(postprocess, to_map(from, take), adapter) |> elem(0) + end + end + + defp postprocessor(:none, postprocess, _take, adapter) do + fn row -> row |> process(postprocess, nil, adapter) |> elem(0) end + end + + defp process(row, {:source, :from}, from, _adapter) do + {from, row} + end + + defp process(row, {:source, {source, schema}, prefix, types}, _from, adapter) do + struct = Ecto.Schema.Loader.load_struct(schema, prefix, source) + struct_load!(types, row, [], true, struct, adapter) + end + + defp process(row, {:merge, left, right}, from, adapter) do + {left, row} = process(row, left, from, adapter) + {right, row} = process(row, right, from, adapter) + + data = + case {left, right} do + {%{__struct__: s}, %{__struct__: s}} -> + Map.merge(left, right) + + {%{__struct__: left_struct}, %{__struct__: right_struct}} -> + raise ArgumentError, + "cannot merge structs of different types, " <> + "got: 
#{inspect(left_struct)} and #{inspect(right_struct)}" + + {%{__struct__: name}, %{}} -> + for {key, _} <- right, not Map.has_key?(left, key) do + raise ArgumentError, "struct #{inspect(name)} does not have the key #{inspect(key)}" + end + + Map.merge(left, right) + + {%{}, %{}} -> + Map.merge(left, right) + + {%{}, nil} -> + left + + {_, %{}} -> + raise ArgumentError, + "cannot merge because the left side is not a map, got: #{inspect(left)}" + + {%{}, _} -> + raise ArgumentError, + "cannot merge because the right side is not a map, got: #{inspect(right)}" + end + + {data, row} + end + + defp process(row, {:struct, struct, data, args}, from, adapter) do + case process(row, data, from, adapter) do + {%{__struct__: ^struct} = data, row} -> + process_update(data, args, row, from, adapter) + + {data, _row} -> + raise BadStructError, struct: struct, term: data + end + end + + defp process(row, {:struct, struct, args}, from, adapter) do + {fields, row} = process_kv(args, row, from, adapter) + + case Map.merge(struct.__struct__(), Map.new(fields)) do + %{__meta__: %Ecto.Schema.Metadata{state: state} = metadata} = struct + when state != :loaded -> + {Map.replace!(struct, :__meta__, %{metadata | state: :loaded}), row} + + map -> + {map, row} + end + end + + defp process(row, {:map, data, args}, from, adapter) do + {data, row} = process(row, data, from, adapter) + process_update(data, args, row, from, adapter) + end + + defp process(row, {:map, args}, from, adapter) do + {args, row} = process_kv(args, row, from, adapter) + {Map.new(args), row} + end + + defp process(row, {:list, args}, from, adapter) do + process_args(args, row, from, adapter) + end + + defp process(row, {:tuple, args}, from, adapter) do + {args, row} = process_args(args, row, from, adapter) + {List.to_tuple(args), row} + end + + defp process([value | row], {:value, :any}, _from, _adapter) do + {value, row} + end + + defp process([value | row], {:value, type}, _from, adapter) do + {load!(type, value, nil, nil, adapter), row} + end + + defp process(row, value, _from, _adapter) + when is_binary(value) or is_number(value) or is_atom(value) do + {value, row} + end + + defp process_update(data, args, row, from, adapter) do + {args, row} = process_kv(args, row, from, adapter) + data = Enum.reduce(args, data, fn {key, value}, acc -> %{acc | key => value} end) + {data, row} + end + + defp process_args(args, row, from, adapter) do + Enum.map_reduce(args, row, fn arg, row -> + process(row, arg, from, adapter) + end) + end + + defp process_kv(kv, row, from, adapter) do + Enum.map_reduce(kv, row, fn {key, value}, row -> + {key, row} = process(row, key, from, adapter) + {value, row} = process(row, value, from, adapter) + {{key, value}, row} + end) + end + + @compile {:inline, load!: 5} + defp load!(type, value, field, struct, adapter) do + case Ecto.Type.adapter_load(adapter, type, value) do + {:ok, value} -> + value + + :error -> + field = field && " for field #{inspect(field)}" + struct = struct && " in #{inspect(struct)}" + + raise ArgumentError, + "cannot load `#{inspect(value)}` as type #{inspect(type)}#{field}#{struct}" + end + end + + defp to_map(nil, _fields) do + nil + end + + defp to_map(value, fields) when is_list(value) do + Enum.map(value, &to_map(&1, fields)) + end + + defp to_map(value, fields) do + for field <- fields, into: %{} do + case field do + {k, v} -> {k, to_map(Map.fetch!(value, k), List.wrap(v))} + k -> {k, Map.fetch!(value, k)} + end + end + end + + defp query_for_get(_queryable, nil) do + raise ArgumentError, "cannot 
perform Ecto.Repo.get/2 because the given value is nil" + end + + defp query_for_get(queryable, id) do + query = Queryable.to_query(queryable) + schema = assert_schema!(query) + + case schema.__schema__(:primary_key) do + [pk] -> + Query.from(x in query, where: field(x, ^pk) == ^id) + + pks -> + raise ArgumentError, + "Ecto.Repo.get/2 requires the schema #{inspect(schema)} " <> + "to have exactly one primary key, got: #{inspect(pks)}" + end + end + + defp query_for_get_by(queryable, clauses) do + Query.where(queryable, [], ^Enum.to_list(clauses)) + end + + defp query_for_reload([head| _] = structs) do + assert_structs!(structs) + + schema = head.__struct__ + prefix = head.__meta__.prefix + + case schema.__schema__(:primary_key) do + [pk] -> + keys = Enum.map(structs, &get_pk!(&1, pk)) + query = Query.from(x in schema, where: field(x, ^pk) in ^keys) + %{query | prefix: prefix} + + pks -> + raise ArgumentError, + "Ecto.Repo.reload/2 requires the schema #{inspect(schema)} " <> + "to have exactly one primary key, got: #{inspect(pks)}" + end + end + + defp query_for_aggregate(queryable, aggregate) do + query = + case prepare_for_aggregate(queryable) do + %{distinct: nil, limit: nil, offset: nil, combinations: []} = query -> + %{query | order_bys: []} + + query -> + query + |> Query.subquery() + |> Queryable.Ecto.SubQuery.to_query() + end + + select = %SelectExpr{expr: {aggregate, [], []}, file: __ENV__.file, line: __ENV__.line} + %{query | select: select} + end + + defp query_for_aggregate(queryable, aggregate, field) do + ast = field(0, field) + + query = + case prepare_for_aggregate(queryable) do + %{distinct: nil, limit: nil, offset: nil, combinations: []} = query -> + %{query | order_bys: []} + + query -> + select = %SelectExpr{expr: ast, file: __ENV__.file, line: __ENV__.line} + + %{query | select: select} + |> Query.subquery() + |> Queryable.Ecto.SubQuery.to_query() + end + + select = %SelectExpr{expr: {aggregate, [], [ast]}, file: __ENV__.file, line: __ENV__.line} + %{query | select: select} + end + + defp prepare_for_aggregate(queryable) do + case %{Queryable.to_query(queryable) | preloads: [], assocs: []} do + %{group_bys: [_ | _]} = query -> + raise Ecto.QueryError, message: "cannot aggregate on query with group_by", query: query + + %{} = query -> + query + end + end + + defp field(ix, field) when is_integer(ix) and is_atom(field) do + {{:., [], [{:&, [], [ix]}, field]}, [], []} + end + + defp assert_schema!(%{from: %{source: {_source, schema}}}) when schema != nil, do: schema + + defp assert_schema!(query) do + raise Ecto.QueryError, + query: query, + message: "expected a from expression with a schema" + end + + defp assert_structs!([head | _] = structs) when is_list(structs) do + unless Enum.all?(structs, &schema?/1) do + raise ArgumentError, "expected a struct or a list of structs, received #{inspect(structs)}" + end + + unless Enum.all?(structs, &(&1.__struct__ == head.__struct__)) do + raise ArgumentError, "expected an homogenous list, received different struct types" + end + + :ok + end + + defp schema?(%{__meta__: _}), do: true + defp schema?(_), do: false + + defp get_pk!(struct, pk) do + struct + |> Map.fetch!(pk) + |> case do + nil -> + raise ArgumentError, "Ecto.Repo.reload/2 expects existent structs, found a `nil` primary key" + key -> + key + end + end +end diff --git a/deps/ecto/lib/ecto/repo/registry.ex b/deps/ecto/lib/ecto/repo/registry.ex new file mode 100644 index 0000000..88f9cd3 --- /dev/null +++ b/deps/ecto/lib/ecto/repo/registry.ex @@ -0,0 +1,51 @@ +defmodule 
Ecto.Repo.Registry do + @moduledoc false + + use GenServer + + def start_link(_opts) do + GenServer.start_link(__MODULE__, :ok, name: __MODULE__) + end + + def associate(pid, name, value) when is_pid(pid) do + GenServer.call(__MODULE__, {:associate, pid, name, value}) + end + + def all_running() do + for [pid, name] <- :ets.match(__MODULE__, {:"$1", :_, :"$2", :_}) do + name || pid + end + end + + def lookup(repo) when is_atom(repo) do + GenServer.whereis(repo) + |> Kernel.||(raise "could not lookup Ecto repo #{inspect repo} because it was not started or it does not exist") + |> lookup() + end + + def lookup(pid) when is_pid(pid) do + :ets.lookup_element(__MODULE__, pid, 4) + end + + ## Callbacks + + @impl true + def init(:ok) do + table = :ets.new(__MODULE__, [:named_table, read_concurrency: true]) + {:ok, table} + end + + @impl true + def handle_call({:associate, pid, name, value}, _from, table) do + ref = Process.monitor(pid) + true = :ets.insert(table, {pid, ref, name, value}) + {:reply, :ok, table} + end + + @impl true + def handle_info({:DOWN, ref, _type, pid, _reason}, table) do + [{^pid, ^ref, _, _}] = :ets.lookup(table, pid) + :ets.delete(table, pid) + {:noreply, table} + end +end diff --git a/deps/ecto/lib/ecto/repo/schema.ex b/deps/ecto/lib/ecto/repo/schema.ex new file mode 100644 index 0000000..8093313 --- /dev/null +++ b/deps/ecto/lib/ecto/repo/schema.ex @@ -0,0 +1,1012 @@ +defmodule Ecto.Repo.Schema do + # The module invoked by user defined repos + # for schema related functionality. + @moduledoc false + + alias Ecto.Changeset + alias Ecto.Changeset.Relation + require Ecto.Query + + import Ecto.Query.Planner, only: [attach_prefix: 2] + + @doc """ + Implementation for `Ecto.Repo.insert_all/3`. + """ + def insert_all(repo, name, schema, rows, tuplet) when is_atom(schema) do + do_insert_all(repo, name, schema, schema.__schema__(:prefix), + schema.__schema__(:source), rows, tuplet) + end + + def insert_all(repo, name, table, rows, tuplet) when is_binary(table) do + do_insert_all(repo, name, nil, nil, table, rows, tuplet) + end + + def insert_all(repo, name, {source, schema}, rows, tuplet) when is_atom(schema) do + do_insert_all(repo, name, schema, schema.__schema__(:prefix), source, rows, tuplet) + end + + defp do_insert_all(_repo, _name, _schema, _prefix, _source, [], {_adapter_meta, opts}) do + if opts[:returning] do + {0, []} + else + {0, nil} + end + end + + defp do_insert_all(repo, _name, schema, prefix, source, rows_or_query, {adapter_meta, opts}) do + %{adapter: adapter} = adapter_meta + autogen_id = schema && schema.__schema__(:autogenerate_id) + dumper = schema && schema.__schema__(:dump) + placeholder_map = Keyword.get(opts, :placeholders, %{}) + + {return_fields_or_types, return_sources} = + schema + |> returning(opts) + |> fields_to_sources(dumper) + + {rows_or_query, header, placeholder_values, counter} = + extract_header_and_fields(repo, rows_or_query, schema, dumper, autogen_id, placeholder_map, adapter, opts) + + schema_meta = metadata(schema, prefix, source, autogen_id, nil, opts) + + on_conflict = Keyword.get(opts, :on_conflict, :raise) + conflict_target = Keyword.get(opts, :conflict_target, []) + conflict_target = conflict_target(conflict_target, dumper) + on_conflict = on_conflict(on_conflict, conflict_target, schema_meta, counter, adapter) + + {count, rows_or_query} = + adapter.insert_all(adapter_meta, schema_meta, header, rows_or_query, on_conflict, return_sources, placeholder_values, opts) + + {count, postprocess(rows_or_query, return_fields_or_types, 
adapter, schema, schema_meta)} + end + + defp postprocess(nil, [], _adapter, _schema, _schema_meta) do + nil + end + + defp postprocess(rows, fields, _adapter, nil, _schema_meta) do + for row <- rows, do: Map.new(Enum.zip(fields, row)) + end + + defp postprocess(rows, types, adapter, schema, %{prefix: prefix, source: source}) do + struct = Ecto.Schema.Loader.load_struct(schema, prefix, source) + + for row <- rows do + {loaded, _} = Ecto.Repo.Queryable.struct_load!(types, row, [], false, struct, adapter) + loaded + end + end + + defp extract_header_and_fields(_repo, rows, schema, dumper, autogen_id, placeholder_map, adapter, _opts) + when is_list(rows) do + mapper = init_mapper(schema, dumper, adapter, placeholder_map) + + {rows, {header, has_query?, placeholder_dump, _}} = + Enum.map_reduce(rows, {%{}, false, %{}, 1}, fn fields, acc -> + {fields, {header, has_query?, placeholder_dump, counter}} = Enum.map_reduce(fields, acc, mapper) + {fields, header} = autogenerate_id(autogen_id, fields, header, adapter) + {fields, {header, has_query?, placeholder_dump, counter}} + end) + + header = Map.keys(header) + + placeholder_size = map_size(placeholder_dump) + + counter = fn -> + Enum.reduce( + rows, + placeholder_size, + &(Enum.count(&1, fn {_, val} -> not match?({:placeholder, _}, val) end) + &2) + ) + end + + placeholder_vals_list = + placeholder_dump + |> Enum.map(fn {_, {idx, _, value}} -> {idx, value} end) + |> Enum.sort + |> Enum.map(&elem(&1, 1)) + + if has_query? do + rows = plan_query_in_rows(rows, header, adapter) + {rows, header, placeholder_vals_list, counter} + else + {rows, header, placeholder_vals_list, counter} + end + end + + defp extract_header_and_fields(repo, %Ecto.Query{} = query, _schema, _dumper, _autogen_id, _placeholder_map, adapter, opts) do + {query, opts} = repo.prepare_query(:insert_all, query, opts) + query = attach_prefix(query, opts) + + {query, params} = Ecto.Adapter.Queryable.plan_query(:insert_all, adapter, query) + + header = case query.select do + %Ecto.Query.SelectExpr{expr: {:%{}, _ctx, args}} -> + Enum.map(args, &elem(&1, 0)) + + _ -> + raise ArgumentError, """ + cannot generate a fields list for insert_all from the given source query + because it does not have a select clause that uses a map: + + #{inspect query} + + Please add a select clause that selects into a map, like this: + + from x in Source, + ..., + select: %{ + field_a: x.bar, + field_b: x.foo + } + + The keys must exist in the schema that is being inserted into + """ + end + + counter = fn -> length(params) end + + {{query, params}, header, [], counter} + end + + defp extract_header_and_fields(_repo, rows_or_query, _schema, _dumper, _autogen_id, _placeholder_map, _adapter, _opts) do + raise ArgumentError, "expected a list of rows or a query, but got #{inspect rows_or_query} as rows_or_query argument in insert_all" + end + + defp init_mapper(nil, _dumper, _adapter, placeholder_map) do + fn {field, value}, acc -> + extract_value(field, value, :any, placeholder_map, acc, & &1) + end + end + + defp init_mapper(schema, dumper, adapter, placeholder_map) do + fn {field, value}, acc -> + case dumper do + %{^field => {source, type}} -> + extract_value(source, value, type, placeholder_map, acc, fn val -> + dump_field!(:insert_all, schema, field, type, val, adapter) + end) + + %{} -> + raise ArgumentError, + "unknown field `#{inspect(field)}` in schema #{inspect(schema)} given to " <> + "insert_all. 
Note virtual fields and associations are not supported" + end + end + end + + defp extract_value(source, value, type, placeholder_map, acc, dumper) do + {header, has_query?, placeholder_dump, counter} = acc + + case value do + %Ecto.Query{} = query -> + {{source, query}, {Map.put(header, source, true), true, placeholder_dump, counter}} + + {:placeholder, key} -> + {value, placeholder_dump, counter} = + extract_placeholder(key, type, placeholder_map, placeholder_dump, counter, dumper) + + {{source, value}, + {Map.put(header, source, true), has_query?, placeholder_dump, counter}} + + value -> + {{source, dumper.(value)}, + {Map.put(header, source, true), has_query?, placeholder_dump, counter}} + end + end + + defp extract_placeholder(key, type, placeholder_map, placeholder_dump, counter, dumper) do + case placeholder_dump do + %{^key => {idx, ^type, _}} -> + {{:placeholder, idx}, placeholder_dump, counter} + + %{^key => {_, type, _}} -> + raise ArgumentError, + "a placeholder key can only be used with columns of the same type. " <> + "The key #{inspect(key)} has already been dumped as a #{inspect(type)}" + + %{} -> + dumped_value = + case placeholder_map do + %{^key => val} -> + dumper.(val) + + _ -> + raise KeyError, + "placeholder key #{inspect(key)} not found in #{inspect(placeholder_map)}" + end + + placeholder_dump = Map.put(placeholder_dump, key, {counter, type, dumped_value}) + {{:placeholder, counter}, placeholder_dump, counter + 1} + end + end + + defp plan_query_in_rows(rows, header, adapter) do + {rows, _counter} = + Enum.map_reduce(rows, 0, fn fields, counter -> + Enum.flat_map_reduce(header, counter, fn key, counter -> + case :lists.keyfind(key, 1, fields) do + {^key, %Ecto.Query{} = query} -> + {query, params, _} = Ecto.Query.Planner.plan(query, :all, adapter) + {query, _} = Ecto.Query.Planner.normalize(query, :all, adapter, counter) + + {[{key, {query, params}}], counter + length(params)} + + {^key, value} -> + {[{key, value}], counter + 1} + + false -> + {[], counter} + end + end) + end) + + rows + end + + defp autogenerate_id(nil, fields, header, _adapter) do + {fields, header} + end + + defp autogenerate_id({key, source, type}, fields, header, adapter) do + case :lists.keyfind(key, 1, fields) do + {^key, _} -> + {fields, header} + + false -> + if value = Ecto.Type.adapter_autogenerate(adapter, type) do + {[{source, value} | fields], Map.put(header, source, true)} + else + {fields, header} + end + end + end + + @doc """ + Implementation for `Ecto.Repo.insert!/2`. + """ + def insert!(repo, name, struct_or_changeset, tuplet) do + case insert(repo, name, struct_or_changeset, tuplet) do + {:ok, struct} -> + struct + + {:error, %Ecto.Changeset{} = changeset} -> + raise Ecto.InvalidChangesetError, action: :insert, changeset: changeset + end + end + + @doc """ + Implementation for `Ecto.Repo.update!/2`. + """ + def update!(repo, name, struct_or_changeset, tuplet) do + case update(repo, name, struct_or_changeset, tuplet) do + {:ok, struct} -> + struct + + {:error, %Ecto.Changeset{} = changeset} -> + raise Ecto.InvalidChangesetError, action: :update, changeset: changeset + end + end + + @doc """ + Implementation for `Ecto.Repo.delete!/2`. + """ + def delete!(repo, name, struct_or_changeset, tuplet) do + case delete(repo, name, struct_or_changeset, tuplet) do + {:ok, struct} -> + struct + + {:error, %Ecto.Changeset{} = changeset} -> + raise Ecto.InvalidChangesetError, action: :delete, changeset: changeset + end + end + + @doc """ + Implementation for `Ecto.Repo.insert/2`. 
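+
+  In rough terms: parent associations in the changeset are processed first,
+  then the row itself (with its embeds) is inserted, then child associations,
+  all inside a transaction when more than one operation is needed.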
+ """ + def insert(repo, name, %Changeset{} = changeset, tuplet) do + do_insert(repo, name, changeset, tuplet) + end + + def insert(repo, name, %{__struct__: _} = struct, tuplet) do + do_insert(repo, name, Ecto.Changeset.change(struct), tuplet) + end + + defp do_insert(repo, _name, %Changeset{valid?: true} = changeset, {adapter_meta, opts} = tuplet) do + %{adapter: adapter} = adapter_meta + %{prepare: prepare, repo_opts: repo_opts} = changeset + opts = Keyword.merge(repo_opts, opts) + + struct = struct_from_changeset!(:insert, changeset) + schema = struct.__struct__ + dumper = schema.__schema__(:dump) + fields = schema.__schema__(:fields) + assocs = schema.__schema__(:associations) + embeds = schema.__schema__(:embeds) + + {return_types, return_sources} = + schema + |> returning(opts) + |> add_read_after_writes(schema) + |> fields_to_sources(dumper) + + on_conflict = Keyword.get(opts, :on_conflict, :raise) + conflict_target = Keyword.get(opts, :conflict_target, []) + conflict_target = conflict_target(conflict_target, dumper) + + # On insert, we always merge the whole struct into the + # changeset as changes, except the primary key if it is nil. + changeset = put_repo_and_action(changeset, :insert, repo, tuplet) + changeset = Relation.surface_changes(changeset, struct, fields ++ assocs) + + wrap_in_transaction(adapter, adapter_meta, opts, changeset, assocs, embeds, prepare, fn -> + assoc_opts = assoc_opts(assocs, opts) + user_changeset = run_prepare(changeset, prepare) + + {changeset, parents, children} = pop_assocs(user_changeset, assocs) + changeset = process_parents(changeset, user_changeset, parents, adapter, assoc_opts) + + if changeset.valid? do + embeds = Ecto.Embedded.prepare(changeset, embeds, adapter, :insert) + + autogen_id = schema.__schema__(:autogenerate_id) + schema_meta = metadata(struct, autogen_id, opts) + changes = Map.merge(changeset.changes, embeds) + + {changes, extra, return_types, return_sources} = + autogenerate_id(autogen_id, changes, return_types, return_sources, adapter) + + {changes, autogen} = + dump_changes!(:insert, Map.take(changes, fields), schema, extra, dumper, adapter) + + on_conflict = + on_conflict(on_conflict, conflict_target, schema_meta, fn -> length(changes) end, adapter) + + args = [adapter_meta, schema_meta, changes, on_conflict, return_sources, opts] + + case apply(user_changeset, adapter, :insert, args) do + {:ok, values} -> + values = extra ++ values + + changeset + |> load_changes(:loaded, return_types, values, embeds, autogen, adapter, schema_meta) + |> process_children(user_changeset, children, adapter, assoc_opts) + + {:error, _} = error -> + error + end + else + {:error, changeset} + end + end) + end + + defp do_insert(repo, _name, %Changeset{valid?: false} = changeset, tuplet) do + {:error, put_repo_and_action(changeset, :insert, repo, tuplet)} + end + + @doc """ + Implementation for `Ecto.Repo.update/2`. + """ + def update(repo, name, %Changeset{} = changeset, tuplet) do + do_update(repo, name, changeset, tuplet) + end + + def update(_repo, _name, %{__struct__: _}, _tuplet) do + raise ArgumentError, "giving a struct to Ecto.Repo.update/2 is not supported. 
" <> + "Ecto is unable to properly track changes when a struct is given, " <> + "an Ecto.Changeset must be given instead" + end + + defp do_update(repo, _name, %Changeset{valid?: true} = changeset, {adapter_meta, opts} = tuplet) do + %{adapter: adapter} = adapter_meta + %{prepare: prepare, repo_opts: repo_opts} = changeset + opts = Keyword.merge(repo_opts, opts) + + struct = struct_from_changeset!(:update, changeset) + schema = struct.__struct__ + dumper = schema.__schema__(:dump) + fields = schema.__schema__(:fields) + assocs = schema.__schema__(:associations) + embeds = schema.__schema__(:embeds) + + force? = !!opts[:force] + filters = add_pk_filter!(changeset.filters, struct) + + {return_types, return_sources} = + schema + |> returning(opts) + |> add_read_after_writes(schema) + |> fields_to_sources(dumper) + + # Differently from insert, update does not copy the struct + # fields into the changeset. All changes must be in the + # changeset before hand. + changeset = put_repo_and_action(changeset, :update, repo, tuplet) + + if changeset.changes != %{} or force? do + wrap_in_transaction(adapter, adapter_meta, opts, changeset, assocs, embeds, prepare, fn -> + assoc_opts = assoc_opts(assocs, opts) + user_changeset = run_prepare(changeset, prepare) + + {changeset, parents, children} = pop_assocs(user_changeset, assocs) + changeset = process_parents(changeset, user_changeset, parents, adapter, assoc_opts) + + if changeset.valid? do + embeds = Ecto.Embedded.prepare(changeset, embeds, adapter, :update) + + original = changeset.changes |> Map.merge(embeds) |> Map.take(fields) + {changes, autogen} = dump_changes!(:update, original, schema, [], dumper, adapter) + + schema_meta = metadata(struct, schema.__schema__(:autogenerate_id), opts) + filters = dump_fields!(:update, schema, filters, dumper, adapter) + args = [adapter_meta, schema_meta, changes, filters, return_sources, opts] + + # If there are no changes or all the changes were autogenerated but not forced, we skip + {action, autogen} = + if original != %{} or (autogen != [] and force?), + do: {:update, autogen}, + else: {:noop, []} + + case apply(user_changeset, adapter, action, args) do + {:ok, values} -> + changeset + |> load_changes(:loaded, return_types, values, embeds, autogen, adapter, schema_meta) + |> process_children(user_changeset, children, adapter, assoc_opts) + + {:error, _} = error -> + error + end + else + {:error, changeset} + end + end) + else + {:ok, changeset.data} + end + end + + defp do_update(repo, _name, %Changeset{valid?: false} = changeset, tuplet) do + {:error, put_repo_and_action(changeset, :update, repo, tuplet)} + end + + @doc """ + Implementation for `Ecto.Repo.insert_or_update/2`. + """ + def insert_or_update(repo, name, changeset, tuplet) do + case get_state(changeset) do + :built -> insert(repo, name, changeset, tuplet) + :loaded -> update(repo, name, changeset, tuplet) + state -> raise ArgumentError, "the changeset has an invalid state " <> + "for Repo.insert_or_update/2: #{state}" + end + end + + @doc """ + Implementation for `Ecto.Repo.insert_or_update!/2`. 
+ """ + def insert_or_update!(repo, name, changeset, tuplet) do + case get_state(changeset) do + :built -> insert!(repo, name, changeset, tuplet) + :loaded -> update!(repo, name, changeset, tuplet) + state -> raise ArgumentError, "the changeset has an invalid state " <> + "for Repo.insert_or_update!/2: #{state}" + end + end + + defp get_state(%Changeset{data: %{__meta__: %{state: state}}}), do: state + defp get_state(%{__struct__: _}) do + raise ArgumentError, "giving a struct to Repo.insert_or_update/2 or " <> + "Repo.insert_or_update!/2 is not supported. " <> + "Please use an Ecto.Changeset" + end + + @doc """ + Implementation for `Ecto.Repo.delete/2`. + """ + def delete(repo, name, %Changeset{} = changeset, tuplet) do + do_delete(repo, name, changeset, tuplet) + end + + def delete(repo, name, %{__struct__: _} = struct, tuplet) do + changeset = Ecto.Changeset.change(struct) + do_delete(repo, name, changeset, tuplet) + end + + defp do_delete(repo, name, %Changeset{valid?: true} = changeset, {adapter_meta, opts} = tuplet) do + %{adapter: adapter} = adapter_meta + %{prepare: prepare, repo_opts: repo_opts} = changeset + opts = Keyword.merge(repo_opts, opts) + + struct = struct_from_changeset!(:delete, changeset) + schema = struct.__struct__ + assocs = to_delete_assocs(schema) + dumper = schema.__schema__(:dump) + changeset = put_repo_and_action(changeset, :delete, repo, tuplet) + + wrap_in_transaction(adapter, adapter_meta, opts, assocs != [], prepare, fn -> + changeset = run_prepare(changeset, prepare) + + if changeset.valid? do + filters = add_pk_filter!(changeset.filters, struct) + filters = dump_fields!(:delete, schema, filters, dumper, adapter) + + # Delete related associations + for %{__struct__: mod, on_delete: on_delete} = reflection <- assocs do + apply(mod, on_delete, [reflection, changeset.data, name, tuplet]) + end + + schema_meta = metadata(struct, schema.__schema__(:autogenerate_id), opts) + args = [adapter_meta, schema_meta, filters, opts] + + case apply(changeset, adapter, :delete, args) do + {:ok, values} -> + changeset = load_changes(changeset, :deleted, [], values, %{}, [], adapter, schema_meta) + {:ok, changeset.data} + + {:error, _} = error -> + error + end + else + {:error, changeset} + end + end) + end + + defp do_delete(repo, _name, %Changeset{valid?: false} = changeset, tuplet) do + {:error, put_repo_and_action(changeset, :delete, repo, tuplet)} + end + + def load(adapter, schema_or_types, data) do + do_load(schema_or_types, data, &Ecto.Type.adapter_load(adapter, &1, &2)) + end + + defp do_load(schema, data, loader) when is_list(data), + do: do_load(schema, Map.new(data), loader) + defp do_load(schema, {fields, values}, loader) when is_list(fields) and is_list(values), + do: do_load(schema, Enum.zip(fields, values), loader) + defp do_load(schema, data, loader) when is_atom(schema), + do: Ecto.Schema.Loader.unsafe_load(schema, data, loader) + defp do_load(types, data, loader) when is_map(types), + do: Ecto.Schema.Loader.unsafe_load(%{}, types, data, loader) + + ## Helpers + + defp returning(schema, opts) do + case Keyword.get(opts, :returning, false) do + [_ | _] = fields -> + fields + [] -> + raise ArgumentError, ":returning expects at least one field to be given, got an empty list" + true when is_nil(schema) -> + raise ArgumentError, ":returning option can only be set to true if a schema is given" + true -> + schema.__schema__(:fields) + false -> + [] + end + end + + defp add_read_after_writes([], schema), + do: schema.__schema__(:read_after_writes) + + defp 
add_read_after_writes(return, schema), + do: Enum.uniq(return ++ schema.__schema__(:read_after_writes)) + + defp fields_to_sources(fields, nil) do + {fields, fields} + end + defp fields_to_sources(fields, dumper) do + Enum.reduce(fields, {[], []}, fn field, {types, sources} -> + {source, type} = Map.fetch!(dumper, field) + {[{field, type} | types], [source | sources]} + end) + end + + defp struct_from_changeset!(action, %{data: nil}), + do: raise(ArgumentError, "cannot #{action} a changeset without :data") + defp struct_from_changeset!(_action, %{data: struct}), + do: struct + + defp put_repo_and_action(%{action: :ignore, valid?: valid?} = changeset, action, repo, {_adapter_meta, opts}) do + if valid? do + raise ArgumentError, "a valid changeset with action :ignore was given to " <> + "#{inspect repo}.#{action}/2. Changesets can only be ignored " <> + "in a repository action if they are also invalid" + else + %{changeset | action: action, repo: repo, repo_opts: opts} + end + end + defp put_repo_and_action(%{action: given}, action, repo, _tuplet) when given != nil and given != action, + do: raise ArgumentError, "a changeset with action #{inspect given} was given to #{inspect repo}.#{action}/2" + defp put_repo_and_action(changeset, action, repo, {_adapter_meta, opts}), + do: %{changeset | action: action, repo: repo, repo_opts: opts} + + defp run_prepare(changeset, prepare) do + Enum.reduce(Enum.reverse(prepare), changeset, fn fun, acc -> + case fun.(acc) do + %Ecto.Changeset{} = acc -> + acc + + other -> + raise "expected function #{inspect fun} given to Ecto.Changeset.prepare_changes/2 " <> + "to return an Ecto.Changeset, got: `#{inspect other}`" + end + end) + end + + defp metadata(schema, prefix, source, autogen_id, context, opts) do + %{ + autogenerate_id: autogen_id, + context: context, + schema: schema, + source: source, + prefix: Keyword.get(opts, :prefix, prefix) + } + end + defp metadata(%{__struct__: schema, __meta__: %{context: context, source: source, prefix: prefix}}, + autogen_id, opts) do + metadata(schema, prefix, source, autogen_id, context, opts) + end + defp metadata(%{__struct__: schema}, _, _) do + raise ArgumentError, "#{inspect(schema)} needs to be a schema with source" + end + + defp conflict_target({:unsafe_fragment, fragment}, _dumper) when is_binary(fragment) do + {:unsafe_fragment, fragment} + end + defp conflict_target(conflict_target, dumper) do + for target <- List.wrap(conflict_target) do + case dumper do + %{^target => {alias, _}} -> + alias + %{} when is_atom(target) -> + raise ArgumentError, "unknown field `#{inspect(target)}` in conflict_target" + _ -> + target + end + end + end + + defp on_conflict(on_conflict, conflict_target, schema_meta, counter_fun, adapter) do + %{source: source, schema: schema, prefix: prefix} = schema_meta + + case on_conflict do + :raise when conflict_target == [] -> + {:raise, [], []} + + :raise -> + raise ArgumentError, ":conflict_target option is forbidden when :on_conflict is :raise" + + :nothing -> + {:nothing, [], conflict_target} + + {:replace, keys} when is_list(keys) -> + fields = Enum.map(keys, &field_source!(schema, &1)) + {fields, [], conflict_target} + + :replace_all -> + {replace_all_fields!(:replace_all, schema, []), [], conflict_target} + + {:replace_all_except, fields} -> + {replace_all_fields!(:replace_all_except, schema, fields), [], conflict_target} + + [_ | _] = on_conflict -> + from = if schema, do: {source, schema}, else: source + query = Ecto.Query.from from, update: ^on_conflict + on_conflict_query(query, 
{source, schema}, prefix, counter_fun, adapter, conflict_target) + + %Ecto.Query{} = query -> + on_conflict_query(query, {source, schema}, prefix, counter_fun, adapter, conflict_target) + + other -> + raise ArgumentError, "unknown value for :on_conflict, got: #{inspect other}" + end + end + + defp replace_all_fields!(kind, nil, _to_remove) do + raise ArgumentError, "cannot use #{inspect(kind)} on operations without a schema" + end + + defp replace_all_fields!(_kind, schema, to_remove) do + Enum.map(schema.__schema__(:fields) -- to_remove, &field_source!(schema, &1)) + end + + defp field_source!(nil, field) do + field + end + + defp field_source!(schema, field) do + schema.__schema__(:field_source, field) || + raise ArgumentError, "unknown field for :on_conflict, got: #{inspect(field)}" + end + + defp on_conflict_query(query, from, prefix, counter_fun, adapter, conflict_target) do + {query, params, _} = + Ecto.Query.Planner.plan(%{query | prefix: prefix}, :update_all, adapter) + + unless query.from.source == from do + raise ArgumentError, "cannot run on_conflict: query because the query " <> + "has a different {source, schema} pair than the " <> + "original struct/changeset/query. Got #{inspect query.from} " <> + "and #{inspect from} respectively" + end + + {query, _} = Ecto.Query.Planner.normalize(query, :update_all, adapter, counter_fun.()) + {query, params, conflict_target} + end + + defp apply(_user_changeset, _adapter, :noop, _args) do + {:ok, []} + end + + defp apply(user_changeset, adapter, action, args) do + case apply(adapter, action, args) do + {:ok, values} -> + {:ok, values} + + {:invalid, constraints} -> + {:error, constraints_to_errors(user_changeset, action, constraints)} + + {:error, :stale} -> + opts = List.last(args) + + case Keyword.fetch(opts, :stale_error_field) do + {:ok, stale_error_field} when is_atom(stale_error_field) -> + stale_message = Keyword.get(opts, :stale_error_message, "is stale") + user_changeset = Changeset.add_error(user_changeset, stale_error_field, stale_message, [stale: true]) + {:error, user_changeset} + + _other -> + raise Ecto.StaleEntryError, changeset: user_changeset, action: action + end + end + end + + defp constraints_to_errors(%{constraints: user_constraints, errors: errors} = changeset, action, constraints) do + constraint_errors = + Enum.map constraints, fn {type, constraint} -> + user_constraint = + Enum.find(user_constraints, fn c -> + case {c.type, c.constraint, c.match} do + {^type, ^constraint, :exact} -> true + {^type, cc, :suffix} -> String.ends_with?(constraint, cc) + {^type, cc, :prefix} -> String.starts_with?(constraint, cc) + _ -> false + end + end) + + case user_constraint do + %{field: field, error_message: error_message, error_type: error_type} -> + {field, {error_message, [constraint: error_type, constraint_name: constraint]}} + nil -> + raise Ecto.ConstraintError, action: action, type: type, + constraint: constraint, changeset: changeset + end + end + + %{changeset | errors: constraint_errors ++ errors, valid?: false} + end + + defp load_changes(changeset, state, types, values, embeds, autogen, adapter, schema_meta) do + %{data: data, changes: changes} = changeset + + data = + data + |> merge_changes(changes) + |> Map.merge(embeds) + |> merge_autogen(autogen) + |> apply_metadata(state, schema_meta) + |> load_each(values, types, adapter) + + Map.put(changeset, :data, data) + end + + defp merge_changes(data, changes) do + changes = + Enum.reduce(changes, changes, fn {key, _value}, changes -> + if Map.has_key?(data, key), do: 
changes, else: Map.delete(changes, key) + end) + + Map.merge(data, changes) + end + + defp merge_autogen(data, autogen) do + Enum.reduce(autogen, data, fn {k, v}, acc -> %{acc | k => v} end) + end + + defp apply_metadata(%{__meta__: meta} = data, state, %{source: source, prefix: prefix}) do + %{data | __meta__: %{meta | state: state, source: source, prefix: prefix}} + end + + defp load_each(struct, [{_, value} | kv], [{key, type} | types], adapter) do + case Ecto.Type.adapter_load(adapter, type, value) do + {:ok, value} -> + load_each(%{struct | key => value}, kv, types, adapter) + :error -> + raise ArgumentError, "cannot load `#{inspect value}` as type #{inspect type} " <> + "for field `#{key}` in schema #{inspect struct.__struct__}" + end + end + defp load_each(struct, [], _types, _adapter) do + struct + end + + defp pop_assocs(changeset, []) do + {changeset, [], []} + end + + defp pop_assocs(%{changes: changes, types: types} = changeset, assocs) do + {changes, parent, child} = + Enum.reduce assocs, {changes, [], []}, fn assoc, {changes, parent, child} -> + case changes do + %{^assoc => value} -> + changes = Map.delete(changes, assoc) + + case types do + %{^assoc => {:assoc, %{relationship: :parent} = refl}} -> + {changes, [{refl, value} | parent], child} + %{^assoc => {:assoc, %{relationship: :child} = refl}} -> + {changes, parent, [{refl, value} | child]} + end + + %{} -> + {changes, parent, child} + end + end + + {%{changeset | changes: changes}, parent, child} + end + + # Don't mind computing options if there are no assocs + defp assoc_opts([], _opts), do: [] + + defp assoc_opts(_assocs, opts) do + Keyword.take(opts, [:timeout, :log, :telemetry_event, :prefix]) + end + + defp process_parents(changeset, user_changeset, assocs, adapter, opts) do + %{changes: changes, valid?: valid?} = changeset + + # Even if the changeset is invalid, we want to run parent callbacks + # to collect feedback. But if all is ok, still return the user changeset. + case Ecto.Association.on_repo_change(changeset, assocs, adapter, opts) do + {:ok, struct} when valid? 
-> + changes = change_parents(changes, struct, assocs) + %{changeset | changes: changes, data: struct} + + {:ok, _} -> + user_changeset + + {:error, changes} -> + %{user_changeset | changes: Map.merge(user_changeset.changes, changes), valid?: false} + end + end + + defp change_parents(changes, struct, assocs) do + Enum.reduce assocs, changes, fn {refl, _}, acc -> + %{field: field, owner_key: owner_key, related_key: related_key} = refl + related = Map.get(struct, field) + value = related && Map.fetch!(related, related_key) + + case Map.fetch(changes, owner_key) do + {:ok, current} when current != value -> + raise ArgumentError, + "cannot change belongs_to association `#{field}` because there is " <> + "already a change setting its foreign key `#{owner_key}` to `#{inspect current}`" + + _ -> + Map.put(acc, owner_key, value) + end + end + end + + defp process_children(changeset, user_changeset, assocs, adapter, opts) do + case Ecto.Association.on_repo_change(changeset, assocs, adapter, opts) do + {:ok, struct} -> + {:ok, struct} + + {:error, changes} -> + changes = Map.merge(user_changeset.changes, changes) + {:error, %{user_changeset | changes: changes, valid?: false}} + end + end + + defp to_delete_assocs(schema) do + for assoc <- schema.__schema__(:associations), + reflection = schema.__schema__(:association, assoc), + match?(%{on_delete: on_delete} when on_delete != :nothing, reflection), + do: reflection + end + + defp autogenerate_id(nil, changes, return_types, return_sources, _adapter) do + {changes, [], return_types, return_sources} + end + + defp autogenerate_id({key, source, type}, changes, return_types, return_sources, adapter) do + cond do + Map.has_key?(changes, key) -> # Set by user + {changes, [], return_types, return_sources} + value = Ecto.Type.adapter_autogenerate(adapter, type) -> # Autogenerated now + {changes, [{source, value}], [{key, type} | return_types], return_sources} + true -> # Autogenerated in storage + {changes, [], [{key, type} | return_types], [source | List.delete(return_sources, source)]} + end + end + + defp dump_changes!(action, changes, schema, extra, dumper, adapter) do + autogen = autogenerate_changes(schema, action, changes) + dumped = + dump_fields!(action, schema, changes, dumper, adapter) ++ + dump_fields!(action, schema, autogen, dumper, adapter) ++ + extra + {dumped, autogen} + end + + defp autogenerate_changes(schema, action, changes) do + autogen_fields = action |> action_to_auto() |> schema.__schema__() + + Enum.flat_map(autogen_fields, fn {fields, {mod, fun, args}} -> + case Enum.reject(fields, &Map.has_key?(changes, &1)) do + [] -> + [] + + fields -> + generated = apply(mod, fun, args) + Enum.map(fields, &{&1, generated}) + end + end) + end + + defp action_to_auto(:insert), do: :autogenerate + defp action_to_auto(:update), do: :autoupdate + + defp add_pk_filter!(filters, struct) do + Enum.reduce Ecto.primary_key!(struct), filters, fn + {_k, nil}, _acc -> + raise Ecto.NoPrimaryKeyValueError, struct: struct + {k, v}, acc -> + Map.put(acc, k, v) + end + end + + defp wrap_in_transaction(adapter, adapter_meta, opts, changeset, assocs, embeds, prepare, fun) do + %{changes: changes} = changeset + changed = &Map.has_key?(changes, &1) + relations_changed? = Enum.any?(assocs, changed) or Enum.any?(embeds, changed) + wrap_in_transaction(adapter, adapter_meta, opts, relations_changed?, prepare, fun) + end + + defp wrap_in_transaction(adapter, adapter_meta, opts, relations_changed?, prepare, fun) do + if (relations_changed? 
or prepare != []) and + function_exported?(adapter, :transaction, 3) and + not adapter.in_transaction?(adapter_meta) do + adapter.transaction(adapter_meta, opts, fn -> + case fun.() do + {:ok, struct} -> struct + {:error, changeset} -> adapter.rollback(adapter_meta, changeset) + end + end) + else + fun.() + end + end + + defp dump_field!(action, schema, field, type, value, adapter) do + case Ecto.Type.adapter_dump(adapter, type, value) do + {:ok, value} -> + value + :error -> + raise Ecto.ChangeError, + "value `#{inspect(value)}` for `#{inspect(schema)}.#{field}` " <> + "in `#{action}` does not match type #{inspect type}" + end + end + + defp dump_fields!(action, schema, kw, dumper, adapter) do + for {field, value} <- kw do + {alias, type} = Map.fetch!(dumper, field) + {alias, dump_field!(action, schema, field, type, value, adapter)} + end + end +end diff --git a/deps/ecto/lib/ecto/repo/supervisor.ex b/deps/ecto/lib/ecto/repo/supervisor.ex new file mode 100644 index 0000000..7533c6b --- /dev/null +++ b/deps/ecto/lib/ecto/repo/supervisor.ex @@ -0,0 +1,214 @@ +defmodule Ecto.Repo.Supervisor do + @moduledoc false + use Supervisor + + @defaults [timeout: 15000, pool_size: 10] + @integer_url_query_params ["timeout", "pool_size"] + + @doc """ + Starts the repo supervisor. + """ + def start_link(repo, otp_app, adapter, opts) do + name = Keyword.get(opts, :name, repo) + sup_opts = if name, do: [name: name], else: [] + Supervisor.start_link(__MODULE__, {name, repo, otp_app, adapter, opts}, sup_opts) + end + + @doc """ + Retrieves the runtime configuration. + """ + def runtime_config(type, repo, otp_app, opts) do + config = Application.get_env(otp_app, repo, []) + config = [otp_app: otp_app] ++ (@defaults |> Keyword.merge(config) |> Keyword.merge(opts)) + config = Keyword.put_new_lazy(config, :telemetry_prefix, fn -> telemetry_prefix(repo) end) + + case repo_init(type, repo, config) do + {:ok, config} -> + {url, config} = Keyword.pop(config, :url) + {:ok, Keyword.merge(config, parse_url(url || ""))} + + :ignore -> + :ignore + end + end + + defp telemetry_prefix(repo) do + repo + |> Module.split() + |> Enum.map(& &1 |> Macro.underscore() |> String.to_atom()) + end + + defp repo_init(type, repo, config) do + if Code.ensure_loaded?(repo) and function_exported?(repo, :init, 2) do + repo.init(type, config) + else + {:ok, config} + end + end + + @doc """ + Retrieves the compile time configuration. + """ + def compile_config(_repo, opts) do + otp_app = Keyword.fetch!(opts, :otp_app) + adapter = opts[:adapter] + + unless adapter do + raise ArgumentError, "missing :adapter option on use Ecto.Repo" + end + + if Code.ensure_compiled(adapter) != {:module, adapter} do + raise ArgumentError, "adapter #{inspect adapter} was not compiled, " <> + "ensure it is correct and it is included as a project dependency" + end + + behaviours = + for {:behaviour, behaviours} <- adapter.__info__(:attributes), + behaviour <- behaviours, + do: behaviour + + unless Ecto.Adapter in behaviours do + raise ArgumentError, + "expected :adapter option given to Ecto.Repo to list Ecto.Adapter as a behaviour" + end + + {otp_app, adapter, behaviours} + end + + @doc """ + Parses an Ecto URL allowed in configuration. 
+ + The format must be: + + "ecto://username:password@hostname:port/database?ssl=true&timeout=1000" + + """ + def parse_url(""), do: [] + + def parse_url(url) when is_binary(url) do + info = URI.parse(url) + + if is_nil(info.host) do + raise Ecto.InvalidURLError, url: url, message: "host is not present" + end + + if is_nil(info.path) or not (info.path =~ ~r"^/([^/])+$") do + raise Ecto.InvalidURLError, url: url, message: "path should be a database name" + end + + destructure [username, password], info.userinfo && String.split(info.userinfo, ":") + "/" <> database = info.path + + url_opts = [ + username: username, + password: password, + database: database, + port: info.port + ] + + url_opts = put_hostname_if_present(url_opts, info.host) + query_opts = parse_uri_query(info) + + for {k, v} <- url_opts ++ query_opts, + not is_nil(v), + do: {k, if(is_binary(v), do: URI.decode(v), else: v)} + end + + defp put_hostname_if_present(keyword, "") do + keyword + end + + defp put_hostname_if_present(keyword, hostname) when is_binary(hostname) do + Keyword.put(keyword, :hostname, hostname) + end + + defp parse_uri_query(%URI{query: nil}), + do: [] + defp parse_uri_query(%URI{query: query} = url) do + query + |> URI.query_decoder() + |> Enum.reduce([], fn + {"ssl", "true"}, acc -> + [{:ssl, true}] ++ acc + + {"ssl", "false"}, acc -> + [{:ssl, false}] ++ acc + + {key, value}, acc when key in @integer_url_query_params -> + [{String.to_atom(key), parse_integer!(key, value, url)}] ++ acc + + {key, value}, acc -> + [{String.to_atom(key), value}] ++ acc + end) + end + + defp parse_integer!(key, value, url) do + case Integer.parse(value) do + {int, ""} -> + int + + _ -> + raise Ecto.InvalidURLError, + url: url, + message: "can not parse value `#{value}` for parameter `#{key}` as an integer" + end + end + + @doc false + def tuplet(name, opts) do + adapter_meta = Ecto.Repo.Registry.lookup(name) + + if opts[:stacktrace] || Map.get(adapter_meta, :stacktrace) do + {:current_stacktrace, stacktrace} = :erlang.process_info(self(), :current_stacktrace) + {adapter_meta, Keyword.put(opts, :stacktrace, stacktrace)} + else + {adapter_meta, opts} + end + end + + ## Callbacks + + @doc false + def init({name, repo, otp_app, adapter, opts}) do + # Normalize name to atom, ignore via/global names + name = if is_atom(name), do: name, else: nil + + case runtime_config(:supervisor, repo, otp_app, opts) do + {:ok, opts} -> + :telemetry.execute( + [:ecto, :repo, :init], + %{system_time: System.system_time()}, + %{repo: repo, opts: opts} + ) + + {:ok, child, meta} = adapter.init([repo: repo] ++ opts) + cache = Ecto.Query.Planner.new_query_cache(name) + meta = Map.merge(meta, %{repo: repo, cache: cache}) + child_spec = wrap_child_spec(child, [name, adapter, meta]) + Supervisor.init([child_spec], strategy: :one_for_one, max_restarts: 0) + + :ignore -> + :ignore + end + end + + def start_child({mod, fun, args}, name, adapter, meta) do + case apply(mod, fun, args) do + {:ok, pid} -> + meta = Map.merge(meta, %{pid: pid, adapter: adapter}) + Ecto.Repo.Registry.associate(self(), name, meta) + {:ok, pid} + + other -> + other + end + end + + defp wrap_child_spec({id, start, restart, shutdown, type, mods}, args) do + {id, {__MODULE__, :start_child, [start | args]}, restart, shutdown, type, mods} + end + + defp wrap_child_spec(%{start: start} = spec, args) do + %{spec | start: {__MODULE__, :start_child, [start | args]}} + end +end diff --git a/deps/ecto/lib/ecto/repo/transaction.ex b/deps/ecto/lib/ecto/repo/transaction.ex new file mode 100644 
index 0000000..a4691c1 --- /dev/null +++ b/deps/ecto/lib/ecto/repo/transaction.ex @@ -0,0 +1,34 @@ +defmodule Ecto.Repo.Transaction do + @moduledoc false + @dialyzer :no_opaque + + def transaction(_repo, _name, fun, {adapter_meta, opts}) when is_function(fun, 0) do + adapter_meta.adapter.transaction(adapter_meta, opts, fun) + end + + def transaction(repo, _name, fun, {adapter_meta, opts}) when is_function(fun, 1) do + adapter_meta.adapter.transaction(adapter_meta, opts, fn -> fun.(repo) end) + end + + def transaction(repo, _name, %Ecto.Multi{} = multi, {adapter_meta, opts}) do + %{adapter: adapter} = adapter_meta + wrap = &adapter.transaction(adapter_meta, opts, &1) + return = &adapter.rollback(adapter_meta, &1) + + case Ecto.Multi.__apply__(multi, repo, wrap, return) do + {:ok, values} -> {:ok, values} + {:error, {key, error_value, values}} -> {:error, key, error_value, values} + {:error, operation} -> raise "operation #{inspect operation} is manually rolling back, which is not supported by Ecto.Multi" + end + end + + def in_transaction?(name) do + %{adapter: adapter} = meta = Ecto.Repo.Registry.lookup(name) + adapter.in_transaction?(meta) + end + + def rollback(name, value) do + %{adapter: adapter} = meta = Ecto.Repo.Registry.lookup(name) + adapter.rollback(meta, value) + end +end diff --git a/deps/ecto/lib/ecto/schema.ex b/deps/ecto/lib/ecto/schema.ex new file mode 100644 index 0000000..b454234 --- /dev/null +++ b/deps/ecto/lib/ecto/schema.ex @@ -0,0 +1,2351 @@ +defmodule Ecto.Schema do + @moduledoc ~S""" + An Ecto schema maps external data into Elixir structs. + + The definition of the schema is possible through two main APIs: + `schema/2` and `embedded_schema/1`. + + `schema/2` is typically used to map data from a persisted source, + usually a database table, into Elixir structs and vice-versa. For + this reason, the first argument of `schema/2` is the source (table) + name. Structs defined with `schema/2` also contain a `__meta__` field + with metadata holding the status of the struct, for example, if it + has been built, loaded or deleted. + + On the other hand, `embedded_schema/1` is used for defining schemas + that are embedded in other schemas or only exist in-memory. For example, + you can use such schemas to receive data from a command line interface + and validate it, without ever persisting it elsewhere. Such structs + do not contain a `__meta__` field, as they are never persisted. + + Besides working as data mappers, `embedded_schema/1` and `schema/2` can + also be used together to decouple how the data is represented in your + applications from the database. Let's see some examples. + + ## Example + + defmodule User do + use Ecto.Schema + + schema "users" do + field :name, :string + field :age, :integer, default: 0 + field :password, :string, redact: true + has_many :posts, Post + end + end + + By default, a schema will automatically generate a primary key which is named + `id` and of type `:integer`. The `field` macro defines a field in the schema + with given name and type. `has_many` associates many posts with the user + schema. Schemas are regular structs and can be created and manipulated directly + using Elixir's struct API: + + iex> user = %User{name: "jane"} + iex> %{user | age: 30} + + However, most commonly, structs are cast, validated and manipulated with the + `Ecto.Changeset` module. + + Note that the name of the database table does not need to correlate to your + module name. 
For example, if you are working with a legacy database, you can + reference the table name when you define your schema: + + defmodule User do + use Ecto.Schema + + schema "legacy_users" do + # ... fields ... + end + end + + Embedded schemas are defined similarly to source-based schemas. For example, + you can use an embedded schema to represent your UI, mapping and validating + its inputs, and then you convert such embedded schema to other schemas that + are persisted to the database: + + defmodule SignUp do + use Ecto.Schema + + embedded_schema do + field :name, :string + field :age, :integer + field :email, :string + field :accepts_conditions, :boolean + end + end + + defmodule Profile do + use Ecto.Schema + + schema "profiles" do + field :name + field :age + belongs_to :account, Account + end + end + + defmodule Account do + use Ecto.Schema + + schema "accounts" do + field :email + end + end + + The `SignUp` schema can be cast and validated with the help of the + `Ecto.Changeset` module, and afterwards, you can copy its data to + the `Profile` and `Account` structs that will be persisted to the + database with the help of `Ecto.Repo`. + + ## Redacting fields + + A field marked with `redact: true` will display a value of `**redacted**` + when inspected in changes inside a `Ecto.Changeset` and be excluded from + inspect on the schema unless the schema module is tagged with + the option `@ecto_derive_inspect_for_redacted_fields false`. + + ## Schema attributes + + Supported attributes for configuring the defined schema. They must + be set after the `use Ecto.Schema` call and before the `schema/2` + definition. + + These attributes are: + + * `@primary_key` - configures the schema primary key. It expects + a tuple `{field_name, type, options}` with the primary key field + name, type (typically `:id` or `:binary_id`, but can be any type) and + options. It also accepts `false` to disable the generation of a primary + key field. Defaults to `{:id, :id, autogenerate: true}`. + + * `@schema_prefix` - configures the schema prefix. Defaults to `nil`, + which generates structs and queries without prefix. When set, the + prefix will be used by every built struct and on queries whenever + the schema is used in a `from` or a `join`. In PostgreSQL, the prefix + is called "SCHEMA" (typically set via Postgres' `search_path`). + In MySQL the prefix points to databases. + + * `@schema_context` - configures the schema context. Defaults to `nil`, + which generates structs and queries without context. Context are not used + by the built-in SQL adapters. + + * `@foreign_key_type` - configures the default foreign key type + used by `belongs_to` associations. It must be set in the same + module that defines the `belongs_to`. Defaults to `:id`; + + * `@timestamps_opts` - configures the default timestamps type + used by `timestamps`. Defaults to `[type: :naive_datetime]`; + + * `@derive` - the same as `@derive` available in `Kernel.defstruct/1` + as the schema defines a struct behind the scenes; + + * `@field_source_mapper` - a function that receives the current field name + and returns the mapping of this field name in the underlying source. + In other words, it is a mechanism to automatically generate the `:source` + option for the `field` macro. It defaults to `fn x -> x end`, where no + field transformation is done; + + The advantage of configuring the schema via those attributes is + that they can be set with a macro to configure application wide + defaults. 
+ + For example, if your database does not support autoincrementing + primary keys and requires something like UUID or a RecordID, you + can configure and use `:binary_id` as your primary key type as follows: + + # Define a module to be used as base + defmodule MyApp.Schema do + defmacro __using__(_) do + quote do + use Ecto.Schema + @primary_key {:id, :binary_id, autogenerate: true} + @foreign_key_type :binary_id + end + end + end + + # Now use MyApp.Schema to define new schemas + defmodule MyApp.Comment do + use MyApp.Schema + + schema "comments" do + belongs_to :post, MyApp.Post + end + end + + Any schemas using `MyApp.Schema` will get the `:id` field with type + `:binary_id` as the primary key. We explain what the `:binary_id` type + entails in the next section. + + The `belongs_to` association on `MyApp.Comment` will also define + a `:post_id` field with `:binary_id` type that references the `:id` + field of the `MyApp.Post` schema. + + ## Primary keys + + Ecto supports two ID types, called `:id` and `:binary_id`, which are + often used as the type for primary keys and associations. + + The `:id` type is used when the primary key is an integer while the + `:binary_id` is used for primary keys in particular binary formats, + which may be `Ecto.UUID` for databases like PostgreSQL and MySQL, + or some specific ObjectID or RecordID often imposed by NoSQL databases. + + In both cases, both types have their semantics specified by the + underlying adapter/database. If you use the `:id` type with + `:autogenerate`, it means the database will be responsible for + auto-generation of the id. This is often the case for primary keys + in relational databases which are auto-incremented. + + There are two ways to define primary keys in Ecto: using the `@primary_key` + module attribute and using `primary_key: true` as option for `field/3` in + your schema definition. They are not mutually exclusive and can be used + together. + + Using `@primary_key` should be preferred for single field primary keys and + sharing primary key definitions between multiple schemas using macros. + Setting `@primary_key` also automatically configures the reference types + for `has_one` and `has_many` associations. + + Ecto also supports composite primary keys, which is where you need to use + `primary_key: true` for the fields in your schema. This usually goes along + with setting `@primary_key false` to disable generation of additional + primary key fields. + + Besides `:id` and `:binary_id`, which are often used by primary + and foreign keys, Ecto provides a huge variety of types to be used + by any field. + + ## Types and casting + + When defining the schema, types need to be given. Types are split + into two categories, primitive types and custom types. 
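For instance, a single schema can mix several of the primitive types documented below (a minimal sketch; the `Event` module and its fields are purely illustrative):

      defmodule Event do
        use Ecto.Schema

        schema "events" do
          field :title, :string
          field :attendees, :integer, default: 0
          field :price, :decimal
          field :metadata, :map
          field :tags, {:array, :string}, default: []
          field :starts_at, :utc_datetime
        end
      end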
+ + ### Primitive types + + The primitive types are: + + Ecto type | Elixir type | Literal syntax in query + :---------------------- | :---------------------- | :--------------------- + `:id` | `integer` | 1, 2, 3 + `:binary_id` | `binary` | `<<int, int, int, ...>>` + `:integer` | `integer` | 1, 2, 3 + `:float` | `float` | 1.0, 2.0, 3.0 + `:boolean` | `boolean` | true, false + `:string` | UTF-8 encoded `string` | "hello" + `:binary` | `binary` | `<<int, int, int, ...>>` + `{:array, inner_type}` | `list` | `[value, value, value, ...]` + `:map` | `map` | + `{:map, inner_type}` | `map` | + `:decimal` | [`Decimal`](https://github.com/ericmj/decimal) | + `:date` | `Date` | + `:time` | `Time` | + `:time_usec` | `Time` | + `:naive_datetime` | `NaiveDateTime` | + `:naive_datetime_usec` | `NaiveDateTime` | + `:utc_datetime` | `DateTime` | + `:utc_datetime_usec` | `DateTime` | + + **Notes:** + + * When using database migrations provided by "Ecto SQL", you can pass + your Ecto type as the column type. However, note the same Ecto type + may support multiple database types. For example, all of `:varchar`, + `:text`, `:bytea`, etc. translate to Ecto's `:string`. Similarly, + Ecto's `:decimal` can be used for `:numeric` and other database + types. For more information, see [all migration types](https://hexdocs.pm/ecto_sql/Ecto.Migration.html#module-field-types). + + * For the `{:array, inner_type}` and `{:map, inner_type}` type, + replace `inner_type` with one of the valid types, such as `:string`. + + * For the `:decimal` type, `+Infinity`, `-Infinity`, and `NaN` values + are not supported, even though the `Decimal` library handles them. + To support them, you can create a custom type. + + * For calendar types with and without microseconds, the precision is + enforced when persisting to the DB. For example, casting `~T[09:00:00]` + as `:time_usec` will succeed and result in `~T[09:00:00.000000]`, but + persisting a type without microseconds as `:time_usec` will fail. + Similarly, casting `~T[09:00:00.000000]` as `:time` will succeed, but + persisting will not. This is the same behaviour as seen in other types, + where casting has to be done explicitly and is never performed + implicitly when loading from or dumping to the database. + + ### Custom types + + Besides providing primitive types, Ecto allows custom types to be + implemented by developers, allowing Ecto behaviour to be extended. + + A custom type is a module that implements one of the `Ecto.Type` + or `Ecto.ParameterizedType` behaviours. By default, Ecto provides + the following custom types: + + Custom type | Database type | Elixir type + :---------------------- | :---------------------- | :--------------------- + `Ecto.UUID` | `:uuid` (as a binary) | `string()` (as a UUID) + `Ecto.Enum` | `:string` | `atom()` + + Finally, schemas can also have virtual fields by passing the + `virtual: true` option. These fields are not persisted to the database + and can optionally not be type checked by declaring type `:any`. + + ### The datetime types + + Four different datetime primitive types are available: + + * `naive_datetime` - has a precision of seconds and casts values + to Elixir's `NaiveDateTime` struct which has no timezone information. + + * `naive_datetime_usec` - has a default precision of microseconds and + also casts values to `NaiveDateTime` with no timezone information. + + * `utc_datetime` - has a precision of seconds and casts values to + Elixir's `DateTime` struct and expects the time zone to be set to UTC. 
+ + * `utc_datetime_usec` has a default precision of microseconds and also + casts values to `DateTime` expecting the time zone be set to UTC. + + All of those types are represented by the same timestamp/datetime in the + underlying data storage, the difference are in their precision and how the + data is loaded into Elixir. + + Having different precisions allows developers to choose a type that will + be compatible with the database and your project's precision requirements. + For example, some older versions of MySQL do not support microseconds in + datetime fields. + + When choosing what datetime type to work with, keep in mind that Elixir + functions like `NaiveDateTime.utc_now/0` have a default precision of 6. + Casting a value with a precision greater than 0 to a non-`usec` type will + truncate all microseconds and set the precision to 0. + + ### The map type + + The map type allows developers to store an Elixir map directly + in the database: + + # In your migration + create table(:users) do + add :data, :map + end + + # In your schema + field :data, :map + + # Now in your code + user = Repo.insert! %User{data: %{"foo" => "bar"}} + + Keep in mind that we advise the map keys to be strings or integers + instead of atoms. Atoms may be accepted depending on how maps are + serialized but the database will always convert atom keys to strings + due to security reasons. + + In order to support maps, different databases may employ different + techniques. For example, PostgreSQL will store those values in jsonb + fields, allowing you to just query parts of it. MSSQL, on + the other hand, does not yet provide a JSON type, so the value will be + stored in a text field. + + For maps to work in such databases, Ecto will need a JSON library. + By default Ecto will use [Jason](https://github.com/michalmuskala/jason) + which needs to be added to your deps in `mix.exs`: + + {:jason, "~> 1.0"} + + You can however configure the adapter to use another library. For example, + if using Postgres: + + config :postgrex, :json_library, YourLibraryOfChoice + + Or if using MySQL: + + config :mariaex, :json_library, YourLibraryOfChoice + + If changing the JSON library, remember to recompile the adapter afterwards + by cleaning the current build: + + mix deps.clean --build postgrex + + ### Casting + + When directly manipulating the struct, it is the responsibility of + the developer to ensure the field values have the proper type. For + example, you can create a user struct with an invalid value + for `age`: + + iex> user = %User{age: "0"} + iex> user.age + "0" + + However, if you attempt to persist the struct above, an error will + be raised since Ecto validates the types when sending them to the + adapter/database. + + Therefore, when working with and manipulating external data, it is + recommended to use `Ecto.Changeset`'s that are able to filter + and properly cast external data: + + changeset = Ecto.Changeset.cast(%User{}, %{"age" => "0"}, [:age]) + user = Repo.insert!(changeset) + + **You can use Ecto schemas and changesets to cast and validate any kind + of data, regardless if the data will be persisted to an Ecto repository + or not**. 
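As a concrete illustration of that last point, an embedded schema plus a changeset can validate data that never touches a repository (a minimal sketch; the `ContactForm` module and its fields are illustrative only):

      defmodule ContactForm do
        use Ecto.Schema
        import Ecto.Changeset

        embedded_schema do
          field :email, :string
          field :message, :string
        end

        def changeset(form, params) do
          form
          |> cast(params, [:email, :message])
          |> validate_required([:email, :message])
          |> validate_format(:email, ~r/@/)
        end
      end

      # No repository involved; the data only ever lives in memory
      changeset = ContactForm.changeset(%ContactForm{}, %{"email" => "jane@example.com", "message" => "hi"})
      changeset.valid? #=> true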
+ + ## Reflection + + Any schema module will generate the `__schema__` function that can be + used for runtime introspection of the schema: + + * `__schema__(:source)` - Returns the source as given to `schema/2`; + * `__schema__(:prefix)` - Returns optional prefix for source provided by + `@schema_prefix` schema attribute; + * `__schema__(:primary_key)` - Returns a list of primary key fields (empty if there is none); + + * `__schema__(:fields)` - Returns a list of all non-virtual field names; + * `__schema__(:virtual_fields)` - Returns a list of all virtual field names; + * `__schema__(:field_source, field)` - Returns the alias of the given field; + + * `__schema__(:type, field)` - Returns the type of the given non-virtual field; + * `__schema__(:virtual_type, field)` - Returns the type of the given virtual field; + + * `__schema__(:associations)` - Returns a list of all association field names; + * `__schema__(:association, assoc)` - Returns the association reflection of the given assoc; + + * `__schema__(:embeds)` - Returns a list of all embedded field names; + * `__schema__(:embed, embed)` - Returns the embedding reflection of the given embed; + + * `__schema__(:read_after_writes)` - Non-virtual fields that must be read back + from the database after every write (insert or update); + + * `__schema__(:autogenerate_id)` - Primary key that is auto generated on insert; + + * `__schema__(:redact_fields)` - Returns a list of redacted field names; + + Furthermore, both `__struct__` and `__changeset__` functions are + defined so structs and changeset functionalities are available. + + ## Working with typespecs + + Generating typespecs for schemas is out of the scope of `Ecto.Schema`. + + In order to be able to use types such as `User.t()`, `t/0` has to be defined manually: + + defmodule User do + use Ecto.Schema + + @type t :: %__MODULE__{ + name: String.t(), + age: non_neg_integer() + } + + # ... schema ... + end + + Defining the type of each field is not mandatory, but it is preferable. 
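To close, here is what some of the reflection functions listed above return for the `User` schema defined at the beginning of this page (a sketch of the expected output for that exact schema):

      iex> User.__schema__(:source)
      "users"
      iex> User.__schema__(:fields)
      [:id, :name, :age, :password]
      iex> User.__schema__(:type, :age)
      :integer
      iex> User.__schema__(:associations)
      [:posts]
      iex> User.__schema__(:redact_fields)
      [:password]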
+ """ + + alias Ecto.Schema.Metadata + + @type source :: String.t + @type prefix :: String.t | nil + @type schema :: %{optional(atom) => any, __struct__: atom, __meta__: Metadata.t} + @type embedded_schema :: %{optional(atom) => any, __struct__: atom} + @type t :: schema | embedded_schema + @type belongs_to(t) :: t | Ecto.Association.NotLoaded.t() + @type has_one(t) :: t | Ecto.Association.NotLoaded.t() + @type has_many(t) :: [t] | Ecto.Association.NotLoaded.t() + @type many_to_many(t) :: [t] | Ecto.Association.NotLoaded.t() + @type embeds_one(t) :: t + @type embeds_many(t) :: [t] + + @doc false + defmacro __using__(_) do + quote do + import Ecto.Schema, only: [schema: 2, embedded_schema: 1] + + @primary_key nil + @timestamps_opts [] + @foreign_key_type :id + @schema_prefix nil + @schema_context nil + @field_source_mapper fn x -> x end + + Module.register_attribute(__MODULE__, :ecto_primary_keys, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_fields, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_virtual_fields, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_query_fields, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_field_sources, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_assocs, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_embeds, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_raw, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_autogenerate, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_autoupdate, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_redact_fields, accumulate: true) + Module.put_attribute(__MODULE__, :ecto_derive_inspect_for_redacted_fields, true) + Module.put_attribute(__MODULE__, :ecto_autogenerate_id, nil) + end + end + + @field_opts [ + :default, + :source, + :autogenerate, + :read_after_writes, + :virtual, + :primary_key, + :load_in_query, + :redact, + :foreign_key, + :on_replace, + :defaults, + :type, + :where, + :references, + :skip_default_validation + ] + + @doc """ + Defines an embedded schema with the given field definitions. + + An embedded schema is either embedded into another + schema or kept exclusively in memory. For this reason, + an embedded schema does not require a source name and + it does not include a metadata field. + + Embedded schemas by default set the primary key type + to `:binary_id` but such can be configured with the + `@primary_key` attribute. + """ + defmacro embedded_schema([do: block]) do + schema(__CALLER__, nil, false, :binary_id, block) + end + + @doc """ + Defines a schema struct with a source name and field definitions. + + An additional field called `__meta__` is added to the struct for storing + internal Ecto state. This field always has a `Ecto.Schema.Metadata` struct + as value and can be manipulated with the `Ecto.put_meta/2` function. + """ + defmacro schema(source, [do: block]) do + schema(__CALLER__, source, true, :id, block) + end + + defp schema(caller, source, meta?, type, block) do + prelude = + quote do + if line = Module.get_attribute(__MODULE__, :ecto_schema_defined) do + raise "schema already defined for #{inspect(__MODULE__)} on line #{line}" + end + + @ecto_schema_defined unquote(caller.line) + + @after_compile Ecto.Schema + Module.register_attribute(__MODULE__, :ecto_changeset_fields, accumulate: true) + Module.register_attribute(__MODULE__, :ecto_struct_fields, accumulate: true) + + meta? = unquote(meta?) 
+ source = unquote(source) + prefix = @schema_prefix + context = @schema_context + + # Those module attributes are accessed only dynamically + # so we explicitly reference them here to avoid warnings. + _ = @foreign_key_type + _ = @timestamps_opts + + if meta? do + unless is_binary(source) do + raise ArgumentError, "schema source must be a string, got: #{inspect source}" + end + + meta = %Metadata{ + state: :built, + source: source, + prefix: prefix, + context: context, + schema: __MODULE__ + } + + Module.put_attribute(__MODULE__, :ecto_struct_fields, {:__meta__, meta}) + end + + if @primary_key == nil do + @primary_key {:id, unquote(type), autogenerate: true} + end + + primary_key_fields = + case @primary_key do + false -> + [] + {name, type, opts} -> + Ecto.Schema.__field__(__MODULE__, name, type, [primary_key: true] ++ opts) + [name] + other -> + raise ArgumentError, "@primary_key must be false or {name, type, opts}" + end + + try do + import Ecto.Schema + unquote(block) + after + :ok + end + end + + postlude = + quote unquote: false do + primary_key_fields = @ecto_primary_keys |> Enum.reverse + autogenerate = @ecto_autogenerate |> Enum.reverse + autoupdate = @ecto_autoupdate |> Enum.reverse + fields = @ecto_fields |> Enum.reverse + query_fields = @ecto_query_fields |> Enum.reverse + virtual_fields = @ecto_virtual_fields |> Enum.reverse + field_sources = @ecto_field_sources |> Enum.reverse + assocs = @ecto_assocs |> Enum.reverse + embeds = @ecto_embeds |> Enum.reverse + redacted_fields = @ecto_redact_fields + loaded = Ecto.Schema.__loaded__(__MODULE__, @ecto_struct_fields) + + if redacted_fields != [] and not List.keymember?(@derive, Inspect, 0) and + @ecto_derive_inspect_for_redacted_fields do + @derive {Inspect, except: @ecto_redact_fields} + end + + defstruct Enum.reverse(@ecto_struct_fields) + + def __changeset__ do + %{unquote_splicing(Macro.escape(@ecto_changeset_fields))} + end + + def __schema__(:prefix), do: unquote(prefix) + def __schema__(:source), do: unquote(source) + def __schema__(:fields), do: unquote(Enum.map(fields, &elem(&1, 0))) + def __schema__(:query_fields), do: unquote(Enum.map(query_fields, &elem(&1, 0))) + def __schema__(:primary_key), do: unquote(primary_key_fields) + def __schema__(:hash), do: unquote(:erlang.phash2({primary_key_fields, query_fields})) + def __schema__(:read_after_writes), do: unquote(Enum.reverse(@ecto_raw)) + def __schema__(:autogenerate_id), do: unquote(Macro.escape(@ecto_autogenerate_id)) + def __schema__(:autogenerate), do: unquote(Macro.escape(autogenerate)) + def __schema__(:autoupdate), do: unquote(Macro.escape(autoupdate)) + def __schema__(:loaded), do: unquote(Macro.escape(loaded)) + def __schema__(:redact_fields), do: unquote(redacted_fields) + def __schema__(:virtual_fields), do: unquote(Enum.map(virtual_fields, &elem(&1, 0))) + + def __schema__(:query) do + %Ecto.Query{ + from: %Ecto.Query.FromExpr{ + source: {unquote(source), __MODULE__}, + prefix: unquote(prefix) + } + } + end + + for clauses <- Ecto.Schema.__schema__(fields, field_sources, assocs, embeds, virtual_fields), + {args, body} <- clauses do + def __schema__(unquote_splicing(args)), do: unquote(body) + end + end + + quote do + unquote(prelude) + unquote(postlude) + end + end + + ## API + + @doc """ + Defines a field on the schema with given name and type. + + The field name will be used as is to read and write to the database + by all of the built-in adapters unless overridden with the `:source` + option. 
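For example, fields can be mapped onto differently named columns of an existing table via `:source` (an illustrative sketch; the table and column names are made up):

      schema "users" do
        field :email, :string, source: :email_address
        field :admin, :boolean, source: :is_admin, default: false
      end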
+ + ## Options + + * `:default` - Sets the default value on the schema and the struct. + + The default value is calculated at compilation time, so don't use + expressions like `DateTime.utc_now` or `Ecto.UUID.generate` as + they would then be the same for all records: in this scenario you can use + the `:autogenerate` option to generate at insertion time. + + The default value is validated against the field's type at compilation time + and it will raise an ArgumentError if there is a type mismatch. If you cannot + infer the field's type at compilation time, you can use the + `:skip_default_validation` option on the field to skip validations. + + Once a default value is set, if you send changes to the changeset that + contains the same value defined as default, validations will not be performed + since there are no changes after all. + + * `:source` - Defines the name that is to be used in database for this field. + This is useful when attaching to an existing database. The value should be + an atom. + + * `:autogenerate` - a `{module, function, args}` tuple for a function + to call to generate the field value before insertion if value is not set. + A shorthand value of `true` is equivalent to `{type, :autogenerate, []}`. + + * `:read_after_writes` - When true, the field is always read back + from the database after insert and updates. + + For relational databases, this means the RETURNING option of those + statements is used. For this reason, MySQL does not support this + option and will raise an error if a schema is inserted/updated with + read after writes fields. + + * `:virtual` - When true, the field is not persisted to the database. + Notice virtual fields do not support `:autogenerate` nor + `:read_after_writes`. + + * `:primary_key` - When true, the field is used as part of the + composite primary key. + + * `:load_in_query` - When false, the field will not be loaded when + selecting the whole struct in a query, such as `from p in Post, select: p`. + Defaults to `true`. + + * `:redact` - When true, it will display a value of `**redacted**` + when inspected in changes inside a `Ecto.Changeset` and be excluded + from inspect on the schema. Defaults to `false`. + + * `:skip_default_validation` - When true, it will skip the type validation + step at compile time. + + """ + defmacro field(name, type \\ :string, opts \\ []) do + quote do + Ecto.Schema.__field__(__MODULE__, unquote(name), unquote(type), unquote(opts)) + end + end + + @doc """ + Generates `:inserted_at` and `:updated_at` timestamp fields. + + The fields generated by this macro will automatically be set to + the current time when inserting and updating values in a repository. + + ## Options + + * `:inserted_at` - the Ecto schema name of the field for insertion times or `false` + * `:updated_at` - the Ecto schema name of the field for update times or `false` + * `:inserted_at_source` - the name of the database column for insertion times or `false` + * `:updated_at_source` - the name of the database column for update times or `false` + * `:type` - the timestamps type, defaults to `:naive_datetime`. + * `:autogenerate` - a module-function-args tuple used for generating + both `inserted_at` and `updated_at` timestamps + + All options can be pre-configured by setting `@timestamps_opts`. 
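For example, a schema can opt into microsecond UTC timestamps and rename the insertion field (a minimal sketch; the table and field names are illustrative):

      schema "posts" do
        field :title, :string

        # store both timestamps as `:utc_datetime_usec` and name the
        # insertion field `:created_at` instead of `:inserted_at`
        timestamps(type: :utc_datetime_usec, inserted_at: :created_at)
      end

The same options can also be set once per module:

      @timestamps_opts [type: :utc_datetime_usec]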
+ """ + defmacro timestamps(opts \\ []) do + quote bind_quoted: binding() do + timestamps = Keyword.merge(@timestamps_opts, opts) + + type = Keyword.get(timestamps, :type, :naive_datetime) + autogen = timestamps[:autogenerate] || {Ecto.Schema, :__timestamps__, [type]} + + inserted_at = Keyword.get(timestamps, :inserted_at, :inserted_at) + updated_at = Keyword.get(timestamps, :updated_at, :updated_at) + + if inserted_at do + opts = if source = timestamps[:inserted_at_source], do: [source: source], else: [] + Ecto.Schema.field(inserted_at, type, opts) + end + + if updated_at do + opts = if source = timestamps[:updated_at_source], do: [source: source], else: [] + Ecto.Schema.field(updated_at, type, opts) + Module.put_attribute(__MODULE__, :ecto_autoupdate, {[updated_at], autogen}) + end + + with [_ | _] = fields <- Enum.filter([inserted_at, updated_at], & &1) do + Module.put_attribute(__MODULE__, :ecto_autogenerate, {fields, autogen}) + end + + :ok + end + end + + @doc ~S""" + Indicates a one-to-many association with another schema. + + The current schema has zero or more records of the other schema. The other + schema often has a `belongs_to` field with the reverse association. + + ## Options + + * `:foreign_key` - Sets the foreign key, this should map to a field on the + other schema, defaults to the underscored name of the current schema + suffixed by `_id` + + * `:references` - Sets the key on the current schema to be used for the + association, defaults to the primary key on the schema + + * `:through` - Allow this association to be defined in terms of existing + associations. Read the section on `:through` associations for more info + + * `:on_delete` - The action taken on associations when parent record + is deleted. May be `:nothing` (default), `:nilify_all` and `:delete_all`. + Using this option is DISCOURAGED for most relational databases. Instead, + in your migration, set `references(:parent_id, on_delete: :delete_all)`. + Opposite to the migration option, this option cannot guarantee integrity + and it is only triggered for `c:Ecto.Repo.delete/2` (and not on + `c:Ecto.Repo.delete_all/2`) and it never cascades. If posts has many comments, + which has many tags, and you delete a post, only comments will be deleted. + If your database does not support references, cascading can be manually + implemented by using `Ecto.Multi` or `Ecto.Changeset.prepare_changes/2`. + + * `:on_replace` - The action taken on associations when the record is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, `:nilify`, `:delete` or + `:delete_if_exists`. See `Ecto.Changeset`'s section about `:on_replace` for + more info. + + * `:defaults` - Default values to use when building the association. + It may be a keyword list of options that override the association schema + or a atom/`{module, function, args}` that receives the struct and the owner as + arguments. For example, if you set `Post.has_many :comments, defaults: [public: true]`, + then when using `Ecto.build_assoc(post, :comments)`, the comment will have + `comment.public == true`. Alternatively, you can set it to + `Post.has_many :comments, defaults: :update_comment`, which will invoke + `Post.update_comment(comment, post)`, or set it to a MFA tuple such as + `{Mod, fun, [arg3, arg4]}`, which will invoke `Mod.fun(comment, post, arg3, arg4)` + + * `:where` - A filter for the association. See "Filtering associations" below. + It does not apply to `:through` associations. 
+ + * `:preload_order` - Sets the default `order_by` of the association. + It is used when the association is preloaded. + For example, if you set `Post.has_many :comments, preload_order: [asc: :content]`, + whenever the `:comments` associations is preloaded, + the comments will be order by the `:content` field. + See `Ecto.Query.order_by/3` for more examples. + + ## Examples + + defmodule Post do + use Ecto.Schema + schema "posts" do + has_many :comments, Comment + end + end + + # Get all comments for a given post + post = Repo.get(Post, 42) + comments = Repo.all assoc(post, :comments) + + # The comments can come preloaded on the post struct + [post] = Repo.all(from(p in Post, where: p.id == 42, preload: :comments)) + post.comments #=> [%Comment{...}, ...] + + `has_many` can be used to define hierarchical relationships within a single + schema, for example threaded comments. + + defmodule Comment do + use Ecto.Schema + schema "comments" do + field :content, :string + field :parent_id, :integer + belongs_to :parent, Comment, foreign_key: :parent_id, references: :id, define_field: false + has_many :children, Comment, foreign_key: :parent_id, references: :id + end + end + + ## Filtering associations + + It is possible to specify a `:where` option that will filter the records + returned by the association. Querying, joining or preloading the association + will use the given conditions as shown next: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + has_many :public_comments, Comment, + where: [public: true] + end + end + + The `:where` option expects a keyword list where the key is an atom + representing the field and the value is either: + + * `nil` - which specifies the field must be nil + * `{:not, nil}` - which specifies the field must not be nil + * `{:in, list}` - which specifies the field must be one of the values in a list + * `{:fragment, expr}` - which specifies a fragment string as the filter + (see `Ecto.Query.API.fragment/1`) with the field's value given to it + as the only argument + * or any other value which the field is compared directly against + + Note the values above are distinctly different from the values you + would pass to `where` when building a query. For example, if you + attempt to build a query such as + + from Post, where: [id: nil] + + it will emit an error. This is because queries can be built dynamically, + and therefore passing `nil` can lead to security errors. However, the + `:where` values for an association are given at compile-time, which is + less dynamic and cannot leverage the full power of Ecto queries, which + explains why they have different APIs. + + **Important!** Please use this feature only when strictly necessary, + otherwise it is very easy to end-up with large schemas with dozens of + different associations polluting your schema and affecting your + application performance. For instance, if you are using associations + only for different querying purposes, then it is preferable to build + and compose queries. For instance, instead of having two associations, + one for comments and another for deleted comments, you might have + a single comments association and filter it instead: + + posts + |> Ecto.assoc(:comments) + |> Comment.deleted() + + Or when preloading: + + from posts, preload: [comments: ^Comment.deleted()] + + ## has_many/has_one :through + + Ecto also supports defining associations in terms of other associations + via the `:through` option. 
Let's see an example: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + has_many :comments, Comment + has_one :permalink, Permalink + + # In the has_many :through example below, the `:comments` + # in the list [:comments, :author] refers to the + # `has_many :comments` in the Post own schema and the + # `:author` refers to the `belongs_to :author` of the + # Comment's schema (the module below). + # (see the description below for more details) + has_many :comments_authors, through: [:comments, :author] + + # Specify the association with custom source + has_many :tags, {"posts_tags", Tag} + end + end + + defmodule Comment do + use Ecto.Schema + + schema "comments" do + belongs_to :author, Author + belongs_to :post, Post + has_one :post_permalink, through: [:post, :permalink] + end + end + + In the example above, we have defined a `has_many :through` association + named `:comments_authors`. A `:through` association always expects a list + and the first element of the list must be a previously defined association + in the current module. For example, `:comments_authors` first points to + `:comments` in the same module (Post), which then points to `:author` in + the next schema, `Comment`. + + This `:through` association will return all authors for all comments + that belongs to that post: + + # Get all comments authors for a given post + post = Repo.get(Post, 42) + authors = Repo.all assoc(post, :comments_authors) + + `:through` associations can also be preloaded. In such cases, not only + the `:through` association is preloaded but all intermediate steps are + preloaded too: + + [post] = Repo.all(from(p in Post, where: p.id == 42, preload: :comments_authors)) + post.comments_authors #=> [%Author{...}, ...] + + # The comments for each post will be preloaded too + post.comments #=> [%Comment{...}, ...] + + # And the author for each comment too + hd(post.comments).author #=> %Author{...} + + When the `:through` association is expected to return one or zero items, + `has_one :through` should be used instead, as in the example at the beginning + of this section: + + # How we defined the association above + has_one :post_permalink, through: [:post, :permalink] + + # Get a preloaded comment + [comment] = Repo.all(Comment) |> Repo.preload(:post_permalink) + comment.post_permalink #=> %Permalink{...} + + Note `:through` associations are read-only. For example, you cannot use + `Ecto.Changeset.cast_assoc/3` to modify through associations. + """ + defmacro has_many(name, queryable, opts \\ []) do + queryable = expand_alias(queryable, __CALLER__) + quote do + Ecto.Schema.__has_many__(__MODULE__, unquote(name), unquote(queryable), unquote(opts)) + end + end + + @doc ~S""" + Indicates a one-to-one association with another schema. + + The current schema has zero or one records of the other schema. The other + schema often has a `belongs_to` field with the reverse association. + + ## Options + + * `:foreign_key` - Sets the foreign key, this should map to a field on the + other schema, defaults to the underscored name of the current module + suffixed by `_id` + + * `:references` - Sets the key on the current schema to be used for the + association, defaults to the primary key on the schema + + * `:through` - If this association must be defined in terms of existing + associations. Read the section in `has_many/3` for more information + + * `:on_delete` - The action taken on associations when parent record + is deleted. May be `:nothing` (default), `:nilify_all` and `:delete_all`. 
+ Using this option is DISCOURAGED for most relational databases. Instead, + in your migration, set `references(:parent_id, on_delete: :delete_all)`. + Opposite to the migration option, this option cannot guarantee integrity + and it is only triggered for `c:Ecto.Repo.delete/2` (and not on + `c:Ecto.Repo.delete_all/2`) and it never cascades. If posts has many comments, + which has many tags, and you delete a post, only comments will be deleted. + If your database does not support references, cascading can be manually + implemented by using `Ecto.Multi` or `Ecto.Changeset.prepare_changes/2` + + * `:on_replace` - The action taken on associations when the record is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, `:nilify`, `:update`, or + `:delete`. See `Ecto.Changeset`'s section on related data for more info. + + * `:defaults` - Default values to use when building the association. + It may be a keyword list of options that override the association schema + or as a atom/`{module, function, args}` that receives the struct and the + owner as arguments. For example, if you set `Post.has_one :banner, defaults: [public: true]`, + then when using `Ecto.build_assoc(post, :banner)`, the banner will have + `banner.public == true`. Alternatively, you can set it to + `Post.has_one :banner, defaults: :update_banner`, which will invoke + `Post.update_banner(banner, post)`, or set it to a MFA tuple such as + `{Mod, fun, [arg3, arg4]}`, which will invoke `Mod.fun(banner, post, arg3, arg4)` + + * `:where` - A filter for the association. See "Filtering associations" + in `has_many/3`. It does not apply to `:through` associations. + + ## Examples + + defmodule Post do + use Ecto.Schema + + schema "posts" do + has_one :permalink, Permalink + + # Specify the association with custom source + has_one :category, {"posts_categories", Category} + end + end + + # The permalink can come preloaded on the post struct + [post] = Repo.all(from(p in Post, where: p.id == 42, preload: :permalink)) + post.permalink #=> %Permalink{...} + """ + defmacro has_one(name, queryable, opts \\ []) do + queryable = expand_alias(queryable, __CALLER__) + quote do + Ecto.Schema.__has_one__(__MODULE__, unquote(name), unquote(queryable), unquote(opts)) + end + end + + @doc ~S""" + Indicates a one-to-one or many-to-one association with another schema. + + The current schema belongs to zero or one records of the other schema. The other + schema often has a `has_one` or a `has_many` field with the reverse association. + + You should use `belongs_to` in the table that contains the foreign key. Imagine + a company <-> employee relationship. If the employee contains the `company_id` in + the underlying database table, we say the employee belongs to company. + + In fact, when you invoke this macro, a field with the name of foreign key is + automatically defined in the schema for you. + + ## Options + + * `:foreign_key` - Sets the foreign key field name, defaults to the name + of the association suffixed by `_id`. For example, `belongs_to :company` + will define foreign key of `:company_id`. The associated `has_one` or `has_many` + field in the other schema should also have its `:foreign_key` option set + with the same value. 
+ + * `:references` - Sets the key on the other schema to be used for the + association, defaults to: `:id` + + * `:define_field` - When false, does not automatically define a `:foreign_key` + field, implying the user is defining the field manually elsewhere + + * `:type` - Sets the type of automatically defined `:foreign_key`. + Defaults to: `:integer` and can be set per schema via `@foreign_key_type` + + * `:on_replace` - The action taken on associations when the record is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, `:nilify`, `:update`, or `:delete`. + See `Ecto.Changeset`'s section on related data for more info. + + * `:defaults` - Default values to use when building the association. + It may be a keyword list of options that override the association schema + or a atom/`{module, function, args}` that receives the struct and the owner as + arguments. For example, if you set `Comment.belongs_to :post, defaults: [public: true]`, + then when using `Ecto.build_assoc(comment, :post)`, the post will have + `post.public == true`. Alternatively, you can set it to + `Comment.belongs_to :post, defaults: :update_post`, which will invoke + `Comment.update_post(post, comment)`, or set it to a MFA tuple such as + `{Mod, fun, [arg3, arg4]}`, which will invoke `Mod.fun(post, comment, arg3, arg4)` + + * `:primary_key` - If the underlying belongs_to field is a primary key + + * `:source` - Defines the name that is to be used in database for this field + + * `:where` - A filter for the association. See "Filtering associations" + in `has_many/3`. + + ## Examples + + defmodule Comment do + use Ecto.Schema + + schema "comments" do + belongs_to :post, Post + end + end + + # The post can come preloaded on the comment record + [comment] = Repo.all(from(c in Comment, where: c.id == 42, preload: :post)) + comment.post #=> %Post{...} + + If you need custom options on the underlying field, you can define the + field explicitly and then pass `define_field: false` to `belongs_to`: + + defmodule Comment do + use Ecto.Schema + + schema "comments" do + field :post_id, :integer, ... # custom options + belongs_to :post, Post, define_field: false + end + end + + ## Polymorphic associations + + One common use case for belongs to associations is to handle + polymorphism. For example, imagine you have defined a Comment + schema and you wish to use it for commenting on both tasks and + posts. + + Some abstractions would force you to define some sort of + polymorphic association with two fields in your database: + + * commentable_type + * commentable_id + + The problem with this approach is that it breaks references in + the database. You can't use foreign keys and it is very inefficient, + both in terms of query time and storage. + + In Ecto, we have three ways to solve this issue. The simplest + is to define multiple fields in the Comment schema, one for each + association: + + * task_id + * post_id + + Unless you have dozens of columns, this is simpler for the developer, + more DB friendly and more efficient in all aspects. + + Alternatively, because Ecto does not tie a schema to a given table, + we can use separate tables for each association. Let's start over + and define a new Comment schema: + + defmodule Comment do + use Ecto.Schema + + schema "abstract table: comments" do + # This will be used by associations on each "concrete" table + field :assoc_id, :integer + end + end + + Notice we have changed the table name to "abstract table: comments". 
+ You can choose whatever name you want, the point here is that this + particular table will never exist. + + Now in your Post and Task schemas: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + has_many :comments, {"posts_comments", Comment}, foreign_key: :assoc_id + end + end + + defmodule Task do + use Ecto.Schema + + schema "tasks" do + has_many :comments, {"tasks_comments", Comment}, foreign_key: :assoc_id + end + end + + Now each association uses its own specific table, "posts_comments" + and "tasks_comments", which must be created on migrations. The + advantage of this approach is that we never store unrelated data + together, also ensuring we keep database references fast and correct. + + When using this technique, the only limitation is that you cannot + build comments directly. For example, the command below + + Repo.insert!(%Comment{}) + + will attempt to use the abstract table. Instead, one should use + + Repo.insert!(build_assoc(post, :comments)) + + leveraging the `Ecto.build_assoc/3` function. You can also + use `Ecto.assoc/2` or pass a tuple in the query syntax + to easily retrieve associated comments to a given post or + task: + + # Fetch all comments associated with the given task + Repo.all(Ecto.assoc(task, :comments)) + + Or all comments in a given table: + + Repo.all from(c in {"posts_comments", Comment}), ...) + + The third and final option is to use `many_to_many/3` to + define the relationships between the resources. In this case, + the comments table won't have the foreign key, instead there + is an intermediary table responsible for associating the entries: + + defmodule Comment do + use Ecto.Schema + schema "comments" do + # ... + end + end + + In your posts and tasks: + + defmodule Post do + use Ecto.Schema + + schema "posts" do + many_to_many :comments, Comment, join_through: "posts_comments" + end + end + + defmodule Task do + use Ecto.Schema + + schema "tasks" do + many_to_many :comments, Comment, join_through: "tasks_comments" + end + end + + See `many_to_many/3` for more information on this particular approach. + """ + defmacro belongs_to(name, queryable, opts \\ []) do + queryable = expand_alias(queryable, __CALLER__) + quote do + Ecto.Schema.__belongs_to__(__MODULE__, unquote(name), unquote(queryable), unquote(opts)) + end + end + + @doc ~S""" + Indicates a many-to-many association with another schema. + + The association happens through a join schema or source, containing + foreign keys to the associated schemas. For example, the association + below: + + # from MyApp.Post + many_to_many :tags, MyApp.Tag, join_through: "posts_tags" + + is backed by relational databases through a join table as follows: + + [Post] <-> [posts_tags] <-> [Tag] + id <-- post_id + tag_id --> id + + More information on the migration for creating such a schema is shown + below. + + ## Options + + * `:join_through` - Specifies the source of the associated data. + It may be a string, like "posts_tags", representing the + underlying storage table or an atom, like `MyApp.PostTag`, + representing a schema. This option is required. + + * `:join_keys` - Specifies how the schemas are associated. It + expects a keyword list with two entries, the first being how + the join table should reach the current schema and the second + how the join table should reach the associated schema. In the + example above, it defaults to: `[post_id: :id, tag_id: :id]`. + The keys are inflected from the schema names. 
+ + * `:on_delete` - The action taken on associations when the parent record + is deleted. May be `:nothing` (default) or `:delete_all`. + Using this option is DISCOURAGED for most relational databases. Instead, + in your migration, set `references(:parent_id, on_delete: :delete_all)`. + Opposite to the migration option, this option cannot guarantee integrity + and it is only triggered for `c:Ecto.Repo.delete/2` (and not on + `c:Ecto.Repo.delete_all/2`). This option can only remove data from the + join source, never the associated records, and it never cascades. + + * `:on_replace` - The action taken on associations when the record is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, or `:delete`. + `:delete` will only remove data from the join source, never the + associated records. See `Ecto.Changeset`'s section on related data + for more info. + + * `:defaults` - Default values to use when building the association. + It may be a keyword list of options that override the association schema + or a atom/`{module, function, args}` that receives the struct and the owner as + arguments. For example, if you set `Post.many_to_many :tags, defaults: [public: true]`, + then when using `Ecto.build_assoc(post, :tags)`, the tag will have + `tag.public == true`. Alternatively, you can set it to + `Post.many_to_many :tags, defaults: :update_tag`, which will invoke + `Post.update_tag(tag, post)`, or set it to a MFA tuple such as + `{Mod, fun, [arg3, arg4]}`, which will invoke `Mod.fun(tag, post, arg3, arg4)` + + * `:join_defaults` - The same as `:defaults` but it applies to the join schema + instead. This option will raise if it is given and the `:join_through` value + is not a schema. + + * `:unique` - When true, checks if the associated entries are unique + whenever the association is cast or changed via the parent record. + For instance, it would verify that a given tag cannot be attached to + the same post more than once. This exists mostly as a quick check + for user feedback, as it does not guarantee uniqueness at the database + level. Therefore, you should also set a unique index in the database + join table, such as: `create unique_index(:posts_tags, [:post_id, :tag_id])` + + * `:where` - A filter for the association. See "Filtering associations" + in `has_many/3` + + * `:join_where` - A filter for the join table. See "Filtering associations" + in `has_many/3` + + * `:preload_order` - Sets the default `order_by` of the association. + It is used when the association is preloaded. + For example, if you set `Post.many_to_many :tags, Tag, join_through: "posts_tags", preload_order: [asc: :foo]`, + whenever the `:tags` associations is preloaded, the tags will be order by the `:foo` field. + See `Ecto.Query.order_by/3` for more examples. + + ## Using Ecto.assoc/2 + + One of the benefits of using `many_to_many` is that Ecto will avoid + loading the intermediate whenever possible, making your queries more + efficient. For this reason, developers should not refer to the join + table of `many_to_many` in queries. The join table is accessible in + few occasions, such as in `Ecto.assoc/2`. For example, if you do this: + + post + |> Ecto.assoc(:tags) + |> where([t, _pt, p], p.public == t.public) + + It may not work as expected because the `posts_tags` table may not be + included in the query. You can address this problem in multiple ways. 
+ One option is to use `...`: + + post + |> Ecto.assoc(:tags) + |> where([t, ..., p], p.public == t.public) + + Another and preferred option is to rewrite to an explicit `join`, which + leaves out the intermediate bindings as they are resolved only later on: + + # keyword syntax + from t in Tag, + join: p in assoc(t, :post), on: p.id == ^post.id + + # pipe syntax + Tag + |> join(:inner, [t], p in assoc(t, :post), on: p.id == ^post.id) + + If you need to access the join table, then you likely want to use + `has_many/3` with the `:through` option instead. + + ## Removing data + + If you attempt to remove associated `many_to_many` data, **Ecto will + always remove data from the join schema and never from the target + associations** be it by setting `:on_replace` to `:delete`, `:on_delete` + to `:delete_all` or by using changeset functions such as + `Ecto.Changeset.put_assoc/3`. For example, if a `Post` has a many to many + relationship with `Tag`, setting `:on_delete` to `:delete_all` will + only delete entries from the "posts_tags" table in case `Post` is + deleted. + + ## Migration + + How your migration should be structured depends on the value you pass + in `:join_through`. If `:join_through` is simply a string, representing + a table, you may define a table without primary keys and you must not + include any further columns, as those values won't be set by Ecto: + + create table(:posts_tags, primary_key: false) do + add :post_id, references(:posts) + add :tag_id, references(:tags) + end + + However, if your `:join_through` is a schema, like `MyApp.PostTag`, your + join table may be structured as any other table in your codebase, + including timestamps: + + create table(:posts_tags) do + add :post_id, references(:posts) + add :tag_id, references(:tags) + timestamps() + end + + Because `:join_through` contains a schema, in such cases, autogenerated + values and primary keys will be automatically handled by Ecto. + + ## Examples + + defmodule Post do + use Ecto.Schema + schema "posts" do + many_to_many :tags, Tag, join_through: "posts_tags" + end + end + + # Let's create a post and a tag + post = Repo.insert!(%Post{}) + tag = Repo.insert!(%Tag{name: "introduction"}) + + # We can associate at any time post and tags together using changesets + post + |> Repo.preload(:tags) # Load existing data + |> Ecto.Changeset.change() # Build the changeset + |> Ecto.Changeset.put_assoc(:tags, [tag]) # Set the association + |> Repo.update! + + # In a later moment, we may get all tags for a given post + post = Repo.get(Post, 42) + tags = Repo.all(assoc(post, :tags)) + + # The tags may also be preloaded on the post struct for reading + [post] = Repo.all(from(p in Post, where: p.id == 42, preload: :tags)) + post.tags #=> [%Tag{...}, ...] + + ## Join Schema Example + + You may prefer to use a join schema to handle many_to_many associations. The + decoupled nature of Ecto allows us to create a "join" struct which + `belongs_to` both sides of the many to many association. 
+ + In our example, a `User` has and belongs to many `Organization`s: + + defmodule MyApp.Repo.Migrations.CreateUserOrganization do + use Ecto.Migration + + def change do + create table(:users_organizations) do + add :user_id, references(:users) + add :organization_id, references(:organizations) + + timestamps() + end + end + end + + defmodule UserOrganization do + use Ecto.Schema + + @primary_key false + schema "users_organizations" do + belongs_to :user, User + belongs_to :organization, Organization + timestamps() # Added bonus, a join schema will also allow you to set timestamps + end + + def changeset(struct, params \\ %{}) do + struct + |> Ecto.Changeset.cast(params, [:user_id, :organization_id]) + |> Ecto.Changeset.validate_required([:user_id, :organization_id]) + # Maybe do some counter caching here! + end + end + + defmodule User do + use Ecto.Schema + + schema "users" do + many_to_many :organizations, Organization, join_through: UserOrganization + end + end + + defmodule Organization do + use Ecto.Schema + + schema "organizations" do + many_to_many :users, User, join_through: UserOrganization + end + end + + # Then to create the association, pass in the ID's of an existing + # User and Organization to UserOrganization.changeset + changeset = UserOrganization.changeset(%UserOrganization{}, %{user_id: id, organization_id: id}) + + case Repo.insert(changeset) do + {:ok, assoc} -> # Assoc was created! + {:error, changeset} -> # Handle the error + end + """ + defmacro many_to_many(name, queryable, opts \\ []) do + queryable = expand_alias(queryable, __CALLER__) + opts = expand_alias_in_key(opts, :join_through, __CALLER__) + + quote do + Ecto.Schema.__many_to_many__(__MODULE__, unquote(name), unquote(queryable), unquote(opts)) + end + end + + ## Embeds + + @doc ~S""" + Indicates an embedding of a schema. + + The current schema has zero or one records of the other schema embedded + inside of it. It uses a field similar to the `:map` type for storage, + but allows embeds to have all the things regular schema can. + + You must declare your `embeds_one/3` field with type `:map` at the + database level. + + The embedded may or may not have a primary key. Ecto uses the primary keys + to detect if an embed is being updated or not. If a primary key is not present, + `:on_replace` should be set to either `:update` or `:delete` if there is a + desire to either update or delete the current embed when a new one is set. + + ## Options + + * `:on_replace` - The action taken on associations when the embed is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, `:update`, or `:delete`. + See `Ecto.Changeset`'s section on related data for more info. + + * `:source` - Defines the name that is to be used in database for this field. + This is useful when attaching to an existing database. The value should be + an atom. 
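+
+  For instance, the options above might be combined as follows. This is an
+  illustrative sketch; the `Item` schema and the `:line_item` column name
+  are assumptions:
+
+      embeds_one :item, Item, on_replace: :update, source: :line_item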
+ + ## Examples + + defmodule Order do + use Ecto.Schema + + schema "orders" do + embeds_one :item, Item + end + end + + defmodule Item do + use Ecto.Schema + + embedded_schema do + field :title + end + end + + # The item is loaded with the order + order = Repo.get!(Order, 42) + order.item #=> %Item{...} + + Adding and removal of embeds can only be done via the `Ecto.Changeset` + API so Ecto can properly track the embed life-cycle: + + order = Repo.get!(Order, 42) + item = %Item{title: "Soap"} + + # Generate a changeset + changeset = Ecto.Changeset.change(order) + + # Put a new embed to the changeset + changeset = Ecto.Changeset.put_embed(changeset, :item, item) + + # Update the order, and fetch the item + item = Repo.update!(changeset).item + + # Item is generated with a unique identification + item + # => %Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Soap"} + + ## Inline embedded schema + + The schema module can be defined inline in the parent schema in simple + cases: + + defmodule Parent do + use Ecto.Schema + + schema "parents" do + field :name, :string + + embeds_one :child, Child do + field :name, :string + field :age, :integer + end + end + end + + Options should be passed before the `do` block like this: + + embeds_one :child, Child, on_replace: :delete do + field :name, :string + field :age, :integer + end + + Primary keys are automatically set up for embedded schemas as well, + defaulting to `{:id, :binary_id, autogenerate: true}`. You can + customize it by passing a `:primary_key` option with the same arguments + as `@primary_key` (see the [Schema attributes](https://hexdocs.pm/ecto/Ecto.Schema.html#module-schema-attributes) + section for more info). + + Defining embedded schema in such a way will define a `Parent.Child` module + with the appropriate struct. In order to properly cast the embedded schema. + When casting the inline-defined embedded schemas you need to use the `:with` + option of `Ecto.Changeset.cast_embed/3` to provide the proper function to do the casting. + For example: + + def changeset(schema, params) do + schema + |> cast(params, [:name]) + |> cast_embed(:child, with: &child_changeset/2) + end + + defp child_changeset(schema, params) do + schema + |> cast(params, [:name, :age]) + end + + ## Encoding and decoding + + Because many databases do not support direct encoding and decoding + of embeds, it is often emulated by Ecto by using specific encoding + and decoding rules. + + For example, PostgreSQL will store embeds on top of JSONB columns, + which means types in embedded schemas won't go through the usual + dump->DB->load cycle but rather encode->DB->decode->cast. This means + that, when using embedded schemas with databases like PG or MySQL, + make sure all of your types can be JSON encoded/decoded correctly. + Ecto provides this guarantee for all built-in types. + """ + defmacro embeds_one(name, schema, opts \\ []) + + defmacro embeds_one(name, schema, do: block) do + quote do + embeds_one(unquote(name), unquote(schema), [], do: unquote(block)) + end + end + + defmacro embeds_one(name, schema, opts) do + schema = expand_alias(schema, __CALLER__) + quote do + Ecto.Schema.__embeds_one__(__MODULE__, unquote(name), unquote(schema), unquote(opts)) + end + end + + @doc """ + Indicates an embedding of a schema. + + For options and examples see documentation of `embeds_one/3`. 
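+
+  A minimal sketch of this arity, combining options with an inline schema
+  (the embed name and fields are assumptions):
+
+      embeds_one :address, Address, on_replace: :update do
+        field :street, :string
+        field :city, :string
+      end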
+ """ + defmacro embeds_one(name, schema, opts, do: block) do + quote do + {schema, opts} = Ecto.Schema.__embeds_module__(__ENV__, unquote(schema), unquote(opts), unquote(Macro.escape(block))) + Ecto.Schema.__embeds_one__(__MODULE__, unquote(name), schema, opts) + end + end + + @doc ~S""" + Indicates an embedding of many schemas. + + The current schema has zero or more records of the other schema embedded + inside of it. Embeds have all the things regular schemas have. + + It is recommended to declare your `embeds_many/3` field with type `:map` + in your migrations, instead of using `{:array, :map}`. Ecto can work with + both maps and arrays as the container for embeds (and in most databases + maps are represented as JSON which allows Ecto to choose what works best). + + The embedded may or may not have a primary key. Ecto uses the primary keys + to detect if an embed is being updated or not. If a primary is not present + and you still want the list of embeds to be updated, `:on_replace` must be + set to `:delete`, forcing all current embeds to be deleted and replaced by + new ones whenever a new list of embeds is set. + + For encoding and decoding of embeds, please read the docs for + `embeds_one/3`. + + ## Options + + * `:on_replace` - The action taken on associations when the embed is + replaced when casting or manipulating parent changeset. May be + `:raise` (default), `:mark_as_invalid`, or `:delete`. + See `Ecto.Changeset`'s section on related data for more info. + + * `:source` - Defines the name that is to be used in database for this field. + This is useful when attaching to an existing database. The value should be + an atom. + + ## Examples + + defmodule Order do + use Ecto.Schema + + schema "orders" do + embeds_many :items, Item + end + end + + defmodule Item do + use Ecto.Schema + + embedded_schema do + field :title + end + end + + # The items are loaded with the order + order = Repo.get!(Order, 42) + order.items #=> [%Item{...}, ...] + + Adding and removal of embeds can only be done via the `Ecto.Changeset` + API so Ecto can properly track the embed life-cycle: + + # Order has no items + order = Repo.get!(Order, 42) + order.items + # => [] + + items = [%Item{title: "Soap"}] + + # Generate a changeset + changeset = Ecto.Changeset.change(order) + + # Put a one or more new items + changeset = Ecto.Changeset.put_embed(changeset, :items, items) + + # Update the order and fetch items + items = Repo.update!(changeset).items + + # Items are generated with a unique identification + items + # => [%Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Soap"}] + + Updating of embeds must be done using a changeset for each changed embed. 
+ + # Order has an existing items + order = Repo.get!(Order, 42) + order.items + # => [%Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Soap"}] + + # Generate a changeset + changeset = Ecto.Changeset.change(order) + + # Put the updated item as a changeset + current_item = List.first(order.items) + item_changeset = Ecto.Changeset.change(current_item, title: "Mujju's Soap") + order_changeset = Ecto.Changeset.put_embed(changeset, :items, [item_changeset]) + + # Update the order and fetch items + items = Repo.update!(order_changeset).items + + # Item has the updated title + items + # => [%Item{id: "20a97d94-f79b-4e63-a875-85deed7719b7", title: "Mujju's Soap"}] + + ## Inline embedded schema + + The schema module can be defined inline in the parent schema in simple + cases: + + defmodule Parent do + use Ecto.Schema + + schema "parents" do + field :name, :string + + embeds_many :children, Child do + field :name, :string + field :age, :integer + end + end + end + + Primary keys are automatically set up for embedded schemas as well, + defaulting to `{:id, :binary_id, autogenerate: true}`. You can + customize it by passing a `:primary_key` option with the same arguments + as `@primary_key` (see the [Schema attributes](https://hexdocs.pm/ecto/Ecto.Schema.html#module-schema-attributes) + section for more info). + + Defining embedded schema in such a way will define a `Parent.Child` module + with the appropriate struct. In order to properly cast the embedded schema. + When casting the inline-defined embedded schemas you need to use the `:with` + option of `cast_embed/3` to provide the proper function to do the casting. + For example: + + def changeset(schema, params) do + schema + |> cast(params, [:name]) + |> cast_embed(:children, with: &child_changeset/2) + end + + defp child_changeset(schema, params) do + schema + |> cast(params, [:name, :age]) + end + + """ + defmacro embeds_many(name, schema, opts \\ []) + + defmacro embeds_many(name, schema, do: block) do + quote do + embeds_many(unquote(name), unquote(schema), [], do: unquote(block)) + end + end + + defmacro embeds_many(name, schema, opts) do + schema = expand_alias(schema, __CALLER__) + quote do + Ecto.Schema.__embeds_many__(__MODULE__, unquote(name), unquote(schema), unquote(opts)) + end + end + + @doc """ + Indicates an embedding of many schemas. + + For options and examples see documentation of `embeds_many/3`. + """ + defmacro embeds_many(name, schema, opts, do: block) do + quote do + {schema, opts} = Ecto.Schema.__embeds_module__(__ENV__, unquote(schema), unquote(opts), unquote(Macro.escape(block))) + Ecto.Schema.__embeds_many__(__MODULE__, unquote(name), schema, opts) + end + end + + # Internal function for integrating associations into schemas. + # + # This function exists as an extension point for libraries to + # experiment new types of associations to Ecto, although it may + # break at any time (as with any of the association callbacks). + # + # This function expects the current schema, the association cardinality, + # the association name, the association module (that implements + # `Ecto.Association` callbacks) and a keyword list of options. 
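+  #
+  # As an illustrative sketch only (the library, module and field names
+  # below are made up), such a library could register a custom association
+  # roughly like:
+  #
+  #     Ecto.Schema.association(MySchema, :many, :widgets,
+  #                             MyLib.WidgetAssociation, queryable: MyLib.Widget)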
+ @doc false + @spec association(module, :one | :many, atom(), module, Keyword.t) :: Ecto.Association.t + def association(schema, cardinality, name, association, opts) do + not_loaded = %Ecto.Association.NotLoaded{ + __owner__: schema, + __field__: name, + __cardinality__: cardinality + } + + put_struct_field(schema, name, not_loaded) + opts = [cardinality: cardinality] ++ opts + struct = association.struct(schema, name, opts) + Module.put_attribute(schema, :ecto_assocs, {name, struct}) + struct + end + + ## Callbacks + + @doc false + def __timestamps__(:naive_datetime) do + %{NaiveDateTime.utc_now() | microsecond: {0, 0}} + end + + def __timestamps__(:naive_datetime_usec) do + NaiveDateTime.utc_now() + end + + def __timestamps__(:utc_datetime) do + %{DateTime.utc_now() | microsecond: {0, 0}} + end + + def __timestamps__(:utc_datetime_usec) do + DateTime.utc_now() + end + + def __timestamps__(type) do + type.from_unix!(System.os_time(:microsecond), :microsecond) + end + + @doc false + def __loaded__(module, struct_fields) do + case Map.new([{:__struct__, module} | struct_fields]) do + %{__meta__: meta} = struct -> %{struct | __meta__: Map.put(meta, :state, :loaded)} + struct -> struct + end + end + + @doc false + def __field__(mod, name, type, opts) do + # Check the field type before we check options because it is + # better to raise unknown type first than unsupported option. + type = check_field_type!(mod, name, type, opts) + + if type == :any && !opts[:virtual] do + raise ArgumentError, "only virtual fields can have type :any, " <> + "invalid type for field #{inspect name}" + end + + check_options!(type, opts, @field_opts, "field/3") + Module.put_attribute(mod, :ecto_changeset_fields, {name, type}) + validate_default!(type, opts[:default], opts[:skip_default_validation]) + define_field(mod, name, type, opts) + end + + defp define_field(mod, name, type, opts) do + virtual? = opts[:virtual] || false + pk? = opts[:primary_key] || false + put_struct_field(mod, name, Keyword.get(opts, :default)) + + if Keyword.get(opts, :redact, false) do + Module.put_attribute(mod, :ecto_redact_fields, name) + end + + if virtual? do + Module.put_attribute(mod, :ecto_virtual_fields, {name, type}) + else + source = opts[:source] || Module.get_attribute(mod, :field_source_mapper).(name) + + if not is_atom(source) do + raise ArgumentError, "the :source for field `#{name}` must be an atom, got: #{inspect(source)}" + end + + if name != source do + Module.put_attribute(mod, :ecto_field_sources, {name, source}) + end + + if raw = opts[:read_after_writes] do + Module.put_attribute(mod, :ecto_raw, name) + end + + case gen = opts[:autogenerate] do + {_, _, _} -> + store_mfa_autogenerate!(mod, name, type, gen) + + true -> + store_type_autogenerate!(mod, name, source || name, type, pk?) + + _ -> + :ok + end + + if raw && gen do + raise ArgumentError, "cannot mark the same field as autogenerate and read_after_writes" + end + + if pk? 
do + Module.put_attribute(mod, :ecto_primary_keys, name) + end + + if Keyword.get(opts, :load_in_query, true) do + Module.put_attribute(mod, :ecto_query_fields, {name, type}) + end + + Module.put_attribute(mod, :ecto_fields, {name, type}) + end + end + + @valid_has_options [:foreign_key, :references, :through, :on_delete, :defaults, :on_replace, :where, :preload_order] + + @doc false + def __has_many__(mod, name, queryable, opts) do + if is_list(queryable) and Keyword.has_key?(queryable, :through) do + check_options!(queryable, @valid_has_options, "has_many/3") + association(mod, :many, name, Ecto.Association.HasThrough, queryable) + else + check_options!(opts, @valid_has_options, "has_many/3") + struct = association(mod, :many, name, Ecto.Association.Has, [queryable: queryable] ++ opts) + Module.put_attribute(mod, :ecto_changeset_fields, {name, {:assoc, struct}}) + end + end + + @doc false + def __has_one__(mod, name, queryable, opts) do + if is_list(queryable) and Keyword.has_key?(queryable, :through) do + check_options!(queryable, @valid_has_options, "has_one/3") + association(mod, :one, name, Ecto.Association.HasThrough, queryable) + else + check_options!(opts, @valid_has_options, "has_one/3") + struct = association(mod, :one, name, Ecto.Association.Has, [queryable: queryable] ++ opts) + Module.put_attribute(mod, :ecto_changeset_fields, {name, {:assoc, struct}}) + end + end + + # :primary_key is valid here to support associative entity + # https://en.wikipedia.org/wiki/Associative_entity + @valid_belongs_to_options [:foreign_key, :references, :define_field, :type, + :on_replace, :defaults, :primary_key, :source, :where] + + @doc false + def __belongs_to__(mod, name, queryable, opts) do + opts = Keyword.put_new(opts, :foreign_key, :"#{name}_id") + + foreign_key_name = opts[:foreign_key] + foreign_key_type = opts[:type] || Module.get_attribute(mod, :foreign_key_type) + foreign_key_type = check_field_type!(mod, name, foreign_key_type, opts) + check_options!(foreign_key_type, opts, @valid_belongs_to_options, "belongs_to/3") + + if foreign_key_name == name do + raise ArgumentError, "foreign_key #{inspect name} must be distinct from corresponding association name" + end + + if Keyword.get(opts, :define_field, true) do + Module.put_attribute(mod, :ecto_changeset_fields, {foreign_key_name, foreign_key_type}) + define_field(mod, foreign_key_name, foreign_key_type, opts) + end + + struct = + association(mod, :one, name, Ecto.Association.BelongsTo, [queryable: queryable] ++ opts) + + Module.put_attribute(mod, :ecto_changeset_fields, {name, {:assoc, struct}}) + end + + @valid_many_to_many_options [:join_through, :join_defaults, :join_keys, :on_delete, :defaults, :on_replace, :unique, :where, :join_where, :preload_order] + + @doc false + def __many_to_many__(mod, name, queryable, opts) do + check_options!(opts, @valid_many_to_many_options, "many_to_many/3") + + struct = + association(mod, :many, name, Ecto.Association.ManyToMany, [queryable: queryable] ++ opts) + Module.put_attribute(mod, :ecto_changeset_fields, {name, {:assoc, struct}}) + end + + @valid_embeds_one_options [:strategy, :on_replace, :source] + + @doc false + def __embeds_one__(mod, name, schema, opts) do + check_options!(opts, @valid_embeds_one_options, "embeds_one/3") + embed(mod, :one, name, schema, opts) + end + + @valid_embeds_many_options [:strategy, :on_replace, :source] + + @doc false + def __embeds_many__(mod, name, schema, opts) do + check_options!(opts, @valid_embeds_many_options, "embeds_many/3") + opts = Keyword.put(opts, 
:default, []) + embed(mod, :many, name, schema, opts) + end + + @doc false + def __embeds_module__(env, name, opts, block) do + {pk, opts} = Keyword.pop(opts, :primary_key, {:id, :binary_id, autogenerate: true}) + + block = + quote do + use Ecto.Schema + + @primary_key unquote(Macro.escape(pk)) + embedded_schema do + unquote(block) + end + end + + module = Module.concat(env.module, name) + Module.create(module, block, env) + {module, opts} + end + + ## Quoted callbacks + + @doc false + def __after_compile__(%{module: module} = env, _) do + # If we are compiling code, we can validate associations now, + # as the Elixir compiler will solve dependencies. + # + # TODO: Use Code.can_await_module_compilation?/0 from Elixir v1.10+. + if Process.info(self(), :error_handler) == {:error_handler, Kernel.ErrorHandler} do + for name <- module.__schema__(:associations) do + assoc = module.__schema__(:association, name) + + case assoc.__struct__.after_compile_validation(assoc, env) do + :ok -> + :ok + + {:error, message} -> + IO.warn "invalid association `#{assoc.field}` in schema #{inspect module}: #{message}", + Macro.Env.stacktrace(env) + end + end + end + + :ok + end + + @doc false + def __schema__(fields, field_sources, assocs, embeds, virtual_fields) do + load = + for {name, type} <- fields do + if alias = field_sources[name] do + {name, {:source, alias, type}} + else + {name, type} + end + end + + dump = + for {name, type} <- fields do + {name, {field_sources[name] || name, type}} + end + + field_sources_quoted = + for {name, _type} <- fields do + {[:field_source, name], field_sources[name] || name} + end + + types_quoted = + for {name, type} <- fields do + {[:type, name], Macro.escape(type)} + end + + virtual_types_quoted = + for {name, type} <- virtual_fields do + {[:virtual_type, name], Macro.escape(type)} + end + + assoc_quoted = + for {name, refl} <- assocs do + {[:association, name], Macro.escape(refl)} + end + + assoc_names = Enum.map(assocs, &elem(&1, 0)) + + embed_quoted = + for {name, refl} <- embeds do + {[:embed, name], Macro.escape(refl)} + end + + embed_names = Enum.map(embeds, &elem(&1, 0)) + + single_arg = [ + {[:dump], dump |> Map.new() |> Macro.escape()}, + {[:load], load |> Macro.escape()}, + {[:associations], assoc_names}, + {[:embeds], embed_names} + ] + + catch_all = [ + {[:field_source, quote(do: _)], nil}, + {[:type, quote(do: _)], nil}, + {[:virtual_type, quote(do: _)], nil}, + {[:association, quote(do: _)], nil}, + {[:embed, quote(do: _)], nil} + ] + + [ + single_arg, + field_sources_quoted, + types_quoted, + virtual_types_quoted, + assoc_quoted, + embed_quoted, + catch_all + ] + end + + ## Private + + defp embed(mod, cardinality, name, schema, opts) do + opts = [cardinality: cardinality, related: schema, owner: mod, field: name] ++ opts + struct = Ecto.Embedded.init(opts) + + Module.put_attribute(mod, :ecto_changeset_fields, {name, {:embed, struct}}) + Module.put_attribute(mod, :ecto_embeds, {name, struct}) + define_field(mod, name, {:parameterized, Ecto.Embedded, struct}, opts) + end + + defp put_struct_field(mod, name, assoc) do + fields = Module.get_attribute(mod, :ecto_struct_fields) + + if List.keyfind(fields, name, 0) do + raise ArgumentError, "field/association #{inspect name} already exists on schema, you must either remove the duplication or choose a different name" + end + + Module.put_attribute(mod, :ecto_struct_fields, {name, assoc}) + end + + defp validate_default!(_type, _value, true), do: :ok + defp validate_default!(type, value, _skip) do + case 
Ecto.Type.dump(type, value) do + {:ok, _} -> + :ok + _ -> + raise ArgumentError, "value #{inspect(value)} is invalid for type #{inspect(type)}, can't set default" + end + end + + defp check_options!(opts, valid, fun_arity) do + case Enum.find(opts, fn {k, _} -> not(k in valid) end) do + {k, _} -> raise ArgumentError, "invalid option #{inspect k} for #{fun_arity}" + nil -> :ok + end + end + + defp check_options!({:parameterized, _, _}, _opts, _valid, _fun_arity) do + :ok + end + + defp check_options!({_, type}, opts, valid, fun_arity) do + check_options!(type, opts, valid, fun_arity) + end + + defp check_options!(_type, opts, valid, fun_arity) do + check_options!(opts, valid, fun_arity) + end + + defp check_field_type!(_mod, name, :datetime, _opts) do + raise ArgumentError, "invalid type :datetime for field #{inspect name}. " <> + "You probably meant to choose one between :naive_datetime " <> + "(no time zone information) or :utc_datetime (time zone is set to UTC)" + end + + defp check_field_type!(mod, name, type, opts) do + cond do + composite?(type, name) -> + {outer_type, inner_type} = type + {outer_type, check_field_type!(mod, name, inner_type, opts)} + + not is_atom(type) -> + raise ArgumentError, "invalid type #{inspect type} for field #{inspect name}" + + Ecto.Type.base?(type) -> + type + + Code.ensure_compiled(type) == {:module, type} -> + cond do + function_exported?(type, :type, 0) -> + type + + function_exported?(type, :type, 1) -> + Ecto.ParameterizedType.init(type, Keyword.merge(opts, field: name, schema: mod)) + + function_exported?(type, :__schema__, 1) -> + raise ArgumentError, + "schema #{inspect type} is not a valid type for field #{inspect name}." <> + " Did you mean to use belongs_to, has_one, has_many, embeds_one, or embeds_many instead?" + + true -> + raise ArgumentError, + "module #{inspect(type)} given as type for field #{inspect name} is not an Ecto.Type/Ecto.ParameterizedType" + end + + true -> + raise ArgumentError, "unknown type #{inspect type} for field #{inspect name}" + end + end + + defp composite?({composite, _} = type, name) do + if Ecto.Type.composite?(composite) do + true + else + raise ArgumentError, + "invalid or unknown composite #{inspect type} for field #{inspect name}. " <> + "Did you mean to use :array or :map as first element of the tuple instead?" + end + end + + defp composite?(_type, _name), do: false + + defp store_mfa_autogenerate!(mod, name, type, mfa) do + if autogenerate_id?(type) do + raise ArgumentError, ":autogenerate with {m, f, a} not supported by ID types" + end + + Module.put_attribute(mod, :ecto_autogenerate, {[name], mfa}) + end + + defp store_type_autogenerate!(mod, name, source, {:parameterized, typemod, params} = type, pk?) do + cond do + store_autogenerate_id!(mod, name, source, type, pk?) -> + :ok + + not function_exported?(typemod, :autogenerate, 1) -> + raise ArgumentError, "field #{inspect name} does not support :autogenerate because it uses a " <> + "parameterized type #{inspect type} that does not define autogenerate/1" + + true -> + Module.put_attribute(mod, :ecto_autogenerate, {[name], {typemod, :autogenerate, [params]}}) + end + end + + defp store_type_autogenerate!(mod, name, source, type, pk?) do + cond do + store_autogenerate_id!(mod, name, source, type, pk?) 
-> + :ok + + Ecto.Type.primitive?(type) -> + raise ArgumentError, "field #{inspect name} does not support :autogenerate because it uses a " <> + "primitive type #{inspect type}" + + # Note the custom type has already been loaded in check_type!/3 + not function_exported?(type, :autogenerate, 0) -> + raise ArgumentError, "field #{inspect name} does not support :autogenerate because it uses a " <> + "custom type #{inspect type} that does not define autogenerate/0" + + true -> + Module.put_attribute(mod, :ecto_autogenerate, {[name], {type, :autogenerate, []}}) + end + end + + defp store_autogenerate_id!(mod, name, source, type, pk?) do + cond do + not autogenerate_id?(type) -> + false + + not pk? -> + raise ArgumentError, "only primary keys allow :autogenerate for type #{inspect type}, " <> + "field #{inspect name} is not a primary key" + + Module.get_attribute(mod, :ecto_autogenerate_id) -> + raise ArgumentError, "only one primary key with ID type may be marked as autogenerated" + + true -> + Module.put_attribute(mod, :ecto_autogenerate_id, {name, source, type}) + true + end + end + + defp autogenerate_id?(type), do: Ecto.Type.type(type) in [:id, :binary_id] + + defp expand_alias({:__aliases__, _, _} = ast, env), + do: Macro.expand(ast, %{env | function: {:__schema__, 2}}) + defp expand_alias(ast, _env), + do: ast + + defp expand_alias_in_key(opts, key, env) do + if is_list(opts) and Keyword.has_key?(opts, key) do + Keyword.update!(opts, key, &expand_alias(&1, env)) + else + opts + end + end +end diff --git a/deps/ecto/lib/ecto/schema/loader.ex b/deps/ecto/lib/ecto/schema/loader.ex new file mode 100644 index 0000000..d36d797 --- /dev/null +++ b/deps/ecto/lib/ecto/schema/loader.ex @@ -0,0 +1,106 @@ +defmodule Ecto.Schema.Loader do + @moduledoc false + + alias Ecto.Schema.Metadata + + @doc """ + Loads a struct to be used as a template in further operations. + """ + def load_struct(nil, _prefix, _source), do: %{} + + def load_struct(schema, prefix, source) do + case schema.__schema__(:loaded) do + %{__meta__: %Metadata{prefix: ^prefix, source: ^source}} = struct -> + struct + + %{__meta__: %Metadata{} = metadata} = struct -> + Map.put(struct, :__meta__, %{metadata | source: source, prefix: prefix}) + + %{} = struct -> + struct + end + end + + @doc """ + Loads data coming from the user/embeds into schema. + + Assumes data does not all belongs to schema/struct + and that it may also require source-based renaming. + """ + def unsafe_load(schema, data, loader) do + types = schema.__schema__(:load) + struct = schema.__schema__(:loaded) + unsafe_load(struct, types, data, loader) + end + + @doc """ + Loads data coming from the user/embeds into struct and types. + + Assumes data does not all belongs to schema/struct + and that it may also require source-based renaming. 
+ """ + def unsafe_load(struct, types, map, loader) when is_map(map) do + Enum.reduce(types, struct, fn pair, acc -> + {field, source, type} = field_source_and_type(pair) + + case fetch_string_or_atom_field(map, source) do + {:ok, value} -> Map.put(acc, field, load!(struct, field, type, value, loader)) + :error -> acc + end + end) + end + + @compile {:inline, field_source_and_type: 1, fetch_string_or_atom_field: 2} + defp field_source_and_type({field, {:source, source, type}}) do + {field, source, type} + end + + defp field_source_and_type({field, type}) do + {field, field, type} + end + + defp fetch_string_or_atom_field(map, field) when is_atom(field) do + case Map.fetch(map, Atom.to_string(field)) do + {:ok, value} -> {:ok, value} + :error -> Map.fetch(map, field) + end + end + + @compile {:inline, load!: 5} + defp load!(struct, field, type, value, loader) do + case loader.(type, value) do + {:ok, value} -> + value + + :error -> + raise ArgumentError, + "cannot load `#{inspect(value)}` as type #{inspect(type)} " <> + "for field `#{field}`#{error_data(struct)}" + end + end + + defp error_data(%{__struct__: atom}) do + " in schema #{inspect(atom)}" + end + + defp error_data(other) when is_map(other) do + "" + end + + @doc """ + Dumps the given data. + """ + def safe_dump(struct, types, dumper) do + Enum.reduce(types, %{}, fn {field, {source, type}}, acc -> + value = Map.get(struct, field) + + case dumper.(type, value) do + {:ok, value} -> + Map.put(acc, source, value) + :error -> + raise ArgumentError, "cannot dump `#{inspect value}` as type #{inspect type} " <> + "for field `#{field}` in schema #{inspect struct.__struct__}" + end + end) + end +end diff --git a/deps/ecto/lib/ecto/schema/metadata.ex b/deps/ecto/lib/ecto/schema/metadata.ex new file mode 100644 index 0000000..cfbec0a --- /dev/null +++ b/deps/ecto/lib/ecto/schema/metadata.ex @@ -0,0 +1,66 @@ +defmodule Ecto.Schema.Metadata do + @moduledoc """ + Stores metadata of a struct. + + ## State + + The state of the schema is stored in the `:state` field and allows + following values: + + * `:built` - the struct was constructed in memory and is not persisted + to database yet; + * `:loaded` - the struct was loaded from database and represents + persisted data; + * `:deleted` - the struct was deleted and no longer represents persisted + data. + + ## Source + + The `:source` tracks the (table or collection) where the struct is or should + be persisted to. + + ## Prefix + + Tracks the source prefix in the data storage. + + ## Context + + The `:context` field represents additional state some databases require + for proper updates of data. It is not used by the built-in adapters of + `Ecto.Adapters.Postgres` and `Ecto.Adapters.MySQL`. + + ## Schema + + The `:schema` field refers the module name for the schema this metadata belongs to. 
+ """ + defstruct [:state, :source, :context, :schema, :prefix] + + @type state :: :built | :loaded | :deleted + + @type context :: any + + @type t(schema) :: %__MODULE__{ + context: context, + prefix: Ecto.Schema.prefix(), + schema: schema, + source: Ecto.Schema.source(), + state: state + } + + @type t :: t(module) + + defimpl Inspect do + import Inspect.Algebra + + def inspect(metadata, opts) do + %{source: source, prefix: prefix, state: state, context: context} = metadata + + entries = + for entry <- [state, prefix, source, context], + entry != nil, + do: to_doc(entry, opts) + + concat(["#Ecto.Schema.Metadata<"] ++ Enum.intersperse(entries, ", ") ++ [">"]) + end + end +end diff --git a/deps/ecto/lib/ecto/type.ex b/deps/ecto/lib/ecto/type.ex new file mode 100644 index 0000000..796f6dc --- /dev/null +++ b/deps/ecto/lib/ecto/type.ex @@ -0,0 +1,1383 @@ +defmodule Ecto.Type do + @moduledoc """ + Defines functions and the `Ecto.Type` behaviour for implementing + basic custom types. + + Ecto provides two types of custom types: basic types and + parameterized types. Basic types are simple, requiring only four + callbacks to be implemented, and are enough for most occasions. + Parameterized types can be customized on the field definition and + provide a wide variety of callbacks. + + The definition of basic custom types and all of their callbacks are + available in this module. You can learn more about parameterized + types in `Ecto.ParameterizedType`. If in doubt, prefer to use + basic custom types and rely on parameterized types if you need + the extra functionality. + + ## Example + + Imagine you want to store a URI struct as part of a schema in a + url-shortening service. There isn't an Ecto field type to support + that value at runtime therefore a custom one is needed. + + You also want to query not only by the full url, but for example + by specific ports used. This is possible by putting the URI data + into a map field instead of just storing the plain + string representation. + + from s in ShortUrl, + where: fragment("?->>? ILIKE ?", s.original_url, "port", "443") + + So the custom type does need to handle the conversion from + external data to runtime data (`c:cast/1`) as well as + transforming that runtime data into the `:map` Ecto native type and + back (`c:dump/1` and `c:load/1`). + + defmodule EctoURI do + use Ecto.Type + def type, do: :map + + # Provide custom casting rules. + # Cast strings into the URI struct to be used at runtime + def cast(uri) when is_binary(uri) do + {:ok, URI.parse(uri)} + end + + # Accept casting of URI structs as well + def cast(%URI{} = uri), do: {:ok, uri} + + # Everything else is a failure though + def cast(_), do: :error + + # When loading data from the database, as long as it's a map, + # we just put the data back into a URI struct to be stored in + # the loaded schema struct. + def load(data) when is_map(data) do + data = + for {key, val} <- data do + {String.to_existing_atom(key), val} + end + {:ok, struct!(URI, data)} + end + + # When dumping data to the database, we *expect* a URI struct + # but any value could be inserted into the schema struct at runtime, + # so we need to guard against them. + def dump(%URI{} = uri), do: {:ok, Map.from_struct(uri)} + def dump(_), do: :error + end + + Now we can use our new field type above in our schemas: + + defmodule ShortUrl do + use Ecto.Schema + + schema "posts" do + field :original_url, EctoURI + end + end + + Note: `nil` values are always bypassed and cannot be handled by + custom types. 
+ + ## Custom types and primary keys + + Remember that, if you change the type of your primary keys, + you will also need to change the type of all associations that + point to said primary key. + + Imagine you want to encode the ID so they cannot enumerate the + content in your application. An Ecto type could handle the conversion + between the encoded version of the id and its representation in the + database. For the sake of simplicity, we'll use base64 encoding in + this example: + + defmodule EncodedId do + use Ecto.Type + + def type, do: :id + + def cast(id) when is_integer(id) do + {:ok, encode_id(id)} + end + def cast(_), do: :error + + def dump(id) when is_binary(id) do + Base.decode64(id) + end + + def load(id) when is_integer(id) do + {:ok, encode_id(id)} + end + + defp encode_id(id) do + id + |> Integer.to_string() + |> Base.encode64 + end + end + + To use it as the type for the id in our schema, we can use the + `@primary_key` module attribute: + + defmodule BlogPost do + use Ecto.Schema + + @primary_key {:id, EncodedId, autogenerate: true} + schema "posts" do + belongs_to :author, Author, type: EncodedId + field :content, :string + end + end + + defmodule Author do + use Ecto.Schema + + @primary_key {:id, EncodedId, autogenerate: true} + schema "authors" do + field :name, :string + has_many :posts, BlogPost + end + end + + The `@primary_key` attribute will tell ecto which type to + use for the id. + + Note the `type: EncodedId` option given to `belongs_to` in + the `BlogPost` schema. By default, Ecto will treat + associations as if their keys were `:integer`s. Our primary + keys are a custom type, so when Ecto tries to cast those + ids, it will fail. + + Alternatively, you can set `@foreign_key_type EncodedId` + after `@primary_key` to automatically configure the type + of all `belongs_to` fields. + """ + + import Kernel, except: [match?: 2] + + @doc false + defmacro __using__(_opts) do + quote location: :keep do + @behaviour Ecto.Type + def embed_as(_), do: :self + def equal?(term1, term2), do: term1 == term2 + defoverridable [embed_as: 1, equal?: 2] + end + end + + @typedoc "An Ecto type, primitive or custom." + @type t :: primitive | custom + + @typedoc "Primitive Ecto types (handled by Ecto)." + @type primitive :: base | composite + + @typedoc "Custom types are represented by user-defined modules." + @type custom :: module | {:parameterized, module, term} + + @type base :: :integer | :float | :boolean | :string | :map | + :binary | :decimal | :id | :binary_id | + :utc_datetime | :naive_datetime | :date | :time | :any | + :utc_datetime_usec | :naive_datetime_usec | :time_usec + + @type composite :: {:array, t} | {:map, t} | private_composite + + @typep private_composite :: {:maybe, t} | {:in, t} | {:param, :any_datetime} + + @base ~w( + integer float decimal boolean string map binary id binary_id any + utc_datetime naive_datetime date time + utc_datetime_usec naive_datetime_usec time_usec + )a + @composite ~w(array map maybe in param)a + + @doc """ + Returns the underlying schema type for the custom type. + + For example, if you want to provide your own date + structures, the type function should return `:date`. + + Note this function is not required to return Ecto primitive + types, the type is only required to be known by the adapter. + """ + @callback type :: t + + @doc """ + Casts the given input to the custom type. 
+ + This callback is called on external input and can return any type, + as long as the `dump/1` function is able to convert the returned + value into an Ecto native type. There are two situations where + this callback is called: + + 1. When casting values by `Ecto.Changeset` + 2. When passing arguments to `Ecto.Query` + + You can return `:error` if the given term cannot be cast. + A default error message of "is invalid" will be added to the + changeset. + + You may also return `{:error, keyword()}` to customize the + changeset error message and its metadata. Passing a `:message` + key, will override the default message. It is not possible to + override the `:type` key. + + For `{:array, CustomType}` or `{:map, CustomType}` the returned + keyword list will be erased and the default error will be shown. + """ + @callback cast(term) :: {:ok, term} | :error | {:error, keyword()} + + @doc """ + Loads the given term into a custom type. + + This callback is called when loading data from the database and + receives an Ecto native type. It can return any type, as long as + the `dump/1` function is able to convert the returned value back + into an Ecto native type. + """ + @callback load(term) :: {:ok, term} | :error + + @doc """ + Dumps the given term into an Ecto native type. + + This callback is called with any term that was stored in the struct + and it needs to validate them and convert it to an Ecto native type. + """ + @callback dump(term) :: {:ok, term} | :error + + @doc """ + Checks if two terms are semantically equal. + """ + @callback equal?(term, term) :: boolean + + @doc """ + Dictates how the type should be treated inside embeds. + + By default, the type is sent as itself, without calling + dumping to keep the higher level representation. But + it can be set to `:dump` so that it is dumped before + being encoded. + """ + @callback embed_as(format :: atom) :: :self | :dump + + @doc """ + Generates a loaded version of the data. + + This is callback is invoked when a custom type is given + to `field` with the `:autogenerate` flag. + """ + @callback autogenerate() :: term() + + @optional_callbacks autogenerate: 0 + + ## Functions + + @doc """ + Checks if we have a primitive type. + + iex> primitive?(:string) + true + iex> primitive?(Another) + false + + iex> primitive?({:array, :string}) + true + iex> primitive?({:array, Another}) + true + + """ + @spec primitive?(t) :: boolean + def primitive?({:parameterized, _, _}), do: true + def primitive?({composite, _}) when composite in @composite, do: true + def primitive?(base) when base in @base, do: true + def primitive?(_), do: false + + @doc """ + Checks if the given atom can be used as composite type. + + iex> composite?(:array) + true + iex> composite?(:string) + false + + """ + @spec composite?(atom) :: boolean + def composite?(atom), do: atom in @composite + + @doc """ + Checks if the given atom can be used as base type. + + iex> base?(:string) + true + iex> base?(:array) + false + iex> base?(Custom) + false + + """ + @spec base?(atom) :: boolean + def base?(atom), do: atom in @base + + @doc """ + Gets how the type is treated inside embeds for the given format. + + See `c:embed_as/1`. 
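+
+  For instance, primitive types are kept as themselves (values here are only
+  for illustration):
+
+      iex> Ecto.Type.embed_as(:string, :json)
+      :self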
+ """ + def embed_as({:parameterized, module, params}, format), do: module.embed_as(format, params) + def embed_as({composite, type}, format) when composite in @composite, do: embed_as(type, format) + def embed_as(base, _format) when base in @base, do: :self + def embed_as(mod, format), do: mod.embed_as(format) + + @doc """ + Dumps the `value` for `type` considering it will be embedded in `format`. + + ## Examples + + iex> Ecto.Type.embedded_dump(:decimal, Decimal.new("1"), :json) + {:ok, Decimal.new("1")} + + """ + def embedded_dump(type, value, format) do + case embed_as(type, format) do + :self -> {:ok, value} + :dump -> dump(type, value, &embedded_dump(&1, &2, format)) + end + end + + @doc """ + Loads the `value` for `type` considering it was embedded in `format`. + + ## Examples + + iex> Ecto.Type.embedded_load(:decimal, "1", :json) + {:ok, Decimal.new("1")} + + """ + def embedded_load(type, value, format) do + case embed_as(type, format) do + :self -> + case cast(type, value) do + {:ok, _} = ok -> ok + _ -> :error + end + + :dump -> + load(type, value, &embedded_load(&1, &2, format)) + end + end + + @doc """ + Retrieves the underlying schema type for the given, possibly custom, type. + + iex> type(:string) + :string + iex> type(Ecto.UUID) + :uuid + + iex> type({:array, :string}) + {:array, :string} + iex> type({:array, Ecto.UUID}) + {:array, :uuid} + + iex> type({:map, Ecto.UUID}) + {:map, :uuid} + + """ + @spec type(t) :: t + def type(type) + def type({:parameterized, type, params}), do: type.type(params) + def type({:array, type}), do: {:array, type(type)} + def type({:map, type}), do: {:map, type(type)} + def type(type) when type in @base, do: type + def type(type) when is_atom(type), do: type.type() + def type(type), do: type + + @doc """ + Checks if a given type matches with a primitive type + that can be found in queries. + + iex> match?(:string, :any) + true + iex> match?(:any, :string) + true + iex> match?(:string, :string) + true + + iex> match?({:array, :string}, {:array, :any}) + true + + iex> match?(Ecto.UUID, :uuid) + true + iex> match?(Ecto.UUID, :string) + false + + """ + @spec match?(t, primitive) :: boolean + def match?(schema_type, query_type) do + if primitive?(schema_type) do + do_match?(schema_type, query_type) + else + do_match?(schema_type.type, query_type) + end + end + + defp do_match?(_left, :any), do: true + defp do_match?(:any, _right), do: true + defp do_match?({outer, left}, {outer, right}), do: match?(left, right) + defp do_match?(:decimal, type) when type in [:float, :integer], do: true + defp do_match?(:binary_id, :binary), do: true + defp do_match?(:id, :integer), do: true + defp do_match?(type, type), do: true + defp do_match?(:naive_datetime, {:param, :any_datetime}), do: true + defp do_match?(:naive_datetime_usec, {:param, :any_datetime}), do: true + defp do_match?(:utc_datetime, {:param, :any_datetime}), do: true + defp do_match?(:utc_datetime_usec, {:param, :any_datetime}), do: true + defp do_match?(_, _), do: false + + @doc """ + Dumps a value to the given type. + + Opposite to casting, dumping requires the returned value + to be a valid Ecto type, as it will be sent to the + underlying data store. 
+ + iex> dump(:string, nil) + {:ok, nil} + iex> dump(:string, "foo") + {:ok, "foo"} + + iex> dump(:integer, 1) + {:ok, 1} + iex> dump(:integer, "10") + :error + + iex> dump(:binary, "foo") + {:ok, "foo"} + iex> dump(:binary, 1) + :error + + iex> dump({:array, :integer}, [1, 2, 3]) + {:ok, [1, 2, 3]} + iex> dump({:array, :integer}, [1, "2", 3]) + :error + iex> dump({:array, :binary}, ["1", "2", "3"]) + {:ok, ["1", "2", "3"]} + + """ + @spec dump(t, term) :: {:ok, term} | :error + @spec dump(t, term, (t, term -> {:ok, term} | :error)) :: {:ok, term} | :error + def dump(type, value, dumper \\ &dump/2) + + def dump({:parameterized, module, params}, value, dumper) do + module.dump(value, dumper, params) + end + + def dump(_type, nil, _dumper) do + {:ok, nil} + end + + def dump({:maybe, type}, value, dumper) do + case dump(type, value, dumper) do + {:ok, _} = ok -> ok + :error -> {:ok, value} + end + end + + def dump({:in, type}, value, dumper) do + case dump({:array, type}, value, dumper) do + {:ok, value} -> {:ok, {:in, value}} + :error -> :error + end + end + + def dump({:array, {_, _, _} = type}, value, dumper), do: array(value, type, dumper, false, []) + def dump({:array, type}, value, dumper), do: array(value, type, dumper, true, []) + def dump({:map, type}, value, dumper), do: map(value, type, dumper, false, %{}) + + def dump(:any, value, _dumper), do: {:ok, value} + def dump(:integer, value, _dumper), do: same_integer(value) + def dump(:float, value, _dumper), do: dump_float(value) + def dump(:boolean, value, _dumper), do: same_boolean(value) + def dump(:map, value, _dumper), do: same_map(value) + def dump(:string, value, _dumper), do: same_binary(value) + def dump(:binary, value, _dumper), do: same_binary(value) + def dump(:id, value, _dumper), do: same_integer(value) + def dump(:binary_id, value, _dumper), do: same_binary(value) + def dump(:decimal, value, _dumper), do: same_decimal(value) + def dump(:date, value, _dumper), do: same_date(value) + def dump(:time, value, _dumper), do: dump_time(value) + def dump(:time_usec, value, _dumper), do: dump_time_usec(value) + def dump(:naive_datetime, value, _dumper), do: dump_naive_datetime(value) + def dump(:naive_datetime_usec, value, _dumper), do: dump_naive_datetime_usec(value) + def dump(:utc_datetime, value, _dumper), do: dump_utc_datetime(value) + def dump(:utc_datetime_usec, value, _dumper), do: dump_utc_datetime_usec(value) + def dump({:param, :any_datetime}, value, _dumper), do: dump_any_datetime(value) + def dump(mod, value, _dumper) when is_atom(mod), do: mod.dump(value) + + defp dump_float(term) when is_float(term), do: {:ok, term} + defp dump_float(_), do: :error + + defp dump_time(%Time{} = term), do: {:ok, check_no_usec!(term, :time)} + defp dump_time(_), do: :error + + defp dump_time_usec(%Time{} = term), do: {:ok, check_usec!(term, :time_usec)} + defp dump_time_usec(_), do: :error + + defp dump_any_datetime(%NaiveDateTime{} = term), do: {:ok, term} + defp dump_any_datetime(%DateTime{} = term), do: {:ok, term} + defp dump_any_datetime(_), do: :error + + defp dump_naive_datetime(%NaiveDateTime{} = term), do: + {:ok, check_no_usec!(term, :naive_datetime)} + + defp dump_naive_datetime(_), do: :error + + defp dump_naive_datetime_usec(%NaiveDateTime{} = term), + do: {:ok, check_usec!(term, :naive_datetime_usec)} + + defp dump_naive_datetime_usec(_), do: :error + + defp dump_utc_datetime(%DateTime{} = datetime) do + kind = :utc_datetime + {:ok, datetime |> check_utc_timezone!(kind) |> check_no_usec!(kind)} + end + + defp 
dump_utc_datetime(_), do: :error + + defp dump_utc_datetime_usec(%DateTime{} = datetime) do + kind = :utc_datetime_usec + {:ok, datetime |> check_utc_timezone!(kind) |> check_usec!(kind)} + end + + defp dump_utc_datetime_usec(_), do: :error + + @doc """ + Loads a value with the given type. + + iex> load(:string, nil) + {:ok, nil} + iex> load(:string, "foo") + {:ok, "foo"} + + iex> load(:integer, 1) + {:ok, 1} + iex> load(:integer, "10") + :error + + """ + @spec load(t, term) :: {:ok, term} | :error + @spec load(t, term, (t, term -> {:ok, term} | :error)) :: {:ok, term} | :error + def load(type, value, loader \\ &load/2) + + def load({:parameterized, module, params}, value, loader) do + module.load(value, loader, params) + end + + def load(_type, nil, _loader) do + {:ok, nil} + end + + def load({:maybe, type}, value, loader) do + case load(type, value, loader) do + {:ok, _} = ok -> ok + :error -> {:ok, value} + end + end + + def load({:array, {_, _, _} = type}, value, loader), do: array(value, type, loader, false, []) + def load({:array, type}, value, loader), do: array(value, type, loader, true, []) + def load({:map, type}, value, loader), do: map(value, type, loader, false, %{}) + + def load(:any, value, _loader), do: {:ok, value} + def load(:integer, value, _loader), do: same_integer(value) + def load(:float, value, _loader), do: load_float(value) + def load(:boolean, value, _loader), do: same_boolean(value) + def load(:map, value, _loader), do: same_map(value) + def load(:string, value, _loader), do: same_binary(value) + def load(:binary, value, _loader), do: same_binary(value) + def load(:id, value, _loader), do: same_integer(value) + def load(:binary_id, value, _loader), do: same_binary(value) + def load(:decimal, value, _loader), do: same_decimal(value) + def load(:date, value, _loader), do: same_date(value) + def load(:time, value, _loader), do: load_time(value) + def load(:time_usec, value, _loader), do: load_time_usec(value) + def load(:naive_datetime, value, _loader), do: load_naive_datetime(value) + def load(:naive_datetime_usec, value, _loader), do: load_naive_datetime_usec(value) + def load(:utc_datetime, value, _loader), do: load_utc_datetime(value) + def load(:utc_datetime_usec, value, _loader), do: load_utc_datetime_usec(value) + def load(mod, value, _loader), do: mod.load(value) + + defp load_float(term) when is_float(term), do: {:ok, term} + defp load_float(term) when is_integer(term), do: {:ok, :erlang.float(term)} + defp load_float(_), do: :error + + defp load_time(%Time{} = time), do: {:ok, truncate_usec(time)} + defp load_time(_), do: :error + + defp load_time_usec(%Time{} = time), do: {:ok, pad_usec(time)} + defp load_time_usec(_), do: :error + + # This is a downcast, which is always fine, and in case + # we try to send a naive datetime where a datetime is expected, + # the adapter will either explicitly error (Postgres) or it will + # accept the data (MySQL), which is fine as we always assume UTC + defp load_naive_datetime(%DateTime{} = datetime), + do: {:ok, datetime |> check_utc_timezone!(:naive_datetime) |> DateTime.to_naive() |> truncate_usec()} + + defp load_naive_datetime(%NaiveDateTime{} = naive_datetime), + do: {:ok, truncate_usec(naive_datetime)} + + defp load_naive_datetime(_), do: :error + + defp load_naive_datetime_usec(%DateTime{} = datetime), + do: {:ok, datetime |> check_utc_timezone!(:naive_datetime_usec) |> DateTime.to_naive() |> pad_usec()} + + defp load_naive_datetime_usec(%NaiveDateTime{} = naive_datetime), + do: {:ok, pad_usec(naive_datetime)} + 
+ defp load_naive_datetime_usec(_), do: :error + + # This is an upcast but because we assume the database + # is always in UTC, we can perform it. + defp load_utc_datetime(%NaiveDateTime{} = naive_datetime), + do: {:ok, naive_datetime |> truncate_usec() |> DateTime.from_naive!("Etc/UTC")} + + defp load_utc_datetime(%DateTime{} = datetime), + do: {:ok, datetime |> check_utc_timezone!(:utc_datetime) |> truncate_usec()} + + defp load_utc_datetime(_), + do: :error + + defp load_utc_datetime_usec(%NaiveDateTime{} = naive_datetime), + do: {:ok, naive_datetime |> pad_usec() |> DateTime.from_naive!("Etc/UTC")} + + defp load_utc_datetime_usec(%DateTime{} = datetime), + do: {:ok, datetime |> check_utc_timezone!(:utc_datetime_usec) |> pad_usec()} + + defp load_utc_datetime_usec(_), + do: :error + + @doc """ + Casts a value to the given type. + + `cast/2` is used by the finder queries and changesets to cast outside values to + specific types. + + Note that nil can be cast to all primitive types as data stores allow nil to be + set on any column. + + NaN and infinite decimals are not supported, use custom types instead. + + iex> cast(:any, "whatever") + {:ok, "whatever"} + + iex> cast(:any, nil) + {:ok, nil} + iex> cast(:string, nil) + {:ok, nil} + + iex> cast(:integer, 1) + {:ok, 1} + iex> cast(:integer, "1") + {:ok, 1} + iex> cast(:integer, "1.0") + :error + + iex> cast(:id, 1) + {:ok, 1} + iex> cast(:id, "1") + {:ok, 1} + iex> cast(:id, "1.0") + :error + + iex> cast(:float, 1.0) + {:ok, 1.0} + iex> cast(:float, 1) + {:ok, 1.0} + iex> cast(:float, "1") + {:ok, 1.0} + iex> cast(:float, "1.0") + {:ok, 1.0} + iex> cast(:float, "1-foo") + :error + + iex> cast(:boolean, true) + {:ok, true} + iex> cast(:boolean, false) + {:ok, false} + iex> cast(:boolean, "1") + {:ok, true} + iex> cast(:boolean, "0") + {:ok, false} + iex> cast(:boolean, "whatever") + :error + + iex> cast(:string, "beef") + {:ok, "beef"} + iex> cast(:binary, "beef") + {:ok, "beef"} + + iex> cast(:decimal, Decimal.new("1.0")) + {:ok, Decimal.new("1.0")} + iex> cast(:decimal, "1.0bad") + :error + + iex> cast({:array, :integer}, [1, 2, 3]) + {:ok, [1, 2, 3]} + iex> cast({:array, :integer}, ["1", "2", "3"]) + {:ok, [1, 2, 3]} + iex> cast({:array, :string}, [1, 2, 3]) + :error + iex> cast(:string, [1, 2, 3]) + :error + + """ + @spec cast(t, term) :: {:ok, term} | {:error, keyword()} | :error + def cast({:parameterized, type, params}, value), do: type.cast(value, params) + def cast({:in, _type}, nil), do: :error + def cast(_type, nil), do: {:ok, nil} + + def cast({:maybe, type}, value) do + case cast(type, value) do + {:ok, _} = ok -> ok + _ -> {:ok, value} + end + end + + def cast(type, value) do + cast_fun(type).(value) + end + + defp cast_fun(:integer), do: &cast_integer/1 + defp cast_fun(:float), do: &cast_float/1 + defp cast_fun(:boolean), do: &cast_boolean/1 + defp cast_fun(:map), do: &cast_map/1 + defp cast_fun(:string), do: &cast_binary/1 + defp cast_fun(:binary), do: &cast_binary/1 + defp cast_fun(:id), do: &cast_integer/1 + defp cast_fun(:binary_id), do: &cast_binary/1 + defp cast_fun(:any), do: &{:ok, &1} + defp cast_fun(:decimal), do: &cast_decimal/1 + defp cast_fun(:date), do: &cast_date/1 + defp cast_fun(:time), do: &maybe_truncate_usec(cast_time(&1)) + defp cast_fun(:time_usec), do: &maybe_pad_usec(cast_time(&1)) + defp cast_fun(:naive_datetime), do: &maybe_truncate_usec(cast_naive_datetime(&1)) + defp cast_fun(:naive_datetime_usec), do: &maybe_pad_usec(cast_naive_datetime(&1)) + defp cast_fun(:utc_datetime), do: 
&maybe_truncate_usec(cast_utc_datetime(&1)) + defp cast_fun(:utc_datetime_usec), do: &maybe_pad_usec(cast_utc_datetime(&1)) + defp cast_fun({:param, :any_datetime}), do: &cast_any_datetime(&1) + defp cast_fun({:parameterized, mod, params}), do: &mod.cast(&1, params) + defp cast_fun({:in, type}), do: cast_fun({:array, type}) + + defp cast_fun({:array, {:parameterized, _, _} = type}) do + fun = cast_fun(type) + &array(&1, fun, false, []) + end + + defp cast_fun({:array, type}) do + fun = cast_fun(type) + &array(&1, fun, true, []) + end + + defp cast_fun({:map, {:parameterized, _, _} = type}) do + fun = cast_fun(type) + &map(&1, fun, false, %{}) + end + + defp cast_fun({:map, type}) do + fun = cast_fun(type) + &map(&1, fun, true, %{}) + end + + defp cast_fun(mod) when is_atom(mod) do + fn + nil -> {:ok, nil} + value -> mod.cast(value) + end + end + + defp cast_integer(term) when is_binary(term) do + case Integer.parse(term) do + {integer, ""} -> {:ok, integer} + _ -> :error + end + end + + defp cast_integer(term) when is_integer(term), do: {:ok, term} + defp cast_integer(_), do: :error + + defp cast_float(term) when is_binary(term) do + case Float.parse(term) do + {float, ""} -> {:ok, float} + _ -> :error + end + end + + defp cast_float(term) when is_float(term), do: {:ok, term} + defp cast_float(term) when is_integer(term), do: {:ok, :erlang.float(term)} + defp cast_float(_), do: :error + + defp cast_decimal(term) when is_binary(term) do + case Decimal.parse(term) do + {:ok, decimal} -> check_decimal(decimal, false) + # The following two clauses exist to support earlier versions of Decimal. + {decimal, ""} -> check_decimal(decimal, false) + {_, remainder} when is_binary(remainder) and byte_size(remainder) > 0 -> :error + :error -> :error + end + end + defp cast_decimal(term), do: same_decimal(term) + + defp cast_boolean(term) when term in ~w(true 1), do: {:ok, true} + defp cast_boolean(term) when term in ~w(false 0), do: {:ok, false} + defp cast_boolean(term) when is_boolean(term), do: {:ok, term} + defp cast_boolean(_), do: :error + + defp cast_binary(term) when is_binary(term), do: {:ok, term} + defp cast_binary(_), do: :error + + defp cast_map(term) when is_map(term), do: {:ok, term} + defp cast_map(_), do: :error + + ## Shared helpers + + @compile {:inline, same_integer: 1, same_boolean: 1, same_map: 1, same_decimal: 1, same_date: 1} + defp same_integer(term) when is_integer(term), do: {:ok, term} + defp same_integer(_), do: :error + + defp same_boolean(term) when is_boolean(term), do: {:ok, term} + defp same_boolean(_), do: :error + + defp same_binary(term) when is_binary(term), do: {:ok, term} + defp same_binary(_), do: :error + + defp same_map(term) when is_map(term), do: {:ok, term} + defp same_map(_), do: :error + + defp same_decimal(term) when is_integer(term), do: {:ok, Decimal.new(term)} + defp same_decimal(term) when is_float(term), do: {:ok, Decimal.from_float(term)} + defp same_decimal(%Decimal{} = term), do: check_decimal(term, true) + defp same_decimal(_), do: :error + + defp same_date(%Date{} = term), do: {:ok, term} + defp same_date(_), do: :error + + @doc false + @spec filter_empty_values(t, any, [any]) :: {:ok, any} | :empty + def filter_empty_values({:array, type}, value, empty_values) when is_list(value) do + value = + for elem <- value, + {:ok, elem} <- [filter_empty_values(type, elem, empty_values)], + do: elem + + if value in empty_values do + :empty + else + {:ok, value} + end + end + + def filter_empty_values(_type, value, empty_values) do + if value in 
empty_values do
+      :empty
+    else
+      {:ok, value}
+    end
+  end
+
+  ## Adapter related
+
+  @doc false
+  def adapter_autogenerate(adapter, type) do
+    type
+    |> type()
+    |> adapter.autogenerate()
+  end
+
+  @doc false
+  def adapter_load(adapter, {:parameterized, module, params} = type, value) do
+    process_loaders(adapter.loaders(module.type(params), type), {:ok, value}, adapter)
+  end
+  def adapter_load(_adapter, _type, nil) do
+    {:ok, nil}
+  end
+  def adapter_load(adapter, type, value) do
+    if of_base_type?(type, value) do
+      {:ok, value}
+    else
+      process_loaders(adapter.loaders(type(type), type), {:ok, value}, adapter)
+    end
+  end
+
+  defp process_loaders(_, :error, _adapter),
+    do: :error
+  defp process_loaders([fun|t], {:ok, value}, adapter) when is_function(fun),
+    do: process_loaders(t, fun.(value), adapter)
+  defp process_loaders([type|t], {:ok, value}, adapter),
+    do: process_loaders(t, load(type, value, &adapter_load(adapter, &1, &2)), adapter)
+  defp process_loaders([], {:ok, _} = acc, _adapter),
+    do: acc
+
+  @doc false
+  def adapter_dump(adapter, {:parameterized, module, params} = type, value) do
+    process_dumpers(adapter.dumpers(module.type(params), type), {:ok, value}, adapter)
+  end
+  def adapter_dump(_adapter, type, nil) do
+    dump(type, nil)
+  end
+  def adapter_dump(adapter, type, value) do
+    process_dumpers(adapter.dumpers(type(type), type), {:ok, value}, adapter)
+  end
+
+  defp process_dumpers(_, :error, _adapter),
+    do: :error
+  defp process_dumpers([fun|t], {:ok, value}, adapter) when is_function(fun),
+    do: process_dumpers(t, fun.(value), adapter)
+  defp process_dumpers([type|t], {:ok, value}, adapter),
+    do: process_dumpers(t, dump(type, value, &adapter_dump(adapter, &1, &2)), adapter)
+  defp process_dumpers([], {:ok, _} = acc, _adapter),
+    do: acc
+
+  ## Date
+
+  defp cast_date(binary) when is_binary(binary) do
+    case Date.from_iso8601(binary) do
+      {:ok, _} = ok ->
+        ok
+      {:error, _} ->
+        case NaiveDateTime.from_iso8601(binary) do
+          {:ok, naive_datetime} -> {:ok, NaiveDateTime.to_date(naive_datetime)}
+          {:error, _} -> :error
+        end
+    end
+  end
+  defp cast_date(%{"year" => empty, "month" => empty, "day" => empty}) when empty in ["", nil],
+    do: {:ok, nil}
+  defp cast_date(%{year: empty, month: empty, day: empty}) when empty in ["", nil],
+    do: {:ok, nil}
+  defp cast_date(%{"year" => year, "month" => month, "day" => day}),
+    do: cast_date(to_i(year), to_i(month), to_i(day))
+  defp cast_date(%{year: year, month: month, day: day}),
+    do: cast_date(to_i(year), to_i(month), to_i(day))
+  defp cast_date(_),
+    do: :error
+
+  defp cast_date(year, month, day) when is_integer(year) and is_integer(month) and is_integer(day) do
+    case Date.new(year, month, day) do
+      {:ok, _} = ok -> ok
+      {:error, _} -> :error
+    end
+  end
+  defp cast_date(_, _, _),
+    do: :error
+
+  ## Time
+
+  defp cast_time(<<hour::2-bytes, ?:, minute::2-bytes>>),
+    do: cast_time(to_i(hour), to_i(minute), 0, nil)
+  defp cast_time(binary) when is_binary(binary) do
+    case Time.from_iso8601(binary) do
+      {:ok, _} = ok -> ok
+      {:error, _} -> :error
+    end
+  end
+  defp cast_time(%{"hour" => empty, "minute" => empty}) when empty in ["", nil],
+    do: {:ok, nil}
+  defp cast_time(%{hour: empty, minute: empty}) when empty in ["", nil],
+    do: {:ok, nil}
+  defp cast_time(%{"hour" => hour, "minute" => minute} = map),
+    do: cast_time(to_i(hour), to_i(minute), to_i(Map.get(map, "second")), to_i(Map.get(map, "microsecond")))
+  defp cast_time(%{hour: hour, minute: minute, second: second, microsecond: {microsecond, precision}}),
+    do: cast_time(to_i(hour), to_i(minute), to_i(second), {to_i(microsecond), to_i(precision)})
+  defp cast_time(%{hour: hour, minute: minute} = map),
+    do: cast_time(to_i(hour), to_i(minute), to_i(Map.get(map, :second)), to_i(Map.get(map, :microsecond)))
+  defp cast_time(_),
+    do: :error
+
+  defp cast_time(hour, minute, sec, usec) when is_integer(usec) do
+    cast_time(hour, minute, sec, {usec, 6})
+  end
+  defp cast_time(hour, minute, sec, nil) do
+    cast_time(hour, minute, sec, {0, 0})
+  end
+  defp cast_time(hour, minute, sec, {usec, precision})
+       when is_integer(hour) and is_integer(minute) and
+            (is_integer(sec) or is_nil(sec)) and is_integer(usec) and is_integer(precision) do
+    case Time.new(hour, minute, sec || 0, {usec, precision}) do
+      {:ok, _} = ok -> ok
+      {:error, _} -> :error
+    end
+  end
+  defp cast_time(_, _, _, _) do
+    :error
+  end
+
+  defp cast_any_datetime(%DateTime{} = datetime), do: cast_utc_datetime(datetime)
+  defp cast_any_datetime(other), do: cast_naive_datetime(other)
+
+  ## Naive datetime
+
+  defp cast_naive_datetime("-" <> rest) do
+    with {:ok, naive_datetime} <- cast_naive_datetime(rest) do
+      {:ok, %{naive_datetime | year: naive_datetime.year * -1}}
+    end
+  end
+
+  defp cast_naive_datetime(<<year::4-bytes, ?-, month::2-bytes, ?-, day::2-bytes, sep, hour::2-bytes, ?:, minute::2-bytes>>)
+       when sep in [?\s, ?T] do
+    case NaiveDateTime.new(to_i(year), to_i(month), to_i(day), to_i(hour), to_i(minute), 0) do
+      {:ok, _} = ok -> ok
+      _ -> :error
+    end
+  end
+
+  defp cast_naive_datetime(binary) when is_binary(binary) do
+    case NaiveDateTime.from_iso8601(binary) do
+      {:ok, _} = ok -> ok
+      {:error, _} -> :error
+    end
+  end
+
+  defp cast_naive_datetime(%{"year" => empty, "month" => empty, "day" => empty,
+                             "hour" => empty, "minute" => empty}) when empty in ["", nil],
+    do: {:ok, nil}
+
+  defp cast_naive_datetime(%{year: empty, month: empty, day: empty,
+                             hour: empty, minute: empty}) when empty in ["", nil],
+    do: {:ok, nil}
+
+  defp cast_naive_datetime(%{} = map) do
+    with {:ok, %Date{} = date} <- cast_date(map),
+         {:ok, %Time{} = time} <- cast_time(map) do
+      NaiveDateTime.new(date, time)
+    else
+      _ -> :error
+    end
+  end
+
+  defp cast_naive_datetime(_) do
+    :error
+  end
+
+  ## UTC datetime
+
+  defp cast_utc_datetime("-" <> rest) do
+    with {:ok, utc_datetime} <- cast_utc_datetime(rest) do
+      {:ok, %{utc_datetime | year: utc_datetime.year * -1}}
+    end
+  end
+
+  defp cast_utc_datetime(<<year::4-bytes, ?-, month::2-bytes, ?-, day::2-bytes, sep, hour::2-bytes, ?:, minute::2-bytes>>)
+       when sep in [?\s, ?T] do
+    case NaiveDateTime.new(to_i(year), to_i(month), to_i(day), to_i(hour), to_i(minute), 0) do
+      {:ok, naive_datetime} -> {:ok, DateTime.from_naive!(naive_datetime, "Etc/UTC")}
+      _ -> :error
+    end
+  end
+
+  defp cast_utc_datetime(binary) when is_binary(binary) do
+    case DateTime.from_iso8601(binary) do
+      {:ok, datetime, _offset} -> {:ok, datetime}
+      {:error, :missing_offset} ->
+        case NaiveDateTime.from_iso8601(binary) do
+          {:ok, naive_datetime} -> {:ok, DateTime.from_naive!(naive_datetime, "Etc/UTC")}
+          {:error, _} -> :error
+        end
+      {:error, _} -> :error
+    end
+  end
+  defp cast_utc_datetime(%DateTime{time_zone: "Etc/UTC"} = datetime), do: {:ok, datetime}
+  defp cast_utc_datetime(%DateTime{} = datetime) do
+    case (datetime |> DateTime.to_unix(:microsecond) |> DateTime.from_unix(:microsecond)) do
+      {:ok, _} = ok -> ok
+      {:error, _} -> :error
+    end
+  end
+  defp cast_utc_datetime(value) do
+    case cast_naive_datetime(value) do
+      {:ok, %NaiveDateTime{} = naive_datetime} ->
+        {:ok, DateTime.from_naive!(naive_datetime, "Etc/UTC")}
+      {:ok, _} = ok ->
+        ok
+      :error ->
+        :error
+    end
+  end
+
+  @doc """
+  Checks if two terms are equal.
+
+  Depending on the given `type` performs a structural or semantical comparison.
+ + ## Examples + + iex> equal?(:integer, 1, 1) + true + iex> equal?(:decimal, Decimal.new("1"), Decimal.new("1.00")) + true + + """ + @spec equal?(t, term, term) :: boolean + def equal?(_, nil, nil), do: true + + def equal?(type, term1, term2) do + if fun = equal_fun(type) do + fun.(term1, term2) + else + term1 == term2 + end + end + + @doc """ + Checks if `collection` includes a `term`. + + Depending on the given `type` performs a structural or semantical comparison. + + ## Examples + + iex> include?(:integer, 1, 1..3) + true + iex> include?(:decimal, Decimal.new("1"), [Decimal.new("1.00"), Decimal.new("2.00")]) + true + + """ + @spec include?(t, term, Enum.t()) :: boolean + def include?(type, term, collection) do + if fun = equal_fun(type) do + Enum.any?(collection, &fun.(term, &1)) + else + term in collection + end + end + + defp equal_fun(:decimal), do: &equal_decimal?/2 + defp equal_fun(t) when t in [:time, :time_usec], do: &equal_time?/2 + defp equal_fun(t) when t in [:utc_datetime, :utc_datetime_usec], do: &equal_utc_datetime?/2 + defp equal_fun(t) when t in [:naive_datetime, :naive_datetime_usec], do: &equal_naive_datetime?/2 + defp equal_fun(t) when t in @base, do: nil + + defp equal_fun({:array, type}) do + if fun = equal_fun(type) do + &equal_list?(fun, &1, &2) + end + end + + defp equal_fun({:map, type}) do + if fun = equal_fun(type) do + &equal_map?(fun, &1, &2) + end + end + + defp equal_fun({:parameterized, mod, params}) do + &mod.equal?(&1, &2, params) + end + + defp equal_fun(mod) when is_atom(mod), do: &mod.equal?/2 + + defp equal_decimal?(%Decimal{} = a, %Decimal{} = b), do: Decimal.equal?(a, b) + defp equal_decimal?(_, _), do: false + + defp equal_time?(%Time{} = a, %Time{} = b), do: Time.compare(a, b) == :eq + defp equal_time?(_, _), do: false + + defp equal_utc_datetime?(%DateTime{} = a, %DateTime{} = b), do: DateTime.compare(a, b) == :eq + defp equal_utc_datetime?(_, _), do: false + + defp equal_naive_datetime?(%NaiveDateTime{} = a, %NaiveDateTime{} = b), + do: NaiveDateTime.compare(a, b) == :eq + defp equal_naive_datetime?(_, _), + do: false + + defp equal_list?(fun, [nil | xs], [nil | ys]), do: equal_list?(fun, xs, ys) + defp equal_list?(fun, [x | xs], [y | ys]), do: fun.(x, y) and equal_list?(fun, xs, ys) + defp equal_list?(_fun, [], []), do: true + defp equal_list?(_fun, _, _), do: false + + defp equal_map?(_fun, map1, map2) when map_size(map1) != map_size(map2) do + false + end + + defp equal_map?(fun, %{} = map1, %{} = map2) do + equal_map?(fun, Map.to_list(map1), map2) + end + + defp equal_map?(fun, [{key, nil} | tail], other_map) do + case other_map do + %{^key => nil} -> equal_map?(fun, tail, other_map) + _ -> false + end + end + + defp equal_map?(fun, [{key, val} | tail], other_map) do + case other_map do + %{^key => other_val} -> fun.(val, other_val) and equal_map?(fun, tail, other_map) + _ -> false + end + end + + defp equal_map?(_fun, [], _) do + true + end + + defp equal_map?(_fun, _, _) do + false + end + + ## Helpers + + # Checks if a value is of the given primitive type. 
+ defp of_base_type?(:any, _), do: true + defp of_base_type?(:id, term), do: is_integer(term) + defp of_base_type?(:float, term), do: is_float(term) + defp of_base_type?(:integer, term), do: is_integer(term) + defp of_base_type?(:boolean, term), do: is_boolean(term) + defp of_base_type?(:binary, term), do: is_binary(term) + defp of_base_type?(:string, term), do: is_binary(term) + defp of_base_type?(:map, term), do: is_map(term) and not Map.has_key?(term, :__struct__) + defp of_base_type?(:decimal, value), do: Kernel.match?(%Decimal{}, value) + defp of_base_type?(:date, value), do: Kernel.match?(%Date{}, value) + defp of_base_type?(_, _), do: false + + defp array([nil | t], fun, true, acc) do + array(t, fun, true, [nil | acc]) + end + + defp array([h | t], fun, skip_nil?, acc) do + case fun.(h) do + {:ok, h} -> array(t, fun, skip_nil?, [h | acc]) + :error -> :error + {:error, _custom_errors} -> :error + end + end + + defp array([], _fun, _skip_nil?,acc) do + {:ok, Enum.reverse(acc)} + end + + defp array(_, _, _, _) do + :error + end + + defp map(map, fun, skip_nil?, acc) when is_map(map) do + map_each(Map.to_list(map), fun, skip_nil?, acc) + end + + defp map(_, _, _, _) do + :error + end + + defp map_each([{key, nil} | t], fun, true, acc) do + map_each(t, fun, true, Map.put(acc, key, nil)) + end + + defp map_each([{key, value} | t], fun, skip_nil?, acc) do + case fun.(value) do + {:ok, value} -> map_each(t, fun, skip_nil?, Map.put(acc, key, value)) + :error -> :error + {:error, _custom_errors} -> :error + end + end + + defp map_each([], _fun, _skip_nil?, acc) do + {:ok, acc} + end + + defp array([nil | t], type, fun, true, acc) do + array(t, type, fun, true, [nil | acc]) + end + + defp array([h | t], type, fun, skip_nil?, acc) do + case fun.(type, h) do + {:ok, h} -> array(t, type, fun, skip_nil?, [h | acc]) + :error -> :error + end + end + + defp array([], _type, _fun, _skip_nil?, acc) do + {:ok, Enum.reverse(acc)} + end + + defp array(_, _, _, _, _) do + :error + end + + defp map(map, type, fun, skip_nil?, acc) when is_map(map) do + map_each(Map.to_list(map), type, fun, skip_nil?, acc) + end + + defp map(_, _, _, _, _) do + :error + end + + defp map_each([{key, value} | t], type, fun, skip_nil?, acc) do + case fun.(type, value) do + {:ok, value} -> map_each(t, type, fun, skip_nil?, Map.put(acc, key, value)) + :error -> :error + end + end + + defp map_each([], _type, _fun, _skip_nil?, acc) do + {:ok, acc} + end + + defp to_i(nil), do: nil + defp to_i(int) when is_integer(int), do: int + defp to_i(bin) when is_binary(bin) do + case Integer.parse(bin) do + {int, ""} -> int + _ -> nil + end + end + + defp maybe_truncate_usec({:ok, struct}), do: {:ok, truncate_usec(struct)} + defp maybe_truncate_usec(:error), do: :error + + defp maybe_pad_usec({:ok, struct}), do: {:ok, pad_usec(struct)} + defp maybe_pad_usec(:error), do: :error + + defp truncate_usec(nil), do: nil + defp truncate_usec(%{microsecond: {0, 0}} = struct), do: struct + defp truncate_usec(struct), do: %{struct | microsecond: {0, 0}} + + defp pad_usec(nil), do: nil + defp pad_usec(%{microsecond: {_, 6}} = struct), do: struct + + defp pad_usec(%{microsecond: {microsecond, _}} = struct), + do: %{struct | microsecond: {microsecond, 6}} + + defp check_utc_timezone!(%{time_zone: "Etc/UTC"} = datetime, _kind), do: datetime + + defp check_utc_timezone!(datetime, kind) do + raise ArgumentError, + "#{inspect kind} expects the time zone to be \"Etc/UTC\", got `#{inspect(datetime)}`" + end + + defp check_usec!(%{microsecond: {_, 6}} = datetime, 
_kind), do: datetime + + defp check_usec!(datetime, kind) do + raise ArgumentError, + "#{inspect(kind)} expects microsecond precision, got: #{inspect(datetime)}" + end + + defp check_no_usec!(%{microsecond: {0, 0}} = datetime, _kind), do: datetime + + defp check_no_usec!(%struct{} = datetime, kind) do + raise ArgumentError, """ + #{inspect(kind)} expects microseconds to be empty, got: #{inspect(datetime)} + + Use `#{inspect(struct)}.truncate(#{kind}, :second)` (available in Elixir v1.6+) to remove microseconds. + """ + end + + defp check_decimal(%Decimal{coef: coef} = decimal, _) when is_integer(coef), do: {:ok, decimal} + defp check_decimal(_decimal, false), do: :error + defp check_decimal(decimal, true) do + raise ArgumentError, """ + #{inspect(decimal)} is not allowed for type :decimal + + `+Infinity`, `-Infinity`, and `NaN` values are not supported, even though the `Decimal` library handles them. \ + To support them, you can create a custom type. + """ + end +end diff --git a/deps/ecto/lib/ecto/uuid.ex b/deps/ecto/lib/ecto/uuid.ex new file mode 100644 index 0000000..d4b6f4f --- /dev/null +++ b/deps/ecto/lib/ecto/uuid.ex @@ -0,0 +1,223 @@ +defmodule Ecto.UUID do + @moduledoc """ + An Ecto type for UUID strings. + """ + + use Ecto.Type + + @typedoc """ + A hex-encoded UUID string. + """ + @type t :: <<_::288>> + + @typedoc """ + A raw binary representation of a UUID. + """ + @type raw :: <<_::128>> + + @doc false + def type, do: :uuid + + @doc """ + Casts to a UUID. + """ + @spec cast(t | raw | any) :: {:ok, t} | :error + def cast(<< a1, a2, a3, a4, a5, a6, a7, a8, ?-, + b1, b2, b3, b4, ?-, + c1, c2, c3, c4, ?-, + d1, d2, d3, d4, ?-, + e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12 >>) do + << c(a1), c(a2), c(a3), c(a4), + c(a5), c(a6), c(a7), c(a8), ?-, + c(b1), c(b2), c(b3), c(b4), ?-, + c(c1), c(c2), c(c3), c(c4), ?-, + c(d1), c(d2), c(d3), c(d4), ?-, + c(e1), c(e2), c(e3), c(e4), + c(e5), c(e6), c(e7), c(e8), + c(e9), c(e10), c(e11), c(e12) >> + catch + :error -> :error + else + hex_uuid -> {:ok, hex_uuid} + end + + def cast(<< _::128 >> = raw_uuid), do: {:ok, encode(raw_uuid)} + def cast(_), do: :error + + @doc """ + Same as `cast/1` but raises `Ecto.CastError` on invalid arguments. + """ + @spec cast!(t | raw | any) :: t + def cast!(value) do + case cast(value) do + {:ok, hex_uuid} -> hex_uuid + :error -> raise Ecto.CastError, type: __MODULE__, value: value + end + end + + @compile {:inline, c: 1} + + defp c(?0), do: ?0 + defp c(?1), do: ?1 + defp c(?2), do: ?2 + defp c(?3), do: ?3 + defp c(?4), do: ?4 + defp c(?5), do: ?5 + defp c(?6), do: ?6 + defp c(?7), do: ?7 + defp c(?8), do: ?8 + defp c(?9), do: ?9 + defp c(?A), do: ?a + defp c(?B), do: ?b + defp c(?C), do: ?c + defp c(?D), do: ?d + defp c(?E), do: ?e + defp c(?F), do: ?f + defp c(?a), do: ?a + defp c(?b), do: ?b + defp c(?c), do: ?c + defp c(?d), do: ?d + defp c(?e), do: ?e + defp c(?f), do: ?f + defp c(_), do: throw(:error) + + @doc """ + Converts a string representing a UUID into a raw binary. 
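+
+  For example (the UUID value below is arbitrary), dumping turns the
+  36-character hex-encoded string into its 16-byte raw form:
+
+      {:ok, raw} = Ecto.UUID.dump("601d74e4-a8d3-4b6e-8365-eddb4c893327")
+      byte_size(raw)
+      #=> 16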
+ """ + @spec dump(t | any) :: {:ok, raw} | :error + def dump(<< a1, a2, a3, a4, a5, a6, a7, a8, ?-, + b1, b2, b3, b4, ?-, + c1, c2, c3, c4, ?-, + d1, d2, d3, d4, ?-, + e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12 >>) do + << d(a1)::4, d(a2)::4, d(a3)::4, d(a4)::4, + d(a5)::4, d(a6)::4, d(a7)::4, d(a8)::4, + d(b1)::4, d(b2)::4, d(b3)::4, d(b4)::4, + d(c1)::4, d(c2)::4, d(c3)::4, d(c4)::4, + d(d1)::4, d(d2)::4, d(d3)::4, d(d4)::4, + d(e1)::4, d(e2)::4, d(e3)::4, d(e4)::4, + d(e5)::4, d(e6)::4, d(e7)::4, d(e8)::4, + d(e9)::4, d(e10)::4, d(e11)::4, d(e12)::4 >> + catch + :error -> :error + else + raw_uuid -> {:ok, raw_uuid} + end + + def dump(_), do: :error + + @compile {:inline, d: 1} + + defp d(?0), do: 0 + defp d(?1), do: 1 + defp d(?2), do: 2 + defp d(?3), do: 3 + defp d(?4), do: 4 + defp d(?5), do: 5 + defp d(?6), do: 6 + defp d(?7), do: 7 + defp d(?8), do: 8 + defp d(?9), do: 9 + defp d(?A), do: 10 + defp d(?B), do: 11 + defp d(?C), do: 12 + defp d(?D), do: 13 + defp d(?E), do: 14 + defp d(?F), do: 15 + defp d(?a), do: 10 + defp d(?b), do: 11 + defp d(?c), do: 12 + defp d(?d), do: 13 + defp d(?e), do: 14 + defp d(?f), do: 15 + defp d(_), do: throw(:error) + + @doc """ + Same as `dump/1` but raises `Ecto.ArgumentError` on invalid arguments. + """ + @spec dump!(t | any) :: raw + def dump!(value) do + case dump(value) do + {:ok, raw_uuid} -> raw_uuid + :error -> raise ArgumentError, "cannot dump given UUID to binary: #{inspect(value)}" + end + end + + @doc """ + Converts a binary UUID into a string. + """ + @spec load(raw | any) :: {:ok, t} | :error + def load(<<_::128>> = raw_uuid), do: {:ok, encode(raw_uuid)} + + def load(<<_::64, ?-, _::32, ?-, _::32, ?-, _::32, ?-, _::96>> = string) do + raise ArgumentError, "trying to load string UUID as Ecto.UUID: #{inspect string}. " <> + "Maybe you wanted to declare :uuid as your database field?" + end + + def load(_), do: :error + + @doc """ + Same as `load/1` but raises `Ecto.ArgumentError` on invalid arguments. + """ + @spec load!(raw | any) :: t + def load!(value) do + case load(value) do + {:ok, hex_uuid} -> hex_uuid + :error -> raise ArgumentError, "cannot load given binary as UUID: #{inspect(value)}" + end + end + + @doc """ + Generates a random, version 4 UUID. + """ + @spec generate() :: t + def generate(), do: encode(bingenerate()) + + @doc """ + Generates a random, version 4 UUID in the binary format. + """ + @spec bingenerate() :: raw + def bingenerate() do + <> = :crypto.strong_rand_bytes(16) + <> + end + + # Callback invoked by autogenerate fields. 
+ @doc false + def autogenerate, do: generate() + + @spec encode(raw) :: t + defp encode(<< a1::4, a2::4, a3::4, a4::4, + a5::4, a6::4, a7::4, a8::4, + b1::4, b2::4, b3::4, b4::4, + c1::4, c2::4, c3::4, c4::4, + d1::4, d2::4, d3::4, d4::4, + e1::4, e2::4, e3::4, e4::4, + e5::4, e6::4, e7::4, e8::4, + e9::4, e10::4, e11::4, e12::4 >>) do + << e(a1), e(a2), e(a3), e(a4), e(a5), e(a6), e(a7), e(a8), ?-, + e(b1), e(b2), e(b3), e(b4), ?-, + e(c1), e(c2), e(c3), e(c4), ?-, + e(d1), e(d2), e(d3), e(d4), ?-, + e(e1), e(e2), e(e3), e(e4), e(e5), e(e6), e(e7), e(e8), e(e9), e(e10), e(e11), e(e12) >> + end + + @compile {:inline, e: 1} + + defp e(0), do: ?0 + defp e(1), do: ?1 + defp e(2), do: ?2 + defp e(3), do: ?3 + defp e(4), do: ?4 + defp e(5), do: ?5 + defp e(6), do: ?6 + defp e(7), do: ?7 + defp e(8), do: ?8 + defp e(9), do: ?9 + defp e(10), do: ?a + defp e(11), do: ?b + defp e(12), do: ?c + defp e(13), do: ?d + defp e(14), do: ?e + defp e(15), do: ?f +end diff --git a/deps/ecto/lib/mix/ecto.ex b/deps/ecto/lib/mix/ecto.ex new file mode 100644 index 0000000..4226a1e --- /dev/null +++ b/deps/ecto/lib/mix/ecto.ex @@ -0,0 +1,157 @@ +defmodule Mix.Ecto do + @moduledoc """ + Conveniences for writing Ecto related Mix tasks. + """ + + @doc """ + Parses the repository option from the given command line args list. + + If no repo option is given, it is retrieved from the application environment. + """ + @spec parse_repo([term]) :: [Ecto.Repo.t] + def parse_repo(args) do + parse_repo(args, []) + end + + defp parse_repo([key, value|t], acc) when key in ~w(--repo -r) do + parse_repo t, [Module.concat([value])|acc] + end + + defp parse_repo([_|t], acc) do + parse_repo t, acc + end + + defp parse_repo([], []) do + apps = + if apps_paths = Mix.Project.apps_paths() do + # TODO: Use the proper ordering from Mix.Project.deps_apps + # when we depend on Elixir v1.11+. + apps_paths |> Map.keys() |> Enum.sort() + else + [Mix.Project.config()[:app]] + end + + apps + |> Enum.flat_map(fn app -> + Application.load(app) + Application.get_env(app, :ecto_repos, []) + end) + |> Enum.uniq() + |> case do + [] -> + Mix.shell().error """ + warning: could not find Ecto repos in any of the apps: #{inspect apps}. + + You can avoid this warning by passing the -r flag or by setting the + repositories managed by those applications in your config/config.exs: + + config #{inspect hd(apps)}, ecto_repos: [...] + """ + [] + repos -> + repos + end + end + + defp parse_repo([], acc) do + Enum.reverse(acc) + end + + @doc """ + Ensures the given module is an Ecto.Repo. + """ + @spec ensure_repo(module, list) :: Ecto.Repo.t + def ensure_repo(repo, args) do + # Do not pass the --force switch used by some tasks downstream + args = List.delete(args, "--force") + + # TODO: Use only app.config when we depend on Elixir v1.11+. + if Code.ensure_loaded?(Mix.Tasks.App.Config) do + Mix.Task.run("app.config", args) + else + Mix.Task.run("loadpaths", args) + "--no-compile" not in args && Mix.Task.run("compile", args) + end + + case Code.ensure_compiled(repo) do + {:module, _} -> + if function_exported?(repo, :__adapter__, 0) do + repo + else + Mix.raise "Module #{inspect repo} is not an Ecto.Repo. " <> + "Please configure your app accordingly or pass a repo with the -r option." + end + + {:error, error} -> + Mix.raise "Could not load #{inspect repo}, error: #{inspect error}. " <> + "Please configure your app accordingly or pass a repo with the -r option." + end + end + + @doc """ + Asks if the user wants to open a file based on ECTO_EDITOR. 
+ + By default, it attempts to open the file and line using the + `file:line` notation. For example, if your editor is called + `subl`, it will open the file as: + + subl path/to/file:line + + It is important that you choose an editor command that does + not block nor that attempts to run an editor directly in the + terminal. Command-line based editors likely need extra + configuration so they open up the given file and line in a + separate window. + + Custom editors are supported by using the `__FILE__` and + `__LINE__` notations, for example: + + ECTO_EDITOR="my_editor +__LINE__ __FILE__" + + and Elixir will properly interpolate values. + + """ + @spec open?(binary, non_neg_integer) :: boolean + def open?(file, line \\ 1) do + editor = System.get_env("ECTO_EDITOR") || "" + + if editor != "" do + command = + if editor =~ "__FILE__" or editor =~ "__LINE__" do + editor + |> String.replace("__FILE__", inspect(file)) + |> String.replace("__LINE__", Integer.to_string(line)) + else + "#{editor} #{inspect(file)}:#{line}" + end + + Mix.shell().cmd(command) + true + else + false + end + end + + @doc """ + Gets a path relative to the application path. + + Raises on umbrella application. + """ + def no_umbrella!(task) do + if Mix.Project.umbrella?() do + Mix.raise "Cannot run task #{inspect task} from umbrella project root. " <> + "Change directory to one of the umbrella applications and try again" + end + end + + @doc """ + Returns `true` if module implements behaviour. + """ + def ensure_implements(module, behaviour, message) do + all = Keyword.take(module.__info__(:attributes), [:behaviour]) + unless [behaviour] in Keyword.values(all) do + Mix.raise "Expected #{inspect module} to implement #{inspect behaviour} " <> + "in order to #{message}" + end + end +end diff --git a/deps/ecto/lib/mix/tasks/ecto.create.ex b/deps/ecto/lib/mix/tasks/ecto.create.ex new file mode 100644 index 0000000..ad0145c --- /dev/null +++ b/deps/ecto/lib/mix/tasks/ecto.create.ex @@ -0,0 +1,69 @@ +defmodule Mix.Tasks.Ecto.Create do + use Mix.Task + import Mix.Ecto + + @shortdoc "Creates the repository storage" + + @switches [ + quiet: :boolean, + repo: [:string, :keep], + no_compile: :boolean, + no_deps_check: :boolean + ] + + @aliases [ + r: :repo, + q: :quiet + ] + + @moduledoc """ + Create the storage for the given repository. + + The repositories to create are the ones specified under the + `:ecto_repos` option in the current app configuration. However, + if the `-r` option is given, it replaces the `:ecto_repos` config. + + Since Ecto tasks can only be executed once, if you need to create + multiple repositories, set `:ecto_repos` accordingly or pass the `-r` + flag multiple times. + + ## Examples + + $ mix ecto.create + $ mix ecto.create -r Custom.Repo + + ## Command line options + + * `-r`, `--repo` - the repo to create + * `--quiet` - do not log output + * `--no-compile` - do not compile before creating + * `--no-deps-check` - do not compile before creating + + """ + + @impl true + def run(args) do + repos = parse_repo(args) + {opts, _} = OptionParser.parse! 
args, strict: @switches, aliases: @aliases + + Enum.each repos, fn repo -> + ensure_repo(repo, args) + ensure_implements(repo.__adapter__, Ecto.Adapter.Storage, + "create storage for #{inspect repo}") + case repo.__adapter__.storage_up(repo.config) do + :ok -> + unless opts[:quiet] do + Mix.shell().info "The database for #{inspect repo} has been created" + end + {:error, :already_up} -> + unless opts[:quiet] do + Mix.shell().info "The database for #{inspect repo} has already been created" + end + {:error, term} when is_binary(term) -> + Mix.raise "The database for #{inspect repo} couldn't be created: #{term}" + {:error, term} -> + Mix.raise "The database for #{inspect repo} couldn't be created: #{inspect term}" + end + end + end +end diff --git a/deps/ecto/lib/mix/tasks/ecto.drop.ex b/deps/ecto/lib/mix/tasks/ecto.drop.ex new file mode 100644 index 0000000..98dac8d --- /dev/null +++ b/deps/ecto/lib/mix/tasks/ecto.drop.ex @@ -0,0 +1,96 @@ +defmodule Mix.Tasks.Ecto.Drop do + use Mix.Task + import Mix.Ecto + + @shortdoc "Drops the repository storage" + @default_opts [force: false, force_drop: false] + + @aliases [ + f: :force, + q: :quiet, + r: :repo + ] + + @switches [ + force: :boolean, + force_drop: :boolean, + quiet: :boolean, + repo: [:keep, :string], + no_compile: :boolean, + no_deps_check: :boolean, + ] + + @moduledoc """ + Drop the storage for the given repository. + + The repositories to drop are the ones specified under the + `:ecto_repos` option in the current app configuration. However, + if the `-r` option is given, it replaces the `:ecto_repos` config. + + Since Ecto tasks can only be executed once, if you need to drop + multiple repositories, set `:ecto_repos` accordingly or pass the `-r` + flag multiple times. + + ## Examples + + $ mix ecto.drop + $ mix ecto.drop -r Custom.Repo + + ## Command line options + + * `-r`, `--repo` - the repo to drop + * `-q`, `--quiet` - run the command quietly + * `-f`, `--force` - do not ask for confirmation when dropping the database. + Configuration is asked only when `:start_permanent` is set to true + (typically in production) + * `--force-drop` - force the database to be dropped even + if it has connections to it (requires PostgreSQL 13+) + * `--no-compile` - do not compile before dropping + * `--no-deps-check` - do not compile before dropping + + """ + + @impl true + def run(args) do + repos = parse_repo(args) + {opts, _} = OptionParser.parse! args, strict: @switches, aliases: @aliases + opts = Keyword.merge(@default_opts, opts) + + Enum.each repos, fn repo -> + ensure_repo(repo, args) + ensure_implements(repo.__adapter__, Ecto.Adapter.Storage, + "drop storage for #{inspect repo}") + + if skip_safety_warnings?() or + opts[:force] or + Mix.shell().yes?("Are you sure you want to drop the database for repo #{inspect repo}?") do + drop_database(repo, opts) + end + end + end + + defp skip_safety_warnings? 
do + Mix.Project.config()[:start_permanent] != true + end + + defp drop_database(repo, opts) do + config = + opts + |> Keyword.take([:force_drop]) + |> Keyword.merge(repo.config) + case repo.__adapter__.storage_down(config) do + :ok -> + unless opts[:quiet] do + Mix.shell().info "The database for #{inspect repo} has been dropped" + end + {:error, :already_down} -> + unless opts[:quiet] do + Mix.shell().info "The database for #{inspect repo} has already been dropped" + end + {:error, term} when is_binary(term) -> + Mix.raise "The database for #{inspect repo} couldn't be dropped: #{term}" + {:error, term} -> + Mix.raise "The database for #{inspect repo} couldn't be dropped: #{inspect term}" + end + end +end diff --git a/deps/ecto/lib/mix/tasks/ecto.ex b/deps/ecto/lib/mix/tasks/ecto.ex new file mode 100644 index 0000000..14d10da --- /dev/null +++ b/deps/ecto/lib/mix/tasks/ecto.ex @@ -0,0 +1,30 @@ +defmodule Mix.Tasks.Ecto do + use Mix.Task + + @shortdoc "Prints Ecto help information" + + @moduledoc """ + Prints Ecto tasks and their information. + + $ mix ecto + + """ + + @impl true + def run(args) do + {_opts, args} = OptionParser.parse!(args, strict: []) + + case args do + [] -> general() + _ -> Mix.raise "Invalid arguments, expected: mix ecto" + end + end + + defp general() do + Application.ensure_all_started(:ecto) + Mix.shell().info "Ecto v#{Application.spec(:ecto, :vsn)}" + Mix.shell().info "A toolkit for data mapping and language integrated query for Elixir." + Mix.shell().info "\nAvailable tasks:\n" + Mix.Tasks.Help.run(["--search", "ecto."]) + end +end diff --git a/deps/ecto/lib/mix/tasks/ecto.gen.repo.ex b/deps/ecto/lib/mix/tasks/ecto.gen.repo.ex new file mode 100644 index 0000000..e6f7a31 --- /dev/null +++ b/deps/ecto/lib/mix/tasks/ecto.gen.repo.ex @@ -0,0 +1,110 @@ +defmodule Mix.Tasks.Ecto.Gen.Repo do + use Mix.Task + + import Mix.Ecto + import Mix.Generator + + @shortdoc "Generates a new repository" + + @switches [ + repo: [:string, :keep], + ] + + @aliases [ + r: :repo, + ] + + @moduledoc """ + Generates a new repository. + + The repository will be placed in the `lib` directory. + + ## Examples + + $ mix ecto.gen.repo -r Custom.Repo + + This generator will automatically open the config/config.exs + after generation if you have `ECTO_EDITOR` set in your environment + variable. + + ## Command line options + + * `-r`, `--repo` - the repo to generate + + """ + + @impl true + def run(args) do + no_umbrella!("ecto.gen.repo") + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + + repo = + case Keyword.get_values(opts, :repo) do + [] -> Mix.raise "ecto.gen.repo expects the repository to be given as -r MyApp.Repo" + [repo] -> Module.concat([repo]) + [_ | _] -> Mix.raise "ecto.gen.repo expects a single repository to be given" + end + + config = Mix.Project.config() + underscored = Macro.underscore(inspect(repo)) + + base = Path.basename(underscored) + file = Path.join("lib", underscored) <> ".ex" + app = config[:app] || :YOUR_APP_NAME + opts = [mod: repo, app: app, base: base] + + create_directory Path.dirname(file) + create_file file, repo_template(opts) + config_path = config[:config_path] || "config/config.exs" + + case File.read(config_path) do + {:ok, contents} -> + check = String.contains?(contents, "import Config") + config_first_line = get_first_config_line(check) <> "\n" + new_contents = config_first_line <> "\n" <> config_template(opts) + Mix.shell().info [:green, "* updating ", :reset, config_path] + File.write! 
config_path, String.replace(contents, config_first_line, new_contents) + + {:error, _} -> + create_file config_path, "import Config\n\n" <> config_template(opts) + end + + open?(config_path, 3) + + Mix.shell().info """ + Don't forget to add your new repo to your supervision tree + (typically in lib/#{app}/application.ex): + + def start(_type, _args) do + children = [ + #{inspect repo}, + ] + + And to add it to the list of Ecto repositories in your + configuration files (so Ecto tasks work as expected): + + config #{inspect app}, + ecto_repos: [#{inspect repo}] + + """ + end + + defp get_first_config_line(true), do: "import Config" + defp get_first_config_line(false), do: "use Mix.Config" + + embed_template :repo, """ + defmodule <%= inspect @mod %> do + use Ecto.Repo, + otp_app: <%= inspect @app %>, + adapter: Ecto.Adapters.Postgres + end + """ + + embed_template :config, """ + config <%= inspect @app %>, <%= inspect @mod %>, + database: "<%= @app %>_<%= @base %>", + username: "user", + password: "pass", + hostname: "localhost" + """ +end diff --git a/deps/ecto/mix.exs b/deps/ecto/mix.exs new file mode 100644 index 0000000..ae7f65e --- /dev/null +++ b/deps/ecto/mix.exs @@ -0,0 +1,143 @@ +defmodule Ecto.MixProject do + use Mix.Project + + @source_url "https://github.com/elixir-ecto/ecto" + @version "3.8.4" + + def project do + [ + app: :ecto, + version: @version, + elixir: "~> 1.10", + deps: deps(), + consolidate_protocols: Mix.env() != :test, + elixirc_paths: elixirc_paths(Mix.env()), + + # Hex + description: "A toolkit for data mapping and language integrated query for Elixir", + package: package(), + + # Docs + name: "Ecto", + docs: docs() + ] + end + + def application do + [ + extra_applications: [:logger, :crypto, :eex], + mod: {Ecto.Application, []} + ] + end + + defp deps do + [ + {:telemetry, "~> 0.4 or ~> 1.0"}, + {:decimal, "~> 1.6 or ~> 2.0"}, + {:jason, "~> 1.0", optional: true}, + {:ex_doc, "~> 0.20", only: :docs} + ] + end + + defp package do + [ + maintainers: ["Eric Meadows-Jรถnsson", "Josรฉ Valim", "James Fish", "Michaล‚ Muskaล‚a", "Felipe Stival"], + licenses: ["Apache-2.0"], + links: %{"GitHub" => @source_url}, + files: + ~w(.formatter.exs mix.exs README.md CHANGELOG.md lib) ++ + ~w(integration_test/cases integration_test/support) + ] + end + + defp docs do + [ + main: "Ecto", + source_ref: "v#{@version}", + logo: "guides/images/e.png", + extra_section: "GUIDES", + source_url: @source_url, + skip_undefined_reference_warnings_on: ["CHANGELOG.md"], + extras: extras(), + groups_for_extras: groups_for_extras(), + groups_for_functions: [ + group_for_function("Query API"), + group_for_function("Schema API"), + group_for_function("Transaction API"), + group_for_function("Runtime API"), + group_for_function("User callbacks") + ], + groups_for_modules: [ + # Ecto, + # Ecto.Changeset, + # Ecto.Multi, + # Ecto.Query, + # Ecto.Repo, + # Ecto.Schema, + # Ecto.Schema.Metadata, + # Mix.Ecto, + + "Types": [ + Ecto.Enum, + Ecto.ParameterizedType, + Ecto.Type, + Ecto.UUID + ], + "Query APIs": [ + Ecto.Query.API, + Ecto.Query.WindowAPI, + Ecto.Queryable, + Ecto.SubQuery + ], + "Adapter specification": [ + Ecto.Adapter, + Ecto.Adapter.Queryable, + Ecto.Adapter.Schema, + Ecto.Adapter.Storage, + Ecto.Adapter.Transaction + ], + "Relation structs": [ + Ecto.Association.BelongsTo, + Ecto.Association.Has, + Ecto.Association.HasThrough, + Ecto.Association.ManyToMany, + Ecto.Association.NotLoaded, + Ecto.Embedded + ] + ] + ] + end + + def extras() do + [ + "guides/introduction/Getting Started.md", + 
"guides/introduction/Embedded Schemas.md", + "guides/introduction/Testing with Ecto.md", + "guides/howtos/Aggregates and subqueries.md", + "guides/howtos/Composable transactions with Multi.md", + "guides/howtos/Constraints and Upserts.md", + "guides/howtos/Data mapping and validation.md", + "guides/howtos/Dynamic queries.md", + "guides/howtos/Multi tenancy with query prefixes.md", + "guides/howtos/Multi tenancy with foreign keys.md", + "guides/howtos/Self-referencing many to many.md", + "guides/howtos/Polymorphic associations with many to many.md", + "guides/howtos/Replicas and dynamic repositories.md", + "guides/howtos/Schemaless queries.md", + "guides/howtos/Test factories.md", + "CHANGELOG.md" + ] + end + + defp group_for_function(group), do: {String.to_atom(group), &(&1[:group] == group)} + + defp groups_for_extras do + [ + "Introduction": ~r/guides\/introduction\/.?/, + "How-To's": ~r/guides\/howtos\/.?/ + ] + end + + defp elixirc_paths(:test), do: ["lib", "test/support"] + defp elixirc_paths(_), do: ["lib"] +end diff --git a/deps/ecto_sql/.fetch b/deps/ecto_sql/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/ecto_sql/.formatter.exs b/deps/ecto_sql/.formatter.exs new file mode 100644 index 0000000..f80a11a --- /dev/null +++ b/deps/ecto_sql/.formatter.exs @@ -0,0 +1,35 @@ +locals_without_parens = [ + add: 2, + add: 3, + add_if_not_exists: 2, + add_if_not_exists: 3, + alter: 2, + create: 1, + create: 2, + create_if_not_exists: 1, + create_if_not_exists: 2, + drop: 1, + drop: 2, + drop_if_exists: 1, + drop_if_exists: 2, + execute: 1, + execute: 2, + modify: 2, + modify: 3, + remove: 1, + remove: 2, + remove: 3, + remove_if_exists: 2, + rename: 2, + rename: 3, + timestamps: 1 +] + +[ + import_deps: [:ecto], + locals_without_parens: locals_without_parens, + export: [ + locals_without_parens: locals_without_parens + ], + inputs: [] +] diff --git a/deps/ecto_sql/.hex b/deps/ecto_sql/.hex new file mode 100644 index 0000000000000000000000000000000000000000..7668bd84929d5779561fa9cff184d703c25c1a56 GIT binary patch literal 272 zcmZ9HOH#uy5CuiTCo7JUl13VRcHAU2(u}YnvV-kV*>iCemTY_Ry5G;R8}_XBgWPCG zYEw!*vw9t;NtRO?q3w2ik9R*w@kxhoD*w)wPvoqNE1-Tnty7fUvw<|Lk}`U+?$ z2JMniEg3`9C=e3}iYHx>g>;I@)&~nIche%5`EGjZcQ1|A%XhCC=jo5xV?5el6e3CF literal 0 HcmV?d00001 diff --git a/deps/ecto_sql/CHANGELOG.md b/deps/ecto_sql/CHANGELOG.md new file mode 100644 index 0000000..a862876 --- /dev/null +++ b/deps/ecto_sql/CHANGELOG.md @@ -0,0 +1,396 @@ +# Changelog for v3.x + +## v3.8.3 (2022-06-04) + +### Enhancements + + * [sql] Implement `literal/1` support in fragments + +## v3.8.2 (2022-05-18) + +### Bug fixes + + * [postgres] Fix possible breaking change on `json_extract_path` for boolean values introduced in v3.8.0 + * [sql] Colorize stacktrace and use `:` before printing line number + +## v3.8.1 (2022-04-29) + +### Bug fixes + + * [mysql] Raise on a subquery with parameter on MySQL join + * [sql] Do not invoke dynamic repositories in direct `Ecto.Adapters.SQL` operations + +## v3.8.0 (2022-04-26) + +### Enhancements + + * [migrations] Support `--to-exclusive` in `mix ecto.migrate` and `mix ecto.rollback` + * [mysql] Add `:comment` support on MySQL migrations + * [postgres] Support `:prepare` option per operation + * [postgres] Optimize `json_extract_path` comparisons in PostgreSQL + * [sql] Optionally log last known call, publish stacktrace in telemetry + * [telemetry] Include `:repo` option in telemetry events + +### Bug fixes + + * [sql] Ensure `:timeout` option is respected in 
`Ecto.Adapters.SQL.explain/3` + +## v3.7.2 (2022-01-23) + +### Enhancements + + * [adapters] Support latest `myxql` and `postgrex` + +## v3.7.1 (2021-10-12) + +### Enhancements + + * [migrations] Add `:cascade` option to `drop` + * [migrations] Support `--prefix` in `mix ecto.migrations` + * [migrations] Add `--log-migrator-sql` and `--log-migrations-sql` + * [mysql] Cache more insert/update queries and allow `:cache_statement` to be set + * [mssql] Support more recent tds versions + +### Bug fixes + + * [migrations] Consider the database prefix when locking tables + +## v3.7.0 (2021-08-19) + +### Enhancements + + * [mysql] Support lateral joins + +### Bug fixes + + * [sql] Fix CTE subqueries not finding parent bindings + +## v3.6.2 (2021-05-28) + +### Bug fixes + + * [migration] Improve error message on invalid migration type + * [postgres] Avoid duplicate order_by with distinct + * [sql] Implement new checked_out? callback required by latest Ecto + +## v3.6.1 (2021-04-12) + +### Bug fixes + + * [migrations] Ensure migration_source option is respected in PostgreSQL adapter + +## v3.6.0 (2021-04-03) + +### Bug fixes + + * [migrations] Fix a bug where the migration lock would not apply on the first migration (when the schema migrations table is empty). This fix changes how migration tables are locked, therefore let us know of any possible regressions in your workflow + +### Enhancements + + * [migrations] Allow generating migrations from within umbrella app + * [postgres] Add `:format` option to PostgreSQL explain + * [postgres] Support `:socket_dir` connection option when using `mix ecto.load` or `mix ecto.dump` + * [sandbox] Support locally registered processes in `allow/3` + * [storage] Do not fail `storage_up` if the user has access to an already-created database + * [tds] Support for `:inner_lateral` and `:left_lateral` + +## v3.5.4 (2020-01-20) + +### Enhancements + + * [mysql] Support defaults for JSON columns + * [postgres] Allow Postgrex v1.0 + +## v3.5.3 (2020-10-27) + +### Enhancements + + * [migrations] Pass `:schema_migration` option to repo operations for `prepare_query` checks + * [psql] Support `:force_drop` configuration to force a DB to be dropped + +## v3.5.2 (2020-10-24) + +### Enhancements + + * [migrations] Support `:with` option in `references` for composite foreign keys + * [migrations] Support `:match` option in `references` + * [tds] Support TDS 3-part and 4-part prefixes + +## v3.5.1 (2020-10-12) + +### Enhancements + + * [tds] Support explain plan for the TDS adapter + +### Bug fix + + * [migrations] Reload all migrations once the lock is free to avoid running the same migration more than once + * [query] Support nested subqueries + +## v3.5.0 (2020-10-03) + +### Enhancements + + * [migrations] Add option to skip schema migrations table checks + * [migrations] Add `:migration_repo` configuration to allow a different repository to host the schema migrations + * [migrations] Support `validate: false` on references and constraints + * [migrations] Accept `:migration_primary_key` as false and add `:migration_foreign_key` repo config + * [postgres] Support for `:identity` key types in Postgres 10 or later + * [postgres] Use IF NOT EXIST when creating index with `create_if_not_exists`, this requires PG 9.5+ or later + * [repo] Support `Repo.explain(:all | :update_all | :delete_all, query)` for Ecto adapters + * [sandbox] Allow for dynamic repos to be checked out in sandbox + +### Bug fixes + + * [migrations] Flush migration commands before executing `before_commit` 
callback + * [migrations] Do not swallow errors when migration lock is disabled + +## v3.4.5 (2020-07-05) + +### Bug fixes + + * [ecto] Fix warnings on Elixir v1.11 + * [migrations] Migration prefix should have higher preference than `default_options` + +## v3.4.4 (2020-05-19) + +### Enhancements + + * [sandbox] Add `Ecto.Adapters.SQL.start_owner!/2` and `Ecto.Adapters.SQL.stop_owner/1` + * [myxql] Decode BIT columns when using MyXQL and `:boolean` type + * [migrations] Use one line per migration in the schema dump + +## v3.4.3 (2020-04-27) + +### Bug fixes + + * [ecto] Support `as` and `parent_as` from Ecto v3.4.3+ + * [ecto] Support `x in subquery(query)` from Ecto v3.4.3+ + +## v3.4.2 (2020-04-02) + +### Bug fixes + + * [myxql] A binary with size should be a varbinary + * [mssql] A binary without size should be a varbinary(max) + +## v3.4.1 (2020-03-25) + +### Bug fixes + + * [myxql] Assume the reference does not change in MyXQL and prepare for v0.4.0 + +## v3.4.0 (2020-03-24) + +### Enhancements + + * [adapters] Support Ecto's v3.4 `json_extract_path/2` + * [migrations] Support multiple migration paths to be given with `--migration-path` + * [mssql] Add built-in support to MSSQL via the TDS adapter + * [repo] Support custom options on telemetry + +## v3.3.4 (2020-02-14) + +### Enhancements + + * [adapters] Support fragments in locks + * [migration] Add `:include` option to support covering indexes + +## v3.3.3 (2020-01-28) + +### Enhancements + + * [myxql] Allow not setting the encoding when creating a database + +### Bug fixes + + * [myxql] Removing prefixed table name from constraints on latest MySQL versions + * [sql] Fix precedence of `is_nil` when inside a comparison operator + +## v3.3.2 (2019-12-15) + +### Bug fixes + + * [adapters] Start StorageSupervisor before using it + +## v3.3.1 (2019-12-15) + +### Bug fixes + + * [adapters] Do not leak PIDs on storage commands + * [migrations] Use :migration_primary_key in create/1 + +## v3.3.0 (2019-12-11) + +### Enhancements + + * [ecto] Upgrade and support Ecto v3.3 + * [repo] Include `:idle_time` on telemetry measurements + * [migration] Support anonymous functions in `Ecto.Migration.execute/2` + +### Bug fixes + + * [migration] Ensure that flush() will raise on rollback if called from `change/0` + +## v3.2.2 (2019-11-25) + +### Enhancements + + * [mysql] Support myxql v0.3 + +## v3.2.1 (2019-11-02) + +### Enhancements + + * [migration] Support anonymous functions in execute + +### Bug fixes + + * [mix ecto.create] Change default charset in MyXQL to utf8mb4 + +## v3.2.0 (2019-09-07) + +This new version requires Elixir v1.6+. Note also the previously soft-deprecated `Ecto.Adapters.MySQL` has been removed in favor of `Ecto.Adapters.MyXQL`. We announced the intent to remove `Ecto.Adapters.MySQL` back in v3.0 and `Ecto.Adapters.MyXQL` has been tested since then and ready for prime time since v3.1. 
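+
+For an application moving off the removed adapter, the change is typically a one-line swap in the repo module (plus depending on `myxql` instead of the old driver). A minimal sketch, using placeholder `MyApp` names that are not part of this changelog:
+
+    defmodule MyApp.Repo do
+      use Ecto.Repo,
+        otp_app: :my_app,
+        adapter: Ecto.Adapters.MyXQL   # previously Ecto.Adapters.MySQL
+    end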
+ +### Enhancements + + * [sql] Use `get_dynamic_repo` on SQL-specific functions + * [sql] Respect `Ecto.Type.embed_as/2` choice when loading/dumping embeds (Ecto 3.2+ compat) + * [sql] Support CTE expressions (Ecto 3.2+ compat) + +### Bug fixes + + * [sql] Fix generated "COMMENT ON INDEX" for PostgreSQL + +## v3.1.6 (2019-06-27) + +### Enhancements + + * [sql] Set `cache_statement` for `insert_all` + +## v3.1.5 (2019-06-13) + +### Enhancements + + * [migration] Add `@disable_migration_lock` to be better handle concurrent indexes + * [mysql] Set `cache_statement` for inserts + +### Deprecations + + * [mysql] Deprecate Ecto.Adapters.MySQL + +## v3.1.4 (2019-05-28) + +### Enhancements + + * [migrator] Print warning message if concurrent indexes are used with migration lock + +## v3.1.3 (2019-05-19) + +### Enhancements + + * [migrator] Add `--migrations-path` to ecto.migrate/ecto.rollback/ecto.migrations Mix tasks + +### Bug fixes + + * [migrator] Make sure an unboxed run is performed when running migrations with the ownership pool + +## v3.1.2 (2019-05-11) + +### Enhancements + + * [migrator] Add `Ecto.Migrator.with_repo/2` to start repo and apps + * [mix] Add `--skip-if-loaded` for `ecto.load` + * [sql] Add `Ecto.Adapters.SQL.table_exists?/2` + +## v3.1.1 (2019-04-16) + +### Bug fixes + + * [repo] Fix backwards incompatible change in Telemetry metadata + +## v3.1.0 (2019-04-02) + +v3.1 requires Elixir v1.5+. + +### Enhancements + + * [mysql] Introduce Ecto.Adapters.MyXQL as an alternative library for MySQL + * [migrations] Run all migrations in subdirectories + * [repo] Update to Telemetry v0.4.0 (note the measurements value differ from previous versions) + +### Bug fixes + + * [sandbox] Respect `:ownership_timeout` repo configuration on SQL Sandbox + * [migrations] Commit and relock after every migration to avoid leaving the DB in an inconsistent state under certain failures + +### Backwards incompatible changes + + * [migrations] If you are creating indexes concurrently, you need to disable the migration lock: `config :app, App.Repo, migration_lock: nil`. This will migrations behave the same way as they did in Ecto 2.0. 
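+
+A migration that creates an index concurrently under this setup might look like the sketch below (module, table and column names are illustrative only; concurrent index creation also needs the DDL transaction disabled):
+
+    defmodule MyApp.Repo.Migrations.AddUsersEmailIndex do
+      use Ecto.Migration
+      # CREATE INDEX CONCURRENTLY cannot run inside a transaction
+      @disable_ddl_transaction true
+
+      def change do
+        create index(:users, [:email], concurrently: true)
+      end
+    end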
+ +## v3.0.5 (2019-02-05) + +### Enhancements + + * [repo] Add `:repo` and `:type` keys to telemetry events + * [migrations] Add `:add_if_not_exists` and `:remove_if_exists` to columns in migrations + +### Bug fixes + + * [migrations] Load all migrations before running them + * [sandbox] Include `:queue_target` and `:queue_interval` in SQL Sandbox checkout + +## v3.0.4 (2018-12-31) + +### Enhancements + + * [repo] Bump telemetry dependency + * [migrations] Perform strict argument parsing in `ecto.migrate`, `ecto.rollback`, `ecto.load` and `ecto.dump` + +### Bug fixes + + * [migrations] Do not log migration versions query + +### Deprecations + + * [repo] `Telemetry.attach/5` and `Telemetry.attach_many/5` are deprecated in favor of `:telemetry.attach/5` and `:telemetry.attach_many/5` + +## v3.0.3 (2018-11-29) + +### Enhancements + + * [migration] Support `after_begin` and `before_commit` migration callbacks + * [migration] Add `:prefix` option to `references/2` + +### Bug fixes + + * [migration] Do not start a transaction for migrated versions if there is no `:migration_lock` + * [migration] Fix removing an reference column inside alter table + * [migration] Warn on removed `:pool_timeout` option + +## v3.0.2 (2018-11-20) + +### Enhancements + + * [query] Support `Ecto.Query` in `insert_all` values + * [migration] Add `Ecto.Migration.repo/0` + +## v3.0.1 (2018-11-17) + +### Enhancements + + * [migrations] Support `drop_if_exists` for constraints + +### Bug fixes + + * [migrations] Only commit migration transaction if migration can be inserted into the DB + * [migrations] Do not run migrations from `_build` when using Mix + * [migrations] Improve errors when checking in already committed sandboxes + * [mysql] Do not pass nil for `--user` to mysqldump + * [package] Require Ecto 3.0.2 with bug fixes + * [package] Require Mariaex 0.9.1 which fixes a bug when used with Ecto 3.0.2 + * [sandbox] Raise when using sandbox on non-sandbox pools + +## v3.0.0 (2018-10-29) + + * Initial release diff --git a/deps/ecto_sql/README.md b/deps/ecto_sql/README.md new file mode 100644 index 0000000..f3ba5ff --- /dev/null +++ b/deps/ecto_sql/README.md @@ -0,0 +1,69 @@ +Ecto SQL +========= + +[![Build Status](https://github.com/elixir-ecto/ecto_sql/workflows/CI/badge.svg)](https://github.com/elixir-ecto/ecto_sql/actions) + +Ecto SQL ([documentation](https://hexdocs.pm/ecto_sql)) provides building blocks for writing SQL adapters for Ecto. It features: + + * The Ecto.Adapters.SQL module as an entry point for all SQL-based adapters + * Default implementations for Postgres (Ecto.Adapters.Postgres), MySQL (Ecto.Adapters.MyXQL), and MSSQL (Ecto.Adapters.Tds) + * A test sandbox (Ecto.Adapters.SQL.Sandbox) that concurrently runs database tests inside transactions + * Support for database migrations via Mix tasks + +To learn more about getting started, [see the Ecto repository](https://github.com/elixir-ecto/ecto). + +## Running tests + +Clone the repo and fetch its dependencies: + + $ git clone https://github.com/elixir-ecto/ecto_sql.git + $ cd ecto_sql + $ mix deps.get + $ mix test.all + +Note that `mix test.all` runs the tests in `test/` and the `integration_test`s for each adapter: `pg`, `myxql` and `tds`. + +You can also use a local Ecto checkout if desired: + + $ ECTO_PATH=../ecto mix test.all + +You can run tests against a specific Ecto adapter by using the `ECTO_ADAPTER` environment variable: + + $ ECTO_ADAPTER=pg mix test + +MySQL and PostgreSQL can be installed directly on most systems. 
For MSSQL, you may need to run it as a Docker image: + + docker run -d -p 1433:1433 --name mssql -e 'ACCEPT_EULA=Y' -e 'MSSQL_SA_PASSWORD=some!Password' mcr.microsoft.com/mssql/server:2017-latest + +### Running containerized tests + +It is also possible to run the integration tests under a containerized environment using [earthly](https://earthly.dev/get-earthly): + + $ earthly -P +all + +You can also use this to interactively debug any failing integration tests using the corresponding commands: + + $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg MYSQL=5.7 +integration-test-mysql + $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg MSSQL=2019 +integration-test-mssql + $ earthly -P -i --build-arg ELIXIR_BASE=1.8.2-erlang-20.3.8.26-alpine-3.11.6 --build-arg POSTGRES=11.11 +integration-test-postgres + +Then once you enter the containerized shell, you can inspect the underlying databases with the respective commands: + + PGPASSWORD=postgres psql -h 127.0.0.1 -U postgres -d postgres ecto_test + MYSQL_PASSWORD=root mysql -h 127.0.0.1 -uroot -proot ecto_test + sqlcmd -U sa -P 'some!Password' + +## License + +Copyright (c) 2012 Plataformatec \ +Copyright (c) 2020 Dashbit + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0) + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. diff --git a/deps/ecto_sql/hex_metadata.config b/deps/ecto_sql/hex_metadata.config new file mode 100644 index 0000000..deb2f72 --- /dev/null +++ b/deps/ecto_sql/hex_metadata.config @@ -0,0 +1,75 @@ +{<<"app">>,<<"ecto_sql">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>,<<"SQL-based adapters for Ecto and database migrations">>}. +{<<"elixir">>,<<"~> 1.10">>}. 
+{<<"files">>, + [<<".formatter.exs">>,<<"mix.exs">>,<<"README.md">>,<<"CHANGELOG.md">>, + <<"lib">>,<<"lib/ecto">>,<<"lib/ecto/adapter">>, + <<"lib/ecto/adapter/migration.ex">>,<<"lib/ecto/adapter/structure.ex">>, + <<"lib/ecto/adapters">>,<<"lib/ecto/adapters/tds.ex">>, + <<"lib/ecto/adapters/myxql">>,<<"lib/ecto/adapters/myxql/connection.ex">>, + <<"lib/ecto/adapters/tds">>,<<"lib/ecto/adapters/tds/types.ex">>, + <<"lib/ecto/adapters/tds/connection.ex">>,<<"lib/ecto/adapters/sql.ex">>, + <<"lib/ecto/adapters/postgres">>, + <<"lib/ecto/adapters/postgres/connection.ex">>, + <<"lib/ecto/adapters/mysql.ex">>,<<"lib/ecto/adapters/myxql.ex">>, + <<"lib/ecto/adapters/postgres.ex">>,<<"lib/ecto/adapters/sql">>, + <<"lib/ecto/adapters/sql/stream.ex">>, + <<"lib/ecto/adapters/sql/sandbox.ex">>, + <<"lib/ecto/adapters/sql/connection.ex">>, + <<"lib/ecto/adapters/sql/application.ex">>,<<"lib/ecto/migration.ex">>, + <<"lib/ecto/migrator.ex">>,<<"lib/ecto/migration">>, + <<"lib/ecto/migration/runner.ex">>, + <<"lib/ecto/migration/schema_migration.ex">>,<<"lib/mix">>, + <<"lib/mix/tasks">>,<<"lib/mix/tasks/ecto.migrations.ex">>, + <<"lib/mix/tasks/ecto.dump.ex">>,<<"lib/mix/tasks/ecto.rollback.ex">>, + <<"lib/mix/tasks/ecto.migrate.ex">>, + <<"lib/mix/tasks/ecto.gen.migration.ex">>,<<"lib/mix/tasks/ecto.load.ex">>, + <<"lib/mix/ecto_sql.ex">>,<<"integration_test/sql">>, + <<"integration_test/sql/migrator.exs">>, + <<"integration_test/sql/query_many.exs">>, + <<"integration_test/sql/lock.exs">>, + <<"integration_test/sql/transaction.exs">>, + <<"integration_test/sql/alter.exs">>,<<"integration_test/sql/stream.exs">>, + <<"integration_test/sql/sql.exs">>,<<"integration_test/sql/sandbox.exs">>, + <<"integration_test/sql/subquery.exs">>, + <<"integration_test/sql/logging.exs">>, + <<"integration_test/sql/migration.exs">>,<<"integration_test/support">>, + <<"integration_test/support/file_helpers.exs">>, + <<"integration_test/support/migration.exs">>, + <<"integration_test/support/repo.exs">>]}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"links">>,[{<<"GitHub">>,<<"https://github.com/elixir-ecto/ecto_sql">>}]}. +{<<"name">>,<<"ecto_sql">>}. +{<<"requirements">>, + [[{<<"app">>,<<"ecto">>}, + {<<"name">>,<<"ecto">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 3.8.4">>}], + [{<<"app">>,<<"telemetry">>}, + {<<"name">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 0.4.0 or ~> 1.0">>}], + [{<<"app">>,<<"db_connection">>}, + {<<"name">>,<<"db_connection">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 2.5 or ~> 2.4.1">>}], + [{<<"app">>,<<"postgrex">>}, + {<<"name">>,<<"postgrex">>}, + {<<"optional">>,true}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 0.15.0 or ~> 0.16.0 or ~> 1.0">>}], + [{<<"app">>,<<"myxql">>}, + {<<"name">>,<<"myxql">>}, + {<<"optional">>,true}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 0.6.0">>}], + [{<<"app">>,<<"tds">>}, + {<<"name">>,<<"tds">>}, + {<<"optional">>,true}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 2.1.1 or ~> 2.2">>}]]}. +{<<"version">>,<<"3.8.3">>}. 
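The postgrex, myxql and tds entries above are optional requirements: ecto_sql bundles adapters for all three databases and the consuming application declares only the driver it actually uses. A minimal sketch of such an application's deps, assuming PostgreSQL (versions shown are illustrative):

    # mix.exs of an application using ecto_sql with PostgreSQL
    defp deps do
      [
        {:ecto_sql, "~> 3.8"},
        {:postgrex, ">= 0.0.0"}  # driver required by Ecto.Adapters.Postgres
      ]
    end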
diff --git a/deps/ecto_sql/integration_test/sql/alter.exs b/deps/ecto_sql/integration_test/sql/alter.exs new file mode 100644 index 0000000..b7ec655 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/alter.exs @@ -0,0 +1,90 @@ +defmodule Ecto.Integration.AlterTest do + use Ecto.Integration.Case, async: false + + alias Ecto.Integration.PoolRepo + + defmodule AlterMigrationOne do + use Ecto.Migration + + def up do + create table(:alter_col_type) do + add :value, :integer + end + + execute "INSERT INTO alter_col_type (value) VALUES (1)" + end + + def down do + drop table(:alter_col_type) + end + end + + defmodule AlterMigrationTwo do + use Ecto.Migration + + def up do + alter table(:alter_col_type) do + modify :value, :numeric + end + end + + def down do + alter table(:alter_col_type) do + modify :value, :integer + end + end + end + + import Ecto.Query, only: [from: 1, from: 2] + + defp run(direction, repo, module) do + Ecto.Migration.Runner.run(repo, repo.config(), 1, module, :forward, direction, direction, log: false) + end + + test "reset cache on returning query after alter column type" do + values = from v in "alter_col_type", select: v.value + + assert :ok == run(:up, PoolRepo, AlterMigrationOne) + assert PoolRepo.all(values) == [1] + + assert :ok == run(:up, PoolRepo, AlterMigrationTwo) + [%Decimal{}] = PoolRepo.all(values) + + PoolRepo.transaction(fn() -> + assert [%Decimal{}] = PoolRepo.all(values) + assert :ok == run(:down, PoolRepo, AlterMigrationTwo) + + # Optionally fail once with database error when + # already prepared on connection (and clear cache) + try do + PoolRepo.all(values, [mode: :savepoint]) + rescue + _ -> + assert PoolRepo.all(values) == [1] + else + result -> + assert result == [1] + end + end) + after + assert :ok == run(:down, PoolRepo, AlterMigrationOne) + end + + test "reset cache on parameterized query after alter column type" do + values = from v in "alter_col_type" + + assert :ok == run(:up, PoolRepo, AlterMigrationOne) + assert PoolRepo.update_all(values, [set: [value: 2]]) == {1, nil} + + assert :ok == run(:up, PoolRepo, AlterMigrationTwo) + assert PoolRepo.update_all(values, [set: [value: 3]]) == {1, nil} + + PoolRepo.transaction(fn() -> + assert PoolRepo.update_all(values, [set: [value: Decimal.new(5)]]) == {1, nil} + assert :ok == run(:down, PoolRepo, AlterMigrationTwo) + assert PoolRepo.update_all(values, [set: [value: 6]]) == {1, nil} + end) + after + assert :ok == run(:down, PoolRepo, AlterMigrationOne) + end +end diff --git a/deps/ecto_sql/integration_test/sql/lock.exs b/deps/ecto_sql/integration_test/sql/lock.exs new file mode 100644 index 0000000..eb99ad5 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/lock.exs @@ -0,0 +1,59 @@ +defmodule Ecto.Integration.LockTest do + # We can keep this test async as long as it + # is the only one accessing the lock_test table. + use ExUnit.Case, async: true + + import Ecto.Query + alias Ecto.Integration.PoolRepo + + defmodule LockCounter do + use Ecto.Schema + + schema "lock_counters" do + field :count, :integer + end + end + + setup do + PoolRepo.delete_all(LockCounter) + :ok + end + + test "lock for update" do + %{id: id} = PoolRepo.insert!(%LockCounter{count: 1}) + pid = self() + + lock_for_update = + Application.get_env(:ecto_sql, :lock_for_update) || + raise ":lock_for_update not set in :ecto application" + + # Here we are manually inserting the lock in the query + # to test multiple adapters. Never do this in actual + # application code: it is not safe and not public. 
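+    # The outer transaction below selects and locks the row, then signals the
+    # task; the task's own SELECT ... FOR UPDATE blocks until that transaction
+    # commits, so both increments are applied and the final count is 3.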
+ query = from(lc in LockCounter, where: lc.id == ^id) + query = %{query | lock: lock_for_update} + + {:ok, new_pid} = + Task.start_link fn -> + assert_receive :select_for_update, 5000 + + PoolRepo.transaction(fn -> + [post] = PoolRepo.all(query) # this should block until the other trans. commit + post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! + end) + + send pid, :updated + end + + PoolRepo.transaction(fn -> + [post] = PoolRepo.all(query) # select and lock the row + send new_pid, :select_for_update # signal second process to begin a transaction + post |> Ecto.Changeset.change(count: post.count + 1) |> PoolRepo.update! + end) + + assert_receive :updated, 5000 + + # Final count will be 3 if SELECT ... FOR UPDATE worked and 2 otherwise + assert [%LockCounter{count: 3}] = PoolRepo.all(LockCounter) + end +end diff --git a/deps/ecto_sql/integration_test/sql/logging.exs b/deps/ecto_sql/integration_test/sql/logging.exs new file mode 100644 index 0000000..5267e32 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/logging.exs @@ -0,0 +1,135 @@ +defmodule Ecto.Integration.LoggingTest do + use Ecto.Integration.Case, async: true + + alias Ecto.Integration.TestRepo + alias Ecto.Integration.PoolRepo + alias Ecto.Integration.Post + + import ExUnit.CaptureLog + + describe "telemetry" do + test "dispatches event" do + log = fn event_name, measurements, metadata -> + assert Enum.at(event_name, -1) == :query + assert %{result: {:ok, _res}} = metadata + + assert measurements.total_time == + measurements.query_time + measurements.decode_time + measurements.queue_time + + assert measurements.idle_time + send(self(), :logged) + end + + Process.put(:telemetry, log) + _ = PoolRepo.all(Post) + assert_received :logged + end + + test "dispatches event with stacktrace" do + log = fn _event_name, _measurements, metadata -> + assert %{stacktrace: [_ | _]} = metadata + send(self(), :logged) + end + + Process.put(:telemetry, log) + _ = PoolRepo.all(Post, stacktrace: true) + assert_received :logged + end + + test "dispatches event with custom options" do + log = fn event_name, _measurements, metadata -> + assert Enum.at(event_name, -1) == :query + assert metadata.options == [:custom_metadata] + send(self(), :logged) + end + + Process.put(:telemetry, log) + _ = PoolRepo.all(Post, telemetry_options: [:custom_metadata]) + assert_received :logged + end + + test "dispatches under another event name" do + log = fn [:custom], measurements, metadata -> + assert %{result: {:ok, _res}} = metadata + + assert measurements.total_time == + measurements.query_time + measurements.decode_time + measurements.queue_time + + assert measurements.idle_time + send(self(), :logged) + end + + Process.put(:telemetry, log) + _ = PoolRepo.all(Post, telemetry_event: [:custom]) + assert_received :logged + end + + test "is not dispatched with no event name" do + Process.put(:telemetry, fn _, _ -> raise "never called" end) + _ = TestRepo.all(Post, telemetry_event: nil) + refute_received :logged + end + end + + describe "logs" do + @stacktrace_opts [stacktrace: true, log: :error] + + defp stacktrace_entry(line) do + "โ†ณ anonymous fn/0 in Ecto.Integration.LoggingTest.\"test logs includes stacktraces\"/1, " <> + "at: integration_test/sql/logging.exs:#{line - 3}" + end + + test "when some measurements are nil" do + assert capture_log(fn -> TestRepo.query("BEG", [], log: :error) end) =~ + "[error]" + end + + test "includes stacktraces" do + assert capture_log(fn -> + TestRepo.all(Post, @stacktrace_opts) + + :ok + end) =~ 
stacktrace_entry(__ENV__.line) + + assert capture_log(fn -> + TestRepo.insert(%Post{}, @stacktrace_opts) + + :ok + end) =~ stacktrace_entry(__ENV__.line) + + assert capture_log(fn -> + # Test cascading options + Ecto.Multi.new() + |> Ecto.Multi.insert(:post, %Post{}) + |> TestRepo.transaction(@stacktrace_opts) + + :ok + end) =~ stacktrace_entry(__ENV__.line) + + assert capture_log(fn -> + # In theory we should point to the call _inside_ run + # but all multi calls point to the transaction starting point. + Ecto.Multi.new() + |> Ecto.Multi.run(:all, fn _, _ -> {:ok, TestRepo.all(Post, @stacktrace_opts)} end) + |> TestRepo.transaction() + + :ok + end) =~ stacktrace_entry(__ENV__.line) + end + + test "with custom log level" do + assert capture_log(fn -> TestRepo.insert!(%Post{title: "1"}, log: :error) end) =~ + "[error]" + + # We cannot assert on the result because it depends on the suite log level + capture_log(fn -> + TestRepo.insert!(%Post{title: "1"}, log: true) + end) + + # But this assertion is always true + assert capture_log(fn -> + TestRepo.insert!(%Post{title: "1"}, log: false) + end) == "" + end + end +end diff --git a/deps/ecto_sql/integration_test/sql/migration.exs b/deps/ecto_sql/integration_test/sql/migration.exs new file mode 100644 index 0000000..a0a4042 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/migration.exs @@ -0,0 +1,625 @@ +defmodule Ecto.Integration.MigrationTest do + use ExUnit.Case, async: true + + alias Ecto.Integration.{TestRepo, PoolRepo} + + defmodule CreateMigration do + use Ecto.Migration + + @table table(:create_table_migration) + @index index(:create_table_migration, [:value], unique: true) + + def up do + create @table do + add :value, :integer + end + create @index + end + + def down do + drop @index + drop @table + end + end + + defmodule AddColumnMigration do + use Ecto.Migration + + def up do + create table(:add_col_migration) do + add :value, :integer + end + + alter table(:add_col_migration) do + add :to_be_added, :integer + end + + execute "INSERT INTO add_col_migration (value, to_be_added) VALUES (1, 2)" + end + + def down do + drop table(:add_col_migration) + end + end + + defmodule AlterColumnMigration do + use Ecto.Migration + + def up do + create table(:alter_col_migration) do + add :from_null_to_not_null, :integer + add :from_not_null_to_null, :integer, null: false + + add :from_default_to_no_default, :integer, default: 0 + add :from_no_default_to_default, :integer + end + + alter table(:alter_col_migration) do + modify :from_null_to_not_null, :string, null: false + modify :from_not_null_to_null, :string, null: true + + modify :from_default_to_no_default, :integer, default: nil + modify :from_no_default_to_default, :integer, default: 0 + end + + execute "INSERT INTO alter_col_migration (from_null_to_not_null) VALUES ('foo')" + end + + def down do + drop table(:alter_col_migration) + end + end + + defmodule AlterColumnFromMigration do + use Ecto.Migration + + def change do + create table(:modify_from_products) do + add :value, :integer + add :nullable, :integer, null: false + end + + if direction() == :up do + flush() + PoolRepo.insert_all "modify_from_products", [[value: 1, nullable: 1]] + end + + alter table(:modify_from_products) do + modify :value, :bigint, from: :integer + modify :nullable, :bigint, null: true, from: {:integer, null: false} + end + end + end + + defmodule AlterColumnFromPkeyMigration do + use Ecto.Migration + + def change do + create table(:modify_from_authors, primary_key: false) do + add :id, :integer, 
primary_key: true + end + create table(:modify_from_posts) do + add :author_id, references(:modify_from_authors, type: :integer) + end + + if direction() == :up do + flush() + PoolRepo.insert_all "modify_from_authors", [[id: 1]] + PoolRepo.insert_all "modify_from_posts", [[author_id: 1]] + end + + alter table(:modify_from_posts) do + # remove the constraints modify_from_posts_author_id_fkey + modify :author_id, :integer, from: references(:modify_from_authors, type: :integer) + end + alter table(:modify_from_authors) do + modify :id, :bigint, from: :integer + end + alter table(:modify_from_posts) do + # add the constraints modify_from_posts_author_id_fkey + modify :author_id, references(:modify_from_authors, type: :bigint), from: :integer + end + end + end + + defmodule AlterForeignKeyOnDeleteMigration do + use Ecto.Migration + + def up do + create table(:alter_fk_users) + + create table(:alter_fk_posts) do + add :alter_fk_user_id, :id + end + + alter table(:alter_fk_posts) do + modify :alter_fk_user_id, references(:alter_fk_users, on_delete: :nilify_all) + end + end + + def down do + drop table(:alter_fk_posts) + drop table(:alter_fk_users) + end + end + + defmodule AlterForeignKeyOnUpdateMigration do + use Ecto.Migration + + def up do + create table(:alter_fk_users) + + create table(:alter_fk_posts) do + add :alter_fk_user_id, :id + end + + alter table(:alter_fk_posts) do + modify :alter_fk_user_id, references(:alter_fk_users, on_update: :update_all) + end + end + + def down do + drop table(:alter_fk_posts) + drop table(:alter_fk_users) + end + end + + defmodule DropColumnMigration do + use Ecto.Migration + + def up do + create table(:drop_col_migration) do + add :value, :integer + add :to_be_removed, :integer + end + + execute "INSERT INTO drop_col_migration (value, to_be_removed) VALUES (1, 2)" + + alter table(:drop_col_migration) do + remove :to_be_removed + end + end + + def down do + drop table(:drop_col_migration) + end + end + + defmodule RenameColumnMigration do + use Ecto.Migration + + def up do + create table(:rename_col_migration) do + add :to_be_renamed, :integer + end + + rename table(:rename_col_migration), :to_be_renamed, to: :was_renamed + + execute "INSERT INTO rename_col_migration (was_renamed) VALUES (1)" + end + + def down do + drop table(:rename_col_migration) + end + end + + defmodule OnDeleteMigration do + use Ecto.Migration + + def up do + create table(:parent1) + create table(:parent2) + + create table(:ref_migration) do + add :parent1, references(:parent1, on_delete: :nilify_all) + end + + alter table(:ref_migration) do + add :parent2, references(:parent2, on_delete: :delete_all) + end + end + + def down do + drop table(:ref_migration) + drop table(:parent1) + drop table(:parent2) + end + end + + defmodule CompositeForeignKeyMigration do + use Ecto.Migration + + def change do + create table(:composite_parent) do + add :key_id, :integer + end + + create unique_index(:composite_parent, [:id, :key_id]) + + create table(:composite_child) do + add :parent_key_id, :integer + add :parent_id, references(:composite_parent, with: [parent_key_id: :key_id]) + end + end + end + + defmodule ReferencesRollbackMigration do + use Ecto.Migration + + def change do + create table(:parent) do + add :name, :string + end + + create table(:child) do + add :parent_id, references(:parent) + end + end + end + + defmodule RenameMigration do + use Ecto.Migration + + @table_current table(:posts_migration) + @table_new table(:new_posts_migration) + + def up do + create @table_current + rename 
@table_current, to: @table_new + end + + def down do + drop @table_new + end + end + + defmodule PrefixMigration do + use Ecto.Migration + + @prefix "ecto_prefix_test" + + def up do + execute TestRepo.create_prefix(@prefix) + create table(:first, prefix: @prefix) + create table(:second, prefix: @prefix) do + add :first_id, references(:first) + end + end + + def down do + drop table(:second, prefix: @prefix) + drop table(:first, prefix: @prefix) + execute TestRepo.drop_prefix(@prefix) + end + end + + defmodule NoSQLMigration do + use Ecto.Migration + + def up do + create table(:collection, options: [capped: true]) + execute create: "collection" + end + end + + defmodule Parent do + use Ecto.Schema + + schema "parent" do + end + end + + defmodule NoErrorTableMigration do + use Ecto.Migration + + def change do + create_if_not_exists table(:existing) do + add :name, :string + end + + create_if_not_exists table(:existing) do + add :name, :string + end + + create_if_not_exists table(:existing) + + drop_if_exists table(:existing) + drop_if_exists table(:existing) + end + end + + defmodule NoErrorIndexMigration do + use Ecto.Migration + + def change do + create_if_not_exists index(:posts, [:title]) + create_if_not_exists index(:posts, [:title]) + drop_if_exists index(:posts, [:title]) + drop_if_exists index(:posts, [:title]) + end + end + + defmodule InferredDropIndexMigration do + use Ecto.Migration + + def change do + create index(:posts, [:title]) + end + end + + defmodule AlterPrimaryKeyMigration do + use Ecto.Migration + + def change do + create table(:no_pk, primary_key: false) do + add :dummy, :string + end + alter table(:no_pk) do + add :id, :serial, primary_key: true + end + end + end + + + defmodule AddColumnIfNotExistsMigration do + use Ecto.Migration + + def up do + create table(:add_col_if_not_exists_migration) + + alter table(:add_col_if_not_exists_migration) do + add_if_not_exists :value, :integer + add_if_not_exists :to_be_added, :integer + end + + execute "INSERT INTO add_col_if_not_exists_migration (value, to_be_added) VALUES (1, 2)" + end + + def down do + drop table(:add_col_if_not_exists_migration) + end + end + + defmodule DropColumnIfExistsMigration do + use Ecto.Migration + + def up do + create table(:drop_col_if_exists_migration) do + add :value, :integer + add :to_be_removed, :integer + end + + execute "INSERT INTO drop_col_if_exists_migration (value, to_be_removed) VALUES (1, 2)" + + alter table(:drop_col_if_exists_migration) do + remove_if_exists :to_be_removed, :integer + end + end + + def down do + drop table(:drop_col_if_exists_migration) + end + end + + defmodule NoErrorOnConditionalColumnMigration do + use Ecto.Migration + + def up do + create table(:no_error_on_conditional_column_migration) + + alter table(:no_error_on_conditional_column_migration) do + add_if_not_exists :value, :integer + add_if_not_exists :value, :integer + + remove_if_exists :value, :integer + remove_if_exists :value, :integer + end + end + + def down do + drop table(:no_error_on_conditional_column_migration) + end + end + + import Ecto.Query, only: [from: 2] + import Ecto.Migrator, only: [up: 4, down: 4] + + # Avoid migration out of order warnings + @moduletag :capture_log + @base_migration 1_000_000 + + setup do + {:ok, migration_number: System.unique_integer([:positive]) + @base_migration} + end + + test "create and drop table and indexes", %{migration_number: num} do + assert :ok == up(PoolRepo, num, CreateMigration, log: false) + assert :ok == down(PoolRepo, num, CreateMigration, log: 
false) + end + + test "correctly infers how to drop index", %{migration_number: num} do + assert :ok == up(PoolRepo, num, InferredDropIndexMigration, log: false) + assert :ok == down(PoolRepo, num, InferredDropIndexMigration, log: false) + end + + test "supports on delete", %{migration_number: num} do + assert :ok == up(PoolRepo, num, OnDeleteMigration, log: false) + + parent1 = PoolRepo.insert! Ecto.put_meta(%Parent{}, source: "parent1") + parent2 = PoolRepo.insert! Ecto.put_meta(%Parent{}, source: "parent2") + + writer = "INSERT INTO ref_migration (parent1, parent2) VALUES (#{parent1.id}, #{parent2.id})" + PoolRepo.query!(writer) + + reader = from r in "ref_migration", select: {r.parent1, r.parent2} + assert PoolRepo.all(reader) == [{parent1.id, parent2.id}] + + PoolRepo.delete!(parent1) + assert PoolRepo.all(reader) == [{nil, parent2.id}] + + PoolRepo.delete!(parent2) + assert PoolRepo.all(reader) == [] + + assert :ok == down(PoolRepo, num, OnDeleteMigration, log: false) + end + + test "composite foreign keys", %{migration_number: num} do + assert :ok == up(PoolRepo, num, CompositeForeignKeyMigration, log: false) + + PoolRepo.insert_all("composite_parent", [[key_id: 2]]) + assert [id] = PoolRepo.all(from p in "composite_parent", select: p.id) + + catch_error(PoolRepo.insert_all("composite_child", [[parent_id: id, parent_key_id: 1]])) + assert {1, nil} = PoolRepo.insert_all("composite_child", [[parent_id: id, parent_key_id: 2]]) + + assert :ok == down(PoolRepo, num, CompositeForeignKeyMigration, log: false) + end + + test "rolls back references in change/1", %{migration_number: num} do + assert :ok == up(PoolRepo, num, ReferencesRollbackMigration, log: false) + assert :ok == down(PoolRepo, num, ReferencesRollbackMigration, log: false) + end + + test "create table if not exists and drop table if exists does not raise on failure", %{migration_number: num} do + assert :ok == up(PoolRepo, num, NoErrorTableMigration, log: false) + end + + @tag :create_index_if_not_exists + test "create index if not exists and drop index if exists does not raise on failure", %{migration_number: num} do + assert :ok == up(PoolRepo, num, NoErrorIndexMigration, log: false) + end + + test "raises on NoSQL migrations", %{migration_number: num} do + assert_raise ArgumentError, ~r"does not support keyword lists in :options", fn -> + up(PoolRepo, num, NoSQLMigration, log: false) + end + end + + @tag :add_column + test "add column", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AddColumnMigration, log: false) + assert [2] == PoolRepo.all from p in "add_col_migration", select: p.to_be_added + :ok = down(PoolRepo, num, AddColumnMigration, log: false) + end + + @tag :modify_column + test "modify column", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AlterColumnMigration, log: false) + + assert ["foo"] == + PoolRepo.all from p in "alter_col_migration", select: p.from_null_to_not_null + assert [nil] == + PoolRepo.all from p in "alter_col_migration", select: p.from_not_null_to_null + assert [nil] == + PoolRepo.all from p in "alter_col_migration", select: p.from_default_to_no_default + assert [0] == + PoolRepo.all from p in "alter_col_migration", select: p.from_no_default_to_default + + query = "INSERT INTO alter_col_migration (from_not_null_to_null) VALUES ('foo')" + assert catch_error(PoolRepo.query!(query)) + + :ok = down(PoolRepo, num, AlterColumnMigration, log: false) + end + + @tag :modify_column + test "modify column with from", %{migration_number: num} do + assert :ok == up(PoolRepo, 
num, AlterColumnFromMigration, log: false) + + assert [1] == + PoolRepo.all from p in "modify_from_products", select: p.value + + :ok = down(PoolRepo, num, AlterColumnFromMigration, log: false) + end + + @tag :alter_primary_key + test "modify column with from and pkey", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AlterColumnFromPkeyMigration, log: false) + + assert [1] == + PoolRepo.all from p in "modify_from_posts", select: p.author_id + + :ok = down(PoolRepo, num, AlterColumnFromPkeyMigration, log: false) + end + + @tag :alter_foreign_key + test "modify foreign key's on_delete constraint", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AlterForeignKeyOnDeleteMigration, log: false) + + PoolRepo.insert_all("alter_fk_users", [[]]) + assert [id] = PoolRepo.all from p in "alter_fk_users", select: p.id + + PoolRepo.insert_all("alter_fk_posts", [[alter_fk_user_id: id]]) + PoolRepo.delete_all("alter_fk_users") + assert [nil] == PoolRepo.all from p in "alter_fk_posts", select: p.alter_fk_user_id + + :ok = down(PoolRepo, num, AlterForeignKeyOnDeleteMigration, log: false) + end + + @tag :assigns_id_type + test "modify foreign key's on_update constraint", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AlterForeignKeyOnUpdateMigration, log: false) + + PoolRepo.insert_all("alter_fk_users", [[]]) + assert [id] = PoolRepo.all from p in "alter_fk_users", select: p.id + + PoolRepo.insert_all("alter_fk_posts", [[alter_fk_user_id: id]]) + PoolRepo.update_all("alter_fk_users", set: [id: 12345]) + assert [12345] == PoolRepo.all from p in "alter_fk_posts", select: p.alter_fk_user_id + + PoolRepo.delete_all("alter_fk_posts") + :ok = down(PoolRepo, num, AlterForeignKeyOnUpdateMigration, log: false) + end + + @tag :remove_column + test "remove column", %{migration_number: num} do + assert :ok == up(PoolRepo, num, DropColumnMigration, log: false) + assert catch_error(PoolRepo.all from p in "drop_col_migration", select: p.to_be_removed) + :ok = down(PoolRepo, num, DropColumnMigration, log: false) + end + + @tag :rename_column + test "rename column", %{migration_number: num} do + assert :ok == up(PoolRepo, num, RenameColumnMigration, log: false) + assert [1] == PoolRepo.all from p in "rename_col_migration", select: p.was_renamed + :ok = down(PoolRepo, num, RenameColumnMigration, log: false) + end + + @tag :rename_table + test "rename table", %{migration_number: num} do + assert :ok == up(PoolRepo, num, RenameMigration, log: false) + assert :ok == down(PoolRepo, num, RenameMigration, log: false) + end + + @tag :prefix + test "prefix", %{migration_number: num} do + assert :ok == up(PoolRepo, num, PrefixMigration, log: false) + assert :ok == down(PoolRepo, num, PrefixMigration, log: false) + end + + @tag :alter_primary_key + test "alter primary key", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AlterPrimaryKeyMigration, log: false) + assert :ok == down(PoolRepo, num, AlterPrimaryKeyMigration, log: false) + end + + @tag :add_column_if_not_exists + @tag :remove_column_if_exists + test "add if not exists and remove if exists does not raise on failure", %{migration_number: num} do + assert :ok == up(PoolRepo, num, NoErrorOnConditionalColumnMigration, log: false) + assert :ok == down(PoolRepo, num, NoErrorOnConditionalColumnMigration, log: false) + end + + @tag :add_column_if_not_exists + test "add column if not exists", %{migration_number: num} do + assert :ok == up(PoolRepo, num, AddColumnIfNotExistsMigration, log: false) + assert [2] == PoolRepo.all from p 
in "add_col_if_not_exists_migration", select: p.to_be_added + :ok = down(PoolRepo, num, AddColumnIfNotExistsMigration, log: false) + end + + @tag :remove_column_if_exists + test "remove column when exists", %{migration_number: num} do + assert :ok == up(PoolRepo, num, DropColumnIfExistsMigration, log: false) + assert catch_error(PoolRepo.all from p in "drop_col_if_exists_migration", select: p.to_be_removed) + :ok = down(PoolRepo, num, DropColumnIfExistsMigration, log: false) + end +end diff --git a/deps/ecto_sql/integration_test/sql/migrator.exs b/deps/ecto_sql/integration_test/sql/migrator.exs new file mode 100644 index 0000000..8766d45 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/migrator.exs @@ -0,0 +1,242 @@ +Code.require_file "../support/file_helpers.exs", __DIR__ + +defmodule Ecto.Integration.MigratorTest do + use Ecto.Integration.Case + + import Support.FileHelpers + import ExUnit.CaptureLog + import Ecto.Migrator + + alias Ecto.Integration.{TestRepo, PoolRepo} + alias Ecto.Migration.SchemaMigration + + setup config do + Process.register(self(), config.test) + PoolRepo.delete_all(SchemaMigration) + :ok + end + + defmodule AnotherSchemaMigration do + use Ecto.Migration + + def change do + execute TestRepo.create_prefix("bad_schema_migrations"), + TestRepo.drop_prefix("bad_schema_migrations") + + create table(:schema_migrations, prefix: "bad_schema_migrations") do + add :version, :string + add :inserted_at, :integer + end + end + end + + defmodule BrokenLinkMigration do + use Ecto.Migration + + def change do + Task.start_link(fn -> raise "oops" end) + Process.sleep(:infinity) + end + end + + defmodule GoodMigration do + use Ecto.Migration + + def up do + create table(:good_migration) + end + + def down do + drop table(:good_migration) + end + end + + defmodule BadMigration do + use Ecto.Migration + + def change do + execute "CREATE WHAT" + end + end + + test "migrations up and down" do + assert migrated_versions(PoolRepo) == [] + assert up(PoolRepo, 31, GoodMigration, log: false) == :ok + + [migration] = PoolRepo.all(SchemaMigration) + assert migration.version == 31 + assert migration.inserted_at + + assert migrated_versions(PoolRepo) == [31] + assert up(PoolRepo, 31, GoodMigration, log: false) == :already_up + assert migrated_versions(PoolRepo) == [31] + assert down(PoolRepo, 32, GoodMigration, log: false) == :already_down + assert migrated_versions(PoolRepo) == [31] + assert down(PoolRepo, 31, GoodMigration, log: false) == :ok + assert migrated_versions(PoolRepo) == [] + end + + @tag :prefix + test "does not commit migration if insert into schema migration fails" do + # First we create a new schema migration table in another prefix + assert up(PoolRepo, 33, AnotherSchemaMigration, log: false) == :ok + assert migrated_versions(PoolRepo) == [33] + + catch_error(up(PoolRepo, 34, GoodMigration, log: false, prefix: "bad_schema_migrations")) + catch_error(PoolRepo.all("good_migration")) + catch_error(PoolRepo.all("good_migration", prefix: "bad_schema_migrations")) + + assert down(PoolRepo, 33, AnotherSchemaMigration, log: false) == :ok + end + + test "bad execute migration" do + assert catch_error(up(PoolRepo, 31, BadMigration, log: false)) + end + + test "broken link migration" do + Process.flag(:trap_exit, true) + + assert capture_log(fn -> + {:ok, pid} = Task.start_link(fn -> up(PoolRepo, 31, BrokenLinkMigration, log: false) end) + assert_receive {:EXIT, ^pid, _} + end) =~ "oops" + + assert capture_log(fn -> + catch_exit(up(PoolRepo, 31, BrokenLinkMigration, log: false)) + 
end) =~ "oops" + end + + test "run up to/step migration", config do + in_tmp fn path -> + create_migration(47, config) + create_migration(48, config) + + assert [47] = run(PoolRepo, path, :up, step: 1, log: false) + assert count_entries() == 1 + + assert [48] = run(PoolRepo, path, :up, to: 48, log: false) + end + end + + test "run down to/step migration", config do + in_tmp fn path -> + migrations = [ + create_migration(49, config), + create_migration(50, config), + ] + + assert [49, 50] = run(PoolRepo, path, :up, all: true, log: false) + purge migrations + + assert [50] = run(PoolRepo, path, :down, step: 1, log: false) + purge migrations + + assert count_entries() == 1 + assert [50] = run(PoolRepo, path, :up, to: 50, log: false) + end + end + + test "runs all migrations", config do + in_tmp fn path -> + migrations = [ + create_migration(53, config), + create_migration(54, config), + ] + + assert [53, 54] = run(PoolRepo, path, :up, all: true, log: false) + assert [] = run(PoolRepo, path, :up, all: true, log: false) + purge migrations + + assert [54, 53] = run(PoolRepo, path, :down, all: true, log: false) + purge migrations + + assert count_entries() == 0 + assert [53, 54] = run(PoolRepo, path, :up, all: true, log: false) + end + end + + test "does not commit half transactions on bad syntax", config do + in_tmp fn path -> + migrations = [ + create_migration(64, config), + create_migration("65_+", config) + ] + + assert_raise SyntaxError, fn -> + run(PoolRepo, path, :up, all: true, log: false) + end + + refute_received {:up, _} + assert count_entries() == 0 + purge migrations + end + end + + @tag :lock_for_migrations + test "raises when connection pool is too small" do + config = Application.fetch_env!(:ecto_sql, PoolRepo) + config = Keyword.merge(config, pool_size: 1) + Application.put_env(:ecto_sql, __MODULE__.SingleConnectionRepo, config) + + defmodule SingleConnectionRepo do + use Ecto.Repo, otp_app: :ecto_sql, adapter: PoolRepo.__adapter__() + end + + {:ok, _pid} = SingleConnectionRepo.start_link() + + in_tmp fn path -> + exception_message = ~r/Migrations failed to run because the connection pool size is less than 2/ + + assert_raise Ecto.MigrationError, exception_message, fn -> + run(SingleConnectionRepo, path, :up, all: true, log: false) + end + end + end + + test "does not raise when connection pool is too small but there is no lock" do + config = Application.fetch_env!(:ecto_sql, PoolRepo) + config = Keyword.merge(config, pool_size: 1, migration_lock: nil) + Application.put_env(:ecto_sql, __MODULE__.SingleConnectionNoLockRepo, config) + + defmodule SingleConnectionNoLockRepo do + use Ecto.Repo, otp_app: :ecto_sql, adapter: PoolRepo.__adapter__() + end + + {:ok, _pid} = SingleConnectionNoLockRepo.start_link() + + in_tmp fn path -> + run(SingleConnectionNoLockRepo, path, :up, all: true, log: false) + end + end + + defp count_entries() do + PoolRepo.aggregate(SchemaMigration, :count, :version) + end + + defp create_migration(num, config) do + module = Module.concat(__MODULE__, "Migration#{num}") + + File.write! 
"#{num}_migration_#{num}.exs", """ + defmodule #{module} do + use Ecto.Migration + + def up do + send #{inspect config.test}, {:up, #{inspect num}} + end + + def down do + send #{inspect config.test}, {:down, #{inspect num}} + end + end + """ + + module + end + + defp purge(modules) do + Enum.each(List.wrap(modules), fn m -> + :code.delete m + :code.purge m + end) + end +end diff --git a/deps/ecto_sql/integration_test/sql/query_many.exs b/deps/ecto_sql/integration_test/sql/query_many.exs new file mode 100644 index 0000000..885e62c --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/query_many.exs @@ -0,0 +1,15 @@ +defmodule Ecto.Integration.QueryManyTest do + use Ecto.Integration.Case, async: true + + alias Ecto.Integration.TestRepo + + test "query_many!/4" do + results = TestRepo.query_many!("SELECT 1; SELECT 2;") + assert [%{rows: [[1]], num_rows: 1}, %{rows: [[2]], num_rows: 1}] = results + end + + test "query_many!/4 with iodata" do + results = TestRepo.query_many!(["SELECT", ?\s, ?1, ";", ?\s, "SELECT", ?\s, ?2, ";"]) + assert [%{rows: [[1]], num_rows: 1}, %{rows: [[2]], num_rows: 1}] = results + end +end diff --git a/deps/ecto_sql/integration_test/sql/sandbox.exs b/deps/ecto_sql/integration_test/sql/sandbox.exs new file mode 100644 index 0000000..fc4e3a8 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/sandbox.exs @@ -0,0 +1,316 @@ +defmodule Ecto.Integration.SandboxTest do + use ExUnit.Case + + alias Ecto.Adapters.SQL.Sandbox + alias Ecto.Integration.{PoolRepo, TestRepo} + alias Ecto.Integration.Post + + import ExUnit.CaptureLog + + Application.put_env(:ecto_sql, __MODULE__.DynamicRepo, Application.compile_env(:ecto_sql, TestRepo)) + + defmodule DynamicRepo do + use Ecto.Repo, otp_app: :ecto_sql, adapter: TestRepo.__adapter__() + end + + describe "errors" do + test "raises if repo doesn't exist" do + assert_raise UndefinedFunctionError, ~r"function UnknownRepo.get_dynamic_repo/0 is undefined", fn -> + Sandbox.mode(UnknownRepo, :manual) + end + end + + test "raises if repo is not started" do + assert_raise RuntimeError, ~r"could not lookup Ecto repo #{inspect DynamicRepo} because it was not started", fn -> + Sandbox.mode(DynamicRepo, :manual) + end + end + + test "raises if repo is not using sandbox" do + assert_raise RuntimeError, ~r"cannot invoke sandbox operation with pool DBConnection", fn -> + Sandbox.mode(PoolRepo, :manual) + end + + assert_raise RuntimeError, ~r"cannot invoke sandbox operation with pool DBConnection", fn -> + Sandbox.checkout(PoolRepo) + end + end + + test "includes link to SQL sandbox on ownership errors" do + assert_raise DBConnection.OwnershipError, + ~r"See Ecto.Adapters.SQL.Sandbox docs for more information.", fn -> + TestRepo.all(Post) + end + end + end + + describe "mode" do + test "uses the repository when checked out" do + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + Sandbox.checkout(TestRepo) + assert TestRepo.all(Post) == [] + Sandbox.checkin(TestRepo) + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + end + + test "uses the repository when allowed from another process" do + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + + parent = self() + + Task.start_link fn -> + Sandbox.checkout(TestRepo) + Sandbox.allow(TestRepo, self(), parent) + send(parent, :allowed) + Process.sleep(:infinity) + end + + assert_receive :allowed + assert TestRepo.all(Post) == 
[] + end + + test "uses the repository when allowed from another process by registered name" do + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + + parent = self() + Process.register(parent, __MODULE__) + + Task.start_link fn -> + Sandbox.checkout(TestRepo) + Sandbox.allow(TestRepo, self(), __MODULE__) + send(parent, :allowed) + Process.sleep(:infinity) + end + + assert_receive :allowed + assert TestRepo.all(Post) == [] + + Process.unregister(__MODULE__) + end + + test "uses the repository when shared from another process" do + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + + parent = self() + + Task.start_link(fn -> + Sandbox.checkout(TestRepo) + Sandbox.mode(TestRepo, {:shared, self()}) + send(parent, :shared) + Process.sleep(:infinity) + end) + + assert_receive :shared + assert Task.async(fn -> TestRepo.all(Post) end) |> Task.await == [] + after + Sandbox.mode(TestRepo, :manual) + end + + test "works with a dynamic repo" do + repo_pid = start_supervised!({DynamicRepo, name: nil}) + DynamicRepo.put_dynamic_repo(repo_pid) + + assert Sandbox.mode(DynamicRepo, :manual) == :ok + + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + DynamicRepo.all(Post) + end + + Sandbox.checkout(DynamicRepo) + assert DynamicRepo.all(Post) == [] + end + + test "works with a repo pid" do + repo_pid = start_supervised!({DynamicRepo, name: nil}) + DynamicRepo.put_dynamic_repo(repo_pid) + + assert Sandbox.mode(repo_pid, :manual) == :ok + + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + DynamicRepo.all(Post) + end + + Sandbox.checkout(repo_pid) + assert DynamicRepo.all(Post) == [] + end + end + + describe "savepoints" do + test "runs inside a sandbox that is rolled back on checkin" do + Sandbox.checkout(TestRepo) + assert TestRepo.insert(%Post{}) + assert TestRepo.all(Post) != [] + Sandbox.checkin(TestRepo) + Sandbox.checkout(TestRepo) + assert TestRepo.all(Post) == [] + Sandbox.checkin(TestRepo) + end + + test "runs inside a sandbox that may be disabled" do + Sandbox.checkout(TestRepo, sandbox: false) + assert TestRepo.insert(%Post{}) + assert TestRepo.all(Post) != [] + Sandbox.checkin(TestRepo) + + Sandbox.checkout(TestRepo) + assert {1, _} = TestRepo.delete_all(Post) + Sandbox.checkin(TestRepo) + + Sandbox.checkout(TestRepo, sandbox: false) + assert {1, _} = TestRepo.delete_all(Post) + Sandbox.checkin(TestRepo) + end + + test "runs inside a sandbox with caller data when preloading associations" do + Sandbox.checkout(TestRepo) + assert TestRepo.insert(%Post{}) + parent = self() + + Task.start_link fn -> + Sandbox.allow(TestRepo, parent, self()) + assert [_] = TestRepo.all(Post) |> TestRepo.preload([:author, :comments]) + send parent, :success + end + + assert_receive :success + end + + test "runs inside a sidebox with custom ownership timeout" do + :ok = Sandbox.checkout(TestRepo, ownership_timeout: 200) + parent = self() + + assert capture_log(fn -> + {:ok, pid} = + Task.start(fn -> + Sandbox.allow(TestRepo, parent, self()) + TestRepo.transaction(fn -> Process.sleep(500) end) + end) + + ref = Process.monitor(pid) + assert_receive {:DOWN, ^ref, _, ^pid, _}, 1000 + end) =~ "it owned the connection for longer than 200ms" + end + + test "does not taint the sandbox on query errors" do + Sandbox.checkout(TestRepo) + + {:ok, _} = TestRepo.insert(%Post{}, skip_transaction: true) + {:error, _} = TestRepo.query("INVALID") + 
{:ok, _} = TestRepo.insert(%Post{}, skip_transaction: true) + + Sandbox.checkin(TestRepo) + end + end + + describe "transactions" do + @tag :transaction_isolation + test "with custom isolation level" do + Sandbox.checkout(TestRepo, isolation: "READ UNCOMMITTED") + + # Setting it to the same level later on works + TestRepo.query!("SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED") + + # Even inside a transaction + TestRepo.transaction fn -> + TestRepo.query!("SET TRANSACTION ISOLATION LEVEL READ UNCOMMITTED") + end + end + + test "disconnects on transaction timeouts" do + Sandbox.checkout(TestRepo) + + assert capture_log(fn -> + {:error, :rollback} = + TestRepo.transaction(fn -> Process.sleep(1000) end, timeout: 100) + end) =~ "timed out" + + Sandbox.checkin(TestRepo) + end + end + + describe "checkouts" do + test "with transaction inside checkout" do + Sandbox.checkout(TestRepo) + refute TestRepo.checked_out?() + refute TestRepo.in_transaction?() + + TestRepo.checkout(fn -> + assert TestRepo.checked_out?() + refute TestRepo.in_transaction?() + TestRepo.transaction(fn -> + assert TestRepo.checked_out?() + assert TestRepo.in_transaction?() + end) + assert TestRepo.checked_out?() + refute TestRepo.in_transaction?() + end) + + refute TestRepo.checked_out?() + refute TestRepo.in_transaction?() + end + + test "with checkout inside transaction" do + Sandbox.checkout(TestRepo) + refute TestRepo.checked_out?() + refute TestRepo.in_transaction?() + + TestRepo.transaction(fn -> + assert TestRepo.checked_out?() + assert TestRepo.in_transaction?() + TestRepo.checkout(fn -> + assert TestRepo.checked_out?() + assert TestRepo.in_transaction?() + end) + assert TestRepo.checked_out?() + assert TestRepo.in_transaction?() + end) + + refute TestRepo.checked_out?() + refute TestRepo.in_transaction?() + end + end + + describe "start_owner!/2" do + test "checks out the connection" do + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + + owner = Sandbox.start_owner!(TestRepo) + assert TestRepo.all(Post) == [] + + :ok = Sandbox.stop_owner(owner) + refute Process.alive?(owner) + end + + test "can set shared mode" do + assert_raise DBConnection.OwnershipError, ~r"cannot find ownership process", fn -> + TestRepo.all(Post) + end + + parent = self() + + Task.start_link(fn -> + owner = Sandbox.start_owner!(TestRepo, shared: true) + send(parent, {:owner, owner}) + Process.sleep(:infinity) + end) + + assert_receive {:owner, owner} + assert TestRepo.all(Post) == [] + :ok = Sandbox.stop_owner(owner) + after + Sandbox.mode(TestRepo, :manual) + end + end +end diff --git a/deps/ecto_sql/integration_test/sql/sql.exs b/deps/ecto_sql/integration_test/sql/sql.exs new file mode 100644 index 0000000..4645aa6 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/sql.exs @@ -0,0 +1,159 @@ +defmodule Ecto.Integration.SQLTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.PoolRepo + alias Ecto.Integration.TestRepo + alias Ecto.Integration.Barebone + alias Ecto.Integration.Post + alias Ecto.Integration.CorruptedPk + import Ecto.Query, only: [from: 2] + + test "fragmented types" do + datetime = ~N[2014-01-16 20:26:51] + TestRepo.insert!(%Post{inserted_at: datetime}) + query = from p in Post, where: fragment("? 
>= ?", p.inserted_at, ^datetime), select: p.inserted_at + assert [^datetime] = TestRepo.all(query) + end + + test "fragmented schemaless types" do + TestRepo.insert!(%Post{visits: 123}) + assert [123] = TestRepo.all(from p in "posts", select: type(fragment("visits"), :integer)) + end + + @tag :array_type + test "fragment array types" do + text1 = "foo" + text2 = "bar" + result = TestRepo.query!("SELECT $1::text[]", [[text1, text2]]) + assert result.rows == [[[text1, text2]]] + end + + test "query!/4 with dynamic repo" do + TestRepo.put_dynamic_repo(:unknown) + assert_raise RuntimeError, ~r/:unknown/, fn -> TestRepo.query!("SELECT 1") end + end + + test "query!/4" do + result = TestRepo.query!("SELECT 1") + assert result.rows == [[1]] + end + + test "query!/4 with iodata" do + result = TestRepo.query!(["SELECT", ?\s, ?1]) + assert result.rows == [[1]] + end + + test "disconnect_all/2" do + assert :ok = PoolRepo.disconnect_all(0) + end + + test "to_sql/3" do + {sql, []} = TestRepo.to_sql(:all, Barebone) + assert sql =~ "SELECT" + assert sql =~ "barebones" + + {sql, [0]} = TestRepo.to_sql(:update_all, from(b in Barebone, update: [set: [num: ^0]])) + assert sql =~ "UPDATE" + assert sql =~ "barebones" + assert sql =~ "SET" + + {sql, []} = TestRepo.to_sql(:delete_all, Barebone) + assert sql =~ "DELETE" + assert sql =~ "barebones" + end + + test "raises when primary key is not unique on struct operation" do + schema = %CorruptedPk{a: "abc"} + TestRepo.insert!(schema) + TestRepo.insert!(schema) + TestRepo.insert!(schema) + + assert_raise Ecto.MultiplePrimaryKeyError, + ~r|expected delete on corrupted_pk to return at most one entry but got 3 entries|, + fn -> TestRepo.delete!(schema) end + end + + test "Repo.insert! escape" do + TestRepo.insert!(%Post{title: "'"}) + + query = from(p in Post, select: p.title) + assert ["'"] == TestRepo.all(query) + end + + test "Repo.update! 
escape" do + p = TestRepo.insert!(%Post{title: "hello"}) + TestRepo.update!(Ecto.Changeset.change(p, title: "'")) + + query = from(p in Post, select: p.title) + assert ["'"] == TestRepo.all(query) + end + + @tag :insert_cell_wise_defaults + test "Repo.insert_all escape" do + TestRepo.insert_all(Post, [%{title: "'"}]) + + query = from(p in Post, select: p.title) + assert ["'"] == TestRepo.all(query) + end + + test "Repo.update_all escape" do + TestRepo.insert!(%Post{title: "hello"}) + + TestRepo.update_all(Post, set: [title: "'"]) + reader = from(p in Post, select: p.title) + assert ["'"] == TestRepo.all(reader) + + query = from(Post, where: "'" != "") + TestRepo.update_all(query, set: [title: "''"]) + assert ["''"] == TestRepo.all(reader) + end + + test "Repo.delete_all escape" do + TestRepo.insert!(%Post{title: "hello"}) + assert [_] = TestRepo.all(Post) + + TestRepo.delete_all(from(Post, where: "'" == "'")) + assert [] == TestRepo.all(Post) + end + + test "load" do + inserted_at = ~N[2016-01-01 09:00:00] + TestRepo.insert!(%Post{title: "title1", inserted_at: inserted_at, public: false}) + + result = Ecto.Adapters.SQL.query!(TestRepo, "SELECT * FROM posts", []) + posts = Enum.map(result.rows, &TestRepo.load(Post, {result.columns, &1})) + assert [%Post{title: "title1", inserted_at: ^inserted_at, public: false}] = posts + end + + test "returns true when table exists" do + assert Ecto.Adapters.SQL.table_exists?(TestRepo, "posts") + end + + test "returns false table doesn't exists" do + refute Ecto.Adapters.SQL.table_exists?(TestRepo, "unknown") + end + + test "returns result as a formatted table" do + TestRepo.insert_all(Post, [%{title: "my post title", counter: 1, public: nil}]) + + # resolve correct query for each adapter + query = from(p in Post, select: [p.title, p.counter, p.public]) + {query, _} = Ecto.Adapters.SQL.to_sql(:all, TestRepo, query) + + table = + query + |> TestRepo.query!() + |> Ecto.Adapters.SQL.format_table() + + assert table == "+---------------+---------+--------+\n| title | counter | public |\n+---------------+---------+--------+\n| my post title | 1 | NULL |\n+---------------+---------+--------+" + end + + test "format_table edge cases" do + assert Ecto.Adapters.SQL.format_table(nil) == "" + assert Ecto.Adapters.SQL.format_table(%{columns: nil, rows: nil}) == "" + assert Ecto.Adapters.SQL.format_table(%{columns: [], rows: []}) == "" + assert Ecto.Adapters.SQL.format_table(%{columns: [], rows: [["test"]]}) == "" + assert Ecto.Adapters.SQL.format_table(%{columns: ["test"], rows: []}) == "+------+\n| test |\n+------+\n+------+" + assert Ecto.Adapters.SQL.format_table(%{columns: ["test"], rows: nil}) == "+------+\n| test |\n+------+\n+------+" + end +end diff --git a/deps/ecto_sql/integration_test/sql/stream.exs b/deps/ecto_sql/integration_test/sql/stream.exs new file mode 100644 index 0000000..e304918 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/stream.exs @@ -0,0 +1,44 @@ +defmodule Ecto.Integration.StreamTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + alias Ecto.Integration.Post + alias Ecto.Integration.Comment + import Ecto.Query + + test "stream empty" do + assert {:ok, []} = TestRepo.transaction(fn() -> + TestRepo.stream(Post) + |> Enum.to_list() + end) + + assert {:ok, []} = TestRepo.transaction(fn() -> + TestRepo.stream(from p in Post) + |> Enum.to_list() + end) + end + + test "stream without schema" do + %Post{} = TestRepo.insert!(%Post{title: "title1"}) + 
%Post{} = TestRepo.insert!(%Post{title: "title2"}) + + assert {:ok, ["title1", "title2"]} = TestRepo.transaction(fn() -> + TestRepo.stream(from(p in "posts", order_by: p.title, select: p.title)) + |> Enum.to_list() + end) + end + + test "stream with assoc" do + p1 = TestRepo.insert!(%Post{title: "1"}) + + %Comment{id: cid1} = TestRepo.insert!(%Comment{text: "1", post_id: p1.id}) + %Comment{id: cid2} = TestRepo.insert!(%Comment{text: "2", post_id: p1.id}) + + stream = TestRepo.stream(Ecto.assoc(p1, :comments)) + assert {:ok, [c1, c2]} = TestRepo.transaction(fn() -> + Enum.to_list(stream) + end) + assert c1.id == cid1 + assert c2.id == cid2 + end +end diff --git a/deps/ecto_sql/integration_test/sql/subquery.exs b/deps/ecto_sql/integration_test/sql/subquery.exs new file mode 100644 index 0000000..f556e12 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/subquery.exs @@ -0,0 +1,114 @@ +defmodule Ecto.Integration.SubQueryTest do + use Ecto.Integration.Case, async: Application.compile_env(:ecto, :async_integration_tests, true) + + alias Ecto.Integration.TestRepo + import Ecto.Query + alias Ecto.Integration.Post + alias Ecto.Integration.Comment + + test "from: subqueries with select source" do + TestRepo.insert!(%Post{title: "hello", public: true}) + + query = from p in Post, select: p + assert ["hello"] = + TestRepo.all(from p in subquery(query), select: p.title) + assert [post] = + TestRepo.all(from p in subquery(query), select: p) + + assert %NaiveDateTime{} = post.inserted_at + assert post.__meta__.state == :loaded + end + + @tag :map_boolean_in_expression + test "from: subqueries with map and select expression" do + TestRepo.insert!(%Post{title: "hello", public: true}) + + query = from p in Post, select: %{title: p.title, pub: not p.public} + assert ["hello"] = + TestRepo.all(from p in subquery(query), select: p.title) + assert [%{title: "hello", pub: false}] = + TestRepo.all(from p in subquery(query), select: p) + assert [{"hello", %{title: "hello", pub: false}}] = + TestRepo.all(from p in subquery(query), select: {p.title, p}) + assert [{%{title: "hello", pub: false}, false}] = + TestRepo.all(from p in subquery(query), select: {p, p.pub}) + end + + @tag :map_boolean_in_expression + test "from: subqueries with map update and select expression" do + TestRepo.insert!(%Post{title: "hello", public: true}) + + query = from p in Post, select: %{p | public: not p.public} + assert ["hello"] = + TestRepo.all(from p in subquery(query), select: p.title) + assert [%Post{title: "hello", public: false}] = + TestRepo.all(from p in subquery(query), select: p) + assert [{"hello", %Post{title: "hello", public: false}}] = + TestRepo.all(from p in subquery(query), select: {p.title, p}) + assert [{%Post{title: "hello", public: false}, false}] = + TestRepo.all(from p in subquery(query), select: {p, p.public}) + end + + test "from: subqueries with map update on virtual field and select expression" do + TestRepo.insert!(%Post{title: "hello"}) + + query = from p in Post, select: %{p | temp: p.title} + assert ["hello"] = + TestRepo.all(from p in subquery(query), select: p.temp) + assert [%Post{title: "hello", temp: "hello"}] = + TestRepo.all(from p in subquery(query), select: p) + end + + @tag :subquery_aggregates + test "from: subqueries with aggregates" do + TestRepo.insert!(%Post{visits: 10}) + TestRepo.insert!(%Post{visits: 11}) + TestRepo.insert!(%Post{visits: 13}) + + query = from p in Post, select: [:visits], order_by: [asc: :visits] + assert [13] = TestRepo.all(from p in subquery(query), select: 
max(p.visits)) + query = from p in Post, select: [:visits], order_by: [asc: :visits], limit: 2 + assert [11] = TestRepo.all(from p in subquery(query), select: max(p.visits)) + + query = from p in Post, order_by: [asc: :visits] + assert [13] = TestRepo.all(from p in subquery(query), select: max(p.visits)) + query = from p in Post, order_by: [asc: :visits], limit: 2 + assert [11] = TestRepo.all(from p in subquery(query), select: max(p.visits)) + end + + test "from: subqueries with parameters" do + TestRepo.insert!(%Post{visits: 10, title: "hello"}) + TestRepo.insert!(%Post{visits: 11, title: "hello"}) + TestRepo.insert!(%Post{visits: 13, title: "world"}) + + query = from p in Post, where: p.visits >= ^11 and p.visits <= ^13 + query = from p in subquery(query), where: p.title == ^"hello", select: fragment("? + ?", p.visits, ^1) + assert [12] = TestRepo.all(query) + end + + test "join: subqueries with select source" do + %{id: id} = TestRepo.insert!(%Post{title: "hello", public: true}) + TestRepo.insert!(%Comment{post_id: id}) + + query = from p in Post, select: p + assert ["hello"] = + TestRepo.all(from c in Comment, join: p in subquery(query), on: c.post_id == p.id, select: p.title) + assert [%Post{inserted_at: %NaiveDateTime{}}] = + TestRepo.all(from c in Comment, join: p in subquery(query), on: c.post_id == p.id, select: p) + end + + test "join: subqueries with parameters" do + TestRepo.insert!(%Post{visits: 10, title: "hello"}) + TestRepo.insert!(%Post{visits: 11, title: "hello"}) + TestRepo.insert!(%Post{visits: 13, title: "world"}) + TestRepo.insert!(%Comment{}) + TestRepo.insert!(%Comment{}) + + query = from p in Post, where: p.visits >= ^11 and p.visits <= ^13 + query = from c in Comment, + join: p in subquery(query), + where: p.title == ^"hello", + select: fragment("? 
+ ?", p.visits, ^1) + assert [12, 12] = TestRepo.all(query) + end +end diff --git a/deps/ecto_sql/integration_test/sql/transaction.exs b/deps/ecto_sql/integration_test/sql/transaction.exs new file mode 100644 index 0000000..6ac9334 --- /dev/null +++ b/deps/ecto_sql/integration_test/sql/transaction.exs @@ -0,0 +1,277 @@ +defmodule Ecto.Integration.TransactionTest do + # We can keep this test async as long as it + # is the only one access the transactions table + use Ecto.Integration.Case, async: true + + import Ecto.Query + alias Ecto.Integration.PoolRepo # Used for writes + alias Ecto.Integration.TestRepo # Used for reads + + @moduletag :capture_log + + defmodule UniqueError do + defexception message: "unique error" + end + + setup do + PoolRepo.delete_all "transactions" + :ok + end + + defmodule Trans do + use Ecto.Schema + + schema "transactions" do + field :num, :integer + end + end + + test "transaction returns value" do + refute PoolRepo.in_transaction?() + {:ok, val} = PoolRepo.transaction(fn -> + assert PoolRepo.in_transaction?() + {:ok, val} = + PoolRepo.transaction(fn -> + assert PoolRepo.in_transaction?() + 42 + end) + assert PoolRepo.in_transaction?() + val + end) + refute PoolRepo.in_transaction?() + assert val == 42 + end + + test "transaction re-raises" do + assert_raise UniqueError, fn -> + PoolRepo.transaction(fn -> + PoolRepo.transaction(fn -> + raise UniqueError + end) + end) + end + end + + # tag is required for TestRepo, since it is checkout in + # Ecto.Integration.Case setup + @tag isolation_level: :snapshot + test "transaction commits" do + # mssql requires that all transactions that use same shared lock are set + # to :snapshot isolation level + opts = [isolation_level: :snapshot] + + PoolRepo.transaction(fn -> + e = PoolRepo.insert!(%Trans{num: 1}) + assert [^e] = PoolRepo.all(Trans) + assert [] = TestRepo.all(Trans) + end, opts) + + assert [%Trans{num: 1}] = PoolRepo.all(Trans) + end + + @tag isolation_level: :snapshot + test "transaction rolls back" do + opts = [isolation_level: :snapshot] + try do + PoolRepo.transaction(fn -> + e = PoolRepo.insert!(%Trans{num: 2}) + assert [^e] = PoolRepo.all(Trans) + assert [] = TestRepo.all(Trans) + raise UniqueError + end, opts) + rescue + UniqueError -> :ok + end + + assert [] = TestRepo.all(Trans) + end + + test "transaction rolls back per repository" do + message = "cannot call rollback outside of transaction" + + assert_raise RuntimeError, message, fn -> + PoolRepo.rollback(:done) + end + + assert_raise RuntimeError, message, fn -> + TestRepo.transaction fn -> + PoolRepo.rollback(:done) + end + end + end + + @tag :assigns_id_type + test "transaction rolls back with reason on aborted transaction" do + e1 = PoolRepo.insert!(%Trans{num: 13}) + + assert_raise Ecto.ConstraintError, fn -> + TestRepo.transaction fn -> + PoolRepo.insert!(%Trans{id: e1.id, num: 14}) + end + end + end + + test "nested transaction partial rollback" do + assert PoolRepo.transaction(fn -> + e1 = PoolRepo.insert!(%Trans{num: 3}) + assert [^e1] = PoolRepo.all(Trans) + + try do + PoolRepo.transaction(fn -> + e2 = PoolRepo.insert!(%Trans{num: 4}) + assert [^e1, ^e2] = PoolRepo.all(from(t in Trans, order_by: t.num)) + raise UniqueError + end) + rescue + UniqueError -> :ok + end + + assert_raise DBConnection.ConnectionError, "transaction rolling back", + fn() -> PoolRepo.insert!(%Trans{num: 5}) end + end) == {:error, :rollback} + + assert TestRepo.all(Trans) == [] + end + + test "manual rollback doesn't bubble up" do + x = PoolRepo.transaction(fn -> + e = 
PoolRepo.insert!(%Trans{num: 6}) + assert [^e] = PoolRepo.all(Trans) + PoolRepo.rollback(:oops) + end) + + assert x == {:error, :oops} + assert [] = TestRepo.all(Trans) + end + + test "manual rollback bubbles up on nested transaction" do + assert PoolRepo.transaction(fn -> + e = PoolRepo.insert!(%Trans{num: 7}) + assert [^e] = PoolRepo.all(Trans) + assert {:error, :oops} = PoolRepo.transaction(fn -> + PoolRepo.rollback(:oops) + end) + assert_raise DBConnection.ConnectionError, "transaction rolling back", + fn() -> PoolRepo.insert!(%Trans{num: 8}) end + end) == {:error, :rollback} + + assert [] = TestRepo.all(Trans) + end + + test "transactions are not shared in repo" do + pid = self() + opts = [isolation_level: :snapshot] + + new_pid = spawn_link fn -> + PoolRepo.transaction(fn -> + e = PoolRepo.insert!(%Trans{num: 9}) + assert [^e] = PoolRepo.all(Trans) + send(pid, :in_transaction) + receive do + :commit -> :ok + after + 5000 -> raise "timeout" + end + end, opts) + send(pid, :committed) + end + + receive do + :in_transaction -> :ok + after + 5000 -> raise "timeout" + end + + # mssql requires that all transactions that use same shared lock + # set transaction isolation level to "snapshot" so this must be wrapped into + # explicit transaction + PoolRepo.transaction(fn -> + assert [] = PoolRepo.all(Trans) + end, opts) + + send(new_pid, :commit) + receive do + :committed -> :ok + after + 5000 -> raise "timeout" + end + + assert [%Trans{num: 9}] = PoolRepo.all(Trans) + end + + ## Checkout + + describe "with checkouts" do + test "transaction inside checkout" do + PoolRepo.checkout(fn -> + refute PoolRepo.in_transaction?() + PoolRepo.transaction(fn -> + assert PoolRepo.in_transaction?() + end) + refute PoolRepo.in_transaction?() + end) + end + + test "checkout inside transaction" do + PoolRepo.transaction(fn -> + assert PoolRepo.in_transaction?() + PoolRepo.checkout(fn -> + assert PoolRepo.in_transaction?() + end) + assert PoolRepo.in_transaction?() + end) + end + + @tag :transaction_checkout_raises + test "checkout raises on transaction attempt" do + assert_raise DBConnection.ConnectionError, ~r"connection was checked out with status", fn -> + PoolRepo.checkout(fn -> PoolRepo.query!("BEGIN") end) + end + end + end + + ## Logging + + defp register_telemetry() do + Process.put(:telemetry, fn _, measurements, event -> send(self(), {measurements, event}) end) + end + + test "log begin, commit and rollback" do + register_telemetry() + + PoolRepo.transaction(fn -> + assert_received {measurements, %{params: [], result: {:ok, _res}}} + assert is_integer(measurements.query_time) and measurements.query_time >= 0 + assert is_integer(measurements.queue_time) and measurements.queue_time >= 0 + + refute_received %{} + register_telemetry() + end) + + assert_received {measurements, %{params: [], result: {:ok, _res}}} + assert is_integer(measurements.query_time) and measurements.query_time >= 0 + refute Map.has_key?(measurements, :queue_time) + + assert PoolRepo.transaction(fn -> + refute_received %{} + register_telemetry() + PoolRepo.rollback(:log_rollback) + end) == {:error, :log_rollback} + + assert_received {measurements, %{params: [], result: {:ok, _res}}} + assert is_integer(measurements.query_time) and measurements.query_time >= 0 + refute Map.has_key?(measurements, :queue_time) + end + + test "log queries inside transactions" do + PoolRepo.transaction(fn -> + register_telemetry() + assert [] = PoolRepo.all(Trans) + + assert_received {measurements, %{params: [], result: {:ok, _res}}} + assert 
is_integer(measurements.query_time) and measurements.query_time >= 0 + assert is_integer(measurements.decode_time) and measurements.query_time >= 0 + refute Map.has_key?(measurements, :queue_time) + end) + end +end diff --git a/deps/ecto_sql/integration_test/support/file_helpers.exs b/deps/ecto_sql/integration_test/support/file_helpers.exs new file mode 100644 index 0000000..947ff49 --- /dev/null +++ b/deps/ecto_sql/integration_test/support/file_helpers.exs @@ -0,0 +1,43 @@ +defmodule Support.FileHelpers do + import ExUnit.Assertions + + @doc """ + Returns the `tmp_path` for tests. + """ + def tmp_path do + Path.expand("../../tmp", __DIR__) + end + + @doc """ + Executes the given function in a temp directory + tailored for this test case and test. + """ + defmacro in_tmp(fun) do + path = Path.join([tmp_path(), "#{__CALLER__.module}", "#{elem(__CALLER__.function, 0)}"]) + quote do + path = unquote(path) + File.rm_rf!(path) + File.mkdir_p!(path) + File.cd!(path, fn -> unquote(fun).(path) end) + end + end + + @doc """ + Asserts a file was generated. + """ + def assert_file(file) do + assert File.regular?(file), "Expected #{file} to exist, but does not" + end + + @doc """ + Asserts a file was generated and that it matches a given pattern. + """ + def assert_file(file, callback) when is_function(callback, 1) do + assert_file(file) + callback.(File.read!(file)) + end + + def assert_file(file, match) do + assert_file file, &(assert &1 =~ match) + end +end diff --git a/deps/ecto_sql/integration_test/support/migration.exs b/deps/ecto_sql/integration_test/support/migration.exs new file mode 100644 index 0000000..c5c83b4 --- /dev/null +++ b/deps/ecto_sql/integration_test/support/migration.exs @@ -0,0 +1,132 @@ +defmodule Ecto.Integration.Migration do + use Ecto.Migration + + def change do + # IO.puts "TESTING MIGRATION LOCK" + # Process.sleep(10000) + + create table(:users, comment: "users table") do + add :name, :string, comment: "name column" + add :custom_id, :uuid + timestamps() + end + + create table(:posts) do + add :title, :string, size: 100 + add :counter, :integer + add :blob, :binary + add :bid, :binary_id + add :uuid, :uuid + add :meta, :map + add :links, {:map, :string} + add :intensities, {:map, :float} + add :public, :boolean + add :cost, :decimal, precision: 2, scale: 1 + add :visits, :integer + add :wrapped_visits, :integer + add :intensity, :float + add :author_id, :integer + add :posted, :date + timestamps(null: true) + end + + create table(:posts_users, primary_key: false) do + add :post_id, references(:posts) + add :user_id, references(:users) + end + + create table(:posts_users_pk) do + add :post_id, references(:posts) + add :user_id, references(:users) + timestamps() + end + + # Add a unique index on uuid. We use this + # to verify the behaviour that the index + # only matters if the UUID column is not NULL. 
+ create unique_index(:posts, [:uuid], comment: "posts index") + + create table(:permalinks) do + add :uniform_resource_locator, :string + add :title, :string + add :post_id, references(:posts) + add :user_id, references(:users) + end + + create unique_index(:permalinks, [:post_id]) + create unique_index(:permalinks, [:uniform_resource_locator]) + + create table(:comments) do + add :text, :string, size: 100 + add :lock_version, :integer, default: 1 + add :post_id, references(:posts) + add :author_id, references(:users) + end + + create table(:customs, primary_key: false) do + add :bid, :binary_id, primary_key: true + add :uuid, :uuid + end + + create unique_index(:customs, [:uuid]) + + create table(:customs_customs, primary_key: false) do + add :custom_id1, references(:customs, column: :bid, type: :binary_id) + add :custom_id2, references(:customs, column: :bid, type: :binary_id) + end + + create table(:barebones) do + add :num, :integer + end + + create table(:transactions) do + add :num, :integer + end + + create table(:lock_counters) do + add :count, :integer + end + + create table(:orders) do + add :item, :map + add :items, :map + add :meta, :map + add :permalink_id, references(:permalinks) + end + + unless :array_type in ExUnit.configuration()[:exclude] do + create table(:tags) do + add :ints, {:array, :integer} + add :uuids, {:array, :uuid}, default: [] + add :items, {:array, :map} + end + end + + create table(:composite_pk, primary_key: false) do + add :a, :integer, primary_key: true + add :b, :integer, primary_key: true + add :name, :string + end + + create table(:corrupted_pk, primary_key: false) do + add :a, :string + end + + create table(:posts_users_composite_pk) do + add :post_id, references(:posts), primary_key: true + add :user_id, references(:users), primary_key: true + timestamps() + end + + create unique_index(:posts_users_composite_pk, [:post_id, :user_id]) + + create table(:usecs) do + add :naive_datetime_usec, :naive_datetime_usec + add :utc_datetime_usec, :utc_datetime_usec + end + + create table(:bits) do + add :bit, :bit + end + end +end diff --git a/deps/ecto_sql/integration_test/support/repo.exs b/deps/ecto_sql/integration_test/support/repo.exs new file mode 100644 index 0000000..f17c838 --- /dev/null +++ b/deps/ecto_sql/integration_test/support/repo.exs @@ -0,0 +1,23 @@ +defmodule Ecto.Integration.Repo do + defmacro __using__(opts) do + quote do + use Ecto.Repo, unquote(opts) + + @query_event __MODULE__ + |> Module.split() + |> Enum.map(& &1 |> Macro.underscore() |> String.to_atom()) + |> Kernel.++([:query]) + + def init(_, opts) do + fun = &Ecto.Integration.Repo.handle_event/4 + :telemetry.attach_many(__MODULE__, [[:custom], @query_event], fun, :ok) + {:ok, opts} + end + end + end + + def handle_event(event, latency, metadata, _config) do + handler = Process.delete(:telemetry) || fn _, _, _ -> :ok end + handler.(event, latency, metadata) + end +end diff --git a/deps/ecto_sql/lib/ecto/adapter/migration.ex b/deps/ecto_sql/lib/ecto/adapter/migration.ex new file mode 100644 index 0000000..86a662b --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapter/migration.ex @@ -0,0 +1,62 @@ +defmodule Ecto.Adapter.Migration do + @moduledoc """ + Specifies the adapter migrations API. 
+ """ + + alias Ecto.Migration.Table + alias Ecto.Migration.Index + alias Ecto.Migration.Reference + + @type adapter_meta :: Ecto.Adapter.adapter_meta() + + @typedoc "All migration commands" + @type command :: + raw :: + String.t() + | {:create, Table.t(), [table_subcommand]} + | {:create_if_not_exists, Table.t(), [table_subcommand]} + | {:alter, Table.t(), [table_subcommand]} + | {:drop, Table.t(), :restrict | :cascade} + | {:drop_if_exists, Table.t(), :restrict | :cascade} + | {:create, Index.t()} + | {:create_if_not_exists, Index.t()} + | {:drop, Index.t(), :restrict | :cascade} + | {:drop_if_exists, Index.t(), :restrict | :cascade} + + @typedoc "All commands allowed within the block passed to `table/2`" + @type table_subcommand :: + {:add, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), Keyword.t()} + | {:add_if_not_exists, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), Keyword.t()} + | {:modify, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), Keyword.t()} + | {:remove, field :: atom, type :: Ecto.Type.t() | Reference.t() | binary(), Keyword.t()} + | {:remove, field :: atom} + | {:remove_if_exists, type :: Ecto.Type.t() | Reference.t() | binary()} + + @typedoc """ + A struct that represents a table or index in a database schema. + + These database objects can be modified through the use of a Data + Definition Language, hence the name DDL object. + """ + @type ddl_object :: Table.t() | Index.t() + + @doc """ + Checks if the adapter supports ddl transaction. + """ + @callback supports_ddl_transaction? :: boolean + + @doc """ + Executes migration commands. + """ + @callback execute_ddl(adapter_meta, command, options :: Keyword.t()) :: + {:ok, [{Logger.level, Logger.message, Logger.metadata}]} + + @doc """ + Locks the migrations table and emits the locked versions for callback execution. + + It returns the result of calling the given function with a list of versions. + """ + @callback lock_for_migrations(adapter_meta, options :: Keyword.t(), fun) :: + result + when fun: (() -> result), result: var +end diff --git a/deps/ecto_sql/lib/ecto/adapter/structure.ex b/deps/ecto_sql/lib/ecto/adapter/structure.ex new file mode 100644 index 0000000..e2de4cf --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapter/structure.ex @@ -0,0 +1,41 @@ +defmodule Ecto.Adapter.Structure do + @moduledoc """ + Specifies the adapter structure (dump/load) API. + """ + + @doc """ + Dumps the given structure. + + The path will be looked in the `config` under :dump_path or + default to the structure path inside `default`. + + Returns `:ok` if it was dumped successfully, an error tuple otherwise. + + ## Examples + + structure_dump("priv/repo", username: "postgres", + database: "ecto_test", + hostname: "localhost") + + """ + @callback structure_dump(default :: String.t, config :: Keyword.t) :: + {:ok, String.t} | {:error, term} + + @doc """ + Loads the given structure. + + The path will be looked in the `config` under :dump_path or + default to the structure path inside `default`. + + Returns `:ok` if it was loaded successfully, an error tuple otherwise. 
+ + ## Examples + + structure_load("priv/repo", username: "postgres", + database: "ecto_test", + hostname: "localhost") + + """ + @callback structure_load(default :: String.t, config :: Keyword.t) :: + {:ok, String.t} | {:error, term} +end diff --git a/deps/ecto_sql/lib/ecto/adapters/mysql.ex b/deps/ecto_sql/lib/ecto/adapters/mysql.ex new file mode 100644 index 0000000..5f7dbcd --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/mysql.ex @@ -0,0 +1,23 @@ +defmodule Ecto.Adapters.MySQL do + @moduledoc false + + @behaviour Ecto.Adapter + + defp error!() do + raise "Ecto.Adapters.MySQL is obsolete, use Ecto.Adapters.MyXQL instead" + end + + defmacro __before_compile__(_env), do: error!() + + def ensure_all_started(_, _), do: error!() + + def init(_), do: error!() + + def checkout(_, _, _), do: error!() + + def checked_out?(_), do: error!() + + def loaders(_, _), do: error!() + + def dumpers(_, _), do: error!() +end diff --git a/deps/ecto_sql/lib/ecto/adapters/myxql.ex b/deps/ecto_sql/lib/ecto/adapters/myxql.ex new file mode 100644 index 0000000..4c5daea --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/myxql.ex @@ -0,0 +1,431 @@ +defmodule Ecto.Adapters.MyXQL do + @moduledoc """ + Adapter module for MySQL. + + It uses `MyXQL` for communicating to the database. + + ## Options + + MySQL options split in different categories described + below. All options can be given via the repository + configuration: + + ### Connection options + + * `:protocol` - Set to `:socket` for using UNIX domain socket, or `:tcp` for TCP + (default: `:socket`) + * `:socket` - Connect to MySQL via UNIX sockets in the given path. + * `:hostname` - Server hostname + * `:port` - Server port (default: 3306) + * `:username` - Username + * `:password` - User password + * `:database` - the database to connect to + * `:pool` - The connection pool module, may be set to `Ecto.Adapters.SQL.Sandbox` + * `:ssl` - Set to true if ssl should be used (default: false) + * `:ssl_opts` - A list of ssl options, see Erlang's `ssl` docs + * `:connect_timeout` - The timeout for establishing new connections (default: 5000) + * `:cli_protocol` - The protocol used for the mysql client connection (default: `"tcp"`). + This option is only used for `mix ecto.load` and `mix ecto.dump`, + via the `mysql` command. For more information, please check + [MySQL docs](https://dev.mysql.com/doc/en/connecting.html) + * `:socket_options` - Specifies socket configuration + * `:show_sensitive_data_on_connection_error` - show connection data and + configuration whenever there is an error attempting to connect to the + database + + The `:socket_options` are particularly useful when configuring the size + of both send and receive buffers. For example, when Ecto starts with a + pool of 20 connections, the memory usage may quickly grow from 20MB to + 50MB based on the operating system default values for TCP buffers. It is + advised to stick with the operating system defaults but they can be + tweaked if desired: + + socket_options: [recbuf: 8192, sndbuf: 8192] + + We also recommend developers to consult the `MyXQL.start_link/1` documentation + for a complete listing of all supported options. + + ### Storage options + + * `:charset` - the database encoding (default: "utf8mb4") + * `:collation` - the collation order + * `:dump_path` - where to place dumped structures + + ### After connect callback + + If you want to execute a callback as soon as connection is established + to the database, you can use the `:after_connect` configuration. 
For + example, in your repository configuration you can add: + + after_connect: {MyXQL, :query!, ["SET variable = value", []]} + + You can also specify your own module that will receive the MyXQL + connection as argument. + + ## Limitations + + There are some limitations when using Ecto with MySQL that one + needs to be aware of. + + ### Engine + + Tables created by Ecto are guaranteed to use InnoDB, regardless + of the MySQL version. + + ### UUIDs + + MySQL does not support UUID types. Ecto emulates them by using + `binary(16)`. + + ### Read after writes + + Because MySQL does not support RETURNING clauses in INSERT and + UPDATE, it does not support the `:read_after_writes` option of + `Ecto.Schema.field/3`. + + ### DDL Transaction + + MySQL does not support migrations inside transactions as it + automatically commits after some commands like CREATE TABLE. + Therefore MySQL migrations does not run inside transactions. + + ## Old MySQL versions + + ### JSON support + + MySQL introduced a native JSON type in v5.7.8, if your server is + using this version or higher, you may use `:map` type for your + column in migration: + + add :some_field, :map + + If you're using older server versions, use a `TEXT` field instead: + + add :some_field, :text + + in either case, the adapter will automatically encode/decode the + value from JSON. + + ### usec in datetime + + Old MySQL versions did not support usec in datetime while + more recent versions would round or truncate the usec value. + + Therefore, in case the user decides to use microseconds in + datetimes and timestamps with MySQL, be aware of such + differences and consult the documentation for your MySQL + version. + + If your version of MySQL supports microsecond precision, you + will be able to utilize Ecto's usec types. + + ## Multiple Result Support + + MyXQL supports the execution of queries that return multiple + results, such as text queries with multiple statements separated + by semicolons or stored procedures. These can be executed with + `Ecto.Adapters.SQL.query_many/4` or the `YourRepo.query_many/3` + shortcut. + + Be default, these queries will be executed with the `:query_type` + option set to `:text`. To take advantage of prepared statements + when executing a stored procedure, set the `:query_type` option + to `:binary`. 
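+
+   As a minimal sketch of the above (assuming a repo module named `MyRepo`;
+   the literal statements are illustrative only), a text query containing two
+   statements yields one result per statement:
+
+       {:ok, [first_result, second_result]} =
+         MyRepo.query_many("SELECT 1; SELECT 2", [])
+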
+ """ + + # Inherit all behaviour from Ecto.Adapters.SQL + use Ecto.Adapters.SQL, driver: :myxql + + # And provide a custom storage implementation + @behaviour Ecto.Adapter.Storage + @behaviour Ecto.Adapter.Structure + + ## Custom MySQL types + + @impl true + def loaders({:map, _}, type), do: [&json_decode/1, &Ecto.Type.embedded_load(type, &1, :json)] + def loaders(:map, type), do: [&json_decode/1, type] + def loaders(:float, type), do: [&float_decode/1, type] + def loaders(:boolean, type), do: [&bool_decode/1, type] + def loaders(:binary_id, type), do: [Ecto.UUID, type] + def loaders(_, type), do: [type] + + defp bool_decode(<<0>>), do: {:ok, false} + defp bool_decode(<<1>>), do: {:ok, true} + defp bool_decode(<<0::size(1)>>), do: {:ok, false} + defp bool_decode(<<1::size(1)>>), do: {:ok, true} + defp bool_decode(0), do: {:ok, false} + defp bool_decode(1), do: {:ok, true} + defp bool_decode(x), do: {:ok, x} + + defp float_decode(%Decimal{} = decimal), do: {:ok, Decimal.to_float(decimal)} + defp float_decode(x), do: {:ok, x} + + defp json_decode(x) when is_binary(x), do: {:ok, MyXQL.json_library().decode!(x)} + defp json_decode(x), do: {:ok, x} + + ## Storage API + + @impl true + def storage_up(opts) do + database = Keyword.fetch!(opts, :database) || raise ":database is nil in repository configuration" + opts = Keyword.delete(opts, :database) + charset = opts[:charset] || "utf8mb4" + + check_existence_command = "SELECT TRUE FROM information_schema.schemata WHERE schema_name = '#{database}'" + case run_query(check_existence_command, opts) do + {:ok, %{num_rows: 1}} -> + {:error, :already_up} + _ -> + create_command = + ~s(CREATE DATABASE `#{database}` DEFAULT CHARACTER SET = #{charset}) + |> concat_if(opts[:collation], &"DEFAULT COLLATE = #{&1}") + + case run_query(create_command, opts) do + {:ok, _} -> + :ok + {:error, %{mysql: %{name: :ER_DB_CREATE_EXISTS}}} -> + {:error, :already_up} + {:error, error} -> + {:error, Exception.message(error)} + {:exit, exit} -> + {:error, exit_to_exception(exit)} + end + end + end + + defp concat_if(content, nil, _fun), do: content + defp concat_if(content, value, fun), do: content <> " " <> fun.(value) + + @impl true + def storage_down(opts) do + database = Keyword.fetch!(opts, :database) || raise ":database is nil in repository configuration" + opts = Keyword.delete(opts, :database) + command = "DROP DATABASE `#{database}`" + + case run_query(command, opts) do + {:ok, _} -> + :ok + {:error, %{mysql: %{name: :ER_DB_DROP_EXISTS}}} -> + {:error, :already_down} + {:error, %{mysql: %{name: :ER_BAD_DB_ERROR}}} -> + {:error, :already_down} + {:exit, :killed} -> + {:error, :already_down} + {:exit, exit} -> + {:error, exit_to_exception(exit)} + end + end + + @impl Ecto.Adapter.Storage + def storage_status(opts) do + database = Keyword.fetch!(opts, :database) || raise ":database is nil in repository configuration" + opts = Keyword.delete(opts, :database) + + check_database_query = "SELECT schema_name FROM information_schema.schemata WHERE schema_name = '#{database}'" + + case run_query(check_database_query, opts) do + {:ok, %{num_rows: 0}} -> :down + {:ok, %{num_rows: _num_rows}} -> :up + other -> {:error, other} + end + end + + @impl true + def supports_ddl_transaction? 
do + false + end + + @impl true + def lock_for_migrations(meta, opts, fun) do + %{opts: adapter_opts, repo: repo} = meta + + if Keyword.fetch(adapter_opts, :pool_size) == {:ok, 1} do + Ecto.Adapters.SQL.raise_migration_pool_size_error() + end + + opts = Keyword.put(opts, :timeout, :infinity) + + {:ok, result} = + transaction(meta, opts, fn -> + lock_name = "\"ecto_#{inspect(repo)}\"" + + try do + {:ok, _} = Ecto.Adapters.SQL.query(meta, "SELECT GET_LOCK(#{lock_name}, -1)", [], opts) + fun.() + after + {:ok, _} = Ecto.Adapters.SQL.query(meta, "SELECT RELEASE_LOCK(#{lock_name})", [], opts) + end + end) + + result + end + + @impl true + def insert(adapter_meta, schema_meta, params, on_conflict, returning, opts) do + %{source: source, prefix: prefix} = schema_meta + {_, query_params, _} = on_conflict + + key = primary_key!(schema_meta, returning) + {fields, values} = :lists.unzip(params) + sql = @conn.insert(prefix, source, fields, [fields], on_conflict, [], []) + opts = if is_nil(Keyword.get(opts, :cache_statement)) do + [{:cache_statement, "ecto_insert_#{source}_#{length(fields)}"} | opts] + else + opts + end + + case Ecto.Adapters.SQL.query(adapter_meta, sql, values ++ query_params, opts) do + {:ok, %{num_rows: 1, last_insert_id: last_insert_id}} -> + {:ok, last_insert_id(key, last_insert_id)} + + {:ok, %{num_rows: 2, last_insert_id: last_insert_id}} -> + {:ok, last_insert_id(key, last_insert_id)} + + {:error, err} -> + case @conn.to_constraints(err, source: source) do + [] -> raise err + constraints -> {:invalid, constraints} + end + end + end + + defp primary_key!(%{autogenerate_id: {_, key, _type}}, [key]), do: key + defp primary_key!(_, []), do: nil + defp primary_key!(%{schema: schema}, returning) do + raise ArgumentError, "MySQL does not support :read_after_writes in schemas for non-primary keys. 
" <> + "The following fields in #{inspect schema} are tagged as such: #{inspect returning}" + end + + defp last_insert_id(nil, _last_insert_id), do: [] + defp last_insert_id(_key, 0), do: [] + defp last_insert_id(key, last_insert_id), do: [{key, last_insert_id}] + + @impl true + def structure_dump(default, config) do + table = config[:migration_source] || "schema_migrations" + path = config[:dump_path] || Path.join(default, "structure.sql") + + with {:ok, versions} <- select_versions(table, config), + {:ok, contents} <- mysql_dump(config), + {:ok, contents} <- append_versions(table, versions, contents) do + File.mkdir_p!(Path.dirname(path)) + File.write!(path, contents) + {:ok, path} + end + end + + defp select_versions(table, config) do + case run_query(~s[SELECT version FROM `#{table}` ORDER BY version], config) do + {:ok, %{rows: rows}} -> {:ok, Enum.map(rows, &hd/1)} + {:error, %{mysql: %{name: :ER_NO_SUCH_TABLE}}} -> {:ok, []} + {:error, _} = error -> error + {:exit, exit} -> {:error, exit_to_exception(exit)} + end + end + + defp mysql_dump(config) do + case run_with_cmd("mysqldump", config, ["--no-data", "--routines", config[:database]]) do + {output, 0} -> {:ok, output} + {output, _} -> {:error, output} + end + end + + defp append_versions(_table, [], contents) do + {:ok, contents} + end + defp append_versions(table, versions, contents) do + {:ok, + contents <> + Enum.map_join(versions, &~s[INSERT INTO `#{table}` (version) VALUES (#{&1});\n])} + end + + @impl true + def structure_load(default, config) do + path = config[:dump_path] || Path.join(default, "structure.sql") + + args = [ + "--execute", "SET FOREIGN_KEY_CHECKS = 0; SOURCE #{path}; SET FOREIGN_KEY_CHECKS = 1", + "--database", config[:database] + ] + + case run_with_cmd("mysql", config, args) do + {_output, 0} -> {:ok, path} + {output, _} -> {:error, output} + end + end + + ## Helpers + + defp run_query(sql, opts) do + {:ok, _} = Application.ensure_all_started(:ecto_sql) + {:ok, _} = Application.ensure_all_started(:myxql) + + opts = + opts + |> Keyword.drop([:name, :log, :pool, :pool_size]) + |> Keyword.put(:backoff_type, :stop) + |> Keyword.put(:max_restarts, 0) + + task = Task.Supervisor.async_nolink(Ecto.Adapters.SQL.StorageSupervisor, fn -> + {:ok, conn} = MyXQL.start_link(opts) + + value = MyXQL.query(conn, sql, [], opts) + GenServer.stop(conn) + value + end) + + timeout = Keyword.get(opts, :timeout, 15_000) + + case Task.yield(task, timeout) || Task.shutdown(task) do + {:ok, {:ok, result}} -> + {:ok, result} + {:ok, {:error, error}} -> + {:error, error} + {:exit, exit} -> + {:exit, exit} + nil -> + {:error, RuntimeError.exception("command timed out")} + end + end + + defp exit_to_exception({%{__struct__: struct} = error, _}) + when struct in [MyXQL.Error, DBConnection.Error], + do: error + + defp exit_to_exception(reason), do: RuntimeError.exception(Exception.format_exit(reason)) + + defp run_with_cmd(cmd, opts, opt_args) do + unless System.find_executable(cmd) do + raise "could not find executable `#{cmd}` in path, " <> + "please guarantee it is available before running ecto commands" + end + + env = + if password = opts[:password] do + [{"MYSQL_PWD", password}] + else + [] + end + + host = opts[:hostname] || System.get_env("MYSQL_HOST") || "localhost" + port = opts[:port] || System.get_env("MYSQL_TCP_PORT") || "3306" + protocol = opts[:cli_protocol] || System.get_env("MYSQL_CLI_PROTOCOL") || "tcp" + + user_args = + if username = opts[:username] do + ["--user", username] + else + [] + end + + args = + [ + "--host", 
host, + "--port", to_string(port), + "--protocol", protocol + ] ++ user_args ++ opt_args + + System.cmd(cmd, args, env: env, stderr_to_stdout: true) + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/myxql/connection.ex b/deps/ecto_sql/lib/ecto/adapters/myxql/connection.ex new file mode 100644 index 0000000..43c1404 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/myxql/connection.ex @@ -0,0 +1,1137 @@ +if Code.ensure_loaded?(MyXQL) do + defmodule Ecto.Adapters.MyXQL.Connection do + @moduledoc false + alias Ecto.Adapters.SQL + + @behaviour Ecto.Adapters.SQL.Connection + + ## Connection + + @impl true + def child_spec(opts) do + MyXQL.child_spec(opts) + end + + ## Query + + @impl true + def prepare_execute(conn, name, sql, params, opts) do + MyXQL.prepare_execute(conn, name, sql, params, opts) + end + + @impl true + def query(conn, sql, params, opts) do + opts = Keyword.put_new(opts, :query_type, :binary_then_text) + MyXQL.query(conn, sql, params, opts) + end + + @impl true + def query_many(conn, sql, params, opts) do + opts = Keyword.put_new(opts, :query_type, :text) + MyXQL.query_many(conn, sql, params, opts) + end + + @impl true + def execute(conn, query, params, opts) do + case MyXQL.execute(conn, query, params, opts) do + {:ok, _, result} -> {:ok, result} + {:error, _} = error -> error + end + end + + @impl true + def stream(conn, sql, params, opts) do + MyXQL.stream(conn, sql, params, opts) + end + + @impl true + def to_constraints(%MyXQL.Error{mysql: %{name: :ER_DUP_ENTRY}, message: message}, opts) do + case :binary.split(message, " for key ") do + [_, quoted] -> [unique: normalize_index_name(quoted, opts[:source])] + _ -> [] + end + end + def to_constraints(%MyXQL.Error{mysql: %{name: name}, message: message}, _opts) + when name in [:ER_ROW_IS_REFERENCED_2, :ER_NO_REFERENCED_ROW_2] do + case :binary.split(message, [" CONSTRAINT ", " FOREIGN KEY "], [:global]) do + [_, quoted, _] -> [foreign_key: strip_quotes(quoted)] + _ -> [] + end + end + def to_constraints(_, _), + do: [] + + defp strip_quotes(quoted) do + size = byte_size(quoted) - 2 + <<_, unquoted::binary-size(size), _>> = quoted + unquoted + end + + defp normalize_index_name(quoted, source) do + name = strip_quotes(quoted) + + if source do + String.trim_leading(name, "#{source}.") + else + name + end + end + + ## Query + + @parent_as __MODULE__ + alias Ecto.Query.{BooleanExpr, JoinExpr, QueryExpr, WithExpr} + + @impl true + def all(query, as_prefix \\ []) do + sources = create_names(query, as_prefix) + + cte = cte(query, sources) + from = from(query, sources) + select = select(query, sources) + join = join(query, sources) + where = where(query, sources) + group_by = group_by(query, sources) + having = having(query, sources) + window = window(query, sources) + combinations = combinations(query) + order_by = order_by(query, sources) + limit = limit(query, sources) + offset = offset(query, sources) + lock = lock(query, sources) + + [cte, select, from, join, where, group_by, having, window, combinations, order_by, limit, offset | lock] + end + + @impl true + def update_all(query, prefix \\ nil) do + %{from: %{source: source}, select: select} = query + + if select do + error!(nil, ":select is not supported in update_all by MySQL") + end + + sources = create_names(query, []) + cte = cte(query, sources) + {from, name} = get_source(query, sources, 0, source) + + fields = if prefix do + update_fields(:on_conflict, query, sources) + else + update_fields(:update, query, sources) + end + + {join, wheres} = using_join(query, 
:update_all, sources) + prefix = prefix || ["UPDATE ", from, " AS ", name, join, " SET "] + where = where(%{query | wheres: wheres ++ query.wheres}, sources) + + [cte, prefix, fields | where] + end + + @impl true + def delete_all(query) do + if query.select do + error!(nil, ":select is not supported in delete_all by MySQL") + end + + sources = create_names(query, []) + cte = cte(query, sources) + {_, name, _} = elem(sources, 0) + + from = from(query, sources) + join = join(query, sources) + where = where(query, sources) + + [cte, "DELETE ", name, ".*", from, join | where] + end + + @impl true + def insert(prefix, table, header, rows, on_conflict, [], []) do + fields = quote_names(header) + ["INSERT INTO ", quote_table(prefix, table), " (", fields, ") ", + insert_all(rows) | on_conflict(on_conflict, header)] + end + def insert(_prefix, _table, _header, _rows, _on_conflict, _returning, []) do + error!(nil, ":returning is not supported in insert/insert_all by MySQL") + end + def insert(_prefix, _table, _header, _rows, _on_conflict, _returning, _placeholders) do + error!(nil, ":placeholders is not supported by MySQL") + end + + defp on_conflict({_, _, [_ | _]}, _header) do + error!(nil, ":conflict_target is not supported in insert/insert_all by MySQL") + end + defp on_conflict({:raise, _, []}, _header) do + [] + end + defp on_conflict({:nothing, _, []}, [field | _]) do + quoted = quote_name(field) + [" ON DUPLICATE KEY UPDATE ", quoted, " = " | quoted] + end + defp on_conflict({fields, _, []}, _header) when is_list(fields) do + [" ON DUPLICATE KEY UPDATE " | + intersperse_map(fields, ?,, fn field -> + quoted = quote_name(field) + [quoted, " = VALUES(", quoted, ?)] + end)] + end + defp on_conflict({%{wheres: []} = query, _, []}, _header) do + [" ON DUPLICATE KEY " | update_all(query, "UPDATE ")] + end + defp on_conflict({_query, _, []}, _header) do + error!(nil, "Using a query with :where in combination with the :on_conflict option is not supported by MySQL") + end + + defp insert_all(rows) when is_list(rows) do + ["VALUES ", intersperse_map(rows, ?,, fn row -> + [?(, intersperse_map(row, ?,, &insert_all_value/1), ?)] + end)] + end + + defp insert_all(%Ecto.Query{} = query) do + [?(, all(query), ?)] + end + + defp insert_all_value(nil), do: "DEFAULT" + defp insert_all_value({%Ecto.Query{} = query, _params_counter}), do: [?(, all(query), ?)] + defp insert_all_value(_), do: '?' + + @impl true + def update(prefix, table, fields, filters, _returning) do + fields = intersperse_map(fields, ", ", &[quote_name(&1), " = ?"]) + filters = intersperse_map(filters, " AND ", fn + {field, nil} -> + [quote_name(field), " IS NULL"] + + {field, _value} -> + [quote_name(field), " = ?"] + end) + ["UPDATE ", quote_table(prefix, table), " SET ", fields, " WHERE " | filters] + end + + @impl true + def delete(prefix, table, filters, _returning) do + filters = intersperse_map(filters, " AND ", fn + {field, nil} -> + [quote_name(field), " IS NULL"] + + {field, _value} -> + [quote_name(field), " = ?"] + end) + ["DELETE FROM ", quote_table(prefix, table), " WHERE " | filters] + end + + @impl true + # DB explain opts are deprecated, so they aren't used to build the explain query. 
+ # See Notes at https://dev.mysql.com/doc/refman/5.7/en/explain.html + def explain_query(conn, query, params, opts) do + case query(conn, build_explain_query(query), params, opts) do + {:ok, %MyXQL.Result{} = result} -> + {:ok, SQL.format_table(result)} + + error -> + error + end + end + + def build_explain_query(query) do + ["EXPLAIN ", query] + |> IO.iodata_to_binary() + end + + ## Query generation + + binary_ops = + [==: " = ", !=: " != ", <=: " <= ", >=: " >= ", <: " < ", >: " > ", + +: " + ", -: " - ", *: " * ", /: " / ", + and: " AND ", or: " OR ", like: " LIKE "] + + @binary_ops Keyword.keys(binary_ops) + + Enum.map(binary_ops, fn {op, str} -> + defp handle_call(unquote(op), 2), do: {:binary_op, unquote(str)} + end) + + defp handle_call(fun, _arity), do: {:fun, Atom.to_string(fun)} + + defp select(%{select: %{fields: fields}, distinct: distinct} = query, sources) do + ["SELECT ", distinct(distinct, sources, query) | select(fields, sources, query)] + end + + defp distinct(nil, _sources, _query), do: [] + defp distinct(%QueryExpr{expr: true}, _sources, _query), do: "DISTINCT " + defp distinct(%QueryExpr{expr: false}, _sources, _query), do: [] + defp distinct(%QueryExpr{expr: exprs}, _sources, query) when is_list(exprs) do + error!(query, "DISTINCT with multiple columns is not supported by MySQL") + end + + defp select([], _sources, _query), + do: "TRUE" + defp select(fields, sources, query) do + intersperse_map(fields, ", ", fn + {:&, _, [idx]} -> + case elem(sources, idx) do + {source, _, nil} -> + error!(query, "MySQL does not support selecting all fields from #{source} without a schema. " <> + "Please specify a schema or specify exactly which fields you want to select") + {_, source, _} -> + source + end + {key, value} -> + [expr(value, sources, query), " AS ", quote_name(key)] + value -> + expr(value, sources, query) + end) + end + + defp from(%{from: %{source: source, hints: hints}} = query, sources) do + {from, name} = get_source(query, sources, 0, source) + [" FROM ", from, " AS ", name | Enum.map(hints, &[?\s | &1])] + end + + defp cte(%{with_ctes: %WithExpr{recursive: recursive, queries: [_ | _] = queries}} = query, sources) do + recursive_opt = if recursive, do: "RECURSIVE ", else: "" + ctes = intersperse_map(queries, ", ", &cte_expr(&1, sources, query)) + ["WITH ", recursive_opt, ctes, " "] + end + + defp cte(%{with_ctes: _}, _), do: [] + + defp cte_expr({name, cte}, sources, query) do + [quote_name(name), " AS ", cte_query(cte, sources, query)] + end + + defp cte_query(%Ecto.Query{} = query, sources, parent_query) do + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + ["(", all(query, subquery_as_prefix(sources)), ")"] + end + + defp cte_query(%QueryExpr{expr: expr}, sources, query) do + expr(expr, sources, query) + end + + defp update_fields(type, %{updates: updates} = query, sources) do + fields = for(%{expr: expr} <- updates, + {op, kw} <- expr, + {key, value} <- kw, + do: update_op(op, update_key(type, key, query, sources), value, sources, query)) + Enum.intersperse(fields, ", ") + end + + defp update_key(:update, key, %{from: from} = query, sources) do + {_from, name} = get_source(query, sources, 0, from) + + [name, ?. 
| quote_name(key)] + end + defp update_key(:on_conflict, key, _query, _sources) do + quote_name(key) + end + + defp update_op(:set, quoted_key, value, sources, query) do + [quoted_key, " = " | expr(value, sources, query)] + end + + defp update_op(:inc, quoted_key, value, sources, query) do + [quoted_key, " = ", quoted_key, " + " | expr(value, sources, query)] + end + + defp update_op(command, _quoted_key, _value, _sources, query) do + error!(query, "Unknown update operation #{inspect command} for MySQL") + end + + defp using_join(%{joins: []}, _kind, _sources), do: {[], []} + defp using_join(%{joins: joins} = query, kind, sources) do + froms = + intersperse_map(joins, ", ", fn + %JoinExpr{source: %Ecto.SubQuery{params: [_ | _]}} -> + error!(query, "MySQL adapter does not support subqueries with parameters in update_all/delete_all joins") + + %JoinExpr{qual: :inner, ix: ix, source: source} -> + {join, name} = get_source(query, sources, ix, source) + [join, " AS " | name] + + %JoinExpr{qual: qual} -> + error!(query, "MySQL adapter supports only inner joins on #{kind}, got: `#{qual}`") + end) + + wheres = + for %JoinExpr{on: %QueryExpr{expr: value} = expr} <- joins, + value != true, + do: expr |> Map.put(:__struct__, BooleanExpr) |> Map.put(:op, :and) + + {[?,, ?\s | froms], wheres} + end + + defp join(%{joins: []}, _sources), do: [] + defp join(%{joins: joins} = query, sources) do + Enum.map(joins, fn + %JoinExpr{on: %QueryExpr{expr: expr}, qual: qual, ix: ix, source: source, hints: hints} -> + {join, name} = get_source(query, sources, ix, source) + [join_qual(qual, query), join, " AS ", name, Enum.map(hints, &[?\s | &1]) | join_on(qual, expr, sources, query)] + end) + end + + defp join_on(:cross, true, _sources, _query), do: [] + defp join_on(_qual, expr, sources, query), do: [" ON " | expr(expr, sources, query)] + + defp join_qual(:inner, _), do: " INNER JOIN " + defp join_qual(:inner_lateral, _), do: " INNER JOIN LATERAL " + defp join_qual(:left, _), do: " LEFT OUTER JOIN " + defp join_qual(:left_lateral, _), do: " LEFT OUTER JOIN LATERAL " + defp join_qual(:right, _), do: " RIGHT OUTER JOIN " + defp join_qual(:full, _), do: " FULL OUTER JOIN " + defp join_qual(:cross, _), do: " CROSS JOIN " + + defp where(%{wheres: wheres} = query, sources) do + boolean(" WHERE ", wheres, sources, query) + end + + defp having(%{havings: havings} = query, sources) do + boolean(" HAVING ", havings, sources, query) + end + + defp group_by(%{group_bys: []}, _sources), do: [] + defp group_by(%{group_bys: group_bys} = query, sources) do + [" GROUP BY " | + intersperse_map(group_bys, ", ", fn %QueryExpr{expr: expr} -> + intersperse_map(expr, ", ", &expr(&1, sources, query)) + end)] + end + + defp window(%{windows: []}, _sources), do: [] + defp window(%{windows: windows} = query, sources) do + [" WINDOW " | + intersperse_map(windows, ", ", fn {name, %{expr: kw}} -> + [quote_name(name), " AS " | window_exprs(kw, sources, query)] + end)] + end + + defp window_exprs(kw, sources, query) do + [?(, intersperse_map(kw, ?\s, &window_expr(&1, sources, query)), ?)] + end + + defp window_expr({:partition_by, fields}, sources, query) do + ["PARTITION BY " | intersperse_map(fields, ", ", &expr(&1, sources, query))] + end + + defp window_expr({:order_by, fields}, sources, query) do + ["ORDER BY " | intersperse_map(fields, ", ", &order_by_expr(&1, sources, query))] + end + + defp window_expr({:frame, {:fragment, _, _} = fragment}, sources, query) do + expr(fragment, sources, query) + end + + defp order_by(%{order_bys: []}, 
_sources), do: [] + defp order_by(%{order_bys: order_bys} = query, sources) do + [" ORDER BY " | + intersperse_map(order_bys, ", ", fn %QueryExpr{expr: expr} -> + intersperse_map(expr, ", ", &order_by_expr(&1, sources, query)) + end)] + end + + defp order_by_expr({dir, expr}, sources, query) do + str = expr(expr, sources, query) + + case dir do + :asc -> str + :desc -> [str | " DESC"] + _ -> error!(query, "#{dir} is not supported in ORDER BY in MySQL") + end + end + + defp limit(%{limit: nil}, _sources), do: [] + defp limit(%{limit: %QueryExpr{expr: expr}} = query, sources) do + [" LIMIT " | expr(expr, sources, query)] + end + + defp offset(%{offset: nil}, _sources), do: [] + defp offset(%{offset: %QueryExpr{expr: expr}} = query, sources) do + [" OFFSET " | expr(expr, sources, query)] + end + + defp combinations(%{combinations: combinations}) do + Enum.map(combinations, fn + {:union, query} -> [" UNION (", all(query), ")"] + {:union_all, query} -> [" UNION ALL (", all(query), ")"] + {:except, query} -> [" EXCEPT (", all(query), ")"] + {:except_all, query} -> [" EXCEPT ALL (", all(query), ")"] + {:intersect, query} -> [" INTERSECT (", all(query), ")"] + {:intersect_all, query} -> [" INTERSECT ALL (", all(query), ")"] + end) + end + + defp lock(%{lock: nil}, _sources), do: [] + defp lock(%{lock: binary}, _sources) when is_binary(binary), do: [?\s | binary] + defp lock(%{lock: expr} = query, sources), do: [?\s | expr(expr, sources, query)] + + defp boolean(_name, [], _sources, _query), do: [] + defp boolean(name, [%{expr: expr, op: op} | query_exprs], sources, query) do + [name, + Enum.reduce(query_exprs, {op, paren_expr(expr, sources, query)}, fn + %BooleanExpr{expr: expr, op: op}, {op, acc} -> + {op, [acc, operator_to_boolean(op) | paren_expr(expr, sources, query)]} + %BooleanExpr{expr: expr, op: op}, {_, acc} -> + {op, [?(, acc, ?), operator_to_boolean(op) | paren_expr(expr, sources, query)]} + end) |> elem(1)] + end + + defp operator_to_boolean(:and), do: " AND " + defp operator_to_boolean(:or), do: " OR " + + defp parens_for_select([first_expr | _] = expr) do + if is_binary(first_expr) and String.match?(first_expr, ~r/^\s*select/i) do + [?(, expr, ?)] + else + expr + end + end + + defp paren_expr(expr, sources, query) do + [?(, expr(expr, sources, query), ?)] + end + + defp expr({:^, [], [_ix]}, _sources, _query) do + '?' + end + + defp expr({{:., _, [{:parent_as, _, [as]}, field]}, _, []}, _sources, query) + when is_atom(field) do + {ix, sources} = get_parent_sources_ix(query, as) + {_, name, _} = elem(sources, ix) + [name, ?. | quote_name(field)] + end + + defp expr({{:., _, [{:&, _, [idx]}, field]}, _, []}, sources, _query) + when is_atom(field) do + {_, name, _} = elem(sources, idx) + [name, ?. 
| quote_name(field)] + end + + defp expr({:&, _, [idx]}, sources, _query) do + {_, source, _} = elem(sources, idx) + source + end + + defp expr({:in, _, [_left, []]}, _sources, _query) do + "false" + end + + defp expr({:in, _, [left, right]}, sources, query) when is_list(right) do + args = intersperse_map(right, ?,, &expr(&1, sources, query)) + [expr(left, sources, query), " IN (", args, ?)] + end + + defp expr({:in, _, [_, {:^, _, [_, 0]}]}, _sources, _query) do + "false" + end + + defp expr({:in, _, [left, {:^, _, [_, length]}]}, sources, query) do + args = Enum.intersperse(List.duplicate(??, length), ?,) + [expr(left, sources, query), " IN (", args, ?)] + end + + defp expr({:in, _, [left, %Ecto.SubQuery{} = subquery]}, sources, query) do + [expr(left, sources, query), " IN ", expr(subquery, sources, query)] + end + + defp expr({:in, _, [left, right]}, sources, query) do + [expr(left, sources, query), " = ANY(", expr(right, sources, query), ?)] + end + + defp expr({:is_nil, _, [arg]}, sources, query) do + [expr(arg, sources, query) | " IS NULL"] + end + + defp expr({:not, _, [expr]}, sources, query) do + ["NOT (", expr(expr, sources, query), ?)] + end + + defp expr({:filter, _, _}, _sources, query) do + error!(query, "MySQL adapter does not support aggregate filters") + end + + defp expr(%Ecto.SubQuery{query: query}, sources, parent_query) do + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + [?(, all(query, subquery_as_prefix(sources)), ?)] + end + + defp expr({:fragment, _, [kw]}, _sources, query) when is_list(kw) or tuple_size(kw) == 3 do + error!(query, "MySQL adapter does not support keyword or interpolated fragments") + end + + defp expr({:fragment, _, parts}, sources, query) do + Enum.map(parts, fn + {:raw, part} -> part + {:expr, expr} -> expr(expr, sources, query) + end) + |> parens_for_select + end + + defp expr({:literal, _, [literal]}, _sources, _query) do + quote_name(literal) + end + + defp expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do + ["date_add(", expr(datetime, sources, query), ", ", + interval(count, interval, sources, query) | ")"] + end + + defp expr({:date_add, _, [date, count, interval]}, sources, query) do + ["CAST(date_add(", expr(date, sources, query), ", ", + interval(count, interval, sources, query) | ") AS date)"] + end + + defp expr({:ilike, _, [_, _]}, _sources, query) do + error!(query, "ilike is not supported by MySQL") + end + + defp expr({:over, _, [agg, name]}, sources, query) when is_atom(name) do + aggregate = expr(agg, sources, query) + [aggregate, " OVER " | quote_name(name)] + end + + defp expr({:over, _, [agg, kw]}, sources, query) do + aggregate = expr(agg, sources, query) + [aggregate, " OVER " | window_exprs(kw, sources, query)] + end + + defp expr({:{}, _, elems}, sources, query) do + [?(, intersperse_map(elems, ?,, &expr(&1, sources, query)), ?)] + end + + defp expr({:count, _, []}, _sources, _query), do: "count(*)" + + defp expr({:json_extract_path, _, [expr, path]}, sources, query) do + path = + Enum.map(path, fn + binary when is_binary(binary) -> + [?., ?", escape_json_key(binary), ?"] + + integer when is_integer(integer) -> + "[#{integer}]" + end) + + ["json_extract(", expr(expr, sources, query), ", '$", path, "')"] + end + + defp expr({fun, _, args}, sources, query) when is_atom(fun) and is_list(args) do + {modifier, args} = + case args do + [rest, :distinct] -> {"DISTINCT ", [rest]} + _ -> {[], args} + end + + case handle_call(fun, length(args)) do + {:binary_op, op} -> + [left, right] = 
args
+       [op_to_binary(left, sources, query), op | op_to_binary(right, sources, query)]
+     {:fun, fun} ->
+       [fun, ?(, modifier, intersperse_map(args, ", ", &expr(&1, sources, query)), ?)]
+   end
+ end
+
+ defp expr(list, _sources, query) when is_list(list) do
+   error!(query, "Array type is not supported by MySQL")
+ end
+
+ defp expr(%Decimal{} = decimal, _sources, _query) do
+   Decimal.to_string(decimal, :normal)
+ end
+
+ defp expr(%Ecto.Query.Tagged{value: binary, type: :binary}, _sources, _query)
+      when is_binary(binary) do
+   hex = Base.encode16(binary, case: :lower)
+   [?x, ?', hex, ?']
+ end
+
+ defp expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query)
+      when type in [:decimal, :float] do
+   [expr(other, sources, query), " + 0"]
+ end
+
+ defp expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) do
+   ["CAST(", expr(other, sources, query), " AS ", ecto_cast_to_db(type, query), ?)]
+ end
+
+ defp expr(nil, _sources, _query), do: "NULL"
+ defp expr(true, _sources, _query), do: "TRUE"
+ defp expr(false, _sources, _query), do: "FALSE"
+
+ defp expr(literal, _sources, _query) when is_binary(literal) do
+   [?', escape_string(literal), ?']
+ end
+
+ defp expr(literal, _sources, _query) when is_integer(literal) do
+   Integer.to_string(literal)
+ end
+
+ defp expr(literal, _sources, _query) when is_float(literal) do
+   # MySQL doesn't support float cast
+   ["(0 + ", Float.to_string(literal), ?)]
+ end
+
+ defp expr(expr, _sources, query) do
+   error!(query, "unsupported expression: #{inspect(expr)}")
+ end
+
+ defp interval(count, "millisecond", sources, query) do
+   ["INTERVAL (", expr(count, sources, query) | " * 1000) microsecond"]
+ end
+
+ defp interval(count, interval, sources, query) do
+   ["INTERVAL ", expr(count, sources, query), ?\s | interval]
+ end
+
+ defp op_to_binary({op, _, [_, _]} = expr, sources, query) when op in @binary_ops,
+   do: paren_expr(expr, sources, query)
+
+ defp op_to_binary({:is_nil, _, [_]} = expr, sources, query),
+   do: paren_expr(expr, sources, query)
+
+ defp op_to_binary(expr, sources, query),
+   do: expr(expr, sources, query)
+
+ defp create_names(%{sources: sources}, as_prefix) do
+   create_names(sources, 0, tuple_size(sources), as_prefix) |> List.to_tuple()
+ end
+
+ defp create_names(sources, pos, limit, as_prefix) when pos < limit do
+   [create_name(sources, pos, as_prefix) | create_names(sources, pos + 1, limit, as_prefix)]
+ end
+
+ defp create_names(_sources, pos, pos, as_prefix) do
+   [as_prefix]
+ end
+
+ defp subquery_as_prefix(sources) do
+   [?s | :erlang.element(tuple_size(sources), sources)]
+ end
+
+ defp create_name(sources, pos, as_prefix) do
+   case elem(sources, pos) do
+     {:fragment, _, _} ->
+       {nil, as_prefix ++ [?f | Integer.to_string(pos)], nil}
+
+     {table, schema, prefix} ->
+       name = as_prefix ++ [create_alias(table) | Integer.to_string(pos)]
+       {quote_table(prefix, table), name, schema}
+
+     %Ecto.SubQuery{} ->
+       {nil, as_prefix ++ [?s | Integer.to_string(pos)], nil}
+   end
+ end
+
+ defp create_alias(<<first, _rest::binary>>) when first in ?a..?z when first in ?A..?Z do
+   first
+ end
+ defp create_alias(_) do
+   ?t
+ end
+
+ ## DDL
+
+ alias Ecto.Migration.{Table, Index, Reference, Constraint}
+
+ @impl true
+ def execute_ddl({command, %Table{} = table, columns}) when command in [:create, :create_if_not_exists] do
+   table_structure =
+     case column_definitions(table, columns) ++ pk_definitions(columns, ", ") do
+       [] -> []
+       list -> [?\s, ?(, list, ?)]
+     end
+
+   [["CREATE TABLE ",
+     if_do(command == :create_if_not_exists, "IF NOT EXISTS "),
+
quote_table(table.prefix, table.name), + table_structure, + comment_expr(table.comment, true), + engine_expr(table.engine), options_expr(table.options)]] + end + + + def execute_ddl({command, %Table{} = table, mode}) when command in [:drop, :drop_if_exists] do + [["DROP TABLE ", if_do(command == :drop_if_exists, "IF EXISTS "), + quote_table(table.prefix, table.name), drop_mode(mode)]] + end + + def execute_ddl({:alter, %Table{} = table, changes}) do + [["ALTER TABLE ", quote_table(table.prefix, table.name), ?\s, + column_changes(table, changes), pk_definitions(changes, ", ADD ")]] + ++ + if_do(table.comment, + [["ALTER TABLE ", quote_table(table.prefix, table.name), comment_expr(table.comment)]] + ) + end + + def execute_ddl({:create, %Index{} = index}) do + if index.where do + error!(nil, "MySQL adapter does not support where in indexes") + end + + [["CREATE", if_do(index.unique, " UNIQUE"), " INDEX ", + quote_name(index.name), + " ON ", + quote_table(index.prefix, index.table), ?\s, + ?(, intersperse_map(index.columns, ", ", &index_expr/1), ?), + if_do(index.using, [" USING ", to_string(index.using)]), + if_do(index.concurrently, " LOCK=NONE")]] + end + + def execute_ddl({:create_if_not_exists, %Index{}}), + do: error!(nil, "MySQL adapter does not support create if not exists for index") + + def execute_ddl({:create, %Constraint{check: check}}) when is_binary(check), + do: error!(nil, "MySQL adapter does not support check constraints") + def execute_ddl({:create, %Constraint{exclude: exclude}}) when is_binary(exclude), + do: error!(nil, "MySQL adapter does not support exclusion constraints") + + def execute_ddl({:drop, %Index{}, :cascade}), + do: error!(nil, "MySQL adapter does not support cascade in drop index") + + def execute_ddl({:drop, %Index{} = index, :restrict}) do + [["DROP INDEX ", + quote_name(index.name), + " ON ", quote_table(index.prefix, index.table), + if_do(index.concurrently, " LOCK=NONE")]] + end + + def execute_ddl({:drop, %Constraint{}, _}), + do: error!(nil, "MySQL adapter does not support constraints") + + def execute_ddl({:drop_if_exists, %Constraint{}, _}), + do: error!(nil, "MySQL adapter does not support constraints") + + def execute_ddl({:drop_if_exists, %Index{}, _}), + do: error!(nil, "MySQL adapter does not support drop if exists for index") + + def execute_ddl({:rename, %Table{} = current_table, %Table{} = new_table}) do + [["RENAME TABLE ", quote_table(current_table.prefix, current_table.name), + " TO ", quote_table(new_table.prefix, new_table.name)]] + end + + def execute_ddl({:rename, %Table{} = table, current_column, new_column}) do + [["ALTER TABLE ", quote_table(table.prefix, table.name), " RENAME COLUMN ", + quote_name(current_column), " TO ", quote_name(new_column)]] + end + + def execute_ddl(string) when is_binary(string), do: [string] + + def execute_ddl(keyword) when is_list(keyword), + do: error!(nil, "MySQL adapter does not support keyword lists in execute") + + @impl true + def ddl_logs(_), do: [] + + @impl true + def table_exists_query(table) do + {"SELECT true FROM information_schema.tables WHERE table_name = ? 
AND table_schema = DATABASE() LIMIT 1", [table]} + end + + defp drop_mode(:cascade), do: " CASCADE" + defp drop_mode(:restrict), do: [] + + defp pk_definitions(columns, prefix) do + pks = + for {_, name, _, opts} <- columns, + opts[:primary_key], + do: name + + case pks do + [] -> [] + _ -> [[prefix, "PRIMARY KEY (", quote_names(pks), ?)]] + end + end + + defp column_definitions(table, columns) do + intersperse_map(columns, ", ", &column_definition(table, &1)) + end + + defp column_definition(table, {:add, name, %Reference{} = ref, opts}) do + [quote_name(name), ?\s, reference_column_type(ref.type, opts), + column_options(opts), reference_expr(ref, table, name)] + end + + defp column_definition(_table, {:add, name, type, opts}) do + [quote_name(name), ?\s, column_type(type, opts), column_options(opts)] + end + + defp column_changes(table, columns) do + intersperse_map(columns, ", ", &column_change(table, &1)) + end + + defp column_change(_table, {_command, _name, %Reference{validate: false}, _opts}) do + error!(nil, "validate: false on references is not supported in MyXQL") + end + + defp column_change(table, {:add, name, %Reference{} = ref, opts}) do + ["ADD ", quote_name(name), ?\s, reference_column_type(ref.type, opts), + column_options(opts), constraint_expr(ref, table, name)] + end + + defp column_change(_table, {:add, name, type, opts}) do + ["ADD ", quote_name(name), ?\s, column_type(type, opts), column_options(opts)] + end + + defp column_change(table, {:add_if_not_exists, name, %Reference{} = ref, opts}) do + ["ADD IF NOT EXISTS ", quote_name(name), ?\s, reference_column_type(ref.type, opts), + column_options(opts), constraint_if_not_exists_expr(ref, table, name)] + end + + defp column_change(_table, {:add_if_not_exists, name, type, opts}) do + ["ADD IF NOT EXISTS ", quote_name(name), ?\s, column_type(type, opts), column_options(opts)] + end + + defp column_change(table, {:modify, name, %Reference{} = ref, opts}) do + [drop_constraint_expr(opts[:from], table, name), "MODIFY ", quote_name(name), ?\s, reference_column_type(ref.type, opts), + column_options(opts), constraint_expr(ref, table, name)] + end + + defp column_change(table, {:modify, name, type, opts}) do + [drop_constraint_expr(opts[:from], table, name), "MODIFY ", quote_name(name), ?\s, column_type(type, opts), column_options(opts)] + end + + defp column_change(_table, {:remove, name}), do: ["DROP ", quote_name(name)] + defp column_change(table, {:remove, name, %Reference{} = ref, _opts}) do + [drop_constraint_expr(ref, table, name), "DROP ", quote_name(name)] + end + defp column_change(_table, {:remove, name, _type, _opts}), do: ["DROP ", quote_name(name)] + + defp column_change(table, {:remove_if_exists, name, %Reference{} = ref}) do + [drop_constraint_if_exists_expr(ref, table, name), "DROP IF EXISTS ", quote_name(name)] + end + defp column_change(_table, {:remove_if_exists, name, _type}), do: ["DROP IF EXISTS ", quote_name(name)] + + defp column_options(opts) do + default = Keyword.fetch(opts, :default) + null = Keyword.get(opts, :null) + after_column = Keyword.get(opts, :after) + comment = Keyword.get(opts, :comment) + + [default_expr(default), null_expr(null), after_expr(after_column), comment_expr(comment)] + end + + defp comment_expr(comment, create_table? 
\\ false) + defp comment_expr(comment, true) when is_binary(comment), do: " COMMENT = '#{escape_string(comment)}'" + defp comment_expr(comment, false) when is_binary(comment), do: " COMMENT '#{escape_string(comment)}'" + defp comment_expr(_, _), do: [] + + defp after_expr(nil), do: [] + defp after_expr(column) when is_atom(column) or is_binary(column), do: " AFTER `#{column}`" + defp after_expr(_), do: [] + + defp null_expr(false), do: " NOT NULL" + defp null_expr(true), do: " NULL" + defp null_expr(_), do: [] + + defp default_expr({:ok, nil}), + do: " DEFAULT NULL" + defp default_expr({:ok, literal}) when is_binary(literal), + do: [" DEFAULT '", escape_string(literal), ?'] + defp default_expr({:ok, literal}) when is_number(literal) or is_boolean(literal), + do: [" DEFAULT ", to_string(literal)] + defp default_expr({:ok, {:fragment, expr}}), + do: [" DEFAULT ", expr] + defp default_expr({:ok, value}) when is_map(value) do + library = Application.get_env(:myxql, :json_library, Jason) + expr = IO.iodata_to_binary(library.encode_to_iodata!(value)) + [" DEFAULT ", ?(, ?', escape_string(expr), ?', ?)] + end + defp default_expr(:error), + do: [] + + defp index_expr(literal) when is_binary(literal), + do: literal + defp index_expr(literal), do: quote_name(literal) + + defp engine_expr(storage_engine), + do: [" ENGINE = ", String.upcase(to_string(storage_engine || "INNODB"))] + + defp options_expr(nil), + do: [] + defp options_expr(keyword) when is_list(keyword), + do: error!(nil, "MySQL adapter does not support keyword lists in :options") + defp options_expr(options), + do: [?\s, to_string(options)] + + defp column_type(type, _opts) when type in ~w(time utc_datetime naive_datetime)a, + do: ecto_to_db(type) + + defp column_type(type, opts) when type in ~w(time_usec utc_datetime_usec naive_datetime_usec)a do + precision = Keyword.get(opts, :precision, 6) + type_name = ecto_to_db(type) + + [type_name, ?(, to_string(precision), ?)] + end + + defp column_type(type, opts) do + size = Keyword.get(opts, :size) + precision = Keyword.get(opts, :precision) + scale = Keyword.get(opts, :scale) + + cond do + size -> [ecto_size_to_db(type), ?(, to_string(size), ?)] + precision -> [ecto_to_db(type), ?(, to_string(precision), ?,, to_string(scale || 0), ?)] + type == :string -> ["varchar(255)"] + true -> ecto_to_db(type) + end + end + + defp reference_expr(type, ref, table, name) do + {current_columns, reference_columns} = Enum.unzip([{name, ref.column} | ref.with]) + + if ref.match do + error!(nil, ":match is not supported in references for tds") + end + + ["CONSTRAINT ", reference_name(ref, table, name), + " ", type, " (", quote_names(current_columns), ?), + " REFERENCES ", quote_table(ref.prefix || table.prefix, ref.table), + ?(, quote_names(reference_columns), ?), + reference_on_delete(ref.on_delete), reference_on_update(ref.on_update)] + end + + defp reference_expr(%Reference{} = ref, table, name), + do: [", " | reference_expr("FOREIGN KEY", ref, table, name)] + + defp constraint_expr(%Reference{} = ref, table, name), + do: [", ADD " | reference_expr("FOREIGN KEY", ref, table, name)] + + defp constraint_if_not_exists_expr(%Reference{} = ref, table, name), + do: [", ADD " | reference_expr("FOREIGN KEY IF NOT EXISTS", ref, table, name)] + + defp drop_constraint_expr(%Reference{} = ref, table, name), + do: ["DROP FOREIGN KEY ", reference_name(ref, table, name), ", "] + defp drop_constraint_expr(_, _, _), + do: [] + + defp drop_constraint_if_exists_expr(%Reference{} = ref, table, name), + do: ["DROP FOREIGN KEY 
IF EXISTS ", reference_name(ref, table, name), ", "]
+ defp drop_constraint_if_exists_expr(_, _, _),
+   do: []
+
+ defp reference_name(%Reference{name: nil}, table, column),
+   do: quote_name("#{table.name}_#{column}_fkey")
+ defp reference_name(%Reference{name: name}, _table, _column),
+   do: quote_name(name)
+
+ defp reference_column_type(:serial, _opts), do: "BIGINT UNSIGNED"
+ defp reference_column_type(:bigserial, _opts), do: "BIGINT UNSIGNED"
+ defp reference_column_type(type, opts), do: column_type(type, opts)
+
+ defp reference_on_delete(:nilify_all), do: " ON DELETE SET NULL"
+ defp reference_on_delete(:delete_all), do: " ON DELETE CASCADE"
+ defp reference_on_delete(:restrict), do: " ON DELETE RESTRICT"
+ defp reference_on_delete(_), do: []
+
+ defp reference_on_update(:nilify_all), do: " ON UPDATE SET NULL"
+ defp reference_on_update(:update_all), do: " ON UPDATE CASCADE"
+ defp reference_on_update(:restrict), do: " ON UPDATE RESTRICT"
+ defp reference_on_update(_), do: []
+
+ ## Helpers
+
+ defp get_source(query, sources, ix, source) do
+   {expr, name, _schema} = elem(sources, ix)
+   {expr || expr(source, sources, query), name}
+ end
+
+ defp get_parent_sources_ix(query, as) do
+   case query.aliases[@parent_as] do
+     {%{aliases: %{^as => ix}}, sources} -> {ix, sources}
+     {%{} = parent, _sources} -> get_parent_sources_ix(parent, as)
+   end
+ end
+
+ defp quote_name(name) when is_atom(name) do
+   quote_name(Atom.to_string(name))
+ end
+
+ defp quote_name(name) when is_binary(name) do
+   if String.contains?(name, "`") do
+     error!(nil, "bad literal/field/table name #{inspect name} (` is not permitted)")
+   end
+
+   [?`, name, ?`]
+ end
+
+ defp quote_names(names), do: intersperse_map(names, ?,, &quote_name/1)
+
+ defp quote_table(nil, name), do: quote_table(name)
+ defp quote_table(prefix, name), do: [quote_table(prefix), ?., quote_table(name)]
+
+ defp quote_table(name) when is_atom(name),
+   do: quote_table(Atom.to_string(name))
+ defp quote_table(name) do
+   if String.contains?(name, "`") do
+     error!(nil, "bad table name #{inspect name}")
+   end
+   [?`, name, ?`]
+ end
+
+ defp intersperse_map(list, separator, mapper, acc \\ [])
+ defp intersperse_map([], _separator, _mapper, acc),
+   do: acc
+ defp intersperse_map([elem], _separator, mapper, acc),
+   do: [acc | mapper.(elem)]
+ defp intersperse_map([elem | rest], separator, mapper, acc),
+   do: intersperse_map(rest, separator, mapper, [acc, mapper.(elem), separator])
+
+ defp if_do(condition, value) do
+   if condition, do: value, else: []
+ end
+
+ defp escape_string(value) when is_binary(value) do
+   value
+   |> :binary.replace("'", "''", [:global])
+   |> :binary.replace("\\", "\\\\", [:global])
+ end
+
+ defp escape_json_key(value) when is_binary(value) do
+   value
+   |> escape_string()
+   |> :binary.replace("\"", "\\\\\"", [:global])
+ end
+
+ defp ecto_cast_to_db(:id, _query), do: "unsigned"
+ defp ecto_cast_to_db(:integer, _query), do: "unsigned"
+ defp ecto_cast_to_db(:string, _query), do: "char"
+ defp ecto_cast_to_db(:utc_datetime_usec, _query), do: "datetime(6)"
+ defp ecto_cast_to_db(:naive_datetime_usec, _query), do: "datetime(6)"
+ defp ecto_cast_to_db(type, query), do: ecto_to_db(type, query)
+
+ defp ecto_size_to_db(:binary), do: "varbinary"
+ defp ecto_size_to_db(type), do: ecto_to_db(type)
+
+ defp ecto_to_db(type, query \\ nil)
+ defp ecto_to_db({:array, _}, query), do: error!(query, "Array type is not supported by MySQL")
+ defp ecto_to_db(:id, _query), do: "integer"
+ defp ecto_to_db(:serial, _query), do: "bigint unsigned not null
auto_increment" + defp ecto_to_db(:bigserial, _query), do: "bigint unsigned not null auto_increment" + defp ecto_to_db(:binary_id, _query), do: "binary(16)" + defp ecto_to_db(:string, _query), do: "varchar" + defp ecto_to_db(:float, _query), do: "double" + defp ecto_to_db(:binary, _query), do: "blob" + defp ecto_to_db(:uuid, _query), do: "binary(16)" # MySQL does not support uuid + defp ecto_to_db(:map, _query), do: "json" + defp ecto_to_db({:map, _}, _query), do: "json" + defp ecto_to_db(:time_usec, _query), do: "time" + defp ecto_to_db(:utc_datetime, _query), do: "datetime" + defp ecto_to_db(:utc_datetime_usec, _query), do: "datetime" + defp ecto_to_db(:naive_datetime, _query), do: "datetime" + defp ecto_to_db(:naive_datetime_usec, _query), do: "datetime" + defp ecto_to_db(atom, _query) when is_atom(atom), do: Atom.to_string(atom) + defp ecto_to_db(type, _query) do + raise ArgumentError, + "unsupported type `#{inspect(type)}`. The type can either be an atom, a string " <> + "or a tuple of the form `{:map, t}` where `t` itself follows the same conditions." + end + + defp error!(nil, message) do + raise ArgumentError, message + end + defp error!(query, message) do + raise Ecto.QueryError, query: query, message: message + end + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/postgres.ex b/deps/ecto_sql/lib/ecto/adapters/postgres.ex new file mode 100644 index 0000000..d066560 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/postgres.ex @@ -0,0 +1,376 @@ +defmodule Ecto.Adapters.Postgres do + @moduledoc """ + Adapter module for PostgreSQL. + + It uses `Postgrex` for communicating to the database. + + ## Features + + * Full query support (including joins, preloads and associations) + * Support for transactions + * Support for data migrations + * Support for ecto.create and ecto.drop operations + * Support for transactional tests via `Ecto.Adapters.SQL` + + ## Options + + Postgres options split in different categories described + below. All options can be given via the repository + configuration: + + config :your_app, YourApp.Repo, + ... + + The `:prepare` option may be specified per operation: + + YourApp.Repo.all(Queryable, prepare: :unnamed) + + ### Connection options + + * `:hostname` - Server hostname + * `:socket_dir` - Connect to Postgres via UNIX sockets in the given directory + The socket name is derived based on the port. This is the preferred method + for configuring sockets and it takes precedence over the hostname. If you are + connecting to a socket outside of the Postgres convention, use `:socket` instead; + * `:socket` - Connect to Postgres via UNIX sockets in the given path. + This option takes precedence over the `:hostname` and `:socket_dir` + * `:username` - Username + * `:password` - User password + * `:port` - Server port (default: 5432) + * `:database` - the database to connect to + * `:maintenance_database` - Specifies the name of the database to connect to when + creating or dropping the database. 
Defaults to `"postgres"` + * `:pool` - The connection pool module, may be set to `Ecto.Adapters.SQL.Sandbox` + * `:ssl` - Set to true if ssl should be used (default: false) + * `:ssl_opts` - A list of ssl options, see Erlang's `ssl` docs + * `:parameters` - Keyword list of connection parameters + * `:connect_timeout` - The timeout for establishing new connections (default: 5000) + * `:prepare` - How to prepare queries, either `:named` to use named queries + or `:unnamed` to force unnamed queries (default: `:named`) + * `:socket_options` - Specifies socket configuration + * `:show_sensitive_data_on_connection_error` - show connection data and + configuration whenever there is an error attempting to connect to the + database + + The `:socket_options` are particularly useful when configuring the size + of both send and receive buffers. For example, when Ecto starts with a + pool of 20 connections, the memory usage may quickly grow from 20MB to + 50MB based on the operating system default values for TCP buffers. It is + advised to stick with the operating system defaults but they can be + tweaked if desired: + + socket_options: [recbuf: 8192, sndbuf: 8192] + + We also recommend developers to consult the `Postgrex.start_link/1` + documentation for a complete listing of all supported options. + + ### Storage options + + * `:encoding` - the database encoding (default: "UTF8") + or `:unspecified` to remove encoding parameter (alternative engine compatibility) + * `:template` - the template to create the database from + * `:lc_collate` - the collation order + * `:lc_ctype` - the character classification + * `:dump_path` - where to place dumped structures + * `:force_drop` - force the database to be dropped even + if it has connections to it (requires PostgreSQL 13+) + + ### After connect callback + + If you want to execute a callback as soon as connection is established + to the database, you can use the `:after_connect` configuration. For + example, in your repository configuration you can add: + + after_connect: {Postgrex, :query!, ["SET search_path TO global_prefix", []]} + + You can also specify your own module that will receive the Postgrex + connection as argument. + + ## Extensions + + Both PostgreSQL and its adapter for Elixir, Postgrex, support an + extension system. If you want to use custom extensions for Postgrex + alongside Ecto, you must define a type module with your extensions. + Create a new file anywhere in your application with the following: + + Postgrex.Types.define(MyApp.PostgresTypes, + [MyExtension.Foo, MyExtensionBar] ++ Ecto.Adapters.Postgres.extensions()) + + Once your type module is defined, you can configure the repository to use it: + + config :my_app, MyApp.Repo, types: MyApp.PostgresTypes + + """ + + # Inherit all behaviour from Ecto.Adapters.SQL + use Ecto.Adapters.SQL, driver: :postgrex + + # And provide a custom storage implementation + @behaviour Ecto.Adapter.Storage + @behaviour Ecto.Adapter.Structure + + @default_maintenance_database "postgres" + @default_prepare_opt :named + + @doc """ + All Ecto extensions for Postgrex. 
+ """ + def extensions do + [] + end + + # Support arrays in place of IN + @impl true + def dumpers({:map, _}, type), do: [&Ecto.Type.embedded_dump(type, &1, :json)] + def dumpers({:in, sub}, {:in, sub}), do: [{:array, sub}] + def dumpers(:binary_id, type), do: [type, Ecto.UUID] + def dumpers(_, type), do: [type] + + ## Query API + + @impl Ecto.Adapter.Queryable + def execute(adapter_meta, query_meta, query, params, opts) do + prepare = Keyword.get(opts, :prepare, @default_prepare_opt) + + unless valid_prepare?(prepare) do + raise ArgumentError, + "expected option `:prepare` to be either `:named` or `:unnamed`, got: #{inspect(prepare)}" + end + + Ecto.Adapters.SQL.execute(prepare, adapter_meta, query_meta, query, params, opts) + end + + defp valid_prepare?(prepare) when prepare in [:named, :unnamed], do: true + defp valid_prepare?(_), do: false + + ## Storage API + + @impl true + def storage_up(opts) do + database = + Keyword.fetch!(opts, :database) || raise ":database is nil in repository configuration" + + encoding = if opts[:encoding] == :unspecified, do: nil, else: opts[:encoding] || "UTF8" + maintenance_database = Keyword.get(opts, :maintenance_database, @default_maintenance_database) + opts = Keyword.put(opts, :database, maintenance_database) + + check_existence_command = "SELECT FROM pg_database WHERE datname = '#{database}'" + + case run_query(check_existence_command, opts) do + {:ok, %{num_rows: 1}} -> + {:error, :already_up} + + _ -> + create_command = + ~s(CREATE DATABASE "#{database}") + |> concat_if(encoding, &"ENCODING '#{&1}'") + |> concat_if(opts[:template], &"TEMPLATE=#{&1}") + |> concat_if(opts[:lc_ctype], &"LC_CTYPE='#{&1}'") + |> concat_if(opts[:lc_collate], &"LC_COLLATE='#{&1}'") + + case run_query(create_command, opts) do + {:ok, _} -> + :ok + + {:error, %{postgres: %{code: :duplicate_database}}} -> + {:error, :already_up} + + {:error, error} -> + {:error, Exception.message(error)} + end + end + end + + defp concat_if(content, nil, _), do: content + defp concat_if(content, false, _), do: content + defp concat_if(content, value, fun), do: content <> " " <> fun.(value) + + @impl true + def storage_down(opts) do + database = Keyword.fetch!(opts, :database) || raise ":database is nil in repository configuration" + command = "DROP DATABASE \"#{database}\"" + |> concat_if(opts[:force_drop], fn _ -> "WITH (FORCE)" end) + maintenance_database = Keyword.get(opts, :maintenance_database, @default_maintenance_database) + opts = Keyword.put(opts, :database, maintenance_database) + + case run_query(command, opts) do + {:ok, _} -> + :ok + {:error, %{postgres: %{code: :invalid_catalog_name}}} -> + {:error, :already_down} + {:error, error} -> + {:error, Exception.message(error)} + end + end + + @impl Ecto.Adapter.Storage + def storage_status(opts) do + database = Keyword.fetch!(opts, :database) || raise ":database is nil in repository configuration" + maintenance_database = Keyword.get(opts, :maintenance_database, @default_maintenance_database) + opts = Keyword.put(opts, :database, maintenance_database) + + check_database_query = "SELECT datname FROM pg_catalog.pg_database WHERE datname = '#{database}'" + + case run_query(check_database_query, opts) do + {:ok, %{num_rows: 0}} -> :down + {:ok, %{num_rows: _num_rows}} -> :up + other -> {:error, other} + end + end + + @impl true + def supports_ddl_transaction? 
do + true + end + + @impl true + def lock_for_migrations(meta, opts, fun) do + %{opts: adapter_opts} = meta + + if Keyword.fetch(adapter_opts, :pool_size) == {:ok, 1} do + Ecto.Adapters.SQL.raise_migration_pool_size_error() + end + + opts = Keyword.put(opts, :timeout, :infinity) + + {:ok, result} = + transaction(meta, opts, fn -> + # SHARE UPDATE EXCLUSIVE MODE is the first lock that locks + # itself but still allows updates to happen, see + # # https://www.postgresql.org/docs/9.4/explicit-locking.html + source = Keyword.get(opts, :migration_source, "schema_migrations") + table = if prefix = opts[:prefix], do: ~s|"#{prefix}"."#{source}"|, else: ~s|"#{source}"| + {:ok, _} = Ecto.Adapters.SQL.query(meta, "LOCK TABLE #{table} IN SHARE UPDATE EXCLUSIVE MODE", [], opts) + fun.() + end) + + result + end + + @impl true + def structure_dump(default, config) do + table = config[:migration_source] || "schema_migrations" + with {:ok, versions} <- select_versions(table, config), + {:ok, path} <- pg_dump(default, config), + do: append_versions(table, versions, path) + end + + defp select_versions(table, config) do + case run_query(~s[SELECT version FROM public."#{table}" ORDER BY version], config) do + {:ok, %{rows: rows}} -> {:ok, Enum.map(rows, &hd/1)} + {:error, %{postgres: %{code: :undefined_table}}} -> {:ok, []} + {:error, _} = error -> error + end + end + + defp pg_dump(default, config) do + path = config[:dump_path] || Path.join(default, "structure.sql") + File.mkdir_p!(Path.dirname(path)) + + case run_with_cmd("pg_dump", config, ["--file", path, "--schema-only", "--no-acl", + "--no-owner", config[:database]]) do + {_output, 0} -> + {:ok, path} + {output, _} -> + {:error, output} + end + end + + defp append_versions(_table, [], path) do + {:ok, path} + end + + defp append_versions(table, versions, path) do + sql = Enum.map_join(versions, &~s[INSERT INTO public."#{table}" (version) VALUES (#{&1});\n]) + + File.open!(path, [:append], fn file -> + IO.write(file, sql) + end) + + {:ok, path} + end + + @impl true + def structure_load(default, config) do + path = config[:dump_path] || Path.join(default, "structure.sql") + args = ["--quiet", "--file", path, "-vON_ERROR_STOP=1", + "--single-transaction", config[:database]] + case run_with_cmd("psql", config, args) do + {_output, 0} -> {:ok, path} + {output, _} -> {:error, output} + end + end + + ## Helpers + + defp run_query(sql, opts) do + {:ok, _} = Application.ensure_all_started(:ecto_sql) + {:ok, _} = Application.ensure_all_started(:postgrex) + + opts = + opts + |> Keyword.drop([:name, :log, :pool, :pool_size]) + |> Keyword.put(:backoff_type, :stop) + |> Keyword.put(:max_restarts, 0) + + task = Task.Supervisor.async_nolink(Ecto.Adapters.SQL.StorageSupervisor, fn -> + {:ok, conn} = Postgrex.start_link(opts) + + value = Postgrex.query(conn, sql, [], opts) + GenServer.stop(conn) + value + end) + + timeout = Keyword.get(opts, :timeout, 15_000) + + case Task.yield(task, timeout) || Task.shutdown(task) do + {:ok, {:ok, result}} -> + {:ok, result} + {:ok, {:error, error}} -> + {:error, error} + {:exit, {%{__struct__: struct} = error, _}} + when struct in [Postgrex.Error, DBConnection.Error] -> + {:error, error} + {:exit, reason} -> + {:error, RuntimeError.exception(Exception.format_exit(reason))} + nil -> + {:error, RuntimeError.exception("command timed out")} + end + end + + defp run_with_cmd(cmd, opts, opt_args) do + unless System.find_executable(cmd) do + raise "could not find executable `#{cmd}` in path, " <> + "please guarantee it is available before 
running ecto commands" + end + + env = + [{"PGCONNECT_TIMEOUT", "10"}] + env = + if password = opts[:password] do + [{"PGPASSWORD", password}|env] + else + env + end + + args = + [] + args = + if username = opts[:username], do: ["-U", username|args], else: args + args = + if port = opts[:port], do: ["-p", to_string(port)|args], else: args + + host = opts[:socket_dir] || opts[:hostname] || System.get_env("PGHOST") || "localhost" + + if opts[:socket] do + IO.warn( + ":socket option is ignored when connecting in structure_load/2 and structure_dump/2," <> + " use :socket_dir or :hostname instead" + ) + end + + args = ["--host", host|args] + args = args ++ opt_args + System.cmd(cmd, args, env: env, stderr_to_stdout: true) + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/postgres/connection.ex b/deps/ecto_sql/lib/ecto/adapters/postgres/connection.ex new file mode 100644 index 0000000..6c74e4c --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/postgres/connection.ex @@ -0,0 +1,1393 @@ +if Code.ensure_loaded?(Postgrex) do + defmodule Ecto.Adapters.Postgres.Connection do + @moduledoc false + + @default_port 5432 + @behaviour Ecto.Adapters.SQL.Connection + + ## Module and Options + + @impl true + def child_spec(opts) do + opts + |> Keyword.put_new(:port, @default_port) + |> Postgrex.child_spec() + end + + @impl true + def to_constraints(%Postgrex.Error{postgres: %{code: :unique_violation, constraint: constraint}}, _opts), + do: [unique: constraint] + def to_constraints(%Postgrex.Error{postgres: %{code: :foreign_key_violation, constraint: constraint}}, _opts), + do: [foreign_key: constraint] + def to_constraints(%Postgrex.Error{postgres: %{code: :exclusion_violation, constraint: constraint}}, _opts), + do: [exclusion: constraint] + def to_constraints(%Postgrex.Error{postgres: %{code: :check_violation, constraint: constraint}}, _opts), + do: [check: constraint] + + # Postgres 9.2 and earlier does not provide the constraint field + @impl true + def to_constraints(%Postgrex.Error{postgres: %{code: :unique_violation, message: message}}, _opts) do + case :binary.split(message, " unique constraint ") do + [_, quoted] -> [unique: strip_quotes(quoted)] + _ -> [] + end + end + def to_constraints(%Postgrex.Error{postgres: %{code: :foreign_key_violation, message: message}}, _opts) do + case :binary.split(message, " foreign key constraint ") do + [_, quoted] -> + [quoted | _] = :binary.split(quoted, " on table ") + [foreign_key: strip_quotes(quoted)] + _ -> + [] + end + end + def to_constraints(%Postgrex.Error{postgres: %{code: :exclusion_violation, message: message}}, _opts) do + case :binary.split(message, " exclusion constraint ") do + [_, quoted] -> [exclusion: strip_quotes(quoted)] + _ -> [] + end + end + def to_constraints(%Postgrex.Error{postgres: %{code: :check_violation, message: message}}, _opts) do + case :binary.split(message, " check constraint ") do + [_, quoted] -> [check: strip_quotes(quoted)] + _ -> [] + end + end + + def to_constraints(_, _opts), + do: [] + + defp strip_quotes(quoted) do + size = byte_size(quoted) - 2 + <<_, unquoted::binary-size(size), _>> = quoted + unquoted + end + + ## Query + + @impl true + def prepare_execute(conn, name, sql, params, opts) do + case Postgrex.prepare_execute(conn, name, sql, params, opts) do + {:error, %Postgrex.Error{postgres: %{pg_code: "22P02", message: message}} = error} -> + context = """ + . If you are trying to query a JSON field, the parameter may need to be interpolated. 
\ + Instead of + + p.json["field"] != "value" + + do + + p.json["field"] != ^"value" + """ + + {:error, put_in(error.postgres.message, message <> context)} + other -> + other + end + + end + + @impl true + def query(conn, sql, params, opts) do + Postgrex.query(conn, sql, params, opts) + end + + @impl true + def query_many(_conn, _sql, _params, _opts) do + raise RuntimeError, "query_many is not supported in the Postgrex adapter" + end + + @impl true + def execute(conn, %{ref: ref} = query, params, opts) do + case Postgrex.execute(conn, query, params, opts) do + {:ok, %{ref: ^ref}, result} -> + {:ok, result} + + {:ok, _, _} = ok -> + ok + + {:error, %Postgrex.QueryError{} = err} -> + {:reset, err} + + {:error, %Postgrex.Error{postgres: %{code: :feature_not_supported}} = err} -> + {:reset, err} + + {:error, _} = error -> + error + end + end + + @impl true + def stream(conn, sql, params, opts) do + Postgrex.stream(conn, sql, params, opts) + end + + @parent_as __MODULE__ + alias Ecto.Query.{BooleanExpr, JoinExpr, QueryExpr, WithExpr} + + @impl true + def all(query, as_prefix \\ []) do + sources = create_names(query, as_prefix) + {select_distinct, order_by_distinct} = distinct(query.distinct, sources, query) + + cte = cte(query, sources) + from = from(query, sources) + select = select(query, select_distinct, sources) + join = join(query, sources) + where = where(query, sources) + group_by = group_by(query, sources) + having = having(query, sources) + window = window(query, sources) + combinations = combinations(query) + order_by = order_by(query, order_by_distinct, sources) + limit = limit(query, sources) + offset = offset(query, sources) + lock = lock(query, sources) + + [cte, select, from, join, where, group_by, having, window, combinations, order_by, limit, offset | lock] + end + + @impl true + def update_all(%{from: %{source: source}} = query, prefix \\ nil) do + sources = create_names(query, []) + cte = cte(query, sources) + {from, name} = get_source(query, sources, 0, source) + + prefix = prefix || ["UPDATE ", from, " AS ", name | " SET "] + fields = update_fields(query, sources) + {join, wheres} = using_join(query, :update_all, "FROM", sources) + where = where(%{query | wheres: wheres ++ query.wheres}, sources) + + [cte, prefix, fields, join, where | returning(query, sources)] + end + + @impl true + def delete_all(%{from: from} = query) do + sources = create_names(query, []) + cte = cte(query, sources) + {from, name} = get_source(query, sources, 0, from) + + {join, wheres} = using_join(query, :delete_all, "USING", sources) + where = where(%{query | wheres: wheres ++ query.wheres}, sources) + + [cte, "DELETE FROM ", from, " AS ", name, join, where | returning(query, sources)] + end + + @impl true + def insert(prefix, table, header, rows, on_conflict, returning, placeholders) do + counter_offset = length(placeholders) + 1 + values = + if header == [] do + [" VALUES " | intersperse_map(rows, ?,, fn _ -> "(DEFAULT)" end)] + else + [" (", quote_names(header), ") " | insert_all(rows, counter_offset)] + end + + ["INSERT INTO ", quote_table(prefix, table), insert_as(on_conflict), + values, on_conflict(on_conflict, header) | returning(returning)] + end + + defp insert_as({%{sources: sources}, _, _}) do + {_expr, name, _schema} = create_name(sources, 0, []) + [" AS " | name] + end + defp insert_as({_, _, _}) do + [] + end + + defp on_conflict({:raise, _, []}, _header), + do: [] + defp on_conflict({:nothing, _, targets}, _header), + do: [" ON CONFLICT ", conflict_target(targets) | "DO NOTHING"] + 
defp on_conflict({fields, _, targets}, _header) when is_list(fields), + do: [" ON CONFLICT ", conflict_target!(targets), "DO " | replace(fields)] + defp on_conflict({query, _, targets}, _header), + do: [" ON CONFLICT ", conflict_target!(targets), "DO " | update_all(query, "UPDATE SET ")] + + defp conflict_target!([]), + do: error!(nil, "the :conflict_target option is required on upserts by PostgreSQL") + defp conflict_target!(target), + do: conflict_target(target) + + defp conflict_target({:unsafe_fragment, fragment}), + do: [fragment, ?\s] + defp conflict_target([]), + do: [] + defp conflict_target(targets), + do: [?(, quote_names(targets), ?), ?\s] + + defp replace(fields) do + ["UPDATE SET " | + intersperse_map(fields, ?,, fn field -> + quoted = quote_name(field) + [quoted, " = ", "EXCLUDED." | quoted] + end)] + end + + defp insert_all(query = %Ecto.Query{}, _counter) do + [?(, all(query), ?)] + end + + defp insert_all(rows, counter) do + ["VALUES ", intersperse_reduce(rows, ?,, counter, fn row, counter -> + {row, counter} = insert_each(row, counter) + {[?(, row, ?)], counter} + end) + |> elem(0)] + end + + defp insert_each(values, counter) do + intersperse_reduce(values, ?,, counter, fn + nil, counter -> + {"DEFAULT", counter} + + {%Ecto.Query{} = query, params_counter}, counter -> + {[?(, all(query), ?)], counter + params_counter} + + {:placeholder, placeholder_index}, counter -> + {[?$ | placeholder_index], counter} + + _, counter -> + {[?$ | Integer.to_string(counter)], counter + 1} + end) + end + + @impl true + def update(prefix, table, fields, filters, returning) do + {fields, count} = intersperse_reduce(fields, ", ", 1, fn field, acc -> + {[quote_name(field), " = $" | Integer.to_string(acc)], acc + 1} + end) + + {filters, _count} = intersperse_reduce(filters, " AND ", count, fn + {field, nil}, acc -> + {[quote_name(field), " IS NULL"], acc} + + {field, _value}, acc -> + {[quote_name(field), " = $" | Integer.to_string(acc)], acc + 1} + end) + + ["UPDATE ", quote_table(prefix, table), " SET ", + fields, " WHERE ", filters | returning(returning)] + end + + @impl true + def delete(prefix, table, filters, returning) do + {filters, _} = intersperse_reduce(filters, " AND ", 1, fn + {field, nil}, acc -> + {[quote_name(field), " IS NULL"], acc} + + {field, _value}, acc -> + {[quote_name(field), " = $" | Integer.to_string(acc)], acc + 1} + end) + + ["DELETE FROM ", quote_table(prefix, table), " WHERE ", filters | returning(returning)] + end + + @impl true + def explain_query(conn, query, params, opts) do + {explain_opts, opts} = + Keyword.split(opts, ~w[analyze verbose costs settings buffers timing summary format]a) + + map_format? = {:format, :map} in explain_opts + + case query(conn, build_explain_query(query, explain_opts), params, opts) do + {:ok, %Postgrex.Result{rows: rows}} when map_format? 
-> + {:ok, List.flatten(rows)} + {:ok, %Postgrex.Result{rows: rows}} -> + {:ok, Enum.map_join(rows, "\n", & &1)} + error -> error + end + end + + def build_explain_query(query, []) do + ["EXPLAIN ", query] + |> IO.iodata_to_binary() + end + + def build_explain_query(query, opts) do + {analyze, opts} = Keyword.pop(opts, :analyze) + {verbose, opts} = Keyword.pop(opts, :verbose) + + # Given only ANALYZE or VERBOSE opts we assume the legacy format + # to support all Postgres versions, otherwise assume the new + # syntax supported since v9.0 + case opts do + [] -> + [ + "EXPLAIN ", + if_do(quote_boolean(analyze) == "TRUE", "ANALYZE "), + if_do(quote_boolean(verbose) == "TRUE", "VERBOSE "), + query + ] + + opts -> + opts = + ([analyze: analyze, verbose: verbose] ++ opts) + |> Enum.reduce([], fn + {_, nil}, acc -> + acc + + {:format, value}, acc -> + [String.upcase("#{format_to_sql(value)}") | acc] + + {opt, value}, acc -> + [String.upcase("#{opt} #{quote_boolean(value)}") | acc] + end) + |> Enum.reverse() + |> Enum.join(", ") + + ["EXPLAIN ( ", opts, " ) ", query] + end + |> IO.iodata_to_binary() + end + + ## Query generation + + binary_ops = + [==: " = ", !=: " != ", <=: " <= ", >=: " >= ", <: " < ", >: " > ", + +: " + ", -: " - ", *: " * ", /: " / ", + and: " AND ", or: " OR ", ilike: " ILIKE ", like: " LIKE "] + + @binary_ops Keyword.keys(binary_ops) + + Enum.map(binary_ops, fn {op, str} -> + defp handle_call(unquote(op), 2), do: {:binary_op, unquote(str)} + end) + + defp handle_call(fun, _arity), do: {:fun, Atom.to_string(fun)} + + defp select(%{select: %{fields: fields}} = query, select_distinct, sources) do + ["SELECT", select_distinct, ?\s | select_fields(fields, sources, query)] + end + + defp select_fields([], _sources, _query), + do: "TRUE" + defp select_fields(fields, sources, query) do + intersperse_map(fields, ", ", fn + {:&, _, [idx]} -> + case elem(sources, idx) do + {source, _, nil} -> + error!(query, "PostgreSQL does not support selecting all fields from #{source} without a schema. 
" <> + "Please specify a schema or specify exactly which fields you want to select") + {_, source, _} -> + source + end + {key, value} -> + [expr(value, sources, query), " AS " | quote_name(key)] + value -> + expr(value, sources, query) + end) + end + + defp distinct(nil, _, _), do: {[], []} + defp distinct(%QueryExpr{expr: []}, _, _), do: {[], []} + defp distinct(%QueryExpr{expr: true}, _, _), do: {" DISTINCT", []} + defp distinct(%QueryExpr{expr: false}, _, _), do: {[], []} + defp distinct(%QueryExpr{expr: exprs}, sources, query) do + {[" DISTINCT ON (", + intersperse_map(exprs, ", ", fn {_, expr} -> expr(expr, sources, query) end), ?)], + exprs} + end + + defp from(%{from: %{hints: [_ | _]}} = query, _sources) do + error!(query, "table hints are not supported by PostgreSQL") + end + + defp from(%{from: %{source: source}} = query, sources) do + {from, name} = get_source(query, sources, 0, source) + [" FROM ", from, " AS " | name] + end + + defp cte(%{with_ctes: %WithExpr{recursive: recursive, queries: [_ | _] = queries}} = query, sources) do + recursive_opt = if recursive, do: "RECURSIVE ", else: "" + ctes = intersperse_map(queries, ", ", &cte_expr(&1, sources, query)) + ["WITH ", recursive_opt, ctes, " "] + end + + defp cte(%{with_ctes: _}, _), do: [] + + defp cte_expr({name, cte}, sources, query) do + [quote_name(name), " AS ", cte_query(cte, sources, query)] + end + + defp cte_query(%Ecto.Query{} = query, sources, parent_query) do + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + ["(", all(query, subquery_as_prefix(sources)), ")"] + end + + defp cte_query(%QueryExpr{expr: expr}, sources, query) do + expr(expr, sources, query) + end + + defp update_fields(%{updates: updates} = query, sources) do + for(%{expr: expr} <- updates, + {op, kw} <- expr, + {key, value} <- kw, + do: update_op(op, key, value, sources, query)) |> Enum.intersperse(", ") + end + + defp update_op(:set, key, value, sources, query) do + [quote_name(key), " = " | expr(value, sources, query)] + end + + defp update_op(:inc, key, value, sources, query) do + [quote_name(key), " = ", quote_qualified_name(key, sources, 0), " + " | + expr(value, sources, query)] + end + + defp update_op(:push, key, value, sources, query) do + [quote_name(key), " = array_append(", quote_qualified_name(key, sources, 0), + ", ", expr(value, sources, query), ?)] + end + + defp update_op(:pull, key, value, sources, query) do + [quote_name(key), " = array_remove(", quote_qualified_name(key, sources, 0), + ", ", expr(value, sources, query), ?)] + end + + defp update_op(command, _key, _value, _sources, query) do + error!(query, "unknown update operation #{inspect command} for PostgreSQL") + end + + defp using_join(%{joins: []}, _kind, _prefix, _sources), do: {[], []} + defp using_join(%{joins: joins} = query, kind, prefix, sources) do + froms = + intersperse_map(joins, ", ", fn + %JoinExpr{qual: :inner, ix: ix, source: source} -> + {join, name} = get_source(query, sources, ix, source) + [join, " AS " | name] + %JoinExpr{qual: qual} -> + error!(query, "PostgreSQL supports only inner joins on #{kind}, got: `#{qual}`") + end) + + wheres = + for %JoinExpr{on: %QueryExpr{expr: value} = expr} <- joins, + value != true, + do: expr |> Map.put(:__struct__, BooleanExpr) |> Map.put(:op, :and) + + {[?\s, prefix, ?\s | froms], wheres} + end + + defp join(%{joins: []}, _sources), do: [] + defp join(%{joins: joins} = query, sources) do + [?\s | intersperse_map(joins, ?\s, fn + %JoinExpr{on: %QueryExpr{expr: expr}, qual: qual, ix: ix, source: 
source, hints: hints} -> + if hints != [] do + error!(query, "table hints are not supported by PostgreSQL") + end + + {join, name} = get_source(query, sources, ix, source) + [join_qual(qual), join, " AS ", name | join_on(qual, expr, sources, query)] + end)] + end + + defp join_on(:cross, true, _sources, _query), do: [] + defp join_on(_qual, expr, sources, query), do: [" ON " | expr(expr, sources, query)] + + defp join_qual(:inner), do: "INNER JOIN " + defp join_qual(:inner_lateral), do: "INNER JOIN LATERAL " + defp join_qual(:left), do: "LEFT OUTER JOIN " + defp join_qual(:left_lateral), do: "LEFT OUTER JOIN LATERAL " + defp join_qual(:right), do: "RIGHT OUTER JOIN " + defp join_qual(:full), do: "FULL OUTER JOIN " + defp join_qual(:cross), do: "CROSS JOIN " + + defp where(%{wheres: wheres} = query, sources) do + boolean(" WHERE ", wheres, sources, query) + end + + defp having(%{havings: havings} = query, sources) do + boolean(" HAVING ", havings, sources, query) + end + + defp group_by(%{group_bys: []}, _sources), do: [] + defp group_by(%{group_bys: group_bys} = query, sources) do + [" GROUP BY " | + intersperse_map(group_bys, ", ", fn + %QueryExpr{expr: expr} -> + intersperse_map(expr, ", ", &expr(&1, sources, query)) + end)] + end + + defp window(%{windows: []}, _sources), do: [] + defp window(%{windows: windows} = query, sources) do + [" WINDOW " | + intersperse_map(windows, ", ", fn {name, %{expr: kw}} -> + [quote_name(name), " AS " | window_exprs(kw, sources, query)] + end)] + end + + defp window_exprs(kw, sources, query) do + [?(, intersperse_map(kw, ?\s, &window_expr(&1, sources, query)), ?)] + end + + defp window_expr({:partition_by, fields}, sources, query) do + ["PARTITION BY " | intersperse_map(fields, ", ", &expr(&1, sources, query))] + end + + defp window_expr({:order_by, fields}, sources, query) do + ["ORDER BY " | intersperse_map(fields, ", ", &order_by_expr(&1, sources, query))] + end + + defp window_expr({:frame, {:fragment, _, _} = fragment}, sources, query) do + expr(fragment, sources, query) + end + + defp order_by(%{order_bys: []}, _distinct, _sources), do: [] + defp order_by(%{order_bys: order_bys} = query, distinct, sources) do + order_bys = Enum.flat_map(order_bys, & &1.expr) + order_bys = order_by_concat(distinct, order_bys) + [" ORDER BY " | intersperse_map(order_bys, ", ", &order_by_expr(&1, sources, query))] + end + + defp order_by_concat([head | left], [head | right]), do: [head | order_by_concat(left, right)] + defp order_by_concat(left, right), do: left ++ right + + defp order_by_expr({dir, expr}, sources, query) do + str = expr(expr, sources, query) + + case dir do + :asc -> str + :asc_nulls_last -> [str | " ASC NULLS LAST"] + :asc_nulls_first -> [str | " ASC NULLS FIRST"] + :desc -> [str | " DESC"] + :desc_nulls_last -> [str | " DESC NULLS LAST"] + :desc_nulls_first -> [str | " DESC NULLS FIRST"] + end + end + + defp limit(%{limit: nil}, _sources), do: [] + defp limit(%{limit: %QueryExpr{expr: expr}} = query, sources) do + [" LIMIT " | expr(expr, sources, query)] + end + + defp offset(%{offset: nil}, _sources), do: [] + defp offset(%{offset: %QueryExpr{expr: expr}} = query, sources) do + [" OFFSET " | expr(expr, sources, query)] + end + + defp combinations(%{combinations: combinations}) do + Enum.map(combinations, fn + {:union, query} -> [" UNION (", all(query), ")"] + {:union_all, query} -> [" UNION ALL (", all(query), ")"] + {:except, query} -> [" EXCEPT (", all(query), ")"] + {:except_all, query} -> [" EXCEPT ALL (", all(query), ")"] + {:intersect, 
query} -> [" INTERSECT (", all(query), ")"] + {:intersect_all, query} -> [" INTERSECT ALL (", all(query), ")"] + end) + end + + defp lock(%{lock: nil}, _sources), do: [] + defp lock(%{lock: binary}, _sources) when is_binary(binary), do: [?\s | binary] + defp lock(%{lock: expr} = query, sources), do: [?\s | expr(expr, sources, query)] + + defp boolean(_name, [], _sources, _query), do: [] + defp boolean(name, [%{expr: expr, op: op} | query_exprs], sources, query) do + [name | + Enum.reduce(query_exprs, {op, paren_expr(expr, sources, query)}, fn + %BooleanExpr{expr: expr, op: op}, {op, acc} -> + {op, [acc, operator_to_boolean(op), paren_expr(expr, sources, query)]} + %BooleanExpr{expr: expr, op: op}, {_, acc} -> + {op, [?(, acc, ?), operator_to_boolean(op), paren_expr(expr, sources, query)]} + end) |> elem(1)] + end + + defp operator_to_boolean(:and), do: " AND " + defp operator_to_boolean(:or), do: " OR " + + defp parens_for_select([first_expr | _] = expr) do + if is_binary(first_expr) and String.match?(first_expr, ~r/^\s*select/i) do + [?(, expr, ?)] + else + expr + end + end + + defp paren_expr(expr, sources, query) do + [?(, expr(expr, sources, query), ?)] + end + + defp expr({:^, [], [ix]}, _sources, _query) do + [?$ | Integer.to_string(ix + 1)] + end + + defp expr({{:., _, [{:parent_as, _, [as]}, field]}, _, []}, _sources, query) + when is_atom(field) do + {ix, sources} = get_parent_sources_ix(query, as) + quote_qualified_name(field, sources, ix) + end + + defp expr({{:., _, [{:&, _, [idx]}, field]}, _, []}, sources, _query) when is_atom(field) do + quote_qualified_name(field, sources, idx) + end + + defp expr({:&, _, [idx]}, sources, _query) do + {_, source, _} = elem(sources, idx) + source + end + + defp expr({:in, _, [_left, []]}, _sources, _query) do + "false" + end + + defp expr({:in, _, [left, right]}, sources, query) when is_list(right) do + args = intersperse_map(right, ?,, &expr(&1, sources, query)) + [expr(left, sources, query), " IN (", args, ?)] + end + + defp expr({:in, _, [left, {:^, _, [ix, _]}]}, sources, query) do + [expr(left, sources, query), " = ANY($", Integer.to_string(ix + 1), ?)] + end + + defp expr({:in, _, [left, %Ecto.SubQuery{} = subquery]}, sources, query) do + [expr(left, sources, query), " IN ", expr(subquery, sources, query)] + end + + defp expr({:in, _, [left, right]}, sources, query) do + [expr(left, sources, query), " = ANY(", expr(right, sources, query), ?)] + end + + defp expr({:is_nil, _, [arg]}, sources, query) do + [expr(arg, sources, query) | " IS NULL"] + end + + defp expr({:not, _, [expr]}, sources, query) do + ["NOT (", expr(expr, sources, query), ?)] + end + + defp expr(%Ecto.SubQuery{query: query}, sources, parent_query) do + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + [?(, all(query, subquery_as_prefix(sources)), ?)] + end + + defp expr({:fragment, _, [kw]}, _sources, query) when is_list(kw) or tuple_size(kw) == 3 do + error!(query, "PostgreSQL adapter does not support keyword or interpolated fragments") + end + + defp expr({:fragment, _, parts}, sources, query) do + Enum.map(parts, fn + {:raw, part} -> part + {:expr, expr} -> expr(expr, sources, query) + end) + |> parens_for_select + end + + defp expr({:literal, _, [literal]}, _sources, _query) do + quote_name(literal) + end + + defp expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do + [expr(datetime, sources, query), type_unless_typed(datetime, "timestamp"), " + ", + interval(count, interval, sources, query)] + end + + defp expr({:date_add, 
_, [date, count, interval]}, sources, query) do + [?(, expr(date, sources, query), type_unless_typed(date, "date"), " + ", + interval(count, interval, sources, query) | ")::date"] + end + + defp expr({:json_extract_path, _, [expr, path]}, sources, query) do + json_extract_path(expr, path, sources, query) + end + + defp expr({:filter, _, [agg, filter]}, sources, query) do + aggregate = expr(agg, sources, query) + [aggregate, " FILTER (WHERE ", expr(filter, sources, query), ?)] + end + + defp expr({:over, _, [agg, name]}, sources, query) when is_atom(name) do + aggregate = expr(agg, sources, query) + [aggregate, " OVER " | quote_name(name)] + end + + defp expr({:over, _, [agg, kw]}, sources, query) do + aggregate = expr(agg, sources, query) + [aggregate, " OVER ", window_exprs(kw, sources, query)] + end + + defp expr({:{}, _, elems}, sources, query) do + [?(, intersperse_map(elems, ?,, &expr(&1, sources, query)), ?)] + end + + defp expr({:count, _, []}, _sources, _query), do: "count(*)" + + defp expr({:==, _, [{:json_extract_path, _, [expr, path]} = left, right]}, sources, query) + when is_binary(right) or is_integer(right) or is_boolean(right) do + case Enum.split(path, -1) do + {path, [last]} when is_binary(last) -> + extracted = json_extract_path(expr, path, sources, query) + [?(, extracted, "@>'{", escape_json(last), ": ", escape_json(right) | "}')"] + + _ -> + [maybe_paren(left, sources, query), " = " | maybe_paren(right, sources, query)] + end + end + + defp expr({fun, _, args}, sources, query) when is_atom(fun) and is_list(args) do + {modifier, args} = + case args do + [rest, :distinct] -> {"DISTINCT ", [rest]} + _ -> {[], args} + end + + case handle_call(fun, length(args)) do + {:binary_op, op} -> + [left, right] = args + [maybe_paren(left, sources, query), op | maybe_paren(right, sources, query)] + {:fun, fun} -> + [fun, ?(, modifier, intersperse_map(args, ", ", &expr(&1, sources, query)), ?)] + end + end + + defp expr(list, sources, query) when is_list(list) do + ["ARRAY[", intersperse_map(list, ?,, &expr(&1, sources, query)), ?]] + end + + defp expr(%Decimal{} = decimal, _sources, _query) do + Decimal.to_string(decimal, :normal) + end + + defp expr(%Ecto.Query.Tagged{value: binary, type: :binary}, _sources, _query) + when is_binary(binary) do + ["'\\x", Base.encode16(binary, case: :lower) | "'::bytea"] + end + + defp expr(%Ecto.Query.Tagged{value: other, type: type}, sources, query) do + [maybe_paren(other, sources, query), ?:, ?: | tagged_to_db(type)] + end + + defp expr(nil, _sources, _query), do: "NULL" + defp expr(true, _sources, _query), do: "TRUE" + defp expr(false, _sources, _query), do: "FALSE" + + defp expr(literal, _sources, _query) when is_binary(literal) do + [?\', escape_string(literal), ?\'] + end + + defp expr(literal, _sources, _query) when is_integer(literal) do + Integer.to_string(literal) + end + + defp expr(literal, _sources, _query) when is_float(literal) do + [Float.to_string(literal) | "::float"] + end + + defp expr(expr, _sources, query) do + error!(query, "unsupported expression: #{inspect(expr)}") + end + + defp json_extract_path(expr, [], sources, query) do + expr(expr, sources, query) + end + + defp json_extract_path(expr, path, sources, query) do + path = intersperse_map(path, ?,, &escape_json/1) + [?(, expr(expr, sources, query), "#>'{", path, "}')"] + end + + defp type_unless_typed(%Ecto.Query.Tagged{}, _type), do: [] + defp type_unless_typed(_, type), do: [?:, ?: | type] + + # Always use the largest possible type for integers + defp 
tagged_to_db(:id), do: "bigint" + defp tagged_to_db(:integer), do: "bigint" + defp tagged_to_db({:array, type}), do: [tagged_to_db(type), ?[, ?]] + defp tagged_to_db(type), do: ecto_to_db(type) + + defp interval(count, interval, _sources, _query) when is_integer(count) do + ["interval '", String.Chars.Integer.to_string(count), ?\s, interval, ?\'] + end + + defp interval(count, interval, _sources, _query) when is_float(count) do + count = :erlang.float_to_binary(count, [:compact, decimals: 16]) + ["interval '", count, ?\s, interval, ?\'] + end + + defp interval(count, interval, sources, query) do + [?(, expr(count, sources, query), "::numeric * ", + interval(1, interval, sources, query), ?)] + end + + defp maybe_paren({op, _, [_, _]} = expr, sources, query) when op in @binary_ops, + do: paren_expr(expr, sources, query) + + defp maybe_paren({:is_nil, _, [_]} = expr, sources, query), + do: paren_expr(expr, sources, query) + + defp maybe_paren(expr, sources, query), + do: expr(expr, sources, query) + + defp returning(%{select: nil}, _sources), + do: [] + defp returning(%{select: %{fields: fields}} = query, sources), + do: [" RETURNING " | select_fields(fields, sources, query)] + + defp returning([]), + do: [] + defp returning(returning), + do: [" RETURNING " | quote_names(returning)] + + defp create_names(%{sources: sources}, as_prefix) do + create_names(sources, 0, tuple_size(sources), as_prefix) |> List.to_tuple() + end + + defp create_names(sources, pos, limit, as_prefix) when pos < limit do + [create_name(sources, pos, as_prefix) | create_names(sources, pos + 1, limit, as_prefix)] + end + + defp create_names(_sources, pos, pos, as_prefix) do + [as_prefix] + end + + defp subquery_as_prefix(sources) do + [?s | :erlang.element(tuple_size(sources), sources)] + end + + defp create_name(sources, pos, as_prefix) do + case elem(sources, pos) do + {:fragment, _, _} -> + {nil, as_prefix ++ [?f | Integer.to_string(pos)], nil} + + {table, schema, prefix} -> + name = as_prefix ++ [create_alias(table) | Integer.to_string(pos)] + {quote_table(prefix, table), name, schema} + + %Ecto.SubQuery{} -> + {nil, as_prefix ++ [?s | Integer.to_string(pos)], nil} + end + end + + defp create_alias(<<first, _rest::binary>>) when first in ?a..?z when first in ?A..?Z do + first + end + defp create_alias(_) do + ?t + end + + # DDL + + alias Ecto.Migration.{Table, Index, Reference, Constraint} + + @creates [:create, :create_if_not_exists] + @drops [:drop, :drop_if_exists] + + @impl true + def execute_ddl({command, %Table{} = table, columns}) when command in @creates do + table_name = quote_table(table.prefix, table.name) + query = ["CREATE TABLE ", + if_do(command == :create_if_not_exists, "IF NOT EXISTS "), + table_name, ?\s, ?(, + column_definitions(table, columns), pk_definition(columns, ", "), ?), + options_expr(table.options)] + + [query] ++ + comments_on("TABLE", table_name, table.comment) ++ + comments_for_columns(table_name, columns) + end + + def execute_ddl({command, %Table{} = table, mode}) when command in @drops do + [["DROP TABLE ", if_do(command == :drop_if_exists, "IF EXISTS "), + quote_table(table.prefix, table.name), drop_mode(mode)]] + end + + def execute_ddl({:alter, %Table{} = table, changes}) do + table_name = quote_table(table.prefix, table.name) + query = ["ALTER TABLE ", table_name, ?\s, + column_changes(table, changes), pk_definition(changes, ", ADD ")] + + [query] ++ + comments_on("TABLE", table_name, table.comment) ++ + comments_for_columns(table_name, changes) + end + + def execute_ddl({command, %Index{} = index}) 
when command in @creates do + fields = intersperse_map(index.columns, ", ", &index_expr/1) + include_fields = intersperse_map(index.include, ", ", &index_expr/1) + + queries = [["CREATE ", + if_do(index.unique, "UNIQUE "), + "INDEX ", + if_do(index.concurrently, "CONCURRENTLY "), + if_do(command == :create_if_not_exists, "IF NOT EXISTS "), + quote_name(index.name), + " ON ", + quote_table(index.prefix, index.table), + if_do(index.using, [" USING " , to_string(index.using)]), + ?\s, ?(, fields, ?), + if_do(include_fields != [], [" INCLUDE ", ?(, include_fields, ?)]), + if_do(index.where, [" WHERE ", to_string(index.where)])]] + + queries ++ comments_on("INDEX", quote_table(index.prefix, index.name), index.comment) + end + + def execute_ddl({command, %Index{} = index, mode}) when command in @drops do + [["DROP INDEX ", + if_do(index.concurrently, "CONCURRENTLY "), + if_do(command == :drop_if_exists, "IF EXISTS "), + quote_table(index.prefix, index.name), + drop_mode(mode)]] + end + + def execute_ddl({:rename, %Table{} = current_table, %Table{} = new_table}) do + [["ALTER TABLE ", quote_table(current_table.prefix, current_table.name), + " RENAME TO ", quote_table(nil, new_table.name)]] + end + + def execute_ddl({:rename, %Table{} = table, current_column, new_column}) do + [["ALTER TABLE ", quote_table(table.prefix, table.name), " RENAME ", + quote_name(current_column), " TO ", quote_name(new_column)]] + end + + def execute_ddl({:create, %Constraint{} = constraint}) do + table_name = quote_table(constraint.prefix, constraint.table) + queries = [["ALTER TABLE ", table_name, + " ADD ", new_constraint_expr(constraint)]] + + queries ++ comments_on("CONSTRAINT", constraint.name, constraint.comment, table_name) + end + + def execute_ddl({command, %Constraint{}, :cascade}) when command in @drops, + do: error!(nil, "PostgreSQL does not support `CASCADE` in DROP CONSTRAINT commands") + + def execute_ddl({command, %Constraint{} = constraint, :restrict}) when command in @drops do + [["ALTER TABLE ", quote_table(constraint.prefix, constraint.table), + " DROP CONSTRAINT ", if_do(command == :drop_if_exists, "IF EXISTS "), quote_name(constraint.name)]] + end + + def execute_ddl(string) when is_binary(string), do: [string] + + def execute_ddl(keyword) when is_list(keyword), + do: error!(nil, "PostgreSQL adapter does not support keyword lists in execute") + + @impl true + def ddl_logs(%Postgrex.Result{} = result) do + %{messages: messages} = result + + for message <- messages do + %{message: message, severity: severity} = message + + {ddl_log_level(severity), message, []} + end + end + + @impl true + def table_exists_query(table) do + {"SELECT true FROM information_schema.tables WHERE table_name = $1 AND table_schema = current_schema() LIMIT 1", [table]} + end + + defp drop_mode(:cascade), do: " CASCADE" + defp drop_mode(:restrict), do: [] + + # From https://www.postgresql.org/docs/current/protocol-error-fields.html. 
+ defp ddl_log_level("DEBUG"), do: :debug + defp ddl_log_level("LOG"), do: :info + defp ddl_log_level("INFO"), do: :info + defp ddl_log_level("NOTICE"), do: :info + defp ddl_log_level("WARNING"), do: :warn + defp ddl_log_level("ERROR"), do: :error + defp ddl_log_level("FATAL"), do: :error + defp ddl_log_level("PANIC"), do: :error + defp ddl_log_level(_severity), do: :info + + defp pk_definition(columns, prefix) do + pks = + for {_, name, _, opts} <- columns, + opts[:primary_key], + do: name + + case pks do + [] -> [] + _ -> [prefix, "PRIMARY KEY (", quote_names(pks), ")"] + end + end + + defp comments_on(_object, _name, nil), do: [] + defp comments_on(object, name, comment) do + [["COMMENT ON ", object, ?\s, name, " IS ", single_quote(comment)]] + end + + defp comments_on(_object, _name, nil, _table_name), do: [] + defp comments_on(object, name, comment, table_name) do + [["COMMENT ON ", object, ?\s, quote_name(name), " ON ", table_name, + " IS ", single_quote(comment)]] + end + + defp comments_for_columns(table_name, columns) do + Enum.flat_map(columns, fn + {_operation, column_name, _column_type, opts} -> + column_name = [table_name, ?. | quote_name(column_name)] + comments_on("COLUMN", column_name, opts[:comment]) + _ -> [] + end) + end + + defp column_definitions(table, columns) do + intersperse_map(columns, ", ", &column_definition(table, &1)) + end + + defp column_definition(table, {:add, name, %Reference{} = ref, opts}) do + [quote_name(name), ?\s, reference_column_type(ref.type, opts), + column_options(ref.type, opts), ", ", reference_expr(ref, table, name)] + end + + defp column_definition(_table, {:add, name, type, opts}) do + [quote_name(name), ?\s, column_type(type, opts), column_options(type, opts)] + end + + defp column_changes(table, columns) do + intersperse_map(columns, ", ", &column_change(table, &1)) + end + + defp column_change(table, {:add, name, %Reference{} = ref, opts}) do + ["ADD COLUMN ", quote_name(name), ?\s, reference_column_type(ref.type, opts), + column_options(ref.type, opts), ", ADD ", reference_expr(ref, table, name)] + end + + defp column_change(_table, {:add, name, type, opts}) do + ["ADD COLUMN ", quote_name(name), ?\s, column_type(type, opts), + column_options(type, opts)] + end + + defp column_change(table, {:add_if_not_exists, name, %Reference{} = ref, opts}) do + ["ADD COLUMN IF NOT EXISTS ", quote_name(name), ?\s, reference_column_type(ref.type, opts), + column_options(ref.type, opts), ", ADD ", reference_expr(ref, table, name)] + end + + defp column_change(_table, {:add_if_not_exists, name, type, opts}) do + ["ADD COLUMN IF NOT EXISTS ", quote_name(name), ?\s, column_type(type, opts), + column_options(type, opts)] + end + + defp column_change(table, {:modify, name, %Reference{} = ref, opts}) do + [drop_reference_expr(opts[:from], table, name), "ALTER COLUMN ", quote_name(name), + " TYPE ", reference_column_type(ref.type, opts), + ", ADD ", reference_expr(ref, table, name), + modify_null(name, opts), modify_default(name, ref.type, opts)] + end + + defp column_change(table, {:modify, name, type, opts}) do + [drop_reference_expr(opts[:from], table, name), "ALTER COLUMN ", quote_name(name), " TYPE ", + column_type(type, opts), modify_null(name, opts), modify_default(name, type, opts)] + end + + defp column_change(_table, {:remove, name}), do: ["DROP COLUMN ", quote_name(name)] + defp column_change(table, {:remove, name, %Reference{} = ref, _opts}) do + [drop_reference_expr(ref, table, name), "DROP COLUMN ", quote_name(name)] + end + defp 
column_change(_table, {:remove, name, _type, _opts}), do: ["DROP COLUMN ", quote_name(name)] + + defp column_change(table, {:remove_if_exists, name, %Reference{} = ref}) do + [drop_reference_if_exists_expr(ref, table, name), "DROP COLUMN IF EXISTS ", quote_name(name)] + end + defp column_change(_table, {:remove_if_exists, name, _type}), do: ["DROP COLUMN IF EXISTS ", quote_name(name)] + + defp modify_null(name, opts) do + case Keyword.get(opts, :null) do + true -> [", ALTER COLUMN ", quote_name(name), " DROP NOT NULL"] + false -> [", ALTER COLUMN ", quote_name(name), " SET NOT NULL"] + nil -> [] + end + end + + defp modify_default(name, type, opts) do + case Keyword.fetch(opts, :default) do + {:ok, val} -> [", ALTER COLUMN ", quote_name(name), " SET", default_expr({:ok, val}, type)] + :error -> [] + end + end + + defp column_options(type, opts) do + default = Keyword.fetch(opts, :default) + null = Keyword.get(opts, :null) + + [default_expr(default, type), null_expr(null)] + end + + defp null_expr(false), do: " NOT NULL" + defp null_expr(true), do: " NULL" + defp null_expr(_), do: [] + + defp new_constraint_expr(%Constraint{check: check} = constraint) when is_binary(check) do + ["CONSTRAINT ", quote_name(constraint.name), " CHECK (", check, ")", validate(constraint.validate)] + end + defp new_constraint_expr(%Constraint{exclude: exclude} = constraint) when is_binary(exclude) do + ["CONSTRAINT ", quote_name(constraint.name), " EXCLUDE USING ", exclude, validate(constraint.validate)] + end + + defp default_expr({:ok, nil}, _type), do: " DEFAULT NULL" + defp default_expr({:ok, literal}, type), do: [" DEFAULT ", default_type(literal, type)] + defp default_expr(:error, _), do: [] + + defp default_type(list, {:array, inner} = type) when is_list(list) do + ["ARRAY[", Enum.map(list, &default_type(&1, inner)) |> Enum.intersperse(?,), "]::", ecto_to_db(type)] + end + defp default_type(literal, _type) when is_binary(literal) do + if :binary.match(literal, <<0>>) == :nomatch and String.valid?(literal) do + single_quote(literal) + else + encoded = "\\x" <> Base.encode16(literal, case: :lower) + raise ArgumentError, "default values are interpolated as UTF-8 strings and cannot contain null bytes. " <> + "`#{inspect literal}` is invalid. If you want to write it as a binary, use \"#{encoded}\", " <> + "otherwise refer to PostgreSQL documentation for instructions on how to escape this SQL type" + end + end + defp default_type(literal, _type) when is_number(literal), do: to_string(literal) + defp default_type(literal, _type) when is_boolean(literal), do: to_string(literal) + defp default_type(%{} = map, :map) do + library = Application.get_env(:postgrex, :json_library, Jason) + default = IO.iodata_to_binary(library.encode_to_iodata!(map)) + [single_quote(default)] + end + defp default_type({:fragment, expr}, _type), + do: [expr] + defp default_type(expr, type), + do: raise(ArgumentError, "unknown default `#{inspect expr}` for type `#{inspect type}`. 
" <> + ":default may be a string, number, boolean, list of strings, list of integers, map (when type is Map), or a fragment(...)") + + defp index_expr(literal) when is_binary(literal), + do: literal + defp index_expr(literal), + do: quote_name(literal) + + defp options_expr(nil), + do: [] + defp options_expr(keyword) when is_list(keyword), + do: error!(nil, "PostgreSQL adapter does not support keyword lists in :options") + defp options_expr(options), + do: [?\s, options] + + defp column_type({:array, type}, opts), + do: [column_type(type, opts), "[]"] + + defp column_type(type, _opts) when type in ~w(time utc_datetime naive_datetime)a, + do: [ecto_to_db(type), "(0)"] + + defp column_type(type, opts) when type in ~w(time_usec utc_datetime_usec naive_datetime_usec)a do + precision = Keyword.get(opts, :precision) + type_name = ecto_to_db(type) + + if precision do + [type_name, ?(, to_string(precision), ?)] + else + type_name + end + end + + defp column_type(:identity, opts) do + start_value = [Keyword.get(opts, :start_value)] + increment = [Keyword.get(opts, :increment)] + type_name = ecto_to_db(:identity) + + cleanup = fn v -> is_integer(v) and v > 0 end + + sequence = + start_value + |> Enum.filter(cleanup) + |> Enum.map(&"START WITH #{&1}") + |> Kernel.++( + increment + |> Enum.filter(cleanup) + |> Enum.map(&"INCREMENT BY #{&1}") + ) + + case sequence do + [] -> [type_name, " GENERATED BY DEFAULT AS IDENTITY"] + _ -> [type_name, " GENERATED BY DEFAULT AS IDENTITY(", Enum.join(sequence, " "), ") "] + end + end + + defp column_type(type, opts) do + size = Keyword.get(opts, :size) + precision = Keyword.get(opts, :precision) + scale = Keyword.get(opts, :scale) + type_name = ecto_to_db(type) + + cond do + size -> [type_name, ?(, to_string(size), ?)] + precision -> [type_name, ?(, to_string(precision), ?,, to_string(scale || 0), ?)] + type == :string -> [type_name, "(255)"] + true -> type_name + end + end + + defp reference_expr(%Reference{} = ref, table, name) do + {current_columns, reference_columns} = Enum.unzip([{name, ref.column} | ref.with]) + + ["CONSTRAINT ", reference_name(ref, table, name), ?\s, + "FOREIGN KEY (", quote_names(current_columns), ") REFERENCES ", + quote_table(ref.prefix || table.prefix, ref.table), ?(, quote_names(reference_columns), ?), + reference_match(ref.match), reference_on_delete(ref.on_delete), + reference_on_update(ref.on_update), validate(ref.validate)] + end + + defp drop_reference_expr(%Reference{} = ref, table, name), + do: ["DROP CONSTRAINT ", reference_name(ref, table, name), ", "] + defp drop_reference_expr(_, _, _), + do: [] + + defp drop_reference_if_exists_expr(%Reference{} = ref, table, name), + do: ["DROP CONSTRAINT IF EXISTS ", reference_name(ref, table, name), ", "] + defp drop_reference_if_exists_expr(_, _, _), + do: [] + + defp reference_name(%Reference{name: nil}, table, column), + do: quote_name("#{table.name}_#{column}_fkey") + defp reference_name(%Reference{name: name}, _table, _column), + do: quote_name(name) + + defp reference_column_type(:serial, _opts), do: "integer" + defp reference_column_type(:bigserial, _opts), do: "bigint" + defp reference_column_type(:identity, _opts), do: "bigint" + defp reference_column_type(type, opts), do: column_type(type, opts) + + defp reference_on_delete(:nilify_all), do: " ON DELETE SET NULL" + defp reference_on_delete(:delete_all), do: " ON DELETE CASCADE" + defp reference_on_delete(:restrict), do: " ON DELETE RESTRICT" + defp reference_on_delete(_), do: [] + + defp reference_on_update(:nilify_all), do: " 
ON UPDATE SET NULL" + defp reference_on_update(:update_all), do: " ON UPDATE CASCADE" + defp reference_on_update(:restrict), do: " ON UPDATE RESTRICT" + defp reference_on_update(_), do: [] + + defp reference_match(nil), do: [] + defp reference_match(:full), do: " MATCH FULL" + defp reference_match(:simple), do: " MATCH SIMPLE" + defp reference_match(:partial), do: " MATCH PARTIAL" + + defp validate(false), do: " NOT VALID" + defp validate(_), do: [] + + ## Helpers + + defp get_source(query, sources, ix, source) do + {expr, name, _schema} = elem(sources, ix) + {expr || expr(source, sources, query), name} + end + + defp get_parent_sources_ix(query, as) do + case query.aliases[@parent_as] do + {%{aliases: %{^as => ix}}, sources} -> {ix, sources} + {%{} = parent, _sources} -> get_parent_sources_ix(parent, as) + end + end + + defp quote_qualified_name(name, sources, ix) do + {_, source, _} = elem(sources, ix) + [source, ?. | quote_name(name)] + end + + defp quote_names(names) do + intersperse_map(names, ?,, &quote_name/1) + end + + defp quote_name(name) when is_atom(name) do + quote_name(Atom.to_string(name)) + end + + defp quote_name(name) when is_binary(name) do + if String.contains?(name, "\"") do + error!(nil, "bad literal/field/table name #{inspect name} (\" is not permitted)") + end + + [?", name, ?"] + end + + defp quote_table(nil, name), do: quote_table(name) + defp quote_table(prefix, name), do: [quote_table(prefix), ?., quote_table(name)] + + defp quote_table(name) when is_atom(name), + do: quote_table(Atom.to_string(name)) + defp quote_table(name) do + if String.contains?(name, "\"") do + error!(nil, "bad table name #{inspect name}") + end + [?", name, ?"] + end + + # TRUE, ON, or 1 to enable the option, and FALSE, OFF, or 0 to disable it + defp quote_boolean(nil), do: nil + defp quote_boolean(true), do: "TRUE" + defp quote_boolean(false), do: "FALSE" + defp quote_boolean(value), do: error!(nil, "bad boolean value #{value}") + + defp format_to_sql(:text), do: "FORMAT TEXT" + defp format_to_sql(:map), do: "FORMAT JSON" + defp format_to_sql(:yaml), do: "FORMAT YAML" + + defp single_quote(value), do: [?', escape_string(value), ?'] + + defp intersperse_map(list, separator, mapper, acc \\ []) + defp intersperse_map([], _separator, _mapper, acc), + do: acc + defp intersperse_map([elem], _separator, mapper, acc), + do: [acc | mapper.(elem)] + defp intersperse_map([elem | rest], separator, mapper, acc), + do: intersperse_map(rest, separator, mapper, [acc, mapper.(elem), separator]) + + defp intersperse_reduce(list, separator, user_acc, reducer, acc \\ []) + defp intersperse_reduce([], _separator, user_acc, _reducer, acc), + do: {acc, user_acc} + defp intersperse_reduce([elem], _separator, user_acc, reducer, acc) do + {elem, user_acc} = reducer.(elem, user_acc) + {[acc | elem], user_acc} + end + defp intersperse_reduce([elem | rest], separator, user_acc, reducer, acc) do + {elem, user_acc} = reducer.(elem, user_acc) + intersperse_reduce(rest, separator, user_acc, reducer, [acc, elem, separator]) + end + + defp if_do(condition, value) do + if condition, do: value, else: [] + end + + defp escape_string(value) when is_binary(value) do + :binary.replace(value, "'", "''", [:global]) + end + + defp escape_json(value) when is_binary(value) do + escaped = + value + |> escape_string() + |> :binary.replace("\"", "\\\"", [:global]) + + [?", escaped, ?"] + end + + defp escape_json(value) when is_integer(value) do + Integer.to_string(value) + end + + defp escape_json(true), do: ["true"] + defp 
escape_json(false), do: ["false"] + + defp ecto_to_db({:array, t}), do: [ecto_to_db(t), ?[, ?]] + defp ecto_to_db(:id), do: "integer" + defp ecto_to_db(:identity), do: "bigint" + defp ecto_to_db(:serial), do: "serial" + defp ecto_to_db(:bigserial), do: "bigserial" + defp ecto_to_db(:binary_id), do: "uuid" + defp ecto_to_db(:string), do: "varchar" + defp ecto_to_db(:binary), do: "bytea" + defp ecto_to_db(:map), do: Application.fetch_env!(:ecto_sql, :postgres_map_type) + defp ecto_to_db({:map, _}), do: Application.fetch_env!(:ecto_sql, :postgres_map_type) + defp ecto_to_db(:time_usec), do: "time" + defp ecto_to_db(:utc_datetime), do: "timestamp" + defp ecto_to_db(:utc_datetime_usec), do: "timestamp" + defp ecto_to_db(:naive_datetime), do: "timestamp" + defp ecto_to_db(:naive_datetime_usec), do: "timestamp" + defp ecto_to_db(atom) when is_atom(atom), do: Atom.to_string(atom) + defp ecto_to_db(type) do + raise ArgumentError, + "unsupported type `#{inspect(type)}`. The type can either be an atom, a string " <> + "or a tuple of the form `{:map, t}` or `{:array, t}` where `t` itself follows the same conditions." + end + + defp error!(nil, message) do + raise ArgumentError, message + end + defp error!(query, message) do + raise Ecto.QueryError, query: query, message: message + end + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/sql.ex b/deps/ecto_sql/lib/ecto/adapters/sql.ex new file mode 100644 index 0000000..5b18e35 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/sql.ex @@ -0,0 +1,1262 @@ +defmodule Ecto.Adapters.SQL do + @moduledoc ~S""" + This application provides functionality for working with + SQL databases in `Ecto`. + + ## Built-in adapters + + By default, we support the following adapters: + + * `Ecto.Adapters.Postgres` for Postgres + * `Ecto.Adapters.MyXQL` for MySQL + * `Ecto.Adapters.Tds` for SQLServer + + ## Additional functions + + If your `Ecto.Repo` is backed by any of the SQL adapters above, + this module will inject additional functions into your repository: + + * `disconnect_all(interval, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.disconnect_all/3` + + * `explain(type, query, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.explain/4` + + * `query(sql, params, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.query/4` + + * `query!(sql, params, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.query!/4` + + * `query_many(sql, params, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.query_many/4` + + * `query_many!(sql, params, options \\ [])` - + shortcut for `Ecto.Adapters.SQL.query_many!/4` + + * `to_sql(type, query)` - + shortcut for `Ecto.Adapters.SQL.to_sql/3` + + Generally speaking, you must invoke those functions directly from + your repository, for example: `MyApp.Repo.query("SELECT true")`. + You can also invoke them direcltly from `Ecto.Adapters.SQL`, but + keep in mind that in such cases features such as "dynamic repositories" + won't be available. + + ## Migrations + + `ecto_sql` supports database migrations. You can generate a migration + with: + + $ mix ecto.gen.migration create_posts + + This will create a new file inside `priv/repo/migrations` with the + `change` function. Check `Ecto.Migration` for more information. 
+ + To interface with migrations, developers typically use mix tasks: + + * `mix ecto.migrations` - lists all available migrations and their status + * `mix ecto.migrate` - runs a migration + * `mix ecto.rollback` - rolls back a previously run migration + + If you want to run migrations programmatically, see `Ecto.Migrator`. + + ## SQL sandbox + + `ecto_sql` provides a sandbox for testing. The sandbox wraps each + test in a transaction, making sure the tests are isolated and can + run concurrently. See `Ecto.Adapters.SQL.Sandbox` for more information. + + ## Structure load and dumping + + If you have an existing database, you may want to dump its existing + structure and make it reproducible from within Ecto. This can be + achieved with two Mix tasks: + + * `mix ecto.load` - loads an existing structure into the database + * `mix ecto.dump` - dumps the existing database structure to the filesystem + + For creating and dropping databases, see `mix ecto.create` + and `mix ecto.drop` that are included as part of Ecto. + + ## Custom adapters + + Developers can implement their own SQL adapters by using + `Ecto.Adapters.SQL` and by implementing the callbacks required + by `Ecto.Adapters.SQL.Connection` for handling connections and + performing queries. The connection handling and pooling for SQL + adapters should be built using the `DBConnection` library. + + When using `Ecto.Adapters.SQL`, the following options are required: + + * `:driver` (required) - the database driver library. + For example: `:postgrex` + + """ + + require Logger + + @doc false + defmacro __using__(opts) do + quote do + @behaviour Ecto.Adapter + @behaviour Ecto.Adapter.Migration + @behaviour Ecto.Adapter.Queryable + @behaviour Ecto.Adapter.Schema + @behaviour Ecto.Adapter.Transaction + + opts = unquote(opts) + @conn __MODULE__.Connection + @driver Keyword.fetch!(opts, :driver) + + @impl true + defmacro __before_compile__(env) do + Ecto.Adapters.SQL.__before_compile__(@driver, env) + end + + @impl true + def ensure_all_started(config, type) do + Ecto.Adapters.SQL.ensure_all_started(@driver, config, type) + end + + @impl true + def init(config) do + Ecto.Adapters.SQL.init(@conn, @driver, config) + end + + @impl true + def checkout(meta, opts, fun) do + Ecto.Adapters.SQL.checkout(meta, opts, fun) + end + + @impl true + def checked_out?(meta) do + Ecto.Adapters.SQL.checked_out?(meta) + end + + @impl true + def loaders({:map, _}, type), do: [&Ecto.Type.embedded_load(type, &1, :json)] + def loaders(:binary_id, type), do: [Ecto.UUID, type] + def loaders(_, type), do: [type] + + @impl true + def dumpers({:map, _}, type), do: [&Ecto.Type.embedded_dump(type, &1, :json)] + def dumpers(:binary_id, type), do: [type, Ecto.UUID] + def dumpers(_, type), do: [type] + + ## Query + + @impl true + def prepare(:all, query) do + {:cache, {System.unique_integer([:positive]), IO.iodata_to_binary(@conn.all(query))}} + end + + def prepare(:update_all, query) do + {:cache, {System.unique_integer([:positive]), IO.iodata_to_binary(@conn.update_all(query))}} + end + + def prepare(:delete_all, query) do + {:cache, {System.unique_integer([:positive]), IO.iodata_to_binary(@conn.delete_all(query))}} + end + + @impl true + def execute(adapter_meta, query_meta, query, params, opts) do + Ecto.Adapters.SQL.execute(:named, adapter_meta, query_meta, query, params, opts) + end + + @impl true + def stream(adapter_meta, query_meta, query, params, opts) do + Ecto.Adapters.SQL.stream(adapter_meta, query_meta, query, params, opts) + end + + ## Schema + + @impl true + 
def autogenerate(:id), do: nil + def autogenerate(:embed_id), do: Ecto.UUID.generate() + def autogenerate(:binary_id), do: Ecto.UUID.bingenerate() + + @impl true + def insert_all(adapter_meta, schema_meta, header, rows, on_conflict, returning, placeholders, opts) do + Ecto.Adapters.SQL.insert_all(adapter_meta, schema_meta, @conn, header, rows, on_conflict, returning, placeholders, opts) + end + + @impl true + def insert(adapter_meta, %{source: source, prefix: prefix}, params, + {kind, conflict_params, _} = on_conflict, returning, opts) do + {fields, values} = :lists.unzip(params) + sql = @conn.insert(prefix, source, fields, [fields], on_conflict, returning, []) + Ecto.Adapters.SQL.struct(adapter_meta, @conn, sql, :insert, source, [], values ++ conflict_params, kind, returning, opts) + end + + @impl true + def update(adapter_meta, %{source: source, prefix: prefix}, fields, params, returning, opts) do + {fields, field_values} = :lists.unzip(fields) + filter_values = params |> Keyword.values() |> Enum.reject(&is_nil(&1)) + sql = @conn.update(prefix, source, fields, params, returning) + Ecto.Adapters.SQL.struct(adapter_meta, @conn, sql, :update, source, params, field_values ++ filter_values, :raise, returning, opts) + end + + @impl true + def delete(adapter_meta, %{source: source, prefix: prefix}, params, opts) do + filter_values = params |> Keyword.values() |> Enum.reject(&is_nil(&1)) + sql = @conn.delete(prefix, source, params, []) + Ecto.Adapters.SQL.struct(adapter_meta, @conn, sql, :delete, source, params, filter_values, :raise, [], opts) + end + + ## Transaction + + @impl true + def transaction(meta, opts, fun) do + Ecto.Adapters.SQL.transaction(meta, opts, fun) + end + + @impl true + def in_transaction?(meta) do + Ecto.Adapters.SQL.in_transaction?(meta) + end + + @impl true + def rollback(meta, value) do + Ecto.Adapters.SQL.rollback(meta, value) + end + + ## Migration + + @impl true + def execute_ddl(meta, definition, opts) do + Ecto.Adapters.SQL.execute_ddl(meta, @conn, definition, opts) + end + + defoverridable [prepare: 2, execute: 5, insert: 6, update: 6, delete: 4, insert_all: 8, + execute_ddl: 3, loaders: 2, dumpers: 2, autogenerate: 1, + ensure_all_started: 2, __before_compile__: 1] + end + end + + @doc """ + Converts the given query to SQL according to its kind and the + adapter in the given repository. + + ## Examples + + The examples below are meant for reference. Each adapter will + return a different result: + + iex> Ecto.Adapters.SQL.to_sql(:all, Repo, Post) + {"SELECT p.id, p.title, p.inserted_at, p.created_at FROM posts as p", []} + + iex> Ecto.Adapters.SQL.to_sql(:update_all, Repo, + from(p in Post, update: [set: [title: ^"hello"]])) + {"UPDATE posts AS p SET title = $1", ["hello"]} + + This function is also available under the repository with name `to_sql`: + + iex> Repo.to_sql(:all, Post) + {"SELECT p.id, p.title, p.inserted_at, p.created_at FROM posts as p", []} + + """ + @spec to_sql(:all | :update_all | :delete_all, Ecto.Repo.t, Ecto.Queryable.t) :: + {String.t, [term]} + def to_sql(kind, repo, queryable) do + case Ecto.Adapter.Queryable.prepare_query(kind, repo, queryable) do + {{:cached, _update, _reset, {_id, cached}}, params} -> + {String.Chars.to_string(cached), params} + + {{:cache, _update, {_id, prepared}}, params} -> + {prepared, params} + + {{:nocache, {_id, prepared}}, params} -> + {prepared, params} + end + end + + @doc """ + Executes an EXPLAIN statement or similar for the given query according to its kind and the + adapter in the given repository. 
+ + ## Examples + + # Postgres + iex> Ecto.Adapters.SQL.explain(Repo, :all, Post) + "Seq Scan on posts p0 (cost=0.00..12.12 rows=1 width=443)" + + # MySQL + iex> Ecto.Adapters.SQL.explain(Repo, :all, from(p in Post, where: p.title == "title")) |> IO.puts() + +----+-------------+-------+------------+------+---------------+------+---------+------+------+----------+-------------+ + | id | select_type | table | partitions | type | possible_keys | key | key_len | ref | rows | filtered | Extra | + +----+-------------+-------+------------+------+---------------+------+---------+------+------+----------+-------------+ + | 1 | SIMPLE | p0 | NULL | ALL | NULL | NULL | NULL | NULL | 1 | 100.0 | Using where | + +----+-------------+-------+------------+------+---------------+------+---------+------+------+----------+-------------+ + + # Shared opts + iex> Ecto.Adapters.SQL.explain(Repo, :all, Post, analyze: true, timeout: 20_000) + "Seq Scan on posts p0 (cost=0.00..11.70 rows=170 width=443) (actual time=0.013..0.013 rows=0 loops=1)\\nPlanning Time: 0.031 ms\\nExecution Time: 0.021 ms" + + It's safe to execute it for updates and deletes, no data change will be committed: + + iex> Ecto.Adapters.SQL.explain(Repo, :update_all, from(p in Post, update: [set: [title: "new title"]])) + "Update on posts p0 (cost=0.00..11.70 rows=170 width=449)\\n -> Seq Scan on posts p0 (cost=0.00..11.70 rows=170 width=449)" + + This function is also available under the repository with name `explain`: + + iex> Repo.explain(:all, from(p in Post, where: p.title == "title")) + "Seq Scan on posts p0 (cost=0.00..12.12 rows=1 width=443)\\n Filter: ((title)::text = 'title'::text)" + + ### Options + + Built-in adapters support passing `opts` to the EXPLAIN statement according to the following: + + Adapter | Supported opts + ---------------- | -------------- + Postgrex | `analyze`, `verbose`, `costs`, `settings`, `buffers`, `timing`, `summary` + MyXQL | None + + _Postgrex_: Check [PostgreSQL doc](https://www.postgresql.org/docs/current/sql-explain.html) + for version compatibility. + + _MyXQL_: `EXTENDED` and `PARTITIONS` opts were [deprecated](https://dev.mysql.com/doc/refman/5.7/en/explain.html) + and are enabled by default. + + Also note that: + + * Currently `:map`, `:yaml`, and `:text` format options are supported + for PostgreSQL. `:map` is the deserialized JSON encoding. The last two + options return the result as a string; + + * Any other value passed to `opts` will be forwarded to the underlying + adapter query function, including Repo shared options such as `:timeout`; + + * Non built-in adapters may have specific behavior and you should consult + their own documentation. 
+ + """ + @spec explain(pid() | Ecto.Repo.t | Ecto.Adapter.adapter_meta, + :all | :update_all | :delete_all, + Ecto.Queryable.t, opts :: Keyword.t) :: String.t | Exception.t + def explain(repo, operation, queryable, opts \\ []) + + def explain(repo, operation, queryable, opts) when is_atom(repo) or is_pid(repo) do + explain(Ecto.Adapter.lookup_meta(repo), operation, queryable, opts) + end + + def explain(%{repo: repo} = adapter_meta, operation, queryable, opts) do + Ecto.Multi.new() + |> Ecto.Multi.run(:explain, fn _, _ -> + {prepared, prepared_params} = to_sql(operation, repo, queryable) + sql_call(adapter_meta, :explain_query, [prepared], prepared_params, opts) + end) + |> Ecto.Multi.run(:rollback, fn _, _ -> + {:error, :forced_rollback} + end) + |> repo.transaction(opts) + |> case do + {:error, :rollback, :forced_rollback, %{explain: result}} -> result + {:error, :explain, error, _} -> raise error + _ -> raise "unable to execute explain" + end + end + + @doc """ + Forces all connections in the repo pool to disconnect within the given interval. + + Once this function is called, the pool will disconnect all of its connections + as they are checked in or as they are pinged. Checked in connections will be + randomly disconnected within the given time interval. Pinged connections are + immediately disconnected - as they are idle (according to `:idle_interval`). + + If the connection has a backoff configured (which is the case by default), + disconnecting means an attempt at a new connection will be done immediately + after, without starting a new process for each connection. However, if backoff + has been disabled, the connection process will terminate. In such cases, + disconnecting all connections may cause the pool supervisor to restart + depending on the max_restarts/max_seconds configuration of the pool, + so you will want to set those carefully. + + For convenience, this function is also available in the repository: + + iex> MyRepo.disconnect_all(60_000) + :ok + """ + @spec disconnect_all(pid | Ecto.Repo.t | Ecto.Adapter.adapter_meta, non_neg_integer, opts :: Keyword.t()) :: :ok + def disconnect_all(repo, interval, opts \\ []) + + def disconnect_all(repo, interval, opts) when is_atom(repo) or is_pid(repo) do + disconnect_all(Ecto.Adapter.lookup_meta(repo), interval, opts) + end + + def disconnect_all(%{pid: pid} = _adapter_meta, interval, opts) do + DBConnection.disconnect_all(pid, interval, opts) + end + + @doc """ + Returns a stream that runs a custom SQL query on given repo when reduced. + + In case of success it is a enumerable containing maps with at least two keys: + + * `:num_rows` - the number of rows affected + + * `:rows` - the result set as a list. `nil` may be returned + instead of the list if the command does not yield any row + as result (but still yields the number of affected rows, + like a `delete` command without returning would) + + In case of failure it raises an exception. + + If the adapter supports a collectable stream, the stream may also be used as + the collectable in `Enum.into/3`. Behaviour depends on the adapter. 
+ + ## Options + + * `:log` - When false, does not log the query + * `:max_rows` - The number of rows to load from the database as we stream + + ## Examples + + iex> Ecto.Adapters.SQL.stream(MyRepo, "SELECT $1::integer + $2", [40, 2]) |> Enum.to_list() + [%{rows: [[42]], num_rows: 1}] + + """ + @spec stream(Ecto.Repo.t, String.t, [term], Keyword.t) :: Enum.t + def stream(repo, sql, params \\ [], opts \\ []) do + repo + |> Ecto.Adapter.lookup_meta() + |> Ecto.Adapters.SQL.Stream.build(sql, params, opts) + end + + @doc """ + Same as `query/4` but raises on invalid queries. + """ + @spec query!(pid() | Ecto.Repo.t | Ecto.Adapter.adapter_meta, iodata, [term], Keyword.t) :: + %{:rows => nil | [[term] | binary], + :num_rows => non_neg_integer, + optional(atom) => any} + def query!(repo, sql, params \\ [], opts \\ []) do + case query(repo, sql, params, opts) do + {:ok, result} -> result + {:error, err} -> raise_sql_call_error err + end + end + + @doc """ + Runs a custom SQL query on the given repo. + + In case of success, it must return an `:ok` tuple containing + a map with at least two keys: + + * `:num_rows` - the number of rows affected + + * `:rows` - the result set as a list. `nil` may be returned + instead of the list if the command does not yield any row + as result (but still yields the number of affected rows, + like a `delete` command without returning would) + + ## Options + + * `:log` - When false, does not log the query + + ## Examples + + iex> Ecto.Adapters.SQL.query(MyRepo, "SELECT $1::integer + $2", [40, 2]) + {:ok, %{rows: [[42]], num_rows: 1}} + + For convenience, this function is also available under the repository: + + iex> MyRepo.query("SELECT $1::integer + $2", [40, 2]) + {:ok, %{rows: [[42]], num_rows: 1}} + + """ + @spec query(pid() | Ecto.Repo.t | Ecto.Adapter.adapter_meta, iodata, [term], Keyword.t) :: + {:ok, %{:rows => nil | [[term] | binary], + :num_rows => non_neg_integer, + optional(atom) => any}} + | {:error, Exception.t} + def query(repo, sql, params \\ [], opts \\ []) + + def query(repo, sql, params, opts) when is_atom(repo) or is_pid(repo) do + query(Ecto.Adapter.lookup_meta(repo), sql, params, opts) + end + + def query(adapter_meta, sql, params, opts) do + sql_call(adapter_meta, :query, [sql], params, opts) + end + + @doc """ + Same as `query_many/4` but raises on invalid queries. + """ + @spec query_many!(Ecto.Repo.t | Ecto.Adapter.adapter_meta, iodata, [term], Keyword.t) :: + [%{:rows => nil | [[term] | binary], + :num_rows => non_neg_integer, + optional(atom) => any}] + def query_many!(repo, sql, params \\ [], opts \\ []) do + case query_many(repo, sql, params, opts) do + {:ok, result} -> result + {:error, err} -> raise_sql_call_error err + end + end + + @doc """ + Runs a custom SQL query that returns multiple results on the given repo. + + In case of success, it must return an `:ok` tuple containing + a list of maps with at least two keys: + + * `:num_rows` - the number of rows affected + + * `:rows` - the result set as a list. 
`nil` may be returned + instead of the list if the command does not yield any row + as result (but still yields the number of affected rows, + like a `delete` command without returning would) + + ## Options + + * `:log` - When false, does not log the query + + ## Examples + + iex> Ecto.Adapters.SQL.query_many(MyRepo, "SELECT $1; SELECT $2;", [40, 2]) + {:ok, [%{rows: [[40]], num_rows: 1}, %{rows: [[2]], num_rows: 1}]} + + For convenience, this function is also available under the repository: + + iex> MyRepo.query_many(SELECT $1; SELECT $2;", [40, 2]) + {:ok, [%{rows: [[40]], num_rows: 1}, %{rows: [[2]], num_rows: 1}]} + + """ + @spec query_many(pid() | Ecto.Repo.t | Ecto.Adapter.adapter_meta, iodata, [term], Keyword.t) :: + {:ok, [%{:rows => nil | [[term] | binary], + :num_rows => non_neg_integer, + optional(atom) => any}]} + | {:error, Exception.t} + def query_many(repo, sql, params \\ [], opts \\ []) + + def query_many(repo, sql, params, opts) when is_atom(repo) or is_pid(repo) do + query_many(Ecto.Adapter.lookup_meta(repo), sql, params, opts) + end + + def query_many(adapter_meta, sql, params, opts) do + sql_call(adapter_meta, :query_many, [sql], params, opts) + end + + defp sql_call(adapter_meta, callback, args, params, opts) do + %{pid: pool, telemetry: telemetry, sql: sql, opts: default_opts} = adapter_meta + conn = get_conn_or_pool(pool) + opts = with_log(telemetry, params, opts ++ default_opts) + args = args ++ [params, opts] + apply(sql, callback, [conn | args]) + end + + defp put_source(opts, %{sources: sources}) when is_binary(elem(elem(sources, 0), 0)) do + {source, _, _} = elem(sources, 0) + [source: source] ++ opts + end + + defp put_source(opts, _) do + opts + end + + @doc """ + Check if the given `table` exists. + + Returns `true` if the `table` exists in the `repo`, otherwise `false`. + The table is checked against the current database/schema in the connection. + """ + @spec table_exists?(Ecto.Repo.t, table :: String.t) :: boolean + def table_exists?(repo, table) when is_atom(repo) do + %{sql: sql} = adapter_meta = Ecto.Adapter.lookup_meta(repo) + {query, params} = sql.table_exists_query(table) + query!(adapter_meta, query, params, []).num_rows != 0 + end + + # Returns a formatted table for a given query `result`. 
+ # + # ## Examples + # + # iex> Ecto.Adapters.SQL.format_table(query) |> IO.puts() + # +---------------+---------+--------+ + # | title | counter | public | + # +---------------+---------+--------+ + # | My Post Title | 1 | NULL | + # +---------------+---------+--------+ + @doc false + @spec format_table(%{:columns => [String.t] | nil, :rows => [term()] | nil, optional(atom) => any()}) :: String.t + def format_table(result) + + def format_table(nil), do: "" + def format_table(%{columns: nil}), do: "" + def format_table(%{columns: []}), do: "" + def format_table(%{columns: columns, rows: nil}), do: format_table(%{columns: columns, rows: []}) + + def format_table(%{columns: columns, rows: rows}) do + column_widths = + [columns | rows] + |> List.zip() + |> Enum.map(&Tuple.to_list/1) + |> Enum.map(fn column_with_rows -> + column_with_rows |> Enum.map(&binary_length/1) |> Enum.max() + end) + + [ + separator(column_widths), + "\n", + cells(columns, column_widths), + "\n", + separator(column_widths), + "\n", + Enum.map(rows, &cells(&1, column_widths) ++ ["\n"]), + separator(column_widths) + ] + |> IO.iodata_to_binary() + end + + defp binary_length(nil), do: 4 # NULL + defp binary_length(binary) when is_binary(binary), do: String.length(binary) + defp binary_length(other), do: other |> inspect() |> String.length() + + defp separator(widths) do + Enum.map(widths, & [?+, ?-, String.duplicate("-", &1), ?-]) ++ [?+] + end + + defp cells(items, widths) do + cell = + [items, widths] + |> List.zip() + |> Enum.map(fn {item, width} -> [?|, " ", format_item(item, width) , " "] end) + + [cell | [?|]] + end + + defp format_item(nil, width), do: String.pad_trailing("NULL", width) + defp format_item(item, width) when is_binary(item), do: String.pad_trailing(item, width) + defp format_item(item, width) when is_number(item), do: item |> inspect() |> String.pad_leading(width) + defp format_item(item, width), do: item |> inspect() |> String.pad_trailing(width) + + ## Callbacks + + @doc false + def __before_compile__(driver, _env) do + case Application.get_env(:ecto, :json_library) do + nil -> + :ok + + Jason -> + IO.warn """ + Jason is the default :json_library in Ecto 3.0. + You no longer need to configure it explicitly, + please remove this line from your config files: + + config :ecto, :json_library, Jason + + """ + + value -> + IO.warn """ + The :json_library configuration for the :ecto application is deprecated. + Please configure the :json_library in the driver instead: + + config #{inspect driver}, :json_library, #{inspect value} + + """ + end + + quote do + @doc """ + A convenience function for SQL-based repositories that executes the given query. + + See `Ecto.Adapters.SQL.query/4` for more information. + """ + def query(sql, params \\ [], opts \\ []) do + Ecto.Adapters.SQL.query(get_dynamic_repo(), sql, params, opts) + end + + @doc """ + A convenience function for SQL-based repositories that executes the given query. + + See `Ecto.Adapters.SQL.query!/4` for more information. + """ + def query!(sql, params \\ [], opts \\ []) do + Ecto.Adapters.SQL.query!(get_dynamic_repo(), sql, params, opts) + end + + @doc """ + A convenience function for SQL-based repositories that executes the given multi-result query. + + See `Ecto.Adapters.SQL.query_many/4` for more information. + """ + def query_many(sql, params \\ [], opts \\ []) do + Ecto.Adapters.SQL.query_many(get_dynamic_repo(), sql, params, opts) + end + + @doc """ + A convenience function for SQL-based repositories that executes the given multi-result query. 
+ + See `Ecto.Adapters.SQL.query_many!/4` for more information. + """ + def query_many!(sql, params \\ [], opts \\ []) do + Ecto.Adapters.SQL.query_many!(get_dynamic_repo(), sql, params, opts) + end + + @doc """ + A convenience function for SQL-based repositories that translates the given query to SQL. + + See `Ecto.Adapters.SQL.to_sql/3` for more information. + """ + def to_sql(operation, queryable) do + Ecto.Adapters.SQL.to_sql(operation, get_dynamic_repo(), queryable) + end + + @doc """ + A convenience function for SQL-based repositories that executes an EXPLAIN statement or similar + depending on the adapter to obtain statistics for the given query. + + See `Ecto.Adapters.SQL.explain/4` for more information. + """ + def explain(operation, queryable, opts \\ []) do + Ecto.Adapters.SQL.explain(get_dynamic_repo(), operation, queryable, opts) + end + + @doc """ + A convenience function for SQL-based repositories that forces all connections in the + pool to disconnect within the given interval. + + See `Ecto.Adapters.SQL.disconnect_all/3` for more information. + """ + def disconnect_all(interval, opts \\ []) do + Ecto.Adapters.SQL.disconnect_all(get_dynamic_repo(), interval, opts) + end + end + end + + @doc false + def ensure_all_started(driver, _config, type) do + Application.ensure_all_started(driver, type) + end + + @pool_opts [:timeout, :pool, :pool_size] ++ + [:queue_target, :queue_interval, :ownership_timeout, :repo] + + @doc false + def init(connection, driver, config) do + unless Code.ensure_loaded?(connection) do + raise """ + could not find #{inspect connection}. + + Please verify you have added #{inspect driver} as a dependency: + + {#{inspect driver}, ">= 0.0.0"} + + And remember to recompile Ecto afterwards by cleaning the current build: + + mix deps.clean --build ecto + """ + end + + log = Keyword.get(config, :log, :debug) + stacktrace = Keyword.get(config, :stacktrace, nil) + telemetry_prefix = Keyword.fetch!(config, :telemetry_prefix) + telemetry = {config[:repo], log, telemetry_prefix ++ [:query]} + + config = adapter_config(config) + opts = Keyword.take(config, @pool_opts) + meta = %{telemetry: telemetry, sql: connection, stacktrace: stacktrace, opts: opts} + {:ok, connection.child_spec(config), meta} + end + + defp adapter_config(config) do + if Keyword.has_key?(config, :pool_timeout) do + message = """ + :pool_timeout option no longer has an effect and has been replaced with an improved queuing system. + See \"Queue config\" in DBConnection.start_link/2 documentation for more information. 
+ """ + + IO.warn(message) + end + + config + |> Keyword.delete(:name) + |> Keyword.update(:pool, DBConnection.ConnectionPool, &normalize_pool/1) + end + + defp normalize_pool(pool) do + if Code.ensure_loaded?(pool) && function_exported?(pool, :unboxed_run, 2) do + DBConnection.Ownership + else + pool + end + end + + @doc false + def checkout(adapter_meta, opts, callback) do + checkout_or_transaction(:run, adapter_meta, opts, callback) + end + + @doc false + def checked_out?(adapter_meta) do + %{pid: pool} = adapter_meta + get_conn(pool) != nil + end + + ## Query + + @doc false + def insert_all(adapter_meta, schema_meta, conn, header, rows, on_conflict, returning, placeholders, opts) do + %{source: source, prefix: prefix} = schema_meta + {_, conflict_params, _} = on_conflict + + {rows, params} = + case rows do + {%Ecto.Query{} = query, params} -> {query, Enum.reverse(params)} + rows -> unzip_inserts(header, rows) + end + + sql = conn.insert(prefix, source, header, rows, on_conflict, returning, placeholders) + + opts = if is_nil(Keyword.get(opts, :cache_statement)) do + [{:cache_statement, "ecto_insert_all_#{source}"} | opts] + else + opts + end + + all_params = placeholders ++ Enum.reverse(params, conflict_params) + + %{num_rows: num, rows: rows} = query!(adapter_meta, sql, all_params, opts) + {num, rows} + end + + defp unzip_inserts(header, rows) do + Enum.map_reduce rows, [], fn fields, params -> + Enum.map_reduce header, params, fn key, acc -> + case :lists.keyfind(key, 1, fields) do + {^key, {%Ecto.Query{} = query, query_params}} -> + {{query, length(query_params)}, Enum.reverse(query_params, acc)} + + {^key, {:placeholder, placeholder_index}} -> + {{:placeholder, Integer.to_string(placeholder_index)}, acc} + + {^key, value} -> {key, [value | acc]} + + false -> {nil, acc} + end + end + end + end + + @doc false + def execute(prepare, adapter_meta, query_meta, prepared, params, opts) do + %{num_rows: num, rows: rows} = + execute!(prepare, adapter_meta, prepared, params, put_source(opts, query_meta)) + + {num, rows} + end + + defp execute!(prepare, adapter_meta, {:cache, update, {id, prepared}}, params, opts) do + name = prepare_name(prepare, id) + + case sql_call(adapter_meta, :prepare_execute, [name, prepared], params, opts) do + {:ok, query, result} -> + maybe_update_cache(prepare, update, {id, query}) + result + {:error, err} -> + raise_sql_call_error err + end + end + + defp execute!(:unnamed = prepare, adapter_meta, {:cached, _update, _reset, {id, cached}}, params, opts) do + name = prepare_name(prepare, id) + prepared = String.Chars.to_string(cached) + + case sql_call(adapter_meta, :prepare_execute, [name, prepared], params, opts) do + {:ok, _query, result} -> + result + {:error, err} -> + raise_sql_call_error err + end + end + + defp execute!(:named = _prepare, adapter_meta, {:cached, update, reset, {id, cached}}, params, opts) do + case sql_call(adapter_meta, :execute, [cached], params, opts) do + {:ok, query, result} -> + update.({id, query}) + result + {:ok, result} -> + result + {:error, err} -> + raise_sql_call_error err + {:reset, err} -> + reset.({id, String.Chars.to_string(cached)}) + raise_sql_call_error err + end + end + + defp execute!(_prepare, adapter_meta, {:nocache, {_id, prepared}}, params, opts) do + case sql_call(adapter_meta, :query, [prepared], params, opts) do + {:ok, res} -> res + {:error, err} -> raise_sql_call_error err + end + end + + defp prepare_name(:named, id), do: "ecto_" <> Integer.to_string(id) + defp prepare_name(:unnamed, _id), do: "" + + defp 
maybe_update_cache(:named = _prepare, update, value), do: update.(value) + defp maybe_update_cache(:unnamed = _prepare, _update, _value), do: :noop + + @doc false + def stream(adapter_meta, query_meta, prepared, params, opts) do + do_stream(adapter_meta, prepared, params, put_source(opts, query_meta)) + end + + defp do_stream(adapter_meta, {:cache, _, {_, prepared}}, params, opts) do + prepare_stream(adapter_meta, prepared, params, opts) + end + + defp do_stream(adapter_meta, {:cached, _, _, {_, cached}}, params, opts) do + prepare_stream(adapter_meta, String.Chars.to_string(cached), params, opts) + end + + defp do_stream(adapter_meta, {:nocache, {_id, prepared}}, params, opts) do + prepare_stream(adapter_meta, prepared, params, opts) + end + + defp prepare_stream(adapter_meta, prepared, params, opts) do + adapter_meta + |> Ecto.Adapters.SQL.Stream.build(prepared, params, opts) + |> Stream.map(fn(%{num_rows: nrows, rows: rows}) -> {nrows, rows} end) + end + + defp raise_sql_call_error(%DBConnection.OwnershipError{} = err) do + message = err.message <> "\nSee Ecto.Adapters.SQL.Sandbox docs for more information." + raise %{err | message: message} + end + + defp raise_sql_call_error(err), do: raise err + + @doc false + def reduce(adapter_meta, statement, params, opts, acc, fun) do + %{pid: pool, telemetry: telemetry, sql: sql, opts: default_opts} = adapter_meta + opts = with_log(telemetry, params, opts ++ default_opts) + + case get_conn(pool) do + %DBConnection{conn_mode: :transaction} = conn -> + sql + |> apply(:stream, [conn, statement, params, opts]) + |> Enumerable.reduce(acc, fun) + + _ -> + raise "cannot reduce stream outside of transaction" + end + end + + @doc false + def into(adapter_meta, statement, params, opts) do + %{pid: pool, telemetry: telemetry, sql: sql, opts: default_opts} = adapter_meta + opts = with_log(telemetry, params, opts ++ default_opts) + + case get_conn(pool) do + %DBConnection{conn_mode: :transaction} = conn -> + sql + |> apply(:stream, [conn, statement, params, opts]) + |> Collectable.into() + + _ -> + raise "cannot collect into stream outside of transaction" + end + end + + @doc false + def struct(adapter_meta, conn, sql, operation, source, params, values, on_conflict, returning, opts) do + opts = if is_nil(Keyword.get(opts, :cache_statement)) do + [{:cache_statement, "ecto_#{operation}_#{source}_#{length(params)}"} | opts] + else + opts + end + + case query(adapter_meta, sql, values, opts) do + {:ok, %{rows: nil, num_rows: 1}} -> + {:ok, []} + + {:ok, %{rows: [values], num_rows: 1}} -> + {:ok, Enum.zip(returning, values)} + + {:ok, %{num_rows: 0}} -> + if on_conflict == :nothing, do: {:ok, []}, else: {:error, :stale} + + {:ok, %{num_rows: num_rows}} when num_rows > 1 -> + raise Ecto.MultiplePrimaryKeyError, + source: source, params: params, count: num_rows, operation: operation + + {:error, err} -> + case conn.to_constraints(err, source: source) do + [] -> raise_sql_call_error err + constraints -> {:invalid, constraints} + end + end + end + + ## Transactions + + @doc false + def transaction(adapter_meta, opts, callback) do + checkout_or_transaction(:transaction, adapter_meta, opts, callback) + end + + @doc false + def in_transaction?(%{pid: pool}) do + match?(%DBConnection{conn_mode: :transaction}, get_conn(pool)) + end + + @doc false + def rollback(%{pid: pool}, value) do + case get_conn(pool) do + %DBConnection{conn_mode: :transaction} = conn -> DBConnection.rollback(conn, value) + _ -> raise "cannot call rollback outside of transaction" + end + end + + ## 
Migrations + + @doc false + def execute_ddl(meta, conn, definition, opts) do + ddl_logs = + definition + |> conn.execute_ddl() + |> List.wrap() + |> Enum.map(&query!(meta, &1, [], opts)) + |> Enum.flat_map(&conn.ddl_logs/1) + + {:ok, ddl_logs} + end + + @doc false + def raise_migration_pool_size_error do + raise Ecto.MigrationError, """ + Migrations failed to run because the connection pool size is less than 2. + + Ecto requires a pool size of at least 2 to support concurrent migrators. + When migrations run, Ecto uses one connection to maintain a lock and + another to run migrations. + + If you are running migrations with Mix, you can increase the number + of connections via the pool size option: + + mix ecto.migrate --pool-size 2 + + If you are running the Ecto.Migrator programmatically, you can configure + the pool size via your application config: + + config :my_app, Repo, + ..., + pool_size: 2 # at least + """ + end + + ## Log + + defp with_log(telemetry, params, opts) do + [log: &log(telemetry, params, &1, opts)] ++ opts + end + + defp log({repo, log, event_name}, params, entry, opts) do + %{ + connection_time: query_time, + decode_time: decode_time, + pool_time: queue_time, + idle_time: idle_time, + result: result, + query: query + } = entry + + source = Keyword.get(opts, :source) + query = String.Chars.to_string(query) + result = with {:ok, _query, res} <- result, do: {:ok, res} + stacktrace = Keyword.get(opts, :stacktrace) + + params = + Enum.map(params, fn + %Ecto.Query.Tagged{value: value} -> value + value -> value + end) + + acc = + if idle_time, do: [idle_time: idle_time], else: [] + + measurements = + log_measurements( + [query_time: query_time, decode_time: decode_time, queue_time: queue_time], + 0, + acc + ) + + metadata = %{ + type: :ecto_sql_query, + repo: repo, + result: result, + params: params, + query: query, + source: source, + stacktrace: stacktrace, + options: Keyword.get(opts, :telemetry_options, []) + } + + if event_name = Keyword.get(opts, :telemetry_event, event_name) do + :telemetry.execute(event_name, measurements, metadata) + end + + case Keyword.get(opts, :log, log) do + true -> + Logger.log( + log, + fn -> log_iodata(measurements, repo, source, query, params, result, stacktrace) end, + ansi_color: sql_color(query) + ) + + false -> + :ok + + level -> + Logger.log( + level, + fn -> log_iodata(measurements, repo, source, query, params, result, stacktrace) end, + ansi_color: sql_color(query) + ) + end + + :ok + end + + defp log_measurements([{_, nil} | rest], total, acc), + do: log_measurements(rest, total, acc) + + defp log_measurements([{key, value} | rest], total, acc), + do: log_measurements(rest, total + value, [{key, value} | acc]) + + defp log_measurements([], total, acc), + do: Map.new([total_time: total] ++ acc) + + defp log_iodata(measurements, repo, source, query, params, result, stacktrace) do + [ + "QUERY", + ?\s, + log_ok_error(result), + log_ok_source(source), + log_time("db", measurements, :query_time, true), + log_time("decode", measurements, :decode_time, false), + log_time("queue", measurements, :queue_time, false), + log_time("idle", measurements, :idle_time, true), + ?\n, + query, + ?\s, + inspect(params, charlists: false), + log_stacktrace(stacktrace, repo) + ] + end + + defp log_ok_error({:ok, _res}), do: "OK" + defp log_ok_error({:error, _err}), do: "ERROR" + + defp log_ok_source(nil), do: "" + defp log_ok_source(source), do: " source=#{inspect(source)}" + + defp log_time(label, measurements, key, force) do + case measurements do + 
%{^key => time} -> + us = System.convert_time_unit(time, :native, :microsecond) + ms = div(us, 100) / 10 + + if force or ms > 0 do + [?\s, label, ?=, :io_lib_format.fwrite_g(ms), ?m, ?s] + else + [] + end + + %{} -> + [] + end + end + + defp log_stacktrace(stacktrace, repo) do + with [_ | _] <- stacktrace, + {module, function, arity, info} <- last_non_ecto(Enum.reverse(stacktrace), repo, nil) do + [ + IO.ANSI.light_black(), + ?\n, + "โ†ณ ", + Exception.format_mfa(module, function, arity), + log_stacktrace_info(info) + ] + else + _ -> [] + end + end + + defp log_stacktrace_info([file: file, line: line] ++ _) do + [", at: ", file, ?:, Integer.to_string(line)] + end + + defp log_stacktrace_info(_) do + [] + end + + @repo_modules [Ecto.Repo.Queryable, Ecto.Repo.Schema, Ecto.Repo.Transaction] + + defp last_non_ecto([{mod, _, _, _} | _stacktrace], repo, last) + when mod == repo or mod in @repo_modules, + do: last + + defp last_non_ecto([last | stacktrace], repo, _last), + do: last_non_ecto(stacktrace, repo, last) + + defp last_non_ecto([], _repo, last), + do: last + + ## Connection helpers + + defp checkout_or_transaction(fun, adapter_meta, opts, callback) do + %{pid: pool, telemetry: telemetry, opts: default_opts} = adapter_meta + opts = with_log(telemetry, [], opts ++ default_opts) + + callback = fn conn -> + previous_conn = put_conn(pool, conn) + + try do + callback.() + after + reset_conn(pool, previous_conn) + end + end + + apply(DBConnection, fun, [get_conn_or_pool(pool), callback, opts]) + end + + defp get_conn_or_pool(pool) do + Process.get(key(pool), pool) + end + + defp get_conn(pool) do + Process.get(key(pool)) + end + + defp put_conn(pool, conn) do + Process.put(key(pool), conn) + end + + defp reset_conn(pool, conn) do + if conn do + put_conn(pool, conn) + else + Process.delete(key(pool)) + end + end + + defp key(pool), do: {__MODULE__, pool} + + defp sql_color("SELECT" <> _), do: :cyan + defp sql_color("ROLLBACK" <> _), do: :red + defp sql_color("LOCK" <> _), do: :white + defp sql_color("INSERT" <> _), do: :green + defp sql_color("UPDATE" <> _), do: :yellow + defp sql_color("DELETE" <> _), do: :red + defp sql_color("begin" <> _), do: :magenta + defp sql_color("commit" <> _), do: :magenta + defp sql_color(_), do: nil +end diff --git a/deps/ecto_sql/lib/ecto/adapters/sql/application.ex b/deps/ecto_sql/lib/ecto/adapters/sql/application.ex new file mode 100644 index 0000000..682f162 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/sql/application.ex @@ -0,0 +1,14 @@ +defmodule Ecto.Adapters.SQL.Application do + @moduledoc false + use Application + + def start(_type, _args) do + children = [ + {DynamicSupervisor, strategy: :one_for_one, name: Ecto.MigratorSupervisor}, + {Task.Supervisor, name: Ecto.Adapters.SQL.StorageSupervisor}, + ] + + opts = [strategy: :one_for_one, name: Ecto.Adapters.SQL.Supervisor] + Supervisor.start_link(children, opts) + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/sql/connection.ex b/deps/ecto_sql/lib/ecto/adapters/sql/connection.ex new file mode 100644 index 0000000..6a76b08 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/sql/connection.ex @@ -0,0 +1,135 @@ +defmodule Ecto.Adapters.SQL.Connection do + @moduledoc """ + Specifies the behaviour to be implemented by all SQL connections. 
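+ + As a rough illustration only (the `MyAdapter` and `MyDriver` names below are hypothetical and not part of ecto_sql), an adapter connection module typically wires these callbacks straight through to its driver: + + defmodule MyAdapter.Connection do + @behaviour Ecto.Adapters.SQL.Connection + + @impl true + def child_spec(opts), do: {MyDriver, opts} + + @impl true + def query(conn, statement, params, opts) do + # delegate to the underlying driver process + MyDriver.query(conn, statement, params, opts) + end + + # remaining callbacks omitted in this sketch + end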
+ """ + + @typedoc "The query name" + @type name :: String.t + + @typedoc "The SQL statement" + @type statement :: String.t + + @typedoc "The cached query which is a DBConnection Query" + @type cached :: map + + @type connection :: DBConnection.conn() + @type params :: [term] + + @doc """ + Receives options and returns `DBConnection` supervisor child + specification. + """ + @callback child_spec(options :: Keyword.t) :: {module, Keyword.t} + + @doc """ + Prepares and executes the given query with `DBConnection`. + """ + @callback prepare_execute(connection, name, statement, params, options :: Keyword.t) :: + {:ok, cached, term} | {:error, Exception.t} + + @doc """ + Executes a cached query. + """ + @callback execute(connection, cached, params, options :: Keyword.t) :: + {:ok, cached, term} | {:ok, term} | {:error | :reset, Exception.t} + + @doc """ + Runs the given statement as a query. + """ + @callback query(connection, statement, params, options :: Keyword.t) :: + {:ok, term} | {:error, Exception.t} + + @doc """ + Runs the given statement as a multi-result query. + """ + @callback query_many(connection, statement, params, options :: Keyword.t) :: + {:ok, term} | {:error, Exception.t} + + @doc """ + Returns a stream that prepares and executes the given query with + `DBConnection`. + """ + @callback stream(connection, statement, params, options :: Keyword.t) :: + Enum.t + + @doc """ + Receives the exception returned by `c:query/4`. + + The constraints are in the keyword list and must return the + constraint type, like `:unique`, and the constraint name as + a string, for example: + + [unique: "posts_title_index"] + + Must return an empty list if the error does not come + from any constraint. + """ + @callback to_constraints(exception :: Exception.t, options :: Keyword.t) :: Keyword.t + + ## Queries + + @doc """ + Receives a query and must return a SELECT query. + """ + @callback all(query :: Ecto.Query.t) :: iodata + + @doc """ + Receives a query and values to update and must return an UPDATE query. + """ + @callback update_all(query :: Ecto.Query.t) :: iodata + + @doc """ + Receives a query and must return a DELETE query. + """ + @callback delete_all(query :: Ecto.Query.t) :: iodata + + @doc """ + Returns an INSERT for the given `rows` in `table` returning + the given `returning`. + """ + @callback insert(prefix ::String.t, table :: String.t, + header :: [atom], rows :: [[atom | nil]], + on_conflict :: Ecto.Adapter.Schema.on_conflict, returning :: [atom], + placeholders :: [term]) :: iodata + + @doc """ + Returns an UPDATE for the given `fields` in `table` filtered by + `filters` returning the given `returning`. + """ + @callback update(prefix :: String.t, table :: String.t, fields :: [atom], + filters :: [atom], returning :: [atom]) :: iodata + + @doc """ + Returns a DELETE for the `filters` returning the given `returning`. + """ + @callback delete(prefix :: String.t, table :: String.t, + filters :: [atom], returning :: [atom]) :: iodata + + @doc """ + Executes an EXPLAIN query or similar depending on the adapter to obtains statistics of the given query. + + Receives the `connection`, `query`, `params` for the query, + and all `opts` including those related to the EXPLAIN statement and shared opts. + + Must execute the explain query and return the result. + """ + @callback explain_query(connection, query :: String.t, params :: Keyword.t, opts :: Keyword.t) :: + {:ok, term} | {:error, Exception.t} + + ## DDL + + @doc """ + Receives a DDL command and returns a query that executes it. 
+ """ + @callback execute_ddl(command :: Ecto.Adapter.Migration.command) :: String.t | [iodata] + + @doc """ + Receives a query result and returns a list of logs. + """ + @callback ddl_logs(result :: term) :: [{Logger.level, Logger.message, Logger.metadata}] + + @doc """ + Returns a queryable to check if the given `table` exists. + """ + @callback table_exists_query(table :: String.t) :: {iodata, [term]} +end diff --git a/deps/ecto_sql/lib/ecto/adapters/sql/sandbox.ex b/deps/ecto_sql/lib/ecto/adapters/sql/sandbox.ex new file mode 100644 index 0000000..0adc5ff --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/sql/sandbox.ex @@ -0,0 +1,628 @@ +defmodule Ecto.Adapters.SQL.Sandbox do + @moduledoc ~S""" + A pool for concurrent transactional tests. + + The sandbox pool is implemented on top of an ownership mechanism. + When started, the pool is in automatic mode, which means the + repository will automatically check connections out as with any + other pool. + + The `mode/2` function can be used to change the pool mode from + automatic to either manual or shared. In the latter two modes, + the connection must be explicitly checked out before use. + When explicit checkouts are made, the sandbox will wrap the + connection in a transaction by default and control who has + access to it. This means developers have a safe mechanism for + running concurrent tests against the database. + + ## Database support + + While both PostgreSQL and MySQL support SQL Sandbox, only PostgreSQL + supports concurrent tests while running the SQL Sandbox. Therefore, do + not run concurrent tests with MySQL as you may run into deadlocks due to + its transaction implementation. + + ## Example + + The first step is to configure your database to use the + `Ecto.Adapters.SQL.Sandbox` pool. You set those options in your + `config/config.exs` (or preferably `config/test.exs`) if you + haven't yet: + + config :my_app, Repo, + pool: Ecto.Adapters.SQL.Sandbox + + Now with the test database properly configured, you can write + transactional tests: + + # At the end of your test_helper.exs + # Set the pool mode to manual for explicit checkouts + Ecto.Adapters.SQL.Sandbox.mode(Repo, :manual) + + defmodule PostTest do + # Once the mode is manual, tests can also be async + use ExUnit.Case, async: true + + setup do + # Explicitly get a connection before each test + :ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo) + end + + test "create post" do + # Use the repository as usual + assert %Post{} = Repo.insert!(%Post{}) + end + end + + ## Collaborating processes + + The example above is straight-forward because we have only + a single process using the database connection. However, + sometimes a test may need to interact with multiple processes, + all using the same connection so they all belong to the same + transaction. + + Before we discuss solutions, let's see what happens if we try + to use a connection from a new process without explicitly + checking it out first: + + setup do + # Explicitly get a connection before each test + :ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo) + end + + test "calls worker that runs a query" do + GenServer.call(MyApp.Worker, :run_query) + end + + The test above will fail with an error similar to: + + ** (DBConnection.OwnershipError) cannot find ownership process for #PID<0.35.0> + + That's because the `setup` block is checking out the connection only + for the test process. Once the worker attempts to perform a query, + there is no connection assigned to it and it will fail. 
+ + The sandbox module provides two ways of doing so, via allowances or + by running in shared mode. + + ### Allowances + + The idea behind allowances is that you can explicitly tell a process + which checked out connection it should use, allowing multiple processes + to collaborate over the same connection. Let's give it a try: + + test "calls worker that runs a query" do + allow = Process.whereis(MyApp.Worker) + Ecto.Adapters.SQL.Sandbox.allow(Repo, self(), allow) + GenServer.call(MyApp.Worker, :run_query) + end + + And that's it, by calling `allow/3`, we are explicitly assigning + the parent's connection (i.e. the test process' connection) to + the task. + + Because allowances use an explicit mechanism, their advantage + is that you can still run your tests in async mode. The downside + is that you need to explicitly control and allow every single + process. This is not always possible. In such cases, you will + want to use shared mode. + + ### Shared mode + + Shared mode allows a process to share its connection with any other + process automatically, without relying on explicit allowances. + Let's change the example above to use shared mode: + + setup do + # Explicitly get a connection before each test + :ok = Ecto.Adapters.SQL.Sandbox.checkout(Repo) + # Setting the shared mode must be done only after checkout + Ecto.Adapters.SQL.Sandbox.mode(Repo, {:shared, self()}) + end + + test "calls worker that runs a query" do + GenServer.call(MyApp.Worker, :run_query) + end + + By calling `mode({:shared, self()})`, any process that needs + to talk to the database will now use the same connection as the + one checked out by the test process during the `setup` block. + + Make sure to always check a connection out before setting the mode + to `{:shared, self()}`. + + The advantage of shared mode is that by calling a single function, + you will ensure all upcoming processes and operations will use that + shared connection, without a need to explicitly allow them. The + downside is that tests can no longer run concurrently in shared mode. + + Also, beware that if the test process terminates while the worker is + using the connection, the connection will be taken away from the worker, + which will error. Therefore it is important to guarantee the work is done + before the test concludes. In the example above, we are using a `call`, + which is synchronous, avoiding the problem, but you may need to explicitly + flush the worker or terminate it under such scenarios in your tests. + + ### Summing up + + There are two mechanisms for explicit ownerships: + + * Using allowances - requires explicit allowances via `allow/3`. + Tests may run concurrently. + + * Using shared mode - does not require explicit allowances. + Tests cannot run concurrently. + + ## FAQ + + When running the sandbox mode concurrently, developers may run into + issues we explore in the upcoming sections. + + ### "owner exited" + + In some situations, you may see error reports similar to the one below: + + 23:59:59.999 [error] Postgrex.Protocol (#PID<>) disconnected: + ** (DBConnection.Error) owner #PID<> exited + Client #PID<> is still using a connection from owner + + Such errors are usually followed by another error report from another + process that failed while executing a database query. + + To understand the failure, we need to answer the question: who are the + owner and client processes? 
The owner process is the one that checks + out the connection, which, in the majority of cases, is the test process, + the one running your tests. In other words, the error happens because + the test process has finished, either because the test succeeded or + because it failed, while the client process was trying to get information + from the database. Since the owner process, the one that owns the + connection, no longer exists, Ecto will check the connection back in + and notify the client process using the connection that the connection + owner is no longer available. + + This can happen in different situations. For example, imagine you query + a GenServer in your test that is using a database connection: + + test "gets results from GenServer" do + {:ok, pid} = MyAppServer.start_link() + Ecto.Adapters.SQL.Sandbox.allow(Repo, self(), pid) + assert MyAppServer.get_my_data_fast(timeout: 1000) == [...] + end + + In the test above, we spawn the server and allow it to perform database + queries using the connection owned by the test process. Since we gave + a timeout of 1 second, in case the database takes longer than one second + to reply, the test process will fail, due to the timeout, making the + "owner down" message to be printed because the server process is still + waiting on a connection reply. + + In some situations, such failures may be intermittent. Imagine that you + allow a process that queries the database every half second: + + test "queries periodically" do + {:ok, pid} = PeriodicServer.start_link() + Ecto.Adapters.SQL.Sandbox.allow(Repo, self(), pid) + # more tests + end + + Because the server is querying the database from time to time, there is + a chance that, when the test exits, the periodic process may be querying + the database, regardless of test success or failure. + + ### "owner timed out because it owned the connection for longer than Nms" + + In some situations, you may see error reports similar to the one below: + + 09:56:43.081 [error] Postgrex.Protocol (#PID<>) disconnected: + ** (DBConnection.ConnectionError) owner #PID<> timed out + because it owned the connection for longer than 120000ms + + If you have a long running test (or you're debugging with IEx.pry), + the timeout for the connection ownership may be too short. You can + increase the timeout by setting the `:ownership_timeout` options for + your repo config in `config/config.exs` (or preferably in `config/test.exs`): + + config :my_app, MyApp.Repo, + ownership_timeout: NEW_TIMEOUT_IN_MILLISECONDS + + The `:ownership_timeout` option is part of `DBConnection.Ownership` + and defaults to 120000ms. Timeouts are given as integers in milliseconds. + + Alternately, if this is an issue for only a handful of long-running tests, + you can pass an `:ownership_timeout` option when calling + `Ecto.Adapters.SQL.Sandbox.checkout/2` instead of setting a longer timeout + globally in your config. + + ### Deferred constraints + + Some databases allow to defer constraint validation to the transaction + commit time, instead of the particular statement execution time. This + feature, for instance, allows for a cyclic foreign key referencing. + Since the SQL Sandbox mode rolls back transactions, tests might report + false positives because deferred constraints are never checked by the + database. 
To manually force deferred constraints validation when using + PostgreSQL use the following line right at the end of your test case: + + Repo.query!("SET CONSTRAINTS ALL IMMEDIATE") + + ### Database locks and deadlocks + + Since the sandbox relies on concurrent transactional tests, there is + a chance your tests may trigger deadlocks in your database. This is + specially true with MySQL, where the solutions presented here are not + enough to avoid deadlocks and therefore making the use of concurrent tests + with MySQL prohibited. + + However, even on databases like PostgreSQL, performance degradations or + deadlocks may still occur. For example, imagine a "users" table with a + unique index on the "email" column. Now consider multiple tests are + trying to insert the same user email to the database. They will attempt + to retrieve the same database lock, causing only one test to succeed and + run while all other tests wait for the lock. + + In other situations, two different tests may proceed in a way that + each test retrieves locks desired by the other, leading to a situation + that cannot be resolved, a deadlock. For instance: + + ```text + Transaction 1: Transaction 2: + begin + begin + update posts where id = 1 + update posts where id = 2 + update posts where id = 1 + update posts where id = 2 + **deadlock** + ``` + + There are different ways to avoid such problems. One of them is + to make sure your tests work on distinct data. Regardless of + your choice between using fixtures or factories for test data, + make sure you get a new set of data per test. This is specially + important for data that is meant to be unique like user emails. + + For example, instead of: + + def insert_user do + Repo.insert!(%User{email: "sample@example.com"}) + end + + prefer: + + def insert_user do + Repo.insert!(%User{email: "sample-#{counter()}@example.com"}) + end + + defp counter do + System.unique_integer([:positive]) + end + + In fact, avoiding unique emails like above can also have a positive + impact on the test suite performance, as it reduces contention and + wait between concurrent tests. We have heard reports where using + dynamic values for uniquely indexed columns, as we did for email + above, made a test suite run between 2x to 3x faster. + + Deadlocks may happen in other circumstances. If you believe you + are hitting a scenario that has not been described here, please + report an issue so we can improve our examples. As a last resort, + you can always disable the test triggering the deadlock from + running asynchronously by setting "async: false". 
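+ + For instance, a minimal sketch of opting a single deadlock-prone test module out of concurrency (the module name is illustrative): + + defmodule MyApp.DeadlockProneTest do + # not run concurrently with other async tests + use ExUnit.Case, async: false + end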
+ """ + + defmodule Connection do + @moduledoc false + if Code.ensure_loaded?(DBConnection) do + @behaviour DBConnection + end + + def connect(_opts) do + raise "should never be invoked" + end + + def disconnect(err, {conn_mod, state, _in_transaction?}) do + conn_mod.disconnect(err, state) + end + + def checkout(state), do: proxy(:checkout, state, []) + def checkin(state), do: proxy(:checkin, state, []) + def ping(state), do: proxy(:ping, state, []) + + def handle_begin(opts, {conn_mod, state, false}) do + opts = [mode: :savepoint] ++ opts + + case conn_mod.handle_begin(opts, state) do + {:ok, value, state} -> + {:ok, value, {conn_mod, state, true}} + {kind, err, state} -> + {kind, err, {conn_mod, state, false}} + end + end + def handle_commit(opts, {conn_mod, state, true}) do + opts = [mode: :savepoint] ++ opts + proxy(:handle_commit, {conn_mod, state, false}, [opts]) + end + def handle_rollback(opts, {conn_mod, state, _}) do + opts = [mode: :savepoint] ++ opts + proxy(:handle_rollback, {conn_mod, state, false}, [opts]) + end + + def handle_status(opts, state), + do: proxy(:handle_status, state, [maybe_savepoint(opts, state)]) + def handle_prepare(query, opts, state), + do: proxy(:handle_prepare, state, [query, maybe_savepoint(opts, state)]) + def handle_execute(query, params, opts, state), + do: proxy(:handle_execute, state, [query, params, maybe_savepoint(opts, state)]) + def handle_close(query, opts, state), + do: proxy(:handle_close, state, [query, maybe_savepoint(opts, state)]) + def handle_declare(query, params, opts, state), + do: proxy(:handle_declare, state, [query, params, maybe_savepoint(opts, state)]) + def handle_fetch(query, cursor, opts, state), + do: proxy(:handle_fetch, state, [query, cursor, maybe_savepoint(opts, state)]) + def handle_deallocate(query, cursor, opts, state), + do: proxy(:handle_deallocate, state, [query, cursor, maybe_savepoint(opts, state)]) + + defp maybe_savepoint(opts, {_, _, in_transaction?}) do + if not in_transaction? and Keyword.get(opts, :sandbox_subtransaction, true) do + [mode: :savepoint] ++ opts + else + opts + end + end + + defp proxy(fun, {conn_mod, state, in_transaction?}, args) do + result = apply(conn_mod, fun, args ++ [state]) + pos = :erlang.tuple_size(result) + :erlang.setelement(pos, result, {conn_mod, :erlang.element(pos, result), in_transaction?}) + end + end + + @doc """ + Starts a process that owns the connection and returns its pid. + + The owner process is not linked to the caller, it is your responsibility to + ensure it will be stopped. In tests, this is done by terminating the pool + in an `ExUnit.Callbacks.on_exit/2` callback: + + setup tags do + pid = Ecto.Adapters.SQL.Sandbox.start_owner!(MyApp.Repo, shared: not tags[:async]) + on_exit(fn -> Ecto.Adapters.SQL.Sandbox.stop_owner(pid) end) + :ok + end + + ## Options + + * `:shared` - if `true`, the pool runs in the shared mode. Defaults to `false` + + The remaining options are passed to `checkout/2`. + """ + @doc since: "3.4.4" + def start_owner!(repo, opts \\ []) do + parent = self() + + {:ok, pid} = + Agent.start(fn -> + {shared, opts} = Keyword.pop(opts, :shared, false) + :ok = checkout(repo, opts) + + if shared do + :ok = mode(repo, {:shared, self()}) + else + :ok = allow(repo, self(), parent) + end + end) + + pid + end + + @doc """ + Stops an owner process started by `start_owner!/2`. + """ + @doc since: "3.4.4" + @spec stop_owner(pid()) :: :ok + def stop_owner(pid) do + GenServer.stop(pid) + end + + @doc """ + Sets the mode for the `repo` pool. 
+ + The modes can be: + + * `:auto` - this is the default mode. When trying to use the repository, + processes can automatically check out a connection without calling + `checkout/2` or `start_owner/2` before. This is the mode you will run + on before your test suite starts + + * `:manual` - in this mode, the connection always has to be explicitly + checked out before use. Other processes are allowed to use the same + connection if they are explicitly allowed via `allow/4`. You usually + set the mode to manual at the end of your `test/test_helper.exs` file. + This is also the mode you will run your async tests in + + * `{:shared, pid}` - after checking out a connection in manual mode, + you can change the mode to `{:shared, pid}`, where pid is the process + that owns the connection, most often `{:shared, self()}`. This makes it + so all processes can use the same connection as the one owned by the + current process. This is the mode you will run your sync tests in + + Whenever you change the mode to `:manual` or `:auto`, all existing + connections are checked in. Therefore, it is recommended to set those + modes before your test suite starts, as otherwise you will check in + connections being used in any other test running concurrently. + """ + def mode(repo, mode) + when (is_atom(repo) or is_pid(repo)) and mode in [:auto, :manual] + when (is_atom(repo) or is_pid(repo)) and elem(mode, 0) == :shared and is_pid(elem(mode, 1)) do + %{pid: pool, opts: opts} = lookup_meta!(repo) + DBConnection.Ownership.ownership_mode(pool, mode, opts) + end + + @doc """ + Checks a connection out for the given `repo`. + + The process calling `checkout/2` will own the connection + until it calls `checkin/2` or until it crashes, in which case + the connection will be automatically reclaimed by the pool. + + ## Options + + * `:sandbox` - when true the connection is wrapped in + a transaction. Defaults to true. + + * `:isolation` - set the query to the given isolation level. + + * `:ownership_timeout` - limits how long the connection can be + owned. Defaults to the value in your repo config in + `config/config.exs` (or preferably in `config/test.exs`), or + 120000 ms if not set. The timeout exists for sanity checking + purposes, to ensure there is no connection leakage, and can + be bumped whenever necessary. + + """ + def checkout(repo, opts \\ []) when is_atom(repo) or is_pid(repo) do + %{pid: pool, opts: pool_opts} = lookup_meta!(repo) + + pool_opts = + if Keyword.get(opts, :sandbox, true) do + [ + post_checkout: &post_checkout(&1, &2, opts), + pre_checkin: &pre_checkin(&1, &2, &3, opts) + ] ++ pool_opts + else + pool_opts + end + + pool_opts_overrides = Keyword.take(opts, [:ownership_timeout, :isolation_level]) + pool_opts = Keyword.merge(pool_opts, pool_opts_overrides) + + case DBConnection.Ownership.ownership_checkout(pool, pool_opts) do + :ok -> + if isolation = opts[:isolation] do + set_transaction_isolation_level(repo, isolation) + end + + :ok + + other -> + other + end + end + + defp set_transaction_isolation_level(repo, isolation) do + query = "SET TRANSACTION ISOLATION LEVEL #{isolation}" + + case Ecto.Adapters.SQL.query(repo, query, [], sandbox_subtransaction: false) do + {:ok, _} -> + :ok + + {:error, error} -> + checkin(repo, []) + raise error + end + end + + @doc """ + Checks in the connection back into the sandbox pool. 
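+ + For example (the repo name is illustrative): + + Ecto.Adapters.SQL.Sandbox.checkin(MyApp.Repo)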
+ """ + def checkin(repo, _opts \\ []) when is_atom(repo) or is_pid(repo) do + %{pid: pool, opts: opts} = lookup_meta!(repo) + DBConnection.Ownership.ownership_checkin(pool, opts) + end + + @doc """ + Allows the `allow` process to use the same connection as `parent`. + + `allow` may be a PID or a locally registered name. + """ + def allow(repo, parent, allow, _opts \\ []) when is_atom(repo) or is_pid(repo) do + case GenServer.whereis(allow) do + pid when is_pid(pid) -> + %{pid: pool, opts: opts} = lookup_meta!(repo) + DBConnection.Ownership.ownership_allow(pool, parent, pid, opts) + + other -> + raise """ + only PID or a locally registered process can be allowed to \ + use the same connection as parent but the lookup returned #{inspect(other)} + """ + end + end + + @doc """ + Runs a function outside of the sandbox. + """ + def unboxed_run(repo, fun) when is_atom(repo) or is_pid(repo) do + checkin(repo) + checkout(repo, sandbox: false) + + try do + fun.() + after + checkin(repo) + end + end + + defp lookup_meta!(repo) do + %{opts: opts} = + meta = + repo + |> find_repo() + |> Ecto.Adapter.lookup_meta() + + if opts[:pool] != DBConnection.Ownership do + raise """ + cannot invoke sandbox operation with pool #{inspect(opts[:pool])}. + To use the SQL Sandbox, configure your repository pool as: + + pool: #{inspect(__MODULE__)} + """ + end + + meta + end + + defp find_repo(repo) when is_atom(repo), do: repo.get_dynamic_repo() + defp find_repo(repo), do: repo + + defp post_checkout(conn_mod, conn_state, opts) do + case conn_mod.handle_begin([mode: :transaction] ++ opts, conn_state) do + {:ok, _, conn_state} -> + {:ok, Connection, {conn_mod, conn_state, false}} + + {_error_or_disconnect, err, conn_state} -> + {:disconnect, err, conn_mod, conn_state} + end + end + + defp pre_checkin(:checkin, Connection, {conn_mod, conn_state, _in_transaction?}, opts) do + case conn_mod.handle_rollback([mode: :transaction] ++ opts, conn_state) do + {:ok, _, conn_state} -> + {:ok, conn_mod, conn_state} + + {:idle, _conn_state} -> + raise """ + Ecto SQL sandbox transaction was already committed/rolled back. + + The sandbox works by running each test in a transaction and closing the\ + transaction afterwards. However, the transaction has already terminated.\ + Your test code is likely committing or rolling back transactions manually,\ + either by invoking procedures or running custom SQL commands. + + One option is to manually checkout a connection without a sandbox: + + Ecto.Adapters.SQL.Sandbox.checkout(repo, sandbox: false) + + But remember you will have to undo any database changes performed by such tests. 
+ """ + + {_error_or_disconnect, err, conn_state} -> + {:disconnect, err, conn_mod, conn_state} + end + end + + defp pre_checkin(_, Connection, {conn_mod, conn_state, _in_transaction?}, _opts) do + {:ok, conn_mod, conn_state} + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/sql/stream.ex b/deps/ecto_sql/lib/ecto/adapters/sql/stream.ex new file mode 100644 index 0000000..eb815e7 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/sql/stream.ex @@ -0,0 +1,43 @@ +defmodule Ecto.Adapters.SQL.Stream do + @moduledoc false + + defstruct [:meta, :statement, :params, :opts] + + def build(meta, statement, params, opts) do + %__MODULE__{meta: meta, statement: statement, params: params, opts: opts} + end +end + +alias Ecto.Adapters.SQL.Stream + +defimpl Enumerable, for: Stream do + def count(_), do: {:error, __MODULE__} + + def member?(_, _), do: {:error, __MODULE__} + + def slice(_), do: {:error, __MODULE__} + + def reduce(stream, acc, fun) do + %Stream{meta: meta, statement: statement, params: params, opts: opts} = stream + Ecto.Adapters.SQL.reduce(meta, statement, params, opts, acc, fun) + end +end + +defimpl Collectable, for: Stream do + def into(stream) do + %Stream{meta: meta, statement: statement, params: params, opts: opts} = stream + {state, fun} = Ecto.Adapters.SQL.into(meta, statement, params, opts) + {state, make_into(fun, stream)} + end + + defp make_into(fun, stream) do + fn + state, :done -> + fun.(state, :done) + stream + + state, acc -> + fun.(state, acc) + end + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/tds.ex b/deps/ecto_sql/lib/ecto/adapters/tds.ex new file mode 100644 index 0000000..b6566fd --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/tds.ex @@ -0,0 +1,298 @@ +defmodule Ecto.Adapters.Tds do + @moduledoc """ + Adapter module for MSSQL Server using the TDS protocol. + + ## Options + + Tds options split in different categories described + below. All options can be given via the repository + configuration. + + ### Connection options + + * `:hostname` - Server hostname + * `:port` - Server port (default: 1433) + * `:username` - Username + * `:password` - User password + * `:database` - the database to connect to + * `:pool` - The connection pool module, may be set to `Ecto.Adapters.SQL.Sandbox` + * `:ssl` - Set to true if ssl should be used (default: false) + * `:ssl_opts` - A list of ssl options, see Erlang's `ssl` docs + * `:show_sensitive_data_on_connection_error` - show connection data and + configuration whenever there is an error attempting to connect to the + database + + We also recommend developers to consult the `Tds.start_link/1` documentation + for a complete list of all supported options for driver. + + ### Storage options + + * `:collation` - the database collation. Used during database creation but + it is ignored later + + If you need collation other than Latin1, add `tds_encoding` as dependency to + your project `mix.exs` file then amend `config/config.ex` by adding: + + config :tds, :text_encoder, Tds.Encoding + + This should give you extended set of most encoding. For complete list check + `Tds.Encoding` [documentation](https://hexdocs.pm/tds_encoding). + + ### After connect flags + + After connecting to MSSQL server, TDS will check if there are any flags set in + connection options that should affect connection session behaviour. All flags are + MSSQL standard *SET* options. 
The following flags are currently supported: + + * `:set_language` - sets session language (consult stored procedure output + `exec sp_helplanguage` for valid values) + * `:set_datefirst` - number in range 1..7 + * `:set_dateformat` - atom, one of `:mdy | :dmy | :ymd | :ydm | :myd | :dym` + * `:set_deadlock_priority` - atom, one of `:low | :high | :normal | -10..10` + * `:set_lock_timeout` - number in milliseconds > 0 + * `:set_remote_proc_transactions` - atom, one of `:on | :off` + * `:set_implicit_transactions` - atom, one of `:on | :off` + * `:set_allow_snapshot_isolation` - atom, one of `:on | :off` + (required if `Repo.transaction(fn -> ... end, isolation_level: :snapshot)` is used) + * `:set_read_committed_snapshot` - atom, one of `:on | :off` + + ## Limitations + + ### UUIDs + + MSSQL server has a slightly different binary storage format for UUIDs (`uniqueidentifier`). + If you use `:binary_id`, the proper choice is made. Otherwise you must use the `Tds.Ecto.UUID` + type. Avoid using `Ecto.UUID` since it may cause unpredictable application behaviour. + + ### SQL `Char`, `VarChar` and `Text` types + + When working with binaries and strings, there are some limitations you should be aware of: + + - Strings that should be stored in the mentioned SQL types must be encoded to the column + codepage (defined in its collation). If the collation is different from the database collation, + it is not possible to store the correct value into the database since the connection + respects the database collation. Ecto does not provide a way to override the parameter + codepage. + + - If you need a collation other than Latin1 or other than your database default collation, + then, as mentioned in the "Storage options" section, manually encode strings using + `Tds.Encoding.encode/2` into the desired codepage and tag the parameter as `:binary`. + Please be aware that queries that use this approach in where clauses can be 10x slower + due to increased logical reads in the database. + + - You can't store VarChar codepoints encoded in one collation/codepage into a column that + is encoded in a different collation/codepage. You will always get a wrong result. This is + not an adapter or driver limitation but rather how string encoding works for single-byte + encoded strings in MSSQL server. Don't be confused if you are always seeing Latin1 chars; + they are simply in each codepoint table. + + In particular, if a field has the type `:text`, only raw binaries will be allowed. + To avoid the above limitations, always use the `:string` (NVarChar) type for text if possible. + If you really need to use the VarChar column type, you can use the `Tds.Ecto.VarChar` + Ecto type. + + ### JSON support + + Even though the adapter will convert `:map` fields into JSON back and forth, + the actual value is stored in an NVarChar column. + + ### Query hints and table hints + + MSSQL supports both query hints and table hints: https://docs.microsoft.com/en-us/sql/t-sql/queries/hints-transact-sql-query + + For Ecto compatibility, the query hints must be given via the `lock` option, and they + will be translated to MSSQL's "OPTION". If you need to pass multiple options, you + can separate them with commas: + + from query, lock: "HASH GROUP, FAST 10" + + Table hints are specified as a list alongside a `from` or `join`: + + from query, hints: ["INDEX (IX_Employee_ManagerID)"] + + The `:migration_lock` will be treated as a table hint and defaults to "UPDLOCK". + + ### Multi Repo calls in transactions + + To avoid deadlocks in your app, we exposed the `:isolation_level` repo transaction option. 
+ This tells the SQL Server Transaction Manager how to begin the transaction. + By default, if this option is omitted, the isolation level is set to `:read_committed`. + + Any attempt to manually set the transaction isolation via queries, such as + + Ecto.Adapters.SQL.query("SET TRANSACTION ISOLATION LEVEL XYZ") + + will fail once an explicit transaction is started using `c:Ecto.Repo.transaction/2`, + and the level is reset back to `:read_committed`. + + The `Ecto.Query.lock/3` function can help by setting the lock to `WITH(NOLOCK)`. + This should allow you to do eventually consistent reads and avoid locks on a given + table if you don't need to write to the database. + + NOTE: after the explicit transaction ends (commit or rollback), implicit transactions + will run as READ_COMMITTED. + """ + + use Ecto.Adapters.SQL, + driver: :tds + + require Logger + require Ecto.Query + + @behaviour Ecto.Adapter.Storage + + @doc false + def autogenerate(:binary_id), do: Tds.Ecto.UUID.bingenerate() + def autogenerate(:embed_id), do: Tds.Ecto.UUID.generate() + def autogenerate(type), do: super(type) + + @doc false + @impl true + def loaders({:map, _}, type), do: [&json_decode/1, &Ecto.Type.embedded_load(type, &1, :json)] + def loaders(:map, type), do: [&json_decode/1, type] + def loaders(:boolean, type), do: [&bool_decode/1, type] + def loaders(:binary_id, type), do: [Tds.Ecto.UUID, type] + def loaders(_, type), do: [type] + + @impl true + def dumpers({:map, _}, type), do: [&Ecto.Type.embedded_dump(type, &1, :json)] + def dumpers(:binary_id, type), do: [type, Tds.Ecto.UUID] + def dumpers(_, type), do: [type] + + defp bool_decode(<<0>>), do: {:ok, false} + defp bool_decode(<<1>>), do: {:ok, true} + defp bool_decode(0), do: {:ok, false} + defp bool_decode(1), do: {:ok, true} + defp bool_decode(x) when is_boolean(x), do: {:ok, x} + + defp json_decode(x) when is_binary(x), do: {:ok, Tds.json_library().decode!(x)} + defp json_decode(x), do: {:ok, x} + + # Storage API + @doc false + @impl true + def storage_up(opts) do + database = + Keyword.fetch!(opts, :database) || raise ":database is nil in repository configuration" + + command = + ~s(CREATE DATABASE [#{database}]) + |> concat_if(opts[:collation], &"COLLATE=#{&1}") + + case run_query(Keyword.put(opts, :database, "master"), command) do + {:ok, _} -> + :ok + + {:error, %{mssql: %{number: 1801}}} -> + {:error, :already_up} + + {:error, error} -> + {:error, Exception.message(error)} + end + end + + defp concat_if(content, nil, _fun), do: content + defp concat_if(content, value, fun), do: content <> " " <> fun.(value) + + @doc false + @impl true + def storage_down(opts) do + database = + Keyword.fetch!(opts, :database) || raise ":database is nil in repository configuration" + + case run_query(Keyword.put(opts, :database, "master"), "DROP DATABASE [#{database}]") do + {:ok, _} -> + :ok + + {:error, %{mssql: %{number: 3701}}} -> + {:error, :already_down} + + {:error, error} -> + {:error, Exception.message(error)} + end + end + + @impl Ecto.Adapter.Storage + def storage_status(opts) do + database = + Keyword.fetch!(opts, :database) || raise ":database is nil in repository configuration" + + opts = Keyword.put(opts, :database, "master") + + check_database_query = + "SELECT [name] FROM [master].[sys].[databases] WHERE [name] = '#{database}'" + + case run_query(opts, check_database_query) do + {:ok, %{num_rows: 0}} -> :down + {:ok, %{num_rows: _}} -> :up + other -> {:error, other} + end + end + + defp run_query(opts, sql_command) do + {:ok, _} = Application.ensure_all_started(:ecto_sql) + {:ok, _} = 
Application.ensure_all_started(:tds) + + timeout = Keyword.get(opts, :timeout, 15_000) + + opts = + opts + |> Keyword.drop([:name, :log, :pool, :pool_size]) + |> Keyword.put(:backoff_type, :stop) + |> Keyword.put(:max_restarts, 0) + + {:ok, pid} = Task.Supervisor.start_link() + + task = + Task.Supervisor.async_nolink(pid, fn -> + {:ok, conn} = Tds.start_link(opts) + value = Ecto.Adapters.Tds.Connection.execute(conn, sql_command, [], opts) + GenServer.stop(conn) + value + end) + + case Task.yield(task, timeout) || Task.shutdown(task) do + {:ok, {:ok, result}} -> + {:ok, result} + + {:ok, {:error, error}} -> + {:error, error} + + {:exit, {%{__struct__: struct} = error, _}} + when struct in [Tds.Error, DBConnection.Error] -> + {:error, error} + + {:exit, reason} -> + {:error, RuntimeError.exception(Exception.format_exit(reason))} + + nil -> + {:error, RuntimeError.exception("command timed out")} + end + end + + + @impl true + def supports_ddl_transaction? do + true + end + + @impl true + def lock_for_migrations(meta, opts, fun) do + %{opts: adapter_opts, repo: repo} = meta + + if Keyword.fetch(adapter_opts, :pool_size) == {:ok, 1} do + Ecto.Adapters.SQL.raise_migration_pool_size_error() + end + + opts = Keyword.put(opts, :timeout, :infinity) + + {:ok, result} = + transaction(meta, opts, fn -> + lock_name = "'ecto_#{inspect(repo)}'" + Ecto.Adapters.SQL.query!(meta, "sp_getapplock @Resource = #{lock_name}, @LockMode = 'Exclusive', @LockOwner = 'Transaction', @LockTimeout = -1", [], opts) + fun.() + end) + + result + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/tds/connection.ex b/deps/ecto_sql/lib/ecto/adapters/tds/connection.ex new file mode 100644 index 0000000..c6f5ad2 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/tds/connection.ex @@ -0,0 +1,1780 @@ +if Code.ensure_loaded?(Tds) do + defmodule Ecto.Adapters.Tds.Connection do + @moduledoc false + require Logger + alias Tds.Query + alias Ecto.Query.Tagged + alias Ecto.Adapters.SQL + require Ecto.Schema + + @behaviour Ecto.Adapters.SQL.Connection + + @impl true + def child_spec(opts) do + opts + |> Keyword.put_new(:use_elixir_calendar_types, true) + |> Tds.child_spec() + end + + @impl true + def prepare_execute(pid, _name, statement, params, opts \\ []) do + query = %Query{statement: statement} + params = prepare_params(params) + + opts = Keyword.put(opts, :parameters, params) + DBConnection.prepare_execute(pid, query, params, opts) + end + + @impl true + def execute(pid, statement, params, opts) when is_binary(statement) or is_list(statement) do + query = %Query{statement: statement} + params = prepare_params(params) + opts = Keyword.put(opts, :parameters, params) + + case DBConnection.prepare_execute(pid, query, params, opts) do + {:ok, _, %Tds.Result{columns: nil, num_rows: num_rows, rows: []}} + when num_rows >= 0 -> + {:ok, %Tds.Result{columns: nil, num_rows: num_rows, rows: nil}} + + {:ok, _, query} -> + {:ok, query} + + {:error, _} = err -> + err + end + end + + def execute(pid, %{} = query, params, opts) do + opts = Keyword.put_new(opts, :parameters, params) + params = prepare_params(params) + opts = Keyword.put(opts, :parameters, params) + + case DBConnection.prepare_execute(pid, query, params, opts) do + {:ok, _, query} -> {:ok, query} + {:error, _} = err -> err + end + end + + @impl true + def stream(_conn, _sql, _params, _opts) do + error!(nil, "Repo.stream is not supported in the Tds adapter") + end + + @impl true + def query(conn, sql, params, opts) do + params = prepare_params(params) + Tds.query(conn, sql, params, 
opts) + end + + @impl true + def query_many(_conn, _sql, _params, _opts) do + error!(nil, "query_many is not supported in the Tds adapter") + end + + @impl true + def to_constraints(%Tds.Error{mssql: %{number: code, msg_text: message}}, _opts) do + Tds.Error.get_constraint_violations(code, message) + end + + def to_constraints(_, _opts), do: [] + + defp prepare_params(params) do + {params, _} = + Enum.map_reduce(params, 1, fn param, acc -> + {value, type} = prepare_param(param) + {%Tds.Parameter{name: "@#{acc}", value: value, type: type}, acc + 1} + end) + + params + end + + # Decimal + defp prepare_param(%Decimal{} = value) do + {value, :decimal} + end + + defp prepare_param(%NaiveDateTime{} = value) do + {value, :datetime2} + end + + defp prepare_param(%DateTime{} = value) do + {value, :datetimeoffset} + end + + defp prepare_param(%Date{} = value) do + {value, :date} + end + + defp prepare_param(%Time{} = value) do + {value, :time} + end + + defp prepare_param(%{__struct__: module} = _value) do + # just in case dumpers/loaders are not defined for the this struct + error!( + nil, + "Tds adapter is unable to convert struct `#{inspect(module)}` into supported MSSQL types" + ) + end + + defp prepare_param(%{} = value), do: {json_library().encode!(value), :string} + defp prepare_param(value), do: prepare_raw_param(value) + + defp prepare_raw_param(value) when is_binary(value) do + type = if String.printable?(value), do: :string, else: :binary + {value, type} + end + + defp prepare_raw_param(value) when value == true, do: {1, :boolean} + defp prepare_raw_param(value) when value == false, do: {0, :boolean} + defp prepare_raw_param({_, :varchar} = value), do: value + defp prepare_raw_param(value), do: {value, nil} + + defp json_library(), do: Application.get_env(:tds, :json_library, Jason) + + ## Query + + @parent_as __MODULE__ + alias Ecto.Query + alias Ecto.Query.{BooleanExpr, JoinExpr, QueryExpr, WithExpr} + + @impl true + def all(query, as_prefix \\ []) do + sources = create_names(query, as_prefix) + + cte = cte(query, sources) + from = from(query, sources) + select = select(query, sources) + join = join(query, sources) + where = where(query, sources) + group_by = group_by(query, sources) + having = having(query, sources) + _window = window(query, sources) + combinations = combinations(query) + order_by = order_by(query, sources) + # limit = is handled in select (TOP X) + offset = offset(query, sources) + lock = lock(query, sources) + + if query.offset != nil and query.order_bys == [], + do: error!(query, "ORDER BY is mandatory when OFFSET is set") + + [cte, select, from, join, where, group_by, having, combinations, order_by, lock | offset] + end + + @impl true + def update_all(query) do + sources = create_names(query, []) + cte = cte(query, sources) + {table, name, _model} = elem(sources, 0) + + fields = update_fields(query, sources) + from = " FROM #{table} AS #{name}" + join = join(query, sources) + where = where(query, sources) + lock = lock(query, sources) + + [ + cte, + "UPDATE ", + name, + " SET ", + fields, + returning(query, 0, "INSERTED"), + from, + join, + where | lock + ] + end + + @impl true + def delete_all(query) do + sources = create_names(query, []) + cte = cte(query, sources) + {table, name, _model} = elem(sources, 0) + + delete = "DELETE #{name}" + from = " FROM #{table} AS #{name}" + join = join(query, sources) + where = where(query, sources) + lock = lock(query, sources) + + [cte, delete, returning(query, 0, "DELETED"), from, join, where | lock] + end + + @impl true + 
def insert(prefix, table, header, rows, on_conflict, returning, placeholders) do + counter_offset = length(placeholders) + 1 + [] = on_conflict(on_conflict, header) + returning = returning(returning, "INSERTED") + + values = + if header == [] do + [returning, " DEFAULT VALUES"] + else + [ + ?\s, + ?(, + quote_names(header), + ?), + returning | + insert_all(rows, counter_offset) + ] + end + + ["INSERT INTO ", quote_table(prefix, table), values] + end + + defp on_conflict({:raise, _, []}, _header) do + [] + end + + defp on_conflict({_, _, _}, _header) do + error!(nil, "Tds adapter supports only on_conflict: :raise") + end + + defp insert_all(%Ecto.Query{} = query, _counter) do + [?\s, all(query)] + end + defp insert_all(rows, counter) do + sql = + intersperse_reduce(rows, ",", counter, fn row, counter -> + {row, counter} = insert_each(row, counter) + {[?(, row, ?)], counter} + end) + |> elem(0) + + [" VALUES " | sql] + end + + defp insert_each(values, counter) do + intersperse_reduce(values, ", ", counter, fn + nil, counter -> + {"DEFAULT", counter} + + {%Query{} = query, params_counter}, counter -> + {[?(, all(query), ?)], counter + params_counter} + + {:placeholder, placeholder_index}, counter -> + {[?@ | placeholder_index], counter} + + _, counter -> + {[?@ | Integer.to_string(counter)], counter + 1} + end) + end + + @impl true + def update(prefix, table, fields, filters, returning) do + {fields, count} = + intersperse_reduce(fields, ", ", 1, fn field, acc -> + {[quote_name(field), " = @", Integer.to_string(acc)], acc + 1} + end) + + {filters, _count} = + intersperse_reduce(filters, " AND ", count, fn + {field, nil}, acc -> + {[quote_name(field), " IS NULL"], acc + 1} + + {field, _value}, acc -> + {[quote_name(field), " = @", Integer.to_string(acc)], acc + 1} + + field, acc -> + {[quote_name(field), " = @", Integer.to_string(acc)], acc + 1} + end) + + [ + "UPDATE ", + quote_table(prefix, table), + " SET ", + fields, + returning(returning, "INSERTED"), + " WHERE " | filters + ] + end + + @impl true + def delete(prefix, table, filters, returning) do + {filters, _} = + intersperse_reduce(filters, " AND ", 1, fn + {field, nil}, acc -> + {[quote_name(field), " IS NULL"], acc + 1} + + {field, _value}, acc -> + {[quote_name(field), " = @", Integer.to_string(acc)], acc + 1} + + field, acc -> + {[quote_name(field), " = @", Integer.to_string(acc)], acc + 1} + end) + + [ + "DELETE FROM ", + quote_table(prefix, table), + returning(returning, "DELETED"), + " WHERE " | filters + ] + end + + @impl true + def explain_query(conn, query, params, opts) do + params = prepare_params(params) + + case Tds.query_multi(conn, build_explain_query(query), params, opts) do + {:ok, [_, %Tds.Result{} = result, _]} -> + {:ok, SQL.format_table(result)} + + error -> + error + end + end + + def build_explain_query(query) do + [ + "SET STATISTICS XML ON; ", + "SET STATISTICS PROFILE ON; ", + query, + "; ", + "SET STATISTICS XML OFF; ", + "SET STATISTICS PROFILE OFF;" + ] + |> IO.iodata_to_binary() + end + + ## Query generation + + binary_ops = [ + ==: " = ", + !=: " <> ", + <=: " <= ", + >=: " >= ", + <: " < ", + >: " > ", + +: " + ", + -: " - ", + *: " * ", + /: " / ", + and: " AND ", + or: " OR ", + ilike: " LIKE ", + like: " LIKE " + ] + + @binary_ops Keyword.keys(binary_ops) + + Enum.map(binary_ops, fn {op, str} -> + defp handle_call(unquote(op), 2), do: {:binary_op, unquote(str)} + end) + + defp handle_call(fun, _arity), do: {:fun, Atom.to_string(fun)} + + defp select(%{select: %{fields: fields}, distinct: distinct} = 
query, sources) do + [ + "SELECT ", + distinct(distinct, sources, query), + limit(query, sources), + select(fields, sources, query) + ] + end + + defp distinct(nil, _sources, _query), do: [] + defp distinct(%QueryExpr{expr: true}, _sources, _query), do: "DISTINCT " + defp distinct(%QueryExpr{expr: false}, _sources, _query), do: [] + + defp distinct(%QueryExpr{expr: exprs}, _sources, query) when is_list(exprs) do + error!( + query, + "DISTINCT with multiple columns is not supported by MsSQL. " <> + "Please use distinct(true) if you need distinct resultset" + ) + end + + defp select([], _sources, _query) do + "CAST(1 as bit)" + end + + defp select(fields, sources, query) do + intersperse_map(fields, ", ", fn + {:&, _, [idx]} -> + case elem(sources, idx) do + {source, _, nil} -> + error!( + query, + "Tds adapter does not support selecting all fields from #{source} without a schema. " <> + "Please specify a schema or specify exactly which fields you want in projection" + ) + + {_, source, _} -> + source + end + + {key, value} -> + [select_expr(value, sources, query), " AS ", quote_name(key)] + + value -> + select_expr(value, sources, query) + end) + end + + defp select_expr({:not, _, [expr]}, sources, query) do + [?~, ?(, select_expr(expr, sources, query), ?)] + end + + defp select_expr(value, sources, query), do: expr(value, sources, query) + + defp from(%{from: %{source: source, hints: hints}} = query, sources) do + {from, name} = get_source(query, sources, 0, source) + + [" FROM ", from, " AS ", name, hints(hints)] + end + + defp cte(%{with_ctes: %WithExpr{queries: [_ | _] = queries}} = query, sources) do + ctes = intersperse_map(queries, ", ", &cte_expr(&1, sources, query)) + ["WITH ", ctes, " "] + end + + defp cte(%{with_ctes: _}, _), do: [] + + defp cte_expr({name, cte}, sources, query) do + [quote_name(name), cte_header(cte, query), " AS ", cte_query(cte, sources, query)] + end + + defp cte_header(%QueryExpr{}, query) do + error!( + query, + "Tds adapter does not support fragment in CTE" + ) + end + + defp cte_header(%Ecto.Query{select: %{fields: fields}} = query, _) do + [ + " (", + intersperse_map(fields, ",", fn + {key, _} -> + quote_name(key) + + other -> + error!( + query, + "Tds adapter expected field name or alias in CTE header," <> + " instead got #{inspect(other)}" + ) + end), + ?) 
+ ] + end + + defp cte_query(%Ecto.Query{} = query, sources, parent_query) do + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + [?(, all(query, subquery_as_prefix(sources)), ?)] + end + + defp update_fields(%Query{updates: updates} = query, sources) do + for( + %{expr: expr} <- updates, + {op, kw} <- expr, + {key, value} <- kw, + do: update_op(op, key, value, sources, query) + ) + |> Enum.intersperse(", ") + end + + defp update_op(:set, key, value, sources, query) do + {_table, name, _model} = elem(sources, 0) + [name, ?., quote_name(key), " = " | expr(value, sources, query)] + end + + defp update_op(:inc, key, value, sources, query) do + {_table, name, _model} = elem(sources, 0) + quoted = quote_name(key) + + [name, ?., quoted, " = ", name, ?., quoted, " + " | expr(value, sources, query)] + end + + defp update_op(command, _key, _value, _sources, query) do + error!(query, "Unknown update operation #{inspect(command)} for TDS") + end + + defp join(%{joins: []}, _sources), do: [] + + defp join(%{joins: joins} = query, sources) do + [ + ?\s, + intersperse_map(joins, ?\s, fn + %JoinExpr{on: %QueryExpr{expr: expr}, qual: qual, ix: ix, source: source, hints: hints} -> + {join, name} = get_source(query, sources, ix, source) + qual_text = join_qual(qual) + join = join || ["(", expr(source, sources, query) | ")"] + [qual_text, join, " AS ", name, hints(hints) | join_on(qual, expr, sources, query)] + end) + ] + end + + defp join_on(:cross, true, _sources, _query), do: [] + defp join_on(:inner_lateral, true, _sources, _query), do: [] + defp join_on(:left_lateral, true, _sources, _query), do: [] + defp join_on(_qual, true, _sources, _query), do: [" ON 1 = 1"] + defp join_on(_qual, expr, sources, query), do: [" ON " | expr(expr, sources, query)] + + defp join_qual(:inner), do: "INNER JOIN " + defp join_qual(:inner_loop), do: "INNER LOOP JOIN " + defp join_qual(:inner_hash), do: "INNER HASH JOIN " + defp join_qual(:inner_merge), do: "INNER MERGE JOIN " + defp join_qual(:inner_remote), do: "INNER REMOTE JOIN " + defp join_qual(:left), do: "LEFT OUTER JOIN " + defp join_qual(:right), do: "RIGHT OUTER JOIN " + defp join_qual(:full), do: "FULL OUTER JOIN " + defp join_qual(:cross), do: "CROSS JOIN " + defp join_qual(:inner_lateral), do: "CROSS APPLY " + defp join_qual(:left_lateral), do: "OUTER APPLY " + + defp where(%Query{wheres: wheres} = query, sources) do + boolean(" WHERE ", wheres, sources, query) + end + + defp having(%Query{havings: havings} = query, sources) do + boolean(" HAVING ", havings, sources, query) + end + + defp window(%{windows: []}, _sources), do: [] + + defp window(_query, _sources), + do: raise(RuntimeError, "Tds adapter does not support window functions") + + defp group_by(%{group_bys: []}, _sources), do: [] + + defp group_by(%{group_bys: group_bys} = query, sources) do + [ + " GROUP BY " + | intersperse_map(group_bys, ", ", fn %QueryExpr{expr: expr} -> + intersperse_map(expr, ", ", &expr(&1, sources, query)) + end) + ] + end + + defp order_by(%{order_bys: []}, _sources), do: [] + + defp order_by(%{order_bys: order_bys} = query, sources) do + [ + " ORDER BY " + | intersperse_map(order_bys, ", ", fn %QueryExpr{expr: expr} -> + intersperse_map(expr, ", ", &order_by_expr(&1, sources, query)) + end) + ] + end + + defp order_by_expr({dir, expr}, sources, query) do + str = expr(expr, sources, query) + + case dir do + :asc -> str + :desc -> [str | " DESC"] + _ -> error!(query, "#{dir} is not supported in ORDER BY in MSSQL") + end + end + + defp limit(%Query{limit: 
nil}, _sources), do: [] + + defp limit( + %Query{ + limit: %QueryExpr{ + expr: expr + } + } = query, + sources + ) do + case Map.get(query, :offset) do + nil -> + ["TOP(", expr(expr, sources, query), ") "] + + _ -> + [] + end + end + + defp offset(%{offset: nil}, _sources), do: [] + + defp offset(%Query{offset: _, limit: nil} = query, _sources) do + error!(query, "You must provide a limit while using an offset") + end + + defp offset(%{offset: offset, limit: limit} = query, sources) do + [ + " OFFSET ", + expr(offset.expr, sources, query), + " ROW", + " FETCH NEXT ", + expr(limit.expr, sources, query), + " ROWS ONLY" + ] + end + + defp hints([_ | _] = hints), do: [" WITH (", Enum.intersperse(hints, ", "), ?)] + defp hints([]), do: [] + + defp lock(%{lock: nil}, _sources), do: [] + defp lock(%{lock: binary}, _sources) when is_binary(binary), do: [" OPTION (", binary, ?)] + defp lock(%{lock: expr} = query, sources), do: [" OPTION (", expr(expr, sources, query), ?)] + + defp combinations(%{combinations: combinations}) do + Enum.map(combinations, fn + {:union, query} -> [" UNION (", all(query), ")"] + {:union_all, query} -> [" UNION ALL (", all(query), ")"] + {:except, query} -> [" EXCEPT (", all(query), ")"] + {:except_all, query} -> [" EXCEPT ALL (", all(query), ")"] + {:intersect, query} -> [" INTERSECT (", all(query), ")"] + {:intersect_all, query} -> [" INTERSECT ALL (", all(query), ")"] + end) + end + + defp boolean(_name, [], _sources, _query), do: [] + + defp boolean(name, [%{expr: expr, op: op} | query_exprs], sources, query) do + [ + name + | Enum.reduce(query_exprs, {op, paren_expr(expr, sources, query)}, fn + %BooleanExpr{expr: expr, op: op}, {op, acc} -> + {op, [acc, operator_to_boolean(op), paren_expr(expr, sources, query)]} + + %BooleanExpr{expr: expr, op: op}, {_, acc} -> + {op, [?(, acc, ?), operator_to_boolean(op), paren_expr(expr, sources, query)]} + end) + |> elem(1) + ] + end + + defp operator_to_boolean(:and), do: " AND " + defp operator_to_boolean(:or), do: " OR " + + defp parens_for_select([first_expr | _] = expr) do + if is_binary(first_expr) and String.match?(first_expr, ~r/^\s*select/i) do + [?(, expr, ?)] + else + expr + end + end + + defp paren_expr(true, _sources, _query) do + ["(1 = 1)"] + end + + defp paren_expr(false, _sources, _query) do + ["(1 = 0)"] + end + + defp paren_expr(expr, sources, query) do + [?(, expr(expr, sources, query), ?)] + end + + # :^ - represents parameter ix is index number + defp expr({:^, [], [idx]}, _sources, _query) do + "@#{idx + 1}" + end + + defp expr({{:., _, [{:parent_as, _, [as]}, field]}, _, []}, _sources, query) + when is_atom(field) do + {ix, sources} = get_parent_sources_ix(query, as) + {_, name, _} = elem(sources, ix) + [name, ?. | quote_name(field)] + end + + defp expr({{:., _, [{:&, _, [idx]}, field]}, _, []}, sources, _query) + when is_atom(field) or is_binary(field) do + {_, name, _} = elem(sources, idx) + [name, ?. | quote_name(field)] + end + + defp expr({:&, _, [idx]}, sources, _query) do + {_table, source, _schema} = elem(sources, idx) + source + end + + defp expr({:&, _, [idx, fields, _counter]}, sources, query) do + {_table, name, schema} = elem(sources, idx) + + if is_nil(schema) and is_nil(fields) do + error!( + query, + "Tds adapter requires a schema module when using selector #{inspect(name)} but " <> + "none was given. 
Please specify schema " <> + "or specify exactly which fields from #{inspect(name)} you what in projection" + ) + end + + Enum.map_join(fields, ", ", &"#{name}.#{quote_name(&1)}") + end + + # example from {:in, [], [1, {:^, [], [0, 0]}]} + defp expr({:in, _, [_left, []]}, _sources, _query) do + "0=1" + end + + # example from(p in Post, where: p.id in [1,2, ^some_id]) + defp expr({:in, _, [left, right]}, sources, query) when is_list(right) do + args = Enum.map_join(right, ",", &expr(&1, sources, query)) + [expr(left, sources, query), " IN (", args | ")"] + end + + # example from(p in Post, where: p.id in []) + defp expr({:in, _, [_, {:^, _, [_, 0]}]}, _sources, _query), do: "0=1" + + # example from(p in Post, where: p.id in ^some_list) + # or from(p in Post, where: p.id in ^[]) + defp expr({:in, _, [left, {:^, _, [idx, length]}]}, sources, query) do + args = list_param_to_args(idx, length) + [expr(left, sources, query), " IN (", args | ")"] + end + + defp expr({:in, _, [left, %Ecto.SubQuery{} = subquery]}, sources, query) do + [expr(left, sources, query), " IN ", expr(subquery, sources, query)] + end + + defp expr({:in, _, [left, right]}, sources, query) do + [expr(left, sources, query), " = ANY(", expr(right, sources, query) | ")"] + end + + defp expr({:is_nil, _, [arg]}, sources, query) do + "#{expr(arg, sources, query)} IS NULL" + end + + defp expr({:not, _, [expr]}, sources, query) do + ["NOT (", expr(expr, sources, query) | ")"] + end + + defp expr({:filter, _, _}, _sources, query) do + error!(query, "Tds adapter does not support aggregate filters") + end + + defp expr(%Ecto.SubQuery{query: query}, sources, parent_query) do + query = put_in(query.aliases[@parent_as], {parent_query, sources}) + [?(, all(query, subquery_as_prefix(sources)), ?)] + end + + defp expr({:fragment, _, [kw]}, _sources, query) when is_list(kw) or tuple_size(kw) == 3 do + error!(query, "Tds adapter does not support keyword or interpolated fragments") + end + + defp expr({:fragment, _, parts}, sources, query) do + Enum.map(parts, fn + {:raw, part} -> part + {:expr, expr} -> expr(expr, sources, query) + end) + |> parens_for_select + end + + defp expr({:literal, _, [literal]}, _sources, _query) do + quote_name(literal) + end + + defp expr({:datetime_add, _, [datetime, count, interval]}, sources, query) do + [ + "DATEADD(", + interval, + ", ", + interval_count(count, sources, query), + ", CAST(", + expr(datetime, sources, query), + " AS datetime2(6)))" + ] + end + + defp expr({:date_add, _, [date, count, interval]}, sources, query) do + [ + "CAST(DATEADD(", + interval, + ", ", + interval_count(count, sources, query), + ", CAST(", + expr(date, sources, query), + " AS datetime2(6))" | ") AS date)" + ] + end + + defp expr({:count, _, []}, _sources, _query), do: "count(*)" + + defp expr({:json_extract_path, _, _}, _sources, query) do + error!( + query, + "Tds adapter does not support json_extract_path expression" <> + ", use fragment with JSON_VALUE/JSON_QUERY" + ) + end + + defp expr({fun, _, args}, sources, query) when is_atom(fun) and is_list(args) do + {modifier, args} = + case args do + [rest, :distinct] -> {"DISTINCT ", [rest]} + _ -> {"", args} + end + + case handle_call(fun, length(args)) do + {:binary_op, op} -> + [left, right] = args + [op_to_binary(left, sources, query), op | op_to_binary(right, sources, query)] + + {:fun, fun} -> + [fun, ?(, modifier, intersperse_map(args, ", ", &expr(&1, sources, query)), ?)] + end + end + + defp expr(list, sources, query) when is_list(list) do + Enum.map_join(list, ", ", 
&expr(&1, sources, query)) + end + + defp expr({string, :varchar}, _sources, _query) + when is_binary(string) do + "'#{escape_string(string)}'" + end + + defp expr(string, _sources, _query) when is_binary(string) do + "N'#{escape_string(string)}'" + end + + defp expr(%Decimal{exp: exp} = decimal, _sources, _query) do + # this should help gaining precision for decimals values embedded in query + # but this is still not good enough, for instance: + # + # from(p in Post, select: type(2.0 + ^"2", p.cost()))) + # + # Post.cost is :decimal, but we don't know precision and scale since + # such info is only available in migration files. So query compilation + # will yield + # + # SELECT CAST(CAST(2.0 as decimal(38, 1)) + @1 AS decimal) + # FROM [posts] AS p0 + # + # as long as we have CAST(... as DECIMAL) without precision and scale + # value could be truncated + [ + "CAST(", + Decimal.to_string(decimal, :normal), + " as decimal(38, #{abs(exp)})", + ?) + ] + end + + defp expr(%Tagged{value: binary, type: :binary}, _sources, _query) when is_binary(binary) do + hex = Base.encode16(binary, case: :lower) + "0x#{hex}" + end + + defp expr(%Tagged{value: binary, type: :uuid}, _sources, _query) when is_binary(binary) do + case binary do + <<_::64, ?-, _::32, ?-, _::32, ?-, _::32, ?-, _::96>> -> + {:ok, value} = Tds.Ecto.UUID.dump(binary) + value + + any -> + any + end + end + + defp expr(%Tagged{value: other, type: type}, sources, query) + when type in [:varchar, :nvarchar] do + "CAST(#{expr(other, sources, query)} AS #{column_type(type, [])}(max))" + end + + defp expr(%Tagged{value: other, type: :integer}, sources, query) do + "CAST(#{expr(other, sources, query)} AS bigint)" + end + + defp expr(%Tagged{value: other, type: type}, sources, query) do + "CAST(#{expr(other, sources, query)} AS #{column_type(type, [])})" + end + + defp expr(nil, _sources, _query), do: "NULL" + defp expr(true, _sources, _query), do: "1" + defp expr(false, _sources, _query), do: "0" + + defp expr(literal, _sources, _query) when is_binary(literal) do + "'#{escape_string(literal)}'" + end + + defp expr(literal, _sources, _query) when is_integer(literal) do + Integer.to_string(literal) + end + + defp expr(literal, _sources, _query) when is_float(literal) do + Float.to_string(literal) + end + + defp expr(field, _sources, query) do + error!(query, "unsupported MSSQL expressions: `#{inspect(field)}`") + end + + defp op_to_binary({op, _, [_, _]} = expr, sources, query) when op in @binary_ops do + paren_expr(expr, sources, query) + end + + defp op_to_binary({:is_nil, _, [_]} = expr, sources, query) do + paren_expr(expr, sources, query) + end + + defp op_to_binary(expr, sources, query) do + expr(expr, sources, query) + end + + defp interval_count(count, _sources, _query) when is_integer(count) do + Integer.to_string(count) + end + + defp interval_count(count, _sources, _query) when is_float(count) do + :erlang.float_to_binary(count, [:compact, decimals: 16]) + end + + defp interval_count(count, sources, query) do + expr(count, sources, query) + end + + defp returning([], _verb), do: [] + + defp returning(returning, verb) when is_list(returning) do + [" OUTPUT ", intersperse_map(returning, ", ", &[verb, ?., quote_name(&1)])] + end + + defp returning(%{select: nil}, _, _), + do: [] + + defp returning(%{select: %{fields: fields}} = query, idx, verb), + do: [ + " OUTPUT " + | intersperse_map(fields, ", ", fn + {{:., _, [{:&, _, [^idx]}, key]}, _, _} -> [verb, ?., quote_name(key)] + _ -> error!(query, "MSSQL can only return table #{verb} 
columns") + end) + ] + + defp create_names(%{sources: sources}, as_prefix) do + create_names(sources, 0, tuple_size(sources), as_prefix) |> List.to_tuple() + end + + defp create_names(sources, pos, limit, as_prefix) when pos < limit do + [create_name(sources, pos, as_prefix) | create_names(sources, pos + 1, limit, as_prefix)] + end + + defp create_names(_sources, pos, pos, as_prefix) do + [as_prefix] + end + + defp subquery_as_prefix(sources) do + [?s | :erlang.element(tuple_size(sources), sources)] + end + + defp create_name(sources, pos, as_prefix) do + case elem(sources, pos) do + {:fragment, _, _} -> + {nil, as_prefix ++ [?f | Integer.to_string(pos)], nil} + + {table, model, prefix} -> + name = as_prefix ++ [create_alias(table) | Integer.to_string(pos)] + {quote_table(prefix, table), name, model} + + %Ecto.SubQuery{} -> + {nil, as_prefix ++ [?s | Integer.to_string(pos)], nil} + end + end + + defp create_alias(<>) when first in ?a..?z when first in ?A..?Z do + first + end + + defp create_alias(_) do + ?t + end + + # DDL + alias Ecto.Migration.{Table, Index, Reference, Constraint} + + @creates [:create, :create_if_not_exists] + @drops [:drop, :drop_if_exists] + + @impl true + def execute_ddl({command, %Table{} = table, columns}) when command in @creates do + prefix = table.prefix + + pk_name = + if table.prefix, + do: "#{table.prefix}_#{table.name}", + else: table.name + + table_structure = + table + |> column_definitions(columns) + |> Kernel.++(pk_definitions(columns, ", CONSTRAINT [#{pk_name}_pkey] ")) + |> case do + [] -> [] + list -> [" (", list, ?)] + end + + create_if_not_exists = + if_table_not_exists(command == :create_if_not_exists, table.name, prefix) + + [ + [ + create_if_not_exists, + "CREATE TABLE ", + quote_table(prefix, table.name), + table_structure, + engine_expr(table.engine), + options_expr(table.options), + "; " + ] + ] + end + + def execute_ddl({command, %Table{}, :cascade}) when command in @drops, + do: error!(nil, "MSSQL does not support `CASCADE` in DROP TABLE commands") + + def execute_ddl({command, %Table{} = table, :restrict}) when command in @drops do + prefix = table.prefix + + [ + [ + if_table_exists(command == :drop_if_exists, table.name, prefix), + "DROP TABLE ", + quote_table(prefix, table.name), + "; " + ] + ] + end + + def execute_ddl({:alter, %Table{} = table, changes}) do + statement_prefix = ["ALTER TABLE ", quote_table(table.prefix, table.name), " "] + + pk_name = + if table.prefix, + do: "#{table.prefix}_#{table.name}", + else: table.name + + pkeys = + case pk_definitions(changes, " CONSTRAINT [#{pk_name}_pkey] ") do + [] -> [] + sql -> [statement_prefix, "ADD", sql] + end + + [ + [ + column_changes(statement_prefix, table, changes), + pkeys + ] + ] + end + + def execute_ddl({command, %Index{} = index}) when command in @creates do + prefix = index.prefix + + if index.using do + error!(nil, "MSSQL does not support `using` in indexes") + end + + with_options = + if index.concurrently or index.options != nil do + [ + " WITH", + ?(, + if_do(index.concurrently, "ONLINE=ON"), + if_do(index.concurrently and index.options != nil, ","), + if_do(index.options != nil, index.options), + ?) 
+ ] + else + [] + end + + include = + index.include + |> List.wrap() + |> intersperse_map(", ", &index_expr/1) + + [ + [ + if_index_not_exists( + command == :create_if_not_exists, + index.name, + unquoted_name(prefix, index.table) + ), + "CREATE", + if_do(index.unique, " UNIQUE"), + " INDEX ", + quote_name(index.name), + " ON ", + quote_table(prefix, index.table), + " (", + intersperse_map(index.columns, ", ", &index_expr/1), + ?), + if_do(include != [], [" INCLUDE ", ?(, include, ?)]), + if_do(index.where, [" WHERE (", index.where, ?)]), + with_options, + ?; + ] + ] + end + + def execute_ddl({:create, %Constraint{exclude: exclude}}) when exclude != nil do + msg = + "`:exclude` is not supported Tds adapter check constraint parameter, instead " <> + "set `:check` attribute with negated expression." + + error!(nil, msg) + end + + def execute_ddl({:create, %Constraint{validate: false}}) do + error!(nil, "`:validate` is not supported by the Tds adapter") + end + + def execute_ddl({:create, %Constraint{} = constraint}) do + table_name = quote_table(constraint.prefix, constraint.table) + + [ + [ + "ALTER TABLE ", + table_name, + " ADD CONSTRAINT ", + quote_name(constraint.name), + " ", + "CHECK (", + constraint.check, + "); " + ] + ] + end + + def execute_ddl({command, %Index{}, :cascade}) when command in @drops, + do: error!(nil, "MSSQL does not support `CASCADE` in DROP INDEX commands") + + def execute_ddl({command, %Index{} = index, :restrict}) when command in @drops do + prefix = index.prefix + + [ + [ + if_index_exists( + command == :drop_if_exists, + index.name, + unquoted_name(prefix, index.table) + ), + "DROP INDEX ", + quote_name(index.name), + " ON ", + quote_table(prefix, index.table), + if_do(index.concurrently, " LOCK=NONE"), + "; " + ] + ] + end + + def execute_ddl({command, %Constraint{}, :cascade}) when command in @drops, + do: error!(nil, "MSSQL does not support `CASCADE` in DROP CONSTRAINT commands") + + def execute_ddl({command, %Constraint{} = constraint, _}) when command in @drops do + table_name = quote_table(constraint.prefix, constraint.table) + + [ + [ + if_check_constraint_exists( + command == :drop_if_exists, + constraint.name, + constraint.prefix + ), + "ALTER TABLE ", + table_name, + " DROP CONSTRAINT ", + quote_name(constraint.name), + "; " + ] + ] + end + + def execute_ddl({:rename, %Table{} = current_table, %Table{} = new_table}) do + [ + [ + "EXEC sp_rename '", + unquoted_name(current_table.prefix, current_table.name), + "', '", + unquoted_name(new_table.prefix, new_table.name), + "'" + ] + ] + end + + def execute_ddl({:rename, table, current_column, new_column}) do + [ + [ + "EXEC sp_rename '", + unquoted_name(table.prefix, table.name, current_column), + "', '", + unquoted_name(new_column), + "', 'COLUMN'" + ] + ] + end + + def execute_ddl(string) when is_binary(string), do: [string] + + def execute_ddl(keyword) when is_list(keyword), + do: error!(nil, "Tds adapter does not support keyword lists in execute") + + @impl true + def ddl_logs(_), do: [] + + @impl true + def table_exists_query(table) do + {"SELECT 1 FROM sys.tables WHERE [name] = @1", [table]} + end + + defp pk_definitions(columns, prefix) do + pks = + for {_, name, _, opts} <- columns, + opts[:primary_key], + do: name + + case pks do + [] -> + [] + + _ -> + [prefix, "PRIMARY KEY CLUSTERED (", quote_names(pks), ?)] + end + end + + defp column_definitions(table, columns) do + intersperse_map(columns, ", ", &column_definition(table, &1)) + end + + defp column_definition(table, {:add, name, %Reference{} 
= ref, opts}) do + [ + quote_name(name), + " ", + reference_column_type(ref.type, opts), + column_options(table, name, opts), + reference_expr(ref, table, name) + ] + end + + defp column_definition(table, {:add, name, type, opts}) do + [quote_name(name), " ", column_type(type, opts), column_options(table, name, opts)] + end + + defp column_changes(statement, table, columns) do + for column <- columns do + column_change(statement, table, column) + end + end + + defp column_change(_statement_prefix, _table, {_command, _name, %Reference{validate: false}, _opts}) do + error!(nil, "validate: false on references is not supported in Tds") + end + + defp column_change(statement_prefix, table, {:add, name, %Reference{} = ref, opts}) do + [ + [ + statement_prefix, + "ADD ", + quote_name(name), + " ", + reference_column_type(ref.type, opts), + column_options(table, name, opts), + "; " + ], + [statement_prefix, "ADD", constraint_expr(ref, table, name), "; "] + ] + end + + defp column_change(statement_prefix, table, {:add, name, type, opts}) do + [ + [ + statement_prefix, + "ADD ", + quote_name(name), + " ", + column_type(type, opts), + column_options(table, name, opts), + "; " + ] + ] + end + + defp column_change( + statement_prefix, + %{name: table_name, prefix: prefix} = table, + {:add_if_not_exists, column_name, type, opts} + ) do + [ + [ + if_column_not_exists(prefix, table_name, column_name), + statement_prefix, + "ADD ", + quote_name(column_name), + " ", + column_type(type, opts), + column_options(table, column_name, opts), + "; " + ] + ] + end + + defp column_change(statement_prefix, table, {:modify, name, %Reference{} = ref, opts}) do + [ + drop_constraint_from_expr(opts[:from], table, name, statement_prefix), + maybe_drop_default_expr(statement_prefix, table, name, opts), + [ + statement_prefix, + "ALTER COLUMN ", + quote_name(name), + " ", + reference_column_type(ref.type, opts), + column_options(table, name, opts), + "; " + ], + [statement_prefix, "ADD", constraint_expr(ref, table, name), "; "], + [column_default_value(statement_prefix, table, name, opts)] + ] + end + + defp column_change(statement_prefix, table, {:modify, name, type, opts}) do + [ + drop_constraint_from_expr(opts[:from], table, name, statement_prefix), + maybe_drop_default_expr(statement_prefix, table, name, opts), + [ + statement_prefix, + "ALTER COLUMN ", + quote_name(name), + " ", + column_type(type, opts), + null_expr(Keyword.get(opts, :null)), + "; " + ], + [column_default_value(statement_prefix, table, name, opts)] + ] + end + + defp column_change(statement_prefix, _table, {:remove, name}) do + [statement_prefix, "DROP COLUMN ", quote_name(name), "; "] + end + + defp column_change( + statement_prefix, + %{name: table, prefix: prefix}, + {:remove_if_exists, column_name, _} + ) do + [ + [ + if_column_exists(prefix, table, column_name), + statement_prefix, + "DROP COLUMN ", + quote_name(column_name), + "; " + ] + ] + end + + defp column_options(table, name, opts) do + default = Keyword.fetch(opts, :default) + null = Keyword.get(opts, :null) + [null_expr(null), default_expr(table, name, default)] + end + + defp column_default_value(statement_prefix, table, name, opts) do + default_expression = default_expr(table, name, Keyword.fetch(opts, :default)) + + case default_expression do + [] -> [] + _ -> [statement_prefix, "ADD", default_expression, " FOR ", quote_name(name), "; "] + end + end + + defp null_expr(false), do: [" NOT NULL"] + defp null_expr(true), do: [" NULL"] + defp null_expr(_), do: [] + + defp 
default_expr(_table, _name, {:ok, nil}), + do: [] + + defp default_expr(table, name, {:ok, literal}) when is_binary(literal), + do: [ + " CONSTRAINT ", + constraint_name("DF", table, name), + " DEFAULT (N'", + escape_string(literal), + "')" + ] + + defp default_expr(table, name, {:ok, true}), + do: [" CONSTRAINT ", constraint_name("DF", table, name), " DEFAULT (1)"] + + defp default_expr(table, name, {:ok, false}), + do: [" CONSTRAINT ", constraint_name("DF", table, name), " DEFAULT (0)"] + + defp default_expr(table, name, {:ok, literal}) when is_number(literal), + do: [ + " CONSTRAINT ", + constraint_name("DF", table, name), + " DEFAULT (", + to_string(literal), + ")" + ] + + defp default_expr(table, name, {:ok, {:fragment, expr}}), + do: [" CONSTRAINT ", constraint_name("DF", table, name), " DEFAULT (", expr, ")"] + + defp default_expr(_table, _name, :error), do: [] + + defp drop_constraint_from_expr(%Reference{} = ref, table, name, stm_prefix) do + [stm_prefix, "DROP CONSTRAINT ", reference_name(ref, table, name), "; "] + end + + defp drop_constraint_from_expr(_, _, _, _), + do: [] + + defp maybe_drop_default_expr(statement_prefix, table, name, opts) do + if Keyword.has_key?(opts, :default) do + constraint_name = constraint_name("DF", table, name) + if_exists_drop_constraint(constraint_name, statement_prefix) + else + [] + end + end + + defp constraint_name(type, table, name), + do: quote_name("#{type}_#{table.prefix}_#{table.name}_#{name}") + + defp index_expr(literal) when is_binary(literal), do: literal + defp index_expr(literal), do: quote_name(literal) + + defp engine_expr(_storage_engine), do: [""] + + defp options_expr(nil), do: [] + + defp options_expr(keyword) when is_list(keyword), + do: error!(nil, "Tds adapter does not support keyword lists in :options") + + defp options_expr(options), do: [" ", to_string(options)] + + defp column_type(type, opts) do + size = Keyword.get(opts, :size) + precision = Keyword.get(opts, :precision) + scale = Keyword.get(opts, :scale) + ecto_to_db(type, size, precision, scale) + end + + defp constraint_expr(%Reference{} = ref, table, name) do + {current_columns, reference_columns} = Enum.unzip([{name, ref.column} | ref.with]) + + if ref.match do + error!(nil, ":match is not supported in references for tds") + end + + [ + " CONSTRAINT ", + reference_name(ref, table, name), + " FOREIGN KEY (#{quote_names(current_columns)})", + " REFERENCES ", + quote_table(ref.prefix || table.prefix, ref.table), + "(#{quote_names(reference_columns)})", + reference_on_delete(ref.on_delete), + reference_on_update(ref.on_update) + ] + end + + defp reference_expr(%Reference{} = ref, table, name) do + [",", constraint_expr(ref, table, name)] + end + + defp reference_name(%Reference{name: nil}, table, column), + do: quote_name("#{table.name}_#{column}_fkey") + + defp reference_name(%Reference{name: name}, _table, _column), do: quote_name(name) + + defp reference_column_type(:id, _opts), do: "BIGINT" + defp reference_column_type(:serial, _opts), do: "INT" + defp reference_column_type(:bigserial, _opts), do: "BIGINT" + defp reference_column_type(type, opts), do: column_type(type, opts) + + defp reference_on_delete(:nilify_all), do: " ON DELETE SET NULL" + defp reference_on_delete(:delete_all), do: " ON DELETE CASCADE" + defp reference_on_delete(:nothing), do: " ON DELETE NO ACTION" + defp reference_on_delete(_), do: [] + + defp reference_on_update(:nilify_all), do: " ON UPDATE SET NULL" + defp reference_on_update(:update_all), do: " ON UPDATE CASCADE" + defp 
reference_on_update(:nothing), do: " ON UPDATE NO ACTION" + defp reference_on_update(_), do: [] + + ## Helpers + + defp get_source(query, sources, ix, source) do + {expr, name, _schema} = elem(sources, ix) + {expr || expr(source, sources, query), name} + end + + defp get_parent_sources_ix(query, as) do + case query.aliases[@parent_as] do + {%{aliases: %{^as => ix}}, sources} -> {ix, sources} + {%{} = parent, _sources} -> get_parent_sources_ix(parent, as) + end + end + + defp quote_name(name) when is_atom(name) do + quote_name(Atom.to_string(name)) + end + + defp quote_name(name) when is_binary(name) do + if String.contains?(name, ["[", "]"]) do + error!(nil, "bad literal/field/table name #{inspect(name)} ('[' and ']' are not permitted)") + end + + "[#{name}]" + end + + defp quote_names(names), do: intersperse_map(names, ?,, "e_name/1) + + defp quote_table(nil, name), do: quote_table(name) + + defp quote_table({server, db, schema}, name), + do: [quote_table(server), ".", quote_table(db), ".", quote_table(schema), ".", quote_table(name)] + + defp quote_table({db, schema}, name), + do: [quote_table(db), ".", quote_table(schema), ".", quote_table(name)] + + defp quote_table(prefix, name), + do: [quote_table(prefix), ".", quote_table(name)] + + defp quote_table(name) when is_atom(name), do: quote_table(Atom.to_string(name)) + + defp quote_table(name) do + if String.contains?(name, "[") or String.contains?(name, "]") do + error!(nil, "bad table name #{inspect(name)} '[' and ']' are not permitted") + end + + "[#{name}]" + end + + defp unquoted_name(prefix, name, column_name), + do: unquoted_name(unquoted_name(prefix, name), column_name) + + defp unquoted_name(nil, name), do: unquoted_name(name) + + defp unquoted_name(prefix, name) do + prefix = if is_atom(prefix), do: Atom.to_string(prefix), else: prefix + name = if is_atom(name), do: Atom.to_string(name), else: name + + [prefix, ".", name] + end + + defp unquoted_name(name) when is_atom(name), do: unquoted_name(Atom.to_string(name)) + + defp unquoted_name(name) do + if String.contains?(name, ["[", "]"]) do + error!(nil, "bad table name #{inspect(name)} '[' and ']' are not permitted") + end + + name + end + + defp intersperse_map([], _separator, _mapper), do: [] + defp intersperse_map([elem], _separator, mapper), do: mapper.(elem) + + defp intersperse_map([elem | rest], separator, mapper) do + [mapper.(elem), separator | intersperse_map(rest, separator, mapper)] + end + + defp intersperse_reduce(list, separator, user_acc, reducer, acc \\ []) + + defp intersperse_reduce([], _separator, user_acc, _reducer, acc), + do: {acc, user_acc} + + defp intersperse_reduce([elem], _separator, user_acc, reducer, acc) do + {elem, user_acc} = reducer.(elem, user_acc) + {[acc | elem], user_acc} + end + + defp intersperse_reduce([elem | rest], separator, user_acc, reducer, acc) do + {elem, user_acc} = reducer.(elem, user_acc) + intersperse_reduce(rest, separator, user_acc, reducer, [acc, elem, separator]) + end + + defp if_do(condition, value) do + if condition, do: value, else: [] + end + + defp escape_string(value) when is_binary(value) do + value |> :binary.replace("'", "''", [:global]) + end + + defp ecto_to_db(type, size, precision, scale, query \\ nil) + + defp ecto_to_db({:array, _}, _, _, _, query), + do: error!(query, "Array type is not supported by TDS") + + defp ecto_to_db(:id, _, _, _, _), do: "bigint" + defp ecto_to_db(:serial, _, _, _, _), do: "int IDENTITY(1,1)" + defp ecto_to_db(:bigserial, _, _, _, _), do: "bigint IDENTITY(1,1)" + defp 
ecto_to_db(:binary_id, _, _, _, _), do: "uniqueidentifier" + defp ecto_to_db(:boolean, _, _, _, _), do: "bit" + defp ecto_to_db(:string, nil, _, _, _), do: "nvarchar(255)" + defp ecto_to_db(:string, :max, _, _, _), do: "nvarchar(max)" + defp ecto_to_db(:string, s, _, _, _) when s in 1..4_000, do: "nvarchar(#{s})" + defp ecto_to_db(:float, nil, _, _, _), do: "float" + defp ecto_to_db(:float, s, _, _, _) when s in 1..53, do: "float(#{s})" + defp ecto_to_db(:binary, nil, _, _, _), do: "varbinary(max)" + defp ecto_to_db(:binary, s, _, _, _) when s in 1..8_000, do: "varbinary(#{s})" + defp ecto_to_db(:uuid, _, _, _, _), do: "uniqueidentifier" + defp ecto_to_db(:map, nil, _, _, _), do: "nvarchar(max)" + defp ecto_to_db(:map, s, _, _, _) when s in 0..4_000, do: "nvarchar(#{s})" + defp ecto_to_db({:map, _}, nil, _, _, _), do: "nvarchar(max)" + defp ecto_to_db({:map, _}, s, _, _, _) when s in 1..4_000, do: "nvarchar(#{s})" + defp ecto_to_db(:time, _, _, _, _), do: "time(0)" + defp ecto_to_db(:time_usec, _, p, _, _) when p in 0..7, do: "time(#{p})" + defp ecto_to_db(:time_usec, _, _, _, _), do: "time(6)" + defp ecto_to_db(:utc_datetime, _, _, _, _), do: "datetime" + defp ecto_to_db(:utc_datetime_usec, _, p, _, _) when p in 0..7, do: "datetime2(#{p})" + defp ecto_to_db(:utc_datetime_usec, _, _, _, _), do: "datetime2(6)" + defp ecto_to_db(:naive_datetime, _, _, _, _), do: "datetime" + defp ecto_to_db(:naive_datetime_usec, _, p, _, _) when p in 0..7, do: "datetime2(#{p})" + defp ecto_to_db(:naive_datetime_usec, _, _, _, _), do: "datetime2(6)" + + defp ecto_to_db(other, size, _, _, _) when is_integer(size) do + "#{Atom.to_string(other)}(#{size})" + end + + defp ecto_to_db(other, _, precision, scale, _) when is_integer(precision) do + "#{Atom.to_string(other)}(#{precision},#{scale || 0})" + end + + defp ecto_to_db(atom, nil, nil, nil, _) when is_atom(atom) do + Atom.to_string(atom) + end + + defp ecto_to_db(type, _, _, _, _) do + raise ArgumentError, + "unsupported type `#{inspect(type)}`. The type can either be an atom, a string " <> + "or a tuple of the form `{:map, t}` where `t` itself follows the same conditions." + end + + defp error!(nil, message) do + raise ArgumentError, message + end + + defp error!(query, message) do + raise Ecto.QueryError, query: query, message: message + end + + defp if_table_not_exists(condition, name, prefix) do + if_do(condition, [ + "IF NOT EXISTS (SELECT * FROM [INFORMATION_SCHEMA].[TABLES] ", + "WHERE ", + "[TABLE_NAME] = ", + ?', + "#{name}", + ?', + if_do(prefix != nil, [ + " AND [TABLE_SCHEMA] = ", + ?', + "#{prefix}", + ?' + ]), + ") " + ]) + end + + defp if_table_exists(condition, name, prefix) do + if_do(condition, [ + "IF EXISTS (SELECT * FROM [INFORMATION_SCHEMA].[TABLES] ", + "WHERE ", + "[TABLE_NAME] = ", + ?', + "#{name}", + ?', + if_do(prefix != nil, [ + " AND [TABLE_SCHEMA] = ", + ?', + "#{prefix}", + ?' 
+ ]), + ") " + ]) + end + + defp if_column_exists(prefix, table, column_name) do + [ + "IF EXISTS (SELECT 1 FROM [sys].[columns] ", + "WHERE [name] = N'#{column_name}' AND ", + "[object_id] = OBJECT_ID(N'", + if_do(prefix != nil, ["#{prefix}", ?.]), + "#{table}", + "')) " + ] + end + + defp if_column_not_exists(prefix, table, column_name) do + [ + "IF NOT EXISTS (SELECT 1 FROM [sys].[columns] ", + "WHERE [name] = N'#{column_name}' AND ", + "[object_id] = OBJECT_ID(N'", + if_do(prefix != nil, ["#{prefix}", ?.]), + "#{table}", + "')) " + ] + end + + defp list_param_to_args(idx, length) do + Enum.map_join(1..length, ",", &"@#{idx + &1}") + end + + defp as_string(atom) when is_atom(atom), do: Atom.to_string(atom) + defp as_string(str), do: str + + defp if_index_exists(condition, index_name, table_name) do + if_do(condition, [ + "IF EXISTS (SELECT name FROM sys.indexes WHERE name = N'", + as_string(index_name), + "' AND object_id = OBJECT_ID(N'", + as_string(table_name), + "')) " + ]) + end + + defp if_index_not_exists(condition, index_name, table_name) do + if_do(condition, [ + "IF NOT EXISTS (SELECT name FROM sys.indexes WHERE name = N'", + as_string(index_name), + "' AND object_id = OBJECT_ID(N'", + as_string(table_name), + "')) " + ]) + end + + defp if_check_constraint_exists(condition, name, prefix) do + if_do(condition, [ + "IF NOT EXISTS (SELECT * ", + "FROM [INFORMATION_SCHEMA].[CHECK_CONSTRAINTS] ", + "WHERE [CONSTRAINT_NAME] = N'#{name}'", + if_do(prefix != nil, [ + " AND [CONSTRAINT_SCHEMA] = N'#{prefix}'" + ]), + ") " + ]) + end + + # types + # "U" - table, + # "C", "PK", "UQ", "F ", "D " - constraints + defp if_object_exists(name, type, statement) do + [ + "IF (OBJECT_ID(N'", + name, + "', '", + type, + "') IS NOT NULL) ", + statement + ] + end + + defp if_exists_drop_constraint(name, statement_prefix) do + [ + if_object_exists( + name, + "D", + "#{statement_prefix}DROP CONSTRAINT #{name}; " + ) + ] + end + end +end diff --git a/deps/ecto_sql/lib/ecto/adapters/tds/types.ex b/deps/ecto_sql/lib/ecto/adapters/tds/types.ex new file mode 100644 index 0000000..937b7ef --- /dev/null +++ b/deps/ecto_sql/lib/ecto/adapters/tds/types.ex @@ -0,0 +1,305 @@ +if Code.ensure_loaded?(Tds) do + defmodule Tds.Ecto.UUID do + @moduledoc """ + An TDS adapter type for UUIDs strings. + + If you are using Tds adapter and UUIDs in your project, instead of `Ecto.UUID` + you should use Tds.Ecto.UUID to generate correct bytes that should be stored + in database. + """ + + use Ecto.Type + + @typedoc """ + A hex-encoded UUID string. + """ + @type t :: <<_::288>> + + @typedoc """ + A raw binary representation of a UUID. + """ + @type raw :: <<_::128>> + + @doc false + @impl true + def type(), do: :uuid + + @doc """ + Casts to UUID. + """ + @impl true + @spec cast(t | raw | any) :: {:ok, t} | :error + def cast(<< a1, a2, a3, a4, a5, a6, a7, a8, ?-, + b1, b2, b3, b4, ?-, + c1, c2, c3, c4, ?-, + d1, d2, d3, d4, ?-, + e1, e2, e3, e4, e5, e6, e7, e8, e9, e10, e11, e12 >>) do + << c(a1), c(a2), c(a3), c(a4), c(a5), c(a6), c(a7), c(a8), ?-, + c(b1), c(b2), c(b3), c(b4), ?-, + c(c1), c(c2), c(c3), c(c4), ?-, + c(d1), c(d2), c(d3), c(d4), ?-, + c(e1), c(e2), c(e3), c(e4), c(e5), c(e6), c(e7), c(e8), c(e9), c(e10), c(e11), c(e12) >> + catch + :error -> :error + else + casted -> {:ok, casted} + end + + def cast(<>), do: encode(bin) + def cast(_), do: :error + + @doc """ + Same as `cast/1` but raises `Ecto.CastError` on invalid arguments. 
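+
+ For example (illustrative value), a well-formed UUID string is returned as is,
+ while an invalid value raises:
+
+     Tds.Ecto.UUID.cast!("601d74e4-a8d3-4b6e-8365-eddb4c893327")
+     #=> "601d74e4-a8d3-4b6e-8365-eddb4c893327"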
+ """ + def cast!(value) do + case cast(value) do + {:ok, uuid} -> uuid + :error -> raise Ecto.CastError, type: __MODULE__, value: value + end + end + + @compile {:inline, c: 1} + + defp c(?0), do: ?0 + defp c(?1), do: ?1 + defp c(?2), do: ?2 + defp c(?3), do: ?3 + defp c(?4), do: ?4 + defp c(?5), do: ?5 + defp c(?6), do: ?6 + defp c(?7), do: ?7 + defp c(?8), do: ?8 + defp c(?9), do: ?9 + defp c(?A), do: ?a + defp c(?B), do: ?b + defp c(?C), do: ?c + defp c(?D), do: ?d + defp c(?E), do: ?e + defp c(?F), do: ?f + defp c(?a), do: ?a + defp c(?b), do: ?b + defp c(?c), do: ?c + defp c(?d), do: ?d + defp c(?e), do: ?e + defp c(?f), do: ?f + defp c(_), do: throw(:error) + + @doc """ + Converts a string representing a UUID into a binary. + """ + @impl true + @spec dump(t | any) :: {:ok, raw} | :error + def dump(<>) do + try do + << d(a7)::4, d(a8)::4, d(a5)::4, d(a6)::4, + d(a3)::4, d(a4)::4, d(a1)::4, d(a2)::4, + d(b3)::4, d(b4)::4, d(b1)::4, d(b2)::4, + d(c3)::4, d(c4)::4, d(c1)::4, d(c2)::4, + d(d1)::4, d(d2)::4, d(d3)::4, d(d4)::4, + d(e1)::4, d(e2)::4, d(e3)::4, d(e4)::4, + d(e5)::4, d(e6)::4, d(e7)::4, d(e8)::4, + d(e9)::4, d(e10)::4, d(e11)::4, d(e12)::4 >> + catch + :error -> :error + else + binary -> + {:ok, binary} + end + end + + def dump(_), do: :error + + def dump!(value) do + case dump(value) do + {:ok, binary} -> binary + :error -> raise ArgumentError, "Invalid uuid value #{inspect(value)}" + end + end + + @compile {:inline, d: 1} + + defp d(?0), do: 0 + defp d(?1), do: 1 + defp d(?2), do: 2 + defp d(?3), do: 3 + defp d(?4), do: 4 + defp d(?5), do: 5 + defp d(?6), do: 6 + defp d(?7), do: 7 + defp d(?8), do: 8 + defp d(?9), do: 9 + defp d(?A), do: 10 + defp d(?B), do: 11 + defp d(?C), do: 12 + defp d(?D), do: 13 + defp d(?E), do: 14 + defp d(?F), do: 15 + defp d(?a), do: 10 + defp d(?b), do: 11 + defp d(?c), do: 12 + defp d(?d), do: 13 + defp d(?e), do: 14 + defp d(?f), do: 15 + defp d(_), do: throw(:error) + + @doc """ + Converts a binary UUID into a string. + """ + @impl true + @spec load(raw | any) :: {:ok, t} | :error + def load(<<_::128>> = uuid) do + encode(uuid) + end + + def load(<<_::64, ?-, _::32, ?-, _::32, ?-, _::32, ?-, _::96>> = string) do + raise ArgumentError, "trying to load string UUID as Tds.Ecto.UUID: #{inspect string}. " <> + "Maybe you wanted to declare :uuid as your database field?" + end + + def load(_), do: :error + + @doc """ + Generates a version 4 (random) UUID. + """ + @spec generate() :: t + def generate do + {:ok, uuid} = encode(bingenerate()) + uuid + end + + @doc """ + Generates a version 4 (random) UUID in the binary format. + """ + @spec bingenerate() :: raw + def bingenerate do + <> = :crypto.strong_rand_bytes(15) + <> + end + + # Callback invoked by autogenerate fields. 
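+ # `autogenerate/0` simply delegates to `generate/0`, so such fields receive a
+ # version 4 UUID string. Note that the binary written to the database by
+ # `dump/1` has its first three groups byte-swapped, because SQL Server stores
+ # `uniqueidentifier` values with those groups little-endian, e.g. (illustrative
+ # value):
+ #
+ #     Tds.Ecto.UUID.dump!("00112233-4455-6677-8899-aabbccddeeff")
+ #     #=> <<0x33, 0x22, 0x11, 0x00, 0x55, 0x44, 0x77, 0x66, 0x88, 0x99, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF>>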
+ @impl true + def autogenerate, do: generate() + + defp encode(<>) do + << e(a7), e(a8), e(a5), e(a6), e(a3), e(a4), e(a1), e(a2), ?-, + e(b3), e(b4), e(b1), e(b2), ?-, + e(c3), e(c4), e(c1), e(c2), ?-, + e(d1), e(d2), e(d3), e(d4), ?-, + e(e1), e(e2), e(e3), e(e4), e(e5), e(e6), e(e7), e(e8), e(e9), e(e10), e(e11), e(e12) >> + catch + :error -> :error + else + encoded -> {:ok, encoded} + end + + @compile {:inline, e: 1} + + defp e(0), do: ?0 + defp e(1), do: ?1 + defp e(2), do: ?2 + defp e(3), do: ?3 + defp e(4), do: ?4 + defp e(5), do: ?5 + defp e(6), do: ?6 + defp e(7), do: ?7 + defp e(8), do: ?8 + defp e(9), do: ?9 + defp e(10), do: ?a + defp e(11), do: ?b + defp e(12), do: ?c + defp e(13), do: ?d + defp e(14), do: ?e + defp e(15), do: ?f + end + + defmodule Tds.Ecto.VarChar do + @moduledoc """ + An Tds adapter Ecto Type that wraps erlang string into tuple so TDS driver + can understand if erlang string should be encoded as NVarChar or Varchar. + + Due some limitations in Ecto and Tds driver, it is not possible to + support collations other than the one that is set on connection during login. + Please be aware of this limitation if you plan to store varchar values in + your database using Ecto since you will probably lose some codepoints in + the value during encoding. Instead use `tds_encoding` library and first + encode value and then annotate it as `:binary` by calling `Ecto.Query.API.type/2` + in your query. This way all codepoints will be properly preserved during + insert to database. + """ + use Ecto.Type + + @typedoc """ + A erlang string + """ + @type t :: String.t + + @typedoc """ + A value annotated as varchar. + """ + @type varchar :: {String.t, :varchar} + + @doc false + @impl true + def type(), do: :varchar + + @doc """ + Casts to string. + """ + @spec cast(t | varchar | any) :: {:ok, t} | :error + @impl true + def cast({value, :varchar}) do + # In case we get already dumped value + {:ok, value} + end + + def cast(value) when is_binary(value) do + {:ok, value} + end + + def cast(_), do: :error + + @doc """ + Same as `cast/1` but raises `Ecto.CastError` on invalid arguments. + """ + @spec cast!(t | varchar | any) :: t + def cast!(value) do + case cast(value) do + {:ok, uuid} -> uuid + :error -> raise Ecto.CastError, type: __MODULE__, value: value + end + end + + @doc """ + Loads the DB type as is. + """ + @impl true + @spec load(t | any) :: {:ok, t} | :error + def load(value) do + {:ok, value} + end + + @doc """ + Converts a string representing a VarChar into a tuple `{value, :varchar}`. + + Returns `:error` if value is not binary. + """ + @impl true + @spec dump(t | any) :: {:ok, varchar} | :error + def dump(value) when is_binary(value) do + {:ok, {value, :varchar}} + end + + def dump(_), do: :error + end +end diff --git a/deps/ecto_sql/lib/ecto/migration.ex b/deps/ecto_sql/lib/ecto/migration.ex new file mode 100644 index 0000000..3675ba8 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/migration.ex @@ -0,0 +1,1368 @@ +defmodule Ecto.Migration do + @moduledoc """ + Migrations are used to modify your database schema over time. + + This module provides many helpers for migrating the database, + allowing developers to use Elixir to alter their storage in + a way that is database independent. + + Migrations typically provide two operations: `up` and `down`, + allowing us to migrate the database forward or roll it back + in case of errors. 
+ + In order to manage migrations, Ecto creates a table called + `schema_migrations` in the database, which stores all migrations + that have already been executed. You can configure the name of + this table with the `:migration_source` configuration option. + + You can configure a different database for the table that + manages your migrations by setting the `:migration_repo` + configuration option to a different repository. + + Ecto also locks the `schema_migrations` table when running + migrations, guaranteeing two different servers cannot run the same + migration at the same time. + + ## Creating your first migration + + Migrations are defined inside the "priv/REPO/migrations" where REPO + is the last part of the repository name in underscore. For example, + migrations for `MyApp.Repo` would be found in "priv/repo/migrations". + For `MyApp.CustomRepo`, it would be found in "priv/custom_repo/migrations". + + Each file in the migrations directory has the following structure: + + ```text + NUMBER_NAME.exs + ``` + + The NUMBER is a unique number that identifies the migration. It is + usually the timestamp of when the migration was created. The NAME + must also be unique and it quickly identifies what the migration + does. For example, if you need to track the "weather" in your system, + you can start a new file at "priv/repo/migrations/20190417140000_add_weather_table.exs" + that will have the following contents: + + defmodule MyRepo.Migrations.AddWeatherTable do + use Ecto.Migration + + def up do + create table("weather") do + add :city, :string, size: 40 + add :temp_lo, :integer + add :temp_hi, :integer + add :prcp, :float + + timestamps() + end + end + + def down do + drop table("weather") + end + end + + The `up/0` function is responsible to migrate your database forward. + the `down/0` function is executed whenever you want to rollback. + The `down/0` function must always do the opposite of `up/0`. + Inside those functions, we invoke the API defined in this module, + you will find conveniences for managing tables, indexes, columns, + references, as well as running custom SQL commands. + + To run a migration, we generally use Mix tasks. For example, you can + run the migration above by going to the root of your project and + typing: + + $ mix ecto.migrate + + You can also roll it back by calling: + + $ mix ecto.rollback --step 1 + + Note rollback requires us to say how much we want to rollback. + On the other hand, `mix ecto.migrate` will always run all pending + migrations. + + In practice, we don't create migration files by hand either, we + typically use `mix ecto.gen.migration` to generate the file with + the proper timestamp and then we just fill in its contents: + + $ mix ecto.gen.migration add_weather_table + + ## Mix tasks + + As seen above, Ecto provides many Mix tasks to help developers work + with migrations. We summarize them below: + + * `mix ecto.gen.migration` - generates a + migration that the user can fill in with particular commands + * `mix ecto.migrate` - migrates a repository + * `mix ecto.migrations` - shows all migrations and their status + * `mix ecto.rollback` - rolls back a particular migration + + Run `mix help COMMAND` for more information on a particular command. + For a lower level API for running migrations, see `Ecto.Migrator`. + + ## Change + + Having to write both `up/0` and `down/0` functions for every + migration is tedious and error prone. 
For this reason, Ecto allows + you to defined a `change/0` callback with all of the code you want + to execute when migrating and Ecto will automatically figure out + the `down/0` for you. For example, the migration above can be + written as: + + defmodule MyRepo.Migrations.AddWeatherTable do + use Ecto.Migration + + def change do + create table("weather") do + add :city, :string, size: 40 + add :temp_lo, :integer + add :temp_hi, :integer + add :prcp, :float + + timestamps() + end + end + end + + However, note that not all commands are reversible. Trying to rollback + a non-reversible command will raise an `Ecto.MigrationError`. + + A notable command in this regard is `execute/2`, which is reversible in + `change/0` by accepting a pair of plain SQL strings. The first is run on + forward migrations (`up/0`) and the second when rolling back (`down/0`). + + If `up/0` and `down/0` are implemented in a migration, they take precedence, + and `change/0` isn't invoked. + + ## Field Types + + The Ecto primitive types are mapped to the appropriate database + type by the various database adapters. For example, `:string` is + converted to `:varchar`, `:binary` to `:bytea` or `:blob`, and so on. + + In particular, note that: + + * the `:string` type in migrations by default has a limit of 255 characters. + If you need more or less characters, pass the `:size` option, such + as `add :field, :string, size: 10`. If you don't want to impose a limit, + most databases support a `:text` type or similar + + * the `:binary` type in migrations by default has no size limit. If you want + to impose a limit, pass the `:size` option accordingly. In MySQL, passing + the size option changes the underlying field from "blob" to "varbinary" + + Any other type will be given as is to the database. For example, you + can use `:text`, `:char`, or `:varchar` as types. Types that have spaces + in their names can be wrapped in double quotes, such as `:"int unsigned"`, + `:"time without time zone"`, etc. + + ## Executing and flushing + + Instructions inside of migrations are not executed immediately. Instead + they are performed after the relevant `up`, `change`, or `down` callback + terminates. + + However, in some situations you may want to guarantee that all of the + previous steps have been executed before continuing. This is useful when + you need to apply a set of changes to the table before continuing with the + migration. This can be done with `flush/0`: + + def up do + ... + flush() + ... + end + + However `flush/0` will raise if it would be called from `change` function when doing a rollback. + To avoid that we recommend to use `execute/2` with anonymous functions instead. + For more information and example usage please take a look at `execute/2` function. + + ## Comments + + Migrations where you create or alter a table support specifying table + and column comments. The same can be done when creating constraints + and indexes. Not all databases support this feature. 
+ + def up do + create index("posts", [:name], comment: "Index Comment") + create constraint("products", "price_must_be_positive", check: "price > 0", comment: "Constraint Comment") + create table("weather", prefix: "north_america", comment: "Table Comment") do + add :city, :string, size: 40, comment: "Column Comment" + timestamps() + end + end + + ## Repo configuration + + The following migration configuration options are available for a given repository: + + * `:migration_source` - Version numbers of migrations will be saved in a + table named `schema_migrations` by default. You can configure the name of + the table via: + + config :app, App.Repo, migration_source: "my_migrations" + + * `:migration_primary_key` - By default, Ecto uses the `:id` column with type + `:bigserial`, but you can configure it via: + + config :app, App.Repo, migration_primary_key: [name: :uuid, type: :binary_id] + + config :app, App.Repo, migration_primary_key: false + + * `:migration_foreign_key` - By default, Ecto uses the migration_primary_key type + for foreign keys when references/2 is used, but you can configure it via: + + config :app, App.Repo, migration_foreign_key: [column: :uuid, type: :binary_id] + + * `:migration_timestamps` - By default, Ecto uses the `:naive_datetime` as the type, + `:inserted_at` as the name of the column for storing insertion times, `:updated_at` as + the name of the column for storing last-updated-at times, but you can configure it + via: + + config :app, App.Repo, migration_timestamps: [ + type: :utc_datetime, + inserted_at: :created_at, + updated_at: :changed_at + ] + + * `:migration_lock` - By default, Ecto will lock the migration table. This allows + multiple nodes to attempt to run migrations at the same time but only one will + succeed. You can disable the `migration_lock` by setting it to `false` + + config :app, App.Repo, migration_lock: false + + * `:migration_default_prefix` - Ecto defaults to `nil` for the database prefix for + migrations, but you can configure it via: + + config :app, App.Repo, migration_default_prefix: "my_prefix" + + * `:migration_repo` - The migration repository is where the table managing the + migrations will be stored (`migration_source` defines the table name). It defaults + to the given repository itself but you can configure it via: + + config :app, App.Repo, migration_repo: App.MigrationRepo + + * `:priv` - the priv directory for the repo with the location of important assets, + such as migrations. For a repository named `MyApp.FooRepo`, `:priv` defaults to + "priv/foo_repo" and migrations should be placed at "priv/foo_repo/migrations" + + * `:start_apps_before_migration` - A list of applications to be started before + running migrations. Used by `Ecto.Migrator.with_repo/3` and the migration tasks: + + config :app, App.Repo, start_apps_before_migration: [:ssl, :some_custom_logger] + + ## Prefixes + + Migrations support specifying a table prefix or index prefix which will + target either a schema (if using PostgreSQL) or a different database (if using + MySQL). If no prefix is provided, the default schema or database is used. + + Any reference declared in the table migration refers by default to the table + with the same declared prefix. 
The prefix is specified in the table options: + + def up do + create table("weather", prefix: "north_america") do + add :city, :string, size: 40 + add :temp_lo, :integer + add :temp_hi, :integer + add :prcp, :float + add :group_id, references(:groups) + + timestamps() + end + + create index("weather", [:city], prefix: "north_america") + end + + Note: if using MySQL with a prefixed table, you must use the same prefix + for the references since cross-database references are not supported. + + When using a prefixed table with either MySQL or PostgreSQL, you must use the + same prefix for the index field to ensure that you index the prefix-qualified + table. + + ## Transaction Callbacks + + If possible, each migration runs inside a transaction. This is true for Postgres, + but not true for MySQL, as the latter does not support DDL transactions. + + In some rare cases, you may need to execute some common behavior after beginning + a migration transaction, or before committing that transaction. For instance, one + might desire to set a `lock_timeout` for each lock in the migration transaction. + + You can do so by defining `c:after_begin/0` and `c:before_commit/0` callbacks to + your migration. + + However, if you need do so for every migration module, implement this callback + for every migration can be quite repetitive. Luckily, you can handle this by + providing your migration module: + + defmodule MyApp.Migration do + defmacro __using__(_) do + quote do + use Ecto.Migration + + def after_begin() do + repo().query! "SET lock_timeout TO '5s'" + end + end + end + end + + Then in your migrations you can `use MyApp.Migration` to share this behavior + among all your migrations. + """ + + @doc """ + Migration code to run immediately after the transaction is opened. + + Keep in mind that it is treated like any normal migration code, and should + consider both the up *and* down cases of the migration. + """ + @callback after_begin() :: term + + @doc """ + Migration code to run immediately before the transaction is closed. + + Keep in mind that it is treated like any normal migration code, and should + consider both the up *and* down cases of the migration. + """ + @callback before_commit() :: term + @optional_callbacks after_begin: 0, before_commit: 0 + + defmodule Index do + @moduledoc """ + Used internally by adapters. + + To define an index in a migration, see `Ecto.Migration.index/3`. + """ + defstruct table: nil, + prefix: nil, + name: nil, + columns: [], + unique: false, + concurrently: false, + using: nil, + include: [], + where: nil, + comment: nil, + options: nil + + @type t :: %__MODULE__{ + table: String.t, + prefix: atom, + name: atom, + columns: [atom | String.t], + unique: boolean, + concurrently: boolean, + using: atom | String.t, + include: [atom | String.t], + where: atom | String.t, + comment: String.t | nil, + options: String.t + } + end + + defmodule Table do + @moduledoc """ + Used internally by adapters. + + To define a table in a migration, see `Ecto.Migration.table/2`. + """ + defstruct name: nil, prefix: nil, comment: nil, primary_key: true, engine: nil, options: nil + @type t :: %__MODULE__{name: String.t, prefix: atom | nil, comment: String.t | nil, primary_key: boolean, + engine: atom, options: String.t} + end + + defmodule Reference do + @moduledoc """ + Used internally by adapters. + + To define a reference in a migration, see `Ecto.Migration.references/2`. 
+ """ + defstruct name: nil, prefix: nil, table: nil, column: :id, type: :bigserial, + on_delete: :nothing, on_update: :nothing, validate: true, + with: [], match: nil + @type t :: %__MODULE__{table: String.t, prefix: atom | nil, column: atom, type: atom, + on_delete: atom, on_update: atom, validate: boolean, + with: list, match: atom | nil} + end + + defmodule Constraint do + @moduledoc """ + Used internally by adapters. + + To define a constraint in a migration, see `Ecto.Migration.constraint/3`. + """ + defstruct name: nil, table: nil, check: nil, exclude: nil, prefix: nil, comment: nil, validate: true + @type t :: %__MODULE__{name: atom, table: String.t, prefix: atom | nil, + check: String.t | nil, exclude: String.t | nil, comment: String.t | nil, validate: boolean} + end + + defmodule Command do + @moduledoc """ + Used internally by adapters. + + This represents the up and down legs of a reversible raw command + that is usually defined with `Ecto.Migration.execute/1`. + + To define a reversible command in a migration, see `Ecto.Migration.execute/2`. + """ + defstruct up: nil, down: nil + @type t :: %__MODULE__{up: String.t, down: String.t} + end + + alias Ecto.Migration.Runner + + @doc false + defmacro __using__(_) do + quote location: :keep do + import Ecto.Migration + @disable_ddl_transaction false + @disable_migration_lock false + @before_compile Ecto.Migration + end + end + + @doc false + defmacro __before_compile__(_env) do + quote do + def __migration__ do + [ + disable_ddl_transaction: @disable_ddl_transaction, + disable_migration_lock: @disable_migration_lock + ] + end + end + end + + @doc """ + Creates a table. + + By default, the table will also include an `:id` primary key field that + has a type of `:bigserial`. Check the `table/2` docs for more information. + + ## Examples + + create table(:posts) do + add :title, :string, default: "Untitled" + add :body, :text + + timestamps() + end + + """ + defmacro create(object, do: block) do + expand_create(object, :create, block) + end + + @doc """ + Creates a table if it does not exist. + + Works just like `create/2` but does not raise an error when the table + already exists. + """ + defmacro create_if_not_exists(object, do: block) do + expand_create(object, :create_if_not_exists, block) + end + + defp expand_create(object, command, block) do + quote do + table = %Table{} = unquote(object) + Runner.start_command({unquote(command), Ecto.Migration.__prefix__(table)}) + + if primary_key = table.primary_key && Ecto.Migration.__primary_key__() do + {name, type, opts} = primary_key + add(name, type, opts) + end + + unquote(block) + Runner.end_command() + table + end + end + + @doc """ + Alters a table. + + ## Examples + + alter table("posts") do + add :summary, :text + modify :title, :text + remove :views + end + + """ + defmacro alter(object, do: block) do + quote do + table = %Table{} = unquote(object) + Runner.start_command({:alter, Ecto.Migration.__prefix__(table)}) + unquote(block) + Runner.end_command() + end + end + + @doc """ + Creates one of the following: + + * an index + * a table with only the :id primary key + * a constraint + + When reversing (in a `change/0` running backwards), indexes are only dropped + if they exist, and no errors are raised. To enforce dropping an index, use + `drop/1`. 
+ + ## Examples + + create index("posts", [:name]) + create table("version") + create constraint("products", "price_must_be_positive", check: "price > 0") + + """ + def create(%Index{} = index) do + Runner.execute {:create, __prefix__(index)} + index + end + + def create(%Constraint{} = constraint) do + Runner.execute {:create, __prefix__(constraint)} + constraint + end + + def create(%Table{} = table) do + do_create table, :create + table + end + + @doc """ + Creates an index or a table with only `:id` field if one does not yet exist. + + ## Examples + + create_if_not_exists index("posts", [:name]) + + create_if_not_exists table("version") + + """ + def create_if_not_exists(%Index{} = index) do + Runner.execute {:create_if_not_exists, __prefix__(index)} + end + + def create_if_not_exists(%Table{} = table) do + do_create table, :create_if_not_exists + end + + defp do_create(table, command) do + columns = + if primary_key = table.primary_key && Ecto.Migration.__primary_key__() do + {name, type, opts} = primary_key + [{:add, name, type, opts}] + else + [] + end + + Runner.execute {command, __prefix__(table), columns} + end + + @doc """ + Drops one of the following: + + * an index + * a table + * a constraint + + ## Examples + + drop index("posts", [:name]) + drop table("posts") + drop constraint("products", "price_must_be_positive") + drop index("posts", [:name]), mode: :cascade + drop table("posts"), mode: :cascade + + ## Options + + * `:mode` - when set to `:cascade`, automatically drop objects that depend + on the index, and in turn all objects that depend on those objects + on the table. Default is `:restrict` + + """ + def drop(%{} = index_or_table_or_constraint, opts \\ []) when is_list(opts) do + Runner.execute {:drop, __prefix__(index_or_table_or_constraint), Keyword.get(opts, :mode, :restrict)} + index_or_table_or_constraint + end + + @doc """ + Drops a table or index if it exists. + + Does not raise an error if the specified table or index does not exist. + + ## Examples + + drop_if_exists index("posts", [:name]) + drop_if_exists table("posts") + drop_if_exists index("posts", [:name]), mode: :cascade + drop_if_exists table("posts"), mode: :cascade + + ## Options + + * `:mode` - when set to `:cascade`, automatically drop objects that depend + on the index, and in turn all objects that depend on those objects + on the table. Default is `:restrict` + + """ + def drop_if_exists(%{} = index_or_table, opts \\ []) when is_list(opts) do + Runner.execute {:drop_if_exists, __prefix__(index_or_table), Keyword.get(opts, :mode, :restrict)} + + index_or_table + end + + @doc """ + Returns a table struct that can be given to `create/2`, `alter/2`, `drop/1`, + etc. + + ## Examples + + create table("products") do + add :name, :string + add :price, :decimal + end + + drop table("products") + + create table("products", primary_key: false) do + add :name, :string + add :price, :decimal + end + + ## Options + + * `:primary_key` - when `false`, a primary key field is not generated on table + creation. + * `:engine` - customizes the table storage for supported databases. For MySQL, + the default is InnoDB. + * `:prefix` - the prefix for the table. This prefix will automatically be used + for all constraints and references defined for this table unless explicitly + overridden in said constraints/references. + * `:comment` - adds a comment to the table. + * `:options` - provide custom options that will be appended after the generated + statement. For example, "WITH", "INHERITS", or "ON COMMIT" clauses. 
+ + """ + def table(name, opts \\ []) + + def table(name, opts) when is_atom(name) do + table(Atom.to_string(name), opts) + end + + def table(name, opts) when is_binary(name) and is_list(opts) do + struct(%Table{name: name}, opts) + end + + @doc ~S""" + Returns an index struct that can be given to `create/1`, `drop/1`, etc. + + Expects the table name as the first argument and the index field(s) as + the second. The fields can be atoms, representing columns, or strings, + representing expressions that are sent as-is to the database. + + ## Options + + * `:name` - the name of the index. Defaults to "#{table}_#{column}_index". + * `:unique` - indicates whether the index should be unique. Defaults to + `false`. + * `:concurrently` - indicates whether the index should be created/dropped + concurrently. + * `:using` - configures the index type. + * `:prefix` - specify an optional prefix for the index. + * `:where` - specify conditions for a partial index. + * `:include` - specify fields for a covering index. This is not supported + by all databases. For more information on PostgreSQL support, please + [read the official docs](https://www.postgresql.org/docs/current/indexes-index-only-scans.html). + * `:comment` - adds a comment to the index. + + ## Adding/dropping indexes concurrently + + PostgreSQL supports adding/dropping indexes concurrently (see the + [docs](http://www.postgresql.org/docs/current/static/sql-createindex.html)). + However, this feature does not work well with the transactions used by + Ecto to guarantee integrity during migrations. + + Therefore, to migrate indexes concurrently, you need to set + both `@disable_ddl_transaction` and `@disable_migration_lock` to true: + + defmodule MyRepo.Migrations.CreateIndexes do + use Ecto.Migration + @disable_ddl_transaction true + @disable_migration_lock true + + def change do + create index("posts", [:slug], concurrently: true) + end + end + + Disabling DDL transactions removes the guarantee that all of the changes + in the migration will happen at once. Disabling the migration lock removes + the guarantee only a single node will run a given migration if multiple + nodes are attempting to migrate at the same time. + + Since running migrations outside a transaction and without locks can be + dangerous, consider performing very few operations in migrations that add + concurrent indexes. We recommend to run migrations with concurrent indexes + in isolation and disable those features only temporarily. + + ## Index types + + When creating an index, the index type can be specified with the `:using` + option. The `:using` option can be an atom or a string, and its value is + passed to the generated `USING` clause as-is. + + For example, PostgreSQL supports several index types like B-tree (the + default), Hash, GIN, and GiST. More information on index types can be found + in the [PostgreSQL docs](http://www.postgresql.org/docs/current/indexes-types.html). + + ## Partial indexes + + Databases like PostgreSQL and MSSQL support partial indexes. + + A partial index is an index built over a subset of a table. The subset + is defined by a conditional expression using the `:where` option. + The `:where` option can be an atom or a string; its value is passed + to the generated `WHERE` clause as-is. + + More information on partial indexes can be found in the [PostgreSQL + docs](http://www.postgresql.org/docs/current/indexes-partial.html). 
+ + ## Examples + + # With no name provided, the name of the below index defaults to + # products_category_id_sku_index + create index("products", [:category_id, :sku], unique: true) + + # The name can also be set explicitly + create index("products", [:category_id, :sku], name: :my_special_name) + + # Indexes can be added concurrently + create index("products", [:category_id, :sku], concurrently: true) + + # The index type can be specified + create index("products", [:name], using: :hash) + + # Partial indexes are created by specifying a :where option + create index("products", [:user_id], where: "price = 0", name: :free_products_index) + + # Covering indexes are created by specifying a :include option + create index("products", [:user_id], include: [:category_id]) + + Indexes also support custom expressions. Some databases may require the + index expression to be written between parentheses: + + # Create an index on a custom expression + create index("products", ["(lower(name))"], name: :products_lower_name_index) + + # Create a tsvector GIN index on PostgreSQL + create index("products", ["(to_tsvector('english', name))"], + name: :products_name_vector, using: "GIN") + """ + def index(table, columns, opts \\ []) + + def index(table, columns, opts) when is_atom(table) do + index(Atom.to_string(table), columns, opts) + end + + def index(table, column, opts) when is_binary(table) and is_atom(column) do + index(table, [column], opts) + end + + def index(table, columns, opts) when is_binary(table) and is_list(columns) and is_list(opts) do + validate_index_opts!(opts) + index = struct(%Index{table: table, columns: columns}, opts) + %{index | name: index.name || default_index_name(index)} + end + + @doc """ + Shortcut for creating a unique index. + + See `index/3` for more information. + """ + def unique_index(table, columns, opts \\ []) + + def unique_index(table, columns, opts) when is_list(opts) do + index(table, columns, [unique: true] ++ opts) + end + + defp default_index_name(index) do + [index.table, index.columns, "index"] + |> List.flatten + |> Enum.map(&to_string(&1)) + |> Enum.map(&String.replace(&1, ~r"[^\w_]", "_")) + |> Enum.map(&String.replace_trailing(&1, "_", "")) + |> Enum.join("_") + |> String.to_atom + end + + @doc """ + Executes arbitrary SQL, anonymous function or a keyword command. + + The argument is typically a string, containing the SQL command to be executed. + Keyword commands exist for non-SQL adapters and are not used in most situations. + + Supplying an anonymous function does allow for arbitrary code to execute as + part of the migration. This is most often used in combination with `repo/0` + by library authors who want to create high-level migration helpers. + + Reversible commands can be defined by calling `execute/2`. + + ## Examples + + execute "CREATE EXTENSION postgres_fdw" + + execute create: "posts", capped: true, size: 1024 + + execute(fn -> repo().query!("SELECT $1::integer + $2", [40, 2], [log: :info]) end) + + execute(fn -> repo().update_all("posts", set: [published: true]) end) + """ + def execute(command) when is_binary(command) or is_function(command, 0) or is_list(command) do + Runner.execute command + end + + @doc """ + Executes reversible SQL commands. + + This is useful for database-specific functionality that does not + warrant special support in Ecto, for example, creating and dropping + a PostgreSQL extension. The `execute/2` form avoids having to define + separate `up/0` and `down/0` blocks that each contain an `execute/1` + expression. 
+ + The allowed parameters are explained in `execute/1`. + + ## Examples + + defmodule MyApp.MyMigration do + use Ecto.Migration + + def change do + execute "CREATE EXTENSION postgres_fdw", "DROP EXTENSION postgres_fdw" + execute(&execute_up/0, &execute_down/0) + end + + defp execute_up, do: repo().query!("select 'Up query …';", [], [log: :info]) + defp execute_down, do: repo().query!("select 'Down query …';", [], [log: :info]) + end + """ + def execute(up, down) when (is_binary(up) or is_function(up, 0) or is_list(up)) and + (is_binary(down) or is_function(down, 0) or is_list(down)) do + Runner.execute %Command{up: up, down: down} + end + + @doc """ + Gets the migrator direction. + """ + @spec direction :: :up | :down + def direction do + Runner.migrator_direction() + end + + @doc """ + Gets the migrator repo. + """ + @spec repo :: Ecto.Repo.t + def repo do + Runner.repo() + end + + @doc """ + Gets the migrator prefix. + """ + def prefix do + Runner.prefix() + end + + @doc """ + Adds a column when creating or altering a table. + + This function also accepts Ecto primitive types as column types + that are normalized by the database adapter. For example, + `:string` is converted to `:varchar`, `:binary` to `:bits` or `:blob`, + and so on. + + However, the column type is not always the same as the type used in your + schema. For example, a schema that has a `:string` field can be supported by + columns of type `:char`, `:varchar`, `:text`, and others. For this reason, + this function also accepts `:text` and other type annotations that are native + to the database. These are passed to the database as-is. + + To sum up, the column type may be either an Ecto primitive type, + which is normalized in cases where the database does not understand it, + such as `:string` or `:binary`, or a database type which is passed as-is. + Custom Ecto types like `Ecto.UUID` are not supported because + they are application-level concerns and may not always map to the database. + + Note: It may be necessary to quote case-sensitive, user-defined type names. + For example, PostgreSQL normalizes all identifiers to lower case unless + they are wrapped in double quotes. To ensure a case-sensitive type name + is sent properly, it must be defined `:'"LikeThis"'` or `:"\"LikeThis\""`. + This is not necessary for column names because Ecto quotes them automatically. + Type names are not automatically quoted because they may be expressions such + as `varchar(255)`. + + ## Examples + + create table("posts") do + add :title, :string, default: "Untitled" + end + + alter table("posts") do + add :summary, :text # Database type + add :object, :map # Elixir type which is handled by the database + add :custom, :'"UserDefinedType"' # A case-sensitive, user-defined type name + end + + ## Options + + * `:primary_key` - when `true`, marks this field as the primary key. + If multiple fields are marked, a composite primary key will be created. + * `:default` - the column's default value. It can be a string, number, empty + list, list of strings, list of numbers, or a fragment generated by + `fragment/1`. + * `:null` - determines whether the column accepts null values. When not specified, + the database will use its default behaviour (which is to treat the column as nullable + in most databases). + * `:size` - the size of the type (for example, the number of characters). + The default is no size, except for `:string`, which defaults to `255`. + * `:precision` - the precision for a numeric type. Required when `:scale` is + specified. 
+ * `:scale` - the scale of a numeric type. Defaults to `0`. + * `:comment` - adds a comment to the added column. + * `:after` - positions the field after the specified one. Only supported on MySQL; + it is ignored by other databases. + + """ + def add(column, type, opts \\ []) when is_atom(column) and is_list(opts) do + validate_precision_opts!(opts, column) + validate_type!(type) + Runner.subcommand {:add, column, type, opts} + end + + @doc """ + Adds a column if it does not exist yet when altering a table. + + If the `type` value is a `%Reference{}`, it is used to add a constraint. + + `type` and `opts` are exactly the same as in `add/3`. + + This command is not reversible as Ecto does not know about column existence before the creation attempt. + + ## Examples + + alter table("posts") do + add_if_not_exists :title, :string, default: "" + end + + """ + def add_if_not_exists(column, type, opts \\ []) when is_atom(column) and is_list(opts) do + validate_precision_opts!(opts, column) + validate_type!(type) + Runner.subcommand {:add_if_not_exists, column, type, opts} + end + + @doc """ + Renames a table. + + ## Examples + + rename table("posts"), to: table("new_posts") + """ + def rename(%Table{} = table_current, to: %Table{} = table_new) do + Runner.execute {:rename, __prefix__(table_current), __prefix__(table_new)} + table_new + end + + @doc """ + Renames a column. + + Note that this occurs outside of the `alter` statement. + + ## Examples + + rename table("posts"), :title, to: :summary + """ + def rename(%Table{} = table, current_column, to: new_column) when is_atom(current_column) and is_atom(new_column) do + Runner.execute {:rename, __prefix__(table), current_column, new_column} + table + end + + @doc """ + Generates a fragment to be used as a default value. + + ## Examples + + create table("posts") do + add :inserted_at, :naive_datetime, default: fragment("now()") + end + """ + def fragment(expr) when is_binary(expr) do + {:fragment, expr} + end + + @doc """ + Adds `:inserted_at` and `:updated_at` timestamp columns. + + Those columns are of `:naive_datetime` type and by default cannot be null. A + list of `opts` can be given to customize the generated fields. + + The following options will override the repo configuration specified by + the `:migration_timestamps` option. + + ## Options + + * `:inserted_at` - the name of the column for storing insertion times. + Setting it to `false` disables the column. + * `:updated_at` - the name of the column for storing last-updated-at times. + Setting it to `false` disables the column. + * `:type` - the type of the `:inserted_at` and `:updated_at` columns. + Defaults to `:naive_datetime`. + * `:default` - the columns' default value. It can be a string, number, empty + list, list of strings, list of numbers, or a fragment generated by + `fragment/1`. + + """ + def timestamps(opts \\ []) when is_list(opts) do + opts = Keyword.merge(Runner.repo_config(:migration_timestamps, []), opts) + opts = Keyword.put_new(opts, :null, false) + + {type, opts} = Keyword.pop(opts, :type, :naive_datetime) + {inserted_at, opts} = Keyword.pop(opts, :inserted_at, :inserted_at) + {updated_at, opts} = Keyword.pop(opts, :updated_at, :updated_at) + + if inserted_at != false, do: add(inserted_at, type, opts) + if updated_at != false, do: add(updated_at, type, opts) + end + + @doc """ + Modifies the type of a column when altering a table. + + This command is not reversible unless the `:from` option is provided. 
+ When the `:from` option is set, the adapter will try to drop + the corresponding foreign key constraints before modifying the type. + Generally speaking, you want to pass the type and each option + you are modifying to `:from`, so the column can be rolled back properly. + However, note that `:from` cannot be used to modify primary keys, + as those are generally trickier to revert. + + See `add/3` for more information on supported types. + + If you want to modify a column without changing its type, + such as adding or dropping a null constraint, consider using + the `execute/2` command with the relevant SQL command instead + of `modify/3`, if supported by your database. This may avoid + redundant type updates and be more efficient, as an unnecessary + type update can lock the table, even if the type actually + doesn't change. + + ## Examples + + alter table("posts") do + modify :title, :text + end + + # Self rollback when using the :from option + alter table("posts") do + modify :title, :text, from: :string + end + + # Modify column with rollback options + alter table("posts") do + modify :title, :text, null: false, from: {:string, null: true} + end + + ## Options + + * `:null` - determines whether the column accepts null values. If this option is + not set, the nullable behaviour of the underlying column is not modified. + * `:default` - changes the default value of the column. + * `:from` - specifies the current type and options of the column. + * `:size` - specifies the size of the type (for example, the number of characters). + The default is no size. + * `:precision` - the precision for a numeric type. Required when `:scale` is + specified. + * `:scale` - the scale of a numeric type. Defaults to `0`. + * `:comment` - adds a comment to the modified column. + """ + def modify(column, type, opts \\ []) when is_atom(column) and is_list(opts) do + validate_precision_opts!(opts, column) + validate_type!(type) + Runner.subcommand {:modify, column, type, opts} + end + + @doc """ + Removes a column when altering a table. + + This command is not reversible as Ecto does not know what type it should add + the column back as. See `remove/3` as a reversible alternative. + + ## Examples + + alter table("posts") do + remove :title + end + + """ + def remove(column) when is_atom(column) do + Runner.subcommand {:remove, column} + end + + @doc """ + Removes a column in a reversible way when altering a table. + + `type` and `opts` are exactly the same as in `add/3`, and + they are used when the command is reversed. + + If the `type` value is a `%Reference{}`, it is used to remove the constraint. + + ## Examples + + alter table("posts") do + remove :title, :string, default: "" + end + + """ + def remove(column, type, opts \\ []) when is_atom(column) do + validate_type!(type) + Runner.subcommand {:remove, column, type, opts} + end + + @doc """ + Removes a column only if the column exists when altering a table. If the `type` value is a + `%Reference{}`, the corresponding constraint is dropped as well, using only the constraint name + stored on the reference struct. + + This command is not reversible as Ecto does not know about column existence before the removal attempt. + + ## Examples + + alter table("posts") do + remove_if_exists :title, :string + end + + """ + def remove_if_exists(column, type) when is_atom(column) do + validate_type!(type) + Runner.subcommand {:remove_if_exists, column, type} + end + + @doc ~S""" + Defines a foreign key. + + By default it assumes you are linking to the referenced table + via its primary key with name `:id`. 
If you are using a non-default + key setup (e.g. using `uuid` type keys) you must ensure you set the + options, such as `:name` and `:type`, to match your target key. + + ## Examples + + create table("products") do + add :group_id, references("groups") + end + + create table("categories") do + add :group_id, :integer + # A composite foreign key that points from categories (product_id, group_id) + # to products (id, group_id) + add :product_id, references("products", with: [group_id: :group_id]) + end + + ## Options + + * `:name` - The name of the underlying reference, which defaults to + "#{table}_#{column}_fkey". + * `:column` - The column name in the referenced table, which defaults to `:id`. + * `:prefix` - The prefix for the reference. Defaults to the prefix + defined by the block's `table/2` struct (the "products" table in + the example above), or `nil`. + * `:type` - The foreign key type, which defaults to `:bigserial`. + * `:on_delete` - What to do if the referenced entry is deleted. May be + `:nothing` (default), `:delete_all`, `:nilify_all`, or `:restrict`. + * `:on_update` - What to do if the referenced entry is updated. May be + `:nothing` (default), `:update_all`, `:nilify_all`, or `:restrict`. + * `:validate` - Whether or not to validate the foreign key constraint on + creation. Only available in PostgreSQL, and should be followed by + a command to validate the foreign key in a following migration if false. + * `:with` - defines additional keys to the foreign key in order to build + a composite foreign key + * `:match` - select if the match is `:simple`, `:partial`, or `:full`. This is + [supported only by PostgreSQL](https://www.postgresql.org/docs/current/sql-createtable.html) + at the moment. + + """ + def references(table, opts \\ []) + + def references(table, opts) when is_atom(table) do + references(Atom.to_string(table), opts) + end + + def references(table, opts) when is_binary(table) and is_list(opts) do + opts = Keyword.merge(foreign_key_repo_opts(), opts) + reference = struct(%Reference{table: table}, opts) + + unless reference.on_delete in [:nothing, :delete_all, :nilify_all, :restrict] do + raise ArgumentError, "unknown :on_delete value: #{inspect reference.on_delete}" + end + + unless reference.on_update in [:nothing, :update_all, :nilify_all, :restrict] do + raise ArgumentError, "unknown :on_update value: #{inspect reference.on_update}" + end + + reference + end + + defp foreign_key_repo_opts() do + case Runner.repo_config(:migration_primary_key, []) do + false -> [] + opts -> opts + end + |> Keyword.take([:type]) + |> Keyword.merge(Runner.repo_config(:migration_foreign_key, [])) + end + + @doc ~S""" + Defines a constraint (either a check constraint or an exclusion constraint) + to be evaluated by the database when a row is inserted or updated. + + ## Examples + + create constraint("users", :price_must_be_positive, check: "price > 0") + create constraint("size_ranges", :no_overlap, exclude: ~s|gist (int4range("from", "to", '[]') WITH &&)|) + drop constraint("products", "price_must_be_positive") + + ## Options + + * `:check` - A check constraint expression. Required when creating a check constraint. + * `:exclude` - An exclusion constraint expression. Required when creating an exclusion constraint. + * `:prefix` - The prefix for the table. + * `:validate` - Whether or not to validate the constraint on creation (true by default). 
Only + available in PostgreSQL, and should be followed by a command to validate the new constraint in + a following migration if false. + * `:comment` - adds a comment to the constraint. + + """ + def constraint(table, name, opts \\ []) + + def constraint(table, name, opts) when is_atom(table) do + constraint(Atom.to_string(table), name, opts) + end + + def constraint(table, name, opts) when is_binary(table) and is_list(opts) do + struct(%Constraint{table: table, name: name}, opts) + end + + @doc "Executes queued migration commands." + defmacro flush do + quote do + if direction() == :down and not function_exported?(__MODULE__, :down, 0) do + raise "calling flush() inside change when doing rollback is not supported." + else + Runner.flush() + end + end + end + + # Validation helpers + defp validate_type!(:datetime) do + raise ArgumentError, "the :datetime type in migrations is not supported, " <> + "please use :utc_datetime or :naive_datetime instead" + end + + defp validate_type!(type) when is_atom(type) do + case Atom.to_string(type) do + "Elixir." <> _ -> + raise ArgumentError, + "#{inspect type} is not a valid database type, " <> + "please use an atom like :string, :text and so on" + _ -> + :ok + end + end + + defp validate_type!({type, subtype}) when is_atom(type) and is_atom(subtype) do + validate_type!(subtype) + end + + defp validate_type!({type, subtype}) when is_atom(type) and is_tuple(subtype) do + for t <- Tuple.to_list(subtype), do: validate_type!(t) + end + + defp validate_type!(%Reference{} = reference) do + reference + end + + defp validate_type!(type) do + raise ArgumentError, """ + invalid migration type: #{inspect(type)}. Expected one of: + + * an atom, such as :string + * a quoted atom, such as :"integer unsigned" + * an Ecto.Type, such as Ecto.UUID + * a tuple of the above, such as {:array, :integer} or {:array, Ecto.UUID} + * a reference, such as references(:users) + + All Ecto types are allowed and properly translated. + All other types are sent to the database as is. + """ + end + + defp validate_index_opts!(opts) when is_list(opts) do + case Keyword.get_values(opts, :where) do + [_, _ | _] -> + raise ArgumentError, + "only one `where` keyword is supported when declaring a partial index. 
" <> + "To specify multiple conditions, write a single WHERE clause using AND between them" + + _ -> + :ok + end + end + + defp validate_index_opts!(opts), do: opts + + defp validate_precision_opts!(opts, column) when is_list(opts) do + if opts[:scale] && !opts[:precision] do + raise ArgumentError, "column #{Atom.to_string(column)} is missing precision option" + end + end + + @doc false + def __prefix__(%{prefix: prefix} = index_or_table) do + runner_prefix = Runner.prefix() + + cond do + is_nil(prefix) -> + prefix = runner_prefix || Runner.repo_config(:migration_default_prefix, nil) + %{index_or_table | prefix: prefix} + is_nil(runner_prefix) or runner_prefix == to_string(prefix) -> + index_or_table + true -> + raise Ecto.MigrationError, message: + "the :prefix option `#{prefix}` does not match the migrator prefix `#{runner_prefix}`" + end + end + + @doc false + def __primary_key__() do + case Runner.repo_config(:migration_primary_key, []) do + false -> + false + + opts when is_list(opts) -> + opts = Keyword.put(opts, :primary_key, true) + {name, opts} = Keyword.pop(opts, :name, :id) + {type, opts} = Keyword.pop(opts, :type, :bigserial) + {name, type, opts} + end + end +end diff --git a/deps/ecto_sql/lib/ecto/migration/runner.ex b/deps/ecto_sql/lib/ecto/migration/runner.ex new file mode 100644 index 0000000..3de10ce --- /dev/null +++ b/deps/ecto_sql/lib/ecto/migration/runner.ex @@ -0,0 +1,413 @@ +defmodule Ecto.Migration.Runner do + @moduledoc false + use Agent, restart: :temporary + + require Logger + + alias Ecto.Migration.Table + alias Ecto.Migration.Index + alias Ecto.Migration.Constraint + alias Ecto.Migration.Command + + @doc """ + Runs the given migration. + """ + def run(repo, config, version, module, direction, operation, migrator_direction, opts) do + level = Keyword.get(opts, :log, :info) + sql = Keyword.get(opts, :log_migrations_sql, false) + log = %{level: level, sql: sql} + args = {self(), repo, config, module, direction, migrator_direction, log} + + {:ok, runner} = DynamicSupervisor.start_child(Ecto.MigratorSupervisor, {__MODULE__, args}) + metadata(runner, opts) + + log(level, "== Running #{version} #{inspect module}.#{operation}/0 #{direction}") + {time, _} = :timer.tc(fn -> perform_operation(repo, module, operation) end) + log(level, "== Migrated #{version} in #{inspect(div(time, 100_000) / 10)}s") + + stop() + end + + @doc """ + Stores the runner metadata. + """ + def metadata(runner, opts) do + prefix = opts[:prefix] + Process.put(:ecto_migration, %{runner: runner, prefix: prefix && to_string(prefix)}) + end + + @doc """ + Starts the runner for the specified repo. + """ + def start_link({parent, repo, config, module, direction, migrator_direction, log}) do + Agent.start_link(fn -> + Process.link(parent) + + %{ + direction: direction, + repo: repo, + migration: module, + migrator_direction: migrator_direction, + command: nil, + subcommands: [], + log: log, + commands: [], + config: config + } + end) + end + + @doc """ + Stops the runner. + """ + def stop() do + Agent.stop(runner()) + end + + @doc """ + Accesses the given repository configuration. + """ + def repo_config(key, default) do + Agent.get(runner(), &Keyword.get(&1.config, key, default)) + end + + @doc """ + Returns the migrator command (up or down). 
+ + * forward + up: up + * forward + down: down + * forward + change: up + * backward + change: down + + """ + def migrator_direction do + Agent.get(runner(), & &1.migrator_direction) + end + + @doc """ + Gets the repo for this migration + """ + def repo do + Agent.get(runner(), & &1.repo) + end + + @doc """ + Gets the prefix for this migration + """ + def prefix do + case Process.get(:ecto_migration) do + %{prefix: prefix} -> prefix + _ -> raise "could not find migration runner process for #{inspect self()}" + end + end + + @doc """ + Executes queue migration commands. + + Reverses the order commands are executed when doing a rollback + on a change/0 function and resets commands queue. + """ + def flush do + %{commands: commands, direction: direction, repo: repo, log: log, migration: migration} = + Agent.get_and_update(runner(), fn state -> {state, %{state | commands: []}} end) + + commands = if direction == :backward, do: commands, else: Enum.reverse(commands) + + for command <- commands do + execute_in_direction(repo, migration, direction, log, command) + end + end + + @doc """ + Queues command tuples or strings for execution. + + Ecto.MigrationError will be raised when the server + is in `:backward` direction and `command` is irreversible. + """ + def execute(command) do + reply = + Agent.get_and_update(runner(), fn + %{command: nil} = state -> + {:ok, %{state | subcommands: [], commands: [command|state.commands]}} + %{command: _} = state -> + {:error, %{state | command: nil}} + end) + + case reply do + :ok -> + :ok + :error -> + raise Ecto.MigrationError, "cannot execute nested commands" + end + end + + @doc """ + Starts a command. + """ + def start_command(command) do + reply = + Agent.get_and_update(runner(), fn + %{command: nil} = state -> + {:ok, %{state | command: command}} + %{command: _} = state -> + {:error, %{state | command: command}} + end) + + case reply do + :ok -> + :ok + :error -> + raise Ecto.MigrationError, "cannot execute nested commands" + end + end + + @doc """ + Queues and clears current command. Must call `start_command/1` first. + """ + def end_command do + Agent.update runner(), fn state -> + {operation, object} = state.command + command = {operation, object, Enum.reverse(state.subcommands)} + %{state | command: nil, subcommands: [], commands: [command|state.commands]} + end + end + + @doc """ + Adds a subcommand to the current command. Must call `start_command/1` first. + """ + def subcommand(subcommand) do + reply = + Agent.get_and_update(runner(), fn + %{command: nil} = state -> + {:error, state} + state -> + {:ok, update_in(state.subcommands, &[subcommand|&1])} + end) + + case reply do + :ok -> + :ok + :error -> + raise Ecto.MigrationError, message: "cannot execute command outside of block" + end + end + + ## Execute + + defp execute_in_direction(repo, migration, :forward, log, %Command{up: up}) do + log_and_execute_ddl(repo, migration, log, up) + end + + defp execute_in_direction(repo, migration, :forward, log, command) do + log_and_execute_ddl(repo, migration, log, command) + end + + defp execute_in_direction(repo, migration, :backward, log, %Command{down: down}) do + log_and_execute_ddl(repo, migration, log, down) + end + + defp execute_in_direction(repo, migration, :backward, log, command) do + if reversed = reverse(command) do + log_and_execute_ddl(repo, migration, log, reversed) + else + raise Ecto.MigrationError, message: + "cannot reverse migration command: #{command command}. 
" <> + "You will need to explicitly define up/0 and down/0 in your migration" + end + end + + defp reverse({:create, %Index{} = index}), + do: {:drop, index, :restrict} + defp reverse({:create_if_not_exists, %Index{} = index}), + do: {:drop_if_exists, index, :restrict} + defp reverse({:drop, %Index{} = index, _}), + do: {:create, index} + defp reverse({:drop_if_exists, %Index{} = index, _}), + do: {:create_if_not_exists, index} + + defp reverse({:create, %Table{} = table, _columns}), + do: {:drop, table, :restrict} + defp reverse({:create_if_not_exists, %Table{} = table, _columns}), + do: {:drop_if_exists, table, :restrict} + defp reverse({:rename, %Table{} = table_current, %Table{} = table_new}), + do: {:rename, table_new, table_current} + defp reverse({:rename, %Table{} = table, current_column, new_column}), + do: {:rename, table, new_column, current_column} + defp reverse({:alter, %Table{} = table, changes}) do + if reversed = table_reverse(changes, []) do + {:alter, table, reversed} + end + end + + # It is not a good idea to reverse constraints because + # we can't guarantee data integrity when applying them back. + defp reverse({:create_if_not_exists, %Constraint{} = constraint}), + do: {:drop_if_exists, constraint, :restrict} + defp reverse({:create, %Constraint{} = constraint}), + do: {:drop, constraint, :restrict} + + defp reverse(_command), do: false + + defp table_reverse([{:remove, name, type, opts}| t], acc) do + table_reverse(t, [{:add, name, type, opts} | acc]) + end + defp table_reverse([{:modify, name, type, opts} | t], acc) do + case opts[:from] do + nil -> + false + + {reverse_type, from_opts} when is_list(from_opts) -> + reverse_from = {type, Keyword.delete(opts, :from)} + reverse_opts = Keyword.put(from_opts, :from, reverse_from) + table_reverse(t, [{:modify, name, reverse_type, reverse_opts} | acc]) + + reverse_type -> + reverse_opts = Keyword.put(opts, :from, type) + table_reverse(t, [{:modify, name, reverse_type, reverse_opts} | acc]) + end + end + defp table_reverse([{:add, name, _type, _opts} | t], acc) do + table_reverse(t, [{:remove, name} | acc]) + end + defp table_reverse([_ | _], _acc) do + false + end + defp table_reverse([], acc) do + acc + end + + ## Helpers + + defp perform_operation(repo, module, operation) do + if function_exported?(repo, :in_transaction?, 0) and repo.in_transaction?() do + if function_exported?(module, :after_begin, 0) do + module.after_begin() + flush() + end + + apply(module, operation, []) + flush() + + if function_exported?(module, :before_commit, 0) do + module.before_commit() + flush() + end + else + apply(module, operation, []) + flush() + end + end + + defp runner do + case Process.get(:ecto_migration) do + %{runner: runner} -> runner + _ -> raise "could not find migration runner process for #{inspect self()}" + end + end + + defp log_and_execute_ddl(repo, migration, log, {instruction, %Index{} = index}) do + if index.concurrently do + migration_config = migration.__migration__() + + if not migration_config[:disable_ddl_transaction] do + IO.warn """ + Migration #{inspect(migration)} has set index `#{index.name}` on table \ + `#{index.table}` to concurrently but did not disable ddl transaction. \ + Please set: + + use Ecto.Migration + @disable_ddl_transaction true + + """, [] + end + + if not migration_config[:disable_migration_lock] do + IO.warn """ + Migration #{inspect(migration)} has set index `#{index.name}` on table \ + `#{index.table}` to concurrently but did not disable migration lock. 
\ + Please set: + + use Ecto.Migration + @disable_migration_lock true + + """, [] + end + end + + log_and_execute_ddl(repo, log, {instruction, index}) + end + + defp log_and_execute_ddl(repo, _migration, log, command) do + log_and_execute_ddl(repo, log, command) + end + + defp log_and_execute_ddl(_repo, _log, func) when is_function(func, 0) do + func.() + :ok + end + + defp log_and_execute_ddl(repo, %{level: level, sql: sql}, command) do + log(level, command(command)) + meta = Ecto.Adapter.lookup_meta(repo.get_dynamic_repo()) + {:ok, logs} = repo.__adapter__().execute_ddl(meta, command, timeout: :infinity, log: sql) + + Enum.each(logs, fn {level, message, metadata} -> + log(level, message, metadata) + end) + + :ok + end + + defp log(level, msg, metadata \\ []) + defp log(false, _msg, _metadata), do: :ok + defp log(true, msg, metadata), do: Logger.log(:info, msg, metadata) + defp log(level, msg, metadata), do: Logger.log(level, msg, metadata) + + defp command(ddl) when is_binary(ddl) or is_list(ddl), + do: "execute #{inspect ddl}" + + defp command({:create, %Table{} = table, _}), + do: "create table #{quote_name(table.prefix, table.name)}" + defp command({:create_if_not_exists, %Table{} = table, _}), + do: "create table if not exists #{quote_name(table.prefix, table.name)}" + defp command({:alter, %Table{} = table, _}), + do: "alter table #{quote_name(table.prefix, table.name)}" + defp command({:drop, %Table{} = table, mode}), + do: "drop table #{quote_name(table.prefix, table.name)}#{drop_mode(mode)}" + defp command({:drop_if_exists, %Table{} = table, mode}), + do: "drop table if exists #{quote_name(table.prefix, table.name)}#{drop_mode(mode)}" + + defp command({:create, %Index{} = index}), + do: "create index #{quote_name(index.prefix, index.name)}" + defp command({:create_if_not_exists, %Index{} = index}), + do: "create index if not exists #{quote_name(index.prefix, index.name)}" + defp command({:drop, %Index{} = index, mode}), + do: "drop index #{quote_name(index.prefix, index.name)}#{drop_mode(mode)}" + defp command({:drop_if_exists, %Index{} = index, mode}), + do: "drop index if exists #{quote_name(index.prefix, index.name)}#{drop_mode(mode)}" + defp command({:rename, %Table{} = current_table, %Table{} = new_table}), + do: "rename table #{quote_name(current_table.prefix, current_table.name)} to #{quote_name(new_table.prefix, new_table.name)}" + defp command({:rename, %Table{} = table, current_column, new_column}), + do: "rename column #{current_column} to #{new_column} on table #{quote_name(table.prefix, table.name)}" + + defp command({:create, %Constraint{check: nil, exclude: nil}}), + do: raise ArgumentError, "a constraint must have either a check or exclude option" + defp command({:create, %Constraint{check: check, exclude: exclude}}) when is_binary(check) and is_binary(exclude), + do: raise ArgumentError, "a constraint must not have both check and exclude options" + defp command({:create, %Constraint{check: check} = constraint}) when is_binary(check), + do: "create check constraint #{constraint.name} on table #{quote_name(constraint.prefix, constraint.table)}" + defp command({:create, %Constraint{exclude: exclude} = constraint}) when is_binary(exclude), + do: "create exclude constraint #{constraint.name} on table #{quote_name(constraint.prefix, constraint.table)}" + defp command({:drop, %Constraint{} = constraint, _}), + do: "drop constraint #{constraint.name} from table #{quote_name(constraint.prefix, constraint.table)}" + defp command({:drop_if_exists, %Constraint{} = 
constraint, _}), + do: "drop constraint if exists #{constraint.name} from table #{quote_name(constraint.prefix, constraint.table)}" + + defp drop_mode(:restrict), do: "" + defp drop_mode(:cascade), do: " cascade" + + defp quote_name(nil, name), do: quote_name(name) + defp quote_name(prefix, name), do: quote_name(prefix) <> "." <> quote_name(name) + defp quote_name(name) when is_atom(name), do: quote_name(Atom.to_string(name)) + defp quote_name(name), do: name +end diff --git a/deps/ecto_sql/lib/ecto/migration/schema_migration.ex b/deps/ecto_sql/lib/ecto/migration/schema_migration.ex new file mode 100644 index 0000000..041b091 --- /dev/null +++ b/deps/ecto_sql/lib/ecto/migration/schema_migration.ex @@ -0,0 +1,64 @@ +defmodule Ecto.Migration.SchemaMigration do + # Defines a schema that works with a table that tracks schema migrations. + # The table name defaults to `schema_migrations`. + @moduledoc false + use Ecto.Schema + + import Ecto.Query, only: [from: 2] + + @primary_key false + schema "schema_migrations" do + field :version, :integer + timestamps updated_at: false + end + + # The migration flag is used to signal to the repository + # we are in a migration operation. + @default_opts [timeout: :infinity, log: false, schema_migration: true] + + def ensure_schema_migrations_table!(repo, config, opts) do + {repo, source} = get_repo_and_source(repo, config) + table_name = String.to_atom(source) + table = %Ecto.Migration.Table{name: table_name, prefix: opts[:prefix]} + meta = Ecto.Adapter.lookup_meta(repo.get_dynamic_repo()) + + commands = [ + {:add, :version, :bigint, primary_key: true}, + {:add, :inserted_at, :naive_datetime, []} + ] + + repo.__adapter__().execute_ddl(meta, {:create_if_not_exists, table, commands}, @default_opts) + end + + def versions(repo, config, prefix) do + {repo, source} = get_repo_and_source(repo, config) + {repo, from(m in source, select: type(m.version, :integer)), [prefix: prefix] ++ @default_opts} + end + + def up(repo, config, version, opts) do + {repo, source} = get_repo_and_source(repo, config) + + %__MODULE__{version: version} + |> Ecto.put_meta(source: source) + |> repo.insert(default_opts(opts)) + end + + def down(repo, config, version, opts) do + {repo, source} = get_repo_and_source(repo, config) + + from(m in source, where: m.version == type(^version, :integer)) + |> repo.delete_all(default_opts(opts)) + end + + def get_repo_and_source(repo, config) do + {Keyword.get(config, :migration_repo, repo), + Keyword.get(config, :migration_source, "schema_migrations")} + end + + defp default_opts(opts) do + Keyword.merge( + @default_opts, + [prefix: opts[:prefix], log: Keyword.get(opts, :log_migrator_sql, false)] + ) + end +end diff --git a/deps/ecto_sql/lib/ecto/migrator.ex b/deps/ecto_sql/lib/ecto/migrator.ex new file mode 100644 index 0000000..aa4c8ac --- /dev/null +++ b/deps/ecto_sql/lib/ecto/migrator.ex @@ -0,0 +1,776 @@ +defmodule Ecto.Migrator do + @moduledoc """ + Lower level API for managing migrations. + + EctoSQL provides three mix tasks for running and managing migrations: + + * `mix ecto.migrate` - migrates a repository + * `mix ecto.rollback` - rolls back a particular migration + * `mix ecto.migrations` - shows all migrations and their status + + Those tasks are built on top of the functions in this module. + While the tasks above cover most use cases, it may be necessary + from time to time to jump into the lower level API. 
For example, + if you are assembling an Elixir release, Mix is not available, + so this module provides a nice complement to still migrate your + system. + + To learn more about migrations in general, see `Ecto.Migration`. + + ## Example: Running an individual migration + + Imagine you have this migration: + + defmodule MyApp.MigrationExample do + use Ecto.Migration + + def up do + execute "CREATE TABLE users(id serial PRIMARY KEY, username text)" + end + + def down do + execute "DROP TABLE users" + end + end + + You can execute it manually with: + + Ecto.Migrator.up(Repo, 20080906120000, MyApp.MigrationExample) + + ## Example: Running migrations in a release + + Elixir v1.9 introduces `mix release`, which generates a self-contained + directory that consists of your application code, all of its dependencies, + plus the whole Erlang Virtual Machine (VM) and runtime. + + When a release is assembled, Mix is no longer available inside a release + and therefore none of the Mix tasks. Users may still need a mechanism to + migrate their databases. This can be achieved by using the `Ecto.Migrator` + module: + + defmodule MyApp.Release do + @app :my_app + + def migrate do + for repo <- repos() do + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true)) + end + end + + def rollback(repo, version) do + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version)) + end + + defp repos do + Application.load(@app) + Application.fetch_env!(@app, :ecto_repos) + end + end + + The example above uses `with_repo/3` to make sure the repository is + started and then runs all migrations up or a given migration down. + Note you will have to replace `MyApp` and `:my_app` on the first two + lines by your actual application name. Once the file above is added + to your application, you can assemble a new release and invoke the + commands above in the release root like this: + + $ bin/my_app eval "MyApp.Release.migrate" + $ bin/my_app eval "MyApp.Release.rollback(MyApp.Repo, 20190417140000)" + + """ + + require Logger + require Ecto.Query + + alias Ecto.Migration.Runner + alias Ecto.Migration.SchemaMigration + + @doc """ + Ensures the repo is started to perform migration operations. + + All of the applications required to run the repo will be started + beforehand with the chosen mode. If the repo has not yet been started, + it is manually started, with a `:pool_size` of 2, before the given + function is executed, and the repo is then terminated. If the repo + was already started, then the function is directly executed, without + terminating the repo afterwards. + + Although this function was designed to start repositories for running + migrations, it can be used by any code, Mix task, or release tooling + that needs to briefly start a repository to perform a certain operation + and then terminate. + + The repo may also configure a `:start_apps_before_migration` option + which is a list of applications to be started before the migration + runs. + + It returns `{:ok, fun_return, apps}`, with all apps that have been + started, or `{:error, term}`. + + ## Options + + * `:pool_size` - The pool size to start the repo for migrations. + Defaults to 2. + * `:mode` - The mode to start all applications. + Defaults to `:permanent`. 
+ + ## Examples + + {:ok, _, _} = + Ecto.Migrator.with_repo(repo, fn repo -> + Ecto.Migrator.run(repo, :up, all: true) + end) + + """ + def with_repo(repo, fun, opts \\ []) do + config = repo.config() + mode = Keyword.get(opts, :mode, :permanent) + apps = [:ecto_sql | config[:start_apps_before_migration] || []] + + extra_started = + Enum.flat_map(apps, fn app -> + {:ok, started} = Application.ensure_all_started(app, mode) + started + end) + + {:ok, repo_started} = repo.__adapter__().ensure_all_started(config, mode) + started = extra_started ++ repo_started + pool_size = Keyword.get(opts, :pool_size, 2) + migration_repo = config[:migration_repo] || repo + + case ensure_repo_started(repo, pool_size) do + {:ok, repo_after} -> + case ensure_migration_repo_started(migration_repo, repo) do + {:ok, migration_repo_after} -> + try do + {:ok, fun.(repo), started} + after + after_action(repo, repo_after) + after_action(migration_repo, migration_repo_after) + end + + {:error, _} = error -> + after_action(repo, repo_after) + error + end + + {:error, _} = error -> + error + end + end + + @doc """ + Gets the migrations path from a repository. + + This function accepts an optional second parameter to customize the + migrations directory. This can be used to specify a custom migrations + path. + """ + @spec migrations_path(Ecto.Repo.t, String.t) :: String.t + def migrations_path(repo, directory \\ "migrations") do + config = repo.config() + priv = config[:priv] || "priv/#{repo |> Module.split |> List.last |> Macro.underscore}" + app = Keyword.fetch!(config, :otp_app) + Application.app_dir(app, Path.join(priv, directory)) + end + + @doc """ + Gets all migrated versions. + + This function ensures the migration table exists + if no table has been defined yet. + + ## Options + + * `:prefix` - the prefix to run the migrations on + * `:dynamic_repo` - the name of the Repo supervisor process. + See `c:Ecto.Repo.put_dynamic_repo/1`. + * `:skip_table_creation` - skips any attempt to create the migration table + Useful for situations where user needs to check migrations but has + insufficient permissions to create the table. Note that migrations + commands may fail if this is set to true. Defaults to `false`. Accepts a + boolean. + """ + @spec migrated_versions(Ecto.Repo.t, Keyword.t) :: [integer] + def migrated_versions(repo, opts \\ []) do + lock_for_migrations true, repo, opts, fn _config, versions -> versions end + end + + @doc """ + Runs an up migration on the given repository. + + ## Options + + * `:log` - the level to use for logging of migration instructions. + Defaults to `:info`. Can be any of `Logger.level/0` values or a boolean. + * `:log_migrations_sql` - the level to use for logging of SQL commands + generated by migrations. Defaults to `false`. Can be any of `Logger.level/0` values + or a boolean. + * `:log_migrator_sql` - the level to use for logging of SQL commands emitted + by the migrator, such as transactions, locks, etc. Defaults to `false`. + * `:prefix` - the prefix to run the migrations on + * `:dynamic_repo` - the name of the Repo supervisor process. + See `c:Ecto.Repo.put_dynamic_repo/1`. 
+ * `:strict_version_order` - abort when applying a migration with old timestamp + (otherwise it emits a warning) + """ + @spec up(Ecto.Repo.t, integer, module, Keyword.t) :: :ok | :already_up + def up(repo, version, module, opts \\ []) do + opts = + if log_sql = opts[:log_sql] do + IO.warn(":log_sql is deprecated, please use log_migrations_sql instead") + Keyword.put(opts, :log_migrations_sql, log_sql) + else + opts + end + + conditional_lock_for_migrations module, version, repo, opts, fn config, versions -> + if version in versions do + :already_up + else + result = do_up(repo, config, version, module, opts) + + if version != Enum.max([version | versions]) do + latest = Enum.max(versions) + + message = """ + You are running migration #{version} but an older \ + migration with version #{latest} has already run. + + This can be an issue if you have already ran #{latest} in production \ + because a new deployment may migrate #{version} but a rollback command \ + would revert #{latest} instead of #{version}. + + If this can be an issue, we recommend to rollback #{version} and change \ + it to a version later than #{latest}. + """ + + if opts[:strict_version_order] do + raise Ecto.MigrationError, message + else + Logger.warn message + end + end + + result + end + end + end + + defp do_up(repo, config, version, module, opts) do + async_migrate_maybe_in_transaction(repo, config, version, module, :up, opts, fn -> + attempt(repo, config, version, module, :forward, :up, :up, opts) + || attempt(repo, config, version, module, :forward, :change, :up, opts) + || {:error, Ecto.MigrationError.exception( + "#{inspect module} does not implement a `up/0` or `change/0` function")} + end) + end + + @doc """ + Runs a down migration on the given repository. + + ## Options + + * `:log` - the level to use for logging of migration commands. Defaults to `:info`. + Can be any of `Logger.level/0` values or a boolean. + * `:log_migrations_sql` - the level to use for logging of SQL commands + generated by migrations. Defaults to `false`. Can be any of `Logger.level/0` + values or a boolean. + * `:log_migrator_sql` - the level to use for logging of SQL commands emitted + by the migrator, such as transactions, locks, etc. Defaults to `false`. + Can be any of `Logger.level/0` values or a boolean. + * `:prefix` - the prefix to run the migrations on + * `:dynamic_repo` - the name of the Repo supervisor process. + See `c:Ecto.Repo.put_dynamic_repo/1`. 
+ + """ + @spec down(Ecto.Repo.t, integer, module) :: :ok | :already_down + def down(repo, version, module, opts \\ []) do + opts = + if log_sql = opts[:log_sql] do + IO.warn(":log_sql is deprecated, please use log_migrations_sql instead") + Keyword.put(opts, :log_migrations_sql, log_sql) + else + opts + end + + conditional_lock_for_migrations module, version, repo, opts, fn config, versions -> + if version in versions do + do_down(repo, config, version, module, opts) + else + :already_down + end + end + end + + defp do_down(repo, config, version, module, opts) do + async_migrate_maybe_in_transaction(repo, config, version, module, :down, opts, fn -> + attempt(repo, config, version, module, :forward, :down, :down, opts) + || attempt(repo, config, version, module, :backward, :change, :down, opts) + || {:error, Ecto.MigrationError.exception( + "#{inspect module} does not implement a `down/0` or `change/0` function")} + end) + end + + defp async_migrate_maybe_in_transaction(repo, config, version, module, direction, opts, fun) do + dynamic_repo = repo.get_dynamic_repo() + + fun_with_status = fn -> + result = fun.() + apply(SchemaMigration, direction, [repo, config, version, opts]) + result + end + + fn -> run_maybe_in_transaction(repo, dynamic_repo, module, fun_with_status, opts) end + |> Task.async() + |> Task.await(:infinity) + end + + defp run_maybe_in_transaction(repo, dynamic_repo, module, fun, opts) do + repo.put_dynamic_repo(dynamic_repo) + + if module.__migration__[:disable_ddl_transaction] || + not repo.__adapter__().supports_ddl_transaction? do + fun.() + else + {:ok, result} = + repo.transaction(fun, log: migrator_log(opts), timeout: :infinity) + + result + end + catch kind, reason -> + {kind, reason, __STACKTRACE__} + end + + defp attempt(repo, config, version, module, direction, operation, reference, opts) do + if Code.ensure_loaded?(module) and + function_exported?(module, operation, 0) do + Runner.run(repo, config, version, module, direction, operation, reference, opts) + :ok + end + end + + @doc """ + Runs migrations for the given repository. + + Equivalent to: + + Ecto.Migrator.run(repo, [Ecto.Migrator.migrations_path(repo)], direction, opts) + + See `run/4` for more information. + """ + @spec run(Ecto.Repo.t, atom, Keyword.t) :: [integer] + def run(repo, direction, opts) do + run(repo, [migrations_path(repo)], direction, opts) + end + + @doc ~S""" + Apply migrations to a repository with a given strategy. + + The second argument identifies where the migrations are sourced from. + A binary representing directory (or a list of binaries representing + directories) may be passed, in which case we will load all files + following the "#{VERSION}_#{NAME}.exs" schema. The `migration_source` + may also be a list of tuples that identify the version number and + migration modules to be run, for example: + + Ecto.Migrator.run(Repo, [{0, MyApp.Migration1}, {1, MyApp.Migration2}, ...], :up, opts) + + A strategy (which is one of `:all`, `:step`, `:to`, or `:to_exclusive`) must be given as + an option. + + ## Execution model + + In order to run migrations, at least two database connections are + necessary. One is used to lock the "schema_migrations" table and + the other one to effectively run the migrations. This allows multiple + nodes to run migrations at the same time, but guarantee that only one + of them will effectively migrate the database. 
+ + A downside of this approach is that migrations cannot run dynamically + during test under the `Ecto.Adapters.SQL.Sandbox`, as the sandbox has + to share a single connection across processes to guarantee the changes + can be reverted. + + ## Options + + * `:all` - runs all available if `true` + + * `:step` - runs the specific number of migrations + + * `:to` - runs all until the supplied version is reached + (including the version given in `:to`) + + * `:to_exclusive` - runs all until the supplied version is reached + (excluding the version given in `:to_exclusive`) + + Plus all other options described in `up/4`. + """ + @spec run(Ecto.Repo.t, String.t | [String.t] | [{integer, module}], atom, Keyword.t) :: [integer] + def run(repo, migration_source, direction, opts) do + opts = + if log_sql = opts[:log_sql] do + IO.warn(":log_sql is deprecated, please use log_migrations_sql instead") + Keyword.put(opts, :log_migrations_sql, log_sql) + else + opts + end + + migration_source = List.wrap(migration_source) + + pending = + lock_for_migrations true, repo, opts, fn _config, versions -> + cond do + opts[:all] -> + pending_all(versions, migration_source, direction) + to = opts[:to] -> + pending_to(versions, migration_source, direction, to) + to_exclusive = opts[:to_exclusive] -> + pending_to_exclusive(versions, migration_source, direction, to_exclusive) + step = opts[:step] -> + pending_step(versions, migration_source, direction, step) + true -> + {:error, ArgumentError.exception("expected one of :all, :to, :to_exclusive, or :step strategies")} + end + end + + # The lock above already created the table, so we can now skip it. + opts = Keyword.put(opts, :skip_table_creation, true) + + ensure_no_duplication!(pending) + migrate(Enum.map(pending, &load_migration!/1), direction, repo, opts) + end + + @doc """ + Returns an array of tuples as the migration status of the given repo, + without actually running any migrations. + + Equivalent to: + + Ecto.Migrator.migrations(repo, [Ecto.Migrator.migrations_path(repo)]) + + """ + @spec migrations(Ecto.Repo.t) :: [{:up | :down, id :: integer(), name :: String.t}] + def migrations(repo) do + migrations(repo, [migrations_path(repo)]) + end + + @doc """ + Returns an array of tuples as the migration status of the given repo, + without actually running any migrations. 
+ """ + @spec migrations(Ecto.Repo.t, String.t | [String.t], Keyword.t) :: + [{:up | :down, id :: integer(), name :: String.t}] + def migrations(repo, directories, opts \\ []) do + directories = List.wrap(directories) + + repo + |> migrated_versions(opts) + |> collect_migrations(directories) + |> Enum.sort_by(fn {_, version, _} -> version end) + end + + defp collect_migrations(versions, migration_source) do + ups_with_file = + versions + |> pending_in_direction(migration_source, :down) + |> Enum.map(fn {version, name, _} -> {:up, version, name} end) + + ups_without_file = + versions + |> versions_without_file(migration_source) + |> Enum.map(fn version -> {:up, version, "** FILE NOT FOUND **"} end) + + downs = + versions + |> pending_in_direction(migration_source, :up) + |> Enum.map(fn {version, name, _} -> {:down, version, name} end) + + ups_with_file ++ ups_without_file ++ downs + end + + defp versions_without_file(versions, migration_source) do + versions_with_file = + migration_source + |> migrations_for() + |> Enum.map(fn {version, _, _} -> version end) + + versions -- versions_with_file + end + + defp lock_for_migrations(lock_or_migration_number, repo, opts, fun) do + dynamic_repo = Keyword.get(opts, :dynamic_repo, repo) + skip_table_creation = Keyword.get(opts, :skip_table_creation, false) + previous_dynamic_repo = repo.put_dynamic_repo(dynamic_repo) + + try do + config = repo.config() + + unless skip_table_creation do + verbose_schema_migration repo, "create schema migrations table", fn -> + SchemaMigration.ensure_schema_migrations_table!(repo, config, opts) + end + end + + {migration_repo, query, all_opts} = SchemaMigration.versions(repo, config, opts[:prefix]) + + migration_lock? = + Keyword.get(opts, :migration_lock, Keyword.get(config, :migration_lock, true)) + + opts = + opts + |> Keyword.put(:migration_source, config[:migration_source] || "schema_migrations") + |> Keyword.put(:log, migrator_log(opts)) + + result = + if lock_or_migration_number && migration_lock? do + # If there is a migration_repo, it wins over dynamic_repo, + # otherwise the dynamic_repo is the one locked in migrations. + meta_repo = if migration_repo != repo, do: migration_repo, else: dynamic_repo + meta = Ecto.Adapter.lookup_meta(meta_repo) + + migration_repo.__adapter__().lock_for_migrations(meta, opts, fn -> + fun.(config, migration_repo.all(query, all_opts)) + end) + else + fun.(config, migration_repo.all(query, all_opts)) + end + + case result do + {kind, reason, stacktrace} -> + :erlang.raise(kind, reason, stacktrace) + + {:error, error} -> + raise error + + result -> + result + end + after + repo.put_dynamic_repo(previous_dynamic_repo) + end + end + + defp conditional_lock_for_migrations(module, version, repo, opts, fun) do + lock = if module.__migration__[:disable_migration_lock], do: false, else: version + lock_for_migrations(lock, repo, opts, fun) + end + + defp pending_to(versions, migration_source, direction, target) do + within_target_version? = fn + {version, _, _}, target, :up -> + version <= target + {version, _, _}, target, :down -> + version >= target + end + + pending_in_direction(versions, migration_source, direction) + |> Enum.take_while(&(within_target_version?.(&1, target, direction))) + end + + defp pending_to_exclusive(versions, migration_source, direction, target) do + within_target_version? 
= fn + {version, _, _}, target, :up -> + version < target + {version, _, _}, target, :down -> + version > target + end + + pending_in_direction(versions, migration_source, direction) + |> Enum.take_while(&(within_target_version?.(&1, target, direction))) + end + + defp pending_step(versions, migration_source, direction, count) do + pending_in_direction(versions, migration_source, direction) + |> Enum.take(count) + end + + defp pending_all(versions, migration_source, direction) do + pending_in_direction(versions, migration_source, direction) + end + + defp pending_in_direction(versions, migration_source, :up) do + migration_source + |> migrations_for() + |> Enum.filter(fn {version, _name, _file} -> not (version in versions) end) + end + + defp pending_in_direction(versions, migration_source, :down) do + migration_source + |> migrations_for() + |> Enum.filter(fn {version, _name, _file} -> version in versions end) + |> Enum.reverse + end + + defp migrations_for(migration_source) when is_list(migration_source) do + migration_source + |> Enum.flat_map(fn + directory when is_binary(directory) -> + Path.join([directory, "**", "*.exs"]) + |> Path.wildcard() + |> Enum.map(&extract_migration_info/1) + |> Enum.filter(& &1) + + {version, module} -> + [{version, module, module}] + end) + |> Enum.sort() + end + + defp extract_migration_info(file) do + base = Path.basename(file) + + case Integer.parse(Path.rootname(base)) do + {integer, "_" <> name} -> {integer, name, file} + _ -> nil + end + end + + defp ensure_no_duplication!([{version, name, _} | t]) do + cond do + List.keyfind(t, version, 0) -> + raise Ecto.MigrationError, "migrations can't be executed, migration version #{version} is duplicated" + + List.keyfind(t, name, 1) -> + raise Ecto.MigrationError, "migrations can't be executed, migration name #{name} is duplicated" + + true -> + ensure_no_duplication!(t) + end + end + + defp ensure_no_duplication!([]), do: :ok + + defp load_migration!({version, _, mod}) when is_atom(mod) do + if migration?(mod) do + {version, mod} + else + raise Ecto.MigrationError, "module #{inspect(mod)} is not an Ecto.Migration" + end + end + + defp load_migration!({version, _, file}) when is_binary(file) do + loaded_modules = file |> Code.compile_file() |> Enum.map(&elem(&1, 0)) + + if mod = Enum.find(loaded_modules, &migration?/1) do + {version, mod} + else + raise Ecto.MigrationError, "file #{Path.relative_to_cwd(file)} does not define an Ecto.Migration" + end + end + + defp migration?(mod) do + function_exported?(mod, :__migration__, 0) + end + + defp migrate([], direction, _repo, opts) do + level = Keyword.get(opts, :log, :info) + log(level, "Migrations already #{direction}") + [] + end + + defp migrate(migrations, direction, repo, opts) do + for {version, mod} <- migrations, + do_direction(direction, repo, version, mod, opts), + do: version + end + + defp do_direction(:up, repo, version, mod, opts) do + conditional_lock_for_migrations mod, version, repo, opts, fn config, versions -> + unless version in versions do + do_up(repo, config, version, mod, opts) + end + end + end + + defp do_direction(:down, repo, version, mod, opts) do + conditional_lock_for_migrations mod, version, repo, opts, fn config, versions -> + if version in versions do + do_down(repo, config, version, mod, opts) + end + end + end + + defp verbose_schema_migration(repo, reason, fun) do + try do + fun.() + rescue + error -> + Logger.error """ + Could not #{reason}. 
This error usually happens due to the following: + + * The database does not exist + * The "schema_migrations" table, which Ecto uses for managing + migrations, was defined by another library + * There is a deadlock while migrating (such as using concurrent + indexes with a migration_lock) + + To fix the first issue, run "mix ecto.create". + + To address the second, you can run "mix ecto.drop" followed by + "mix ecto.create". Alternatively you may configure Ecto to use + another table and/or repository for managing migrations: + + config #{inspect repo.config[:otp_app]}, #{inspect repo}, + migration_source: "some_other_table_for_schema_migrations", + migration_repo: AnotherRepoForSchemaMigrations + + The full error report is shown below. + """ + reraise error, __STACKTRACE__ + end + end + + defp log(false, _msg), do: :ok + defp log(true, msg), do: Logger.info(msg) + defp log(level, msg), do: Logger.log(level, msg) + + defp migrator_log(opts) do + Keyword.get(opts, :log_migrator_sql, false) + end + + defp ensure_repo_started(repo, pool_size) do + case repo.start_link(pool_size: pool_size) do + {:ok, _} -> + {:ok, :stop} + + {:error, {:already_started, _pid}} -> + {:ok, :restart} + + {:error, _} = error -> + error + end + end + + defp ensure_migration_repo_started(repo, repo) do + {:ok, :noop} + end + + defp ensure_migration_repo_started(migration_repo, _repo) do + case migration_repo.start_link() do + {:ok, _} -> + {:ok, :stop} + + {:error, {:already_started, _pid}} -> + {:ok, :noop} + + {:error, _} = error -> + error + end + end + + defp after_action(repo, :restart) do + if Process.whereis(repo) do + %{pid: pid} = Ecto.Adapter.lookup_meta(repo) + Supervisor.restart_child(repo, pid) + end + end + + defp after_action(repo, :stop) do + repo.stop() + end + + defp after_action(_repo, :noop) do + :noop + end +end diff --git a/deps/ecto_sql/lib/mix/ecto_sql.ex b/deps/ecto_sql/lib/mix/ecto_sql.ex new file mode 100644 index 0000000..a2980be --- /dev/null +++ b/deps/ecto_sql/lib/mix/ecto_sql.ex @@ -0,0 +1,45 @@ +defmodule Mix.EctoSQL do + @moduledoc false + + @doc """ + Ensures the given repository's migrations paths exists on the file system. + """ + @spec ensure_migrations_paths(Ecto.Repo.t, Keyword.t) :: [String.t] + def ensure_migrations_paths(repo, opts) do + paths = Keyword.get_values(opts, :migrations_path) + paths = if paths == [], do: [Path.join(source_repo_priv(repo), "migrations")], else: paths + + if not Mix.Project.umbrella?() do + for path <- paths, not File.dir?(path) do + raise_missing_migrations(Path.relative_to_cwd(path), repo) + end + end + + paths + end + + defp raise_missing_migrations(path, repo) do + Mix.raise """ + Could not find migrations directory #{inspect path} + for repo #{inspect repo}. + + This may be because you are in a new project and the + migration directory has not been created yet. Creating an + empty directory at the path above will fix this error. + + If you expected existing migrations to be found, please + make sure your repository has been properly configured + and the configured path exists. + """ + end + + @doc """ + Returns the private repository path relative to the source. 
+ """ + def source_repo_priv(repo) do + config = repo.config() + priv = config[:priv] || "priv/#{repo |> Module.split() |> List.last() |> Macro.underscore()}" + app = Keyword.fetch!(config, :otp_app) + Path.join(Mix.Project.deps_paths()[app] || File.cwd!(), priv) + end +end diff --git a/deps/ecto_sql/lib/mix/tasks/ecto.dump.ex b/deps/ecto_sql/lib/mix/tasks/ecto.dump.ex new file mode 100644 index 0000000..e6c76db --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.dump.ex @@ -0,0 +1,79 @@ +defmodule Mix.Tasks.Ecto.Dump do + use Mix.Task + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Dumps the repository database structure" + @default_opts [quiet: false] + + @aliases [ + d: :dump_path, + q: :quiet, + r: :repo + ] + + @switches [ + dump_path: :string, + quiet: :boolean, + repo: [:string, :keep], + no_compile: :boolean, + no_deps_check: :boolean + ] + + @moduledoc """ + Dumps the current environment's database structure for the + given repository into a structure file. + + The repository must be set under `:ecto_repos` in the + current app configuration or given via the `-r` option. + + This task needs some shell utility to be present on the machine + running the task. + + Database | Utility needed + :--------- | :------------- + PostgreSQL | pg_dump + MySQL | mysqldump + + ## Example + + $ mix ecto.dump + + ## Command line options + + * `-r`, `--repo` - the repo to load the structure info from + * `-d`, `--dump-path` - the path of the dump file to create + * `-q`, `--quiet` - run the command quietly + * `--no-compile` - does not compile applications before dumping + * `--no-deps-check` - does not check dependencies before dumping + """ + + @impl true + def run(args) do + {opts, _} = OptionParser.parse! args, strict: @switches, aliases: @aliases + opts = Keyword.merge(@default_opts, opts) + + Enum.each parse_repo(args), fn repo -> + ensure_repo(repo, args) + ensure_implements(repo.__adapter__(), Ecto.Adapter.Structure, + "dump structure for #{inspect repo}") + + migration_repo = repo.config()[:migration_repo] || repo + + for repo <- Enum.uniq([repo, migration_repo]) do + config = Keyword.merge(repo.config(), opts) + + case repo.__adapter__().structure_dump(source_repo_priv(repo), config) do + {:ok, location} -> + unless opts[:quiet] do + Mix.shell().info "The structure for #{inspect repo} has been dumped to #{location}" + end + {:error, term} when is_binary(term) -> + Mix.raise "The structure for #{inspect repo} couldn't be dumped: #{term}" + {:error, term} -> + Mix.raise "The structure for #{inspect repo} couldn't be dumped: #{inspect term}" + end + end + end + end +end diff --git a/deps/ecto_sql/lib/mix/tasks/ecto.gen.migration.ex b/deps/ecto_sql/lib/mix/tasks/ecto.gen.migration.ex new file mode 100644 index 0000000..70fd9c0 --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.gen.migration.ex @@ -0,0 +1,121 @@ +defmodule Mix.Tasks.Ecto.Gen.Migration do + use Mix.Task + + import Macro, only: [camelize: 1, underscore: 1] + import Mix.Generator + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Generates a new migration for the repo" + + @aliases [ + r: :repo + ] + + @switches [ + change: :string, + repo: [:string, :keep], + no_compile: :boolean, + no_deps_check: :boolean, + migrations_path: :string + ] + + @moduledoc """ + Generates a migration. + + The repository must be set under `:ecto_repos` in the + current app configuration or given via the `-r` option. 
+ + ## Examples + + $ mix ecto.gen.migration add_posts_table + $ mix ecto.gen.migration add_posts_table -r Custom.Repo + + The generated migration filename will be prefixed with the current + timestamp in UTC which is used for versioning and ordering. + + By default, the migration will be generated to the + "priv/YOUR_REPO/migrations" directory of the current application + but it can be configured to be any subdirectory of `priv` by + specifying the `:priv` key under the repository configuration. + + This generator will automatically open the generated file if + you have `ECTO_EDITOR` set in your environment variable. + + ## Command line options + + * `-r`, `--repo` - the repo to generate migration for + * `--no-compile` - does not compile applications before running + * `--no-deps-check` - does not check dependencies before running + * `--migrations-path` - the path to run the migrations from, defaults to `priv/repo/migrations` + + ## Configuration + + If the current app configuration specifies a custom migration module + the generated migration code will use that rather than the default + `Ecto.Migration`: + + config :ecto_sql, migration_module: MyApplication.CustomMigrationModule + + """ + + @impl true + def run(args) do + repos = parse_repo(args) + + Enum.map repos, fn repo -> + case OptionParser.parse!(args, strict: @switches, aliases: @aliases) do + {opts, [name]} -> + ensure_repo(repo, args) + path = opts[:migrations_path] || Path.join(source_repo_priv(repo), "migrations") + base_name = "#{underscore(name)}.exs" + file = Path.join(path, "#{timestamp()}_#{base_name}") + unless File.dir?(path), do: create_directory path + + fuzzy_path = Path.join(path, "*_#{base_name}") + if Path.wildcard(fuzzy_path) != [] do + Mix.raise "migration can't be created, there is already a migration file with name #{name}." 
+ end + + # The :change option may be used by other tasks but not the CLI + assigns = [mod: Module.concat([repo, Migrations, camelize(name)]), change: opts[:change]] + create_file file, migration_template(assigns) + + if open?(file) and Mix.shell().yes?("Do you want to run this migration?") do + Mix.Task.run "ecto.migrate", ["-r", inspect(repo), "--migrations-path", path] + end + + file + + {_, _} -> + Mix.raise "expected ecto.gen.migration to receive the migration file name, " <> + "got: #{inspect Enum.join(args, " ")}" + end + end + + defp timestamp do + {{y, m, d}, {hh, mm, ss}} = :calendar.universal_time() + "#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}" + end + + defp pad(i) when i < 10, do: <<?0, ?0 + i>> + defp pad(i), do: to_string(i) + + defp migration_module do + case Application.get_env(:ecto_sql, :migration_module, Ecto.Migration) do + migration_module when is_atom(migration_module) -> migration_module + other -> Mix.raise "Expected :migration_module to be a module, got: #{inspect(other)}" + end + end + + embed_template :migration, """ + defmodule <%= inspect @mod %> do + use <%= inspect migration_module() %> + + def change do + <%= @change %> + end + end + """ +end diff --git a/deps/ecto_sql/lib/mix/tasks/ecto.load.ex b/deps/ecto_sql/lib/mix/tasks/ecto.load.ex new file mode 100644 index 0000000..f77a3d3 --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.load.ex @@ -0,0 +1,121 @@ +defmodule Mix.Tasks.Ecto.Load do + use Mix.Task + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Loads previously dumped database structure" + @default_opts [force: false, quiet: false] + + @aliases [ + d: :dump_path, + f: :force, + q: :quiet, + r: :repo + ] + + @switches [ + dump_path: :string, + force: :boolean, + quiet: :boolean, + repo: [:string, :keep], + no_compile: :boolean, + no_deps_check: :boolean, + skip_if_loaded: :boolean + ] + + @moduledoc """ + Loads the current environment's database structure for the + given repository from a previously dumped structure file. + + The repository must be set under `:ecto_repos` in the + current app configuration or given via the `-r` option. + + This task needs some shell utility to be present on the machine + running the task. + + Database | Utility needed + :--------- | :------------- + PostgreSQL | psql + MySQL | mysql + + ## Example + + $ mix ecto.load + + ## Command line options + + * `-r`, `--repo` - the repo to load the structure info into + * `-d`, `--dump-path` - the path of the dump file to load from + * `-q`, `--quiet` - run the command quietly + * `-f`, `--force` - do not ask for confirmation when loading data. + Configuration is asked only when `:start_permanent` is set to true + (typically in production) + * `--no-compile` - does not compile applications before loading + * `--no-deps-check` - does not check dependencies before loading + * `--skip-if-loaded` - does not load the dump file if the repo has the migrations table up + """ + + @impl true + def run(args, table_exists?
\\ &Ecto.Adapters.SQL.table_exists?/2) do + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + opts = Keyword.merge(@default_opts, opts) + + Enum.each(parse_repo(args), fn repo -> + ensure_repo(repo, args) + + ensure_implements( + repo.__adapter__(), + Ecto.Adapter.Structure, + "load structure for #{inspect(repo)}" + ) + + {migration_repo, source} = Ecto.Migration.SchemaMigration.get_repo_and_source(repo, repo.config()) + {:ok, loaded?, _} = Ecto.Migrator.with_repo(migration_repo, &table_exists?.(&1, source)) + + for repo <- Enum.uniq([repo, migration_repo]) do + cond do + loaded? and opts[:skip_if_loaded] -> + :ok + + (skip_safety_warnings?() and not loaded?) or opts[:force] or confirm_load(repo, loaded?) -> + load_structure(repo, opts) + + true -> + :ok + end + end + end) + end + + defp skip_safety_warnings? do + Mix.Project.config()[:start_permanent] != true + end + + defp confirm_load(repo, false) do + Mix.shell().yes?( + "Are you sure you want to load a new structure for #{inspect(repo)}? Any existing data in this repo may be lost." + ) + end + + defp confirm_load(repo, true) do + Mix.shell().yes?(""" + It looks like a structure was already loaded for #{inspect(repo)}. Any attempt to load it again might fail. + Are you sure you want to proceed? + """) + end + + defp load_structure(repo, opts) do + config = Keyword.merge(repo.config(), opts) + + case repo.__adapter__().structure_load(source_repo_priv(repo), config) do + {:ok, location} -> + unless opts[:quiet] do + Mix.shell().info "The structure for #{inspect repo} has been loaded from #{location}" + end + {:error, term} when is_binary(term) -> + Mix.raise "The structure for #{inspect repo} couldn't be loaded: #{term}" + {:error, term} -> + Mix.raise "The structure for #{inspect repo} couldn't be loaded: #{inspect term}" + end + end +end diff --git a/deps/ecto_sql/lib/mix/tasks/ecto.migrate.ex b/deps/ecto_sql/lib/mix/tasks/ecto.migrate.ex new file mode 100644 index 0000000..27352e1 --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.migrate.ex @@ -0,0 +1,149 @@ +defmodule Mix.Tasks.Ecto.Migrate do + use Mix.Task + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Runs the repository migrations" + + @aliases [ + n: :step, + r: :repo + ] + + @switches [ + all: :boolean, + step: :integer, + to: :integer, + to_exclusive: :integer, + quiet: :boolean, + prefix: :string, + pool_size: :integer, + log_sql: :boolean, + log_migrations_sql: :boolean, + log_migrator_sql: :boolean, + strict_version_order: :boolean, + repo: [:keep, :string], + no_compile: :boolean, + no_deps_check: :boolean, + migrations_path: :keep + ] + + @moduledoc """ + Runs the pending migrations for the given repository. + + Migrations are expected at "priv/YOUR_REPO/migrations" directory + of the current application, where "YOUR_REPO" is the last segment + in your repository name. For example, the repository `MyApp.Repo` + will use "priv/repo/migrations". The repository `Whatever.MyRepo` + will use "priv/my_repo/migrations". + + You can configure a repository to use another directory by specifying + the `:priv` key under the repository configuration. The "migrations" + part will be automatically appended to it. For instance, to use + "priv/custom_repo/migrations": + + config :my_app, MyApp.Repo, priv: "priv/custom_repo" + + This task runs all pending migrations by default. To migrate up to a + specific version number, supply `--to version_number`. To migrate a + specific number of times, use `--step n`. 
+ + The repositories to migrate are the ones specified under the + `:ecto_repos` option in the current app configuration. However, + if the `-r` option is given, it replaces the `:ecto_repos` config. + + Since Ecto tasks can only be executed once, if you need to migrate + multiple repositories, set `:ecto_repos` accordingly or pass the `-r` + flag multiple times. + + If a repository has not yet been started, one will be started outside + your application supervision tree and shutdown afterwards. + + ## Examples + + $ mix ecto.migrate + $ mix ecto.migrate -r Custom.Repo + + $ mix ecto.migrate -n 3 + $ mix ecto.migrate --step 3 + + $ mix ecto.migrate --to 20080906120000 + + ## Command line options + + * `--all` - run all pending migrations + + * `--log-migrations-sql` - log SQL generated by migration commands + + * `--log-migrator-sql` - log SQL generated by the migrator, such as + transactions, table locks, etc + + * `--migrations-path` - the path to load the migrations from, defaults to + `"priv/repo/migrations"`. This option may be given multiple times in which + case the migrations are loaded from all the given directories and sorted + as if they were in the same one + + * `--no-compile` - does not compile applications before migrating + + * `--no-deps-check` - does not check dependencies before migrating + + * `--pool-size` - the pool size if the repository is started + only for the task (defaults to 2) + + * `--prefix` - the prefix to run migrations on + + * `--quiet` - do not log migration commands + + * `-r`, `--repo` - the repo to migrate + + * `--step`, `-n` - run n number of pending migrations + + * `--strict-version-order` - abort when applying a migration with old + timestamp (otherwise it emits a warning) + + * `--to` - run all migrations up to and including version + + * `--to-exclusive` - run all migrations up to and excluding version + + """ + + @impl true + def run(args, migrator \\ &Ecto.Migrator.run/4) do + repos = parse_repo(args) + {opts, _} = OptionParser.parse! args, strict: @switches, aliases: @aliases + + opts = + if opts[:to] || opts[:to_exclusive] || opts[:step] || opts[:all], + do: opts, + else: Keyword.put(opts, :all, true) + + opts = + if opts[:quiet], + do: Keyword.merge(opts, [log: false, log_migrations_sql: false, log_migrator_sql: false]), + else: opts + + # Start ecto_sql explicitly before as we don't need + # to restart those apps if migrated. 
+ {:ok, _} = Application.ensure_all_started(:ecto_sql) + + for repo <- repos do + ensure_repo(repo, args) + paths = ensure_migrations_paths(repo, opts) + pool = repo.config[:pool] + + fun = + if Code.ensure_loaded?(pool) and function_exported?(pool, :unboxed_run, 2) do + &pool.unboxed_run(&1, fn -> migrator.(&1, paths, :up, opts) end) + else + &migrator.(&1, paths, :up, opts) + end + + case Ecto.Migrator.with_repo(repo, fun, [mode: :temporary] ++ opts) do + {:ok, _migrated, _apps} -> :ok + {:error, error} -> Mix.raise "Could not start repo #{inspect repo}, error: #{inspect error}" + end + end + + :ok + end +end diff --git a/deps/ecto_sql/lib/mix/tasks/ecto.migrations.ex b/deps/ecto_sql/lib/mix/tasks/ecto.migrations.ex new file mode 100644 index 0000000..5065ae7 --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.migrations.ex @@ -0,0 +1,97 @@ +defmodule Mix.Tasks.Ecto.Migrations do + use Mix.Task + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Displays the repository migration status" + + @aliases [ + r: :repo + ] + + @switches [ + repo: [:keep, :string], + no_compile: :boolean, + no_deps_check: :boolean, + migrations_path: :keep, + prefix: :string + ] + + @moduledoc """ + Displays the up / down migration status for the given repository. + + The repository must be set under `:ecto_repos` in the + current app configuration or given via the `-r` option. + + By default, migrations are expected at "priv/YOUR_REPO/migrations" + directory of the current application but it can be configured + by specifying the `:priv` key under the repository configuration. + + If the repository has not been started yet, one will be + started outside our application supervision tree and shutdown + afterwards. + + ## Examples + + $ mix ecto.migrations + $ mix ecto.migrations -r Custom.Repo + + ## Command line options + + * `--migrations-path` - the path to load the migrations from, defaults to + `"priv/repo/migrations"`. This option may be given multiple times in which + case the migrations are loaded from all the given directories and sorted as + if they were in the same one. + + Note, if you have previously run migrations from paths `a/` and `b/`, and now + run `mix ecto.migrations --migrations-path a/` (omitting path `b/`), the + migrations from the path `b/` will be shown in the output as `** FILE NOT FOUND **`. + + * `--no-compile` - does not compile applications before running + + * `--no-deps-check` - does not check dependencies before running + + * `--prefix` - the prefix to check migrations on + + * `-r`, `--repo` - the repo to obtain the status for + + """ + + @impl true + def run(args, migrations \\ &Ecto.Migrator.migrations/3, puts \\ &IO.puts/1) do + repos = parse_repo(args) + {opts, _} = OptionParser.parse! 
args, strict: @switches, aliases: @aliases + + for repo <- repos do + ensure_repo(repo, args) + paths = ensure_migrations_paths(repo, opts) + + case Ecto.Migrator.with_repo(repo, &migrations.(&1, paths, opts), [mode: :temporary]) do + {:ok, repo_status, _} -> + puts.( + """ + + Repo: #{inspect(repo)} + + Status Migration ID Migration Name + -------------------------------------------------- + """ <> + Enum.map_join(repo_status, "\n", fn {status, number, description} -> + " #{format(status, 10)}#{format(number, 16)}#{description}" + end) <> "\n" + ) + + {:error, error} -> + Mix.raise "Could not start repo #{inspect repo}, error: #{inspect error}" + end + end + + :ok + end + + defp format(content, pad) do + content + |> to_string + |> String.pad_trailing(pad) + end +end diff --git a/deps/ecto_sql/lib/mix/tasks/ecto.rollback.ex b/deps/ecto_sql/lib/mix/tasks/ecto.rollback.ex new file mode 100644 index 0000000..c804c0e --- /dev/null +++ b/deps/ecto_sql/lib/mix/tasks/ecto.rollback.ex @@ -0,0 +1,145 @@ +defmodule Mix.Tasks.Ecto.Rollback do + use Mix.Task + import Mix.Ecto + import Mix.EctoSQL + + @shortdoc "Rolls back the repository migrations" + + @aliases [ + r: :repo, + n: :step + ] + + @switches [ + all: :boolean, + step: :integer, + to: :integer, + to_exclusive: :integer, + quiet: :boolean, + prefix: :string, + pool_size: :integer, + log_sql: :boolean, + log_migrations_sql: :boolean, + log_migrator_sql: :boolean, + repo: [:keep, :string], + no_compile: :boolean, + no_deps_check: :boolean, + migrations_path: :keep + ] + + @moduledoc """ + Reverts applied migrations in the given repository. + + Migrations are expected at "priv/YOUR_REPO/migrations" directory + of the current application, where "YOUR_REPO" is the last segment + in your repository name. For example, the repository `MyApp.Repo` + will use "priv/repo/migrations". The repository `Whatever.MyRepo` + will use "priv/my_repo/migrations". + + You can configure a repository to use another directory by specifying + the `:priv` key under the repository configuration. The "migrations" + part will be automatically appended to it. For instance, to use + "priv/custom_repo/migrations": + + config :my_app, MyApp.Repo, priv: "priv/custom_repo" + + This task rolls back the last applied migration by default. To roll + back to a version number, supply `--to version_number`. To roll + back a specific number of times, use `--step n`. To undo all applied + migrations, provide `--all`. + + The repositories to rollback are the ones specified under the + `:ecto_repos` option in the current app configuration. However, + if the `-r` option is given, it replaces the `:ecto_repos` config. + + If a repository has not yet been started, one will be started outside + your application supervision tree and shutdown afterwards. + + ## Examples + + $ mix ecto.rollback + $ mix ecto.rollback -r Custom.Repo + + $ mix ecto.rollback -n 3 + $ mix ecto.rollback --step 3 + + $ mix ecto.rollback --to 20080906120000 + + ## Command line options + + * `--all` - run all pending migrations + + * `--log-migrations-sql` - log SQL generated by migration commands + + * `--log-migrator-sql` - log SQL generated by the migrator, such as + transactions, table locks, etc + + * `--migrations-path` - the path to load the migrations from, defaults to + `"priv/repo/migrations"`. 
This option may be given multiple times in which + case the migrations are loaded from all the given directories and sorted + as if they were in the same one + + * `--no-compile` - does not compile applications before migrating + + * `--no-deps-check` - does not check dependencies before migrating + + * `--pool-size` - the pool size if the repository is started + only for the task (defaults to 2) + + * `--prefix` - the prefix to run migrations on + + * `--quiet` - do not log migration commands + + * `-r`, `--repo` - the repo to migrate + + * `--step`, `-n` - revert n migrations + + * `--strict-version-order` - abort when applying a migration with old + timestamp (otherwise it emits a warning) + + * `--to` - revert all migrations down to and including version + + * `--to-exclusive` - revert all migrations down to and excluding version + + """ + + @impl true + def run(args, migrator \\ &Ecto.Migrator.run/4) do + repos = parse_repo(args) + {opts, _} = OptionParser.parse!(args, strict: @switches, aliases: @aliases) + + opts = + if opts[:to] || opts[:to_exclusive] || opts[:step] || opts[:all], + do: opts, + else: Keyword.put(opts, :step, 1) + + opts = + if opts[:quiet], + do: Keyword.merge(opts, [log: false, log_migrations_sql: false, log_migrator_sql: false]), + else: opts + + # Start ecto_sql explicitly before as we don't need + # to restart those apps if migrated. + {:ok, _} = Application.ensure_all_started(:ecto_sql) + + for repo <- repos do + ensure_repo(repo, args) + paths = ensure_migrations_paths(repo, opts) + pool = repo.config[:pool] + + fun = + if Code.ensure_loaded?(pool) and function_exported?(pool, :unboxed_run, 2) do + &pool.unboxed_run(&1, fn -> migrator.(&1, paths, :down, opts) end) + else + &migrator.(&1, paths, :down, opts) + end + + case Ecto.Migrator.with_repo(repo, fun, [mode: :temporary] ++ opts) do + {:ok, _migrated, _apps} -> :ok + {:error, error} -> Mix.raise "Could not start repo #{inspect repo}, error: #{inspect error}" + end + end + + :ok + end +end diff --git a/deps/ecto_sql/mix.exs b/deps/ecto_sql/mix.exs new file mode 100644 index 0000000..c3c76ef --- /dev/null +++ b/deps/ecto_sql/mix.exs @@ -0,0 +1,204 @@ +defmodule EctoSQL.MixProject do + use Mix.Project + + @source_url "https://github.com/elixir-ecto/ecto_sql" + @version "3.8.3" + @adapters ~w(pg myxql tds) + + def project do + [ + app: :ecto_sql, + version: @version, + elixir: "~> 1.10", + deps: deps(), + test_paths: test_paths(System.get_env("ECTO_ADAPTER")), + xref: [ + exclude: [ + MyXQL, + Ecto.Adapters.MyXQL.Connection, + Postgrex, + Ecto.Adapters.Postgres.Connection, + Tds, + Tds.Ecto.UUID, + Ecto.Adapters.Tds.Connection + ] + ], + + # Custom testing + aliases: [ + "test.all": ["test", "test.adapters", "test.as_a_dep"], + "test.adapters": &test_adapters/1, + "test.as_a_dep": &test_as_a_dep/1 + ], + preferred_cli_env: ["test.all": :test, "test.adapters": :test], + + # Hex + description: "SQL-based adapters for Ecto and database migrations", + package: package(), + + # Docs + name: "Ecto SQL", + docs: docs() + ] + end + + def application do + [ + extra_applications: [:logger, :eex], + env: [postgres_map_type: "jsonb"], + mod: {Ecto.Adapters.SQL.Application, []} + ] + end + + defp deps do + [ + ecto_dep(), + {:telemetry, "~> 0.4.0 or ~> 1.0"}, + + # Drivers + {:db_connection, "~> 2.5 or ~> 2.4.1"}, + postgrex_dep(), + myxql_dep(), + tds_dep(), + + # Bring something in for JSON during tests + {:jason, ">= 0.0.0", only: [:test, :docs]}, + + # Docs + {:ex_doc, "~> 0.21", only: :docs}, + + # Benchmarks + 
{:benchee, "~> 0.11.0", only: :bench}, + {:benchee_json, "~> 0.4.0", only: :bench} + ] + end + + defp ecto_dep do + if path = System.get_env("ECTO_PATH") do + {:ecto, path: path} + else + {:ecto, "~> 3.8.4"} + end + end + + defp postgrex_dep do + if path = System.get_env("POSTGREX_PATH") do + {:postgrex, path: path} + else + {:postgrex, "~> 0.15.0 or ~> 0.16.0 or ~> 1.0", optional: true} + end + end + + defp myxql_dep do + if path = System.get_env("MYXQL_PATH") do + {:myxql, path: path} + else + {:myxql, "~> 0.6.0", optional: true} + end + end + + defp tds_dep do + if path = System.get_env("TDS_PATH") do + {:tds, path: path} + else + {:tds, "~> 2.1.1 or ~> 2.2", optional: true} + end + end + + defp test_paths(adapter) when adapter in @adapters, do: ["integration_test/#{adapter}"] + defp test_paths(nil), do: ["test"] + defp test_paths(other), do: raise("unknown adapter #{inspect(other)}") + + defp package do + [ + maintainers: ["Eric Meadows-Jรถnsson", "Josรฉ Valim", "James Fish", "Michaล‚ Muskaล‚a"], + licenses: ["Apache-2.0"], + links: %{"GitHub" => @source_url}, + files: + ~w(.formatter.exs mix.exs README.md CHANGELOG.md lib) ++ + ~w(integration_test/sql integration_test/support) + ] + end + + defp test_as_a_dep(args) do + IO.puts("==> Compiling ecto_sql from a dependency") + File.rm_rf!("tmp/as_a_dep") + File.mkdir_p!("tmp/as_a_dep") + + File.cd!("tmp/as_a_dep", fn -> + File.write!("mix.exs", """ + defmodule DepsOnEctoSQL.MixProject do + use Mix.Project + + def project do + [ + app: :deps_on_ecto_sql, + version: "0.0.1", + deps: [{:ecto_sql, path: "../.."}] + ] + end + end + """) + + mix_cmd_with_status_check(["do", "deps.get,", "compile", "--force" | args]) + end) + end + + defp test_adapters(args) do + for adapter <- @adapters, do: env_run(adapter, args) + end + + defp env_run(adapter, args) do + IO.puts("==> Running tests for ECTO_ADAPTER=#{adapter} mix test") + + mix_cmd_with_status_check( + ["test", ansi_option() | args], + env: [{"ECTO_ADAPTER", adapter}] + ) + end + + defp ansi_option do + if IO.ANSI.enabled?(), do: "--color", else: "--no-color" + end + + defp mix_cmd_with_status_check(args, opts \\ []) do + {_, res} = System.cmd("mix", args, [into: IO.binstream(:stdio, :line)] ++ opts) + + if res > 0 do + System.at_exit(fn _ -> exit({:shutdown, 1}) end) + end + end + + defp docs do + [ + main: "Ecto.Adapters.SQL", + source_ref: "v#{@version}", + canonical: "http://hexdocs.pm/ecto_sql", + source_url: @source_url, + extras: ["CHANGELOG.md"], + skip_undefined_reference_warnings_on: ["CHANGELOG.md"], + groups_for_modules: [ + # Ecto.Adapters.SQL, + # Ecto.Adapters.SQL.Sandbox, + # Ecto.Migration, + # Ecto.Migrator, + + "Built-in adapters": [ + Ecto.Adapters.MyXQL, + Ecto.Adapters.Tds, + Ecto.Adapters.Postgres + ], + "Adapter specification": [ + Ecto.Adapter.Migration, + Ecto.Adapter.Structure, + Ecto.Adapters.SQL.Connection, + Ecto.Migration.Command, + Ecto.Migration.Constraint, + Ecto.Migration.Index, + Ecto.Migration.Reference, + Ecto.Migration.Table + ] + ] + ] + end +end diff --git a/deps/esbuild/.fetch b/deps/esbuild/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/esbuild/.formatter.exs b/deps/esbuild/.formatter.exs new file mode 100644 index 0000000..d2cda26 --- /dev/null +++ b/deps/esbuild/.formatter.exs @@ -0,0 +1,4 @@ +# Used by "mix format" +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] +] diff --git a/deps/esbuild/.hex b/deps/esbuild/.hex new file mode 100644 index 
0000000000000000000000000000000000000000..52542272f91de843aaeba8448e92cbe018527cf0 GIT binary patch literal 271 zcmZ9{!BWF83>Gc=$d*I`wbMx&rgwfEmm}9b?A!ercjKPTev}*S z$Xtyv&um_&)?|4vqm-|8tF@~n^w3Yd6^5ky>oB}0y)r2od5s; literal 0 HcmV?d00001 diff --git a/deps/esbuild/CHANGELOG.md b/deps/esbuild/CHANGELOG.md new file mode 100644 index 0000000..502ba9c --- /dev/null +++ b/deps/esbuild/CHANGELOG.md @@ -0,0 +1,73 @@ +# CHANGELOG + +## v0.5.0 (2022-05-27) + + * Raise exception if no args are found to use with esbuild + * Update esbuild to 0.14.41 + * Support overridable cacertfile + * Add support for armv7 + * Attempt multiple directories to install esbuild + +## v0.4.0 (2021-11-27) + + * Attach system target architecture to saved esbuild executable + * Store download on user cache directory + * Update esbuild to 0.14.0 + * Add support for 32bit linux + +## v0.3.4 (2021-10-30) + + * Support armv7l + * Update esbuild to 0.13.10 + +## v0.3.3 (2021-10-11) + + * Fallback if Mix.Project is not available + * Update esbuild to 0.13.4 + +## v0.3.2 (2021-10-06) + + * Do not load runtime config by default on `esbuild.install` task + * Update latest known `esbuild` version + * Allow `config :esbuild, :path, path` to configure the path to the esbuild install + * Support `HTTP_PROXY/HTTPS_PROXY` to fetch esbuild + +## v0.3.1 (2021-09-08) + + * Fix target detection on arm32 + +## v0.3.0 (2021-09-05) + + * No longer load `config/runtime.exs` by default, instead support `--runtime-config` flag + * Update initial `esbuild` version to `0.12.18` + +## v0.2.2 (2021-08-28) + + * `mix esbuild.install --if-missing` also checks version + +## v0.2.1 (2021-08-09) + + * Require Elixir v1.10 + * Make sure `bin_path` directory exists before writing to it + * Fix target detection for FreeBSD + +## v0.2.0 (2021-07-29) + + * Bump to esbuild 0.12.17 + +## v0.1.3 (2021-07-21) + + * Fix Windows support + +## v0.1.2 (2021-07-18) + + * Improve docs and error messages + * Reenable esbuild task + +## v0.1.1 (2021-07-18) + + * Fix target detection on ARM Macs and OTP < 24 + +## v0.1.0 (2021-07-18) + + * First release diff --git a/deps/esbuild/LICENSE.md b/deps/esbuild/LICENSE.md new file mode 100644 index 0000000..6568d08 --- /dev/null +++ b/deps/esbuild/LICENSE.md @@ -0,0 +1,22 @@ +# MIT License + +Copyright (c) 2021 Wojtek Mach, Josรฉ Valim. + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/deps/esbuild/README.md b/deps/esbuild/README.md new file mode 100644 index 0000000..e44dd49 --- /dev/null +++ b/deps/esbuild/README.md @@ -0,0 +1,163 @@ +# Esbuild + +[![CI](https://github.com/phoenixframework/esbuild/actions/workflows/main.yml/badge.svg)](https://github.com/phoenixframework/esbuild/actions/workflows/main.yml) + +Mix tasks for installing and invoking [esbuild](https://github.com/evanw/esbuild/). + +## Installation + +If you are going to build assets in production, then you add +`esbuild` as dependency on all environments but only start it +in dev: + +```elixir +def deps do + [ + {:esbuild, "~> 0.5", runtime: Mix.env() == :dev} + ] +end +``` + +However, if your assets are precompiled during development, +then it only needs to be a dev dependency: + +```elixir +def deps do + [ + {:esbuild, "~> 0.5", only: :dev} + ] +end +``` + +Once installed, change your `config/config.exs` to pick your +esbuild version of choice: + +```elixir +config :esbuild, version: "0.14.41" +``` + +Now you can install esbuild by running: + +```bash +$ mix esbuild.install +``` + +And invoke esbuild with: + +```bash +$ mix esbuild default assets/js/app.js --bundle --minify --target=es2016 --outdir=priv/static/assets/ +``` + +The executable is kept at `_build/esbuild-TARGET`. +Where `TARGET` is your system target architecture. + +## Profiles + +The first argument to `esbuild` is the execution profile. +You can define multiple execution profiles with the current +directory, the OS environment, and default arguments to the +`esbuild` task: + +```elixir +config :esbuild, + version: "0.14.41", + default: [ + args: ~w(js/app.js), + cd: Path.expand("../assets", __DIR__) + ] +``` + +When `mix esbuild default` is invoked, the task arguments will be appended +to the ones configured above. Note profiles must be configured in your +`config/config.exs`, as `esbuild` runs without starting your application +(and therefore it won't pick settings in `config/runtime.exs`). + +## Adding to Phoenix + +To add `esbuild` to an application using Phoenix, you need only four steps. Installation requires that Phoenix watchers can accept module-function-args tuples which is not built into Phoenix 1.5.9. + +First add it as a dependency in your `mix.exs`: + +```elixir +def deps do + [ + {:phoenix, github: "phoenixframework/phoenix", branch: "v1.5", override: true}, + {:esbuild, "~> 0.5", runtime: Mix.env() == :dev} + ] +end +``` + +Now let's change `config/config.exs` to configure `esbuild` to use +`assets/js/app.js` as an entry point and write to `priv/static/assets`: + +```elixir +config :esbuild, + version: "0.14.41", + default: [ + args: ~w(js/app.js --bundle --target=es2016 --outdir=../priv/static/assets), + cd: Path.expand("../assets", __DIR__), + env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)} + ] +``` + +> Make sure the "assets" directory from priv/static is listed in the +> :only option for Plug.Static in your lib/my_app_web/endpoint.ex + +For development, we want to enable watch mode. So find the `watchers` +configuration in your `config/dev.exs` and add: + +```elixir + esbuild: {Esbuild, :install_and_run, [:default, ~w(--sourcemap=inline --watch)]} +``` + +Note we are inlining source maps and enabling the file system watcher. 
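+
+For reference, a complete `watchers` entry in `config/dev.exs` could look like
+the following sketch (assuming your app is `:my_app` and your endpoint is
+`MyAppWeb.Endpoint`):
+
+```elixir
+config :my_app, MyAppWeb.Endpoint,
+  watchers: [
+    esbuild: {Esbuild, :install_and_run, [:default, ~w(--sourcemap=inline --watch)]}
+  ]
+```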
+ +Finally, back in your `mix.exs`, make sure you have a `assets.deploy` +alias for deployments, which will also use the `--minify` option: + +```elixir +"assets.deploy": ["esbuild default --minify", "phx.digest"] +``` + +## Third-party JS packages + +If you have JavaScript dependencies, you have two options +to add them to your application: + + 1. Vendor those dependencies inside your project and + import them in your "assets/js/app.js" using a relative + path: + + import topbar from "../vendor/topbar" + + 2. Call `npm install topbar --save` inside your assets + directory and `esbuild` will be able to automatically + pick them up: + + import topbar from "topbar" + +## CSS + +`esbuild` has basic support for CSS. If you import a css file at the +top of your main `.js` file, `esbuild` will also bundle it, and write +it to the same directory as your `app.js`: + +```js +import "../css/app.css" +``` + +However, if you want to use a CSS framework, you will need to use a separate tool. +Here are some options to do so: + + * Use [standalone Tailwind](https://github.com/phoenixframework/tailwind) or + [standalone SASS](https://github.com/CargoSense/dart_sass). Both similar to + `esbuild`. + + * You can use `esbuild` plugins (requires `npm`). See [Phoenix' official + guide on using them](https://hexdocs.pm/phoenix/asset_management.html). + +## License + +Copyright (c) 2021 Wojtek Mach, Josรฉ Valim. + +esbuild source code is licensed under the [MIT License](LICENSE.md). diff --git a/deps/esbuild/hex_metadata.config b/deps/esbuild/hex_metadata.config new file mode 100644 index 0000000..6b8f6cc --- /dev/null +++ b/deps/esbuild/hex_metadata.config @@ -0,0 +1,21 @@ +{<<"app">>,<<"esbuild">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>,<<"Mix tasks for installing and invoking esbuild">>}. +{<<"elixir">>,<<"~> 1.10">>}. +{<<"files">>, + [<<"lib">>,<<"lib/esbuild.ex">>,<<"lib/mix">>,<<"lib/mix/tasks">>, + <<"lib/mix/tasks/esbuild.ex">>,<<"lib/mix/tasks/esbuild.install.ex">>, + <<".formatter.exs">>,<<"mix.exs">>,<<"README.md">>,<<"LICENSE.md">>, + <<"CHANGELOG.md">>]}. +{<<"licenses">>,[<<"MIT">>]}. +{<<"links">>, + [{<<"GitHub">>,<<"https://github.com/phoenixframework/esbuild">>}, + {<<"esbuild">>,<<"https://esbuild.github.io">>}]}. +{<<"name">>,<<"esbuild">>}. +{<<"requirements">>, + [[{<<"app">>,<<"castore">>}, + {<<"name">>,<<"castore">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<">= 0.0.0">>}]]}. +{<<"version">>,<<"0.5.0">>}. diff --git a/deps/esbuild/lib/esbuild.ex b/deps/esbuild/lib/esbuild.ex new file mode 100644 index 0000000..021e85b --- /dev/null +++ b/deps/esbuild/lib/esbuild.ex @@ -0,0 +1,309 @@ +defmodule Esbuild do + # https://registry.npmjs.org/esbuild/latest + @latest_version "0.14.41" + + @moduledoc """ + Esbuild is an installer and runner for [esbuild](https://esbuild.github.io). + + ## Profiles + + You can define multiple esbuild profiles. By default, there is a + profile called `:default` which you can configure its args, current + directory and environment: + + config :esbuild, + version: "#{@latest_version}", + default: [ + args: ~w(js/app.js --bundle --target=es2016 --outdir=../priv/static/assets), + cd: Path.expand("../assets", __DIR__), + env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)} + ] + + ## Esbuild configuration + + There are two global configurations for the esbuild application: + + * `:version` - the expected esbuild version + + * `:path` - the path to find the esbuild executable at. 
By + default, it is automatically downloaded and placed inside + the `_build` directory of your current app + + Overriding the `:path` is not recommended, as we will automatically + download and manage `esbuild` for you. But in case you can't download + it (for example, the npm registry is behind a proxy), you may want to + set the `:path` to a configurable system location. + + For instance, you can install `esbuild` globally with `npm`: + + $ npm install -g esbuild + + On Unix, the executable will be at: + + NPM_ROOT/esbuild/node_modules/esbuild-TARGET/bin/esbuild + + On Windows, it will be at: + + NPM_ROOT/esbuild/node_modules/esbuild-windows-(32|64)/esbuild.exe + + Where `NPM_ROOT` is the result of `npm root -g` and `TARGET` is your system + target architecture. + + Once you find the location of the executable, you can store it in a + `MIX_ESBUILD_PATH` environment variable, which you can then read in + your configuration file: + + config :esbuild, path: System.get_env("MIX_ESBUILD_PATH") + + """ + + use Application + require Logger + + @doc false + def start(_, _) do + unless Application.get_env(:esbuild, :version) do + Logger.warn(""" + esbuild version is not configured. Please set it in your config files: + + config :esbuild, :version, "#{latest_version()}" + """) + end + + configured_version = configured_version() + + case bin_version() do + {:ok, ^configured_version} -> + :ok + + {:ok, version} -> + Logger.warn(""" + Outdated esbuild version. Expected #{configured_version}, got #{version}. \ + Please run `mix esbuild.install` or update the version in your config files.\ + """) + + :error -> + :ok + end + + Supervisor.start_link([], strategy: :one_for_one) + end + + @doc false + # Latest known version at the time of publishing. + def latest_version, do: @latest_version + + @doc """ + Returns the configured esbuild version. + """ + def configured_version do + Application.get_env(:esbuild, :version, latest_version()) + end + + @doc """ + Returns the configuration for the given profile. + + Returns nil if the profile does not exist. + """ + def config_for!(profile) when is_atom(profile) do + Application.get_env(:esbuild, profile) || + raise ArgumentError, """ + unknown esbuild profile. Make sure the profile is defined in your config/config.exs file, such as: + + config :esbuild, + #{profile}: [ + args: ~w(js/app.js --bundle --target=es2016 --outdir=../priv/static/assets), + cd: Path.expand("../assets", __DIR__), + env: %{"NODE_PATH" => Path.expand("../deps", __DIR__)} + ] + """ + end + + @doc """ + Returns the path to the executable. + + The executable may not be available if it was not yet installed. + """ + def bin_path do + name = "esbuild-#{target()}" + + Application.get_env(:esbuild, :path) || + if Code.ensure_loaded?(Mix.Project) do + Path.join(Path.dirname(Mix.Project.build_path()), name) + else + Path.expand("_build/#{name}") + end + end + + @doc """ + Returns the version of the esbuild executable. + + Returns `{:ok, version_string}` on success or `:error` when the executable + is not available. + """ + def bin_version do + path = bin_path() + + with true <- File.exists?(path), + {result, 0} <- System.cmd(path, ["--version"]) do + {:ok, String.trim(result)} + else + _ -> :error + end + end + + @doc """ + Runs the given command with `args`. + + The given args will be appended to the configured args. + The task output will be streamed directly to stdio. It + returns the status of the underlying call. 
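+
+ For example, assuming a `:default` profile is configured as shown above, the
+ following call appends `--minify` to the configured args and returns the
+ exit status of the esbuild process:
+
+ Esbuild.run(:default, ~w(--minify))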
+ """ + def run(profile, extra_args) when is_atom(profile) and is_list(extra_args) do + config = config_for!(profile) + args = config[:args] || [] + + if args == [] and extra_args == [] do + raise "no arguments passed to esbuild" + end + + opts = [ + cd: config[:cd] || File.cwd!(), + env: config[:env] || %{}, + into: IO.stream(:stdio, :line), + stderr_to_stdout: true + ] + + bin_path() + |> System.cmd(args ++ extra_args, opts) + |> elem(1) + end + + @doc """ + Installs, if not available, and then runs `esbuild`. + + Returns the same as `run/2`. + """ + def install_and_run(profile, args) do + unless File.exists?(bin_path()) do + install() + end + + run(profile, args) + end + + @doc """ + Installs esbuild with `configured_version/0`. + """ + def install do + version = configured_version() + tmp_opts = if System.get_env("MIX_XDG"), do: %{os: :linux}, else: %{} + + tmp_dir = + freshdir_p(:filename.basedir(:user_cache, "phx-esbuild", tmp_opts)) || + freshdir_p(Path.join(System.tmp_dir!(), "phx-esbuild")) || + raise "could not install esbuild. Set MIX_XGD=1 and then set XDG_CACHE_HOME to the path you want to use as cache" + + name = "esbuild-#{target()}" + url = "https://registry.npmjs.org/#{name}/-/#{name}-#{version}.tgz" + tar = fetch_body!(url) + + case :erl_tar.extract({:binary, tar}, [:compressed, cwd: to_charlist(tmp_dir)]) do + :ok -> :ok + other -> raise "couldn't unpack archive: #{inspect(other)}" + end + + bin_path = bin_path() + File.mkdir_p!(Path.dirname(bin_path)) + + case :os.type() do + {:win32, _} -> + File.cp!(Path.join([tmp_dir, "package", "esbuild.exe"]), bin_path) + + _ -> + File.cp!(Path.join([tmp_dir, "package", "bin", "esbuild"]), bin_path) + end + end + + defp freshdir_p(path) do + with {:ok, _} <- File.rm_rf(path), + :ok <- File.mkdir_p(path) do + path + else + _ -> nil + end + end + + # Available targets: https://github.com/evanw/esbuild/tree/master/npm + defp target do + case :os.type() do + {:win32, _} -> + "windows-#{:erlang.system_info(:wordsize) * 8}" + + {:unix, osname} -> + arch_str = :erlang.system_info(:system_architecture) + [arch | _] = arch_str |> List.to_string() |> String.split("-") + + case arch do + "amd64" -> "#{osname}-64" + "x86_64" -> "#{osname}-64" + "i686" -> "#{osname}-32" + "i386" -> "#{osname}-32" + "aarch64" -> "#{osname}-arm64" + # TODO: remove when we require OTP 24 + "arm" when osname == :darwin -> "darwin-arm64" + "arm" -> "#{osname}-arm" + "armv7" <> _ -> "#{osname}-arm" + _ -> raise "esbuild is not available for architecture: #{arch_str}" + end + end + end + + defp fetch_body!(url) do + url = String.to_charlist(url) + Logger.debug("Downloading esbuild from #{url}") + + {:ok, _} = Application.ensure_all_started(:inets) + {:ok, _} = Application.ensure_all_started(:ssl) + + if proxy = System.get_env("HTTP_PROXY") || System.get_env("http_proxy") do + Logger.debug("Using HTTP_PROXY: #{proxy}") + %{host: host, port: port} = URI.parse(proxy) + :httpc.set_options([{:proxy, {{String.to_charlist(host), port}, []}}]) + end + + if proxy = System.get_env("HTTPS_PROXY") || System.get_env("https_proxy") do + Logger.debug("Using HTTPS_PROXY: #{proxy}") + %{host: host, port: port} = URI.parse(proxy) + :httpc.set_options([{:https_proxy, {{String.to_charlist(host), port}, []}}]) + end + + # https://erlef.github.io/security-wg/secure_coding_and_deployment_hardening/inets + cacertfile = cacertfile() |> String.to_charlist() + + http_options = [ + ssl: [ + verify: :verify_peer, + cacertfile: cacertfile, + depth: 2, + customize_hostname_check: [ + match_fun: 
:public_key.pkix_verify_hostname_match_fun(:https) + ] + ] + ] + + options = [body_format: :binary] + + case :httpc.request(:get, {url, []}, http_options, options) do + {:ok, {{_, 200, _}, _headers, body}} -> + body + + other -> + raise "couldn't fetch #{url}: #{inspect(other)}" + end + end + + defp cacertfile() do + Application.get_env(:esbuild, :cacerts_path) || CAStore.file_path() + end +end diff --git a/deps/esbuild/lib/mix/tasks/esbuild.ex b/deps/esbuild/lib/mix/tasks/esbuild.ex new file mode 100644 index 0000000..791501e --- /dev/null +++ b/deps/esbuild/lib/mix/tasks/esbuild.ex @@ -0,0 +1,58 @@ +defmodule Mix.Tasks.Esbuild do + @moduledoc """ + Invokes esbuild with the given args. + + Usage: + + $ mix esbuild TASK_OPTIONS PROFILE ESBUILD_ARGS + + Example: + + $ mix esbuild default assets/js/app.js --bundle --minify --target=es2016 --outdir=priv/static/assets + + If esbuild is not installed, it is automatically downloaded. + Note the arguments given to this task will be appended + to any configured arguments. + + ## Options + + * `--runtime-config` - load the runtime configuration + before executing command + + Note flags to control this Mix task must be given before the + profile: + + $ mix esbuild --runtime-config default assets/js/app.js + + """ + + @shortdoc "Invokes esbuild with the profile and args" + + use Mix.Task + + @impl true + def run(args) do + switches = [runtime_config: :boolean] + {opts, remaining_args} = OptionParser.parse_head!(args, switches: switches) + + if opts[:runtime_config] do + Mix.Task.run("app.config") + else + Application.ensure_all_started(:esbuild) + end + + Mix.Task.reenable("esbuild") + install_and_run(remaining_args) + end + + defp install_and_run([profile | args] = all) do + case Esbuild.install_and_run(String.to_atom(profile), args) do + 0 -> :ok + status -> Mix.raise("`mix esbuild #{Enum.join(all, " ")}` exited with #{status}") + end + end + + defp install_and_run([]) do + Mix.raise("`mix esbuild` expects the profile as argument") + end +end diff --git a/deps/esbuild/lib/mix/tasks/esbuild.install.ex b/deps/esbuild/lib/mix/tasks/esbuild.install.ex new file mode 100644 index 0000000..3efe426 --- /dev/null +++ b/deps/esbuild/lib/mix/tasks/esbuild.install.ex @@ -0,0 +1,56 @@ +defmodule Mix.Tasks.Esbuild.Install do + @moduledoc """ + Installs esbuild under `_build`. 
+ + ```bash + $ mix esbuild.install + $ mix esbuild.install --if-missing + ``` + + By default, it installs #{Esbuild.latest_version()} but you + can configure it in your config files, such as: + + config :esbuild, :version, "#{Esbuild.latest_version()}" + + ## Options + + * `--runtime-config` - load the runtime configuration + before executing command + + * `--if-missing` - install only if the given version + does not exist + """ + + @shortdoc "Installs esbuild under _build" + use Mix.Task + + @impl true + def run(args) do + valid_options = [runtime_config: :boolean, if_missing: :boolean] + + case OptionParser.parse_head!(args, strict: valid_options) do + {opts, []} -> + if opts[:runtime_config], do: Mix.Task.run("app.config") + + if opts[:if_missing] && latest_version?() do + :ok + else + Esbuild.install() + end + + {_, _} -> + Mix.raise(""" + Invalid arguments to esbuild.install, expected one of: + + mix esbuild.install + mix esbuild.install --runtime-config + mix esbuild.install --if-missing + """) + end + end + + defp latest_version?() do + version = Esbuild.configured_version() + match?({:ok, ^version}, Esbuild.bin_version()) + end +end diff --git a/deps/esbuild/mix.exs b/deps/esbuild/mix.exs new file mode 100644 index 0000000..fe73e5f --- /dev/null +++ b/deps/esbuild/mix.exs @@ -0,0 +1,49 @@ +defmodule Esbuild.MixProject do + use Mix.Project + + @version "0.5.0" + @source_url "https://github.com/phoenixframework/esbuild" + + def project do + [ + app: :esbuild, + version: @version, + elixir: "~> 1.10", + deps: deps(), + description: "Mix tasks for installing and invoking esbuild", + package: [ + links: %{ + "GitHub" => @source_url, + "esbuild" => "https://esbuild.github.io" + }, + licenses: ["MIT"] + ], + docs: [ + main: "Esbuild", + source_url: @source_url, + source_ref: "v#{@version}", + extras: ["CHANGELOG.md"] + ], + xref: [ + exclude: [:httpc, :public_key] + ], + aliases: [test: ["esbuild.install --if-missing", "test"]] + ] + end + + def application do + [ + # inets/ssl may be used by Mix tasks but we should not impose them. + extra_applications: [:logger], + mod: {Esbuild, []}, + env: [default: []] + ] + end + + defp deps do + [ + {:castore, ">= 0.0.0"}, + {:ex_doc, ">= 0.0.0", only: :docs} + ] + end +end diff --git a/deps/file_system/.fetch b/deps/file_system/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/file_system/.hex b/deps/file_system/.hex new file mode 100644 index 0000000000000000000000000000000000000000..edb82b99eadec30dc71dc411b1d81ac4800d5df4 GIT binary patch literal 276 zcmZ9H%}&EG5QI$&1qq2qsl4m|8*fsry=xMvlPXD3IrHMUSFSVCeBb;IyJ632Kgf-C zq&B70fz{hMO|qQIAmwYlw${`4nsr329i_q$0E%|mp^X7_jS+p;$e}mUN+`xUsHp%N zU|S1#WL7VYCi;+NJ@>tb4sGM@lhxxyP40--I-cdU{;Zp{?l51s{|*))IpJKh^GGh4 zfL@qPHWD*vLq?|@j3Gu>4SS23Z1TzH8o+VsuthHO9rn~8UpnUby9 0.2", only: :test }, + ] + end + ... 
+end +``` + + +### Subscription API + +You can spawn a worker and subscribe to events from it: + +```elixir +{:ok, pid} = FileSystem.start_link(dirs: ["/path/to/some/files"]) +FileSystem.subscribe(pid) +``` + +or + +```elixir +{:ok, pid} = FileSystem.start_link(dirs: ["/path/to/some/files"], name: :my_monitor_name) +FileSystem.subscribe(:my_monitor_name) +``` + +The pid you subscribed from will now receive messages like + +``` +{:file_event, worker_pid, {file_path, events}} +``` +and +``` +{:file_event, worker_pid, :stop} +``` + +### Example with GenServer + +```elixir +defmodule Watcher do + use GenServer + + def start_link(args) do + GenServer.start_link(__MODULE__, args) + end + + def init(args) do + {:ok, watcher_pid} = FileSystem.start_link(args) + FileSystem.subscribe(watcher_pid) + {:ok, %{watcher_pid: watcher_pid}} + end + + def handle_info({:file_event, watcher_pid, {path, events}}, %{watcher_pid: watcher_pid}=state) do + # YOUR OWN LOGIC FOR PATH AND EVENTS + {:noreply, state} + end + + def handle_info({:file_event, watcher_pid, :stop}, %{watcher_pid: watcher_pid}=state) do + # YOUR OWN LOGIC WHEN MONITOR STOP + {:noreply, state} + end +end +``` + + +## Tweaking behaviour via extra arguments + +For each platform, you can pass extra arguments to the underlying listener process. + +Each backend supports different extra arguments; check the backend module documentation for more information. + +Here is an example to get instant notifications on file changes for Mac OS X: + +```elixir +FileSystem.start_link(dirs: ["/path/to/some/files"], latency: 0, watch_root: true) +``` diff --git a/deps/file_system/c_src/mac/cli.c b/deps/file_system/c_src/mac/cli.c new file mode 100644 index 0000000..84b0c86 --- /dev/null +++ b/deps/file_system/c_src/mac/cli.c @@ -0,0 +1,180 @@ +#include <getopt.h> +#include "cli.h" + +const char* cli_info_purpose = "A flexible command-line interface for the FSEvents API"; +const char* cli_info_usage = "Usage: fsevent_watch [OPTIONS]...
[PATHS]..."; +const char* cli_info_help[] = { + " -h, --help you're looking at it", + " -V, --version print version number and exit", + " -p, --show-plist display the embedded Info.plist values", + " -s, --since-when=EventID fire historical events since ID", + " -l, --latency=seconds latency period (default='0.5')", + " -n, --no-defer enable no-defer latency modifier", + " -r, --watch-root watch for when the root path has changed", + // " -i, --ignore-self ignore current process", + " -F, --file-events provide file level event data", + " -f, --format=name output format (ignored)", + 0 +}; + +static void default_args (struct cli_info* args_info) +{ + args_info->since_when_arg = kFSEventStreamEventIdSinceNow; + args_info->latency_arg = 0.5; + args_info->no_defer_flag = false; + args_info->watch_root_flag = false; + args_info->ignore_self_flag = false; + args_info->file_events_flag = false; + args_info->mark_self_flag = false; + args_info->format_arg = 0; +} + +static void cli_parser_release (struct cli_info* args_info) +{ + unsigned int i; + + for (i=0; i < args_info->inputs_num; ++i) { + free(args_info->inputs[i]); + } + + if (args_info->inputs_num) { + free(args_info->inputs); + } + + args_info->inputs_num = 0; +} + +void cli_parser_init (struct cli_info* args_info) +{ + default_args(args_info); + + args_info->inputs = 0; + args_info->inputs_num = 0; +} + +void cli_parser_free (struct cli_info* args_info) +{ + cli_parser_release(args_info); +} + +static void cli_print_info_dict (const void *key, + const void *value, + void *context) +{ + CFStringRef entry = CFStringCreateWithFormat(NULL, NULL, + CFSTR("%@:\n %@"), key, value); + if (entry) { + CFShow(entry); + CFRelease(entry); + } +} + +void cli_show_plist (void) +{ + CFBundleRef mainBundle = CFBundleGetMainBundle(); + CFRetain(mainBundle); + CFDictionaryRef mainBundleDict = CFBundleGetInfoDictionary(mainBundle); + if (mainBundleDict) { + CFRetain(mainBundleDict); + printf("Embedded Info.plist metadata:\n\n"); + CFDictionaryApplyFunction(mainBundleDict, cli_print_info_dict, NULL); + CFRelease(mainBundleDict); + } + CFRelease(mainBundle); + printf("\n"); +} + +void cli_print_version (void) +{ + printf("%s %s\n\n", "VXZ", "1.0"); +} + +void cli_print_help (void) +{ + cli_print_version(); + + printf("\n%s\n", cli_info_purpose); + printf("\n%s\n", cli_info_usage); + printf("\n"); + + int i = 0; + while (cli_info_help[i]) { + printf("%s\n", cli_info_help[i++]); + } +} + +int cli_parser (int argc, const char** argv, struct cli_info* args_info) +{ + static struct option longopts[] = { + { "help", no_argument, NULL, 'h' }, + { "version", no_argument, NULL, 'V' }, + { "show-plist", no_argument, NULL, 'p' }, + { "since-when", required_argument, NULL, 's' }, + { "latency", required_argument, NULL, 'l' }, + { "no-defer", no_argument, NULL, 'n' }, + { "watch-root", no_argument, NULL, 'r' }, + { "ignore-self", no_argument, NULL, 'i' }, + { "file-events", no_argument, NULL, 'F' }, + { "mark-self", no_argument, NULL, 'm' }, + { "format", required_argument, NULL, 'f' }, + { 0, 0, 0, 0 } + }; + + const char* shortopts = "hVps:l:nriFf:"; + + int c = -1; + + while ((c = getopt_long(argc, (char * const*)argv, shortopts, longopts, NULL)) != -1) { + switch(c) { + case 's': // since-when + args_info->since_when_arg = strtoull(optarg, NULL, 0); + break; + case 'l': // latency + args_info->latency_arg = strtod(optarg, NULL); + break; + case 'n': // no-defer + args_info->no_defer_flag = true; + break; + case 'r': // watch-root + args_info->watch_root_flag = true; + 
break; + case 'i': // ignore-self + args_info->ignore_self_flag = true; + break; + case 'F': // file-events + args_info->file_events_flag = true; + break; + case 'm': // mark-self + args_info->mark_self_flag = true; + break; + case 'f': // format + // XXX: ignored + break; + case 'V': // version + cli_print_version(); + exit(EXIT_SUCCESS); + case 'p': // show-plist + cli_show_plist(); + exit(EXIT_SUCCESS); + case 'h': // help + case '?': // invalid option + case ':': // missing argument + cli_print_help(); + exit((c == 'h') ? EXIT_SUCCESS : EXIT_FAILURE); + } + } + + if (optind < argc) { + int i = 0; + args_info->inputs_num = (unsigned int)(argc - optind); + args_info->inputs = + (char**)(malloc ((args_info->inputs_num)*sizeof(char*))); + while (optind < argc) + if (argv[optind++] != argv[0]) { + args_info->inputs[i++] = strdup(argv[optind-1]); + } + } + + return EXIT_SUCCESS; +} + diff --git a/deps/file_system/c_src/mac/cli.h b/deps/file_system/c_src/mac/cli.h new file mode 100644 index 0000000..f176cf0 --- /dev/null +++ b/deps/file_system/c_src/mac/cli.h @@ -0,0 +1,36 @@ +#ifndef CLI_H +#define CLI_H + +#include "common.h" + +#ifndef CLI_NAME +#define CLI_NAME "fsevent_watch" +#endif /* CLI_NAME */ + +struct cli_info { + UInt64 since_when_arg; + double latency_arg; + bool no_defer_flag; + bool watch_root_flag; + bool ignore_self_flag; + bool file_events_flag; + bool mark_self_flag; + int format_arg; + + char** inputs; + unsigned inputs_num; +}; + +extern const char* cli_info_purpose; +extern const char* cli_info_usage; +extern const char* cli_info_help[]; + +void cli_print_help(void); +void cli_print_version(void); + +int cli_parser (int argc, const char** argv, struct cli_info* args_info); +void cli_parser_init (struct cli_info* args_info); +void cli_parser_free (struct cli_info* args_info); + + +#endif /* CLI_H */ diff --git a/deps/file_system/c_src/mac/common.h b/deps/file_system/c_src/mac/common.h new file mode 100644 index 0000000..70bd648 --- /dev/null +++ b/deps/file_system/c_src/mac/common.h @@ -0,0 +1,55 @@ +#ifndef fsevent_watch_common_h +#define fsevent_watch_common_h + +#include +#include +#include +#include +#include "compat.h" + +#define _str(s) #s +#define _xstr(s) _str(s) + +#define COMPILED_AT __DATE__ " " __TIME__ + +#define FPRINTF_FLAG_CHECK(flags, flag, msg, fd) \ + do { \ + if ((flags) & (flag)) { \ + fprintf(fd, "%s\n", msg); } } \ + while (0) + +#define FLAG_CHECK_STDERR(flags, flag, msg) \ + FPRINTF_FLAG_CHECK(flags, flag, msg, stderr) + +/* + * FSEVENTSBITS: + * generated by `make printflags` (and pasted here) + * flags MUST be ordered (bits ascending) and sorted + * + * idea from: http://www.openbsd.org/cgi-bin/cvsweb/src/sbin/ifconfig/ifconfig.c (see printb()) + */ +#define FSEVENTSBITS \ +"\1mustscansubdirs\2userdropped\3kerneldropped\4eventidswrapped\5historydone\6rootchanged\7mount\10unmount\11created\12removed\13inodemetamod\14renamed\15modified\16finderinfomod\17changeowner\20xattrmod\21isfile\22isdir\23issymlink\24ownevent" + +static inline void +sprintb(char *buf, unsigned short v, char *bits) +{ + int i, any = 0; + char c; + char *bufp = buf; + + while ((i = *bits++)) { + if (v & (1 << (i-1))) { + if (any) + *bufp++ = ','; + any = 1; + for (; (c = *bits) > 32; bits++) + *bufp++ = c; + } else + for (; *bits > 32; bits++) + ; + } + *bufp = '\0'; +} + +#endif /* fsevent_watch_common_h */ diff --git a/deps/file_system/c_src/mac/compat.c b/deps/file_system/c_src/mac/compat.c new file mode 100644 index 0000000..ab84dfd --- /dev/null +++ 
b/deps/file_system/c_src/mac/compat.c @@ -0,0 +1,25 @@ +#include "compat.h" + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1060 +FSEventStreamCreateFlags kFSEventStreamCreateFlagIgnoreSelf = 0x00000008; +#endif + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1070 +FSEventStreamCreateFlags kFSEventStreamCreateFlagFileEvents = 0x00000010; +FSEventStreamEventFlags kFSEventStreamEventFlagItemCreated = 0x00000100; +FSEventStreamEventFlags kFSEventStreamEventFlagItemRemoved = 0x00000200; +FSEventStreamEventFlags kFSEventStreamEventFlagItemInodeMetaMod = 0x00000400; +FSEventStreamEventFlags kFSEventStreamEventFlagItemRenamed = 0x00000800; +FSEventStreamEventFlags kFSEventStreamEventFlagItemModified = 0x00001000; +FSEventStreamEventFlags kFSEventStreamEventFlagItemFinderInfoMod = 0x00002000; +FSEventStreamEventFlags kFSEventStreamEventFlagItemChangeOwner = 0x00004000; +FSEventStreamEventFlags kFSEventStreamEventFlagItemXattrMod = 0x00008000; +FSEventStreamEventFlags kFSEventStreamEventFlagItemIsFile = 0x00010000; +FSEventStreamEventFlags kFSEventStreamEventFlagItemIsDir = 0x00020000; +FSEventStreamEventFlags kFSEventStreamEventFlagItemIsSymlink = 0x00040000; +#endif + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1090 +FSEventStreamCreateFlags kFSEventStreamCreateFlagMarkSelf = 0x00000020; +FSEventStreamEventFlags kFSEventStreamEventFlagOwnEvent = 0x00080000; +#endif diff --git a/deps/file_system/c_src/mac/compat.h b/deps/file_system/c_src/mac/compat.h new file mode 100644 index 0000000..d44c0c8 --- /dev/null +++ b/deps/file_system/c_src/mac/compat.h @@ -0,0 +1,47 @@ +/** + * @headerfile compat.h + * FSEventStream flag compatibility shim + * + * In order to compile a binary against an older SDK yet still support the + * features present in later OS releases, we need to define any missing enum + * constants not present in the older SDK. This allows us to safely defer + * feature detection to runtime (and avoid recompilation). + */ + + +#ifndef fsevent_watch_compat_h +#define fsevent_watch_compat_h + +#ifndef __CORESERVICES__ +#include +#endif // __CORESERVICES__ + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1060 +// ignoring events originating from the current process introduced in 10.6 +extern FSEventStreamCreateFlags kFSEventStreamCreateFlagIgnoreSelf; +#endif + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1070 +// file-level events introduced in 10.7 +extern FSEventStreamCreateFlags kFSEventStreamCreateFlagFileEvents; +extern FSEventStreamEventFlags kFSEventStreamEventFlagItemCreated, + kFSEventStreamEventFlagItemRemoved, + kFSEventStreamEventFlagItemInodeMetaMod, + kFSEventStreamEventFlagItemRenamed, + kFSEventStreamEventFlagItemModified, + kFSEventStreamEventFlagItemFinderInfoMod, + kFSEventStreamEventFlagItemChangeOwner, + kFSEventStreamEventFlagItemXattrMod, + kFSEventStreamEventFlagItemIsFile, + kFSEventStreamEventFlagItemIsDir, + kFSEventStreamEventFlagItemIsSymlink; +#endif + +#if MAC_OS_X_VERSION_MAX_ALLOWED < 1090 +// marking, rather than ignoring, events originating from the current process introduced in 10.9 +extern FSEventStreamCreateFlags kFSEventStreamCreateFlagMarkSelf; +extern FSEventStreamEventFlags kFSEventStreamEventFlagOwnEvent; +#endif + + +#endif // fsevent_watch_compat_h diff --git a/deps/file_system/c_src/mac/main.c b/deps/file_system/c_src/mac/main.c new file mode 100644 index 0000000..392529b --- /dev/null +++ b/deps/file_system/c_src/mac/main.c @@ -0,0 +1,234 @@ +#include "common.h" +#include "cli.h" + +// TODO: set on fire. cli.{h,c} handle both parsing and defaults, so there's +// no need to set those here. 
also, in order to scope metadata by path, +// each stream will need its own configuration... so this won't work as +// a global any more. In the end the goal is to make the output format +// able to declare not just that something happened and what flags were +// attached, but what path it was watching that caused those events (so +// that the path itself can be used for routing that information to the +// relevant callback). +// +// Structure for storing metadata parsed from the commandline +static struct { + FSEventStreamEventId sinceWhen; + CFTimeInterval latency; + FSEventStreamCreateFlags flags; + CFMutableArrayRef paths; + int format; +} config = { + (UInt64) kFSEventStreamEventIdSinceNow, + (double) 0.3, + (CFOptionFlags) kFSEventStreamCreateFlagNone, + NULL, + 0 +}; + +// Prototypes +static void append_path(const char* path); +static inline void parse_cli_settings(int argc, const char* argv[]); +static void callback(FSEventStreamRef streamRef, + void* clientCallBackInfo, + size_t numEvents, + void* eventPaths, + const FSEventStreamEventFlags eventFlags[], + const FSEventStreamEventId eventIds[]); + + +static void append_path(const char* path) +{ + CFStringRef pathRef = CFStringCreateWithCString(kCFAllocatorDefault, + path, + kCFStringEncodingUTF8); + CFArrayAppendValue(config.paths, pathRef); + CFRelease(pathRef); +} + +// Parse commandline settings +static inline void parse_cli_settings(int argc, const char* argv[]) +{ + // runtime os version detection + SInt32 osMajorVersion, osMinorVersion; + if (!(Gestalt(gestaltSystemVersionMajor, &osMajorVersion) == noErr)) { + osMajorVersion = 0; + } + if (!(Gestalt(gestaltSystemVersionMinor, &osMinorVersion) == noErr)) { + osMinorVersion = 0; + } + + if ((osMajorVersion == 10) & (osMinorVersion < 5)) { + fprintf(stderr, "The FSEvents API is unavailable on this version of macos!\n"); + exit(EXIT_FAILURE); + } + + struct cli_info args_info; + cli_parser_init(&args_info); + + if (cli_parser(argc, argv, &args_info) != 0) { + exit(EXIT_FAILURE); + } + + config.paths = CFArrayCreateMutable(NULL, + (CFIndex)0, + &kCFTypeArrayCallBacks); + + config.sinceWhen = args_info.since_when_arg; + config.latency = args_info.latency_arg; + config.format = args_info.format_arg; + + if (args_info.no_defer_flag) { + config.flags |= kFSEventStreamCreateFlagNoDefer; + } + if (args_info.watch_root_flag) { + config.flags |= kFSEventStreamCreateFlagWatchRoot; + } + + if (args_info.ignore_self_flag) { + if ((osMajorVersion > 10) | ((osMajorVersion == 10) & (osMinorVersion >= 6))) { + config.flags |= kFSEventStreamCreateFlagIgnoreSelf; + } else { + fprintf(stderr, "MacOSX 10.6 or later is required for --ignore-self\n"); + exit(EXIT_FAILURE); + } + } + + if (args_info.file_events_flag) { + if ((osMajorVersion > 10) | ((osMajorVersion == 10) & (osMinorVersion >= 7))) { + config.flags |= kFSEventStreamCreateFlagFileEvents; + } else { + fprintf(stderr, "MacOSX 10.7 or later required for --file-events\n"); + exit(EXIT_FAILURE); + } + } + + if (args_info.mark_self_flag) { + if ((osMajorVersion > 10) | ((osMajorVersion == 10) & (osMinorVersion >= 9))) { + config.flags |= kFSEventStreamCreateFlagMarkSelf; + } else { + fprintf(stderr, "MacOSX 10.9 or later required for --mark-self\n"); + exit(EXIT_FAILURE); + } + } + + if (args_info.inputs_num == 0) { + append_path("."); + } else { + for (unsigned int i=0; i < args_info.inputs_num; ++i) { + append_path(args_info.inputs[i]); + } + } + + cli_parser_free(&args_info); + +#ifdef DEBUG + fprintf(stderr, "config.sinceWhen %llu\n", 
config.sinceWhen); + fprintf(stderr, "config.latency %f\n", config.latency); + fprintf(stderr, "config.flags %#.8x\n", config.flags); + + FLAG_CHECK_STDERR(config.flags, kFSEventStreamCreateFlagUseCFTypes, + " Using CF instead of C types"); + FLAG_CHECK_STDERR(config.flags, kFSEventStreamCreateFlagNoDefer, + " NoDefer latency modifier enabled"); + FLAG_CHECK_STDERR(config.flags, kFSEventStreamCreateFlagWatchRoot, + " WatchRoot notifications enabled"); + FLAG_CHECK_STDERR(config.flags, kFSEventStreamCreateFlagIgnoreSelf, + " IgnoreSelf enabled"); + FLAG_CHECK_STDERR(config.flags, kFSEventStreamCreateFlagFileEvents, + " FileEvents enabled"); + + fprintf(stderr, "config.paths\n"); + + long numpaths = CFArrayGetCount(config.paths); + + for (long i = 0; i < numpaths; i++) { + char path[PATH_MAX]; + CFStringGetCString(CFArrayGetValueAtIndex(config.paths, i), + path, + PATH_MAX, + kCFStringEncodingUTF8); + fprintf(stderr, " %s\n", path); + } + + fprintf(stderr, "\n"); +#endif +} + +static void callback(__attribute__((unused)) FSEventStreamRef streamRef, + __attribute__((unused)) void* clientCallBackInfo, + size_t numEvents, + void* eventPaths, + const FSEventStreamEventFlags eventFlags[], + const FSEventStreamEventId eventIds[]) +{ + char** paths = eventPaths; + char *buf = calloc(sizeof(FSEVENTSBITS), sizeof(char)); + + for (size_t i = 0; i < numEvents; i++) { + sprintb(buf, eventFlags[i], FSEVENTSBITS); + printf("%llu\t%#.8x=[%s]\t%s\n", eventIds[i], eventFlags[i], buf, paths[i]); + } + fflush(stdout); + free(buf); + + if (fcntl(STDIN_FILENO, F_GETFD) == -1) { + CFRunLoopStop(CFRunLoopGetCurrent()); + } +} + +static void stdin_callback(CFFileDescriptorRef fdref, CFOptionFlags callBackTypes, void *info) +{ + char buf[1024]; + int nread; + + do { + nread = read(STDIN_FILENO, buf, sizeof(buf)); + if (nread == -1 && errno == EAGAIN) { + CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack); + return; + } else if (nread == 0) { + exit(1); + return; + } + } while (nread > 0); +} + +int main(int argc, const char* argv[]) +{ + parse_cli_settings(argc, argv); + + FSEventStreamContext context = {0, NULL, NULL, NULL, NULL}; + FSEventStreamRef stream; + stream = FSEventStreamCreate(kCFAllocatorDefault, + (FSEventStreamCallback)&callback, + &context, + config.paths, + config.sinceWhen, + config.latency, + config.flags); + +#ifdef DEBUG + FSEventStreamShow(stream); + fprintf(stderr, "\n"); +#endif + + fcntl(STDIN_FILENO, F_SETFL, O_NONBLOCK); + + CFFileDescriptorRef fdref = CFFileDescriptorCreate(kCFAllocatorDefault, STDIN_FILENO, false, stdin_callback, NULL); + CFFileDescriptorEnableCallBacks(fdref, kCFFileDescriptorReadCallBack); + CFRunLoopSourceRef source = CFFileDescriptorCreateRunLoopSource(kCFAllocatorDefault, fdref, 0); + CFRunLoopAddSource(CFRunLoopGetCurrent(), source, kCFRunLoopDefaultMode); + CFRelease(source); + + FSEventStreamScheduleWithRunLoop(stream, + CFRunLoopGetCurrent(), + kCFRunLoopDefaultMode); + FSEventStreamStart(stream); + CFRunLoopRun(); + FSEventStreamFlushSync(stream); + FSEventStreamStop(stream); + + return 0; +} + +// vim: ts=2 sts=2 et sw=2 diff --git a/deps/file_system/hex_metadata.config b/deps/file_system/hex_metadata.config new file mode 100644 index 0000000..b549996 --- /dev/null +++ b/deps/file_system/hex_metadata.config @@ -0,0 +1,20 @@ +{<<"app">>,<<"file_system">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>, + <<"A file system change watcher wrapper based on [fs](https://github.com/synrc/fs)">>}. +{<<"elixir">>,<<"~> 1.3">>}. 
+{<<"files">>, + [<<"lib">>,<<"lib/file_system">>,<<"lib/file_system/worker.ex">>, + <<"lib/file_system/backends">>,<<"lib/file_system/backends/fs_inotify.ex">>, + <<"lib/file_system/backends/fs_mac.ex">>, + <<"lib/file_system/backends/fs_poll.ex">>, + <<"lib/file_system/backends/fs_windows.ex">>, + <<"lib/file_system/backend.ex">>,<<"lib/file_system.ex">>,<<"README.md">>, + <<"mix.exs">>,<<"c_src/mac/cli.c">>,<<"c_src/mac/cli.h">>, + <<"c_src/mac/common.h">>,<<"c_src/mac/compat.c">>,<<"c_src/mac/compat.h">>, + <<"c_src/mac/main.c">>,<<"priv/inotifywait.exe">>]}. +{<<"licenses">>,[<<"WTFPL">>]}. +{<<"links">>,[{<<"Github">>,<<"https://github.com/falood/file_system">>}]}. +{<<"name">>,<<"file_system">>}. +{<<"requirements">>,[]}. +{<<"version">>,<<"0.2.10">>}. diff --git a/deps/file_system/lib/file_system.ex b/deps/file_system/lib/file_system.ex new file mode 100644 index 0000000..5dc1903 --- /dev/null +++ b/deps/file_system/lib/file_system.ex @@ -0,0 +1,52 @@ +defmodule FileSystem do + @moduledoc File.read!("README.md") + + @doc """ + ## Options + + * `:dirs` ([string], required), the dir list to monitor + + * `:backend` (atom, optional), default backends: `:fs_mac` + for `macos`, `:fs_inotify` for `linux`, `freebsd` and `openbsd`, + `:fs_windows` for `windows` + + * `:name` (atom, optional), `name` can be used to subscribe as + the same as pid when the `name` is given. The `name` should + be the name of worker process. + + * All rest options will treated as backend options. See backend + module documents for more details. + + ## Example + + Simple usage: + + iex> {:ok, pid} = FileSystem.start_link(dirs: ["/tmp/fs"]) + iex> FileSystem.subscribe(pid) + + Get instant notifications on file changes for Mac OS X: + + iex> FileSystem.start_link(dirs: ["/path/to/some/files"], latency: 0) + + Named monitor with specified backend: + + iex> FileSystem.start_link(backend: :fs_mac, dirs: ["/tmp/fs"], name: :worker) + iex> FileSystem.subscribe(:worker) + """ + @spec start_link(Keyword.t) :: GenServer.on_start() + def start_link(options) do + FileSystem.Worker.start_link(options) + end + + @doc """ + Register the current process as a subscriber of a file_system worker. + The pid you subscribed from will now receive messages like + + {:file_event, worker_pid, {file_path, events}} + {:file_event, worker_pid, :stop} + """ + @spec subscribe(GenServer.server) :: :ok + def subscribe(pid) do + GenServer.call(pid, :subscribe) + end +end diff --git a/deps/file_system/lib/file_system/backend.ex b/deps/file_system/lib/file_system/backend.ex new file mode 100644 index 0000000..2659f32 --- /dev/null +++ b/deps/file_system/lib/file_system/backend.ex @@ -0,0 +1,70 @@ +require Logger + +defmodule FileSystem.Backend do + @moduledoc """ + FileSystem Backend Behaviour. + """ + + @callback bootstrap() :: :ok | {:error, atom()} + @callback supported_systems() :: [{atom(), atom()}] + @callback known_events() :: [atom()] + + @doc """ + Get and validate backend module, return `{:ok, backend_module}` when success and + return `{:error, reason}` when fail. + When `nil` is given, will return default backend by os. + When a custom module is given, make sure `start_link/1`, `bootstrap/0` and + `supported_system/0` are defnied. 
+ """ + @spec backend(atom) :: {:ok, atom()} | {:error, atom()} + def backend(backend) do + with {:ok, module} <- backend_module(backend), + :ok <- validate_os(backend, module), + :ok <- module.bootstrap + do + {:ok, module} + else + {:error, reason} -> {:error, reason} + end + end + + defp backend_module(nil) do + case :os.type() do + {:unix, :darwin} -> :fs_mac + {:unix, :linux} -> :fs_inotify + {:unix, :freebsd} -> :fs_inotify + {:unix, :openbsd} -> :fs_inotify + {:win32, :nt} -> :fs_windows + system -> {:unsupported_system, system} + end |> backend_module + end + defp backend_module(:fs_mac), do: {:ok, FileSystem.Backends.FSMac} + defp backend_module(:fs_inotify), do: {:ok, FileSystem.Backends.FSInotify} + defp backend_module(:fs_windows), do: {:ok, FileSystem.Backends.FSWindows} + defp backend_module(:fs_poll), do: {:ok, FileSystem.Backends.FSPoll} + defp backend_module({:unsupported_system, system}) do + Logger.error "I'm so sorry but `file_system` does NOT support your current system #{inspect system} for now." + {:error, :unsupported_system} + end + defp backend_module(module) do + functions = module.__info__(:functions) + {:start_link, 1} in functions && + {:bootstrap, 0} in functions && + {:supported_systems, 0} in functions || + raise "illegal backend" + rescue + _ -> + Logger.error "You are using custom backend `#{inspect module}`, make sure it's a legal file_system backend module." + {:error, :illegal_backend} + end + + defp validate_os(backend, module) do + os_type = :os.type() + if os_type in module.supported_systems() do + :ok + else + Logger.error "The backend `#{backend}` you are using does NOT support your current system #{inspect os_type}." + {:error, :unsupported_system} + end + end +end diff --git a/deps/file_system/lib/file_system/backends/fs_inotify.ex b/deps/file_system/lib/file_system/backends/fs_inotify.ex new file mode 100644 index 0000000..8fd5566 --- /dev/null +++ b/deps/file_system/lib/file_system/backends/fs_inotify.ex @@ -0,0 +1,169 @@ +require Logger + +defmodule FileSystem.Backends.FSInotify do + @moduledoc """ + This file is a fork from https://github.com/synrc/fs. + FileSystem backend for linux, freebsd and openbsd, a GenServer receive data from Port, parse event + and send it to the worker process. + Need `inotify-tools` installed to use this backend. + + ## Backend Options + + * `:recursive` (bool, default: true), monitor directories and their contents recursively + + ## Executable File Path + + The default behaivour to find executable file is finding `inotifywait` from `$PATH`, there're two ways to custom it, useful when run `:file_system` with escript. + + * config with `config.exs` + `config :file_system, :fs_inotify, executable_file: "YOUR_EXECUTABLE_FILE_PATH"` + + * config with `FILESYSTEM_FSINOTIFY_EXECUTABLE_FILE` os environment + FILESYSTEM_FSINOTIFY_EXECUTABLE_FILE=YOUR_EXECUTABLE_FILE_PATH + """ + + use GenServer + @behaviour FileSystem.Backend + @sep_char <<1>> + + def bootstrap do + exec_file = executable_path() + if is_nil(exec_file) do + Logger.error "`inotify-tools` is needed to run `file_system` for your system, check https://github.com/rvoicilas/inotify-tools/wiki for more information about how to install it. If it's already installed but not be found, appoint executable file with `config.exs` or `FILESYSTEM_FSINOTIFY_EXECUTABLE_FILE` env." 
+ {:error, :fs_inotify_bootstrap_error} + else + :ok + end + end + + def supported_systems do + [{:unix, :linux}, {:unix, :freebsd}, {:unix, :openbsd}] + end + + def known_events do + [:created, :deleted, :closed, :modified, :isdir, :attribute, :undefined] + end + + defp executable_path do + executable_path(:system_env) || executable_path(:config) || executable_path(:system_path) + end + + defp executable_path(:config) do + Application.get_env(:file_system, :fs_inotify)[:executable_file] + end + + defp executable_path(:system_env) do + System.get_env("FILESYSTEM_FSINOTIFY_EXECUTABLE_FILE") + end + + defp executable_path(:system_path) do + System.find_executable("inotifywait") + end + + def parse_options(options) do + case Keyword.pop(options, :dirs) do + {nil, _} -> + Logger.error "required argument `dirs` is missing" + {:error, :missing_dirs_argument} + {dirs, rest} -> + format = ["%w", "%e", "%f"] |> Enum.join(@sep_char) |> to_charlist + args = [ + '-e', 'modify', '-e', 'close_write', '-e', 'moved_to', '-e', 'moved_from', + '-e', 'create', '-e', 'delete', '-e', 'attrib', '--format', format, '--quiet', '-m', '-r' + | dirs |> Enum.map(&Path.absname/1) |> Enum.map(&to_charlist/1) + ] + parse_options(rest, args) + end + end + + defp parse_options([], result), do: {:ok, result} + defp parse_options([{:recursive, true} | t], result) do + parse_options(t, result) + end + defp parse_options([{:recursive, false} | t], result) do + parse_options(t, result -- ['-r']) + end + defp parse_options([{:recursive, value} | t], result) do + Logger.error "unknown value `#{inspect value}` for recursive, ignore" + parse_options(t, result) + end + defp parse_options([h | t], result) do + Logger.error "unknown option `#{inspect h}`, ignore" + parse_options(t, result) + end + + def start_link(args) do + GenServer.start_link(__MODULE__, args, []) + end + + def init(args) do + {worker_pid, rest} = Keyword.pop(args, :worker_pid) + + case parse_options(rest) do + {:ok, port_args} -> + bash_args = ['-c', '#{executable_path()} "$0" "$@" & PID=$!; read a; kill -KILL $PID'] + + all_args = + case :os.type() do + {:unix, :freebsd} -> + bash_args ++ ['--'] ++ port_args + + _ -> + bash_args ++ port_args + end + + port = Port.open( + {:spawn_executable, '/bin/sh'}, + [:stream, :exit_status, {:line, 16384}, {:args, all_args}, {:cd, System.tmp_dir!()}] + ) + + Process.link(port) + Process.flag(:trap_exit, true) + + {:ok, %{port: port, worker_pid: worker_pid}} + + {:error, _} -> + :ignore + end + end + + def handle_info({port, {:data, {:eol, line}}}, %{port: port}=state) do + {file_path, events} = line |> parse_line + send(state.worker_pid, {:backend_file_event, self(), {file_path, events}}) + {:noreply, state} + end + + def handle_info({port, {:exit_status, _}}, %{port: port}=state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} + end + + def handle_info({:EXIT, port, _reason}, %{port: port}=state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} + end + + def handle_info(_, state) do + {:noreply, state} + end + + def parse_line(line) do + {path, flags} = + case line |> to_string |> String.split(@sep_char, trim: true) do + [dir, flags, file] -> {Path.join(dir, file), flags} + [path, flags] -> {path, flags} + end + {path, flags |> String.split(",") |> Enum.map(&convert_flag/1)} + end + + defp convert_flag("CREATE"), do: :created + defp convert_flag("MOVED_TO"), do: :moved_to + defp convert_flag("DELETE"), do: :deleted + defp 
convert_flag("MOVED_FROM"), do: :moved_from + defp convert_flag("ISDIR"), do: :isdir + defp convert_flag("MODIFY"), do: :modified + defp convert_flag("CLOSE_WRITE"), do: :modified + defp convert_flag("CLOSE"), do: :closed + defp convert_flag("ATTRIB"), do: :attribute + defp convert_flag(_), do: :undefined +end diff --git a/deps/file_system/lib/file_system/backends/fs_mac.ex b/deps/file_system/lib/file_system/backends/fs_mac.ex new file mode 100644 index 0000000..cd0236a --- /dev/null +++ b/deps/file_system/lib/file_system/backends/fs_mac.ex @@ -0,0 +1,180 @@ +require Logger + +defmodule FileSystem.Backends.FSMac do + @moduledoc """ + This file is a fork from https://github.com/synrc/fs. + FileSysetm backend for macos, a GenServer receive data from Port, parse event + and send it to the worker process. + Will compile executable the buildin executable file when file the first time it is used. + + ## Backend Options + + * `:latency` (float, default: 0.5), latency period. + + * `:no_defer` (bool, default: false), enable no-defer latency modifier. + Works with latency parameter, Also check apple `FSEvent` api documents + https://developer.apple.com/documentation/coreservices/kfseventstreamcreateflagnodefer + + * `:watch_root` (bool, default: false), watch for when the root path has changed. + Set the flag `true` to monitor events when watching `/tmp/fs/dir` and run + `mv /tmp/fs /tmp/fx`. Also check apple `FSEvent` api documents + https://developer.apple.com/documentation/coreservices/kfseventstreamcreateflagwatchroot + + * recursive is enabled by default, no option to disable it for now. + + ## Executable File Path + + The default executable file is `mac_listener` in `priv` dir of `:file_system` application, there're two ways to custom it, useful when run `:file_system` with escript. + + * config with `config.exs` + `config :file_system, :fs_mac, executable_file: "YOUR_EXECUTABLE_FILE_PATH"` + + * config with `FILESYSTEM_FSMAC_EXECUTABLE_FILE` os environment + FILESYSTEM_FSMAC_EXECUTABLE_FILE=YOUR_EXECUTABLE_FILE_PATH + """ + + use GenServer + @behaviour FileSystem.Backend + + @default_exec_file "mac_listener" + + def bootstrap do + exec_file = executable_path() + if not is_nil(exec_file) and File.exists?(exec_file) do + :ok + else + Logger.error "Can't find executable `mac_listener`" + {:error, :fs_mac_bootstrap_error} + end + end + + def supported_systems do + [{:unix, :darwin}] + end + + def known_events do + [ :mustscansubdirs, :userdropped, :kerneldropped, :eventidswrapped, :historydone, + :rootchanged, :mount, :unmount, :created, :removed, :inodemetamod, :renamed, :modified, + :finderinfomod, :changeowner, :xattrmod, :isfile, :isdir, :issymlink, :ownevent, + ] + end + + defp executable_path do + executable_path(:system_env) || executable_path(:config) || executable_path(:system_path) || executable_path(:priv) + end + + defp executable_path(:config) do + Application.get_env(:file_system, :fs_mac)[:executable_file] + end + + defp executable_path(:system_env) do + System.get_env("FILESYSTEM_FSMAC_EXECUTABLE_FILE") + end + + defp executable_path(:system_path) do + System.find_executable(@default_exec_file) + end + + defp executable_path(:priv) do + case :code.priv_dir(:file_system) do + {:error, _} -> + Logger.error "`priv` dir for `:file_system` application is not avalible in current runtime, appoint executable file with `config.exs` or `FILESYSTEM_FSMAC_EXECUTABLE_FILE` env." 
+ nil + dir when is_list(dir) -> + Path.join(dir, @default_exec_file) + end + end + + def parse_options(options) do + case Keyword.pop(options, :dirs) do + {nil, _} -> + Logger.error "required argument `dirs` is missing" + {:error, :missing_dirs_argument} + {dirs, rest} -> + args = ['-F' | dirs |> Enum.map(&Path.absname/1) |> Enum.map(&to_charlist/1)] + parse_options(rest, args) + end + end + + defp parse_options([], result), do: {:ok, result} + defp parse_options([{:latency, latency} | t], result) do + result = + if is_float(latency) or is_integer(latency) do + ['--latency=#{latency / 1}' | result] + else + Logger.error "latency should be integer or float, got `#{inspect latency}, ignore" + result + end + parse_options(t, result) + end + defp parse_options([{:no_defer, true} | t], result) do + parse_options(t, ['--no-defer' | result]) + end + defp parse_options([{:no_defer, false} | t], result) do + parse_options(t, result) + end + defp parse_options([{:no_defer, value} | t], result) do + Logger.error "unknown value `#{inspect value}` for no_defer, ignore" + parse_options(t, result) + end + defp parse_options([{:with_root, true} | t], result) do + parse_options(t, ['--with-root' | result]) + end + defp parse_options([{:with_root, false} | t], result) do + parse_options(t, result) + end + defp parse_options([{:with_root, value} | t], result) do + Logger.error "unknown value `#{inspect value}` for with_root, ignore" + parse_options(t, result) + end + defp parse_options([h | t], result) do + Logger.error "unknown option `#{inspect h}`, ignore" + parse_options(t, result) + end + + def start_link(args) do + GenServer.start_link(__MODULE__, args, []) + end + + def init(args) do + {worker_pid, rest} = Keyword.pop(args, :worker_pid) + case parse_options(rest) do + {:ok, port_args} -> + port = Port.open( + {:spawn_executable, to_charlist(executable_path())}, + [:stream, :exit_status, {:line, 16384}, {:args, port_args}, {:cd, System.tmp_dir!()}] + ) + Process.link(port) + Process.flag(:trap_exit, true) + {:ok, %{port: port, worker_pid: worker_pid}} + {:error, _} -> + :ignore + end + end + + def handle_info({port, {:data, {:eol, line}}}, %{port: port}=state) do + {file_path, events} = line |> parse_line + send(state.worker_pid, {:backend_file_event, self(), {file_path, events}}) + {:noreply, state} + end + + def handle_info({port, {:exit_status, _}}, %{port: port}=state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} + end + + def handle_info({:EXIT, port, _reason}, %{port: port}=state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} + end + + def handle_info(_, state) do + {:noreply, state} + end + + def parse_line(line) do + [_, _, events, path] = line |> to_string |> String.split(["\t", "="], parts: 4) + {path, events |> String.split(["[", ",", "]"], trim: true) |> Enum.map(&String.to_existing_atom/1)} + end + +end diff --git a/deps/file_system/lib/file_system/backends/fs_poll.ex b/deps/file_system/lib/file_system/backends/fs_poll.ex new file mode 100644 index 0000000..362ed1d --- /dev/null +++ b/deps/file_system/lib/file_system/backends/fs_poll.ex @@ -0,0 +1,106 @@ +require Logger + +defmodule FileSystem.Backends.FSPoll do + @moduledoc """ + FileSysetm backend for any OS, a GenServer that regularly scans file system to + detect changes and send them to the worker process. 
+ + ## Backend Options + + * `:interval` (integer, default: 1000), polling interval + + ## Use FSPoll Backend + + Unlike other backends, polling backend is never automatically chosen in any + OS environment, despite being usable on all platforms. + + To use polling backend, one has to explicitly specify in the backend option. + """ + + use GenServer + @behaviour FileSystem.Backend + + def bootstrap, do: :ok + + def supported_systems do + [{:unix, :linux}, {:unix, :freebsd}, {:unix, :openbsd}, {:unix, :darwin}, {:win32, :nt}] + end + + def known_events do + [:created, :deleted, :modified] + end + + def start_link(args) do + GenServer.start_link(__MODULE__, args, []) + end + + def init(args) do + worker_pid = Keyword.fetch!(args, :worker_pid) + dirs = Keyword.fetch!(args, :dirs) + interval = Keyword.get(args, :interval, 1000) + + Logger.info("Polling file changes every #{interval}ms...") + send(self(), :first_check) + + {:ok, {worker_pid, dirs, interval, %{}}} + end + + def handle_info(:first_check, {worker_pid, dirs, interval, _empty_map}) do + schedule_check(interval) + {:noreply, {worker_pid, dirs, interval, files_mtimes(dirs)}} + end + + def handle_info(:check, {worker_pid, dirs, interval, stale_mtimes}) do + fresh_mtimes = files_mtimes(dirs) + + diff(stale_mtimes, fresh_mtimes) + |> Tuple.to_list + |> Enum.zip([:created, :deleted, :modified]) + |> Enum.each(&report_change(&1, worker_pid)) + + schedule_check(interval) + {:noreply, {worker_pid, dirs, interval, fresh_mtimes}} + end + + defp schedule_check(interval) do + Process.send_after(self(), :check, interval) + end + + defp files_mtimes(dirs, files_mtimes_map \\ %{}) do + Enum.reduce(dirs, files_mtimes_map, fn dir, map -> + case File.stat!(dir) do + %{type: :regular, mtime: mtime} -> + Map.put(map, dir, mtime) + %{type: :directory} -> + dir + |> Path.join("*") + |> Path.wildcard + |> files_mtimes(map) + %{type: _other} -> + map + end + end) + end + + @doc false + def diff(stale_mtimes, fresh_mtimes) do + fresh_file_paths = fresh_mtimes |> Map.keys |> MapSet.new + stale_file_paths = stale_mtimes |> Map.keys |> MapSet.new + + created_file_paths = + MapSet.difference(fresh_file_paths, stale_file_paths) |> MapSet.to_list + deleted_file_paths = + MapSet.difference(stale_file_paths, fresh_file_paths) |> MapSet.to_list + modified_file_paths = + for file_path <- MapSet.intersection(stale_file_paths, fresh_file_paths), + stale_mtimes[file_path] != fresh_mtimes[file_path], do: file_path + + {created_file_paths, deleted_file_paths, modified_file_paths} + end + + defp report_change({file_paths, event}, worker_pid) do + for file_path <- file_paths do + send(worker_pid, {:backend_file_event, self(), {file_path, [event]}}) + end + end +end diff --git a/deps/file_system/lib/file_system/backends/fs_windows.ex b/deps/file_system/lib/file_system/backends/fs_windows.ex new file mode 100644 index 0000000..8e3d1ae --- /dev/null +++ b/deps/file_system/lib/file_system/backends/fs_windows.ex @@ -0,0 +1,160 @@ +require Logger + +defmodule FileSystem.Backends.FSWindows do + @moduledoc """ + This file is a fork from https://github.com/synrc/fs. + FileSysetm backend for windows, a GenServer receive data from Port, parse event + and send it to the worker process. + Need binary executable file packaged in to use this backend. 
+ + ## Backend Options + + * `:recursive` (bool, default: true), monitor directories and their contents recursively + + ## Executable File Path + + The default executable file is `inotifywait.exe` in `priv` dir of `:file_system` application, there're two ways to custom it, useful when run `:file_system` with escript. + + * config with `config.exs` + `config :file_system, :fs_windows, executable_file: "YOUR_EXECUTABLE_FILE_PATH"` + + * config with `FILESYSTEM_FSWINDOWS_EXECUTABLE_FILE` os environment + FILESYSTEM_FSWINDOWS_EXECUTABLE_FILE=YOUR_EXECUTABLE_FILE_PATH + """ + + use GenServer + @behaviour FileSystem.Backend + @sep_char <<1>> + + @default_exec_file "inotifywait.exe" + + def bootstrap do + exec_file = executable_path() + if not is_nil(exec_file) and File.exists?(exec_file) do + :ok + else + Logger.error "Can't find executable `inotifywait.exe`" + {:error, :fs_windows_bootstrap_error} + end + end + + def supported_systems do + [{:win32, :nt}] + end + + def known_events do + [:created, :modified, :removed, :renamed, :undefined] + end + + defp executable_path do + executable_path(:system_env) || executable_path(:config) || executable_path(:system_path) || executable_path(:priv) + end + + defp executable_path(:config) do + Application.get_env(:file_system, :fs_windows)[:executable_file] + end + + defp executable_path(:system_env) do + System.get_env("FILESYSTEM_FSMWINDOWS_EXECUTABLE_FILE") + end + + defp executable_path(:system_path) do + System.find_executable(@default_exec_file) + end + + defp executable_path(:priv) do + case :code.priv_dir(:file_system) do + {:error, _} -> + Logger.error "`priv` dir for `:file_system` application is not avalible in current runtime, appoint executable file with `config.exs` or `FILESYSTEM_FSWINDOWS_EXECUTABLE_FILE` env." 
+ nil + dir when is_list(dir) -> + Path.join(dir, @default_exec_file) + end + end + + def parse_options(options) do + case Keyword.pop(options, :dirs) do + {nil, _} -> + Logger.error "required argument `dirs` is missing" + {:error, :missing_dirs_argument} + {dirs, rest} -> + format = ["%w", "%e", "%f"] |> Enum.join(@sep_char) |> to_charlist + args = [ + '--format', format, '--quiet', '-m', '-r' + | dirs |> Enum.map(&Path.absname/1) |> Enum.map(&to_charlist/1) + ] + parse_options(rest, args) + end + end + + defp parse_options([], result), do: {:ok, result} + defp parse_options([{:recursive, true} | t], result) do + parse_options(t, result) + end + defp parse_options([{:recursive, false} | t], result) do + parse_options(t, result -- ['-r']) + end + defp parse_options([{:recursive, value} | t], result) do + Logger.error "unknown value `#{inspect value}` for recursive, ignore" + parse_options(t, result) + end + defp parse_options([h | t], result) do + Logger.error "unknown option `#{inspect h}`, ignore" + parse_options(t, result) + end + + def start_link(args) do + GenServer.start_link(__MODULE__, args, []) + end + + def init(args) do + {worker_pid, rest} = Keyword.pop(args, :worker_pid) + case parse_options(rest) do + {:ok, port_args} -> + port = Port.open( + {:spawn_executable, to_charlist(executable_path())}, + [:stream, :exit_status, {:line, 16384}, {:args, port_args}, {:cd, System.tmp_dir!()}] + ) + Process.link(port) + Process.flag(:trap_exit, true) + {:ok, %{port: port, worker_pid: worker_pid}} + {:error, _} -> + :ignore + end + end + + def handle_info({port, {:data, {:eol, line}}}, %{port: port}=state) do + {file_path, events} = line |> parse_line + send(state.worker_pid, {:backend_file_event, self(), {file_path, events}}) + {:noreply, state} + end + + def handle_info({port, {:exit_status, _}}, %{port: port}=state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} + end + + def handle_info({:EXIT, port, _reason}, %{port: port}=state) do + send(state.worker_pid, {:backend_file_event, self(), :stop}) + {:stop, :normal, state} + end + + def handle_info(_, state) do + {:noreply, state} + end + + def parse_line(line) do + {path, flags} = + case line |> to_string |> String.split(@sep_char, trim: true) do + [dir, flags, file] -> {Enum.join([dir, file], "\\"), flags} + [path, flags] -> {path, flags} + end + {path |> Path.split() |> Path.join(), flags |> String.split(",") |> Enum.map(&convert_flag/1)} + end + + defp convert_flag("CREATE"), do: :created + defp convert_flag("MODIFY"), do: :modified + defp convert_flag("DELETE"), do: :removed + defp convert_flag("MOVED_TO"), do: :renamed + defp convert_flag(_), do: :undefined +end diff --git a/deps/file_system/lib/file_system/worker.ex b/deps/file_system/lib/file_system/worker.ex new file mode 100644 index 0000000..e24b755 --- /dev/null +++ b/deps/file_system/lib/file_system/worker.ex @@ -0,0 +1,50 @@ +defmodule FileSystem.Worker do + @moduledoc """ + FileSystem Worker Process with the backend GenServer, receive events from Port Process + and forward it to subscribers. 
+ """ + + use GenServer + + @doc false + def start_link(args) do + {opts, args} = Keyword.split(args, [:name]) + GenServer.start_link(__MODULE__, args, opts) + end + + @doc false + def init(args) do + {backend, rest} = Keyword.pop(args, :backend) + with {:ok, backend} <- FileSystem.Backend.backend(backend), + {:ok, backend_pid} <- backend.start_link([{:worker_pid, self()} | rest]) + do + {:ok, %{backend_pid: backend_pid, subscribers: %{}}} + else + _ -> :ignore + end + end + + @doc false + def handle_call(:subscribe, {pid, _}, state) do + ref = Process.monitor(pid) + state = put_in(state, [:subscribers, ref], pid) + {:reply, :ok, state} + end + + @doc false + def handle_info({:backend_file_event, backend_pid, file_event}, %{backend_pid: backend_pid}=state) do + state.subscribers |> Enum.each(fn {_ref, subscriber_pid} -> + send(subscriber_pid, {:file_event, self(), file_event}) + end) + {:noreply, state} + end + + def handle_info({:DOWN, ref, _, _pid, _reason}, state) do + subscribers = Map.drop(state.subscribers, [ref]) + {:noreply, %{state | subscribers: subscribers}} + end + + def handle_info(_, state) do + {:noreply, state} + end +end diff --git a/deps/file_system/mix.exs b/deps/file_system/mix.exs new file mode 100644 index 0000000..25c2257 --- /dev/null +++ b/deps/file_system/mix.exs @@ -0,0 +1,75 @@ +defmodule FileSystem.Mixfile do + use Mix.Project + + def project do + [ app: :file_system, + version: "0.2.10", + elixir: "~> 1.3", + deps: deps(), + description: "A file system change watcher wrapper based on [fs](https://github.com/synrc/fs)", + source_url: "https://github.com/falood/file_system", + package: package(), + compilers: [:file_system | Mix.compilers()], + aliases: ["compile.file_system": &file_system/1], + docs: [ + extras: ["README.md"], + main: "readme", + ] + ] + end + + def application do + [ + applications: [:logger], + ] + end + + defp deps do + [ + { :ex_doc, "~> 0.14", only: :docs }, + ] + end + + defp file_system(_args) do + case :os.type() do + {:unix, :darwin} -> compile_mac() + _ -> :ok + end + end + + defp compile_mac do + require Logger + source = "c_src/mac/*.c" + target = "priv/mac_listener" + + if Mix.Utils.stale?(Path.wildcard(source), [target]) do + Logger.info "Compiling file system watcher for Mac..." + cmd = "clang -framework CoreFoundation -framework CoreServices -Wno-deprecated-declarations #{source} -o #{target}" + if Mix.shell().cmd(cmd) > 0 do + Logger.error "Could not compile file system watcher for Mac, try to run #{inspect cmd} manually inside the dependency." + else + Logger.info "Done." 
+ end + :ok + else + :noop + end + end + + defp package do + %{ maintainers: ["Xiangrong Hao", "Max Veytsman"], + files: [ + "lib", "README.md", "mix.exs", + "c_src/mac/cli.c", + "c_src/mac/cli.h", + "c_src/mac/common.h", + "c_src/mac/compat.c", + "c_src/mac/compat.h", + "c_src/mac/main.c", + "priv/inotifywait.exe", + ], + licenses: ["WTFPL"], + links: %{"Github" => "https://github.com/falood/file_system"} + } + end +end diff --git a/deps/file_system/priv/inotifywait.exe b/deps/file_system/priv/inotifywait.exe new file mode 100644 index 0000000000000000000000000000000000000000..8b6a86ecc6dd6e5096aeaec450b76cee1504e44b GIT binary patch literal 14848 zcmeHOeQ+FCk?%LNJG(1OYwgOGyHTKlp|2&NFccj1PtNw0ff|rtK&1_I6gvv0xsb@smmSbOOApgaK%RfMR5hr z{kms%wUQi?s{4y8#@U|N{kr@0>+aX@?bBw@>+dF=h(h?h{4&uKc=NMe;2#H5=#DS@ z$2ff=_RR7p)b3}N4;{BXv*bEsZg$+vWs60pY>rr_TPd1$(d^%M&>VO2R;H<`VP&9t z{{Yc$rPF&mem_(l?FG`*JQXF%fYRVox9!2(#OD}3Y@Lah=G%>y*w1fz03Xo#(P{ge ziJIlV>34woEcA=rMAvZp7}0(sAlAR8h#Kqh*V;CTFoW6?1%6cx!_>;w=`!&CGywW! zu7Taal8X!*Cu@Gp6x_C>QHH#>{mQ& zn?_;!7JP1AM5KT(Bf5}BnEe%>mUTMulpvp=kp6c2yw-YN&5NYl$OWwwb{7Js;s~ra zAK(@}bhe?JjMb27Sr?+O;SJLhTUqZ=f>bh+jT8Q!bpTM(-2E+F1-Bm%ZAeqiKcFf`SXguGD`xv!V>FP;+`54Bp$3GHHph%l@79YL+4YRTcG?@URFq ziymds-_8=1Ve95l6K(UB*2$WX)!imVC8q;qxBw<^!LUIx@?5vxA8mJ#nGJ1cU&R66 zL5k|`vYFbtc@h^^H^T$(6yXgsg)ezUQ|evNeN=Y`X9}iXQE-L>x;yn0mVPktg%EO@ z;8i_kvXf+)(CU_TVfuSmhkXNU6>H0AXq~s|4p2ysoOQF+k<-f<9jl^int{qnZI?CH zt=d|`iEw%wOEtDNb*~^zxTCQ)XF0}8bpq%~Wu&lr5ZPGUbxkpUtz*5h=r^dlnKX#k zaZ#l0x_J?53cEcMrQTH+0*@uNI&l{KoX=)ARs;i>t%Yx zr(+5G^otReNygywDo-GbC0577Sir;fqxQjmXqz6N5sV+AHwNQqi|@jk#ma1TN8pNw zj5fC@5W z;O{ByVIjJeVOy({;%pIQaa$eI-tHr``Z>*81)fRNwvg63iQO%vc^tP%>{}si!6YhI zbp-YsqqKU2QgBRz`ym(6OpqFk&?GY%+9Z#FT2MwsP?M}vGeor^ zT8;At7Omm+urh1|h<43VUUvo^NBrc`Ca^_Tjpb&&vDj}zi?j)Bk|Ae(&9GM$SsaL5 zU@i?j7t|7&3+u&Pc+}clcu35JUxo~fxnSAPoD0zs?3RyU0?5+@>8N?y1jnzdF{x4; zU@1;7{zTwBBaL1TwCtj7O|i62=bWG)QpTDkbx6qRg}k#7pq~GVuyZNvty2p{3rTD9 zb7%q^ZOFMyhNg4P5y+xH5>0Jl()SSnQm}!~cJH{C55v3`b$SkOtXURrY;D@~382x~ z*#-(bI5RcG!>P@9%`^P76Cb~p4P{Dz*7XBe(Q*ofrKqRNPGz=r+j=~7Cu)r5T+X9L zs{4l&=s5PjPa%JuE!;=lyYxtEE1EvueVOi$8A)veQN|5=I`q^PAl-@8rhLWS4Z0C@ zR=0v$jo*qIulwZ`OQr>JLN*ifG(oBDP}6V$kF7>L4f+YH0~QJ5S(cjO^?|)9W;i?W zrdZ&4lpw>|DHQihJe_`N<1DJ3H;^`lHvodOi>a-Rfe`#{NP%*$1Rb$qSEd(HwCz|U z%OM2~?<$rXWVvdTMXbbw%o1BXTkPsNVqE@ai?z=dyJn6USIgOA7x`jXE{3<8joec& zh9!VKH4)nUCipq1Q%^(7lDptf#i4JlOWJhAO-88H9d=jZ^-v<*7QGWLRcN&v09fu) zt=P@r-R$#j@p%`!{b?3uHTJnyo{4u5l3gvvBrlkR(E=+2Ymi`uGlbrj zXm=03OB_NIHk>zbb~L=hz`P>>^CE_G9o`V*9U$Ud&j>-_^=P!j60!6b&&^Lp&$nV3 zEvXs`HL$ZzO=DC%Q&lBv-=CjR>1nivb`;rF-2N18`dPKzPCwNZ zb_l=TU{o$XI4fdLFdDSC+iR9yPNpKp`U2*6jU4zH{G`Bd2z*IkD9oH40uO~R4I5+&yf1tT?Z*Ya7-pS67pM$w4;gILh`|=# zV(_?c6Zj#6rM@ma-xp|#&A*5o1^=cf+kB(IcLN&quhFB_LO+YLA6jA$i)(zpaFi9| zvzS)FdsXc60E>_U+W@c_Hmil=%iYo!2c~07{1p;0Obf^q><9u8rZ(yX=L@tE@W-?T z@E6klm@1+DjKCiW{DbnyFs|0_qQg@Mw7UVX7kEtIm_QeB0lioI2xLE_Jr4LKjrF{w zeID?<_7q^d{uRv3i2M?Mibj3|*c|#D;7rN_ZNJI6a638hkD@yRtb&>qqWp61yA8eV zn8T)(jJ<{a8aWrEUkBJnQ1lw;sV?bje}a0j`#4gJ?3LFha~l0!j|3Bev$rl_`(+KE$I6Ou)h`TcIwr?ha6gf zYQE*;DrFYRsT;!ecdX*?=k%)tj=Exb~}Au{}nBze?e7Ye||+L1xI)L*gpg7 zqy=dodsYvt6_m%Bn&@5hydDGgM9r2ST1j85f1wJb9qXIuI@JYaa+y}Dkw*eQZ z=K*`@Mf!1gk$Oq!jRD0ly48!+XoLaVjP-!ajmy+8X_c`>HPU)xJ1CbLyU^ZlT!Z#i zhK2SMbV98XJq#|vsoRaeQVDgp@eYxFFW`^E52{7Thz_qk7-$Y zGul1so=6Ai_o2OtJ`kBi?_t+{AKQ-g#hhz|vyUzZrH`(R^#DErZEJ+*McNaa#NPu}(!HqVDS8j!8hStA zI(itemp+W^^Aq%_oIA#a=;C=8}M?W929s1oz!om z4eCAgpui^uJ|*x4fiDXDfk0BsX$ssf@R-0UpQ3T^F@dJKR^6)}Q6E#EQlC>#srg#F zwo1E98_-^Ov5s~Ot4*P% 
zhtbBD5VRWsHPq)AU>r1sDmxGDX3!KWXA{~<&=h$I3Sa5`70BiVB9xaInpG<2q9RkO;g26^bXYi0A8EBP%NE(-;*^~z2uFqHfEWQ&z- z;eZ8?i;V{h_V{cj>7ZfBc{`Wo+&N%n#h~3z?gR~IU3|I76Q#fJN3yFaQEu%#K_6dN!XlgKJt}FbwsHU368pm@il}c=uJxrAnC&!U0$v zP7W!Ati?iv6YL_%By8eQxlK-qCrZe)N})`r{8c4^BhRV0Ig2>gig~OxM>Gc`mwCZ@ zR7PAVpDkyrF*#t31`Ch+JkJ^*DV!Oy%LS{ijBjTnl`^VGK<>A^oNLSQb31oB#Zh~# z;z}4U)IDCx7SGJ>z>M>iTzPKCPN#Inwa1QMplis@=B@Fpd*T98a7x9!fboYd)Cs3J zEfFkW__=J4TbaQks*_VXXt^hCgn)KBB|Cy>ApXJJ8gZ&(p!mi0A7aa{631dj^>`Ja98s9qgb%F7=k1#r#o$rJC)N!LXLg zS-0#Ru`xuy?V@Ho?im_5jau!Y@Pm4#Ro+!86r?Rxf45z<*rDPdDel>_Rp&`AtAo7F zV1}Mku>88o3l@5-lZ4i3WXrbP@NmEFl?vH2I}2G4H{Yjd%eLNO6cpQ|Ew$xcztXmi z3=gjp-B@x}_h8fGQYEO^z~z1~FSa>j^s?T$i=EYO@YNRbB00dhB$S#T2E9F89TAF? zSch~FI}lvvTOp2udq&tz;xKU`l1a2-;x#`Dp&#BGLe9zvd!17VPc|<;W=D!P@n}&t z9>h5xgvmx;a}TN$8#rAO{i@_rHlH8%3nC&>M)mP#SH^nIohasrqe%Hh-*5L@1&itR zrLtZwpwrb3${X7=P~<#2ki~i{j!9+pu$jO+gB)VV<6h(%H!V4XATfDo=alf%lraw6 z&Wh{etiT5lgb*H_>ZMBj+xA}YKBa3$5!M^H%> z8b>`XQkhI#CYhjFbTg>7v^-F=_?!eRfNJ4(+@ewBB5o$z(zuOc{v3L641vr^U@FX1 z)xy2wxK}8GUd9_V^o*05&prMa7J8U^b3nC316#vX=$bc8p#;l~il%Yc1D>XOjATTM zPQxCyHP7<%XX3Pwb$QV+0a;MR3$rzX4*S!jbbRI?f^V|$wMCoR z%iSj3fLW9<16%y#(T$*&WKPG?x&hxh_#ZA<6Z>ZH;g2U%_py7|LivT9&jXlr07K0T zN`cOReoBVO;g4k;)&l;SaHx$pwp5`N;WiWP3XP!UCqWK|a`^0k3wyN$n;krDwmQSX z_3fG&cHbtp>l#Q{m=LE~1zI>Yig1i67`uZlIw5biiET;YodHhIRz@O8_r=1-#ATKC+zM~go$x>EjysTpV!tZ^l>oi zL&^L((8lpEi=^M4bubam#A+^b&dzk*DY(;-DNbiDj*u{xbI~`MUGC>| zSz2sz36z{Tn6sGkiUYt)@-(Sy=h|vZ2sv~7e|)u3#y>~Glf#JY=CrthTmFjR43QF; zsCStMU36I&xT@ObzS%bewc^K_>d%Qbb|D;d`KuH^JpKxvDRt^qr1oP3_D`MEo-UO2 zs^+*dwZI)N*hN$6aIi=cy8&4Z|M{CAlQdTC%Q@xN^~zG@1yTo2^@HO|<2 zU=z5gVOlkHCvedN-H9VI7tS6(xoP1Z*fqU<1yn^5Q);gY^U@;}`YW&%aSn0b6_9JH zjd{7&_)D7WF6R%Pf1mi~lP_=9cTIishtEFObo6P8K7Py5!;5-fxFcff@s4;j9yV1+ zJln3{?wo8TU07z>jL71xc9I8ZHQDy6|FA%;xqU>#5r1+3DN z3z?Q&s2Q4WBp2!_(juuqA_a+1inueX8wtL$#g&X=pfSLeEQOdD!?=sIL`R}IA{Hc< zLsyq(EXAvluZLv}7_6zK$yNR5)*Ftkh$RnO~+N1 z7LUio9h0gB0fZ|PQ@24(ivwZCXo!FS#N$Rp*P4d=}u7MywJYm{toUxOZtg>uN_= z4>y(?1X{BOqtwz!B$V8!E!D!v)T#Sp5rgL!)p6BFahQbkKzOGfLOs zg}0K?sJ2>B*YR?>BJ0GirhZ zKjN<4>zQT8DVU>TfDE_IX}rEw{arNC@J`&Fx3eEak1X4%cAl}k&MUSxHEh9sn%o1M zP*C)?c2?ZtW)D~U<5@2~Zs%Obb4JT)Y+swR-gxF@Pp3JaE!v}&R}OAj{n4PxG;7KR z^SGG7)#40`@FO8uRm5G**3La=`bxON%*j1zCR-|XuJtvP-HKNpERH&_(Vq34B8Ra( z{__yV3~*3f>*fmVwDR~n)Jfd1jalAn)ViUn)x*4EHK>Knf3gO zKDX1X*nK&^)8E=Tii;&Hn3O14djV^zrmvl0`P=V1fMk`P1{jcK>&B Q+qeGdnf_;s|L-37U-Vtrr~m)} literal 0 HcmV?d00001 diff --git a/deps/floki/.fetch b/deps/floki/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/floki/.hex b/deps/floki/.hex new file mode 100644 index 0000000000000000000000000000000000000000..f1c9e50472f6277cae253b3486d2693b066bf1fe GIT binary patch literal 270 zcmZ9{%~Ap}5Cq@|ik^HFDnFCIn{Q%GGBfT1$x;hg+$KOGr#5LkiJmjwBffLA&e%V+M>F zL`NHwA@$a<;}@@&)98B|Xq)c`QqLV5KZqAOEysE2%kBDKhhVhxLdJSAPDq><3$7-Q xx)}BvrEpC#)FP%b3kM-0%YmY?H4J9u^YX`xujfYU^?OX2m;T6hdhguc`~n&KN|XQq literal 0 HcmV?d00001 diff --git a/deps/floki/CHANGELOG.md b/deps/floki/CHANGELOG.md new file mode 100644 index 0000000..a494521 --- /dev/null +++ b/deps/floki/CHANGELOG.md @@ -0,0 +1,679 @@ +# Changelog + +All notable changes to this project will be documented in this file. + +The format is based on [Keep a Changelog](http://keepachangelog.com/en/1.0.0/) +and this project adheres to [Semantic Versioning](http://semver.org/spec/v2.0.0.html). + +## [Unreleased][unreleased] + +## [0.33.1] - 2022-06-28 + +### Fixed + +- Remove some warnings for unused code. 
+ +## [0.33.0] - 2022-06-28 + +### Added + +- Add support for searching elements that contains text in a case-insensitive manner with +`fl-icontains` - thanks [@nuno84](https://github.com/nuno84) + +### Changed + +- Drop support for Elixir 1.8 and 1.9. +- Fix and improve internal things - thanks [@derek-zhou](https://github.com/derek-zhou) and [@hissssst](https://github.com/hissssst) + +## [0.32.1] - 2022-03-24 + +### Fixed + +- Allow root nodes to be selected using pseudo-classes - thanks [@rzane](https://github.com/rzane) + +## [0.32.0] - 2021-10-18 + +### Added + +- Add an HTML tokenizer written in Elixir - this still experimental and it's not stable API yet. +- Add support for HTML IDs containing periods in the selectors - thanks [@Hugo-Hache](https://github.com/Hugo-Hache) +- Add support for case-insensitive CSS attribute selectors - thanks [@fcapovilla](https://github.com/fcapovilla) +- Add the `:root` pseudo-class selector - thanks [@fcapovilla](https://github.com/fcapovilla) + +## [0.31.0] - 2021-06-11 + +### Changed + +- Treat `style` and `title` tags as plaintext in Mochiweb - thanks [@SweetMNM](https://github.com/SweetMNM) + +## [0.30.1] - 2021-03-29 + +### Fixed + +- Fix typespecs of `Floki.traverse_and_update/2` to make clear that it does not accept text nodes directly. + +## [0.30.0] - 2021-02-06 + +### Added + +- Add ":disabled" pseudo selector - thanks [@vnegrisolo](https://github.com/vnegrisolo) +- Add [Gleam](https://github.com/gleam-lang/gleam) adapter - thanks [@CrowdHailer](https://github.com/CrowdHailer) +- Add pretty option to `Floki.raw_html/2` - thanks [@evaldobratti](https://github.com/evaldobratti) +- Add `html_parser` option to `parse_` functions. This enables a more dynamic and functional +configuration of the HTML parser in use. + +### Changed + +- Remove support for Elixir 1.7 - thanks [@carlosfrodrigues](https://github.com/carlosfrodrigues) +- Replace `IO.warn` by `Logger.info` for deprecation warnings - thanks [@juulSme](https://github.com/juulSme) + +### Fixed + +- Fix typespecs for `find`, `attr` and `attribute` functions - thanks [@mtarnovan](https://github.com/mtarnovan) +- Documentation Improvements - thanks [@kianmeng](https://github.com/kianmeng) + +## [0.29.0] - 2020-10-02 + +### Added + +- Add `Floki.find_and_update/3` that updates nodes inside a tree, like traverse and update +but without allowing changes in the children nodes. There for the tree cannot grow in size, +but can have nodes removed. + +### Changed + +- Deprecate `Floki.map/2` because we have now `Floki.find_and_update/3` and `Floki.traverse_and_update/2` that +are powerful APIs. `Floki.map/2` can be replaced by `Enum.map/2` as well - thanks [@josevalim](https://github.com/josevalim) for the idea! 
+- Update optional dependency `fast_html` to `v2.0.4` + +### Fixed + +- Fix a bug when parsing a HTML with a XML inside using Mochiweb's parser + +### Improvements + +- Add more typespecs + +## [0.28.0] - 2020-08-26 + +### Added + +- Add support for `:checked` pseudo-class selector - thanks [@wojtekmach](https://github.com/wojtekmach) + +### Changed + +- Drop support for Elixir 1.6 +- Update version of `fast_html` to 2.0 in docs and CI - thanks [@rinpatch](https://github.com/rinpatch) + +### Fixed + +- Fix docs by mentioning HTML nodes supported for `traverse_and_update` - thanks [@hubertlepicki](https://github.com/hubertlepicki) + +## [0.27.0] - 2020-07-07 + +### Added + +- `Floki.filter_out/2` now can filter text nodes - thanks [@ckruse](https://github.com/ckruse) +- Support more encoding entities in `Floki.raw_html/1` - thanks [@ntenczar](https://github.com/ntenczar) + +### Fixed + +- Fix `Floki.attribute/2` when there is only text nodes in the document - thanks [@ckruse](https://github.com/ckruse) + +### Improvements + +- Performance improvements of `Floki.raw_html/1` function - thanks [@josevalim](https://github.com/josevalim) +- Improvements in the docs and specs of `Floki.traverse_and_update/2` and `Floki.children/1` - thanks [@josevalim](https://github.com/josevalim) +- Improvements in the spec of `Floki.traverse_and_update/2` - thanks [@Dalgona](https://github.com/Dalgona) +- Improve the CI setup to run the formatter correctly - thanks [@Cleidiano](https://github.com/Cleidiano) + +## [0.26.0] - 2020-02-17 + +### Added + +- Add support for the pseudo-class selectors `:nth-last-child` and `:nth-last-of-type` + +### Fixed + +- Fix the typespecs of `Floki.traverse_and_update/3` - thanks [@RichMorin](https://github.com/RichMorin) + +### Changed + +- Update optional dependency `fast_html` to `v1.0.3` + +## [0.25.0] - 2020-01-26 + +### Added + +- Add `Floki.parse_fragment!/1` and `Floki.parse_document!/1` that has the same functionality of +the functions without the bang, but they return the document or fragment without the either tuple +and will raise exception in case of errors - thanks [@schneiderderek](https://github.com/schneiderderek) +- Add `Floki.traverse_and_update/3` which accepts an accumulator which is useful to keep +the state while traversing the HTML tree - thanks [@Dalgona](https://github.com/Dalgona) + +### Changed + +- Update the `html_entities` dependency from `v0.5.0` to `v0.5.1` + +## [0.24.0] - 2020-01-01 + +### Added + +- Add support for [`fast_html`](https://hexdocs.pm/fast_html), which is a "C Node" wrapping +Lexborisov's [myhtml](https://github.com/lexborisov/myhtml) - thanks [@rinpatch](https://github.com/rinpatch) +- Add setup to run our test suite against all parsers on CI - thanks [@rinpatch](https://github.com/rinpatch) +- Add `Floki.parse_document/1` and `Floki.parse_fragment/1` in order to correct parse documents +and fragments of documents - it also prevents the confusion and inconsistency of `parse/1`. +- Configure `dialyxir` in order to run Dialyzer easily. + +### Changed + +- Deprecate `Floki.parse/1` and all the functions that uses it underneath. This means that all +the functions that accepted HTML as binary are deprecated as well. This includes `find/2`, `attr/4`, +`filter_out/2`, `text/2` and `attribute/2`. The recommendation is to use those functions with an +already parsed document or fragment. +- Remove support for `Elixir 1.5`. + +## [0.23.1] - 2019-12-01 + +### Fixed + +- It fixes the Mochiweb parser when there is an invalid charref. 
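+
+For context, a minimal sketch of the `parse_document/1` flow recommended in the entries above (the markup and selector are illustrative only):
+
+```elixir
+# Parse a full document once, then run selectors against the parsed tree
+# instead of passing raw HTML binaries to find/2 (deprecated per 0.24.0).
+{:ok, document} = Floki.parse_document(~s(<html><body><p class="intro">Hello</p></body></html>))
+
+Floki.find(document, "p.intro")
+#=> [{"p", [{"class", "intro"}], ["Hello"]}]
+```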
+ +## [0.23.0] - 2019-09-11 + +### Changed + +- Remove Mochiweb as a hex dependency. It brings the code from the original project +to Floki's codebase - thanks [@josevalim](https://github.com/josevalim) + +## [0.22.0] - 2019-08-21 + +### Added + +- Add `Floki.traverse_and_update/2` that works in similar way to `Floki.map/2` but +traverse the tree and update the children elements. The difference from "map" is that +this function can create a tree with more or less nodes. - thanks [@ericlathrop](https://github.com/ericlathrop) + +### Changed + +- Remove support for Elixir 1.4. + +## [0.21.0] - 2019-04-17 + +### Added + +- Add a possibility to filter `style` tags on `Floki.text/2` - thanks [@Vict0rynox](https://github.com/Vict0rynox) + +### Fixed + +- Fix `Floki.text/2` to consider the previous filter of `js` when filtering `style` - thanks [@Vict0rynox](https://github.com/Vict0rynox) +- Fix typespecs for `Floki.filter_out/2` - thanks [@myfreeweb](https://github.com/myfreeweb) + +### Changed + +- Drop support for Elixir 1.3 and below - thanks [@herbstrith](https://github.com/herbstrith) + +## [0.20.4] - 2018-09-24 + +### Fixed + +- Fix `Floki.raw_html` to accept lists as attribute values - thanks [@katehedgpeth](https://github.com/katehedgpeth) + +## [0.20.3] - 2018-06-22 + +### Fixed + +- Fix style and script tags with comments - thanks [@francois2metz](https://github.com/francois2metz) + +## [0.20.2] - 2018-05-09 + +### Fixed + +- Fix `Floki.raw_html/1` to correct handle quotes and double quotes on attributes - thanks [@grych](https://github.com/grych) + +## [0.20.1] - 2018-04-05 + +### Fixed + +- Remove `Enumerable.slice/1` compile warning for `Floki.HTMLTree` - thanks [@thecodeboss](https://github.com/thecodeboss) +- Fix `Floki.find/2` that was failing on HTML that consists entirely of a comment - thanks [@ShaneWilton](https://github.com/ShaneWilton) + +## [0.20.0] - 2018-02-06 + +### Added + +- Configurable raw_html/2 to allow optional encode of HTML entities - thanks [@davydog187](https://github.com/davydog187) + +### Fixed + +- Fix serialization of the tree after updating attribute - thanks [@francois2metz](https://github.com/francois2metz) + +## [0.19.3] - 2018-01-25 + +### Fixed + +- Skip HTML entities encode for `Floki.raw_html/1` for `script` or `style` tags +- Add `:html_entities` app to the list of OTP applications. It fixes production releases. + +## [0.19.2] - 2017-12-22 + +### Fixed + +- **(BREAKING CHANGE)** Re-encode HTML entities on `Floki.raw_html/1`. + +## [0.19.1] - 2017-12-04 + +### Fixed + +- Fixed doctype serialization for `Floki.raw_html/1` - thanks [@jhchen][https://github.com/jhchen] + +## [0.19.0] - 2017-11-11 + +### Added + +- Added support for `nth-of-type`, `first-of-type`, `last-of-type` and `last-child` pseudo-classes - thanks [@saleem1337](https://github.com/saleem1337). +- Added support for `nth-child` pseudo-class functional notation - thanks [@nirev](https://github.com/nirev). +- Added functional notation support for `nth-of-type` pseudo-class. +- Added a [Contributing guide](https://github.com/philss/floki/blob/master/CONTRIBUTING.md). + +### Fixed + +- Format all files according to the Elixir 1.6 formatter - thanks [@fcevado](https://github.com/fcevado). +- Fix `Floki.raw_html` to support raw text - thanks [@craig-day](https://github.com/craig-day). + +## [0.18.1] - 2017-10-13 + +### Added + +- Added a [Code of Conduct](https://github.com/philss/floki/blob/master/CODE_OF_CONDUCT.md). + +### Fixed + +- Fix XML tag when building HTML tree. 
+- Return empty list when `Floki.filter_out/2` result is empty. + +## [0.18.0] - 2017-08-05 + +### Added + +- Added `Floki.attr/4` that receives a function enabling manipulation of attribute values - thanks [@erikdsi](https://github.com/erikdsi). +- Implement the String.Chars protocol for Floki.Selector. +- Implement the Enumerable protocol for Floki.HTMLTree. + +### Changed + +- Changed `Floki.transform/2` to `Floki.map/2` and `Floki.Finder.apply_transform/2` to `Floki.Finder.map/2` - thanks [@aphillipo](https://github.com/aphillipo). + +### Fixed + +- Fix `Floki.raw_html/1` to consider XML prefixes - thanks [@sergey-kintsel](https://github.com/sergey-kintsel). +- Fix `raw_html` for self closing tags with content - thanks [@navinpeiris](https://github.com/navinpeiris). + +### Removed + +- Removed support for Elixir 1.2. + +## [0.17.2] - 2017-05-25 + +### Fixed + +- Fix attribute selectors in :not() - thanks [@jjcarstens](https://github.com/jjcarstens) and [@Eiji7](https://github.com/Eiji7) +- Fix selector parser to consider combinators across selectors separated by commas. +For further details, please check the [pull request](https://github.com/philss/floki/pull/115) - thanks [@jjcarstens](https://github.com/jjcarstens) and [@mischov](https://github.com/mischov) + +## [0.17.1] - 2017-05-22 + +### Fixed + +- Fix search when body has unencoded angles (`<` and `>`) - thanks [@sergey-kintsel](https://github.com/sergey-kintsel) +- Fix crash caused by XML declaration inside body - thanks [@erikdsi](https://github.com/erikdsi) +- Fix issue when finding fails if HTML begins with XML tag - thanks [@sergey-kintsel](https://github.com/sergey-kintsel) + +## [0.17.0] - 2017-04-12 + +### Added + +- Add support for multiple pseudo-selectors, line :not() and :nth-child() - thanks [@jjcarstens](https://github.com/jjcarstens) +- Add support for multiple selectors inside the :not() pseudo-class selector - thanks [@jjcarstens](https://github.com/jjcarstens) + +## [0.16.0] - 2017-04-05 + +### Added + +- Add support for selectors that only include a pseudo-class selector - thanks [@buhman](https://github.com/buhman) +- Add support for a new selector: `fl-contains`, which returns elements that contains a given text - thanks [@buhman](https://github.com/buhman) + +### Fixed + +- Fix `:not()` pseudo-class selector to accept simple pseudo-class selectors as well - thanks [@mischov](https://github.com/mischov) + +## [0.15.0] - 2017-03-14 + +### Added + +- Added support for the `:not()` pseudo-class selector. + +### Fixed + +- Fixed pseudo-class selectors that are used in conjunction with combinators - thanks [@Eiji7](https://github.com/Eiji7) +- Fixed order of elements after search using descendant combinator - thanks [@Eiji7](https://github.com/Eiji7) + +## [0.14.0] - 2017-02-07 + +### Added + +- Added support for configuring `html5ever` as the HTML parser. Issue #83 - thanks [@hansihe](https://github.com/hansihe) +and [@aphillipo](https://github.com/aphillipo)! + +## [0.13.2] - 2017-02-07 + +### Fixed + +- Fixed bug that was causing Floki.text/1 and Floki.filter_out/2 +to ignore "trees" with only text nodes. Issue #91 - thanks [@boydm](https://github.com/boydm). 
+ +## [0.13.1] - 2017-01-22 + +### Fixed + +- Fix ordering of duplicated descendant matches - thanks [@mmmries](https://github.com/mmmries) +- Fix ordering of `Floki.text/1` when there are only root nodes - thanks [@mmmries](https://github.com/mmmries) + +## [0.13.0] - 2017-01-22 + +### Added + +- Floki.filter_out/2 is now able to understand complex selectors to filter out from the tree. + +## [0.12.1] - 2017-01-20 + +### Fixed + +- Fix search for elements using descendant combinator - issue #84 - thanks [@mmmries](https://github.com/mmmries) + +## [0.12.0] - 2016-12-28 + +### Added + +- Add basic support for nth-child pseudo-class selector. +Closes [issue #64](https://github.com/philss/floki/issues/64). + +### Changed + +- Remove support for Elixir 1.1 and below. +- Remove public documentation for internal code. + +## [0.11.0] - 2016-10-12 + +### Added + +- First attempt to transform nodes with `Floki.transform/2`. It is not able to update +the tree yet, but works good with results from `Floki.find/2` - thanks [@bobjflong](https://github.com/bobjflong) + +### Changed + +- Using Logger to notify unknown tokens in selector parser - thanks [@teamon](https://github.com/teamon) and [@geonnave](https://github.com/geonnave) +- Replace `mochiweb_html` with `mochiweb` package. This is needed to fix conflict with other +packages that are using `mochiweb`. - thanks [@aphillipo](https://github.com/aphillipo) + +## [0.10.1] - 2016-08-28 + +### Fixed + +- Fix sibling search after immediate children - thanks [@gmile](https://github.com/gmile). + +## [0.10.0] - 2016-08-05 + +### Changed + +- Change the search for namespaced elements using the correct CSS3 syntax. + +### Fixed + +- Fix the search for child elements when is more than two elements deep - thanks [@gmile](https://github.com/gmile) + +## [0.9.0] - 2016-06-16 + +### Added + +- A separator between text when getting text from nodes - thanks [@rochdi](https://github.com/rochdi). + +## [0.8.1] - 2016-05-20 + +### Added + +- Support rendering boolean attributes on `Floki.raw_html/1` - thanks [@iamvery](https://github.com/iamvery). + +### Changed + +- Update Mochiweb HTML parser dependency to version 2.15.0. + +## [0.8.0] - 2016-03-06 + +### Added + +- Add possibility to search tags with namespaces. +- Accept `Floki.Selector` as parameter of `Floki.find/2` instead of only strings - thanks [@hansihe](https://github.com/hansihe). + +### Changed + +- Using a smaller package with only the Mochiweb HTML parser. + +## [0.7.2] - 2016-02-23 + +### Fixed + +- Replace `
<br/>` nodes with a newline (`\n`) in `DeepText` - thanks [@maxneuvians](https://github.com/maxneuvians).
+- Allow `FilterOut` to filter special nodes, like `comment`.
+
+## [0.7.1] - 2015-11-14
+
+### Fixed
+
+- Ignore PHP scripts when finding nodes.
+
+## [0.7.0] - 2015-11-03
+
+### Added
+
+- Add support for excluding script nodes in `Floki.text`.
+By default, it will exclude those nodes, but they can be included with
+the flag `js: true` - thanks [@vikeri](https://github.com/vikeri)!
+
+### Fixed
+
+- Fix find for sibling nodes when the preceding selector matches an element
+at the end of the sibling list - fixes [issue #39](https://github.com/philss/floki/issues/39)
+
+## [0.6.1] - 2015-10-11
+
+### Fixed
+
+- Fix `Floki.raw_html/1` to build HTML comments properly.
+
+## [0.6.0] - 2015-10-07
+
+### Added
+
+- Add `Floki.raw_html/2`.
+
+## [0.5.0] - 2015-09-27
+
+### Added
+
+- Add the child combinator to `Floki.find/2`.
+- Add the adjacent sibling combinator to `Floki.find/2`.
+- Add the general adjacent sibling combinator to `Floki.find/2`.
+
+## [0.4.1] - 2015-09-18
+
+### Fixed
+
+- Ignore other files that are not lexer files (".xrl") under the `src/` directory
+in the Hex package. This fixes a crash when compiling using OTP 17.5 on Mac OS X.
+Huge thanks to [@henrik](https://github.com/henrik) and [@licyeus](https://github.com/licyeus), who pointed out the
+[issue](https://github.com/philss/floki/issues/24)!
+
+## [0.4.0] - 2015-09-17
+
+### Added
+
+- A robust representation of selectors in order to enable queries using a mix of selector types,
+such as classes with attributes, attributes with types, classes with classes and so on.
+Here is a list with examples of what is possible now:
+  - `Floki.find(html, "a.foo")`
+  - `Floki.find(html, "a.foo[data-action=post]")`
+  - `Floki.find(html, ".foo.bar")`
+  - `Floki.find(html, "a.foo[href$='.org']")`
+Thanks to [@licyeus](https://github.com/licyeus) for pointing out the [issue](https://github.com/philss/floki/issues/18)!
+- Include `mochiweb` in the applications list at mix.exs - thanks [@EricDykstra](https://github.com/EricDykstra)
+
+### Changed
+
+- `Floki.find/2` will now return a list instead of a tuple when searching only by IDs.
+From now on, Floki should always return the results inside a list, even if it's an ID match.
+
+### Removed
+
+- `Floki.find/2` does not accept tuples as selectors anymore.
+This is because, with the robust selectors representation, it won't be necessary to query directly using
+tuples or other data structures rather than strings.
+
+## [0.3.3] - 2015-08-23
+
+### Fixed
+
+- Fix `Floki.find/2` when there is non-HTML input.
+It closes [issue #17](https://github.com/philss/floki/issues/17)
+
+## [0.3.2] - 2015-06-27
+
+### Fixed
+
+- Fix `Floki.DeepText` when there is a comment inside nodes.
+
+## [0.3.1] - 2015-06-21
+
+### Fixed
+
+- Fix `Floki.find/2` to consider XML trees.
+
+## [0.3.0] - 2015-06-07
+
+### Added
+
+- Add attribute equals selector. This feature enables the user to search using
+HTML attributes other than "class" or "id".
+E.g.: `Floki.find(html, "[data-model=user]")` - [@nelsonr](https://github.com/nelsonr)
+
+## [0.2.1] - 2015-06-04
+
+### Fixed
+
+- Fix `parse/1` when parsing a part of HTML without a root node - [@antonmi](https://github.com/antonmi)
+
+## [0.2.0] - 2015-05-03
+
+### Added
+
+- Support HTML string when searching for attributes with `Floki.attribute/2`.
+- Option for `Floki.text/2` to disable deep search and use flat search instead.
+ +### Changed + +- Change `Floki.text/1` to perform a deep search of text nodes. +- Consider doctests in the test suite. + +## [0.1.1] - 2015-03-25 + +### Added + +- Add CHANGELOG.md following the [Keep a changelog](http://keepachangelog.com/). + +### Changed + +- Using MochiWeb as a hex dependency instead of embedded code. +It closes the [issue #5](https://github.com/philss/floki/issues/5) + +## [0.1.0] - 2015-02-15 + +### Added + +- Descendant selectors, like ".class tag" to Floki.find/2. +- Multiple selection, like ".class1, .class2" to Floki.find/2. + +## [0.0.5] - 2014-12-21 + +### Added + +- `Floki.text/1`, which returns all text in the same level +of the parent element inside HTML. + +### Changed + +- Elixir version requirement from "~> 1.0.0" to ">= 1.0.0". + +[unreleased]: https://github.com/philss/floki/compare/v0.33.1...HEAD +[0.33.1]: https://github.com/philss/floki/compare/v0.33.0...v0.33.1 +[0.33.0]: https://github.com/philss/floki/compare/v0.32.1...v0.33.0 +[0.32.1]: https://github.com/philss/floki/compare/v0.32.0...v0.32.1 +[0.32.0]: https://github.com/philss/floki/compare/v0.31.0...v0.32.0 +[0.31.0]: https://github.com/philss/floki/compare/v0.30.1...v0.31.0 +[0.30.1]: https://github.com/philss/floki/compare/v0.30.0...v0.30.1 +[0.30.0]: https://github.com/philss/floki/compare/v0.29.0...v0.30.0 +[0.29.0]: https://github.com/philss/floki/compare/v0.28.0...v0.29.0 +[0.28.0]: https://github.com/philss/floki/compare/v0.27.0...v0.28.0 +[0.27.0]: https://github.com/philss/floki/compare/v0.26.0...v0.27.0 +[0.26.0]: https://github.com/philss/floki/compare/v0.25.0...v0.26.0 +[0.25.0]: https://github.com/philss/floki/compare/v0.24.0...v0.25.0 +[0.24.0]: https://github.com/philss/floki/compare/v0.23.1...v0.24.0 +[0.23.1]: https://github.com/philss/floki/compare/v0.23.0...v0.23.1 +[0.23.0]: https://github.com/philss/floki/compare/v0.22.0...v0.23.0 +[0.22.0]: https://github.com/philss/floki/compare/v0.21.0...v0.22.0 +[0.21.0]: https://github.com/philss/floki/compare/v0.20.4...v0.21.0 +[0.20.4]: https://github.com/philss/floki/compare/v0.20.3...v0.20.4 +[0.20.3]: https://github.com/philss/floki/compare/v0.20.2...v0.20.3 +[0.20.2]: https://github.com/philss/floki/compare/v0.20.1...v0.20.2 +[0.20.1]: https://github.com/philss/floki/compare/v0.20.0...v0.20.1 +[0.20.0]: https://github.com/philss/floki/compare/v0.19.3...v0.20.0 +[0.19.3]: https://github.com/philss/floki/compare/v0.19.2...v0.19.3 +[0.19.2]: https://github.com/philss/floki/compare/v0.19.1...v0.19.2 +[0.19.1]: https://github.com/philss/floki/compare/v0.19.0...v0.19.1 +[0.19.0]: https://github.com/philss/floki/compare/v0.18.1...v0.19.0 +[0.18.1]: https://github.com/philss/floki/compare/v0.18.0...v0.18.1 +[0.18.0]: https://github.com/philss/floki/compare/v0.17.2...v0.18.0 +[0.17.2]: https://github.com/philss/floki/compare/v0.17.1...v0.17.2 +[0.17.1]: https://github.com/philss/floki/compare/v0.17.0...v0.17.1 +[0.17.0]: https://github.com/philss/floki/compare/v0.16.0...v0.17.0 +[0.16.0]: https://github.com/philss/floki/compare/v0.15.0...v0.16.0 +[0.15.0]: https://github.com/philss/floki/compare/v0.14.0...v0.15.0 +[0.14.0]: https://github.com/philss/floki/compare/v0.13.2...v0.14.0 +[0.13.2]: https://github.com/philss/floki/compare/v0.13.1...v0.13.2 +[0.13.1]: https://github.com/philss/floki/compare/v0.13.0...v0.13.1 +[0.13.0]: https://github.com/philss/floki/compare/v0.12.1...v0.13.0 +[0.12.1]: https://github.com/philss/floki/compare/v0.12.0...v0.12.1 +[0.12.0]: https://github.com/philss/floki/compare/v0.11.0...v0.12.0 +[0.11.0]: 
https://github.com/philss/floki/compare/v0.10.1...v0.11.0 +[0.10.1]: https://github.com/philss/floki/compare/v0.10.0...v0.10.1 +[0.10.0]: https://github.com/philss/floki/compare/v0.9.0...v0.10.0 +[0.9.0]: https://github.com/philss/floki/compare/v0.8.1...v0.9.0 +[0.8.1]: https://github.com/philss/floki/compare/v0.8.0...v0.8.1 +[0.8.0]: https://github.com/philss/floki/compare/v0.7.2...v0.8.0 +[0.7.2]: https://github.com/philss/floki/compare/v0.7.1...v0.7.2 +[0.7.1]: https://github.com/philss/floki/compare/v0.7.0...v0.7.1 +[0.7.0]: https://github.com/philss/floki/compare/v0.6.1...v0.7.0 +[0.6.1]: https://github.com/philss/floki/compare/v0.6.0...v0.6.1 +[0.6.0]: https://github.com/philss/floki/compare/v0.5.0...v0.6.0 +[0.5.0]: https://github.com/philss/floki/compare/v0.4.1...v0.5.0 +[0.4.1]: https://github.com/philss/floki/compare/v0.4.0...v0.4.1 +[0.4.0]: https://github.com/philss/floki/compare/v0.3.3...v0.4.0 +[0.3.3]: https://github.com/philss/floki/compare/v0.3.2...v0.3.3 +[0.3.2]: https://github.com/philss/floki/compare/v0.3.1...v0.3.2 +[0.3.1]: https://github.com/philss/floki/compare/v0.3.0...v0.3.1 +[0.3.0]: https://github.com/philss/floki/compare/v0.2.1...v0.3.0 +[0.2.1]: https://github.com/philss/floki/compare/v0.2.0...v0.2.1 +[0.2.0]: https://github.com/philss/floki/compare/v0.1.1...v0.2.0 +[0.1.1]: https://github.com/philss/floki/compare/v0.1.0...v0.1.1 +[0.1.0]: https://github.com/philss/floki/compare/v0.0.5...v0.1.0 +[0.0.5]: https://github.com/philss/floki/compare/v0.0.3...v0.0.5 diff --git a/deps/floki/CODE_OF_CONDUCT.md b/deps/floki/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..937bdd8 --- /dev/null +++ b/deps/floki/CODE_OF_CONDUCT.md @@ -0,0 +1,54 @@ +# Code of Conduct + +Contact: Philip Sampaio Silva - philip.sampaio+flokiconduct@gmail.com + +## Why have a Code of Conduct? + +As contributors and maintainers of this project, we are committed to providing a friendly, safe and welcoming environment for all, regardless of age, disability, gender, nationality, race, religion, sexuality, or similar personal characteristic. + +The goal of the Code of Conduct is to specify a baseline standard of behavior so that people with different social values and communication styles can talk about Floki effectively, productively, and respectfully, even in face of disagreements. The Code of Conduct also provides a mechanism for resolving conflicts in the community when they arise. + +## Our Values + +These are the values Floki developers should aspire to: + + * Be friendly and welcoming + * Be patient + * Remember that people have varying communication styles and that not everyone is using their native language. (Meaning and tone can be lost in translation.) + * Be thoughtful + * Productive communication requires effort. Think about how your words will be interpreted. + * Remember that sometimes it is best to refrain entirely from commenting. + * Be respectful + * In particular, respect differences of opinion. It is important that we resolve disagreements and differing views constructively. + * Avoid destructive behavior + * Derailing: stay on topic; if you want to talk about something else, start a new conversation. + * Unconstructive criticism: don't merely decry the current state of affairs; offer (or at least solicit) suggestions as to how things may be improved. + * Snarking (pithy, unproductive, sniping comments). + +The following actions are explicitly forbidden: + + * Insulting, demeaning, hateful, or threatening remarks. 
+ * Discrimination based on age, disability, gender, nationality, race, religion, sexuality, or similar personal characteristic. + * Bullying or systematic harassment. + * Unwelcome sexual advances. + * Incitement to any of these. + +## Where does the Code of Conduct apply? + +If you participate in or contribute to the Floki project in any way, you are encouraged to follow the Code of Conduct while doing so. + +Explicit enforcement of the Code of Conduct applies to the official mediums operated by the Floki project: + +* The official GitHub project and code reviews. + +Other Floki related activities (such as conferences, meetups, and other unofficial forums) are encouraged to adopt this Code of Conduct. Such groups must provide their own contact information. + +Project maintainers may remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct. + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by emailing: philip.sampaio+flokiconduct@gmail.com. All complaints will be reviewed and investigated and will result in a response that is deemed necessary and appropriate to the circumstances. **All reports will be kept confidential**. + +**The goal of the Code of Conduct is to resolve conflicts in the most harmonious way possible**. We hope that in most cases issues may be resolved through polite discussion and mutual agreement. Bannings and other forceful measures are to be employed only as a last resort. **Do not** post about the issue publicly or try to rally sentiment against a particular individual or group. + +## Acknowledgements + +This document was based on the Code of Conduct from the Elixir project that was based on the Go project with parts derived from Django's Code of Conduct, Rust's Code of Conduct and the Contributor Covenant. diff --git a/deps/floki/CONTRIBUTING.md b/deps/floki/CONTRIBUTING.md new file mode 100644 index 0000000..b86000f --- /dev/null +++ b/deps/floki/CONTRIBUTING.md @@ -0,0 +1,41 @@ +## Contributing + +We want to encourage you to contribute to Floki with documentation, code and ideas. +To do so, you can read the existing docs and take a look at our source code through [closed pull requests](https://github.com/philss/floki/pulls?q=is%3Apr+is%3Aclosed). + +Before starting, please read our [Code of Conduct](https://github.com/philss/floki/blob/master/CODE_OF_CONDUCT.md) and our [License](https://github.com/philss/floki/blob/master/LICENSE) files. + +### Docs + +This project uses the [ExDoc](https://github.com/elixir-lang/ex_doc) tool, which generates documentation based on code docs. +Floki uses the [Elixir style](https://hexdocs.pm/elixir/writing-documentation.html) of writing docs. + +Documentation is a very important portion of software. We want to always improve our communication using docs. + +### Ideas + +To contribute with ideas, you can [open issues](https://github.com/philss/floki/issues/new) using Github. Please write a clear description of +what you want to propose, along with a motivation and examples. + +This can make the project very rich, even if your proposal is not accepted. It worth the discussion and possible implementations. + +### Code + +Contributing with code (and documentation) is easy if you already know how to work with Pull Request (PR) on GitHub. +If you don't know yet, it's basically: + +1. Fork the project. +2. Clone your fork using the project URL (git or https). +3. 
Create a new branch to implement your functionality or code. This can be done with the command: +`git checkout -b your-branch-name-here` +4. Make your changes and add a new commit with a clear message saying **why** this change is being made. +5. Push your code with the command: `git push -u origin your-branch-name-here`. +6. Go to your fork page at GitHub. +7. Open a new PR. + +Today we only need a one :shipit: (it's the sign of approval) in order to merge a PR. Unfortunately we can't guarantee that all PRs will be merged. +But we can guarantee that all PRs that are not accepted will have an explanation for that. + +If you have questions, security issues or suggestions about the project and prefer to talk privately, please send me an email at: philip.sampaio+flokicontributing@gmail.com. + +Thank you. diff --git a/deps/floki/LICENSE b/deps/floki/LICENSE new file mode 100644 index 0000000..b581b49 --- /dev/null +++ b/deps/floki/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Philip Sampaio Silva + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/deps/floki/README.md b/deps/floki/README.md new file mode 100644 index 0000000..5a54d6b --- /dev/null +++ b/deps/floki/README.md @@ -0,0 +1,273 @@ +[![Actions Status](https://github.com/philss/floki/workflows/CI/badge.svg?branch=master)](https://github.com/philss/floki/actions) +[![Floki version](https://img.shields.io/hexpm/v/floki.svg)](https://hex.pm/packages/floki) +[![Hex Docs](https://img.shields.io/badge/hex-docs-lightgreen.svg)](https://hexdocs.pm/floki/) +[![Hex.pm](https://img.shields.io/hexpm/dt/floki.svg)](https://hex.pm/packages/floki) +[![License](https://img.shields.io/hexpm/l/floki.svg)](https://github.com/philss/floki/blob/master/LICENSE) +[![Last Updated](https://img.shields.io/github/last-commit/philss/floki.svg)](https://github.com/philss/floki/commits/master) + +Floki logo + +**Floki is a simple HTML parser that enables search for nodes using CSS selectors**. + +[Check the documentation ๐Ÿ“™](https://hexdocs.pm/floki). + +## Usage + +Take this HTML as an example: + +```html + + + +
+<!doctype html>
+<html>
+<body>
+  <section id="content">
+    <p class="headline">Floki</p>
+    <span class="headline">Enables search using CSS selectors</span>
+    <a href="https://github.com/philss/floki">Github page</a>
+    <span data-model="user">philss</span>
+  </section>
+  <a href="https://hex.pm/packages/floki">Hex package</a>
+</body>
+</html>
+```
+
+Here are some queries that you can perform (with return examples):
+
+```elixir
+{:ok, document} = Floki.parse_document(html)
+
+Floki.find(document, "p.headline")
+# => [{"p", [{"class", "headline"}], ["Floki"]}]
+
+document
+|> Floki.find("p.headline")
+|> Floki.raw_html
+# => <p class="headline">Floki</p>
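+
+# A couple of extra queries along the same lines (an illustrative sketch; the
+# exact return values assume the example HTML document shown above):
+Floki.find(document, "span[data-model=user]")
+# => [{"span", [{"data-model", "user"}], ["philss"]}]
+
+Floki.attribute(document, "#content a", "href")
+# => ["https://github.com/philss/floki"]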
+```
+
+Each HTML node is represented by a tuple like:
+
+    {tag_name, attributes, children_nodes}
+
+Example of node:
+
+    {"p", [{"class", "headline"}], ["Floki"]}
+
+So even if the only child node is the element text, it is represented inside a list.
+
+## Installation
+
+Add Floki to your `mix.exs`:
+
+```elixir
+defp deps do
+  [
+    {:floki, "~> 0.33.0"}
+  ]
+end
+```
+
+After that, run `mix deps.get`.
+
+You can check the [changelog](CHANGELOG.md) for changes.
+
+## Dependencies
+
+Floki needs the `:leex` module in order to compile.
+Normally this module is installed with Erlang in a complete installation.
+
+If you get this ["module :leex is not available"](https://github.com/philss/floki/issues/35) error message, you need to install the `erlang-dev` and `erlang-parsetools` packages in order to get the `:leex` module. The package names may be different depending on your OS.
+
+### Alternative HTML parsers
+
+By default Floki uses a patched version of `mochiweb_html` for parsing fragments
+due to its ease of installation (it's written in Erlang and has no outside dependencies).
+
+However, one might want to use an alternative parser due to the following
+concerns:
+
+- Performance - It can be [up to 20 times slower than the alternatives](https://hexdocs.pm/fast_html/readme.html#benchmarks) on big HTML
+  documents.
+- Correctness - in some cases `mochiweb_html` will produce different results
+  from what is specified in the [HTML5 specification](https://html.spec.whatwg.org/).
+  For example, a correct parser would parse `<title> <b> bold </b> text </title>`
+  as `{"title", [], [" <b> bold </b> text "]}` since content inside `<title>` is
+  to be [treated as plaintext](https://html.spec.whatwg.org/#the-title-element).
+  However, `mochiweb_html` would parse it as `{"title", [], [{"b", [], [" bold "]}, " text "]}`.
+
+Floki supports the following alternative parsers:
+
+- `fast_html` - A wrapper for [lexbor](https://github.com/lexbor/lexbor). A pure C HTML parser.
+- `html5ever` - A wrapper for [html5ever](https://github.com/servo/html5ever) written in Rust, developed as a part of the Servo project.
+
+`fast_html` is generally faster, according to the
+[benchmarks](https://hexdocs.pm/fast_html/readme.html#benchmarks) conducted by
+its developers.
+
+You can perform a benchmark by running the following:
+
+    $ sh benchs/extract.sh
+    $ mix run benchs/parse_document.exs
+
+Extracting the files is needed only once.
+
+#### Using `html5ever` as the HTML parser
+
+This dependency is written with a NIF using [Rustler](https://github.com/rusterlium/rustler), but
+you don't need to install anything to compile it thanks to [RustlerPrecompiled](https://hexdocs.pm/rustler_precompiled/).
+
+```elixir
+defp deps do
+  [
+    {:floki, "~> 0.33.0"},
+    {:html5ever, "~> 0.13.0"}
+  ]
+end
+```
+
+Run `mix deps.get` and compile the project with `mix compile` to make sure it works.
+
+Then you need to configure your app to use `html5ever`:
+
+```elixir
+# in config/config.exs
+
+config :floki, :html_parser, Floki.HTMLParser.Html5ever
+```
+
+Notice that you can pass the HTML parser as an option in `parse_document/2` and `parse_fragment/2`.
+
+#### Using `fast_html` as the HTML parser
+
+A C compiler, GNU Make, and CMake need to be installed on the system in order to
+compile lexbor.
+
+First, add `fast_html` to your dependencies:
+
+```elixir
+defp deps do
+  [
+    {:floki, "~> 0.33.0"},
+    {:fast_html, "~> 2.0"}
+  ]
+end
+```
+
+Run `mix deps.get` and compile the project with `mix compile` to make sure it works.
+ +Then you need to configure your app to use `fast_html`: + +```elixir +# in config/config.exs + +config :floki, :html_parser, Floki.HTMLParser.FastHtml +``` + +## More about Floki API + +To parse a HTML document, try: + +```elixir +html = """ + <html> + <body> + <div class="example"></div> + </body> + </html> +""" + +{:ok, document} = Floki.parse_document(html) +# => {:ok, [{"html", [], [{"body", [], [{"div", [{"class", "example"}], []}]}]}]} +``` + +To find elements with the class `example`, try: + +```elixir +Floki.find(document, ".example") +# => [{"div", [{"class", "example"}], []}] +``` + +To convert your node tree back to raw HTML (spaces are ignored): + +```elixir +document +|> Floki.find(".example") +|> Floki.raw_html +# => <div class="example"></div> +``` + +To fetch some attribute from elements, try: + +```elixir +Floki.attribute(document, ".example", "class") +# => ["example"] +``` + +You can get attributes from elements that you already have: + +```elixir +document +|> Floki.find(".example") +|> Floki.attribute("class") +# => ["example"] +``` + +If you want to get the text from an element, try: + +```elixir +document +|> Floki.find(".headline") +|> Floki.text + +# => "Floki" +``` + +## Supported selectors + +Here you find all the [CSS selectors](https://www.w3.org/TR/selectors/#selectors) supported in the current version: + +| Pattern | Description | +|-----------------|------------------------------| +| * | any element | +| E | an element of type `E` | +| E[foo] | an `E` element with a "foo" attribute | +| E[foo="bar"] | an E element whose "foo" attribute value is exactly equal to "bar" | +| E[foo~="bar"] | an E element whose "foo" attribute value is a list of whitespace-separated values, one of which is exactly equal to "bar" | +| E[foo^="bar"] | an E element whose "foo" attribute value begins exactly with the string "bar" | +| E[foo$="bar"] | an E element whose "foo" attribute value ends exactly with the string "bar" | +| E[foo*="bar"] | an E element whose "foo" attribute value contains the substring "bar" | +| E[foo\|="en"] | an E element whose "foo" attribute has a hyphen-separated list of values beginning (from the left) with "en" | +| E:nth-child(n) | an E element, the n-th child of its parent | +| E:nth-last-child(n) | an E element, the n-th child of its parent, counting from bottom to up | +| E:first-child | an E element, first child of its parent | +| E:last-child | an E element, last child of its parent | +| E:nth-of-type(n) | an E element, the n-th child of its type among its siblings | +| E:nth-last-of-type(n) | an E element, the n-th child of its type among its siblings, counting from bottom to up | +| E:first-of-type | an E element, first child of its type among its siblings | +| E:last-of-type | an E element, last child of its type among its siblings | +| E:checked | An E element (checkbox, radio, or option) that is checked | +| E:disabled | An E element (button, input, select, textarea, or option) that is disabled | +| E.warning | an E element whose class is "warning" | +| E#myid | an E element with ID equal to "myid" (for ids containing periods, use `#my\\.id` or `[id="my.id"]`) | +| E:not(s) | an E element that does not match simple selector s | +| :root | the root node or nodes (in case of fragments) of the document. 
Most of the times this is the `html` tag | +| E F | an F element descendant of an E element | +| E > F | an F element child of an E element | +| E + F | an F element immediately preceded by an E element | +| E ~ F | an F element preceded by an E element | + +There are also some selectors based on non-standard specifications. They are: + +| Pattern | Description | +|-----------------------|------------------------------------------------------------------------| +| E:fl-contains('foo') | an E element that contains "foo" inside a text node | +| E:fl-icontains('foo') | an E element that contains "foo" inside a text node (case insensitive) | + +## Special thanks + +* [@arasatasaygin](https://github.com/arasatasaygin) for Floki's logo from the [Open Logos project](http://openlogos.org/). + +## License + +Copyright (c) 2014 Philip Sampaio Silva + +Floki is under MIT license. Check the [LICENSE](https://github.com/philss/floki/blob/master/LICENSE) file for more details. diff --git a/deps/floki/hex_metadata.config b/deps/floki/hex_metadata.config new file mode 100644 index 0000000..c03ed9a --- /dev/null +++ b/deps/floki/hex_metadata.config @@ -0,0 +1,40 @@ +{<<"app">>,<<"floki">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>, + <<"Floki is a simple HTML parser that enables search for nodes using CSS selectors.">>}. +{<<"elixir">>,<<"~> 1.10">>}. +{<<"files">>, + [<<"lib/floki">>,<<"lib/floki/deep_text.ex">>,<<"lib/floki/filter_out.ex">>, + <<"lib/floki/finder.ex">>,<<"lib/floki/flat_text.ex">>,<<"lib/floki/html">>, + <<"lib/floki/html/numeric_charref.ex">>,<<"lib/floki/html/tokenizer.ex">>, + <<"lib/floki/html_parser.ex">>,<<"lib/floki/html_parser">>, + <<"lib/floki/html_parser/fast_html.ex">>, + <<"lib/floki/html_parser/html5ever.ex">>, + <<"lib/floki/html_parser/mochiweb.ex">>,<<"lib/floki/html_tree.ex">>, + <<"lib/floki/html_tree">>,<<"lib/floki/html_tree/comment.ex">>, + <<"lib/floki/html_tree/html_node.ex">>, + <<"lib/floki/html_tree/id_seeder.ex">>,<<"lib/floki/html_tree/text.ex">>, + <<"lib/floki/parse_error.ex">>,<<"lib/floki/selector">>, + <<"lib/floki/selector/functional.ex">>, + <<"lib/floki/selector/tokenizer.ex">>, + <<"lib/floki/selector/attribute_selector.ex">>, + <<"lib/floki/selector/combinator.ex">>,<<"lib/floki/selector/parser.ex">>, + <<"lib/floki/selector/pseudo_class.ex">>,<<"lib/floki/traversal.ex">>, + <<"lib/floki/raw_html.ex">>,<<"lib/floki/entities.ex">>, + <<"lib/floki/selector.ex">>,<<"lib/floki.ex">>, + <<"src/floki_selector_lexer.xrl">>,<<"src/floki_mochi_html.erl">>, + <<"src/floki.gleam">>,<<"mix.exs">>,<<"README.md">>,<<"LICENSE">>, + <<"CODE_OF_CONDUCT.md">>,<<"CONTRIBUTING.md">>,<<"CHANGELOG.md">>]}. +{<<"licenses">>,[<<"MIT">>]}. +{<<"links">>, + [{<<"Changelog">>,<<"https://hexdocs.pm/floki/changelog.html">>}, + {<<"GitHub">>,<<"https://github.com/philss/floki">>}, + {<<"Sponsor">>,<<"https://github.com/sponsors/philss">>}]}. +{<<"name">>,<<"floki">>}. +{<<"requirements">>, + [[{<<"app">>,<<"html_entities">>}, + {<<"name">>,<<"html_entities">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 0.5.0">>}]]}. +{<<"version">>,<<"0.33.1">>}. diff --git a/deps/floki/lib/floki.ex b/deps/floki/lib/floki.ex new file mode 100644 index 0000000..8194b80 --- /dev/null +++ b/deps/floki/lib/floki.ex @@ -0,0 +1,696 @@ +defmodule Floki do + alias Floki.{Finder, FilterOut, HTMLTree} + + require Logger + + @moduledoc """ + Floki is a simple HTML parser that enables search for nodes using CSS selectors. 
+ + ## Example + + Assuming that you have the following HTML: + + ```html + <!doctype html> + <html> + <body> + <section id="content"> + <p class="headline">Floki</p> + <a href="http://github.com/philss/floki">Github page</a> + <span data-model="user">philss</span> + </section> + </body> + </html> + ``` + + To parse this, you can use the function `Floki.parse_document/1`: + + ```elixir + {:ok, html} = Floki.parse_document(doc) + # => + # [{"html", [], + # [ + # {"body", [], + # [ + # {"section", [{"id", "content"}], + # [ + # {"p", [{"class", "headline"}], ["Floki"]}, + # {"a", [{"href", "http://github.com/philss/floki"}], ["Github page"]}, + # {"span", [{"data-model", "user"}], ["philss"]} + # ]} + # ]} + # ]}] + ``` + + With this document you can perform queries such as: + + * `Floki.find(html, "#content")` + * `Floki.find(html, ".headline")` + * `Floki.find(html, "a")` + * `Floki.find(html, "[data-model=user]")` + * `Floki.find(html, "#content a")` + * `Floki.find(html, ".headline, a")` + + Each HTML node is represented by a tuple like: + + {tag_name, attributes, children_nodes} + + Example of node: + + {"p", [{"class", "headline"}], ["Floki"]} + + So even if the only child node is the element text, it is represented + inside a list. + """ + + @type html_declaration :: {:pi, String.t(), [html_attribute()]} + @type html_comment :: {:comment, String.t()} + @type html_doctype :: {:doctype, String.t(), String.t(), String.t()} + @type html_attribute :: {String.t(), String.t()} + @type html_text :: String.t() + @type html_tag :: {String.t(), [html_attribute()], [html_node()]} + @type html_node :: + html_tag() | html_comment() | html_doctype() | html_declaration() | html_text() + @type html_tree :: [html_node()] + + @type css_selector :: String.t() | Floki.Selector.t() | [Floki.Selector.t()] + + @doc """ + Parses a HTML Document from a String. + + The expect string is a valid HTML, but the parser will try + to parse even with errors. + """ + + @spec parse(binary()) :: html_tag() | html_tree() | String.t() + + @deprecated "Use `parse_document/1` or `parse_fragment/1` instead." + def parse(html) do + with {:ok, document} <- Floki.HTMLParser.parse_document(html) do + if length(document) == 1 do + hd(document) + else + document + end + end + end + + @doc """ + Parses a HTML Document from a string. + + It will use the available parser from application env or the one from the + `:html_parser` option. + Check https://github.com/philss/floki#alternative-html-parsers for more details. + + ## Examples + + iex> Floki.parse_document("<html><head></head><body>hello</body></html>") + {:ok, [{"html", [], [{"head", [], []}, {"body", [], ["hello"]}]}]} + + iex> Floki.parse_document("<html><head></head><body>hello</body></html>", html_parser: Floki.HTMLParser.Mochiweb) + {:ok, [{"html", [], [{"head", [], []}, {"body", [], ["hello"]}]}]} + + """ + + @spec parse_document(binary(), Keyword.t()) :: {:ok, html_tree()} | {:error, String.t()} + + defdelegate parse_document(document, opts \\ []), to: Floki.HTMLParser + + @doc """ + Parses a HTML Document from a string. + + Similar to `Floki.parse_document/1`, but raises `Floki.ParseError` if there was an + error parsing the document. 
+ + ## Example + + iex> Floki.parse_document!("<html><head></head><body>hello</body></html>") + [{"html", [], [{"head", [], []}, {"body", [], ["hello"]}]}] + + """ + + @spec parse_document!(binary(), Keyword.t()) :: html_tree() + + def parse_document!(document, opts \\ []) do + case parse_document(document, opts) do + {:ok, parsed_document} -> parsed_document + {:error, message} -> raise Floki.ParseError, message: message + end + end + + @doc """ + Parses a HTML fragment from a string. + + It will use the available parser from application env or the one from the + `:html_parser` option. + + Check https://github.com/philss/floki#alternative-html-parsers for more details. + """ + + @spec parse_fragment(binary(), Keyword.t()) :: {:ok, html_tree()} | {:error, String.t()} + + defdelegate parse_fragment(fragment, opts \\ []), to: Floki.HTMLParser + + @doc """ + Parses a HTML fragment from a string. + + Similar to `Floki.parse_fragment/1`, but raises `Floki.ParseError` if there was an + error parsing the fragment. + """ + + @spec parse_fragment!(binary(), Keyword.t()) :: html_tree() + + def parse_fragment!(fragment, opts \\ []) do + case parse_fragment(fragment, opts) do + {:ok, parsed_fragment} -> parsed_fragment + {:error, message} -> raise Floki.ParseError, message: message + end + end + + @doc """ + Converts HTML tree to raw HTML. + Note that the resultant HTML may be different from the original one. + Spaces after tags and doctypes are ignored. + + ## Options + + - `:encode`: accepts `true` or `false`. Will encode html special characters + to html entities. + You can also control the encoding behaviour at the application level via + `config :floki, :encode_raw_html, true | false` + + - `:pretty`: accepts `true` or `false`. Will format the output, ignoring + breaklines and spaces from the input and putting new ones in order to pretty format + the html. + + ## Examples + + iex> Floki.raw_html({"div", [{"class", "wrapper"}], ["my content"]}) + ~s(<div class="wrapper">my content</div>) + + iex> Floki.raw_html({"div", [{"class", "wrapper"}], ["10 > 5"]}, encode: true) + ~s(<div class="wrapper">10 > 5</div>) + + iex> Floki.raw_html({"div", [{"class", "wrapper"}], ["10 > 5"]}, encode: false) + ~s(<div class="wrapper">10 > 5</div>) + + iex> Floki.raw_html({"div", [], ["\\n ", {"span", [], "Fully indented"}, " \\n"]}, pretty: true) + \"\"\" + <div> + <span> + Fully indented + </span> + </div> + \"\"\" + """ + + @spec raw_html(html_tree | binary, keyword) :: binary + + defdelegate raw_html(html_tree, options \\ []), to: Floki.RawHTML + + @doc """ + Find elements inside a HTML tree or string. 
+ + ## Examples + + iex> {:ok, html} = Floki.parse_fragment("<p><span class=hint>hello</span></p>") + iex> Floki.find(html, ".hint") + [{"span", [{"class", "hint"}], ["hello"]}] + + iex> {:ok, html} = Floki.parse_fragment("<div id=important><div>Content</div></div>") + iex> Floki.find(html, "#important") + [{"div", [{"id", "important"}], [{"div", [], ["Content"]}]}] + + iex> {:ok, html} = Floki.parse_fragment("<p><a href='https://google.com'>Google</a></p>") + iex> Floki.find(html, "a") + [{"a", [{"href", "https://google.com"}], ["Google"]}] + + iex> Floki.find([{ "div", [], [{"a", [{"href", "https://google.com"}], ["Google"]}]}], "div a") + [{"a", [{"href", "https://google.com"}], ["Google"]}] + + """ + + @spec find(binary() | html_tree() | html_node(), css_selector()) :: html_tree + + def find(html, selector) when is_binary(html) do + Logger.info( + "deprecation: parse the HTML with parse_document or parse_fragment before using find/2" + ) + + with {:ok, document} <- Floki.parse_document(html) do + {tree, results} = Finder.find(document, selector) + + Enum.map(results, fn html_node -> HTMLTree.to_tuple(tree, html_node) end) + end + end + + def find(html_tree_as_tuple, selector) do + {tree, results} = Finder.find(html_tree_as_tuple, selector) + + Enum.map(results, fn html_node -> HTMLTree.to_tuple(tree, html_node) end) + end + + @doc """ + Changes the attribute values of the elements matched by `selector` + with the function `mutation` and returns the whole element tree. + + ## Examples + + iex> Floki.attr([{"div", [{"id", "a"}], []}], "#a", "id", fn(id) -> String.replace(id, "a", "b") end) + [{"div", [{"id", "b"}], []}] + + iex> Floki.attr([{"div", [{"class", "name"}], []}], "div", "id", fn _ -> "b" end) + [{"div", [{"id", "b"}, {"class", "name"}], []}] + + """ + @spec attr(binary | html_tree | html_node, css_selector(), binary, (binary -> binary)) :: + html_tree + + def attr(html_elem_tuple, selector, attribute_name, mutation) when is_tuple(html_elem_tuple) do + attr([html_elem_tuple], selector, attribute_name, mutation) + end + + def attr(html, selector, attribute_name, mutation) when is_binary(html) do + Logger.info( + "deprecation: parse the HTML with parse_document or parse_fragment before using attr/4" + ) + + with {:ok, document} <- Floki.parse_document(html) do + attr(document, selector, attribute_name, mutation) + end + end + + def attr(html_tree_list, selector, attribute_name, mutation) when is_list(html_tree_list) do + find_and_update(html_tree_list, selector, fn + {tag, attrs} -> + modified_attrs = + if Enum.any?(attrs, &match?({^attribute_name, _}, &1)) do + Enum.map( + attrs, + fn attribute -> + with {^attribute_name, attribute_value} <- attribute do + {attribute_name, mutation.(attribute_value)} + end + end + ) + else + [{attribute_name, mutation.(nil)} | attrs] + end + + {tag, modified_attrs} + + other -> + other + end) + end + + @deprecated """ + Use `find_and_update/3` or `Enum.map/2` instead. + """ + def map(_html_tree_or_list, _fun) + + def map(html_tree_list, fun) when is_list(html_tree_list) do + Enum.map(html_tree_list, &Finder.map(&1, fun)) + end + + def map(html_tree, fun), do: Finder.map(html_tree, fun) + + @doc """ + Searches for elements inside the HTML tree and update those that matches the selector. + + It will return the updated HTML tree. + + This function works in a way similar to `traverse_and_update`, but instead of updating + the children nodes, it will only updates the `tag` and `attributes` of the matching nodes. 
+ + If `fun` returns `:delete`, the HTML node will be removed from the tree. + + ## Examples + + iex> Floki.find_and_update([{"a", [{"href", "http://elixir-lang.com"}], ["Elixir"]}], "a", fn + iex> {"a", [{"href", href}]} -> + iex> {"a", [{"href", String.replace(href, "http://", "https://")}]} + iex> other -> + iex> other + iex> end) + [{"a", [{"href", "https://elixir-lang.com"}], ["Elixir"]}] + """ + + @spec find_and_update( + html_tree(), + css_selector(), + ({String.t(), [html_attribute()]} -> {String.t(), [html_attribute()]} | :delete) + ) :: html_tree() + def find_and_update(html_tree, selector, fun) do + {tree, results} = Finder.find(html_tree, selector) + + operations_with_nodes = + Enum.map(results, fn + html_node = %Floki.HTMLTree.HTMLNode{} -> + case fun.({html_node.type, html_node.attributes}) do + {updated_tag, updated_attrs} -> + {:update, %{html_node | type: updated_tag, attributes: updated_attrs}} + + :delete -> + {:delete, html_node} + end + + other -> + {:no_op, other} + end) + + tree + |> HTMLTree.patch_nodes(operations_with_nodes) + |> HTMLTree.to_tuple_list() + end + + @doc """ + Traverses and updates a HTML tree structure. + + This function returns a new tree structure that is the result of applying the + given `fun` on all nodes except text nodes. + The tree is traversed in a post-walk fashion, where the children are traversed + before the parent. + + When the function `fun` encounters HTML tag, it receives a tuple with + `{name, attributes, children}`, and should either return a similar tuple or + `nil` to delete the current node. + + The function `fun` can also encounter HTML doctype, comment or declaration and + will receive, and should return, different tuple for these types. See the + documentation for `t:html_comment/0`, `t:html_doctype/0` and + `t:html_declaration/0` for details. + + **Note**: this won't update text nodes, but you can transform them when working + with children nodes. + + ## Examples + + iex> html = [{"div", [], ["hello"]}] + iex> Floki.traverse_and_update(html, fn + ...> {"div", attrs, children} -> {"p", attrs, children} + ...> other -> other + ...> end) + [{"p", [], ["hello"]}] + + iex> html = [{"div", [], [{:comment, "I am comment"}, {"span", [], ["hello"]}]}] + iex> Floki.traverse_and_update(html, fn + ...> {"span", _attrs, _children} -> nil + ...> {:comment, text} -> {"span", [], text} + ...> other -> other + ...> end) + [{"div", [], [{"span", [], "I am comment"}]}] + """ + + @spec traverse_and_update( + html_tree(), + (html_tag() | html_comment() | html_doctype() | html_declaration() -> html_node() | nil) + ) :: html_tree() + + defdelegate traverse_and_update(html_tree, fun), to: Floki.Traversal + + @doc """ + Traverses and updates a HTML tree structure with an accumulator. + + This function returns a new tree structure and the final value of accumulator + which are the result of applying the given `fun` on all nodes except text nodes. + The tree is traversed in a post-walk fashion, where the children are traversed + before the parent. + + When the function `fun` encounters HTML tag, it receives a tuple with + `{name, attributes, children}` and an accumulator. It and should return a + 2-tuple like `{new_node, new_acc}`, where `new_node` is either a similar tuple + or `nil` to delete the current node, and `new_acc` is an updated value for the + accumulator. + + The function `fun` can also encounter HTML doctype, comment or declaration and + will receive, and should return, different tuple for these types. 
See the + documentation for `t:html_comment/0`, `t:html_doctype/0` and + `t:html_declaration/0` for details. + + **Note**: this won't update text nodes, but you can transform them when working + with children nodes. + + ## Examples + + iex> html = [{"div", [], [{:comment, "I am a comment"}, "hello"]}, {"div", [], ["world"]}] + iex> Floki.traverse_and_update(html, 0, fn + ...> {"div", attrs, children}, acc -> + ...> {{"p", [{"data-count", to_string(acc)} | attrs], children}, acc + 1} + ...> other, acc -> {other, acc} + ...> end) + {[ + {"p", [{"data-count", "0"}], [{:comment, "I am a comment"}, "hello"]}, + {"p", [{"data-count", "1"}], ["world"]} + ], 2} + + iex> html = {"div", [], [{"span", [], ["hello"]}]} + iex> Floki.traverse_and_update(html, [deleted: 0], fn + ...> {"span", _attrs, _children}, acc -> + ...> {nil, Keyword.put(acc, :deleted, acc[:deleted] + 1)} + ...> tag, acc -> + ...> {tag, acc} + ...> end) + {{"div", [], []}, [deleted: 1]} + """ + + @spec traverse_and_update( + html_tree(), + traverse_acc, + (html_tag() | html_comment() | html_doctype() | html_declaration(), traverse_acc -> + {html_node() | nil, traverse_acc}) + ) :: {html_node(), traverse_acc} + when traverse_acc: any() + defdelegate traverse_and_update(html_tree, acc, fun), to: Floki.Traversal + + @doc """ + Returns the text nodes from a HTML tree. + + By default, it will perform a deep search through the HTML tree. + You can disable deep search with the option `deep` assigned to false. + You can include content of script tags with the option `js` assigned to true. + You can specify a separator between nodes content. + + ## Examples + + iex> Floki.text({"div", [], [{"span", [], ["hello"]}, " world"]}) + "hello world" + + iex> Floki.text({"div", [], [{"span", [], ["hello"]}, " world"]}, deep: false) + " world" + + iex> Floki.text({"div", [], [{"script", [], ["hello"]}, " world"]}) + " world" + + iex> Floki.text({"div", [], [{"script", [], ["hello"]}, " world"]}, js: true) + "hello world" + + iex> Floki.text({"ul", [], [{"li", [], ["hello"]}, {"li", [], ["world"]}]}, sep: "-") + "hello-world" + + iex> Floki.text([{"div", [], ["hello world"]}]) + "hello world" + + iex> Floki.text([{"p", [], ["1"]},{"p", [], ["2"]}]) + "12" + + iex> Floki.text({"div", [], [{"style", [], ["hello"]}, " world"]}, style: false) + " world" + + iex> Floki.text({"div", [], [{"style", [], ["hello"]}, " world"]}, style: true) + "hello world" + + """ + + @spec text(html_tree | binary) :: binary + + def text(html, opts \\ [deep: true, js: false, style: true, sep: ""]) do + cleaned_html_tree = + html + |> parse_it() + |> clean_html_tree(:js, opts[:js]) + |> clean_html_tree(:style, opts[:style]) + + search_strategy = + case opts[:deep] do + false -> Floki.FlatText + _ -> Floki.DeepText + end + + case opts[:sep] do + nil -> search_strategy.get(cleaned_html_tree) + sep -> search_strategy.get(cleaned_html_tree, sep) + end + end + + @doc """ + Returns the direct child nodes of a HTML node. + + By default, it will also include all texts. You can disable + this behaviour by using the option `include_text` to `false`. + + If the given node is not an HTML tag, then it returns nil. 
+ + ## Examples + + iex> Floki.children({"div", [], ["text", {"span", [], []}]}) + ["text", {"span", [], []}] + + iex> Floki.children({"div", [], ["text", {"span", [], []}]}, include_text: false) + [{"span", [], []}] + + iex> Floki.children({:comment, "comment"}) + nil + + """ + + @spec children(html_node(), Keyword.t()) :: html_tree() | nil + + def children(html_node, opts \\ [include_text: true]) do + case html_node do + {_, _, subtree} -> + filter_children(subtree, opts[:include_text]) + + _ -> + nil + end + end + + defp filter_children(children, false), do: Enum.filter(children, &is_tuple(&1)) + defp filter_children(children, _), do: children + + @doc """ + Returns a list with attribute values for a given selector. + + ## Examples + + iex> Floki.attribute([{"a", [{"href", "https://google.com"}], ["Google"]}], "a", "href") + ["https://google.com"] + + iex> Floki.attribute([{"a", [{"class", "foo"}, {"href", "https://google.com"}], ["Google"]}], "a", "class") + ["foo"] + + """ + + @spec attribute(binary | html_tree | html_node, binary, binary) :: list + + def attribute(html, selector, attribute_name) do + html + |> find(selector) + |> attribute_values(attribute_name) + end + + @doc """ + Returns a list with attribute values from elements. + + ## Examples + + iex> Floki.attribute([{"a", [{"href", "https://google.com"}], ["Google"]}], "href") + ["https://google.com"] + + """ + + @spec attribute(binary | html_tree | html_node, binary) :: list + def attribute(html, attribute_name) when is_binary(html) do + Logger.info( + "deprecation: parse the HTML with parse_document or parse_fragment before using attribute/2" + ) + + with {:ok, document} <- Floki.parse_document(html) do + attribute_values(document, attribute_name) + end + end + + def attribute(elements, attribute_name) do + attribute_values(elements, attribute_name) + end + + defp attribute_values(element, attr_name) when is_tuple(element) do + attribute_values([element], attr_name) + end + + defp attribute_values(elements, attr_name) do + values = + Enum.reduce( + elements, + [], + fn + {_, attributes, _}, acc -> + case attribute_match?(attributes, attr_name) do + {_attr_name, value} -> + [value | acc] + + _ -> + acc + end + + _, acc -> + acc + end + ) + + Enum.reverse(values) + end + + defp attribute_match?(attributes, attribute_name) do + Enum.find( + attributes, + fn {attr_name, _} -> + attr_name == attribute_name + end + ) + end + + defp parse_it(html) when is_binary(html) do + Logger.info( + "deprecation: parse the HTML with parse_document or parse_fragment before using text/2" + ) + + {:ok, document} = Floki.parse_document(html) + document + end + + defp parse_it(html), do: html + + defp clean_html_tree(html_tree, :js, true), do: html_tree + defp clean_html_tree(html_tree, :js, _), do: filter_out(html_tree, "script") + + defp clean_html_tree(html_tree, :style, true), do: html_tree + defp clean_html_tree(html_tree, :style, _), do: filter_out(html_tree, "style") + + @doc """ + Returns the nodes from a HTML tree that don't match the filter selector. 
+ + ## Examples + + iex> Floki.filter_out({"div", [], [{"script", [], ["hello"]}, " world"]}, "script") + {"div", [], [" world"]} + + iex> Floki.filter_out([{"body", [], [{"script", [], []}, {"div", [], []}]}], "script") + [{"body", [], [{"div", [], []}]}] + + iex> Floki.filter_out({"div", [], [{:comment, "comment"}, " text"]}, :comment) + {"div", [], [" text"]} + + iex> Floki.filter_out({"div", [], ["text"]}, :text) + {"div", [], []} + + """ + + @spec filter_out(html_node() | html_tree() | binary(), FilterOut.selector()) :: + html_node() | html_tree() + + def filter_out(html, selector) when is_binary(html) do + Logger.info( + "deprecation: parse the HTML with parse_document or parse_fragment before using filter_out/2" + ) + + with {:ok, document} <- Floki.parse_document(html) do + FilterOut.filter_out(document, selector) + end + end + + def filter_out(elements, selector) do + FilterOut.filter_out(elements, selector) + end +end diff --git a/deps/floki/lib/floki/deep_text.ex b/deps/floki/lib/floki/deep_text.ex new file mode 100644 index 0000000..0f8bbd8 --- /dev/null +++ b/deps/floki/lib/floki/deep_text.ex @@ -0,0 +1,30 @@ +defmodule Floki.DeepText do + @moduledoc false + + # DeepText is a strategy to get text nodes from a HTML tree using a deep search + # algorithm. It will get all string nodes and concat them. + + @type html_tree :: tuple | list + + @spec get(html_tree, binary) :: binary + + def get(html_tree, sep \\ "") do + get_text(html_tree, "", sep) + end + + defp get_text(text, "", _sep) when is_binary(text), do: text + defp get_text(text, acc, sep) when is_binary(text), do: Enum.join([acc, text], sep) + + defp get_text(nodes, acc, sep) when is_list(nodes) do + Enum.reduce(nodes, acc, fn child, istr -> + get_text(child, istr, sep) + end) + end + + defp get_text({:comment, _}, acc, _), do: acc + defp get_text({"br", _, _}, acc, _), do: acc <> "\n" + + defp get_text({_, _, nodes}, acc, sep) do + get_text(nodes, acc, sep) + end +end diff --git a/deps/floki/lib/floki/entities.ex b/deps/floki/lib/floki/entities.ex new file mode 100644 index 0000000..2e1e1a0 --- /dev/null +++ b/deps/floki/lib/floki/entities.ex @@ -0,0 +1,2242 @@ +defmodule Floki.Entities do + # This file was generated by "Mix.Tasks.GenerateEntities" + + @moduledoc false + + @doc """ + Returns unicode codepoints for a given HTML entity. 
+ """ + @spec get(binary()) :: list(integer) + def get("Æ"), do: [198] + def get("Æ"), do: [198] + def get("&"), do: [38] + def get("&"), do: [38] + def get("Á"), do: [193] + def get("Á"), do: [193] + def get("Ă"), do: [258] + def get("Â"), do: [194] + def get("Â"), do: [194] + def get("А"), do: [1040] + def get("𝔄"), do: [120_068] + def get("À"), do: [192] + def get("À"), do: [192] + def get("Α"), do: [913] + def get("Ā"), do: [256] + def get("⩓"), do: [10835] + def get("Ą"), do: [260] + def get("𝔸"), do: [120_120] + def get("⁡"), do: [8289] + def get("Å"), do: [197] + def get("Å"), do: [197] + def get("𝒜"), do: [119_964] + def get("≔"), do: [8788] + def get("Ã"), do: [195] + def get("Ã"), do: [195] + def get("Ä"), do: [196] + def get("Ä"), do: [196] + def get("∖"), do: [8726] + def get("⫧"), do: [10983] + def get("⌆"), do: [8966] + def get("Б"), do: [1041] + def get("∵"), do: [8757] + def get("ℬ"), do: [8492] + def get("Β"), do: [914] + def get("𝔅"), do: [120_069] + def get("𝔹"), do: [120_121] + def get("˘"), do: [728] + def get("ℬ"), do: [8492] + def get("≎"), do: [8782] + def get("Ч"), do: [1063] + def get("©"), do: [169] + def get("©"), do: [169] + def get("Ć"), do: [262] + def get("⋒"), do: [8914] + def get("ⅅ"), do: [8517] + def get("ℭ"), do: [8493] + def get("Č"), do: [268] + def get("Ç"), do: [199] + def get("Ç"), do: [199] + def get("Ĉ"), do: [264] + def get("∰"), do: [8752] + def get("Ċ"), do: [266] + def get("¸"), do: [184] + def get("·"), do: [183] + def get("ℭ"), do: [8493] + def get("Χ"), do: [935] + def get("⊙"), do: [8857] + def get("⊖"), do: [8854] + def get("⊕"), do: [8853] + def get("⊗"), do: [8855] + def get("∲"), do: [8754] + def get("”"), do: [8221] + def get("’"), do: [8217] + def get("∷"), do: [8759] + def get("⩴"), do: [10868] + def get("≡"), do: [8801] + def get("∯"), do: [8751] + def get("∮"), do: [8750] + def get("ℂ"), do: [8450] + def get("∐"), do: [8720] + def get("∳"), do: [8755] + def get("⨯"), do: [10799] + def get("𝒞"), do: [119_966] + def get("⋓"), do: [8915] + def get("≍"), do: [8781] + def get("ⅅ"), do: [8517] + def get("⤑"), do: [10513] + def get("Ђ"), do: [1026] + def get("Ѕ"), do: [1029] + def get("Џ"), do: [1039] + def get("‡"), do: [8225] + def get("↡"), do: [8609] + def get("⫤"), do: [10980] + def get("Ď"), do: [270] + def get("Д"), do: [1044] + def get("∇"), do: [8711] + def get("Δ"), do: [916] + def get("𝔇"), do: [120_071] + def get("´"), do: [180] + def get("˙"), do: [729] + def get("˝"), do: [733] + def get("`"), do: [96] + def get("˜"), do: [732] + def get("⋄"), do: [8900] + def get("ⅆ"), do: [8518] + def get("𝔻"), do: [120_123] + def get("¨"), do: [168] + def get("⃜"), do: [8412] + def get("≐"), do: [8784] + def get("∯"), do: [8751] + def get("¨"), do: [168] + def get("⇓"), do: [8659] + def get("⇐"), do: [8656] + def get("⇔"), do: [8660] + def get("⫤"), do: [10980] + def get("⟸"), do: [10232] + def get("⟺"), do: [10234] + def get("⟹"), do: [10233] + def get("⇒"), do: [8658] + def get("⊨"), do: [8872] + def get("⇑"), do: [8657] + def get("⇕"), do: [8661] + def get("∥"), do: [8741] + def get("↓"), do: [8595] + def get("⤓"), do: [10515] + def get("⇵"), do: [8693] + def get("̑"), do: [785] + def get("⥐"), do: [10576] + def get("⥞"), do: [10590] + def get("↽"), do: [8637] + def get("⥖"), do: [10582] + def get("⥟"), do: [10591] + def get("⇁"), do: [8641] + def get("⥗"), do: [10583] + def get("⊤"), do: [8868] + def get("↧"), do: [8615] + def get("⇓"), do: [8659] + def get("𝒟"), do: [119_967] + def get("Đ"), do: [272] + def get("Ŋ"), do: [330] + 
def get("Ð"), do: [208] + def get("Ð"), do: [208] + def get("É"), do: [201] + def get("É"), do: [201] + def get("Ě"), do: [282] + def get("Ê"), do: [202] + def get("Ê"), do: [202] + def get("Э"), do: [1069] + def get("Ė"), do: [278] + def get("𝔈"), do: [120_072] + def get("È"), do: [200] + def get("È"), do: [200] + def get("∈"), do: [8712] + def get("Ē"), do: [274] + def get("◻"), do: [9723] + def get("▫"), do: [9643] + def get("Ę"), do: [280] + def get("𝔼"), do: [120_124] + def get("Ε"), do: [917] + def get("⩵"), do: [10869] + def get("≂"), do: [8770] + def get("⇌"), do: [8652] + def get("ℰ"), do: [8496] + def get("⩳"), do: [10867] + def get("Η"), do: [919] + def get("Ë"), do: [203] + def get("Ë"), do: [203] + def get("∃"), do: [8707] + def get("ⅇ"), do: [8519] + def get("Ф"), do: [1060] + def get("𝔉"), do: [120_073] + def get("◼"), do: [9724] + def get("▪"), do: [9642] + def get("𝔽"), do: [120_125] + def get("∀"), do: [8704] + def get("ℱ"), do: [8497] + def get("ℱ"), do: [8497] + def get("Ѓ"), do: [1027] + def get(">"), do: [62] + def get(">"), do: [62] + def get("Γ"), do: [915] + def get("Ϝ"), do: [988] + def get("Ğ"), do: [286] + def get("Ģ"), do: [290] + def get("Ĝ"), do: [284] + def get("Г"), do: [1043] + def get("Ġ"), do: [288] + def get("𝔊"), do: [120_074] + def get("⋙"), do: [8921] + def get("𝔾"), do: [120_126] + def get("≥"), do: [8805] + def get("⋛"), do: [8923] + def get("≧"), do: [8807] + def get("⪢"), do: [10914] + def get("≷"), do: [8823] + def get("⩾"), do: [10878] + def get("≳"), do: [8819] + def get("𝒢"), do: [119_970] + def get("≫"), do: [8811] + def get("Ъ"), do: [1066] + def get("ˇ"), do: [711] + def get("^"), do: [94] + def get("Ĥ"), do: [292] + def get("ℌ"), do: [8460] + def get("ℋ"), do: [8459] + def get("ℍ"), do: [8461] + def get("─"), do: [9472] + def get("ℋ"), do: [8459] + def get("Ħ"), do: [294] + def get("≎"), do: [8782] + def get("≏"), do: [8783] + def get("Е"), do: [1045] + def get("IJ"), do: [306] + def get("Ё"), do: [1025] + def get("Í"), do: [205] + def get("Í"), do: [205] + def get("Î"), do: [206] + def get("Î"), do: [206] + def get("И"), do: [1048] + def get("İ"), do: [304] + def get("ℑ"), do: [8465] + def get("Ì"), do: [204] + def get("Ì"), do: [204] + def get("ℑ"), do: [8465] + def get("Ī"), do: [298] + def get("ⅈ"), do: [8520] + def get("⇒"), do: [8658] + def get("∬"), do: [8748] + def get("∫"), do: [8747] + def get("⋂"), do: [8898] + def get("⁣"), do: [8291] + def get("⁢"), do: [8290] + def get("Į"), do: [302] + def get("𝕀"), do: [120_128] + def get("Ι"), do: [921] + def get("ℐ"), do: [8464] + def get("Ĩ"), do: [296] + def get("І"), do: [1030] + def get("Ï"), do: [207] + def get("Ï"), do: [207] + def get("Ĵ"), do: [308] + def get("Й"), do: [1049] + def get("𝔍"), do: [120_077] + def get("𝕁"), do: [120_129] + def get("𝒥"), do: [119_973] + def get("Ј"), do: [1032] + def get("Є"), do: [1028] + def get("Х"), do: [1061] + def get("Ќ"), do: [1036] + def get("Κ"), do: [922] + def get("Ķ"), do: [310] + def get("К"), do: [1050] + def get("𝔎"), do: [120_078] + def get("𝕂"), do: [120_130] + def get("𝒦"), do: [119_974] + def get("Љ"), do: [1033] + def get("<"), do: [60] + def get("<"), do: [60] + def get("Ĺ"), do: [313] + def get("Λ"), do: [923] + def get("⟪"), do: [10218] + def get("ℒ"), do: [8466] + def get("↞"), do: [8606] + def get("Ľ"), do: [317] + def get("Ļ"), do: [315] + def get("Л"), do: [1051] + def get("⟨"), do: [10216] + def get("←"), do: [8592] + def get("⇤"), do: [8676] + def get("⇆"), do: [8646] + def get("⌈"), do: [8968] + def get("⟦"), do: 
[10214] + def get("⥡"), do: [10593] + def get("⇃"), do: [8643] + def get("⥙"), do: [10585] + def get("⌊"), do: [8970] + def get("↔"), do: [8596] + def get("⥎"), do: [10574] + def get("⊣"), do: [8867] + def get("↤"), do: [8612] + def get("⥚"), do: [10586] + def get("⊲"), do: [8882] + def get("⧏"), do: [10703] + def get("⊴"), do: [8884] + def get("⥑"), do: [10577] + def get("⥠"), do: [10592] + def get("↿"), do: [8639] + def get("⥘"), do: [10584] + def get("↼"), do: [8636] + def get("⥒"), do: [10578] + def get("⇐"), do: [8656] + def get("⇔"), do: [8660] + def get("⋚"), do: [8922] + def get("≦"), do: [8806] + def get("≶"), do: [8822] + def get("⪡"), do: [10913] + def get("⩽"), do: [10877] + def get("≲"), do: [8818] + def get("𝔏"), do: [120_079] + def get("⋘"), do: [8920] + def get("⇚"), do: [8666] + def get("Ŀ"), do: [319] + def get("⟵"), do: [10229] + def get("⟷"), do: [10231] + def get("⟶"), do: [10230] + def get("⟸"), do: [10232] + def get("⟺"), do: [10234] + def get("⟹"), do: [10233] + def get("𝕃"), do: [120_131] + def get("↙"), do: [8601] + def get("↘"), do: [8600] + def get("ℒ"), do: [8466] + def get("↰"), do: [8624] + def get("Ł"), do: [321] + def get("≪"), do: [8810] + def get("⤅"), do: [10501] + def get("М"), do: [1052] + def get(" "), do: [8287] + def get("ℳ"), do: [8499] + def get("𝔐"), do: [120_080] + def get("∓"), do: [8723] + def get("𝕄"), do: [120_132] + def get("ℳ"), do: [8499] + def get("Μ"), do: [924] + def get("Њ"), do: [1034] + def get("Ń"), do: [323] + def get("Ň"), do: [327] + def get("Ņ"), do: [325] + def get("Н"), do: [1053] + def get("​"), do: [8203] + def get("​"), do: [8203] + def get("​"), do: [8203] + def get("​"), do: [8203] + def get("≫"), do: [8811] + def get("≪"), do: [8810] + def get(" "), do: [10] + def get("𝔑"), do: [120_081] + def get("⁠"), do: [8288] + def get(" "), do: [160] + def get("ℕ"), do: [8469] + def get("⫬"), do: [10988] + def get("≢"), do: [8802] + def get("≭"), do: [8813] + def get("∦"), do: [8742] + def get("∉"), do: [8713] + def get("≠"), do: [8800] + def get("≂̸"), do: [8770, 824] + def get("∄"), do: [8708] + def get("≯"), do: [8815] + def get("≱"), do: [8817] + def get("≧̸"), do: [8807, 824] + def get("≫̸"), do: [8811, 824] + def get("≹"), do: [8825] + def get("⩾̸"), do: [10878, 824] + def get("≵"), do: [8821] + def get("≎̸"), do: [8782, 824] + def get("≏̸"), do: [8783, 824] + def get("⋪"), do: [8938] + def get("⧏̸"), do: [10703, 824] + def get("⋬"), do: [8940] + def get("≮"), do: [8814] + def get("≰"), do: [8816] + def get("≸"), do: [8824] + def get("≪̸"), do: [8810, 824] + def get("⩽̸"), do: [10877, 824] + def get("≴"), do: [8820] + def get("⪢̸"), do: [10914, 824] + def get("⪡̸"), do: [10913, 824] + def get("⊀"), do: [8832] + def get("⪯̸"), do: [10927, 824] + def get("⋠"), do: [8928] + def get("∌"), do: [8716] + def get("⋫"), do: [8939] + def get("⧐̸"), do: [10704, 824] + def get("⋭"), do: [8941] + def get("⊏̸"), do: [8847, 824] + def get("⋢"), do: [8930] + def get("⊐̸"), do: [8848, 824] + def get("⋣"), do: [8931] + def get("⊂⃒"), do: [8834, 8402] + def get("⊈"), do: [8840] + def get("⊁"), do: [8833] + def get("⪰̸"), do: [10928, 824] + def get("⋡"), do: [8929] + def get("≿̸"), do: [8831, 824] + def get("⊃⃒"), do: [8835, 8402] + def get("⊉"), do: [8841] + def get("≁"), do: [8769] + def get("≄"), do: [8772] + def get("≇"), do: [8775] + def get("≉"), do: [8777] + def get("∤"), do: [8740] + def get("𝒩"), do: [119_977] + def get("Ñ"), do: [209] + def get("Ñ"), do: [209] + def get("Ν"), do: [925] + def get("Œ"), do: [338] + def get("Ó"), do: 
[211] + def get("Ó"), do: [211] + def get("Ô"), do: [212] + def get("Ô"), do: [212] + def get("О"), do: [1054] + def get("Ő"), do: [336] + def get("𝔒"), do: [120_082] + def get("Ò"), do: [210] + def get("Ò"), do: [210] + def get("Ō"), do: [332] + def get("Ω"), do: [937] + def get("Ο"), do: [927] + def get("𝕆"), do: [120_134] + def get("“"), do: [8220] + def get("‘"), do: [8216] + def get("⩔"), do: [10836] + def get("𝒪"), do: [119_978] + def get("Ø"), do: [216] + def get("Ø"), do: [216] + def get("Õ"), do: [213] + def get("Õ"), do: [213] + def get("⨷"), do: [10807] + def get("Ö"), do: [214] + def get("Ö"), do: [214] + def get("‾"), do: [8254] + def get("⏞"), do: [9182] + def get("⎴"), do: [9140] + def get("⏜"), do: [9180] + def get("∂"), do: [8706] + def get("П"), do: [1055] + def get("𝔓"), do: [120_083] + def get("Φ"), do: [934] + def get("Π"), do: [928] + def get("±"), do: [177] + def get("ℌ"), do: [8460] + def get("ℙ"), do: [8473] + def get("⪻"), do: [10939] + def get("≺"), do: [8826] + def get("⪯"), do: [10927] + def get("≼"), do: [8828] + def get("≾"), do: [8830] + def get("″"), do: [8243] + def get("∏"), do: [8719] + def get("∷"), do: [8759] + def get("∝"), do: [8733] + def get("𝒫"), do: [119_979] + def get("Ψ"), do: [936] + def get("""), do: [34] + def get("""), do: [34] + def get("𝔔"), do: [120_084] + def get("ℚ"), do: [8474] + def get("𝒬"), do: [119_980] + def get("⤐"), do: [10512] + def get("®"), do: [174] + def get("®"), do: [174] + def get("Ŕ"), do: [340] + def get("⟫"), do: [10219] + def get("↠"), do: [8608] + def get("⤖"), do: [10518] + def get("Ř"), do: [344] + def get("Ŗ"), do: [342] + def get("Р"), do: [1056] + def get("ℜ"), do: [8476] + def get("∋"), do: [8715] + def get("⇋"), do: [8651] + def get("⥯"), do: [10607] + def get("ℜ"), do: [8476] + def get("Ρ"), do: [929] + def get("⟩"), do: [10217] + def get("→"), do: [8594] + def get("⇥"), do: [8677] + def get("⇄"), do: [8644] + def get("⌉"), do: [8969] + def get("⟧"), do: [10215] + def get("⥝"), do: [10589] + def get("⇂"), do: [8642] + def get("⥕"), do: [10581] + def get("⌋"), do: [8971] + def get("⊢"), do: [8866] + def get("↦"), do: [8614] + def get("⥛"), do: [10587] + def get("⊳"), do: [8883] + def get("⧐"), do: [10704] + def get("⊵"), do: [8885] + def get("⥏"), do: [10575] + def get("⥜"), do: [10588] + def get("↾"), do: [8638] + def get("⥔"), do: [10580] + def get("⇀"), do: [8640] + def get("⥓"), do: [10579] + def get("⇒"), do: [8658] + def get("ℝ"), do: [8477] + def get("⥰"), do: [10608] + def get("⇛"), do: [8667] + def get("ℛ"), do: [8475] + def get("↱"), do: [8625] + def get("⧴"), do: [10740] + def get("Щ"), do: [1065] + def get("Ш"), do: [1064] + def get("Ь"), do: [1068] + def get("Ś"), do: [346] + def get("⪼"), do: [10940] + def get("Š"), do: [352] + def get("Ş"), do: [350] + def get("Ŝ"), do: [348] + def get("С"), do: [1057] + def get("𝔖"), do: [120_086] + def get("↓"), do: [8595] + def get("←"), do: [8592] + def get("→"), do: [8594] + def get("↑"), do: [8593] + def get("Σ"), do: [931] + def get("∘"), do: [8728] + def get("𝕊"), do: [120_138] + def get("√"), do: [8730] + def get("□"), do: [9633] + def get("⊓"), do: [8851] + def get("⊏"), do: [8847] + def get("⊑"), do: [8849] + def get("⊐"), do: [8848] + def get("⊒"), do: [8850] + def get("⊔"), do: [8852] + def get("𝒮"), do: [119_982] + def get("⋆"), do: [8902] + def get("⋐"), do: [8912] + def get("⋐"), do: [8912] + def get("⊆"), do: [8838] + def get("≻"), do: [8827] + def get("⪰"), do: [10928] + def get("≽"), do: [8829] + def get("≿"), do: [8831] + def get("∋"), do: 
[8715] + def get("∑"), do: [8721] + def get("⋑"), do: [8913] + def get("⊃"), do: [8835] + def get("⊇"), do: [8839] + def get("⋑"), do: [8913] + def get("Þ"), do: [222] + def get("Þ"), do: [222] + def get("™"), do: [8482] + def get("Ћ"), do: [1035] + def get("Ц"), do: [1062] + def get(" "), do: [9] + def get("Τ"), do: [932] + def get("Ť"), do: [356] + def get("Ţ"), do: [354] + def get("Т"), do: [1058] + def get("𝔗"), do: [120_087] + def get("∴"), do: [8756] + def get("Θ"), do: [920] + def get("  "), do: [8287, 8202] + def get(" "), do: [8201] + def get("∼"), do: [8764] + def get("≃"), do: [8771] + def get("≅"), do: [8773] + def get("≈"), do: [8776] + def get("𝕋"), do: [120_139] + def get("⃛"), do: [8411] + def get("𝒯"), do: [119_983] + def get("Ŧ"), do: [358] + def get("Ú"), do: [218] + def get("Ú"), do: [218] + def get("↟"), do: [8607] + def get("⥉"), do: [10569] + def get("Ў"), do: [1038] + def get("Ŭ"), do: [364] + def get("Û"), do: [219] + def get("Û"), do: [219] + def get("У"), do: [1059] + def get("Ű"), do: [368] + def get("𝔘"), do: [120_088] + def get("Ù"), do: [217] + def get("Ù"), do: [217] + def get("Ū"), do: [362] + def get("_"), do: [95] + def get("⏟"), do: [9183] + def get("⎵"), do: [9141] + def get("⏝"), do: [9181] + def get("⋃"), do: [8899] + def get("⊎"), do: [8846] + def get("Ų"), do: [370] + def get("𝕌"), do: [120_140] + def get("↑"), do: [8593] + def get("⤒"), do: [10514] + def get("⇅"), do: [8645] + def get("↕"), do: [8597] + def get("⥮"), do: [10606] + def get("⊥"), do: [8869] + def get("↥"), do: [8613] + def get("⇑"), do: [8657] + def get("⇕"), do: [8661] + def get("↖"), do: [8598] + def get("↗"), do: [8599] + def get("ϒ"), do: [978] + def get("Υ"), do: [933] + def get("Ů"), do: [366] + def get("𝒰"), do: [119_984] + def get("Ũ"), do: [360] + def get("Ü"), do: [220] + def get("Ü"), do: [220] + def get("⊫"), do: [8875] + def get("⫫"), do: [10987] + def get("В"), do: [1042] + def get("⊩"), do: [8873] + def get("⫦"), do: [10982] + def get("⋁"), do: [8897] + def get("‖"), do: [8214] + def get("‖"), do: [8214] + def get("∣"), do: [8739] + def get("|"), do: [124] + def get("❘"), do: [10072] + def get("≀"), do: [8768] + def get(" "), do: [8202] + def get("𝔙"), do: [120_089] + def get("𝕍"), do: [120_141] + def get("𝒱"), do: [119_985] + def get("⊪"), do: [8874] + def get("Ŵ"), do: [372] + def get("⋀"), do: [8896] + def get("𝔚"), do: [120_090] + def get("𝕎"), do: [120_142] + def get("𝒲"), do: [119_986] + def get("𝔛"), do: [120_091] + def get("Ξ"), do: [926] + def get("𝕏"), do: [120_143] + def get("𝒳"), do: [119_987] + def get("Я"), do: [1071] + def get("Ї"), do: [1031] + def get("Ю"), do: [1070] + def get("Ý"), do: [221] + def get("Ý"), do: [221] + def get("Ŷ"), do: [374] + def get("Ы"), do: [1067] + def get("𝔜"), do: [120_092] + def get("𝕐"), do: [120_144] + def get("𝒴"), do: [119_988] + def get("Ÿ"), do: [376] + def get("Ж"), do: [1046] + def get("Ź"), do: [377] + def get("Ž"), do: [381] + def get("З"), do: [1047] + def get("Ż"), do: [379] + def get("​"), do: [8203] + def get("Ζ"), do: [918] + def get("ℨ"), do: [8488] + def get("ℤ"), do: [8484] + def get("𝒵"), do: [119_989] + def get("á"), do: [225] + def get("á"), do: [225] + def get("ă"), do: [259] + def get("∾"), do: [8766] + def get("∾̳"), do: [8766, 819] + def get("∿"), do: [8767] + def get("â"), do: [226] + def get("â"), do: [226] + def get("´"), do: [180] + def get("´"), do: [180] + def get("а"), do: [1072] + def get("æ"), do: [230] + def get("æ"), do: [230] + def get("⁡"), do: [8289] + def get("𝔞"), do: [120_094] + def 
get("à"), do: [224] + def get("à"), do: [224] + def get("ℵ"), do: [8501] + def get("ℵ"), do: [8501] + def get("α"), do: [945] + def get("ā"), do: [257] + def get("⨿"), do: [10815] + def get("&"), do: [38] + def get("&"), do: [38] + def get("∧"), do: [8743] + def get("⩕"), do: [10837] + def get("⩜"), do: [10844] + def get("⩘"), do: [10840] + def get("⩚"), do: [10842] + def get("∠"), do: [8736] + def get("⦤"), do: [10660] + def get("∠"), do: [8736] + def get("∡"), do: [8737] + def get("⦨"), do: [10664] + def get("⦩"), do: [10665] + def get("⦪"), do: [10666] + def get("⦫"), do: [10667] + def get("⦬"), do: [10668] + def get("⦭"), do: [10669] + def get("⦮"), do: [10670] + def get("⦯"), do: [10671] + def get("∟"), do: [8735] + def get("⊾"), do: [8894] + def get("⦝"), do: [10653] + def get("∢"), do: [8738] + def get("Å"), do: [197] + def get("⍼"), do: [9084] + def get("ą"), do: [261] + def get("𝕒"), do: [120_146] + def get("≈"), do: [8776] + def get("⩰"), do: [10864] + def get("⩯"), do: [10863] + def get("≊"), do: [8778] + def get("≋"), do: [8779] + def get("'"), do: [39] + def get("≈"), do: [8776] + def get("≊"), do: [8778] + def get("å"), do: [229] + def get("å"), do: [229] + def get("𝒶"), do: [119_990] + def get("*"), do: [42] + def get("≈"), do: [8776] + def get("≍"), do: [8781] + def get("ã"), do: [227] + def get("ã"), do: [227] + def get("ä"), do: [228] + def get("ä"), do: [228] + def get("∳"), do: [8755] + def get("⨑"), do: [10769] + def get("⫭"), do: [10989] + def get("≌"), do: [8780] + def get("϶"), do: [1014] + def get("‵"), do: [8245] + def get("∽"), do: [8765] + def get("⋍"), do: [8909] + def get("⊽"), do: [8893] + def get("⌅"), do: [8965] + def get("⌅"), do: [8965] + def get("⎵"), do: [9141] + def get("⎶"), do: [9142] + def get("≌"), do: [8780] + def get("б"), do: [1073] + def get("„"), do: [8222] + def get("∵"), do: [8757] + def get("∵"), do: [8757] + def get("⦰"), do: [10672] + def get("϶"), do: [1014] + def get("ℬ"), do: [8492] + def get("β"), do: [946] + def get("ℶ"), do: [8502] + def get("≬"), do: [8812] + def get("𝔟"), do: [120_095] + def get("⋂"), do: [8898] + def get("◯"), do: [9711] + def get("⋃"), do: [8899] + def get("⨀"), do: [10752] + def get("⨁"), do: [10753] + def get("⨂"), do: [10754] + def get("⨆"), do: [10758] + def get("★"), do: [9733] + def get("▽"), do: [9661] + def get("△"), do: [9651] + def get("⨄"), do: [10756] + def get("⋁"), do: [8897] + def get("⋀"), do: [8896] + def get("⤍"), do: [10509] + def get("⧫"), do: [10731] + def get("▪"), do: [9642] + def get("▴"), do: [9652] + def get("▾"), do: [9662] + def get("◂"), do: [9666] + def get("▸"), do: [9656] + def get("␣"), do: [9251] + def get("▒"), do: [9618] + def get("░"), do: [9617] + def get("▓"), do: [9619] + def get("█"), do: [9608] + def get("=⃥"), do: [61, 8421] + def get("≡⃥"), do: [8801, 8421] + def get("⌐"), do: [8976] + def get("𝕓"), do: [120_147] + def get("⊥"), do: [8869] + def get("⊥"), do: [8869] + def get("⋈"), do: [8904] + def get("╗"), do: [9559] + def get("╔"), do: [9556] + def get("╖"), do: [9558] + def get("╓"), do: [9555] + def get("═"), do: [9552] + def get("╦"), do: [9574] + def get("╩"), do: [9577] + def get("╤"), do: [9572] + def get("╧"), do: [9575] + def get("╝"), do: [9565] + def get("╚"), do: [9562] + def get("╜"), do: [9564] + def get("╙"), do: [9561] + def get("║"), do: [9553] + def get("╬"), do: [9580] + def get("╣"), do: [9571] + def get("╠"), do: [9568] + def get("╫"), do: [9579] + def get("╢"), do: [9570] + def get("╟"), do: [9567] + def get("⧉"), do: [10697] + def get("╕"), do: 
[9557] + def get("╒"), do: [9554] + def get("┐"), do: [9488] + def get("┌"), do: [9484] + def get("─"), do: [9472] + def get("╥"), do: [9573] + def get("╨"), do: [9576] + def get("┬"), do: [9516] + def get("┴"), do: [9524] + def get("⊟"), do: [8863] + def get("⊞"), do: [8862] + def get("⊠"), do: [8864] + def get("╛"), do: [9563] + def get("╘"), do: [9560] + def get("┘"), do: [9496] + def get("└"), do: [9492] + def get("│"), do: [9474] + def get("╪"), do: [9578] + def get("╡"), do: [9569] + def get("╞"), do: [9566] + def get("┼"), do: [9532] + def get("┤"), do: [9508] + def get("├"), do: [9500] + def get("‵"), do: [8245] + def get("˘"), do: [728] + def get("¦"), do: [166] + def get("¦"), do: [166] + def get("𝒷"), do: [119_991] + def get("⁏"), do: [8271] + def get("∽"), do: [8765] + def get("⋍"), do: [8909] + def get("\"), do: [92] + def get("⧅"), do: [10693] + def get("⟈"), do: [10184] + def get("•"), do: [8226] + def get("•"), do: [8226] + def get("≎"), do: [8782] + def get("⪮"), do: [10926] + def get("≏"), do: [8783] + def get("≏"), do: [8783] + def get("ć"), do: [263] + def get("∩"), do: [8745] + def get("⩄"), do: [10820] + def get("⩉"), do: [10825] + def get("⩋"), do: [10827] + def get("⩇"), do: [10823] + def get("⩀"), do: [10816] + def get("∩︀"), do: [8745, 65024] + def get("⁁"), do: [8257] + def get("ˇ"), do: [711] + def get("⩍"), do: [10829] + def get("č"), do: [269] + def get("ç"), do: [231] + def get("ç"), do: [231] + def get("ĉ"), do: [265] + def get("⩌"), do: [10828] + def get("⩐"), do: [10832] + def get("ċ"), do: [267] + def get("¸"), do: [184] + def get("¸"), do: [184] + def get("⦲"), do: [10674] + def get("¢"), do: [162] + def get("¢"), do: [162] + def get("·"), do: [183] + def get("𝔠"), do: [120_096] + def get("ч"), do: [1095] + def get("✓"), do: [10003] + def get("✓"), do: [10003] + def get("χ"), do: [967] + def get("○"), do: [9675] + def get("⧃"), do: [10691] + def get("ˆ"), do: [710] + def get("≗"), do: [8791] + def get("↺"), do: [8634] + def get("↻"), do: [8635] + def get("®"), do: [174] + def get("Ⓢ"), do: [9416] + def get("⊛"), do: [8859] + def get("⊚"), do: [8858] + def get("⊝"), do: [8861] + def get("≗"), do: [8791] + def get("⨐"), do: [10768] + def get("⫯"), do: [10991] + def get("⧂"), do: [10690] + def get("♣"), do: [9827] + def get("♣"), do: [9827] + def get(":"), do: [58] + def get("≔"), do: [8788] + def get("≔"), do: [8788] + def get(","), do: [44] + def get("@"), do: [64] + def get("∁"), do: [8705] + def get("∘"), do: [8728] + def get("∁"), do: [8705] + def get("ℂ"), do: [8450] + def get("≅"), do: [8773] + def get("⩭"), do: [10861] + def get("∮"), do: [8750] + def get("𝕔"), do: [120_148] + def get("∐"), do: [8720] + def get("©"), do: [169] + def get("©"), do: [169] + def get("℗"), do: [8471] + def get("↵"), do: [8629] + def get("✗"), do: [10007] + def get("𝒸"), do: [119_992] + def get("⫏"), do: [10959] + def get("⫑"), do: [10961] + def get("⫐"), do: [10960] + def get("⫒"), do: [10962] + def get("⋯"), do: [8943] + def get("⤸"), do: [10552] + def get("⤵"), do: [10549] + def get("⋞"), do: [8926] + def get("⋟"), do: [8927] + def get("↶"), do: [8630] + def get("⤽"), do: [10557] + def get("∪"), do: [8746] + def get("⩈"), do: [10824] + def get("⩆"), do: [10822] + def get("⩊"), do: [10826] + def get("⊍"), do: [8845] + def get("⩅"), do: [10821] + def get("∪︀"), do: [8746, 65024] + def get("↷"), do: [8631] + def get("⤼"), do: [10556] + def get("⋞"), do: [8926] + def get("⋟"), do: [8927] + def get("⋎"), do: [8910] + def get("⋏"), do: [8911] + def get("¤"), do: [164] + def 
get("¤"), do: [164] + def get("↶"), do: [8630] + def get("↷"), do: [8631] + def get("⋎"), do: [8910] + def get("⋏"), do: [8911] + def get("∲"), do: [8754] + def get("∱"), do: [8753] + def get("⌭"), do: [9005] + def get("⇓"), do: [8659] + def get("⥥"), do: [10597] + def get("†"), do: [8224] + def get("ℸ"), do: [8504] + def get("↓"), do: [8595] + def get("‐"), do: [8208] + def get("⊣"), do: [8867] + def get("⤏"), do: [10511] + def get("˝"), do: [733] + def get("ď"), do: [271] + def get("д"), do: [1076] + def get("ⅆ"), do: [8518] + def get("‡"), do: [8225] + def get("⇊"), do: [8650] + def get("⩷"), do: [10871] + def get("°"), do: [176] + def get("°"), do: [176] + def get("δ"), do: [948] + def get("⦱"), do: [10673] + def get("⥿"), do: [10623] + def get("𝔡"), do: [120_097] + def get("⇃"), do: [8643] + def get("⇂"), do: [8642] + def get("⋄"), do: [8900] + def get("⋄"), do: [8900] + def get("♦"), do: [9830] + def get("♦"), do: [9830] + def get("¨"), do: [168] + def get("ϝ"), do: [989] + def get("⋲"), do: [8946] + def get("÷"), do: [247] + def get("÷"), do: [247] + def get("÷"), do: [247] + def get("⋇"), do: [8903] + def get("⋇"), do: [8903] + def get("ђ"), do: [1106] + def get("⌞"), do: [8990] + def get("⌍"), do: [8973] + def get("$"), do: [36] + def get("𝕕"), do: [120_149] + def get("˙"), do: [729] + def get("≐"), do: [8784] + def get("≑"), do: [8785] + def get("∸"), do: [8760] + def get("∔"), do: [8724] + def get("⊡"), do: [8865] + def get("⌆"), do: [8966] + def get("↓"), do: [8595] + def get("⇊"), do: [8650] + def get("⇃"), do: [8643] + def get("⇂"), do: [8642] + def get("⤐"), do: [10512] + def get("⌟"), do: [8991] + def get("⌌"), do: [8972] + def get("𝒹"), do: [119_993] + def get("ѕ"), do: [1109] + def get("⧶"), do: [10742] + def get("đ"), do: [273] + def get("⋱"), do: [8945] + def get("▿"), do: [9663] + def get("▾"), do: [9662] + def get("⇵"), do: [8693] + def get("⥯"), do: [10607] + def get("⦦"), do: [10662] + def get("џ"), do: [1119] + def get("⟿"), do: [10239] + def get("⩷"), do: [10871] + def get("≑"), do: [8785] + def get("é"), do: [233] + def get("é"), do: [233] + def get("⩮"), do: [10862] + def get("ě"), do: [283] + def get("≖"), do: [8790] + def get("ê"), do: [234] + def get("ê"), do: [234] + def get("≕"), do: [8789] + def get("э"), do: [1101] + def get("ė"), do: [279] + def get("ⅇ"), do: [8519] + def get("≒"), do: [8786] + def get("𝔢"), do: [120_098] + def get("⪚"), do: [10906] + def get("è"), do: [232] + def get("è"), do: [232] + def get("⪖"), do: [10902] + def get("⪘"), do: [10904] + def get("⪙"), do: [10905] + def get("⏧"), do: [9191] + def get("ℓ"), do: [8467] + def get("⪕"), do: [10901] + def get("⪗"), do: [10903] + def get("ē"), do: [275] + def get("∅"), do: [8709] + def get("∅"), do: [8709] + def get("∅"), do: [8709] + def get(" "), do: [8196] + def get(" "), do: [8197] + def get(" "), do: [8195] + def get("ŋ"), do: [331] + def get(" "), do: [8194] + def get("ę"), do: [281] + def get("𝕖"), do: [120_150] + def get("⋕"), do: [8917] + def get("⧣"), do: [10723] + def get("⩱"), do: [10865] + def get("ε"), do: [949] + def get("ε"), do: [949] + def get("ϵ"), do: [1013] + def get("≖"), do: [8790] + def get("≕"), do: [8789] + def get("≂"), do: [8770] + def get("⪖"), do: [10902] + def get("⪕"), do: [10901] + def get("="), do: [61] + def get("≟"), do: [8799] + def get("≡"), do: [8801] + def get("⩸"), do: [10872] + def get("⧥"), do: [10725] + def get("≓"), do: [8787] + def get("⥱"), do: [10609] + def get("ℯ"), do: [8495] + def get("≐"), do: [8784] + def get("≂"), do: [8770] + def 
get("η"), do: [951] + def get("ð"), do: [240] + def get("ð"), do: [240] + def get("ë"), do: [235] + def get("ë"), do: [235] + def get("€"), do: [8364] + def get("!"), do: [33] + def get("∃"), do: [8707] + def get("ℰ"), do: [8496] + def get("ⅇ"), do: [8519] + def get("≒"), do: [8786] + def get("ф"), do: [1092] + def get("♀"), do: [9792] + def get("ffi"), do: [64259] + def get("ff"), do: [64256] + def get("ffl"), do: [64260] + def get("𝔣"), do: [120_099] + def get("fi"), do: [64257] + def get("fj"), do: [102, 106] + def get("♭"), do: [9837] + def get("fl"), do: [64258] + def get("▱"), do: [9649] + def get("ƒ"), do: [402] + def get("𝕗"), do: [120_151] + def get("∀"), do: [8704] + def get("⋔"), do: [8916] + def get("⫙"), do: [10969] + def get("⨍"), do: [10765] + def get("½"), do: [189] + def get("½"), do: [189] + def get("⅓"), do: [8531] + def get("¼"), do: [188] + def get("¼"), do: [188] + def get("⅕"), do: [8533] + def get("⅙"), do: [8537] + def get("⅛"), do: [8539] + def get("⅔"), do: [8532] + def get("⅖"), do: [8534] + def get("¾"), do: [190] + def get("¾"), do: [190] + def get("⅗"), do: [8535] + def get("⅜"), do: [8540] + def get("⅘"), do: [8536] + def get("⅚"), do: [8538] + def get("⅝"), do: [8541] + def get("⅞"), do: [8542] + def get("⁄"), do: [8260] + def get("⌢"), do: [8994] + def get("𝒻"), do: [119_995] + def get("≧"), do: [8807] + def get("⪌"), do: [10892] + def get("ǵ"), do: [501] + def get("γ"), do: [947] + def get("ϝ"), do: [989] + def get("⪆"), do: [10886] + def get("ğ"), do: [287] + def get("ĝ"), do: [285] + def get("г"), do: [1075] + def get("ġ"), do: [289] + def get("≥"), do: [8805] + def get("⋛"), do: [8923] + def get("≥"), do: [8805] + def get("≧"), do: [8807] + def get("⩾"), do: [10878] + def get("⩾"), do: [10878] + def get("⪩"), do: [10921] + def get("⪀"), do: [10880] + def get("⪂"), do: [10882] + def get("⪄"), do: [10884] + def get("⋛︀"), do: [8923, 65024] + def get("⪔"), do: [10900] + def get("𝔤"), do: [120_100] + def get("≫"), do: [8811] + def get("⋙"), do: [8921] + def get("ℷ"), do: [8503] + def get("ѓ"), do: [1107] + def get("≷"), do: [8823] + def get("⪒"), do: [10898] + def get("⪥"), do: [10917] + def get("⪤"), do: [10916] + def get("≩"), do: [8809] + def get("⪊"), do: [10890] + def get("⪊"), do: [10890] + def get("⪈"), do: [10888] + def get("⪈"), do: [10888] + def get("≩"), do: [8809] + def get("⋧"), do: [8935] + def get("𝕘"), do: [120_152] + def get("`"), do: [96] + def get("ℊ"), do: [8458] + def get("≳"), do: [8819] + def get("⪎"), do: [10894] + def get("⪐"), do: [10896] + def get(">"), do: [62] + def get(">"), do: [62] + def get("⪧"), do: [10919] + def get("⩺"), do: [10874] + def get("⋗"), do: [8919] + def get("⦕"), do: [10645] + def get("⩼"), do: [10876] + def get("⪆"), do: [10886] + def get("⥸"), do: [10616] + def get("⋗"), do: [8919] + def get("⋛"), do: [8923] + def get("⪌"), do: [10892] + def get("≷"), do: [8823] + def get("≳"), do: [8819] + def get("≩︀"), do: [8809, 65024] + def get("≩︀"), do: [8809, 65024] + def get("⇔"), do: [8660] + def get(" "), do: [8202] + def get("½"), do: [189] + def get("ℋ"), do: [8459] + def get("ъ"), do: [1098] + def get("↔"), do: [8596] + def get("⥈"), do: [10568] + def get("↭"), do: [8621] + def get("ℏ"), do: [8463] + def get("ĥ"), do: [293] + def get("♥"), do: [9829] + def get("♥"), do: [9829] + def get("…"), do: [8230] + def get("⊹"), do: [8889] + def get("𝔥"), do: [120_101] + def get("⤥"), do: [10533] + def get("⤦"), do: [10534] + def get("⇿"), do: [8703] + def get("∻"), do: [8763] + def get("↩"), do: [8617] + def get("↪"), 
do: [8618] + def get("𝕙"), do: [120_153] + def get("―"), do: [8213] + def get("𝒽"), do: [119_997] + def get("ℏ"), do: [8463] + def get("ħ"), do: [295] + def get("⁃"), do: [8259] + def get("‐"), do: [8208] + def get("í"), do: [237] + def get("í"), do: [237] + def get("⁣"), do: [8291] + def get("î"), do: [238] + def get("î"), do: [238] + def get("и"), do: [1080] + def get("е"), do: [1077] + def get("¡"), do: [161] + def get("¡"), do: [161] + def get("⇔"), do: [8660] + def get("𝔦"), do: [120_102] + def get("ì"), do: [236] + def get("ì"), do: [236] + def get("ⅈ"), do: [8520] + def get("⨌"), do: [10764] + def get("∭"), do: [8749] + def get("⧜"), do: [10716] + def get("℩"), do: [8489] + def get("ij"), do: [307] + def get("ī"), do: [299] + def get("ℑ"), do: [8465] + def get("ℐ"), do: [8464] + def get("ℑ"), do: [8465] + def get("ı"), do: [305] + def get("⊷"), do: [8887] + def get("Ƶ"), do: [437] + def get("∈"), do: [8712] + def get("℅"), do: [8453] + def get("∞"), do: [8734] + def get("⧝"), do: [10717] + def get("ı"), do: [305] + def get("∫"), do: [8747] + def get("⊺"), do: [8890] + def get("ℤ"), do: [8484] + def get("⊺"), do: [8890] + def get("⨗"), do: [10775] + def get("⨼"), do: [10812] + def get("ё"), do: [1105] + def get("į"), do: [303] + def get("𝕚"), do: [120_154] + def get("ι"), do: [953] + def get("⨼"), do: [10812] + def get("¿"), do: [191] + def get("¿"), do: [191] + def get("𝒾"), do: [119_998] + def get("∈"), do: [8712] + def get("⋹"), do: [8953] + def get("⋵"), do: [8949] + def get("⋴"), do: [8948] + def get("⋳"), do: [8947] + def get("∈"), do: [8712] + def get("⁢"), do: [8290] + def get("ĩ"), do: [297] + def get("і"), do: [1110] + def get("ï"), do: [239] + def get("ï"), do: [239] + def get("ĵ"), do: [309] + def get("й"), do: [1081] + def get("𝔧"), do: [120_103] + def get("ȷ"), do: [567] + def get("𝕛"), do: [120_155] + def get("𝒿"), do: [119_999] + def get("ј"), do: [1112] + def get("є"), do: [1108] + def get("κ"), do: [954] + def get("ϰ"), do: [1008] + def get("ķ"), do: [311] + def get("к"), do: [1082] + def get("𝔨"), do: [120_104] + def get("ĸ"), do: [312] + def get("х"), do: [1093] + def get("ќ"), do: [1116] + def get("𝕜"), do: [120_156] + def get("𝓀"), do: [120_000] + def get("⇚"), do: [8666] + def get("⇐"), do: [8656] + def get("⤛"), do: [10523] + def get("⤎"), do: [10510] + def get("≦"), do: [8806] + def get("⪋"), do: [10891] + def get("⥢"), do: [10594] + def get("ĺ"), do: [314] + def get("⦴"), do: [10676] + def get("ℒ"), do: [8466] + def get("λ"), do: [955] + def get("⟨"), do: [10216] + def get("⦑"), do: [10641] + def get("⟨"), do: [10216] + def get("⪅"), do: [10885] + def get("«"), do: [171] + def get("«"), do: [171] + def get("←"), do: [8592] + def get("⇤"), do: [8676] + def get("⤟"), do: [10527] + def get("⤝"), do: [10525] + def get("↩"), do: [8617] + def get("↫"), do: [8619] + def get("⤹"), do: [10553] + def get("⥳"), do: [10611] + def get("↢"), do: [8610] + def get("⪫"), do: [10923] + def get("⤙"), do: [10521] + def get("⪭"), do: [10925] + def get("⪭︀"), do: [10925, 65024] + def get("⤌"), do: [10508] + def get("❲"), do: [10098] + def get("{"), do: [123] + def get("["), do: [91] + def get("⦋"), do: [10635] + def get("⦏"), do: [10639] + def get("⦍"), do: [10637] + def get("ľ"), do: [318] + def get("ļ"), do: [316] + def get("⌈"), do: [8968] + def get("{"), do: [123] + def get("л"), do: [1083] + def get("⤶"), do: [10550] + def get("“"), do: [8220] + def get("„"), do: [8222] + def get("⥧"), do: [10599] + def get("⥋"), do: [10571] + def get("↲"), do: [8626] + def get("≤"), do: 
[8804] + def get("←"), do: [8592] + def get("↢"), do: [8610] + def get("↽"), do: [8637] + def get("↼"), do: [8636] + def get("⇇"), do: [8647] + def get("↔"), do: [8596] + def get("⇆"), do: [8646] + def get("⇋"), do: [8651] + def get("↭"), do: [8621] + def get("⋋"), do: [8907] + def get("⋚"), do: [8922] + def get("≤"), do: [8804] + def get("≦"), do: [8806] + def get("⩽"), do: [10877] + def get("⩽"), do: [10877] + def get("⪨"), do: [10920] + def get("⩿"), do: [10879] + def get("⪁"), do: [10881] + def get("⪃"), do: [10883] + def get("⋚︀"), do: [8922, 65024] + def get("⪓"), do: [10899] + def get("⪅"), do: [10885] + def get("⋖"), do: [8918] + def get("⋚"), do: [8922] + def get("⪋"), do: [10891] + def get("≶"), do: [8822] + def get("≲"), do: [8818] + def get("⥼"), do: [10620] + def get("⌊"), do: [8970] + def get("𝔩"), do: [120_105] + def get("≶"), do: [8822] + def get("⪑"), do: [10897] + def get("↽"), do: [8637] + def get("↼"), do: [8636] + def get("⥪"), do: [10602] + def get("▄"), do: [9604] + def get("љ"), do: [1113] + def get("≪"), do: [8810] + def get("⇇"), do: [8647] + def get("⌞"), do: [8990] + def get("⥫"), do: [10603] + def get("◺"), do: [9722] + def get("ŀ"), do: [320] + def get("⎰"), do: [9136] + def get("⎰"), do: [9136] + def get("≨"), do: [8808] + def get("⪉"), do: [10889] + def get("⪉"), do: [10889] + def get("⪇"), do: [10887] + def get("⪇"), do: [10887] + def get("≨"), do: [8808] + def get("⋦"), do: [8934] + def get("⟬"), do: [10220] + def get("⇽"), do: [8701] + def get("⟦"), do: [10214] + def get("⟵"), do: [10229] + def get("⟷"), do: [10231] + def get("⟼"), do: [10236] + def get("⟶"), do: [10230] + def get("↫"), do: [8619] + def get("↬"), do: [8620] + def get("⦅"), do: [10629] + def get("𝕝"), do: [120_157] + def get("⨭"), do: [10797] + def get("⨴"), do: [10804] + def get("∗"), do: [8727] + def get("_"), do: [95] + def get("◊"), do: [9674] + def get("◊"), do: [9674] + def get("⧫"), do: [10731] + def get("("), do: [40] + def get("⦓"), do: [10643] + def get("⇆"), do: [8646] + def get("⌟"), do: [8991] + def get("⇋"), do: [8651] + def get("⥭"), do: [10605] + def get("‎"), do: [8206] + def get("⊿"), do: [8895] + def get("‹"), do: [8249] + def get("𝓁"), do: [120_001] + def get("↰"), do: [8624] + def get("≲"), do: [8818] + def get("⪍"), do: [10893] + def get("⪏"), do: [10895] + def get("["), do: [91] + def get("‘"), do: [8216] + def get("‚"), do: [8218] + def get("ł"), do: [322] + def get("<"), do: [60] + def get("<"), do: [60] + def get("⪦"), do: [10918] + def get("⩹"), do: [10873] + def get("⋖"), do: [8918] + def get("⋋"), do: [8907] + def get("⋉"), do: [8905] + def get("⥶"), do: [10614] + def get("⩻"), do: [10875] + def get("⦖"), do: [10646] + def get("◃"), do: [9667] + def get("⊴"), do: [8884] + def get("◂"), do: [9666] + def get("⥊"), do: [10570] + def get("⥦"), do: [10598] + def get("≨︀"), do: [8808, 65024] + def get("≨︀"), do: [8808, 65024] + def get("∺"), do: [8762] + def get("¯"), do: [175] + def get("¯"), do: [175] + def get("♂"), do: [9794] + def get("✠"), do: [10016] + def get("✠"), do: [10016] + def get("↦"), do: [8614] + def get("↦"), do: [8614] + def get("↧"), do: [8615] + def get("↤"), do: [8612] + def get("↥"), do: [8613] + def get("▮"), do: [9646] + def get("⨩"), do: [10793] + def get("м"), do: [1084] + def get("—"), do: [8212] + def get("∡"), do: [8737] + def get("𝔪"), do: [120_106] + def get("℧"), do: [8487] + def get("µ"), do: [181] + def get("µ"), do: [181] + def get("∣"), do: [8739] + def get("*"), do: [42] + def get("⫰"), do: [10992] + def get("·"), do: [183] + def 
get("·"), do: [183] + def get("−"), do: [8722] + def get("⊟"), do: [8863] + def get("∸"), do: [8760] + def get("⨪"), do: [10794] + def get("⫛"), do: [10971] + def get("…"), do: [8230] + def get("∓"), do: [8723] + def get("⊧"), do: [8871] + def get("𝕞"), do: [120_158] + def get("∓"), do: [8723] + def get("𝓂"), do: [120_002] + def get("∾"), do: [8766] + def get("μ"), do: [956] + def get("⊸"), do: [8888] + def get("⊸"), do: [8888] + def get("⋙̸"), do: [8921, 824] + def get("≫⃒"), do: [8811, 8402] + def get("≫̸"), do: [8811, 824] + def get("⇍"), do: [8653] + def get("⇎"), do: [8654] + def get("⋘̸"), do: [8920, 824] + def get("≪⃒"), do: [8810, 8402] + def get("≪̸"), do: [8810, 824] + def get("⇏"), do: [8655] + def get("⊯"), do: [8879] + def get("⊮"), do: [8878] + def get("∇"), do: [8711] + def get("ń"), do: [324] + def get("∠⃒"), do: [8736, 8402] + def get("≉"), do: [8777] + def get("⩰̸"), do: [10864, 824] + def get("≋̸"), do: [8779, 824] + def get("ʼn"), do: [329] + def get("≉"), do: [8777] + def get("♮"), do: [9838] + def get("♮"), do: [9838] + def get("ℕ"), do: [8469] + def get(" "), do: [160] + def get(" "), do: [160] + def get("≎̸"), do: [8782, 824] + def get("≏̸"), do: [8783, 824] + def get("⩃"), do: [10819] + def get("ň"), do: [328] + def get("ņ"), do: [326] + def get("≇"), do: [8775] + def get("⩭̸"), do: [10861, 824] + def get("⩂"), do: [10818] + def get("н"), do: [1085] + def get("–"), do: [8211] + def get("≠"), do: [8800] + def get("⇗"), do: [8663] + def get("⤤"), do: [10532] + def get("↗"), do: [8599] + def get("↗"), do: [8599] + def get("≐̸"), do: [8784, 824] + def get("≢"), do: [8802] + def get("⤨"), do: [10536] + def get("≂̸"), do: [8770, 824] + def get("∄"), do: [8708] + def get("∄"), do: [8708] + def get("𝔫"), do: [120_107] + def get("≧̸"), do: [8807, 824] + def get("≱"), do: [8817] + def get("≱"), do: [8817] + def get("≧̸"), do: [8807, 824] + def get("⩾̸"), do: [10878, 824] + def get("⩾̸"), do: [10878, 824] + def get("≵"), do: [8821] + def get("≯"), do: [8815] + def get("≯"), do: [8815] + def get("⇎"), do: [8654] + def get("↮"), do: [8622] + def get("⫲"), do: [10994] + def get("∋"), do: [8715] + def get("⋼"), do: [8956] + def get("⋺"), do: [8954] + def get("∋"), do: [8715] + def get("њ"), do: [1114] + def get("⇍"), do: [8653] + def get("≦̸"), do: [8806, 824] + def get("↚"), do: [8602] + def get("‥"), do: [8229] + def get("≰"), do: [8816] + def get("↚"), do: [8602] + def get("↮"), do: [8622] + def get("≰"), do: [8816] + def get("≦̸"), do: [8806, 824] + def get("⩽̸"), do: [10877, 824] + def get("⩽̸"), do: [10877, 824] + def get("≮"), do: [8814] + def get("≴"), do: [8820] + def get("≮"), do: [8814] + def get("⋪"), do: [8938] + def get("⋬"), do: [8940] + def get("∤"), do: [8740] + def get("𝕟"), do: [120_159] + def get("¬"), do: [172] + def get("¬"), do: [172] + def get("∉"), do: [8713] + def get("⋹̸"), do: [8953, 824] + def get("⋵̸"), do: [8949, 824] + def get("∉"), do: [8713] + def get("⋷"), do: [8951] + def get("⋶"), do: [8950] + def get("∌"), do: [8716] + def get("∌"), do: [8716] + def get("⋾"), do: [8958] + def get("⋽"), do: [8957] + def get("∦"), do: [8742] + def get("∦"), do: [8742] + def get("⫽⃥"), do: [11005, 8421] + def get("∂̸"), do: [8706, 824] + def get("⨔"), do: [10772] + def get("⊀"), do: [8832] + def get("⋠"), do: [8928] + def get("⪯̸"), do: [10927, 824] + def get("⊀"), do: [8832] + def get("⪯̸"), do: [10927, 824] + def get("⇏"), do: [8655] + def get("↛"), do: [8603] + def get("⤳̸"), do: [10547, 824] + def get("↝̸"), do: [8605, 824] + def get("↛"), do: [8603] + def 
get("⋫"), do: [8939] + def get("⋭"), do: [8941] + def get("⊁"), do: [8833] + def get("⋡"), do: [8929] + def get("⪰̸"), do: [10928, 824] + def get("𝓃"), do: [120_003] + def get("∤"), do: [8740] + def get("∦"), do: [8742] + def get("≁"), do: [8769] + def get("≄"), do: [8772] + def get("≄"), do: [8772] + def get("∤"), do: [8740] + def get("∦"), do: [8742] + def get("⋢"), do: [8930] + def get("⋣"), do: [8931] + def get("⊄"), do: [8836] + def get("⫅̸"), do: [10949, 824] + def get("⊈"), do: [8840] + def get("⊂⃒"), do: [8834, 8402] + def get("⊈"), do: [8840] + def get("⫅̸"), do: [10949, 824] + def get("⊁"), do: [8833] + def get("⪰̸"), do: [10928, 824] + def get("⊅"), do: [8837] + def get("⫆̸"), do: [10950, 824] + def get("⊉"), do: [8841] + def get("⊃⃒"), do: [8835, 8402] + def get("⊉"), do: [8841] + def get("⫆̸"), do: [10950, 824] + def get("≹"), do: [8825] + def get("ñ"), do: [241] + def get("ñ"), do: [241] + def get("≸"), do: [8824] + def get("⋪"), do: [8938] + def get("⋬"), do: [8940] + def get("⋫"), do: [8939] + def get("⋭"), do: [8941] + def get("ν"), do: [957] + def get("#"), do: [35] + def get("№"), do: [8470] + def get(" "), do: [8199] + def get("⊭"), do: [8877] + def get("⤄"), do: [10500] + def get("≍⃒"), do: [8781, 8402] + def get("⊬"), do: [8876] + def get("≥⃒"), do: [8805, 8402] + def get(">⃒"), do: [62, 8402] + def get("⧞"), do: [10718] + def get("⤂"), do: [10498] + def get("≤⃒"), do: [8804, 8402] + def get("<⃒"), do: [60, 8402] + def get("⊴⃒"), do: [8884, 8402] + def get("⤃"), do: [10499] + def get("⊵⃒"), do: [8885, 8402] + def get("∼⃒"), do: [8764, 8402] + def get("⇖"), do: [8662] + def get("⤣"), do: [10531] + def get("↖"), do: [8598] + def get("↖"), do: [8598] + def get("⤧"), do: [10535] + def get("Ⓢ"), do: [9416] + def get("ó"), do: [243] + def get("ó"), do: [243] + def get("⊛"), do: [8859] + def get("⊚"), do: [8858] + def get("ô"), do: [244] + def get("ô"), do: [244] + def get("о"), do: [1086] + def get("⊝"), do: [8861] + def get("ő"), do: [337] + def get("⨸"), do: [10808] + def get("⊙"), do: [8857] + def get("⦼"), do: [10684] + def get("œ"), do: [339] + def get("⦿"), do: [10687] + def get("𝔬"), do: [120_108] + def get("˛"), do: [731] + def get("ò"), do: [242] + def get("ò"), do: [242] + def get("⧁"), do: [10689] + def get("⦵"), do: [10677] + def get("Ω"), do: [937] + def get("∮"), do: [8750] + def get("↺"), do: [8634] + def get("⦾"), do: [10686] + def get("⦻"), do: [10683] + def get("‾"), do: [8254] + def get("⧀"), do: [10688] + def get("ō"), do: [333] + def get("ω"), do: [969] + def get("ο"), do: [959] + def get("⦶"), do: [10678] + def get("⊖"), do: [8854] + def get("𝕠"), do: [120_160] + def get("⦷"), do: [10679] + def get("⦹"), do: [10681] + def get("⊕"), do: [8853] + def get("∨"), do: [8744] + def get("↻"), do: [8635] + def get("⩝"), do: [10845] + def get("ℴ"), do: [8500] + def get("ℴ"), do: [8500] + def get("ª"), do: [170] + def get("ª"), do: [170] + def get("º"), do: [186] + def get("º"), do: [186] + def get("⊶"), do: [8886] + def get("⩖"), do: [10838] + def get("⩗"), do: [10839] + def get("⩛"), do: [10843] + def get("ℴ"), do: [8500] + def get("ø"), do: [248] + def get("ø"), do: [248] + def get("⊘"), do: [8856] + def get("õ"), do: [245] + def get("õ"), do: [245] + def get("⊗"), do: [8855] + def get("⨶"), do: [10806] + def get("ö"), do: [246] + def get("ö"), do: [246] + def get("⌽"), do: [9021] + def get("∥"), do: [8741] + def get("¶"), do: [182] + def get("¶"), do: [182] + def get("∥"), do: [8741] + def get("⫳"), do: [10995] + def get("⫽"), do: [11005] + def get("∂"), do: 
[8706] + def get("п"), do: [1087] + def get("%"), do: [37] + def get("."), do: [46] + def get("‰"), do: [8240] + def get("⊥"), do: [8869] + def get("‱"), do: [8241] + def get("𝔭"), do: [120_109] + def get("φ"), do: [966] + def get("ϕ"), do: [981] + def get("ℳ"), do: [8499] + def get("☎"), do: [9742] + def get("π"), do: [960] + def get("⋔"), do: [8916] + def get("ϖ"), do: [982] + def get("ℏ"), do: [8463] + def get("ℎ"), do: [8462] + def get("ℏ"), do: [8463] + def get("+"), do: [43] + def get("⨣"), do: [10787] + def get("⊞"), do: [8862] + def get("⨢"), do: [10786] + def get("∔"), do: [8724] + def get("⨥"), do: [10789] + def get("⩲"), do: [10866] + def get("±"), do: [177] + def get("±"), do: [177] + def get("⨦"), do: [10790] + def get("⨧"), do: [10791] + def get("±"), do: [177] + def get("⨕"), do: [10773] + def get("𝕡"), do: [120_161] + def get("£"), do: [163] + def get("£"), do: [163] + def get("≺"), do: [8826] + def get("⪳"), do: [10931] + def get("⪷"), do: [10935] + def get("≼"), do: [8828] + def get("⪯"), do: [10927] + def get("≺"), do: [8826] + def get("⪷"), do: [10935] + def get("≼"), do: [8828] + def get("⪯"), do: [10927] + def get("⪹"), do: [10937] + def get("⪵"), do: [10933] + def get("⋨"), do: [8936] + def get("≾"), do: [8830] + def get("′"), do: [8242] + def get("ℙ"), do: [8473] + def get("⪵"), do: [10933] + def get("⪹"), do: [10937] + def get("⋨"), do: [8936] + def get("∏"), do: [8719] + def get("⌮"), do: [9006] + def get("⌒"), do: [8978] + def get("⌓"), do: [8979] + def get("∝"), do: [8733] + def get("∝"), do: [8733] + def get("≾"), do: [8830] + def get("⊰"), do: [8880] + def get("𝓅"), do: [120_005] + def get("ψ"), do: [968] + def get(" "), do: [8200] + def get("𝔮"), do: [120_110] + def get("⨌"), do: [10764] + def get("𝕢"), do: [120_162] + def get("⁗"), do: [8279] + def get("𝓆"), do: [120_006] + def get("ℍ"), do: [8461] + def get("⨖"), do: [10774] + def get("?"), do: [63] + def get("≟"), do: [8799] + def get("""), do: [34] + def get("""), do: [34] + def get("⇛"), do: [8667] + def get("⇒"), do: [8658] + def get("⤜"), do: [10524] + def get("⤏"), do: [10511] + def get("⥤"), do: [10596] + def get("∽̱"), do: [8765, 817] + def get("ŕ"), do: [341] + def get("√"), do: [8730] + def get("⦳"), do: [10675] + def get("⟩"), do: [10217] + def get("⦒"), do: [10642] + def get("⦥"), do: [10661] + def get("⟩"), do: [10217] + def get("»"), do: [187] + def get("»"), do: [187] + def get("→"), do: [8594] + def get("⥵"), do: [10613] + def get("⇥"), do: [8677] + def get("⤠"), do: [10528] + def get("⤳"), do: [10547] + def get("⤞"), do: [10526] + def get("↪"), do: [8618] + def get("↬"), do: [8620] + def get("⥅"), do: [10565] + def get("⥴"), do: [10612] + def get("↣"), do: [8611] + def get("↝"), do: [8605] + def get("⤚"), do: [10522] + def get("∶"), do: [8758] + def get("ℚ"), do: [8474] + def get("⤍"), do: [10509] + def get("❳"), do: [10099] + def get("}"), do: [125] + def get("]"), do: [93] + def get("⦌"), do: [10636] + def get("⦎"), do: [10638] + def get("⦐"), do: [10640] + def get("ř"), do: [345] + def get("ŗ"), do: [343] + def get("⌉"), do: [8969] + def get("}"), do: [125] + def get("р"), do: [1088] + def get("⤷"), do: [10551] + def get("⥩"), do: [10601] + def get("”"), do: [8221] + def get("”"), do: [8221] + def get("↳"), do: [8627] + def get("ℜ"), do: [8476] + def get("ℛ"), do: [8475] + def get("ℜ"), do: [8476] + def get("ℝ"), do: [8477] + def get("▭"), do: [9645] + def get("®"), do: [174] + def get("®"), do: [174] + def get("⥽"), do: [10621] + def get("⌋"), do: [8971] + def get("𝔯"), do: [120_111] + 
def get("⇁"), do: [8641] + def get("⇀"), do: [8640] + def get("⥬"), do: [10604] + def get("ρ"), do: [961] + def get("ϱ"), do: [1009] + def get("→"), do: [8594] + def get("↣"), do: [8611] + def get("⇁"), do: [8641] + def get("⇀"), do: [8640] + def get("⇄"), do: [8644] + def get("⇌"), do: [8652] + def get("⇉"), do: [8649] + def get("↝"), do: [8605] + def get("⋌"), do: [8908] + def get("˚"), do: [730] + def get("≓"), do: [8787] + def get("⇄"), do: [8644] + def get("⇌"), do: [8652] + def get("‏"), do: [8207] + def get("⎱"), do: [9137] + def get("⎱"), do: [9137] + def get("⫮"), do: [10990] + def get("⟭"), do: [10221] + def get("⇾"), do: [8702] + def get("⟧"), do: [10215] + def get("⦆"), do: [10630] + def get("𝕣"), do: [120_163] + def get("⨮"), do: [10798] + def get("⨵"), do: [10805] + def get(")"), do: [41] + def get("⦔"), do: [10644] + def get("⨒"), do: [10770] + def get("⇉"), do: [8649] + def get("›"), do: [8250] + def get("𝓇"), do: [120_007] + def get("↱"), do: [8625] + def get("]"), do: [93] + def get("’"), do: [8217] + def get("’"), do: [8217] + def get("⋌"), do: [8908] + def get("⋊"), do: [8906] + def get("▹"), do: [9657] + def get("⊵"), do: [8885] + def get("▸"), do: [9656] + def get("⧎"), do: [10702] + def get("⥨"), do: [10600] + def get("℞"), do: [8478] + def get("ś"), do: [347] + def get("‚"), do: [8218] + def get("≻"), do: [8827] + def get("⪴"), do: [10932] + def get("⪸"), do: [10936] + def get("š"), do: [353] + def get("≽"), do: [8829] + def get("⪰"), do: [10928] + def get("ş"), do: [351] + def get("ŝ"), do: [349] + def get("⪶"), do: [10934] + def get("⪺"), do: [10938] + def get("⋩"), do: [8937] + def get("⨓"), do: [10771] + def get("≿"), do: [8831] + def get("с"), do: [1089] + def get("⋅"), do: [8901] + def get("⊡"), do: [8865] + def get("⩦"), do: [10854] + def get("⇘"), do: [8664] + def get("⤥"), do: [10533] + def get("↘"), do: [8600] + def get("↘"), do: [8600] + def get("§"), do: [167] + def get("§"), do: [167] + def get(";"), do: [59] + def get("⤩"), do: [10537] + def get("∖"), do: [8726] + def get("∖"), do: [8726] + def get("✶"), do: [10038] + def get("𝔰"), do: [120_112] + def get("⌢"), do: [8994] + def get("♯"), do: [9839] + def get("щ"), do: [1097] + def get("ш"), do: [1096] + def get("∣"), do: [8739] + def get("∥"), do: [8741] + def get("­"), do: [173] + def get("­"), do: [173] + def get("σ"), do: [963] + def get("ς"), do: [962] + def get("ς"), do: [962] + def get("∼"), do: [8764] + def get("⩪"), do: [10858] + def get("≃"), do: [8771] + def get("≃"), do: [8771] + def get("⪞"), do: [10910] + def get("⪠"), do: [10912] + def get("⪝"), do: [10909] + def get("⪟"), do: [10911] + def get("≆"), do: [8774] + def get("⨤"), do: [10788] + def get("⥲"), do: [10610] + def get("←"), do: [8592] + def get("∖"), do: [8726] + def get("⨳"), do: [10803] + def get("⧤"), do: [10724] + def get("∣"), do: [8739] + def get("⌣"), do: [8995] + def get("⪪"), do: [10922] + def get("⪬"), do: [10924] + def get("⪬︀"), do: [10924, 65024] + def get("ь"), do: [1100] + def get("/"), do: [47] + def get("⧄"), do: [10692] + def get("⌿"), do: [9023] + def get("𝕤"), do: [120_164] + def get("♠"), do: [9824] + def get("♠"), do: [9824] + def get("∥"), do: [8741] + def get("⊓"), do: [8851] + def get("⊓︀"), do: [8851, 65024] + def get("⊔"), do: [8852] + def get("⊔︀"), do: [8852, 65024] + def get("⊏"), do: [8847] + def get("⊑"), do: [8849] + def get("⊏"), do: [8847] + def get("⊑"), do: [8849] + def get("⊐"), do: [8848] + def get("⊒"), do: [8850] + def get("⊐"), do: [8848] + def get("⊒"), do: [8850] + def get("□"), do: 
[9633] + def get("□"), do: [9633] + def get("▪"), do: [9642] + def get("▪"), do: [9642] + def get("→"), do: [8594] + def get("𝓈"), do: [120_008] + def get("∖"), do: [8726] + def get("⌣"), do: [8995] + def get("⋆"), do: [8902] + def get("☆"), do: [9734] + def get("★"), do: [9733] + def get("ϵ"), do: [1013] + def get("ϕ"), do: [981] + def get("¯"), do: [175] + def get("⊂"), do: [8834] + def get("⫅"), do: [10949] + def get("⪽"), do: [10941] + def get("⊆"), do: [8838] + def get("⫃"), do: [10947] + def get("⫁"), do: [10945] + def get("⫋"), do: [10955] + def get("⊊"), do: [8842] + def get("⪿"), do: [10943] + def get("⥹"), do: [10617] + def get("⊂"), do: [8834] + def get("⊆"), do: [8838] + def get("⫅"), do: [10949] + def get("⊊"), do: [8842] + def get("⫋"), do: [10955] + def get("⫇"), do: [10951] + def get("⫕"), do: [10965] + def get("⫓"), do: [10963] + def get("≻"), do: [8827] + def get("⪸"), do: [10936] + def get("≽"), do: [8829] + def get("⪰"), do: [10928] + def get("⪺"), do: [10938] + def get("⪶"), do: [10934] + def get("⋩"), do: [8937] + def get("≿"), do: [8831] + def get("∑"), do: [8721] + def get("♪"), do: [9834] + def get("¹"), do: [185] + def get("¹"), do: [185] + def get("²"), do: [178] + def get("²"), do: [178] + def get("³"), do: [179] + def get("³"), do: [179] + def get("⊃"), do: [8835] + def get("⫆"), do: [10950] + def get("⪾"), do: [10942] + def get("⫘"), do: [10968] + def get("⊇"), do: [8839] + def get("⫄"), do: [10948] + def get("⟉"), do: [10185] + def get("⫗"), do: [10967] + def get("⥻"), do: [10619] + def get("⫂"), do: [10946] + def get("⫌"), do: [10956] + def get("⊋"), do: [8843] + def get("⫀"), do: [10944] + def get("⊃"), do: [8835] + def get("⊇"), do: [8839] + def get("⫆"), do: [10950] + def get("⊋"), do: [8843] + def get("⫌"), do: [10956] + def get("⫈"), do: [10952] + def get("⫔"), do: [10964] + def get("⫖"), do: [10966] + def get("⇙"), do: [8665] + def get("⤦"), do: [10534] + def get("↙"), do: [8601] + def get("↙"), do: [8601] + def get("⤪"), do: [10538] + def get("ß"), do: [223] + def get("ß"), do: [223] + def get("⌖"), do: [8982] + def get("τ"), do: [964] + def get("⎴"), do: [9140] + def get("ť"), do: [357] + def get("ţ"), do: [355] + def get("т"), do: [1090] + def get("⃛"), do: [8411] + def get("⌕"), do: [8981] + def get("𝔱"), do: [120_113] + def get("∴"), do: [8756] + def get("∴"), do: [8756] + def get("θ"), do: [952] + def get("ϑ"), do: [977] + def get("ϑ"), do: [977] + def get("≈"), do: [8776] + def get("∼"), do: [8764] + def get(" "), do: [8201] + def get("≈"), do: [8776] + def get("∼"), do: [8764] + def get("þ"), do: [254] + def get("þ"), do: [254] + def get("˜"), do: [732] + def get("×"), do: [215] + def get("×"), do: [215] + def get("⊠"), do: [8864] + def get("⨱"), do: [10801] + def get("⨰"), do: [10800] + def get("∭"), do: [8749] + def get("⤨"), do: [10536] + def get("⊤"), do: [8868] + def get("⌶"), do: [9014] + def get("⫱"), do: [10993] + def get("𝕥"), do: [120_165] + def get("⫚"), do: [10970] + def get("⤩"), do: [10537] + def get("‴"), do: [8244] + def get("™"), do: [8482] + def get("▵"), do: [9653] + def get("▿"), do: [9663] + def get("◃"), do: [9667] + def get("⊴"), do: [8884] + def get("≜"), do: [8796] + def get("▹"), do: [9657] + def get("⊵"), do: [8885] + def get("◬"), do: [9708] + def get("≜"), do: [8796] + def get("⨺"), do: [10810] + def get("⨹"), do: [10809] + def get("⧍"), do: [10701] + def get("⨻"), do: [10811] + def get("⏢"), do: [9186] + def get("𝓉"), do: [120_009] + def get("ц"), do: [1094] + def get("ћ"), do: [1115] + def get("ŧ"), do: [359] + 
def get("≬"), do: [8812] + def get("↞"), do: [8606] + def get("↠"), do: [8608] + def get("⇑"), do: [8657] + def get("⥣"), do: [10595] + def get("ú"), do: [250] + def get("ú"), do: [250] + def get("↑"), do: [8593] + def get("ў"), do: [1118] + def get("ŭ"), do: [365] + def get("û"), do: [251] + def get("û"), do: [251] + def get("у"), do: [1091] + def get("⇅"), do: [8645] + def get("ű"), do: [369] + def get("⥮"), do: [10606] + def get("⥾"), do: [10622] + def get("𝔲"), do: [120_114] + def get("ù"), do: [249] + def get("ù"), do: [249] + def get("↿"), do: [8639] + def get("↾"), do: [8638] + def get("▀"), do: [9600] + def get("⌜"), do: [8988] + def get("⌜"), do: [8988] + def get("⌏"), do: [8975] + def get("◸"), do: [9720] + def get("ū"), do: [363] + def get("¨"), do: [168] + def get("¨"), do: [168] + def get("ų"), do: [371] + def get("𝕦"), do: [120_166] + def get("↑"), do: [8593] + def get("↕"), do: [8597] + def get("↿"), do: [8639] + def get("↾"), do: [8638] + def get("⊎"), do: [8846] + def get("υ"), do: [965] + def get("ϒ"), do: [978] + def get("υ"), do: [965] + def get("⇈"), do: [8648] + def get("⌝"), do: [8989] + def get("⌝"), do: [8989] + def get("⌎"), do: [8974] + def get("ů"), do: [367] + def get("◹"), do: [9721] + def get("𝓊"), do: [120_010] + def get("⋰"), do: [8944] + def get("ũ"), do: [361] + def get("▵"), do: [9653] + def get("▴"), do: [9652] + def get("⇈"), do: [8648] + def get("ü"), do: [252] + def get("ü"), do: [252] + def get("⦧"), do: [10663] + def get("⇕"), do: [8661] + def get("⫨"), do: [10984] + def get("⫩"), do: [10985] + def get("⊨"), do: [8872] + def get("⦜"), do: [10652] + def get("ϵ"), do: [1013] + def get("ϰ"), do: [1008] + def get("∅"), do: [8709] + def get("ϕ"), do: [981] + def get("ϖ"), do: [982] + def get("∝"), do: [8733] + def get("↕"), do: [8597] + def get("ϱ"), do: [1009] + def get("ς"), do: [962] + def get("⊊︀"), do: [8842, 65024] + def get("⫋︀"), do: [10955, 65024] + def get("⊋︀"), do: [8843, 65024] + def get("⫌︀"), do: [10956, 65024] + def get("ϑ"), do: [977] + def get("⊲"), do: [8882] + def get("⊳"), do: [8883] + def get("в"), do: [1074] + def get("⊢"), do: [8866] + def get("∨"), do: [8744] + def get("⊻"), do: [8891] + def get("≚"), do: [8794] + def get("⋮"), do: [8942] + def get("|"), do: [124] + def get("|"), do: [124] + def get("𝔳"), do: [120_115] + def get("⊲"), do: [8882] + def get("⊂⃒"), do: [8834, 8402] + def get("⊃⃒"), do: [8835, 8402] + def get("𝕧"), do: [120_167] + def get("∝"), do: [8733] + def get("⊳"), do: [8883] + def get("𝓋"), do: [120_011] + def get("⫋︀"), do: [10955, 65024] + def get("⊊︀"), do: [8842, 65024] + def get("⫌︀"), do: [10956, 65024] + def get("⊋︀"), do: [8843, 65024] + def get("⦚"), do: [10650] + def get("ŵ"), do: [373] + def get("⩟"), do: [10847] + def get("∧"), do: [8743] + def get("≙"), do: [8793] + def get("℘"), do: [8472] + def get("𝔴"), do: [120_116] + def get("𝕨"), do: [120_168] + def get("℘"), do: [8472] + def get("≀"), do: [8768] + def get("≀"), do: [8768] + def get("𝓌"), do: [120_012] + def get("⋂"), do: [8898] + def get("◯"), do: [9711] + def get("⋃"), do: [8899] + def get("▽"), do: [9661] + def get("𝔵"), do: [120_117] + def get("⟺"), do: [10234] + def get("⟷"), do: [10231] + def get("ξ"), do: [958] + def get("⟸"), do: [10232] + def get("⟵"), do: [10229] + def get("⟼"), do: [10236] + def get("⋻"), do: [8955] + def get("⨀"), do: [10752] + def get("𝕩"), do: [120_169] + def get("⨁"), do: [10753] + def get("⨂"), do: [10754] + def get("⟹"), do: [10233] + def get("⟶"), do: [10230] + def get("𝓍"), do: [120_013] + def get("⨆"), 
do: [10758] + def get("⨄"), do: [10756] + def get("△"), do: [9651] + def get("⋁"), do: [8897] + def get("⋀"), do: [8896] + def get("ý"), do: [253] + def get("ý"), do: [253] + def get("я"), do: [1103] + def get("ŷ"), do: [375] + def get("ы"), do: [1099] + def get("¥"), do: [165] + def get("¥"), do: [165] + def get("𝔶"), do: [120_118] + def get("ї"), do: [1111] + def get("𝕪"), do: [120_170] + def get("𝓎"), do: [120_014] + def get("ю"), do: [1102] + def get("ÿ"), do: [255] + def get("ÿ"), do: [255] + def get("ź"), do: [378] + def get("ž"), do: [382] + def get("з"), do: [1079] + def get("ż"), do: [380] + def get("ℨ"), do: [8488] + def get("ζ"), do: [950] + def get("𝔷"), do: [120_119] + def get("ж"), do: [1078] + def get("⇝"), do: [8669] + def get("𝕫"), do: [120_171] + def get("𝓏"), do: [120_015] + def get("‍"), do: [8205] + def get("‌"), do: [8204] + def get(_), do: [] +end diff --git a/deps/floki/lib/floki/filter_out.ex b/deps/floki/lib/floki/filter_out.ex new file mode 100644 index 0000000..65a6a63 --- /dev/null +++ b/deps/floki/lib/floki/filter_out.ex @@ -0,0 +1,45 @@ +defmodule Floki.FilterOut do + @moduledoc false + + # Helper functions for filtering out a specific element from the tree. + + @type selector :: :comment | :text | Floki.css_selector() + + def filter_out(html_tree_or_node, type) when type in [:text, :comment] do + mapper(html_tree_or_node, type) + end + + def filter_out(html_tree, selector) when is_list(html_tree) do + Floki.find_and_update(html_tree, selector, fn + {_tag, _attrs} -> :delete + other -> other + end) + end + + def filter_out(html_node, selector) do + [html_node] + |> Floki.find_and_update(selector, fn + {_tag, _attrs} -> :delete + other -> other + end) + |> List.first() || [] + end + + defp filter({nodetext, _, _}, selector) when nodetext === selector, do: false + defp filter({nodetext, _}, selector) when nodetext === selector, do: false + defp filter(text, :text) when is_binary(text), do: false + defp filter(_, _), do: true + + defp mapper(nodes, selector) when is_list(nodes) do + nodes + |> Stream.filter(&filter(&1, selector)) + |> Stream.map(&mapper(&1, selector)) + |> Enum.to_list() + end + + defp mapper({nodetext, x, y}, selector) do + {nodetext, x, mapper(y, selector)} + end + + defp mapper(nodetext, _), do: nodetext +end diff --git a/deps/floki/lib/floki/finder.ex b/deps/floki/lib/floki/finder.ex new file mode 100644 index 0000000..931d674 --- /dev/null +++ b/deps/floki/lib/floki/finder.ex @@ -0,0 +1,196 @@ +defmodule Floki.Finder do + require Logger + + @moduledoc false + + # The finder engine traverse the HTML tree searching for nodes matching + # selectors. + + alias Floki.{HTMLTree, Selector} + alias HTMLTree.HTMLNode + + # Find elements inside a HTML tree. + # Second argument can be either a selector string, a selector struct or a list of selector structs. 
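Illustrative sketch (not part of the vendored file): one way this finder engine could be exercised directly, assuming Floki's public parse_document/1 alongside the find/2 defined below.

    # parse a document into the tuple tree the finder expects
    # (parse_document/1 is an assumption from Floki's public API, not shown in this patch)
    {:ok, doc} = Floki.parse_document(~s(<div><p class="intro">Hello</p><p>Bye</p></div>))
    # returns the built HTMLTree plus the nodes matching the selector
    {_tree, nodes} = Floki.Finder.find(doc, "p.intro")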
+ + @spec find(Floki.html_tree(), Floki.css_selector()) :: {HTMLTree.t(), [HTMLTree.HTMLNode.t()]} + + def find([], _), do: {%HTMLTree{}, []} + def find(html_as_string, _) when is_binary(html_as_string), do: {%HTMLTree{}, []} + + def find(html_tree, selector_as_string) when is_binary(selector_as_string) do + selectors = get_selectors(selector_as_string) + find_selectors(html_tree, selectors) + end + + def find(html_tree, selectors) when is_list(selectors) do + find_selectors(html_tree, selectors) + end + + def find(html_tree, selector = %Selector{}) do + find_selectors(html_tree, [selector]) + end + + @spec map(Floki.html_tree() | Floki.html_node(), function()) :: + Floki.html_tree() | Floki.html_node() + + def map({name, attrs, rest}, fun) do + {new_name, new_attrs} = fun.({name, attrs}) + + {new_name, new_attrs, Enum.map(rest, &map(&1, fun))} + end + + def map(other, _fun), do: other + + defp find_selectors(html_tuple_or_list, selectors) do + tree = HTMLTree.build(html_tuple_or_list) + + results = + tree.node_ids + |> Enum.reverse() + |> get_nodes(tree) + |> Enum.flat_map(fn html_node -> get_matches_for_selectors(tree, html_node, selectors) end) + |> Enum.uniq() + + {tree, results} + end + + defp get_selectors(selector_as_string) do + selector_as_string + |> Selector.Tokenizer.tokenize() + |> Selector.Parser.parse() + end + + defp get_matches_for_selectors(tree, html_node, selectors) do + Enum.flat_map(selectors, fn selector -> get_matches(tree, html_node, selector) end) + end + + defp get_matches(tree, html_node, selector = %Selector{combinator: nil}) do + if selector_match?(tree, html_node, selector) do + [html_node] + else + [] + end + end + + defp get_matches(tree, html_node, selector = %Selector{combinator: combinator}) do + if selector_match?(tree, html_node, selector) do + traverse_with(combinator, tree, [html_node]) + else + [] + end + end + + defp selector_match?(tree, html_node, selector) do + Selector.match?(html_node, selector, tree) + end + + # The stack serves as accumulator when there is another combinator to traverse. + # So the scope of one combinator is the stack (or acc) or the parent one. 
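Illustrative sketch (not part of the vendored file) of the combinator semantics the traverse_with/3 clauses below implement, expressed through Floki's public find/2 (an assumption; only the internal engine appears in this patch).

    {:ok, doc} = Floki.parse_document(~s(<div><p>direct</p><section><p>nested</p></section></div>))
    Floki.find(doc, "div > p")  # child combinator: only the direct <p> child matches
    Floki.find(doc, "div p")    # descendant combinator: both <p> elements match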
+ defp traverse_with(_, _, []), do: [] + defp traverse_with(nil, _, results), do: results + + defp traverse_with(%Selector.Combinator{match_type: :child, selector: s}, tree, stack) do + results = + Enum.flat_map(stack, fn html_node -> + nodes = + html_node.children_nodes_ids + |> Enum.reverse() + |> get_nodes(tree) + + Enum.filter(nodes, fn html_node -> selector_match?(tree, html_node, s) end) + end) + + traverse_with(s.combinator, tree, results) + end + + defp traverse_with(%Selector.Combinator{match_type: :sibling, selector: s}, tree, stack) do + results = + Enum.flat_map(stack, fn html_node -> + # It treats sibling as list to easily ignores those that didn't match + sibling_id = + html_node + |> get_siblings(tree) + |> Enum.take(1) + + nodes = get_nodes(sibling_id, tree) + + # Finally, try to match those siblings with the selector + Enum.filter(nodes, fn html_node -> selector_match?(tree, html_node, s) end) + end) + + traverse_with(s.combinator, tree, results) + end + + defp traverse_with(%Selector.Combinator{match_type: :general_sibling, selector: s}, tree, stack) do + results = + Enum.flat_map(stack, fn html_node -> + sibling_ids = get_siblings(html_node, tree) + + nodes = get_nodes(sibling_ids, tree) + + # Finally, try to match those siblings with the selector + Enum.filter(nodes, fn html_node -> selector_match?(tree, html_node, s) end) + end) + + traverse_with(s.combinator, tree, results) + end + + defp traverse_with(%Selector.Combinator{match_type: :descendant, selector: s}, tree, stack) do + results = + Enum.flat_map(stack, fn html_node -> + ids_to_match = get_descendant_ids(html_node.node_id, tree) + nodes = get_nodes(ids_to_match, tree) + + Enum.filter(nodes, fn html_node -> selector_match?(tree, html_node, s) end) + end) + + traverse_with(s.combinator, tree, results) + end + + defp get_nodes(ids, tree) do + Enum.map(ids, fn id -> Map.get(tree.nodes, id) end) + end + + defp get_node(id, tree) do + Map.get(tree.nodes, id) + end + + defp get_sibling_ids_from([], _html_node), do: [] + + defp get_sibling_ids_from(ids, html_node) do + ids + |> Enum.reverse() + |> Enum.drop_while(fn id -> id != html_node.node_id end) + |> tl() + end + + defp get_siblings(html_node, tree) do + parent = get_node(html_node.parent_node_id, tree) + + ids = + if parent do + get_sibling_ids_from(parent.children_nodes_ids, html_node) + else + get_sibling_ids_from(Enum.reverse(tree.root_nodes_ids), html_node) + end + + Enum.filter(ids, fn id -> + case get_node(id, tree) do + %HTMLNode{} -> true + _ -> false + end + end) + end + + # finds all descendant node ids recursively through the tree preserving the order + defp get_descendant_ids(node_id, tree) do + case get_node(node_id, tree) do + %{children_nodes_ids: node_ids} -> + reversed_ids = Enum.reverse(node_ids) + reversed_ids ++ Enum.flat_map(reversed_ids, &get_descendant_ids(&1, tree)) + + _ -> + [] + end + end +end diff --git a/deps/floki/lib/floki/flat_text.ex b/deps/floki/lib/floki/flat_text.ex new file mode 100644 index 0000000..ff77294 --- /dev/null +++ b/deps/floki/lib/floki/flat_text.ex @@ -0,0 +1,41 @@ +defmodule Floki.FlatText do + @moduledoc false + + # FlatText is a strategy to get text nodes from a HTML tree without search deep + # in the tree. It only gets the text nodes from the first level of nodes. + + # Example + + # iex> Floki.FlatText.get([{"a", [], ["The meaning of life is...", {"strong", [], ["something else"]}] }]) + # "The meaning of life is..." 
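A small usage sketch (not part of the vendored file), following the docs above; per the get/2 heads that follow, an optional second argument joins the first-level text nodes with a separator.

    Floki.FlatText.get([{"a", [], ["one", {"b", [], ["skipped"]}, "two"]}], " ")
    # => "one two"  (text inside the nested element is not collected)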
+ + @type html_tree :: tuple | list + + @spec get(html_tree, binary) :: binary + + def get(html_nodes, sep \\ "") + + def get(html_nodes, sep) when is_list(html_nodes) do + Enum.reduce(html_nodes, "", fn html_node, acc -> + text_from_node(html_node, acc, sep) + end) + end + + def get(html_node, sep) do + text_from_node(html_node, "", sep) + end + + defp text_from_node({_tag, _attrs, html_nodes}, acc, sep) do + Enum.reduce(html_nodes, acc, fn html_node, acc -> + capture_text(html_node, acc, sep) + end) + end + + defp text_from_node(text, "", _sep) when is_binary(text), do: text + defp text_from_node(text, acc, sep) when is_binary(text), do: Enum.join([acc, text], sep) + defp text_from_node(_, acc, _), do: acc + + defp capture_text(text, "", _sep) when is_binary(text), do: text + defp capture_text(text, acc, sep) when is_binary(text), do: Enum.join([acc, text], sep) + defp capture_text(_html_node, acc, _), do: acc +end diff --git a/deps/floki/lib/floki/html/numeric_charref.ex b/deps/floki/lib/floki/html/numeric_charref.ex new file mode 100644 index 0000000..4e6da46 --- /dev/null +++ b/deps/floki/lib/floki/html/numeric_charref.ex @@ -0,0 +1,108 @@ +defmodule Floki.HTML.NumericCharref do + @moduledoc false + + # REPLACEMENT CHARACTER + def to_unicode_number(0x00), do: {:ok, {:table, 0xFFFD}} + # EURO SIGN (€) + def to_unicode_number(0x80), do: {:ok, {:table, 0x20AC}} + # SINGLE LOW-9 QUOTATION MARK (‚) + def to_unicode_number(0x82), do: {:ok, {:table, 0x201A}} + # LATIN SMALL LETTER F WITH HOOK (ƒ) + def to_unicode_number(0x83), do: {:ok, {:table, 0x0192}} + # DOUBLE LOW-9 QUOTATION MARK („) + def to_unicode_number(0x84), do: {:ok, {:table, 0x201E}} + # HORIZONTAL ELLIPSIS (…) + def to_unicode_number(0x85), do: {:ok, {:table, 0x2026}} + # DAGGER (†) + def to_unicode_number(0x86), do: {:ok, {:table, 0x2020}} + # DOUBLE DAGGER (‡) + def to_unicode_number(0x87), do: {:ok, {:table, 0x2021}} + # MODIFIER LETTER CIRCUMFLEX ACCENT (ˆ) + def to_unicode_number(0x88), do: {:ok, {:table, 0x02C6}} + # PER MILLE SIGN (‰) + def to_unicode_number(0x89), do: {:ok, {:table, 0x2030}} + # LATIN CAPITAL LETTER S WITH CARON (Š) + def to_unicode_number(0x8A), do: {:ok, {:table, 0x0160}} + # SINGLE LEFT-POINTING ANGLE QUOTATION MARK (‹) + def to_unicode_number(0x8B), do: {:ok, {:table, 0x2039}} + # LATIN CAPITAL LIGATURE OE (Œ) + def to_unicode_number(0x8C), do: {:ok, {:table, 0x0152}} + # LATIN CAPITAL LETTER Z WITH CARON (Ž) + def to_unicode_number(0x8E), do: {:ok, {:table, 0x017D}} + # LEFT SINGLE QUOTATION MARK (‘) + def to_unicode_number(0x91), do: {:ok, {:table, 0x2018}} + # RIGHT SINGLE QUOTATION MARK (’) + def to_unicode_number(0x92), do: {:ok, {:table, 0x2019}} + # LEFT DOUBLE QUOTATION MARK (“) + def to_unicode_number(0x93), do: {:ok, {:table, 0x201C}} + # RIGHT DOUBLE QUOTATION MARK (”) + def to_unicode_number(0x94), do: {:ok, {:table, 0x201D}} + # BULLET (•) + def to_unicode_number(0x95), do: {:ok, {:table, 0x2022}} + # EN DASH (–) + def to_unicode_number(0x96), do: {:ok, {:table, 0x2013}} + # EM DASH (—) + def to_unicode_number(0x97), do: {:ok, {:table, 0x2014}} + # SMALL TILDE (˜) + def to_unicode_number(0x98), do: {:ok, {:table, 0x02DC}} + # TRADE MARK SIGN (™) + def to_unicode_number(0x99), do: {:ok, {:table, 0x2122}} + # LATIN SMALL LETTER S WITH CARON (š) + def to_unicode_number(0x9A), do: {:ok, {:table, 0x0161}} + # SINGLE RIGHT-POINTING ANGLE QUOTATION MARK (›) + def to_unicode_number(0x9B), do: {:ok, {:table, 0x203A}} + # LATIN SMALL LIGATURE OE (œ) +
def to_unicode_number(0x9C), do: {:ok, {:table, 0x0153}} + # LATIN SMALL LETTER Z WITH CARON (ลพ) + def to_unicode_number(0x9E), do: {:ok, {:table, 0x017E}} + # LATIN CAPITAL LETTER Y WITH DIAERESIS (ลธ) + def to_unicode_number(0x9F), do: {:ok, {:table, 0x0178}} + + def to_unicode_number(number) when number in 0xD800..0xDFFF or number > 0x10FFFF, + do: {:ok, {:range_one, 0xFFFD}} + + def to_unicode_number(number) + when number in 0x0001..0x0008 or number in 0x000D..0x001F or number in 0x007F..0x009F or + number in 0xFDD0..0xFDEF or + number in [ + 0x000B, + 0xFFFE, + 0xFFFF, + 0x1FFFE, + 0x1FFFF, + 0x2FFFE, + 0x2FFFF, + 0x3FFFE, + 0x3FFFF, + 0x4FFFE, + 0x4FFFF, + 0x5FFFE, + 0x5FFFF, + 0x6FFFE, + 0x6FFFF, + 0x7FFFE, + 0x7FFFF, + 0x8FFFE, + 0x8FFFF, + 0x9FFFE, + 0x9FFFF, + 0xAFFFE, + 0xAFFFF, + 0xBFFFE, + 0xBFFFF, + 0xCFFFE, + 0xCFFFF, + 0xDFFFE, + 0xDFFFF, + 0xEFFFE, + 0xEFFFF, + 0xFFFFE, + 0xFFFFF, + 0x10FFFE, + 0x10FFFF + ] do + {:ok, {:list_of_errors, number}} + end + + def to_unicode_number(number), do: {:ok, {:unicode, number}} +end diff --git a/deps/floki/lib/floki/html/tokenizer.ex b/deps/floki/lib/floki/html/tokenizer.ex new file mode 100644 index 0000000..afc31c5 --- /dev/null +++ b/deps/floki/lib/floki/html/tokenizer.ex @@ -0,0 +1,2868 @@ +defmodule Floki.HTML.Tokenizer do + @moduledoc false + + # HTML tokenizer built according to the specs of WHATWG/W3C. + # https://html.spec.whatwg.org/multipage/#toc-syntax + # + # In order to find the docs of a given state, add it as an anchor to the link above. + # Example: https://html.spec.whatwg.org/multipage/parsing.html#data-state + # + # The tests for this module can be found in test/floki/html/generated/tokenizer. + # They were generated based on test files from https://github.com/html5lib/html5lib-tests + # In order to update those test files you first need to run the task: + # + # mix generate_tokenizer_tests filename.tests + # + # Where "filename.tests" is a file present in "test/html5lib-tests/tokenizer" directory. + # + # This tokenizer depends on an entities list that is generated with another mix task. + # That file shouldn't change much, but if needed, it can be updated with: + # + # mix generate_entities + # + # This tokenizer does not work with streams yet. + + defmodule Doctype do + @moduledoc false + + defstruct name: nil, + public_id: nil, + system_id: nil, + force_quirks: :off + + @type t :: %__MODULE__{ + name: iodata(), + public_id: iodata() | nil, + system_id: iodata() | nil, + force_quirks: :on | :off + } + end + + defmodule Attribute do + @moduledoc false + + defstruct name: "", value: "" + + @type t :: %__MODULE__{ + name: iodata(), + value: iodata() + } + end + + defmodule StartTag do + @moduledoc false + + defstruct name: "", + self_close: nil, + attributes: [] + + @type t :: %__MODULE__{ + name: iodata(), + self_close: boolean() | nil, + attributes: list(Attribute.t()) + } + end + + defmodule EndTag do + @moduledoc false + + defstruct name: "", + self_close: nil, + attributes: [] + + @type t :: %__MODULE__{ + name: iodata(), + self_close: boolean() | nil, + attributes: list(Attribute.t()) + } + end + + defmodule Comment do + @moduledoc false + + defstruct data: "" + + @type t :: %__MODULE__{ + data: iodata() + } + end + + defmodule CharrefState do + @moduledoc false + + defstruct candidate: nil, done: false, length: 0 + + @type t :: %__MODULE__{ + candidate: binary(), + done: boolean(), + length: integer() + } + end + + # It represents the state of tokenization. 
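Illustrative sketch (not part of the vendored file) of driving the tokenizer through tokenize/1, defined further down; the State struct described next carries the result.

    state = Floki.HTML.Tokenizer.tokenize(~s(<p class="a">hi</p>))
    state.tokens
    # a list of start-tag, character and end-tag tokens terminated by :eof; the exact
    # token shapes are internal to this module, so treat this only as a sketch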
+ defmodule State do + @moduledoc false + + defstruct return_state: nil, + eof_last_state: nil, + adjusted_current_node: nil, + token: nil, + tokens: [], + buffer: "", + last_start_tag: nil, + errors: [], + emit: nil, + charref_state: nil, + charref_code: nil + + @type token :: Doctype.t() | Comment.t() | StartTag.t() | EndTag.t() | {:char, iodata()} + + @type t :: %__MODULE__{ + return_state: + :data + | :rcdata + | :attribute_value_double_quoted + | :attribute_value_single_quoted + | :attribute_value_unquoted, + eof_last_state: atom(), + buffer: iodata(), + token: token() | nil, + tokens: list(token()), + errors: [{:parse_error, binary() | nil}], + last_start_tag: StartTag.t(), + charref_state: CharrefState.t(), + charref_code: integer(), + emit: (token() -> token()) + } + end + + @lower_ASCII_letters ?a..?z + @upper_ASCII_letters ?A..?Z + @ascii_digits ?0..?9 + @space_chars [?\t, ?\n, ?\f, ?\s] + + defguardp is_lower_letter(c) when c in @lower_ASCII_letters + defguardp is_upper_letter(c) when c in @upper_ASCII_letters + defguardp is_digit(c) when c in @ascii_digits + defguardp is_letter(c) when c in @upper_ASCII_letters or c in @lower_ASCII_letters + defguardp is_space(c) when c in @space_chars + + @less_than_sign ?< + @greater_than_sign ?> + @exclamation_mark ?! + @solidus ?/ + @hyphen_minus ?- + @replacement_char 0xFFFD + + @spec tokenize(binary()) :: State.t() + def tokenize(html) do + pattern = :binary.compile_pattern(["\r\n", "\r"]) + + html + |> String.replace(pattern, "\n") + |> data(%State{emit: fn token -> token end}) + end + + # It assumes that the parser stops at the end of file. + # If we need to work with streams, this can't reverse here. + defp eof(last_state, s) do + %{ + s + | eof_last_state: last_state, + tokens: Enum.reverse([:eof | s.tokens]), + errors: Enum.reverse(s.errors) + } + end + + # ยง tokenizer-data-state + + defp data(<<?&, html::binary>>, s) do + character_reference(html, %{s | return_state: :data}) + end + + defp data(<<?<, html::binary>>, s) do + tag_open(html, s) + end + + defp data(<<0, html::binary>>, s) do + data(html, %{s | tokens: append_char_token(s, 0)}) + end + + defp data("", s) do + eof(:data, s) + end + + defp data(<<c::utf8, html::binary>>, s) do + data(html, %{s | tokens: append_char_token(s, c)}) + end + + # ยง tokenizer-rcdata-state: re-entrant + + @spec rcdata(binary(), %State{}) :: %State{} + def rcdata(<<?&, html::binary>>, s) do + character_reference(html, %{s | return_state: :rcdata}) + end + + def rcdata(<<?<, html::binary>>, s) do + rcdata_less_than_sign(html, s) + end + + def rcdata(<<0, html::binary>>, s) do + rcdata(html, %{s | tokens: append_char_token(s, @replacement_char)}) + end + + def rcdata("", s) do + eof(:rcdata, s) + end + + def rcdata(<<c::utf8, html::binary>>, s) do + rcdata(html, %{s | tokens: append_char_token(s, c)}) + end + + # ยง tokenizer-rawtext-state: re-entrant + + @spec rawtext(binary(), State.t()) :: State.t() + def rawtext(<<?<, html::binary>>, s) do + rawtext_less_than_sign(html, s) + end + + def rawtext(<<0, html::binary>>, s) do + rawtext(html, %{s | tokens: append_char_token(s, @replacement_char)}) + end + + def rawtext("", s) do + eof(:rawtext, s) + end + + def rawtext(<<c::utf8, html::binary>>, s) do + rawtext(html, %{s | tokens: append_char_token(s, c)}) + end + + # ยง tokenizer-script-data-state: re-entrant + + @spec script_data(binary(), State.t()) :: State.t() + def script_data(<<?<, html::binary>>, s) do + script_data_less_than_sign(html, s) + end + + def script_data(<<0, html::binary>>, s) do 
+ script_data(html, %{s | tokens: append_char_token(s, @replacement_char)}) + end + + def script_data("", s) do + eof(:script_data, s) + end + + def script_data(<<c::utf8, html::binary>>, s) do + script_data(html, %{ + s + | tokens: append_char_token(s, c) + }) + end + + # ยง tokenizer-plaintext-state: re-entrant + + @spec plaintext(binary(), State.t()) :: State.t() + def plaintext(<<0, html::binary>>, s) do + plaintext(html, %{s | tokens: append_char_token(s, @replacement_char)}) + end + + def plaintext("", s) do + eof(:plaintext, s) + end + + def plaintext(<<c::utf8, html::binary>>, s) do + plaintext(html, %{s | tokens: append_char_token(s, c)}) + end + + # ยง tokenizer-tag-open-state + + defp tag_open(<<?!, html::binary>>, s) do + markup_declaration_open(html, s) + end + + defp tag_open(<<?/, html::binary>>, s) do + end_tag_open(html, s) + end + + defp tag_open(html = <<c, _rest::binary>>, s) + when is_letter(c) do + token = %StartTag{name: ""} + + tag_name(html, %{s | token: token}) + end + + defp tag_open(html = <<??, _rest::binary>>, s) do + token = %Comment{data: ""} + + bogus_comment(html, %{s | token: token}) + end + + defp tag_open(html, s) do + data(html, %{ + s + | token: nil, + tokens: append_char_token(s, @less_than_sign) + }) + end + + # ยง tokenizer-end-tag-open-state + + defp end_tag_open(html = <<c, _rest::binary>>, s) + when is_letter(c) do + token = %EndTag{name: ""} + + tag_name(html, %{s | token: token}) + end + + defp end_tag_open(<<?>, html::binary>>, s) do + data(html, %{s | token: nil}) + end + + defp end_tag_open("", s) do + eof(:data, %{ + s + | token: nil, + tokens: append_char_token(s, [@less_than_sign, @solidus]), + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp end_tag_open(html, s) do + token = %Comment{data: ""} + + bogus_comment(html, %{s | token: token}) + end + + # ยง tokenizer-tag-name-state + + defp tag_name(<<c, html::binary>>, s) + when is_space(c) do + before_attribute_name(html, s) + end + + defp tag_name(<<?/, html::binary>>, s) do + self_closing_start_tag(html, s) + end + + defp tag_name(<<?>, html::binary>>, s) do + data(html, %{ + s + | last_start_tag: s.token, + tokens: [s.emit.(s.token) | s.tokens], + token: nil + }) + end + + defp tag_name(<<c, html::binary>>, s) + when is_upper_letter(c) do + new_token = %{s.token | name: [s.token.name | [c + 32]]} + + tag_name(html, %{s | token: new_token}) + end + + defp tag_name(<<0, html::binary>>, s) do + tag_name(html, %{ + s + | token: %{s.token | name: [s.token.name | [@replacement_char]]}, + errors: [ + {:parse_error, "unexpected-null-character"} + | s.errors + ] + }) + end + + defp tag_name("", s) do + eof(:tag_name, %{ + s + | errors: [{:parse_error, "eof-in-tag"} | s.errors] + }) + end + + defp tag_name(<<c::utf8, html::binary>>, s) do + new_token = %{s.token | name: [s.token.name | [c]]} + + tag_name(html, %{s | token: new_token}) + end + + # ยง tokenizer-rcdata-less-than-sign-state + + defp rcdata_less_than_sign(<<?/, html::binary>>, s) do + rcdata_end_tag_open(html, %{s | buffer: ""}) + end + + defp rcdata_less_than_sign(html, s) do + rcdata(html, %{ + s + | token: nil, + tokens: append_char_token(s, @less_than_sign) + }) + end + + # ยง tokenizer-rcdata-end-tag-open-state + + defp rcdata_end_tag_open( + html = <<c, _rest::binary>>, + s + ) + when is_letter(c) do + token = %EndTag{name: ""} + rcdata_end_tag_name(html, %{s | token: token}) + end + + defp rcdata_end_tag_open(html, s) do + rcdata(html, %{s | tokens: append_char_token(s, [@less_than_sign, @solidus])}) + end + + # ยง 
tokenizer-rcdata-end-tag-name-state + + defp rcdata_end_tag_name(html = <<c, rest::binary>>, s) + when is_space(c) do + if appropriate_tag?(s) do + before_attribute_name(rest, s) + else + rcdata(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + defp rcdata_end_tag_name(html = <<?/, rest::binary>>, s) do + if appropriate_tag?(s) do + self_closing_start_tag(rest, s) + else + rcdata(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + defp rcdata_end_tag_name(html = <<?>, rest::binary>>, s) do + if appropriate_tag?(s) do + data(rest, %{ + s + | token: nil, + tokens: [s.emit.(s.token) | s.tokens] + }) + else + rcdata(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + # TODO: should we always declare %State{}? + defp rcdata_end_tag_name(<<c, html::binary>>, %State{} = s) + when is_upper_letter(c) do + c_downcased = c + 32 + new_token = %{s.token | name: [s.token.name | [c_downcased]]} + + rcdata(html, %{s | token: new_token, buffer: [s.buffer | [c_downcased]]}) + end + + defp rcdata_end_tag_name(<<c, html::binary>>, s) + when is_lower_letter(c) do + col = s.col + 1 + new_token = %{s.token | name: [s.name | [c]]} + + rcdata_end_tag_name(html, %{s | token: new_token, buffer: [s.buffer | [c]], col: col}) + end + + defp rcdata_end_tag_name(html, s) do + rcdata(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + + # ยง tokenizer-rawtext-end-tag-name-state + + defp rawtext_end_tag_name(html = <<c::utf8, rest::binary>>, s) + when is_space(c) do + if appropriate_tag?(s) do + before_attribute_name(rest, s) + else + rawtext(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + defp rawtext_end_tag_name(html = <<"/", rest::binary>>, s) do + if appropriate_tag?(s) do + self_closing_start_tag(rest, s) + else + rawtext(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + defp rawtext_end_tag_name(html = <<">", rest::binary>>, s) do + if appropriate_tag?(s) do + data(rest, %{ + s + | token: nil, + tokens: [s.emit.(s.token) | s.tokens] + }) + else + rawtext(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + defp rawtext_end_tag_name(<<c, html::binary>>, s) + when is_upper_letter(c) do + new_token = %{s.token | name: [s.token.name | [c + 32]]} + + rawtext(html, %{s | token: new_token, buffer: [s.buffer | [c]]}) + end + + defp rawtext_end_tag_name(<<c, html::binary>>, s) + when is_lower_letter(c) do + col = s.col + 1 + new_token = %{s.token | name: [s.name | [c]]} + + rawtext_end_tag_name(html, %{s | token: new_token, buffer: [s.buffer | [c]], col: col}) + end + + defp rawtext_end_tag_name(html, s) do + rawtext(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + + # ยง tokenizer-script-data-end-tag-name-state + + defp script_data_end_tag_name(html = <<c, rest::binary>>, s) + when is_space(c) do + if appropriate_tag?(s) do + before_attribute_name(rest, s) + else + script_data(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + defp script_data_end_tag_name(html = <<?/, rest::binary>>, s) do + if appropriate_tag?(s) do + self_closing_start_tag(rest, s) + else + script_data(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + defp script_data_end_tag_name(html = <<?>, rest::binary>>, s) do + if 
appropriate_tag?(s) do + data(rest, %{ + s + | token: nil, + tokens: [s.emit.(s.token) | s.tokens] + }) + else + script_data(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + defp script_data_end_tag_name(<<c, html::binary>>, s) + when is_upper_letter(c) do + c_downcased = c + 32 + new_token = %{s.token | name: [s.token.name | [c_downcased]]} + + script_data(html, %{s | token: new_token, buffer: [s.buffer | [c_downcased]]}) + end + + defp script_data_end_tag_name(<<c, html::binary>>, s) + when is_lower_letter(c) do + new_token = %{s.token | name: [s.name | [c]]} + + script_data_end_tag_name(html, %{s | token: new_token, buffer: [s.buffer | [c]]}) + end + + defp script_data_end_tag_name(html, s) do + script_data(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + + # ยง tokenizer-script-data-escaped-end-tag-name-state: re-entrant + + @spec script_data_escaped_end_tag_name(binary(), State.t()) :: State.t() + def script_data_escaped_end_tag_name(html = <<c, rest::binary>>, s) + when is_space(c) do + if appropriate_tag?(s) do + before_attribute_name(rest, s) + else + script_data_escaped(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + def script_data_escaped_end_tag_name(html = <<?/, rest::binary>>, s) do + if appropriate_tag?(s) do + self_closing_start_tag(rest, s) + else + script_data_escaped(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + def script_data_escaped_end_tag_name(html = <<?>, rest::binary>>, s) do + if appropriate_tag?(s) do + data(rest, %{ + s + | token: nil, + tokens: [s.emit.(s.token) | s.tokens] + }) + else + script_data_escaped(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + end + + def script_data_escaped_end_tag_name(<<c, html::binary>>, s) + when is_upper_letter(c) do + new_token = %{s.token | name: [s.name | [c + 32]]} + + script_data_escaped(html, %{s | token: new_token, buffer: [s.buffer | [c]]}) + end + + def script_data_escaped_end_tag_name(<<c, html::binary>>, s) + when is_lower_letter(c) do + new_token = %{s.token | name: [s.token.name | [c]]} + + script_data_escaped_end_tag_name(html, %{ + s + | token: new_token, + buffer: [s.buffer | [c]] + }) + end + + def script_data_escaped_end_tag_name(html, s) do + script_data_escaped(html, %{ + s + | tokens: tokens_for_inappropriate_end_tag(s), + buffer: "" + }) + end + + # ยง tokenizer-rawtext-less-than-sign-state + + defp rawtext_less_than_sign(<<?/, html::binary>>, s) do + rawtext_end_tag_open(html, %{s | buffer: ""}) + end + + defp rawtext_less_than_sign(html, s) do + rawtext(html, %{s | tokens: append_char_token(s, 0x003C)}) + end + + # ยง tokenizer-rawtext-end-tag-open-state + + defp rawtext_end_tag_open( + html = <<c, _rest::binary>>, + s + ) + when is_letter(c) do + token = %EndTag{name: ""} + rawtext_end_tag_name(html, %{s | token: token}) + end + + defp rawtext_end_tag_open(html, s) do + rawtext(html, %{s | tokens: append_char_token(s, [@less_than_sign, @solidus])}) + end + + # ยง tokenizer-script-data-less-than-sign-state + + defp script_data_less_than_sign(<<?/, html::binary>>, s) do + script_data_end_tag_open(html, %{s | buffer: ""}) + end + + defp script_data_less_than_sign(<<?!, html::binary>>, s) do + script_data_less_than_sign(html, %{ + s + | tokens: append_char_token(s, [@less_than_sign, @exclamation_mark]) + }) + end + + defp script_data_less_than_sign(html, s) do + script_data(html, %{s | 
tokens: append_char_token(s, @less_than_sign)}) + end + + # ยง tokenizer-script-data-end-tag-open-state + + defp script_data_end_tag_open( + html = <<c, _rest::binary>>, + s + ) + when is_letter(c) do + end_tag = %EndTag{name: ""} + script_data_end_tag_name(html, %{s | token: end_tag}) + end + + defp script_data_end_tag_open(html, s) do + script_data(html, %{ + s + | tokens: append_char_token(s, [@less_than_sign, @solidus]) + }) + end + + # ยง tokenizer-script-data-escape-start-state: re-entrant + + ## Unused + # @spec script_data_escape_start(binary(), State.t()) :: State.t() + # def script_data_escape_start(<<?-, html::binary>>, s) do + # script_data_escape_start_dash( + # html, + # %{ + # s + # | tokens: append_char_token(s, @hyphen_minus) + # } + # ) + # end + + # def script_data_escape_start(html, s) do + # script_data(html, s) + # end + + # ยง tokenizer-script-data-escape-start-dash-state + + # defp script_data_escape_start_dash(<<?-, html::binary>>, s) do + # script_data_escaped_dash_dash( + # html, + # %{ + # s + # | tokens: append_char_token(s, @hyphen_minus) + # } + # ) + # end + + # defp script_data_escape_start_dash(html, s) do + # script_data(html, s) + # end + + # ยง tokenizer-script-data-escaped-state + + defp script_data_escaped(<<?-, html::binary>>, s) do + script_data_escaped_dash( + html, + %{s | tokens: append_char_token(s, @hyphen_minus)} + ) + end + + defp script_data_escaped(<<?<, html::binary>>, s) do + script_data_escaped_less_than_sign(html, s) + end + + defp script_data_escaped(<<0, html::binary>>, s) do + script_data_escaped(html, %{s | tokens: append_char_token(s, @replacement_char)}) + end + + defp script_data_escaped("", s) do + eof(:script_data_escaped, s) + end + + defp script_data_escaped(<<c::utf8, html::binary>>, s) do + script_data_escaped(html, %{s | tokens: append_char_token(s, c)}) + end + + # ยง tokenizer-script-data-escaped-dash-state + + defp script_data_escaped_dash(<<?-, html::binary>>, s) do + script_data_escaped_dash_dash( + html, + %{ + s + | tokens: append_char_token(s, @hyphen_minus) + } + ) + end + + defp script_data_escaped_dash(<<?<, html::binary>>, s) do + script_data_escaped_less_than_sign(html, s) + end + + defp script_data_escaped_dash(<<0, html::binary>>, s) do + script_data_escaped(html, %{ + s + | tokens: append_char_token(s, @replacement_char) + }) + end + + defp script_data_escaped_dash("", s) do + eof(:tokenize, s) + end + + defp script_data_escaped_dash( + <<c::utf8, html::binary>>, + s + ) do + script_data_escaped(html, %{ + s + | tokens: append_char_token(s, c) + }) + end + + # ยง tokenizer-script-data-escaped-dash-dash-state + + defp script_data_escaped_dash_dash(<<?-, html::binary>>, s) do + script_data_escaped_dash_dash( + html, + %{s | tokens: append_char_token(s, @hyphen_minus)} + ) + end + + defp script_data_escaped_dash_dash(<<?<, html::binary>>, s) do + script_data_escaped_less_than_sign(html, s) + end + + defp script_data_escaped_dash_dash(<<?>, html::binary>>, s) do + script_data(html, %{ + s + | tokens: append_char_token(s, @greater_than_sign) + }) + end + + defp script_data_escaped_dash_dash(<<0, html::binary>>, s) do + script_data_escaped(html, %{ + s + | tokens: append_char_token(s, @replacement_char) + }) + end + + defp script_data_escaped_dash_dash("", s) do + eof(:script_data_escaped_dash_dash, s) + end + + defp script_data_escaped_dash_dash( + <<c::utf8, html::binary>>, + s + ) do + script_data_escaped(html, %{ + s + | tokens: append_char_token(s, <<c::utf8>>) + }) + end + + # ยง 
tokenizer-script-data-escaped-less-than-sign-state + + defp script_data_escaped_less_than_sign(<<?/, html::binary>>, s) do + script_data_escaped_end_tag_open(html, %{s | buffer: ""}) + end + + defp script_data_escaped_less_than_sign( + html = <<c, _rest::binary>>, + s + ) + when is_lower_letter(c) or is_upper_letter(c) do + # TODO: revert this after implement the script_data_double_scape_start state + # script_data_double_escape_start( + data( + html, + %{ + s + | buffer: "", + tokens: append_char_token(s, @less_than_sign) + } + ) + end + + defp script_data_escaped_less_than_sign(html, s) do + script_data_escaped(html, %{ + s + | tokens: append_char_token(s, @less_than_sign) + }) + end + + # ยง tokenizer-script-data-escaped-end-tag-open-state + + defp script_data_escaped_end_tag_open( + html = <<c, _rest::binary>>, + s + ) + when is_lower_letter(c) or is_upper_letter(c) do + script_data_escaped_end_tag_name( + html, + %{ + s + | token: %EndTag{name: ""} + } + ) + end + + defp script_data_escaped_end_tag_open(html, s) do + script_data_escaped(html, %{ + s + | tokens: append_char_token(s, [@less_than_sign, @solidus]) + }) + end + + # ยง tokenizer-script-data-double-escape-start-state: re-entrant + + @spec script_data_double_escaped_end_tag_open(binary(), State.t()) :: State.t() + def script_data_double_escaped_end_tag_open( + <<c, html::binary>>, + s + ) + when c in [@solidus, @greater_than_sign | @space_chars] do + s = %{s | tokens: append_char_token(s, <<c::utf8>>)} + + if s.buffer && IO.chardata_to_string(s.buffer) == "script" do + script_data_double_escaped(html, s) + else + script_data_escaped(html, s) + end + end + + def script_data_double_escaped_end_tag_open( + <<c, html::binary>>, + s + ) + when is_upper_letter(c) do + script_data_double_escaped_end_tag_open(html, %{ + s + | buffer: [s.buffer, c + 32], + tokens: append_char_token(s, c) + }) + end + + def script_data_double_escaped_end_tag_open( + <<c, html::binary>>, + s + ) + when is_lower_letter(c) do + script_data_double_escaped_end_tag_open(html, %{ + s + | buffer: [s.buffer, c], + tokens: append_char_token(s, c) + }) + end + + def script_data_double_escaped_end_tag_open(html, s) do + script_data_escaped(html, s) + end + + # ยง tokenizer-script-data-double-escaped-state: re-entrant + + @spec script_data_double_escaped(binary(), State.t()) :: State.t() + def script_data_double_escaped(<<?-, html::binary>>, s) do + script_data_double_escaped_dash(html, %{ + s + | tokens: append_char_token(s, @hyphen_minus) + }) + end + + def script_data_double_escaped(<<?<, html::binary>>, s) do + script_data_double_escaped_less_than_sign(html, %{ + s + | tokens: append_char_token(s, @less_than_sign) + }) + end + + def script_data_double_escaped(<<0, html::binary>>, s) do + script_data_double_escaped(html, %{s | tokens: append_char_token(s, @replacement_char)}) + end + + def script_data_double_escaped("", s) do + eof(:script_data_double_escaped, s) + end + + def script_data_double_escaped(<<c::utf8, html::binary>>, s) do + script_data_double_escaped(html, %{s | tokens: append_char_token(s, c)}) + end + + # ยง tokenizer-script-data-double-escaped-dash-state + + defp script_data_double_escaped_dash(<<?-, html::binary>>, s) do + script_data_double_escaped_dash_dash(html, %{ + s + | tokens: append_char_token(s, @hyphen_minus) + }) + end + + defp script_data_double_escaped_dash(<<?<, html::binary>>, s) do + script_data_double_escaped_less_than_sign(html, %{ + s + | tokens: append_char_token(s, @less_than_sign) + }) + end + + defp 
script_data_double_escaped_dash(<<0, html::binary>>, s) do + script_data_double_escaped(html, %{ + s + | tokens: append_char_token(s, @replacement_char) + }) + end + + defp script_data_double_escaped_dash("", s) do + eof(:script_data_double_escaped_dash, s) + end + + defp script_data_double_escaped_dash(<<c::utf8, html::binary>>, s) do + script_data_double_escaped(html, %{ + s + | tokens: append_char_token(s, c) + }) + end + + # ยง tokenizer-script-data-double-escaped-dash-dash-state + + defp script_data_double_escaped_dash_dash(<<?-, html::binary>>, s) do + script_data_double_escaped_dash_dash(html, %{ + s + | tokens: append_char_token(s, @hyphen_minus) + }) + end + + defp script_data_double_escaped_dash_dash(<<?<, html::binary>>, s) do + script_data_double_escaped_less_than_sign(html, %{ + s + | tokens: append_char_token(s, @less_than_sign) + }) + end + + defp script_data_double_escaped_dash_dash(<<?>, html::binary>>, s) do + script_data(html, %{ + s + | tokens: append_char_token(s, @greater_than_sign) + }) + end + + defp script_data_double_escaped_dash_dash( + <<0, html::binary>>, + s + ) do + script_data_double_escaped(html, %{ + s + | tokens: append_char_token(s, @replacement_char) + }) + end + + defp script_data_double_escaped_dash_dash("", s) do + eof(:script_data_double_escaped_dash_dash, s) + end + + defp script_data_double_escaped_dash_dash( + <<c::utf8, html::binary>>, + s + ) do + script_data_double_escaped(html, %{ + s + | tokens: append_char_token(s, c) + }) + end + + # ยง tokenizer-script-data-double-escaped-less-than-sign-state + + defp script_data_double_escaped_less_than_sign( + <<?/, html::binary>>, + s + ) do + script_data_double_escape_end(html, %{ + s + | buffer: "", + tokens: append_char_token(s, @solidus) + }) + end + + defp script_data_double_escaped_less_than_sign(html, s) do + script_data_double_escaped(html, s) + end + + # ยง tokenizer-script-data-double-escape-end-state + + defp script_data_double_escape_end( + <<c, html::binary>>, + s + ) + when c in [?/, ?> | @space_chars] do + if IO.chardata_to_string(s.buffer) == "script" do + script_data_escaped(html, %{s | tokens: append_char_token(s, c)}) + else + script_data_double_escaped(html, %{s | tokens: append_char_token(s, c)}) + end + end + + defp script_data_double_escape_end( + <<c, html::binary>>, + s + ) + when is_upper_letter(c) do + script_data_double_escape_end(html, %{ + s + | buffer: [s.buffer | [c + 32]], + tokens: append_char_token(s, c) + }) + end + + defp script_data_double_escape_end( + <<c, html::binary>>, + s + ) + when is_lower_letter(c) do + script_data_double_escape_end(html, %{ + s + | buffer: [s.buffer | [c]], + tokens: append_char_token(s, c) + }) + end + + defp script_data_double_escape_end(html, s) do + script_data_double_escaped(html, s) + end + + # ยง tokenizer-before-attribute-name-state + + defp before_attribute_name(<<c, html::binary>>, s) + when is_space(c) do + before_attribute_name(html, s) + end + + defp before_attribute_name(html = <<c, _rest::binary>>, s) + when c in [?/, ?>] do + after_attribute_name(html, s) + end + + defp before_attribute_name("", s) do + after_attribute_name("", s) + end + + defp before_attribute_name(<<?=, html::binary>>, s) do + new_token = %StartTag{ + s.token + | attributes: [ + %Attribute{name: "=", value: ""} | s.token.attributes + ] + } + + attribute_name(html, %{ + s + | errors: [{:parse_error, nil} | s.errors], + token: new_token + }) + end + + defp before_attribute_name(html, s) do + # NOTE: token here can be a StartTag or EndTag. 
Attributes on end tags will be ignored. + new_token = %{ + s.token + | attributes: [ + %Attribute{name: "", value: ""} | s.token.attributes + ] + } + + attribute_name(html, %{ + s + | token: new_token + }) + end + + # ยง tokenizer-attribute-name-state + + defp attribute_name(html = <<c, _rest::binary>>, s) + when c in [@solidus, @greater_than_sign | @space_chars] do + # FIXME: before changing the state, verify if same attr already exists. + after_attribute_name(html, s) + end + + defp attribute_name("", s) do + # FIXME: before changing the state, verify if same attr already exists. + after_attribute_name("", s) + end + + defp attribute_name(<<?=, html::binary>>, s) do + # FIXME: before changing the state, verify if same attr already exists. + before_attribute_value(html, s) + end + + defp attribute_name(<<c, html::binary>>, s) + when is_upper_letter(c) do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | name: [attr.name | [c + 32]]} + new_token = %StartTag{s.token | attributes: [new_attr | attrs]} + + attribute_name(html, %{s | token: new_token}) + end + + defp attribute_name(<<0, html::binary>>, s) do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | name: [attr.name | [@replacement_char]]} + new_token = %StartTag{s.token | attributes: [new_attr | attrs]} + + attribute_name(html, %{s | token: new_token}) + end + + defp attribute_name(<<c, html::binary>>, s) + when c in [?", ?', ?<] do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | name: [attr.name | [c]]} + new_token = %StartTag{s.token | attributes: [new_attr | attrs]} + + attribute_name(html, %{ + s + | errors: [{:parse_error, nil} | s.errors], + token: new_token + }) + end + + defp attribute_name(<<c::utf8, html::binary>>, s) do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | name: [attr.name | [c]]} + + # NOTE: token here can be a StartTag or EndTag. Attributes on end tags will be ignored. 
+ new_token = %{s.token | attributes: [new_attr | attrs]} + + attribute_name(html, %{s | token: new_token}) + end + + # ยง tokenizer-after-attribute-name-state + + defp after_attribute_name(<<c, html::binary>>, s) + when is_space(c) do + after_attribute_name(html, s) + end + + defp after_attribute_name(<<?/, html::binary>>, s) do + self_closing_start_tag(html, s) + end + + defp after_attribute_name(<<?=, html::binary>>, s) do + before_attribute_value(html, s) + end + + defp after_attribute_name(<<?>, html::binary>>, s) do + data(html, %{ + s + | tokens: [s.emit.(s.token) | s.tokens], + token: nil + }) + end + + defp after_attribute_name("", s) do + eof(:data, %{ + s + | errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_attribute_name(html, s) do + attribute = %Attribute{name: "", value: ""} + new_token = %StartTag{s.token | attributes: [attribute | s.token.attributes]} + + attribute_name(html, %{s | token: new_token}) + end + + # ยง tokenizer-before-attribute-value-state + + defp before_attribute_value(<<c, html::binary>>, s) + when is_space(c) do + before_attribute_value(html, s) + end + + defp before_attribute_value(<<?", html::binary>>, s) do + attribute_value_double_quoted(html, s) + end + + defp before_attribute_value(<<?', html::binary>>, s) do + attribute_value_single_quoted(html, s) + end + + defp before_attribute_value(html = <<?>, _rest::binary>>, s) do + attribute_value_unquoted(html, %{ + s + | errors: [{:parse_error, nil} | s.errors] + }) + end + + defp before_attribute_value(html, s) do + attribute_value_unquoted(html, s) + end + + # ยง tokenizer-attribute-value-double-quoted-state + + defp attribute_value_double_quoted(<<?", html::binary>>, s) do + after_attribute_value_quoted(html, s) + end + + defp attribute_value_double_quoted(<<?&, html::binary>>, s) do + character_reference(html, %{s | return_state: :attribute_value_double_quoted}) + end + + defp attribute_value_double_quoted(<<0, html::binary>>, s) do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | value: [attr.value | [@replacement_char]]} + + attribute_value_double_quoted(html, %{ + s + | errors: [{:parse_error, nil} | s.errors], + token: %StartTag{s.token | attributes: [new_attr | attrs]} + }) + end + + defp attribute_value_double_quoted("", s) do + eof(:attribute_value_double_quoted, %{ + s + | errors: [{:parse_error, nil} | s.errors] + }) + end + + defp attribute_value_double_quoted(<<c::utf8, html::binary>>, s) do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | value: [attr.value | [c]]} + + attribute_value_double_quoted(html, %{ + s + | token: %StartTag{s.token | attributes: [new_attr | attrs]} + }) + end + + # ยง tokenizer-attribute-value-single-quoted-state + + defp attribute_value_single_quoted(<<?', html::binary>>, s) do + after_attribute_value_quoted(html, s) + end + + defp attribute_value_single_quoted(<<?&, html::binary>>, s) do + character_reference(html, %{s | return_state: :attribute_value_single_quoted}) + end + + defp attribute_value_single_quoted(<<0, html::binary>>, s) do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | value: [attr.value | [@replacement_char]]} + + attribute_value_single_quoted(html, %{ + s + | errors: [{:parse_error, nil} | s.errors], + token: %StartTag{s.token | attributes: [new_attr | attrs]} + }) + end + + defp attribute_value_single_quoted("", s) do + eof(:attribute_value_single_quoted, %{ + s + | errors: [{:parse_error, nil} | s.errors] + }) + end + + defp attribute_value_single_quoted(<<c::utf8, 
html::binary>>, s) do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | value: [attr.value | [c]]} + + # NOTE: token here can be a StartTag or EndTag. Attributes on end tags will be ignored. + attribute_value_single_quoted(html, %{ + s + | token: %{s.token | attributes: [new_attr | attrs]} + }) + end + + # ยง tokenizer-attribute-value-unquoted-state + + defp attribute_value_unquoted(<<c, html::binary>>, s) when is_space(c) do + before_attribute_name(html, s) + end + + defp attribute_value_unquoted(<<?&, html::binary>>, s) do + character_reference(html, %{s | return_state: :attribute_value_unquoted}) + end + + defp attribute_value_unquoted(<<?>, html::binary>>, s) do + data(html, %{s | tokens: [s.emit.(s.token) | s.tokens], token: nil}) + end + + defp attribute_value_unquoted(<<0, html::binary>>, s) do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | value: [attr.value | [@replacement_char]]} + + attribute_value_unquoted(html, %{ + s + | errors: [{:parse_error, nil} | s.errors], + token: %{s.token | attributes: [new_attr | attrs]} + }) + end + + defp attribute_value_unquoted(<<c, html::binary>>, s) + when c in [?", ?', ?<, ?=, ?`] do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | value: [attr.value | [c]]} + + attribute_value_unquoted(html, %{ + s + | errors: [{:parse_error, nil} | s.errors], + token: %{s.token | attributes: [new_attr | attrs]} + }) + end + + defp attribute_value_unquoted("", s) do + eof(:attribute_value_unquoted, %{ + s + | errors: [{:parse_error, nil} | s.errors] + }) + end + + defp attribute_value_unquoted(<<c::utf8, html::binary>>, s) do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | value: [attr.value | [c]]} + + attribute_value_unquoted(html, %{ + s + | token: %{s.token | attributes: [new_attr | attrs]} + }) + end + + # ยง tokenizer-after-attribute-value-quoted-state + + defp after_attribute_value_quoted(<<c, html::binary>>, s) + when is_space(c) do + before_attribute_name(html, s) + end + + defp after_attribute_value_quoted(<<?/, html::binary>>, s) do + self_closing_start_tag(html, s) + end + + defp after_attribute_value_quoted(<<?>, html::binary>>, s) do + data(html, %{s | tokens: [s.emit.(s.token) | s.tokens], token: nil}) + end + + defp after_attribute_value_quoted("", s) do + eof(:after_attribute_value_quoted, %{ + s + | errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_attribute_value_quoted(html, s) do + before_attribute_name(html, s) + end + + # ยง tokenizer-self-closing-start-tag-state + + defp self_closing_start_tag(<<?>, html::binary>>, s) do + tag = %StartTag{s.token | self_close: true} + data(html, %{s | tokens: [tag | s.tokens], token: nil}) + end + + defp self_closing_start_tag("", s) do + eof(:self_closing_start_tag, %{ + s + | errors: [{:parse_error, nil} | s.errors] + }) + end + + defp self_closing_start_tag(html, s) do + before_attribute_name(html, %{ + s + | errors: [{:parse_error, nil} | s.errors] + }) + end + + # ยง tokenizer-bogus-comment-state + + defp bogus_comment(<<?>, html::binary>>, s) do + data(html, %{s | tokens: [s.emit.(s.token) | s.tokens], token: nil}) + end + + defp bogus_comment("", s) do + eof(:bogus_comment, %{s | tokens: [s.emit.(s.token) | s.tokens], token: nil}) + end + + defp bogus_comment(<<0, html::binary>>, s) do + comment = %Comment{s.token | data: [s.token.data | [@replacement_char]]} + + bogus_comment(html, %{s | token: comment}) + end + + defp bogus_comment(<<c::utf8, html::binary>>, s) do + comment = %Comment{s.token | 
data: [s.token.data | [c]]} + + bogus_comment(html, %{s | token: comment}) + end + + # ยง tokenizer-markup-declaration-open-state + + defp markup_declaration_open(<<"--", html::binary>>, s) do + token = %Comment{data: ""} + + comment_start( + html, + %{s | token: token} + ) + end + + defp markup_declaration_open( + <<d, o, c, t, y, p, e, html::binary>>, + s + ) + when d in [?D, ?d] and o in [?O, ?o] and c in [?C, ?c] and + t in [?T, ?t] and y in [?Y, ?y] and + p in [?P, ?p] and e in [?E, ?e] do + doctype(html, s) + end + + # TODO: fix the check for adjusted current node in HTML namespace + defp markup_declaration_open(<<"[CDATA[", html::binary>>, s = %State{adjusted_current_node: n}) + when not is_nil(n) do + cdata_section(html, s) + end + + defp markup_declaration_open(html, s) do + bogus_comment(html, %{ + s + | token: %Comment{}, + errors: [{:parse_error, nil} | s.errors] + }) + end + + # ยง tokenizer-comment-start-state + + defp comment_start(<<?-, html::binary>>, s) do + comment_start_dash(html, s) + end + + defp comment_start(<<?>, html::binary>>, s) do + data(html, %{ + s + | tokens: [s.emit.(s.token) | s.tokens], + token: nil, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp comment_start(html, s) do + comment(html, s) + end + + # ยง tokenizer-comment-start-dash-state + + defp comment_start_dash(<<?-, html::binary>>, s) do + comment_end(html, s) + end + + defp comment_start_dash(<<?>, html::binary>>, s) do + data(html, %{ + s + | tokens: [s.emit.(s.token) | s.tokens], + token: nil, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp comment_start_dash("", s) do + eof(:comment_start_dash, %{ + s + | errors: [{:parse_error, nil} | s.errors], + tokens: [s.emit.(s.token) | s.tokens], + token: nil + }) + end + + defp comment_start_dash(html, s) do + new_comment = %Comment{s.token | data: [s.token.data | [@hyphen_minus]]} + + comment(html, %{s | token: new_comment}) + end + + # ยง tokenizer-comment-state + + defp comment(<<?<, html::binary>>, s) do + new_comment = %Comment{s.token | data: [s.token.data | [@less_than_sign]]} + + comment_less_than_sign(html, %{s | token: new_comment}) + end + + defp comment(<<?-, html::binary>>, s) do + comment_end_dash(html, s) + end + + defp comment(<<0, html::binary>>, s) do + new_comment = %Comment{s.token | data: [s.token.data | [@replacement_char]]} + + comment(html, %{ + s + | token: new_comment, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp comment("", s) do + eof(:comment, %{ + s + | errors: [{:parse_error, nil} | s.errors], + tokens: [s.emit.(s.token) | s.tokens], + token: nil + }) + end + + defp comment(<<c::utf8, html::binary>>, s) do + new_token = %Comment{s.token | data: [s.token.data | [c]]} + + comment( + html, + %{s | token: new_token} + ) + end + + # ยง tokenizer-comment-less-than-sign-state + + defp comment_less_than_sign(<<?!, html::binary>>, s) do + new_comment = %Comment{s.token | data: [s.token.data | [@exclamation_mark]]} + + comment_less_than_sign_bang(html, %{s | token: new_comment}) + end + + defp comment_less_than_sign(<<?<, html::binary>>, s) do + new_comment = %Comment{s.token | data: [s.token.data | [@less_than_sign]]} + + comment_less_than_sign(html, %{s | token: new_comment}) + end + + defp comment_less_than_sign(html, s) do + comment(html, s) + end + + # ยง tokenizer-comment-less-than-sign-bang-state + + defp comment_less_than_sign_bang(<<?-, html::binary>>, s) do + comment_less_than_sign_bang_dash(html, s) + end + + defp comment_less_than_sign_bang(html, s) do + comment(html, s) + 
end + + # ยง tokenizer-comment-less-than-sign-bang-dash-state + + defp comment_less_than_sign_bang_dash(<<?-, html::binary>>, s) do + comment_less_than_sign_bang_dash_dash(html, s) + end + + defp comment_less_than_sign_bang_dash(html, s) do + comment_end_dash(html, s) + end + + # ยง tokenizer-comment-less-than-sign-bang-dash-dash-state + + defp comment_less_than_sign_bang_dash_dash(html = <<?>, _rest::binary>>, s) do + comment_end(html, s) + end + + defp comment_less_than_sign_bang_dash_dash(html = "", s) do + comment_end(html, s) + end + + defp comment_less_than_sign_bang_dash_dash(html, s) do + comment_end(html, %{s | errors: [{:parse_error, nil} | s.errors]}) + end + + # ยง tokenizer-comment-end-dash-state + + defp comment_end_dash(<<?-, html::binary>>, s) do + comment_end(html, s) + end + + defp comment_end_dash("", s) do + eof(:comment_end_dash, %{ + s + | tokens: [s.emit.(s.token) | s.tokens], + token: nil, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp comment_end_dash(html, s) do + new_comment = %Comment{s.token | data: [s.token.data | [@hyphen_minus]]} + + comment(html, %{s | token: new_comment}) + end + + # ยง tokenizer-comment-end-state + + defp comment_end(<<?>, html::binary>>, s) do + data( + html, + %{s | tokens: [s.emit.(s.token) | s.tokens], token: nil} + ) + end + + defp comment_end(<<?!, html::binary>>, s) do + comment_end_bang(html, s) + end + + defp comment_end(<<?-, html::binary>>, s) do + new_comment = %Comment{s.token | data: [s.token.data | [@hyphen_minus]]} + + comment_end(html, %{s | token: new_comment}) + end + + defp comment_end("", s) do + eof(:comment_end, %{ + s + | tokens: [s.emit.(s.token) | s.tokens], + token: nil, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp comment_end(html, s) do + new_comment = %Comment{s.token | data: [s.token.data | ["--"]]} + + comment(html, %{s | token: new_comment}) + end + + # ยง tokenizer-comment-end-bang-state + + defp comment_end_bang(<<?-, html::binary>>, s) do + new_comment = %Comment{s.token | data: [s.token.data | ["--!"]]} + + comment_end_dash(html, %{s | token: new_comment}) + end + + defp comment_end_bang(<<?>, html::binary>>, s) do + data(html, %{ + s + | tokens: [s.emit.(s.token) | s.tokens], + token: nil, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp comment_end_bang("", s) do + eof(:comment_end_bang, %{ + s + | tokens: [s.emit.(s.token) | s.tokens], + token: nil, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp comment_end_bang(html, s) do + new_comment = %Comment{s.token | data: [s.token.data | ["--!"]]} + + comment(html, %{s | token: new_comment}) + end + + # ยง tokenizer-doctype-state + + defp doctype(<<c, html::binary>>, s) + when is_space(c) do + before_doctype_name(html, s) + end + + defp doctype("", s) do + doctype_token = %Doctype{force_quirks: :on} + eof(:doctype, %{s | tokens: [doctype_token | s.tokens], token: nil}) + end + + defp doctype(html, s) do + before_doctype_name(html, %{ + s + | errors: [{:parse_error, nil} | s.errors] + }) + end + + # ยง tokenizer-before-doctype-name-state + + defp before_doctype_name(<<c, html::binary>>, s) + when is_space(c) do + before_doctype_name(html, s) + end + + defp before_doctype_name(<<c, html::binary>>, s) + when is_upper_letter(c) do + token = %Doctype{name: [c + 32]} + + doctype_name(html, %{s | token: token}) + end + + defp before_doctype_name(<<0, html::binary>>, s) do + token = %Doctype{ + name: [@replacement_char], + force_quirks: :on + } + + doctype_name(html, %{s | token: token}) + end + + defp 
before_doctype_name(<<?>, html::binary>>, s) do + token = %Doctype{ + force_quirks: :on + } + + data(html, %{ + s + | tokens: [token | s.tokens], + token: nil, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp before_doctype_name("", s) do + token = %Doctype{ + force_quirks: :on + } + + eof(:before_doctype_name, %{ + s + | tokens: [token | s.tokens], + token: nil, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp before_doctype_name(<<c::utf8, html::binary>>, s) do + token = %Doctype{ + name: [c] + } + + doctype_name(html, %{s | token: token}) + end + + # ยง tokenizer-doctype-name-state + + defp doctype_name(<<c, html::binary>>, s) + when is_space(c) do + after_doctype_name(html, s) + end + + defp doctype_name(<<?>, html::binary>>, s) do + data(html, %{ + s + | tokens: [s.emit.(s.token) | s.tokens], + token: nil + }) + end + + defp doctype_name(<<c, html::binary>>, s) when is_upper_letter(c) do + new_token = %Doctype{ + s.token + | name: [s.token.name | [c + 32]] + } + + doctype_name(html, %{s | token: new_token}) + end + + defp doctype_name(<<0, html::binary>>, s) do + new_token = %Doctype{s.token | name: [s.token.name | [@replacement_char]]} + + doctype_name(html, %{ + s + | token: new_token, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_name("", s) do + new_token = %Doctype{s.token | force_quirks: :on} + + eof(:doctype_name, %{ + s + | tokens: [new_token | s.tokens], + token: nil, + errors: [{:parse_error, "eof-in-doctype"} | s.errors] + }) + end + + defp doctype_name(<<c::utf8, html::binary>>, s) do + new_token = %Doctype{s.token | name: [s.token.name | [c]]} + + doctype_name(html, %{s | token: new_token}) + end + + # ยง tokenizer-after-doctype-name-state + + defp after_doctype_name(<<c, html::binary>>, s) + when is_space(c) do + after_doctype_name(html, s) + end + + defp after_doctype_name(<<?>, html::binary>>, s) do + data(html, %{ + s + | tokens: [s.emit.(s.token) | s.tokens], + token: nil + }) + end + + defp after_doctype_name("", s) do + token = %Doctype{s.token | force_quirks: :on} + + eof(:after_doctype_name, %{ + s + | tokens: [token | s.tokens], + token: nil, + errors: [{:parse_error, "eof-in-doctype"} | s.errors] + }) + end + + defp after_doctype_name( + <<p, u, b, l, i, c, html::binary>>, + s + ) + when p in [?P, ?p] and u in [?U, ?u] and b in [?B, ?b] and + l in [?L, ?l] and i in [?I, ?i] and + c in [?C, ?c] do + after_doctype_public_keyword(html, s) + end + + defp after_doctype_name( + <<s1, y, s2, t, e, m, html::binary>>, + state + ) + when s1 in [?S, ?s] and y in [?Y, ?y] and + s2 in [?S, ?s] and t in [?T, ?t] and + e in [?E, ?e] and m in [?M, ?m] do + after_doctype_system_keyword(html, state) + end + + defp after_doctype_name(html, s) do + token = %Doctype{s.token | force_quirks: :on} + + bogus_doctype(html, %{ + s + | token: token, + errors: [{:parse_error, nil} | s.errors] + }) + end + + # ยง tokenizer-after-doctype-public-keyword-state + + defp after_doctype_public_keyword(<<c, html::binary>>, s) + when is_space(c) do + before_doctype_public_identifier(html, s) + end + + defp after_doctype_public_keyword(<<?", html::binary>>, s) do + doctype = %Doctype{s.token | public_id: ""} + + doctype_public_identifier_double_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_doctype_public_keyword(<<?', html::binary>>, s) do + doctype = %Doctype{s.token | public_id: ""} + + doctype_public_identifier_single_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, 
nil} | s.errors] + }) + end + + defp after_doctype_public_keyword(<<?>, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + data(html, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_doctype_public_keyword("", s) do + doctype = %Doctype{s.token | force_quirks: :on} + + eof(:after_doctype_public_keyword, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_doctype_public_keyword(<<_c::utf8, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + bogus_doctype(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + # ยง tokenizer-before-doctype-public-identifier-state + + defp before_doctype_public_identifier(<<c, html::binary>>, s) + when is_space(c) do + before_doctype_public_identifier(html, s) + end + + defp before_doctype_public_identifier(<<?", html::binary>>, s) do + doctype = %Doctype{s.token | public_id: ""} + + doctype_public_identifier_double_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp before_doctype_public_identifier(<<?', html::binary>>, s) do + doctype = %Doctype{s.token | public_id: ""} + + doctype_public_identifier_single_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp before_doctype_public_identifier(<<?>, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + data(html, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp before_doctype_public_identifier("", s) do + doctype = %Doctype{s.token | force_quirks: :on} + + eof(:before_doctype_public_identifier, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp before_doctype_public_identifier(<<_::utf8, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + bogus_doctype(html, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + # ยง tokenizer-doctype-public-identifier-double-quoted-state + + defp doctype_public_identifier_double_quoted(<<?", html::binary>>, s) do + after_doctype_public_identifier(html, s) + end + + defp doctype_public_identifier_double_quoted(<<0, html::binary>>, s) do + doctype = %Doctype{s.token | public_id: [s.token.public_id | [@replacement_char]]} + + doctype_public_identifier_double_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_public_identifier_double_quoted(<<?>, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + data(html, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_public_identifier_double_quoted("", s) do + doctype = %Doctype{s.token | force_quirks: :on} + + eof(:doctype_public_identifier_double_quoted, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_public_identifier_double_quoted(<<c::utf8, html::binary>>, s) do + doctype = %Doctype{s.token | public_id: [s.token.public_id | [c]]} + + doctype_public_identifier_double_quoted(html, %{s | token: doctype}) + end + + # ยง tokenizer-doctype-public-identifier-single-quoted-state + + defp doctype_public_identifier_single_quoted(<<?', html::binary>>, 
s) do + after_doctype_public_identifier(html, s) + end + + defp doctype_public_identifier_single_quoted(<<0, html::binary>>, s) do + doctype = %Doctype{s.token | public_id: [s.token.public_id | [@replacement_char]]} + + doctype_public_identifier_single_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_public_identifier_single_quoted(<<?>, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + data(html, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_public_identifier_single_quoted("", s) do + doctype = %Doctype{s.token | force_quirks: :on} + + eof(:doctype_public_identifier_single_quoted, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_public_identifier_single_quoted(<<c::utf8, html::binary>>, s) do + doctype = %Doctype{s.token | public_id: [s.token.public_id | [c]]} + + doctype_public_identifier_single_quoted(html, %{s | token: doctype}) + end + + # ยง tokenizer-after-doctype-public-identifier-state + + defp after_doctype_public_identifier(<<c, html::binary>>, s) when is_space(c) do + between_doctype_public_and_system_identifiers(html, s) + end + + defp after_doctype_public_identifier(<<?>, html::binary>>, s) do + data(html, %{s | token: nil, tokens: [s.emit.(s.token) | s.tokens]}) + end + + defp after_doctype_public_identifier(<<?", html::binary>>, s) do + doctype = %Doctype{s.token | system_id: ""} + + doctype_system_identifier_double_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_doctype_public_identifier(<<?', html::binary>>, s) do + doctype = %Doctype{s.token | system_id: ""} + + doctype_system_identifier_single_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_doctype_public_identifier("", s) do + doctype = %Doctype{s.token | force_quirks: :on} + + eof(:after_doctype_public_identifier, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_doctype_public_identifier(<<_c::utf8, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + bogus_doctype(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + # ยง tokenizer-between-doctype-public-and-system-identifiers-state + + defp between_doctype_public_and_system_identifiers(<<c, html::binary>>, s) when is_space(c) do + between_doctype_public_and_system_identifiers(html, s) + end + + defp between_doctype_public_and_system_identifiers(<<?>, html::binary>>, s) do + data(html, %{s | token: nil, tokens: [s.emit.(s.token) | s.tokens]}) + end + + defp between_doctype_public_and_system_identifiers(<<?", html::binary>>, s) do + doctype = %Doctype{s.token | system_id: ""} + + doctype_system_identifier_double_quoted(html, %{s | token: doctype}) + end + + defp between_doctype_public_and_system_identifiers(<<?', html::binary>>, s) do + doctype = %Doctype{s.token | system_id: ""} + + doctype_system_identifier_single_quoted(html, %{s | token: doctype}) + end + + defp between_doctype_public_and_system_identifiers("", s) do + doctype = %Doctype{s.token | force_quirks: :on} + + eof(:between_doctype_public_and_system_identifiers, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp 
between_doctype_public_and_system_identifiers(<<_c::utf8, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + bogus_doctype(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + # ยง tokenizer-after-doctype-system-keyword-state + + defp after_doctype_system_keyword(<<c, html::binary>>, s) when is_space(c) do + before_doctype_system_identifier(html, s) + end + + defp after_doctype_system_keyword(<<?", html::binary>>, s) do + doctype = %Doctype{s.token | system_id: ""} + + doctype_system_identifier_double_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_doctype_system_keyword(<<?', html::binary>>, s) do + doctype = %Doctype{s.token | system_id: ""} + + doctype_system_identifier_single_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_doctype_system_keyword(<<?>, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + data(html, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_doctype_system_keyword("", s) do + doctype = %Doctype{s.token | force_quirks: :on} + + eof(:after_doctype_system_keyword, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_doctype_system_keyword(<<_c::utf8, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + bogus_doctype(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + # ยง tokenizer-before-doctype-system-identifier-state + + defp before_doctype_system_identifier(<<c, html::binary>>, s) when is_space(c) do + before_doctype_system_identifier(html, s) + end + + defp before_doctype_system_identifier(<<?", html::binary>>, s) do + doctype = %Doctype{s.token | system_id: ""} + + doctype_system_identifier_double_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp before_doctype_system_identifier(<<?', html::binary>>, s) do + doctype = %Doctype{s.token | system_id: ""} + + doctype_system_identifier_single_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp before_doctype_system_identifier(<<?>, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + data(html, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp before_doctype_system_identifier("", s) do + doctype = %Doctype{s.token | force_quirks: :on} + + eof(:before_doctype_system_identifier, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp before_doctype_system_identifier(<<_::utf8, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + bogus_doctype(html, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + # ยง tokenizer-doctype-system-identifier-double-quoted-state + + defp doctype_system_identifier_double_quoted(<<?", html::binary>>, s) do + after_doctype_system_identifier(html, s) + end + + defp doctype_system_identifier_double_quoted(<<0, html::binary>>, s) do + doctype = %Doctype{s.token | system_id: [s.token.system_id | [@replacement_char]]} + + doctype_system_identifier_double_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + 
defp doctype_system_identifier_double_quoted(<<?>, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + data(html, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_system_identifier_double_quoted("", s) do + doctype = %Doctype{s.token | force_quirks: :on} + + eof(:doctype_system_identifier_double_quoted, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_system_identifier_double_quoted(<<c::utf8, html::binary>>, s) do + doctype = %Doctype{s.token | system_id: [s.token.system_id | [c]]} + + doctype_system_identifier_double_quoted(html, %{s | token: doctype}) + end + + # ยง tokenizer-doctype-system-identifier-single-quoted-state + + defp doctype_system_identifier_single_quoted(<<?', html::binary>>, s) do + after_doctype_system_identifier(html, s) + end + + defp doctype_system_identifier_single_quoted(<<0, html::binary>>, s) do + doctype = %Doctype{s.token | system_id: [s.token.system_id | [@replacement_char]]} + + doctype_system_identifier_single_quoted(html, %{ + s + | token: doctype, + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_system_identifier_single_quoted(<<?>, html::binary>>, s) do + doctype = %Doctype{s.token | force_quirks: :on} + + data(html, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_system_identifier_single_quoted("", s) do + doctype = %Doctype{s.token | force_quirks: :on} + + eof(:doctype_system_identifier_single_quoted, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp doctype_system_identifier_single_quoted(<<c::utf8, html::binary>>, s) do + doctype = %Doctype{s.token | system_id: [s.token.system_id | [c]]} + + doctype_system_identifier_single_quoted(html, %{s | token: doctype}) + end + + # ยง tokenizer-after-doctype-system-identifier-state + + defp after_doctype_system_identifier(<<c, html::binary>>, s) when is_space(c) do + after_doctype_system_identifier(html, s) + end + + defp after_doctype_system_identifier(<<?>, html::binary>>, s) do + data(html, %{s | token: nil, tokens: [s.emit.(s.token) | s.tokens]}) + end + + defp after_doctype_system_identifier("", s) do + doctype = %Doctype{s.token | force_quirks: :on} + + eof(:after_doctype_system_identifier, %{ + s + | token: nil, + tokens: [doctype | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + defp after_doctype_system_identifier(<<_c::utf8, html::binary>>, s) do + bogus_doctype(html, %{ + s + | token: nil, + tokens: [s.emit.(s.token) | s.tokens], + errors: [{:parse_error, nil} | s.errors] + }) + end + + # ยง tokenizer-bogus-doctype-state + + defp bogus_doctype(<<?>, html::binary>>, s) do + data(html, %{s | token: nil, tokens: [s.emit.(s.token) | s.tokens]}) + end + + defp bogus_doctype(<<0, html::binary>>, s) do + # TODO: set error + bogus_doctype(html, s) + end + + defp bogus_doctype("", s) do + eof(:bogus_doctype, %{s | token: nil, tokens: [s.emit.(s.token) | s.tokens]}) + end + + defp bogus_doctype(<<_c::utf8, html::binary>>, s) do + bogus_doctype(html, s) + end + + # ยง tokenizer-cdata-section-state + + defp cdata_section(<<?], html::binary>>, s) do + cdata_section_bracket(html, s) + end + + defp cdata_section("", s) do + eof(:cdata_section, %{s | errors: [{:parse_error, nil} | s.errors]}) + end + + defp cdata_section(<<c::utf8, html::binary>>, s) do + 
cdata_section(html, %{s | tokens: append_char_token(s, c)}) + end + + # ยง tokenizer-cdata-section-bracket-state + + defp cdata_section_bracket(<<?], html::binary>>, s) do + cdata_section_end(html, s) + end + + defp cdata_section_bracket(html, s) do + cdata_section(html, %{s | tokens: append_char_token(s, ?])}) + end + + # ยง tokenizer-cdata-section-end-state + + defp cdata_section_end(<<?], html::binary>>, s) do + cdata_section_end(html, %{s | tokens: append_char_token(s, ?])}) + end + + defp cdata_section_end(<<?>, html::binary>>, s) do + data(html, s) + end + + defp cdata_section_end(html, s) do + cdata_section(html, %{s | tokens: append_char_token(s, [?], ?]])}) + end + + # ยง tokenizer-character-reference-state + + defp character_reference(<<c, _rest::binary>> = html, s) + when c in [?<, ?& | @space_chars] do + character_reference_end(html, %{s | buffer: "&"}) + end + + defp character_reference(<<?#, html::binary>>, s) do + numeric_character_reference(html, %{s | buffer: ["&" | [?#]]}) + end + + defp character_reference(html, s) do + seek_charref(html, %{s | buffer: "&", charref_state: %CharrefState{done: false}}) + end + + defp seek_charref( + <<c, html::binary>>, + s = %State{charref_state: %CharrefState{done: false}} + ) + when c == ?; or is_letter(c) or + is_digit(c) do + buffer = IO.chardata_to_string([s.buffer | [c]]) + candidate = Floki.Entities.get(buffer) + + charref_state = + if candidate != [] do + %CharrefState{s.charref_state | candidate: buffer} + else + s.charref_state + end + + len = charref_state.length + 1 + done_by_length? = len > 60 + done_by_semicolon? = c == ?; + + seek_charref(html, %{ + s + | buffer: buffer, + charref_state: %{ + charref_state + | length: len, + done: done_by_semicolon? || done_by_length? + } + }) + end + + defp seek_charref(html, s) do + charref_state = %CharrefState{s.charref_state | done: true} + + seek_charref_end(html, %{s | charref_state: charref_state}) + end + + defp seek_charref_end(html, s = %State{return_state: return_state}) + when return_state in [ + :attribute_value_double_quoted, + :attribute_value_single_quoted, + :attribute_value_unquoted + ] do + last_char = + s.buffer + |> IO.chardata_to_string() + |> String.codepoints() + |> List.last() + + with true <- last_char != ";", + <<c, _html::binary>> + when c == ?= or is_letter(c) or + is_digit(c) <- html do + character_reference_end(html, s) + else + _ -> + buffer = + if s.buffer == s.charref_state.candidate do + character_buffer(s) + else + s.buffer + end + + character_reference_end(html, %{s | buffer: buffer}) + end + end + + defp seek_charref_end(html, s) do + candidate = s.charref_state.candidate + + ends_with_semicolon? = String.ends_with?(s.buffer, ";") + + parse_error_on_unmatch? = + String.starts_with?(s.buffer, "&") && ends_with_semicolon? && candidate == nil + + parse_error_on_non_semicolon_ending? = !ends_with_semicolon? + + state = + cond do + parse_error_on_unmatch? -> + %{s | errors: [{:parse_error, nil} | s.errors]} + + parse_error_on_non_semicolon_ending? 
-> + %{ + s + | errors: [ + { + :parse_error, + "missing-semicolon-after-character-reference" + } + | s.errors + ] + } + + true -> + s + end + + buffer = character_buffer(s) + html = charref_html_after_buffer(html, s) + + character_reference_end(html, %{state | buffer: buffer}) + end + + defp character_buffer(%State{charref_state: %CharrefState{candidate: candidate}, buffer: buffer}) do + if candidate do + Floki.Entities.get(candidate) + else + buffer + end + end + + ## Helper functions that modifies the HTML string. + # OPTIMIZE: avoid concatenation of string. + defp charref_html_after_buffer(html, %State{ + charref_state: %CharrefState{candidate: candidate}, + buffer: buffer + }) + when is_binary(buffer) and is_binary(candidate) do + String.replace_prefix(buffer, candidate, "") <> html + end + + defp charref_html_after_buffer( + html, + s = %State{ + charref_state: %CharrefState{candidate: candidate} + } + ) + when is_binary(candidate) do + String.replace_prefix(IO.chardata_to_string(s.buffer), candidate, "") <> html + end + + defp charref_html_after_buffer(html, _), do: html + + # ยง tokenizer-numeric-character-reference-state + + defp numeric_character_reference(html, s) do + do_numeric_character_reference(html, %{s | charref_code: 0}) + end + + defp do_numeric_character_reference(<<c, html::binary>>, s) + when c in [?x, ?X] do + hexadecimal_character_reference_start(html, %{s | buffer: [s.buffer | [c]]}) + end + + defp do_numeric_character_reference(html, s) do + decimal_character_reference_start(html, s) + end + + # ยง tokenizer-hexadecimal-character-reference-start-state + + defp hexadecimal_character_reference_start(html = <<c, _rest::binary>>, s) + when is_letter(c) or is_digit(c) do + hexadecimal_character_reference(html, s) + end + + defp hexadecimal_character_reference_start(html, s) do + # set parse error + + character_reference_end(html, s) + end + + # ยง tokenizer-decimal-character-reference-start-state + + defp decimal_character_reference_start(html = <<c, _rest::binary>>, s) when is_digit(c) do + decimal_character_reference(html, s) + end + + defp decimal_character_reference_start(html, s) do + # set parse error + character_reference_end(html, s) + end + + # ยง tokenizer-hexadecimal-character-reference-state + + defp hexadecimal_character_reference(<<c, html::binary>>, s) when is_digit(c) do + hexadecimal_character_reference(html, %{s | charref_code: s.charref_code * 16 + c - 0x30}) + end + + defp hexadecimal_character_reference(<<c, html::binary>>, s) when c in ?A..?F do + hexadecimal_character_reference(html, %{s | charref_code: s.charref_code * 16 + c - 0x37}) + end + + defp hexadecimal_character_reference(<<c, html::binary>>, s) when c in ?a..?f do + hexadecimal_character_reference(html, %{s | charref_code: s.charref_code * 16 + c - 0x57}) + end + + defp hexadecimal_character_reference(<<?;, html::binary>>, s) do + numeric_character_reference_end(html, s) + end + + defp hexadecimal_character_reference(html, s) do + # set parse error + numeric_character_reference_end(html, s) + end + + # ยง tokenizer-decimal-character-reference-state + + defp decimal_character_reference(<<c, html::binary>>, s) when is_digit(c) do + decimal_character_reference(html, %{s | charref_code: s.charref_code * 10 + c - 0x30}) + end + + defp decimal_character_reference(<<?;, html::binary>>, s) do + numeric_character_reference_end(html, s) + end + + defp decimal_character_reference(html, s) do + # set parse error + + numeric_character_reference_end(html, s) + end + + # ยง 
tokenizer-decimal-character-reference-state + + defp numeric_character_reference_end(html, s) do + # set parse errors + {:ok, {_, numeric_char}} = Floki.HTML.NumericCharref.to_unicode_number(s.charref_code) + + character_reference_end(html, %{s | buffer: [numeric_char]}) + end + + # ยง tokenizer-character-reference-end-state + + @spec character_reference_end(binary(), State.t()) :: State.t() + defp character_reference_end(html, s) do + state = + if part_of_attr?(s) do + [attr | attrs] = s.token.attributes + new_attr = %Attribute{attr | value: [attr.value | s.buffer]} + new_tag = %StartTag{s.token | attributes: [new_attr | attrs]} + + %{s | token: new_tag} + else + %{s | tokens: append_char_token(s, s.buffer)} + end + + case state.return_state do + :data -> + data(html, state) + + :rcdata -> + rcdata(html, state) + + :attribute_value_unquoted -> + attribute_value_unquoted(html, state) + + :attribute_value_single_quoted -> + attribute_value_single_quoted(html, state) + + :attribute_value_double_quoted -> + attribute_value_double_quoted(html, state) + end + end + + defp part_of_attr?(state) do + state.return_state in [ + :attribute_value_double_quoted, + :attribute_value_single_quoted, + :attribute_value_unquoted + ] + end + + defp append_char_token(state, char) do + case state.tokens do + [{:char, data} | rest] -> + if is_binary(char) do + [state.emit.({:char, [data | char]}) | rest] + else + [state.emit.({:char, [data | [char]]}) | rest] + end + + other_tokens -> + if is_list(char) || is_binary(char) do + [state.emit.({:char, char}) | other_tokens] + else + [state.emit.({:char, [char]}) | other_tokens] + end + end + end + + defp appropriate_tag?(state) do + with %StartTag{name: start_tag_name} <- state.last_start_tag, + %EndTag{name: end_tag_name} <- state.token do + IO.chardata_to_string(start_tag_name) == IO.chardata_to_string(end_tag_name) + else + _ -> false + end + end + + defp tokens_for_inappropriate_end_tag(state) do + [ + state.emit.({:char, state.buffer}), + state.emit.({:char, [@solidus]}), + state.emit.({:char, [@less_than_sign]}) | state.tokens + ] + end +end diff --git a/deps/floki/lib/floki/html_parser.ex b/deps/floki/lib/floki/html_parser.ex new file mode 100644 index 0000000..55f2e1b --- /dev/null +++ b/deps/floki/lib/floki/html_parser.ex @@ -0,0 +1,38 @@ +defmodule Floki.HTMLParser do + @moduledoc """ + A entry point to dynamic dispatch functions to + the configured HTML parser. + + The configuration can be done with the `:html_parser` + option when calling the functions, or for the `:floki` application: + + Floki.parse_document(document, html_parser: Floki.HTMLParser.FastHtml) + + Or: + + use Mix.Config + config :floki, :html_parser, Floki.HTMLParser.Mochiweb + + The default parser is Mochiweb, which comes with Floki. + You can also choose between Html5ever or FastHtml. + + This module is also a behaviour that those parsers must implement. 
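+
+  Because this module is also a behaviour, a custom parser can be plugged in
+  the same way. A minimal, hypothetical sketch (the module name
+  `MyApp.NullParser` is illustrative only and not part of Floki):
+
+      defmodule MyApp.NullParser do
+        @behaviour Floki.HTMLParser
+
+        @impl true
+        def parse_document(_html), do: {:ok, []}
+
+        @impl true
+        def parse_fragment(_html), do: {:ok, []}
+      end
+
+  It could then be selected with `config :floki, :html_parser, MyApp.NullParser`.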
+ """ + + @default_parser Floki.HTMLParser.Mochiweb + + @callback parse_document(binary()) :: {:ok, Floki.html_tree()} | {:error, String.t()} + @callback parse_fragment(binary()) :: {:ok, Floki.html_tree()} | {:error, String.t()} + + def parse_document(html, opts \\ []) do + parser(opts).parse_document(html) + end + + def parse_fragment(html, opts \\ []) do + parser(opts).parse_fragment(html) + end + + defp parser(opts) do + opts[:html_parser] || Application.get_env(:floki, :html_parser, @default_parser) + end +end diff --git a/deps/floki/lib/floki/html_parser/fast_html.ex b/deps/floki/lib/floki/html_parser/fast_html.ex new file mode 100644 index 0000000..dd0431d --- /dev/null +++ b/deps/floki/lib/floki/html_parser/fast_html.ex @@ -0,0 +1,30 @@ +defmodule Floki.HTMLParser.FastHtml do + @behaviour Floki.HTMLParser + @moduledoc false + + @impl true + def parse_document(html) do + execute_with_module(fn module -> module.decode(html) end) + end + + @impl true + def parse_fragment(html) do + execute_with_module(fn module -> module.decode_fragment(html) end) + end + + defp execute_with_module(fun) do + case Code.ensure_loaded(:fast_html) do + {:module, module} -> + case fun.(module) do + {:ok, result} -> + {:ok, result} + + {:error, _message} = error -> + error + end + + {:error, _reason} -> + raise "Expected module :fast_html to be available." + end + end +end diff --git a/deps/floki/lib/floki/html_parser/html5ever.ex b/deps/floki/lib/floki/html_parser/html5ever.ex new file mode 100644 index 0000000..70a2c26 --- /dev/null +++ b/deps/floki/lib/floki/html_parser/html5ever.ex @@ -0,0 +1,26 @@ +defmodule Floki.HTMLParser.Html5ever do + @behaviour Floki.HTMLParser + + @moduledoc false + + @impl true + def parse_document(html) do + case Code.ensure_loaded(Html5ever) do + {:module, module} -> + case module.parse(html) do + {:ok, result} -> + {:ok, result} + + {:error, _message} = error -> + error + end + + {:error, _reason} -> + raise "Expected module Html5ever to be available." + end + end + + # NOTE: html5ever does not implement parse_fragment yet. + @impl true + def parse_fragment(html), do: parse_document(html) +end diff --git a/deps/floki/lib/floki/html_parser/mochiweb.ex b/deps/floki/lib/floki/html_parser/mochiweb.ex new file mode 100644 index 0000000..6bdf0e7 --- /dev/null +++ b/deps/floki/lib/floki/html_parser/mochiweb.ex @@ -0,0 +1,18 @@ +defmodule Floki.HTMLParser.Mochiweb do + @behaviour Floki.HTMLParser + + @moduledoc false + @root_node "floki" + + @impl true + def parse_document(html) do + html = "<#{@root_node}>#{html}</#{@root_node}>" + {@root_node, [], parsed} = :floki_mochi_html.parse(html) + + {:ok, parsed} + end + + # NOTE: mochi_html cannot make a distinction of a fragment and document. + @impl true + def parse_fragment(html), do: parse_document(html) +end diff --git a/deps/floki/lib/floki/html_tree.ex b/deps/floki/lib/floki/html_tree.ex new file mode 100644 index 0000000..774a70f --- /dev/null +++ b/deps/floki/lib/floki/html_tree.ex @@ -0,0 +1,292 @@ +defmodule Floki.HTMLTree do + @moduledoc false + + # Builds a `Map` representing a HTML tree based on tuples or list of tuples. + # + # It is useful because keeps references for each node, and the possibility to + # update the tree. 
+ + alias Floki.HTMLTree + alias Floki.HTMLTree.{HTMLNode, Text, Comment, IDSeeder} + + defstruct nodes: %{}, root_nodes_ids: [], node_ids: [] + + @type t :: %__MODULE__{ + nodes: %{optional(pos_integer()) => HTMLNode.t() | Text.t() | Comment.t()}, + root_nodes_ids: [pos_integer()], + node_ids: [pos_integer()] + } + + def build({:comment, comment}) do + %HTMLTree{ + root_nodes_ids: [1], + node_ids: [1], + nodes: %{ + 1 => %Comment{content: comment, node_id: 1} + } + } + end + + def build({tag, attrs, children}) do + root_id = IDSeeder.seed([]) + root_node = %HTMLNode{type: tag, attributes: attrs, node_id: root_id} + + build_tree( + %HTMLTree{root_nodes_ids: [root_id], node_ids: [root_id], nodes: %{root_id => root_node}}, + children, + root_id, + [] + ) + end + + def build(html_tuples) when is_list(html_tuples) do + reducer = fn + {:pi, _, _}, tree -> + tree + + {tag, attrs, children}, tree -> + root_id = IDSeeder.seed(tree.node_ids) + + root_node = %HTMLNode{type: tag, attributes: attrs, node_id: root_id} + + build_tree( + %{ + tree + | nodes: Map.put(tree.nodes, root_id, root_node), + node_ids: [root_id | tree.node_ids], + root_nodes_ids: [root_id | tree.root_nodes_ids] + }, + children, + root_id, + [] + ) + + text, tree when is_binary(text) -> + root_id = IDSeeder.seed(tree.node_ids) + + root_node = %Text{content: text, node_id: root_id} + + build_tree( + %{ + tree + | nodes: Map.put(tree.nodes, root_id, root_node), + node_ids: [root_id | tree.node_ids], + root_nodes_ids: [root_id | tree.root_nodes_ids] + }, + [], + root_id, + [] + ) + + {:comment, comment}, tree -> + root_id = IDSeeder.seed(tree.node_ids) + + root_node = %Comment{content: comment, node_id: root_id} + + build_tree( + %{ + tree + | nodes: Map.put(tree.nodes, root_id, root_node), + node_ids: [root_id | tree.node_ids], + root_nodes_ids: [root_id | tree.root_nodes_ids] + }, + [], + root_id, + [] + ) + + _, tree -> + tree + end + + Enum.reduce(html_tuples, %HTMLTree{}, reducer) + end + + def build(_), do: %HTMLTree{} + + def delete_node(tree, html_node) do + do_delete(tree, [html_node], []) + end + + def to_tuple_list(html_tree) do + html_tree.root_nodes_ids + |> Enum.reverse() + |> Enum.map(fn node_id -> + root = Map.get(html_tree.nodes, node_id) + + HTMLTree.to_tuple(html_tree, root) + end) + end + + def to_tuple(_tree, %Text{content: text}), do: text + def to_tuple(_tree, %Comment{content: comment}), do: {:comment, comment} + + def to_tuple(tree, html_node) do + children = + html_node.children_nodes_ids + |> Enum.reverse() + |> Enum.map(fn id -> to_tuple(tree, Map.get(tree.nodes, id)) end) + + {html_node.type, html_node.attributes, children} + end + + defp do_delete(tree, [], []), do: tree + + defp do_delete(tree, [html_node | t], stack_ids) do + new_tree_nodes = delete_node_from_nodes(tree.nodes, html_node) + + ids_for_stack = get_ids_for_delete_stack(html_node) + + do_delete( + %{ + tree + | nodes: new_tree_nodes, + node_ids: List.delete(tree.node_ids, html_node.node_id), + root_nodes_ids: List.delete(tree.root_nodes_ids, html_node.node_id) + }, + t, + ids_for_stack ++ stack_ids + ) + end + + defp do_delete(tree, [], stack_ids) do + html_nodes = + tree.nodes + |> Map.take(stack_ids) + |> Map.values() + + do_delete(tree, html_nodes, []) + end + + defp delete_node_from_nodes(nodes, html_node) do + tree_nodes = Map.delete(nodes, html_node.node_id) + parent_node = Map.get(nodes, html_node.parent_node_id) + + if parent_node do + children_ids = List.delete(parent_node.children_nodes_ids, html_node.node_id) + new_parent = 
%{parent_node | children_nodes_ids: children_ids} + %{tree_nodes | new_parent.node_id => new_parent} + else + tree_nodes + end + end + + defp get_ids_for_delete_stack(%HTMLNode{children_nodes_ids: ids}), do: ids + defp get_ids_for_delete_stack(_), do: [] + + defp build_tree(tree, [], _, []), do: tree + + defp build_tree(tree, [{:pi, _, _} | children], parent_id, stack), + do: build_tree(tree, children, parent_id, stack) + + defp build_tree(tree, [{tag, attrs, child_children} | children], parent_id, stack) do + new_id = IDSeeder.seed(tree.node_ids) + + new_node = %HTMLNode{type: tag, attributes: attrs, node_id: new_id, parent_node_id: parent_id} + + nodes = put_new_node(tree.nodes, new_node) + + build_tree( + %{tree | nodes: nodes, node_ids: [new_id | tree.node_ids]}, + child_children, + new_id, + [{parent_id, children} | stack] + ) + end + + defp build_tree(tree, [{:comment, comment} | children], parent_id, stack) do + new_id = IDSeeder.seed(tree.node_ids) + new_node = %Comment{content: comment, node_id: new_id, parent_node_id: parent_id} + + nodes = put_new_node(tree.nodes, new_node) + + build_tree( + %{tree | nodes: nodes, node_ids: [new_id | tree.node_ids]}, + children, + parent_id, + stack + ) + end + + defp build_tree(tree, [text | children], parent_id, stack) when is_binary(text) do + new_id = IDSeeder.seed(tree.node_ids) + new_node = %Text{content: text, node_id: new_id, parent_node_id: parent_id} + + nodes = put_new_node(tree.nodes, new_node) + + build_tree( + %{tree | nodes: nodes, node_ids: [new_id | tree.node_ids]}, + children, + parent_id, + stack + ) + end + + defp build_tree(tree, [_other | children], parent_id, stack) do + build_tree(tree, children, parent_id, stack) + end + + defp build_tree(tree, [], _, [{parent_node_id, children} | stack]) do + build_tree(tree, children, parent_node_id, stack) + end + + defp put_new_node(nodes, new_node) do + parent_node = Map.get(nodes, new_node.parent_node_id) + children_ids = parent_node.children_nodes_ids + updated_parent = %{parent_node | children_nodes_ids: [new_node.node_id | children_ids]} + + nodes + |> Map.put(new_node.node_id, new_node) + |> Map.put(new_node.parent_node_id, updated_parent) + end + + def patch_nodes(html_tree, operation_with_nodes) do + Enum.reduce(operation_with_nodes, html_tree, fn node_with_op, tree -> + case node_with_op do + {:update, node} -> + put_in(tree.nodes[node.node_id], node) + + {:delete, node} -> + delete_node(tree, node) + + {:no_op, _node} -> + tree + end + end) + end + + # Enables using functions from `Enum` and `Stream` modules + defimpl Enumerable do + def count(html_tree) do + {:ok, length(html_tree.node_ids)} + end + + def member?(html_tree, html_node = %{node_id: node_id}) do + a_node = Map.get(html_tree.nodes, node_id) + + {:ok, a_node === html_node} + end + + def member?(_, _) do + {:ok, false} + end + + def slice(_) do + {:error, __MODULE__} + end + + def reduce(html_tree, state, fun) do + do_reduce(%{html_tree | node_ids: Enum.reverse(html_tree.node_ids)}, state, fun) + end + + defp do_reduce(_, {:halt, acc}, _fun), do: {:halted, acc} + defp do_reduce(tree, {:suspend, acc}, fun), do: {:suspended, acc, &do_reduce(tree, &1, fun)} + defp do_reduce(%HTMLTree{node_ids: []}, {:cont, acc}, _fun), do: {:done, acc} + + defp do_reduce(html_tree = %HTMLTree{node_ids: [h | t]}, {:cont, acc}, fun) do + tree = %{html_tree | node_ids: t} + head_node = Map.get(html_tree.nodes, h) + do_reduce(tree, fun.(head_node, acc), fun) + end + end +end diff --git a/deps/floki/lib/floki/html_tree/comment.ex 
b/deps/floki/lib/floki/html_tree/comment.ex new file mode 100644 index 0000000..42e2e10 --- /dev/null +++ b/deps/floki/lib/floki/html_tree/comment.ex @@ -0,0 +1,13 @@ +defmodule Floki.HTMLTree.Comment do + @moduledoc false + + # Represents a comment inside an HTML tree with reference to its parent node id. + # TODO: rename content to data + defstruct content: "", node_id: nil, parent_node_id: nil + + @type t :: %__MODULE__{ + content: String.t(), + node_id: pos_integer(), + parent_node_id: pos_integer() + } +end diff --git a/deps/floki/lib/floki/html_tree/html_node.ex b/deps/floki/lib/floki/html_tree/html_node.ex new file mode 100644 index 0000000..5915a5b --- /dev/null +++ b/deps/floki/lib/floki/html_tree/html_node.ex @@ -0,0 +1,13 @@ +defmodule Floki.HTMLTree.HTMLNode do + @moduledoc false + + # Represents an HTML node with references to its child node ids and parent node id. + defstruct type: "", attributes: [], children_nodes_ids: [], node_id: nil, parent_node_id: nil + + @type t :: %__MODULE__{ + type: String.t(), + attributes: [{String.t(), String.t()}], + node_id: pos_integer(), + parent_node_id: pos_integer() + } +end diff --git a/deps/floki/lib/floki/html_tree/id_seeder.ex b/deps/floki/lib/floki/html_tree/id_seeder.ex new file mode 100644 index 0000000..c39f393 --- /dev/null +++ b/deps/floki/lib/floki/html_tree/id_seeder.ex @@ -0,0 +1,6 @@ +defmodule Floki.HTMLTree.IDSeeder do + @moduledoc false + + def seed([]), do: 1 + def seed([h | _]), do: h + 1 +end diff --git a/deps/floki/lib/floki/html_tree/text.ex b/deps/floki/lib/floki/html_tree/text.ex new file mode 100644 index 0000000..c84cd64 --- /dev/null +++ b/deps/floki/lib/floki/html_tree/text.ex @@ -0,0 +1,12 @@ +defmodule Floki.HTMLTree.Text do + @moduledoc false + + # Represents a text node inside an HTML tree with reference to its parent node id.
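For illustration, a sketch of how `Floki.HTMLTree.build/1` (defined earlier in this patch) assembles these structs from the tuple representation; the commented result is written out by hand and abbreviated:

```elixir
alias Floki.HTMLTree

tree = HTMLTree.build({"p", [], ["hello", {"b", [], ["world"]}]})

# Hand-written, abbreviated picture of the result: IDSeeder hands out ids
# in insertion order and every node keeps a reference to its parent.
#
# %Floki.HTMLTree{
#   root_nodes_ids: [1],
#   node_ids: [4, 3, 2, 1],
#   nodes: %{
#     1 => %HTMLTree.HTMLNode{type: "p", node_id: 1, children_nodes_ids: [3, 2]},
#     2 => %HTMLTree.Text{content: "hello", node_id: 2, parent_node_id: 1},
#     3 => %HTMLTree.HTMLNode{type: "b", node_id: 3, parent_node_id: 1, children_nodes_ids: [4]},
#     4 => %HTMLTree.Text{content: "world", node_id: 4, parent_node_id: 3}
#   }
# }

Enum.count(tree)             # => 4, via the Enumerable implementation above
HTMLTree.to_tuple_list(tree) # => [{"p", [], ["hello", {"b", [], ["world"]}]}]
```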
+ defstruct content: "", node_id: nil, parent_node_id: nil + + @type t :: %__MODULE__{ + content: String.t(), + node_id: pos_integer(), + parent_node_id: pos_integer() + } +end diff --git a/deps/floki/lib/floki/parse_error.ex b/deps/floki/lib/floki/parse_error.ex new file mode 100644 index 0000000..7d94d1b --- /dev/null +++ b/deps/floki/lib/floki/parse_error.ex @@ -0,0 +1,3 @@ +defmodule Floki.ParseError do + defexception [:message] +end diff --git a/deps/floki/lib/floki/raw_html.ex b/deps/floki/lib/floki/raw_html.ex new file mode 100644 index 0000000..c21c974 --- /dev/null +++ b/deps/floki/lib/floki/raw_html.ex @@ -0,0 +1,217 @@ +defmodule Floki.RawHTML do + @moduledoc false + + @self_closing_tags [ + "area", + "base", + "br", + "col", + "command", + "embed", + "hr", + "img", + "input", + "keygen", + "link", + "meta", + "param", + "source", + "track", + "wbr" + ] + + @encoder &HtmlEntities.encode/1 + + def raw_html(html_tree, options) do + encoder = + case Keyword.fetch(options, :encode) do + {:ok, true} -> @encoder + {:ok, false} -> & &1 + :error -> default_encoder() + end + + padding = + case Keyword.fetch(options, :pretty) do + {:ok, true} -> %{pad: " ", line_ending: "\n", depth: 0} + _ -> :noop + end + + IO.iodata_to_binary(build_raw_html(html_tree, [], encoder, padding)) + end + + defp build_raw_html([], html, _encoder, _padding), do: html + + defp build_raw_html(string, _html, encoder, padding) when is_binary(string) do + leftpad_content(padding, encoder.(string)) + end + + defp build_raw_html(tuple, html, encoder, padding) when is_tuple(tuple), + do: build_raw_html([tuple], html, encoder, padding) + + defp build_raw_html([string | tail], html, encoder, padding) when is_binary(string) do + build_raw_html(tail, [html, leftpad_content(padding, encoder.(string))], encoder, padding) + end + + defp build_raw_html([{:comment, comment} | tail], html, encoder, padding), + do: build_raw_html(tail, [html, leftpad(padding), "<!--", comment, "-->"], encoder, padding) + + defp build_raw_html([{:pi, tag, attrs} | tail], html, encoder, padding) do + build_raw_html( + tail, + [html, leftpad(padding), "<?", tag, " ", tag_attrs(attrs), "?>"], + encoder, + padding + ) + end + + defp build_raw_html([{:doctype, type, public, system} | tail], html, encoder, padding) do + attr = + case {public, system} do + {"", ""} -> [] + {"", system} -> [" SYSTEM \"", system | "\""] + {public, system} -> [" PUBLIC \"", public, "\" \"", system | "\""] + end + + build_raw_html( + tail, + [html, leftpad(padding), "<!DOCTYPE ", type, attr | ">"], + encoder, + padding + ) + end + + defp build_raw_html([{type, attrs, children} | tail], html, encoder, padding) do + build_raw_html( + tail, + [html | tag_for(type, attrs, children, encoder, padding)], + encoder, + padding + ) + end + + defp tag_attrs(attr_list) do + map_intersperse(attr_list, ?\s, &build_attrs/1) + end + + defp tag_with_attrs(type, [], children, padding), + do: [leftpad(padding), "<", type | close_open_tag(type, children)] + + defp tag_with_attrs(type, attrs, children, padding), + do: [leftpad(padding), "<", type, ?\s, tag_attrs(attrs) | close_open_tag(type, children)] + + defp close_open_tag(type, []) when type in @self_closing_tags, do: "/>" + defp close_open_tag(_type, _), do: ">" + + defp close_end_tag(type, [], _padding) when type in @self_closing_tags, do: "" + + defp close_end_tag(type, _, padding), + do: [leftpad(padding), "</", type, ">", line_ending(padding)] + + defp build_attrs({attr, value}), do: [attr, "=\"", html_escape(value) | "\""] + defp 
build_attrs(attr), do: attr + + defp tag_for(type, attrs, children, encoder, padding) do + encoder = + case type do + "script" -> & &1 + "style" -> & &1 + _ -> encoder + end + + [ + tag_with_attrs(type, attrs, children, padding), + line_ending(padding), + build_raw_html(children, "", encoder, pad_increase(padding)), + close_end_tag(type, children, padding) + ] + end + + defp default_encoder do + if Application.get_env(:floki, :encode_raw_html, true) do + @encoder + else + & &1 + end + end + + # html_escape + # Optimized IO data implementation from Plug.HTML + + defp html_escape(data) when is_binary(data), do: html_escape(data, 0, data, []) + defp html_escape(data), do: html_escape(IO.iodata_to_binary(data)) + + escapes = [ + {?<, "<"}, + {?>, ">"}, + {?&, "&"}, + {?", """}, + {?', "'"} + ] + + for {match, insert} <- escapes do + defp html_escape(<<unquote(match), rest::bits>>, skip, original, acc) do + html_escape(rest, skip + 1, original, [acc | unquote(insert)]) + end + end + + defp html_escape(<<_char, rest::bits>>, skip, original, acc) do + html_escape(rest, skip, original, acc, 1) + end + + defp html_escape(<<>>, _skip, _original, acc) do + acc + end + + for {match, insert} <- escapes do + defp html_escape(<<unquote(match), rest::bits>>, skip, original, acc, len) do + part = binary_part(original, skip, len) + html_escape(rest, skip + len + 1, original, [acc, part | unquote(insert)]) + end + end + + defp html_escape(<<_char, rest::bits>>, skip, original, acc, len) do + html_escape(rest, skip, original, acc, len + 1) + end + + defp html_escape(<<>>, 0, original, _acc, _len) do + original + end + + defp html_escape(<<>>, skip, original, acc, len) do + [acc | binary_part(original, skip, len)] + end + + # helpers + + # TODO: Use Enum.map_intersperse/3 when we require Elixir v1.10+ + + defp map_intersperse([], _, _), + do: [] + + defp map_intersperse([last], _, mapper), + do: [mapper.(last)] + + defp map_intersperse([head | rest], separator, mapper), + do: [mapper.(head), separator | map_intersperse(rest, separator, mapper)] + + defp leftpad(:noop), do: "" + defp leftpad(%{pad: pad, depth: depth}), do: String.duplicate(pad, depth) + + defp leftpad_content(:noop, string), do: string + + defp leftpad_content(padding, string) do + trimmed = String.trim(string) + + if trimmed == "" do + "" + else + [leftpad(padding), trimmed, line_ending(padding)] + end + end + + defp pad_increase(:noop), do: :noop + defp pad_increase(padder = %{depth: depth}), do: %{padder | depth: depth + 1} + + defp line_ending(:noop), do: "" + defp line_ending(%{line_ending: line_ending}), do: line_ending +end diff --git a/deps/floki/lib/floki/selector.ex b/deps/floki/lib/floki/selector.ex new file mode 100644 index 0000000..f1a1cca --- /dev/null +++ b/deps/floki/lib/floki/selector.ex @@ -0,0 +1,229 @@ +defmodule Floki.Selector do + require Logger + @moduledoc false + + # Represents a CSS selector. It also have functions to match nodes with a given selector. 
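As a rough, hand-written illustration of this data structure (not produced by running the parser), a CSS selector such as `a.link[href^="https"]` would correspond to something like:

```elixir
%Floki.Selector{
  type: "a",
  classes: ["link"],
  attributes: [
    %Floki.Selector.AttributeSelector{
      match_type: :prefix_match,
      attribute: "href",
      value: "https"
    }
  ]
}
```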
+ + alias Floki.{Selector, HTMLTree} + alias Selector.{AttributeSelector, PseudoClass} + alias HTMLTree.{HTMLNode, Text, Comment} + + defstruct id: nil, + type: nil, + classes: [], + attributes: [], + namespace: nil, + pseudo_classes: [], + combinator: nil + + @type t :: %__MODULE__{ + id: String.t() | nil, + type: String.t() | nil, + classes: [String.t()], + attributes: [AttributeSelector.t()], + namespace: String.t() | nil, + pseudo_classes: [PseudoClass.t()], + combinator: Selector.Combinator.t() | nil + } + + defimpl String.Chars do + def to_string(selector) do + Enum.join([ + namespace(selector), + selector.type, + id(selector), + classes(selector), + Enum.join(selector.attributes), + Enum.join(selector.pseudo_classes), + selector.combinator + ]) + end + + defp namespace(%{namespace: nil}), do: "" + defp namespace(%{namespace: ns}), do: "#{ns} | " + + defp id(%{id: nil}), do: "" + defp id(%{id: id}), do: "##{id}" + + defp classes(%{classes: []}), do: "" + defp classes(%{classes: classes}), do: ".#{Enum.join(classes, ".")}" + end + + @wildcards [nil, "*"] + defguardp is_wildcard(x) when x in @wildcards + + @doc false + + # Returns if a given node matches with a given selector. + def match?( + _node, + %Selector{ + id: nil, + type: nil, + classes: [], + attributes: [], + namespace: nil, + pseudo_classes: [], + combinator: nil + }, + _tree + ) do + false + end + + def match?(nil, _selector, _tree), do: false + def match?({:comment, _comment}, _selector, _tree), do: false + def match?({:pi, _xml, _xml_attrs}, _selector, _tree), do: false + def match?(%Text{}, _selector, _tree), do: false + def match?(%Comment{}, _selector, _tree), do: false + + def match?(html_node, selector, tree) do + id_match?(html_node, selector.id) && namespace_match?(html_node, selector.namespace) && + type_match?(html_node, selector.type) && classes_matches?(html_node, selector.classes) && + attributes_matches?(html_node, selector.attributes) && + pseudo_classes_match?(html_node, selector.pseudo_classes, tree) + end + + defp id_match?(_node, nil), do: true + defp id_match?(%HTMLNode{attributes: []}, _), do: false + defp id_match?(%HTMLNode{type: :pi}, _), do: false + + defp id_match?(%HTMLNode{attributes: attributes}, id) do + Enum.any?(attributes, fn attribute -> + case attribute do + {"id", ^id} -> true + _ -> false + end + end) + end + + defp namespace_match?(_node, namespace) when is_wildcard(namespace), do: true + defp namespace_match?(%HTMLNode{type: :pi}, _type), do: false + + defp namespace_match?(%HTMLNode{type: type_maybe_with_namespace}, namespace) do + case String.split(type_maybe_with_namespace, ":") do + [^namespace, _type] -> + true + + _ -> + false + end + end + + defp type_match?(_node, type) when is_wildcard(type), do: true + defp type_match?(%HTMLNode{type: :pi}, _type), do: false + + defp type_match?(%HTMLNode{type: type_maybe_with_namespace}, type) do + case String.split(type_maybe_with_namespace, ":") do + [_ns, ^type] -> + true + + [^type] -> + true + + _ -> + false + end + end + + defp type_match?(_, _), do: false + + defp classes_matches?(_node, []), do: true + defp classes_matches?(%HTMLNode{attributes: []}, _), do: false + + defp classes_matches?(%HTMLNode{attributes: attributes}, classes) do + case :proplists.get_value("class", attributes, nil) do + nil -> false + class -> classes -- String.split(class, ~r/\s+/) == [] + end + end + + defp attributes_matches?(_node, []), do: true + defp attributes_matches?(%HTMLNode{attributes: []}, _), do: false + + defp 
attributes_matches?(%HTMLNode{attributes: attributes}, attributes_selectors) do + Enum.all?(attributes_selectors, fn attribute_selector -> + AttributeSelector.match?(attributes, attribute_selector) + end) + end + + defp pseudo_classes_match?(_html_node, [], _tree), do: true + + defp pseudo_classes_match?(html_node, pseudo_classes, tree) do + Enum.all?(pseudo_classes, &pseudo_class_match?(html_node, &1, tree)) + end + + defp pseudo_class_match?(html_node, pseudo_class = %{name: "nth-child"}, tree) do + PseudoClass.match_nth_child?(tree, html_node, pseudo_class) + end + + defp pseudo_class_match?(html_node, %{name: "first-child"}, tree) do + PseudoClass.match_nth_child?(tree, html_node, %PseudoClass{name: "nth-child", value: 1}) + end + + defp pseudo_class_match?(html_node, %{name: "last-child"}, tree) do + PseudoClass.match_nth_last_child?(tree, html_node, %PseudoClass{ + name: "nth-last-child", + value: 1 + }) + end + + defp pseudo_class_match?(html_node, pseudo_class = %{name: "nth-last-child"}, tree) do + PseudoClass.match_nth_last_child?(tree, html_node, pseudo_class) + end + + defp pseudo_class_match?(html_node, pseudo_class = %{name: "nth-of-type"}, tree) do + PseudoClass.match_nth_of_type?(tree, html_node, pseudo_class) + end + + defp pseudo_class_match?(html_node, %{name: "first-of-type"}, tree) do + PseudoClass.match_nth_of_type?(tree, html_node, %PseudoClass{ + name: "nth-of-type", + value: 1 + }) + end + + defp pseudo_class_match?(html_node, %{name: "last-of-type"}, tree) do + PseudoClass.match_nth_last_of_type?(tree, html_node, %PseudoClass{ + name: "nth-last-of-type", + value: 1 + }) + end + + defp pseudo_class_match?(html_node, pseudo_class = %{name: "nth-last-of-type"}, tree) do + PseudoClass.match_nth_last_of_type?(tree, html_node, pseudo_class) + end + + defp pseudo_class_match?(html_node, pseudo_class = %{name: "not"}, tree) do + Enum.all?(pseudo_class.value, &(!Selector.match?(html_node, &1, tree))) + end + + defp pseudo_class_match?(html_node, %{name: "checked"}, _tree) do + PseudoClass.match_checked?(html_node) + end + + defp pseudo_class_match?(html_node, %{name: "disabled"}, _tree) do + PseudoClass.match_disabled?(html_node) + end + + defp pseudo_class_match?(html_node, pseudo_class = %{name: "fl-contains"}, tree) do + PseudoClass.match_contains?(tree, html_node, pseudo_class) + end + + # Case insensitive contains + defp pseudo_class_match?(html_node, pseudo_class = %{name: "fl-icontains"}, tree) do + PseudoClass.match_icontains?(tree, html_node, pseudo_class) + end + + defp pseudo_class_match?(html_node, %{name: "root"}, tree) do + PseudoClass.match_root?(html_node, tree) + end + + defp pseudo_class_match?(_html_node, %{name: unknown_pseudo_class}, _tree) do + Logger.info(fn -> + "Pseudo-class #{inspect(unknown_pseudo_class)} is not implemented. Ignoring." + end) + + false + end +end diff --git a/deps/floki/lib/floki/selector/attribute_selector.ex b/deps/floki/lib/floki/selector/attribute_selector.ex new file mode 100644 index 0000000..3133c3e --- /dev/null +++ b/deps/floki/lib/floki/selector/attribute_selector.ex @@ -0,0 +1,140 @@ +defmodule Floki.Selector.AttributeSelector do + @moduledoc false + + # It is very similar to the `Selector` module, but is specialized in attributes + # and attribute selectors. 
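A small usage sketch of the public `match?/2` defined below, checking a node's attribute list against a selector equivalent to `[class~="btn"]`:

```elixir
alias Floki.Selector.AttributeSelector

selector = %AttributeSelector{match_type: :includes, attribute: "class", value: "btn"}

# The "class" value is split on whitespace and each word is compared.
AttributeSelector.match?([{"class", "btn btn-primary"}, {"href", "/"}], selector)
# => true
```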
+ + alias Floki.Selector.AttributeSelector + + defstruct match_type: nil, attribute: nil, value: nil, flag: nil + + @type match_type :: + nil + | :equal + | :includes + | :dash_match + | :prefix_match + | :suffix_match + | :substring_match + + @type t :: %__MODULE__{ + match_type: match_type(), + attribute: String.t(), + value: String.t() | nil, + flag: String.t() | nil + } + + defimpl String.Chars do + def to_string(selector) do + "[#{selector.attribute}#{type(selector.match_type)}'#{selector.value}'#{flag(selector.flag)}]" + end + + defp type(match_type) do + case match_type do + :equal -> "=" + :includes -> "~=" + :dash_match -> "|=" + :prefix_match -> "^=" + :suffix_match -> "$=" + :substring_match -> "*=" + _ -> "" + end + end + + defp flag(nil), do: "" + defp flag(flag), do: " " <> flag + end + + # Returns if attributes of a node matches with a given attribute selector. + def match?(attributes, s = %AttributeSelector{match_type: nil, value: nil}) do + attribute_present?(s.attribute, attributes) + end + + # Case-insensitive matches + + def match?(attributes, s = %AttributeSelector{match_type: :equal, flag: "i"}) do + String.downcase(get_value(s.attribute, attributes)) == String.downcase(s.value) + end + + def match?(attributes, s = %AttributeSelector{match_type: :includes, flag: "i"}) do + selector_value = String.downcase(s.value) + + s.attribute + |> get_value(attributes) + # Splits by whitespaces ("a b c" -> ["a", "b", "c"]) + |> String.split(~r/\s+/) + |> Enum.any?(fn v -> String.downcase(v) == selector_value end) + end + + def match?(attributes, s = %AttributeSelector{match_type: :dash_match, flag: "i"}) do + selector_value = String.downcase(s.value) + value = String.downcase(get_value(s.attribute, attributes)) + + value == selector_value || String.starts_with?(value, "#{selector_value}-") + end + + def match?(attributes, s = %AttributeSelector{match_type: :prefix_match, flag: "i"}) do + s.attribute + |> get_value(attributes) + |> String.downcase() + |> String.starts_with?(String.downcase(s.value)) + end + + def match?(attributes, s = %AttributeSelector{match_type: :suffix_match, flag: "i"}) do + s.attribute + |> get_value(attributes) + |> String.downcase() + |> String.ends_with?(String.downcase(s.value)) + end + + def match?(attributes, s = %AttributeSelector{match_type: :substring_match, flag: "i"}) do + s.attribute + |> get_value(attributes) + |> String.downcase() + |> String.contains?(String.downcase(s.value)) + end + + # Case-sensitive matches + + def match?(attributes, s = %AttributeSelector{match_type: :equal}) do + get_value(s.attribute, attributes) == s.value + end + + def match?(attributes, s = %AttributeSelector{match_type: :includes, value: value}) do + get_value(s.attribute, attributes) + |> String.split(~r/\s+/) + |> Enum.any?(fn v -> v == value end) + end + + def match?(attributes, s = %AttributeSelector{match_type: :dash_match}) do + value = get_value(s.attribute, attributes) + + value == s.value || String.starts_with?(value, "#{s.value}-") + end + + def match?(attributes, s = %AttributeSelector{match_type: :prefix_match}) do + s.attribute |> get_value(attributes) |> String.starts_with?(s.value) + end + + def match?(attributes, s = %AttributeSelector{match_type: :suffix_match}) do + s.attribute |> get_value(attributes) |> String.ends_with?(s.value) + end + + def match?(attributes, s = %AttributeSelector{match_type: :substring_match}) do + s.attribute |> get_value(attributes) |> String.contains?(s.value) + end + + defp get_value(attr_name, attributes) do + 
Enum.find_value(attributes, "", fn + {^attr_name, value} -> value + _ -> false + end) + end + + defp attribute_present?(name, attributes) do + Enum.any?(attributes, fn + {^name, _v} -> true + _ -> false + end) + end +end diff --git a/deps/floki/lib/floki/selector/combinator.ex b/deps/floki/lib/floki/selector/combinator.ex new file mode 100644 index 0000000..fe25d49 --- /dev/null +++ b/deps/floki/lib/floki/selector/combinator.ex @@ -0,0 +1,44 @@ +defmodule Floki.Selector.Combinator do + @moduledoc false + + # Represents the conjunction of a combinator with its selector. + # + # Combinators can have the following match types: + # + # - descendant; + # e.g.: "a b" + # - child; + # e.g.: "a > b" + # - adjacent sibling; + # e.g.: "a + b" + # - general sibling; + # e.g.: "a ~ b" + + defstruct match_type: nil, selector: nil + + @type match_type :: + :descendant + | :child + | :adjacent_sibling + | :general_sibling + + @type t :: %__MODULE__{ + match_type: match_type(), + selector: Floki.Selector.t() + } + + defimpl String.Chars do + def to_string(combinator) do + match_type = + case combinator.match_type do + :descendant -> " " + :child -> " > " + :adjacent_sibling -> " + " + :general_sibling -> " ~ " + _ -> "" + end + + "#{match_type}#{combinator.selector}" + end + end +end diff --git a/deps/floki/lib/floki/selector/functional.ex b/deps/floki/lib/floki/selector/functional.ex new file mode 100644 index 0000000..2a45b38 --- /dev/null +++ b/deps/floki/lib/floki/selector/functional.ex @@ -0,0 +1,53 @@ +defmodule Floki.Selector.Functional do + @moduledoc false + + # Represents a functional notation for a selector + + defstruct [:stream, :a, :b] + + @regex ~r/^\s*(?<a>[-+]?[0-9]*[n])\s*(?<b>[+-]\s*[0-9]+)?\s*$/ + + def parse(expr) when is_list(expr) do + parse(to_string(expr)) + end + + def parse(expr) do + expr = String.downcase(expr) + + case Regex.named_captures(@regex, expr) do + nil -> :invalid + %{"a" => a, "b" => b} -> {:ok, build(a, b)} + end + end + + defp build(a, ""), do: build(a, "0") + + defp build(a, b) do + a = parse_num(a) + b = parse_num(b) + + stream = + Stream.map(0..100_000, fn x -> + a * x + b + end) + + %__MODULE__{stream: stream, a: a, b: b} + end + + defp parse_num(n_str) do + n_str + |> String.replace(" ", "") + |> String.trim("n") + |> case do + "-" -> -1 + "" -> 1 + n -> String.to_integer(n) + end + end + + defimpl String.Chars do + def to_string(functional) do + "#{functional.a}x+#{functional.b}" + end + end +end diff --git a/deps/floki/lib/floki/selector/parser.ex b/deps/floki/lib/floki/selector/parser.ex new file mode 100644 index 0000000..987016e --- /dev/null +++ b/deps/floki/lib/floki/selector/parser.ex @@ -0,0 +1,262 @@ +defmodule Floki.Selector.Parser do + require Logger + + @moduledoc false + + # Parses a list of tokens returned from `Tokenizer` and transform into a `Selector`. + + alias Floki.Selector + alias Selector.{Functional, Tokenizer, PseudoClass, AttributeSelector, Combinator} + + @attr_match_types [ + :equal, + :dash_match, + :includes, + :prefix_match, + :suffix_match, + :substring_match + ] + + # Returns a list of `Selector` structs with the parsed selectors. 
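A brief sketch of the `parse/1` entry point defined below; it assumes the tokenizer emits the usual `:identifier`, `:greater` and `:class` tokens for this input:

```elixir
alias Floki.Selector.Parser

[selector] = Parser.parse("ul>li.item")

selector.type                         # => "ul"
selector.combinator.match_type        # => :child
selector.combinator.selector.type     # => "li"
selector.combinator.selector.classes  # => ["item"]
```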
+ + def parse(selector) when is_binary(selector) do + token_list = Tokenizer.tokenize(selector) + parse(token_list) + end + + def parse(tokens) do + do_parse_all(tokens, []) + end + + defp do_parse_all([], selectors) do + Enum.reverse(selectors) + end + + defp do_parse_all(tokens, selectors) do + {selector, remaining_tokens} = do_parse(tokens, %Selector{}) + do_parse_all(remaining_tokens, [selector | selectors]) + end + + defp do_parse([], selector), do: {selector, []} + defp do_parse([{:close_parentesis, _} | t], selector), do: {selector, t} + defp do_parse([{:comma, _} | t], selector), do: {selector, t} + + defp do_parse([{:identifier, _, namespace}, {:namespace_pipe, _} | t], selector) do + do_parse(t, %{selector | namespace: to_string(namespace)}) + end + + defp do_parse([{:identifier, _, type} | t], selector) do + do_parse(t, %{selector | type: to_string(type)}) + end + + defp do_parse([{'*', _} | t], selector) do + do_parse(t, %{selector | type: "*"}) + end + + defp do_parse([{:hash, _, id} | t], selector) do + do_parse(t, %{selector | id: to_string(id) |> String.replace("\\.", ".")}) + end + + defp do_parse([{:class, _, class} | t], selector) do + do_parse(t, %{selector | classes: [to_string(class) | selector.classes]}) + end + + defp do_parse([{'[', _} | t], selector) do + {t, result} = consume_attribute(t) + + do_parse(t, %{selector | attributes: [result | selector.attributes]}) + end + + defp do_parse([{:pseudo_not, _} | t], selector) do + {t, pseudo_not_class} = do_parse_pseudo_not(t, %PseudoClass{name: "not", value: []}) + pseudo_classes = Enum.reject([pseudo_not_class | selector.pseudo_classes], &is_nil(&1)) + do_parse(t, %{selector | pseudo_classes: pseudo_classes}) + end + + defp do_parse([{:pseudo, _, pseudo_class} | t], selector) do + pseudo_classes = [%PseudoClass{name: to_string(pseudo_class)} | selector.pseudo_classes] + do_parse(t, %{selector | pseudo_classes: pseudo_classes}) + end + + defp do_parse([{:pseudo_class_int, _, pseudo_class_int} | t], selector) do + [pseudo_class | pseudo_classes] = selector.pseudo_classes + + do_parse(t, %{ + selector + | pseudo_classes: [%{pseudo_class | value: pseudo_class_int} | pseudo_classes] + }) + end + + defp do_parse([{:pseudo_class_even, _} | t], selector) do + [pseudo_class | pseudo_classes] = selector.pseudo_classes + do_parse(t, %{selector | pseudo_classes: [%{pseudo_class | value: "even"} | pseudo_classes]}) + end + + defp do_parse([{:pseudo_class_odd, _} | t], selector) do + [pseudo_class | pseudo_classes] = selector.pseudo_classes + do_parse(t, %{selector | pseudo_classes: [%{pseudo_class | value: "odd"} | pseudo_classes]}) + end + + defp do_parse([{:pseudo_class_pattern, _, pattern} | t], selector) do + [pseudo_class | pseudo_classes] = selector.pseudo_classes + + value = + case Functional.parse(pattern) do + :invalid -> to_string(pattern) + {:ok, value} -> value + end + + do_parse(t, %{selector | pseudo_classes: [%{pseudo_class | value: value} | pseudo_classes]}) + end + + defp do_parse([{:pseudo_class_quoted, _, pattern} | t], selector) do + [pseudo_class | pseudo_classes] = selector.pseudo_classes + + do_parse(t, %{ + selector + | pseudo_classes: [%{pseudo_class | value: to_string(pattern)} | pseudo_classes] + }) + end + + defp do_parse([{:space, _} | t], selector) do + {remaining_tokens, combinator} = consume_combinator(t, :descendant) + + {%{selector | combinator: combinator}, remaining_tokens} + end + + defp do_parse([{:greater, _} | t], selector) do + {remaining_tokens, combinator} = consume_combinator(t, :child) + 
+ {%{selector | combinator: combinator}, remaining_tokens} + end + + defp do_parse([{:plus, _} | t], selector) do + {remaining_tokens, combinator} = consume_combinator(t, :sibling) + + {%{selector | combinator: combinator}, remaining_tokens} + end + + defp do_parse([{:tilde, _} | t], selector) do + {remaining_tokens, combinator} = consume_combinator(t, :general_sibling) + + {%{selector | combinator: combinator}, remaining_tokens} + end + + defp do_parse([{:unknown, _, unknown} | t], selector) do + Logger.info(fn -> "Unknown token #{inspect(unknown)}. Ignoring." end) + + do_parse(t, selector) + end + + defp consume_attribute(tokens), do: consume_attribute(:consuming, tokens, %AttributeSelector{}) + defp consume_attribute(_, [], attr_selector), do: {[], attr_selector} + defp consume_attribute(:done, tokens, attr_selector), do: {tokens, attr_selector} + + defp consume_attribute(:consuming, [{:identifier, _, identifier} | t], attr_selector) do + new_selector = set_attribute_name_or_value(attr_selector, identifier) + consume_attribute(:consuming, t, new_selector) + end + + defp consume_attribute(:consuming, [{match_type, _} | t], attr_selector) + when match_type in @attr_match_types do + new_selector = %{attr_selector | match_type: match_type} + consume_attribute(:consuming, t, new_selector) + end + + defp consume_attribute(:consuming, [{:quoted, _, value} | t], attr_selector) do + new_selector = %{attr_selector | value: to_string(value)} + consume_attribute(:consuming, t, new_selector) + end + + defp consume_attribute(:consuming, [{:attribute_identifier, _, value} | t], attr_selector) do + flag = String.at(to_string(value), -2) + new_selector = %{attr_selector | flag: flag} + consume_attribute(:done, t, new_selector) + end + + defp consume_attribute(:consuming, [{']', _} | t], attr_selector) do + consume_attribute(:done, t, attr_selector) + end + + defp consume_attribute(:consuming, [unknown | t], attr_selector) do + Logger.info(fn -> "Unknown token #{inspect(unknown)}. Ignoring." end) + consume_attribute(:consuming, t, attr_selector) + end + + defp set_attribute_name_or_value(attr_selector, identifier) do + # When match type is not defined, this is an attribute name. + # Otherwise, it is an attribute value. 
+ case attr_selector.match_type do + nil -> %{attr_selector | attribute: to_string(identifier)} + _ -> %{attr_selector | value: to_string(identifier)} + end + end + + defp consume_combinator(tokens, combinator_type) when is_atom(combinator_type) do + consume_combinator(tokens, %Combinator{match_type: combinator_type, selector: %Selector{}}) + end + + defp consume_combinator([], combinator), do: {[], combinator} + + defp consume_combinator(tokens, combinator) do + {selector, remaining_tokens} = do_parse(tokens, %Selector{}) + + {remaining_tokens, %{combinator | selector: selector}} + end + + defp do_parse_pseudo_not([], pseudo_class) do + {[], pseudo_class} + end + + defp do_parse_pseudo_not([{:close_parentesis, _} | t], pseudo_class) do + {t, pseudo_class} + end + + defp do_parse_pseudo_not([{:space, _} | t], pseudo_class) do + do_parse_pseudo_not(t, pseudo_class) + end + + defp do_parse_pseudo_not(tokens, pseudo_class) do + do_parse_pseudo_not(tokens, %Selector{}, pseudo_class) + end + + defp do_parse_pseudo_not([], pseudo_not_selector, pseudo_class) do + pseudo_class = update_pseudo_not_value(pseudo_class, pseudo_not_selector) + {[], pseudo_class} + end + + defp do_parse_pseudo_not([{:close_parentesis, _} | t], pseudo_not_selector, pseudo_class) do + pseudo_class = update_pseudo_not_value(pseudo_class, pseudo_not_selector) + {t, pseudo_class} + end + + defp do_parse_pseudo_not([{:comma, _} | t], pseudo_not_selector, pseudo_class) do + pseudo_class = update_pseudo_not_value(pseudo_class, pseudo_not_selector) + do_parse_pseudo_not(t, pseudo_class) + end + + defp do_parse_pseudo_not([{:space, _} | t], pseudo_not_selector, pseudo_class) do + do_parse_pseudo_not(t, pseudo_not_selector, pseudo_class) + end + + defp do_parse_pseudo_not(tokens = [{'[', _} | _t], pseudo_not_selector, pseudo_class) do + {pseudo_not_selector, remaining_tokens} = do_parse(tokens, pseudo_not_selector) + pseudo_class = update_pseudo_not_value(pseudo_class, pseudo_not_selector) + do_parse_pseudo_not(remaining_tokens, pseudo_class) + end + + defp do_parse_pseudo_not([next_token | t], pseudo_not_selector, pseudo_class) do + {pseudo_not_selector, _} = do_parse([next_token], pseudo_not_selector) + do_parse_pseudo_not(t, pseudo_not_selector, pseudo_class) + end + + defp update_pseudo_not_value(pseudo_class, pseudo_not_selector = %Selector{combinator: nil}) do + pseudo_not_value = [pseudo_not_selector | Map.get(pseudo_class, :value, [])] + %{pseudo_class | value: pseudo_not_value} + end + + defp update_pseudo_not_value(_pseudo_class, _pseudo_not_selector) do + Logger.info("Only simple selectors are allowed in :not() pseudo-class. 
Ignoring.") + nil + end +end diff --git a/deps/floki/lib/floki/selector/pseudo_class.ex b/deps/floki/lib/floki/selector/pseudo_class.ex new file mode 100644 index 0000000..34f488d --- /dev/null +++ b/deps/floki/lib/floki/selector/pseudo_class.ex @@ -0,0 +1,182 @@ +defmodule Floki.Selector.PseudoClass do + @moduledoc false + + require Logger + + # Represents a pseudo-class selector + defstruct name: "", value: nil + + alias Floki.HTMLTree.{HTMLNode, Text} + alias Floki.Selector.Functional + + @type t :: %__MODULE__{ + name: String.t(), + value: String.t() | [Floki.Selector.t()] + } + + defimpl String.Chars do + def to_string(%{name: name, value: nil}) do + ":#{name}" + end + + def to_string(%{name: name, value: selectors}) when is_list(selectors) do + ":#{name}(#{Enum.join(selectors)})" + end + + def to_string(pseudo_class) do + ":#{pseudo_class.name}(#{pseudo_class.value})" + end + end + + def match_nth_child?(tree, html_node, %__MODULE__{value: value}) do + tree + |> pseudo_nodes(html_node) + |> Enum.reverse() + |> node_position(html_node) + |> match_position?(value, "nth-child") + end + + def match_nth_of_type?(tree, html_node, %__MODULE__{value: value}) do + tree + |> pseudo_nodes(html_node) + |> filter_nodes_by_type(tree.nodes, html_node.type) + |> Enum.reverse() + |> node_position(html_node) + |> match_position?(value, "nth-of-type") + end + + def match_nth_last_child?(tree, html_node, %__MODULE__{value: value}) do + tree + |> pseudo_nodes(html_node) + |> node_position(html_node) + |> match_position?(value, "nth-last-child") + end + + def match_nth_last_of_type?(tree, html_node, %__MODULE__{value: value}) do + tree + |> pseudo_nodes(html_node) + |> filter_nodes_by_type(tree.nodes, html_node.type) + |> node_position(html_node) + |> match_position?(value, "nth-last-of-type") + end + + def match_contains?(tree, html_node, %__MODULE__{value: value}) do + res = + Enum.find(html_node.children_nodes_ids, fn id -> + case Map.get(tree.nodes, id) do + %Text{content: content} -> content =~ value + _ -> false + end + end) + + res != nil + end + + # Case insensitive contains + def match_icontains?(tree, html_node, %__MODULE__{value: value}) do + downcase_value = String.downcase(value) + + res = + Enum.find(html_node.children_nodes_ids, fn id -> + case Map.get(tree.nodes, id) do + %Text{content: content} -> String.downcase(content) =~ downcase_value + _ -> false + end + end) + + res != nil + end + + defp match_position?(relative_position, value, name) do + case value do + position when is_integer(position) -> + relative_position == position + + "even" -> + rem(relative_position, 2) == 0 + + "odd" -> + rem(relative_position, 2) == 1 + + %Functional{stream: s} -> + relative_position in s + + expression -> + Logger.info(fn -> + "Pseudo-class #{name} with expressions like #{inspect(expression)} are not supported yet. Ignoring." 
+ end) + + false + end + end + + def match_checked?(%{type: "input"} = html_node) do + case List.keyfind(html_node.attributes, "checked", 0) do + {"checked", _} -> true + _ -> false + end + end + + def match_checked?(%{type: "option"} = html_node) do + case List.keyfind(html_node.attributes, "selected", 0) do + {"selected", _} -> true + _ -> false + end + end + + def match_checked?(_) do + false + end + + @disableable_html_nodes ~w[button input select option textarea] + + def match_disabled?(%{type: type} = html_node) when type in @disableable_html_nodes do + case List.keyfind(html_node.attributes, "disabled", 0) do + {"disabled", _} -> true + _ -> false + end + end + + def match_disabled?(_html_node) do + false + end + + def match_root?(html_node, tree) do + html_node.node_id in tree.root_nodes_ids + end + + defp node_position(ids, %HTMLNode{node_id: node_id}) do + position = Enum.find_index(ids, fn id -> id == node_id end) + position + 1 + end + + defp pseudo_nodes(tree, %HTMLNode{parent_node_id: nil}) do + tree.root_nodes_ids + |> filter_only_html_nodes(tree.nodes) + end + + defp pseudo_nodes(tree, %HTMLNode{parent_node_id: parent_node_id}) do + parent_node = Map.fetch!(tree.nodes, parent_node_id) + + parent_node.children_nodes_ids + |> filter_only_html_nodes(tree.nodes) + end + + defp filter_only_html_nodes(ids, nodes) do + Enum.filter(ids, fn id -> + case nodes do + %{^id => %HTMLNode{}} -> true + _ -> false + end + end) + end + + defp filter_nodes_by_type(ids, nodes, type) do + Enum.filter(ids, fn id -> + case nodes do + %{^id => %HTMLNode{type: ^type}} -> true + _ -> false + end + end) + end +end diff --git a/deps/floki/lib/floki/selector/tokenizer.ex b/deps/floki/lib/floki/selector/tokenizer.ex new file mode 100644 index 0000000..6994742 --- /dev/null +++ b/deps/floki/lib/floki/selector/tokenizer.ex @@ -0,0 +1,16 @@ +defmodule Floki.Selector.Tokenizer do + @moduledoc false + + # It decodes a given selector and returns the tokens that represents it. 
+ # Check the rules in "src/floki_selector_lexer.xrl" + def tokenize(selector) do + char_list = + selector + |> String.trim() + |> String.to_charlist() + + {:ok, token_list, _} = :floki_selector_lexer.string(char_list) + + token_list + end +end diff --git a/deps/floki/lib/floki/traversal.ex b/deps/floki/lib/floki/traversal.ex new file mode 100644 index 0000000..ae5d6bf --- /dev/null +++ b/deps/floki/lib/floki/traversal.ex @@ -0,0 +1,32 @@ +defmodule Floki.Traversal do + @moduledoc false + + def traverse_and_update(html_node, fun) do + html_node + |> traverse_and_update([], fn element, acc -> {fun.(element), acc} end) + |> elem(0) + end + + def traverse_and_update(html_node, acc, fun) + def traverse_and_update([], acc, _fun), do: {[], acc} + def traverse_and_update(text, acc, _fun) when is_binary(text), do: {text, acc} + def traverse_and_update({:pi, _, _} = xml_tag, acc, fun), do: fun.(xml_tag, acc) + def traverse_and_update({:comment, _children} = comment, acc, fun), do: fun.(comment, acc) + def traverse_and_update({:doctype, _, _, _} = doctype, acc, fun), do: fun.(doctype, acc) + + def traverse_and_update([head | tail], acc, fun) do + case traverse_and_update(head, acc, fun) do + {nil, new_acc} -> + traverse_and_update(tail, new_acc, fun) + + {mapped_head, new_acc} -> + {mapped_tail, new_acc2} = traverse_and_update(tail, new_acc, fun) + {[mapped_head | mapped_tail], new_acc2} + end + end + + def traverse_and_update({elem, attrs, children}, acc, fun) do + {mapped_children, new_acc} = traverse_and_update(children, acc, fun) + fun.({elem, attrs, mapped_children}, new_acc) + end +end diff --git a/deps/floki/mix.exs b/deps/floki/mix.exs new file mode 100644 index 0000000..e8b1713 --- /dev/null +++ b/deps/floki/mix.exs @@ -0,0 +1,132 @@ +defmodule Floki.Mixfile do + use Mix.Project + + @description "Floki is a simple HTML parser that enables search for nodes using CSS selectors." 
+ @source_url "https://github.com/philss/floki" + @version "0.33.1" + + def project do + [ + app: :floki, + name: "Floki", + version: @version, + description: @description, + elixir: "~> 1.10", + package: package(), + erlc_paths: ["src", "gen"], + deps: deps(), + aliases: aliases(), + docs: docs(), + dialyzer: [ + plt_file: {:no_warn, "priv/plts/dialyzer.plt"} + ], + elixirc_paths: elixirc_paths(Mix.env()) + ] + end + + def application do + [extra_applications: [:logger]] + end + + defp docs do + [ + extras: ["CHANGELOG.md", {:"README.md", [title: "Overview"]}], + main: "readme", + assets: "assets", + logo: "assets/images/floki-logo.svg", + source_url: @source_url, + source_ref: "v#{@version}", + skip_undefined_reference_warnings_on: ["CHANGELOG.md"] + ] + end + + defp deps do + # Needed to avoid installing unnecessary deps on the CI + parsers_deps = [ + html5ever: {:html5ever, ">= 0.8.0", optional: true, only: [:dev, :test]}, + fast_html: {:fast_html, ">= 0.0.0", optional: true, only: [:dev, :test]} + ] + + parsers = + case System.get_env("PARSER") do + nil -> [:fast_html, :html5ever] + parser when parser in ~w(html5ever fast_html) -> [String.to_atom(parser)] + _ -> [] + end + |> Enum.map(fn name -> Keyword.fetch!(parsers_deps, name) end) + + [ + {:html_entities, "~> 0.5.0"}, + {:jason, "~> 1.1", only: [:dev, :test, :docs]}, + {:earmark, "~> 1.2", only: :dev}, + {:ex_doc, "~> 0.28.2", only: :dev, runtime: false}, + {:benchee, "~> 1.1.0", only: :dev}, + {:credo, ">= 0.0.0", only: [:dev, :test]}, + {:dialyxir, "~> 1.0", only: [:dev], runtime: false} + ] ++ parsers + end + + defp aliases do + # Hardcoded because we can't load the floki application and get the module list at this point. + parsers = [Floki.HTMLParser.Mochiweb, Floki.HTMLParser.FastHtml, Floki.HTMLParser.Html5ever] + + {aliases, cli_names} = + Enum.map_reduce(parsers, [], fn parser, acc -> + cli_name = + parser + |> Module.split() + |> List.last() + |> Macro.underscore() + + {{:"test.#{cli_name}", &test_with_parser(parser, &1)}, [cli_name | acc]} + end) + + aliases + |> Keyword.put(:test, &test_with_parser(cli_names, &1)) + end + + defp test_with_parser(parser_cli_names, args) when is_list(parser_cli_names) do + Enum.each(parser_cli_names, fn cli_name -> + Mix.shell().cmd("mix test.#{cli_name} --color #{Enum.join(args, " ")}", + env: [{"MIX_ENV", "test"}] + ) + end) + end + + defp test_with_parser(parser, args) do + Mix.shell().info("Running tests with #{parser}") + Application.put_env(:floki, :html_parser, parser, persistent: true) + Mix.env(:test) + Mix.Tasks.Test.run(args) + end + + defp package do + %{ + maintainers: ["Philip Sampaio Silva"], + licenses: ["MIT"], + files: [ + # We don't want to ship mix tasks. + "lib/floki", + "lib/floki.ex", + "src/*.xrl", + "src/floki_mochi_html.erl", + "src/floki.gleam", + "mix.exs", + "README.md", + "LICENSE", + "CODE_OF_CONDUCT.md", + "CONTRIBUTING.md", + "CHANGELOG.md" + ], + links: %{ + "Changelog" => "https://hexdocs.pm/floki/changelog.html", + "Sponsor" => "https://github.com/sponsors/philss", + "GitHub" => @source_url + } + } + end + + # Specifies which paths to compile per environment. 
+ defp elixirc_paths(:test), do: ["test/support", "lib"] + defp elixirc_paths(_), do: ["lib"] +end diff --git a/deps/floki/src/floki.gleam b/deps/floki/src/floki.gleam new file mode 100644 index 0000000..5ee00a1 --- /dev/null +++ b/deps/floki/src/floki.gleam @@ -0,0 +1,13 @@ +pub external type HTMLNode + +pub external fn parse_document(raw: String) -> Result(List(HTMLNode), String) = + "Elixir.Floki" "parse_document" + +pub external fn find(tree: List(HTMLNode), query: String) -> List(HTMLNode) = + "Elixir.Floki" "find" + +pub external fn attribute(tree: List(HTMLNode), name: String) -> List(String) = + "Elixir.Floki" "attribute" + +pub external fn text(tree: List(HTMLNode)) -> String = + "Elixir.Floki" "text" diff --git a/deps/floki/src/floki_mochi_html.erl b/deps/floki/src/floki_mochi_html.erl new file mode 100644 index 0000000..dd9b658 --- /dev/null +++ b/deps/floki/src/floki_mochi_html.erl @@ -0,0 +1,872 @@ +%% @author Bob Ippolito <bob@mochimedia.com> +%% @copyright 2007 Mochi Media, Inc. +%% +%% Permission is hereby granted, free of charge, to any person obtaining a +%% copy of this software and associated documentation files (the "Software"), +%% to deal in the Software without restriction, including without limitation +%% the rights to use, copy, modify, merge, publish, distribute, sublicense, +%% and/or sell copies of the Software, and to permit persons to whom the +%% Software is furnished to do so, subject to the following conditions: +%% +%% The above copyright notice and this permission notice shall be included in +%% all copies or substantial portions of the Software. +%% +%% THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +%% IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +%% FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +%% THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +%% LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +%% FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER +%% DEALINGS IN THE SOFTWARE. + +%% @doc Loosely tokenizes and generates parse trees for HTML 4. +-module(floki_mochi_html). +-export([tokens/1, parse/1, parse_tokens/1, to_tokens/1, escape/1, + escape_attr/1, to_html/1]). +-ifdef(TEST). +-export([destack/1, destack/2, is_singleton/1]). +-endif. + +%% This is a macro to placate syntax highlighters.. +-define(QUOTE, $\"). %% $\" +-define(SQUOTE, $\'). %% $\' +-define(ADV_COL(S, N), + S#decoder{column=N+S#decoder.column, + offset=N+S#decoder.offset}). +-define(INC_COL(S), + S#decoder{column=1+S#decoder.column, + offset=1+S#decoder.offset}). +-define(INC_LINE(S), + S#decoder{column=1, + line=1+S#decoder.line, + offset=1+S#decoder.offset}). +-define(INC_CHAR(S, C), + case C of + $\n -> + S#decoder{column=1, + line=1+S#decoder.line, + offset=1+S#decoder.offset}; + _ -> + S#decoder{column=1+S#decoder.column, + offset=1+S#decoder.offset} + end). + +-define(IS_WHITESPACE(C), + (C =:= $\s orelse C =:= $\t orelse C =:= $\r orelse C =:= $\n)). +-define(IS_LETTER(C), + ((C >= $A andalso C =< $Z) orelse (C >= $a andalso C =< $z))). +-define(IS_LITERAL_SAFE(C), + ((C >= $A andalso C =< $Z) orelse (C >= $a andalso C =< $z) + orelse (C >= $0 andalso C =< $9))). +-define(PROBABLE_CLOSE(C), + (C =:= $> orelse ?IS_WHITESPACE(C))). + +-record(decoder, {line=1, + column=1, + offset=0}). 
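+
+%% Illustrative note (values assumed for this sketch, not taken from the file above):
+%% the decoder record only tracks scanning position over the input binary. For
+%% example, after consuming the four bytes of <<"<p>\n">>, repeated ?INC_CHAR
+%% application leaves roughly #decoder{line=2, column=1, offset=4}, since a $\n
+%% resets the column and bumps the line, while any other byte just advances the
+%% column and offset.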
+ +%% @type html_node() = {string(), [html_attr()], [html_node() | string()]} +%% @type html_attr() = {string(), string()} +%% @type html_token() = html_data() | start_tag() | end_tag() | inline_html() | html_comment() | html_doctype() +%% @type html_data() = {data, string(), Whitespace::boolean()} +%% @type start_tag() = {start_tag, Name, [html_attr()], Singleton::boolean()} +%% @type end_tag() = {end_tag, Name} +%% @type html_comment() = {comment, Comment} +%% @type html_doctype() = {doctype, [Doctype]} +%% @type inline_html() = {'=', iolist()} + +%% External API. + +%% @spec parse(string() | binary()) -> html_node() +%% @doc tokenize and then transform the token stream into a HTML tree. +parse(Input) -> + parse_tokens(tokens(Input)). + +%% @spec parse_tokens([html_token()]) -> html_node() +%% @doc Transform the output of tokens(Doc) into a HTML tree. +parse_tokens(Tokens) when is_list(Tokens) -> + %% Skip over doctype, processing instructions + [{start_tag, Tag, Attrs, false} | Rest] = find_document(Tokens, normal), + {Tree, _} = tree(Rest, [norm({Tag, Attrs})]), + Tree. + +find_document(Tokens=[{start_tag, _Tag, _Attrs, false} | _Rest], Mode) -> + maybe_add_html_tag(Tokens, Mode); +find_document([{doctype, [<<"html">>]} | Rest], _Mode) -> + find_document(Rest, html5); +find_document([_T | Rest], Mode) -> + find_document(Rest, Mode); +find_document([], _Mode) -> + []. + +maybe_add_html_tag(Tokens=[{start_tag, Tag, _Attrs, false} | _], html5) + when Tag =/= <<"html">> -> + [{start_tag, <<"html">>, [], false} | Tokens]; +maybe_add_html_tag(Tokens, _Mode) -> + Tokens. + +%% @spec tokens(StringOrBinary) -> [html_token()] +%% @doc Transform the input UTF-8 HTML into a token stream. +tokens(Input) -> + tokens(iolist_to_binary(Input), #decoder{}, []). + +%% @spec to_tokens(html_node()) -> [html_token()] +%% @doc Convert a html_node() tree to a list of tokens. +to_tokens({Tag0}) -> + to_tokens({Tag0, [], []}); +to_tokens(T={'=', _}) -> + [T]; +to_tokens(T={doctype, _}) -> + [T]; +to_tokens(T={comment, _}) -> + [T]; +to_tokens({Tag0, Acc}) -> + %% This is only allowed in sub-tags: {p, [{"class", "foo"}]} + to_tokens({Tag0, [], Acc}); +to_tokens({Tag0, Attrs, Acc}) -> + Tag = to_tag(Tag0), + case is_singleton(Tag) of + true -> + to_tokens([], [{start_tag, Tag, Attrs, true}]); + false -> + to_tokens([{Tag, Acc}], [{start_tag, Tag, Attrs, false}]) + end. + +%% @spec to_html([html_token()] | html_node()) -> iolist() +%% @doc Convert a list of html_token() to a HTML document. +to_html(Node) when is_tuple(Node) -> + to_html(to_tokens(Node)); +to_html(Tokens) when is_list(Tokens) -> + to_html(Tokens, []). + +%% @spec escape(string() | atom() | binary()) -> binary() +%% @doc Escape a string such that it's safe for HTML (amp; lt; gt;). +escape(B) when is_binary(B) -> + escape(binary_to_list(B), []); +escape(A) when is_atom(A) -> + escape(atom_to_list(A), []); +escape(S) when is_list(S) -> + escape(S, []). + +%% @spec escape_attr(string() | binary() | atom() | integer() | float()) -> binary() +%% @doc Escape a string such that it's safe for HTML attrs +%% (amp; lt; gt; quot;). +escape_attr(B) when is_binary(B) -> + escape_attr(binary_to_list(B), []); +escape_attr(A) when is_atom(A) -> + escape_attr(atom_to_list(A), []); +escape_attr(S) when is_list(S) -> + escape_attr(S, []); +escape_attr(I) when is_integer(I) -> + escape_attr(integer_to_list(I), []); +%% CHANGED: Previously it used mochinum:digits/1 but float_to_list/1 is +%% good enough generally. 
In case it isn't, a similar alternative would +%% be lists:flatten(io_lib:format("~p", [F])). +escape_attr(F) when is_float(F) -> + escape_attr(erlang:float_to_list(F), []). + +to_html([], Acc) -> + lists:reverse(Acc); +to_html([{'=', Content} | Rest], Acc) -> + to_html(Rest, [Content | Acc]); +to_html([{pi, Tag, Attrs} | Rest], Acc) -> + Open = [<<"<?">>, + Tag, + attrs_to_html(Attrs, []), + <<"?>">>], + to_html(Rest, [Open | Acc]); +to_html([{comment, Comment} | Rest], Acc) -> + to_html(Rest, [[<<"<!--">>, Comment, <<"-->">>] | Acc]); +to_html([{doctype, Parts} | Rest], Acc) -> + Inside = doctype_to_html(Parts, Acc), + to_html(Rest, [[<<"<!DOCTYPE">>, Inside, <<">">>] | Acc]); +to_html([{data, Data, _Whitespace} | Rest], Acc) -> + to_html(Rest, [escape(Data) | Acc]); +to_html([{start_tag, Tag, Attrs, Singleton} | Rest], Acc) -> + Open = [<<"<">>, + Tag, + attrs_to_html(Attrs, []), + case Singleton of + true -> <<" />">>; + false -> <<">">> + end], + to_html(Rest, [Open | Acc]); +to_html([{end_tag, Tag} | Rest], Acc) -> + to_html(Rest, [[<<"</">>, Tag, <<">">>] | Acc]). + +doctype_to_html([], Acc) -> + lists:reverse(Acc); +doctype_to_html([Word | Rest], Acc) -> + case lists:all(fun (C) -> ?IS_LITERAL_SAFE(C) end, + binary_to_list(iolist_to_binary(Word))) of + true -> + doctype_to_html(Rest, [[<<" ">>, Word] | Acc]); + false -> + doctype_to_html(Rest, [[<<" \"">>, escape_attr(Word), ?QUOTE] | Acc]) + end. + +attrs_to_html([], Acc) -> + lists:reverse(Acc); +attrs_to_html([{K, V} | Rest], Acc) -> + attrs_to_html(Rest, + [[<<" ">>, escape(K), <<"=\"">>, + escape_attr(V), <<"\"">>] | Acc]). + +escape([], Acc) -> + list_to_binary(lists:reverse(Acc)); +escape("<" ++ Rest, Acc) -> + escape(Rest, lists:reverse("&lt;", Acc)); +escape(">" ++ Rest, Acc) -> + escape(Rest, lists:reverse("&gt;", Acc)); +escape("&" ++ Rest, Acc) -> + escape(Rest, lists:reverse("&amp;", Acc)); +escape([C | Rest], Acc) -> + escape(Rest, [C | Acc]). + +escape_attr([], Acc) -> + list_to_binary(lists:reverse(Acc)); +escape_attr("<" ++ Rest, Acc) -> + escape_attr(Rest, lists:reverse("&lt;", Acc)); +escape_attr(">" ++ Rest, Acc) -> + escape_attr(Rest, lists:reverse("&gt;", Acc)); +escape_attr("&" ++ Rest, Acc) -> + escape_attr(Rest, lists:reverse("&amp;", Acc)); +escape_attr([?QUOTE | Rest], Acc) -> + escape_attr(Rest, lists:reverse("&quot;", Acc)); +escape_attr([C | Rest], Acc) -> + escape_attr(Rest, [C | Acc]). + +to_tag(A) when is_atom(A) -> + norm(atom_to_list(A)); +to_tag(L) -> + norm(L).
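+
+%% Illustrative example (assumed values, not taken from the original module):
+%% with the clauses above, escape("1 < 2 & 3") should return
+%% <<"1 &lt; 2 &amp; 3">> and escape_attr("say \"hi\"") should return
+%% <<"say &quot;hi&quot;">>; both build the result backwards with
+%% lists:reverse/2 and flatten it once at the end via list_to_binary/1.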
+ +to_tokens([], Acc) -> + lists:reverse(Acc); +to_tokens([{Tag, []} | Rest], Acc) -> + to_tokens(Rest, [{end_tag, to_tag(Tag)} | Acc]); +to_tokens([{Tag0, [{T0} | R1]} | Rest], Acc) -> + %% Allow {br} + to_tokens([{Tag0, [{T0, [], []} | R1]} | Rest], Acc); +to_tokens([{Tag0, [T0={'=', _C0} | R1]} | Rest], Acc) -> + %% Allow {'=', iolist()} + to_tokens([{Tag0, R1} | Rest], [T0 | Acc]); +to_tokens([{Tag0, [T0={comment, _C0} | R1]} | Rest], Acc) -> + %% Allow {comment, iolist()} + to_tokens([{Tag0, R1} | Rest], [T0 | Acc]); +to_tokens([{Tag0, [T0={pi, _S0, _A0} | R1]} | Rest], Acc) -> + %% Allow {pi, binary(), list()} + to_tokens([{Tag0, R1} | Rest], [T0 | Acc]); +to_tokens([{Tag0, [{T0, A0=[{_, _} | _]} | R1]} | Rest], Acc) -> + %% Allow {p, [{"class", "foo"}]} + to_tokens([{Tag0, [{T0, A0, []} | R1]} | Rest], Acc); +to_tokens([{Tag0, [{T0, C0} | R1]} | Rest], Acc) -> + %% Allow {p, "content"} and {p, <<"content">>} + to_tokens([{Tag0, [{T0, [], C0} | R1]} | Rest], Acc); +to_tokens([{Tag0, [{T0, A1, C0} | R1]} | Rest], Acc) when is_binary(C0) -> + %% Allow {"p", [{"class", "foo"}], <<"content">>} + to_tokens([{Tag0, [{T0, A1, binary_to_list(C0)} | R1]} | Rest], Acc); +to_tokens([{Tag0, [{T0, A1, C0=[C | _]} | R1]} | Rest], Acc) + when is_integer(C) -> + %% Allow {"p", [{"class", "foo"}], "content"} + to_tokens([{Tag0, [{T0, A1, [C0]} | R1]} | Rest], Acc); +to_tokens([{Tag0, [{T0, A1, C1} | R1]} | Rest], Acc) -> + %% Native {"p", [{"class", "foo"}], ["content"]} + Tag = to_tag(Tag0), + T1 = to_tag(T0), + case is_singleton(norm(T1)) of + true -> + to_tokens([{Tag, R1} | Rest], [{start_tag, T1, A1, true} | Acc]); + false -> + to_tokens([{T1, C1}, {Tag, R1} | Rest], + [{start_tag, T1, A1, false} | Acc]) + end; +to_tokens([{Tag0, [L | R1]} | Rest], Acc) when is_list(L) -> + %% List text + Tag = to_tag(Tag0), + to_tokens([{Tag, R1} | Rest], [{data, iolist_to_binary(L), false} | Acc]); +to_tokens([{Tag0, [B | R1]} | Rest], Acc) when is_binary(B) -> + %% Binary text + Tag = to_tag(Tag0), + to_tokens([{Tag, R1} | Rest], [{data, B, false} | Acc]). + +tokens(B, S=#decoder{offset=O}, Acc) -> + case B of + <<_:O/binary>> -> + lists:reverse(Acc); + _ -> + {Tag, S1} = tokenize(B, S), + case parse_flag(Tag) of + script -> + {Tag2, S2} = tokenize_script(B, S1), + tokens(B, S2, [Tag2, Tag | Acc]); + style -> + {Tag2, S2} = tokenize_style(B, S1), + tokens(B, S2, [Tag2, Tag | Acc]); + title -> + {Tag2, S2} = tokenize_title(B, S1), + tokens(B, S2, [Tag2, Tag | Acc]); + textarea -> + {Tag2, S2} = tokenize_textarea(B, S1), + tokens(B, S2, [Tag2, Tag | Acc]); + none -> + tokens(B, S1, [Tag | Acc]) + end + end. + +parse_flag({start_tag, B, _, false}) -> + case string:to_lower(binary_to_list(B)) of + "script" -> + script; + "style" -> + style; + "title" -> + title; + "textarea" -> + textarea; + _ -> + none + end; +parse_flag(_) -> + none. 
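+
+%% Illustrative example (assumed output, not taken from the original module):
+%% for a small fragment, tokens(<<"<p class=x>hi</p>">>) should yield roughly
+%% [{start_tag, <<"p">>, [{<<"class">>, <<"x">>}], false},
+%%  {data, <<"hi">>, false},
+%%  {end_tag, <<"p">>}]
+%% and parse/1 folds that token stream into the single html_node()
+%% {<<"p">>, [{<<"class">>, <<"x">>}], [<<"hi">>]}.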
+ +tokenize(B, S=#decoder{offset=O}) -> + case B of + <<_:O/binary, "<!--", _/binary>> -> + tokenize_comment(B, ?ADV_COL(S, 4)); + <<_:O/binary, "<!doctype", _/binary>> -> + tokenize_doctype(B, ?ADV_COL(S, 10)); + <<_:O/binary, "<!DOCTYPE", _/binary>> -> + tokenize_doctype(B, ?ADV_COL(S, 10)); + <<_:O/binary, "<![CDATA[", _/binary>> -> + tokenize_cdata(B, ?ADV_COL(S, 9)); + <<_:O/binary, "<?php", _/binary>> -> + {Body, S1} = raw_qgt(B, ?ADV_COL(S, 2)), + {{pi, Body, []}, S1}; + <<_:O/binary, "<?", _/binary>> -> + {Tag, S1} = tokenize_literal(B, ?ADV_COL(S, 2)), + {Attrs, S2} = tokenize_attributes(B, S1), + S3 = find_qgt(B, S2), + {{pi, Tag, Attrs}, S3}; + <<_:O/binary, "&", _/binary>> -> + tokenize_charref(B, ?INC_COL(S)); + <<_:O/binary, "</", _/binary>> -> + {Tag, S1} = tokenize_literal(B, ?ADV_COL(S, 2)), + {S2, _} = find_gt(B, S1), + {{end_tag, Tag}, S2}; + <<_:O/binary, "<", C, _/binary>> + when ?IS_WHITESPACE(C); not ?IS_LETTER(C) -> + %% This isn't really strict HTML + {{data, Data, _Whitespace}, S1} = tokenize_data(B, ?INC_COL(S)), + {{data, <<$<, Data/binary>>, false}, S1}; + <<_:O/binary, "<", _/binary>> -> + {Tag, S1} = tokenize_literal(B, ?INC_COL(S)), + {Attrs, S2} = tokenize_attributes(B, S1), + {S3, HasSlash} = find_gt(B, S2), + Singleton = HasSlash orelse is_singleton(Tag), + {{start_tag, Tag, Attrs, Singleton}, S3}; + _ -> + tokenize_data(B, S) + end. + +tree_data([{data, Data, Whitespace} | Rest], AllWhitespace, Acc) -> + tree_data(Rest, (Whitespace andalso AllWhitespace), [Data | Acc]); +tree_data(Rest, AllWhitespace, Acc) -> + {iolist_to_binary(lists:reverse(Acc)), AllWhitespace, Rest}. + +tree([], Stack) -> + {destack(Stack), []}; +tree([{end_tag, Tag} | Rest], Stack) -> + case destack(norm(Tag), Stack) of + S when is_list(S) -> + tree(Rest, S); + Result -> + {Result, []} + end; +tree([{start_tag, Tag, Attrs, true} | Rest], S) -> + tree(Rest, append_stack_child(norm({Tag, Attrs}), S)); +tree([{start_tag, Tag, Attrs, false} | Rest], S) -> + tree(Rest, stack(norm({Tag, Attrs}), S)); +tree([T={pi, _Tag, _Attrs} | Rest], S) -> + tree(Rest, append_stack_child(T, S)); +tree([T={comment, _Comment} | Rest], S) -> + tree(Rest, append_stack_child(T, S)); +tree(L=[{data, _Data, _Whitespace} | _], S) -> + case tree_data(L, true, []) of + {_, true, Rest} -> + tree(Rest, S); + {Data, false, Rest} -> + tree(Rest, append_stack_child(Data, S)) + end; +tree([{doctype, _} | Rest], Stack) -> + tree(Rest, Stack). + +norm({Tag, Attrs}) -> + {norm(Tag), [{norm(K), iolist_to_binary(V)} || {K, V} <- Attrs], []}; +norm(Tag) when is_binary(Tag) -> + Tag; +norm(Tag) -> + list_to_binary(string:to_lower(Tag)). + +stack(T1={TN, _, _}, Stack=[{TN, _, _} | _Rest]) + when TN =:= <<"li">> orelse TN =:= <<"option">> -> + [T1 | destack(TN, Stack)]; +stack(T1={TN0, _, _}, Stack=[{TN1, _, _} | _Rest]) + when (TN0 =:= <<"dd">> orelse TN0 =:= <<"dt">>) andalso + (TN1 =:= <<"dd">> orelse TN1 =:= <<"dt">>) -> + [T1 | destack(TN1, Stack)]; +stack(T1, Stack) -> + [T1 | Stack]. + +append_stack_child(StartTag, [{Name, Attrs, Acc} | Stack]) -> + [{Name, Attrs, [StartTag | Acc]} | Stack]. + +destack(<<"br">>, Stack) -> + %% This is an ugly hack to make dumb_br_test() pass, + %% this makes it such that br can never have children. 
+ Stack; +destack(TagName, Stack) when is_list(Stack) -> + F = fun (X) -> + case X of + {TagName, _, _} -> + false; + _ -> + true + end + end, + case lists:splitwith(F, Stack) of + {_, []} -> + %% If we're parsing something like XML we might find + %% a <link>tag</link> that is normally a singleton + %% in HTML but isn't here + case {is_singleton(TagName), Stack} of + {true, [{T0, A0, Acc0} | Post0]} -> + case lists:splitwith(F, Acc0) of + {_, []} -> + %% Actually was a singleton + Stack; + {Pre, [{T1, A1, Acc1} | Post1]} -> + [{T0, A0, [{T1, A1, Acc1 ++ lists:reverse(Pre)} | Post1]} + | Post0] + end; + _ -> + %% No match, no state change + Stack + end; + {_Pre, [_T]} -> + %% Unfurl the whole stack, we're done + destack(Stack); + {Pre, [T, {T0, A0, Acc0} | Post]} -> + %% Unfurl up to the tag, then accumulate it + [{T0, A0, [destack(Pre ++ [T]) | Acc0]} | Post] + end. + +destack([{Tag, Attrs, Acc}]) -> + {Tag, Attrs, lists:reverse(Acc)}; +destack([{T1, A1, Acc1}, {T0, A0, Acc0} | Rest]) -> + destack([{T0, A0, [{T1, A1, lists:reverse(Acc1)} | Acc0]} | Rest]). + +is_singleton(<<"area">>) -> true; +is_singleton(<<"base">>) -> true; +is_singleton(<<"br">>) -> true; +is_singleton(<<"col">>) -> true; +is_singleton(<<"embed">>) -> true; +is_singleton(<<"hr">>) -> true; +is_singleton(<<"img">>) -> true; +is_singleton(<<"input">>) -> true; +is_singleton(<<"keygen">>) -> true; +is_singleton(<<"link">>) -> true; +is_singleton(<<"meta">>) -> true; +is_singleton(<<"param">>) -> true; +is_singleton(<<"source">>) -> true; +is_singleton(<<"track">>) -> true; +is_singleton(<<"wbr">>) -> true; +is_singleton(_) -> false. + +tokenize_data(B, S=#decoder{offset=O}) -> + tokenize_data(B, S, O, true). + +tokenize_data(B, S=#decoder{offset=O}, Start, Whitespace) -> + case B of + <<_:O/binary, C, _/binary>> when (C =/= $< andalso C =/= $&) -> + tokenize_data(B, ?INC_CHAR(S, C), Start, + (Whitespace andalso ?IS_WHITESPACE(C))); + _ -> + Len = O - Start, + <<_:Start/binary, Data:Len/binary, _/binary>> = B, + {{data, Data, Whitespace}, S} + end. + +tokenize_attributes(B, S) -> + tokenize_attributes(B, S, []). + +tokenize_attributes(B, S=#decoder{offset=O}, Acc) -> + case B of + <<_:O/binary>> -> + {lists:reverse(Acc), S}; + <<_:O/binary, C, _/binary>> when (C =:= $> orelse C =:= $/) -> + {lists:reverse(Acc), S}; + <<_:O/binary, "?>", _/binary>> -> + {lists:reverse(Acc), S}; + <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) -> + tokenize_attributes(B, ?INC_CHAR(S, C), Acc); + _ -> + {Attr, S1} = tokenize_literal(B, S), + {Value, S2} = tokenize_attr_value(Attr, B, S1), + tokenize_attributes(B, S2, [{Attr, Value} | Acc]) + end. + +tokenize_attr_value(Attr, B, S) -> + S1 = skip_whitespace(B, S), + O = S1#decoder.offset, + case B of + <<_:O/binary, "=", _/binary>> -> + S2 = skip_whitespace(B, ?INC_COL(S1)), + tokenize_quoted_or_unquoted_attr_value(B, S2); + _ -> + {Attr, S1} + end. + +tokenize_quoted_or_unquoted_attr_value(B, S=#decoder{offset=O}) -> + case B of + <<_:O/binary>> -> + { [], S }; + <<_:O/binary, Q, _/binary>> when Q =:= ?QUOTE orelse + Q =:= ?SQUOTE -> + tokenize_quoted_attr_value(B, ?INC_COL(S), [], Q); + <<_:O/binary, _/binary>> -> + tokenize_unquoted_attr_value(B, S, []) + end. 
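+
+%% Illustrative example (assumed values, not taken from the original module):
+%% the clauses above make quoted and unquoted attribute values tokenize the
+%% same way, e.g. both <<"<a href=/x>">> and <<"<a href=\"/x\">">> should
+%% produce the attribute {<<"href">>, <<"/x">>}; only the handling of the
+%% terminating character differs.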
+ +tokenize_quoted_attr_value(B, S=#decoder{offset=O}, Acc, Q) -> + case B of + <<_:O/binary>> -> + { iolist_to_binary(lists:reverse(Acc)), S }; + <<_:O/binary, $&, _/binary>> -> + {{data, Data, false}, S1} = tokenize_charref(B, ?INC_COL(S)), + tokenize_quoted_attr_value(B, S1, [Data|Acc], Q); + <<_:O/binary, Q, _/binary>> -> + { iolist_to_binary(lists:reverse(Acc)), ?INC_COL(S) }; + <<_:O/binary, C, _/binary>> -> + tokenize_quoted_attr_value(B, ?INC_COL(S), [C|Acc], Q) + end. + +tokenize_unquoted_attr_value(B, S=#decoder{offset=O}, Acc) -> + case B of + <<_:O/binary>> -> + { iolist_to_binary(lists:reverse(Acc)), S }; + <<_:O/binary, $&, _/binary>> -> + {{data, Data, false}, S1} = tokenize_charref(B, ?INC_COL(S)), + tokenize_unquoted_attr_value(B, S1, [Data|Acc]); + <<_:O/binary, $/, $>, _/binary>> -> + { iolist_to_binary(lists:reverse(Acc)), S }; + <<_:O/binary, C, _/binary>> when ?PROBABLE_CLOSE(C) -> + { iolist_to_binary(lists:reverse(Acc)), S }; + <<_:O/binary, C, _/binary>> -> + tokenize_unquoted_attr_value(B, ?INC_COL(S), [C|Acc]) + end. + +skip_whitespace(B, S=#decoder{offset=O}) -> + case B of + <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) -> + skip_whitespace(B, ?INC_CHAR(S, C)); + _ -> + S + end. + +tokenize_literal(Bin, S=#decoder{offset=O}) -> + case Bin of + <<_:O/binary, C, _/binary>> when C =:= $> + orelse C =:= $/ + orelse C =:= $= -> + %% Handle case where tokenize_literal would consume + %% 0 chars. http://github.com/mochi/mochiweb/pull/13 + {[C], ?INC_COL(S)}; + _ -> + tokenize_literal(Bin, S, []) + end. + +tokenize_literal(Bin, S=#decoder{offset=O}, Acc) -> + case Bin of + <<_:O/binary, $&, _/binary>> -> + {{data, Data, false}, S1} = tokenize_charref(Bin, ?INC_COL(S)), + tokenize_literal(Bin, S1, [Data | Acc]); + <<_:O/binary, C, _/binary>> when not (?IS_WHITESPACE(C) + orelse C =:= $> + orelse C =:= $/ + orelse C =:= $=) -> + tokenize_literal(Bin, ?INC_COL(S), [C | Acc]); + _ -> + {iolist_to_binary(string:to_lower(lists:reverse(Acc))), S} + end. + +raw_qgt(Bin, S=#decoder{offset=O}) -> + raw_qgt(Bin, S, O). + +raw_qgt(Bin, S=#decoder{offset=O}, Start) -> + case Bin of + <<_:O/binary, "?>", _/binary>> -> + Len = O - Start, + <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin, + {Raw, ?ADV_COL(S, 2)}; + <<_:O/binary, C, _/binary>> -> + raw_qgt(Bin, ?INC_CHAR(S, C), Start); + <<_:O/binary>> -> + <<_:Start/binary, Raw/binary>> = Bin, + {Raw, S} + end. + +find_qgt(Bin, S=#decoder{offset=O}) -> + case Bin of + <<_:O/binary, "?>", _/binary>> -> + ?ADV_COL(S, 2); + <<_:O/binary, ">", _/binary>> -> + ?ADV_COL(S, 1); + <<_:O/binary, "/>", _/binary>> -> + ?ADV_COL(S, 2); + <<_:O/binary, C, _/binary>> -> + find_qgt(Bin, ?INC_CHAR(S, C)); + <<_:O/binary>> -> + S + end. + +find_gt(Bin, S) -> + find_gt(Bin, S, false). + +find_gt(Bin, S=#decoder{offset=O}, HasSlash) -> + case Bin of + <<_:O/binary, $/, _/binary>> -> + find_gt(Bin, ?INC_COL(S), true); + <<_:O/binary, $>, _/binary>> -> + {?INC_COL(S), HasSlash}; + <<_:O/binary, C, _/binary>> -> + find_gt(Bin, ?INC_CHAR(S, C), HasSlash); + _ -> + {S, HasSlash} + end. + +tokenize_charref(Bin, S=#decoder{offset=O}) -> + try + case tokenize_charref_raw(Bin, S, O) of + {C1, S1} when C1 >= 16#D800 andalso C1 =< 16#DFFF -> + %% Surrogate pair + tokeninize_charref_surrogate_pair(Bin, S1, C1); + {Unichar, S1} when is_integer(Unichar) -> + %% CHANGED: Previously this was mochiutf8:codepoint_to_bytes(Unichar) + %% but that is equivalent to the below. 
+ {{data, <<Unichar/utf8>>, false}, + S1} + end + catch + throw:invalid_charref -> + {{data, <<"&">>, false}, S} + end. + +tokeninize_charref_surrogate_pair(Bin, S=#decoder{offset=O}, C1) -> + case Bin of + <<_:O/binary, $&, _/binary>> -> + case tokenize_charref_raw(Bin, ?INC_COL(S), O + 1) of + {C2, S1} when C2 >= 16#D800 andalso C1 =< 16#DFFF -> + {{data, + unicode:characters_to_binary( + <<C1:16, C2:16>>, + utf16, + utf8), + false}, + S1}; + _ -> + throw(invalid_charref) + end; + _ -> + throw(invalid_charref) + end. + +tokenize_charref_raw(Bin, S=#decoder{offset=O}, Start) -> + case Bin of + <<_:O/binary>> -> + throw(invalid_charref); + <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) + orelse C =:= ?SQUOTE + orelse C =:= ?QUOTE + orelse C =:= $/ + orelse C =:= $> -> + throw(invalid_charref); + <<_:O/binary, $;, _/binary>> -> + Len = O - Start, + %% CHANGED: Previously this was mochiweb_charref:charref/1 + %% but the functionality below is equivalent; + <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin, + + case 'Elixir.HtmlEntities':decode(<<$&, Raw/binary, $;>>) of + <<CP/utf8>> -> + {CP, ?INC_COL(S)}; + _ -> + throw(invalid_charref) + end; + _ -> + tokenize_charref_raw(Bin, ?INC_COL(S), Start) + end. + +tokenize_doctype(Bin, S) -> + tokenize_doctype(Bin, S, []). + +tokenize_doctype(Bin, S=#decoder{offset=O}, Acc) -> + case Bin of + <<_:O/binary>> -> + {{doctype, lists:reverse(Acc)}, S}; + <<_:O/binary, $>, _/binary>> -> + {{doctype, lists:reverse(Acc)}, ?INC_COL(S)}; + <<_:O/binary, C, _/binary>> when ?IS_WHITESPACE(C) -> + tokenize_doctype(Bin, ?INC_CHAR(S, C), Acc); + _ -> + {Word, S1} = tokenize_word_or_literal(Bin, S), + tokenize_doctype(Bin, S1, [Word | Acc]) + end. + +tokenize_word_or_literal(Bin, S=#decoder{offset=O}) -> + case Bin of + <<_:O/binary, C, _/binary>> when C =:= ?QUOTE orelse C =:= ?SQUOTE -> + tokenize_word(Bin, ?INC_COL(S), C); + <<_:O/binary, C, _/binary>> when not ?IS_WHITESPACE(C) -> + %% Sanity check for whitespace + tokenize_literal(Bin, S) + end. + +tokenize_word(Bin, S, Quote) -> + tokenize_word(Bin, S, Quote, []). + +tokenize_word(Bin, S=#decoder{offset=O}, Quote, Acc) -> + case Bin of + <<_:O/binary>> -> + {iolist_to_binary(lists:reverse(Acc)), S}; + <<_:O/binary, Quote, _/binary>> -> + {iolist_to_binary(lists:reverse(Acc)), ?INC_COL(S)}; + <<_:O/binary, $&, _/binary>> -> + {{data, Data, false}, S1} = tokenize_charref(Bin, ?INC_COL(S)), + tokenize_word(Bin, S1, Quote, [Data | Acc]); + <<_:O/binary, C, _/binary>> -> + tokenize_word(Bin, ?INC_CHAR(S, C), Quote, [C | Acc]) + end. + +tokenize_cdata(Bin, S=#decoder{offset=O}) -> + tokenize_cdata(Bin, S, O). + +tokenize_cdata(Bin, S=#decoder{offset=O}, Start) -> + case Bin of + <<_:O/binary, "]]>", _/binary>> -> + Len = O - Start, + <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin, + {{data, Raw, false}, ?ADV_COL(S, 3)}; + <<_:O/binary, C, _/binary>> -> + tokenize_cdata(Bin, ?INC_CHAR(S, C), Start); + _ -> + <<_:O/binary, Raw/binary>> = Bin, + {{data, Raw, false}, S} + end. + +tokenize_comment(Bin, S=#decoder{offset=O}) -> + tokenize_comment(Bin, S, O). + +tokenize_comment(Bin, S=#decoder{offset=O}, Start) -> + case Bin of + <<_:O/binary, "-->", _/binary>> -> + Len = O - Start, + <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin, + {{comment, Raw}, ?ADV_COL(S, 3)}; + <<_:O/binary, C, _/binary>> -> + tokenize_comment(Bin, ?INC_CHAR(S, C), Start); + <<_:Start/binary, Raw/binary>> -> + {{comment, Raw}, S} + end. + +tokenize_script(Bin, S=#decoder{offset=O}) -> + tokenize_script(Bin, S, O). 
+ +tokenize_script(Bin, S=#decoder{offset=O}, Start) -> + case Bin of + %% Just a look-ahead, we want the end_tag separately + <<_:O/binary, $<, $/, SS, CC, RR, II, PP, TT, ZZ, _/binary>> + when (SS =:= $s orelse SS =:= $S) andalso + (CC =:= $c orelse CC =:= $C) andalso + (RR =:= $r orelse RR =:= $R) andalso + (II =:= $i orelse II =:= $I) andalso + (PP =:= $p orelse PP =:= $P) andalso + (TT=:= $t orelse TT =:= $T) andalso + ?PROBABLE_CLOSE(ZZ) -> + Len = O - Start, + <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin, + {{data, Raw, false}, S}; + <<_:O/binary, C, _/binary>> -> + tokenize_script(Bin, ?INC_CHAR(S, C), Start); + <<_:Start/binary, Raw/binary>> -> + {{data, Raw, false}, S} + end. + +tokenize_style(Bin, S=#decoder{offset=O}) -> + tokenize_style(Bin, S, O). + +tokenize_style(Bin, S=#decoder{offset=O}, Start) -> + case Bin of + %% Just a look-ahead, we want the end_tag separately + <<_:O/binary, $<, $/, SS, TT, YY, LL, EE, ZZ, _/binary>> + when (SS =:= $s orelse SS =:= $S) andalso + (TT=:= $t orelse TT =:= $T) andalso + (YY=:= $y orelse YY =:= $Y) andalso + (LL=:= $l orelse LL =:= $L) andalso + (EE=:= $e orelse EE =:= $E) andalso + ?PROBABLE_CLOSE(ZZ) -> + Len = O - Start, + <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin, + {{data, Raw, false}, S}; + <<_:O/binary, C, _/binary>> -> + tokenize_style(Bin, ?INC_CHAR(S, C), Start); + <<_:Start/binary, Raw/binary>> -> + {{data, Raw, false}, S} + end. + +tokenize_title(Bin, S=#decoder{offset=O}) -> + tokenize_title(Bin, S, O). + +tokenize_title(Bin, S=#decoder{offset=O}, Start) -> + case Bin of + %% Just a look-ahead, we want the end_tag separately + <<_:O/binary, $<, $/, TT, II, TT2, LL, EE, ZZ, _/binary>> + when (TT=:= $t orelse TT =:= $T) andalso + (II=:= $i orelse II =:= $I) andalso + (TT2=:= $t orelse TT2 =:= $T) andalso + (LL=:= $l orelse LL =:= $L) andalso + (EE=:= $e orelse EE =:= $E) andalso + ?PROBABLE_CLOSE(ZZ) -> + Len = O - Start, + <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin, + {{data, Raw, false}, S}; + <<_:O/binary, C, _/binary>> -> + tokenize_title(Bin, ?INC_CHAR(S, C), Start); + <<_:Start/binary, Raw/binary>> -> + {{data, Raw, false}, S} + end. + +tokenize_textarea(Bin, S=#decoder{offset=O}) -> + tokenize_textarea(Bin, S, O). + +tokenize_textarea(Bin, S=#decoder{offset=O}, Start) -> + case Bin of + %% Just a look-ahead, we want the end_tag separately + <<_:O/binary, $<, $/, TT, EE, XX, TT2, AA, RR, EE2, AA2, ZZ, _/binary>> + when (TT =:= $t orelse TT =:= $T) andalso + (EE =:= $e orelse EE =:= $E) andalso + (XX =:= $x orelse XX =:= $X) andalso + (TT2 =:= $t orelse TT2 =:= $T) andalso + (AA =:= $a orelse AA =:= $A) andalso + (RR =:= $r orelse RR =:= $R) andalso + (EE2 =:= $e orelse EE2 =:= $E) andalso + (AA2 =:= $a orelse AA2 =:= $A) andalso + ?PROBABLE_CLOSE(ZZ) -> + Len = O - Start, + <<_:Start/binary, Raw:Len/binary, _/binary>> = Bin, + {{data, Raw, false}, S}; + <<_:O/binary, C, _/binary>> -> + tokenize_textarea(Bin, ?INC_CHAR(S, C), Start); + <<_:Start/binary, Raw/binary>> -> + {{data, Raw, false}, S} + end. diff --git a/deps/floki/src/floki_selector_lexer.xrl b/deps/floki/src/floki_selector_lexer.xrl new file mode 100644 index 0000000..62534ea --- /dev/null +++ b/deps/floki/src/floki_selector_lexer.xrl @@ -0,0 +1,52 @@ +Definitions. + +IDENTIFIER = [-A-Za-z0-9_]+(\\\.[-A-Za-z0-9_]+)* +QUOTED = (\"[^"]*\"|\'[^']*\') +PARENTESIS = \([^)]*\) +INT = [0-9]+ +NOT = (n|N)(o|O)(t|T) +ODD = (o|O)(d|D)(d|D) +EVEN = (e|E)(v|V)(e|E)(n|N) +PSEUDO_PATT = (\+|-)?({INT})?(n|N)((\+|-){INT})? 
+SYMBOL = [\[\]*] +ATTRIBUTE_IDENTIFIER = \s[is]\] +W = [\s\t\r\n\f] + +Rules. + +{IDENTIFIER} : {token, {identifier, TokenLine, TokenChars}}. +{QUOTED} : {token, {quoted, TokenLine, remove_wrapper(TokenChars)}}. +{ATTRIBUTE_IDENTIFIER} : {token, {attribute_identifier, TokenLine, TokenChars}}. +{SYMBOL} : {token, {TokenChars, TokenLine}}. +#{IDENTIFIER} : {token, {hash, TokenLine, tail(TokenChars)}}. +\.{IDENTIFIER} : {token, {class, TokenLine, tail(TokenChars)}}. +\:{NOT}\( : {token, {pseudo_not, TokenLine}}. +\:{IDENTIFIER} : {token, {pseudo, TokenLine, tail(TokenChars)}}. +\({INT}\) : {token, {pseudo_class_int, TokenLine, list_to_integer(remove_wrapper(TokenChars))}}. +\({ODD}\) : {token, {pseudo_class_odd, TokenLine}}. +\({EVEN}\) : {token, {pseudo_class_even, TokenLine}}. +\({PSEUDO_PATT}\) : {token, {pseudo_class_pattern, TokenLine, remove_wrapper(TokenChars)}}. +\({QUOTED}\) : {token, {pseudo_class_quoted, TokenLine, remove_wrapper(remove_wrapper(TokenChars))}}. +{W}*\) : {token, {close_parentesis, TokenLine}}. +~= : {token, {includes, TokenLine}}. +\|= : {token, {dash_match, TokenLine}}. +\^= : {token, {prefix_match, TokenLine}}. +\$= : {token, {suffix_match, TokenLine}}. +\*= : {token, {substring_match, TokenLine}}. += : {token, {equal, TokenLine}}. +{W}*,{W}* : {token, {comma, TokenLine}}. +{W}*>{W}* : {token, {greater, TokenLine}}. +{W}*\+{W}* : {token, {plus, TokenLine}}. +{W}*~{W}* : {token, {tilde, TokenLine}}. +{W}*\|{W}* : {token, {namespace_pipe, TokenLine}}. +{W}+ : {token, {space, TokenLine}}. +. : {token, {unknown, TokenLine, TokenChars}}. + +Erlang code. + +remove_wrapper(Chars) -> + Len = string:len(Chars), + string:substr(Chars, 2, Len - 2). + +tail([_|T]) -> + T. diff --git a/deps/html_entities/.fetch b/deps/html_entities/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/html_entities/.hex b/deps/html_entities/.hex new file mode 100644 index 0000000000000000000000000000000000000000..e3e918044f73b9b6bb6aecd0daaf9db04e4dbcfa GIT binary patch literal 277 zcmZ9HIZ^{L5JiP;mIRLC(lV_j<0h__x`hTTjcZ1xoO5wxLedxf_x?}Wl|6}l;T!Hy zbRonOiMP2mUQelT`r59w4*DK=a_l4oPw0p>hMX;C@4%Z7G$@LeVsy-rb(G3v9U~16 zK0q>w#7o77AG~hovG*{fb-sO)I5ez$SG<;9&nIqu?lW(rV!8JJ6$wm46XYb#<iRSi z+Qa}xad0-+ES+VV6tE{R!N*aBsJ-$kPnpuFIIMhG?z+eE`BF(de~+YjX@5)};i!KB D2zpHP literal 0 HcmV?d00001 diff --git a/deps/html_entities/LICENSE b/deps/html_entities/LICENSE new file mode 100644 index 0000000..107d6dd --- /dev/null +++ b/deps/html_entities/LICENSE @@ -0,0 +1,7 @@ +Copyright (c) 2015 Martin Svalin + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/html_entities/README.md b/deps/html_entities/README.md new file mode 100644 index 0000000..8f7929e --- /dev/null +++ b/deps/html_entities/README.md @@ -0,0 +1,53 @@ +# HtmlEntities + +[![Module Version](https://img.shields.io/hexpm/v/html_entities.svg)](https://hex.pm/packages/html_entities) +[![Hex Docs](https://img.shields.io/badge/hex-docs-lightgreen.svg)](https://hexdocs.pm/html_entities/) +[![Total Download](https://img.shields.io/hexpm/dt/html_entities.svg)](https://hex.pm/packages/html_entities) +[![License](https://img.shields.io/hexpm/l/html_entities.svg)](https://github.com/martinsvalin/html_entities/blob/master/LICENSE) +[![Last Updated](https://img.shields.io/github/last-commit/martinsvalin/html_entities.svg)](https://github.com/martinsvalin/html_entities/commits/master) + +Elixir module for decoding and encoding HTML entities in a string. + +Entity names, codepoints and their corresponding characters are copied from +[Wikipedia](https://en.wikipedia.org/wiki/List_of_XML_and_HTML_character_entity_references). + +## Installation + +Add the dependency to your `mix.exs` file, then run `mix deps.get`. + +```elixir +defp deps do + [ + {:html_entities, "~> 0.5"} + ] +end +``` + +## Usage + +Inside IEx: + +```elixir +iex> HtmlEntities.decode("Tom &amp; Jerry") +"Tom & Jerry" +iex> HtmlEntities.decode("&iexcl;Ay, caramba!") +"¡Ay, caramba!" +iex> HtmlEntities.encode("<< KAPOW!! >>") +"&lt;&lt; KAPOW!! &gt;&gt;" +``` + +Inside a module: + +```elixir +defmodule EntityTest do + def non_breaking_space do + HtmlEntities.decode("&nbsp;") + end +end +``` + +## License + +Copyright (c) 2015 Martin Svalin + +This library is MIT licensed. See the [LICENSE](https://github.com/martinsvalin/html_entities/blob/master/LICENSE) for details. diff --git a/deps/html_entities/hex_metadata.config b/deps/html_entities/hex_metadata.config new file mode 100644 index 0000000..92c0127 --- /dev/null +++ b/deps/html_entities/hex_metadata.config @@ -0,0 +1,14 @@ +{<<"app">>,<<"html_entities">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>,<<"Decode and encode HTML entities in a string.">>}. +{<<"elixir">>,<<"~> 1.3">>}. +{<<"files">>, + [<<"lib">>,<<"lib/html_entities">>,<<"lib/html_entities/util.ex">>, + <<"lib/html_entities_list.txt">>,<<"lib/html_entities.ex">>,<<"mix.exs">>, + <<"README.md">>,<<"LICENSE">>]}. +{<<"licenses">>,[<<"MIT">>]}. +{<<"links">>, + [{<<"GitHub">>,<<"https://github.com/martinsvalin/html_entities">>}]}. +{<<"name">>,<<"html_entities">>}. +{<<"requirements">>,[]}. +{<<"version">>,<<"0.5.2">>}. diff --git a/deps/html_entities/lib/html_entities.ex b/deps/html_entities/lib/html_entities.ex new file mode 100644 index 0000000..2b7f765 --- /dev/null +++ b/deps/html_entities/lib/html_entities.ex @@ -0,0 +1,89 @@ +defmodule HtmlEntities do + @moduledoc """ + Decode and encode HTML entities in a string. + + ## Examples + + Decoding: + + iex> "Tom &amp; Jerry" |> HtmlEntities.decode + "Tom & Jerry" + iex> "&iexcl;Ay, caramba!" |> HtmlEntities.decode + "¡Ay, caramba!" + iex> "&#337; &#x151;" |> HtmlEntities.decode + "ő ő" + + Encoding: + + iex> "Tom & Jerry" |> HtmlEntities.encode + "Tom &amp; Jerry" + iex> "<< KAPOW!! >>" |> HtmlEntities.encode + "&lt;&lt; KAPOW!! &gt;&gt;" + """ + + @external_resource "lib/html_entities_list.txt" + + @doc "Decode HTML entities in a string."
+ @spec decode(String.t()) :: String.t() + def decode(string) when is_binary(string) do + decode(string, "") + end + + defp decode(<<"&", rest::binary>>, acc) do + case decode_entity(rest) do + {character, rest} -> decode(rest, <<acc::binary, character::binary>>) + :error -> decode(rest, <<acc::binary, ?&>>) + end + end + + defp decode(<<head, rest::binary>>, acc) do + decode(rest, <<acc::binary, head>>) + end + + defp decode(<<>>, acc) do + acc + end + + defp decode_entity(<<"#x", c, rest::binary>>) when c in ?0..?9 or c in ?a..?f or c in ?A..?F do + case Integer.parse(<<c, rest::binary>>, 16) do + {number, ";" <> rest} -> {<<number::utf8>>, rest} + _ -> :error + end + rescue + ArgumentError -> :error + end + + defp decode_entity(<<"#", rest::binary>>) do + case Integer.parse(rest, 10) do + {number, ";" <> rest} -> {<<number::utf8>>, rest} + _ -> :error + end + rescue + ArgumentError -> :error + end + + codes = HtmlEntities.Util.load_entities(@external_resource) + + for {name, _character, codepoint} <- codes do + defp decode_entity(<<unquote(name), ?;, rest::binary>>) do + {<<unquote(codepoint)::utf8>>, rest} + end + end + + defp decode_entity(_), do: :error + + @doc "Encode HTML entities in a string." + @spec encode(String.t()) :: String.t() + def encode(string) when is_binary(string) do + for <<x <- string>>, into: "" do + case x do + ?' -> "&#39;" + ?" -> "&quot;" + ?& -> "&amp;" + ?< -> "&lt;" + ?> -> "&gt;" + _ -> <<x>> + end + end + end +end diff --git a/deps/html_entities/lib/html_entities/util.ex b/deps/html_entities/lib/html_entities/util.ex new file mode 100644 index 0000000..a05ac01 --- /dev/null +++ b/deps/html_entities/lib/html_entities/util.ex @@ -0,0 +1,23 @@ +defmodule HtmlEntities.Util do + @moduledoc """ + Utility functions for managing metadata. + + Putting this code here makes it testable, and allows the code + generation part of HtmlEntities to be as small as possible. + """ + + @type entity :: {String.t(), String.t(), integer()} + + @doc "Load HTML entities from an external file." + @spec load_entities(String.t()) :: [entity()] + def load_entities(filename) do + File.stream!(filename) |> Enum.map(&convert_line_to_entity/1) + end + + @doc "Converts a line of comma-separated lines to entity definitions."
+ @spec convert_line_to_entity([String.t()] | File.Stream.t()) :: entity() + def convert_line_to_entity(line) do + [name, character, codepoint] = line |> String.trim_trailing() |> String.split(",") + {name, character, String.to_integer(codepoint)} + end +end diff --git a/deps/html_entities/lib/html_entities_list.txt b/deps/html_entities/lib/html_entities_list.txt new file mode 100644 index 0000000..36faa3d --- /dev/null +++ b/deps/html_entities/lib/html_entities_list.txt @@ -0,0 +1,253 @@ +quot,",34 +amp,&,38 +apos,',39 +lt,<,60 +gt,>,62 +nbsp,ย ,160 +iexcl,ยก,161 +cent,ยข,162 +pound,ยฃ,163 +curren,ยค,164 +yen,ยฅ,165 +brvbar,ยฆ,166 +sect,ยง,167 +uml,ยจ,168 +copy,ยฉ,169 +ordf,ยช,170 +laquo,ยซ,171 +not,ยฌ,172 +shy, ,173 +reg,ยฎ,174 +macr,ยฏ,175 +deg,ยฐ,176 +plusmn,ยฑ,177 +sup2,ยฒ,178 +sup3,ยณ,179 +acute,ยด,180 +micro,ยต,181 +para,ยถ,182 +middot,ยท,183 +cedil,ยธ,184 +sup1,ยน,185 +ordm,ยบ,186 +raquo,ยป,187 +frac14,ยผ,188 +frac12,ยฝ,189 +frac34,ยพ,190 +iquest,ยฟ,191 +Agrave,ร€,192 +Aacute,ร,193 +Acirc,ร‚,194 +Atilde,รƒ,195 +Auml,ร„,196 +Aring,ร…,197 +AElig,ร†,198 +Ccedil,ร‡,199 +Egrave,รˆ,200 +Eacute,ร‰,201 +Ecirc,รŠ,202 +Euml,ร‹,203 +Igrave,รŒ,204 +Iacute,ร,205 +Icirc,รŽ,206 +Iuml,ร,207 +ETH,ร,208 +Ntilde,ร‘,209 +Ograve,ร’,210 +Oacute,ร“,211 +Ocirc,ร”,212 +Otilde,ร•,213 +Ouml,ร–,214 +times,ร—,215 +Oslash,ร˜,216 +Ugrave,ร™,217 +Uacute,รš,218 +Ucirc,ร›,219 +Uuml,รœ,220 +Yacute,ร,221 +THORN,รž,222 +szlig,รŸ,223 +agrave,ร ,224 +aacute,รก,225 +acirc,รข,226 +atilde,รฃ,227 +auml,รค,228 +aring,รฅ,229 +aelig,รฆ,230 +ccedil,รง,231 +egrave,รจ,232 +eacute,รฉ,233 +ecirc,รช,234 +euml,รซ,235 +igrave,รฌ,236 +iacute,รญ,237 +icirc,รฎ,238 +iuml,รฏ,239 +eth,รฐ,240 +ntilde,รฑ,241 +ograve,รฒ,242 +oacute,รณ,243 +ocirc,รด,244 +otilde,รต,245 +ouml,รถ,246 +divide,รท,247 +oslash,รธ,248 +ugrave,รน,249 +uacute,รบ,250 +ucirc,รป,251 +uuml,รผ,252 +yacute,รฝ,253 +thorn,รพ,254 +yuml,รฟ,255 +OElig,ล’,338 +oelig,ล“,339 +Scaron,ล ,352 +scaron,ลก,353 +Yuml,ลธ,376 +fnof,ฦ’,402 +circ,ห†,710 +tilde,หœ,732 +Alpha,ฮ‘,913 +Beta,ฮ’,914 +Gamma,ฮ“,915 +Delta,ฮ”,916 +Epsilon,ฮ•,917 +Zeta,ฮ–,918 +Eta,ฮ—,919 +Theta,ฮ˜,920 +Iota,ฮ™,921 +Kappa,ฮš,922 +Lambda,ฮ›,923 +Mu,ฮœ,924 +Nu,ฮ,925 +Xi,ฮž,926 +Omicron,ฮŸ,927 +Pi,ฮ ,928 +Rho,ฮก,929 +Sigma,ฮฃ,931 +Tau,ฮค,932 +Upsilon,ฮฅ,933 +Phi,ฮฆ,934 +Chi,ฮง,935 +Psi,ฮจ,936 +Omega,ฮฉ,937 +alpha,ฮฑ,945 +beta,ฮฒ,946 +gamma,ฮณ,947 +delta,ฮด,948 +epsilon,ฮต,949 +zeta,ฮถ,950 +eta,ฮท,951 +theta,ฮธ,952 +iota,ฮน,953 +kappa,ฮบ,954 +lambda,ฮป,955 +mu,ฮผ,956 +nu,ฮฝ,957 +xi,ฮพ,958 +omicron,ฮฟ,959 +pi,ฯ€,960 +rho,ฯ,961 +sigmaf,ฯ‚,962 +sigma,ฯƒ,963 +tau,ฯ„,964 +upsilon,ฯ…,965 +phi,ฯ†,966 +chi,ฯ‡,967 +psi,ฯˆ,968 +omega,ฯ‰,969 +thetasym,ฯ‘,977 +upsih,ฯ’,978 +piv,ฯ–,982 +ensp,โ€‚,8194 +emsp,โ€ƒ,8195 +thinsp,โ€‰,8201 +zwnj, ,8204 +zwj, ,8205 +lrm, ,8206 +rlm, ,8207 +ndash,โ€“,8211 +mdash,โ€”,8212 +lsquo,โ€˜,8216 +rsquo,โ€™,8217 +sbquo,โ€š,8218 +ldquo,โ€œ,8220 +rdquo,โ€,8221 +bdquo,โ€ž,8222 +dagger,โ€ ,8224 +Dagger,โ€ก,8225 +bull,โ€ข,8226 +hellip,โ€ฆ,8230 +permil,โ€ฐ,8240 +prime,โ€ฒ,8242 +Prime,โ€ณ,8243 +lsaquo,โ€น,8249 +rsaquo,โ€บ,8250 +oline,โ€พ,8254 +frasl,โ„,8260 +euro,โ‚ฌ,8364 +image,โ„‘,8465 +weierp,โ„˜,8472 +real,โ„œ,8476 +trade,โ„ข,8482 +alefsym,โ„ต,8501 +larr,โ†,8592 +uarr,โ†‘,8593 +rarr,โ†’,8594 +darr,โ†“,8595 +harr,โ†”,8596 +crarr,โ†ต,8629 +lArr,โ‡,8656 +uArr,โ‡‘,8657 +rArr,โ‡’,8658 +dArr,โ‡“,8659 +hArr,โ‡”,8660 +forall,โˆ€,8704 +part,โˆ‚,8706 +exist,โˆƒ,8707 +empty,โˆ…,8709 +nabla,โˆ‡,8711 +isin,โˆˆ,8712 +notin,โˆ‰,8713 +ni,โˆ‹,8715 +prod,โˆ,8719 +sum,โˆ‘,8721 +minus,โˆ’,8722 +lowast,โˆ—,8727 
+radic,โˆš,8730 +prop,โˆ,8733 +infin,โˆž,8734 +ang,โˆ ,8736 +and,โˆง,8743 +or,โˆจ,8744 +cap,โˆฉ,8745 +cup,โˆช,8746 +int,โˆซ,8747 +there4,โˆด,8756 +sim,โˆผ,8764 +cong,โ‰…,8773 +asymp,โ‰ˆ,8776 +ne,โ‰ ,8800 +equiv,โ‰ก,8801 +le,โ‰ค,8804 +ge,โ‰ฅ,8805 +sub,โŠ‚,8834 +sup,โŠƒ,8835 +nsub,โŠ„,8836 +sube,โŠ†,8838 +supe,โŠ‡,8839 +oplus,โŠ•,8853 +otimes,โŠ—,8855 +perp,โŠฅ,8869 +sdot,โ‹…,8901 +lceil,โŒˆ,8968 +rceil,โŒ‰,8969 +lfloor,โŒŠ,8970 +rfloor,โŒ‹,8971 +lang,โŒฉ,9001 +rang,โŒช,9002 +loz,โ—Š,9674 +spades,โ™ ,9824 +clubs,โ™ฃ,9827 +hearts,โ™ฅ,9829 +diams,โ™ฆ,9830 diff --git a/deps/html_entities/mix.exs b/deps/html_entities/mix.exs new file mode 100644 index 0000000..1f62fd8 --- /dev/null +++ b/deps/html_entities/mix.exs @@ -0,0 +1,53 @@ +defmodule HtmlEntities.Mixfile do + use Mix.Project + + @source_url "https://github.com/martinsvalin/html_entities" + @version "0.5.2" + + def project do + [ + app: :html_entities, + version: @version, + name: "HtmlEntities", + elixir: "~> 1.3", + description: description(), + package: package(), + deps: deps(), + docs: docs() + ] + end + + defp description do + """ + Decode and encode HTML entities in a string. + """ + end + + defp package do + [ + maintainers: ["Martin Svalin", "Dรกvid Kovรกcs", "Johan Wรคrlander"], + files: ["lib", "mix.exs", "README*", "LICENSE*"], + licenses: ["MIT"], + links: %{"GitHub" => @source_url} + ] + end + + defp deps do + [ + {:ex_doc, ">= 0.0.0", only: :dev, runtime: false} + ] + end + + defp docs do + [ + extras: ["README.md"], + main: "readme", + source_url: @source_url, + source_ref: "v#{@version}" + ] + end + + def application do + [applications: []] + end +end diff --git a/deps/jason/.fetch b/deps/jason/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/jason/.hex b/deps/jason/.hex new file mode 100644 index 0000000000000000000000000000000000000000..b5b17d325ee61876c338e23401a2733f78e729bc GIT binary patch literal 269 zcmZ9{$x_2G5Cl+>u$}mn)M&MJ<4>s3EZ7N>Q+7h-&W|H<<l2XR-9OWA+OyeDa>E^( zi!tVz&D-3XtRFN9owjSOJ$+Y9WptQ*P$_muE_r0f3S2nbqRD#>7?KoaRY9N;Wpp|E z!G;_Xo7ajBKV{v%df!7w>wG)1d2Cqak9a!bwzRG<*UNuBdL^sTx)^;10ii)6Xr>aZ u4AGKHz>p$SL09@H7@4BQ0N}_Wb!L_Ga%cM2b7k}L-KWe;yJz;`5AX}FluE$> literal 0 HcmV?d00001 diff --git a/deps/jason/CHANGELOG.md b/deps/jason/CHANGELOG.md new file mode 100644 index 0000000..999c625 --- /dev/null +++ b/deps/jason/CHANGELOG.md @@ -0,0 +1,107 @@ +# Changelog + +## 1.3.0 (21.12.2020) + +### Enhancements + +* Add the `Jason.OrderedObject` struct +* Support decoding objects preserving all the keys with `objects: :ordered_objects` option +* Support decoding floats to `Decimal` with `floats: :decimals` option +* Add `~j` and `~J` sigils in module `Jason.Sigil` to support writing JSON literals in code + +### Fixes +* Fix error reporting when decoding strings (it was possible to mis-attribute the offending byte) +* Verify fields given to `@derive` + +## 1.2.2 (08.09.2020) + +### Enhancements + +* Support Decimal 2.0 + +## 1.2.1 (04.05.2020) + +### Security + +* Fix `html_safe` escaping in `Jason.encode` + + The `<!--` sequence of characters would not be escaped in `Jason.encode` + with`html_escape`ย mode, which could lead to DoS attacks when used for + embedding ofย arbitrary, user controlled strings into HTML through JSON + (e.g. inside of `<script>` tags). + + If you were not using the `html_safe` option, you are not affected. 
+
+  Affected versions: < 1.2.1
+  Patched versions: >= 1.2.1
+
+## 1.2.0 (17.03.2020)
+
+### Enhancements
+
+* Add `Jason.Encode.keyword/2`
+  ([cb1f26a](https://github.com/michalmuskala/jason/commit/cb1f26a)).
+
+### Bug fixes
+
+* Fix `Jason.Helpers.json_map/1` value expansion
+  ([70b046a](https://github.com/michalmuskala/jason/commit/70b046a)).
+
+## 1.1.2 (19.10.2018)
+
+### Bug fixes
+
+* correctly handle the `pretty: false` option
+  ([ba318c8](https://github.com/michalmuskala/jason/commit/ba318c8)).
+
+## 1.1.1 (10.07.2018)
+
+### Bug fixes
+
+* correctly handle escape sequences in strings when pretty printing
+  ([794bbe4](https://github.com/michalmuskala/jason/commit/794bbe4)).
+
+## 1.1.0 (02.07.2018)
+
+### Enhancements
+
+* pretty-printing support through `Jason.Formatter` and `pretty: true` option
+  in `Jason.encode/2` ([d758e36](https://github.com/michalmuskala/jason/commit/d758e36)).
+
+### Bug fixes
+
+* silence variable warnings for fields with underscores used during deriving
+  ([88dd85c](https://github.com/michalmuskala/jason/commit/88dd85c)).
+* **potential incompatibility** don't raise `Protocol.UndefinedError` in non-bang functions
+  ([ad0f57b](https://github.com/michalmuskala/jason/commit/ad0f57b)).
+
+## 1.0.1 (02.07.2018)
+
+### Bug fixes
+
+* fix `Jason.Encode.escape` type ([a57b430](https://github.com/michalmuskala/jason/commit/a57b430))
+* multiple documentation improvements
+
+## 1.0.0 (26.01.2018)
+
+No changes
+
+## 1.0.0-rc.3 (26.01.2018)
+
+### Changes
+
+* update `escape` option of `Jason.encode/2` to take values:
+  `:json | :unicode_safe | :html_safe | :javascript_safe` for consistency. Old values of
+  `:unicode` and `:javascript` are still supported for compatibility with Poison.
+  ([f42dcbd](https://github.com/michalmuskala/jason/commit/f42dcbd))
+
+## 1.0.0-rc.2 (07.01.2018)
+
+### Bug fixes
+
+* add type for `strings` option ([b459ee4](https://github.com/michalmuskala/jason/commit/b459ee4))
+* support iodata in `decode!` ([a1f3456](https://github.com/michalmuskala/jason/commit/a1f3456))
+
+## 1.0.0-rc.1 (22.12.2017)
+
+* Initial release
diff --git a/deps/jason/LICENSE b/deps/jason/LICENSE
new file mode 100644
index 0000000..f22c0ba
--- /dev/null
+++ b/deps/jason/LICENSE
@@ -0,0 +1,13 @@
+Copyright (c) 2017-present Michał Muskała
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/deps/jason/README.md b/deps/jason/README.md
new file mode 100644
index 0000000..dba2fb2
--- /dev/null
+++ b/deps/jason/README.md
@@ -0,0 +1,141 @@
+# Jason
+
+A blazing fast JSON parser and generator in pure Elixir.
+
+The parser and generator are at least twice as fast as other Elixir/Erlang libraries
+(most notably `Poison`).
+The performance is comparable to `jiffy`, which is implemented in C as a NIF.
+Jason is usually only twice as slow.
+
+Both parser and generator fully conform to
+[RFC 8259](https://tools.ietf.org/html/rfc8259) and
+[ECMA 404](http://www.ecma-international.org/publications/standards/Ecma-404.htm)
+standards.
The parser is tested using [JSONTestSuite](https://github.com/nst/JSONTestSuite). + +## Installation + +The package can be installed by adding `jason` to your list of dependencies +in `mix.exs`: + +```elixir +def deps do + [{:jason, "~> 1.2"}] +end +``` + +## Basic Usage + +``` elixir +iex(1)> Jason.encode!(%{"age" => 44, "name" => "Steve Irwin", "nationality" => "Australian"}) +"{\"age\":44,\"name\":\"Steve Irwin\",\"nationality\":\"Australian\"}" + +iex(2)> Jason.decode!(~s({"age":44,"name":"Steve Irwin","nationality":"Australian"})) +%{"age" => 44, "name" => "Steve Irwin", "nationality" => "Australian"} +``` + +Full documentation can be found at [https://hexdocs.pm/jason](https://hexdocs.pm/jason). + +## Use with other libraries + +### Postgrex + +Versions starting at 0.14.0 use `Jason` by default. For earlier versions, please refer to +[previous versions of this document](https://github.com/michalmuskala/jason/tree/v1.1.2#postgrex). + +### Ecto + +Versions starting at 3.0.0 use `Jason` by default. For earlier versions, please refer to +[previous versions of this document](https://github.com/michalmuskala/jason/tree/v1.1.2#ecto). + +### Plug (and Phoenix) + +Phoenix starting at 1.4.0 uses `Jason` by default. For earlier versions, please refer to +[previous versions of this document](https://github.com/michalmuskala/jason/tree/v1.1.2#plug-and-phoenix). + +### Absinthe + +You need to pass the `:json_codec` option to `Absinthe.Plug` + +```elixir +# When called directly: +plug Absinthe.Plug, + schema: MyApp.Schema, + json_codec: Jason + +# When used in phoenix router: +forward "/api", + to: Absinthe.Plug, + init_opts: [schema: MyApp.Schema, json_codec: Jason] +``` + +## Benchmarks + +Detailed benchmarks (including memory measurements): +https://gist.github.com/michalmuskala/4d64a5a7696ca84ac7c169a0206640d5 + +HTML reports for the benchmark (only performance measurements): +http://michal.muskala.eu/jason/decode.html and http://michal.muskala.eu/jason/encode.html + +### Running + +Benchmarks against most popular Elixir & Erlang json libraries can be executed after +going into the `bench/` folder and then executing `mix bench.encode` and `mix bench.decode`. +A HTML report of the benchmarks (after their execution) can be found in +`bench/output/encode.html` and `bench/output/decode.html` respectively. + +## Differences to Poison + +Jason has a couple feature differences compared to Poison. + + * Jason follows the JSON spec more strictly, for example it does not allow + unescaped newline characters in JSON strings - e.g. `"\"\n\""` will + produce a decoding error. + * no support for decoding into data structures (the `as:` option). + * no built-in encoders for `MapSet`, `Range` and `Stream`. + * no support for encoding arbitrary structs - explicit implementation + of the `Jason.Encoder` protocol is always required. + * different pretty-printing customisation options (default `pretty: true` works the same) + +If you require encoders for any of the unsupported collection types, I suggest +adding the needed implementations directly to your project: + +```elixir +defimpl Jason.Encoder, for: [MapSet, Range, Stream] do + def encode(struct, opts) do + Jason.Encode.list(Enum.to_list(struct), opts) + end +end +``` + +If you need to encode some struct that does not implement the protocol, +if you own the struct, you can derive the implementation specifying +which fields should be encoded to JSON: + +```elixir +@derive {Jason.Encoder, only: [....]} +defstruct # ... 
+``` + +It is also possible to encode all fields, although this should be +used carefully to avoid accidentally leaking private information +when new fields are added: + +```elixir +@derive Jason.Encoder +defstruct # ... +``` + +Finally, if you don't own the struct you want to encode to JSON, +you may use `Protocol.derive/3` placed outside of any module: + +```elixir +Protocol.derive(Jason.Encoder, NameOfTheStruct, only: [...]) +Protocol.derive(Jason.Encoder, NameOfTheStruct) +``` + +## License + +Jason is released under the Apache License 2.0 - see the [LICENSE](LICENSE) file. + +Some elements of tests and benchmarks have their origins in the +[Poison library](https://github.com/devinus/poison) and were initially licensed under [CC0-1.0](https://creativecommons.org/publicdomain/zero/1.0/). diff --git a/deps/jason/hex_metadata.config b/deps/jason/hex_metadata.config new file mode 100644 index 0000000..1e58f87 --- /dev/null +++ b/deps/jason/hex_metadata.config @@ -0,0 +1,21 @@ +{<<"app">>,<<"jason">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>, + <<"A blazing fast JSON parser and generator in pure Elixir.">>}. +{<<"elixir">>,<<"~> 1.4">>}. +{<<"files">>, + [<<"lib">>,<<"lib/jason.ex">>,<<"lib/encoder.ex">>,<<"lib/decoder.ex">>, + <<"lib/ordered_object.ex">>,<<"lib/formatter.ex">>,<<"lib/encode.ex">>, + <<"lib/codegen.ex">>,<<"lib/helpers.ex">>,<<"lib/sigil.ex">>, + <<"lib/fragment.ex">>,<<"mix.exs">>,<<"README.md">>,<<"LICENSE">>, + <<"CHANGELOG.md">>]}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"links">>,[{<<"GitHub">>,<<"https://github.com/michalmuskala/jason">>}]}. +{<<"name">>,<<"jason">>}. +{<<"requirements">>, + [[{<<"app">>,<<"decimal">>}, + {<<"name">>,<<"decimal">>}, + {<<"optional">>,true}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 1.0 or ~> 2.0">>}]]}. +{<<"version">>,<<"1.3.0">>}. 
diff --git a/deps/jason/lib/codegen.ex b/deps/jason/lib/codegen.ex new file mode 100644 index 0000000..9679c8b --- /dev/null +++ b/deps/jason/lib/codegen.ex @@ -0,0 +1,138 @@ +defmodule Jason.Codegen do + @moduledoc false + + alias Jason.{Encode, EncodeError} + + def jump_table(ranges, default) do + ranges + |> ranges_to_orddict() + |> :array.from_orddict(default) + |> :array.to_orddict() + end + + def jump_table(ranges, default, max) do + ranges + |> ranges_to_orddict() + |> :array.from_orddict(default) + |> resize(max) + |> :array.to_orddict() + end + + defmacro bytecase(var, do: clauses) do + {ranges, default, literals} = clauses_to_ranges(clauses, []) + + jump_table = jump_table(ranges, default) + + quote do + case unquote(var) do + unquote(jump_table_to_clauses(jump_table, literals)) + end + end + end + + defmacro bytecase(var, max, do: clauses) do + {ranges, default, empty} = clauses_to_ranges(clauses, []) + + jump_table = jump_table(ranges, default, max) + + quote do + case unquote(var) do + unquote(jump_table_to_clauses(jump_table, empty)) + end + end + end + + def build_kv_iodata(kv, encode_args) do + elements = + kv + |> Enum.map(&encode_pair(&1, encode_args)) + |> Enum.intersperse(",") + + collapse_static(List.flatten(["{", elements] ++ '}')) + end + + defp clauses_to_ranges([{:->, _, [[{:in, _, [byte, range]}, rest], action]} | tail], acc) do + clauses_to_ranges(tail, [{range, {byte, rest, action}} | acc]) + end + + defp clauses_to_ranges([{:->, _, [[default, rest], action]} | tail], acc) do + {Enum.reverse(acc), {default, rest, action}, literal_clauses(tail)} + end + + defp literal_clauses(clauses) do + Enum.map(clauses, fn {:->, _, [[literal], action]} -> + {literal, action} + end) + end + + defp jump_table_to_clauses([{val, {{:_, _, _}, rest, action}} | tail], empty) do + quote do + <<unquote(val), unquote(rest)::bits>> -> + unquote(action) + end ++ jump_table_to_clauses(tail, empty) + end + + defp jump_table_to_clauses([{val, {byte, rest, action}} | tail], empty) do + quote do + <<unquote(byte), unquote(rest)::bits>> when unquote(byte) === unquote(val) -> + unquote(action) + end ++ jump_table_to_clauses(tail, empty) + end + + defp jump_table_to_clauses([], literals) do + Enum.flat_map(literals, fn {pattern, action} -> + quote do + unquote(pattern) -> + unquote(action) + end + end) + end + + defp resize(array, size), do: :array.resize(size, array) + + defp ranges_to_orddict(ranges) do + ranges + |> Enum.flat_map(fn + {int, value} when is_integer(int) -> + [{int, value}] + + {enum, value} -> + Enum.map(enum, &{&1, value}) + end) + |> :orddict.from_list() + end + + defp encode_pair({key, value}, encode_args) do + key = IO.iodata_to_binary(Encode.key(key, &escape_key/3)) + key = "\"" <> key <> "\":" + [key, quote(do: Encode.value(unquote(value), unquote_splicing(encode_args)))] + end + + defp escape_key(binary, _original, _skip) do + check_safe_key!(binary) + binary + end + + defp check_safe_key!(binary) do + for <<(<<byte>> <- binary)>> do + if byte > 0x7F or byte < 0x1F or byte in '"\\/' do + raise EncodeError, + "invalid byte #{inspect(byte, base: :hex)} in literal key: #{inspect(binary)}" + end + end + + :ok + end + + defp collapse_static([bin1, bin2 | rest]) when is_binary(bin1) and is_binary(bin2) do + collapse_static([bin1 <> bin2 | rest]) + end + + defp collapse_static([other | rest]) do + [other | collapse_static(rest)] + end + + defp collapse_static([]) do + [] + end +end diff --git a/deps/jason/lib/decoder.ex b/deps/jason/lib/decoder.ex new file mode 100644 index 
0000000..8ffe178 --- /dev/null +++ b/deps/jason/lib/decoder.ex @@ -0,0 +1,693 @@ +defmodule Jason.DecodeError do + @type t :: %__MODULE__{position: integer, data: String.t} + + defexception [:position, :token, :data] + + def message(%{position: position, token: token}) when is_binary(token) do + "unexpected sequence at position #{position}: #{inspect token}" + end + def message(%{position: position, data: data}) when position == byte_size(data) do + "unexpected end of input at position #{position}" + end + def message(%{position: position, data: data}) do + byte = :binary.at(data, position) + str = <<byte>> + if String.printable?(str) do + "unexpected byte at position #{position}: " <> + "#{inspect byte, base: :hex} (#{inspect str})" + else + "unexpected byte at position #{position}: " <> + "#{inspect byte, base: :hex}" + end + end +end + +defmodule Jason.Decoder do + @moduledoc false + + import Bitwise + + alias Jason.{DecodeError, Codegen} + + import Codegen, only: [bytecase: 2, bytecase: 3] + import Record + + @dialyzer :no_improper_lists + + # @compile :native + + # We use integers instead of atoms to take advantage of the jump table + # optimization + @terminate 0 + @array 1 + @key 2 + @object 3 + + defrecordp :decode, [keys: nil, strings: nil, objects: nil, floats: nil] + + def parse(data, opts) when is_binary(data) do + key_decode = key_decode_function(opts) + string_decode = string_decode_function(opts) + float_decode = float_decode_function(opts) + object_decode = object_decode_function(opts) + decode = decode(keys: key_decode, strings: string_decode, objects: object_decode, floats: float_decode) + try do + value(data, data, 0, [@terminate], decode) + catch + {:position, position} -> + {:error, %DecodeError{position: position, data: data}} + {:token, token, position} -> + {:error, %DecodeError{token: token, position: position, data: data}} + else + value -> + {:ok, value} + end + end + + defp key_decode_function(%{keys: :atoms}), do: &String.to_atom/1 + defp key_decode_function(%{keys: :atoms!}), do: &String.to_existing_atom/1 + defp key_decode_function(%{keys: :strings}), do: &(&1) + defp key_decode_function(%{keys: fun}) when is_function(fun, 1), do: fun + + defp string_decode_function(%{strings: :copy}), do: &:binary.copy/1 + defp string_decode_function(%{strings: :reference}), do: &(&1) + + defp object_decode_function(%{objects: :maps}), do: &:maps.from_list/1 + defp object_decode_function(%{objects: :ordered_objects}), do: &Jason.OrderedObject.new(:lists.reverse(&1)) + + defp float_decode_function(%{floats: :native}) do + fn string, token, skip -> + try do + :erlang.binary_to_float(string) + catch + :error, :badarg -> + token_error(token, skip) + end + end + end + + defp float_decode_function(%{floats: :decimals}) do + fn string, token, skip -> + # silence xref warning + decimal = Decimal + try do + decimal.new(string) + rescue + Decimal.Error -> + token_error(token, skip) + end + end + end + + defp value(data, original, skip, stack, decode) do + bytecase data do + _ in '\s\n\t\r', rest -> + value(rest, original, skip + 1, stack, decode) + _ in '0', rest -> + number_zero(rest, original, skip, stack, decode, 1) + _ in '123456789', rest -> + number(rest, original, skip, stack, decode, 1) + _ in '-', rest -> + number_minus(rest, original, skip, stack, decode) + _ in '"', rest -> + string(rest, original, skip + 1, stack, decode, 0) + _ in '[', rest -> + array(rest, original, skip + 1, stack, decode) + _ in '{', rest -> + object(rest, original, skip + 1, stack, decode) + _ in ']', 
rest -> + empty_array(rest, original, skip + 1, stack, decode) + _ in 't', rest -> + case rest do + <<"rue", rest::bits>> -> + continue(rest, original, skip + 4, stack, decode, true) + <<_::bits>> -> + error(original, skip) + end + _ in 'f', rest -> + case rest do + <<"alse", rest::bits>> -> + continue(rest, original, skip + 5, stack, decode, false) + <<_::bits>> -> + error(original, skip) + end + _ in 'n', rest -> + case rest do + <<"ull", rest::bits>> -> + continue(rest, original, skip + 4, stack, decode, nil) + <<_::bits>> -> + error(original, skip) + end + _, rest -> + error(rest, original, skip + 1, stack, decode) + <<_::bits>> -> + error(original, skip) + end + end + + defp number_minus(<<?0, rest::bits>>, original, skip, stack, decode) do + number_zero(rest, original, skip, stack, decode, 2) + end + defp number_minus(<<byte, rest::bits>>, original, skip, stack, decode) + when byte in '123456789' do + number(rest, original, skip, stack, decode, 2) + end + defp number_minus(<<_rest::bits>>, original, skip, _stack, _decode) do + error(original, skip + 1) + end + + defp number(<<byte, rest::bits>>, original, skip, stack, decode, len) + when byte in '0123456789' do + number(rest, original, skip, stack, decode, len + 1) + end + defp number(<<?., rest::bits>>, original, skip, stack, decode, len) do + number_frac(rest, original, skip, stack, decode, len + 1) + end + defp number(<<e, rest::bits>>, original, skip, stack, decode, len) when e in 'eE' do + prefix = binary_part(original, skip, len) + number_exp_copy(rest, original, skip + len + 1, stack, decode, prefix) + end + defp number(<<rest::bits>>, original, skip, stack, decode, len) do + int = String.to_integer(binary_part(original, skip, len)) + continue(rest, original, skip + len, stack, decode, int) + end + + defp number_frac(<<byte, rest::bits>>, original, skip, stack, decode, len) + when byte in '0123456789' do + number_frac_cont(rest, original, skip, stack, decode, len + 1) + end + defp number_frac(<<_rest::bits>>, original, skip, _stack, _decode, len) do + error(original, skip + len) + end + + defp number_frac_cont(<<byte, rest::bits>>, original, skip, stack, decode, len) + when byte in '0123456789' do + number_frac_cont(rest, original, skip, stack, decode, len + 1) + end + defp number_frac_cont(<<e, rest::bits>>, original, skip, stack, decode, len) + when e in 'eE' do + number_exp(rest, original, skip, stack, decode, len + 1) + end + defp number_frac_cont(<<rest::bits>>, original, skip, stack, decode, len) do + token = binary_part(original, skip, len) + decode(floats: float_decode) = decode + float = float_decode.(token, token, skip) + continue(rest, original, skip + len, stack, decode, float) + end + + defp number_exp(<<byte, rest::bits>>, original, skip, stack, decode, len) + when byte in '0123456789' do + number_exp_cont(rest, original, skip, stack, decode, len + 1) + end + defp number_exp(<<byte, rest::bits>>, original, skip, stack, decode, len) + when byte in '+-' do + number_exp_sign(rest, original, skip, stack, decode, len + 1) + end + defp number_exp(<<_rest::bits>>, original, skip, _stack, _decode, len) do + error(original, skip + len) + end + + defp number_exp_sign(<<byte, rest::bits>>, original, skip, stack, decode, len) + when byte in '0123456789' do + number_exp_cont(rest, original, skip, stack, decode, len + 1) + end + defp number_exp_sign(<<_rest::bits>>, original, skip, _stack, _decode, len) do + error(original, skip + len) + end + + defp number_exp_cont(<<byte, rest::bits>>, original, skip, stack, decode, len) + 
when byte in '0123456789' do + number_exp_cont(rest, original, skip, stack, decode, len + 1) + end + defp number_exp_cont(<<rest::bits>>, original, skip, stack, decode, len) do + token = binary_part(original, skip, len) + decode(floats: float_decode) = decode + float = float_decode.(token, token, skip) + continue(rest, original, skip + len, stack, decode, float) + end + + defp number_exp_copy(<<byte, rest::bits>>, original, skip, stack, decode, prefix) + when byte in '0123456789' do + number_exp_cont(rest, original, skip, stack, decode, prefix, 1) + end + defp number_exp_copy(<<byte, rest::bits>>, original, skip, stack, decode, prefix) + when byte in '+-' do + number_exp_sign(rest, original, skip, stack, decode, prefix, 1) + end + defp number_exp_copy(<<_rest::bits>>, original, skip, _stack, _decode, _prefix) do + error(original, skip) + end + + defp number_exp_sign(<<byte, rest::bits>>, original, skip, stack, decode, prefix, len) + when byte in '0123456789' do + number_exp_cont(rest, original, skip, stack, decode, prefix, len + 1) + end + defp number_exp_sign(<<_rest::bits>>, original, skip, _stack, _decode, _prefix, len) do + error(original, skip + len) + end + + defp number_exp_cont(<<byte, rest::bits>>, original, skip, stack, decode, prefix, len) + when byte in '0123456789' do + number_exp_cont(rest, original, skip, stack, decode, prefix, len + 1) + end + defp number_exp_cont(<<rest::bits>>, original, skip, stack, decode, prefix, len) do + suffix = binary_part(original, skip, len) + string = prefix <> ".0e" <> suffix + prefix_size = byte_size(prefix) + initial_skip = skip - prefix_size - 1 + final_skip = skip + len + token = binary_part(original, initial_skip, prefix_size + len + 1) + decode(floats: float_decode) = decode + float = float_decode.(string, token, initial_skip) + continue(rest, original, final_skip, stack, decode, float) + end + + defp number_zero(<<?., rest::bits>>, original, skip, stack, decode, len) do + number_frac(rest, original, skip, stack, decode, len + 1) + end + defp number_zero(<<e, rest::bits>>, original, skip, stack, decode, len) when e in 'eE' do + number_exp_copy(rest, original, skip + len + 1, stack, decode, "0") + end + defp number_zero(<<rest::bits>>, original, skip, stack, decode, len) do + continue(rest, original, skip + len, stack, decode, 0) + end + + @compile {:inline, array: 5} + + defp array(rest, original, skip, stack, decode) do + value(rest, original, skip, [@array, [] | stack], decode) + end + + defp empty_array(<<rest::bits>>, original, skip, stack, decode) do + case stack do + [@array, [] | stack] -> + continue(rest, original, skip, stack, decode, []) + _ -> + error(original, skip - 1) + end + end + + defp array(data, original, skip, stack, decode, value) do + bytecase data do + _ in '\s\n\t\r', rest -> + array(rest, original, skip + 1, stack, decode, value) + _ in ']', rest -> + [acc | stack] = stack + value = :lists.reverse(acc, [value]) + continue(rest, original, skip + 1, stack, decode, value) + _ in ',', rest -> + [acc | stack] = stack + value(rest, original, skip + 1, [@array, [value | acc] | stack], decode) + _, _rest -> + error(original, skip) + <<_::bits>> -> + empty_error(original, skip) + end + end + + @compile {:inline, object: 5} + + defp object(rest, original, skip, stack, decode) do + key(rest, original, skip, [[] | stack], decode) + end + + defp object(data, original, skip, stack, decode, value) do + bytecase data do + _ in '\s\n\t\r', rest -> + object(rest, original, skip + 1, stack, decode, value) + _ in '}', rest -> + skip 
= skip + 1 + [key, acc | stack] = stack + decode(keys: key_decode) = decode + final = [{key_decode.(key), value} | acc] + decode(objects: object_decode) = decode + continue(rest, original, skip, stack, decode, object_decode.(final)) + _ in ',', rest -> + skip = skip + 1 + [key, acc | stack] = stack + decode(keys: key_decode) = decode + acc = [{key_decode.(key), value} | acc] + key(rest, original, skip, [acc | stack], decode) + _, _rest -> + error(original, skip) + <<_::bits>> -> + empty_error(original, skip) + end + end + + defp key(data, original, skip, stack, decode) do + bytecase data do + _ in '\s\n\t\r', rest -> + key(rest, original, skip + 1, stack, decode) + _ in '}', rest -> + case stack do + [[] | stack] -> + decode(objects: object_decode) = decode + continue(rest, original, skip + 1, stack, decode, object_decode.([])) + _ -> + error(original, skip) + end + _ in '"', rest -> + string(rest, original, skip + 1, [@key | stack], decode, 0) + _, _rest -> + error(original, skip) + <<_::bits>> -> + empty_error(original, skip) + end + end + + defp key(data, original, skip, stack, decode, value) do + bytecase data do + _ in '\s\n\t\r', rest -> + key(rest, original, skip + 1, stack, decode, value) + _ in ':', rest -> + value(rest, original, skip + 1, [@object, value | stack], decode) + _, _rest -> + error(original, skip) + <<_::bits>> -> + empty_error(original, skip) + end + end + + # TODO: check if this approach would be faster: + # https://git.ninenines.eu/cowlib.git/tree/src/cow_ws.erl#n469 + # http://bjoern.hoehrmann.de/utf-8/decoder/dfa/ + defp string(data, original, skip, stack, decode, len) do + bytecase data, 128 do + _ in '"', rest -> + decode(strings: string_decode) = decode + string = string_decode.(binary_part(original, skip, len)) + continue(rest, original, skip + len + 1, stack, decode, string) + _ in '\\', rest -> + part = binary_part(original, skip, len) + escape(rest, original, skip + len, stack, decode, part) + _ in unquote(0x00..0x1F), _rest -> + error(original, skip + len) + _, rest -> + string(rest, original, skip, stack, decode, len + 1) + <<char::utf8, rest::bits>> when char <= 0x7FF -> + string(rest, original, skip, stack, decode, len + 2) + <<char::utf8, rest::bits>> when char <= 0xFFFF -> + string(rest, original, skip, stack, decode, len + 3) + <<_char::utf8, rest::bits>> -> + string(rest, original, skip, stack, decode, len + 4) + <<_::bits>> -> + empty_error(original, skip + len) + end + end + + defp string(data, original, skip, stack, decode, acc, len) do + bytecase data, 128 do + _ in '"', rest -> + last = binary_part(original, skip, len) + string = IO.iodata_to_binary([acc | last]) + continue(rest, original, skip + len + 1, stack, decode, string) + _ in '\\', rest -> + part = binary_part(original, skip, len) + escape(rest, original, skip + len, stack, decode, [acc | part]) + _ in unquote(0x00..0x1F), _rest -> + error(original, skip + len) + _, rest -> + string(rest, original, skip, stack, decode, acc, len + 1) + <<char::utf8, rest::bits>> when char <= 0x7FF -> + string(rest, original, skip, stack, decode, acc, len + 2) + <<char::utf8, rest::bits>> when char <= 0xFFFF -> + string(rest, original, skip, stack, decode, acc, len + 3) + <<_char::utf8, rest::bits>> -> + string(rest, original, skip, stack, decode, acc, len + 4) + <<_::bits>> -> + empty_error(original, skip + len) + end + end + + defp escape(data, original, skip, stack, decode, acc) do + bytecase data do + _ in 'b', rest -> + string(rest, original, skip + 2, stack, decode, [acc | '\b'], 0) + _ in 't', 
rest -> + string(rest, original, skip + 2, stack, decode, [acc | '\t'], 0) + _ in 'n', rest -> + string(rest, original, skip + 2, stack, decode, [acc | '\n'], 0) + _ in 'f', rest -> + string(rest, original, skip + 2, stack, decode, [acc | '\f'], 0) + _ in 'r', rest -> + string(rest, original, skip + 2, stack, decode, [acc | '\r'], 0) + _ in '"', rest -> + string(rest, original, skip + 2, stack, decode, [acc | '\"'], 0) + _ in '/', rest -> + string(rest, original, skip + 2, stack, decode, [acc | '/'], 0) + _ in '\\', rest -> + string(rest, original, skip + 2, stack, decode, [acc | '\\'], 0) + _ in 'u', rest -> + escapeu(rest, original, skip, stack, decode, acc) + _, _rest -> + error(original, skip + 1) + <<_::bits>> -> + empty_error(original, skip) + end + end + + defmodule Unescape do + @moduledoc false + + import Bitwise + + @digits Enum.concat([?0..?9, ?A..?F, ?a..?f]) + + def unicode_escapes(chars1 \\ @digits, chars2 \\ @digits) do + for char1 <- chars1, char2 <- chars2 do + {(char1 <<< 8) + char2, integer8(char1, char2)} + end + end + + defp integer8(char1, char2) do + (integer4(char1) <<< 4) + integer4(char2) + end + + defp integer4(char) when char in ?0..?9, do: char - ?0 + defp integer4(char) when char in ?A..?F, do: char - ?A + 10 + defp integer4(char) when char in ?a..?f, do: char - ?a + 10 + + defp token_error_clause(original, skip, len) do + quote do + _ -> + token_error(unquote_splicing([original, skip, len])) + end + end + + defmacro escapeu_first(int, last, rest, original, skip, stack, decode, acc) do + clauses = escapeu_first_clauses(last, rest, original, skip, stack, decode, acc) + quote location: :keep do + case unquote(int) do + unquote(clauses ++ token_error_clause(original, skip, 6)) + end + end + end + + defp escapeu_first_clauses(last, rest, original, skip, stack, decode, acc) do + for {int, first} <- unicode_escapes(), + not (first in 0xDC..0xDF) do + escapeu_first_clause(int, first, last, rest, original, skip, stack, decode, acc) + end + end + + defp escapeu_first_clause(int, first, last, rest, original, skip, stack, decode, acc) + when first in 0xD8..0xDB do + hi = + quote bind_quoted: [first: first, last: last] do + 0x10000 + ((((first &&& 0x03) <<< 8) + last) <<< 10) + end + args = [rest, original, skip, stack, decode, acc, hi] + [clause] = + quote location: :keep do + unquote(int) -> escape_surrogate(unquote_splicing(args)) + end + clause + end + + defp escapeu_first_clause(int, first, last, rest, original, skip, stack, decode, acc) + when first <= 0x00 do + skip = quote do: (unquote(skip) + 6) + acc = + quote bind_quoted: [acc: acc, first: first, last: last] do + if last <= 0x7F do + # 0????? + [acc, last] + else + # 110xxxx?? 10????? + byte1 = ((0b110 <<< 5) + (first <<< 2)) + (last >>> 6) + byte2 = (0b10 <<< 6) + (last &&& 0b111111) + [acc, byte1, byte2] + end + end + args = [rest, original, skip, stack, decode, acc, 0] + [clause] = + quote location: :keep do + unquote(int) -> string(unquote_splicing(args)) + end + clause + end + + defp escapeu_first_clause(int, first, last, rest, original, skip, stack, decode, acc) + when first <= 0x07 do + skip = quote do: (unquote(skip) + 6) + acc = + quote bind_quoted: [acc: acc, first: first, last: last] do + # 110xxx?? 10?????? 
+ byte1 = ((0b110 <<< 5) + (first <<< 2)) + (last >>> 6) + byte2 = (0b10 <<< 6) + (last &&& 0b111111) + [acc, byte1, byte2] + end + args = [rest, original, skip, stack, decode, acc, 0] + [clause] = + quote location: :keep do + unquote(int) -> string(unquote_splicing(args)) + end + clause + end + + defp escapeu_first_clause(int, first, last, rest, original, skip, stack, decode, acc) + when first <= 0xFF do + skip = quote do: (unquote(skip) + 6) + acc = + quote bind_quoted: [acc: acc, first: first, last: last] do + # 1110xxxx 10xxxx?? 10?????? + byte1 = (0b1110 <<< 4) + (first >>> 4) + byte2 = ((0b10 <<< 6) + ((first &&& 0b1111) <<< 2)) + (last >>> 6) + byte3 = (0b10 <<< 6) + (last &&& 0b111111) + [acc, byte1, byte2, byte3] + end + args = [rest, original, skip, stack, decode, acc, 0] + [clause] = + quote location: :keep do + unquote(int) -> string(unquote_splicing(args)) + end + clause + end + + defmacro escapeu_last(int, original, skip) do + clauses = escapeu_last_clauses() + quote location: :keep do + case unquote(int) do + unquote(clauses ++ token_error_clause(original, skip, 6)) + end + end + end + + defp escapeu_last_clauses() do + for {int, last} <- unicode_escapes() do + [clause] = + quote do + unquote(int) -> unquote(last) + end + clause + end + end + + defmacro escapeu_surrogate(int, last, rest, original, skip, stack, decode, acc, + hi) do + clauses = escapeu_surrogate_clauses(last, rest, original, skip, stack, decode, acc, hi) + quote location: :keep do + case unquote(int) do + unquote(clauses ++ token_error_clause(original, skip, 12)) + end + end + end + + defp escapeu_surrogate_clauses(last, rest, original, skip, stack, decode, acc, hi) do + digits1 = 'Dd' + digits2 = Stream.concat([?C..?F, ?c..?f]) + for {int, first} <- unicode_escapes(digits1, digits2) do + escapeu_surrogate_clause(int, first, last, rest, original, skip, stack, decode, acc, hi) + end + end + + defp escapeu_surrogate_clause(int, first, last, rest, original, skip, stack, decode, acc, hi) do + skip = quote do: unquote(skip) + 12 + acc = + quote bind_quoted: [acc: acc, first: first, last: last, hi: hi] do + lo = ((first &&& 0x03) <<< 8) + last + [acc | <<(hi + lo)::utf8>>] + end + args = [rest, original, skip, stack, decode, acc, 0] + [clause] = + quote do + unquote(int) -> + string(unquote_splicing(args)) + end + clause + end + end + + defp escapeu(<<int1::16, int2::16, rest::bits>>, original, skip, stack, decode, acc) do + require Unescape + last = escapeu_last(int2, original, skip) + Unescape.escapeu_first(int1, last, rest, original, skip, stack, decode, acc) + end + defp escapeu(<<_rest::bits>>, original, skip, _stack, _decode, _acc) do + empty_error(original, skip) + end + + # @compile {:inline, escapeu_last: 3} + + defp escapeu_last(int, original, skip) do + require Unescape + Unescape.escapeu_last(int, original, skip) + end + + defp escape_surrogate(<<?\\, ?u, int1::16, int2::16, rest::bits>>, original, + skip, stack, decode, acc, hi) do + require Unescape + last = escapeu_last(int2, original, skip + 6) + Unescape.escapeu_surrogate(int1, last, rest, original, skip, stack, decode, acc, hi) + end + defp escape_surrogate(<<_rest::bits>>, original, skip, _stack, _decode, _acc, _hi) do + error(original, skip + 6) + end + + defp error(<<_rest::bits>>, _original, skip, _stack, _decode) do + throw {:position, skip - 1} + end + + defp empty_error(_original, skip) do + throw {:position, skip} + end + + @compile {:inline, error: 2, token_error: 2, token_error: 3} + defp error(_original, skip) do + throw {:position, 
skip} + end + + defp token_error(token, position) do + throw {:token, token, position} + end + + defp token_error(token, position, len) do + throw {:token, binary_part(token, position, len), position} + end + + @compile {:inline, continue: 6} + defp continue(rest, original, skip, stack, decode, value) do + case stack do + [@terminate | stack] -> + terminate(rest, original, skip, stack, decode, value) + [@array | stack] -> + array(rest, original, skip, stack, decode, value) + [@key | stack] -> + key(rest, original, skip, stack, decode, value) + [@object | stack] -> + object(rest, original, skip, stack, decode, value) + end + end + + defp terminate(<<byte, rest::bits>>, original, skip, stack, decode, value) + when byte in '\s\n\r\t' do + terminate(rest, original, skip + 1, stack, decode, value) + end + defp terminate(<<>>, _original, _skip, _stack, _decode, value) do + value + end + defp terminate(<<_rest::bits>>, original, skip, _stack, _decode, _value) do + error(original, skip) + end +end diff --git a/deps/jason/lib/encode.ex b/deps/jason/lib/encode.ex new file mode 100644 index 0000000..854ca1e --- /dev/null +++ b/deps/jason/lib/encode.ex @@ -0,0 +1,643 @@ +defmodule Jason.EncodeError do + defexception [:message] + + @type t :: %__MODULE__{message: String.t} + + def new({:duplicate_key, key}) do + %__MODULE__{message: "duplicate key: #{key}"} + end + def new({:invalid_byte, byte, original}) do + %__MODULE__{message: "invalid byte #{inspect byte, base: :hex} in #{inspect original}"} + end +end + +defmodule Jason.Encode do + @moduledoc """ + Utilities for encoding elixir values to JSON. + """ + + import Bitwise + + alias Jason.{Codegen, EncodeError, Encoder, Fragment, OrderedObject} + + @typep escape :: (String.t, String.t, integer -> iodata) + @typep encode_map :: (map, escape, encode_map -> iodata) + @opaque opts :: {escape, encode_map} + + @dialyzer :no_improper_lists + + # @compile :native + + @doc false + @spec encode(any, map) :: {:ok, iodata} | {:error, EncodeError.t | Exception.t} + def encode(value, opts) do + escape = escape_function(opts) + encode_map = encode_map_function(opts) + try do + {:ok, value(value, escape, encode_map)} + catch + :throw, %EncodeError{} = e -> + {:error, e} + :error, %Protocol.UndefinedError{protocol: Jason.Encoder} = e -> + {:error, e} + end + end + + defp encode_map_function(%{maps: maps}) do + case maps do + :naive -> &map_naive/3 + :strict -> &map_strict/3 + end + end + + defp escape_function(%{escape: escape}) do + case escape do + :json -> &escape_json/3 + :html_safe -> &escape_html/3 + :unicode_safe -> &escape_unicode/3 + :javascript_safe -> &escape_javascript/3 + # Keep for compatibility with Poison + :javascript -> &escape_javascript/3 + :unicode -> &escape_unicode/3 + end + end + + @doc """ + Equivalent to calling the `Jason.Encoder.encode/2` protocol function. + + Slightly more efficient for built-in types because of the internal dispatching. 
+ """ + @spec value(term, opts) :: iodata + def value(value, {escape, encode_map}) do + value(value, escape, encode_map) + end + + @doc false + # We use this directly in the helpers and deriving for extra speed + def value(value, escape, _encode_map) when is_atom(value) do + encode_atom(value, escape) + end + + def value(value, escape, _encode_map) when is_binary(value) do + encode_string(value, escape) + end + + def value(value, _escape, _encode_map) when is_integer(value) do + integer(value) + end + + def value(value, _escape, _encode_map) when is_float(value) do + float(value) + end + + def value(value, escape, encode_map) when is_list(value) do + list(value, escape, encode_map) + end + + def value(%{__struct__: module} = value, escape, encode_map) do + struct(value, escape, encode_map, module) + end + + def value(value, escape, encode_map) when is_map(value) do + case Map.to_list(value) do + [] -> "{}" + keyword -> encode_map.(keyword, escape, encode_map) + end + end + + def value(value, escape, encode_map) do + Encoder.encode(value, {escape, encode_map}) + end + + @compile {:inline, integer: 1, float: 1} + + @spec atom(atom, opts) :: iodata + def atom(atom, {escape, _encode_map}) do + encode_atom(atom, escape) + end + + defp encode_atom(nil, _escape), do: "null" + defp encode_atom(true, _escape), do: "true" + defp encode_atom(false, _escape), do: "false" + defp encode_atom(atom, escape), + do: encode_string(Atom.to_string(atom), escape) + + @spec integer(integer) :: iodata + def integer(integer) do + Integer.to_string(integer) + end + + @spec float(float) :: iodata + def float(float) do + :io_lib_format.fwrite_g(float) + end + + @spec list(list, opts) :: iodata + def list(list, {escape, encode_map}) do + list(list, escape, encode_map) + end + + defp list([], _escape, _encode_map) do + "[]" + end + + defp list([head | tail], escape, encode_map) do + [?[, value(head, escape, encode_map) + | list_loop(tail, escape, encode_map)] + end + + defp list_loop([], _escape, _encode_map) do + ']' + end + + defp list_loop([head | tail], escape, encode_map) do + [?,, value(head, escape, encode_map) + | list_loop(tail, escape, encode_map)] + end + + @spec keyword(keyword, opts) :: iodata + def keyword(list, _) when list == [], do: "{}" + def keyword(list, {escape, encode_map}) when is_list(list) do + encode_map.(list, escape, encode_map) + end + + @spec map(map, opts) :: iodata + def map(value, {escape, encode_map}) do + case Map.to_list(value) do + [] -> "{}" + keyword -> encode_map.(keyword, escape, encode_map) + end + end + + defp map_naive([{key, value} | tail], escape, encode_map) do + ["{\"", key(key, escape), "\":", + value(value, escape, encode_map) + | map_naive_loop(tail, escape, encode_map)] + end + + defp map_naive_loop([], _escape, _encode_map) do + '}' + end + + defp map_naive_loop([{key, value} | tail], escape, encode_map) do + [",\"", key(key, escape), "\":", + value(value, escape, encode_map) + | map_naive_loop(tail, escape, encode_map)] + end + + defp map_strict([{key, value} | tail], escape, encode_map) do + key = IO.iodata_to_binary(key(key, escape)) + visited = %{key => []} + ["{\"", key, "\":", + value(value, escape, encode_map) + | map_strict_loop(tail, escape, encode_map, visited)] + end + + defp map_strict_loop([], _encode_map, _escape, _visited) do + '}' + end + + defp map_strict_loop([{key, value} | tail], escape, encode_map, visited) do + key = IO.iodata_to_binary(key(key, escape)) + case visited do + %{^key => _} -> + error({:duplicate_key, key}) + _ -> + visited = 
Map.put(visited, key, []) + [",\"", key, "\":", + value(value, escape, encode_map) + | map_strict_loop(tail, escape, encode_map, visited)] + end + end + + @spec struct(struct, opts) :: iodata + def struct(%module{} = value, {escape, encode_map}) do + struct(value, escape, encode_map, module) + end + + # TODO: benchmark the effect of inlining the to_iso8601 functions + for module <- [Date, Time, NaiveDateTime, DateTime] do + defp struct(value, _escape, _encode_map, unquote(module)) do + [?", unquote(module).to_iso8601(value), ?"] + end + end + + defp struct(value, _escape, _encode_map, Decimal) do + # silence the xref warning + decimal = Decimal + [?", decimal.to_string(value, :normal), ?"] + end + + defp struct(value, escape, encode_map, Fragment) do + %{encode: encode} = value + encode.({escape, encode_map}) + end + + defp struct(value, escape, encode_map, OrderedObject) do + case value do + %{values: []} -> "{}" + %{values: values} -> encode_map.(values, escape, encode_map) + end + end + + defp struct(value, escape, encode_map, _module) do + Encoder.encode(value, {escape, encode_map}) + end + + @doc false + # This is used in the helpers and deriving implementation + def key(string, escape) when is_binary(string) do + escape.(string, string, 0) + end + def key(atom, escape) when is_atom(atom) do + string = Atom.to_string(atom) + escape.(string, string, 0) + end + def key(other, escape) do + string = String.Chars.to_string(other) + escape.(string, string, 0) + end + + @spec string(String.t, opts) :: iodata + def string(string, {escape, _encode_map}) do + encode_string(string, escape) + end + + defp encode_string(string, escape) do + [?", escape.(string, string, 0), ?"] + end + + slash_escapes = Enum.zip('\b\t\n\f\r\"\\', 'btnfr"\\') + surogate_escapes = Enum.zip([0x2028, 0x2029], ["\\u2028", "\\u2029"]) + ranges = [{0x00..0x1F, :unicode} | slash_escapes] + html_ranges = [{0x00..0x1F, :unicode}, {?<, :unicode}, {?/, ?/} | slash_escapes] + escape_jt = Codegen.jump_table(html_ranges, :error) + + Enum.each(escape_jt, fn + {byte, :unicode} -> + sequence = List.to_string(:io_lib.format("\\u~4.16.0B", [byte])) + defp escape(unquote(byte)), do: unquote(sequence) + {byte, char} when is_integer(char) -> + defp escape(unquote(byte)), do: unquote(<<?\\, char>>) + {byte, :error} -> + defp escape(unquote(byte)), do: throw(:error) + end) + + ## regular JSON escape + + json_jt = Codegen.jump_table(ranges, :chunk, 0x7F + 1) + + defp escape_json(data, original, skip) do + escape_json(data, [], original, skip) + end + + Enum.map(json_jt, fn + {byte, :chunk} -> + defp escape_json(<<byte, rest::bits>>, acc, original, skip) + when byte === unquote(byte) do + escape_json_chunk(rest, acc, original, skip, 1) + end + {byte, _escape} -> + defp escape_json(<<byte, rest::bits>>, acc, original, skip) + when byte === unquote(byte) do + acc = [acc | escape(byte)] + escape_json(rest, acc, original, skip + 1) + end + end) + defp escape_json(<<char::utf8, rest::bits>>, acc, original, skip) + when char <= 0x7FF do + escape_json_chunk(rest, acc, original, skip, 2) + end + defp escape_json(<<char::utf8, rest::bits>>, acc, original, skip) + when char <= 0xFFFF do + escape_json_chunk(rest, acc, original, skip, 3) + end + defp escape_json(<<_char::utf8, rest::bits>>, acc, original, skip) do + escape_json_chunk(rest, acc, original, skip, 4) + end + defp escape_json(<<>>, acc, _original, _skip) do + acc + end + defp escape_json(<<byte, _rest::bits>>, _acc, original, _skip) do + error({:invalid_byte, byte, original}) + end + + 
Enum.map(json_jt, fn + {byte, :chunk} -> + defp escape_json_chunk(<<byte, rest::bits>>, acc, original, skip, len) + when byte === unquote(byte) do + escape_json_chunk(rest, acc, original, skip, len + 1) + end + {byte, _escape} -> + defp escape_json_chunk(<<byte, rest::bits>>, acc, original, skip, len) + when byte === unquote(byte) do + part = binary_part(original, skip, len) + acc = [acc, part | escape(byte)] + escape_json(rest, acc, original, skip + len + 1) + end + end) + defp escape_json_chunk(<<char::utf8, rest::bits>>, acc, original, skip, len) + when char <= 0x7FF do + escape_json_chunk(rest, acc, original, skip, len + 2) + end + defp escape_json_chunk(<<char::utf8, rest::bits>>, acc, original, skip, len) + when char <= 0xFFFF do + escape_json_chunk(rest, acc, original, skip, len + 3) + end + defp escape_json_chunk(<<_char::utf8, rest::bits>>, acc, original, skip, len) do + escape_json_chunk(rest, acc, original, skip, len + 4) + end + defp escape_json_chunk(<<>>, acc, original, skip, len) do + part = binary_part(original, skip, len) + [acc | part] + end + defp escape_json_chunk(<<byte, _rest::bits>>, _acc, original, _skip, _len) do + error({:invalid_byte, byte, original}) + end + + ## javascript safe JSON escape + + defp escape_javascript(data, original, skip) do + escape_javascript(data, [], original, skip) + end + + Enum.map(json_jt, fn + {byte, :chunk} -> + defp escape_javascript(<<byte, rest::bits>>, acc, original, skip) + when byte === unquote(byte) do + escape_javascript_chunk(rest, acc, original, skip, 1) + end + {byte, _escape} -> + defp escape_javascript(<<byte, rest::bits>>, acc, original, skip) + when byte === unquote(byte) do + acc = [acc | escape(byte)] + escape_javascript(rest, acc, original, skip + 1) + end + end) + defp escape_javascript(<<char::utf8, rest::bits>>, acc, original, skip) + when char <= 0x7FF do + escape_javascript_chunk(rest, acc, original, skip, 2) + end + Enum.map(surogate_escapes, fn {byte, escape} -> + defp escape_javascript(<<unquote(byte)::utf8, rest::bits>>, acc, original, skip) do + acc = [acc | unquote(escape)] + escape_javascript(rest, acc, original, skip + 3) + end + end) + defp escape_javascript(<<char::utf8, rest::bits>>, acc, original, skip) + when char <= 0xFFFF do + escape_javascript_chunk(rest, acc, original, skip, 3) + end + defp escape_javascript(<<_char::utf8, rest::bits>>, acc, original, skip) do + escape_javascript_chunk(rest, acc, original, skip, 4) + end + defp escape_javascript(<<>>, acc, _original, _skip) do + acc + end + defp escape_javascript(<<byte, _rest::bits>>, _acc, original, _skip) do + error({:invalid_byte, byte, original}) + end + + Enum.map(json_jt, fn + {byte, :chunk} -> + defp escape_javascript_chunk(<<byte, rest::bits>>, acc, original, skip, len) + when byte === unquote(byte) do + escape_javascript_chunk(rest, acc, original, skip, len + 1) + end + {byte, _escape} -> + defp escape_javascript_chunk(<<byte, rest::bits>>, acc, original, skip, len) + when byte === unquote(byte) do + part = binary_part(original, skip, len) + acc = [acc, part | escape(byte)] + escape_javascript(rest, acc, original, skip + len + 1) + end + end) + defp escape_javascript_chunk(<<char::utf8, rest::bits>>, acc, original, skip, len) + when char <= 0x7FF do + escape_javascript_chunk(rest, acc, original, skip, len + 2) + end + Enum.map(surogate_escapes, fn {byte, escape} -> + defp escape_javascript_chunk(<<unquote(byte)::utf8, rest::bits>>, acc, original, skip, len) do + part = binary_part(original, skip, len) + acc = [acc, part | 
unquote(escape)] + escape_javascript(rest, acc, original, skip + len + 3) + end + end) + defp escape_javascript_chunk(<<char::utf8, rest::bits>>, acc, original, skip, len) + when char <= 0xFFFF do + escape_javascript_chunk(rest, acc, original, skip, len + 3) + end + defp escape_javascript_chunk(<<_char::utf8, rest::bits>>, acc, original, skip, len) do + escape_javascript_chunk(rest, acc, original, skip, len + 4) + end + defp escape_javascript_chunk(<<>>, acc, original, skip, len) do + part = binary_part(original, skip, len) + [acc | part] + end + defp escape_javascript_chunk(<<byte, _rest::bits>>, _acc, original, _skip, _len) do + error({:invalid_byte, byte, original}) + end + + ## HTML safe JSON escape + + html_jt = Codegen.jump_table(html_ranges, :chunk, 0x7F + 1) + + defp escape_html(data, original, skip) do + escape_html(data, [], original, skip) + end + + Enum.map(html_jt, fn + {byte, :chunk} -> + defp escape_html(<<byte, rest::bits>>, acc, original, skip) + when byte === unquote(byte) do + escape_html_chunk(rest, acc, original, skip, 1) + end + {byte, _escape} -> + defp escape_html(<<byte, rest::bits>>, acc, original, skip) + when byte === unquote(byte) do + acc = [acc | escape(byte)] + escape_html(rest, acc, original, skip + 1) + end + end) + defp escape_html(<<char::utf8, rest::bits>>, acc, original, skip) + when char <= 0x7FF do + escape_html_chunk(rest, acc, original, skip, 2) + end + Enum.map(surogate_escapes, fn {byte, escape} -> + defp escape_html(<<unquote(byte)::utf8, rest::bits>>, acc, original, skip) do + acc = [acc | unquote(escape)] + escape_html(rest, acc, original, skip + 3) + end + end) + defp escape_html(<<char::utf8, rest::bits>>, acc, original, skip) + when char <= 0xFFFF do + escape_html_chunk(rest, acc, original, skip, 3) + end + defp escape_html(<<_char::utf8, rest::bits>>, acc, original, skip) do + escape_html_chunk(rest, acc, original, skip, 4) + end + defp escape_html(<<>>, acc, _original, _skip) do + acc + end + defp escape_html(<<byte, _rest::bits>>, _acc, original, _skip) do + error({:invalid_byte, byte, original}) + end + + Enum.map(html_jt, fn + {byte, :chunk} -> + defp escape_html_chunk(<<byte, rest::bits>>, acc, original, skip, len) + when byte === unquote(byte) do + escape_html_chunk(rest, acc, original, skip, len + 1) + end + {byte, _escape} -> + defp escape_html_chunk(<<byte, rest::bits>>, acc, original, skip, len) + when byte === unquote(byte) do + part = binary_part(original, skip, len) + acc = [acc, part | escape(byte)] + escape_html(rest, acc, original, skip + len + 1) + end + end) + defp escape_html_chunk(<<char::utf8, rest::bits>>, acc, original, skip, len) + when char <= 0x7FF do + escape_html_chunk(rest, acc, original, skip, len + 2) + end + Enum.map(surogate_escapes, fn {byte, escape} -> + defp escape_html_chunk(<<unquote(byte)::utf8, rest::bits>>, acc, original, skip, len) do + part = binary_part(original, skip, len) + acc = [acc, part | unquote(escape)] + escape_html(rest, acc, original, skip + len + 3) + end + end) + defp escape_html_chunk(<<char::utf8, rest::bits>>, acc, original, skip, len) + when char <= 0xFFFF do + escape_html_chunk(rest, acc, original, skip, len + 3) + end + defp escape_html_chunk(<<_char::utf8, rest::bits>>, acc, original, skip, len) do + escape_html_chunk(rest, acc, original, skip, len + 4) + end + defp escape_html_chunk(<<>>, acc, original, skip, len) do + part = binary_part(original, skip, len) + [acc | part] + end + defp escape_html_chunk(<<byte, _rest::bits>>, _acc, original, _skip, _len) do + 
error({:invalid_byte, byte, original}) + end + + ## unicode escape + + defp escape_unicode(data, original, skip) do + escape_unicode(data, [], original, skip) + end + + Enum.map(json_jt, fn + {byte, :chunk} -> + defp escape_unicode(<<byte, rest::bits>>, acc, original, skip) + when byte === unquote(byte) do + escape_unicode_chunk(rest, acc, original, skip, 1) + end + {byte, _escape} -> + defp escape_unicode(<<byte, rest::bits>>, acc, original, skip) + when byte === unquote(byte) do + acc = [acc | escape(byte)] + escape_unicode(rest, acc, original, skip + 1) + end + end) + defp escape_unicode(<<char::utf8, rest::bits>>, acc, original, skip) + when char <= 0xFF do + acc = [acc, "\\u00" | Integer.to_string(char, 16)] + escape_unicode(rest, acc, original, skip + 2) + end + defp escape_unicode(<<char::utf8, rest::bits>>, acc, original, skip) + when char <= 0x7FF do + acc = [acc, "\\u0" | Integer.to_string(char, 16)] + escape_unicode(rest, acc, original, skip + 2) + end + defp escape_unicode(<<char::utf8, rest::bits>>, acc, original, skip) + when char <= 0xFFF do + acc = [acc, "\\u0" | Integer.to_string(char, 16)] + escape_unicode(rest, acc, original, skip + 3) + end + defp escape_unicode(<<char::utf8, rest::bits>>, acc, original, skip) + when char <= 0xFFFF do + acc = [acc, "\\u" | Integer.to_string(char, 16)] + escape_unicode(rest, acc, original, skip + 3) + end + defp escape_unicode(<<char::utf8, rest::bits>>, acc, original, skip) do + char = char - 0x10000 + acc = + [ + acc, + "\\uD", Integer.to_string(0x800 ||| (char >>> 10), 16), + "\\uD" | Integer.to_string(0xC00 ||| (char &&& 0x3FF), 16) + ] + escape_unicode(rest, acc, original, skip + 4) + end + defp escape_unicode(<<>>, acc, _original, _skip) do + acc + end + defp escape_unicode(<<byte, _rest::bits>>, _acc, original, _skip) do + error({:invalid_byte, byte, original}) + end + + Enum.map(json_jt, fn + {byte, :chunk} -> + defp escape_unicode_chunk(<<byte, rest::bits>>, acc, original, skip, len) + when byte === unquote(byte) do + escape_unicode_chunk(rest, acc, original, skip, len + 1) + end + {byte, _escape} -> + defp escape_unicode_chunk(<<byte, rest::bits>>, acc, original, skip, len) + when byte === unquote(byte) do + part = binary_part(original, skip, len) + acc = [acc, part | escape(byte)] + escape_unicode(rest, acc, original, skip + len + 1) + end + end) + defp escape_unicode_chunk(<<char::utf8, rest::bits>>, acc, original, skip, len) + when char <= 0xFF do + part = binary_part(original, skip, len) + acc = [acc, part, "\\u00" | Integer.to_string(char, 16)] + escape_unicode(rest, acc, original, skip + len + 2) + end + defp escape_unicode_chunk(<<char::utf8, rest::bits>>, acc, original, skip, len) + when char <= 0x7FF do + part = binary_part(original, skip, len) + acc = [acc, part, "\\u0" | Integer.to_string(char, 16)] + escape_unicode(rest, acc, original, skip + len + 2) + end + defp escape_unicode_chunk(<<char::utf8, rest::bits>>, acc, original, skip, len) + when char <= 0xFFF do + part = binary_part(original, skip, len) + acc = [acc, part, "\\u0" | Integer.to_string(char, 16)] + escape_unicode(rest, acc, original, skip + len + 3) + end + defp escape_unicode_chunk(<<char::utf8, rest::bits>>, acc, original, skip, len) + when char <= 0xFFFF do + part = binary_part(original, skip, len) + acc = [acc, part, "\\u" | Integer.to_string(char, 16)] + escape_unicode(rest, acc, original, skip + len + 3) + end + defp escape_unicode_chunk(<<char::utf8, rest::bits>>, acc, original, skip, len) do + char = char - 0x10000 + part = binary_part(original, 
skip, len) + acc = + [ + acc, part, + "\\uD", Integer.to_string(0x800 ||| (char >>> 10), 16), + "\\uD" | Integer.to_string(0xC00 ||| (char &&& 0x3FF), 16) + ] + escape_unicode(rest, acc, original, skip + len + 4) + end + defp escape_unicode_chunk(<<>>, acc, original, skip, len) do + part = binary_part(original, skip, len) + [acc | part] + end + defp escape_unicode_chunk(<<byte, _rest::bits>>, _acc, original, _skip, _len) do + error({:invalid_byte, byte, original}) + end + + @compile {:inline, error: 1} + defp error(error) do + throw EncodeError.new(error) + end +end diff --git a/deps/jason/lib/encoder.ex b/deps/jason/lib/encoder.ex new file mode 100644 index 0000000..d693399 --- /dev/null +++ b/deps/jason/lib/encoder.ex @@ -0,0 +1,236 @@ +defprotocol Jason.Encoder do + @moduledoc """ + Protocol controlling how a value is encoded to JSON. + + ## Deriving + + The protocol allows leveraging the Elixir's `@derive` feature + to simplify protocol implementation in trivial cases. Accepted + options are: + + * `:only` - encodes only values of specified keys. + * `:except` - encodes all struct fields except specified keys. + + By default all keys except the `:__struct__` key are encoded. + + ## Example + + Let's assume a presence of the following struct: + + defmodule Test do + defstruct [:foo, :bar, :baz] + end + + If we were to call `@derive Jason.Encoder` just before `defstruct`, + an implementation similar to the following implementation would be generated: + + defimpl Jason.Encoder, for: Test do + def encode(value, opts) do + Jason.Encode.map(Map.take(value, [:foo, :bar, :baz]), opts) + end + end + + If we called `@derive {Jason.Encoder, only: [:foo]}`, an implementation + similar to the following implementation would be generated: + + defimpl Jason.Encoder, for: Test do + def encode(value, opts) do + Jason.Encode.map(Map.take(value, [:foo]), opts) + end + end + + If we called `@derive {Jason.Encoder, except: [:foo]}`, an implementation + similar to the following implementation would be generated: + + defimpl Jason.Encoder, for: Test do + def encode(value, opts) do + Jason.Encode.map(Map.take(value, [:bar, :baz]), opts) + end + end + + The actually generated implementations are more efficient computing some data + during compilation similar to the macros from the `Jason.Helpers` module. + + ## Explicit implementation + + If you wish to implement the protocol fully yourself, it is advised to + use functions from the `Jason.Encode` module to do the actual iodata + generation - they are highly optimized and verified to always produce + valid JSON. + """ + + @type t :: term + @type opts :: Jason.Encode.opts() + + @fallback_to_any true + + @doc """ + Encodes `value` to JSON. + + The argument `opts` is opaque - it can be passed to various functions in + `Jason.Encode` (or to the protocol function itself) for encoding values to JSON. 
+ """ + @spec encode(t, opts) :: iodata + def encode(value, opts) +end + +defimpl Jason.Encoder, for: Any do + defmacro __deriving__(module, struct, opts) do + fields = fields_to_encode(struct, opts) + kv = Enum.map(fields, &{&1, generated_var(&1, __MODULE__)}) + escape = quote(do: escape) + encode_map = quote(do: encode_map) + encode_args = [escape, encode_map] + kv_iodata = Jason.Codegen.build_kv_iodata(kv, encode_args) + + quote do + defimpl Jason.Encoder, for: unquote(module) do + require Jason.Helpers + + def encode(%{unquote_splicing(kv)}, {unquote(escape), unquote(encode_map)}) do + unquote(kv_iodata) + end + end + end + end + + # The same as Macro.var/2 except it sets generated: true + defp generated_var(name, context) do + {name, [generated: true], context} + end + + def encode(%_{} = struct, _opts) do + raise Protocol.UndefinedError, + protocol: @protocol, + value: struct, + description: """ + Jason.Encoder protocol must always be explicitly implemented. + + If you own the struct, you can derive the implementation specifying \ + which fields should be encoded to JSON: + + @derive {Jason.Encoder, only: [....]} + defstruct ... + + It is also possible to encode all fields, although this should be \ + used carefully to avoid accidentally leaking private information \ + when new fields are added: + + @derive Jason.Encoder + defstruct ... + + Finally, if you don't own the struct you want to encode to JSON, \ + you may use Protocol.derive/3 placed outside of any module: + + Protocol.derive(Jason.Encoder, NameOfTheStruct, only: [...]) + Protocol.derive(Jason.Encoder, NameOfTheStruct) + """ + end + + def encode(value, _opts) do + raise Protocol.UndefinedError, + protocol: @protocol, + value: value, + description: "Jason.Encoder protocol must always be explicitly implemented" + end + + defp fields_to_encode(struct, opts) do + fields = Map.keys(struct) + + cond do + only = Keyword.get(opts, :only) -> + case only -- fields do + [] -> + only + + error_keys -> + raise ArgumentError, + "`:only` specified keys (#{inspect(error_keys)}) that are not defined in defstruct: " <> + "#{inspect(fields -- [:__struct__])}" + + end + + except = Keyword.get(opts, :except) -> + case except -- fields do + [] -> + fields -- [:__struct__ | except] + + error_keys -> + raise ArgumentError, + "`:except` specified keys (#{inspect(error_keys)}) that are not defined in defstruct: " <> + "#{inspect(fields -- [:__struct__])}" + + end + + true -> + fields -- [:__struct__] + end + end +end + +# The following implementations are formality - they are already covered +# by the main encoding mechanism in Jason.Encode, but exist mostly for +# documentation purposes and if anybody had the idea to call the protocol directly. 
+ +defimpl Jason.Encoder, for: Atom do + def encode(atom, opts) do + Jason.Encode.atom(atom, opts) + end +end + +defimpl Jason.Encoder, for: Integer do + def encode(integer, _opts) do + Jason.Encode.integer(integer) + end +end + +defimpl Jason.Encoder, for: Float do + def encode(float, _opts) do + Jason.Encode.float(float) + end +end + +defimpl Jason.Encoder, for: List do + def encode(list, opts) do + Jason.Encode.list(list, opts) + end +end + +defimpl Jason.Encoder, for: Map do + def encode(map, opts) do + Jason.Encode.map(map, opts) + end +end + +defimpl Jason.Encoder, for: BitString do + def encode(binary, opts) when is_binary(binary) do + Jason.Encode.string(binary, opts) + end + + def encode(bitstring, _opts) do + raise Protocol.UndefinedError, + protocol: @protocol, + value: bitstring, + description: "cannot encode a bitstring to JSON" + end +end + +defimpl Jason.Encoder, for: [Date, Time, NaiveDateTime, DateTime] do + def encode(value, _opts) do + [?", @for.to_iso8601(value), ?"] + end +end + +defimpl Jason.Encoder, for: Decimal do + def encode(value, _opts) do + # silence the xref warning + decimal = Decimal + [?", decimal.to_string(value), ?"] + end +end + +defimpl Jason.Encoder, for: Jason.Fragment do + def encode(%{encode: encode}, opts) do + encode.(opts) + end +end diff --git a/deps/jason/lib/formatter.ex b/deps/jason/lib/formatter.ex new file mode 100644 index 0000000..88826eb --- /dev/null +++ b/deps/jason/lib/formatter.ex @@ -0,0 +1,255 @@ +defmodule Jason.Formatter do + @moduledoc ~S""" + Pretty-printing and minimizing functions for JSON-encoded data. + + Input is required to be in an 8-bit-wide encoding such as UTF-8 or Latin-1 + in `t:iodata/0` format. Input must have valid JSON, invalid JSON may produce + unexpected results or errors. + """ + + @type opts :: [ + {:indent, iodata} + | {:line_separator, iodata} + | {:record_separator, iodata} + | {:after_colon, iodata} + ] + + import Record + defrecordp :opts, [:indent, :line, :record, :colon] + + @dialyzer :no_improper_lists + + @doc ~S""" + Pretty-prints JSON-encoded `input`. + + `input` may contain multiple JSON objects or arrays, optionally separated + by whitespace (e.g., one object per line). Objects in output will be + separated by newlines. No trailing newline is emitted. + + ## Options + + * `:indent` - used for nested objects and arrays (default: two spaces - `" "`); + * `:line_separator` - used in nested objects (default: `"\n"`); + * `:record_separator` - separates root-level objects and arrays + (default is the value for `:line_separator` option); + * `:after_colon` - printed after a colon inside objects (default: one space - `" "`). + + ## Examples + + iex> Jason.Formatter.pretty_print(~s|{"a":{"b": [1, 2]}}|) + ~s|{ + "a": { + "b": [ + 1, + 2 + ] + } + }| + + """ + @spec pretty_print(iodata, opts) :: binary + def pretty_print(input, opts \\ []) do + input + |> pretty_print_to_iodata(opts) + |> IO.iodata_to_binary() + end + + @doc ~S""" + Pretty-prints JSON-encoded `input` and returns iodata. + + This function should be preferred to `pretty_print/2`, if the pretty-printed + JSON will be handed over to one of the IO functions or sent + over the socket. The Erlang runtime is able to leverage vectorised + writes and avoid allocating a continuous buffer for the whole + resulting string, lowering memory use and increasing performance. 
+ """ + @spec pretty_print_to_iodata(iodata, opts) :: iodata + def pretty_print_to_iodata(input, opts \\ []) do + opts = parse_opts(opts, " ", "\n", nil, " ") + + depth = :first + empty = false + + {output, _state} = pp_iodata(input, [], depth, empty, opts) + + output + end + + @doc ~S""" + Minimizes JSON-encoded `input`. + + `input` may contain multiple JSON objects or arrays, optionally + separated by whitespace (e.g., one object per line). Minimized + output will contain one object per line. No trailing newline is emitted. + + ## Options + + * `:record_separator` - controls the string used as newline (default: `"\n"`). + + ## Examples + + iex> Jason.Formatter.minimize(~s|{ "a" : "b" , "c": \n\n 2}|) + ~s|{"a":"b","c":2}| + + """ + @spec minimize(iodata, opts) :: binary + def minimize(input, opts \\ []) do + input + |> minimize_to_iodata(opts) + |> IO.iodata_to_binary() + end + + @doc ~S""" + Minimizes JSON-encoded `input` and returns iodata. + + This function should be preferred to `minimize/2`, if the minimized + JSON will be handed over to one of the IO functions or sent + over the socket. The Erlang runtime is able to leverage vectorised + writes and avoid allocating a continuous buffer for the whole + resulting string, lowering memory use and increasing performance. + """ + @spec minimize_to_iodata(iodata, opts) :: iodata + def minimize_to_iodata(input, opts) do + record = Keyword.get(opts, :record_separator, "\n") + opts = opts(indent: "", line: "", record: record, colon: "") + + depth = :first + empty = false + + {output, _state} = pp_iodata(input, [], depth, empty, opts) + + output + end + + defp parse_opts([{option, value} | opts], indent, line, record, colon) do + value = IO.iodata_to_binary(value) + case option do + :indent -> parse_opts(opts, value, line, record, colon) + :record_separator -> parse_opts(opts, indent, line, value, colon) + :after_colon -> parse_opts(opts, indent, line, record, value) + :line_separator -> parse_opts(opts, indent, value, record || value, colon) + end + end + + defp parse_opts([], indent, line, record, colon) do + opts(indent: indent, line: line, record: record || line, colon: colon) + end + + for depth <- 1..16 do + defp tab(" ", unquote(depth)), do: unquote(String.duplicate(" ", depth)) + end + + defp tab("", _), do: "" + defp tab(indent, depth), do: List.duplicate(indent, depth) + + defp pp_iodata(<<>>, output_acc, depth, empty, opts) do + {output_acc, &pp_iodata(&1, &2, depth, empty, opts)} + end + + defp pp_iodata(<<byte, rest::binary>>, output_acc, depth, empty, opts) do + pp_byte(byte, rest, output_acc, depth, empty, opts) + end + + defp pp_iodata([], output_acc, depth, empty, opts) do + {output_acc, &pp_iodata(&1, &2, depth, empty, opts)} + end + + defp pp_iodata([byte | rest], output_acc, depth, empty, opts) when is_integer(byte) do + pp_byte(byte, rest, output_acc, depth, empty, opts) + end + + defp pp_iodata([head | tail], output_acc, depth, empty, opts) do + {output_acc, cont} = pp_iodata(head, output_acc, depth, empty, opts) + cont.(tail, output_acc) + end + + defp pp_byte(byte, rest, output, depth, empty, opts) when byte in ' \n\r\t' do + pp_iodata(rest, output, depth, empty, opts) + end + + defp pp_byte(byte, rest, output, depth, empty, opts) when byte in '{[' do + {out, depth} = + cond do + depth == :first -> {byte, 1} + depth == 0 -> {[opts(opts, :record), byte], 1} + empty -> {[opts(opts, :line), tab(opts(opts, :indent), depth), byte], depth + 1} + true -> {byte, depth + 1} + end + + empty = true + pp_iodata(rest, [output, out], 
depth, empty, opts) + end + + defp pp_byte(byte, rest, output, depth, true = _empty, opts) when byte in '}]' do + empty = false + depth = depth - 1 + pp_iodata(rest, [output, byte], depth, empty, opts) + end + + defp pp_byte(byte, rest, output, depth, false = empty, opts) when byte in '}]' do + depth = depth - 1 + out = [opts(opts, :line), tab(opts(opts, :indent), depth), byte] + pp_iodata(rest, [output, out], depth, empty, opts) + end + + defp pp_byte(byte, rest, output, depth, _empty, opts) when byte in ',' do + empty = false + out = [byte, opts(opts, :line), tab(opts(opts, :indent), depth)] + pp_iodata(rest, [output, out], depth, empty, opts) + end + + defp pp_byte(byte, rest, output, depth, empty, opts) when byte in ':' do + out = [byte, opts(opts, :colon)] + pp_iodata(rest, [output, out], depth, empty, opts) + end + + defp pp_byte(byte, rest, output, depth, empty, opts) do + out = if empty, do: [opts(opts, :line), tab(opts(opts, :indent), depth), byte], else: byte + empty = false + + if byte == ?" do + pp_string(rest, [output, out], _in_bs = false, &pp_iodata(&1, &2, depth, empty, opts)) + else + pp_iodata(rest, [output, out], depth, empty, opts) + end + end + + defp pp_string(<<>>, output_acc, in_bs, cont) do + {output_acc, &pp_string(&1, &2, in_bs, cont)} + end + + defp pp_string(binary, output_acc, true = _in_bs, cont) when is_binary(binary) do + <<byte, rest::binary>> = binary + pp_string(rest, [output_acc, byte], false, cont) + end + + defp pp_string(binary, output_acc, false = _in_bs, cont) when is_binary(binary) do + case :binary.match(binary, ["\"", "\\"]) do + :nomatch -> + {[output_acc | binary], &pp_string(&1, &2, false, cont)} + {pos, 1} -> + {head, tail} = :erlang.split_binary(binary, pos + 1) + case :binary.at(binary, pos) do + ?\\ -> pp_string(tail, [output_acc | head], true, cont) + ?" -> cont.(tail, [output_acc | head]) + end + end + end + + defp pp_string([], output_acc, in_bs, cont) do + {output_acc, &pp_string(&1, &2, in_bs, cont)} + end + + defp pp_string([byte | rest], output_acc, in_bs, cont) when is_integer(byte) do + cond do + in_bs -> pp_string(rest, [output_acc, byte], false, cont) + byte == ?" -> cont.(rest, [output_acc, byte]) + true -> pp_string(rest, [output_acc, byte], byte == ?\\, cont) + end + end + + defp pp_string([head | tail], output_acc, in_bs, cont) do + {output_acc, cont} = pp_string(head, output_acc, in_bs, cont) + cont.(tail, output_acc) + end +end diff --git a/deps/jason/lib/fragment.ex b/deps/jason/lib/fragment.ex new file mode 100644 index 0000000..2bcde6b --- /dev/null +++ b/deps/jason/lib/fragment.ex @@ -0,0 +1,11 @@ +defmodule Jason.Fragment do + defstruct [:encode] + + def new(iodata) when is_list(iodata) or is_binary(iodata) do + %__MODULE__{encode: fn _ -> iodata end} + end + + def new(encode) when is_function(encode, 1) do + %__MODULE__{encode: encode} + end +end diff --git a/deps/jason/lib/helpers.ex b/deps/jason/lib/helpers.ex new file mode 100644 index 0000000..f94678d --- /dev/null +++ b/deps/jason/lib/helpers.ex @@ -0,0 +1,98 @@ +defmodule Jason.Helpers do + @moduledoc """ + Provides macro facilities for partial compile-time encoding of JSON. + """ + + alias Jason.{Codegen, Fragment} + + @doc ~S""" + Encodes a JSON map from a compile-time keyword. + + Encodes the keys at compile time and strives to create as flat iodata + structure as possible to achieve maximum efficiency. 
Does encoding + right at the call site, but returns an `%Jason.Fragment{}` struct + that needs to be passed to one of the "main" encoding functions - + for example `Jason.encode/2` for final encoding into JSON - this + makes it completely transparent for most uses. + + Only allows keys that do not require escaping in any of the supported + encoding modes. This means only ASCII characters from the range + 0x1F..0x7F excluding '\', '/' and '"' are allowed - this also excludes + all control characters like newlines. + + Preserves the order of the keys. + + ## Example + + iex> fragment = json_map(foo: 1, bar: 2) + iex> Jason.encode!(fragment) + "{\"foo\":1,\"bar\":2}" + + """ + defmacro json_map(kv) do + kv_values = Macro.expand(kv, __CALLER__) + kv_vars = Enum.map(kv_values, fn {key, _} -> {key, generated_var(key, Codegen)} end) + + values = Enum.map(kv_values, &elem(&1, 1)) + vars = Enum.map(kv_vars, &elem(&1, 1)) + + escape = quote(do: escape) + encode_map = quote(do: encode_map) + encode_args = [escape, encode_map] + kv_iodata = Codegen.build_kv_iodata(kv_vars, encode_args) + + quote do + {unquote_splicing(vars)} = {unquote_splicing(values)} + + %Fragment{ + encode: fn {unquote(escape), unquote(encode_map)} -> + unquote(kv_iodata) + end + } + end + end + + @doc ~S""" + Encodes a JSON map from a variable containing a map and a compile-time + list of keys. + + It is equivalent to calling `Map.take/2` before encoding. Otherwise works + similar to `json_map/2`. + + ## Example + + iex> map = %{a: 1, b: 2, c: 3} + iex> fragment = json_map_take(map, [:c, :b]) + iex> Jason.encode!(fragment) + "{\"c\":3,\"b\":2}" + + """ + defmacro json_map_take(map, take) do + take = Macro.expand(take, __CALLER__) + kv = Enum.map(take, &{&1, generated_var(&1, Codegen)}) + escape = quote(do: escape) + encode_map = quote(do: encode_map) + encode_args = [escape, encode_map] + kv_iodata = Codegen.build_kv_iodata(kv, encode_args) + + quote do + case unquote(map) do + %{unquote_splicing(kv)} -> + %Fragment{ + encode: fn {unquote(escape), unquote(encode_map)} -> + unquote(kv_iodata) + end + } + + other -> + raise ArgumentError, + "expected a map with keys: #{unquote(inspect(take))}, got: #{inspect(other)}" + end + end + end + + # The same as Macro.var/2 except it sets generated: true + defp generated_var(name, context) do + {name, [generated: true], context} + end +end diff --git a/deps/jason/lib/jason.ex b/deps/jason/lib/jason.ex new file mode 100644 index 0000000..2bfa013 --- /dev/null +++ b/deps/jason/lib/jason.ex @@ -0,0 +1,242 @@ +defmodule Jason do + @moduledoc """ + A blazing fast JSON parser and generator in pure Elixir. + """ + + alias Jason.{Encode, Decoder, DecodeError, EncodeError, Formatter} + + @type escape :: :json | :unicode_safe | :html_safe | :javascript_safe + @type maps :: :naive | :strict + + @type encode_opt :: {:escape, escape} | {:maps, maps} | {:pretty, boolean | Formatter.opts()} + + @type keys :: :atoms | :atoms! | :strings | :copy | (String.t() -> term) + + @type strings :: :reference | :copy + + @type floats :: :native | :decimals + + @type objects :: :maps | :ordered_objects + + @type decode_opt :: {:keys, keys} | {:strings, strings} | {:floats, floats} | {:objects, objects} + + @doc """ + Parses a JSON value from `input` iodata. + + ## Options + + * `:keys` - controls how keys in objects are decoded. 
Possible values are: + + * `:strings` (default) - decodes keys as binary strings, + * `:atoms` - keys are converted to atoms using `String.to_atom/1`, + * `:atoms!` - keys are converted to atoms using `String.to_existing_atom/1`, + * custom decoder - additionally a function accepting a string and returning a key + is accepted. + + * `:strings` - controls how strings (including keys) are decoded. Possible values are: + + * `:reference` (default) - when possible tries to create a sub-binary into the original + * `:copy` - always copies the strings. This option is especially useful when parts of the + decoded data will be stored for a long time (in ets or some process) to avoid keeping + the reference to the original data. + + * `:floats` - controls how floats are decoded. Possible values are: + + * `:native` (default) - Native conversion from binary to float using `:erlang.binary_to_float/1`, + * `:decimals` - uses `Decimal.new/1` to parse the binary into a Decimal struct with arbitrary precision. + + * `:objects` - controls how objects are decoded. Possible values are: + + * `:maps` (default) - objects are decoded as maps + * `:ordered_objects` - objects are decoded as `Jason.OrderedObject` structs + + ## Decoding keys to atoms + + The `:atoms` option uses the `String.to_atom/1` call that can create atoms at runtime. + Since the atoms are not garbage collected, this can pose a DoS attack vector when used + on user-controlled data. + + ## Examples + + iex> Jason.decode("{}") + {:ok, %{}} + + iex> Jason.decode("invalid") + {:error, %Jason.DecodeError{data: "invalid", position: 0, token: nil}} + """ + @spec decode(iodata, [decode_opt]) :: {:ok, term} | {:error, DecodeError.t()} + def decode(input, opts \\ []) do + input = IO.iodata_to_binary(input) + Decoder.parse(input, format_decode_opts(opts)) + end + + @doc """ + Parses a JSON value from `input` iodata. + + Similar to `decode/2` except it will unwrap the error tuple and raise + in case of errors. + + ## Examples + + iex> Jason.decode!("{}") + %{} + + iex> Jason.decode!("invalid") + ** (Jason.DecodeError) unexpected byte at position 0: 0x69 ("i") + + """ + @spec decode!(iodata, [decode_opt]) :: term | no_return + def decode!(input, opts \\ []) do + case decode(input, opts) do + {:ok, result} -> result + {:error, error} -> raise error + end + end + + @doc """ + Generates JSON corresponding to `input`. + + The generation is controlled by the `Jason.Encoder` protocol, + please refer to the module to read more on how to define the protocol + for custom data types. + + ## Options + + * `:escape` - controls how strings are encoded. Possible values are: + + * `:json` (default) - the regular JSON escaping as defined by RFC 7159. + * `:javascript_safe` - additionally escapes the LINE SEPARATOR (U+2028) + and PARAGRAPH SEPARATOR (U+2029) characters to make the produced JSON + valid JavaScript. + * `:html_safe` - similar to `:javascript_safe`, but also escapes the `/` + character to prevent XSS. + * `:unicode_safe` - escapes all non-ascii characters. + + * `:maps` - controls how maps are encoded. Possible values are: + + * `:strict` - checks the encoded map for duplicate keys and raises + if they appear. For example `%{:foo => 1, "foo" => 2}` would be + rejected, since both keys would be encoded to the string `"foo"`. + * `:naive` (default) - does not perform the check. + + * `:pretty` - controls pretty printing of the output. 
Possible values are: + + * `true` to pretty print with default configuration + * a keyword of options as specified by `Jason.Formatter.pretty_print/2`. + + ## Examples + + iex> Jason.encode(%{a: 1}) + {:ok, ~S|{"a":1}|} + + iex> Jason.encode("\\xFF") + {:error, %Jason.EncodeError{message: "invalid byte 0xFF in <<255>>"}} + + """ + @spec encode(term, [encode_opt]) :: + {:ok, String.t()} | {:error, EncodeError.t() | Exception.t()} + def encode(input, opts \\ []) do + case do_encode(input, format_encode_opts(opts)) do + {:ok, result} -> {:ok, IO.iodata_to_binary(result)} + {:error, error} -> {:error, error} + end + end + + @doc """ + Generates JSON corresponding to `input`. + + Similar to `encode/1` except it will unwrap the error tuple and raise + in case of errors. + + ## Examples + + iex> Jason.encode!(%{a: 1}) + ~S|{"a":1}| + + iex> Jason.encode!("\\xFF") + ** (Jason.EncodeError) invalid byte 0xFF in <<255>> + + """ + @spec encode!(term, [encode_opt]) :: String.t() | no_return + def encode!(input, opts \\ []) do + case do_encode(input, format_encode_opts(opts)) do + {:ok, result} -> IO.iodata_to_binary(result) + {:error, error} -> raise error + end + end + + @doc """ + Generates JSON corresponding to `input` and returns iodata. + + This function should be preferred to `encode/2`, if the generated + JSON will be handed over to one of the IO functions or sent + over the socket. The Erlang runtime is able to leverage vectorised + writes and avoid allocating a continuous buffer for the whole + resulting string, lowering memory use and increasing performance. + + ## Examples + + iex> {:ok, iodata} = Jason.encode_to_iodata(%{a: 1}) + iex> IO.iodata_to_binary(iodata) + ~S|{"a":1}| + + iex> Jason.encode_to_iodata("\\xFF") + {:error, %Jason.EncodeError{message: "invalid byte 0xFF in <<255>>"}} + + """ + @spec encode_to_iodata(term, [encode_opt]) :: + {:ok, iodata} | {:error, EncodeError.t() | Exception.t()} + def encode_to_iodata(input, opts \\ []) do + do_encode(input, format_encode_opts(opts)) + end + + @doc """ + Generates JSON corresponding to `input` and returns iodata. + + Similar to `encode_to_iodata/1` except it will unwrap the error tuple + and raise in case of errors. 
+ + ## Examples + + iex> iodata = Jason.encode_to_iodata!(%{a: 1}) + iex> IO.iodata_to_binary(iodata) + ~S|{"a":1}| + + iex> Jason.encode_to_iodata!("\\xFF") + ** (Jason.EncodeError) invalid byte 0xFF in <<255>> + + """ + @spec encode_to_iodata!(term, [encode_opt]) :: iodata | no_return + def encode_to_iodata!(input, opts \\ []) do + case do_encode(input, format_encode_opts(opts)) do + {:ok, result} -> result + {:error, error} -> raise error + end + end + + defp do_encode(input, %{pretty: true} = opts) do + case Encode.encode(input, opts) do + {:ok, encoded} -> {:ok, Formatter.pretty_print_to_iodata(encoded)} + other -> other + end + end + + defp do_encode(input, %{pretty: pretty} = opts) when pretty !== false do + case Encode.encode(input, opts) do + {:ok, encoded} -> {:ok, Formatter.pretty_print_to_iodata(encoded, pretty)} + other -> other + end + end + + defp do_encode(input, opts) do + Encode.encode(input, opts) + end + + defp format_encode_opts(opts) do + Enum.into(opts, %{escape: :json, maps: :naive}) + end + + defp format_decode_opts(opts) do + Enum.into(opts, %{keys: :strings, strings: :reference, floats: :native, objects: :maps}) + end +end diff --git a/deps/jason/lib/ordered_object.ex b/deps/jason/lib/ordered_object.ex new file mode 100644 index 0000000..52831f3 --- /dev/null +++ b/deps/jason/lib/ordered_object.ex @@ -0,0 +1,94 @@ +defmodule Jason.OrderedObject do + @doc """ + Struct implementing a JSON object retaining order of properties. + + A wrapper around a keyword (that supports non-atom keys) allowing for + proper protocol implementations. + + Implements the `Access` behaviour and `Enumerable` protocol with + complexity similar to keywords/lists. + """ + + @behaviour Access + + @type t :: %__MODULE__{values: [{String.Chars.t(), term()}]} + + defstruct values: [] + + def new(values) when is_list(values) do + %__MODULE__{values: values} + end + + @impl Access + def fetch(%__MODULE__{values: values}, key) do + case :lists.keyfind(key, 1, values) do + {_, value} -> {:ok, value} + false -> :error + end + end + + @impl Access + def get_and_update(%__MODULE__{values: values} = obj, key, function) do + {result, new_values} = get_and_update(values, [], key, function) + {result, %{obj | values: new_values}} + end + + @impl Access + def pop(%__MODULE__{values: values} = obj, key, default \\ nil) do + case :lists.keyfind(key, 1, values) do + {_, value} -> {value, %{obj | values: delete_key(values, key)}} + false -> {default, obj} + end + end + + defp get_and_update([{key, current} | t], acc, key, fun) do + case fun.(current) do + {get, value} -> + {get, :lists.reverse(acc, [{key, value} | t])} + + :pop -> + {current, :lists.reverse(acc, t)} + + other -> + raise "the given function must return a two-element tuple or :pop, got: #{inspect(other)}" + end + end + + defp get_and_update([{_, _} = h | t], acc, key, fun), do: get_and_update(t, [h | acc], key, fun) + + defp get_and_update([], acc, key, fun) do + case fun.(nil) do + {get, update} -> + {get, [{key, update} | :lists.reverse(acc)]} + + :pop -> + {nil, :lists.reverse(acc)} + + other -> + raise "the given function must return a two-element tuple or :pop, got: #{inspect(other)}" + end + end + + defp delete_key([{key, _} | tail], key), do: delete_key(tail, key) + defp delete_key([{_, _} = pair | tail], key), do: [pair | delete_key(tail, key)] + defp delete_key([], _key), do: [] +end + +defimpl Enumerable, for: Jason.OrderedObject do + def count(%{values: []}), do: {:ok, 0} + def count(_obj), do: {:error, __MODULE__} + + def 
member?(%{values: []}, _value), do: {:ok, false} + def member?(_obj, _value), do: {:error, __MODULE__} + + def slice(%{values: []}), do: {:ok, 0, fn _, _ -> [] end} + def slice(_obj), do: {:error, __MODULE__} + + def reduce(%{values: values}, acc, fun), do: Enumerable.List.reduce(values, acc, fun) +end + +defimpl Jason.Encoder, for: Jason.OrderedObject do + def encode(%{values: values}, opts) do + Jason.Encode.keyword(values, opts) + end +end diff --git a/deps/jason/lib/sigil.ex b/deps/jason/lib/sigil.ex new file mode 100644 index 0000000..e5447ef --- /dev/null +++ b/deps/jason/lib/sigil.ex @@ -0,0 +1,84 @@ +defmodule Jason.Sigil do + @doc ~S""" + Handles the sigil `~j` for JSON strings. + + Calls `Jason.decode!/2` with modifiers mapped to options. + + Given a string literal without interpolations, decodes the + string at compile-time. + + ## Modifiers + + See `Jason.decode/2` for detailed descriptions. + + * `a` - equivalent to `{:keys, :atoms}` option + * `A` - equivalent to `{:keys, :atoms!}` option + * `r` - equivalent to `{:strings, :reference}` option + * `c` - equivalent to `{:strings, :copy}` option + + ## Examples + + iex> ~j"0" + 0 + + iex> ~j"[1, 2, 3]" + [1, 2, 3] + + iex> ~j'"string"'r + "string" + + iex> ~j"{}" + %{} + + iex> ~j'{"atom": "value"}'a + %{atom: "value"} + + iex> ~j'{"#{:j}": #{'"j"'}}'A + %{j: "j"} + + """ + defmacro sigil_j(term, modifiers) + + defmacro sigil_j({:<<>>, _meta, [string]}, modifiers) when is_binary(string) do + Macro.escape(Jason.decode!(string, mods_to_opts(modifiers))) + end + + defmacro sigil_j(term, modifiers) do + quote(do: Jason.decode!(unquote(term), unquote(mods_to_opts(modifiers)))) + end + + @doc ~S""" + Handles the sigil `~J` for raw JSON strings. + + Decodes a raw string ignoring Elixir interpolations and + escape characters at compile-time. 
+ + ## Examples + + iex> ~J'"#{string}"' + "\#{string}" + + iex> ~J'"\u0078\\y"' + "x\\y" + + iex> ~J'{"#{key}": "#{}"}'a + %{"\#{key}": "\#{}"} + """ + defmacro sigil_J(term, modifiers) + + defmacro sigil_J({:<<>>, _meta, [string]}, modifiers) when is_binary(string) do + Macro.escape(Jason.decode!(string, mods_to_opts(modifiers))) + end + + @spec mods_to_opts(charlist) :: [Jason.decode_opt()] + defp mods_to_opts(modifiers) do + modifiers + |> Enum.map(fn + ?a -> {:keys, :atoms} + ?A -> {:keys, :atoms!} + ?r -> {:strings, :reference} + ?c -> {:strings, :copy} + m -> raise ArgumentError, "unknown sigil modifier #{<<?", m, ?">>}" + end) + end +end diff --git a/deps/jason/mix.exs b/deps/jason/mix.exs new file mode 100644 index 0000000..8f5b085 --- /dev/null +++ b/deps/jason/mix.exs @@ -0,0 +1,76 @@ +defmodule Jason.Mixfile do + use Mix.Project + + @source_url "https://github.com/michalmuskala/jason" + @version "1.3.0" + + def project() do + [ + app: :jason, + version: @version, + elixir: "~> 1.4", + start_permanent: Mix.env() == :prod, + consolidate_protocols: Mix.env() != :test, + deps: deps(), + preferred_cli_env: [docs: :docs], + dialyzer: dialyzer(), + description: description(), + package: package(), + docs: docs() + ] + end + + def application() do + [ + extra_applications: [] + ] + end + + defp deps() do + [ + {:decimal, "~> 1.0 or ~> 2.0", optional: true}, + {:dialyxir, "~> 1.0", only: [:dev, :test], runtime: false}, + {:ex_doc, ">= 0.0.0", only: :dev, runtime: false}, + ] ++ maybe_stream_data() + end + + defp maybe_stream_data() do + if Version.match?(System.version(), "~> 1.5") do + [{:stream_data, "~> 0.4", only: :test}] + else + [] + end + end + + defp dialyzer() do + [ + ignore_warnings: "dialyzer.ignore", + plt_add_apps: [:decimal] + ] + end + + defp description() do + """ + A blazing fast JSON parser and generator in pure Elixir. 
+ """ + end + + defp package() do + [ + maintainers: ["Michaล‚ Muskaล‚a"], + licenses: ["Apache-2.0"], + links: %{"GitHub" => @source_url} + ] + end + + defp docs() do + [ + main: "readme", + name: "Jason", + source_ref: "v#{@version}", + canonical: "http://hexdocs.pm/jason", + source_url: @source_url, + extras: ["README.md", "CHANGELOG.md", "LICENSE"] + ] + end +end diff --git a/deps/mime/.fetch b/deps/mime/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/mime/.formatter.exs b/deps/mime/.formatter.exs new file mode 100644 index 0000000..d304ff3 --- /dev/null +++ b/deps/mime/.formatter.exs @@ -0,0 +1,3 @@ +[ + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] +] diff --git a/deps/mime/.hex b/deps/mime/.hex new file mode 100644 index 0000000000000000000000000000000000000000..42d85d6938b5a409d40a1b99b87e57ea2e8c0b42 GIT binary patch literal 268 zcmZ9{O>P1)3<cm6=+CN0L75~@CcEAQ*|F0Z+GNlG75iQs(`9$ov-CduciK!_Hrq*V zxIuF<#=Nk3pL>_}P$nr~=WA=4e)yPT@R2<aHPJW_pkqfo1`9O|uEdl8#-f{{WJjX2 zwGbT<WSfD_Tf>f@vYx-S?@7@%-;Qi{9h*FeU7MRIzFaT=ahK3rs+K7Pb|6v+lUEN* w4K|L2s<?>3Cs(tOmmugdK!!-QMp9*!)ACTB_0>~j^ZKKg%uD~v+}`f}9T<v9Pyhe` literal 0 HcmV?d00001 diff --git a/deps/mime/CHANGELOG.md b/deps/mime/CHANGELOG.md new file mode 100644 index 0000000..fb06842 --- /dev/null +++ b/deps/mime/CHANGELOG.md @@ -0,0 +1,33 @@ +# Changelog + +## v2.0.3 + + * Support Markdown, JPEG XL, and PSD formats + +## v2.0.2 + + * Support Associated Signature Containers (ASiC) files + * Support `.atom` and `.rss` files + +## v2.0.1 + + * Add `.text` extension to text/plain + +## v2.0.0 + +Upgrade note: mime v2 no longer ships with a complete database of mime.types, +instead it lists the most common mime types used by web applications. When +upgrading, check carefully if all mime types used by your app are supported. + + * Ship with our own minimal types database + +## v1.6.0 + + * Deprecate MIME.valid? + * Ignore media type params + * Detect subtype suffix according to the spec + +## v1.5.0 + + * Compare extensions in a case-insensitive way (see + [elixir-plug/mime#38](https://github.com/elixir-plug/mime/issues/38)). diff --git a/deps/mime/LICENSE b/deps/mime/LICENSE new file mode 100644 index 0000000..04fb7d9 --- /dev/null +++ b/deps/mime/LICENSE @@ -0,0 +1,13 @@ +Copyright (c) 2016 Plataformatec. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/deps/mime/README.md b/deps/mime/README.md new file mode 100644 index 0000000..2e73551 --- /dev/null +++ b/deps/mime/README.md @@ -0,0 +1,32 @@ +# MIME + +[![CI](https://github.com/elixir-plug/mime/actions/workflows/ci.yml/badge.svg)](https://github.com/elixir-plug/mime/actions/workflows/ci.yml) + +A read-only and immutable MIME type module for Elixir. + +This library embeds a database of MIME types so we can map MIME types +to extensions and vice-versa. The library was designed to be read-only +for performance. This library is used by projects like Plug and Phoenix. + +Master currently points to a redesign of this library with a minimal copy +of the MIME database. 
To add any [media type specified by +IANA](https://www.iana.org/assignments/media-types/media-types.xhtml), +please submit a pull request. You can also add specific types to your +application via a compile-time configuration, see [the documentation for +more information](http://hexdocs.pm/mime/). + +## Installation + +The package can be installed as: + +```elixir +def deps do + [{:mime, "~> 2.0"}] +end +``` + +## License + +MIME source code is released under Apache License 2.0. + +Check LICENSE file for more information. diff --git a/deps/mime/hex_metadata.config b/deps/mime/hex_metadata.config new file mode 100644 index 0000000..90063a8 --- /dev/null +++ b/deps/mime/hex_metadata.config @@ -0,0 +1,12 @@ +{<<"app">>,<<"mime">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>,<<"A MIME type module for Elixir">>}. +{<<"elixir">>,<<"~> 1.10">>}. +{<<"files">>, + [<<"lib">>,<<"lib/mime.ex">>,<<".formatter.exs">>,<<"mix.exs">>, + <<"README.md">>,<<"LICENSE">>,<<"CHANGELOG.md">>]}. +{<<"licenses">>,[<<"Apache-2.0">>]}. +{<<"links">>,[{<<"GitHub">>,<<"https://github.com/elixir-plug/mime">>}]}. +{<<"name">>,<<"mime">>}. +{<<"requirements">>,[]}. +{<<"version">>,<<"2.0.3">>}. diff --git a/deps/mime/lib/mime.ex b/deps/mime/lib/mime.ex new file mode 100644 index 0000000..e722574 --- /dev/null +++ b/deps/mime/lib/mime.ex @@ -0,0 +1,264 @@ +defmodule MIME do + @moduledoc """ + Maps MIME types to its file extensions and vice versa. + + MIME types can be extended in your application configuration + as follows: + + config :mime, :types, %{ + "application/vnd.api+json" => ["json-api"] + } + + After adding the configuration, MIME needs to be recompiled. + If you are using mix, it can be done with: + + $ mix deps.clean mime --build + + """ + + types = %{ + "application/atom+xml" => ["atom"], + "application/epub+zip" => ["epub"], + "application/gzip" => ["gz"], + "application/java-archive" => ["jar"], + "application/javascript" => ["js"], + "application/json" => ["json"], + "application/json-patch+json" => ["json-patch"], + "application/ld+json" => ["jsonld"], + "application/manifest+json" => ["webmanifest"], + "application/msword" => ["doc"], + "application/octet-stream" => ["bin"], + "application/ogg" => ["ogx"], + "application/pdf" => ["pdf"], + "application/postscript" => ["ps", "eps", "ai"], + "application/rss+xml" => ["rss"], + "application/rtf" => ["rtf"], + "application/vnd.amazon.ebook" => ["azw"], + "application/vnd.api+json" => ["json-api"], + "application/vnd.apple.installer+xml" => ["mpkg"], + "application/vnd.etsi.asic-e+zip" => ["asice", "sce"], + "application/vnd.etsi.asic-s+zip" => ["asics", "scs"], + "application/vnd.mozilla.xul+xml" => ["xul"], + "application/vnd.ms-excel" => ["xls"], + "application/vnd.ms-fontobject" => ["eot"], + "application/vnd.ms-powerpoint" => ["ppt"], + "application/vnd.oasis.opendocument.presentation" => ["odp"], + "application/vnd.oasis.opendocument.spreadsheet" => ["ods"], + "application/vnd.oasis.opendocument.text" => ["odt"], + "application/vnd.openxmlformats-officedocument.presentationml.presentation" => ["pptx"], + "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet" => ["xlsx"], + "application/vnd.openxmlformats-officedocument.wordprocessingml.document" => ["docx"], + "application/vnd.rar" => ["rar"], + "application/vnd.visio" => ["vsd"], + "application/wasm" => ["wasm"], + "application/x-7z-compressed" => ["7z"], + "application/x-abiword" => ["abw"], + "application/x-bzip" => ["bz"], + "application/x-bzip2" => ["bz2"], + 
"application/x-cdf" => ["cda"], + "application/x-csh" => ["csh"], + "application/x-freearc" => ["arc"], + "application/x-httpd-php" => ["php"], + "application/x-msaccess" => ["mdb"], + "application/x-sh" => ["sh"], + "application/x-shockwave-flash" => ["swf"], + "application/x-tar" => ["tar"], + "application/xhtml+xml" => ["xhtml"], + "application/xml" => ["xml"], + "application/zip" => ["zip"], + "audio/3gpp" => ["3gp"], + "audio/3gpp2" => ["3g2"], + "audio/aac" => ["aac"], + "audio/midi" => ["mid", "midi"], + "audio/mpeg" => ["mp3"], + "audio/ogg" => ["oga"], + "audio/opus" => ["opus"], + "audio/wav" => ["wav"], + "audio/webm" => ["weba"], + "font/otf" => ["otf"], + "font/ttf" => ["ttf"], + "font/woff" => ["woff"], + "font/woff2" => ["woff2"], + "image/avif" => ["avif"], + "image/bmp" => ["bmp"], + "image/gif" => ["gif"], + "image/heic" => ["heic"], + "image/heif" => ["heif"], + "image/jpeg" => ["jpg", "jpeg"], + "image/jxl" => ["jxl"], + "image/png" => ["png"], + "image/svg+xml" => ["svg", "svgz"], + "image/tiff" => ["tiff", "tif"], + "image/vnd.adobe.photoshop" => ["psd"], + "image/vnd.microsoft.icon" => ["ico"], + "image/webp" => ["webp"], + "text/calendar" => ["ics"], + "text/css" => ["css"], + "text/csv" => ["csv"], + "text/html" => ["html", "htm"], + "text/javascript" => ["js", "mjs"], + "text/markdown" => ["md", "markdown"], + "text/plain" => ["txt", "text"], + "text/xml" => ["xml"], + "video/3gpp" => ["3gp"], + "video/3gpp2" => ["3g2"], + "video/mp2t" => ["ts"], + "video/mp4" => ["mp4"], + "video/mpeg" => ["mpeg", "mpg"], + "video/ogg" => ["ogv"], + "video/quicktime" => ["mov"], + "video/webm" => ["webm"], + "video/x-ms-wmv" => ["wmv"], + "video/x-msvideo" => ["avi"] + } + + require Application + custom_types = Application.compile_env(:mime, :types, %{}) + + to_exts = fn map -> + for {media, exts} <- map, ext <- exts, reduce: %{} do + acc -> Map.update(acc, ext, [media], &[media | &1]) + end + end + + exts = + Map.merge(to_exts.(types), %{ + "3g2" => ["video/3gpp2"], + "3gp" => ["video/3gpp"], + "js" => ["text/javascript"], + "xml" => ["text/xml"] + }) + + for {ext, [_, _ | _] = mimes} <- exts do + raise "conflicting MIMEs for extension .#{ext}, please override: #{inspect(mimes)}" + end + + all_exts = Map.merge(exts, to_exts.(custom_types)) + all_types = Map.merge(types, custom_types) + + @doc """ + Returns the custom types compiled into the MIME module. + """ + def compiled_custom_types do + unquote(Macro.escape(custom_types)) + end + + @doc """ + Returns the extensions associated with a given MIME type. + + ## Examples + + iex> MIME.extensions("text/html") + ["html", "htm"] + + iex> MIME.extensions("application/json") + ["json"] + + iex> MIME.extensions("application/vnd.custom+xml") + ["xml"] + + iex> MIME.extensions("foo/bar") + [] + + """ + @spec extensions(String.t()) :: [String.t()] + def extensions(type) do + mime = + type + |> strip_params() + |> downcase("") + + mime_to_ext(mime) || suffix(mime) || [] + end + + defp suffix(type) do + case String.split(type, "+") do + [_type_subtype_without_suffix, suffix] -> [suffix] + _ -> nil + end + end + + @default_type "application/octet-stream" + + @doc """ + Returns the MIME type associated with a file extension. + + If no MIME type is known for `file_extension`, + `#{inspect(@default_type)}` is returned. 
+ + ## Examples + + iex> MIME.type("txt") + "text/plain" + + iex> MIME.type("foobarbaz") + #{inspect(@default_type)} + + """ + @spec type(String.t()) :: String.t() + def type(file_extension) do + ext_to_mime(file_extension) || @default_type + end + + @doc """ + Returns whether an extension has a MIME type registered. + + ## Examples + + iex> MIME.has_type?("txt") + true + + iex> MIME.has_type?("foobarbaz") + false + + """ + @spec has_type?(String.t()) :: boolean + def has_type?(file_extension) do + is_binary(ext_to_mime(file_extension)) + end + + @doc """ + Guesses the MIME type based on the path's extension. See `type/1`. + + ## Examples + + iex> MIME.from_path("index.html") + "text/html" + + """ + @spec from_path(Path.t()) :: String.t() + def from_path(path) do + case Path.extname(path) do + "." <> ext -> type(downcase(ext, "")) + _ -> @default_type + end + end + + defp strip_params(string) do + string |> :binary.split(";") |> hd() + end + + defp downcase(<<h, t::binary>>, acc) when h in ?A..?Z, + do: downcase(t, <<acc::binary, h + 32>>) + + defp downcase(<<h, t::binary>>, acc), do: downcase(t, <<acc::binary, h>>) + defp downcase(<<>>, acc), do: acc + + @spec ext_to_mime(String.t()) :: String.t() | nil + defp ext_to_mime(type) + + for {ext, [type | _]} <- all_exts do + defp ext_to_mime(unquote(ext)), do: unquote(type) + end + + defp ext_to_mime(_ext), do: nil + + @spec mime_to_ext(String.t()) :: list(String.t()) | nil + defp mime_to_ext(type) + + for {type, exts} <- all_types do + defp mime_to_ext(unquote(type)), do: unquote(List.wrap(exts)) + end + + defp mime_to_ext(_type), do: nil +end diff --git a/deps/mime/mix.exs b/deps/mime/mix.exs new file mode 100644 index 0000000..1c0aa5f --- /dev/null +++ b/deps/mime/mix.exs @@ -0,0 +1,42 @@ +defmodule MIME.Mixfile do + use Mix.Project + + @version "2.0.3" + + def project do + [ + app: :mime, + version: @version, + elixir: "~> 1.10", + description: "A MIME type module for Elixir", + package: package(), + deps: deps(), + docs: [ + source_ref: "v#{@version}", + main: "MIME", + source_url: "https://github.com/elixir-plug/mime" + ] + ] + end + + def package do + [ + maintainers: ["alirz23", "Josรฉ Valim"], + licenses: ["Apache-2.0"], + links: %{"GitHub" => "https://github.com/elixir-plug/mime"} + ] + end + + def application do + [ + env: [], + extra_applications: [:logger] + ] + end + + defp deps do + [ + {:ex_doc, "~> 0.19", only: :docs} + ] + end +end diff --git a/deps/phoenix/.fetch b/deps/phoenix/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/phoenix/.formatter.exs b/deps/phoenix/.formatter.exs new file mode 100644 index 0000000..3887f7a --- /dev/null +++ b/deps/phoenix/.formatter.exs @@ -0,0 +1,80 @@ +locals_without_parens = [ + # Phoenix.Channel + intercept: 1, + + # Phoenix.Router + connect: 3, + connect: 4, + delete: 3, + delete: 4, + forward: 2, + forward: 3, + forward: 4, + get: 3, + get: 4, + head: 3, + head: 4, + match: 4, + match: 5, + options: 3, + options: 4, + patch: 3, + patch: 4, + pipeline: 2, + pipe_through: 1, + post: 3, + post: 4, + put: 3, + put: 4, + resources: 2, + resources: 3, + resources: 4, + trace: 4, + + # Phoenix.Controller + action_fallback: 1, + + # Phoenix.Endpoint + plug: 1, + plug: 2, + socket: 2, + socket: 3, + + # Phoenix.Socket + channel: 2, + channel: 3, + + # Phoenix.ChannelTest + assert_broadcast: 2, + assert_broadcast: 3, + assert_push: 2, + assert_push: 3, + assert_reply: 2, + assert_reply: 3, + assert_reply: 4, + refute_broadcast: 2, + refute_broadcast: 3, + refute_push: 2, + 
refute_push: 3, + refute_reply: 2, + refute_reply: 3, + refute_reply: 4, + + # Phoenix.ConnTest + assert_error_sent: 2, + + # Phoenix.Live{Dashboard,View}.Router + attr: 2, + attr: 3, + live: 2, + live: 3, + live: 4, + live_dashboard: 1, + live_dashboard: 2, + on_mount: 1 +] + +[ + locals_without_parens: locals_without_parens, + export: [locals_without_parens: locals_without_parens] +] diff --git a/deps/phoenix/.hex b/deps/phoenix/.hex new file mode 100644 index 0000000000000000000000000000000000000000..6324004ff870925133ed7e923ec1b99205078feb GIT binary patch literal 272 zcmZ9{O;W=!3<Y4*Ld%MybmSk|&W@WfiY4O|>}1lG$)1bjY+33p-uLud4&}(|SY)FE zsk>6@#OiHoP1cW8Bz@f;wf6Kq#~dj^2p%9u=kyT4Go#hojYdKsVU|D${ct*J3LK-i zh(zF>W%XLA(Wk7p@80*+(K>BcR?m&9?8JE~v$SdJ{`|QA_YM1$5u;5*GLVA?n;eJ- y?IcGqJa|MqMnU!%k=TnjVPs?;fwk$-D$Be>Fa6u4vO53tG1J`k?4&RLqRk&k*Ga|z literal 0 HcmV?d00001 diff --git a/deps/phoenix/CHANGELOG.md b/deps/phoenix/CHANGELOG.md new file mode 100644 index 0000000..9c57df5 --- /dev/null +++ b/deps/phoenix/CHANGELOG.md @@ -0,0 +1,163 @@ +# Changelog for v1.6 + +See the [upgrade guide](https://gist.github.com/chrismccord/2ab350f154235ad4a4d0f4de6decba7b) to upgrade from Phoenix 1.5.x. + +Phoenix v1.6 requires Elixir v1.9+. + +## 1.6.12 (2022-09-06) + * Fix `phx.gen.release` Dockerfile pointing to expired image + +## 1.6.11 (2022-07-11) + +### JavaScript Client Enhancements + * Add convenience for getting longpoll reference with `getLongPollTransport` + +### JavaScript Client Bug Fixes + * Cancel inflight longpoll requests on canceled longpoll session + * Do not attempt to flush socket buffer when tearing down socket on `replaceTransport` + +## 1.6.10 (2022-06-01) + +### JavaScript Client Enhancements + * Add `ping` function to socket + +## 1.6.9 (2022-05-16) + +### Bug Fixes + * [phx.gen.release] Fix generated .dockerignore comment + +## 1.6.8 (2022-05-06) + +### Bug Fixes + * [phx.gen.release] Fix Ecto check failing to find Ecto in certain cases + +## 1.6.7 (2022-04-14) + +### Enhancements + * [Endpoint] Add Endpoint init telemetry event + * [Endpoint] Prioritize user :http configuration for ranch to fix inet_backend failing to be respected + * [Logger] Support log_module in router metadata + * [phx.gen.release] Don't handle assets in Docker when directory doesn't exist + * [phx.gen.release] Skip generating migration files when ecto_sql is not installed + +### JavaScript Client Enhancements + * Switch to .mjs files for ESM for better compatibility across build tools + +### JavaScript Client Bug Fixes + * Fix LongPoll callbacks in JS client causing errors on connection close + +## 1.6.6 (2022-01-04) + +### Bug Fixes + * [Endpoint] Fix `check_origin: :conn` failing to match scheme + +## 1.6.5 (2021-12-16) + +### Enhancements + * [Endpoint] Support `check_origin: :conn` to enforce origin on the connection's host, port, and scheme + +### Bug Fixes + * Fix LiveView upload testing errors caused by `Phoenix.ChannelTest` + +## 1.6.4 (2021-12-08) + +### Bug Fixes + * Fix incorrect `phx.gen.release` output + +## 1.6.3 (2021-12-07) + +### Enhancements + * Add new `phx.gen.release` task for release and docker based deployments + * Add `fullsweep_after` option to the websocket transport + * Add `:force_watchers` option to `Phoenix.Endpoint` for running watchers even when web server is not started + +### Bug Fixes + * Fix Endpoint `log: false` failing to disable logging + +### JavaScript Client Bug Fixes + * Do not attempt to reconnect automatically if client gracefully closes connection + +## 1.6.2 (2021-10-08) 
+ +### Bug Fixes + * [phx.new] Fix external flag to esbuild using incorrect syntax + +## 1.6.1 (2021-10-08) + +### Enhancements + * [phx.new] Add external flag to esbuild for fonts and image path loading + * [phx.gen.auth] No longer set `argon2` as the default hash algorithm for `phx.gen.auth` in favor of bcrypt for performance reasons on smaller hardware + +### Bug Fixes + * Fix race conditions logging debug duplicate channel joins when no duplicate existed + +### JavaScript Client Bug Fixes + * Export commonjs modules for backwards compatibility + +## 1.6.0 (2021-09-24) ๐Ÿš€ + +### Enhancements + * [ConnTest] Add `path_params/2` for retrieving router path parameters out of dynamically returned URLs. + +### JavaScript Client Bug Fixes + * Fix LongPoll transport undefined readyState check + +## 1.6.0-rc.1 (2021-09-22) + +### Enhancements + * [mix phx.gen.auth] Validate bcrypt passwords are no longer than 72 bytes + * re-enable `phx.routes` task to support back to back invocations, such as for aliased mix route tasks + * [mix phx.gen.html] Remove comma after `for={@changeset}` on `form.html.heex` + +### JavaScript Client Bug Fixes + * Fix messages for duplicate topic being dispatched to old channels + +## 1.6.0-rc.0 (2021-08-26) + +### Enhancements + * [CodeReloader] Code reloading can now pick up changes to .beam files if they were compiled in a separate OS process than the Phoenix server + * [Controller] Do not create compile-time dependency for `action_fallback` + * [Endpoint] Allow custom error response from socket handler + * [Endpoint] Do not require a pubsub server in the socket (only inside channels) + * [mix phx.digest.clean] Add `--all` flag to `mix phx.digest.clean` + * [mix phx.gen.auth] Add `mix phx.gen.auth` generator + * [mix phx.gen.context] Support `enum` types and the `redact` option when declaring fields + * [mix phx.gen.notifier] A new generator to build notifiers that by default deliver emails + * [mix phx.new] Update `mix phx.new` to require Elixir v1.12 and use the new `config/runtime.exs` + * [mix phx.new] Set `plug_init_mode: :runtime` in generated `config/test.exs` + * [mix phx.new] Add description to Ecto telemetry metrics + * [mix phx.new] Use `Ecto.Adapters.SQL.Sandbox.start_owner!/2` in generators - this approach provides proper shutdown semantics for apps using LiveView and Presence + * [mix phx.new] Add `--install` and `--no-install` options to `phx.new` + * [mix phx.new] Add `--database sqlite3` option to `phx.new` + * [mix phx.new] Remove usage of Sass + * [mix phx.new] New applications now depend on Swoosh to deliver emails + * [mix phx.new] No longer generate a socket file by default, instead one can run `mix phx.gen.socket` + * [mix phx.new] No longer generates a home page using LiveView, instead one can run `mix phx.gen.live` + * [mix phx.new] LiveView is now included by default. 
Passing `--no-live` will comment out lines in `app.js` and `Endpoint` + * [mix phx.server] Add `--open` flag + * [Router] Do not add compile time deps in `pipe_through` + * [View] Extracted `Phoenix.View` into its own project to facilitate reuse + +### JavaScript Client Enhancements + * Add new `replaceTransport` function to socket with extended `onError` API to allow simplified LongPoll fallback + * Fire each event in a separate task for the LongPoll transport to fix ordering + * Optimize presence syncing + +### Bug fixes + * [Controller] Return normalized paths in `current_path/1` and `current_path/2` + * [mix phx.gen.live] Fix a bug where tests with `utc_datetime` and `boolean` fields did not pass out of the box + +### JavaScript Client Bug fixes + * Bind to `beforeunload` instead of `unload` to solve Firefox connection issues + * Fix presence onJoin including current metadata in new presence + +### Deprecations + * [mix compile.phoenix] Adding the `:phoenix` compiler to your `mix.exs` (`compilers: [:phoenix] ++ Mix.compilers()`) is no longer required from Phoenix v1.6 forward if you are running on Elixir v1.11. Remove it from your `mix.exs` and you should gain faster compilation times too + * [Endpoint] Phoenix now requires Cowboy v2.7+ + +### Breaking changes + * [View] `@view_module` and `@view_template` are no longer set. Use `Phoenix.Controller.view_module/1` and `Phoenix.Controller.view_template/1` respectively, or pass explicit assigns from `Phoenix.View.render`. + +## v1.5 + +The CHANGELOG for v1.5 releases can be found in the [v1.5 branch](https://github.com/phoenixframework/phoenix/blob/v1.5/CHANGELOG.md). diff --git a/deps/phoenix/LICENSE.md b/deps/phoenix/LICENSE.md new file mode 100644 index 0000000..a2197e0 --- /dev/null +++ b/deps/phoenix/LICENSE.md @@ -0,0 +1,22 @@ +# MIT License + +Copyright (c) 2014 Chris McCord + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/phoenix/README.md b/deps/phoenix/README.md new file mode 100644 index 0000000..ea76288 --- /dev/null +++ b/deps/phoenix/README.md @@ -0,0 +1,94 @@ +![phoenix logo](https://raw.githubusercontent.com/phoenixframework/phoenix/master/priv/static/phoenix.png) +> Peace of mind from prototype to production. 
+ +[![Build Status](https://github.com/phoenixframework/phoenix/workflows/CI/badge.svg)](https://github.com/phoenixframework/phoenix/actions?query=workflow%3ACI) + +## Getting started + +See the official site at https://www.phoenixframework.org/ + +Install the latest version of Phoenix by following the instructions at https://hexdocs.pm/phoenix/installation.html#phoenix + +## Documentation + +API documentation is available at [https://hexdocs.pm/phoenix](https://hexdocs.pm/phoenix) + +Phoenix.js documentation is available at [https://hexdocs.pm/phoenix/js](https://hexdocs.pm/phoenix/js) + +## Contributing + +We appreciate any contribution to Phoenix. Check our [CODE_OF_CONDUCT.md](CODE_OF_CONDUCT.md) and [CONTRIBUTING.md](CONTRIBUTING.md) guides for more information. We usually keep a list of features and bugs in the [issue tracker][4]. + +### Generating a Phoenix project from unreleased versions + +You can create a new project using the latest Phoenix source installer (the `phx.new` Mix task) with the following steps: + +1. Remove any previously installed `phx_new` archives so that Mix will pick up the local source code. This can be done with `mix archive.uninstall phx_new` or by simply deleting the file, which is usually in `~/.mix/archives/`. +2. Copy this repo via `git clone https://github.com/phoenixframework/phoenix` or by downloading it +3. Run the `phx.new` Mix task from within the `installer` directory, for example: + +```bash +$ cd phoenix/installer +$ mix phx.new dev_app --dev +``` + +The `--dev` flag will configure your new project's `:phoenix` dep as a relative path dependency, pointing to your local Phoenix checkout: + +```elixir +defp deps do + [{:phoenix, path: "../..", override: true}, +``` + +To create projects outside of the `installer/` directory, add the latest archive to your machine by following the instructions in [installer/README.md](https://github.com/phoenixframework/phoenix/blob/master/installer/README.md) + +To build the documentation from source: + +```bash +$ npm install --prefix assets +$ MIX_ENV=docs mix docs +``` + +To build Phoenix from source: + +```bash +$ mix deps.get +$ mix compile +``` + +To build the Phoenix installer from source: + +```bash +$ mix deps.get +$ mix compile +$ mix archive.build +``` + +### Building phoenix.js + +```bash +$ cd assets +$ npm install +``` + +## Important links + +* [#elixir][1] on [Libera][2] IRC +* [elixir-lang Slack channel][3] +* [Issue tracker][4] +* [Phoenix Forum (questions)][5] +* [phoenix-core Mailing list (development)][6] +* Visit Phoenix's sponsor, DockYard, for expert [phoenix consulting](https://dockyard.com/phoenix-consulting) +* Privately disclose security vulnerabilities to phoenix-security@googlegroups.com + + [1]: https://web.libera.chat/?channels=#elixir + [2]: https://libera.chat/ + [3]: https://elixir-slackin.herokuapp.com/ + [4]: https://github.com/phoenixframework/phoenix/issues + [5]: https://elixirforum.com/c/phoenix-forum + [6]: https://groups.google.com/group/phoenix-core + +## Copyright and License + +Copyright (c) 2014, Chris McCord. + +Phoenix source code is licensed under the [MIT License](LICENSE.md). 
diff --git a/deps/phoenix/assets/js/phoenix/ajax.js b/deps/phoenix/assets/js/phoenix/ajax.js new file mode 100644 index 0000000..3e79274 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/ajax.js @@ -0,0 +1,83 @@ +import { + global, + XHR_STATES +} from "./constants" + +export default class Ajax { + + static request(method, endPoint, accept, body, timeout, ontimeout, callback){ + if(global.XDomainRequest){ + let req = new global.XDomainRequest() // IE8, IE9 + return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback) + } else { + let req = new global.XMLHttpRequest() // IE7+, Firefox, Chrome, Opera, Safari + return this.xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback) + } + } + + static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback){ + req.timeout = timeout + req.open(method, endPoint) + req.onload = () => { + let response = this.parseJSON(req.responseText) + callback && callback(response) + } + if(ontimeout){ req.ontimeout = ontimeout } + + // Work around bug in IE9 that requires an attached onprogress handler + req.onprogress = () => { } + + req.send(body) + return req + } + + static xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback){ + req.open(method, endPoint, true) + req.timeout = timeout + req.setRequestHeader("Content-Type", accept) + req.onerror = () => callback && callback(null) + req.onreadystatechange = () => { + if(req.readyState === XHR_STATES.complete && callback){ + let response = this.parseJSON(req.responseText) + callback(response) + } + } + if(ontimeout){ req.ontimeout = ontimeout } + + req.send(body) + return req + } + + static parseJSON(resp){ + if(!resp || resp === ""){ return null } + + try { + return JSON.parse(resp) + } catch (e){ + console && console.log("failed to parse JSON response", resp) + return null + } + } + + static serialize(obj, parentKey){ + let queryStr = [] + for(var key in obj){ + if(!Object.prototype.hasOwnProperty.call(obj, key)){ continue } + let paramKey = parentKey ? `${parentKey}[${key}]` : key + let paramVal = obj[key] + if(typeof paramVal === "object"){ + queryStr.push(this.serialize(paramVal, paramKey)) + } else { + queryStr.push(encodeURIComponent(paramKey) + "=" + encodeURIComponent(paramVal)) + } + } + return queryStr.join("&") + } + + static appendParams(url, params){ + if(Object.keys(params).length === 0){ return url } + + let prefix = url.match(/\?/) ? "&" : "?" 
+ return `${url}${prefix}${this.serialize(params)}` + } +} diff --git a/deps/phoenix/assets/js/phoenix/channel.js b/deps/phoenix/assets/js/phoenix/channel.js new file mode 100644 index 0000000..76bcb33 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/channel.js @@ -0,0 +1,311 @@ +import {closure} from "./utils" +import { + CHANNEL_EVENTS, + CHANNEL_STATES, +} from "./constants" + +import Push from "./push" +import Timer from "./timer" + +/** + * + * @param {string} topic + * @param {(Object|function)} params + * @param {Socket} socket + */ +export default class Channel { + constructor(topic, params, socket){ + this.state = CHANNEL_STATES.closed + this.topic = topic + this.params = closure(params || {}) + this.socket = socket + this.bindings = [] + this.bindingRef = 0 + this.timeout = this.socket.timeout + this.joinedOnce = false + this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout) + this.pushBuffer = [] + this.stateChangeRefs = [] + + this.rejoinTimer = new Timer(() => { + if(this.socket.isConnected()){ this.rejoin() } + }, this.socket.rejoinAfterMs) + this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset())) + this.stateChangeRefs.push(this.socket.onOpen(() => { + this.rejoinTimer.reset() + if(this.isErrored()){ this.rejoin() } + }) + ) + this.joinPush.receive("ok", () => { + this.state = CHANNEL_STATES.joined + this.rejoinTimer.reset() + this.pushBuffer.forEach(pushEvent => pushEvent.send()) + this.pushBuffer = [] + }) + this.joinPush.receive("error", () => { + this.state = CHANNEL_STATES.errored + if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() } + }) + this.onClose(() => { + this.rejoinTimer.reset() + if(this.socket.hasLogger()) this.socket.log("channel", `close ${this.topic} ${this.joinRef()}`) + this.state = CHANNEL_STATES.closed + this.socket.remove(this) + }) + this.onError(reason => { + if(this.socket.hasLogger()) this.socket.log("channel", `error ${this.topic}`, reason) + if(this.isJoining()){ this.joinPush.reset() } + this.state = CHANNEL_STATES.errored + if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() } + }) + this.joinPush.receive("timeout", () => { + if(this.socket.hasLogger()) this.socket.log("channel", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout) + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), this.timeout) + leavePush.send() + this.state = CHANNEL_STATES.errored + this.joinPush.reset() + if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() } + }) + this.on(CHANNEL_EVENTS.reply, (payload, ref) => { + this.trigger(this.replyEventName(ref), payload) + }) + } + + /** + * Join the channel + * @param {integer} timeout + * @returns {Push} + */ + join(timeout = this.timeout){ + if(this.joinedOnce){ + throw new Error("tried to join multiple times. 
'join' can only be called a single time per channel instance") + } else { + this.timeout = timeout + this.joinedOnce = true + this.rejoin() + return this.joinPush + } + } + + /** + * Hook into channel close + * @param {Function} callback + */ + onClose(callback){ + this.on(CHANNEL_EVENTS.close, callback) + } + + /** + * Hook into channel errors + * @param {Function} callback + */ + onError(callback){ + return this.on(CHANNEL_EVENTS.error, reason => callback(reason)) + } + + /** + * Subscribes on channel events + * + * Subscription returns a ref counter, which can be used later to + * unsubscribe the exact event listener + * + * @example + * const ref1 = channel.on("event", do_stuff) + * const ref2 = channel.on("event", do_other_stuff) + * channel.off("event", ref1) + * // Since unsubscription, do_stuff won't fire, + * // while do_other_stuff will keep firing on the "event" + * + * @param {string} event + * @param {Function} callback + * @returns {integer} ref + */ + on(event, callback){ + let ref = this.bindingRef++ + this.bindings.push({event, ref, callback}) + return ref + } + + /** + * Unsubscribes off of channel events + * + * Use the ref returned from a channel.on() to unsubscribe one + * handler, or pass nothing for the ref to unsubscribe all + * handlers for the given event. + * + * @example + * // Unsubscribe the do_stuff handler + * const ref1 = channel.on("event", do_stuff) + * channel.off("event", ref1) + * + * // Unsubscribe all handlers from event + * channel.off("event") + * + * @param {string} event + * @param {integer} ref + */ + off(event, ref){ + this.bindings = this.bindings.filter((bind) => { + return !(bind.event === event && (typeof ref === "undefined" || ref === bind.ref)) + }) + } + + /** + * @private + */ + canPush(){ return this.socket.isConnected() && this.isJoined() } + + /** + * Sends a message `event` to phoenix with the payload `payload`. + * Phoenix receives this in the `handle_in(event, payload, socket)` + * function. if phoenix replies or it times out (default 10000ms), + * then optionally the reply can be received. + * + * @example + * channel.push("event") + * .receive("ok", payload => console.log("phoenix replied:", payload)) + * .receive("error", err => console.log("phoenix errored", err)) + * .receive("timeout", () => console.log("timed out pushing")) + * @param {string} event + * @param {Object} payload + * @param {number} [timeout] + * @returns {Push} + */ + push(event, payload, timeout = this.timeout){ + payload = payload || {} + if(!this.joinedOnce){ + throw new Error(`tried to push '${event}' to '${this.topic}' before joining. 
Use channel.join() before pushing events`) + } + let pushEvent = new Push(this, event, function (){ return payload }, timeout) + if(this.canPush()){ + pushEvent.send() + } else { + pushEvent.startTimeout() + this.pushBuffer.push(pushEvent) + } + + return pushEvent + } + + /** Leaves the channel + * + * Unsubscribes from server events, and + * instructs channel to terminate on server + * + * Triggers onClose() hooks + * + * To receive leave acknowledgements, use the `receive` + * hook to bind to the server ack, ie: + * + * @example + * channel.leave().receive("ok", () => alert("left!") ) + * + * @param {integer} timeout + * @returns {Push} + */ + leave(timeout = this.timeout){ + this.rejoinTimer.reset() + this.joinPush.cancelTimeout() + + this.state = CHANNEL_STATES.leaving + let onClose = () => { + if(this.socket.hasLogger()) this.socket.log("channel", `leave ${this.topic}`) + this.trigger(CHANNEL_EVENTS.close, "leave") + } + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout) + leavePush.receive("ok", () => onClose()) + .receive("timeout", () => onClose()) + leavePush.send() + if(!this.canPush()){ leavePush.trigger("ok", {}) } + + return leavePush + } + + /** + * Overridable message hook + * + * Receives all events for specialized message handling + * before dispatching to the channel callbacks. + * + * Must return the payload, modified or unmodified + * @param {string} event + * @param {Object} payload + * @param {integer} ref + * @returns {Object} + */ + onMessage(_event, payload, _ref){ return payload } + + /** + * @private + */ + isMember(topic, event, payload, joinRef){ + if(this.topic !== topic){ return false } + + if(joinRef && joinRef !== this.joinRef()){ + if(this.socket.hasLogger()) this.socket.log("channel", "dropping outdated message", {topic, event, payload, joinRef}) + return false + } else { + return true + } + } + + /** + * @private + */ + joinRef(){ return this.joinPush.ref } + + /** + * @private + */ + rejoin(timeout = this.timeout){ + if(this.isLeaving()){ return } + this.socket.leaveOpenTopic(this.topic) + this.state = CHANNEL_STATES.joining + this.joinPush.resend(timeout) + } + + /** + * @private + */ + trigger(event, payload, ref, joinRef){ + let handledPayload = this.onMessage(event, payload, ref, joinRef) + if(payload && !handledPayload){ throw new Error("channel onMessage callbacks must return the payload, modified or unmodified") } + + let eventBindings = this.bindings.filter(bind => bind.event === event) + + for(let i = 0; i < eventBindings.length; i++){ + let bind = eventBindings[i] + bind.callback(handledPayload, ref, joinRef || this.joinRef()) + } + } + + /** + * @private + */ + replyEventName(ref){ return `chan_reply_${ref}` } + + /** + * @private + */ + isClosed(){ return this.state === CHANNEL_STATES.closed } + + /** + * @private + */ + isErrored(){ return this.state === CHANNEL_STATES.errored } + + /** + * @private + */ + isJoined(){ return this.state === CHANNEL_STATES.joined } + + /** + * @private + */ + isJoining(){ return this.state === CHANNEL_STATES.joining } + + /** + * @private + */ + isLeaving(){ return this.state === CHANNEL_STATES.leaving } +} diff --git a/deps/phoenix/assets/js/phoenix/constants.js b/deps/phoenix/assets/js/phoenix/constants.js new file mode 100644 index 0000000..499d4a9 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/constants.js @@ -0,0 +1,29 @@ +export const globalSelf = typeof self !== "undefined" ? self : null +export const phxWindow = typeof window !== "undefined" ? 
window : null +export const global = globalSelf || phxWindow || global +export const DEFAULT_VSN = "2.0.0" +export const SOCKET_STATES = {connecting: 0, open: 1, closing: 2, closed: 3} +export const DEFAULT_TIMEOUT = 10000 +export const WS_CLOSE_NORMAL = 1000 +export const CHANNEL_STATES = { + closed: "closed", + errored: "errored", + joined: "joined", + joining: "joining", + leaving: "leaving", +} +export const CHANNEL_EVENTS = { + close: "phx_close", + error: "phx_error", + join: "phx_join", + reply: "phx_reply", + leave: "phx_leave" +} + +export const TRANSPORTS = { + longpoll: "longpoll", + websocket: "websocket" +} +export const XHR_STATES = { + complete: 4 +} diff --git a/deps/phoenix/assets/js/phoenix/index.js b/deps/phoenix/assets/js/phoenix/index.js new file mode 100644 index 0000000..c456caf --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/index.js @@ -0,0 +1,207 @@ +/** + * Phoenix Channels JavaScript client + * + * ## Socket Connection + * + * A single connection is established to the server and + * channels are multiplexed over the connection. + * Connect to the server using the `Socket` class: + * + * ```javascript + * let socket = new Socket("/socket", {params: {userToken: "123"}}) + * socket.connect() + * ``` + * + * The `Socket` constructor takes the mount point of the socket, + * the authentication params, as well as options that can be found in + * the Socket docs, such as configuring the `LongPoll` transport, and + * heartbeat. + * + * ## Channels + * + * Channels are isolated, concurrent processes on the server that + * subscribe to topics and broker events between the client and server. + * To join a channel, you must provide the topic, and channel params for + * authorization. Here's an example chat room example where `"new_msg"` + * events are listened for, messages are pushed to the server, and + * the channel is joined with ok/error/timeout matches: + * + * ```javascript + * let channel = socket.channel("room:123", {token: roomToken}) + * channel.on("new_msg", msg => console.log("Got message", msg) ) + * $input.onEnter( e => { + * channel.push("new_msg", {body: e.target.val}, 10000) + * .receive("ok", (msg) => console.log("created message", msg) ) + * .receive("error", (reasons) => console.log("create failed", reasons) ) + * .receive("timeout", () => console.log("Networking issue...") ) + * }) + * + * channel.join() + * .receive("ok", ({messages}) => console.log("catching up", messages) ) + * .receive("error", ({reason}) => console.log("failed join", reason) ) + * .receive("timeout", () => console.log("Networking issue. Still waiting...")) + *``` + * + * ## Joining + * + * Creating a channel with `socket.channel(topic, params)`, binds the params to + * `channel.params`, which are sent up on `channel.join()`. + * Subsequent rejoins will send up the modified params for + * updating authorization params, or passing up last_message_id information. + * Successful joins receive an "ok" status, while unsuccessful joins + * receive "error". + * + * With the default serializers and WebSocket transport, JSON text frames are + * used for pushing a JSON object literal. If an `ArrayBuffer` instance is provided, + * binary encoding will be used and the message will be sent with the binary + * opcode. + * + * *Note*: binary messages are only supported on the WebSocket transport. 
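+ *
+ * As a minimal, illustrative sketch (the event name and buffer contents are
+ * hypothetical), a binary payload is sent by handing an `ArrayBuffer`
+ * directly to `channel.push`:
+ *
+ * ```javascript
+ * let buffer = new TextEncoder().encode("hello").buffer // any ArrayBuffer
+ * channel.push("file_chunk", buffer)
+ *   .receive("ok", () => console.log("binary payload sent"))
+ * ```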
+ * + * ## Duplicate Join Subscriptions + * + * While the client may join any number of topics on any number of channels, + * the client may only hold a single subscription for each unique topic at any + * given time. When attempting to create a duplicate subscription, + * the server will close the existing channel, log a warning, and + * spawn a new channel for the topic. The client will have their + * `channel.onClose` callbacks fired for the existing channel, and the new + * channel join will have its receive hooks processed as normal. + * + * ## Pushing Messages + * + * From the previous example, we can see that pushing messages to the server + * can be done with `channel.push(eventName, payload)` and we can optionally + * receive responses from the push. Additionally, we can use + * `receive("timeout", callback)` to abort waiting for our other `receive` hooks + * and take action after some period of waiting. The default timeout is 10000ms. + * + * + * ## Socket Hooks + * + * Lifecycle events of the multiplexed connection can be hooked into via + * `socket.onError()` and `socket.onClose()` events, ie: + * + * ```javascript + * socket.onError( () => console.log("there was an error with the connection!") ) + * socket.onClose( () => console.log("the connection dropped") ) + * ``` + * + * + * ## Channel Hooks + * + * For each joined channel, you can bind to `onError` and `onClose` events + * to monitor the channel lifecycle, ie: + * + * ```javascript + * channel.onError( () => console.log("there was an error!") ) + * channel.onClose( () => console.log("the channel has gone away gracefully") ) + * ``` + * + * ### onError hooks + * + * `onError` hooks are invoked if the socket connection drops, or the channel + * crashes on the server. In either case, a channel rejoin is attempted + * automatically in an exponential backoff manner. + * + * ### onClose hooks + * + * `onClose` hooks are invoked only in two cases. 1) the channel explicitly + * closed on the server, or 2). The client explicitly closed, by calling + * `channel.leave()` + * + * + * ## Presence + * + * The `Presence` object provides features for syncing presence information + * from the server with the client and handling presences joining and leaving. + * + * ### Syncing state from the server + * + * To sync presence state from the server, first instantiate an object and + * pass your channel in to track lifecycle events: + * + * ```javascript + * let channel = socket.channel("some:topic") + * let presence = new Presence(channel) + * ``` + * + * Next, use the `presence.onSync` callback to react to state changes + * from the server. For example, to render the list of users every time + * the list changes, you could write: + * + * ```javascript + * presence.onSync(() => { + * myRenderUsersFunction(presence.list()) + * }) + * ``` + * + * ### Listing Presences + * + * `presence.list` is used to return a list of presence information + * based on the local state of metadata. By default, all presence + * metadata is returned, but a `listBy` function can be supplied to + * allow the client to select which metadata to use for a given presence. + * For example, you may have a user online from different devices with + * a metadata status of "online", but they have set themselves to "away" + * on another device. In this case, the app may choose to use the "away" + * status for what appears on the UI. The example below defines a `listBy` + * function which prioritizes the first metadata which was registered for + * each user. 
This could be the first tab they opened, or the first device + * they came online from: + * + * ```javascript + * let listBy = (id, {metas: [first, ...rest]}) => { + * first.count = rest.length + 1 // count of this user's presences + * first.id = id + * return first + * } + * let onlineUsers = presence.list(listBy) + * ``` + * + * ### Handling individual presence join and leave events + * + * The `presence.onJoin` and `presence.onLeave` callbacks can be used to + * react to individual presences joining and leaving the app. For example: + * + * ```javascript + * let presence = new Presence(channel) + * + * // detect if user has joined for the 1st time or from another tab/device + * presence.onJoin((id, current, newPres) => { + * if(!current){ + * console.log("user has entered for the first time", newPres) + * } else { + * console.log("user additional presence", newPres) + * } + * }) + * + * // detect if user has left from all tabs/devices, or is still present + * presence.onLeave((id, current, leftPres) => { + * if(current.metas.length === 0){ + * console.log("user has left from all devices", leftPres) + * } else { + * console.log("user left from a device", leftPres) + * } + * }) + * // receive presence data from server + * presence.onSync(() => { + * displayUsers(presence.list()) + * }) + * ``` + * @module phoenix + */ + +import Channel from "./channel" +import LongPoll from "./longpoll" +import Presence from "./presence" +import Serializer from "./serializer" +import Socket from "./socket" + +export { + Channel, + LongPoll, + Presence, + Serializer, + Socket +} diff --git a/deps/phoenix/assets/js/phoenix/longpoll.js b/deps/phoenix/assets/js/phoenix/longpoll.js new file mode 100644 index 0000000..958d18f --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/longpoll.js @@ -0,0 +1,135 @@ +import { + SOCKET_STATES, + TRANSPORTS +} from "./constants" + +import Ajax from "./ajax" + +export default class LongPoll { + + constructor(endPoint){ + this.endPoint = null + this.token = null + this.skipHeartbeat = true + this.reqs = new Set() + this.onopen = function (){ } // noop + this.onerror = function (){ } // noop + this.onmessage = function (){ } // noop + this.onclose = function (){ } // noop + this.pollEndpoint = this.normalizeEndpoint(endPoint) + this.readyState = SOCKET_STATES.connecting + this.poll() + } + + normalizeEndpoint(endPoint){ + return (endPoint + .replace("ws://", "http://") + .replace("wss://", "https://") + .replace(new RegExp("(.*)\/" + TRANSPORTS.websocket), "$1/" + TRANSPORTS.longpoll)) + } + + endpointURL(){ + return Ajax.appendParams(this.pollEndpoint, {token: this.token}) + } + + closeAndRetry(code, reason, wasClean){ + this.close(code, reason, wasClean) + this.readyState = SOCKET_STATES.connecting + } + + ontimeout(){ + this.onerror("timeout") + this.closeAndRetry(1005, "timeout", false) + } + + isActive(){ return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting } + + poll(){ + this.ajax("GET", null, () => this.ontimeout(), resp => { + if(resp){ + var {status, token, messages} = resp + this.token = token + } else { + status = 0 + } + + switch(status){ + case 200: + messages.forEach(msg => { + // Tasks are what things like event handlers, setTimeout callbacks, + // promise resolves and more are run within. + // In modern browsers, there are two different kinds of tasks, + // microtasks and macrotasks. + // Microtasks are mainly used for Promises, while macrotasks are + // used for everything else. 
+ // Microtasks always have priority over macrotasks. If the JS engine + // is looking for a task to run, it will always try to empty the + // microtask queue before attempting to run anything from the + // macrotask queue. + // + // For the WebSocket transport, messages always arrive in their own + // event. This means that if any promises are resolved from within, + // their callbacks will always finish execution by the time the + // next message event handler is run. + // + // In order to emulate this behaviour, we need to make sure each + // onmessage handler is run within it's own macrotask. + setTimeout(() => this.onmessage({data: msg}), 0) + }) + this.poll() + break + case 204: + this.poll() + break + case 410: + this.readyState = SOCKET_STATES.open + this.onopen({}) + this.poll() + break + case 403: + this.onerror(403) + this.close(1008, "forbidden", false) + break + case 0: + case 500: + this.onerror(500) + this.closeAndRetry(1011, "internal server error", 500) + break + default: throw new Error(`unhandled poll status ${status}`) + } + }) + } + + send(body){ + this.ajax("POST", body, () => this.onerror("timeout"), resp => { + if(!resp || resp.status !== 200){ + this.onerror(resp && resp.status) + this.closeAndRetry(1011, "internal server error", false) + } + }) + } + + close(code, reason, wasClean){ + for(let req of this.reqs){ req.abort() } + this.readyState = SOCKET_STATES.closed + let opts = Object.assign({code: 1000, reason: undefined, wasClean: true}, {code, reason, wasClean}) + if(typeof(CloseEvent) !== "undefined"){ + this.onclose(new CloseEvent("close", opts)) + } else { + this.onclose(opts) + } + } + + ajax(method, body, onCallerTimeout, callback){ + let req + let ontimeout = () => { + this.reqs.delete(req) + onCallerTimeout() + } + req = Ajax.request(method, this.endpointURL(), "application/json", body, this.timeout, ontimeout, resp => { + this.reqs.delete(req) + if(this.isActive()){ callback(resp) } + }) + this.reqs.add(req) + } +} diff --git a/deps/phoenix/assets/js/phoenix/presence.js b/deps/phoenix/assets/js/phoenix/presence.js new file mode 100644 index 0000000..cfb5af6 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/presence.js @@ -0,0 +1,162 @@ +/** + * Initializes the Presence + * @param {Channel} channel - The Channel + * @param {Object} opts - The options, + * for example `{events: {state: "state", diff: "diff"}}` + */ +export default class Presence { + + constructor(channel, opts = {}){ + let events = opts.events || {state: "presence_state", diff: "presence_diff"} + this.state = {} + this.pendingDiffs = [] + this.channel = channel + this.joinRef = null + this.caller = { + onJoin: function (){ }, + onLeave: function (){ }, + onSync: function (){ } + } + + this.channel.on(events.state, newState => { + let {onJoin, onLeave, onSync} = this.caller + + this.joinRef = this.channel.joinRef() + this.state = Presence.syncState(this.state, newState, onJoin, onLeave) + + this.pendingDiffs.forEach(diff => { + this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave) + }) + this.pendingDiffs = [] + onSync() + }) + + this.channel.on(events.diff, diff => { + let {onJoin, onLeave, onSync} = this.caller + + if(this.inPendingSyncState()){ + this.pendingDiffs.push(diff) + } else { + this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave) + onSync() + } + }) + } + + onJoin(callback){ this.caller.onJoin = callback } + + onLeave(callback){ this.caller.onLeave = callback } + + onSync(callback){ this.caller.onSync = callback } + + list(by){ return 
Presence.list(this.state, by) } + + inPendingSyncState(){ + return !this.joinRef || (this.joinRef !== this.channel.joinRef()) + } + + // lower-level public static API + + /** + * Used to sync the list of presences on the server + * with the client's state. An optional `onJoin` and `onLeave` callback can + * be provided to react to changes in the client's local presences across + * disconnects and reconnects with the server. + * + * @returns {Presence} + */ + static syncState(currentState, newState, onJoin, onLeave){ + let state = this.clone(currentState) + let joins = {} + let leaves = {} + + this.map(state, (key, presence) => { + if(!newState[key]){ + leaves[key] = presence + } + }) + this.map(newState, (key, newPresence) => { + let currentPresence = state[key] + if(currentPresence){ + let newRefs = newPresence.metas.map(m => m.phx_ref) + let curRefs = currentPresence.metas.map(m => m.phx_ref) + let joinedMetas = newPresence.metas.filter(m => curRefs.indexOf(m.phx_ref) < 0) + let leftMetas = currentPresence.metas.filter(m => newRefs.indexOf(m.phx_ref) < 0) + if(joinedMetas.length > 0){ + joins[key] = newPresence + joins[key].metas = joinedMetas + } + if(leftMetas.length > 0){ + leaves[key] = this.clone(currentPresence) + leaves[key].metas = leftMetas + } + } else { + joins[key] = newPresence + } + }) + return this.syncDiff(state, {joins: joins, leaves: leaves}, onJoin, onLeave) + } + + /** + * + * Used to sync a diff of presence join and leave + * events from the server, as they happen. Like `syncState`, `syncDiff` + * accepts optional `onJoin` and `onLeave` callbacks to react to a user + * joining or leaving from a device. + * + * @returns {Presence} + */ + static syncDiff(state, diff, onJoin, onLeave){ + let {joins, leaves} = this.clone(diff) + if(!onJoin){ onJoin = function (){ } } + if(!onLeave){ onLeave = function (){ } } + + this.map(joins, (key, newPresence) => { + let currentPresence = state[key] + state[key] = this.clone(newPresence) + if(currentPresence){ + let joinedRefs = state[key].metas.map(m => m.phx_ref) + let curMetas = currentPresence.metas.filter(m => joinedRefs.indexOf(m.phx_ref) < 0) + state[key].metas.unshift(...curMetas) + } + onJoin(key, currentPresence, newPresence) + }) + this.map(leaves, (key, leftPresence) => { + let currentPresence = state[key] + if(!currentPresence){ return } + let refsToRemove = leftPresence.metas.map(m => m.phx_ref) + currentPresence.metas = currentPresence.metas.filter(p => { + return refsToRemove.indexOf(p.phx_ref) < 0 + }) + onLeave(key, currentPresence, leftPresence) + if(currentPresence.metas.length === 0){ + delete state[key] + } + }) + return state + } + + /** + * Returns the array of presences, with selected metadata. 
+ * + * @param {Object} presences + * @param {Function} chooser + * + * @returns {Presence} + */ + static list(presences, chooser){ + if(!chooser){ chooser = function (key, pres){ return pres } } + + return this.map(presences, (key, presence) => { + return chooser(key, presence) + }) + } + + // private + + static map(obj, func){ + return Object.getOwnPropertyNames(obj).map(key => func(key, obj[key])) + } + + static clone(obj){ return JSON.parse(JSON.stringify(obj)) } +} diff --git a/deps/phoenix/assets/js/phoenix/push.js b/deps/phoenix/assets/js/phoenix/push.js new file mode 100644 index 0000000..2e497a2 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/push.js @@ -0,0 +1,128 @@ +/** + * Initializes the Push + * @param {Channel} channel - The Channel + * @param {string} event - The event, for example `"phx_join"` + * @param {Object} payload - The payload, for example `{user_id: 123}` + * @param {number} timeout - The push timeout in milliseconds + */ +export default class Push { + constructor(channel, event, payload, timeout){ + this.channel = channel + this.event = event + this.payload = payload || function (){ return {} } + this.receivedResp = null + this.timeout = timeout + this.timeoutTimer = null + this.recHooks = [] + this.sent = false + } + + /** + * + * @param {number} timeout + */ + resend(timeout){ + this.timeout = timeout + this.reset() + this.send() + } + + /** + * + */ + send(){ + if(this.hasReceived("timeout")){ return } + this.startTimeout() + this.sent = true + this.channel.socket.push({ + topic: this.channel.topic, + event: this.event, + payload: this.payload(), + ref: this.ref, + join_ref: this.channel.joinRef() + }) + } + + /** + * + * @param {*} status + * @param {*} callback + */ + receive(status, callback){ + if(this.hasReceived(status)){ + callback(this.receivedResp.response) + } + + this.recHooks.push({status, callback}) + return this + } + + /** + * @private + */ + reset(){ + this.cancelRefEvent() + this.ref = null + this.refEvent = null + this.receivedResp = null + this.sent = false + } + + /** + * @private + */ + matchReceive({status, response, _ref}){ + this.recHooks.filter(h => h.status === status) + .forEach(h => h.callback(response)) + } + + /** + * @private + */ + cancelRefEvent(){ + if(!this.refEvent){ return } + this.channel.off(this.refEvent) + } + + /** + * @private + */ + cancelTimeout(){ + clearTimeout(this.timeoutTimer) + this.timeoutTimer = null + } + + /** + * @private + */ + startTimeout(){ + if(this.timeoutTimer){ this.cancelTimeout() } + this.ref = this.channel.socket.makeRef() + this.refEvent = this.channel.replyEventName(this.ref) + + this.channel.on(this.refEvent, payload => { + this.cancelRefEvent() + this.cancelTimeout() + this.receivedResp = payload + this.matchReceive(payload) + }) + + this.timeoutTimer = setTimeout(() => { + this.trigger("timeout", {}) + }, this.timeout) + } + + /** + * @private + */ + hasReceived(status){ + return this.receivedResp && this.receivedResp.status === status + } + + /** + * @private + */ + trigger(status, response){ + this.channel.trigger(this.refEvent, {status, response}) + } +} diff --git a/deps/phoenix/assets/js/phoenix/serializer.js b/deps/phoenix/assets/js/phoenix/serializer.js new file mode 100644 index 0000000..3df8eb3 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/serializer.js @@ -0,0 +1,112 @@ +/* The default serializer for encoding and decoding messages */ +import { + CHANNEL_EVENTS +} from "./constants" + +export default { + HEADER_LENGTH: 1, + META_LENGTH: 4, + KINDS: {push: 0, reply: 1, 
broadcast: 2}, + + encode(msg, callback){ + if(msg.payload.constructor === ArrayBuffer){ + return callback(this.binaryEncode(msg)) + } else { + let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload] + return callback(JSON.stringify(payload)) + } + }, + + decode(rawPayload, callback){ + if(rawPayload.constructor === ArrayBuffer){ + return callback(this.binaryDecode(rawPayload)) + } else { + let [join_ref, ref, topic, event, payload] = JSON.parse(rawPayload) + return callback({join_ref, ref, topic, event, payload}) + } + }, + + // private + + binaryEncode(message){ + let {join_ref, ref, event, topic, payload} = message + let metaLength = this.META_LENGTH + join_ref.length + ref.length + topic.length + event.length + let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength) + let view = new DataView(header) + let offset = 0 + + view.setUint8(offset++, this.KINDS.push) // kind + view.setUint8(offset++, join_ref.length) + view.setUint8(offset++, ref.length) + view.setUint8(offset++, topic.length) + view.setUint8(offset++, event.length) + Array.from(join_ref, char => view.setUint8(offset++, char.charCodeAt(0))) + Array.from(ref, char => view.setUint8(offset++, char.charCodeAt(0))) + Array.from(topic, char => view.setUint8(offset++, char.charCodeAt(0))) + Array.from(event, char => view.setUint8(offset++, char.charCodeAt(0))) + + var combined = new Uint8Array(header.byteLength + payload.byteLength) + combined.set(new Uint8Array(header), 0) + combined.set(new Uint8Array(payload), header.byteLength) + + return combined.buffer + }, + + binaryDecode(buffer){ + let view = new DataView(buffer) + let kind = view.getUint8(0) + let decoder = new TextDecoder() + switch(kind){ + case this.KINDS.push: return this.decodePush(buffer, view, decoder) + case this.KINDS.reply: return this.decodeReply(buffer, view, decoder) + case this.KINDS.broadcast: return this.decodeBroadcast(buffer, view, decoder) + } + }, + + decodePush(buffer, view, decoder){ + let joinRefSize = view.getUint8(1) + let topicSize = view.getUint8(2) + let eventSize = view.getUint8(3) + let offset = this.HEADER_LENGTH + this.META_LENGTH - 1 // pushes have no ref + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)) + offset = offset + joinRefSize + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)) + offset = offset + topicSize + let event = decoder.decode(buffer.slice(offset, offset + eventSize)) + offset = offset + eventSize + let data = buffer.slice(offset, buffer.byteLength) + return {join_ref: joinRef, ref: null, topic: topic, event: event, payload: data} + }, + + decodeReply(buffer, view, decoder){ + let joinRefSize = view.getUint8(1) + let refSize = view.getUint8(2) + let topicSize = view.getUint8(3) + let eventSize = view.getUint8(4) + let offset = this.HEADER_LENGTH + this.META_LENGTH + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)) + offset = offset + joinRefSize + let ref = decoder.decode(buffer.slice(offset, offset + refSize)) + offset = offset + refSize + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)) + offset = offset + topicSize + let event = decoder.decode(buffer.slice(offset, offset + eventSize)) + offset = offset + eventSize + let data = buffer.slice(offset, buffer.byteLength) + let payload = {status: event, response: data} + return {join_ref: joinRef, ref: ref, topic: topic, event: CHANNEL_EVENTS.reply, payload: payload} + }, + + decodeBroadcast(buffer, view, decoder){ + let topicSize = view.getUint8(1) + let eventSize = 
view.getUint8(2) + let offset = this.HEADER_LENGTH + 2 + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)) + offset = offset + topicSize + let event = decoder.decode(buffer.slice(offset, offset + eventSize)) + offset = offset + eventSize + let data = buffer.slice(offset, buffer.byteLength) + + return {join_ref: null, ref: null, topic: topic, event: event, payload: data} + } +} diff --git a/deps/phoenix/assets/js/phoenix/socket.js b/deps/phoenix/assets/js/phoenix/socket.js new file mode 100644 index 0000000..24cad70 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/socket.js @@ -0,0 +1,560 @@ +import { + global, + phxWindow, + CHANNEL_EVENTS, + DEFAULT_TIMEOUT, + DEFAULT_VSN, + SOCKET_STATES, + TRANSPORTS, + WS_CLOSE_NORMAL +} from "./constants" + +import { + closure +} from "./utils" + +import Ajax from "./ajax" +import Channel from "./channel" +import LongPoll from "./longpoll" +import Serializer from "./serializer" +import Timer from "./timer" + +/** Initializes the Socket * + * + * For IE8 support use an ES5-shim (https://github.com/es-shims/es5-shim) + * + * @param {string} endPoint - The string WebSocket endpoint, ie, `"ws://example.com/socket"`, + * `"wss://example.com"` + * `"/socket"` (inherited host & protocol) + * @param {Object} [opts] - Optional configuration + * @param {Function} [opts.transport] - The Websocket Transport, for example WebSocket or Phoenix.LongPoll. + * + * Defaults to WebSocket with automatic LongPoll fallback. + * @param {Function} [opts.encode] - The function to encode outgoing messages. + * + * Defaults to JSON encoder. + * + * @param {Function} [opts.decode] - The function to decode incoming messages. + * + * Defaults to JSON: + * + * ```javascript + * (payload, callback) => callback(JSON.parse(payload)) + * ``` + * + * @param {number} [opts.timeout] - The default timeout in milliseconds to trigger push timeouts. + * + * Defaults `DEFAULT_TIMEOUT` + * @param {number} [opts.heartbeatIntervalMs] - The millisec interval to send a heartbeat message + * @param {number} [opts.reconnectAfterMs] - The optional function that returns the millsec + * socket reconnect interval. + * + * Defaults to stepped backoff of: + * + * ```javascript + * function(tries){ + * return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000 + * } + * ```` + * + * @param {number} [opts.rejoinAfterMs] - The optional function that returns the millsec + * rejoin interval for individual channels. + * + * ```javascript + * function(tries){ + * return [1000, 2000, 5000][tries - 1] || 10000 + * } + * ```` + * + * @param {Function} [opts.logger] - The optional function for specialized logging, ie: + * + * ```javascript + * function(kind, msg, data) { + * console.log(`${kind}: ${msg}`, data) + * } + * ``` + * + * @param {number} [opts.longpollerTimeout] - The maximum timeout of a long poll AJAX request. + * + * Defaults to 20s (double the server long poll timer). + * + * @param {(Object|function)} [opts.params] - The optional params to pass when connecting + * @param {string} [opts.binaryType] - The binary type to use for binary WebSocket frames. + * + * Defaults to "arraybuffer" + * + * @param {vsn} [opts.vsn] - The serializer's protocol version to send on connect. + * + * Defaults to DEFAULT_VSN. 
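+ *
+ * @example
+ * // A minimal sketch; the option values shown here are illustrative, not
+ * // required defaults.
+ * let socket = new Socket("/socket", {
+ *   params: {userToken: "123"},
+ *   heartbeatIntervalMs: 30000,
+ *   logger: (kind, msg, data) => console.log(`${kind}: ${msg}`, data)
+ * })
+ * socket.connect()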
+*/ +export default class Socket { + constructor(endPoint, opts = {}){ + this.stateChangeCallbacks = {open: [], close: [], error: [], message: []} + this.channels = [] + this.sendBuffer = [] + this.ref = 0 + this.timeout = opts.timeout || DEFAULT_TIMEOUT + this.transport = opts.transport || global.WebSocket || LongPoll + this.establishedConnections = 0 + this.defaultEncoder = Serializer.encode.bind(Serializer) + this.defaultDecoder = Serializer.decode.bind(Serializer) + this.closeWasClean = false + this.binaryType = opts.binaryType || "arraybuffer" + this.connectClock = 1 + if(this.transport !== LongPoll){ + this.encode = opts.encode || this.defaultEncoder + this.decode = opts.decode || this.defaultDecoder + } else { + this.encode = this.defaultEncoder + this.decode = this.defaultDecoder + } + let awaitingConnectionOnPageShow = null + if(phxWindow && phxWindow.addEventListener){ + phxWindow.addEventListener("pagehide", _e => { + if(this.conn){ + this.disconnect() + awaitingConnectionOnPageShow = this.connectClock + } + }) + phxWindow.addEventListener("pageshow", _e => { + if(awaitingConnectionOnPageShow === this.connectClock){ + awaitingConnectionOnPageShow = null + this.connect() + } + }) + } + this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 30000 + this.rejoinAfterMs = (tries) => { + if(opts.rejoinAfterMs){ + return opts.rejoinAfterMs(tries) + } else { + return [1000, 2000, 5000][tries - 1] || 10000 + } + } + this.reconnectAfterMs = (tries) => { + if(opts.reconnectAfterMs){ + return opts.reconnectAfterMs(tries) + } else { + return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000 + } + } + this.logger = opts.logger || null + this.longpollerTimeout = opts.longpollerTimeout || 20000 + this.params = closure(opts.params || {}) + this.endPoint = `${endPoint}/${TRANSPORTS.websocket}` + this.vsn = opts.vsn || DEFAULT_VSN + this.heartbeatTimer = null + this.pendingHeartbeatRef = null + this.reconnectTimer = new Timer(() => { + this.teardown(() => this.connect()) + }, this.reconnectAfterMs) + } + + /** + * Returns the LongPoll transport reference + */ + getLongPollTransport(){ return LongPoll } + + /** + * Disconnects and replaces the active transport + * + * @param {Function} newTransport - The new transport class to instantiate + * + */ + replaceTransport(newTransport){ + this.connectClock++ + this.closeWasClean = true + this.reconnectTimer.reset() + this.sendBuffer = [] + if(this.conn){ + this.conn.close() + this.conn = null + } + this.transport = newTransport + } + + /** + * Returns the socket protocol + * + * @returns {string} + */ + protocol(){ return location.protocol.match(/^https/) ? "wss" : "ws" } + + /** + * The fully qualifed socket url + * + * @returns {string} + */ + endPointURL(){ + let uri = Ajax.appendParams( + Ajax.appendParams(this.endPoint, this.params()), {vsn: this.vsn}) + if(uri.charAt(0) !== "/"){ return uri } + if(uri.charAt(1) === "/"){ return `${this.protocol()}:${uri}` } + + return `${this.protocol()}://${location.host}${uri}` + } + + /** + * Disconnects the socket + * + * See https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent#Status_codes for valid status codes. + * + * @param {Function} callback - Optional callback which is called after socket is disconnected. + * @param {integer} code - A status code for disconnection (Optional). + * @param {string} reason - A textual description of the reason to disconnect. 
(Optional) + */ + disconnect(callback, code, reason){ + this.connectClock++ + this.closeWasClean = true + this.reconnectTimer.reset() + this.teardown(callback, code, reason) + } + + /** + * + * @param {Object} params - The params to send when connecting, for example `{user_id: userToken}` + * + * Passing params to connect is deprecated; pass them in the Socket constructor instead: + * `new Socket("/socket", {params: {user_id: userToken}})`. + */ + connect(params){ + if(params){ + console && console.log("passing params to connect is deprecated. Instead pass :params to the Socket constructor") + this.params = closure(params) + } + if(this.conn){ return } + + this.connectClock++ + this.closeWasClean = false + this.conn = new this.transport(this.endPointURL()) + this.conn.binaryType = this.binaryType + this.conn.timeout = this.longpollerTimeout + this.conn.onopen = () => this.onConnOpen() + this.conn.onerror = error => this.onConnError(error) + this.conn.onmessage = event => this.onConnMessage(event) + this.conn.onclose = event => this.onConnClose(event) + } + + /** + * Logs the message. Override `this.logger` for specialized logging. noops by default + * @param {string} kind + * @param {string} msg + * @param {Object} data + */ + log(kind, msg, data){ this.logger(kind, msg, data) } + + /** + * Returns true if a logger has been set on this socket. + */ + hasLogger(){ return this.logger !== null } + + /** + * Registers callbacks for connection open events + * + * @example socket.onOpen(function(){ console.info("the socket was opened") }) + * + * @param {Function} callback + */ + onOpen(callback){ + let ref = this.makeRef() + this.stateChangeCallbacks.open.push([ref, callback]) + return ref + } + + /** + * Registers callbacks for connection close events + * @param {Function} callback + */ + onClose(callback){ + let ref = this.makeRef() + this.stateChangeCallbacks.close.push([ref, callback]) + return ref + } + + /** + * Registers callbacks for connection error events + * + * @example socket.onError(function(error){ alert("An error occurred") }) + * + * @param {Function} callback + */ + onError(callback){ + let ref = this.makeRef() + this.stateChangeCallbacks.error.push([ref, callback]) + return ref + } + + /** + * Registers callbacks for connection message events + * @param {Function} callback + */ + onMessage(callback){ + let ref = this.makeRef() + this.stateChangeCallbacks.message.push([ref, callback]) + return ref + } + + /** + * Pings the server and invokes the callback with the RTT in milliseconds + * @param {Function} callback + * + * Returns true if the ping was pushed or false if unable to be pushed. + */ + ping(callback){ + if(!this.isConnected()){ return false } + let ref = this.makeRef() + let startTime = Date.now() + this.push({topic: "phoenix", event: "heartbeat", payload: {}, ref: ref}) + let onMsgRef = this.onMessage(msg => { + if(msg.ref === ref){ + this.off([onMsgRef]) + callback(Date.now() - startTime) + } + }) + return true + } + + /** + * @private + */ + onConnOpen(){ + if(this.hasLogger()) this.log("transport", `connected to ${this.endPointURL()}`) + this.closeWasClean = false + this.establishedConnections++ + this.flushSendBuffer() + this.reconnectTimer.reset() + this.resetHeartbeat() + this.stateChangeCallbacks.open.forEach(([, callback]) => callback()) + } + + /** + * @private + */ + + heartbeatTimeout(){ + if(this.pendingHeartbeatRef){ + this.pendingHeartbeatRef = null + if(this.hasLogger()){ this.log("transport", "heartbeat timeout. 
Attempting to re-establish connection") } + this.abnormalClose("heartbeat timeout") + } + } + + resetHeartbeat(){ + if(this.conn && this.conn.skipHeartbeat){ return } + this.pendingHeartbeatRef = null + clearTimeout(this.heartbeatTimer) + setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs) + } + + teardown(callback, code, reason){ + if(!this.conn){ + return callback && callback() + } + + this.waitForBufferDone(() => { + if(this.conn){ + if(code){ this.conn.close(code, reason || "") } else { this.conn.close() } + } + + this.waitForSocketClosed(() => { + if(this.conn){ + this.conn.onclose = function (){ } // noop + this.conn = null + } + + callback && callback() + }) + }) + } + + waitForBufferDone(callback, tries = 1){ + if(tries === 5 || !this.conn || !this.conn.bufferedAmount){ + callback() + return + } + + setTimeout(() => { + this.waitForBufferDone(callback, tries + 1) + }, 150 * tries) + } + + waitForSocketClosed(callback, tries = 1){ + if(tries === 5 || !this.conn || this.conn.readyState === SOCKET_STATES.closed){ + callback() + return + } + + setTimeout(() => { + this.waitForSocketClosed(callback, tries + 1) + }, 150 * tries) + } + + onConnClose(event){ + let closeCode = event && event.code + if(this.hasLogger()) this.log("transport", "close", event) + this.triggerChanError() + clearTimeout(this.heartbeatTimer) + if(!this.closeWasClean && closeCode !== 1000){ + this.reconnectTimer.scheduleTimeout() + } + this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event)) + } + + /** + * @private + */ + onConnError(error){ + if(this.hasLogger()) this.log("transport", error) + let transportBefore = this.transport + let establishedBefore = this.establishedConnections + this.stateChangeCallbacks.error.forEach(([, callback]) => { + callback(error, transportBefore, establishedBefore) + }) + if(transportBefore === this.transport || establishedBefore > 0){ + this.triggerChanError() + } + } + + /** + * @private + */ + triggerChanError(){ + this.channels.forEach(channel => { + if(!(channel.isErrored() || channel.isLeaving() || channel.isClosed())){ + channel.trigger(CHANNEL_EVENTS.error) + } + }) + } + + /** + * @returns {string} + */ + connectionState(){ + switch(this.conn && this.conn.readyState){ + case SOCKET_STATES.connecting: return "connecting" + case SOCKET_STATES.open: return "open" + case SOCKET_STATES.closing: return "closing" + default: return "closed" + } + } + + /** + * @returns {boolean} + */ + isConnected(){ return this.connectionState() === "open" } + + /** + * @private + * + * @param {Channel} + */ + remove(channel){ + this.off(channel.stateChangeRefs) + this.channels = this.channels.filter(c => c.joinRef() !== channel.joinRef()) + } + + /** + * Removes `onOpen`, `onClose`, `onError,` and `onMessage` registrations. 
+ * + * @param {refs} - list of refs returned by calls to + * `onOpen`, `onClose`, `onError,` and `onMessage` + */ + off(refs){ + for(let key in this.stateChangeCallbacks){ + this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => { + return refs.indexOf(ref) === -1 + }) + } + } + + /** + * Initiates a new channel for the given topic + * + * @param {string} topic + * @param {Object} chanParams - Parameters for the channel + * @returns {Channel} + */ + channel(topic, chanParams = {}){ + let chan = new Channel(topic, chanParams, this) + this.channels.push(chan) + return chan + } + + /** + * @param {Object} data + */ + push(data){ + if(this.hasLogger()){ + let {topic, event, payload, ref, join_ref} = data + this.log("push", `${topic} ${event} (${join_ref}, ${ref})`, payload) + } + + if(this.isConnected()){ + this.encode(data, result => this.conn.send(result)) + } else { + this.sendBuffer.push(() => this.encode(data, result => this.conn.send(result))) + } + } + + /** + * Return the next message ref, accounting for overflows + * @returns {string} + */ + makeRef(){ + let newRef = this.ref + 1 + if(newRef === this.ref){ this.ref = 0 } else { this.ref = newRef } + + return this.ref.toString() + } + + sendHeartbeat(){ + if(this.pendingHeartbeatRef && !this.isConnected()){ return } + this.pendingHeartbeatRef = this.makeRef() + this.push({topic: "phoenix", event: "heartbeat", payload: {}, ref: this.pendingHeartbeatRef}) + this.heartbeatTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs) + } + + abnormalClose(reason){ + this.closeWasClean = false + if(this.isConnected()){ this.conn.close(WS_CLOSE_NORMAL, reason) } + } + + flushSendBuffer(){ + if(this.isConnected() && this.sendBuffer.length > 0){ + this.sendBuffer.forEach(callback => callback()) + this.sendBuffer = [] + } + } + + onConnMessage(rawMessage){ + this.decode(rawMessage.data, msg => { + let {topic, event, payload, ref, join_ref} = msg + if(ref && ref === this.pendingHeartbeatRef){ + clearTimeout(this.heartbeatTimer) + this.pendingHeartbeatRef = null + setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs) + } + + if(this.hasLogger()) this.log("receive", `${payload.status || ""} ${topic} ${event} ${ref && "(" + ref + ")" || ""}`, payload) + + for(let i = 0; i < this.channels.length; i++){ + const channel = this.channels[i] + if(!channel.isMember(topic, event, payload, join_ref)){ continue } + channel.trigger(event, payload, ref, join_ref) + } + + for(let i = 0; i < this.stateChangeCallbacks.message.length; i++){ + let [, callback] = this.stateChangeCallbacks.message[i] + callback(msg) + } + }) + } + + leaveOpenTopic(topic){ + let dupChannel = this.channels.find(c => c.topic === topic && (c.isJoined() || c.isJoining())) + if(dupChannel){ + if(this.hasLogger()) this.log("transport", `leaving duplicate topic "${topic}"`) + dupChannel.leave() + } + } +} diff --git a/deps/phoenix/assets/js/phoenix/timer.js b/deps/phoenix/assets/js/phoenix/timer.js new file mode 100644 index 0000000..5784ce5 --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/timer.js @@ -0,0 +1,42 @@ +/** + * + * Creates a timer that accepts a `timerCalc` function to perform + * calculated timeout retries, such as exponential backoff. 
+ * + * @example + * let reconnectTimer = new Timer(() => this.connect(), function(tries){ + * return [1000, 5000, 10000][tries - 1] || 10000 + * }) + * reconnectTimer.scheduleTimeout() // fires after 1000 + * reconnectTimer.scheduleTimeout() // fires after 5000 + * reconnectTimer.reset() + * reconnectTimer.scheduleTimeout() // fires after 1000 + * + * @param {Function} callback + * @param {Function} timerCalc + */ +export default class Timer { + constructor(callback, timerCalc){ + this.callback = callback + this.timerCalc = timerCalc + this.timer = null + this.tries = 0 + } + + reset(){ + this.tries = 0 + clearTimeout(this.timer) + } + + /** + * Cancels any previous scheduleTimeout and schedules callback + */ + scheduleTimeout(){ + clearTimeout(this.timer) + + this.timer = setTimeout(() => { + this.tries = this.tries + 1 + this.callback() + }, this.timerCalc(this.tries + 1)) + } +} diff --git a/deps/phoenix/assets/js/phoenix/utils.js b/deps/phoenix/assets/js/phoenix/utils.js new file mode 100644 index 0000000..b3a701a --- /dev/null +++ b/deps/phoenix/assets/js/phoenix/utils.js @@ -0,0 +1,9 @@ +// wraps value in closure or returns closure +export let closure = (value) => { + if(typeof value === "function"){ + return value + } else { + let closure = function (){ return value } + return closure + } +} diff --git a/deps/phoenix/hex_metadata.config b/deps/phoenix/hex_metadata.config new file mode 100644 index 0000000..66f69a9 --- /dev/null +++ b/deps/phoenix/hex_metadata.config @@ -0,0 +1,202 @@ +{<<"app">>,<<"phoenix">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>, + <<"Productive. Reliable. Fast. A productive web framework that\ndoes not compromise speed or maintainability.">>}. +{<<"elixir">>,<<"~> 1.9">>}. +{<<"files">>, + [<<"assets/js">>,<<"assets/js/phoenix">>, + <<"assets/js/phoenix/constants.js">>,<<"assets/js/phoenix/channel.js">>, + <<"assets/js/phoenix/timer.js">>,<<"assets/js/phoenix/presence.js">>, + <<"assets/js/phoenix/index.js">>,<<"assets/js/phoenix/socket.js">>, + <<"assets/js/phoenix/ajax.js">>,<<"assets/js/phoenix/push.js">>, + <<"assets/js/phoenix/utils.js">>,<<"assets/js/phoenix/serializer.js">>, + <<"assets/js/phoenix/longpoll.js">>,<<"lib">>,<<"lib/phoenix">>, + <<"lib/phoenix/naming.ex">>,<<"lib/phoenix/channel.ex">>, + <<"lib/phoenix/code_reloader">>,<<"lib/phoenix/code_reloader/proxy.ex">>, + <<"lib/phoenix/code_reloader/server.ex">>,<<"lib/phoenix/param.ex">>, + <<"lib/phoenix/test">>,<<"lib/phoenix/test/channel_test.ex">>, + <<"lib/phoenix/test/conn_test.ex">>,<<"lib/phoenix/presence.ex">>, + <<"lib/phoenix/logger.ex">>,<<"lib/phoenix/transports">>, + <<"lib/phoenix/transports/long_poll.ex">>, + <<"lib/phoenix/transports/long_poll_server.ex">>, + <<"lib/phoenix/transports/websocket.ex">>,<<"lib/phoenix/config.ex">>, + <<"lib/phoenix/token.ex">>,<<"lib/phoenix/controller.ex">>, + <<"lib/phoenix/controller">>,<<"lib/phoenix/controller/pipeline.ex">>, + <<"lib/phoenix/code_reloader.ex">>,<<"lib/phoenix/router.ex">>, + <<"lib/phoenix/channel">>,<<"lib/phoenix/channel/server.ex">>, + <<"lib/phoenix/socket.ex">>,<<"lib/phoenix/digester">>, + <<"lib/phoenix/digester/gzip.ex">>,<<"lib/phoenix/digester/compressor.ex">>, + <<"lib/phoenix/endpoint">>,<<"lib/phoenix/endpoint/cowboy2_handler.ex">>, + <<"lib/phoenix/endpoint/supervisor.ex">>, + <<"lib/phoenix/endpoint/cowboy2_adapter.ex">>, + <<"lib/phoenix/endpoint/watcher.ex">>, + <<"lib/phoenix/endpoint/render_errors.ex">>,<<"lib/phoenix/endpoint.ex">>, + 
<<"lib/phoenix/digester.ex">>,<<"lib/phoenix/exceptions.ex">>, + <<"lib/phoenix/socket">>,<<"lib/phoenix/socket/message.ex">>, + <<"lib/phoenix/socket/transport.ex">>,<<"lib/phoenix/socket/serializers">>, + <<"lib/phoenix/socket/serializers/v1_json_serializer.ex">>, + <<"lib/phoenix/socket/serializers/v2_json_serializer.ex">>, + <<"lib/phoenix/socket/pool_supervisor.ex">>, + <<"lib/phoenix/socket/serializer.ex">>,<<"lib/phoenix/router">>, + <<"lib/phoenix/router/route.ex">>, + <<"lib/phoenix/router/console_formatter.ex">>, + <<"lib/phoenix/router/resource.ex">>,<<"lib/phoenix/router/scope.ex">>, + <<"lib/phoenix/router/helpers.ex">>,<<"lib/mix">>,<<"lib/mix/tasks">>, + <<"lib/mix/tasks/phx.gen.live.ex">>,<<"lib/mix/tasks/compile.phoenix.ex">>, + <<"lib/mix/tasks/phx.gen.presence.ex">>,<<"lib/mix/tasks/phx.ex">>, + <<"lib/mix/tasks/phx.gen.auth">>, + <<"lib/mix/tasks/phx.gen.auth/hashing_library.ex">>, + <<"lib/mix/tasks/phx.gen.auth/injector.ex">>, + <<"lib/mix/tasks/phx.gen.auth/migration.ex">>, + <<"lib/mix/tasks/phx.gen.cert.ex">>,<<"lib/mix/tasks/phx.gen.notifier.ex">>, + <<"lib/mix/tasks/phx.routes.ex">>,<<"lib/mix/tasks/phx.gen.channel.ex">>, + <<"lib/mix/tasks/phx.gen.release.ex">>,<<"lib/mix/tasks/phx.gen.json.ex">>, + <<"lib/mix/tasks/phx.digest.clean.ex">>, + <<"lib/mix/tasks/phx.gen.embedded.ex">>,<<"lib/mix/tasks/phx.gen.ex">>, + <<"lib/mix/tasks/phx.gen.secret.ex">>,<<"lib/mix/tasks/phx.gen.auth.ex">>, + <<"lib/mix/tasks/phx.gen.schema.ex">>,<<"lib/mix/tasks/phx.digest.ex">>, + <<"lib/mix/tasks/phx.server.ex">>,<<"lib/mix/tasks/phx.gen.socket.ex">>, + <<"lib/mix/tasks/phx.gen.html.ex">>,<<"lib/mix/tasks/phx.gen.context.ex">>, + <<"lib/mix/phoenix">>,<<"lib/mix/phoenix/schema.ex">>, + <<"lib/mix/phoenix/context.ex">>,<<"lib/mix/phoenix.ex">>, + <<"lib/phoenix.ex">>,<<"priv">>,<<"priv/static">>, + <<"priv/static/favicon.ico">>,<<"priv/static/phoenix.cjs.js">>, + <<"priv/static/phoenix.mjs">>,<<"priv/static/phoenix.cjs.js.map">>, + <<"priv/static/phoenix.js">>,<<"priv/static/phoenix.mjs.map">>, + <<"priv/static/phoenix.png">>,<<"priv/static/phoenix.min.js">>, + <<"priv/templates">>,<<"priv/templates/phx.gen.html">>, + <<"priv/templates/phx.gen.html/new.html.heex">>, + <<"priv/templates/phx.gen.html/edit.html.heex">>, + <<"priv/templates/phx.gen.html/index.html.heex">>, + <<"priv/templates/phx.gen.html/form.html.heex">>, + <<"priv/templates/phx.gen.html/controller.ex">>, + <<"priv/templates/phx.gen.html/show.html.heex">>, + <<"priv/templates/phx.gen.html/controller_test.exs">>, + <<"priv/templates/phx.gen.html/view.ex">>, + <<"priv/templates/phx.gen.embedded">>, + <<"priv/templates/phx.gen.embedded/embedded_schema.ex">>, + <<"priv/templates/phx.gen.auth">>, + <<"priv/templates/phx.gen.auth/confirmation_new.html.heex">>, + <<"priv/templates/phx.gen.auth/registration_new.html.heex">>, + <<"priv/templates/phx.gen.auth/settings_edit.html.heex">>, + <<"priv/templates/phx.gen.auth/context_fixtures_functions.ex">>, + <<"priv/templates/phx.gen.auth/context_functions.ex">>, + <<"priv/templates/phx.gen.auth/confirmation_controller_test.exs">>, + <<"priv/templates/phx.gen.auth/notifier.ex">>, + <<"priv/templates/phx.gen.auth/reset_password_controller.ex">>, + <<"priv/templates/phx.gen.auth/confirmation_edit.html.heex">>, + <<"priv/templates/phx.gen.auth/_menu.html.heex">>, + <<"priv/templates/phx.gen.auth/registration_controller_test.exs">>, + <<"priv/templates/phx.gen.auth/auth.ex">>, + <<"priv/templates/phx.gen.auth/conn_case.exs">>, + <<"priv/templates/phx.gen.auth/test_cases.exs">>, + 
<<"priv/templates/phx.gen.auth/session_view.ex">>, + <<"priv/templates/phx.gen.auth/reset_password_new.html.heex">>, + <<"priv/templates/phx.gen.auth/reset_password_controller_test.exs">>, + <<"priv/templates/phx.gen.auth/auth_test.exs">>, + <<"priv/templates/phx.gen.auth/reset_password_edit.html.heex">>, + <<"priv/templates/phx.gen.auth/registration_controller.ex">>, + <<"priv/templates/phx.gen.auth/settings_controller_test.exs">>, + <<"priv/templates/phx.gen.auth/confirmation_controller.ex">>, + <<"priv/templates/phx.gen.auth/registration_view.ex">>, + <<"priv/templates/phx.gen.auth/migration.ex">>, + <<"priv/templates/phx.gen.auth/settings_controller.ex">>, + <<"priv/templates/phx.gen.auth/schema.ex">>, + <<"priv/templates/phx.gen.auth/routes.ex">>, + <<"priv/templates/phx.gen.auth/session_controller.ex">>, + <<"priv/templates/phx.gen.auth/session_new.html.heex">>, + <<"priv/templates/phx.gen.auth/session_controller_test.exs">>, + <<"priv/templates/phx.gen.auth/schema_token.ex">>, + <<"priv/templates/phx.gen.auth/settings_view.ex">>, + <<"priv/templates/phx.gen.auth/reset_password_view.ex">>, + <<"priv/templates/phx.gen.auth/confirmation_view.ex">>, + <<"priv/templates/phx.gen.presence">>, + <<"priv/templates/phx.gen.presence/presence.ex">>, + <<"priv/templates/phx.gen.channel">>, + <<"priv/templates/phx.gen.channel/channel.ex">>, + <<"priv/templates/phx.gen.channel/channel_test.exs">>, + <<"priv/templates/phx.gen.channel/channel_case.ex">>, + <<"priv/templates/phx.gen.release">>, + <<"priv/templates/phx.gen.release/dockerignore.eex">>, + <<"priv/templates/phx.gen.release/Dockerfile.eex">>, + <<"priv/templates/phx.gen.release/release.ex">>, + <<"priv/templates/phx.gen.release/rel">>, + <<"priv/templates/phx.gen.release/rel/migrate.bat.eex">>, + <<"priv/templates/phx.gen.release/rel/migrate.sh.eex">>, + <<"priv/templates/phx.gen.release/rel/server.sh.eex">>, + <<"priv/templates/phx.gen.release/rel/server.bat.eex">>, + <<"priv/templates/phx.gen.socket">>, + <<"priv/templates/phx.gen.socket/socket.js">>, + <<"priv/templates/phx.gen.socket/socket.ex">>, + <<"priv/templates/phx.gen.notifier">>, + <<"priv/templates/phx.gen.notifier/notifier.ex">>, + <<"priv/templates/phx.gen.notifier/notifier_test.exs">>, + <<"priv/templates/phx.gen.live">>, + <<"priv/templates/phx.gen.live/index.html.heex">>, + <<"priv/templates/phx.gen.live/show.ex">>, + <<"priv/templates/phx.gen.live/live_test.exs">>, + <<"priv/templates/phx.gen.live/index.ex">>, + <<"priv/templates/phx.gen.live/show.html.heex">>, + <<"priv/templates/phx.gen.live/form_component.html.heex">>, + <<"priv/templates/phx.gen.live/form_component.ex">>, + <<"priv/templates/phx.gen.live/live_helpers.ex">>, + <<"priv/templates/phx.gen.context">>, + <<"priv/templates/phx.gen.context/fixtures_module.ex">>, + <<"priv/templates/phx.gen.context/test_cases.exs">>, + <<"priv/templates/phx.gen.context/context_test.exs">>, + <<"priv/templates/phx.gen.context/schema_access.ex">>, + <<"priv/templates/phx.gen.context/fixtures.ex">>, + <<"priv/templates/phx.gen.context/context.ex">>, + <<"priv/templates/phx.gen.context/access_no_schema.ex">>, + <<"priv/templates/phx.gen.schema">>, + <<"priv/templates/phx.gen.schema/schema.ex">>, + <<"priv/templates/phx.gen.schema/migration.exs">>, + <<"priv/templates/phx.gen.json">>, + <<"priv/templates/phx.gen.json/fallback_controller.ex">>, + <<"priv/templates/phx.gen.json/controller.ex">>, + <<"priv/templates/phx.gen.json/changeset_view.ex">>, + <<"priv/templates/phx.gen.json/controller_test.exs">>, + 
<<"priv/templates/phx.gen.json/view.ex">>,<<"CHANGELOG.md">>, + <<"LICENSE.md">>,<<"mix.exs">>,<<"package.json">>,<<"README.md">>, + <<".formatter.exs">>]}. +{<<"licenses">>,[<<"MIT">>]}. +{<<"links">>, + [{<<"GitHub">>,<<"https://github.com/phoenixframework/phoenix">>}]}. +{<<"name">>,<<"phoenix">>}. +{<<"requirements">>, + [[{<<"app">>,<<"plug">>}, + {<<"name">>,<<"plug">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 1.10">>}], + [{<<"app">>,<<"plug_crypto">>}, + {<<"name">>,<<"plug_crypto">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 1.2">>}], + [{<<"app">>,<<"telemetry">>}, + {<<"name">>,<<"telemetry">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 0.4 or ~> 1.0">>}], + [{<<"app">>,<<"phoenix_pubsub">>}, + {<<"name">>,<<"phoenix_pubsub">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 2.0">>}], + [{<<"app">>,<<"phoenix_view">>}, + {<<"name">>,<<"phoenix_view">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 1.0">>}], + [{<<"app">>,<<"plug_cowboy">>}, + {<<"name">>,<<"plug_cowboy">>}, + {<<"optional">>,true}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 2.2">>}], + [{<<"app">>,<<"jason">>}, + {<<"name">>,<<"jason">>}, + {<<"optional">>,true}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 1.0">>}]]}. +{<<"version">>,<<"1.6.12">>}. diff --git a/deps/phoenix/lib/mix/phoenix.ex b/deps/phoenix/lib/mix/phoenix.ex new file mode 100644 index 0000000..39d3b00 --- /dev/null +++ b/deps/phoenix/lib/mix/phoenix.ex @@ -0,0 +1,362 @@ +defmodule Mix.Phoenix do + # Conveniences for Phoenix tasks. + @moduledoc false + + @doc """ + Evals EEx files from source dir. + + Files are evaluated against EEx according to + the given binding. + """ + def eval_from(apps, source_file_path, binding) do + sources = Enum.map(apps, &to_app_source(&1, source_file_path)) + + content = + Enum.find_value(sources, fn source -> + File.exists?(source) && File.read!(source) + end) || raise "could not find #{source_file_path} in any of the sources" + + EEx.eval_string(content, binding) + end + + @doc """ + Copies files from source dir to target dir + according to the given map. + + Files are evaluated against EEx according to + the given binding. + """ + def copy_from(apps, source_dir, binding, mapping) when is_list(mapping) do + roots = Enum.map(apps, &to_app_source(&1, source_dir)) + + for {format, source_file_path, target} <- mapping do + source = + Enum.find_value(roots, fn root -> + source = Path.join(root, source_file_path) + if File.exists?(source), do: source + end) || raise "could not find #{source_file_path} in any of the sources" + + case format do + :text -> Mix.Generator.create_file(target, File.read!(source)) + :eex -> Mix.Generator.create_file(target, EEx.eval_file(source, binding)) + :new_eex -> + if File.exists?(target) do + :ok + else + Mix.Generator.create_file(target, EEx.eval_file(source, binding)) + end + end + end + end + + defp to_app_source(path, source_dir) when is_binary(path), + do: Path.join(path, source_dir) + defp to_app_source(app, source_dir) when is_atom(app), + do: Application.app_dir(app, source_dir) + + @doc """ + Inflects path, scope, alias and more from the given name. 
+ + ## Examples + + iex> Mix.Phoenix.inflect("user") + [alias: "User", + human: "User", + base: "Phoenix", + web_module: "PhoenixWeb", + module: "Phoenix.User", + scoped: "User", + singular: "user", + path: "user"] + + iex> Mix.Phoenix.inflect("Admin.User") + [alias: "User", + human: "User", + base: "Phoenix", + web_module: "PhoenixWeb", + module: "Phoenix.Admin.User", + scoped: "Admin.User", + singular: "user", + path: "admin/user"] + + iex> Mix.Phoenix.inflect("Admin.SuperUser") + [alias: "SuperUser", + human: "Super user", + base: "Phoenix", + web_module: "PhoenixWeb", + module: "Phoenix.Admin.SuperUser", + scoped: "Admin.SuperUser", + singular: "super_user", + path: "admin/super_user"] + + """ + def inflect(singular) do + base = Mix.Phoenix.base() + web_module = base |> web_module() |> inspect() + scoped = Phoenix.Naming.camelize(singular) + path = Phoenix.Naming.underscore(scoped) + singular = String.split(path, "/") |> List.last + module = Module.concat(base, scoped) |> inspect + alias = String.split(module, ".") |> List.last + human = Phoenix.Naming.humanize(singular) + + [alias: alias, + human: human, + base: base, + web_module: web_module, + module: module, + scoped: scoped, + singular: singular, + path: path] + end + + @doc """ + Checks the availability of a given module name. + """ + def check_module_name_availability!(name) do + name = Module.concat(Elixir, name) + if Code.ensure_loaded?(name) do + Mix.raise "Module name #{inspect name} is already taken, please choose another name" + end + end + + @doc """ + Returns the module base name based on the configuration value. + + config :my_app + namespace: My.App + + """ + def base do + app_base(otp_app()) + end + + @doc """ + Returns the context module base name based on the configuration value. + + config :my_app + namespace: My.App + + """ + def context_base(ctx_app) do + app_base(ctx_app) + end + + defp app_base(app) do + case Application.get_env(app, :namespace, app) do + ^app -> app |> to_string() |> Phoenix.Naming.camelize() + mod -> mod |> inspect() + end + end + + @doc """ + Returns the OTP app from the Mix project configuration. + """ + def otp_app do + Mix.Project.config() |> Keyword.fetch!(:app) + end + + @doc """ + Returns all compiled modules in a project. + """ + def modules do + Mix.Project.compile_path() + |> Path.join("*.beam") + |> Path.wildcard() + |> Enum.map(&beam_to_module/1) + end + + defp beam_to_module(path) do + path |> Path.basename(".beam") |> String.to_atom() + end + + @doc """ + The paths to look for template files for generators. + + Defaults to checking the current app's `priv` directory, + and falls back to Phoenix's `priv` directory. + """ + def generator_paths do + [".", :phoenix] + end + + @doc """ + Checks if the given `app_path` is inside an umbrella. + """ + def in_umbrella?(app_path) do + umbrella = Path.expand(Path.join [app_path, "..", ".."]) + mix_path = Path.join(umbrella, "mix.exs") + apps_path = Path.join(umbrella, "apps") + File.exists?(mix_path) && File.exists?(apps_path) + end + + @doc """ + Returns the web prefix to be used in generated file specs. + """ + def web_path(ctx_app, rel_path \\ "") when is_atom(ctx_app) do + this_app = otp_app() + + if ctx_app == this_app do + Path.join(["lib", "#{this_app}_web", rel_path]) + else + Path.join(["lib", to_string(this_app), rel_path]) + end + end + + @doc """ + Returns the context app path prefix to be used in generated context files. 
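A rough sketch of the expected results; the `:my_app` / umbrella layout below are assumptions for illustration only:

      # invoked from :my_app itself – the relative path is returned unchanged
      Mix.Phoenix.context_app_path(:my_app, "priv/repo/migrations")
      #=> "priv/repo/migrations"

      # invoked from an umbrella web app configured with context_app: :my_app –
      # the sibling app's path is prepended
      Mix.Phoenix.context_app_path(:my_app, "priv/repo/migrations")
      #=> "../my_app/priv/repo/migrations"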
+ """ + def context_app_path(ctx_app, rel_path) when is_atom(ctx_app) do + this_app = otp_app() + + if ctx_app == this_app do + rel_path + else + app_path = + case Application.get_env(this_app, :generators)[:context_app] do + {^ctx_app, path} -> Path.relative_to_cwd(path) + _ -> mix_app_path(ctx_app, this_app) + end + Path.join(app_path, rel_path) + end + end + + @doc """ + Returns the context lib path to be used in generated context files. + """ + def context_lib_path(ctx_app, rel_path) when is_atom(ctx_app) do + context_app_path(ctx_app, Path.join(["lib", to_string(ctx_app), rel_path])) + end + + @doc """ + Returns the context test path to be used in generated context files. + """ + def context_test_path(ctx_app, rel_path) when is_atom(ctx_app) do + context_app_path(ctx_app, Path.join(["test", to_string(ctx_app), rel_path])) + end + + @doc """ + Returns the OTP context app. + """ + def context_app do + this_app = otp_app() + + case fetch_context_app(this_app) do + {:ok, app} -> app + :error -> this_app + end + end + + @doc """ + Returns the test prefix to be used in generated file specs. + """ + def web_test_path(ctx_app, rel_path \\ "") when is_atom(ctx_app) do + this_app = otp_app() + + if ctx_app == this_app do + Path.join(["test", "#{this_app}_web", rel_path]) + else + Path.join(["test", to_string(this_app), rel_path]) + end + end + + defp fetch_context_app(this_otp_app) do + case Application.get_env(this_otp_app, :generators)[:context_app] do + nil -> + :error + false -> + Mix.raise """ + no context_app configured for current application #{this_otp_app}. + + Add the context_app generators config in config.exs, or pass the + --context-app option explicitly to the generators. For example: + + via config: + + config :#{this_otp_app}, :generators, + context_app: :some_app + + via cli option: + + mix phx.gen.[task] --context-app some_app + + Note: cli option only works when `context_app` is not set to `false` + in the config. + """ + {app, _path} -> + {:ok, app} + app -> + {:ok, app} + end + end + + defp mix_app_path(app, this_otp_app) do + case Mix.Project.deps_paths() do + %{^app => path} -> + Path.relative_to_cwd(path) + deps -> + Mix.raise """ + no directory for context_app #{inspect app} found in #{this_otp_app}'s deps. + + Ensure you have listed #{inspect app} as an in_umbrella dependency in mix.exs: + + def deps do + [ + {:#{app}, in_umbrella: true}, + ... + ] + end + + Existing deps: + + #{inspect Map.keys(deps)} + + """ + end + end + + @doc """ + Prompts to continue if any files exist. + """ + def prompt_for_conflicts(generator_files) do + file_paths = + Enum.flat_map(generator_files, fn + {:new_eex, _, _path} -> [] + {_kind, _, path} -> [path] + end) + + case Enum.filter(file_paths, &File.exists?(&1)) do + [] -> :ok + conflicts -> + Mix.shell().info""" + The following files conflict with new files to be generated: + + #{Enum.map_join(conflicts, "\n", &" * #{&1}")} + + See the --web option to namespace similarly named resources + """ + unless Mix.shell().yes?("Proceed with interactive overwrite?") do + System.halt() + end + end + end + + @doc """ + Returns the web module prefix. 
+ """ + def web_module(base) do + if base |> to_string() |> String.ends_with?("Web") do + Module.concat([base]) + else + Module.concat(["#{base}Web"]) + end + end + + def to_text(data) do + inspect data, limit: :infinity, printable_limit: :infinity + end + + def prepend_newline(string) do + "\n" <> string + end +end diff --git a/deps/phoenix/lib/mix/phoenix/context.ex b/deps/phoenix/lib/mix/phoenix/context.ex new file mode 100644 index 0000000..27fbc65 --- /dev/null +++ b/deps/phoenix/lib/mix/phoenix/context.ex @@ -0,0 +1,101 @@ +defmodule Mix.Phoenix.Context do + @moduledoc false + + alias Mix.Phoenix.{Context, Schema} + + defstruct name: nil, + module: nil, + schema: nil, + alias: nil, + base_module: nil, + web_module: nil, + basename: nil, + file: nil, + test_file: nil, + test_fixtures_file: nil, + dir: nil, + generate?: true, + context_app: nil, + opts: [] + + def valid?(context) do + context =~ ~r/^[A-Z]\w*(\.[A-Z]\w*)*$/ + end + + def new(context_name, opts) do + new(context_name, %Schema{}, opts) + end + + def new(context_name, %Schema{} = schema, opts) do + ctx_app = opts[:context_app] || Mix.Phoenix.context_app() + base = Module.concat([Mix.Phoenix.context_base(ctx_app)]) + module = Module.concat(base, context_name) + alias = Module.concat([module |> Module.split() |> List.last()]) + basedir = Phoenix.Naming.underscore(context_name) + basename = Path.basename(basedir) + dir = Mix.Phoenix.context_lib_path(ctx_app, basedir) + file = dir <> ".ex" + test_dir = Mix.Phoenix.context_test_path(ctx_app, basedir) + test_file = test_dir <> "_test.exs" + test_fixtures_dir = Mix.Phoenix.context_app_path(ctx_app, "test/support/fixtures") + test_fixtures_file = Path.join([test_fixtures_dir, basedir <> "_fixtures.ex"]) + generate? = Keyword.get(opts, :context, true) + + %Context{ + name: context_name, + module: module, + schema: schema, + alias: alias, + base_module: base, + web_module: web_module(), + basename: basename, + file: file, + test_file: test_file, + test_fixtures_file: test_fixtures_file, + dir: dir, + generate?: generate?, + context_app: ctx_app, + opts: opts} + end + + def pre_existing?(%Context{file: file}), do: File.exists?(file) + + def pre_existing_tests?(%Context{test_file: file}), do: File.exists?(file) + + def pre_existing_test_fixtures?(%Context{test_fixtures_file: file}), do: File.exists?(file) + + def function_count(%Context{file: file}) do + {_ast, count} = + file + |> File.read!() + |> Code.string_to_quoted!() + |> Macro.postwalk(0, fn + {:def, _, _} = node, count -> {node, count + 1} + {:defdelegate, _, _} = node, count -> {node, count + 1} + node, count -> {node, count} + end) + + count + end + + def file_count(%Context{dir: dir}) do + dir + |> Path.join("**/*.ex") + |> Path.wildcard() + |> Enum.count() + end + + defp web_module do + base = Mix.Phoenix.base() + cond do + Mix.Phoenix.context_app() != Mix.Phoenix.otp_app() -> + Module.concat([base]) + + String.ends_with?(base, "Web") -> + Module.concat([base]) + + true -> + Module.concat(["#{base}Web"]) + end + end +end diff --git a/deps/phoenix/lib/mix/phoenix/schema.ex b/deps/phoenix/lib/mix/phoenix/schema.ex new file mode 100644 index 0000000..13bc26f --- /dev/null +++ b/deps/phoenix/lib/mix/phoenix/schema.ex @@ -0,0 +1,540 @@ +defmodule Mix.Phoenix.Schema do + @moduledoc false + + alias Mix.Phoenix.Schema + + defstruct module: nil, + repo: nil, + table: nil, + collection: nil, + embedded?: false, + generate?: true, + opts: [], + alias: nil, + file: nil, + attrs: [], + string_attr: nil, + plural: nil, + singular: nil, 
+ uniques: [], + redacts: [], + assocs: [], + types: [], + indexes: [], + defaults: [], + human_singular: nil, + human_plural: nil, + binary_id: false, + migration_defaults: nil, + migration?: false, + params: %{}, + sample_id: nil, + web_path: nil, + web_namespace: nil, + context_app: nil, + route_helper: nil, + migration_module: nil, + fixture_unique_functions: %{}, + fixture_params: %{}, + prefix: nil + + @valid_types [ + :integer, + :float, + :decimal, + :boolean, + :map, + :string, + :array, + :references, + :text, + :date, + :time, + :time_usec, + :naive_datetime, + :naive_datetime_usec, + :utc_datetime, + :utc_datetime_usec, + :uuid, + :binary, + :enum + ] + + def valid_types, do: @valid_types + + def valid?(schema) do + schema =~ ~r/^[A-Z]\w*(\.[A-Z]\w*)*$/ + end + + def new(schema_name, schema_plural, cli_attrs, opts) do + ctx_app = opts[:context_app] || Mix.Phoenix.context_app() + otp_app = Mix.Phoenix.otp_app() + opts = Keyword.merge(Application.get_env(otp_app, :generators, []), opts) + base = Mix.Phoenix.context_base(ctx_app) + basename = Phoenix.Naming.underscore(schema_name) + module = Module.concat([base, schema_name]) + repo = opts[:repo] || Module.concat([base, "Repo"]) + file = Mix.Phoenix.context_lib_path(ctx_app, basename <> ".ex") + table = opts[:table] || schema_plural + {cli_attrs, uniques, redacts} = extract_attr_flags(cli_attrs) + {assocs, attrs} = partition_attrs_and_assocs(module, attrs(cli_attrs)) + types = types(attrs) + web_namespace = opts[:web] && Phoenix.Naming.camelize(opts[:web]) + web_path = web_namespace && Phoenix.Naming.underscore(web_namespace) + embedded? = Keyword.get(opts, :embedded, false) + generate? = Keyword.get(opts, :schema, true) + + singular = + module + |> Module.split() + |> List.last() + |> Phoenix.Naming.underscore() + + collection = if schema_plural == singular, do: singular <> "_collection", else: schema_plural + string_attr = string_attr(types) + create_params = params(attrs, :create) + default_params_key = + case Enum.at(create_params, 0) do + {key, _} -> key + nil -> :some_field + end + fixture_unique_functions = fixture_unique_functions(singular, uniques, attrs) + + %Schema{ + opts: opts, + migration?: Keyword.get(opts, :migration, true), + module: module, + repo: repo, + table: table, + embedded?: embedded?, + alias: module |> Module.split() |> List.last() |> Module.concat(nil), + file: file, + attrs: attrs, + plural: schema_plural, + singular: singular, + collection: collection, + assocs: assocs, + types: types, + defaults: schema_defaults(attrs), + uniques: uniques, + redacts: redacts, + indexes: indexes(table, assocs, uniques), + human_singular: Phoenix.Naming.humanize(singular), + human_plural: Phoenix.Naming.humanize(schema_plural), + binary_id: opts[:binary_id], + migration_defaults: migration_defaults(attrs), + string_attr: string_attr, + params: %{ + create: create_params, + update: params(attrs, :update), + default_key: string_attr || default_params_key + }, + web_namespace: web_namespace, + web_path: web_path, + route_helper: route_helper(web_path, singular), + sample_id: sample_id(opts), + context_app: ctx_app, + generate?: generate?, + migration_module: migration_module(), + fixture_unique_functions: fixture_unique_functions, + fixture_params: fixture_params(attrs, fixture_unique_functions), + prefix: opts[:prefix] + } + end + + @doc """ + Returns the string value of the default schema param. 
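A small sketch, assuming a schema generated with a `title:string` attribute so that `:title` becomes the default params key:

      default_param(schema, :create)
      #=> "some title"

      default_param(schema, :update)
      #=> "some updated title"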
+ """ + def default_param(%Schema{} = schema, action) do + schema.params + |> Map.fetch!(action) + |> Map.fetch!(schema.params.default_key) + |> to_string() + end + + def extract_attr_flags(cli_attrs) do + {attrs, uniques, redacts} = Enum.reduce(cli_attrs, {[], [], []}, fn attr, {attrs, uniques, redacts} -> + [attr_name | rest] = String.split(attr, ":") + attr_name = String.to_atom(attr_name) + split_flags(Enum.reverse(rest), attr_name, attrs, uniques, redacts) + end) + + {Enum.reverse(attrs), uniques, redacts} + end + + defp split_flags(["unique" | rest], name, attrs, uniques, redacts), + do: split_flags(rest, name, attrs, [name | uniques], redacts) + + defp split_flags(["redact" | rest], name, attrs, uniques, redacts), + do: split_flags(rest, name, attrs, uniques, [name | redacts]) + + defp split_flags(rest, name, attrs, uniques, redacts), + do: {[Enum.join([name | Enum.reverse(rest)], ":") | attrs ], uniques, redacts} + + @doc """ + Parses the attrs as received by generators. + """ + def attrs(attrs) do + Enum.map(attrs, fn attr -> + attr + |> String.split(":", parts: 3) + |> list_to_attr() + |> validate_attr!() + end) + end + + @doc """ + Generates some sample params based on the parsed attributes. + """ + def params(attrs, action \\ :create) when action in [:create, :update] do + attrs + |> Enum.reject(fn + {_, {:references, _}} -> true + {_, _} -> false + end) + |> Enum.into(%{}, fn {k, t} -> {k, type_to_default(k, t, action)} end) + end + + @doc """ + Converts the given value to map format when it is a date, time, datetime or naive_datetime. + + Since `form_component.html.heex` generated by the live generator uses selects for dates and/or + times, fixtures must use map format for those fields in order to submit the live form. + """ + def live_form_value(%Date{} = date), do: %{day: date.day, month: date.month, year: date.year} + + def live_form_value(%Time{} = time), do: %{hour: time.hour, minute: time.minute} + + def live_form_value(%NaiveDateTime{} = naive), + do: %{ + day: naive.day, + month: naive.month, + year: naive.year, + hour: naive.hour, + minute: naive.minute + } + + def live_form_value(%DateTime{} = naive), + do: %{ + day: naive.day, + month: naive.month, + year: naive.year, + hour: naive.hour, + minute: naive.minute + } + + def live_form_value(value), do: value + + @doc """ + Build an invalid value for `@invalid_attrs` which is nil by default. + + * In case the value is a list, this will return an empty array. + * In case the value is date, datetime, naive_datetime or time, this will return an invalid date. + * In case it is a boolean, we keep it as false + """ + def invalid_form_value(value) when is_list(value), do: [] + + def invalid_form_value(%{day: _day, month: _month, year: _year} = date), + do: %{date | day: 30, month: 02} + + def invalid_form_value(%{hour: _hour, minute: _minute} = value), do: value + def invalid_form_value(true), do: false + def invalid_form_value(_value), do: nil + + @doc """ + Generates an invalid error message according to the params present in the schema. 
+ """ + def failed_render_change_message(schema) do + if schema.params.create |> Map.values() |> Enum.any?(&date_value?/1) do + "is invalid" + else + "can't be blank" + end + end + + def type_for_migration({:enum, _}), do: :string + def type_for_migration(other), do: other + + def format_fields_for_schema(schema) do + Enum.map_join(schema.types, "\n", fn {k, v} -> + " field #{inspect(k)}, #{type_and_opts_for_schema(v)}#{schema.defaults[k]}#{maybe_redact_field(k in schema.redacts)}" + end) + end + + def type_and_opts_for_schema({:enum, opts}), do: ~s|Ecto.Enum, values: #{inspect Keyword.get(opts, :values)}| + def type_and_opts_for_schema(other), do: inspect other + + def maybe_redact_field(true), do: ", redact: true" + def maybe_redact_field(false), do: "" + + defp date_value?(%{day: _day, month: _month, year: _year}), do: true + defp date_value?(_value), do: false + + @doc """ + Returns the string value for use in EEx templates. + """ + def value(schema, field, value) do + schema.types + |> Map.fetch!(field) + |> inspect_value(value) + end + defp inspect_value(:decimal, value), do: "Decimal.new(\"#{value}\")" + defp inspect_value(_type, value), do: inspect(value) + + defp list_to_attr([key]), do: {String.to_atom(key), :string} + defp list_to_attr([key, value]), do: {String.to_atom(key), String.to_atom(value)} + defp list_to_attr([key, comp, value]) do + {String.to_atom(key), {String.to_atom(comp), String.to_atom(value)}} + end + + @one_day_in_seconds 24 * 3600 + + defp type_to_default(key, t, :create) do + case t do + {:array, _} -> [] + {:enum, values} -> build_enum_values(values, :create) + :integer -> 42 + :float -> 120.5 + :decimal -> "120.5" + :boolean -> true + :map -> %{} + :text -> "some #{key}" + :date -> Date.add(Date.utc_today(), -1) + :time -> ~T[14:00:00] + :time_usec -> ~T[14:00:00.000000] + :uuid -> "7488a646-e31f-11e4-aace-600308960662" + :utc_datetime -> DateTime.add(build_utc_datetime(), -@one_day_in_seconds, :second, Calendar.UTCOnlyTimeZoneDatabase) + :utc_datetime_usec -> DateTime.add(build_utc_datetime_usec(), -@one_day_in_seconds, :second, Calendar.UTCOnlyTimeZoneDatabase) + :naive_datetime -> NaiveDateTime.add(build_utc_naive_datetime(), -@one_day_in_seconds) + :naive_datetime_usec -> NaiveDateTime.add(build_utc_naive_datetime_usec(), -@one_day_in_seconds) + _ -> "some #{key}" + end + end + defp type_to_default(key, t, :update) do + case t do + {:array, _} -> [] + {:enum, values} -> build_enum_values(values, :update) + :integer -> 43 + :float -> 456.7 + :decimal -> "456.7" + :boolean -> false + :map -> %{} + :text -> "some updated #{key}" + :date -> Date.utc_today() + :time -> ~T[15:01:01] + :time_usec -> ~T[15:01:01.000000] + :uuid -> "7488a646-e31f-11e4-aace-600308960668" + :utc_datetime -> build_utc_datetime() + :utc_datetime_usec -> build_utc_datetime_usec() + :naive_datetime -> build_utc_naive_datetime() + :naive_datetime_usec -> build_utc_naive_datetime_usec() + _ -> "some updated #{key}" + end + end + + defp build_enum_values(values, action) do + case {action, translate_enum_vals(values)} do + {:create, vals} -> hd(vals) + {:update, [val | []]} -> val + {:update, vals} -> vals |> tl() |> hd() + end + end + + defp build_utc_datetime_usec, + do: %{DateTime.utc_now() | second: 0, microsecond: {0, 6}} + + defp build_utc_datetime, + do: DateTime.truncate(build_utc_datetime_usec(), :second) + + defp build_utc_naive_datetime_usec, + do: %{NaiveDateTime.utc_now() | second: 0, microsecond: {0, 6}} + + defp build_utc_naive_datetime, + do: 
NaiveDateTime.truncate(build_utc_naive_datetime_usec(), :second) + + @enum_missing_value_error """ + Enum type requires at least one value + For example: + + mix phx.gen.schema Comment comments body:text status:enum:published:unpublished + """ + + defp validate_attr!({name, :datetime}), do: validate_attr!({name, :naive_datetime}) + defp validate_attr!({name, :array}) do + Mix.raise """ + Phoenix generators expect the type of the array to be given to #{name}:array. + For example: + + mix phx.gen.schema Post posts settings:array:string + """ + end + defp validate_attr!({_name, :enum}), do: Mix.raise @enum_missing_value_error + defp validate_attr!({_name, type} = attr) when type in @valid_types, do: attr + defp validate_attr!({_name, {:enum, _vals}} = attr), do: attr + defp validate_attr!({_name, {type, _}} = attr) when type in @valid_types, do: attr + defp validate_attr!({_, type}) do + Mix.raise "Unknown type `#{inspect type}` given to generator. " <> + "The supported types are: #{@valid_types |> Enum.sort() |> Enum.join(", ")}" + end + + defp partition_attrs_and_assocs(schema_module, attrs) do + {assocs, attrs} = + Enum.split_with(attrs, fn + {_, {:references, _}} -> + true + {key, :references} -> + Mix.raise """ + Phoenix generators expect the table to be given to #{key}:references. + For example: + + mix phx.gen.schema Comment comments body:text post_id:references:posts + """ + _ -> false + end) + + assocs = + Enum.map(assocs, fn {key_id, {:references, source}} -> + key = String.replace(Atom.to_string(key_id), "_id", "") + base = schema_module |> Module.split() |> Enum.drop(-1) + module = Module.concat(base ++ [Phoenix.Naming.camelize(key)]) + {String.to_atom(key), key_id, inspect(module), source} + end) + + {assocs, attrs} + end + + defp schema_defaults(attrs) do + Enum.into(attrs, %{}, fn + {key, :boolean} -> {key, ", default: false"} + {key, _} -> {key, ""} + end) + end + + defp string_attr(types) do + Enum.find_value(types, fn + {key, :string} -> key + _ -> false + end) + end + + defp types(attrs) do + Enum.into(attrs, %{}, fn + {key, {:enum, vals}} -> {key, {:enum, values: translate_enum_vals(vals)}} + {key, {root, val}} -> {key, {root, schema_type(val)}} + {key, val} -> {key, schema_type(val)} + end) + end + + def translate_enum_vals(vals) do + vals + |> Atom.to_string() + |> String.split(":") + |> Enum.map(&String.to_atom/1) + end + + defp schema_type(:text), do: :string + defp schema_type(:uuid), do: Ecto.UUID + defp schema_type(val) do + if Code.ensure_loaded?(Ecto.Type) and not Ecto.Type.primitive?(val) do + Mix.raise "Unknown type `#{val}` given to generator" + else + val + end + end + + defp indexes(table, assocs, uniques) do + uniques = Enum.map(uniques, fn key -> {key, true} end) + assocs = Enum.map(assocs, fn {_, key, _, _} -> {key, false} end) + + (uniques ++ assocs) + |> Enum.uniq_by(fn {key, _} -> key end) + |> Enum.map(fn + {key, false} -> "create index(:#{table}, [:#{key}])" + {key, true} -> "create unique_index(:#{table}, [:#{key}])" + end) + end + + defp migration_defaults(attrs) do + Enum.into(attrs, %{}, fn + {key, :boolean} -> {key, ", default: false, null: false"} + {key, _} -> {key, ""} + end) + end + + defp sample_id(opts) do + if Keyword.get(opts, :binary_id, false) do + Keyword.get(opts, :sample_binary_id, "11111111-1111-1111-1111-111111111111") + else + -1 + end + end + + defp route_helper(web_path, singular) do + "#{web_path}_#{singular}" + |> String.trim_leading("_") + |> String.replace("/", "_") + end + + defp migration_module do + case 
Application.get_env(:ecto_sql, :migration_module, Ecto.Migration) do + migration_module when is_atom(migration_module) -> migration_module + other -> Mix.raise "Expected :migration_module to be a module, got: #{inspect(other)}" + end + end + + defp fixture_unique_functions(singular, uniques, attrs) do + uniques + |> Enum.filter(&Keyword.has_key?(attrs, &1)) + |> Enum.into(%{}, fn attr -> + function_name = "unique_#{singular}_#{attr}" + + {function_def, needs_impl?} = + case Keyword.fetch!(attrs, attr) do + :integer -> + function_def = + """ + def #{function_name}, do: System.unique_integer([:positive]) + """ + + {function_def, false} + + type when type in [:string, :text] -> + function_def = + """ + def #{function_name}, do: "some #{attr}\#{System.unique_integer([:positive])}" + """ + + {function_def, false} + + _ -> + function_def = + """ + def #{function_name} do + raise "implement the logic to generate a unique #{singular} #{attr}" + end + """ + + {function_def, true} + end + + + {attr, {function_name, function_def, needs_impl?}} + end) + end + + defp fixture_params(attrs, fixture_unique_functions) do + attrs + |> Enum.reject(fn + {_, {:references, _}} -> true + {_, _} -> false + end) + |> Enum.into(%{}, fn {attr, type} -> + case Map.fetch(fixture_unique_functions, attr) do + {:ok, {function_name, _function_def, _needs_impl?}} -> + {attr, "#{function_name}()"} + :error -> + {attr, inspect(type_to_default(attr, type, :create))} + end + end) + end +end diff --git a/deps/phoenix/lib/mix/tasks/compile.phoenix.ex b/deps/phoenix/lib/mix/tasks/compile.phoenix.ex new file mode 100644 index 0000000..cc1109a --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/compile.phoenix.ex @@ -0,0 +1,63 @@ +defmodule Mix.Tasks.Compile.Phoenix do + use Mix.Task + @recursive true + + @moduledoc """ + Compiles Phoenix source files that support code reloading. + + If you are using Elixir v1.11+ or later, there is no longer + a need to use this module as this functionality is now provided + by Elixir. Just remember to update `__phoenix_recompile__?` to + `__mix_recompile__?` in any module that may define it. 
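A minimal sketch of the newer callback; the module name and the tracked file are assumptions for illustration. Mix calls `__mix_recompile__?/0` on compiled modules and recompiles those that return `true`:

      defmodule MyApp.Data do
        @external_resource "priv/data.txt"
        @hash :erlang.md5(File.read!("priv/data.txt"))

        # Recompile this module whenever priv/data.txt changes on disk.
        def __mix_recompile__? do
          :erlang.md5(File.read!("priv/data.txt")) != @hash
        end
      end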
+ """ + + # TODO: Deprecate this module once we require Elixir v1.11+ + @mix_recompile Version.match?(System.version(), ">= 1.11.0") + + @doc false + def run(_args) do + {:ok, _} = Application.ensure_all_started(:phoenix) + + case touch() do + [] -> {:noop, []} + _ -> {:ok, []} + end + end + + @doc false + def touch do + Mix.Phoenix.modules() + |> modules_for_recompilation + |> modules_to_file_paths + |> Stream.map(&touch_if_exists(&1)) + |> Stream.filter(&(&1 == :ok)) + |> Enum.to_list() + end + + defp touch_if_exists(path) do + :file.change_time(path, :calendar.local_time()) + end + + defp modules_for_recompilation(modules) do + Stream.filter(modules, fn mod -> + Code.ensure_loaded?(mod) and (phoenix_recompile?(mod) or mix_recompile?(mod)) + end) + end + + defp phoenix_recompile?(mod) do + function_exported?(mod, :__phoenix_recompile__?, 0) and mod.__phoenix_recompile__?() + end + + if @mix_recompile do + # Recompile is provided by Mix, we don't need to do anything + defp mix_recompile?(_mod), do: false + else + defp mix_recompile?(mod) do + function_exported?(mod, :__mix_recompile__?, 0) and mod.__mix_recompile__?() + end + end + + defp modules_to_file_paths(modules) do + Stream.map(modules, fn mod -> mod.__info__(:compile)[:source] end) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.digest.clean.ex b/deps/phoenix/lib/mix/tasks/phx.digest.clean.ex new file mode 100644 index 0000000..976d27a --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.digest.clean.ex @@ -0,0 +1,66 @@ +defmodule Mix.Tasks.Phx.Digest.Clean do + use Mix.Task + @default_output_path "priv/static" + @default_age 3600 + @default_keep 2 + + @shortdoc "Removes old versions of static assets." + @recursive true + + @moduledoc """ + Removes old versions of compiled assets. + + By default, it will keep the latest version and + 2 previous versions as well as any digest created + in the last hour. + + $ mix phx.digest.clean + $ mix phx.digest.clean -o /www/public + $ mix phx.digest.clean --age 600 --keep 3 + $ mix phx.digest.clean --all + + ## Options + + * `-o, --output` - indicates the path to your compiled + assets directory. Defaults to `priv/static` + + * `--age` - specifies a maximum age (in seconds) for assets. + Files older than age that are not in the last `--keep` versions + will be removed. Defaults to 3600 (1 hour) + + * `--keep` - specifies how many previous versions of assets to keep. + Defaults to 2 previous versions + + * `--all` - specifies that all compiled assets (including the manifest) + will be removed. Note this overrides the age and keep switches. + """ + + @doc false + def run(args) do + switches = [output: :string, age: :integer, keep: :integer, all: :boolean] + {opts, _, _} = OptionParser.parse(args, switches: switches, aliases: [o: :output]) + output_path = opts[:output] || @default_output_path + age = opts[:age] || @default_age + keep = opts[:keep] || @default_keep + all? = opts[:all] || false + + {:ok, _} = Application.ensure_all_started(:phoenix) + + result = + if all?, + do: Phoenix.Digester.clean_all(output_path), + else: Phoenix.Digester.clean(output_path, age, keep) + + case result do + :ok -> + # We need to call build structure so everything we have cleaned from + # priv is removed from _build in case we have build_embedded set to + # true. In case it's not true, build structure is mostly a no-op, so we + # are fine. 
+ Mix.Project.build_structure() + Mix.shell().info [:green, "Clean complete for #{inspect output_path}"] + {:error, :invalid_path} -> + Mix.shell().error "The output path #{inspect output_path} does not exist" + end + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.digest.ex b/deps/phoenix/lib/mix/tasks/phx.digest.ex new file mode 100644 index 0000000..7e55467 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.digest.ex @@ -0,0 +1,70 @@ +defmodule Mix.Tasks.Phx.Digest do + use Mix.Task + @default_input_path "priv/static" + + @shortdoc "Digests and compresses static files" + @recursive true + + @moduledoc """ + Digests and compresses static files. + + $ mix phx.digest + $ mix phx.digest priv/static -o /www/public + + The first argument is the path where the static files are located. The + `-o` option indicates the path that will be used to save the digested and + compressed files. + + If no path is given, it will use `priv/static` as the input and output path. + + The output folder will contain: + + * the original file + * the file compressed with gzip + * a file containing the original file name and its digest + * a compressed file containing the file name and its digest + * a cache manifest file + + Example of generated files: + + * app.js + * app.js.gz + * app-eb0a5b9302e8d32828d8a73f137cc8f0.js + * app-eb0a5b9302e8d32828d8a73f137cc8f0.js.gz + * cache_manifest.json + + You can use `mix phx.digest.clean` to prune stale versions of the assets. + If you want to remove all produced files, run `mix phx.digest.clean --all`. + + ## vsn + + It is possible to digest the stylesheet asset references without the query + string "?vsn=d" with the option `--no-vsn`. + """ + + @default_opts [vsn: true] + + @doc false + def run(all_args) do + Mix.Task.run "compile", all_args + {opts, args, _} = OptionParser.parse(all_args, switches: [output: :string, vsn: :boolean], aliases: [o: :output]) + input_path = List.first(args) || @default_input_path + output_path = opts[:output] || input_path + with_vsn? = Keyword.merge(@default_opts, opts)[:vsn] + + Mix.Task.run "deps.loadpaths", all_args + {:ok, _} = Application.ensure_all_started(:phoenix) + + case Phoenix.Digester.compile(input_path, output_path, with_vsn?) do + :ok -> + # We need to call build structure so everything we have + # generated into priv is copied to _build in case we have + # build_embedded set to true. In case it's not true, + # build structure is mostly a no-op, so we are fine. + Mix.Project.build_structure() + Mix.shell().info [:green, "Check your digested files at #{inspect output_path}"] + {:error, :invalid_path} -> + Mix.shell().error "The input path #{inspect input_path} does not exist" + end + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.ex b/deps/phoenix/lib/mix/tasks/phx.ex new file mode 100644 index 0000000..2316dd0 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.ex @@ -0,0 +1,40 @@ +defmodule Mix.Tasks.Phx do + use Mix.Task + + @shortdoc "Prints Phoenix help information" + + @moduledoc """ + Prints Phoenix tasks and their information. 
+ + $ mix phx + + To print the Phoenix version, pass `-v` or `--version`, for example: + + $ mix phx --version + + """ + + @version Mix.Project.config()[:version] + + @impl true + @doc false + def run([version]) when version in ~w(-v --version) do + Mix.shell().info("Phoenix v#{@version}") + end + + def run(args) do + case args do + [] -> general() + _ -> Mix.raise "Invalid arguments, expected: mix phx" + end + end + + defp general() do + Application.ensure_all_started(:phoenix) + Mix.shell().info "Phoenix v#{Application.spec(:phoenix, :vsn)}" + Mix.shell().info "Peace of mind from prototype to production" + Mix.shell().info "\n## Options\n" + Mix.shell().info "-v, --version # Prints Phoenix version\n" + Mix.Tasks.Help.run(["--search", "phx."]) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.auth.ex b/deps/phoenix/lib/mix/tasks/phx.gen.auth.ex new file mode 100644 index 0000000..67e1f14 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.auth.ex @@ -0,0 +1,684 @@ +defmodule Mix.Tasks.Phx.Gen.Auth do + @shortdoc "Generates authentication logic for a resource" + + @moduledoc """ + Generates authentication logic for a resource. + + $ mix phx.gen.auth Accounts User users + + The first argument is the context module followed by the schema module + and its plural name (used as the schema table name). + + Additional information and security considerations are detailed in the + [`mix phx.gen.auth` guide](mix_phx_gen_auth.html). + + ## Password hashing + + The password hashing mechanism defaults to `bcrypt` for + Unix systems and `pbkdf2` for Windows systems. Both + systems use the [Comeonin interface](https://hexdocs.pm/comeonin/). + + The password hashing mechanism can be overridden with the + `--hashing-lib` option. The following values are supported: + + * `bcrypt` - [bcrypt_elixir](https://hex.pm/packages/bcrypt_elixir) + * `pbkdf2` - [pbkdf2_elixir](https://hex.pm/packages/pbkdf2_elixir) + * `argon2` - [argon2_elixir](https://hex.pm/packages/argon2_elixir) + + We recommend developers to consider using `argon2`, which + is the most robust of all 3. The downside is that `argon2` + is quite CPU and memory intensive, and you will need more + powerful instances to run your applications on. + + For more information about choosing these libraries, see the + [Comeonin project](https://github.com/riverrun/comeonin). + + ## Web namespace + + By default, the controllers and view will be namespaced by the schema name. + You can customize the web module namespace by passing the `--web` flag with a + module name, for example: + + $ mix phx.gen.auth Accounts User users --web Warehouse + + Which would generate the controllers, views, templates and associated tests nested in the `MyAppWeb.Warehouse` namespace: + + * `lib/my_app_web/controllers/warehouse/user_auth.ex` + * `lib/my_app_web/controllers/warehouse/user_confirmation_controller.ex` + * `lib/my_app_web/views/warehouse/user_confirmation_view.ex` + * `lib/my_app_web/templates/warehouse/user_confirmation/new.html.heex` + * `test/my_app_web/controllers/warehouse/user_auth_test.exs` + * `test/my_app_web/controllers/warehouse/user_confirmation_controller_test.exs` + * and so on... + + ## Binary ids + + The `--binary-id` option causes the generated migration to use + `binary_id` for its primary key and foreign keys. + + ## Default options + + This generator uses default options provided in the `:generators` + configuration of your application. 
These are the defaults: + + config :your_app, :generators, + binary_id: false, + sample_binary_id: "11111111-1111-1111-1111-111111111111" + + You can override those options per invocation by providing corresponding + switches, e.g. `--no-binary-id` to use normal ids despite the default + configuration. + + ## Custom table names + + By default, the table name for the migration and schema will be + the plural name provided for the resource. To customize this value, + a `--table` option may be provided. For example: + + $ mix phx.gen.auth Accounts User users --table accounts_users + + This will cause the generated tables to be named `"accounts_users"` and `"accounts_users_tokens"`. + """ + + use Mix.Task + + alias Mix.Phoenix.{Context, Schema} + alias Mix.Tasks.Phx.Gen + alias Mix.Tasks.Phx.Gen.Auth.{HashingLibrary, Injector, Migration} + + @switches [ + web: :string, + binary_id: :boolean, + hashing_lib: :string, + table: :string, + merge_with_existing_context: :boolean, + prefix: :string + ] + + @doc false + def run(args, test_opts \\ []) do + if Mix.Project.umbrella?() do + Mix.raise("mix phx.gen.auth can only be run inside an application directory") + end + + {opts, parsed} = OptionParser.parse!(args, strict: @switches) + validate_args!(parsed) + hashing_library = build_hashing_library!(opts) + + context_args = OptionParser.to_argv(opts, switches: @switches) ++ parsed + + {context, schema} = Gen.Context.build(context_args, __MODULE__) + Gen.Context.prompt_for_code_injection(context) + + if Keyword.get(test_opts, :validate_dependencies?, true) do + # Needed so we can get the ecto adapter and ensure other + # libraries are loaded. + Mix.Task.run("compile") + + validate_required_dependencies!() + end + + ecto_adapter = + Keyword.get_lazy( + test_opts, + :ecto_adapter, + fn -> get_ecto_adapter!(schema) end + ) + + migration = Migration.build(ecto_adapter) + + binding = [ + context: context, + schema: schema, + migration: migration, + hashing_library: hashing_library, + web_app_name: web_app_name(context), + endpoint_module: Module.concat([context.web_module, Endpoint]), + auth_module: Module.concat([context.web_module, schema.web_namespace, "#{inspect(schema.alias)}Auth"]), + router_scope: router_scope(context), + web_path_prefix: web_path_prefix(schema), + test_case_options: test_case_options(ecto_adapter) + ] + + paths = generator_paths() + + prompt_for_conflicts(context) + + context + |> copy_new_files(binding, paths) + |> inject_conn_case_helpers(paths, binding) + |> inject_config(hashing_library) + |> maybe_inject_mix_dependency(hashing_library) + |> inject_routes(paths, binding) + |> maybe_inject_router_import(binding) + |> maybe_inject_router_plug() + |> maybe_inject_app_layout_menu() + |> Gen.Notifier.maybe_print_mailer_installation_instructions() + |> print_shell_instructions() + end + + defp web_app_name(%Context{} = context) do + context.web_module + |> inspect() + |> Phoenix.Naming.underscore() + end + + defp validate_args!([_, _, _]), do: :ok + + defp validate_args!(_) do + raise_with_help("Invalid arguments") + end + + defp validate_required_dependencies! do + unless Code.ensure_loaded?(Ecto.Adapters.SQL) do + raise_with_help("mix phx.gen.auth requires ecto_sql", :phx_generator_args) + end + + if generated_with_no_html?() do + raise_with_help("mix phx.gen.auth requires phoenix_html", :phx_generator_args) + end + end + + defp generated_with_no_html? 
do + Mix.Project.config() + |> Keyword.get(:deps, []) + |> Enum.any?(fn + {:phoenix_html, _} -> true + {:phoenix_html, _, _} -> true + _ -> false + end) + |> Kernel.not() + end + + defp build_hashing_library!(opts) do + opts + |> Keyword.get_lazy(:hashing_lib, &default_hashing_library_option/0) + |> HashingLibrary.build() + |> case do + {:ok, hashing_library} -> + hashing_library + + {:error, {:unknown_library, unknown_library}} -> + raise_with_help("Unknown value for --hashing-lib #{inspect(unknown_library)}", :hashing_lib) + end + end + + defp default_hashing_library_option do + case :os.type() do + {:unix, _} -> "bcrypt" + {:win32, _} -> "pbkdf2" + end + end + + defp prompt_for_conflicts(context) do + context + |> files_to_be_generated() + |> Mix.Phoenix.prompt_for_conflicts() + end + + defp files_to_be_generated(%Context{schema: schema, context_app: context_app} = context) do + web_prefix = Mix.Phoenix.web_path(context_app) + web_test_prefix = Mix.Phoenix.web_test_path(context_app) + migrations_prefix = Mix.Phoenix.context_app_path(context_app, "priv/repo/migrations") + web_path = to_string(schema.web_path) + + [ + {:eex, "migration.ex", Path.join([migrations_prefix, "#{timestamp()}_create_#{schema.table}_auth_tables.exs"])}, + {:eex, "notifier.ex", Path.join([context.dir, "#{schema.singular}_notifier.ex"])}, + {:eex, "schema.ex", Path.join([context.dir, "#{schema.singular}.ex"])}, + {:eex, "schema_token.ex", Path.join([context.dir, "#{schema.singular}_token.ex"])}, + {:eex, "auth.ex", Path.join([web_prefix, "controllers", web_path, "#{schema.singular}_auth.ex"])}, + {:eex, "auth_test.exs", Path.join([web_test_prefix, "controllers", web_path, "#{schema.singular}_auth_test.exs"])}, + {:eex, "confirmation_view.ex", Path.join([web_prefix, "views", web_path, "#{schema.singular}_confirmation_view.ex"])}, + {:eex, "confirmation_new.html.heex", Path.join([web_prefix, "templates", web_path, "#{schema.singular}_confirmation", "new.html.heex"])}, + {:eex, "confirmation_edit.html.heex", Path.join([web_prefix, "templates", web_path, "#{schema.singular}_confirmation", "edit.html.heex"])}, + {:eex, "confirmation_controller.ex", Path.join([web_prefix, "controllers", web_path, "#{schema.singular}_confirmation_controller.ex"])}, + {:eex, "confirmation_controller_test.exs", Path.join([web_test_prefix, "controllers", web_path, "#{schema.singular}_confirmation_controller_test.exs"])}, + {:eex, "_menu.html.heex", Path.join([web_prefix, "templates", "layout", "_#{schema.singular}_menu.html.heex"])}, + {:eex, "registration_new.html.heex", Path.join([web_prefix, "templates", web_path, "#{schema.singular}_registration", "new.html.heex"])}, + {:eex, "registration_controller.ex", Path.join([web_prefix, "controllers", web_path, "#{schema.singular}_registration_controller.ex"])}, + {:eex, "registration_controller_test.exs", Path.join([web_test_prefix, "controllers", web_path, "#{schema.singular}_registration_controller_test.exs"])}, + {:eex, "registration_view.ex", Path.join([web_prefix, "views", web_path, "#{schema.singular}_registration_view.ex"])}, + {:eex, "reset_password_view.ex", Path.join([web_prefix, "views", web_path, "#{schema.singular}_reset_password_view.ex"])}, + {:eex, "reset_password_controller.ex", Path.join([web_prefix, "controllers", web_path, "#{schema.singular}_reset_password_controller.ex"])}, + {:eex, "reset_password_controller_test.exs", + Path.join([web_test_prefix, "controllers", web_path, "#{schema.singular}_reset_password_controller_test.exs"])}, + {:eex, "reset_password_edit.html.heex", 
Path.join([web_prefix, "templates", web_path, "#{schema.singular}_reset_password", "edit.html.heex"])}, + {:eex, "reset_password_new.html.heex", Path.join([web_prefix, "templates", web_path, "#{schema.singular}_reset_password", "new.html.heex"])}, + {:eex, "session_view.ex", Path.join([web_prefix, "views", web_path, "#{schema.singular}_session_view.ex"])}, + {:eex, "session_controller.ex", Path.join([web_prefix, "controllers", web_path, "#{schema.singular}_session_controller.ex"])}, + {:eex, "session_controller_test.exs", Path.join([web_test_prefix, "controllers", web_path, "#{schema.singular}_session_controller_test.exs"])}, + {:eex, "session_new.html.heex", Path.join([web_prefix, "templates", web_path, "#{schema.singular}_session", "new.html.heex"])}, + {:eex, "settings_view.ex", Path.join([web_prefix, "views", web_path, "#{schema.singular}_settings_view.ex"])}, + {:eex, "settings_edit.html.heex", Path.join([web_prefix, "templates", web_path, "#{schema.singular}_settings", "edit.html.heex"])}, + {:eex, "settings_controller.ex", Path.join([web_prefix, "controllers", web_path, "#{schema.singular}_settings_controller.ex"])}, + {:eex, "settings_controller_test.exs", Path.join([web_test_prefix, "controllers", web_path, "#{schema.singular}_settings_controller_test.exs"])} + ] + end + + defp copy_new_files(%Context{} = context, binding, paths) do + files = files_to_be_generated(context) + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.auth", binding, files) + inject_context_functions(context, paths, binding) + inject_tests(context, paths, binding) + inject_context_test_fixtures(context, paths, binding) + + context + end + + defp inject_context_functions(%Context{file: file} = context, paths, binding) do + Gen.Context.ensure_context_file_exists(context, paths, binding) + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.auth/context_functions.ex", binding) + |> prepend_newline() + |> inject_before_final_end(file) + end + + defp inject_tests(%Context{test_file: test_file} = context, paths, binding) do + Gen.Context.ensure_test_file_exists(context, paths, binding) + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.auth/test_cases.exs", binding) + |> prepend_newline() + |> inject_before_final_end(test_file) + end + + defp inject_context_test_fixtures(%Context{test_fixtures_file: test_fixtures_file} = context, paths, binding) do + Gen.Context.ensure_test_fixtures_file_exists(context, paths, binding) + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.auth/context_fixtures_functions.ex", binding) + |> prepend_newline() + |> inject_before_final_end(test_fixtures_file) + end + + defp inject_conn_case_helpers(%Context{} = context, paths, binding) do + test_file = "test/support/conn_case.ex" + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.auth/conn_case.exs", binding) + |> inject_before_final_end(test_file) + + context + end + + defp inject_routes(%Context{context_app: ctx_app} = context, paths, binding) do + web_prefix = Mix.Phoenix.web_path(ctx_app) + file_path = Path.join(web_prefix, "router.ex") + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.auth/routes.ex", binding) + |> inject_before_final_end(file_path) + + context + end + + defp maybe_inject_mix_dependency(%Context{context_app: ctx_app} = context, %HashingLibrary{mix_dependency: mix_dependency}) do + file_path = Mix.Phoenix.context_app_path(ctx_app, "mix.exs") + + file = File.read!(file_path) + + case Injector.mix_dependency_inject(file, mix_dependency) do + {:ok, new_file} -> + 
print_injecting(file_path) + File.write!(file_path, new_file) + + :already_injected -> + :ok + + {:error, :unable_to_inject} -> + Mix.shell().info(""" + + Add your #{mix_dependency} dependency to #{file_path}: + + defp deps do + [ + #{mix_dependency}, + ... + ] + end + """) + end + + context + end + + defp maybe_inject_router_import(%Context{context_app: ctx_app} = context, binding) do + web_prefix = Mix.Phoenix.web_path(ctx_app) + file_path = Path.join(web_prefix, "router.ex") + auth_module = Keyword.fetch!(binding, :auth_module) + inject = "import #{inspect(auth_module)}" + use_line = "use #{inspect(context.web_module)}, :router" + + help_text = """ + Add your #{inspect(auth_module)} import to #{Path.relative_to_cwd(file_path)}: + + defmodule #{inspect(context.web_module)}.Router do + #{use_line} + + # Import authentication plugs + #{inject} + + ... + end + """ + + with {:ok, file} <- read_file(file_path), + {:ok, new_file} <- Injector.inject_unless_contains(file, inject, &String.replace(&1, use_line, "#{use_line}\n\n #{&2}")) do + print_injecting(file_path, " - imports") + File.write!(file_path, new_file) + else + :already_injected -> + :ok + + {:error, :unable_to_inject} -> + Mix.shell().info(""" + + #{help_text} + """) + + {:error, {:file_read_error, _}} -> + print_injecting(file_path) + print_unable_to_read_file_error(file_path, help_text) + end + + context + end + + defp maybe_inject_router_plug(%Context{context_app: ctx_app} = context) do + web_prefix = Mix.Phoenix.web_path(ctx_app) + file_path = Path.join(web_prefix, "router.ex") + help_text = Injector.router_plug_help_text(file_path, context) + + with {:ok, file} <- read_file(file_path), + {:ok, new_file} <- Injector.router_plug_inject(file, context) do + print_injecting(file_path, " - plug") + File.write!(file_path, new_file) + else + :already_injected -> + :ok + + {:error, :unable_to_inject} -> + Mix.shell().info(""" + + #{help_text} + """) + + {:error, {:file_read_error, _}} -> + print_injecting(file_path) + print_unable_to_read_file_error(file_path, help_text) + end + + context + end + + defp maybe_inject_app_layout_menu(%Context{} = context) do + schema = context.schema + + if file_path = get_layout_html_path(context) do + file = File.read!(file_path) + + case Injector.app_layout_menu_inject(file, schema) do + {:ok, new_file} -> + print_injecting(file_path) + File.write!(file_path, new_file) + + :already_injected -> + :ok + + {:error, :unable_to_inject} -> + Mix.shell().info(""" + + #{Injector.app_layout_menu_help_text(file_path, schema)} + """) + end + else + menu_name = Injector.app_layout_menu_template_name(schema) + inject = Injector.app_layout_menu_code_to_inject(schema) + + missing = + context + |> potential_layout_file_paths() + |> Enum.map_join("\n", &" * #{&1}") + + Mix.shell().error(""" + + Unable to find an application layout file to inject a render + call for #{inspect(menu_name)}. + + Missing files: + + #{missing} + + Please ensure this phoenix app was not generated with + --no-html. If you have changed the name of your application + layout file, please add the following code to it where you'd + like #{inspect(menu_name)} to be rendered. 
+ + #{inject} + """) + end + + context + end + + defp get_layout_html_path(%Context{} = context) do + context + |> potential_layout_file_paths() + |> Enum.find(&File.exists?/1) + end + + defp potential_layout_file_paths(%Context{context_app: ctx_app}) do + web_prefix = Mix.Phoenix.web_path(ctx_app) + + for file_name <- ~w(root.html.heex app.html.heex) do + Path.join([web_prefix, "templates", "layout", file_name]) + end + end + + defp inject_config(context, %HashingLibrary{} = hashing_library) do + file_path = + if Mix.Phoenix.in_umbrella?(File.cwd!()) do + Path.expand("../../") + else + File.cwd!() + end + |> Path.join("config/test.exs") + + file = + case read_file(file_path) do + {:ok, file} -> file + {:error, {:file_read_error, _}} -> "use Mix.Config\n" + end + + case Injector.test_config_inject(file, hashing_library) do + {:ok, new_file} -> + print_injecting(file_path) + File.write!(file_path, new_file) + + :already_injected -> + :ok + + {:error, :unable_to_inject} -> + help_text = Injector.test_config_help_text(file_path, hashing_library) + + Mix.shell().info(""" + + #{help_text} + """) + end + + context + end + + defp print_shell_instructions(%Context{} = context) do + Mix.shell().info(""" + + Please re-fetch your dependencies with the following command: + + $ mix deps.get + + Remember to update your repository by running migrations: + + $ mix ecto.migrate + + Once you are ready, visit "/#{context.schema.plural}/register" + to create your account and then access "/dev/mailbox" to + see the account confirmation email. + """) + + context + end + + defp router_scope(%Context{schema: schema} = context) do + prefix = Module.concat(context.web_module, schema.web_namespace) + + if schema.web_namespace do + ~s|"/#{schema.web_path}", #{inspect(prefix)}, as: :#{schema.web_path}| + else + ~s|"/", #{inspect(context.web_module)}| + end + end + + defp web_path_prefix(%Schema{web_path: nil}), do: "" + defp web_path_prefix(%Schema{web_path: web_path}), do: "/" <> web_path + + # The paths to look for template files for generators. + # + # Defaults to checking the current app's `priv` directory, + # and falls back to phx_gen_auth's `priv` directory. 
+ defp generator_paths do + [".", :phoenix] + end + + defp inject_before_final_end(content_to_inject, file_path) do + with {:ok, file} <- read_file(file_path), + {:ok, new_file} <- Injector.inject_before_final_end(file, content_to_inject) do + print_injecting(file_path) + File.write!(file_path, new_file) + else + :already_injected -> + :ok + + {:error, {:file_read_error, _}} -> + print_injecting(file_path) + + print_unable_to_read_file_error( + file_path, + """ + + Please add the following to the end of your equivalent + #{Path.relative_to_cwd(file_path)} module: + + #{indent_spaces(content_to_inject, 2)} + """ + ) + end + end + + defp read_file(file_path) do + case File.read(file_path) do + {:ok, file} -> {:ok, file} + {:error, reason} -> {:error, {:file_read_error, reason}} + end + end + + defp indent_spaces(string, number_of_spaces) when is_binary(string) and is_integer(number_of_spaces) do + indent = String.duplicate(" ", number_of_spaces) + + string + |> String.split("\n") + |> Enum.map_join("\n", &(indent <> &1)) + end + + defp timestamp do + {{y, m, d}, {hh, mm, ss}} = :calendar.universal_time() + "#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}" + end + + defp pad(i) when i < 10, do: <<?0, ?0 + i>> + defp pad(i), do: to_string(i) + + defp prepend_newline(string) when is_binary(string), do: "\n" <> string + + defp get_ecto_adapter!(%Schema{repo: repo}) do + if Code.ensure_loaded?(repo) do + repo.__adapter__() + else + Mix.raise("Unable to find #{inspect(repo)}") + end + end + + defp print_injecting(file_path, suffix \\ []) do + Mix.shell().info([:green, "* injecting ", :reset, Path.relative_to_cwd(file_path), suffix]) + end + + defp print_unable_to_read_file_error(file_path, help_text) do + Mix.shell().error( + """ + + Unable to read file #{Path.relative_to_cwd(file_path)}. + + #{help_text} + """ + |> indent_spaces(2) + ) + end + + @doc false + def raise_with_help(msg) do + raise_with_help(msg, :general) + end + + defp raise_with_help(msg, :general) do + Mix.raise(""" + #{msg} + + mix phx.gen.auth expects a context module name, followed by + the schema module and its plural name (used as the schema + table name). + + For example: + + mix phx.gen.auth Accounts User users + + The context serves as the API boundary for the given resource. + Multiple resources may belong to a context and a resource may be + split over distinct contexts (such as Accounts.User and Payments.User). + """) + end + + defp raise_with_help(msg, :phx_generator_args) do + Mix.raise(""" + #{msg} + + mix phx.gen.auth must be installed into a Phoenix 1.5 app that + contains ecto and html templates. + + mix phx.new my_app + mix phx.new my_app --umbrella + mix phx.new my_app --database mysql + + Apps generated with --no-ecto or --no-html are not supported. + """) + end + + defp raise_with_help(msg, :hashing_lib) do + Mix.raise(""" + #{msg} + + mix phx.gen.auth supports the following values for --hashing-lib + + * bcrypt + * pbkdf2 + * argon2 + + Visit https://github.com/riverrun/comeonin for more information + on choosing a library. 
+ """) + end + + defp test_case_options(Ecto.Adapters.Postgres), do: ", async: true" + defp test_case_options(adapter) when is_atom(adapter), do: "" +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.auth/hashing_library.ex b/deps/phoenix/lib/mix/tasks/phx.gen.auth/hashing_library.ex new file mode 100644 index 0000000..a922eab --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.auth/hashing_library.ex @@ -0,0 +1,48 @@ +defmodule Mix.Tasks.Phx.Gen.Auth.HashingLibrary do + @moduledoc false + + defstruct [:name, :module, :mix_dependency, :test_config] + + def build("bcrypt") do + lib = %__MODULE__{ + name: :bcrypt, + module: Bcrypt, + mix_dependency: ~s|{:bcrypt_elixir, "~> 3.0"}|, + test_config: """ + config :bcrypt_elixir, :log_rounds, 1 + """ + } + + {:ok, lib} + end + + def build("pbkdf2") do + lib = %__MODULE__{ + name: :pbkdf2, + module: Pbkdf2, + mix_dependency: ~s|{:pbkdf2_elixir, "~> 2.0"}|, + test_config: """ + config :pbkdf2_elixir, :rounds, 1 + """ + } + + {:ok, lib} + end + + def build("argon2") do + lib = %__MODULE__{ + name: :argon2, + module: Argon2, + mix_dependency: ~s|{:argon2_elixir, "~> 3.0"}|, + test_config: """ + config :argon2_elixir, t_cost: 1, m_cost: 8 + """ + } + + {:ok, lib} + end + + def build(other) do + {:error, {:unknown_library, other}} + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.auth/injector.ex b/deps/phoenix/lib/mix/tasks/phx.gen.auth/injector.ex new file mode 100644 index 0000000..c5ecf7f --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.auth/injector.ex @@ -0,0 +1,262 @@ +defmodule Mix.Tasks.Phx.Gen.Auth.Injector do + @moduledoc false + + alias Mix.Phoenix.{Context, Schema} + alias Mix.Tasks.Phx.Gen.Auth.HashingLibrary + + @type schema :: %Schema{} + @type context :: %Context{schema: schema} + + @doc """ + Injects a dependency into the contents of mix.exs + """ + @spec mix_dependency_inject(String.t(), String.t()) :: {:ok, String.t()} | :already_injected | {:error, :unable_to_inject} + def mix_dependency_inject(mixfile, dependency) do + with :ok <- ensure_not_already_injected(mixfile, dependency), + {:ok, new_mixfile} <- do_mix_dependency_inject(mixfile, dependency) do + {:ok, new_mixfile} + end + end + + @spec do_mix_dependency_inject(String.t(), String.t()) :: {:ok, String.t()} | {:error, :unable_to_inject} + defp do_mix_dependency_inject(mixfile, dependency) do + string_to_split_on = """ + defp deps do + [ + """ + + case split_with_self(mixfile, string_to_split_on) do + {beginning, splitter, rest} -> + new_mixfile = IO.iodata_to_binary([beginning, splitter, " ", dependency, ?,, ?\n, rest]) + {:ok, new_mixfile} + + _ -> + {:error, :unable_to_inject} + end + end + + @doc """ + Injects configuration for test environment into `file`. + """ + @spec test_config_inject(String.t(), HashingLibrary.t()) :: {:ok, String.t()} | :already_injected | {:error, :unable_to_inject} + def test_config_inject(file, %HashingLibrary{} = hashing_library) when is_binary(file) do + code_to_inject = + hashing_library + |> test_config_code() + |> normalize_line_endings_to_file(file) + + inject_unless_contains( + file, + code_to_inject, + # Matches the entire line and captures the line ending. In the + # replace string: + # + # * the entire matching line is inserted with \\0, + # * the actual code is injected with &2, + # * and the appropriate newlines are injected using \\2. 
+ &Regex.replace(~r/(use Mix\.Config|import Config)(\r\n|\n|$)/, &1, "\\0\\2#{&2}\\2", global: false) + ) + end + + @doc """ + Instructions to provide the user when `test_config_inject/2` fails. + """ + @spec test_config_help_text(String.t(), HashingLibrary.t()) :: String.t() + def test_config_help_text(file_path, %HashingLibrary{} = hashing_library) do + """ + Add the following to #{Path.relative_to_cwd(file_path)}: + + #{hashing_library |> test_config_code() |> indent_spaces(4)} + """ + end + + defp test_config_code(%HashingLibrary{test_config: test_config}) do + String.trim(""" + # Only in tests, remove the complexity from the password hashing algorithm + #{test_config} + """) + end + + @router_plug_anchor_line "plug :put_secure_browser_headers" + + @doc """ + Injects the fetch_current_<schema> plug into router's browser pipeline + """ + @spec router_plug_inject(String.t(), context) :: {:ok, String.t()} | :already_injected | {:error, :unable_to_inject} + def router_plug_inject(file, %Context{schema: schema}) when is_binary(file) do + inject_unless_contains( + file, + router_plug_code(schema), + # Matches the entire line containing `anchor_line` and captures + # the whitespace before the anchor. In the replace string + # + # * the entire matching line is inserted with \\0, + # * the captured indent is inserted using \\1, + # * the actual code is injected with &2, + # * and the appropriate newline is injected using \\2 + &Regex.replace(~r/^(\s*)#{@router_plug_anchor_line}.*(\r\n|\n|$)/Um, &1, "\\0\\1#{&2}\\2", global: false) + ) + end + + @doc """ + Instructions to provide the user when `inject_router_plug/2` fails. + """ + @spec router_plug_help_text(String.t(), context) :: String.t() + def router_plug_help_text(file_path, %Context{schema: schema}) do + """ + Add the #{router_plug_name(schema)} plug to the :browser pipeline in #{Path.relative_to_cwd(file_path)}: + + pipeline :browser do + ... + #{@router_plug_anchor_line} + #{router_plug_code(schema)} + end + """ + end + + defp router_plug_code(%Schema{} = schema) do + "plug " <> router_plug_name(schema) + end + + defp router_plug_name(%Schema{} = schema) do + ":fetch_current_#{schema.singular}" + end + + @doc """ + Injects a menu in the application layout + """ + @spec app_layout_menu_inject(String.t(), schema) :: {:ok, String.t()} | :already_injected | {:error, :unable_to_inject} + def app_layout_menu_inject(file, %Schema{} = schema) when is_binary(file) do + with {:error, :unable_to_inject} <- app_layout_menu_inject_at_end_of_nav_tag(file, schema), + {:error, :unable_to_inject} <- app_layout_menu_inject_after_opening_body_tag(file, schema) do + {:error, :unable_to_inject} + end + end + + @doc """ + Instructions to provide the user when `app_layout_menu_inject/2` fails. + """ + @spec app_layout_menu_help_text(String.t(), schema) :: String.t() + def app_layout_menu_help_text(file_path, %Schema{} = schema) do + """ + Add a render call for #{inspect(app_layout_menu_template_name(schema))} to #{Path.relative_to_cwd(file_path)}: + + <nav> + #{app_layout_menu_code_to_inject(schema)} + </nav> + """ + end + + @doc """ + Menu code to inject into the application layout template. 
+ """ + @spec app_layout_menu_code_to_inject(schema) :: String.t() + def app_layout_menu_code_to_inject(%Schema{} = schema) do + "<%= render \"#{app_layout_menu_template_name(schema)}\", assigns %>" + end + + @doc """ + Name of the template containing the menu + """ + @spec app_layout_menu_template_name(schema) :: String.t() + def app_layout_menu_template_name(%Schema{} = schema) do + "_#{schema.singular}_menu.html" + end + + defp app_layout_menu_inject_at_end_of_nav_tag(file, schema) do + inject_unless_contains( + file, + app_layout_menu_code_to_inject(schema), + &Regex.replace(~r/(\s*)<\/nav>/m, &1, "\\1 #{&2}\\0", global: false) + ) + end + + defp app_layout_menu_inject_after_opening_body_tag(file, schema) do + anchor_line = "<body>" + + inject_unless_contains( + file, + app_layout_menu_code_to_inject(schema), + # Matches the entire line containing `anchor_line` and captures + # the whitespace before the anchor. In the replace string, the + # entire matching line is inserted with \\0, then a newline then + # the indent that was captured using \\1. &2 is the code to + # inject. + &Regex.replace(~r/^(\s*)#{anchor_line}.*(\r\n|\n|$)/Um, &1, "\\0\\1 #{&2}\\2", global: false) + ) + end + + @doc """ + Injects code unless the existing code already contains `code_to_inject` + """ + @spec inject_unless_contains(String.t(), String.t(), (String.t(), String.t() -> String.t())) :: + {:ok, String.t()} | :already_injected | {:error, :unable_to_inject} + def inject_unless_contains(code, code_to_inject, inject_fn) when is_binary(code) and is_binary(code_to_inject) and is_function(inject_fn, 2) do + with :ok <- ensure_not_already_injected(code, code_to_inject) do + new_code = inject_fn.(code, code_to_inject) + + if code != new_code do + {:ok, new_code} + else + {:error, :unable_to_inject} + end + end + end + + @doc """ + Injects snippet before the final end in a file + """ + @spec inject_before_final_end(String.t(), String.t()) :: {:ok, String.t()} | :already_injected + def inject_before_final_end(code, code_to_inject) when is_binary(code) and is_binary(code_to_inject) do + if String.contains?(code, code_to_inject) do + :already_injected + else + new_code = + code + |> String.trim_trailing() + |> String.trim_trailing("end") + |> Kernel.<>(code_to_inject) + |> Kernel.<>("end\n") + + {:ok, new_code} + end + end + + @spec ensure_not_already_injected(String.t(), String.t()) :: :ok | :already_injected + defp ensure_not_already_injected(file, inject) do + if String.contains?(file, inject) do + :already_injected + else + :ok + end + end + + @spec split_with_self(String.t(), String.t()) :: {String.t(), String.t(), String.t()} | :error + defp split_with_self(contents, text) do + case :binary.split(contents, text) do + [left, right] -> {left, text, right} + [_] -> :error + end + end + + @spec normalize_line_endings_to_file(String.t(), String.t()) :: String.t() + defp normalize_line_endings_to_file(code, file) do + String.replace(code, "\n", get_line_ending(file)) + end + + @spec get_line_ending(String.t()) :: String.t() + defp get_line_ending(file) do + case Regex.run(~r/\r\n|\n|$/, file) do + [line_ending] -> line_ending + [] -> "\n" + end + end + + defp indent_spaces(string, number_of_spaces) when is_binary(string) and is_integer(number_of_spaces) do + indent = String.duplicate(" ", number_of_spaces) + + string + |> String.split("\n") + |> Enum.map_join("\n", &(indent <> &1)) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.auth/migration.ex b/deps/phoenix/lib/mix/tasks/phx.gen.auth/migration.ex new 
file mode 100644 index 0000000..55e7adf --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.auth/migration.ex @@ -0,0 +1,33 @@ +defmodule Mix.Tasks.Phx.Gen.Auth.Migration do + @moduledoc false + + defstruct [:ecto_adapter, :extensions, :column_definitions] + + def build(ecto_adapter) when is_atom(ecto_adapter) do + %__MODULE__{ + ecto_adapter: ecto_adapter, + extensions: extensions(ecto_adapter), + column_definitions: column_definitions(ecto_adapter) + } + end + + defp extensions(Ecto.Adapters.Postgres) do + ["execute \"CREATE EXTENSION IF NOT EXISTS citext\", \"\""] + end + + defp extensions(_), do: [] + + defp column_definitions(ecto_adapter) do + for field <- ~w(email token)a, + into: %{}, + do: {field, column_definition(field, ecto_adapter)} + end + + defp column_definition(:email, Ecto.Adapters.Postgres), do: "add :email, :citext, null: false" + defp column_definition(:email, Ecto.Adapters.SQLite3), do: "add :email, :string, null: false, collate: :nocase" + defp column_definition(:email, _), do: "add :email, :string, null: false, size: 160" + + defp column_definition(:token, Ecto.Adapters.Postgres), do: "add :token, :binary, null: false" + + defp column_definition(:token, _), do: "add :token, :binary, null: false, size: 32" +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.cert.ex b/deps/phoenix/lib/mix/tasks/phx.gen.cert.ex new file mode 100644 index 0000000..84b6d1e --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.cert.ex @@ -0,0 +1,312 @@ +defmodule Mix.Tasks.Phx.Gen.Cert do + @shortdoc "Generates a self-signed certificate for HTTPS testing" + + @default_path "priv/cert/selfsigned" + @default_name "Self-signed test certificate" + @default_hostnames ["localhost"] + + @warning """ + WARNING: only use the generated certificate for testing in a closed network + environment, such as running a development server on `localhost`. + For production, staging, or testing servers on the public internet, obtain a + proper certificate, for example from [Let's Encrypt](https://letsencrypt.org). + + NOTE: when using Google Chrome, open chrome://flags/#allow-insecure-localhost + to enable the use of self-signed certificates on `localhost`. + """ + + @moduledoc """ + Generates a self-signed certificate for HTTPS testing. + + $ mix phx.gen.cert + $ mix phx.gen.cert my-app my-app.local my-app.internal.example.com + + Creates a private key and a self-signed certificate in PEM format. These + files can be referenced in the `certfile` and `keyfile` parameters of an + HTTPS Endpoint. + + #{@warning} + + ## Arguments + + The list of hostnames, if none are specified, defaults to: + + * #{Enum.join(@default_hostnames, "\n * ")} + + Other (optional) arguments: + + * `--output` (`-o`): the path and base filename for the certificate and + key (default: #{@default_path}) + * `--name` (`-n`): the Common Name value in certificate's subject + (default: "#{@default_name}") + + Requires OTP 21.3 or later. 
+ """ + + use Mix.Task + import Mix.Generator + + @doc false + def run(all_args) do + if Mix.Project.umbrella?() do + Mix.raise("mix phx.gen.cert must be invoked from within your *_web application root directory") + end + + {opts, args} = + OptionParser.parse!( + all_args, + aliases: [n: :name, o: :output], + strict: [name: :string, output: :string] + ) + + path = opts[:output] || @default_path + name = opts[:name] || @default_name + + hostnames = + case args do + [] -> @default_hostnames + list -> list + end + + {certificate, private_key} = certificate_and_key(2048, name, hostnames) + + keyfile = path <> "_key.pem" + certfile = path <> ".pem" + + create_file( + keyfile, + :public_key.pem_encode([:public_key.pem_entry_encode(:RSAPrivateKey, private_key)]) + ) + + create_file( + certfile, + :public_key.pem_encode([{:Certificate, certificate, :not_encrypted}]) + ) + + print_shell_instructions(keyfile, certfile) + end + + @doc false + def certificate_and_key(key_size, name, hostnames) do + private_key = + case generate_rsa_key(key_size, 65537) do + {:ok, key} -> + key + + {:error, :not_supported} -> + Mix.raise(""" + Failed to generate an RSA key pair. + + This Mix task requires Erlang/OTP 20 or later. Please upgrade to a + newer version, or use another tool, such as OpenSSL, to generate a + certificate. + """) + end + + public_key = extract_public_key(private_key) + + certificate = + public_key + |> new_cert(name, hostnames) + |> :public_key.pkix_sign(private_key) + + {certificate, private_key} + end + + defp print_shell_instructions(keyfile, certfile) do + app = Mix.Phoenix.otp_app() + base = Mix.Phoenix.base() + + Mix.shell().info(""" + + If you have not already done so, please update your HTTPS Endpoint + configuration in config/dev.exs: + + config #{inspect(app)}, #{inspect(Mix.Phoenix.web_module(base))}.Endpoint, + http: [port: 4000], + https: [ + port: 4001, + cipher_suite: :strong, + certfile: "#{certfile}", + keyfile: "#{keyfile}" + ], + ... 
+ + #{@warning} + """) + end + + require Record + + # RSA key pairs + + Record.defrecordp( + :rsa_private_key, + :RSAPrivateKey, + Record.extract(:RSAPrivateKey, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :rsa_public_key, + :RSAPublicKey, + Record.extract(:RSAPublicKey, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + defp generate_rsa_key(keysize, e) do + private_key = :public_key.generate_key({:rsa, keysize, e}) + {:ok, private_key} + rescue + FunctionClauseError -> + {:error, :not_supported} + end + + defp extract_public_key(rsa_private_key(modulus: m, publicExponent: e)) do + rsa_public_key(modulus: m, publicExponent: e) + end + + # Certificates + + Record.defrecordp( + :otp_tbs_certificate, + :OTPTBSCertificate, + Record.extract(:OTPTBSCertificate, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :signature_algorithm, + :SignatureAlgorithm, + Record.extract(:SignatureAlgorithm, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :validity, + :Validity, + Record.extract(:Validity, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :otp_subject_public_key_info, + :OTPSubjectPublicKeyInfo, + Record.extract(:OTPSubjectPublicKeyInfo, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :public_key_algorithm, + :PublicKeyAlgorithm, + Record.extract(:PublicKeyAlgorithm, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :extension, + :Extension, + Record.extract(:Extension, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :basic_constraints, + :BasicConstraints, + Record.extract(:BasicConstraints, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + Record.defrecordp( + :attr, + :AttributeTypeAndValue, + Record.extract(:AttributeTypeAndValue, from_lib: "public_key/include/OTP-PUB-KEY.hrl") + ) + + # OID values + @rsaEncryption {1, 2, 840, 113_549, 1, 1, 1} + @sha256WithRSAEncryption {1, 2, 840, 113_549, 1, 1, 11} + + @basicConstraints {2, 5, 29, 19} + @keyUsage {2, 5, 29, 15} + @extendedKeyUsage {2, 5, 29, 37} + @subjectKeyIdentifier {2, 5, 29, 14} + @subjectAlternativeName {2, 5, 29, 17} + + @organizationName {2, 5, 4, 10} + @commonName {2, 5, 4, 3} + + @serverAuth {1, 3, 6, 1, 5, 5, 7, 3, 1} + @clientAuth {1, 3, 6, 1, 5, 5, 7, 3, 2} + + defp new_cert(public_key, common_name, hostnames) do + <<serial::unsigned-64>> = :crypto.strong_rand_bytes(8) + + # Dates must be in 'YYMMDD' format + {{year, month, day}, _} = + :erlang.timestamp() + |> :calendar.now_to_datetime() + + yy = year |> Integer.to_string() |> String.slice(2, 2) + mm = month |> Integer.to_string() |> String.pad_leading(2, "0") + dd = day |> Integer.to_string() |> String.pad_leading(2, "0") + + not_before = yy <> mm <> dd + + yy2 = (year + 1) |> Integer.to_string() |> String.slice(2, 2) + + not_after = yy2 <> mm <> dd + + otp_tbs_certificate( + version: :v3, + serialNumber: serial, + signature: signature_algorithm(algorithm: @sha256WithRSAEncryption, parameters: :NULL), + issuer: rdn(common_name), + validity: + validity( + notBefore: {:utcTime, '#{not_before}000000Z'}, + notAfter: {:utcTime, '#{not_after}000000Z'} + ), + subject: rdn(common_name), + subjectPublicKeyInfo: + otp_subject_public_key_info( + algorithm: public_key_algorithm(algorithm: @rsaEncryption, parameters: :NULL), + subjectPublicKey: public_key + ), + extensions: extensions(public_key, hostnames) + ) + end + + defp rdn(common_name) do + {:rdnSequence, + [ + 
[attr(type: @organizationName, value: {:utf8String, "Phoenix Framework"})], + [attr(type: @commonName, value: {:utf8String, common_name})] + ]} + end + + defp extensions(public_key, hostnames) do + [ + extension( + extnID: @basicConstraints, + critical: true, + extnValue: basic_constraints(cA: false) + ), + extension( + extnID: @keyUsage, + critical: true, + extnValue: [:digitalSignature, :keyEncipherment] + ), + extension( + extnID: @extendedKeyUsage, + critical: false, + extnValue: [@serverAuth, @clientAuth] + ), + extension( + extnID: @subjectKeyIdentifier, + critical: false, + extnValue: key_identifier(public_key) + ), + extension( + extnID: @subjectAlternativeName, + critical: false, + extnValue: Enum.map(hostnames, &{:dNSName, String.to_charlist(&1)}) + ) + ] + end + + defp key_identifier(public_key) do + :crypto.hash(:sha, :public_key.der_encode(:RSAPublicKey, public_key)) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.channel.ex b/deps/phoenix/lib/mix/tasks/phx.gen.channel.ex new file mode 100644 index 0000000..2068140 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.channel.ex @@ -0,0 +1,111 @@ +defmodule Mix.Tasks.Phx.Gen.Channel do + @shortdoc "Generates a Phoenix channel" + + @moduledoc """ + Generates a Phoenix channel. + + $ mix phx.gen.channel Room + + Accepts the module name for the channel + + The generated files will contain: + + For a regular application: + + * a channel in `lib/my_app_web/channels` + * a channel test in `test/my_app_web/channels` + + For an umbrella application: + + * a channel in `apps/my_app_web/lib/app_name_web/channels` + * a channel test in `apps/my_app_web/test/my_app_web/channels` + + """ + use Mix.Task + alias Mix.Tasks.Phx.Gen + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.channel must be invoked from within your *_web application root directory" + ) + end + + [channel_name] = validate_args!(args) + context_app = Mix.Phoenix.context_app() + web_prefix = Mix.Phoenix.web_path(context_app) + web_test_prefix = Mix.Phoenix.web_test_path(context_app) + binding = Mix.Phoenix.inflect(channel_name) + binding = Keyword.put(binding, :module, "#{binding[:web_module]}.#{binding[:scoped]}") + + Mix.Phoenix.check_module_name_availability!(binding[:module] <> "Channel") + + test_path = Path.join(web_test_prefix, "channels/#{binding[:path]}_channel_test.exs") + case_path = Path.join(Path.dirname(web_test_prefix), "support/channel_case.ex") + + maybe_case = if File.exists?(case_path) do + [] + else + [{:eex, "channel_case.ex", case_path}] + end + + Mix.Phoenix.copy_from(paths(), "priv/templates/phx.gen.channel", binding, [ + {:eex, "channel.ex", Path.join(web_prefix, "channels/#{binding[:path]}_channel.ex")}, + {:eex, "channel_test.exs", test_path} + ] ++ maybe_case) + + user_socket_path = Mix.Phoenix.web_path(context_app, "channels/user_socket.ex") + + if File.exists?(user_socket_path) do + Mix.shell().info(""" + + Add the channel to your `#{user_socket_path}` handler, for example: + + channel "#{binding[:singular]}:lobby", #{binding[:module]}Channel + """) + else + Mix.shell().info(""" + + The default socket handler - #{binding[:web_module]}.UserSocket - was not found. 
+ """) + + if Mix.shell().yes?("Do you want to create it?") do + Gen.Socket.run(~w(User --from-channel #{channel_name})) + else + Mix.shell().info(""" + + To create it, please run the mix task: + + mix phx.gen.socket User + + Then add the channel to the newly created file, at `#{user_socket_path}`: + + channel "#{binding[:singular]}:lobby", #{binding[:module]}Channel + """) + end + end + end + + @spec raise_with_help() :: no_return() + defp raise_with_help do + Mix.raise(""" + mix phx.gen.channel expects just the module name: + + mix phx.gen.channel Room + + """) + end + + defp validate_args!(args) do + unless length(args) == 1 do + raise_with_help() + end + + args + end + + defp paths do + [".", :phoenix] + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.context.ex b/deps/phoenix/lib/mix/tasks/phx.gen.context.ex new file mode 100644 index 0000000..20f9dac --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.context.ex @@ -0,0 +1,362 @@ +defmodule Mix.Tasks.Phx.Gen.Context do + @shortdoc "Generates a context with functions around an Ecto schema" + + @moduledoc """ + Generates a context with functions around an Ecto schema. + + $ mix phx.gen.context Accounts User users name:string age:integer + + The first argument is the context module followed by the schema module + and its plural name (used as the schema table name). + + The context is an Elixir module that serves as an API boundary for + the given resource. A context often holds many related resources. + Therefore, if the context already exists, it will be augmented with + functions for the given resource. + + > Note: A resource may also be split + > over distinct contexts (such as Accounts.User and Payments.User). + + The schema is responsible for mapping the database fields into an + Elixir struct. + + Overall, this generator will add the following files to `lib/your_app`: + + * a context module in `accounts.ex`, serving as the API boundary + * a schema in `accounts/user.ex`, with a `users` table + + A migration file for the repository and test files for the context + will also be generated. + + ## Generating without a schema + + In some cases, you may wish to bootstrap the context module and + tests, but leave internal implementation of the context and schema + to yourself. Use the `--no-schema` flags to accomplish this. + + ## table + + By default, the table name for the migration and schema will be + the plural name provided for the resource. To customize this value, + a `--table` option may be provided. For example: + + $ mix phx.gen.context Accounts User users --table cms_users + + ## binary_id + + Generated migration can use `binary_id` for schema's primary key + and its references with option `--binary-id`. + + ## Default options + + This generator uses default options provided in the `:generators` + configuration of your application. These are the defaults: + + config :your_app, :generators, + migration: true, + binary_id: false, + sample_binary_id: "11111111-1111-1111-1111-111111111111" + + You can override those options per invocation by providing corresponding + switches, e.g. `--no-binary-id` to use normal ids despite the default + configuration or `--migration` to force generation of the migration. + + Read the documentation for `phx.gen.schema` for more information on + attributes. + + ## Skipping prompts + + This generator will prompt you if there is an existing context with the same + name, in order to provide more instructions on how to correctly use phoenix contexts. 
+ You can skip this prompt and automatically merge the new schema access functions and tests into the + existing context using `--merge-with-existing-context`. To prevent changes to + the existing context and exit the generator, use `--no-merge-with-existing-context`. + """ + + use Mix.Task + + alias Mix.Phoenix.{Context, Schema} + alias Mix.Tasks.Phx.Gen + + @switches [binary_id: :boolean, table: :string, web: :string, + schema: :boolean, context: :boolean, context_app: :string, + merge_with_existing_context: :boolean, prefix: :string] + + @default_opts [schema: true, context: true] + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise "mix phx.gen.context must be invoked from within your *_web application root directory" + end + + {context, schema} = build(args) + binding = [context: context, schema: schema] + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(context) + prompt_for_code_injection(context) + + context + |> copy_new_files(paths, binding) + |> print_shell_instructions() + end + + defp prompt_for_conflicts(context) do + context + |> files_to_be_generated() + |> Mix.Phoenix.prompt_for_conflicts() + end + + @doc false + def build(args, help \\ __MODULE__) do + {opts, parsed, _} = parse_opts(args) + [context_name, schema_name, plural | schema_args] = validate_args!(parsed, help) + schema_module = inspect(Module.concat(context_name, schema_name)) + schema = Gen.Schema.build([schema_module, plural | schema_args], opts, help) + context = Context.new(context_name, schema, opts) + {context, schema} + end + + defp parse_opts(args) do + {opts, parsed, invalid} = OptionParser.parse(args, switches: @switches) + merged_opts = + @default_opts + |> Keyword.merge(opts) + |> put_context_app(opts[:context_app]) + + {merged_opts, parsed, invalid} + end + defp put_context_app(opts, nil), do: opts + defp put_context_app(opts, string) do + Keyword.put(opts, :context_app, String.to_atom(string)) + end + + @doc false + def files_to_be_generated(%Context{schema: schema}) do + if schema.generate? 
do + Gen.Schema.files_to_be_generated(schema) + else + [] + end + end + + @doc false + def copy_new_files(%Context{schema: schema} = context, paths, binding) do + if schema.generate?, do: Gen.Schema.copy_new_files(schema, paths, binding) + inject_schema_access(context, paths, binding) + inject_tests(context, paths, binding) + inject_test_fixture(context, paths, binding) + + context + end + + @doc false + def ensure_context_file_exists(%Context{file: file} = context, paths, binding) do + unless Context.pre_existing?(context) do + Mix.Generator.create_file(file, Mix.Phoenix.eval_from(paths, "priv/templates/phx.gen.context/context.ex", binding)) + end + end + + defp inject_schema_access(%Context{file: file} = context, paths, binding) do + ensure_context_file_exists(context, paths, binding) + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.context/#{schema_access_template(context)}", binding) + |> inject_eex_before_final_end(file, binding) + end + + defp write_file(content, file) do + File.write!(file, content) + end + + @doc false + def ensure_test_file_exists(%Context{test_file: test_file} = context, paths, binding) do + unless Context.pre_existing_tests?(context) do + Mix.Generator.create_file(test_file, Mix.Phoenix.eval_from(paths, "priv/templates/phx.gen.context/context_test.exs", binding)) + end + end + + defp inject_tests(%Context{test_file: test_file} = context, paths, binding) do + ensure_test_file_exists(context, paths, binding) + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.context/test_cases.exs", binding) + |> inject_eex_before_final_end(test_file, binding) + end + + @doc false + def ensure_test_fixtures_file_exists(%Context{test_fixtures_file: test_fixtures_file} = context, paths, binding) do + unless Context.pre_existing_test_fixtures?(context) do + Mix.Generator.create_file(test_fixtures_file, Mix.Phoenix.eval_from(paths, "priv/templates/phx.gen.context/fixtures_module.ex", binding)) + end + end + + defp inject_test_fixture(%Context{test_fixtures_file: test_fixtures_file} = context, paths, binding) do + ensure_test_fixtures_file_exists(context, paths, binding) + + paths + |> Mix.Phoenix.eval_from("priv/templates/phx.gen.context/fixtures.ex", binding) + |> Mix.Phoenix.prepend_newline() + |> inject_eex_before_final_end(test_fixtures_file, binding) + + maybe_print_unimplemented_fixture_functions(context) + end + + defp maybe_print_unimplemented_fixture_functions(%Context{} = context) do + fixture_functions_needing_implementations = + Enum.flat_map( + context.schema.fixture_unique_functions, + fn + {_field, {_function_name, function_def, true}} -> [function_def] + {_field, {_function_name, _function_def, false}} -> [] + end + ) + + if Enum.any?(fixture_functions_needing_implementations) do + Mix.shell.info( + """ + + Some of the generated database columns are unique. 
Please provide + unique implementations for the following fixture function(s) in + #{context.test_fixtures_file}: + + #{ + fixture_functions_needing_implementations + |> Enum.map_join(&indent(&1, 2)) + |> String.trim_trailing() + } + """ + ) + end + end + + defp indent(string, spaces) do + indent_string = String.duplicate(" ", spaces) + + string + |> String.split("\n") + |> Enum.map_join(fn line -> + if String.trim(line) == "" do + "\n" + else + indent_string <> line <> "\n" + end + end) + end + + defp inject_eex_before_final_end(content_to_inject, file_path, binding) do + file = File.read!(file_path) + + if String.contains?(file, content_to_inject) do + :ok + else + Mix.shell().info([:green, "* injecting ", :reset, Path.relative_to_cwd(file_path)]) + + file + |> String.trim_trailing() + |> String.trim_trailing("end") + |> EEx.eval_string(binding) + |> Kernel.<>(content_to_inject) + |> Kernel.<>("end\n") + |> write_file(file_path) + end + end + + @doc false + def print_shell_instructions(%Context{schema: schema}) do + if schema.generate? do + Gen.Schema.print_shell_instructions(schema) + else + :ok + end + end + + defp schema_access_template(%Context{schema: schema}) do + if schema.generate? do + "schema_access.ex" + else + "access_no_schema.ex" + end + end + + defp validate_args!([context, schema, _plural | _] = args, help) do + cond do + not Context.valid?(context) -> + help.raise_with_help "Expected the context, #{inspect context}, to be a valid module name" + not Schema.valid?(schema) -> + help.raise_with_help "Expected the schema, #{inspect schema}, to be a valid module name" + context == schema -> + help.raise_with_help "The context and schema should have different names" + context == Mix.Phoenix.base() -> + help.raise_with_help "Cannot generate context #{context} because it has the same name as the application" + schema == Mix.Phoenix.base() -> + help.raise_with_help "Cannot generate schema #{schema} because it has the same name as the application" + true -> + args + end + end + + defp validate_args!(_, help) do + help.raise_with_help "Invalid arguments" + end + + @doc false + def raise_with_help(msg) do + Mix.raise """ + #{msg} + + mix phx.gen.html, phx.gen.json, phx.gen.live, and phx.gen.context + expect a context module name, followed by singular and plural names + of the generated resource, ending with any number of attributes. + For example: + + mix phx.gen.html Accounts User users name:string + mix phx.gen.json Accounts User users name:string + mix phx.gen.live Accounts User users name:string + mix phx.gen.context Accounts User users name:string + + The context serves as the API boundary for the given resource. + Multiple resources may belong to a context and a resource may be + split over distinct contexts (such as Accounts.User and Payments.User). + """ + end + + @doc false + def prompt_for_code_injection(%Context{generate?: false}), do: :ok + def prompt_for_code_injection(%Context{} = context) do + if Context.pre_existing?(context) && !merge_with_existing_context?(context) do + System.halt() + end + end + + defp merge_with_existing_context?(%Context{} = context) do + Keyword.get_lazy(context.opts, :merge_with_existing_context, fn -> + function_count = Context.function_count(context) + file_count = Context.file_count(context) + + Mix.shell().info(""" + You are generating into an existing context. + + The #{inspect(context.module)} context currently has #{singularize(function_count, "functions")} and \ + #{singularize(file_count, "files")} in its directory. 
+ + * It's OK to have multiple resources in the same context as \ + long as they are closely related. But if a context grows too \ + large, consider breaking it apart + + * If they are not closely related, another context probably works better + + The fact two entities are related in the database does not mean they belong \ + to the same context. + + If you are not sure, prefer creating a new context over adding to the existing one. + """) + + Mix.shell().yes?("Would you like to proceed?") + end) + end + + defp singularize(1, plural), do: "1 " <> String.trim_trailing(plural, "s") + defp singularize(amount, plural), do: "#{amount} #{plural}" +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.embedded.ex b/deps/phoenix/lib/mix/tasks/phx.gen.embedded.ex new file mode 100644 index 0000000..977cac2 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.embedded.ex @@ -0,0 +1,107 @@ +defmodule Mix.Tasks.Phx.Gen.Embedded do + @shortdoc "Generates an embedded Ecto schema file" + + @moduledoc """ + Generates an embedded Ecto schema for casting/validating data outside the DB. + + mix phx.gen.embedded Blog.Post title:string views:integer + + The first argument is the schema module followed by the schema attributes. + + The generated schema above will contain: + + * an embedded schema file in `lib/my_app/blog/post.ex` + + ## Attributes + + The resource fields are given using `name:type` syntax + where type are the types supported by Ecto. Omitting + the type makes it default to `:string`: + + mix phx.gen.embedded Blog.Post title views:integer + + The following types are supported: + + #{for attr <- Mix.Phoenix.Schema.valid_types(), do: " * `#{inspect attr}`\n"} + * `:datetime` - An alias for `:naive_datetime` + """ + use Mix.Task + + alias Mix.Phoenix.Schema + + @switches [binary_id: :boolean, web: :string] + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise "mix phx.gen.embedded must be invoked from within your *_web application root directory" + end + + schema = build(args) + + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(schema) + + copy_new_files(schema, paths, schema: schema) + end + + @doc false + def build(args) do + {schema_opts, parsed, _} = OptionParser.parse(args, switches: @switches) + [schema_name | attrs] = validate_args!(parsed) + opts = + schema_opts + |> Keyword.put(:embedded, true) + |> Keyword.put(:migration, false) + + schema = Schema.new(schema_name, nil, attrs, opts) + + schema + end + + @doc false + def validate_args!([schema | _] = args) do + if Schema.valid?(schema) do + args + else + raise_with_help "Expected the schema argument, #{inspect schema}, to be a valid module name" + end + end + def validate_args!(_) do + raise_with_help "Invalid arguments" + end + + @doc false + @spec raise_with_help(String.t) :: no_return() + def raise_with_help(msg) do + Mix.raise """ + #{msg} + + mix phx.gen.embedded expects a module name followed by + any number of attributes: + + mix phx.gen.embedded Blog.Post title:string + """ + end + + + defp prompt_for_conflicts(schema) do + schema + |> files_to_be_generated() + |> Mix.Phoenix.prompt_for_conflicts() + end + + @doc false + def files_to_be_generated(%Schema{} = schema) do + [{:eex, "embedded_schema.ex", schema.file}] + end + + @doc false + def copy_new_files(%Schema{} = schema, paths, binding) do + files = files_to_be_generated(schema) + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.embedded", binding, files) + + schema + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.ex 
b/deps/phoenix/lib/mix/tasks/phx.gen.ex
new file mode 100644
index 0000000..16b00f0
--- /dev/null
+++ b/deps/phoenix/lib/mix/tasks/phx.gen.ex
@@ -0,0 +1,26 @@
+defmodule Mix.Tasks.Phx.Gen do
+  use Mix.Task
+
+  @shortdoc "Lists all available Phoenix generators"
+
+  @moduledoc """
+  Lists all available Phoenix generators.
+
+  ## CRUD related generators
+
+  The table below shows a summary of the contents created by the CRUD generators:
+
+  | Task | Schema | Migration | Context | Controller | View | LiveView |
+  |:------------------ |:-:|:-:|:-:|:-:|:-:|:-:|
+  | `phx.gen.embedded` | ✓ | | | | | |
+  | `phx.gen.schema` | ✓ | ✓ | | | | |
+  | `phx.gen.context` | ✓ | ✓ | ✓ | | | |
+  | `phx.gen.live` | ✓ | ✓ | ✓ | | | ✓ |
+  | `phx.gen.json` | ✓ | ✓ | ✓ | ✓ | ✓ | |
+  | `phx.gen.html` | ✓ | ✓ | ✓ | ✓ | ✓ | |
+  """
+
+  def run(_args) do
+    Mix.Task.run("help", ["--search", "phx.gen."])
+  end
+end
diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.html.ex b/deps/phoenix/lib/mix/tasks/phx.gen.html.ex
new file mode 100644
index 0000000..aa21d62
--- /dev/null
+++ b/deps/phoenix/lib/mix/tasks/phx.gen.html.ex
@@ -0,0 +1,204 @@
+defmodule Mix.Tasks.Phx.Gen.Html do
+  @shortdoc "Generates controller, views, and context for an HTML resource"
+
+  @moduledoc """
+  Generates controller, views, and context for an HTML resource.
+
+      mix phx.gen.html Accounts User users name:string age:integer
+
+  The first argument is the context module followed by the schema module
+  and its plural name (used as the schema table name).
+
+  The context is an Elixir module that serves as an API boundary for
+  the given resource. A context often holds many related resources.
+  Therefore, if the context already exists, it will be augmented with
+  functions for the given resource.
+
+  > Note: A resource may also be split
+  > over distinct contexts (such as `Accounts.User` and `Payments.User`).
+
+  The schema is responsible for mapping the database fields into an
+  Elixir struct. It is followed by an optional list of attributes,
+  with their respective names and types. See `mix phx.gen.schema`
+  for more information on attributes.
+
+  Overall, this generator will add the following files to `lib/`:
+
+    * a context module in `lib/app/accounts.ex` for the accounts API
+    * a schema in `lib/app/accounts/user.ex`, with an `users` table
+    * a view in `lib/app_web/views/user_view.ex`
+    * a controller in `lib/app_web/controllers/user_controller.ex`
+    * default CRUD templates in `lib/app_web/templates/user`
+
+  ## The context app
+
+  A migration file for the repository and test files for the context and
+  controller features will also be generated.
+
+  The location of the web files (controllers, views, templates, etc) in an
+  umbrella application will vary based on the `:context_app` config located
+  in your applications `:generators` configuration. When set, the Phoenix
+  generators will generate web files directly in your lib and test folders
+  since the application is assumed to be isolated to web specific functionality.
+  If `:context_app` is not set, the generators will place web related lib
+  and test files in a `web/` directory since the application is assumed
+  to be handling both web and domain specific functionality.
+ Example configuration: + + config :my_app_web, :generators, context_app: :my_app + + Alternatively, the `--context-app` option may be supplied to the generator: + + mix phx.gen.html Sales User users --context-app warehouse + + ## Web namespace + + By default, the controller and view will be namespaced by the schema name. + You can customize the web module namespace by passing the `--web` flag with a + module name, for example: + + mix phx.gen.html Sales User users --web Sales + + Which would generate a `lib/app_web/controllers/sales/user_controller.ex` and + `lib/app_web/views/sales/user_view.ex`. + + ## Customizing the context, schema, tables and migrations + + In some cases, you may wish to bootstrap HTML templates, controllers, + and controller tests, but leave internal implementation of the context + or schema to yourself. You can use the `--no-context` and `--no-schema` + flags for file generation control. + + You can also change the table name or configure the migrations to + use binary ids for primary keys, see `mix phx.gen.schema` for more + information. + """ + use Mix.Task + + alias Mix.Phoenix.{Context, Schema} + alias Mix.Tasks.Phx.Gen + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise "mix phx.gen.html must be invoked from within your *_web application root directory" + end + + {context, schema} = Gen.Context.build(args) + Gen.Context.prompt_for_code_injection(context) + + binding = [context: context, schema: schema, inputs: inputs(schema)] + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(context) + + context + |> copy_new_files(paths, binding) + |> print_shell_instructions() + end + + defp prompt_for_conflicts(context) do + context + |> files_to_be_generated() + |> Kernel.++(context_files(context)) + |> Mix.Phoenix.prompt_for_conflicts() + end + defp context_files(%Context{generate?: true} = context) do + Gen.Context.files_to_be_generated(context) + end + defp context_files(%Context{generate?: false}) do + [] + end + + @doc false + def files_to_be_generated(%Context{schema: schema, context_app: context_app}) do + web_prefix = Mix.Phoenix.web_path(context_app) + test_prefix = Mix.Phoenix.web_test_path(context_app) + web_path = to_string(schema.web_path) + + [ + {:eex, "controller.ex", Path.join([web_prefix, "controllers", web_path, "#{schema.singular}_controller.ex"])}, + {:eex, "edit.html.heex", Path.join([web_prefix, "templates", web_path, schema.singular, "edit.html.heex"])}, + {:eex, "form.html.heex", Path.join([web_prefix, "templates", web_path, schema.singular, "form.html.heex"])}, + {:eex, "index.html.heex", Path.join([web_prefix, "templates", web_path, schema.singular, "index.html.heex"])}, + {:eex, "new.html.heex", Path.join([web_prefix, "templates", web_path, schema.singular, "new.html.heex"])}, + {:eex, "show.html.heex", Path.join([web_prefix, "templates", web_path, schema.singular, "show.html.heex"])}, + {:eex, "view.ex", Path.join([web_prefix, "views", web_path, "#{schema.singular}_view.ex"])}, + {:eex, "controller_test.exs", Path.join([test_prefix, "controllers", web_path, "#{schema.singular}_controller_test.exs"])}, + ] + end + + @doc false + def copy_new_files(%Context{} = context, paths, binding) do + files = files_to_be_generated(context) + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.html", binding, files) + if context.generate?, do: Gen.Context.copy_new_files(context, paths, binding) + context + end + + @doc false + def print_shell_instructions(%Context{schema: schema, context_app: ctx_app} = context) do 
+ if schema.web_namespace do + Mix.shell().info """ + + Add the resource to your #{schema.web_namespace} :browser scope in #{Mix.Phoenix.web_path(ctx_app)}/router.ex: + + scope "/#{schema.web_path}", #{inspect Module.concat(context.web_module, schema.web_namespace)}, as: :#{schema.web_path} do + pipe_through :browser + ... + resources "/#{schema.plural}", #{inspect schema.alias}Controller + end + """ + else + Mix.shell().info """ + + Add the resource to your browser scope in #{Mix.Phoenix.web_path(ctx_app)}/router.ex: + + resources "/#{schema.plural}", #{inspect schema.alias}Controller + """ + end + if context.generate?, do: Gen.Context.print_shell_instructions(context) + end + + @doc false + def inputs(%Schema{} = schema) do + Enum.map(schema.attrs, fn + {_, {:references, _}} -> + {nil, nil, nil} + {key, :integer} -> + {label(key), ~s(<%= number_input f, #{inspect(key)} %>), error(key)} + {key, :float} -> + {label(key), ~s(<%= number_input f, #{inspect(key)}, step: "any" %>), error(key)} + {key, :decimal} -> + {label(key), ~s(<%= number_input f, #{inspect(key)}, step: "any" %>), error(key)} + {key, :boolean} -> + {label(key), ~s(<%= checkbox f, #{inspect(key)} %>), error(key)} + {key, :text} -> + {label(key), ~s(<%= textarea f, #{inspect(key)} %>), error(key)} + {key, :date} -> + {label(key), ~s(<%= date_select f, #{inspect(key)} %>), error(key)} + {key, :time} -> + {label(key), ~s(<%= time_select f, #{inspect(key)} %>), error(key)} + {key, :utc_datetime} -> + {label(key), ~s(<%= datetime_select f, #{inspect(key)} %>), error(key)} + {key, :naive_datetime} -> + {label(key), ~s(<%= datetime_select f, #{inspect(key)} %>), error(key)} + {key, {:array, :integer}} -> + {label(key), ~s(<%= multiple_select f, #{inspect(key)}, ["1": 1, "2": 2] %>), error(key)} + {key, {:array, _}} -> + {label(key), ~s(<%= multiple_select f, #{inspect(key)}, ["Option 1": "option1", "Option 2": "option2"] %>), error(key)} + {key, {:enum, _}} -> + {label(key), ~s|<%= select f, #{inspect(key)}, Ecto.Enum.values(#{inspect(schema.module)}, #{inspect(key)}), prompt: "Choose a value" %>|, error(key)} + {key, _} -> + {label(key), ~s(<%= text_input f, #{inspect(key)} %>), error(key)} + end) + end + + defp label(key) do + ~s(<%= label f, #{inspect(key)} %>) + end + + defp error(field) do + ~s(<%= error_tag f, #{inspect(field)} %>) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.json.ex b/deps/phoenix/lib/mix/tasks/phx.gen.json.ex new file mode 100644 index 0000000..d7de310 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.json.ex @@ -0,0 +1,160 @@ +defmodule Mix.Tasks.Phx.Gen.Json do + @shortdoc "Generates controller, views, and context for a JSON resource" + + @moduledoc """ + Generates controller, views, and context for a JSON resource. + + mix phx.gen.json Accounts User users name:string age:integer + + The first argument is the context module followed by the schema module + and its plural name (used as the schema table name). + + The context is an Elixir module that serves as an API boundary for + the given resource. A context often holds many related resources. + Therefore, if the context already exists, it will be augmented with + functions for the given resource. + + > Note: A resource may also be split + > over distinct contexts (such as `Accounts.User` and `Payments.User`). + + The schema is responsible for mapping the database fields into an + Elixir struct. It is followed by an optional list of attributes, + with their respective names and types. 
See `mix phx.gen.schema` + for more information on attributes. + + Overall, this generator will add the following files to `lib/`: + + * a context module in `lib/app/accounts.ex` for the accounts API + * a schema in `lib/app/accounts/user.ex`, with an `users` table + * a view in `lib/app_web/views/user_view.ex` + * a controller in `lib/app_web/controllers/user_controller.ex` + + A migration file for the repository and test files for the context and + controller features will also be generated. + + ## The context app + + The location of the web files (controllers, views, templates, etc) in an + umbrella application will vary based on the `:context_app` config located + in your applications `:generators` configuration. When set, the Phoenix + generators will generate web files directly in your lib and test folders + since the application is assumed to be isolated to web specific functionality. + If `:context_app` is not set, the generators will place web related lib + and test files in a `web/` directory since the application is assumed + to be handling both web and domain specific functionality. + Example configuration: + + config :my_app_web, :generators, context_app: :my_app + + Alternatively, the `--context-app` option may be supplied to the generator: + + mix phx.gen.json Sales User users --context-app warehouse + + ## Web namespace + + By default, the controller and view will be namespaced by the schema name. + You can customize the web module namespace by passing the `--web` flag with a + module name, for example: + + mix phx.gen.json Sales User users --web Sales + + Which would generate a `lib/app_web/controllers/sales/user_controller.ex` and + `lib/app_web/views/sales/user_view.ex`. + + ## Customizing the context, schema, tables and migrations + + In some cases, you may wish to bootstrap JSON views, controllers, + and controller tests, but leave internal implementation of the context + or schema to yourself. You can use the `--no-context` and `--no-schema` + flags for file generation control. + + You can also change the table name or configure the migrations to + use binary ids for primary keys, see `mix phx.gen.schema` for more + information. 
+ """ + + use Mix.Task + + alias Mix.Phoenix.Context + alias Mix.Tasks.Phx.Gen + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise "mix phx.gen.json must be invoked from within your *_web application root directory" + end + + {context, schema} = Gen.Context.build(args) + Gen.Context.prompt_for_code_injection(context) + + binding = [context: context, schema: schema] + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(context) + + context + |> copy_new_files(paths, binding) + |> print_shell_instructions() + end + + defp prompt_for_conflicts(context) do + context + |> files_to_be_generated() + |> Kernel.++(context_files(context)) + |> Mix.Phoenix.prompt_for_conflicts() + end + defp context_files(%Context{generate?: true} = context) do + Gen.Context.files_to_be_generated(context) + end + defp context_files(%Context{generate?: false}) do + [] + end + + @doc false + def files_to_be_generated(%Context{schema: schema, context_app: context_app}) do + web_prefix = Mix.Phoenix.web_path(context_app) + test_prefix = Mix.Phoenix.web_test_path(context_app) + web_path = to_string(schema.web_path) + + [ + {:eex, "controller.ex", Path.join([web_prefix, "controllers", web_path, "#{schema.singular}_controller.ex"])}, + {:eex, "view.ex", Path.join([web_prefix, "views", web_path, "#{schema.singular}_view.ex"])}, + {:eex, "controller_test.exs", Path.join([test_prefix, "controllers", web_path, "#{schema.singular}_controller_test.exs"])}, + {:new_eex, "changeset_view.ex", Path.join([web_prefix, "views/changeset_view.ex"])}, + {:new_eex, "fallback_controller.ex", Path.join([web_prefix, "controllers/fallback_controller.ex"])}, + ] + end + + @doc false + def copy_new_files(%Context{} = context, paths, binding) do + files = files_to_be_generated(context) + Mix.Phoenix.copy_from paths, "priv/templates/phx.gen.json", binding, files + if context.generate?, do: Gen.Context.copy_new_files(context, paths, binding) + + context + end + + @doc false + def print_shell_instructions(%Context{schema: schema, context_app: ctx_app} = context) do + if schema.web_namespace do + Mix.shell().info """ + + Add the resource to your #{schema.web_namespace} :api scope in #{Mix.Phoenix.web_path(ctx_app)}/router.ex: + + scope "/#{schema.web_path}", #{inspect Module.concat(context.web_module, schema.web_namespace)}, as: :#{schema.web_path} do + pipe_through :api + ... + resources "/#{schema.plural}", #{inspect schema.alias}Controller + end + """ + else + Mix.shell().info """ + + Add the resource to your :api scope in #{Mix.Phoenix.web_path(ctx_app)}/router.ex: + + resources "/#{schema.plural}", #{inspect schema.alias}Controller, except: [:new, :edit] + """ + end + if context.generate?, do: Gen.Context.print_shell_instructions(context) + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.live.ex b/deps/phoenix/lib/mix/tasks/phx.gen.live.ex new file mode 100644 index 0000000..4bf63ca --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.live.ex @@ -0,0 +1,233 @@ +defmodule Mix.Tasks.Phx.Gen.Live do + @shortdoc "Generates LiveView, templates, and context for a resource" + + @moduledoc """ + Generates LiveView, templates, and context for a resource. + + mix phx.gen.live Accounts User users name:string age:integer + + The first argument is the context module. The context is an Elixir module + that serves as an API boundary for the given resource. A context often holds + many related resources. Therefore, if the context already exists, it will be + augmented with functions for the given resource. 
+ + The second argument is the schema module. The schema is responsible for + mapping the database fields into an Elixir struct. + + The remaining arguments are the schema module plural name (used as the schema + table name), and an optional list of attributes as their respective names and + types. See `mix help phx.gen.schema` for more information on attributes. + + When this command is run for the first time, a `ModalComponent` and + `LiveHelpers` module will be created, along with the resource level + LiveViews and components, including `UserLive.Index`, `UserLive.Show`, + and `UserLive.FormComponent` modules for the new resource. + + > Note: A resource may also be split + > over distinct contexts (such as `Accounts.User` and `Payments.User`). + + Overall, this generator will add the following files: + + * a context module in `lib/app/accounts.ex` for the accounts API + * a schema in `lib/app/accounts/user.ex`, with a `users` table + * a LiveView in `lib/app_web/live/user_live/show.ex` + * a LiveView in `lib/app_web/live/user_live/index.ex` + * a LiveComponent in `lib/app_web/live/user_live/form_component.ex` + * a helpers module in `lib/app_web/live/live_helpers.ex` with a modal + + After file generation is complete, there will be output regarding required + updates to the lib/app_web/router.ex file. + + Add the live routes to your browser scope in lib/app_web/router.ex: + + live "/users", UserLive.Index, :index + live "/users/new", UserLive.Index, :new + live "/users/:id/edit", UserLive.Index, :edit + + live "/users/:id", UserLive.Show, :show + live "/users/:id/show/edit", UserLive.Show, :edit + + ## The context app + + A migration file for the repository and test files for the context and + controller features will also be generated. + + The location of the web files (LiveView's, views, templates, etc.) in an + umbrella application will vary based on the `:context_app` config located + in your applications `:generators` configuration. When set, the Phoenix + generators will generate web files directly in your lib and test folders + since the application is assumed to be isolated to web specific functionality. + If `:context_app` is not set, the generators will place web related lib + and test files in a `web/` directory since the application is assumed + to be handling both web and domain specific functionality. + Example configuration: + + config :my_app_web, :generators, context_app: :my_app + + Alternatively, the `--context-app` option may be supplied to the generator: + + mix phx.gen.live Accounts User users --context-app warehouse + + ## Web namespace + + By default, the LiveView modules will be namespaced by the web module. + You can customize the web module namespace by passing the `--web` flag with a + module name, for example: + + mix phx.gen.live Accounts User users --web Sales + + Which would generate the LiveViews in `lib/app_web/live/sales/user_live/`, + namespaced `AppWeb.Sales.UserLive` instead of `AppWeb.UserLive`. + + ## Customizing the context, schema, tables and migrations + + In some cases, you may wish to bootstrap HTML templates, LiveViews, + and tests, but leave internal implementation of the context or schema + to yourself. You can use the `--no-context` and `--no-schema` flags + for file generation control. + + You can also change the table name or configure the migrations to + use binary ids for primary keys, see `mix help phx.gen.schema` for more + information. 
+ """ + use Mix.Task + + alias Mix.Phoenix.{Context} + alias Mix.Tasks.Phx.Gen + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise "mix phx.gen.live must be invoked from within your *_web application root directory" + end + + {context, schema} = Gen.Context.build(args) + Gen.Context.prompt_for_code_injection(context) + + binding = [context: context, schema: schema, inputs: Gen.Html.inputs(schema)] + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(context) + + context + |> copy_new_files(binding, paths) + |> maybe_inject_helpers() + |> print_shell_instructions() + end + + defp prompt_for_conflicts(context) do + context + |> files_to_be_generated() + |> Kernel.++(context_files(context)) + |> Mix.Phoenix.prompt_for_conflicts() + end + defp context_files(%Context{generate?: true} = context) do + Gen.Context.files_to_be_generated(context) + end + defp context_files(%Context{generate?: false}) do + [] + end + + defp files_to_be_generated(%Context{schema: schema, context_app: context_app}) do + web_prefix = Mix.Phoenix.web_path(context_app) + test_prefix = Mix.Phoenix.web_test_path(context_app) + web_path = to_string(schema.web_path) + live_subdir = "#{schema.singular}_live" + + [ + {:eex, "show.ex", Path.join([web_prefix, "live", web_path, live_subdir, "show.ex"])}, + {:eex, "index.ex", Path.join([web_prefix, "live", web_path, live_subdir, "index.ex"])}, + {:eex, "form_component.ex", Path.join([web_prefix, "live", web_path, live_subdir, "form_component.ex"])}, + {:eex, "form_component.html.heex", Path.join([web_prefix, "live", web_path, live_subdir, "form_component.html.heex"])}, + {:eex, "index.html.heex", Path.join([web_prefix, "live", web_path, live_subdir, "index.html.heex"])}, + {:eex, "show.html.heex", Path.join([web_prefix, "live", web_path, live_subdir, "show.html.heex"])}, + {:eex, "live_test.exs", Path.join([test_prefix, "live", web_path, "#{schema.singular}_live_test.exs"])}, + {:new_eex, "live_helpers.ex", Path.join([web_prefix, "live", "live_helpers.ex"])}, + ] + end + + defp copy_new_files(%Context{} = context, binding, paths) do + files = files_to_be_generated(context) + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.live", binding, files) + if context.generate?, do: Gen.Context.copy_new_files(context, paths, binding) + + context + end + + defp maybe_inject_helpers(%Context{context_app: ctx_app} = context) do + web_prefix = Mix.Phoenix.web_path(ctx_app) + [lib_prefix, web_dir] = Path.split(web_prefix) + file_path = Path.join(lib_prefix, "#{web_dir}.ex") + file = File.read!(file_path) + inject = "import #{inspect(context.web_module)}.LiveHelpers" + + if String.contains?(file, inject) do + :ok + else + do_inject_helpers(context, file, file_path, inject) + end + + context + end + + defp do_inject_helpers(context, file, file_path, inject) do + Mix.shell().info([:green, "* injecting ", :reset, Path.relative_to_cwd(file_path)]) + + new_file = String.replace(file, "import Phoenix.LiveView.Helpers", "import Phoenix.LiveView.Helpers\n #{inject}") + if file != new_file do + File.write!(file_path, new_file) + else + Mix.shell().info """ + + Could not find Phoenix.LiveView.Helpers imported in #{file_path}. + + This typically happens because your application was not generated + with the --live flag: + + mix phx.new my_app --live + + Please make sure LiveView is installed and that #{inspect(context.web_module)} + defines both `live_view/0` and `live_component/0` functions, + and that both functions import #{inspect(context.web_module)}.LiveHelpers. 
+ """ + end + end + + @doc false + def print_shell_instructions(%Context{schema: schema, context_app: ctx_app} = context) do + prefix = Module.concat(context.web_module, schema.web_namespace) + web_path = Mix.Phoenix.web_path(ctx_app) + + if schema.web_namespace do + Mix.shell().info """ + + Add the live routes to your #{schema.web_namespace} :browser scope in #{web_path}/router.ex: + + scope "/#{schema.web_path}", #{inspect prefix}, as: :#{schema.web_path} do + pipe_through :browser + ... + + #{for line <- live_route_instructions(schema), do: " #{line}"} + end + """ + else + Mix.shell().info """ + + Add the live routes to your browser scope in #{Mix.Phoenix.web_path(ctx_app)}/router.ex: + + #{for line <- live_route_instructions(schema), do: " #{line}"} + """ + end + if context.generate?, do: Gen.Context.print_shell_instructions(context) + end + + defp live_route_instructions(schema) do + [ + ~s|live "/#{schema.plural}", #{inspect(schema.alias)}Live.Index, :index\n|, + ~s|live "/#{schema.plural}/new", #{inspect(schema.alias)}Live.Index, :new\n|, + ~s|live "/#{schema.plural}/:id/edit", #{inspect(schema.alias)}Live.Index, :edit\n\n|, + ~s|live "/#{schema.plural}/:id", #{inspect(schema.alias)}Live.Show, :show\n|, + ~s|live "/#{schema.plural}/:id/show/edit", #{inspect(schema.alias)}Live.Show, :edit| + ] + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.notifier.ex b/deps/phoenix/lib/mix/tasks/phx.gen.notifier.ex new file mode 100644 index 0000000..6b9de9f --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.notifier.ex @@ -0,0 +1,214 @@ +defmodule Mix.Tasks.Phx.Gen.Notifier do + @shortdoc "Generates a notifier that delivers emails by default" + + @moduledoc """ + Generates a notifier that delivers emails by default. + + $ mix phx.gen.notifier Accounts User welcome_user reset_password confirmation_instructions + + This task expects a context module name, followed by a + notifier name and one or more message names. Messages + are the functions that will be created prefixed by "deliver", + so the message name should be "snake_case" without punctuation. + + Additionally a context app can be specified with the flag + `--context-app`, which is useful if the notifier is being + generated in a different app under an umbrella. + + $ mix phx.gen.notifier Accounts User welcome_user --context-app marketing + + The app "marketing" must exist before the command is executed. 
+ """ + + use Mix.Task + + @switches [ + context: :boolean, + context_app: :string, + prefix: :string + ] + + @default_opts [context: true] + + alias Mix.Phoenix.Context + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.notifier must be invoked from within your *_web application root directory" + ) + end + + {context, notifier_module, messages} = build(args) + + inflections = Mix.Phoenix.inflect(notifier_module) + + binding = [ + context: context, + inflections: inflections, + notifier_messages: messages + ] + + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(context) + + if "--no-compile" not in args do + Mix.Task.run("compile") + end + + context + |> copy_new_files(binding, paths) + |> maybe_print_mailer_installation_instructions() + end + + @doc false + def build(args, help \\ __MODULE__) do + {opts, parsed, _} = parse_opts(args) + + [context_name, notifier_name | notifier_messages] = validate_args!(parsed, help) + + notifier_module = inspect(Module.concat(context_name, "#{notifier_name}Notifier")) + context = Context.new(notifier_module, opts) + + {context, notifier_module, notifier_messages} + end + + defp parse_opts(args) do + {opts, parsed, invalid} = OptionParser.parse(args, switches: @switches) + + merged_opts = + @default_opts + |> Keyword.merge(opts) + |> put_context_app(opts[:context_app]) + + {merged_opts, parsed, invalid} + end + + defp put_context_app(opts, nil), do: opts + + defp put_context_app(opts, string) do + Keyword.put(opts, :context_app, String.to_atom(string)) + end + + defp validate_args!([context, notifier | messages] = args, help) do + cond do + not Context.valid?(context) -> + help.raise_with_help( + "Expected the context, #{inspect(context)}, to be a valid module name" + ) + + not valid_notifier?(notifier) -> + help.raise_with_help( + "Expected the notifier, #{inspect(notifier)}, to be a valid module name" + ) + + context == Mix.Phoenix.base() -> + help.raise_with_help( + "Cannot generate context #{context} because it has the same name as the application" + ) + + notifier == Mix.Phoenix.base() -> + help.raise_with_help( + "Cannot generate notifier #{notifier} because it has the same name as the application" + ) + + Enum.any?(messages, &(!valid_message?(&1))) -> + help.raise_with_help( + "Cannot generate notifier #{inspect(notifier)} because one of the messages is invalid: #{Enum.map_join(messages, ", ", &inspect/1)}" + ) + + true -> + args + end + end + + defp validate_args!(_, help) do + help.raise_with_help("Invalid arguments") + end + + defp valid_notifier?(notifier) do + notifier =~ ~r/^[A-Z]\w*(\.[A-Z]\w*)*$/ + end + + defp valid_message?(message_name) do + message_name =~ ~r/^[a-z]+(\_[a-z0-9]+)*$/ + end + + @doc false + @spec raise_with_help(String.t()) :: no_return() + def raise_with_help(msg) do + Mix.raise(""" + #{msg} + + mix phx.gen.notifier expects a context module name, followed by a + notifier name and one or more message names. Messages are the + functions that will be created prefixed by "deliver", so the message + name should be "snake_case" without punctuation. + For example: + + mix phx.gen.notifier Accounts User welcome reset_password + + In this example the notifier will be called `UserNotifier` inside + the Accounts context. The functions `deliver_welcome/1` and + `reset_password/1` will be created inside this notifier. 
+ """) + end + + defp copy_new_files(%Context{} = context, binding, paths) do + files = files_to_be_generated(context) + + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.notifier", binding, files) + + context + end + + defp files_to_be_generated(%Context{} = context) do + [ + {:eex, "notifier.ex", context.file}, + {:eex, "notifier_test.exs", context.test_file} + ] + end + + defp prompt_for_conflicts(context) do + context + |> files_to_be_generated() + |> Mix.Phoenix.prompt_for_conflicts() + end + + @doc false + @spec maybe_print_mailer_installation_instructions(%Context{}) :: %Context{} + def maybe_print_mailer_installation_instructions(%Context{} = context) do + mailer_module = Module.concat([context.base_module, "Mailer"]) + + unless Code.ensure_loaded?(mailer_module) do + Mix.shell().info(""" + Unable to find the "#{inspect(mailer_module)}" module defined. + + A mailer module like the following is expected to be defined + in your application in order to send emails. + + defmodule #{inspect(mailer_module)} do + use Swoosh.Mailer, otp_app: #{inspect(context.context_app)} + end + + It is also necessary to add "swoosh" as a dependency in your + "mix.exs" file: + + def deps do + [{:swoosh, "~> 1.4"}] + end + + Finally, an adapter needs to be set in your configuration: + + import Config + config #{inspect(context.context_app)}, #{inspect(mailer_module)}, adapter: Swoosh.Adapters.Local + + Check https://hexdocs.pm/swoosh for more details. + """) + end + + context + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.presence.ex b/deps/phoenix/lib/mix/tasks/phx.gen.presence.ex new file mode 100644 index 0000000..ad39cce --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.presence.ex @@ -0,0 +1,61 @@ +defmodule Mix.Tasks.Phx.Gen.Presence do + @shortdoc "Generates a Presence tracker" + + @moduledoc """ + Generates a Presence tracker. + + $ mix phx.gen.presence + $ mix phx.gen.presence MyPresence + + The argument, which defaults to `Presence`, defines the module name of the + Presence tracker. + + Generates a new file, `lib/my_app_web/channels/my_presence.ex`, where + `my_presence` is the snake-cased version of the provided module name. + """ + use Mix.Task + + @doc false + def run([]) do + run(["Presence"]) + end + def run([alias_name]) do + if Mix.Project.umbrella?() do + Mix.raise "mix phx.gen.presence must be invoked from within your *_web application root directory" + end + context_app = Mix.Phoenix.context_app() + otp_app = Mix.Phoenix.otp_app() + web_prefix = Mix.Phoenix.web_path(context_app) + inflections = Mix.Phoenix.inflect(alias_name) + inflections = Keyword.put(inflections, :module, "#{inflections[:web_module]}.#{inflections[:scoped]}") + + binding = inflections ++ [ + otp_app: otp_app, + pubsub_server: Module.concat(inflections[:base], "PubSub") + ] + + files = [ + {:eex, "presence.ex", Path.join(web_prefix, "channels/#{binding[:path]}.ex")}, + ] + + Mix.Phoenix.copy_from paths(), "priv/templates/phx.gen.presence", binding, files + + Mix.shell().info """ + + Add your new module to your supervision tree, + in lib/#{otp_app}/application.ex: + + children = [ + ... + #{binding[:module]} + ] + + You're all set! 
See the Phoenix.Presence docs for more details: + https://hexdocs.pm/phoenix/Phoenix.Presence.html + """ + end + + defp paths do + [".", :phoenix] + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.release.ex b/deps/phoenix/lib/mix/tasks/phx.gen.release.ex new file mode 100644 index 0000000..402de20 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.release.ex @@ -0,0 +1,207 @@ +defmodule Mix.Tasks.Phx.Gen.Release do + @shortdoc "Generates release files and optional Dockerfile for release-based deployments" + + @moduledoc """ + Generates release files and optional Dockerfile for release-based deployments. + + The following release files are created: + + * `lib/app_name/release.ex` - A release module containing tasks for running + migrations inside a release + + * `rel/overlays/bin/migrate` - A migrate script for conveniently invoking + the release system migrations + + * `rel/overlays/bin/server` - A server script for conveniently invoking + the release system with environment variables to start the phoenix web server + + Note, the `rel/overlays` directory is copied into the release build by default when + running `mix release`. + + To skip generating the migration-related files, use the `--no-ecto` flag. To + force these migration-related files to be generated, the use `--ecto` flag. + + ## Docker + + When the `--docker` flag is passed, the following docker files are generated: + + * `Dockerfile` - The Dockerfile for use in any standard docker deployment + + * `.dockerignore` - A docker ignore file with standard elixir defaults + + For extended release configuration, the `mix release.init`task can be used + in addition to this task. See the `Mix.Release` docs for more details. + """ + + use Mix.Task + + @doc false + def run(args) do + opts = parse_args(args) + + if Mix.Project.umbrella?() do + Mix.raise(""" + mix phx.gen.release is not supported in umbrella applications. + + Run this task in your web application instead. + """) + end + + app = Mix.Phoenix.otp_app() + app_namespace = Mix.Phoenix.base() + web_namespace = app_namespace |> Mix.Phoenix.web_module() |> inspect() + + binding = [ + app_namespace: app_namespace, + otp_app: app, + elixir_vsn: System.version(), + otp_vsn: otp_vsn(), + assets_dir_exists?: File.dir?("assets") + ] + + Mix.Phoenix.copy_from(paths(), "priv/templates/phx.gen.release", binding, [ + {:eex, "rel/server.sh.eex", "rel/overlays/bin/server"}, + {:eex, "rel/server.bat.eex", "rel/overlays/bin/server.bat"} + ]) + + if opts.ecto do + Mix.Phoenix.copy_from(paths(), "priv/templates/phx.gen.release", binding, [ + {:eex, "rel/migrate.sh.eex", "rel/overlays/bin/migrate"}, + {:eex, "rel/migrate.bat.eex", "rel/overlays/bin/migrate.bat"}, + {:eex, "release.ex", Mix.Phoenix.context_lib_path(app, "release.ex")} + ]) + end + + if opts.docker do + Mix.Phoenix.copy_from(paths(), "priv/templates/phx.gen.release", binding, [ + {:eex, "Dockerfile.eex", "Dockerfile"}, + {:eex, "dockerignore.eex", ".dockerignore"} + ]) + end + + File.chmod!("rel/overlays/bin/server", 0o755) + File.chmod!("rel/overlays/bin/server.bat", 0o755) + + if opts.ecto do + File.chmod!("rel/overlays/bin/migrate", 0o755) + File.chmod!("rel/overlays/bin/migrate.bat", 0o755) + end + + Mix.shell().info(""" + + Your application is ready to be deployed in a release! + + See https://hexdocs.pm/mix/Mix.Tasks.Release.html for more information about Elixir releases. 
+ #{if opts.docker, do: docker_instructions()} + Here are some useful release commands you can run in any release environment: + + # To build a release + mix release + + # To start your system with the Phoenix server running + _build/dev/rel/#{app}/bin/server + #{if opts.ecto, do: ecto_instructions(app)} + Once the release is running you can connect to it remotely: + + _build/dev/rel/#{app}/bin/#{app} remote + + To list all commands: + + _build/dev/rel/#{app}/bin/#{app} + """) + + if opts.ecto do + post_install_instructions("config/runtime.exs", ~r/ECTO_IPV6/, """ + [warn] Conditional IPV6 support missing from runtime configuration. + + Add the following to your config/runtime.exs: + + maybe_ipv6 = if System.get_env("ECTO_IPV6"), do: [:inet6], else: [] + + config :#{app}, #{app_namespace}.Repo, + ..., + socket_options: maybe_ipv6 + """) + end + + post_install_instructions("config/runtime.exs", ~r/PHX_SERVER/, """ + [warn] Conditional server startup is missing from runtime configuration. + + Add the following to the top of your config/runtime.exs: + + if System.get_env("PHX_SERVER") do + config :#{app}, #{web_namespace}.Endpoint, server: true + end + """) + + post_install_instructions("config/runtime.exs", ~r/PHX_HOST/, """ + [warn] Environment based URL export is missing from runtime configuration. + + Add the following to your config/runtime.exs: + + host = System.get_env("PHX_HOST") || "example.com" + + config :#{app}, #{web_namespace}.Endpoint, + ..., + url: [host: host, port: 443] + """) + end + + defp parse_args(args) do + args + |> OptionParser.parse!(strict: [ecto: :boolean, docker: :boolean]) + |> elem(0) + |> Keyword.put_new_lazy(:ecto, &ecto_sql_installed?/0) + |> Keyword.put_new(:docker, false) + |> Map.new() + end + + defp ecto_instructions(app) do + """ + + # To run migrations + _build/dev/rel/#{app}/bin/migrate + """ + end + + defp docker_instructions do + """ + + Using the generated Dockerfile, your release will be bundled into + a Docker image, ready for deployment on platforms that support Docker. + + For more information about deploying with Docker see + https://hexdocs.pm/phoenix/releases.html#containers + """ + end + + defp paths do + [".", :phoenix] + end + + defp post_install_instructions(path, matching, msg) do + case File.read(path) do + {:ok, content} -> + unless content =~ matching, do: Mix.shell().info(msg) + + {:error, _} -> + Mix.shell().info(msg) + end + end + + def otp_vsn do + major = to_string(:erlang.system_info(:otp_release)) + path = Path.join([:code.root_dir(), "releases", major, "OTP_VERSION"]) + + case File.read(path) do + {:ok, content} -> + String.trim(content) + + {:error, _} -> + IO.warn("unable to read OTP minor version at #{path}. Falling back to #{major}.0") + "#{major}.0" + end + end + + defp ecto_sql_installed?, do: Mix.Project.deps_paths() |> Map.has_key?(:ecto_sql) +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.schema.ex b/deps/phoenix/lib/mix/tasks/phx.gen.schema.ex new file mode 100644 index 0000000..8ce05b5 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.schema.ex @@ -0,0 +1,219 @@ +defmodule Mix.Tasks.Phx.Gen.Schema do + @shortdoc "Generates an Ecto schema and migration file" + + @moduledoc """ + Generates an Ecto schema and migration. + + $ mix phx.gen.schema Blog.Post blog_posts title:string views:integer + + The first argument is the schema module followed by its plural + name (used as the table name). 
+ + The generated schema above will contain: + + * a schema file in `lib/my_app/blog/post.ex`, with a `blog_posts` table + * a migration file for the repository + + The generated migration can be skipped with `--no-migration`. + + ## Contexts + + Your schemas can be generated and added to a separate OTP app. + Make sure your configuration is properly setup or manually + specify the context app with the `--context-app` option with + the CLI. + + Via config: + + config :marketing_web, :generators, context_app: :marketing + + Via CLI: + + $ mix phx.gen.schema Blog.Post blog_posts title:string views:integer --context-app marketing + + ## Attributes + + The resource fields are given using `name:type` syntax + where type are the types supported by Ecto. Omitting + the type makes it default to `:string`: + + $ mix phx.gen.schema Blog.Post blog_posts title views:integer + + The following types are supported: + + #{for attr <- Mix.Phoenix.Schema.valid_types(), do: " * `#{inspect attr}`\n"} + * `:datetime` - An alias for `:naive_datetime` + + The generator also supports references, which we will properly + associate the given column to the primary key column of the + referenced table: + + $ mix phx.gen.schema Blog.Post blog_posts title user_id:references:users + + This will result in a migration with an `:integer` column + of `:user_id` and create an index. + + Furthermore an array type can also be given if it is + supported by your database, although it requires the + type of the underlying array element to be given too: + + $ mix phx.gen.schema Blog.Post blog_posts tags:array:string + + Unique columns can be automatically generated by using: + + $ mix phx.gen.schema Blog.Post blog_posts title:unique unique_int:integer:unique + + Redact columns can be automatically generated by using: + + $ mix phx.gen.schema Accounts.Superhero superheroes secret_identity:redact password:string:redact + + Ecto.Enum fields can be generated by using: + + $ mix phx.gen.schema Blog.Post blog_posts title status:enum:unpublished:published:deleted + + If no data type is given, it defaults to a string. + + ## table + + By default, the table name for the migration and schema will be + the plural name provided for the resource. To customize this value, + a `--table` option may be provided. For example: + + $ mix phx.gen.schema Blog.Post posts --table cms_posts + + ## binary_id + + Generated migration can use `binary_id` for schema's primary key + and its references with option `--binary-id`. + + ## Default options + + This generator uses default options provided in the `:generators` + configuration of your application. These are the defaults: + + config :your_app, :generators, + migration: true, + binary_id: false, + sample_binary_id: "11111111-1111-1111-1111-111111111111" + + You can override those options per invocation by providing corresponding + switches, e.g. `--no-binary-id` to use normal ids despite the default + configuration or `--migration` to force generation of the migration. 
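+
+ As an illustrative sketch (not the literal template output; `MyApp` is a
+ placeholder for your application name), the `Blog.Post` example above
+ generates a schema module along these lines, together with a migration
+ that creates the `blog_posts` table:
+
+     defmodule MyApp.Blog.Post do
+       use Ecto.Schema
+       import Ecto.Changeset
+
+       schema "blog_posts" do
+         field :title, :string
+         field :views, :integer
+
+         timestamps()
+       end
+
+       @doc false
+       def changeset(post, attrs) do
+         post
+         |> cast(attrs, [:title, :views])
+         |> validate_required([:title, :views])
+       end
+     end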
+ """ + use Mix.Task + + alias Mix.Phoenix.Schema + + @switches [migration: :boolean, binary_id: :boolean, table: :string, + web: :string, context_app: :string, prefix: :string] + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise "mix phx.gen.schema must be invoked from within your *_web application root directory" + end + + schema = build(args, []) + paths = Mix.Phoenix.generator_paths() + + prompt_for_conflicts(schema) + + schema + |> copy_new_files(paths, schema: schema) + |> print_shell_instructions() + end + + defp prompt_for_conflicts(schema) do + schema + |> files_to_be_generated() + |> Mix.Phoenix.prompt_for_conflicts() + end + + @doc false + def build(args, parent_opts, help \\ __MODULE__) do + {schema_opts, parsed, _} = OptionParser.parse(args, switches: @switches) + [schema_name, plural | attrs] = validate_args!(parsed, help) + + opts = + parent_opts + |> Keyword.merge(schema_opts) + |> put_context_app(schema_opts[:context_app]) + + schema = Schema.new(schema_name, plural, attrs, opts) + + schema + end + + defp put_context_app(opts, nil), do: opts + defp put_context_app(opts, string) do + Keyword.put(opts, :context_app, String.to_atom(string)) + end + + @doc false + def files_to_be_generated(%Schema{} = schema) do + [{:eex, "schema.ex", schema.file}] + end + + @doc false + def copy_new_files(%Schema{context_app: ctx_app} = schema, paths, binding) do + files = files_to_be_generated(schema) + Mix.Phoenix.copy_from(paths, "priv/templates/phx.gen.schema", binding, files) + + if schema.migration? do + migration_path = Mix.Phoenix.context_app_path(ctx_app, "priv/repo/migrations/#{timestamp()}_create_#{schema.table}.exs") + Mix.Phoenix.copy_from paths, "priv/templates/phx.gen.schema", binding, [ + {:eex, "migration.exs", migration_path}, + ] + end + + schema + end + + @doc false + def print_shell_instructions(%Schema{} = schema) do + if schema.migration? do + Mix.shell().info """ + + Remember to update your repository by running migrations: + + $ mix ecto.migrate + """ + end + end + + @doc false + def validate_args!([schema, plural | _] = args, help) do + cond do + not Schema.valid?(schema) -> + help.raise_with_help "Expected the schema argument, #{inspect schema}, to be a valid module name" + String.contains?(plural, ":") or plural != Phoenix.Naming.underscore(plural) -> + help.raise_with_help "Expected the plural argument, #{inspect plural}, to be all lowercase using snake_case convention" + true -> + args + end + end + def validate_args!(_, help) do + help.raise_with_help "Invalid arguments" + end + + @doc false + @spec raise_with_help(String.t) :: no_return() + def raise_with_help(msg) do + Mix.raise """ + #{msg} + + mix phx.gen.schema expects both a module name and + the plural of the generated resource followed by + any number of attributes: + + mix phx.gen.schema Blog.Post blog_posts title:string + """ + end + + defp timestamp do + {{y, m, d}, {hh, mm, ss}} = :calendar.universal_time() + "#{y}#{pad(m)}#{pad(d)}#{pad(hh)}#{pad(mm)}#{pad(ss)}" + end + defp pad(i) when i < 10, do: << ?0, ?0 + i >> + defp pad(i), do: to_string(i) +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.secret.ex b/deps/phoenix/lib/mix/tasks/phx.gen.secret.ex new file mode 100644 index 0000000..91e39a2 --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.secret.ex @@ -0,0 +1,36 @@ +defmodule Mix.Tasks.Phx.Gen.Secret do + @shortdoc "Generates a secret" + + @moduledoc """ + Generates a secret and prints it to the terminal. 
+ + $ mix phx.gen.secret [length] + + By default, mix phx.gen.secret generates a key 64 characters long. + + The minimum value for `length` is 32. + """ + use Mix.Task + + @doc false + def run([]), do: run(["64"]) + def run([int]), do: int |> parse!() |> random_string() |> Mix.shell().info() + def run([_|_]), do: invalid_args!() + + defp parse!(int) do + case Integer.parse(int) do + {int, ""} -> int + _ -> invalid_args!() + end + end + + defp random_string(length) when length > 31 do + :crypto.strong_rand_bytes(length) |> Base.encode64(padding: false) |> binary_part(0, length) + end + defp random_string(_), do: Mix.raise "The secret should be at least 32 characters long" + + @spec invalid_args!() :: no_return() + defp invalid_args! do + Mix.raise "mix phx.gen.secret expects a length as integer or no argument at all" + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.gen.socket.ex b/deps/phoenix/lib/mix/tasks/phx.gen.socket.ex new file mode 100644 index 0000000..6aaca6d --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.gen.socket.ex @@ -0,0 +1,116 @@ +defmodule Mix.Tasks.Phx.Gen.Socket do + @shortdoc "Generates a Phoenix socket handler" + + @moduledoc """ + Generates a Phoenix socket handler. + + $ mix phx.gen.socket User + + Accepts the module name for the socket + + The generated files will contain: + + For a regular application: + + * a client in `assets/js` + * a socket in `lib/my_app_web/channels` + + For an umbrella application: + + * a client in `apps/my_app_web/assets/js` + * a socket in `apps/my_app_web/lib/app_name_web/channels` + + You can then generate channels with `mix phx.gen.channel`. + """ + use Mix.Task + + @doc false + def run(args) do + if Mix.Project.umbrella?() do + Mix.raise( + "mix phx.gen.socket must be invoked from within your *_web application root directory" + ) + end + + [socket_name, pre_existing_channel] = validate_args!(args) + + context_app = Mix.Phoenix.context_app() + web_prefix = Mix.Phoenix.web_path(context_app) + binding = Mix.Phoenix.inflect(socket_name) + + existing_channel = + if pre_existing_channel do + channel_binding = Mix.Phoenix.inflect(pre_existing_channel) + + Keyword.put( + channel_binding, + :module, + "#{channel_binding[:web_module]}.#{channel_binding[:scoped]}" + ) + end + + binding = + binding + |> Keyword.put(:module, "#{binding[:web_module]}.#{binding[:scoped]}") + |> Keyword.put(:endpoint_module, Module.concat([binding[:web_module], Endpoint])) + |> Keyword.put(:web_prefix, web_prefix) + |> Keyword.put(:existing_channel, existing_channel) + + Mix.Phoenix.check_module_name_availability!(binding[:module] <> "Socket") + + Mix.Phoenix.copy_from(paths(), "priv/templates/phx.gen.socket", binding, [ + {:eex, "socket.ex", Path.join(web_prefix, "channels/#{binding[:path]}_socket.ex")}, + {:eex, "socket.js", "assets/js/#{binding[:path]}_socket.js"} + ]) + + Mix.shell().info(""" + + Add the socket handler to your `#{Mix.Phoenix.web_path(context_app, "endpoint.ex")}`, for example: + + socket "/socket", #{binding[:module]}Socket, + websocket: true, + longpoll: false + + For the front-end integration, you need to import the `#{binding[:path]}_socket.js` + in your `assets/js/app.js` file: + + import "./#{binding[:path]}_socket.js" + """) + end + + @spec raise_with_help() :: no_return() + defp raise_with_help do + Mix.raise(""" + mix phx.gen.socket expects the module name: + + mix phx.gen.socket User + + """) + end + + defp validate_args!([name, "--from-channel", pre_existing_channel]) do + unless valid_name?(name) and
valid_name?(pre_existing_channel) do + raise_with_help() + end + + [name, pre_existing_channel] + end + + defp validate_args!([name]) do + unless valid_name?(name) do + raise_with_help() + end + + [name, nil] + end + + defp validate_args!(_), do: raise_with_help() + + defp valid_name?(name) do + name =~ ~r/^[A-Z]\w*(\.[A-Z]\w*)*$/ + end + + defp paths do + [".", :phoenix] + end +end diff --git a/deps/phoenix/lib/mix/tasks/phx.routes.ex b/deps/phoenix/lib/mix/tasks/phx.routes.ex new file mode 100644 index 0000000..f0ba35a --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.routes.ex @@ -0,0 +1,99 @@ +defmodule Mix.Tasks.Phx.Routes do + use Mix.Task + alias Phoenix.Router.ConsoleFormatter + + @shortdoc "Prints all routes" + + @moduledoc """ + Prints all routes for the default or a given router. + + $ mix phx.routes + $ mix phx.routes MyApp.AnotherRouter + + The default router is inflected from the application + name unless a configuration named `:namespace` + is set inside your application configuration. For example, + the configuration: + + config :my_app, + namespace: My.App + + will exhibit the routes for `My.App.Router` when this + task is invoked without arguments. + + Umbrella projects do not have a default router and + therefore always expect a router to be given. An + alias can be added to mix.exs to automate this: + + defp aliases do + [ + "phx.routes": "phx.routes MyAppWeb.Router", + # aliases... + ] + + """ + + @doc false + def run(args, base \\ Mix.Phoenix.base()) do + Mix.Task.run("compile", args) + Mix.Task.reenable("phx.routes") + + {router_mod, opts} = + case OptionParser.parse(args, switches: [endpoint: :string, router: :string]) do + {opts, [passed_router], _} -> {router(passed_router, base), opts} + {opts, [], _} -> {router(opts[:router], base), opts} + end + + router_mod + |> ConsoleFormatter.format(endpoint(opts[:endpoint], base)) + |> Mix.shell().info() + end + + defp endpoint(nil, base) do + loaded(web_mod(base, "Endpoint")) + end + defp endpoint(module, _base) do + loaded(Module.concat([module])) + end + + defp router(nil, base) do + if Mix.Project.umbrella?() do + Mix.raise """ + umbrella applications require an explicit router to be given to phx.routes, for example: + + $ mix phx.routes MyAppWeb.Router + + An alias can be added to mix.exs aliases to automate this: + + "phx.routes": "phx.routes MyAppWeb.Router" + + """ + end + web_router = web_mod(base, "Router") + old_router = app_mod(base, "Router") + + loaded(web_router) || loaded(old_router) || Mix.raise """ + no router found at #{inspect web_router} or #{inspect old_router}. 
+ An explicit router module may be given to phx.routes, for example: + + $ mix phx.routes MyAppWeb.Router + + An alias can be added to mix.exs aliases to automate this: + + "phx.routes": "phx.routes MyAppWeb.Router" + + """ + end + defp router(router_name, _base) do + arg_router = Module.concat([router_name]) + loaded(arg_router) || Mix.raise "the provided router, #{inspect(arg_router)}, does not exist" + end + + defp loaded(module) do + if Code.ensure_loaded?(module), do: module + end + + defp app_mod(base, name), do: Module.concat([base, name]) + + defp web_mod(base, name), do: Module.concat(["#{base}Web", name]) +end diff --git a/deps/phoenix/lib/mix/tasks/phx.server.ex b/deps/phoenix/lib/mix/tasks/phx.server.ex new file mode 100644 index 0000000..048982a --- /dev/null +++ b/deps/phoenix/lib/mix/tasks/phx.server.ex @@ -0,0 +1,55 @@ +defmodule Mix.Tasks.Phx.Server do + use Mix.Task + + @shortdoc "Starts applications and their servers" + + @moduledoc """ + Starts the application by configuring all endpoints servers to run. + + Note: to start the endpoint without using this mix task you must set + `server: true` in your `Phoenix.Endpoint` configuration. + + ## Command line options + + * `--open` - open browser window for each started endpoint + + Furthermore, this task accepts the same command-line options as + `mix run`. + + For example, to run `phx.server` without recompiling: + + $ mix phx.server --no-compile + + The `--no-halt` flag is automatically added. + + Note that the `--no-deps-check` flag cannot be used this way, + because Mix needs to check dependencies to find `phx.server`. + + To run `phx.server` without checking dependencies, you can run: + + $ mix do deps.loadpaths --no-deps-check, phx.server + """ + + @impl true + def run(args) do + Application.put_env(:phoenix, :serve_endpoints, true, persistent: true) + Mix.Tasks.Run.run(open_args(args) ++ run_args()) + end + + defp iex_running? do + Code.ensure_loaded?(IEx) and IEx.started?() + end + + defp open_args(args) do + if "--open" in args do + Application.put_env(:phoenix, :browser_open, true) + args -- ["--open"] + else + args + end + end + + defp run_args do + if iex_running?(), do: [], else: ["--no-halt"] + end +end diff --git a/deps/phoenix/lib/phoenix.ex b/deps/phoenix/lib/phoenix.ex new file mode 100644 index 0000000..1913125 --- /dev/null +++ b/deps/phoenix/lib/phoenix.ex @@ -0,0 +1,136 @@ +defmodule Phoenix do + @moduledoc """ + This is the documentation for the Phoenix project. + + By default, Phoenix applications depend on the following packages + across these categories. 
+ + ## General + + * [Ecto](https://hexdocs.pm/ecto) - a language integrated query and + database wrapper + + * [ExUnit](https://hexdocs.pm/ex_unit) - Elixir's built-in test framework + + * [Gettext](https://hexdocs.pm/gettext) - Internationalization and + localization through [`gettext`](https://www.gnu.org/software/gettext/) + + * [Phoenix](https://hexdocs.pm/phoenix) - the Phoenix web framework + (these docs) + + * [Phoenix PubSub](https://hexdocs.pm/phoenix_pubsub) - a distributed + pub/sub system with presence support + + * [Phoenix HTML](https://hexdocs.pm/phoenix_html) - conveniences for + working with HTML in Phoenix + + * [Phoenix View](https://hexdocs.pm/phoenix_view) - a set of functions + for building `Phoenix.View` and working with template languages such + as Elixir's own `EEx` + + * [Phoenix LiveView](https://hexdocs.pm/phoenix_live_view) - rich, + real-time user experiences with server-rendered HTML + + * [Phoenix LiveDashboard](https://hexdocs.pm/phoenix_live_dashboard) - + real-time performance monitoring and debugging tools for Phoenix + developers + + * [Plug](https://hexdocs.pm/plug) - a specification and conveniences + for composable modules in between web applications + + * [Swoosh](https://hexdocs.pm/swoosh) - a library for composing, + delivering and testing emails, also used by `mix phx.gen.auth` + + * [Telemetry Metrics](https://hexdocs.pm/telemetry_metrics) - common + interface for defining metrics based on Telemetry events + + To get started, see our [overview guides](overview.html). + """ + use Application + + @doc false + def start(_type, _args) do + # Warm up caches + _ = Phoenix.Template.engines() + _ = Phoenix.Template.format_encoder("index.html") + warn_on_missing_json_library() + + # Configure proper system flags from Phoenix only + if stacktrace_depth = Application.get_env(:phoenix, :stacktrace_depth) do + :erlang.system_flag(:backtrace_depth, stacktrace_depth) + end + + if Application.fetch_env!(:phoenix, :logger) do + Phoenix.Logger.install() + end + + children = [ + # Code reloading must be serial across all Phoenix apps + Phoenix.CodeReloader.Server, + {DynamicSupervisor, name: Phoenix.Transports.LongPoll.Supervisor, strategy: :one_for_one} + ] + + Supervisor.start_link(children, strategy: :one_for_one, name: Phoenix.Supervisor) + end + + # TODO v2: swap Poison default with Jason + # From there we can ditch explicit config for new projects + @doc """ + Returns the configured JSON encoding library for Phoenix. + + To customize the JSON library, including the following + in your `config/config.exs`: + + config :phoenix, :json_library, Jason + + """ + def json_library do + Application.get_env(:phoenix, :json_library, Poison) + end + + @doc """ + Returns the `:plug_init_mode` that controls when plugs are + initialized. + + We recommend to set it to `:runtime` in development for + compilation time improvements. It must be `:compile` in + production (the default). + + This option is passed as the `:init_mode` to `Plug.Builder.compile/3`. + """ + def plug_init_mode do + Application.get_env(:phoenix, :plug_init_mode, :compile) + end + + defp warn_on_missing_json_library do + configured_lib = Application.get_env(:phoenix, :json_library) + + cond do + configured_lib && Code.ensure_loaded?(configured_lib) -> + true + + configured_lib && not Code.ensure_loaded?(configured_lib) -> + IO.warn """ + found #{inspect(configured_lib)} in your application configuration + for Phoenix JSON encoding, but module #{inspect(configured_lib)} is not available. 
+ Ensure #{inspect(configured_lib)} is listed as a dependency in mix.exs. + """ + + true -> + IO.warn """ + Phoenix now requires you to explicitly list which engine to use + for Phoenix JSON encoding. We recommend everyone to upgrade to + Jason by setting in your config/config.exs: + + config :phoenix, :json_library, Jason + + And then adding {:jason, "~> 1.0"} as a dependency. + + If instead you would rather continue using Poison, then add to + your config/config.exs: + + config :phoenix, :json_library, Poison + """ + end + end +end diff --git a/deps/phoenix/lib/phoenix/channel.ex b/deps/phoenix/lib/phoenix/channel.ex new file mode 100644 index 0000000..ea60fcc --- /dev/null +++ b/deps/phoenix/lib/phoenix/channel.ex @@ -0,0 +1,661 @@ +defmodule Phoenix.Channel do + @moduledoc ~S""" + Defines a Phoenix Channel. + + Channels provide a means for bidirectional communication from clients that + integrate with the `Phoenix.PubSub` layer for soft-realtime functionality. + + ## Topics & Callbacks + + Every time you join a channel, you need to choose which particular topic you + want to listen to. The topic is just an identifier, but by convention it is + often made of two parts: `"topic:subtopic"`. Using the `"topic:subtopic"` + approach pairs nicely with the `Phoenix.Socket.channel/3` allowing you to + match on all topics starting with a given prefix by using a splat (the `*` + character) as the last character in the topic pattern: + + channel "room:*", MyAppWeb.RoomChannel + + Any topic coming into the router with the `"room:"` prefix would dispatch + to `MyAppWeb.RoomChannel` in the above example. Topics can also be pattern + matched in your channels' `join/3` callback to pluck out the scoped pattern: + + # handles the special `"lobby"` subtopic + def join("room:lobby", _payload, socket) do + {:ok, socket} + end + + # handles any other subtopic as the room ID, for example `"room:12"`, `"room:34"` + def join("room:" <> room_id, _payload, socket) do + {:ok, socket} + end + + ## Authorization + + Clients must join a channel to send and receive PubSub events on that channel. + Your channels must implement a `join/3` callback that authorizes the socket + for the given topic. For example, you could check if the user is allowed to + join that particular room. + + To authorize a socket in `join/3`, return `{:ok, socket}`. + To refuse authorization in `join/3`, return `{:error, reply}`. + + ## Incoming Events + + After a client has successfully joined a channel, incoming events from the + client are routed through the channel's `handle_in/3` callbacks. Within these + callbacks, you can perform any action. Typically you'll either forward a + message to all listeners with `broadcast!/3`, or push a message directly down + the socket with `push/3`. Incoming callbacks must return the `socket` to + maintain ephemeral state. + + Here's an example of receiving an incoming `"new_msg"` event from one client, + and broadcasting the message to all topic subscribers for this socket. + + def handle_in("new_msg", %{"uid" => uid, "body" => body}, socket) do + broadcast!(socket, "new_msg", %{uid: uid, body: body}) + {:noreply, socket} + end + + General message payloads are received as maps, and binary data payloads are + passed as a `{:binary, data}` tuple: + + def handle_in("file_chunk", {:binary, chunk}, socket) do + ... 
+ {:reply, :ok, socket} + end + + You can also push a message directly down the socket, in the form of a map, + or a tagged `{:binary, data}` tuple: + + # client asks for their current rank, push sent directly as a new event. + def handle_in("current_rank", _, socket) do + push(socket, "current_rank", %{val: Game.get_rank(socket.assigns[:user])}) + push(socket, "photo", {:binary, File.read!(socket.assigns.photo_path)}) + {:noreply, socket} + end + + ## Replies + + In addition to pushing messages out when you receive a `handle_in` event, + you can also reply directly to a client event for request/response style + messaging. This is useful when a client must know the result of an operation + or to simply ack messages. + + For example, imagine creating a resource and replying with the created record: + + def handle_in("create:post", attrs, socket) do + changeset = Post.changeset(%Post{}, attrs) + + if changeset.valid? do + post = Repo.insert!(changeset) + response = MyAppWeb.PostView.render("show.json", %{post: post}) + {:reply, {:ok, response}, socket} + else + response = MyAppWeb.ChangesetView.render("errors.json", %{changeset: changeset}) + {:reply, {:error, response}, socket} + end + end + + Alternatively, you may just want to ack the status of the operation: + + def handle_in("create:post", attrs, socket) do + changeset = Post.changeset(%Post{}, attrs) + + if changeset.valid? do + Repo.insert!(changeset) + {:reply, :ok, socket} + else + {:reply, :error, socket} + end + end + + Like binary pushes, binary data is also supported with replies via a `{:binary, data}` tuple: + + {:reply, {:ok, {:binary, bin}}, socket} + + ## Intercepting Outgoing Events + + When an event is broadcasted with `broadcast/3`, each channel subscriber can + choose to intercept the event and have their `handle_out/3` callback triggered. + This allows the event's payload to be customized on a socket by socket basis + to append extra information, or conditionally filter the message from being + delivered. If the event is not intercepted with `Phoenix.Channel.intercept/1`, + then the message is pushed directly to the client: + + intercept ["new_msg", "user_joined"] + + # for every socket subscribing to this topic, append an `is_editable` + # value for client metadata. + def handle_out("new_msg", msg, socket) do + push(socket, "new_msg", Map.merge(msg, + %{is_editable: User.can_edit_message?(socket.assigns[:user], msg)} + )) + {:noreply, socket} + end + + # do not send broadcasted `"user_joined"` events if this socket's user + # is ignoring the user who joined. + def handle_out("user_joined", msg, socket) do + unless User.ignoring?(socket.assigns[:user], msg.user_id) do + push(socket, "user_joined", msg) + end + {:noreply, socket} + end + + ## Broadcasting to an external topic + + In some cases, you will want to broadcast messages without the context of + a `socket`. This could be for broadcasting from within your channel to an + external topic, or broadcasting from elsewhere in your application like a + controller or another process. Such can be done via your endpoint: + + # within channel + def handle_in("new_msg", %{"uid" => uid, "body" => body}, socket) do + ... + broadcast_from!(socket, "new_msg", %{uid: uid, body: body}) + MyAppWeb.Endpoint.broadcast_from!(self(), "room:superadmin", + "new_msg", %{uid: uid, body: body}) + {:noreply, socket} + end + + # within controller + def create(conn, params) do + ... 
+ MyAppWeb.Endpoint.broadcast!("room:" <> rid, "new_msg", %{uid: uid, body: body}) + MyAppWeb.Endpoint.broadcast!("room:superadmin", "new_msg", %{uid: uid, body: body}) + redirect(conn, to: "/") + end + + ## Terminate + + On termination, the channel callback `terminate/2` will be invoked with + the error reason and the socket. + + If we are terminating because the client left, the reason will be + `{:shutdown, :left}`. Similarly, if we are terminating because the + client connection was closed, the reason will be `{:shutdown, :closed}`. + + If any of the callbacks return a `:stop` tuple, it will also + trigger terminate with the reason given in the tuple. + + `terminate/2`, however, won't be invoked in case of errors nor in + case of exits. This is the same behaviour as you find in Elixir + abstractions like `GenServer` and others. Similar to `GenServer`, + it would also be possible `:trap_exit` to guarantee that `terminate/2` + is invoked. This practice is not encouraged though. + + Typically speaking, if you want to clean something up, it is better to + monitor your channel process and do the clean up from another process. + All channel callbacks including `join/3` are called from within the + channel process. Therefore, `self()` in any of them returns the PID to + be monitored. + + ## Exit reasons when stopping a channel + + When the channel callbacks return a `:stop` tuple, such as: + + {:stop, :shutdown, socket} + {:stop, {:error, :enoent}, socket} + + the second argument is the exit reason, which follows the same behaviour as + standard `GenServer` exits. + + You have three options to choose from when shutting down a channel: + + * `:normal` - in such cases, the exit won't be logged and linked processes + do not exit + + * `:shutdown` or `{:shutdown, term}` - in such cases, the exit won't be + logged and linked processes exit with the same reason unless they're + trapping exits + + * any other term - in such cases, the exit will be logged and linked + processes exit with the same reason unless they're trapping exits + + ## Subscribing to external topics + + Sometimes you may need to programmatically subscribe a socket to external + topics in addition to the internal `socket.topic`. For example, + imagine you have a bidding system where a remote client dynamically sets + preferences on products they want to receive bidding notifications on. + Instead of requiring a unique channel process and topic per + preference, a more efficient and simple approach would be to subscribe a + single channel to relevant notifications via your endpoint. For example: + + defmodule MyAppWeb.Endpoint.NotificationChannel do + use Phoenix.Channel + + def join("notification:" <> user_id, %{"ids" => ids}, socket) do + topics = for product_id <- ids, do: "product:#{product_id}" + + {:ok, socket + |> assign(:topics, []) + |> put_new_topics(topics)} + end + + def handle_in("watch", %{"product_id" => id}, socket) do + {:reply, :ok, put_new_topics(socket, ["product:#{id}"])} + end + + def handle_in("unwatch", %{"product_id" => id}, socket) do + {:reply, :ok, MyAppWeb.Endpoint.unsubscribe("product:#{id}")} + end + + defp put_new_topics(socket, topics) do + Enum.reduce(topics, socket, fn topic, acc -> + topics = acc.assigns.topics + if topic in topics do + acc + else + :ok = MyAppWeb.Endpoint.subscribe(topic) + assign(acc, :topics, [topic | topics]) + end + end) + end + end + + Note: the caller must be responsible for preventing duplicate subscriptions. 
+ After calling `subscribe/1` from your endpoint, the same flow applies to + handling regular Elixir messages within your channel. Most often, you'll + simply relay the `%Phoenix.Socket.Broadcast{}` event and payload: + + alias Phoenix.Socket.Broadcast + def handle_info(%Broadcast{topic: _, event: event, payload: payload}, socket) do + push(socket, event, payload) + {:noreply, socket} + end + + ## Hibernation + + From Erlang/OTP 20, channels automatically hibernate to save memory + after 15_000 milliseconds of inactivity. This can be customized by + passing the `:hibernate_after` option to `use Phoenix.Channel`: + + use Phoenix.Channel, hibernate_after: 60_000 + + You can also set it to `:infinity` to fully disable it. + + ## Shutdown + + You can configure the shutdown of each channel used when your application + is shutting down by setting the `:shutdown` value on use: + + use Phoenix.Channel, shutdown: 5_000 + + It defaults to 5_000. + + ## Logging + + By default, channel `"join"` and `"handle_in"` events are logged, using + the level `:info` and `:debug`, respectively. Logs can be customized per + event type or disabled by setting the `:log_join` and `:log_handle_in` + options when using `Phoenix.Channel`. For example, the following + configuration logs join events as `:info`, but disables logging for + incoming events: + + use Phoenix.Channel, log_join: :info, log_handle_in: false + + """ + alias Phoenix.Socket + alias Phoenix.Channel.Server + + @type payload :: map | {:binary, binary} + @type reply :: status :: atom | {status :: atom, response :: payload} + @type socket_ref :: + {transport_pid :: Pid, serializer :: module, topic :: binary, ref :: binary, + join_ref :: binary} + + @doc """ + Handle channel joins by `topic`. + + To authorize a socket, return `{:ok, socket}` or `{:ok, reply, socket}`. To + refuse authorization, return `{:error, reason}`. + + ## Example + + def join("room:lobby", payload, socket) do + if authorized?(payload) do + {:ok, socket} + else + {:error, %{reason: "unauthorized"}} + end + end + + """ + @callback join(topic :: binary, payload :: payload, socket :: Socket.t()) :: + {:ok, Socket.t()} + | {:ok, reply :: payload, Socket.t()} + | {:error, reason :: map} + + @doc """ + Handle incoming `event`s. + + ## Example + + def handle_in("ping", payload, socket) do + {:reply, {:ok, payload}, socket} + end + + """ + @callback handle_in(event :: String.t(), payload :: payload, socket :: Socket.t()) :: + {:noreply, Socket.t()} + | {:noreply, Socket.t(), timeout | :hibernate} + | {:reply, reply, Socket.t()} + | {:stop, reason :: term, Socket.t()} + | {:stop, reason :: term, reply, Socket.t()} + + @doc """ + Intercepts outgoing `event`s. + + See `intercept/1`. + """ + @callback handle_out(event :: String.t(), payload :: payload, socket :: Socket.t()) :: + {:noreply, Socket.t()} + | {:noreply, Socket.t(), timeout | :hibernate} + | {:stop, reason :: term, Socket.t()} + + @doc """ + Handle regular Elixir process messages. + + See `c:GenServer.handle_info/2`. + """ + @callback handle_info(msg :: term, socket :: Socket.t()) :: + {:noreply, Socket.t()} + | {:stop, reason :: term, Socket.t()} + + @doc """ + Handle regular GenServer call messages. + + See `c:GenServer.handle_call/3`. + """ + @callback handle_call(msg :: term, from :: {pid, tag :: term}, socket :: Socket.t()) :: + {:reply, response :: term, Socket.t()} + | {:noreply, Socket.t()} + | {:stop, reason :: term, Socket.t()} + + @doc """ + Handle regular GenServer cast messages. + + See `c:GenServer.handle_cast/2`. 
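+
+  For illustration only (the message shape and assign name are placeholders),
+  a cast handler might look like:
+
+      def handle_cast({:put_user, user}, socket) do
+        {:noreply, assign(socket, :user, user)}
+      end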
+ """ + @callback handle_cast(msg :: term, socket :: Socket.t()) :: + {:noreply, Socket.t()} + | {:stop, reason :: term, Socket.t()} + + @doc false + @callback code_change(old_vsn, Socket.t(), extra :: term) :: + {:ok, Socket.t()} + | {:error, reason :: term} + when old_vsn: term | {:down, term} + + @doc """ + Invoked when the channel process is about to exit. + + See `c:GenServer.terminate/2`. + """ + @callback terminate( + reason :: :normal | :shutdown | {:shutdown, :left | :closed | term}, + Socket.t() + ) :: + term + + @optional_callbacks handle_in: 3, + handle_out: 3, + handle_info: 2, + handle_call: 3, + handle_cast: 2, + code_change: 3, + terminate: 2 + + defmacro __using__(opts \\ []) do + quote do + opts = unquote(opts) + @behaviour unquote(__MODULE__) + @on_definition unquote(__MODULE__) + @before_compile unquote(__MODULE__) + @phoenix_intercepts [] + @phoenix_log_join Keyword.get(opts, :log_join, :info) + @phoenix_log_handle_in Keyword.get(opts, :log_handle_in, :debug) + @phoenix_hibernate_after Keyword.get(opts, :hibernate_after, 15_000) + @phoenix_shutdown Keyword.get(opts, :shutdown, 5000) + + import unquote(__MODULE__) + import Phoenix.Socket, only: [assign: 3, assign: 2] + + def child_spec(init_arg) do + %{ + id: __MODULE__, + start: {__MODULE__, :start_link, [init_arg]}, + shutdown: @phoenix_shutdown, + restart: :temporary + } + end + + def start_link(triplet) do + GenServer.start_link(Phoenix.Channel.Server, triplet, + hibernate_after: @phoenix_hibernate_after + ) + end + + def __socket__(:private) do + %{log_join: @phoenix_log_join, log_handle_in: @phoenix_log_handle_in} + end + end + end + + defmacro __before_compile__(_) do + quote do + def __intercepts__, do: @phoenix_intercepts + end + end + + @doc """ + Defines which Channel events to intercept for `handle_out/3` callbacks. + + By default, broadcasted events are pushed directly to the client, but + intercepting events gives your channel a chance to customize the event + for the client to append extra information or filter the message from being + delivered. + + *Note*: intercepting events can introduce significantly more overhead if a + large number of subscribers must customize a message since the broadcast will + be encoded N times instead of a single shared encoding across all subscribers. + + ## Examples + + intercept ["new_msg"] + + def handle_out("new_msg", payload, socket) do + push(socket, "new_msg", Map.merge(payload, + is_editable: User.can_edit_message?(socket.assigns[:user], payload) + )) + {:noreply, socket} + end + + `handle_out/3` callbacks must return one of: + + {:noreply, Socket.t} | + {:noreply, Socket.t, timeout | :hibernate} | + {:stop, reason :: term, Socket.t} + + """ + defmacro intercept(events) do + quote do + @phoenix_intercepts unquote(events) + end + end + + @doc false + def __on_definition__(env, :def, :handle_out, [event, _payload, _socket], _, _) + when is_binary(event) do + unless event in Module.get_attribute(env.module, :phoenix_intercepts) do + IO.write( + "#{Path.relative_to(env.file, File.cwd!())}:#{env.line}: [warning] " <> + "An intercept for event \"#{event}\" has not yet been defined in #{env.module}.handle_out/3. " <> + "Add \"#{event}\" to your list of intercepted events with intercept/1" + ) + end + end + + def __on_definition__(_env, _kind, _name, _args, _guards, _body) do + :ok + end + + @doc """ + Broadcast an event to all subscribers of the socket topic. 
+ + The event's message must be a serializable map or a tagged `{:binary, data}` + tuple where `data` is binary data. + + ## Examples + + iex> broadcast(socket, "new_message", %{id: 1, content: "hello"}) + :ok + + iex> broadcast(socket, "new_message", {:binary, "hello"}) + :ok + + """ + def broadcast(socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic} = assert_joined!(socket) + Server.broadcast(pubsub_server, topic, event, message) + end + + @doc """ + Same as `broadcast/3`, but raises if broadcast fails. + """ + def broadcast!(socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic} = assert_joined!(socket) + Server.broadcast!(pubsub_server, topic, event, message) + end + + @doc """ + Broadcast event from pid to all subscribers of the socket topic. + + The channel that owns the socket will not receive the published + message. The event's message must be a serializable map or a tagged + `{:binary, data}` tuple where `data` is binary data. + + ## Examples + + iex> broadcast_from(socket, "new_message", %{id: 1, content: "hello"}) + :ok + + iex> broadcast_from(socket, "new_message", {:binary, "hello"}) + :ok + + """ + def broadcast_from(socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic, channel_pid: channel_pid} = + assert_joined!(socket) + + Server.broadcast_from(pubsub_server, channel_pid, topic, event, message) + end + + @doc """ + Same as `broadcast_from/3`, but raises if broadcast fails. + """ + def broadcast_from!(socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic, channel_pid: channel_pid} = + assert_joined!(socket) + + Server.broadcast_from!(pubsub_server, channel_pid, topic, event, message) + end + + @doc """ + Sends event to the socket. + + The event's message must be a serializable map or a tagged `{:binary, data}` + tuple where `data` is binary data. + + ## Examples + + iex> push(socket, "new_message", %{id: 1, content: "hello"}) + :ok + + iex> push(socket, "new_message", {:binary, "hello"}) + :ok + + """ + def push(socket, event, message) do + %{transport_pid: transport_pid, topic: topic} = assert_joined!(socket) + Server.push(transport_pid, socket.join_ref, topic, event, message, socket.serializer) + end + + @doc """ + Replies asynchronously to a socket push. + + Useful when you need to reply to a push that can't otherwise be handled using + the `{:reply, {status, payload}, socket}` return from your `handle_in` + callbacks. `reply/2` will be used in the rare cases you need to perform work in + another process and reply when finished by generating a reference to the push + with `socket_ref/1`. + + *Note*: In such cases, a `socket_ref` should be generated and + passed to the external process, so the `socket` itself is not leaked outside + the channel. The `socket` holds information such as assigns and transport + configuration, so it's important to not copy this information outside of the + channel that owns it. 
+ + ## Examples + + def handle_in("work", payload, socket) do + Worker.perform(payload, socket_ref(socket)) + {:noreply, socket} + end + + def handle_info({:work_complete, result, ref}, socket) do + reply(ref, {:ok, result}) + {:noreply, socket} + end + + """ + @spec reply(socket_ref, reply) :: :ok + def reply(socket_ref, status) when is_atom(status) do + reply(socket_ref, {status, %{}}) + end + + def reply({transport_pid, serializer, topic, ref, join_ref}, {status, payload}) do + Server.reply(transport_pid, join_ref, ref, topic, {status, payload}, serializer) + end + + @doc """ + Generates a `socket_ref` for an async reply. + + See `reply/2` for example usage. + """ + @spec socket_ref(Socket.t()) :: socket_ref + def socket_ref(%Socket{joined: true, ref: ref} = socket) when not is_nil(ref) do + {socket.transport_pid, socket.serializer, socket.topic, ref, socket.join_ref} + end + + def socket_ref(_socket) do + raise ArgumentError, """ + socket refs can only be generated for a socket that has joined with a push ref + """ + end + + defp assert_joined!(%Socket{joined: true} = socket) do + socket + end + + defp assert_joined!(%Socket{joined: false}) do + raise """ + push/3, reply/2, and broadcast/3 can only be called after the socket has finished joining. + To push a message on join, send to self and handle in handle_info/2. For example: + + def join(topic, auth_msg, socket) do + ... + send(self, :after_join) + {:ok, socket} + end + + def handle_info(:after_join, socket) do + push(socket, "feed", %{list: feed_items(socket)}) + {:noreply, socket} + end + + """ + end +end diff --git a/deps/phoenix/lib/phoenix/channel/server.ex b/deps/phoenix/lib/phoenix/channel/server.ex new file mode 100644 index 0000000..5feabd8 --- /dev/null +++ b/deps/phoenix/lib/phoenix/channel/server.ex @@ -0,0 +1,546 @@ +defmodule Phoenix.Channel.Server do + @moduledoc false + use GenServer, restart: :temporary + + require Logger + require Phoenix.Endpoint + + alias Phoenix.PubSub + alias Phoenix.Socket + alias Phoenix.Socket.{Broadcast, Message, Reply, PoolSupervisor} + + ## Socket API + + @doc """ + Joins the channel in socket with authentication payload. + """ + @spec join(Socket.t(), module, Message.t(), keyword) :: {:ok, term, pid} | {:error, term} + def join(socket, channel, message, opts) do + %{topic: topic, payload: payload, ref: ref, join_ref: join_ref} = message + assigns = Map.merge(socket.assigns, Keyword.get(opts, :assigns, %{})) + socket = %{socket | topic: topic, channel: channel, join_ref: join_ref || ref, assigns: assigns} + ref = make_ref() + from = {self(), ref} + child_spec = channel.child_spec({socket.endpoint, from}) + + case PoolSupervisor.start_child(socket.endpoint, socket.handler, from, child_spec) do + {:ok, pid} -> + send(pid, {Phoenix.Channel, payload, from, socket}) + mon_ref = Process.monitor(pid) + + receive do + {^ref, {:ok, reply}} -> + Process.demonitor(mon_ref, [:flush]) + {:ok, reply, pid} + + {^ref, {:error, reply}} -> + Process.demonitor(mon_ref, [:flush]) + {:error, reply} + + {:DOWN, ^mon_ref, _, _, reason} -> + Logger.error(fn -> Exception.format_exit(reason) end) + {:error, %{reason: "join crashed"}} + end + + {:error, reason} -> + Logger.error(fn -> Exception.format_exit(reason) end) + {:error, %{reason: "join crashed"}} + end + end + + @doc """ + Gets the socket from the channel. + + Used by channel tests. + """ + @spec socket(pid) :: Socket.t() + def socket(pid) do + GenServer.call(pid, :socket) + end + + @doc """ + Emulates the socket being closed. + + Used by channel tests. 
+ """ + @spec close(pid, timeout) :: :ok + def close(pid, timeout) do + GenServer.cast(pid, :close) + ref = Process.monitor(pid) + + receive do + {:DOWN, ^ref, _, _, _} -> :ok + after + timeout -> + Process.exit(pid, :kill) + receive do: ({:DOWN, ^ref, _, _, _} -> :ok) + end + end + + ## Channel API + + @doc """ + Hook invoked by Phoenix.PubSub dispatch. + """ + def dispatch(subscribers, from, %Broadcast{event: event} = msg) do + Enum.reduce(subscribers, %{}, fn + {pid, _}, cache when pid == from -> + cache + + {pid, {:fastlane, fastlane_pid, serializer, event_intercepts}}, cache -> + if event in event_intercepts do + send(pid, msg) + cache + else + case cache do + %{^serializer => encoded_msg} -> + send(fastlane_pid, encoded_msg) + cache + + %{} -> + encoded_msg = serializer.fastlane!(msg) + send(fastlane_pid, encoded_msg) + Map.put(cache, serializer, encoded_msg) + end + end + + {pid, _}, cache -> + send(pid, msg) + cache + end) + + :ok + end + + def dispatch(entries, :none, message) do + for {pid, _} <- entries do + send(pid, message) + end + + :ok + end + + def dispatch(entries, from, message) do + for {pid, _} <- entries, pid != from do + send(pid, message) + end + + :ok + end + + @doc """ + Broadcasts on the given pubsub server with the given + `topic`, `event` and `payload`. + + The message is encoded as `Phoenix.Socket.Broadcast`. + """ + def broadcast(pubsub_server, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.broadcast(pubsub_server, topic, broadcast, __MODULE__) + end + + @doc """ + Broadcasts on the given pubsub server with the given + `topic`, `event` and `payload`. + + Raises in case of crashes. + """ + def broadcast!(pubsub_server, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.broadcast!(pubsub_server, topic, broadcast, __MODULE__) + end + + @doc """ + Broadcasts on the given pubsub server with the given + `from`, `topic`, `event` and `payload`. + + The message is encoded as `Phoenix.Socket.Broadcast`. + """ + def broadcast_from(pubsub_server, from, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.broadcast_from(pubsub_server, from, topic, broadcast, __MODULE__) + end + + @doc """ + Broadcasts on the given pubsub server with the given + `from`, `topic`, `event` and `payload`. + + Raises in case of crashes. + """ + def broadcast_from!(pubsub_server, from, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.broadcast_from!(pubsub_server, from, topic, broadcast, __MODULE__) + end + + @doc """ + Broadcasts on the given pubsub server with the given + `topic`, `event` and `payload`. + + The message is encoded as `Phoenix.Socket.Broadcast`. + """ + def local_broadcast(pubsub_server, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.local_broadcast(pubsub_server, topic, broadcast, __MODULE__) + end + + @doc """ + Broadcasts on the given pubsub server with the given + `from`, `topic`, `event` and `payload`. + + The message is encoded as `Phoenix.Socket.Broadcast`. 
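+
+  For example (the pubsub server, topic, event and payload below are
+  illustrative):
+
+      broadcast(MyApp.PubSub, "room:lobby", "new_msg", %{body: "hi"})
+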
+ """ + def local_broadcast_from(pubsub_server, from, topic, event, payload) + when is_binary(topic) and is_binary(event) do + broadcast = %Broadcast{ + topic: topic, + event: event, + payload: payload + } + + PubSub.local_broadcast_from(pubsub_server, from, topic, broadcast, __MODULE__) + end + + @doc """ + Pushes a message with the given topic, event and payload + to the given process. + """ + def push(pid, join_ref, topic, event, payload, serializer) + when is_binary(topic) and is_binary(event) do + message = %Message{join_ref: join_ref, topic: topic, event: event, payload: payload} + send(pid, serializer.encode!(message)) + :ok + end + + @doc """ + Replies to a given ref to the transport process. + """ + def reply(pid, join_ref, ref, topic, {status, payload}, serializer) + when is_binary(topic) do + reply = %Reply{topic: topic, join_ref: join_ref, ref: ref, status: status, payload: payload} + send(pid, serializer.encode!(reply)) + :ok + end + + ## Callbacks + + @doc false + def init({_endpoint, {pid, _}}) do + {:ok, Process.monitor(pid)} + end + + @doc false + def handle_call(:socket, _from, socket) do + {:reply, socket, socket} + end + + @doc false + def handle_call(msg, from, socket) do + msg + |> socket.channel.handle_call(from, socket) + |> handle_result(:handle_call) + end + + @doc false + def handle_cast(:close, socket) do + {:stop, {:shutdown, :closed}, socket} + end + + @doc false + def handle_cast(msg, socket) do + msg + |> socket.channel.handle_cast(socket) + |> handle_result(:handle_cast) + end + + @doc false + def handle_info({Phoenix.Channel, auth_payload, {pid, _} = from, socket}, ref) do + Process.demonitor(ref) + %{channel: channel, topic: topic, private: private} = socket + Process.put(:"$callers", [pid]) + + socket = %{ + socket + | channel_pid: self(), + private: Map.merge(channel.__socket__(:private), private) + } + + start = System.monotonic_time() + {reply, state} = channel_join(channel, topic, auth_payload, socket) + duration = System.monotonic_time() - start + metadata = %{params: auth_payload, socket: socket, result: elem(reply, 0)} + :telemetry.execute([:phoenix, :channel_joined], %{duration: duration}, metadata) + GenServer.reply(from, reply) + state + end + + def handle_info(%Message{topic: topic, event: "phx_leave", ref: ref}, %{topic: topic} = socket) do + handle_in({:stop, {:shutdown, :left}, :ok, put_in(socket.ref, ref)}) + end + + def handle_info( + %Message{topic: topic, event: event, payload: payload, ref: ref}, + %{topic: topic} = socket + ) do + start = System.monotonic_time() + result = socket.channel.handle_in(event, payload, put_in(socket.ref, ref)) + duration = System.monotonic_time() - start + metadata = %{ref: ref, event: event, params: payload, socket: socket} + :telemetry.execute([:phoenix, :channel_handled_in], %{duration: duration}, metadata) + handle_in(result) + end + + def handle_info( + %Broadcast{topic: topic, event: event, payload: payload}, + %Socket{topic: topic} = socket + ) do + event + |> socket.channel.handle_out(payload, socket) + |> handle_result(:handle_out) + end + + def handle_info({:DOWN, ref, _, _, reason}, ref) do + {:stop, reason, ref} + end + + def handle_info({:DOWN, _, _, transport_pid, reason}, %{transport_pid: transport_pid} = socket) do + reason = if reason == :normal, do: {:shutdown, :closed}, else: reason + {:stop, reason, socket} + end + + def handle_info(msg, %{channel: channel} = socket) do + if function_exported?(channel, :handle_info, 2) do + msg + |> socket.channel.handle_info(socket) + |> 
handle_result(:handle_info) + else + warn_unexpected_msg(:handle_info, 2, msg) + {:noreply, socket} + end + end + + @doc false + def code_change(old, %{channel: channel} = socket, extra) do + if function_exported?(channel, :code_change, 3) do + channel.code_change(old, socket, extra) + else + {:ok, socket} + end + end + + @doc false + def terminate(reason, %{channel: channel} = socket) do + if function_exported?(channel, :terminate, 2) do + channel.terminate(reason, socket) + else + :ok + end + end + + def terminate(_reason, _socket) do + :ok + end + + ## Joins + + defp channel_join(channel, topic, auth_payload, socket) do + case channel.join(topic, auth_payload, socket) do + {:ok, socket} -> + {{:ok, %{}}, init_join(socket, channel, topic)} + + {:ok, reply, socket} -> + {{:ok, reply}, init_join(socket, channel, topic)} + + {:error, reply} -> + {{:error, reply}, {:stop, :shutdown, socket}} + + other -> + raise """ + channel #{inspect(socket.channel)}.join/3 is expected to return one of: + + {:ok, Socket.t} | + {:ok, reply :: map, Socket.t} | + {:error, reply :: map} + + got #{inspect(other)} + """ + end + end + + defp init_join(socket, channel, topic) do + %{transport_pid: transport_pid, serializer: serializer, pubsub_server: pubsub_server} = socket + + unless pubsub_server do + raise """ + The :pubsub_server was not configured for endpoint #{inspect(socket.endpoint)}. + Make sure to start a PubSub process in your application supervision tree: + + {Phoenix.PubSub, [name: YOURAPP.PubSub, adapter: Phoenix.PubSub.PG2]} + + And then add it to your endpoint config: + + config :YOURAPP, YOURAPPWeb.Endpoint, + # ... + pubsub_server: YOURAPP.PubSub + """ + end + + Process.monitor(transport_pid) + fastlane = {:fastlane, transport_pid, serializer, channel.__intercepts__()} + PubSub.subscribe(pubsub_server, topic, metadata: fastlane) + + {:noreply, %{socket | joined: true}} + end + + ## Handle results + + defp handle_result({:stop, reason, socket}, _callback) do + case reason do + :normal -> send_socket_close(socket, reason) + :shutdown -> send_socket_close(socket, reason) + {:shutdown, _} -> send_socket_close(socket, reason) + _ -> :noop + end + + {:stop, reason, socket} + end + + defp handle_result({:reply, resp, socket}, :handle_call) do + {:reply, resp, socket} + end + + defp handle_result({:noreply, socket}, callback) + when callback in [:handle_call, :handle_cast] do + {:noreply, socket} + end + + defp handle_result({:noreply, socket}, _callback) do + {:noreply, put_in(socket.ref, nil)} + end + + defp handle_result({:noreply, socket, timeout_or_hibernate}, _callback) do + {:noreply, put_in(socket.ref, nil), timeout_or_hibernate} + end + + defp handle_result(result, :handle_in) do + raise """ + Expected handle_in/3 to return one of: + + {:noreply, Socket.t} | + {:noreply, Socket.t, timeout | :hibernate} | + {:reply, {status :: atom, response :: map}, Socket.t} | + {:reply, status :: atom, Socket.t} | + {:stop, reason :: term, Socket.t} | + {:stop, reason :: term, {status :: atom, response :: map}, Socket.t} | + {:stop, reason :: term, status :: atom, Socket.t} + + got #{inspect(result)} + """ + end + + defp handle_result(result, callback) do + raise """ + Expected #{callback} to return one of: + + {:noreply, Socket.t} | + {:noreply, Socket.t, timeout | :hibernate} | + {:stop, reason :: term, Socket.t} | + + got #{inspect(result)} + """ + end + + defp send_socket_close(%{transport_pid: transport_pid}, reason) do + send(transport_pid, {:socket_close, self(), reason}) + end + + ## Handle in/replies 
+ + defp handle_in({:reply, reply, %Socket{} = socket}) do + handle_reply(socket, reply) + {:noreply, put_in(socket.ref, nil)} + end + + defp handle_in({:stop, reason, reply, socket}) do + handle_reply(socket, reply) + handle_result({:stop, reason, socket}, :handle_in) + end + + defp handle_in(other) do + handle_result(other, :handle_in) + end + + defp handle_reply(socket, {status, payload}) when is_atom(status) do + reply( + socket.transport_pid, + socket.join_ref, + socket.ref, + socket.topic, + {status, payload}, + socket.serializer + ) + end + + defp handle_reply(socket, status) when is_atom(status) do + handle_reply(socket, {status, %{}}) + end + + defp handle_reply(_socket, reply) do + raise """ + Channel replies from handle_in/3 are expected to be one of: + + status :: atom + {status :: atom, response :: map} + + for example: + + {:reply, :ok, socket} + {:reply, {:ok, %{}}, socket} + {:stop, :shutdown, {:error, %{}}, socket} + + got #{inspect(reply)} + """ + end + + defp warn_unexpected_msg(fun, arity, msg) do + proc = + case Process.info(self(), :registered_name) do + {_, []} -> self() + {_, name} -> name + end + + :error_logger.warning_msg( + ~c"~p ~p received unexpected message in #{fun}/#{arity}: ~p~n", + [__MODULE__, proc, msg] + ) + end +end diff --git a/deps/phoenix/lib/phoenix/code_reloader.ex b/deps/phoenix/lib/phoenix/code_reloader.ex new file mode 100644 index 0000000..3fa0f49 --- /dev/null +++ b/deps/phoenix/lib/phoenix/code_reloader.ex @@ -0,0 +1,288 @@ +defmodule Phoenix.CodeReloader do + @moduledoc """ + A plug and module to handle automatic code reloading. + + To avoid race conditions, all code reloads are funneled through a + sequential call operation. + """ + + ## Server delegation + + @doc """ + Reloads code for the current Mix project by invoking the + `:reloadable_compilers` on the list of `:reloadable_apps`. + + This is configured in your application environment like: + + config :your_app, YourAppWeb.Endpoint, + reloadable_compilers: [:gettext, :elixir], + reloadable_apps: [:ui, :backend] + + Keep in mind `:reloadable_compilers` must be a subset of the + `:compilers` specified in `project/0` in your `mix.exs`. + + The `:reloadable_apps` defaults to `nil`. In such case + default behaviour is to reload the current project if it + consists of a single app, or all applications within an umbrella + project. You can set `:reloadable_apps` to a subset of default + applications to reload only some of them, an empty list - to + effectively disable the code reloader, or include external + applications from library dependencies. + + This function is a no-op and returns `:ok` if Mix is not available. + """ + @spec reload(module) :: :ok | {:error, binary()} + def reload(endpoint) do + if Code.ensure_loaded?(Mix.Project), do: reload!(endpoint), else: :ok + end + + @doc """ + Same as `reload/1` but it will raise if Mix is not available. + """ + @spec reload!(module) :: :ok | {:error, binary()} + defdelegate reload!(endpoint), to: Phoenix.CodeReloader.Server + + @doc """ + Synchronizes with the code server if it is alive. + + It returns `:ok`. If it is not running, it also returns `:ok`. 
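+
+  For example, a test that touches source files on disk could call
+
+      Phoenix.CodeReloader.sync()
+
+  before asserting on recompiled code, so that any in-flight reload has
+  finished first.
+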
+ """ + @spec sync :: :ok + defdelegate sync, to: Phoenix.CodeReloader.Server + + ## Plug + + @behaviour Plug + import Plug.Conn + + @style %{ + primary: "#EB532D", + accent: "#a0b0c0", + text_color: "#304050", + logo: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJEAAABjCAYAAACbguIxAAAAAXNSR0IArs4c6QAAAAlwSFlzAAALEwAACxMBAJqcGAAAHThJREFUeAHtPWlgVOW197vbLNkTFoFQlixAwpIVQZ8ooE+tRaBWdoK4VF5tfe2r1tb2ta611r6n9b1Xd4GETRGxIuJSoKACAlkIkD0hsiRoIHtmues7J3LpOJ2Z3Jm5yUxi5s+991vOOd+5Z777fWf7CGXA79Ct46ZGmyPnshw9WaX5qTSlJBCKjqU51aoohKVUivaIRqUUmlactEK3iCp1gablTztsnZ9kbK16w2P7wcKw5AAJhKqiBWlzIyIjVrKsnKtQ7HiiqiaGZQOC5Qm/JAkiUekqSha2X7/x2JP1FOXw1G6wLDw4oPvFl94+ZVmkib9HJnQuy7MRfUW+qoqSLMtHWi60PzB9Z+2BvsI7iEc/B3wK0d8Wjk8dHRX7B5hjbqBZU6R+sMa3VBWFUiSxqLmhdc303XVHjMcwCDFQDngUosO3JF0VPzz2eSKRLJrjPLbxhVARYYXDUCKlKAJFMV00yw731d6fOlWVKadT/mjSxsIb/ek32Lb3OPANAdl/c3La8CExmziGnUYYz2thd1JwhpBk5RDDyBccTuWgKNpqWxzCsdk76iuwbdXiyd/nIqO2ufcL9lmVBZvgcP5k4pYTrwcLa7B/cBy4LESVeVlvsxS9wN+ZR1Jkioi2B5M3nPiTJ1LqVuXaCcuaPdUZUSbJjg9T1hXfZASsQRiBcYDULJ/2OM1zDxOa0zf1eMFDROmcQ5Jeam7peE+iKOfQ+IjFHM//gqF7T4A0UhD3dflHkusHd3EaS/r0SupWZO+lCHWFwislio2Kpi30cKKQZEKYGEL7L1e4ZqFkRSWs/2upYEauSpKjpblldvaOmkPBwBns6z8HLn/O3Lsenjs+N2pU7G94hr6JpjnevT4cn0GQ1HZb29JBZWXfvh2vQuRCBg2z1W5i4q9zKQvfW1mmOrrsy6duPb4pfIkcWJTp+V4p4zcUzrY72h9SJCX8R88wVGSEdWPZkskrw5/YgUGhnpno8khLbk9dHBMZu4Wimctl4XqjKCrV4ehcmbH5xAZXGsuWTLpFdSpylyC1t3RIjQfLv2h6pInqdG0zeO8fB/wSIgR9clnGw1aL5Un/0ISmtSorVJe97cYpb1R8pFFQtSzzBc5iXoPPMqyhCKOqlEycKqW2gHL0vCqRvR1S146srRX7tD6DV98c8FuIEFxlXnYxz/EZvkGHR60kSUrjVy1TZu2qKdMoqr4j8wOWMXvVeOMsJqlyB0vkfRdPtz42aGbROOf5GpAQIai61Tlgiw1Ot+SZJONLFUUU5q49GlPvokequStzM0OZl/SEDWczmLIq2mwdv8rcVvVOT+2/jfV6FtYe+SJQ9CseK8KwEFUUu1flNLqSlvxa8VKH0/msa5mnezT/EJ6fGBubsL1qdfahVxOj4z21+zaXBTwTIdNq7siVGIYN/1X2pTcsCY6alILiFNcXfmxR+qrICMsrIGica7m3e0WWRFWyP+zNzOOt30AuD3gmQqbAwnRPf2IOy5uTa1dlfuxK87Q3T64/V9o0RhLFBtdyb/c0w3KMKeqZyhVZu721+baVByVELS3tv+pvDANT3vUVt019xpXuWYVfNKbkHx0liM7tuKjW8+NNpjk1q6af/9vkcYa5uejBG45tgvqc4YCq83I6WY7rM09Ho5jY1n5xiSfzCOqRLBbrWormh+rBBYt20emw/yht88lX9bQfiG2CmomQIYqifN4fGRMZGb1p46QRY9xpT9tSvnPc2sJhotjxgiLLTvd692dcS1ms0a9U5uW85173bXkOWohssrSjPzKLAfXEjNzEclfa86cOH4aRK1iWmn/iR0nrDpslQdiqqKLo2s7TPc9xt1Tm5bafXDL1fk/1A7ks6M/Z7mmJo8ZmjDpLs0HLY0j4jAtqXA8hclzfjM+M/7ugCqUTNxxf7EIQe3LFlGdZYlrC89wQl3KPt7IoXJAVeqfU1b4lfXvlB66Ntt88OmnikJhFxEbH7zt+4el7qxouuNb3x/ughQgHXZU3vZPjmH63LtJemCRIx1IKjnRr4E8unHCTJTZ2l6jIdRPWH03S2mjX0vmp3zVbI+6jeeYqQjGxPf15upWVYFNBPytCE4jAU0WiKC2CxHz44aHa+++vaW7XYPfXqzFCtHz6Kc7MjO2vTEC6FcX5XtLaonl4j4JkjY/fJUO0UofofCBzc+lzWO7+++yWpMnDYyMXixQ7nefIBAjFjCZEtUA7FvTcDAM7PZUhqqLS4OyptqhELBEd4sa0LScK3GH152dDhKhmedZ+xmy6pj8zAmmXFfHl5LVH78X76vkTfsAOid+K9+h+2253/EKvj9IPR1LW5fEjEzY2N1x8uYGyIYxgfwe/m3JldBSXwUhsMmdhR6gmlVFE9UvJQVU7VMeJUBqMDRGiyhW563gTuypYRoVD/06b8NSUzYUPIy0YqcKazW9prr4oTJIsrE3eeOw/e5tWnOVi46z3WhjTXIUm42iKNnt1V4ZgCZjuHLIqldrt0p/1CrtRYzBEiMpXZDxiNll+ZxRRoYYjO2xPaIKCbsJxo4fsZxnGrNGFBl14bcVSl1yQ9mYJ2hAhvi74H35G+cjIOxWKzOYYZojesC13zIIk1rWdbV7SV94HhggR2p+io6LXuQ+mPz/bHfYn0zaW/AbH8MhQKnLZTbnlHM8muo+JyJIsqmoDuCaVU4rzI8Uhnjxc/OWh1fWtre5tXZ9xVzs0Ne5as4WZrlDMbI6iU2iOxfWUIT8VTHyCKP9u4qbixw0B6AOIIUKkLUR94OmXVXab49W0zcX3aMR3x+Yx/EKa9s02FCxYU4sQ8yIwtGSTZGJHGDRLWWSFtcLim4f9Gs+yva8XcQqdz00sOP4zbQy9cfXNDZ0YcdE3fHj8Ia/fbJ1wwrGZ6LTtSN1w7FaNtuOLJ/5rpDVig16ziNYvlFdvJh6jaOqfGkKjRq8DDmeyzqtbmX1Zs42utmgWcbZ2/QnSlTh0gAh5k8iImI29SYQhQoQ2SAr0aAP1h05paGg+sWhitx4JxzlxW+mDKesOW9DGJshSR6jHjv7i3mhAn6+qpZk7vdUHW27I5wxtTtdkjWkA9VrYOqih5lhQpFJVkbfbZaUyyuYUO62mRCvDzuNYMoMwvLUnZn6dvEJ6KzW/8Hb3tjUrJj8AMNaAFns85B4whK/uOLRnRQTHcVWqVwh3UHYIn6uivbZVkM7yFjbJyloywI63EN7EFML8Y82F4V779
1XG9bTg13D4czVksOEuROiN2NLWNidne9Wn3phTtiLzVRPN3KknoQVkzGlz2OwPpb9R9pI7vP3ZY0YMGR/zM85ims8Q6jtGJbNAtQJYTqpE1bFpUsGJpwGvzyBAtAOOzorfBgEVV2s0uipTtTIjroYIUbcRNvuK0zQJP8d9zFrS0dl+nR6NLuqEYkYl7OY5NkoPc0X498s222OTtp1EXZHH3/GFk25gIyw3w7phGsXQYymVDCUU7MwYiqMU0s1/lIbudQUDzwqoDVFHrqgCTOunZUqusovC2+7xcx6ReSgsWzTlZ+ZIy39DbgUK0vE0jV9XOMxDs6CKDBGitWNjY6+ZlXKB4cLP3xomoYbk9V9b6fVyqvaOnHqa4cbobY8vxympG/YfPv97vVZ5nL2ThltGMhZyeUZRRIYRz9guXHui4Yxe3HradQedRidswU96/s7Po4wO1jREiHAgdXfmOAjhTHoG1Zdt0OV1Qn7R9/3FWbUyq4jjTZn+9MMYN0LJpwVZ3c112D5I+WvlW/707822WtCmvbP1vrQ3yv9iJC7DhKhq1ZVtHEtHG0mcEbCCUbZVrZy6jeMj/BZAjW70AiCM0qnI9JegYHTSKjFJolSTurl4IbQxxFSi4dJzxYRjsIcrSc0/MlNPe71tDNnidyNTlLD0i6EJ/0+mCr3MSS0ovc3W2bYGdkPdGme9/bR2+HmnaT6G5dhUCBKZAnvw0QorVUE9uIb0/U9S7WtZosYYjZk1CiCjyhAc+M+2JaPgBwqHZugZgfbFfpd2YC/V5GW9D9v3G8C+5RfPcDsuU9RRsaP9UXcvx2DoCqRvU2PnywmJVuMmjktEGPY5q1s1rYCw1hWBDK43+2Am250H6mKN8CAcS1HmD1ZOeYol3DzwaExUVdbkyY4GubedlKie6pKo7fM2Fz5W7xK+3Ztj1QkbhejyYl5nH5/NDBOiikVpa0xRMS/4xBaiStQqo+O90egP35oyK9JqGqPS7GgTeDR2KOpFkypWY8SI0bjCGZ5hQoRKtsSpVzSEoxEWbVxoogjnF9GfaTNMiJAJvb1DU2UJwtxAXQfmFU+fEV8vwuG0PzppQ8kjvtqEYx266UrRXApR2RRCkUTw9rfAuToyHMDDKERtpmS5pNPpKMp9q/KvoaLfUCGqzMvYx3OWWUYORpLEM6oqvS122D+4UN1xsq7T1pGenpAWHRN5K01Mi/UGCOACNyn/iK6kDUbS7y8sNPJyZutqnqZmKoRO0JtoApSqqDKoVFXnxpT842gW6bOfoUJkpIcjWqVFxf5rsBM95YsbR34wYX6cNfJVhuN7jAdzCo59EwuKr/MFLxR1Y2HB/uGK3BdZTlmAKoFgacBgS0mit0zIP5wXLCw9/Q0VIkRYuypXhLM8/NoGeyLU2dVxlz9HLmC2D0zW4AmWa1lHe2fYZJZFc9Gs2eMLCKFvAm2/XzzDODb4qAk0kbp1TiohrAofejjiC/LPX9rFC6Iqs9QrEMFyH/Cg13RThgtR9cqsz1jedJXri/P3Xpac9cnri8b52w8t8RaT+S5f/XBddfb4V4mYCcRXu96uQ1rNPLPKH+FR0K6iSkWdorwZ/mR7Zrx7qtSFThoScMWOHh8XMzLBmsxwplQ+klkNm/mhXTbHbzGFjktbQ28NFyI8oWjoFcM+C4ZKm93+6/RNJb8PBEb58mmPms3W3/rqK4pyV2r+4ZAcvYWpkU1m8/+AgVf3Z0sGn20wnr696+CpuwPRd2F2t7vPtjf74kkwdYYLERKDeXvAmW54oIS12ZvnZGyq3Btof83Y6Ks/+Oc0J609muCrjZF16N8zNjPufYY3ZfkDV1aFwvrDzbdcf+LUl/7068u2fn2H9RLW0tV275CY+ICTZEp2VdSLy1O71E3F/1a1Ytoo9I/2VI9lsOuJr12dc3H/3pqk3vD2c8VbtjTzFRPP3uHPWhHdSzpsjgf9+Qx1H6URa8kgVjqNU7mhAk1FgXdSE22XWxy8cszW6jh51a6aYlfajLjvlZkICTuVl9NAcdyIQIhsbb240IhMrTV5OccZjpvsiwZURDrs7fNdc137ao8OeFFjLEnT363e76sdfkKuuibpaTPPrvDHu1EW5Xan0/mX9DeO/coXfK2uaOnUpVaWuZejSTZk843sSdkrgj88ZJeoUJ32Fye+WfaiBieYa68J0Wc3jM0Y+Z0RAUm9e7xXMAOsyZvexnCMTxeV7qNBKflyHL4vfHiw4BVD416jCRmnggZQkZWzhBJr4R/vlAlrg8wfQ3mangauiqP1enriwTaCSmpkwfG/6VtKn/eFX6srvy39Hi4y4vFglg2YxEsUxCcgwPEJDW4g114TIiSmdnXWDpo2fc9fwsCH+XzS2sKAZjF3XC+ljhxy/b+M/FLPC0UvyPY2W17WO2U9JfVkIe/jU6yVW6TSdKK/QYiqgnGNik0SmQrZ4dxbfKLp/5aXN37hTrunZ5wJvzNtxB50L/FU76kM13+gbH2v1WF/W7VLTSxnspis/JUmhr5NUdh40tn2YDAOdL0qRDggzB6m12dZYwDODAcPnR6rl7FaP29X1AJHRMW9663etRxxy7JwuLGpY7VrFn7XNu73JcsmzDbRlmsZmeSqHD2SAidprQ3ogOw0JbfQRL5oF0m5U1VONR/v2BPIQrlsefoveM76e3/SPjud9rUTN5TcqdHj6YqCOffY2XOe6vSUXR6snsaBtMETrcdHJ1T4G0YD/9BPkjcWGWZCqcrLeA6yK/673jHIqKijSKHN1vakEeszvXi9tatcPmUTb45c6q3evRz/DA5H5z19kZC014UIB1e2NP1uTI7pPlCfz3Bu2UcHzg7V6/juE9alyupVmQfgONqZetq6tsHPgSyre5wdtpenbC//2LXOqHuczd75uPKIJyf6QOh2tLb/0FcUyt55YycOi7TOZNSvEwtA7s1aPRExnsbbJ0KEiDF3tCk24gFPRHgrc4py9cT8w7q//d7guJYHs2tEOKiohN1NOVGEUggCeOfcefuJG/d/ccoVh5573L3NzB0x3RJtXi6ppoWQ+OGLgp1FV7oLUc3KrEJ/dUvePBZQBRA7LOYRxkxfDUe0Rmt5l7rpxRxHRHGCD1+F0yH80Z8cR30mREho1fLM5zmz+Sd6mKy1sXd0/kfam8ef1Z6NuNbdkd2lJ+JVDy70nKSI0gX/505RZZqJIrdCfqEmVRWcsIPr1sMRlhcVSTXD+mg47OiGQXhZDFTEqpeOtMBt95Ej5ya4rwErV+Ye4Xk2Rw8dWhvB0bl5wsbjy7RnvKIVIT5h6HaGI7pjzmCTcRxCrVAx2qPNrU+FCAd0cknG73gL/wir8+A9zLNTfaopKZB/O+Lz9EMHulGTh532R/nnCY4RZbLorE3OL0p2hxWIW43qFP6Op2S6w8IASlOk5WmQdhqickeBX1KCnkhfUHjaGptar7x6Z+0Jd5iuz30uRIgc09hRJvMmjtMXp4YnTc9ZfySu3kBf5cJ5yTPihsR+FsrjtgSnc8+EDUVzXV8I3mNQABhQb3Yv9/UsCNLRCQVHcn210epw
szM6KvYPNGHm96SewLCnpgutV898v/pzrb/7NSRChERgcsxfzs0uxIwb7kR5eobptXXD+0dHu68ZPLXVW4bTfNyQ+E96YqReeHrboSeB3SE+lr6l5FH3PoEEPHibgdxhuz/vuCExZdLIkZ/0pLBEA/AXxY1jvKkBQiZE2oDQ6s6x3C8hLovXyrxdMf6rtaVlTvaOmkPe2vhbjovN+MT4T/Xg9xe2p/b4+Spv/OrmeR+frXavDySBqt3peC1tQ/Hd7rD8edZjHkLtdlNz03Q395NuNCEXokuDZcvzsraxhPleT7OCih41qvP51PySn/rDKF9tUdkGQQYlerLl+4Ljq04QpQ74LP/Rm4mhekXGetZk0e2JCCcBdHXZ2+/ydMiNLzq81ek5khXTCNrsnfe7h2GHRIhqV2RtQAvzpPyi+a6DwgNbcrOHga+N+UZIreNzZsKMHJJof9jIxOIVKzP/buLN17rSFOw9mNQ6HYK4Ln3Dca+7UvgD/dXMmS6n9POJE5SgDqLscOedax+c0RhemSyLlB08IKsdsrTHwvHfx5wExbdm326NoZZPKChc4NoH74GOg0BHj8GeuHMTnI5nzjR0fFp/XuwIiRBholBzbNwuyBvU0FDUMMNTFoyy5RlP8DSzElKRj2YgXb37gC8/y87zTkFef7a0/dlATAmX4Vy6wQwaUdaYP8POLWB/qG4HREWt7pKEF71l49fwYio/PetCXJfIinKoqvHL1Z4+hRo8vKJ2Hs4huZ+wNLG3dz3DmLlUnufnj3vtIKlZlXMOPt0j8d61j3ZftXzaa6CQXY19tTJvV/DlVhw26bEeG3oDEGw5OtijzxEkXgJ7q7gudeMxj26t3ZrVmKj7TLTpOkJIErg6WLy5O6AbBbgAnmJU54Zgj9fEvD6syXQv6HrA1dR3yhxcKKu0bANdUBmRlY++OHHxRW+LUI1v5Usn/5znLY+DsFq0MvcrWvchQqoRkhZt37u75rf+eCeiioBWuWw4sySyenXOFpbmFquCUAG+2BPgEHfq+oKj1novu11MxD4kPvYFjqZzwPHqG0nYUS8G1mMbZD+pFBTnG3/7vPHFkAkRMszVlRU1wZCt/jktd7Q7Q7Vn3JrTkdYZVsaUQdFyNOg8INQd5is4RoMGDZ9EMZLd2bbLqLUC5rBePCt9KYmOyIY1wTCwwIugFuBoRemQiFThlKgzpSebPsor/fIrjUYvVxr0NXMjovk8WeUWuh80iMm4OPj2SApzUaSEOiKp75e3XNi0cNeZWi/wfBZXrcypAKVmEoZJVa7M/oTlyFXdngzwOVRoqu1Ue/OV12+vw+QSPn/IbytvmiIR1gwa7YtfSV1H3fuFVIiQend3EVUWbaJEth74tPqnRnscfjhrzLjEkXF5LA/+PpSSAAkavoLPRNn59rbNs3fUV/jkZpCVOKOOiI170cTAQTLwg7nrNBw5dBoOFGnsghONlE7bodt21JTUe5kd/EWP6xueIZPApSYWTSegKQfNs/Q2CKmFZbkft7W1LfCVftAffCEXIiQW/imwM+Lhxf7jh2sAilZKhC7b6+67gX+06vkO/YnmZI/4JTHTi2mFHuXtW48KTYck/ldPM2HPGL22wI0CBhj2yQ/HnWyhTfhZ3Td55Ojq1s4u7XOIBwO+fvRUjVGH14SFECFXcfrleK77X+rOZZjjBULEGkhk+LkiObcVH2s94W5n0vog865Kj8lkIsyLzTR7DXgaJvnKagvCI6m0coHIdLtDFrf2ohBpJA64a9gIEXJW704FF3eEhu0roRzgCGbHvuA4bGJpxQzJNa16vBhReOwO4U96fZkRx+DPMwfCSoiQRNiClsIWdIpncg0qlWW5tu1CmvsC0SDo3zowl+Jtw2fc4H4wFQ2TvUmRCruTQQEyjsNhJ0Q4NLRsi6L9zzpcWQLiBCT9jUdvy4A6D3b6Jw6E3efMlcLi21IXREbFbnY9sM61Pph79EEWRNubX5W3/zTUcfnBjCMc+oa1EF1iEF+Tl1sEWuP03mAYqu7BqHsKZqdDHc7OHbZOpWrZrpryeoP0Nb1Bc7jB7A9C1M0z9Ig0W9iHIfzZp2E2WAbjDKVSYECRaYEBtbGsgm8Bo0CkDy3CQXcXVFUpkxSpvKK5OT9QbXKwNIZb/34jRJcYx4JNaDdP87NA9xNSXqJdC+wsLaD5PnDxq7anpu+sPRBSgkKIvL8JUTer0CMRDISvEZaZCKkLQ8i+r1Hj7KXIYm2LrevnocydGCpG9Esh0piFsVoRTMQTkAcUzivT0oNptaG5gvXkYMr64qCSfIWG8sCx9msh0oaNJ/bMmHLFU7BcgjPGSEJvzU5oaWcUOEtKwUOBARPtWUOCRuTGppYeoyQ0+vv7dUAIketLQNeFyLj4H0Es2NUwNyX6sxDH0GnI5iECU2yQ//AcIVKjSHO1YofzJMU4K+0XhJb2aKoN8VkddERUNDuUoUgyy/LZkBA9FRIjTwJfnTjNxbe1SViU+W7hVlf6BuL9gBMi95eEXpR8FD+NIfRkQaFHw0vvTkNM06pNoZmLquxophWqrl2mz3W22o7pTeLgjkd7xoxoIybHrDHxzI8hiDGq9VzzNdN31x3R6gfidcALkZEv7cDNyZmxUZbrBNXZ8Pmxzt095QlAAcazWXsK/jOSxlDAGhQiP7iOkaSWePOdRGZmghfBKAJZrWSacmBKOzgbsxFcaY/YHLZ39WZd8wN1WDcdFKIAX0/Zooz7OAv7EHgJjnYHAX5P7USRPty3t3qN5gjm3mYgPQ8KUZBvs2hB2tzouIh1kIE80R0UhiBDvNnatM3F97jXDaTnQSEy6G1WrMh43WSyrPYEDqMsxhcUTvJUNxDKBoXIwLdYsnTyimizeb2nJBGSIJxKKSgcbyC6sAE1KEQGvwp0gh86JOEouOh2qxJcwQuiUDIhvzDTtWwg3HtWuQ6EkYVoDJjw4PyZC9PRQOtOAs/xGRXLpv3Bvby/Pw8KUS+8was/ri+52NW+UJHAPuL2482mhzAixa24Xz8OClEvvT605jd3tS6ApKHfOGKCEIaaM3NkUS+hDQnYQSHqRbajIH1WeCZRFaVvhCujbqlmdc5LvYi6T0EPLqz7iN14Wjdtivg1C0eha9Z/OB/x0P49lbf0d4XkoBD1kRBpaNChLiYhYY2JUufIrDpCEkkR5FrE3No9ZmnVYITb9f8BhSZnYemqCy4AAAAASUVORK5CYII=", + monospace_font: "menlo, consolas, monospace" + } + + @doc """ + API used by Plug to start the code reloader. + """ + def init(opts), do: Keyword.put_new(opts, :reloader, &Phoenix.CodeReloader.reload/1) + + @doc """ + API used by Plug to invoke the code reloader on every request. 
+  """
+  def call(conn, opts) do
+    case opts[:reloader].(conn.private.phoenix_endpoint) do
+      :ok ->
+        conn
+      {:error, output} ->
+        conn
+        |> put_resp_content_type("text/html")
+        |> send_resp(500, template(output))
+        |> halt()
+    end
+  end
+
+  defp template(output) do
+    {error, headline} = get_error_details(output)
+
+    """
+    <!DOCTYPE html>
+    <html>
+    <head>
+        <meta charset="utf-8">
+        <title>CompileError</title>
+        <style>
+            body { margin: 0; font-family: sans-serif; color: #{@style.text_color}; }
+            header { background: #{@style.primary}; padding: 1em; }
+            header img { height: 40px; }
+            main { padding: 1em 2em; }
+            h1 { color: #{@style.primary}; }
+            h2 { color: #{@style.accent}; font-weight: normal; }
+            pre { font-family: #{@style.monospace_font}; white-space: pre-wrap; }
+        </style>
+    </head>
+    <body>
+        <header>
+            <img src="#{@style.logo}" alt="Phoenix logo">
+        </header>
+        <main>
+            <h1>#{error}</h1>
+            <h2>#{headline}</h2>
+            <p>Console output is shown below.</p>
+            <pre>#{format_output(output)}</pre>
+        </main>
+    </body>
+    </html>
+ + + """ + end + + defp format_output(output) do + output + |> String.trim + |> Plug.HTML.html_escape + end + + defp get_error_details(output) do + case Regex.run(~r/(?:\n|^)\*\* \(([^ ]+)\) (.*)(?:\n|$)/, output) do + [_, error, headline] -> {error, format_output(headline)} + _ -> {"CompileError", "Compilation error"} + end + end +end diff --git a/deps/phoenix/lib/phoenix/code_reloader/proxy.ex b/deps/phoenix/lib/phoenix/code_reloader/proxy.ex new file mode 100644 index 0000000..fc20360 --- /dev/null +++ b/deps/phoenix/lib/phoenix/code_reloader/proxy.ex @@ -0,0 +1,52 @@ +# A tiny proxy that stores all output sent to the group leader +# while forwarding all requests to it. +defmodule Phoenix.CodeReloader.Proxy do + @moduledoc false + use GenServer + + def start() do + GenServer.start(__MODULE__, :ok) + end + + def stop(proxy) do + GenServer.call(proxy, :stop) + end + + ## Callbacks + + def init(:ok) do + {:ok, ""} + end + + def handle_call(:stop, _from, output) do + {:stop, :normal, output, output} + end + + def handle_info(msg, output) do + case msg do + {:io_request, from, reply, {:put_chars, chars}} -> + put_chars(from, reply, chars, output) + + {:io_request, from, reply, {:put_chars, m, f, as}} -> + put_chars(from, reply, apply(m, f, as), output) + + {:io_request, from, reply, {:put_chars, _encoding, chars}} -> + put_chars(from, reply, chars, output) + + {:io_request, from, reply, {:put_chars, _encoding, m, f, as}} -> + put_chars(from, reply, apply(m, f, as), output) + + {:io_request, _from, _reply, _request} = msg -> + send(Process.group_leader(), msg) + {:noreply, output} + + _ -> + {:noreply, output} + end + end + + defp put_chars(from, reply, chars, output) do + send(Process.group_leader(), {:io_request, from, reply, {:put_chars, chars}}) + {:noreply, output <> IO.chardata_to_string(chars)} + end +end diff --git a/deps/phoenix/lib/phoenix/code_reloader/server.ex b/deps/phoenix/lib/phoenix/code_reloader/server.ex new file mode 100644 index 0000000..85b92b0 --- /dev/null +++ b/deps/phoenix/lib/phoenix/code_reloader/server.ex @@ -0,0 +1,292 @@ +defmodule Phoenix.CodeReloader.Server do + @moduledoc false + use GenServer + + require Logger + alias Phoenix.CodeReloader.Proxy + + def start_link(_) do + GenServer.start_link(__MODULE__, :ok, name: __MODULE__) + end + + def check_symlinks do + GenServer.call(__MODULE__, :check_symlinks, :infinity) + end + + def reload!(endpoint) do + GenServer.call(__MODULE__, {:reload!, endpoint}, :infinity) + end + + def sync do + pid = Process.whereis(__MODULE__) + ref = Process.monitor(pid) + GenServer.cast(pid, {:sync, self(), ref}) + + receive do + ^ref -> :ok + {:DOWN, ^ref, _, _, _} -> :ok + end + end + + ## Callbacks + + def init(:ok) do + {:ok, %{check_symlinks: true, timestamp: timestamp()}} + end + + def handle_call(:check_symlinks, _from, state) do + if state.check_symlinks and Code.ensure_loaded?(Mix.Project) and not Mix.Project.umbrella?() do + priv_path = "#{Mix.Project.app_path()}/priv" + + case :file.read_link(priv_path) do + {:ok, _} -> + :ok + + {:error, _} -> + if can_symlink?() do + File.rm_rf(priv_path) + Mix.Project.build_structure() + else + Logger.warn( + "Phoenix is unable to create symlinks. Phoenix' code reloader will run " <> + "considerably faster if symlinks are allowed." 
<> os_symlink(:os.type()) + ) + end + end + end + + {:reply, :ok, %{state | check_symlinks: false}} + end + + def handle_call({:reload!, endpoint}, from, state) do + compilers = endpoint.config(:reloadable_compilers) + reloadable_apps = endpoint.config(:reloadable_apps) || default_reloadable_apps() + + # We do a backup of the endpoint in case compilation fails. + # If so we can bring it back to finish the request handling. + backup = load_backup(endpoint) + froms = all_waiting([from], endpoint) + + {res, out} = + proxy_io(fn -> + try do + mix_compile(Code.ensure_loaded(Mix.Task), compilers, reloadable_apps, state.timestamp) + catch + :exit, {:shutdown, 1} -> + :error + + kind, reason -> + IO.puts(Exception.format(kind, reason, __STACKTRACE__)) + :error + end + end) + + reply = + case res do + :ok -> + :ok + + :error -> + write_backup(backup) + {:error, out} + end + + Enum.each(froms, &GenServer.reply(&1, reply)) + {:noreply, %{state | timestamp: timestamp()}} + end + + def handle_cast({:sync, pid, ref}, state) do + send(pid, ref) + {:noreply, state} + end + + def handle_info(_, state) do + {:noreply, state} + end + + defp default_reloadable_apps() do + if Mix.Project.umbrella?() do + Enum.map(Mix.Dep.Umbrella.cached(), & &1.app) + else + [Mix.Project.config()[:app]] + end + end + + defp os_symlink({:win32, _}), + do: + " On Windows, the lack of symlinks may even cause empty assets to be served. " <> + "Luckily, you can address this issue by starting your Windows terminal at least " <> + "once with \"Run as Administrator\" and then running your Phoenix application." + + defp os_symlink(_), + do: "" + + defp can_symlink?() do + build_path = Mix.Project.build_path() + symlink = Path.join(Path.dirname(build_path), "__phoenix__") + + case File.ln_s(build_path, symlink) do + :ok -> + File.rm_rf(symlink) + true + + {:error, :eexist} -> + File.rm_rf(symlink) + true + + {:error, _} -> + false + end + end + + defp load_backup(mod) do + mod + |> :code.which() + |> read_backup() + end + + defp read_backup(path) when is_list(path) do + case File.read(path) do + {:ok, binary} -> {:ok, path, binary} + _ -> :error + end + end + + defp read_backup(_path), do: :error + + defp write_backup({:ok, path, file}), do: File.write!(path, file) + defp write_backup(:error), do: :ok + + defp all_waiting(acc, endpoint) do + receive do + {:"$gen_call", from, {:reload!, ^endpoint}} -> all_waiting([from | acc], endpoint) + after + 0 -> acc + end + end + + defp mix_compile({:module, Mix.Task}, compilers, apps_to_reload, timestamp) do + config = Mix.Project.config() + path = Mix.Project.consolidation_path(config) + + if config[:consolidate_protocols] do + purge_modules(path) + Code.delete_path(path) + end + + mix_compile_deps(Mix.Dep.cached(), apps_to_reload, compilers, timestamp) + mix_compile_project(config[:app], apps_to_reload, compilers, timestamp) + + if config[:consolidate_protocols] do + Code.prepend_path(path) + end + + :ok + end + + defp mix_compile({:error, _reason}, _, _, _) do + raise "the Code Reloader is enabled but Mix is not available. If you want to " <> + "use the Code Reloader in production or inside an escript, you must add " <> + ":mix to your applications list. 
Otherwise, you must disable code reloading " <> + "in such environments" + end + + defp mix_compile_deps(deps, apps_to_reload, compilers, timestamp) do + for dep <- deps, dep.app in apps_to_reload do + Mix.Dep.in_dependency(dep, fn _ -> + mix_compile_unless_stale_config(compilers, timestamp) + end) + end + end + + defp mix_compile_project(nil, _, _, _), do: :ok + + defp mix_compile_project(app, apps_to_reload, compilers, timestamp) do + if app in apps_to_reload do + mix_compile_unless_stale_config(compilers, timestamp) + end + end + + defp mix_compile_unless_stale_config(compilers, timestamp) do + manifests = Mix.Tasks.Compile.Elixir.manifests() + configs = Mix.Project.config_files() + config = Mix.Project.config() + + case Mix.Utils.extract_stale(configs, manifests) do + [] -> + # If the manifests are more recent than the timestamp, + # someone updated this app behind the scenes, so purge all beams. + if Mix.Utils.stale?(manifests, [timestamp]) do + purge_modules(Path.join(Mix.Project.app_path(config), "ebin")) + end + + mix_compile(compilers, config) + + files -> + raise """ + could not compile application: #{Mix.Project.config()[:app]}. + + You must restart your server after changing the following files: + + * #{Enum.map_join(files, "\n * ", &Path.relative_to_cwd/1)} + + """ + end + end + + defp mix_compile(compilers, config) do + all = config[:compilers] || Mix.compilers() + + compilers = + for compiler <- compilers, compiler in all do + Mix.Task.reenable("compile.#{compiler}") + compiler + end + + # We call build_structure mostly for Windows so new + # assets in priv are copied to the build directory. + Mix.Project.build_structure(config) + results = Enum.map(compilers, &Mix.Task.run("compile.#{&1}", [])) + + # Results are either {:ok, _} | {:error, _}, {:noop, _} or + # :ok | :error | :noop. So we use proplists to do the unwrapping. + cond do + :proplists.get_value(:error, results, false) -> + exit({:shutdown, 1}) + + :proplists.get_value(:ok, results, false) && config[:consolidate_protocols] -> + Mix.Task.reenable("compile.protocols") + Mix.Task.run("compile.protocols", []) + :ok + + true -> + :ok + end + end + + defp timestamp, do: System.system_time(:second) + + defp purge_modules(path) do + with {:ok, beams} <- File.ls(path) do + Enum.map(beams, &(&1 |> Path.rootname(".beam") |> String.to_atom() |> purge_module())) + end + end + + defp purge_module(module) do + :code.purge(module) + :code.delete(module) + end + + defp proxy_io(fun) do + original_gl = Process.group_leader() + {:ok, proxy_gl} = Proxy.start() + Process.group_leader(self(), proxy_gl) + + try do + {fun.(), Proxy.stop(proxy_gl)} + after + Process.group_leader(self(), original_gl) + Process.exit(proxy_gl, :kill) + end + end +end diff --git a/deps/phoenix/lib/phoenix/config.ex b/deps/phoenix/lib/phoenix/config.ex new file mode 100644 index 0000000..7826906 --- /dev/null +++ b/deps/phoenix/lib/phoenix/config.ex @@ -0,0 +1,166 @@ +defmodule Phoenix.Config do + # Handles Phoenix configuration. + # + # This module is private to Phoenix and should not be accessed + # directly. The Phoenix endpoint configuration can be accessed + # at runtime using the `config/2` function. + @moduledoc false + + require Logger + use GenServer + + @doc """ + Starts a Phoenix configuration handler. + """ + def start_link({module, config, defaults, opts}) do + permanent = Keyword.keys(defaults) + GenServer.start_link(__MODULE__, {module, config, permanent}, opts) + end + + @doc """ + Puts a given key-value pair in config. 
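+
+  For example (the key and value below are illustrative):
+
+      Phoenix.Config.put_new(MyAppWeb.Endpoint, :my_key, :my_value)
+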
+ """ + def put_new(module, key, value) do + :ets.insert_new(module, {key, value}) + end + + @doc """ + Adds permanent configuration. + + Permanent configuration is not deleted on hot code reload. + """ + def permanent(module, key, value) do + pid = :ets.lookup_element(module, :__config__, 2) + GenServer.call(pid, {:permanent, key, value}) + end + + @doc """ + Caches a value in Phoenix configuration handler for the module. + + The given function needs to return a tuple with `:cache` if the + value should be cached or `:nocache` if the value should not be + cached because it can be consequently considered stale. + + Notice writes are not serialized to the server, we expect the + function that generates the cache to be idempotent. + """ + @spec cache(module, term, (module -> {:cache | :nocache, term})) :: term + def cache(module, key, fun) do + try do + :ets.lookup(module, key) + rescue + e -> + case :ets.info(module) do + :undefined -> + raise "could not find ets table for endpoint #{inspect(module)}. Make sure your endpoint is started and note you cannot access endpoint functions at compile-time." + + _ -> + reraise e, __STACKTRACE__ + end + else + [{^key, :cache, val}] -> + val + + [] -> + case fun.(module) do + {:cache, val} -> + :ets.insert(module, {key, :cache, val}) + val + + {:nocache, val} -> + val + end + end + end + + @doc """ + Clears all cached entries in the endpoint. + """ + @spec clear_cache(module) :: :ok + def clear_cache(module) do + :ets.match_delete(module, {:_, :cache, :_}) + :ok + end + + @doc """ + Reads the configuration for module from the given OTP app. + + Useful to read a particular value at compilation time. + """ + def from_env(otp_app, module, defaults) do + config = fetch_config(otp_app, module) + + merge(defaults, config) + end + + @doc """ + Take 2 keyword lists and merge them recursively. + + Used to merge configuration values into defaults. + """ + def merge(a, b), do: Keyword.merge(a, b, &merger/3) + + defp fetch_config(otp_app, module) do + case Application.fetch_env(otp_app, module) do + {:ok, conf} -> conf + :error -> [] + end + end + + defp merger(_k, v1, v2) do + if Keyword.keyword?(v1) and Keyword.keyword?(v2) do + Keyword.merge(v1, v2, &merger/3) + else + v2 + end + end + + @doc """ + Changes the configuration for the given module. + + It receives a keyword list with changed config and another + with removed ones. The changed config are updated while the + removed ones stop the configuration server, effectively removing + the table. 
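+
+  This is typically reached from an endpoint's `config_change/2` callback,
+  roughly as:
+
+      Phoenix.Config.config_change(MyAppWeb.Endpoint, changed, removed)
+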
+ """ + def config_change(module, changed, removed) do + pid = :ets.lookup_element(module, :__config__, 2) + GenServer.call(pid, {:config_change, changed, removed}) + end + + # Callbacks + + def init({module, config, permanent}) do + :ets.new(module, [:named_table, :public, read_concurrency: true]) + update(module, config, []) + :ets.insert(module, {:__config__, self()}) + {:ok, {module, [:__config__ | permanent]}} + end + + def handle_call({:permanent, key, value}, _from, {module, permanent}) do + :ets.insert(module, {key, value}) + {:reply, :ok, {module, [key | permanent]}} + end + + def handle_call({:config_change, changed, removed}, _from, {module, permanent}) do + cond do + changed = changed[module] -> + update(module, changed, permanent) + {:reply, :ok, {module, permanent}} + + module in removed -> + {:stop, :normal, :ok, {module, permanent}} + + true -> + {:reply, :ok, {module, permanent}} + end + end + + defp update(module, config, permanent) do + old_keys = :ets.select(module, [{{:"$1", :_}, [], [:"$1"]}]) + new_keys = Enum.map(config, &elem(&1, 0)) + Enum.each((old_keys -- new_keys) -- permanent, &:ets.delete(module, &1)) + :ets.insert(module, config) + clear_cache(module) + end +end diff --git a/deps/phoenix/lib/phoenix/controller.ex b/deps/phoenix/lib/phoenix/controller.ex new file mode 100644 index 0000000..e8155d8 --- /dev/null +++ b/deps/phoenix/lib/phoenix/controller.ex @@ -0,0 +1,1591 @@ +defmodule Phoenix.Controller do + import Plug.Conn + alias Plug.Conn.AlreadySentError + + require Logger + require Phoenix.Endpoint + + @unsent [:unset, :set, :set_chunked, :set_file] + + @moduledoc """ + Controllers are used to group common functionality in the same + (pluggable) module. + + For example, the route: + + get "/users/:id", MyAppWeb.UserController, :show + + will invoke the `show/2` action in the `MyAppWeb.UserController`: + + defmodule MyAppWeb.UserController do + use MyAppWeb, :controller + + def show(conn, %{"id" => id}) do + user = Repo.get(User, id) + render(conn, "show.html", user: user) + end + end + + An action is a regular function that receives the connection + and the request parameters as arguments. The connection is a + `Plug.Conn` struct, as specified by the Plug library. + + ## Options + + When used, the controller supports the following options: + + * `:namespace` - sets the namespace to properly inflect + the layout view. By default it uses the base alias + in your controller name + + * `:put_default_views` - controls whether the default view + and layout should be set or not + + ## Connection + + A controller by default provides many convenience functions for + manipulating the connection, rendering templates, and more. + + Those functions are imported from two modules: + + * `Plug.Conn` - a collection of low-level functions to work with + the connection + + * `Phoenix.Controller` - functions provided by Phoenix + to support rendering, and other Phoenix specific behaviour + + If you want to have functions that manipulate the connection + without fully implementing the controller, you can import both + modules directly instead of `use Phoenix.Controller`. + + ## Plug pipeline + + As with routers, controllers also have their own plug pipeline. 
+ However, different from routers, controllers have a single pipeline: + + defmodule MyAppWeb.UserController do + use MyAppWeb, :controller + + plug :authenticate, usernames: ["jose", "eric", "sonny"] + + def show(conn, params) do + # authenticated users only + end + + defp authenticate(conn, options) do + if get_session(conn, :username) in options[:usernames] do + conn + else + conn |> redirect(to: "/") |> halt() + end + end + end + + The `:authenticate` plug will be invoked before the action. If the + plug calls `Plug.Conn.halt/1` (which is by default imported into + controllers), it will halt the pipeline and won't invoke the action. + + ### Guards + + `plug/2` in controllers supports guards, allowing a developer to configure + a plug to only run in some particular action: + + plug :authenticate, [usernames: ["jose", "eric", "sonny"]] when action in [:show, :edit] + plug :authenticate, [usernames: ["admin"]] when not action in [:index] + + The first plug will run only when action is show or edit. The second plug will + always run, except for the index action. + + Those guards work like regular Elixir guards and the only variables accessible + in the guard are `conn`, the `action` as an atom and the `controller` as an + alias. + + ## Controllers are plugs + + Like routers, controllers are plugs, but they are wired to dispatch + to a particular function which is called an action. + + For example, the route: + + get "/users/:id", UserController, :show + + will invoke `UserController` as a plug: + + UserController.call(conn, :show) + + which will trigger the plug pipeline and which will eventually + invoke the inner action plug that dispatches to the `show/2` + function in `UserController`. + + As controllers are plugs, they implement both [`init/1`](`c:Plug.init/1`) and + [`call/2`](`c:Plug.call/2`), and it also provides a function named `action/2` + which is responsible for dispatching the appropriate action + after the plug stack (and is also overridable). + + ### Overriding `action/2` for custom arguments + + Phoenix injects an `action/2` plug in your controller which calls the + function matched from the router. By default, it passes the conn and params. + In some cases, overriding the `action/2` plug in your controller is a + useful way to inject arguments into your actions that you would otherwise + need to repeatedly fetch off the connection. For example, imagine if you + stored a `conn.assigns.current_user` in the connection and wanted quick + access to the user for every action in your controller: + + def action(conn, _) do + args = [conn, conn.params, conn.assigns.current_user] + apply(__MODULE__, action_name(conn), args) + end + + def index(conn, _params, user) do + videos = Repo.all(user_videos(user)) + # ... + end + + def delete(conn, %{"id" => id}, user) do + video = Repo.get!(user_videos(user), id) + # ... + end + + ## Rendering and layouts + + One of the main features provided by controllers is the ability + to perform content negotiation and render templates based on + information sent by the client. Read `render/3` to learn more. + + It is also important not to confuse `Phoenix.Controller.render/3` + with `Phoenix.View.render/3`. The former expects + a connection and relies on content negotiation while the latter is + connection-agnostic and typically invoked from your views. 
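+
+  As a rough illustration:
+
+      # in a controller action: negotiates the format and sends the response
+      render(conn, "show.html", user: user)
+
+      # in a view or template: returns the rendered content without a conn
+      Phoenix.View.render(MyAppWeb.UserView, "user.html", user: user)
+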
+ """ + defmacro __using__(opts) do + quote bind_quoted: [opts: opts] do + import Phoenix.Controller + + # TODO v2: No longer automatically import dependencies + import Plug.Conn + + use Phoenix.Controller.Pipeline + + if Keyword.get(opts, :put_default_views, true) do + plug :put_new_layout, {Phoenix.Controller.__layout__(__MODULE__, opts), :app} + plug :put_new_view, Phoenix.Controller.__view__(__MODULE__) + end + end + end + + @doc """ + Registers the plug to call as a fallback to the controller action. + + A fallback plug is useful to translate common domain data structures + into a valid `%Plug.Conn{}` response. If the controller action fails to + return a `%Plug.Conn{}`, the provided plug will be called and receive + the controller's `%Plug.Conn{}` as it was before the action was invoked + along with the value returned from the controller action. + + ## Examples + + defmodule MyController do + use Phoenix.Controller + + action_fallback MyFallbackController + + def show(conn, %{"id" => id}, current_user) do + with {:ok, post} <- Blog.fetch_post(id), + :ok <- Authorizer.authorize(current_user, :view, post) do + + render(conn, "show.json", post: post) + end + end + end + + In the above example, `with` is used to match only a successful + post fetch, followed by valid authorization for the current user. + In the event either of those fail to match, `with` will not invoke + the render block and instead return the unmatched value. In this case, + imagine `Blog.fetch_post/2` returned `{:error, :not_found}` or + `Authorizer.authorize/3` returned `{:error, :unauthorized}`. For cases + where these data structures serve as return values across multiple + boundaries in our domain, a single fallback module can be used to + translate the value into a valid response. For example, you could + write the following fallback controller to handle the above values: + + defmodule MyFallbackController do + use Phoenix.Controller + + def call(conn, {:error, :not_found}) do + conn + |> put_status(:not_found) + |> put_view(MyErrorView) + |> render(:"404") + end + + def call(conn, {:error, :unauthorized}) do + conn + |> put_status(403) + |> put_view(MyErrorView) + |> render(:"403") + end + end + """ + defmacro action_fallback(plug) do + Phoenix.Controller.Pipeline.__action_fallback__(plug, __CALLER__) + end + + @doc """ + Returns the action name as an atom, raises if unavailable. + """ + @spec action_name(Plug.Conn.t) :: atom + def action_name(conn), do: conn.private.phoenix_action + + @doc """ + Returns the controller module as an atom, raises if unavailable. + """ + @spec controller_module(Plug.Conn.t) :: atom + def controller_module(conn), do: conn.private.phoenix_controller + + @doc """ + Returns the router module as an atom, raises if unavailable. + """ + @spec router_module(Plug.Conn.t) :: atom + def router_module(conn), do: conn.private.phoenix_router + + @doc """ + Returns the endpoint module as an atom, raises if unavailable. + """ + @spec endpoint_module(Plug.Conn.t) :: atom + def endpoint_module(conn), do: conn.private.phoenix_endpoint + + @doc """ + Returns the template name rendered in the view as a string + (or nil if no template was rendered). + """ + @spec view_template(Plug.Conn.t) :: binary | nil + def view_template(conn) do + conn.private[:phoenix_template] + end + + @doc """ + Sends JSON response. + + It uses the configured `:json_library` under the `:phoenix` + application for `:json` to pick up the encoder module. 
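+
+  That library is usually set in configuration, for example:
+
+      config :phoenix, :json_library, Jason
+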
+
+  ## Examples
+
+      iex> json(conn, %{id: 123})
+
+  """
+  @spec json(Plug.Conn.t, term) :: Plug.Conn.t
+  def json(conn, data) do
+    response = Phoenix.json_library().encode_to_iodata!(data)
+    send_resp(conn, conn.status || 200, "application/json", response)
+  end
+
+  @doc """
+  A plug that may convert a JSON response into a JSONP one.
+
+  In case a JSON response is returned, it will be converted
+  to a JSONP as long as the callback field is present in
+  the query string. The callback field itself defaults to
+  "callback", but may be configured with the callback option.
+
+  In case there is no callback or the response is not encoded
+  in JSON format, it is a no-op.
+
+  Only alphanumeric characters and underscore are allowed in the
+  callback name. Otherwise an exception is raised.
+
+  ## Examples
+
+      # Will convert JSON to JSONP if callback=someFunction is given
+      plug :allow_jsonp
+
+      # Will convert JSON to JSONP if cb=someFunction is given
+      plug :allow_jsonp, callback: "cb"
+
+  """
+  @spec allow_jsonp(Plug.Conn.t, Keyword.t) :: Plug.Conn.t
+  def allow_jsonp(conn, opts \\ []) do
+    callback = Keyword.get(opts, :callback, "callback")
+    case Map.fetch(conn.query_params, callback) do
+      :error -> conn
+      {:ok, ""} -> conn
+      {:ok, cb} ->
+        validate_jsonp_callback!(cb)
+        register_before_send(conn, fn conn ->
+          if json_response?(conn) do
+            conn
+            |> put_resp_header("content-type", "application/javascript")
+            |> resp(conn.status, jsonp_body(conn.resp_body, cb))
+          else
+            conn
+          end
+        end)
+    end
+  end
+
+  defp json_response?(conn) do
+    case get_resp_header(conn, "content-type") do
+      ["application/json;" <> _] -> true
+      ["application/json"] -> true
+      _ -> false
+    end
+  end
+
+  defp jsonp_body(data, callback) do
+    body =
+      data
+      |> IO.iodata_to_binary()
+      |> String.replace(<<0x2028::utf8>>, "\\u2028")
+      |> String.replace(<<0x2029::utf8>>, "\\u2029")
+
+    "/**/ typeof #{callback} === 'function' && #{callback}(#{body});"
+  end
+
+  defp validate_jsonp_callback!(<<h, t::binary>>)
+       when h in ?0..?9 or h in ?A..?Z or h in ?a..?z or h == ?_,
+       do: validate_jsonp_callback!(t)
+  defp validate_jsonp_callback!(<<>>), do: :ok
+  defp validate_jsonp_callback!(_),
+    do: raise(ArgumentError, "the JSONP callback name contains invalid characters")
+
+  @doc """
+  Sends text response.
+
+  ## Examples
+
+      iex> text(conn, "hello")
+
+      iex> text(conn, :implements_to_string)
+
+  """
+  @spec text(Plug.Conn.t, String.Chars.t) :: Plug.Conn.t
+  def text(conn, data) do
+    send_resp(conn, conn.status || 200, "text/plain", to_string(data))
+  end
+
+  @doc """
+  Sends html response.
+
+  ## Examples
+
+      iex> html(conn, "<html><head>...")
+
+  """
+  @spec html(Plug.Conn.t, iodata) :: Plug.Conn.t
+  def html(conn, data) do
+    send_resp(conn, conn.status || 200, "text/html", data)
+  end
+
+  @doc """
+  Sends redirect response to the given url.
+
+  For security, `:to` only accepts paths. Use the `:external`
+  option to redirect to any URL.
+
+  The response will be sent with the status code defined within
+  the connection, via `Plug.Conn.put_status/2`. If no status
+  code is set, a 302 response is sent.
+
+  ## Examples
+
+      iex> redirect(conn, to: "/login")
+
+      iex> redirect(conn, external: "https://elixir-lang.org")
+
+  """
+  def redirect(conn, opts) when is_list(opts) do
+    url = url(opts)
+    html = Plug.HTML.html_escape(url)
+    body = "<html><body>You are being <a href=\"#{html}\">redirected</a>.</body></html>"
+ + conn + |> put_resp_header("location", url) + |> send_resp(conn.status || 302, "text/html", body) + end + + defp url(opts) do + cond do + to = opts[:to] -> validate_local_url(to) + external = opts[:external] -> external + true -> raise ArgumentError, "expected :to or :external option in redirect/2" + end + end + @invalid_local_url_chars ["\\"] + defp validate_local_url("//" <> _ = to), do: raise_invalid_url(to) + defp validate_local_url("/" <> _ = to) do + if String.contains?(to, @invalid_local_url_chars) do + raise ArgumentError, "unsafe characters detected for local redirect in URL #{inspect to}" + else + to + end + end + defp validate_local_url(to), do: raise_invalid_url(to) + + @spec raise_invalid_url(term()) :: no_return() + defp raise_invalid_url(url) do + raise ArgumentError, "the :to option in redirect expects a path but was #{inspect url}" + end + + @doc """ + Stores the view for rendering. + + Raises `Plug.Conn.AlreadySentError` if `conn` is already sent. + """ + @spec put_view(Plug.Conn.t, atom) :: Plug.Conn.t + def put_view(%Plug.Conn{state: state} = conn, module) when state in @unsent do + put_private(conn, :phoenix_view, module) + end + + def put_view(%Plug.Conn{}, _module), do: raise AlreadySentError + + @doc """ + Stores the view for rendering if one was not stored yet. + + Raises `Plug.Conn.AlreadySentError` if `conn` is already sent. + """ + @spec put_new_view(Plug.Conn.t, atom) :: Plug.Conn.t + def put_new_view(%Plug.Conn{state: state} = conn, module) + when state in @unsent do + update_in conn.private, &Map.put_new(&1, :phoenix_view, module) + end + + def put_new_view(%Plug.Conn{}, _module), do: raise AlreadySentError + + @doc """ + Retrieves the current view. + """ + @spec view_module(Plug.Conn.t) :: atom + def view_module(conn), do: conn.private.phoenix_view + + @doc """ + Stores the layout for rendering. + + The layout must be a tuple, specifying the layout view and the layout + name, or false. In case a previous layout is set, `put_layout` also + accepts the layout name to be given as a string or as an atom. If a + string, it must contain the format. Passing an atom means the layout + format will be found at rendering time, similar to the template in + `render/3`. It can also be set to `false`. In this case, no layout + would be used. + + ## Examples + + iex> layout(conn) + false + + iex> conn = put_layout conn, {AppView, "application.html"} + iex> layout(conn) + {AppView, "application.html"} + + iex> conn = put_layout conn, "print.html" + iex> layout(conn) + {AppView, "print.html"} + + iex> conn = put_layout conn, :print + iex> layout(conn) + {AppView, :print} + + Raises `Plug.Conn.AlreadySentError` if `conn` is already sent. 
+ """ + @spec put_layout(Plug.Conn.t, {atom, binary | atom} | atom | binary | false) :: Plug.Conn.t + def put_layout(%Plug.Conn{state: state} = conn, layout) do + if state in @unsent do + do_put_layout(conn, :phoenix_layout, layout) + else + raise AlreadySentError + end + end + + defp do_put_layout(conn, private_key, false) do + put_private(conn, private_key, false) + end + + defp do_put_layout(conn, private_key, {mod, layout}) when is_atom(mod) do + put_private(conn, private_key, {mod, layout}) + end + + defp do_put_layout(conn, private_key, layout) when is_binary(layout) or is_atom(layout) do + update_in conn.private, fn private -> + case Map.get(private, private_key, false) do + {mod, _} -> Map.put(private, private_key, {mod, layout}) + false -> raise "cannot use put_layout/2 or put_root_layout/2 with atom/binary when layout is false, use a tuple instead" + end + end + end + + @doc """ + Stores the layout for rendering if one was not stored yet. + + Raises `Plug.Conn.AlreadySentError` if `conn` is already sent. + """ + @spec put_new_layout(Plug.Conn.t, {atom, binary | atom} | false) :: Plug.Conn.t + def put_new_layout(%Plug.Conn{state: state} = conn, layout) + when (is_tuple(layout) and tuple_size(layout) == 2) or layout == false do + if state in @unsent do + update_in conn.private, &Map.put_new(&1, :phoenix_layout, layout) + else + raise AlreadySentError + end + end + + @doc """ + Stores the root layout for rendering. + + Like `put_layout/2`, the layout must be a tuple, + specifying the layout view and the layout name, or false. + + In case a previous layout is set, `put_root_layout` also + accepts the layout name to be given as a string or as an atom. If a + string, it must contain the format. Passing an atom means the layout + format will be found at rendering time, similar to the template in + `render/3`. It can also be set to `false`. In this case, no layout + would be used. + + ## Examples + + iex> root_layout(conn) + false + + iex> conn = put_root_layout conn, {AppView, "root.html"} + iex> root_layout(conn) + {AppView, "root.html"} + + iex> conn = put_root_layout conn, "bare.html" + iex> root_layout(conn) + {AppView, "bare.html"} + + iex> conn = put_root_layout conn, :bare + iex> root_layout(conn) + {AppView, :bare} + + Raises `Plug.Conn.AlreadySentError` if `conn` is already sent. + """ + @spec put_root_layout(Plug.Conn.t, {atom, binary | atom} | atom | binary | false) :: Plug.Conn.t + def put_root_layout(%Plug.Conn{state: state} = conn, layout) do + if state in @unsent do + do_put_layout(conn, :phoenix_root_layout, layout) + else + raise AlreadySentError + end + end + + @doc """ + Sets which formats have a layout when rendering. + + ## Examples + + iex> layout_formats(conn) + ["html"] + + iex> put_layout_formats(conn, ["html", "mobile"]) + iex> layout_formats(conn) + ["html", "mobile"] + + Raises `Plug.Conn.AlreadySentError` if `conn` is already sent. + """ + @spec put_layout_formats(Plug.Conn.t, [String.t]) :: Plug.Conn.t + def put_layout_formats(%Plug.Conn{state: state} = conn, formats) + when state in @unsent and is_list(formats) do + put_private(conn, :phoenix_layout_formats, formats) + end + + def put_layout_formats(%Plug.Conn{}, _formats), do: raise AlreadySentError + + @doc """ + Retrieves current layout formats. + """ + @spec layout_formats(Plug.Conn.t) :: [String.t] + def layout_formats(conn) do + Map.get(conn.private, :phoenix_layout_formats, ~w(html)) + end + + @doc """ + Retrieves the current layout. 
+ """ + @spec layout(Plug.Conn.t) :: {atom, String.t | atom} | false + def layout(conn), do: conn.private |> Map.get(:phoenix_layout, false) + + @doc """ + Retrieves the current root layout. + """ + @spec root_layout(Plug.Conn.t) :: {atom, String.t | atom} | false + def root_layout(conn), do: conn.private |> Map.get(:phoenix_root_layout, false) + + @doc """ + Render the given template or the default template + specified by the current action with the given assigns. + + See `render/3` for more information. + """ + @spec render(Plug.Conn.t, Keyword.t | map | binary | atom) :: Plug.Conn.t + def render(conn, template_or_assigns \\ []) + + def render(conn, template) when is_binary(template) or is_atom(template) do + render(conn, template, []) + end + + def render(conn, assigns) do + render(conn, action_name(conn), assigns) + end + + @doc """ + Renders the given `template` and `assigns` based on the `conn` information. + + Once the template is rendered, the template format is set as the response + content type (for example, an HTML template will set "text/html" as response + content type) and the data is sent to the client with default status of 200. + + ## Arguments + + * `conn` - the `Plug.Conn` struct + + * `template` - which may be an atom or a string. If an atom, like `:index`, + it will render a template with the same format as the one returned by + `get_format/1`. For example, for an HTML request, it will render + the "index.html" template. If the template is a string, it must contain + the extension too, like "index.json" + + * `assigns` - a dictionary with the assigns to be used in the view. Those + assigns are merged and have higher precedence than the connection assigns + (`conn.assigns`) + + ## Examples + + defmodule MyAppWeb.UserController do + use Phoenix.Controller + + def show(conn, _params) do + render(conn, "show.html", message: "Hello") + end + end + + The example above renders a template "show.html" from the `MyAppWeb.UserView` + and sets the response content type to "text/html". + + In many cases, you may want the template format to be set dynamically based + on the request. To do so, you can pass the template name as an atom (without + the extension): + + def show(conn, _params) do + render(conn, :show, message: "Hello") + end + + In order for the example above to work, we need to do content negotiation with + the accepts plug before rendering. You can do so by adding the following to your + pipeline (in the router): + + plug :accepts, ["html"] + + ## Views + + By default, Controllers render templates in a view with a similar name to the + controller. For example, `MyAppWeb.UserController` will render templates inside + the `MyAppWeb.UserView`. This information can be changed any time by using the + `put_view/2` function: + + def show(conn, _params) do + conn + |> put_view(MyAppWeb.SpecialView) + |> render(:show, message: "Hello") + end + + `put_view/2` can also be used as a plug: + + defmodule MyAppWeb.UserController do + use Phoenix.Controller + + plug :put_view, MyAppWeb.SpecialView + + def show(conn, _params) do + render(conn, :show, message: "Hello") + end + end + + ## Layouts + + Templates are often rendered inside layouts. By default, Phoenix + will render layouts for html requests. For example: + + defmodule MyAppWeb.UserController do + use Phoenix.Controller + + def show(conn, _params) do + render(conn, "show.html", message: "Hello") + end + end + + will render the "show.html" template inside an "app.html" + template specified in `MyAppWeb.LayoutView`. 
`put_layout/2` can be used + to change the layout, similar to how `put_view/2` can be used to change + the view. + + `layout_formats/1` and `put_layout_formats/2` can be used to configure + which formats support/require layout rendering (defaults to "html" only). + """ + @spec render(Plug.Conn.t, binary | atom, Keyword.t | map | binary | atom) :: Plug.Conn.t + def render(conn, template, assigns) + when is_atom(template) and (is_map(assigns) or is_list(assigns)) do + format = + get_format(conn) || + raise "cannot render template #{inspect template} because conn.params[\"_format\"] is not set. " <> + "Please set `plug :accepts, ~w(html json ...)` in your pipeline." + + render_and_send(conn, format, template, assigns) + end + + def render(conn, template, assigns) + when is_binary(template) and (is_map(assigns) or is_list(assigns)) do + case Path.extname(template) do + "." <> format -> + render_and_send(conn, format, template, assigns) + "" -> + raise "cannot render template #{inspect template} without format. Use an atom if the " <> + "template format is meant to be set dynamically based on the request format" + end + end + + def render(conn, view, template) + when is_atom(view) and (is_binary(template) or is_atom(template)) do + IO.warn "#{__MODULE__}.render/3 with a view is deprecated, see the documentation for render/3 for an alternative" + render(conn, view, template, []) + end + + @doc false + def render(conn, view, template, assigns) + when is_atom(view) and (is_binary(template) or is_atom(template)) do + IO.warn "#{__MODULE__}.render/4 with a view is deprecated, see the documentation for render/3 for an alternative" + conn + |> put_view(view) + |> render(template, assigns) + end + + defp render_and_send(conn, format, template, assigns) do + template = template_name(template, format) + view = + Map.get(conn.private, :phoenix_view) || + raise "a view module was not specified, set one with put_view/2" + + layout_format? = format in layout_formats(conn) + conn = prepare_assigns(conn, assigns, template, format, layout_format?) + data = render_with_layouts(conn, view, template, format, layout_format?) + + conn + |> ensure_resp_content_type(MIME.type(format)) + |> send_resp(conn.status || 200, data) + end + + defp render_with_layouts(conn, view, template, format, layout_format?) do + render_assigns = Map.put(conn.assigns, :conn, conn) + + case layout_format? and root_layout(conn) do + {layout_mod, layout_tpl} -> + inner = Phoenix.View.render(view, template, render_assigns) + root_assigns = render_assigns |> Map.put(:inner_content, inner) |> Map.delete(:layout) + Phoenix.View.render_to_iodata(layout_mod, template_name(layout_tpl, format), root_assigns) + + false -> + Phoenix.View.render_to_iodata(view, template, render_assigns) + end + end + + defp prepare_assigns(conn, assigns, template, format, layout_format?) do + assigns = to_map(assigns) + + layout = + case layout_format? 
and assigns_layout(conn, assigns) do + {mod, layout} -> {mod, template_name(layout, format)} + false -> false + end + + conn + |> put_private(:phoenix_template, template) + |> Map.update!(:assigns, fn prev -> + prev + |> Map.merge(assigns) + |> Map.put(:layout, layout) + end) + end + + defp assigns_layout(conn, assigns) do + case Map.fetch(assigns, :layout) do + {:ok, layout} -> layout + :error -> layout(conn) + end + end + + defp to_map(assigns) when is_map(assigns), do: assigns + defp to_map(assigns) when is_list(assigns), do: :maps.from_list(assigns) + + defp template_name(name, format) when is_atom(name), do: + Atom.to_string(name) <> "." <> format + defp template_name(name, _format) when is_binary(name), do: + name + + defp send_resp(conn, default_status, default_content_type, body) do + conn + |> ensure_resp_content_type(default_content_type) + |> send_resp(conn.status || default_status, body) + end + + defp ensure_resp_content_type(%Plug.Conn{resp_headers: resp_headers} = conn, content_type) do + if List.keyfind(resp_headers, "content-type", 0) do + conn + else + content_type = content_type <> "; charset=utf-8" + %Plug.Conn{conn | resp_headers: [{"content-type", content_type}|resp_headers]} + end + end + + @doc """ + Puts the url string or `%URI{}` to be used for route generation. + + This function overrides the default URL generation pulled + from the `%Plug.Conn{}`'s endpoint configuration. + + ## Examples + + Imagine your application is configured to run on "example.com" + but after the user signs in, you want all links to use + "some_user.example.com". You can do so by setting the proper + router url configuration: + + def put_router_url_by_user(conn) do + put_router_url(conn, get_user_from_conn(conn).account_name <> ".example.com") + end + + Now when you call `Routes.some_route_url(conn, ...)`, it will use + the router url set above. Keep in mind that, if you want to generate + routes to the *current* domain, it is preferred to use + `Routes.some_route_path` helpers, as those are always relative. + """ + def put_router_url(conn, %URI{} = uri) do + put_private(conn, :phoenix_router_url, URI.to_string(uri)) + end + def put_router_url(conn, url) when is_binary(url) do + put_private(conn, :phoenix_router_url, url) + end + + @doc """ + Puts the URL or `%URI{}` to be used for the static url generation. + + Using this function on a `%Plug.Conn{}` struct tells `static_url/2` to use + the given information for URL generation instead of the the `%Plug.Conn{}`'s + endpoint configuration (much like `put_router_url/2` but for static URLs). + """ + def put_static_url(conn, %URI{} = uri) do + put_private(conn, :phoenix_static_url, URI.to_string(uri)) + end + def put_static_url(conn, url) when is_binary(url) do + put_private(conn, :phoenix_static_url, url) + end + + @doc """ + Puts the format in the connection. + + This format is used when rendering a template as an atom. + For example, `render(conn, :foo)` will render `"foo.FORMAT"` + where the format is the one set here. The default format + is typically set from the negotiation done in `accepts/2`. + + See `get_format/1` for retrieval. + """ + def put_format(conn, format), do: put_private(conn, :phoenix_format, format) + + @doc """ + Returns the request format, such as "json", "html". + + This format is used when rendering a template as an atom. + For example, `render(conn, :foo)` will render `"foo.FORMAT"` + where the format is the one set here. The default format + is typically set from the negotiation done in `accepts/2`. 
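+
+ A minimal sketch (the format and template names are only illustrative),
+ hardcoding a format instead of negotiating it with `accepts/2`:
+
+     plug :put_format, "json"
+
+     def show(conn, _params) do
+       get_format(conn) #=> "json"
+       render(conn, :show)      # renders "show.json"
+     end
+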
+ """ + def get_format(conn) do + conn.private[:phoenix_format] || conn.params["_format"] + end + + @doc """ + Sends the given file or binary as a download. + + The second argument must be `{:binary, contents}`, where + `contents` will be sent as download, or`{:file, path}`, + where `path` is the filesystem location of the file to + be sent. Be careful to not interpolate the path from + external parameters, as it could allow traversal of the + filesystem. + + The download is achieved by setting "content-disposition" + to attachment. The "content-type" will also be set based + on the extension of the given filename but can be customized + via the `:content_type` and `:charset` options. + + ## Options + + * `:filename` - the filename to be presented to the user + as download + * `:content_type` - the content type of the file or binary + sent as download. It is automatically inferred from the + filename extension + * `:disposition` - specifies disposition type + (`:attachment` or `:inline`). If `:attachment` was used, + user will be prompted to save the file. If `:inline` was used, + the browser will attempt to open the file. + Defaults to `:attachment`. + * `:charset` - the charset of the file, such as "utf-8". + Defaults to none + * `:offset` - the bytes to offset when reading. Defaults to `0` + * `:length` - the total bytes to read. Defaults to `:all` + * `:encode` - encodes the filename using `URI.encode_www_form/1`. + Defaults to `true`. When `false`, disables encoding. If you + disable encoding, you need to guarantee there are no special + characters in the filename, such as quotes, newlines, etc. + Otherwise you can expose your application to security attacks + + ## Examples + + To send a file that is stored inside your application priv + directory: + + path = Application.app_dir(:my_app, "priv/prospectus.pdf") + send_download(conn, {:file, path}) + + When using `{:file, path}`, the filename is inferred from the + given path but may also be set explicitly. + + To allow the user to download contents that are in memory as + a binary or string: + + send_download(conn, {:binary, "world"}, filename: "hello.txt") + + See `Plug.Conn.send_file/3` and `Plug.Conn.send_resp/3` if you + would like to access the low-level functions used to send files + and responses via Plug. 
+ """ + def send_download(conn, kind, opts \\ []) + + def send_download(conn, {:file, path}, opts) do + filename = opts[:filename] || Path.basename(path) + offset = opts[:offset] || 0 + length = opts[:length] || :all + conn + |> prepare_send_download(filename, opts) + |> send_file(conn.status || 200, path, offset, length) + end + + def send_download(conn, {:binary, contents}, opts) do + filename = opts[:filename] || raise ":filename option is required when sending binary download" + conn + |> prepare_send_download(filename, opts) + |> send_resp(conn.status || 200, contents) + end + + defp prepare_send_download(conn, filename, opts) do + content_type = opts[:content_type] || MIME.from_path(filename) + encoded_filename = encode_filename(filename, Keyword.get(opts, :encode, true)) + disposition_type = get_disposition_type(Keyword.get(opts, :disposition, :attachment)) + warn_if_ajax(conn) + conn + |> put_resp_content_type(content_type, opts[:charset]) + |> put_resp_header("content-disposition", ~s[#{disposition_type}; filename="#{encoded_filename}"]) + end + + defp encode_filename(filename, false), do: filename + defp encode_filename(filename, true), do: URI.encode_www_form(filename) + + defp get_disposition_type(:attachment), do: "attachment" + defp get_disposition_type(:inline), do: "inline" + defp get_disposition_type(other), do: raise ArgumentError, "expected :disposition to be :attachment or :inline, got: #{inspect(other)}" + + defp ajax?(conn) do + case get_req_header(conn, "x-requested-with") do + [value] -> value in ["XMLHttpRequest", "xmlhttprequest"] + [] -> false + end + end + + defp warn_if_ajax(conn) do + if ajax?(conn) do + Logger.warn "send_download/3 has been invoked during an AJAX request. " <> + "The download may not work as expected under XMLHttpRequest" + end + end + + @doc """ + Scrubs the parameters from the request. + + This process is two-fold: + + * Checks to see if the `required_key` is present + * Changes empty parameters of `required_key` (recursively) to nils + + This function is useful for removing empty strings sent + via HTML forms. If you are providing an API, there + is likely no need to invoke `scrub_params/2`. + + If the `required_key` is not present, it will + raise `Phoenix.MissingParamError`. + + ## Examples + + iex> scrub_params(conn, "user") + + """ + @spec scrub_params(Plug.Conn.t, String.t) :: Plug.Conn.t + def scrub_params(conn, required_key) when is_binary(required_key) do + param = Map.get(conn.params, required_key) |> scrub_param() + + unless param do + raise Phoenix.MissingParamError, key: required_key + end + + params = Map.put(conn.params, required_key, param) + %Plug.Conn{conn | params: params} + end + + defp scrub_param(%{__struct__: mod} = struct) when is_atom(mod) do + struct + end + defp scrub_param(%{} = param) do + Enum.reduce(param, %{}, fn({k, v}, acc) -> + Map.put(acc, k, scrub_param(v)) + end) + end + defp scrub_param(param) when is_list(param) do + Enum.map(param, &scrub_param/1) + end + defp scrub_param(param) do + if scrub?(param), do: nil, else: param + end + + defp scrub?(" " <> rest), do: scrub?(rest) + defp scrub?(""), do: true + defp scrub?(_), do: false + + + @doc """ + Enables CSRF protection. + + Currently used as a wrapper function for `Plug.CSRFProtection` + and mainly serves as a function plug in `YourApp.Router`. + + Check `get_csrf_token/0` and `delete_csrf_token/0` for + retrieving and deleting CSRF tokens. 
+ """ + def protect_from_forgery(conn, opts \\ []) do + Plug.CSRFProtection.call(conn, Plug.CSRFProtection.init(opts)) + end + + @doc """ + Put headers that improve browser security. + + It sets the following headers: + + * `x-frame-options` - set to SAMEORIGIN to avoid clickjacking + through iframes unless in the same origin + * `x-content-type-options` - set to nosniff. This requires + script and style tags to be sent with proper content type + * `x-xss-protection` - set to "1; mode=block" to improve XSS + protection on both Chrome and IE + * `x-download-options` - set to noopen to instruct the browser + not to open a download directly in the browser, to avoid + HTML files rendering inline and accessing the security + context of the application (like critical domain cookies) + * `x-permitted-cross-domain-policies` - set to none to restrict + Adobe Flash Playerโ€™s access to data + * `cross-origin-window-policy` - set to deny to avoid window + control attacks + + A custom headers map may also be given to be merged with defaults. + It is recommended for custom header keys to be in lowercase, to avoid sending + duplicate keys in a request. + Additionally, responses with mixed-case headers served over HTTP/2 are not + considered valid by common clients, resulting in dropped responses. + """ + def put_secure_browser_headers(conn, headers \\ %{}) + def put_secure_browser_headers(conn, []) do + put_secure_defaults(conn) + end + def put_secure_browser_headers(conn, headers) when is_map(headers) do + conn + |> put_secure_defaults() + |> merge_resp_headers(headers) + end + defp put_secure_defaults(conn) do + merge_resp_headers(conn, [ + {"x-frame-options", "SAMEORIGIN"}, + {"x-xss-protection", "1; mode=block"}, + {"x-content-type-options", "nosniff"}, + {"x-download-options", "noopen"}, + {"x-permitted-cross-domain-policies", "none"}, + {"cross-origin-window-policy", "deny"} + ]) + end + + @doc """ + Gets or generates a CSRF token. + + If a token exists, it is returned, otherwise it is generated and stored + in the process dictionary. + """ + defdelegate get_csrf_token(), to: Plug.CSRFProtection + + @doc """ + Deletes the CSRF token from the process dictionary. + + *Note*: The token is deleted only after a response has been sent. + """ + defdelegate delete_csrf_token(), to: Plug.CSRFProtection + + @doc """ + Performs content negotiation based on the available formats. + + It receives a connection, a list of formats that the server + is capable of rendering and then proceeds to perform content + negotiation based on the request information. If the client + accepts any of the given formats, the request proceeds. + + If the request contains a "_format" parameter, it is + considered to be the format desired by the client. If no + "_format" parameter is available, this function will parse + the "accept" header and find a matching format accordingly. + + This function is useful when you may want to serve different + content-types (such as JSON and HTML) from the same routes. + However, if you always have distinct routes, you can also + disable content negotiation and simply hardcode your format + of choice in your route pipelines: + + plug :put_format, "html" + + It is important to notice that browsers have historically + sent bad accept headers. 
For this reason, this function will + default to "html" format whenever: + + * the accepted list of arguments contains the "html" format + + * the accept header specified more than one media type preceded + or followed by the wildcard media type "`*/*`" + + This function raises `Phoenix.NotAcceptableError`, which is rendered + with status 406, whenever the server cannot serve a response in any + of the formats expected by the client. + + ## Examples + + `accepts/2` can be invoked as a function: + + iex> accepts(conn, ["html", "json"]) + + or used as a plug: + + plug :accepts, ["html", "json"] + plug :accepts, ~w(html json) + + ## Custom media types + + It is possible to add custom media types to your Phoenix application. + The first step is to teach Plug about those new media types in + your `config/config.exs` file: + + config :mime, :types, %{ + "application/vnd.api+json" => ["json-api"] + } + + The key is the media type, the value is a list of formats the + media type can be identified with. For example, by using + "json-api", you will be able to use templates with extension + "index.json-api" or to force a particular format in a given + URL by sending "?_format=json-api". + + After this change, you must recompile plug: + + $ mix deps.clean mime --build + $ mix deps.get + + And now you can use it in accepts too: + + plug :accepts, ["html", "json-api"] + + """ + @spec accepts(Plug.Conn.t, [binary]) :: Plug.Conn.t | no_return() + def accepts(conn, [_|_] = accepted) do + case Map.fetch(conn.params, "_format") do + {:ok, format} -> + handle_params_accept(conn, format, accepted) + :error -> + handle_header_accept(conn, get_req_header(conn, "accept"), accepted) + end + end + + defp handle_params_accept(conn, format, accepted) do + if format in accepted do + put_format(conn, format) + else + raise Phoenix.NotAcceptableError, + message: "unknown format #{inspect format}, expected one of #{inspect accepted}", + accepts: accepted + end + end + + # In case there is no accept header or the header is */* + # we use the first format specified in the accepts list. + defp handle_header_accept(conn, header, [first|_]) when header == [] or header == ["*/*"] do + put_format(conn, first) + end + + # In case there is a header, we need to parse it. + # But before we check for */* because if one exists and we serve html, + # we unfortunately need to assume it is a browser sending us a request. 
+ defp handle_header_accept(conn, [header|_], accepted) do + if header =~ "*/*" and "html" in accepted do + put_format(conn, "html") + else + parse_header_accept(conn, String.split(header, ","), [], accepted) + end + end + + defp parse_header_accept(conn, [h|t], acc, accepted) do + case Plug.Conn.Utils.media_type(h) do + {:ok, type, subtype, args} -> + exts = parse_exts(type, subtype) + q = parse_q(args) + + if format = (q === 1.0 && find_format(exts, accepted)) do + put_format(conn, format) + else + parse_header_accept(conn, t, [{-q, h, exts}|acc], accepted) + end + :error -> + parse_header_accept(conn, t, acc, accepted) + end + end + + defp parse_header_accept(conn, [], acc, accepted) do + acc + |> Enum.sort() + |> Enum.find_value(&parse_header_accept(conn, &1, accepted)) + |> Kernel.||(refuse(conn, acc, accepted)) + end + + defp parse_header_accept(conn, {_, _, exts}, accepted) do + if format = find_format(exts, accepted) do + put_format(conn, format) + end + end + + defp parse_q(args) do + case Map.fetch(args, "q") do + {:ok, float} -> + case Float.parse(float) do + {float, _} -> float + :error -> 1.0 + end + :error -> + 1.0 + end + end + + defp parse_exts("*", "*"), do: "*/*" + defp parse_exts(type, "*"), do: type + defp parse_exts(type, subtype), do: MIME.extensions(type <> "/" <> subtype) + + defp find_format("*/*", accepted), do: Enum.fetch!(accepted, 0) + defp find_format(exts, accepted) when is_list(exts), do: Enum.find(exts, &(&1 in accepted)) + defp find_format(_type_range, []), do: nil + defp find_format(type_range, [h|t]) do + mime_type = MIME.type(h) + case Plug.Conn.Utils.media_type(mime_type) do + {:ok, accepted_type, _subtype, _args} when type_range === accepted_type -> h + _ -> find_format(type_range, t) + end + end + + @spec refuse(term(), [tuple], [binary]) :: no_return() + defp refuse(_conn, given, accepted) do + raise Phoenix.NotAcceptableError, + accepts: accepted, + message: """ + no supported media type in accept header. + + Expected one of #{inspect accepted} but got the following formats: + + * #{Enum.map_join(given, "\n ", fn {_, header, exts} -> + inspect(header) <> " with extensions: " <> inspect(exts) + end)} + + To accept custom formats, register them under the :mime library + in your config/config.exs file: + + config :mime, :types, %{ + "application/xml" => ["xml"] + } + + And then run `mix deps.clean --build mime` to force it to be recompiled. + """ + end + + @doc """ + Fetches the flash storage. + """ + def fetch_flash(conn, _opts \\ []) do + if Map.get(conn.private, :phoenix_flash) do + conn + else + session_flash = get_session(conn, "phoenix_flash") + conn = persist_flash(conn, session_flash || %{}) + + register_before_send conn, fn conn -> + flash = conn.private.phoenix_flash + flash_size = map_size(flash) + + cond do + is_nil(session_flash) and flash_size == 0 -> + conn + flash_size > 0 and conn.status in 300..308 -> + put_session(conn, "phoenix_flash", flash) + true -> + delete_session(conn, "phoenix_flash") + end + end + end + end + + @doc """ + Merges a map into the flash. + + Returns the updated connection. + + ## Examples + + iex> conn = merge_flash(conn, info: "Welcome Back!") + iex> get_flash(conn, :info) + "Welcome Back!" + + """ + def merge_flash(conn, enumerable) do + map = for {k, v} <- enumerable, into: %{}, do: {flash_key(k), v} + persist_flash(conn, Map.merge(get_flash(conn), map)) + end + + @doc """ + Persists a value in flash. + + Returns the updated connection. 
+ + ## Examples + + iex> conn = put_flash(conn, :info, "Welcome Back!") + iex> get_flash(conn, :info) + "Welcome Back!" + + """ + def put_flash(conn, key, message) do + persist_flash(conn, Map.put(get_flash(conn), flash_key(key), message)) + end + + @doc """ + Returns a map of previously set flash messages or an empty map. + + ## Examples + + iex> get_flash(conn) + %{} + + iex> conn = put_flash(conn, :info, "Welcome Back!") + iex> get_flash(conn) + %{"info" => "Welcome Back!"} + + """ + def get_flash(conn) do + Map.get(conn.private, :phoenix_flash) || + raise ArgumentError, message: "flash not fetched, call fetch_flash/2" + end + + @doc """ + Returns a message from flash by `key` (or `nil` if no message is available for `key`). + + ## Examples + + iex> conn = put_flash(conn, :info, "Welcome Back!") + iex> get_flash(conn, :info) + "Welcome Back!" + + """ + def get_flash(conn, key) do + get_flash(conn)[flash_key(key)] + end + + @doc """ + Generates a status message from the template name. + + ## Examples + + iex> status_message_from_template("404.html") + "Not Found" + iex> status_message_from_template("whatever.html") + "Internal Server Error" + + """ + def status_message_from_template(template) do + template + |> String.split(".") + |> hd() + |> String.to_integer() + |> Plug.Conn.Status.reason_phrase() + rescue + _ -> "Internal Server Error" + end + + @doc """ + Clears all flash messages. + """ + def clear_flash(conn) do + persist_flash(conn, %{}) + end + + defp flash_key(binary) when is_binary(binary), do: binary + defp flash_key(atom) when is_atom(atom), do: Atom.to_string(atom) + + defp persist_flash(conn, value) do + put_private(conn, :phoenix_flash, value) + end + + @doc """ + Returns the current request path with its default query parameters: + + iex> current_path(conn) + "/users/123?existing=param" + + See `current_path/2` to override the default parameters. + + The path is normalized based on the `conn.script_name` and + `conn.path_info`. For example, "/foo//bar/" will become "/foo/bar". + If you want the original path, use `conn.request_path` instead. + """ + def current_path(%Plug.Conn{query_string: ""} = conn) do + normalized_request_path(conn) + end + + def current_path(%Plug.Conn{query_string: query_string} = conn) do + normalized_request_path(conn) <> "?" <> query_string + end + + @doc """ + Returns the current path with the given query parameters. + + You may also retrieve only the request path by passing an + empty map of params. + + ## Examples + + iex> current_path(conn) + "/users/123?existing=param" + + iex> current_path(conn, %{new: "param"}) + "/users/123?new=param" + + iex> current_path(conn, %{filter: %{status: ["draft", "published"]}}) + "/users/123?filter[status][]=draft&filter[status][]=published" + + iex> current_path(conn, %{}) + "/users/123" + + The path is normalized based on the `conn.script_name` and + `conn.path_info`. For example, "/foo//bar/" will become "/foo/bar". + If you want the original path, use `conn.request_path` instead. + """ + def current_path(%Plug.Conn{} = conn, params) when params == %{} do + normalized_request_path(conn) + end + def current_path(%Plug.Conn{} = conn, params) do + normalized_request_path(conn) <> "?" 
<> Plug.Conn.Query.encode(params) + end + + defp normalized_request_path(%{path_info: info, script_name: script}) do + "/" <> Enum.join(script ++ info, "/") + end + + @doc """ + Returns the current request url with its default query parameters: + + iex> current_url(conn) + "https://www.example.com/users/123?existing=param" + + See `current_url/2` to override the default parameters. + """ + def current_url(%Plug.Conn{} = conn) do + Phoenix.Router.Helpers.url(router_module(conn), conn) <> current_path(conn) + end + + @doc ~S""" + Returns the current request URL with query params. + + The path will be retrieved from the currently requested path via + `current_path/1`. The scheme, host and others will be received from + the URL configuration in your Phoenix endpoint. The reason we don't + use the host and scheme information in the request is because most + applications are behind proxies and the host and scheme may not + actually reflect the host and scheme accessed by the client. If you + want to access the url precisely as requested by the client, see + `Plug.Conn.request_url/1`. + + ## Examples + + iex> current_url(conn) + "https://www.example.com/users/123?existing=param" + + iex> current_url(conn, %{new: "param"}) + "https://www.example.com/users/123?new=param" + + iex> current_url(conn, %{}) + "https://www.example.com/users/123" + + ## Custom URL Generation + + In some cases, you'll need to generate a request's URL, but using a + different scheme, different host, etc. This can be accomplished in + two ways. + + If you want to do so in a case-by-case basis, you can define a custom + function that gets the endpoint URI configuration and changes it accordingly. + For example, to get the current URL always in HTTPS format: + + def current_secure_url(conn, params \\ %{}) do + cur_uri = MyAppWeb.Endpoint.struct_url() + cur_path = Phoenix.Controller.current_path(conn, params) + + MyAppWeb.Router.Helpers.url(%URI{cur_uri | scheme: "https"}) <> cur_path + end + + However, if you want all generated URLs to always have a certain schema, + host, etc, you may use `put_router_url/2`. 
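+
+ A minimal sketch of the `put_router_url/2` approach (the plug name and host
+ are only illustrative):
+
+     def put_custom_url(conn, _opts) do
+       put_router_url(conn, "https://public.example.com")
+     end
+
+     plug :put_custom_url
+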
+ """ + def current_url(%Plug.Conn{} = conn, %{} = params) do + Phoenix.Router.Helpers.url(router_module(conn), conn) <> current_path(conn, params) + end + + @doc false + def __view__(controller_module) do + controller_module + |> Phoenix.Naming.unsuffix("Controller") + |> Kernel.<>("View") + |> String.to_atom() + end + + @doc false + def __layout__(controller_module, opts) do + namespace = + if given = Keyword.get(opts, :namespace) do + given + else + controller_module + |> Atom.to_string() + |> String.split(".") + |> Enum.drop(-1) + |> Enum.take(2) + |> Module.concat() + end + Module.concat(namespace, "LayoutView") + end +end diff --git a/deps/phoenix/lib/phoenix/controller/pipeline.ex b/deps/phoenix/lib/phoenix/controller/pipeline.ex new file mode 100644 index 0000000..a99f921 --- /dev/null +++ b/deps/phoenix/lib/phoenix/controller/pipeline.ex @@ -0,0 +1,220 @@ +defmodule Phoenix.Controller.Pipeline do + @moduledoc false + + @doc false + defmacro __using__(_) do + quote do + @behaviour Plug + + require Phoenix.Endpoint + import Phoenix.Controller.Pipeline + + Module.register_attribute(__MODULE__, :plugs, accumulate: true) + @before_compile Phoenix.Controller.Pipeline + @phoenix_fallback :unregistered + + @doc false + def init(opts), do: opts + + @doc false + def call(conn, action) when is_atom(action) do + conn + |> merge_private( + phoenix_controller: __MODULE__, + phoenix_action: action + ) + |> phoenix_controller_pipeline(action) + end + + @doc false + def action(%Plug.Conn{private: %{phoenix_action: action}} = conn, _options) do + apply(__MODULE__, action, [conn, conn.params]) + end + + defoverridable init: 1, call: 2, action: 2 + end + end + + @doc false + def __action_fallback__(plug, caller) do + plug = Macro.expand(plug, %{caller | function: {:init, 1}}) + quote bind_quoted: [plug: plug] do + @phoenix_fallback Phoenix.Controller.Pipeline.validate_fallback( + plug, + __MODULE__, + Module.get_attribute(__MODULE__, :phoenix_fallback) + ) + end + end + + @doc false + def validate_fallback(plug, module, fallback) do + cond do + fallback == nil -> + raise """ + action_fallback can only be called when using Phoenix.Controller. + Add `use Phoenix.Controller` to #{inspect(module)} + """ + + fallback != :unregistered -> + raise "action_fallback can only be called a single time per controller." + + not is_atom(plug) -> + raise ArgumentError, + "expected action_fallback to be a module or function plug, got #{inspect(plug)}" + + fallback == :unregistered -> + case Atom.to_charlist(plug) do + ~c"Elixir." 
++ _ -> {:module, plug} + _ -> {:function, plug} + end + end + end + + @doc false + defmacro __before_compile__(env) do + action = {:action, [], true} + plugs = [action | Module.get_attribute(env.module, :plugs)] + + {conn, body} = + Plug.Builder.compile(env, plugs, + log_on_halt: :debug, + init_mode: Phoenix.plug_init_mode() + ) + + fallback_ast = + env.module + |> Module.get_attribute(:phoenix_fallback) + |> build_fallback() + + quote do + defoverridable action: 2 + + def action(var!(conn_before), opts) do + try do + var!(conn_after) = super(var!(conn_before), opts) + unquote(fallback_ast) + catch + :error, reason -> + Phoenix.Controller.Pipeline.__catch__( + var!(conn_before), + reason, + __MODULE__, + var!(conn_before).private.phoenix_action, + __STACKTRACE__ + ) + end + end + + defp phoenix_controller_pipeline(unquote(conn), var!(action)) do + var!(conn) = unquote(conn) + var!(controller) = __MODULE__ + _ = var!(conn) + _ = var!(controller) + _ = var!(action) + + unquote(body) + end + end + end + + defp build_fallback(:unregistered) do + quote do: var!(conn_after) + end + + defp build_fallback({:module, plug}) do + quote bind_quoted: binding() do + case var!(conn_after) do + %Plug.Conn{} = conn_after -> conn_after + val -> plug.call(var!(conn_before), plug.init(val)) + end + end + end + + defp build_fallback({:function, plug}) do + quote do + case var!(conn_after) do + %Plug.Conn{} = conn_after -> conn_after + val -> unquote(plug)(var!(conn_before), val) + end + end + end + + @doc false + def __catch__( + %Plug.Conn{}, + :function_clause, + controller, + action, + [{controller, action, [%Plug.Conn{} | _] = action_args, _loc} | _] = stack + ) do + args = [module: controller, function: action, arity: length(action_args), args: action_args] + reraise Phoenix.ActionClauseError, args, stack + end + + def __catch__(%Plug.Conn{} = conn, reason, _controller, _action, stack) do + Plug.Conn.WrapperError.reraise(conn, :error, reason, stack) + end + + @doc """ + Stores a plug to be executed as part of the plug pipeline. + """ + defmacro plug(plug) + + defmacro plug({:when, _, [plug, guards]}), do: plug(plug, [], guards, __CALLER__) + + defmacro plug(plug), do: plug(plug, [], true, __CALLER__) + + @doc """ + Stores a plug with the given options to be executed as part of + the plug pipeline. + """ + defmacro plug(plug, opts) + + defmacro plug(plug, {:when, _, [opts, guards]}), do: plug(plug, opts, guards, __CALLER__) + + defmacro plug(plug, opts), do: plug(plug, opts, true, __CALLER__) + + defp plug(plug, opts, guards, caller) do + runtime? = Phoenix.plug_init_mode() == :runtime + + plug = + if runtime? do + expand_alias(plug, caller) + else + plug + end + + opts = + if runtime? 
and Macro.quoted_literal?(opts) do + Macro.prewalk(opts, &expand_alias(&1, caller)) + else + opts + end + + quote do + @plugs {unquote(plug), unquote(opts), unquote(escape_guards(guards))} + end + end + + defp expand_alias({:__aliases__, _, _} = alias, env), + do: Macro.expand(alias, %{env | function: {:init, 1}}) + + defp expand_alias(other, _env), do: other + + defp escape_guards({pre_expanded, _, [_ | _]} = node) + when pre_expanded in [:@, :__aliases__], + do: node + + defp escape_guards({left, meta, right}), + do: {:{}, [], [escape_guards(left), meta, escape_guards(right)]} + + defp escape_guards({left, right}), + do: {escape_guards(left), escape_guards(right)} + + defp escape_guards([_ | _] = list), + do: Enum.map(list, &escape_guards/1) + + defp escape_guards(node), + do: node +end diff --git a/deps/phoenix/lib/phoenix/digester.ex b/deps/phoenix/lib/phoenix/digester.ex new file mode 100644 index 0000000..b06c08e --- /dev/null +++ b/deps/phoenix/lib/phoenix/digester.ex @@ -0,0 +1,378 @@ +defmodule Phoenix.Digester do + @digested_file_regex ~r/(-[a-fA-F\d]{32})/ + @manifest_version 1 + @empty_manifest %{ + "version" => @manifest_version, + "digests" => %{}, + "latest" => %{} + } + + defp now() do + :calendar.datetime_to_gregorian_seconds(:calendar.universal_time()) + end + + @moduledoc false + + @doc """ + Digests and compresses the static files in the given `input_path` + and saves them in the given `output_path`. + """ + @spec compile(String.t(), String.t(), boolean()) :: :ok | {:error, :invalid_path} + def compile(input_path, output_path, with_vsn?) do + if File.exists?(input_path) do + File.mkdir_p!(output_path) + + files = filter_files(input_path) + latest = generate_latest(files) + digests = load_compile_digests(output_path) + digested_files = Enum.map(files, &digested_contents(&1, latest, with_vsn?)) + + save_manifest(digested_files, latest, digests, output_path) + Enum.each(digested_files, &write_to_disk(&1, output_path)) + else + {:error, :invalid_path} + end + end + + defp filter_files(input_path) do + input_path + |> Path.join("**") + |> Path.wildcard() + |> Enum.filter(&(not (File.dir?(&1) or compiled_file?(&1)))) + |> Enum.map(&map_file(&1, input_path)) + end + + defp generate_latest(files) do + Map.new( + files, + &{ + manifest_join(&1.relative_path, &1.filename), + manifest_join(&1.relative_path, &1.digested_filename) + } + ) + end + + defp load_compile_digests(output_path) do + manifest = load_manifest(output_path) + manifest["digests"] + end + + defp load_manifest(output_path) do + manifest_path = Path.join(output_path, "cache_manifest.json") + + if File.exists?(manifest_path) do + manifest_path + |> File.read!() + |> Phoenix.json_library().decode!() + |> migrate_manifest(output_path) + else + @empty_manifest + end + end + + defp migrate_manifest(%{"version" => @manifest_version} = manifest, _output_path), do: manifest + defp migrate_manifest(_latest, _output_path), do: @empty_manifest + + defp save_manifest(files, latest, old_digests, output_path) do + old_digests_that_still_exist = + old_digests + |> Enum.filter(fn {file, _} -> File.exists?(Path.join(output_path, file)) end) + |> Map.new() + + digests = Map.merge(old_digests_that_still_exist, generate_digests(files)) + write_manifest(latest, digests, output_path) + end + + @comment "This is file was auto-generated by `mix phx.digest`. 
Remove it and all generated artefacts with `mix phx.digest.clean --all`" + + defp write_manifest(latest, digests, output_path) do + encoder = Phoenix.json_library() + + json = """ + { + "!comment!":#{encoder.encode!(@comment)}, + "version":#{encoder.encode!(@manifest_version)}, + "latest":#{encoder.encode!(latest)}, + "digests":#{encoder.encode!(digests)} + } + """ + + File.write!(Path.join(output_path, "cache_manifest.json"), json) + end + + defp remove_manifest(output_path) do + File.rm(Path.join(output_path, "cache_manifest.json")) + end + + defp generate_digests(files) do + Map.new( + files, + &{ + manifest_join(&1.relative_path, &1.digested_filename), + build_digest(&1) + } + ) + end + + defp build_digest(file) do + %{ + logical_path: manifest_join(file.relative_path, file.filename), + mtime: now(), + size: file.size, + digest: file.digest, + sha512: Base.encode64(:crypto.hash(:sha512, file.digested_content)) + } + end + + defp manifest_join(".", filename), do: filename + defp manifest_join(path, filename), do: Path.join(path, filename) + + defp compiled_file?(file_path) do + compressors = Application.fetch_env!(:phoenix, :static_compressors) + compressed_extensions = Enum.flat_map(compressors, & &1.file_extensions) + + Regex.match?(@digested_file_regex, Path.basename(file_path)) || + Path.extname(file_path) in compressed_extensions || + Path.basename(file_path) == "cache_manifest.json" + end + + defp map_file(file_path, input_path) do + stats = File.stat!(file_path) + content = File.read!(file_path) + + basename = Path.basename(file_path) + rootname = Path.rootname(basename) + extension = Path.extname(basename) + digest = Base.encode16(:erlang.md5(content), case: :lower) + + %{ + absolute_path: file_path, + relative_path: file_path |> Path.relative_to(input_path) |> Path.dirname(), + filename: basename, + size: stats.size, + content: content, + digest: digest, + digested_content: nil, + digested_filename: "#{rootname}-#{digest}#{extension}" + } + end + + defp write_to_disk(file, output_path) do + path = Path.join(output_path, file.relative_path) + File.mkdir_p!(path) + + compressors = Application.fetch_env!(:phoenix, :static_compressors) + + Enum.each(compressors, fn compressor -> + [file_extension | _] = compressor.file_extensions + + with {:ok, compressed_digested} <- + compressor.compress_file(file.digested_filename, file.digested_content) do + File.write!( + Path.join(path, file.digested_filename <> file_extension), + compressed_digested + ) + end + + with {:ok, compressed} <- compressor.compress_file(file.filename, file.content) do + File.write!( + Path.join(path, file.filename <> file_extension), + compressed + ) + end + end) + + # uncompressed files + File.write!(Path.join(path, file.digested_filename), file.digested_content) + File.write!(Path.join(path, file.filename), file.content) + + file + end + + defp digested_contents(file, latest, with_vsn?) do + ext = Path.extname(file.filename) + + digested_content = + case ext do + ".css" -> digest_stylesheet_asset_references(file, latest, with_vsn?) + ".js" -> digest_javascript_asset_references(file, latest) + ".map" -> digest_javascript_map_asset_references(file, latest) + _ -> file.content + end + + %{file | digested_content: digested_content} + end + + @stylesheet_url_regex ~r{(url\(\s*)(\S+?)(\s*\))} + @quoted_text_regex ~r{\A(['"])(.+)\1\z} + + defp digest_stylesheet_asset_references(file, latest, with_vsn?) 
do + Regex.replace(@stylesheet_url_regex, file.content, fn _, open, url, close -> + case Regex.run(@quoted_text_regex, url) do + [_, quote_symbol, url] -> + open <> + quote_symbol <> digested_url(url, file, latest, with_vsn?) <> quote_symbol <> close + + nil -> + open <> digested_url(url, file, latest, with_vsn?) <> close + end + end) + end + + @javascript_source_map_regex ~r{(//#\s*sourceMappingURL=\s*)(\S+)} + + defp digest_javascript_asset_references(file, latest) do + Regex.replace(@javascript_source_map_regex, file.content, fn _, source_map_text, url -> + source_map_text <> digested_url(url, file, latest, false) + end) + end + + @javascript_map_file_regex ~r{(['"]file['"]:['"])([^,"']+)(['"])} + + defp digest_javascript_map_asset_references(file, latest) do + Regex.replace(@javascript_map_file_regex, file.content, fn _, open_text, url, close_text -> + open_text <> digested_url(url, file, latest, false) <> close_text + end) + end + + defp digested_url("/" <> relative_path, _file, latest, with_vsn?) do + case Map.fetch(latest, relative_path) do + {:ok, digested_path} -> relative_digested_path(digested_path, with_vsn?) + :error -> "/" <> relative_path + end + end + + defp digested_url(url, file, latest, with_vsn?) do + case URI.parse(url) do + %URI{scheme: nil, host: nil} -> + manifest_path = + file.relative_path + |> Path.join(url) + |> Path.expand() + |> Path.relative_to_cwd() + + case Map.fetch(latest, manifest_path) do + {:ok, digested_path} -> + absolute_digested_url(url, digested_path, with_vsn?) + + :error -> + url + end + + _ -> + url + end + end + + defp relative_digested_path(digested_path, true), + do: relative_digested_path(digested_path) <> "?vsn=d" + + defp relative_digested_path(digested_path, false), + do: relative_digested_path(digested_path) + + defp relative_digested_path(digested_path), + do: "/" <> digested_path + + defp absolute_digested_url(url, digested_path, true), + do: absolute_digested_url(url, digested_path) <> "?vsn=d" + + defp absolute_digested_url(url, digested_path, false), + do: absolute_digested_url(url, digested_path) + + defp absolute_digested_url(url, digested_path), + do: url |> Path.dirname() |> Path.join(Path.basename(digested_path)) + + @doc """ + Deletes compiled/compressed asset files that are no longer in use based on + the specified criteria. + + ## Arguments + + * `path` - The path where the compiled/compressed files are saved + * `age` - The max age of assets to keep in seconds + * `keep` - The number of old versions to keep + + """ + @spec clean(String.t(), integer, integer, integer) :: :ok | {:error, :invalid_path} + def clean(path, age, keep, now \\ now()) do + if File.exists?(path) do + %{"latest" => latest, "digests" => digests} = load_manifest(path) + files = files_to_clean(latest, digests, now - age, keep) + remove_files(files, path) + write_manifest(latest, Map.drop(digests, files), path) + :ok + else + {:error, :invalid_path} + end + end + + @doc """ + Deletes compiled/compressed asset files, including the cache manifest. 
+ + ## Arguments + + * `path` - The path where the compiled/compressed files are saved + + """ + @spec clean_all(String.t()) :: :ok | {:error, :invalid_path} + def clean_all(path) do + if File.exists?(path) do + %{"digests" => digests} = load_manifest(path) + grouped_digests = group_by_logical_path(digests) + logical_paths = Map.keys(grouped_digests) + + files = + for {_, versions} <- grouped_digests, + file <- Enum.map(versions, fn {path, _attrs} -> path end), + do: file + + remove_files(files, path) + remove_compressed_files(logical_paths, path) + remove_manifest(path) + :ok + else + {:error, :invalid_path} + end + end + + defp files_to_clean(latest, digests, max_age, keep) do + digests = Map.drop(digests, Map.values(latest)) + + for {_, versions} <- group_by_logical_path(digests), + file <- versions_to_clean(versions, max_age, keep), + do: file + end + + defp versions_to_clean(versions, max_age, keep) do + versions + |> Enum.map(fn {path, attrs} -> Map.put(attrs, "path", path) end) + |> Enum.sort_by(& &1["mtime"], &>/2) + |> Enum.with_index(1) + |> Enum.filter(fn {version, index} -> max_age > version["mtime"] || index > keep end) + |> Enum.map(fn {version, _index} -> version["path"] end) + end + + defp group_by_logical_path(digests) do + Enum.group_by(digests, fn {_, attrs} -> attrs["logical_path"] end) + end + + defp remove_files(files, output_path) do + for file <- files do + output_path + |> Path.join(file) + |> File.rm() + + remove_compressed_file(file, output_path) + end + end + + defp remove_compressed_files(files, output_path) do + for file <- files, do: remove_compressed_file(file, output_path) + end + + defp remove_compressed_file(file, output_path) do + output_path + |> Path.join("#{file}.gz") + |> File.rm() + end +end diff --git a/deps/phoenix/lib/phoenix/digester/compressor.ex b/deps/phoenix/lib/phoenix/digester/compressor.ex new file mode 100644 index 0000000..d93c24a --- /dev/null +++ b/deps/phoenix/lib/phoenix/digester/compressor.ex @@ -0,0 +1,44 @@ +defmodule Phoenix.Digester.Compressor do + @moduledoc ~S""" + Defines the `Phoenix.Digester.Compressor` behaviour for + implementing static file compressors. + + A custom compressor expects 2 functions to be implemented. + + By default, Phoenix uses only `Phoenix.Digester.Gzip` to compress + static files, but additional compressors can be defined and added + to the digest process. + + ## Example + + If you wanted to compress files using an external brotli compression + library, you could define a new module implementing the behaviour and add the + module to the list of configured Phoenix static compressors. + + defmodule MyApp.BrotliCompressor do + @behaviour Phoenix.Digester.Compressor + + def compress_file(file_path, content) do + valid_extension = Path.extname(file_path) in Application.fetch_env!(:phoenix, :gzippable_exts) + compressed_content = :brotli.encode(content) + + if valid_extension && byte_size(compressed_content) < byte_size(content) do + {:ok, compressed_content} + else + :error + end + end + + def file_extensions do + [".br"] + end + end + + # config/config.exs + config :phoenix, + static_compressors: [Phoenix.Digester.Gzip, MyApp.BrotliCompressor], + # ... 
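+
+ Note that when `compress_file/2` returns `:error` for a given file, the
+ digest task simply skips writing a compressed variant of that file; the
+ digested and uncompressed copies are still produced.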
+ """ + @callback compress_file(Path.t(), binary()) :: {:ok, binary()} | :error + @callback file_extensions() :: nonempty_list(String.t()) +end diff --git a/deps/phoenix/lib/phoenix/digester/gzip.ex b/deps/phoenix/lib/phoenix/digester/gzip.ex new file mode 100644 index 0000000..6f86b46 --- /dev/null +++ b/deps/phoenix/lib/phoenix/digester/gzip.ex @@ -0,0 +1,18 @@ +defmodule Phoenix.Digester.Gzip do + @moduledoc ~S""" + Gzip compressor for Phoenix.Digester + """ + @behaviour Phoenix.Digester.Compressor + + def compress_file(file_path, content) do + if Path.extname(file_path) in Application.fetch_env!(:phoenix, :gzippable_exts) do + {:ok, :zlib.gzip(content)} + else + :error + end + end + + def file_extensions do + [".gz"] + end +end diff --git a/deps/phoenix/lib/phoenix/endpoint.ex b/deps/phoenix/lib/phoenix/endpoint.ex new file mode 100644 index 0000000..c7a0870 --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint.ex @@ -0,0 +1,936 @@ +defmodule Phoenix.Endpoint do + @moduledoc ~S""" + Defines a Phoenix endpoint. + + The endpoint is the boundary where all requests to your + web application start. It is also the interface your + application provides to the underlying web servers. + + Overall, an endpoint has three responsibilities: + + * to provide a wrapper for starting and stopping the + endpoint as part of a supervision tree + + * to define an initial plug pipeline for requests + to pass through + + * to host web specific configuration for your + application + + ## Endpoints + + An endpoint is simply a module defined with the help + of `Phoenix.Endpoint`. If you have used the `mix phx.new` + generator, an endpoint was automatically generated as + part of your application: + + defmodule YourAppWeb.Endpoint do + use Phoenix.Endpoint, otp_app: :your_app + + # plug ... + # plug ... + + plug YourApp.Router + end + + Endpoints must be explicitly started as part of your application + supervision tree. Endpoints are added by default + to the supervision tree in generated applications. Endpoints can be + added to the supervision tree as follows: + + children = [ + YourAppWeb.Endpoint + ] + + ## Endpoint configuration + + All endpoints are configured in your application environment. + For example: + + config :your_app, YourAppWeb.Endpoint, + secret_key_base: "kjoy3o1zeidquwy1398juxzldjlksahdk3" + + Endpoint configuration is split into two categories. Compile-time + configuration means the configuration is read during compilation + and changing it at runtime has no effect. The compile-time + configuration is mostly related to error handling. + + Runtime configuration, instead, is accessed during or + after your application is started and can be read through the + `c:config/2` function: + + YourAppWeb.Endpoint.config(:port) + YourAppWeb.Endpoint.config(:some_config, :default_value) + + ### Dynamic configuration + + For dynamically configuring the endpoint, such as loading data + from environment variables or configuration files, Phoenix invokes + the `c:init/2` callback on the endpoint, passing the atom `:supervisor` + as the first argument and the endpoint configuration as second. + + All of Phoenix configuration, except the Compile-time configuration + below can be set dynamically from the `c:init/2` callback. + + ### Compile-time configuration + + * `:code_reloader` - when `true`, enables code reloading functionality. + For the list of code reloader configuration options see + `Phoenix.CodeReloader.reload/1`. 
Keep in mind code reloading is + based on the file-system, therefore it is not possible to run two + instances of the same app at the same time with code reloading in + development, as they will race each other and only one will effectively + recompile the files. In such cases, tweak your config files so code + reloading is enabled in only one of the apps or set the MIX_BUILD + environment variable to give them distinct build directories + + * `:debug_errors` - when `true`, uses `Plug.Debugger` functionality for + debugging failures in the application. Recommended to be set to `true` + only in development as it allows listing of the application source + code during debugging. Defaults to `false` + + * `:force_ssl` - ensures no data is ever sent via HTTP, always redirecting + to HTTPS. It expects a list of options which are forwarded to `Plug.SSL`. + By default it sets the "strict-transport-security" header in HTTPS requests, + forcing browsers to always use HTTPS. If an unsafe request (HTTP) is sent, + it redirects to the HTTPS version using the `:host` specified in the `:url` + configuration. To dynamically redirect to the `host` of the current request, + set `:host` in the `:force_ssl` configuration to `nil` + + ### Runtime configuration + + * `:adapter` - which webserver adapter to use for serving web requests. + See the "Adapter configuration" section below + + * `:cache_static_manifest` - a path to a json manifest file that contains + static files and their digested version. This is typically set to + "priv/static/cache_manifest.json" which is the file automatically generated + by `mix phx.digest`. It can be either: a string containing a file system path + or a tuple containing the application name and the path within that application. + + * `:cache_static_manifest_latest` - a map of the static files pointing to their + digest version. This is automatically loaded from `cache_static_manifest` on + boot. However, if you have your own static handling mechanism, you may want to + set this value explicitly. This is used by projects such as `LiveView` to + detect if the client is running on the latest version of all assets. + + * `:cache_manifest_skip_vsn` - when true, skips the appended query string + "?vsn=d" when generatic paths to static assets. This query string is used + by `Plug.Static` to set long expiry dates, therefore, you should set this + option to true only if you are not using `Plug.Static` to serve assets, + for example, if you are using a CDN. If you are setting this option, you + should also consider passing `--no-vsn` to `mix phx.digest`. Defaults to + `false`. + + * `:check_origin` - configure the default `:check_origin` setting for + transports. See `socket/3` for options. Defaults to `true`. + + * `:secret_key_base` - a secret key used as a base to generate secrets + for encrypting and signing data. For example, cookies and tokens + are signed by default, but they may also be encrypted if desired. + Defaults to `nil` as it must be set per application + + * `:server` - when `true`, starts the web server when the endpoint + supervision tree starts. Defaults to `false`. The `mix phx.server` + task automatically sets this to `true` + + * `:url` - configuration for generating URLs throughout the app. + Accepts the `:host`, `:scheme`, `:path` and `:port` options. All + keys except `:path` can be changed at runtime. Defaults to: + + [host: "localhost", path: "/"] + + The `:port` option requires either an integer or string. The `:host` + option requires a string. 
+ + The `:scheme` option accepts `"http"` and `"https"` values. Default value + is inferred from top level `:http` or `:https` option. It is useful + when hosting Phoenix behind a load balancer or reverse proxy and + terminating SSL there. + + The `:path` option can be used to override root path. Useful when hosting + Phoenix behind a reverse proxy with URL rewrite rules + + * `:static_url` - configuration for generating URLs for static files. + It will fallback to `url` if no option is provided. Accepts the same + options as `url` + + * `:watchers` - a set of watchers to run alongside your server. It + expects a list of tuples containing the executable and its arguments. + Watchers are guaranteed to run in the application directory, but only + when the server is enabled (unless `:force_watchers` configuration is + set to `true`). For example, the watcher below will run the "watch" mode + of the webpack build tool when the server starts. You can configure it + to whatever build tool or command you want: + + [ + node: [ + "node_modules/webpack/bin/webpack.js", + "--mode", + "development", + "--watch", + "--watch-options-stdin" + ] + ] + + The `:cd` and `:env` options can be given at the end of the list to customize + the watcher: + + [node: [..., cd: "assets", env: [{"TAILWIND_MODE", "watch"}]]] + + A watcher can also be a module-function-args tuple that will be invoked accordingly: + + [another: {Mod, :fun, [arg1, arg2]}] + + * `:force_watchers` - when `true`, forces your watchers to start + even when the `:server` option is set to `false`. + + * `:live_reload` - configuration for the live reload option. + Configuration requires a `:patterns` option which should be a list of + file patterns to watch. When these files change, it will trigger a reload. + If you are using a tool like [pow](http://pow.cx) in development, + you may need to set the `:url` option appropriately. + + live_reload: [ + url: "ws://localhost:4000", + patterns: [ + ~r{priv/static/.*(js|css|png|jpeg|jpg|gif)$}, + ~r{web/views/.*(ex)$}, + ~r{web/templates/.*(eex)$} + ] + ] + + * `:pubsub_server` - the name of the pubsub server to use in channels + and via the Endpoint broadcast functions. The PubSub server is typically + started in your supervision tree. + + * `:render_errors` - responsible for rendering templates whenever there + is a failure in the application. For example, if the application crashes + with a 500 error during a HTML request, `render("500.html", assigns)` + will be called in the view given to `:render_errors`. Defaults to: + + [view: MyApp.ErrorView, accepts: ~w(html), layout: false, log: :debug] + + The default format is used when none is set in the connection + + ### Adapter configuration + + Phoenix allows you to choose which webserver adapter to use. The default + is `Phoenix.Endpoint.Cowboy2Adapter` which can be configured via the + following top-level options. + + * `:http` - the configuration for the HTTP server. It accepts all options + as defined by [`Plug.Cowboy`](https://hexdocs.pm/plug_cowboy/). Defaults + to `false` + + * `:https` - the configuration for the HTTPS server. It accepts all options + as defined by [`Plug.Cowboy`](https://hexdocs.pm/plug_cowboy/). Defaults + to `false` + + * `:drainer` - a drainer process that triggers when your application is + shutting down to wait for any on-going request to finish. It accepts all + options as defined by [`Plug.Cowboy.Drainer`](https://hexdocs.pm/plug_cowboy/Plug.Cowboy.Drainer.html). 
+ Defaults to `[]`, which will start a drainer process for each configured endpoint, + but can be disabled by setting it to `false`. + + ## Endpoint API + + In the previous section, we have used the `c:config/2` function that is + automatically generated in your endpoint. Here's a list of all the functions + that are automatically defined in your endpoint: + + * for handling paths and URLs: `c:struct_url/0`, `c:url/0`, `c:path/1`, + `c:static_url/0`,`c:static_path/1`, and `c:static_integrity/1` + + * for broadcasting to channels: `c:broadcast/3`, `c:broadcast!/3`, + `c:broadcast_from/4`, `c:broadcast_from!/4`, `c:local_broadcast/3`, + and `c:local_broadcast_from/4` + + * for configuration: `c:start_link/1`, `c:config/2`, and `c:config_change/2` + + * as required by the `Plug` behaviour: `c:Plug.init/1` and `c:Plug.call/2` + + """ + + @type topic :: String.t + @type event :: String.t + @type msg :: map | {:binary, binary} + + require Logger + + # Configuration + + @doc """ + Starts the endpoint supervision tree. + + Starts endpoint's configuration cache and possibly the servers for + handling requests. + """ + @callback start_link(keyword) :: Supervisor.on_start + + @doc """ + Access the endpoint configuration given by key. + """ + @callback config(key :: atom, default :: term) :: term + + @doc """ + Reload the endpoint configuration on application upgrades. + """ + @callback config_change(changed :: term, removed :: term) :: term + + @doc """ + Initialize the endpoint configuration. + + Invoked when the endpoint supervisor starts, allows dynamically + configuring the endpoint from system environment or other runtime sources. + """ + @callback init(:supervisor, config :: Keyword.t) :: {:ok, Keyword.t} + + # Paths and URLs + + @doc """ + Generates the endpoint base URL, but as a `URI` struct. + """ + @callback struct_url() :: URI.t + + @doc """ + Generates the endpoint base URL without any path information. + """ + @callback url() :: String.t + + @doc """ + Generates the path information when routing to this endpoint. + """ + @callback path(path :: String.t) :: String.t + + @doc """ + Generates the static URL without any path information. + """ + @callback static_url() :: String.t + + @doc """ + Generates a route to a static file in `priv/static`. + """ + @callback static_path(path :: String.t) :: String.t + + @doc """ + Generates an integrity hash to a static file in `priv/static`. + """ + @callback static_integrity(path :: String.t) :: String.t | nil + + @doc """ + Generates a two item tuple containing the `static_path` and `static_integrity`. + """ + @callback static_lookup(path :: String.t) :: {String.t, String.t} | {String.t, nil} + + @doc """ + Returns the script name from the :url configuration. + """ + @callback script_name() :: [String.t] + + @doc """ + Returns the host from the :url configuration. + """ + @callback host() :: String.t + + # Channels + + @doc """ + Subscribes the caller to the given topic. + + See `Phoenix.PubSub.subscribe/3` for options. + """ + @callback subscribe(topic, opts :: Keyword.t) :: :ok | {:error, term} + + @doc """ + Unsubscribes the caller from the given topic. + """ + @callback unsubscribe(topic) :: :ok | {:error, term} + + @doc """ + Broadcasts a `msg` as `event` in the given `topic` to all nodes. + """ + @callback broadcast(topic, event, msg) :: :ok | {:error, term} + + @doc """ + Broadcasts a `msg` as `event` in the given `topic` to all nodes. + + Raises in case of failures. 
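+
+ For example (the endpoint module, topic, event and payload below are
+ illustrative):
+
+     MyAppWeb.Endpoint.broadcast!("room:lobby", "new_msg", %{body: "hi"})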
+ """ + @callback broadcast!(topic, event, msg) :: :ok | no_return + + @doc """ + Broadcasts a `msg` from the given `from` as `event` in the given `topic` to all nodes. + """ + @callback broadcast_from(from :: pid, topic, event, msg) :: :ok | {:error, term} + + @doc """ + Broadcasts a `msg` from the given `from` as `event` in the given `topic` to all nodes. + + Raises in case of failures. + """ + @callback broadcast_from!(from :: pid, topic, event, msg) :: :ok | no_return + + @doc """ + Broadcasts a `msg` as `event` in the given `topic` within the current node. + """ + @callback local_broadcast(topic, event, msg) :: :ok + + @doc """ + Broadcasts a `msg` from the given `from` as `event` in the given `topic` within the current node. + """ + @callback local_broadcast_from(from :: pid, topic, event, msg) :: :ok + + @doc false + defmacro __using__(opts) do + quote do + @behaviour Phoenix.Endpoint + + unquote(config(opts)) + unquote(pubsub()) + unquote(plug()) + unquote(server()) + end + end + + defp config(opts) do + quote do + @otp_app unquote(opts)[:otp_app] || raise "endpoint expects :otp_app to be given" + var!(config) = Phoenix.Endpoint.Supervisor.config(@otp_app, __MODULE__) + var!(code_reloading?) = var!(config)[:code_reloader] + + # Avoid unused variable warnings + _ = var!(code_reloading?) + + @doc false + def init(_key, config) do + {:ok, config} + end + + defoverridable init: 2 + end + end + + defp pubsub() do + quote do + def subscribe(topic, opts \\ []) when is_binary(topic) do + Phoenix.PubSub.subscribe(pubsub_server!(), topic, opts) + end + + def unsubscribe(topic) do + Phoenix.PubSub.unsubscribe(pubsub_server!(), topic) + end + + def broadcast_from(from, topic, event, msg) do + Phoenix.Channel.Server.broadcast_from(pubsub_server!(), from, topic, event, msg) + end + + def broadcast_from!(from, topic, event, msg) do + Phoenix.Channel.Server.broadcast_from!(pubsub_server!(), from, topic, event, msg) + end + + def broadcast(topic, event, msg) do + Phoenix.Channel.Server.broadcast(pubsub_server!(), topic, event, msg) + end + + def broadcast!(topic, event, msg) do + Phoenix.Channel.Server.broadcast!(pubsub_server!(), topic, event, msg) + end + + def local_broadcast(topic, event, msg) do + Phoenix.Channel.Server.local_broadcast(pubsub_server!(), topic, event, msg) + end + + def local_broadcast_from(from, topic, event, msg) do + Phoenix.Channel.Server.local_broadcast_from(pubsub_server!(), from, topic, event, msg) + end + + defp pubsub_server! 
do + config(:pubsub_server) || + raise ArgumentError, "no :pubsub_server configured for #{inspect(__MODULE__)}" + end + end + end + + defp plug() do + quote location: :keep do + use Plug.Builder, init_mode: Phoenix.plug_init_mode() + import Phoenix.Endpoint + + Module.register_attribute(__MODULE__, :phoenix_sockets, accumulate: true) + + if force_ssl = Phoenix.Endpoint.__force_ssl__(__MODULE__, var!(config)) do + plug Plug.SSL, force_ssl + end + + if var!(config)[:debug_errors] do + use Plug.Debugger, + otp_app: @otp_app, + banner: {Phoenix.Endpoint.RenderErrors, :__debugger_banner__, []}, + style: [ + primary: "#EB532D", + logo: "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAJEAAABjCAYAAACbguIxAAAAAXNSR0IArs4c6QAAAAlwSFlzAAALEwAACxMBAJqcGAAAHThJREFUeAHtPWlgVOW197vbLNkTFoFQlixAwpIVQZ8ooE+tRaBWdoK4VF5tfe2r1tb2ta611r6n9b1Xd4GETRGxIuJSoKACAlkIkD0hsiRoIHtmues7J3LpOJ2Z3Jm5yUxi5s+991vOOd+5Z777fWf7CGXA79Ct46ZGmyPnshw9WaX5qTSlJBCKjqU51aoohKVUivaIRqUUmlactEK3iCp1gablTztsnZ9kbK16w2P7wcKw5AAJhKqiBWlzIyIjVrKsnKtQ7HiiqiaGZQOC5Qm/JAkiUekqSha2X7/x2JP1FOXw1G6wLDw4oPvFl94+ZVmkib9HJnQuy7MRfUW+qoqSLMtHWi60PzB9Z+2BvsI7iEc/B3wK0d8Wjk8dHRX7B5hjbqBZU6R+sMa3VBWFUiSxqLmhdc303XVHjMcwCDFQDngUosO3JF0VPzz2eSKRLJrjPLbxhVARYYXDUCKlKAJFMV00yw731d6fOlWVKadT/mjSxsIb/ek32Lb3OPANAdl/c3La8CExmziGnUYYz2thd1JwhpBk5RDDyBccTuWgKNpqWxzCsdk76iuwbdXiyd/nIqO2ufcL9lmVBZvgcP5k4pYTrwcLa7B/cBy4LESVeVlvsxS9wN+ZR1Jkioi2B5M3nPiTJ1LqVuXaCcuaPdUZUSbJjg9T1hXfZASsQRiBcYDULJ/2OM1zDxOa0zf1eMFDROmcQ5Jeam7peE+iKOfQ+IjFHM//gqF7T4A0UhD3dflHkusHd3EaS/r0SupWZO+lCHWFwislio2Kpi30cKKQZEKYGEL7L1e4ZqFkRSWs/2upYEauSpKjpblldvaOmkPBwBns6z8HLn/O3Lsenjs+N2pU7G94hr6JpjnevT4cn0GQ1HZb29JBZWXfvh2vQuRCBg2z1W5i4q9zKQvfW1mmOrrsy6duPb4pfIkcWJTp+V4p4zcUzrY72h9SJCX8R88wVGSEdWPZkskrw5/YgUGhnpno8khLbk9dHBMZu4Wimctl4XqjKCrV4ehcmbH5xAZXGsuWTLpFdSpylyC1t3RIjQfLv2h6pInqdG0zeO8fB/wSIgR9clnGw1aL5Un/0ISmtSorVJe97cYpb1R8pFFQtSzzBc5iXoPPMqyhCKOqlEycKqW2gHL0vCqRvR1S146srRX7tD6DV98c8FuIEFxlXnYxz/EZvkGHR60kSUrjVy1TZu2qKdMoqr4j8wOWMXvVeOMsJqlyB0vkfRdPtz42aGbROOf5GpAQIai61Tlgiw1Ot+SZJONLFUUU5q49GlPvokequStzM0OZl/SEDWczmLIq2mwdv8rcVvVOT+2/jfV6FtYe+SJQ9CseK8KwEFUUu1flNLqSlvxa8VKH0/msa5mnezT/EJ6fGBubsL1qdfahVxOj4z21+zaXBTwTIdNq7siVGIYN/1X2pTcsCY6alILiFNcXfmxR+qrICMsrIGica7m3e0WWRFWyP+zNzOOt30AuD3gmQqbAwnRPf2IOy5uTa1dlfuxK87Q3T64/V9o0RhLFBtdyb/c0w3KMKeqZyhVZu721+baVByVELS3tv+pvDANT3vUVt019xpXuWYVfNKbkHx0liM7tuKjW8+NNpjk1q6af/9vkcYa5uejBG45tgvqc4YCq83I6WY7rM09Ho5jY1n5xiSfzCOqRLBbrWormh+rBBYt20emw/yht88lX9bQfiG2CmomQIYqifN4fGRMZGb1p46QRY9xpT9tSvnPc2sJhotjxgiLLTvd692dcS1ms0a9U5uW85173bXkOWohssrSjPzKLAfXEjNzEclfa86cOH4aRK1iWmn/iR0nrDpslQdiqqKLo2s7TPc9xt1Tm5bafXDL1fk/1A7ks6M/Z7mmJo8ZmjDpLs0HLY0j4jAtqXA8hclzfjM+M/7ugCqUTNxxf7EIQe3LFlGdZYlrC89wQl3KPt7IoXJAVeqfU1b4lfXvlB66Ntt88OmnikJhFxEbH7zt+4el7qxouuNb3x/ughQgHXZU3vZPjmH63LtJemCRIx1IKjnRr4E8unHCTJTZ2l6jIdRPWH03S2mjX0vmp3zVbI+6jeeYqQjGxPf15upWVYFNBPytCE4jAU0WiKC2CxHz44aHa+++vaW7XYPfXqzFCtHz6Kc7MjO2vTEC6FcX5XtLaonl4j4JkjY/fJUO0UofofCBzc+lzWO7+++yWpMnDYyMXixQ7nefIBAjFjCZEtUA7FvTcDAM7PZUhqqLS4OyptqhELBEd4sa0LScK3GH152dDhKhmedZ+xmy6pj8zAmmXFfHl5LVH78X76vkTfsAOid+K9+h+2253/EKvj9IPR1LW5fEjEzY2N1x8uYGyIYxgfwe/m3JldBSXwUhsMmdhR6gmlVFE9UvJQVU7VMeJUBqMDRGiyhW563gTuypYRoVD/06b8NSUzYUPIy0YqcKazW9prr4oTJIsrE3eeOw/e5tWnOVi46z3WhjTXIUm42iKNnt1V4ZgCZjuHLIqldrt0p/1CrtRYzBEiMpXZDxiNll+ZxRRoYYjO2xPaIKCbsJxo4fsZxnGrNGFBl14bcVSl1yQ9mYJ2hAhvi74H35G+cjIOxWKzOYYZojesC13zIIk1rWdbV7SV94HhggR2p+io6LXuQ+mPz/bHfYn0zaW/AbH8MhQKnLZTbnlHM8muo+JyJIsqmoDuCaVU4rzI8Uhnjxc/OWh1fWtre5tXZ9xVzs0Ne5as4WZrlDMbI6iU2iOxfWUIT8VTHyCKP9u4qbixw0B6AOIIUKkLUR94OmXVXab49W0zcX3aMR3x+Yx/EKa9s02FCxYU4
sQ8yIwtGSTZGJHGDRLWWSFtcLim4f9Gs+yva8XcQqdz00sOP4zbQy9cfXNDZ0YcdE3fHj8Ia/fbJ1wwrGZ6LTtSN1w7FaNtuOLJ/5rpDVig16ziNYvlFdvJh6jaOqfGkKjRq8DDmeyzqtbmX1Zs42utmgWcbZ2/QnSlTh0gAh5k8iImI29SYQhQoQ2SAr0aAP1h05paGg+sWhitx4JxzlxW+mDKesOW9DGJshSR6jHjv7i3mhAn6+qpZk7vdUHW27I5wxtTtdkjWkA9VrYOqih5lhQpFJVkbfbZaUyyuYUO62mRCvDzuNYMoMwvLUnZn6dvEJ6KzW/8Hb3tjUrJj8AMNaAFns85B4whK/uOLRnRQTHcVWqVwh3UHYIn6uivbZVkM7yFjbJyloywI63EN7EFML8Y82F4V7791XG9bTg13D4czVksOEuROiN2NLWNidne9Wn3phTtiLzVRPN3KknoQVkzGlz2OwPpb9R9pI7vP3ZY0YMGR/zM85ims8Q6jtGJbNAtQJYTqpE1bFpUsGJpwGvzyBAtAOOzorfBgEVV2s0uipTtTIjroYIUbcRNvuK0zQJP8d9zFrS0dl+nR6NLuqEYkYl7OY5NkoPc0X498s222OTtp1EXZHH3/GFk25gIyw3w7phGsXQYymVDCUU7MwYiqMU0s1/lIbudQUDzwqoDVFHrqgCTOunZUqusovC2+7xcx6ReSgsWzTlZ+ZIy39DbgUK0vE0jV9XOMxDs6CKDBGitWNjY6+ZlXKB4cLP3xomoYbk9V9b6fVyqvaOnHqa4cbobY8vxympG/YfPv97vVZ5nL2ThltGMhZyeUZRRIYRz9guXHui4Yxe3HradQedRidswU96/s7Po4wO1jREiHAgdXfmOAjhTHoG1Zdt0OV1Qn7R9/3FWbUyq4jjTZn+9MMYN0LJpwVZ3c112D5I+WvlW/707822WtCmvbP1vrQ3yv9iJC7DhKhq1ZVtHEtHG0mcEbCCUbZVrZy6jeMj/BZAjW70AiCM0qnI9JegYHTSKjFJolSTurl4IbQxxFSi4dJzxYRjsIcrSc0/MlNPe71tDNnidyNTlLD0i6EJ/0+mCr3MSS0ovc3W2bYGdkPdGme9/bR2+HmnaT6G5dhUCBKZAnvw0QorVUE9uIb0/U9S7WtZosYYjZk1CiCjyhAc+M+2JaPgBwqHZugZgfbFfpd2YC/V5GW9D9v3G8C+5RfPcDsuU9RRsaP9UXcvx2DoCqRvU2PnywmJVuMmjktEGPY5q1s1rYCw1hWBDK43+2Am250H6mKN8CAcS1HmD1ZOeYol3DzwaExUVdbkyY4GubedlKie6pKo7fM2Fz5W7xK+3Ztj1QkbhejyYl5nH5/NDBOiikVpa0xRMS/4xBaiStQqo+O90egP35oyK9JqGqPS7GgTeDR2KOpFkypWY8SI0bjCGZ5hQoRKtsSpVzSEoxEWbVxoogjnF9GfaTNMiJAJvb1DU2UJwtxAXQfmFU+fEV8vwuG0PzppQ8kjvtqEYx266UrRXApR2RRCkUTw9rfAuToyHMDDKERtpmS5pNPpKMp9q/KvoaLfUCGqzMvYx3OWWUYORpLEM6oqvS122D+4UN1xsq7T1pGenpAWHRN5K01Mi/UGCOACNyn/iK6kDUbS7y8sNPJyZutqnqZmKoRO0JtoApSqqDKoVFXnxpT842gW6bOfoUJkpIcjWqVFxf5rsBM95YsbR34wYX6cNfJVhuN7jAdzCo59EwuKr/MFLxR1Y2HB/uGK3BdZTlmAKoFgacBgS0mit0zIP5wXLCw9/Q0VIkRYuypXhLM8/NoGeyLU2dVxlz9HLmC2D0zW4AmWa1lHe2fYZJZFc9Gs2eMLCKFvAm2/XzzDODb4qAk0kbp1TiohrAofejjiC/LPX9rFC6Iqs9QrEMFyH/Cg13RThgtR9cqsz1jedJXri/P3Xpac9cnri8b52w8t8RaT+S5f/XBddfb4V4mYCcRXu96uQ1rNPLPKH+FR0K6iSkWdorwZ/mR7Zrx7qtSFThoScMWOHh8XMzLBmsxwplQ+klkNm/mhXTbHbzGFjktbQ28NFyI8oWjoFcM+C4ZKm93+6/RNJb8PBEb58mmPms3W3/rqK4pyV2r+4ZAcvYWpkU1m8/+AgVf3Z0sGn20wnr696+CpuwPRd2F2t7vPtjf74kkwdYYLERKDeXvAmW54oIS12ZvnZGyq3Btof83Y6Ks/+Oc0J609muCrjZF16N8zNjPufYY3ZfkDV1aFwvrDzbdcf+LUl/7068u2fn2H9RLW0tV275CY+ICTZEp2VdSLy1O71E3F/1a1Ytoo9I/2VI9lsOuJr12dc3H/3pqk3vD2c8VbtjTzFRPP3uHPWhHdSzpsjgf9+Qx1H6URa8kgVjqNU7mhAk1FgXdSE22XWxy8cszW6jh51a6aYlfajLjvlZkICTuVl9NAcdyIQIhsbb240IhMrTV5OccZjpvsiwZURDrs7fNdc137ao8OeFFjLEnT363e76sdfkKuuibpaTPPrvDHu1EW5Xan0/mX9DeO/coXfK2uaOnUpVaWuZejSTZk843sSdkrgj88ZJeoUJ32Fye+WfaiBieYa68J0Wc3jM0Y+Z0RAUm9e7xXMAOsyZvexnCMTxeV7qNBKflyHL4vfHiw4BVD416jCRmnggZQkZWzhBJr4R/vlAlrg8wfQ3mangauiqP1enriwTaCSmpkwfG/6VtKn/eFX6srvy39Hi4y4vFglg2YxEsUxCcgwPEJDW4g114TIiSmdnXWDpo2fc9fwsCH+XzS2sKAZjF3XC+ljhxy/b+M/FLPC0UvyPY2W17WO2U9JfVkIe/jU6yVW6TSdKK/QYiqgnGNik0SmQrZ4dxbfKLp/5aXN37hTrunZ5wJvzNtxB50L/FU76kM13+gbH2v1WF/W7VLTSxnspis/JUmhr5NUdh40tn2YDAOdL0qRDggzB6m12dZYwDODAcPnR6rl7FaP29X1AJHRMW9663etRxxy7JwuLGpY7VrFn7XNu73JcsmzDbRlmsZmeSqHD2SAidprQ3ogOw0JbfQRL5oF0m5U1VONR/v2BPIQrlsefoveM76e3/SPjud9rUTN5TcqdHj6YqCOffY2XOe6vSUXR6snsaBtMETrcdHJ1T4G0YD/9BPkjcWGWZCqcrLeA6yK/673jHIqKijSKHN1vakEeszvXi9tatcPmUTb45c6q3evRz/DA5H5z19kZC014UIB1e2NP1uTI7pPlCfz3Bu2UcHzg7V6/juE9alyupVmQfgONqZetq6tsHPgSyre5wdtpenbC//2LXOqHuczd75uPKIJyf6QOh2tLb/0FcUyt55YycOi7TOZNSvEwtA7s1aPRExnsbbJ0KEiDF3tCk24gFPRHgrc4py9cT8w7q//d7guJYHs2tEOKiohN1NOVGEUggCeOfcefuJG/d/ccoVh5573L3NzB0x3RJtXi6ppoWQ+OGLgp1FV7oLUc3KrEJ/dUvePBZQBRA7LOYRxkxfDUe0Rmt5l7rpxRxHRHGCD1+F0yH80Z8cR30mREho1fLM5
zmz+Sd6mKy1sXd0/kfam8ef1Z6NuNbdkd2lJ+JVDy70nKSI0gX/505RZZqJIrdCfqEmVRWcsIPr1sMRlhcVSTXD+mg47OiGQXhZDFTEqpeOtMBt95Ej5ya4rwErV+Ye4Xk2Rw8dWhvB0bl5wsbjy7RnvKIVIT5h6HaGI7pjzmCTcRxCrVAx2qPNrU+FCAd0cknG73gL/wir8+A9zLNTfaopKZB/O+Lz9EMHulGTh532R/nnCY4RZbLorE3OL0p2hxWIW43qFP6Op2S6w8IASlOk5WmQdhqickeBX1KCnkhfUHjaGptar7x6Z+0Jd5iuz30uRIgc09hRJvMmjtMXp4YnTc9ZfySu3kBf5cJ5yTPihsR+FsrjtgSnc8+EDUVzXV8I3mNQABhQb3Yv9/UsCNLRCQVHcn210epwszM6KvYPNGHm96SewLCnpgutV898v/pzrb/7NSRChERgcsxfzs0uxIwb7kR5eobptXXD+0dHu68ZPLXVW4bTfNyQ+E96YqReeHrboSeB3SE+lr6l5FH3PoEEPHibgdxhuz/vuCExZdLIkZ/0pLBEA/AXxY1jvKkBQiZE2oDQ6s6x3C8hLovXyrxdMf6rtaVlTvaOmkPe2vhbjovN+MT4T/Xg9xe2p/b4+Spv/OrmeR+frXavDySBqt3peC1tQ/Hd7rD8edZjHkLtdlNz03Q395NuNCEXokuDZcvzsraxhPleT7OCih41qvP51PySn/rDKF9tUdkGQQYlerLl+4Ljq04QpQ74LP/Rm4mhekXGetZk0e2JCCcBdHXZ2+/ydMiNLzq81ek5khXTCNrsnfe7h2GHRIhqV2RtQAvzpPyi+a6DwgNbcrOHga+N+UZIreNzZsKMHJJof9jIxOIVKzP/buLN17rSFOw9mNQ6HYK4Ln3Dca+7UvgD/dXMmS6n9POJE5SgDqLscOedax+c0RhemSyLlB08IKsdsrTHwvHfx5wExbdm326NoZZPKChc4NoH74GOg0BHj8GeuHMTnI5nzjR0fFp/XuwIiRBholBzbNwuyBvU0FDUMMNTFoyy5RlP8DSzElKRj2YgXb37gC8/y87zTkFef7a0/dlATAmX4Vy6wQwaUdaYP8POLWB/qG4HREWt7pKEF71l49fwYio/PetCXJfIinKoqvHL1Z4+hRo8vKJ2Hs4huZ+wNLG3dz3DmLlUnufnj3vtIKlZlXMOPt0j8d61j3ZftXzaa6CQXY19tTJvV/DlVhw26bEeG3oDEGw5OtijzxEkXgJ7q7gudeMxj26t3ZrVmKj7TLTpOkJIErg6WLy5O6AbBbgAnmJU54Zgj9fEvD6syXQv6HrA1dR3yhxcKKu0bANdUBmRlY++OHHxRW+LUI1v5Usn/5znLY+DsFq0MvcrWvchQqoRkhZt37u75rf+eCeiioBWuWw4sySyenXOFpbmFquCUAG+2BPgEHfq+oKj1novu11MxD4kPvYFjqZzwPHqG0nYUS8G1mMbZD+pFBTnG3/7vPHFkAkRMszVlRU1wZCt/jktd7Q7Q7Vn3JrTkdYZVsaUQdFyNOg8INQd5is4RoMGDZ9EMZLd2bbLqLUC5rBePCt9KYmOyIY1wTCwwIugFuBoRemQiFThlKgzpSebPsor/fIrjUYvVxr0NXMjovk8WeUWuh80iMm4OPj2SApzUaSEOiKp75e3XNi0cNeZWi/wfBZXrcypAKVmEoZJVa7M/oTlyFXdngzwOVRoqu1Ue/OV12+vw+QSPn/IbytvmiIR1gwa7YtfSV1H3fuFVIiQend3EVUWbaJEth74tPqnRnscfjhrzLjEkXF5LA/+PpSSAAkavoLPRNn59rbNs3fUV/jkZpCVOKOOiI170cTAQTLwg7nrNBw5dBoOFGnsghONlE7bodt21JTUe5kd/EWP6xueIZPApSYWTSegKQfNs/Q2CKmFZbkft7W1LfCVftAffCEXIiQW/imwM+Lhxf7jh2sAilZKhC7b6+67gX+06vkO/YnmZI/4JTHTi2mFHuXtW48KTYck/ldPM2HPGL22wI0CBhj2yQ/HnWyhTfhZ3Td55Ojq1s4u7XOIBwO+fvRUjVGH14SFECFXcfrleK77X+rOZZjjBULEGkhk+LkiObcVH2s94W5n0vog865Kj8lkIsyLzTR7DXgaJvnKagvCI6m0coHIdLtDFrf2ohBpJA64a9gIEXJW704FF3eEhu0roRzgCGbHvuA4bGJpxQzJNa16vBhReOwO4U96fZkRx+DPMwfCSoiQRNiClsIWdIpncg0qlWW5tu1CmvsC0SDo3zowl+Jtw2fc4H4wFQ2TvUmRCruTQQEyjsNhJ0Q4NLRsi6L9zzpcWQLiBCT9jUdvy4A6D3b6Jw6E3efMlcLi21IXREbFbnY9sM61Pph79EEWRNubX5W3/zTUcfnBjCMc+oa1EF1iEF+Tl1sEWuP03mAYqu7BqHsKZqdDHc7OHbZOpWrZrpryeoP0Nb1Bc7jB7A9C1M0z9Ig0W9iHIfzZp2E2WAbjDKVSYECRaYEBtbGsgm8Bo0CkDy3CQXcXVFUpkxSpvKK5OT9QbXKwNIZb/34jRJcYx4JNaDdP87NA9xNSXqJdC+wsLaD5PnDxq7anpu+sPRBSgkKIvL8JUTer0CMRDISvEZaZCKkLQ8i+r1Hj7KXIYm2LrevnocydGCpG9Esh0piFsVoRTMQTkAcUzivT0oNptaG5gvXkYMr64qCSfIWG8sCx9msh0oaNJ/bMmHLFU7BcgjPGSEJvzU5oaWcUOEtKwUOBARPtWUOCRuTGppYeoyQ0+vv7dUAIketLQNeFyLj4H0Es2NUwNyX6sxDH0GnI5iECU2yQ//AcIVKjSHO1YofzJMU4K+0XhJb2aKoN8VkddERUNDuUoUgyy/LZkBA9FRIjTwJfnTjNxbe1SViU+W7hVlf6BuL9gBMi95eEXpR8FD+NIfRkQaFHw0vvTkNM06pNoZmLquxophWqrl2mz3W22o7pTeLgjkd7xoxoIybHrDHxzI8hiDGq9VzzNdN31x3R6gfidcALkZEv7cDNyZmxUZbrBNXZ8Pmxzt095QlAAcazWXsK/jOSxlDAGhQiP7iOkaSWePOdRGZmghfBKAJZrWSacmBKOzgbsxFcaY/YHLZ39WZd8wN1WDcdFKIAX0/Zooz7OAv7EHgJjnYHAX5P7USRPty3t3qN5gjm3mYgPQ8KUZBvs2hB2tzouIh1kIE80R0UhiBDvNnatM3F97jXDaTnQSEy6G1WrMh43WSyrPYEDqMsxhcUTvJUNxDKBoXIwLdYsnTyimizeb2nJBGSIJxKKSgcbyC6sAE1KEQGvwp0gh86JOEouOh2qxJcwQuiUDIhvzDTtWwg3HtWuQ6EkYVoDJjw4PyZC9PRQOtOAs/xGRXLpv3Bvby/Pw8KUS+8was/ri+52NW+UJHAPuL2482mhzAixa24Xz8OClEvvT605jd3tS6ApKHfOGKCEIaaM3NkUS+hDQnYQSHqRbajIH1WeCZRFaVvhCujbqlmdc5LvYi6T0EPLqz7iN14Wjdtivg1C0eha9Z/OB/x
0P49lbf0d4XkoBD1kRBpaNChLiYhYY2JUufIrDpCEkkR5FrE3No9ZmnVYITb9f8BhSZnYemqCy4AAAAASUVORK5CYII=" + ] + end + + # Compile after the debugger so we properly wrap it. + @before_compile Phoenix.Endpoint + end + end + + defp server() do + quote location: :keep, unquote: false do + @doc """ + Returns the child specification to start the endpoint + under a supervision tree. + """ + def child_spec(opts) do + %{ + id: __MODULE__, + start: {__MODULE__, :start_link, [opts]}, + type: :supervisor + } + end + + @doc """ + Starts the endpoint supervision tree. + + ## Options + + * `:log_access_url` - if the access url should be logged + once the endpoint starts + + All other options are merged into the endpoint configuration. + """ + def start_link(opts \\ []) do + Phoenix.Endpoint.Supervisor.start_link(@otp_app, __MODULE__, opts) + end + + @doc """ + Returns the endpoint configuration for `key` + + Returns `default` if the key does not exist. + """ + def config(key, default \\ nil) do + case :ets.lookup(__MODULE__, key) do + [{^key, val}] -> val + [] -> default + end + end + + @doc """ + Reloads the configuration given the application environment changes. + """ + def config_change(changed, removed) do + Phoenix.Endpoint.Supervisor.config_change(__MODULE__, changed, removed) + end + + @doc """ + Generates the endpoint base URL without any path information. + + It uses the configuration under `:url` to generate such. + """ + def url do + Phoenix.Config.cache(__MODULE__, + :__phoenix_url__, + &Phoenix.Endpoint.Supervisor.url/1) + end + + @doc """ + Generates the static URL without any path information. + + It uses the configuration under `:static_url` to generate + such. It falls back to `:url` if `:static_url` is not set. + """ + def static_url do + Phoenix.Config.cache(__MODULE__, + :__phoenix_static_url__, + &Phoenix.Endpoint.Supervisor.static_url/1) + end + + @doc """ + Generates the endpoint base URL but as a `URI` struct. + + It uses the configuration under `:url` to generate such. + Useful for manipulating the URL data and passing it to + URL helpers. + """ + def struct_url do + Phoenix.Config.cache(__MODULE__, + :__phoenix_struct_url__, + &Phoenix.Endpoint.Supervisor.struct_url/1) + end + + @doc """ + Returns the host for the given endpoint. + """ + def host do + Phoenix.Config.cache(__MODULE__, + :__phoenix_host__, + &Phoenix.Endpoint.Supervisor.host/1) + end + + @doc """ + Generates the path information when routing to this endpoint. + """ + def path(path) do + Phoenix.Config.cache(__MODULE__, + :__phoenix_path__, + &Phoenix.Endpoint.Supervisor.path/1) <> path + end + + @doc """ + Generates the script name. + """ + def script_name do + Phoenix.Config.cache(__MODULE__, + :__phoenix_script_name__, + &Phoenix.Endpoint.Supervisor.script_name/1) + end + + @doc """ + Generates a route to a static file in `priv/static`. + """ + def static_path(path) do + Phoenix.Config.cache(__MODULE__, :__phoenix_static__, + &Phoenix.Endpoint.Supervisor.static_path/1) <> + elem(static_lookup(path), 0) + end + + @doc """ + Generates a base64-encoded cryptographic hash (sha512) to a static file + in `priv/static`. Meant to be used for Subresource Integrity with CDNs. + """ + def static_integrity(path) do + elem(static_lookup(path), 1) + end + + @doc """ + Returns a two item tuple with the first item being the `static_path` + and the second item being the `static_integrity`. 
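+
+ As a rough sketch of what a lookup might return once a digest manifest has
+ been loaded (the endpoint module, digested filename and hash below are
+ purely illustrative):
+
+     MyAppWeb.Endpoint.static_lookup("/js/app.js")
+     #=> {"/js/app-6d2f01e0.js?vsn=d", "sha512-..."}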
+ """ + def static_lookup(path) do + Phoenix.Config.cache(__MODULE__, {:__phoenix_static__, path}, + &Phoenix.Endpoint.Supervisor.static_lookup(&1, path)) + end + end + end + + @doc false + def __force_ssl__(module, config) do + if force_ssl = config[:force_ssl] do + Keyword.put_new(force_ssl, :host, {module, :host, []}) + end + end + + @doc false + defmacro __before_compile__(%{module: module}) do + sockets = Module.get_attribute(module, :phoenix_sockets) + + dispatches = + for {path, socket, socket_opts} <- sockets, + {path, type, conn_ast, socket, opts} <- socket_paths(module, path, socket, socket_opts) do + quote do + defp do_handler(unquote(path), conn, _opts) do + {unquote(type), unquote(conn_ast), unquote(socket), unquote(Macro.escape(opts))} + end + end + end + + quote do + defoverridable [call: 2] + + # Inline render errors so we set the endpoint before calling it. + def call(conn, opts) do + conn = %{conn | script_name: script_name(), secret_key_base: config(:secret_key_base)} + conn = Plug.Conn.put_private(conn, :phoenix_endpoint, __MODULE__) + + try do + super(conn, opts) + rescue + e in Plug.Conn.WrapperError -> + %{conn: conn, kind: kind, reason: reason, stack: stack} = e + Phoenix.Endpoint.RenderErrors.__catch__(conn, kind, reason, stack, config(:render_errors)) + catch + kind, reason -> + stack = __STACKTRACE__ + Phoenix.Endpoint.RenderErrors.__catch__(conn, kind, reason, stack, config(:render_errors)) + end + end + + @doc false + def __sockets__, do: unquote(Macro.escape(sockets)) + + @doc false + def __handler__(%{path_info: path} = conn, opts), do: do_handler(path, conn, opts) + unquote(dispatches) + defp do_handler(_path, conn, opts), do: {:plug, conn, __MODULE__, opts} + end + end + + defp socket_paths(endpoint, path, socket, opts) do + paths = [] + websocket = Keyword.get(opts, :websocket, true) + longpoll = Keyword.get(opts, :longpoll, false) + + paths = + if websocket do + config = Phoenix.Socket.Transport.load_config(websocket, Phoenix.Transports.WebSocket) + {conn_ast, match_path} = socket_path(path, config) + [{match_path, :websocket, conn_ast, socket, config} | paths] + else + paths + end + + paths = + if longpoll do + config = Phoenix.Socket.Transport.load_config(longpoll, Phoenix.Transports.LongPoll) + plug_init = {endpoint, socket, config} + {conn_ast, match_path} = socket_path(path, config) + [{match_path, :plug, conn_ast, Phoenix.Transports.LongPoll, plug_init} | paths] + else + paths + end + + paths + end + + defp socket_path(path, config) do + end_path_fragment = Keyword.fetch!(config, :path) + + {vars, path} = + String.split(path <> "/" <> end_path_fragment, "/", trim: true) + |> Enum.join("/") + |> Plug.Router.Utils.build_path_match() + + conn_ast = + if vars == [] do + quote do + conn + end + else + params_map = {:%{}, [], Plug.Router.Utils.build_path_params_match(vars)} + quote do + params = unquote(params_map) + %Plug.Conn{conn | path_params: params, params: params} + end + end + + {conn_ast, path} + end + + ## API + + @doc """ + Defines a websocket/longpoll mount-point for a socket. + + ## Options + + * `:websocket` - controls the websocket configuration. + Defaults to `true`. May be false or a keyword list + of options. See "Common configuration" and + "WebSocket configuration" for the whole list + + * `:longpoll` - controls the longpoll configuration. + Defaults to `false`. May be true or a keyword list + of options. 
See "Common configuration" and + "Longpoll configuration" for the whole list + + If your socket is implemented using `Phoenix.Socket`, + you can also pass to each transport above all options + accepted on `use Phoenix.Socket`. An option given here + will override the value in `use Phoenix.Socket`. + + ## Examples + + socket "/ws", MyApp.UserSocket + + socket "/ws/admin", MyApp.AdminUserSocket, + longpoll: true, + websocket: [compress: true] + + ## Path params + + It is possible to include variables in the path, these will be + available in the `params` that are passed to the socket. + + socket "/ws/:user_id", MyApp.UserSocket, + websocket: [path: "/project/:project_id"] + + ## Common configuration + + The configuration below can be given to both `:websocket` and + `:longpoll` keys: + + * `:path` - the path to use for the transport. Will default + to the transport name ("/websocket" or "/longpoll") + + * `:serializer` - a list of serializers for messages. See + `Phoenix.Socket` for more information + + * `:transport_log` - if the transport layer itself should log and, + if so, the level + + * `:check_origin` - if the transport should check the origin of requests when + the `origin` header is present. May be `true`, `false`, a list of hosts that + are allowed, or a function provided as MFA tuple. Defaults to `:check_origin` + setting at endpoint configuration. + + If `true`, the header is checked against `:host` in `YourAppWeb.Endpoint.config(:url)[:host]`. + + If `false`, your app is vulnerable to Cross-Site WebSocket Hijacking (CSWSH) + attacks. Only use in development, when the host is truly unknown or when + serving clients that do not send the `origin` header, such as mobile apps. + + You can also specify a list of explicitly allowed origins. Wildcards are + supported. + + check_origin: [ + "https://example.com", + "//another.com:888", + "//*.other.com" + ] + + Or to accept any origin matching the request connection's host, port, and scheme: + + check_origin: :conn + + Or a custom MFA function: + + check_origin: {MyAppWeb.Auth, :my_check_origin?, []} + + The MFA is invoked with the request `%URI{}` as the first argument, + followed by arguments in the MFA list, and must return a boolean. + + * `:code_reloader` - enable or disable the code reloader. Defaults to your + endpoint configuration + + * `:connect_info` - a list of keys that represent data to be copied from + the transport to be made available in the user socket `connect/3` callback + + The valid keys are: + + * `:peer_data` - the result of `Plug.Conn.get_peer_data/1` + + * `:trace_context_headers` - a list of all trace context headers. Supported + headers are defined by the [W3C Trace Context Specification](https://www.w3.org/TR/trace-context-1/). + These headers are necessary for libraries such as [OpenTelemetry](https://opentelemetry.io/) to extract + trace propagation information to know this request is part of a larger trace + in progress. + + * `:x_headers` - all request headers that have an "x-" prefix + + * `:uri` - a `%URI{}` with information from the conn + + * `:user_agent` - the value of the "user-agent" request header + + * `{:session, session_config}` - the session information from `Plug.Conn`. + The `session_config` is an exact copy of the arguments given to `Plug.Session`. + This requires the "_csrf_token" to be given as request parameter with + the value of `URI.encode_www_form(Plug.CSRFProtection.get_csrf_token())` + when connecting to the socket. 
It can also be a MFA to allow loading + config in runtime `{MyAppWeb.Auth, :get_session_config, []}`. Otherwise + the session will be `nil`. + + Arbitrary keywords may also appear following the above valid keys, which + is useful for passing custom connection information to the socket. + + For example: + + ``` + socket "/socket", AppWeb.UserSocket, + websocket: [ + connect_info: [:peer_data, :trace_context_headers, :x_headers, :uri, session: [store: :cookie]] + ] + ``` + + With arbitrary keywords: + + ``` + socket "/socket", AppWeb.UserSocket, + websocket: [ + connect_info: [:uri, custom_value: "abcdef"] + ] + ``` + + ## Websocket configuration + + The following configuration applies only to `:websocket`. + + * `:timeout` - the timeout for keeping websocket connections + open after it last received data, defaults to 60_000ms + + * `:max_frame_size` - the maximum allowed frame size in bytes, + defaults to "infinity" + + * `:fullsweep_after` - the maximum number of garbage collections + before forcing a fullsweep for the socket process. You can set + it to `0` to force more frequent cleanups of your websocket + transport processes. Setting this option requires Erlang/OTP 24 + + * `:compress` - whether to enable per message compression on + all data frames, defaults to false + + * `:subprotocols` - a list of supported websocket subprotocols. + Used for handshake `Sec-WebSocket-Protocol` response header, defaults to nil. + + For example: + + subprotocols: ["sip", "mqtt"] + + * `:error_handler` - custom error handler for connection errors. + If `c:Phoenix.Socket.connect/3` returns an `{:error, reason}` tuple, + the error handler will be called with the error reason. For WebSockets, + the error handler must be a MFA tuple that receives a `Plug.Conn`, the + error reason, and returns a `Plug.Conn` with a response. For example: + + error_handler: {MySocket, :handle_error, []} + + and a `{:error, :rate_limit}` return may be handled on `MySocket` as: + + def handle_error(conn, :rate_limit), do: Plug.Conn.send_resp(conn, 429, "Too many requests") + + ## Longpoll configuration + + The following configuration applies only to `:longpoll`: + + * `:window_ms` - how long the client can wait for new messages + in its poll request, defaults to 10_000ms. + + * `:pubsub_timeout_ms` - how long a request can wait for the + pubsub layer to respond, defaults to 2000ms. + + * `:crypto` - options for verifying and signing the token, accepted + by `Phoenix.Token`. By default tokens are valid for 2 weeks + + """ + defmacro socket(path, module, opts \\ []) do + module = Macro.expand(module, %{__CALLER__ | function: {:__handler__, 2}}) + + quote do + @phoenix_sockets {unquote(path), unquote(module), unquote(opts)} + end + end + + @doc false + @deprecated "Phoenix.Endpoint.instrument/4 is deprecated and has no effect. Use :telemetry instead" + defmacro instrument(_endpoint_or_conn_or_socket, _event, _runtime, _fun) do + :ok + end + + @doc """ + Checks if Endpoint's web server has been configured to start. 
+ + * `otp_app` - The OTP app running the endpoint, for example `:my_app` + * `endpoint` - The endpoint module, for example `MyAppWeb.Endpoint` + + ## Examples + + iex> Phoenix.Endpoint.server?(:my_app, MyAppWeb.Endpoint) + true + + """ + def server?(otp_app, endpoint) when is_atom(otp_app) and is_atom(endpoint) do + Phoenix.Endpoint.Supervisor.server?(otp_app, endpoint) + end +end diff --git a/deps/phoenix/lib/phoenix/endpoint/cowboy2_adapter.ex b/deps/phoenix/lib/phoenix/endpoint/cowboy2_adapter.ex new file mode 100644 index 0000000..4e04615 --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint/cowboy2_adapter.ex @@ -0,0 +1,132 @@ +defmodule Phoenix.Endpoint.Cowboy2Adapter do + @moduledoc """ + The Cowboy2 adapter for Phoenix. + + ## Endpoint configuration + + This adapter uses the following endpoint configuration: + + * `:http` - the configuration for the HTTP server. It accepts all options + as defined by [`Plug.Cowboy`](https://hexdocs.pm/plug_cowboy/). Defaults + to `false` + + * `:https` - the configuration for the HTTPS server. It accepts all options + as defined by [`Plug.Cowboy`](https://hexdocs.pm/plug_cowboy/). Defaults + to `false` + + * `:drainer` - a drainer process that triggers when your application is + shutting down to wait for any on-going request to finish. It accepts all + options as defined by [`Plug.Cowboy.Drainer`](https://hexdocs.pm/plug_cowboy/Plug.Cowboy.Drainer.html). + Defaults to `[]`, which will start a drainer process for each configured endpoint, + but can be disabled by setting it to `false`. + + ## Custom dispatch options + + You can provide custom dispatch options in order to use Phoenix's + builtin Cowboy server with custom handlers. For example, to handle + raw WebSockets [as shown in Cowboy's docs](https://github.com/ninenines/cowboy/tree/master/examples)). + + The options are passed to both `:http` and `:https` keys in the + endpoint configuration. However, once you pass your custom dispatch + options, you will need to manually wire the Phoenix endpoint by + adding the following rule: + + {:_, Phoenix.Endpoint.Cowboy2Handler, {MyAppWeb.Endpoint, []}} + + For example: + + config :myapp, MyAppWeb.Endpoint, + http: [dispatch: [ + {:_, [ + {"/foo", MyAppWeb.CustomHandler, []}, + {:_, Phoenix.Endpoint.Cowboy2Handler, {MyAppWeb.Endpoint, []}} + ]}]] + + It is also important to specify your handlers first, otherwise + Phoenix will intercept the requests before they get to your handler. + """ + + require Logger + + @doc false + def child_specs(endpoint, config) do + otp_app = Keyword.fetch!(config, :otp_app) + + refs_and_specs = + for {scheme, port} <- [http: 4000, https: 4040], opts = config[scheme] do + port = :proplists.get_value(:port, opts, port) + + unless port do + Logger.error(":port for #{scheme} config is nil, cannot start server") + raise "aborting due to nil port" + end + + # Ranch options are read from the top, so we keep the user opts first. 
+ opts = :proplists.delete(:port, opts) ++ [port: port_to_integer(port), otp_app: otp_app] + child_spec(scheme, endpoint, opts) + end + + {refs, child_specs} = Enum.unzip(refs_and_specs) + + if drainer = (refs != [] && Keyword.get(config, :drainer, [])) do + child_specs ++ [{Plug.Cowboy.Drainer, Keyword.put_new(drainer, :refs, refs)}] + else + child_specs + end + end + + defp child_spec(scheme, endpoint, config) do + if scheme == :https do + Application.ensure_all_started(:ssl) + end + + dispatches = [{:_, Phoenix.Endpoint.Cowboy2Handler, {endpoint, endpoint.init([])}}] + config = Keyword.put_new(config, :dispatch, [{:_, dispatches}]) + ref = Module.concat(endpoint, scheme |> Atom.to_string() |> String.upcase()) + spec = Plug.Cowboy.child_spec(ref: ref, scheme: scheme, plug: {endpoint, []}, options: config) + spec = update_in(spec.start, &{__MODULE__, :start_link, [scheme, endpoint, &1]}) + {ref, spec} + end + + @doc false + def start_link(scheme, endpoint, {m, f, [ref | _] = a}) do + # ref is used by Ranch to identify its listeners, defaulting + # to plug.HTTP and plug.HTTPS and overridable by users. + case apply(m, f, a) do + {:ok, pid} -> + Logger.info(fn -> info(scheme, endpoint, ref) end) + {:ok, pid} + + {:error, {:shutdown, {_, _, {:listen_error, _, :eaddrinuse}}}} = error -> + Logger.error([info(scheme, endpoint, ref), " failed, port already in use"]) + error + + {:error, {:shutdown, {_, _, {{_, {:error, :eaddrinuse}}, _}}}} = error -> + Logger.error [info(scheme, endpoint, ref), " failed, port already in use"] + error + + {:error, _} = error -> + error + end + end + + defp info(scheme, endpoint, ref) do + server = "cowboy #{Application.spec(:cowboy)[:vsn]}" + "Running #{inspect endpoint} with #{server} at #{bound_address(scheme, ref)}" + end + + defp bound_address(scheme, ref) do + case :ranch.get_addr(ref) do + {:local, unix_path} -> + "#{unix_path} (#{scheme}+unix)" + + {addr, port} -> + "#{:inet.ntoa(addr)}:#{port} (#{scheme})" + end + end + + # TODO: Deprecate {:system, env_var} once we require Elixir v1.9+ + defp port_to_integer({:system, env_var}), do: port_to_integer(System.get_env(env_var)) + defp port_to_integer(port) when is_binary(port), do: String.to_integer(port) + defp port_to_integer(port) when is_integer(port), do: port +end diff --git a/deps/phoenix/lib/phoenix/endpoint/cowboy2_handler.ex b/deps/phoenix/lib/phoenix/endpoint/cowboy2_handler.ex new file mode 100644 index 0000000..cbedb17 --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint/cowboy2_handler.ex @@ -0,0 +1,188 @@ +defmodule Phoenix.Endpoint.Cowboy2Handler do + @moduledoc false + + if Code.ensure_loaded?(:cowboy_websocket) and + function_exported?(:cowboy_websocket, :behaviour_info, 1) do + @behaviour :cowboy_websocket + end + + @connection Plug.Cowboy.Conn + @already_sent {:plug_conn, :sent} + + # Note we keep the websocket state as [handler | state] + # to avoid conflicts with {endpoint, opts}. + def init(req, {endpoint, opts}) do + init(@connection.conn(req), endpoint, opts, true) + end + + defp init(conn, endpoint, opts, retry?) 
do + try do + case endpoint.__handler__(conn, opts) do + {:websocket, conn, handler, opts} -> + case Phoenix.Transports.WebSocket.connect(conn, endpoint, handler, opts) do + {:ok, %Plug.Conn{adapter: {@connection, req}} = conn, state} -> + cowboy_opts = + opts + |> Enum.flat_map(fn + {:timeout, timeout} -> + [idle_timeout: timeout] + + {:compress, _} = opt -> + [opt] + + {:max_frame_size, _} = opt -> + [opt] + + {:fullsweep_after, value} -> + :erlang.process_flag(:fullsweep_after, value) + [] + + _other -> + [] + end) + |> Map.new() + + {:cowboy_websocket, copy_resp_headers(conn, req), [handler | state], cowboy_opts} + + {:error, %Plug.Conn{adapter: {@connection, req}} = conn} -> + {:ok, copy_resp_headers(conn, req), {handler, opts}} + end + + {:plug, conn, handler, opts} -> + %{adapter: {@connection, req}} = + conn + |> handler.call(opts) + |> maybe_send(handler) + + {:ok, req, {handler, opts}} + end + catch + kind, reason -> + case __STACKTRACE__ do + # Maybe the handler is not available because the code is being recompiled. + # Sync with the code reloader and retry once. + [{^endpoint, :__handler__, _, _} | _] when reason == :undef and retry? -> + Phoenix.CodeReloader.sync() + init(conn, endpoint, opts, false) + + stacktrace -> + exit_on_error(kind, reason, stacktrace, {endpoint, :call, [conn, opts]}) + end + after + receive do + @already_sent -> :ok + after + 0 -> :ok + end + end + end + + defp maybe_send(%Plug.Conn{state: :unset}, _plug), do: raise(Plug.Conn.NotSentError) + defp maybe_send(%Plug.Conn{state: :set} = conn, _plug), do: Plug.Conn.send_resp(conn) + defp maybe_send(%Plug.Conn{} = conn, _plug), do: conn + + defp maybe_send(other, plug) do + raise "Cowboy2 adapter expected #{inspect(plug)} to return Plug.Conn but got: " <> + inspect(other) + end + + defp exit_on_error( + :error, + %Plug.Conn.WrapperError{kind: kind, reason: reason, stack: stack}, + _stack, + call + ) do + exit_on_error(kind, reason, stack, call) + end + + defp exit_on_error(:error, value, stack, call) do + exception = Exception.normalize(:error, value, stack) + :erlang.raise(:exit, {{exception, stack}, call}, []) + end + + defp exit_on_error(:throw, value, stack, call) do + :erlang.raise(:exit, {{{:nocatch, value}, stack}, call}, []) + end + + defp exit_on_error(:exit, value, _stack, call) do + :erlang.raise(:exit, {value, call}, []) + end + + defp copy_resp_headers(%Plug.Conn{} = conn, req) do + Enum.reduce(conn.resp_headers, req, fn {key, val}, acc -> + :cowboy_req.set_resp_header(key, val, acc) + end) + end + + defp handle_reply(handler, {:ok, state}), do: {:ok, [handler | state]} + defp handle_reply(handler, {:push, data, state}), do: {:reply, data, [handler | state]} + + defp handle_reply(handler, {:reply, _status, data, state}), + do: {:reply, data, [handler | state]} + + defp handle_reply(handler, {:stop, _reason, state}), do: {:stop, [handler | state]} + + defp handle_control_frame(payload_with_opts, handler_state) do + [handler | state] = handler_state + reply = + if function_exported?(handler, :handle_control, 2) do + handler.handle_control(payload_with_opts, state) + else + {:ok, state} + end + + handle_reply(handler, reply) + end + + ## Websocket callbacks + + def websocket_init([handler | state]) do + {:ok, state} = handler.init(state) + {:ok, [handler | state]} + end + + def websocket_handle({opcode, payload}, [handler | state]) when opcode in [:text, :binary] do + handle_reply(handler, handler.handle_in({payload, opcode: opcode}, state)) + end + + def websocket_handle({opcode, payload}, 
handler_state) when opcode in [:ping, :pong] do + handle_control_frame({payload, opcode: opcode}, handler_state) + end + + def websocket_handle(opcode, handler_state) when opcode in [:ping, :pong] do + handle_control_frame({nil, opcode: opcode}, handler_state) + end + + def websocket_handle(_other, handler_state) do + {:ok, handler_state} + end + + def websocket_info(message, [handler | state]) do + handle_reply(handler, handler.handle_info(message, state)) + end + + def terminate(_reason, _req, {_handler, _state}) do + :ok + end + + def terminate({:error, :closed}, _req, [handler | state]) do + handler.terminate(:closed, state) + end + + def terminate({:remote, :closed}, _req, [handler | state]) do + handler.terminate(:closed, state) + end + + def terminate({:remote, code, _}, _req, [handler | state]) + when code in 1000..1003 or code in 1005..1011 or code == 1015 do + handler.terminate(:closed, state) + end + + def terminate(:remote, _req, [handler | state]) do + handler.terminate(:closed, state) + end + + def terminate(reason, _req, [handler | state]) do + handler.terminate(reason, state) + end +end diff --git a/deps/phoenix/lib/phoenix/endpoint/render_errors.ex b/deps/phoenix/lib/phoenix/endpoint/render_errors.ex new file mode 100644 index 0000000..6482775 --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint/render_errors.ex @@ -0,0 +1,149 @@ +defmodule Phoenix.Endpoint.RenderErrors do + # This module is used to catch failures and render them using a view. + # + # This module is automatically used in `Phoenix.Endpoint` where it + # overrides `call/2` to provide rendering. Once the error is + # rendered, the error is reraised unless it is a NoRouteError. + # + # ## Options + # + # * `:view` - the name of the view we render templates against + # * `:format` - the format to use when none is available from the request + # * `:accepts` - list of accepted formats errors will be rendered for + # * `:log` - the `t:Logger.level/0` or `false` to disable logging rendered errors + # + @moduledoc false + + import Plug.Conn + + require Phoenix.Endpoint + require Logger + + alias Phoenix.Router.NoRouteError + alias Phoenix.Controller + + @already_sent {:plug_conn, :sent} + + @doc false + defmacro __using__(opts) do + quote do + @before_compile Phoenix.Endpoint.RenderErrors + @phoenix_render_errors unquote(opts) + end + end + + @doc false + defmacro __before_compile__(_) do + quote location: :keep do + defoverridable [call: 2] + + def call(conn, opts) do + try do + super(conn, opts) + rescue + e in Plug.Conn.WrapperError -> + %{conn: conn, kind: kind, reason: reason, stack: stack} = e + unquote(__MODULE__).__catch__(conn, kind, reason, stack, @phoenix_render_errors) + catch + kind, reason -> + stack = __STACKTRACE__ + unquote(__MODULE__).__catch__(conn, kind, reason, stack, @phoenix_render_errors) + end + end + end + end + + @doc false + def __catch__(conn, kind, reason, stack, opts) do + receive do + @already_sent -> + send(self(), @already_sent) + %Plug.Conn{conn | state: :sent} + + after 0 -> + instrument_render_and_send(conn, kind, reason, stack, opts) + end + + :erlang.raise(kind, reason, stack) + end + + defp instrument_render_and_send(conn, kind, reason, stack, opts) do + level = Keyword.get(opts, :log, :debug) + status = status(kind, reason) + conn = error_conn(conn, kind, reason) + start = System.monotonic_time() + metadata = %{conn: conn, status: status, kind: kind, reason: reason, stacktrace: stack, log: level} + + try do + render(conn, status, kind, reason, stack, opts) + after + duration 
= System.monotonic_time() - start + :telemetry.execute([:phoenix, :error_rendered], %{duration: duration}, metadata) + end + end + + defp error_conn(_conn, :error, %NoRouteError{conn: conn}), do: conn + defp error_conn(conn, _kind, _reason), do: conn + + ## Rendering + + @doc false + def __debugger_banner__(_conn, _status, _kind, %NoRouteError{router: router}, _stack) do + """ +

<h3>Available routes</h3>
+ <pre>#{Phoenix.Router.ConsoleFormatter.format(router)}</pre>
+ """ + end + def __debugger_banner__(_conn, _status, _kind, _reason, _stack), do: nil + + defp render(conn, status, kind, reason, stack, opts) do + view = Keyword.fetch!(opts, :view) + conn = + conn + |> maybe_fetch_query_params() + |> maybe_fetch_format(opts) + |> Plug.Conn.put_status(status) + |> Controller.put_root_layout(opts[:root_layout] || false) + |> Controller.put_layout(opts[:layout] || false) + |> Controller.put_view(view) + + reason = Exception.normalize(kind, reason, stack) + format = Controller.get_format(conn) + template = "#{conn.status}.#{format}" + assigns = %{kind: kind, reason: reason, stack: stack, status: conn.status} + + conn + |> Controller.put_view(view) + |> Controller.render(template, assigns) + end + + defp maybe_fetch_query_params(conn) do + fetch_query_params(conn) + rescue + Plug.Conn.InvalidQueryError -> + case conn.params do + %Plug.Conn.Unfetched{} -> %Plug.Conn{conn | query_params: %{}, params: %{}} + params -> %Plug.Conn{conn | query_params: %{}, params: params} + end + end + + defp maybe_fetch_format(conn, opts) do + # We ignore params["_format"] although we respect any already stored. + case conn.private do + %{phoenix_format: format} when is_binary(format) -> conn + _ -> Controller.accepts(conn, Keyword.fetch!(opts, :accepts)) + end + rescue + e in Phoenix.NotAcceptableError -> + fallback_format = Keyword.fetch!(opts, :accepts) |> List.first() + Logger.debug("Could not render errors due to #{Exception.message(e)}. " <> + "Errors will be rendered using the first accepted format #{inspect fallback_format} as fallback. " <> + "Please customize the :accepts option under the :render_errors configuration " <> + "in your endpoint if you want to support other formats or choose another fallback") + Controller.put_format(conn, fallback_format) + end + + defp status(:error, error), do: Plug.Exception.status(error) + defp status(:throw, _throw), do: 500 + defp status(:exit, _exit), do: 500 +end diff --git a/deps/phoenix/lib/phoenix/endpoint/supervisor.ex b/deps/phoenix/lib/phoenix/endpoint/supervisor.ex new file mode 100644 index 0000000..e076679 --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint/supervisor.ex @@ -0,0 +1,417 @@ +defmodule Phoenix.Endpoint.Supervisor do + # This module contains the logic used by most functions in Phoenix.Endpoint + # as well the supervisor for sockets, adapters, watchers, etc. + @moduledoc false + + require Logger + use Supervisor + + @doc """ + Starts the endpoint supervision tree. 
+ """ + def start_link(otp_app, mod, opts \\ []) do + case Supervisor.start_link(__MODULE__, {otp_app, mod, opts}, name: mod) do + {:ok, _} = ok -> + warmup(mod) + log_access_url(otp_app, mod, opts) + browser_open(otp_app, mod) + ok + + {:error, _} = error -> + error + end + end + + @doc false + def init({otp_app, mod, opts}) do + default_conf = Phoenix.Config.merge(defaults(otp_app, mod), opts) + env_conf = config(otp_app, mod, default_conf) + + secret_conf = + case mod.init(:supervisor, env_conf) do + {:ok, init_conf} -> + if is_nil(Application.get_env(otp_app, mod)) and init_conf == env_conf do + Logger.warn("no configuration found for otp_app #{inspect(otp_app)} and module #{inspect(mod)}") + end + + init_conf + + other -> + raise ArgumentError, "expected init/2 callback to return {:ok, config}, got: #{inspect other}" + end + + extra_conf = [ + endpoint_id: :crypto.strong_rand_bytes(16) |> Base.encode64(padding: false), + # TODO: Remove this once :pubsub is removed + pubsub_server: secret_conf[:pubsub_server] || secret_conf[:pubsub][:name] + ] + + secret_conf = extra_conf ++ secret_conf + default_conf = extra_conf ++ default_conf + + # Drop all secrets from secret_conf before passing it around + conf = Keyword.drop(secret_conf, [:secret_key_base]) + server? = server?(conf) + + if conf[:instrumenters] do + Logger.warn(":instrumenters configuration for #{inspect(mod)} is deprecated and has no effect") + end + + if server? and conf[:code_reloader] do + Phoenix.CodeReloader.Server.check_symlinks() + end + + children = + config_children(mod, secret_conf, default_conf) ++ + pubsub_children(mod, conf) ++ + socket_children(mod) ++ + server_children(mod, conf, server?) ++ + watcher_children(mod, conf, server?) + + Supervisor.init(children, strategy: :one_for_one) + end + + defp pubsub_children(mod, conf) do + pub_conf = conf[:pubsub] + + if pub_conf do + Logger.warn """ + The :pubsub key in your #{inspect mod} is deprecated. + + You must now start the pubsub in your application supervision tree. + Go to lib/my_app/application.ex and add the following: + + {Phoenix.PubSub, #{inspect pub_conf}} + + Now, back in your config files in config/*, you can remove the :pubsub + key and add the :pubsub_server key, with the PubSub name: + + pubsub_server: #{inspect pub_conf[:name]} + """ + end + + if pub_conf[:adapter] do + [{Phoenix.PubSub, pub_conf}] + else + [] + end + end + + defp socket_children(endpoint) do + endpoint.__sockets__ + |> Enum.uniq_by(&elem(&1, 1)) + |> Enum.map(fn {_, socket, opts} -> socket.child_spec([endpoint: endpoint] ++ opts) end) + end + + defp config_children(mod, conf, default_conf) do + args = {mod, conf, default_conf, name: Module.concat(mod, "Config")} + [{Phoenix.Config, args}] + end + + defp server_children(mod, config, server?) do + if server? do + adapter = config[:adapter] || Phoenix.Endpoint.Cowboy2Adapter + adapter.child_specs(mod, config) + else + [] + end + end + + defp watcher_children(_mod, conf, server?) do + if server? || conf[:force_watchers] do + Enum.map(conf[:watchers], &{Phoenix.Endpoint.Watcher, &1}) + else + [] + end + end + + @doc """ + The endpoint configuration used at compile time. + """ + def config(otp_app, endpoint) do + config(otp_app, endpoint, defaults(otp_app, endpoint)) + end + + defp config(otp_app, endpoint, defaults) do + Phoenix.Config.from_env(otp_app, endpoint, defaults) + end + + @doc """ + Checks if Endpoint's web server has been configured to start. 
+ """ + def server?(otp_app, endpoint) when is_atom(otp_app) and is_atom(endpoint) do + otp_app + |> config(endpoint) + |> server?() + end + def server?(conf) when is_list(conf) do + Keyword.get(conf, :server, Application.get_env(:phoenix, :serve_endpoints, false)) + end + + defp defaults(otp_app, module) do + [otp_app: otp_app, + + # Compile-time config + code_reloader: false, + debug_errors: false, + render_errors: [view: render_errors(module), accepts: ~w(html), layout: false], + + # Runtime config + cache_static_manifest: nil, + check_origin: true, + http: false, + https: false, + reloadable_apps: nil, + reloadable_compilers: [:gettext, :elixir], + secret_key_base: nil, + static_url: nil, + url: [host: "localhost", path: "/"], + cache_manifest_skip_vsn: false, + + # Supervisor config + watchers: [], + force_watchers: false] + end + + defp render_errors(module) do + module + |> Module.split + |> Enum.at(0) + |> Module.concat("ErrorView") + end + + @doc """ + Callback that changes the configuration from the app callback. + """ + def config_change(endpoint, changed, removed) do + res = Phoenix.Config.config_change(endpoint, changed, removed) + warmup(endpoint) + res + end + + @doc """ + Builds the endpoint url from its configuration. + + The result is wrapped in a `{:cache, value}` tuple so + the `Phoenix.Config` layer knows how to cache it. + """ + def url(endpoint) do + {:cache, build_url(endpoint, endpoint.config(:url)) |> String.Chars.URI.to_string()} + end + + @doc """ + Builds the host for caching. + """ + def host(endpoint) do + {:cache, host_to_binary(endpoint.config(:url)[:host] || "localhost")} + end + + @doc """ + Builds the path for caching. + """ + def path(endpoint) do + {:cache, empty_string_if_root(endpoint.config(:url)[:path] || "/")} + end + + @doc """ + Builds the script_name for caching. + """ + def script_name(endpoint) do + {:cache, String.split(endpoint.config(:url)[:path] || "/", "/", trim: true)} + end + + @doc """ + Builds the static url from its configuration. + + The result is wrapped in a `{:cache, value}` tuple so + the `Phoenix.Config` layer knows how to cache it. + """ + def static_url(endpoint) do + url = endpoint.config(:static_url) || endpoint.config(:url) + {:cache, build_url(endpoint, url) |> String.Chars.URI.to_string()} + end + + @doc """ + Builds a struct url for user processing. + + The result is wrapped in a `{:cache, value}` tuple so + the `Phoenix.Config` layer knows how to cache it. + """ + def struct_url(endpoint) do + url = endpoint.config(:url) + {:cache, build_url(endpoint, url)} + end + + defp build_url(endpoint, url) do + https = endpoint.config(:https) + http = endpoint.config(:http) + + {scheme, port} = + cond do + https -> + {"https", https[:port]} + http -> + {"http", http[:port]} + true -> + {"http", 80} + end + + scheme = url[:scheme] || scheme + host = host_to_binary(url[:host] || "localhost") + port = port_to_integer(url[:port] || port) + + if host =~ ~r"[^:]:\d" do + Logger.warn("url: [host: ...] configuration value #{inspect(host)} for #{inspect(endpoint)} is invalid") + end + + %URI{scheme: scheme, port: port, host: host} + end + + @doc """ + Returns the script path root. 
+ """ + def static_path(endpoint) do + script_path = (endpoint.config(:static_url) || endpoint.config(:url))[:path] || "/" + {:cache, empty_string_if_root(script_path)} + end + + defp empty_string_if_root("/"), do: "" + defp empty_string_if_root(other), do: other + + @doc """ + Returns a two item tuple with the first element containing the + static path of a file in the static root directory + and the second element containing the sha512 of that file (for SRI). + + When the file exists, it includes a timestamp. When it doesn't exist, + just the static path is returned. + + The result is wrapped in a `{:cache | :nocache, value}` tuple so + the `Phoenix.Config` layer knows how to cache it. + """ + @invalid_local_url_chars ["\\"] + + def static_lookup(_endpoint, "//" <> _ = path) do + raise_invalid_path(path) + end + + def static_lookup(_endpoint, "/" <> _ = path) do + if String.contains?(path, @invalid_local_url_chars) do + raise ArgumentError, "unsafe characters detected for path #{inspect path}" + else + {:nocache, {path, nil}} + end + end + + def static_lookup(_endpoint, path) when is_binary(path) do + raise_invalid_path(path) + end + + defp raise_invalid_path(path) do + raise ArgumentError, "expected a path starting with a single / but got #{inspect path}" + end + + # TODO: Deprecate {:system, env_var} once we require Elixir v1.9+ + defp host_to_binary({:system, env_var}), do: host_to_binary(System.get_env(env_var)) + defp host_to_binary(host), do: host + + # TODO: Deprecate {:system, env_var} once we require Elixir v1.9+ + defp port_to_integer({:system, env_var}), do: port_to_integer(System.get_env(env_var)) + defp port_to_integer(port) when is_binary(port), do: String.to_integer(port) + defp port_to_integer(port) when is_integer(port), do: port + + @doc """ + Invoked to warm up caches on start and config change. + """ + def warmup(endpoint) do + endpoint.host() + endpoint.script_name() + endpoint.path("/") + warmup_url(endpoint) + warmup_static(endpoint) + :ok + rescue + _ -> :ok + end + + defp warmup_url(endpoint) do + endpoint.url() + endpoint.static_url() + endpoint.struct_url() + end + + defp warmup_static(endpoint) do + warmup_static(endpoint, cache_static_manifest(endpoint)) + endpoint.static_path("/") + end + + defp warmup_static(endpoint, %{"latest" => latest, "digests" => digests}) do + Phoenix.Config.put_new(endpoint, :cache_static_manifest_latest, latest) + with_vsn? 
= !endpoint.config(:cache_manifest_skip_vsn) + + Enum.each(latest, fn {key, _} -> + Phoenix.Config.cache(endpoint, {:__phoenix_static__, "/" <> key}, fn _ -> + {:cache, static_cache(digests, Map.get(latest, key), with_vsn?)} + end) + end) + end + + defp warmup_static(_endpoint, _manifest) do + raise ArgumentError, "expected warmup_static/2 to include 'latest' and 'digests' keys in manifest" + end + + defp static_cache(digests, value, true) do + {"/#{value}?vsn=d", static_integrity(digests[value]["sha512"])} + end + + defp static_cache(digests, value, false) do + {"/#{value}", static_integrity(digests[value]["sha512"])} + end + + defp static_integrity(nil), do: nil + defp static_integrity(sha), do: "sha512-#{sha}" + + defp cache_static_manifest(endpoint) do + if inner = endpoint.config(:cache_static_manifest) do + {app, inner} = + case inner do + {_, _} = inner -> inner + inner when is_binary(inner) -> {endpoint.config(:otp_app), inner} + _ -> raise ArgumentError, ":cache_static_manifest must be a binary or a tuple" + end + + outer = Application.app_dir(app, inner) + + if File.exists?(outer) do + outer |> File.read!() |> Phoenix.json_library().decode!() + else + Logger.error "Could not find static manifest at #{inspect outer}. " <> + "Run \"mix phx.digest\" after building your static files " <> + "or remove the configuration from \"config/prod.exs\"." + end + else + %{} + end + end + + defp log_access_url(otp_app, endpoint, opts) do + if Keyword.get(opts, :log_access_url, true) && server?(otp_app, endpoint) do + Logger.info("Access #{inspect(endpoint)} at #{endpoint.url()}") + end + end + + defp browser_open(otp_app, endpoint) do + if Application.get_env(:phoenix, :browser_open) && server?(otp_app, endpoint) do + url = endpoint.url() + + {cmd, args} = + case :os.type() do + {:win32, _} -> {"cmd", ["/c", "start", url]} + {:unix, :darwin} -> {"open", [url]} + {:unix, _} -> {"xdg-open", [url]} + end + + System.cmd(cmd, args) + end + end +end diff --git a/deps/phoenix/lib/phoenix/endpoint/watcher.ex b/deps/phoenix/lib/phoenix/endpoint/watcher.ex new file mode 100644 index 0000000..7f6fe4c --- /dev/null +++ b/deps/phoenix/lib/phoenix/endpoint/watcher.ex @@ -0,0 +1,60 @@ +defmodule Phoenix.Endpoint.Watcher do + @moduledoc false + require Logger + + def child_spec(args) do + %{ + id: make_ref(), + start: {__MODULE__, :start_link, [args]}, + restart: :transient + } + end + + def start_link({cmd, args}) do + Task.start_link(__MODULE__, :watch, [to_string(cmd), args]) + end + + def watch(_cmd, {mod, fun, args}) do + try do + apply(mod, fun, args) + catch + kind, reason -> + # The function returned a non-zero exit code. + # Sleep for a couple seconds before exiting to + # ensure this doesn't hit the supervisor's + # max_restarts/max_seconds limit. 
+ Process.sleep(2000) + :erlang.raise(kind, reason, __STACKTRACE__) + end + end + + def watch(cmd, args) when is_list(args) do + {args, opts} = Enum.split_while(args, &is_binary(&1)) + opts = Keyword.merge([into: IO.stream(:stdio, :line), stderr_to_stdout: true], opts) + + try do + System.cmd(cmd, args, opts) + catch + :error, :enoent -> + relative = Path.relative_to_cwd(cmd) + + Logger.error( + "Could not start watcher #{inspect(relative)} from #{inspect(cd(opts))}, executable does not exist" + ) + + exit(:shutdown) + else + {_, 0} -> + :ok + + {_, _} -> + # System.cmd returned a non-zero exit code + # sleep for a couple seconds before exiting to ensure this doesn't + # hit the supervisor's max_restarts / max_seconds limit + Process.sleep(2000) + exit(:watcher_command_error) + end + end + + defp cd(opts), do: opts[:cd] || File.cwd!() +end diff --git a/deps/phoenix/lib/phoenix/exceptions.ex b/deps/phoenix/lib/phoenix/exceptions.ex new file mode 100644 index 0000000..0e9f132 --- /dev/null +++ b/deps/phoenix/lib/phoenix/exceptions.ex @@ -0,0 +1,70 @@ +defmodule Phoenix.NotAcceptableError do + @moduledoc """ + Raised when one of the `accept*` headers is not accepted by the server. + + This exception is commonly raised by `Phoenix.Controller.accepts/2` + which negotiates the media types the server is able to serve with + the contents the client is able to render. + + If you are seeing this error, you should check if you are listing + the desired formats in your `:accepts` plug or if you are setting + the proper accept header in the client. The exception contains the + acceptable mime types in the `accepts` field. + """ + + defexception message: nil, accepts: [], plug_status: 406 +end + +defmodule Phoenix.MissingParamError do + @moduledoc """ + Raised when a key is expected to be present in the request parameters, + but is not. + + This exception is raised by `Phoenix.Controller.scrub_params/2` which: + + * Checks to see if the required_key is present (can be empty) + * Changes all empty parameters to nils ("" -> nil) + + If you are seeing this error, you should handle the error and surface it + to the end user. It means that there is a parameter missing from the request. + """ + + defexception [:message, plug_status: 400] + + def exception([key: value]) do + msg = "expected key #{inspect value} to be present in params, " <> + "please send the expected key or adapt your scrub_params/2 call" + %Phoenix.MissingParamError{message: msg} + end +end + +defmodule Phoenix.ActionClauseError do + exception_keys = + FunctionClauseError.__struct__() + |> Map.keys() + |> Kernel.--([:__exception__, :__struct__]) + + defexception exception_keys + + def message(exception) do + exception + |> Map.put(:__struct__, FunctionClauseError) + |> FunctionClauseError.message() + end + + def blame(exception, stacktrace) do + {exception, stacktrace} = + exception + |> Map.put(:__struct__, FunctionClauseError) + |> FunctionClauseError.blame(stacktrace) + + exception = Map.put(exception, :__struct__, __MODULE__) + + {exception, stacktrace} + end +end + +defimpl Plug.Exception, for: Phoenix.ActionClauseError do + def status(_), do: 400 + def actions(_), do: [] +end diff --git a/deps/phoenix/lib/phoenix/logger.ex b/deps/phoenix/lib/phoenix/logger.ex new file mode 100644 index 0000000..2f829ad --- /dev/null +++ b/deps/phoenix/lib/phoenix/logger.ex @@ -0,0 +1,377 @@ +defmodule Phoenix.Logger do + @moduledoc """ + Instrumenter to handle logging of various instrumentation events. 
+ + ## Instrumentation + + Phoenix uses the `:telemetry` library for instrumentation. The following events + are published by Phoenix with the following measurements and metadata: + + * `[:phoenix, :endpoint, :start]` - dispatched by `Plug.Telemetry` in your endpoint, + usually after code reloading + * Measurement: `%{system_time: system_time}` + * Metadata: `%{conn: Plug.Conn.t, options: Keyword.t}` + * Options: `%{log: Logger.level | false}` + * Disable logging: In your endpoint `plug Plug.Telemetry, ..., log: Logger.level | false` + * Configure log level dynamically: `plug Plug.Telemetry, ..., log: {Mod, Fun, Args}` + + * `[:phoenix, :endpoint, :stop]` - dispatched by `Plug.Telemetry` in your + endpoint whenever the response is sent + * Measurement: `%{duration: native_time}` + * Metadata: `%{conn: Plug.Conn.t, options: Keyword.t}` + * Options: `%{log: Logger.level | false}` + * Disable logging: In your endpoint `plug Plug.Telemetry, ..., log: Logger.level | false` + * Configure log level dynamically: `plug Plug.Telemetry, ..., log: {Mod, Fun, Args}` + + * `[:phoenix, :router_dispatch, :start]` - dispatched by `Phoenix.Router` + before dispatching to a matched route + * Measurement: `%{system_time: System.system_time}` + * Metadata: `%{conn: Plug.Conn.t, route: binary, plug: module, plug_opts: term, path_params: map, pipe_through: [atom], log: Logger.level | false}` + * Disable logging: Pass `log: false` to the router macro, for example: `get("/page", PageController, :index, log: false)` + * Configure log level dynamically: `get("/page", PageController, :index, log: {Mod, Fun, Args})` + + * `[:phoenix, :router_dispatch, :exception]` - dispatched by `Phoenix.Router` + after exceptions on dispatching a route + * Measurement: `%{duration: native_time}` + * Metadata: `%{conn: Plug.Conn.t, kind: :throw | :error | :exit, reason: term(), stacktrace: Exception.stacktrace()}` + * Disable logging: This event is not logged + + * `[:phoenix, :router_dispatch, :stop]` - dispatched by `Phoenix.Router` + after successfully dispatching a matched route + * Measurement: `%{duration: native_time}` + * Metadata: `%{conn: Plug.Conn.t, route: binary, plug: module, plug_opts: term, path_params: map, pipe_through: [atom], log: Logger.level | false}` + * Disable logging: This event is not logged + + * `[:phoenix, :error_rendered]` - dispatched at the end of an error view being rendered + * Measurement: `%{duration: native_time}` + * Metadata: `%{conn: Plug.Conn.t, status: Plug.Conn.status, kind: Exception.kind, reason: term, stacktrace: Exception.stacktrace}` + * Disable logging: Set `render_errors: [log: false]` on your endpoint configuration + + * `[:phoenix, :socket_connected]` - dispatched by `Phoenix.Socket`, at the end of a socket connection + * Measurement: `%{duration: native_time}` + * Metadata: `%{endpoint: atom, transport: atom, params: term, connect_info: map, vsn: binary, user_socket: atom, result: :ok | :error, serializer: atom, log: Logger.level | false}` + * Disable logging: `use Phoenix.Socket, log: false` or `socket "/foo", MySocket, websocket: [log: false]` in your endpoint + + * `[:phoenix, :channel_joined]` - dispatched at the end of a channel join + * Measurement: `%{duration: native_time}` + * Metadata: `%{result: :ok | :error, params: term, socket: Phoenix.Socket.t}` + * Disable logging: This event cannot be disabled + + * `[:phoenix, :channel_handled_in]` - dispatched at the end of a channel handle in + * Measurement: `%{duration: native_time}` + * Metadata: `%{event: binary, params: 
term, socket: Phoenix.Socket.t}` + * Disable logging: This event cannot be disabled + + To see an example of how Phoenix LiveDashboard uses these events to create + metrics, visit . + + ## Parameter filtering + + When logging parameters, Phoenix can filter out sensitive parameters + such as passwords and tokens. Parameters to be filtered can be + added via the `:filter_parameters` option: + + config :phoenix, :filter_parameters, ["password", "secret"] + + With the configuration above, Phoenix will filter any parameter + that contains the terms `password` or `secret`. The match is + case sensitive. + + Phoenix's default is `["password"]`. + + Phoenix can filter all parameters by default and selectively keep + parameters. This can be configured like so: + + config :phoenix, :filter_parameters, {:keep, ["id", "order"]} + + With the configuration above, Phoenix will filter all parameters, + except those that match exactly `id` or `order`. If a kept parameter + matches, all parameters nested under that one will also be kept. + + ## Dynamic log level + + In some cases you may wish to set the log level dynamically + on a per-request basis. To do so, set the `:log` option to + a tuple, `{Mod, Fun, Args}`. The `Plug.Conn.t()` for the + request will be prepended to the provided list of arguments. + + When invoked, your function must return a + [`Logger.level()`](`t:Logger.level()/0`) or `false` to + disable logging for the request. + + For example, in your Endpoint you might do something like this: + + # lib/my_app_web/endpoint.ex + plug Plug.Telemetry, + event_prefix: [:phoenix, :endpoint], + log: {__MODULE__, :log_level, []} + + # Disables logging for routes like /status/* + def log_level(%{path_info: ["status" | _]}), do: false + def log_level(_), do: :info + + ## Disabling + + When you are using custom logging system it is not always desirable to enable + `#{inspect __MODULE__}` by default. 
You can always disable this in general by:
+
+      config :phoenix, :logger, false
+  """
+
+  require Logger
+
+  @doc false
+  def install do
+    handlers = %{
+      [:phoenix, :endpoint, :start] => &__MODULE__.phoenix_endpoint_start/4,
+      [:phoenix, :endpoint, :stop] => &__MODULE__.phoenix_endpoint_stop/4,
+      [:phoenix, :router_dispatch, :start] => &__MODULE__.phoenix_router_dispatch_start/4,
+      [:phoenix, :error_rendered] => &__MODULE__.phoenix_error_rendered/4,
+      [:phoenix, :socket_connected] => &__MODULE__.phoenix_socket_connected/4,
+      [:phoenix, :channel_joined] => &__MODULE__.phoenix_channel_joined/4,
+      [:phoenix, :channel_handled_in] => &__MODULE__.phoenix_channel_handled_in/4
+    }
+
+    for {key, fun} <- handlers do
+      :telemetry.attach({__MODULE__, key}, key, fun, :ok)
+    end
+  end
+
+  @doc false
+  def duration(duration) do
+    duration = System.convert_time_unit(duration, :native, :microsecond)
+
+    if duration > 1000 do
+      [duration |> div(1000) |> Integer.to_string(), "ms"]
+    else
+      [Integer.to_string(duration), "µs"]
+    end
+  end
+
+  @doc false
+  def filter_values(values, params \\ Application.get_env(:phoenix, :filter_parameters, []))
+  def filter_values(values, {:discard, params}), do: discard_values(values, params)
+  def filter_values(values, {:keep, params}), do: keep_values(values, params)
+  def filter_values(values, params), do: discard_values(values, params)
+
+  defp discard_values(%{__struct__: mod} = struct, _params) when is_atom(mod) do
+    struct
+  end
+
+  defp discard_values(%{} = map, params) do
+    Enum.into(map, %{}, fn {k, v} ->
+      if is_binary(k) and String.contains?(k, params) do
+        {k, "[FILTERED]"}
+      else
+        {k, discard_values(v, params)}
+      end
+    end)
+  end
+
+  defp discard_values([_ | _] = list, params) do
+    Enum.map(list, &discard_values(&1, params))
+  end
+
+  defp discard_values(other, _params), do: other
+
+  defp keep_values(%{__struct__: mod}, _params) when is_atom(mod), do: "[FILTERED]"
+
+  defp keep_values(%{} = map, params) do
+    Enum.into(map, %{}, fn {k, v} ->
+      if is_binary(k) and k in params do
+        {k, discard_values(v, [])}
+      else
+        {k, keep_values(v, params)}
+      end
+    end)
+  end
+
+  defp keep_values([_ | _] = list, params) do
+    Enum.map(list, &keep_values(&1, params))
+  end
+
+  defp keep_values(_other, _params), do: "[FILTERED]"
+
+  defp log_level(nil, _conn), do: :info
+  defp log_level(level, _conn) when is_atom(level), do: level
+
+  defp log_level({mod, fun, args}, conn) when is_atom(mod) and is_atom(fun) and is_list(args) do
+    apply(mod, fun, [conn | args])
+  end
+
+  ## Event: [:phoenix, :endpoint, *]
+
+  @doc false
+  def phoenix_endpoint_start(_, _, %{conn: conn} = metadata, _) do
+    case log_level(metadata[:options][:log], conn) do
+      false ->
+        :ok
+
+      level ->
+        Logger.log(level, fn ->
+          %{method: method, request_path: request_path} = conn
+          [method, ?\s, request_path]
+        end)
+    end
+  end
+
+  @doc false
+  def phoenix_endpoint_stop(_, %{duration: duration}, %{conn: conn} = metadata, _) do
+    case log_level(metadata[:options][:log], conn) do
+      false ->
+        :ok
+
+      level ->
+        Logger.log(level, fn ->
+          %{status: status, state: state} = conn
+          status = Integer.to_string(status)
+          [connection_type(state), ?\s, status, " in ", duration(duration)]
+        end)
+    end
+  end
+
+  defp connection_type(:set_chunked), do: "Chunked"
+  defp connection_type(_), do: "Sent"
+
+  ## Event: [:phoenix, :error_rendered]
+
+  @doc false
+  def phoenix_error_rendered(_, _, %{log: false}, _), do: :ok
+
+  def phoenix_error_rendered(_, _, %{log: level, status: status, kind: kind, reason: reason}, _) do
Logger.log(level, fn -> + [ + "Converted ", + Atom.to_string(kind), + ?\s, + error_banner(kind, reason), + " to ", + Integer.to_string(status), + " response" + ] + end) + end + + defp error_banner(:error, %type{}), do: inspect(type) + defp error_banner(_kind, reason), do: inspect(reason) + + ## Event: [:phoenix, :router_dispatch, :start] + + @doc false + def phoenix_router_dispatch_start(_, _, %{log: false}, _), do: :ok + + def phoenix_router_dispatch_start(_, _, metadata, _) do + %{log: level, conn: conn, plug: plug} = metadata + level = log_level(level, conn) + log_module = metadata[:log_module] || plug + + Logger.log(level, fn -> + %{ + pipe_through: pipe_through, + plug_opts: plug_opts + } = metadata + + [ + "Processing with ", + inspect(log_module), + maybe_action(plug_opts), + ?\n, + " Parameters: ", + params(conn.params), + ?\n, + " Pipelines: ", + inspect(pipe_through) + ] + end) + end + + defp maybe_action(action) when is_atom(action), do: [?., Atom.to_string(action), ?/, ?2] + defp maybe_action(_), do: [] + + defp params(%Plug.Conn.Unfetched{}), do: "[UNFETCHED]" + defp params(params), do: params |> filter_values() |> inspect() + + ## Event: [:phoenix, :socket_connected] + + @doc false + def phoenix_socket_connected(_, _, %{log: false}, _), do: :ok + + def phoenix_socket_connected(_, %{duration: duration}, %{log: level} = meta, _) do + Logger.log(level, fn -> + %{ + transport: transport, + params: params, + user_socket: user_socket, + result: result, + serializer: serializer + } = meta + + [ + connect_result(result), + inspect(user_socket), + " in ", + duration(duration), + "\n Transport: ", + inspect(transport), + "\n Serializer: ", + inspect(serializer), + "\n Parameters: ", + inspect(filter_values(params)) + ] + end) + end + + defp connect_result(:ok), do: "CONNECTED TO " + defp connect_result(:error), do: "REFUSED CONNECTION TO " + + ## Event: [:phoenix, :channel_joined] + + @doc false + def phoenix_channel_joined(_, %{duration: duration}, %{socket: socket} = metadata, _) do + channel_log(:log_join, socket, fn -> + %{result: result, params: params} = metadata + + [ + join_result(result), + socket.topic, + " in ", + duration(duration), + "\n Parameters: ", + inspect(filter_values(params)) + ] + end) + end + + defp join_result(:ok), do: "JOINED " + defp join_result(:error), do: "REFUSED JOIN " + + ## Event: [:phoenix, :channel_handle_in] + + @doc false + def phoenix_channel_handled_in(_, %{duration: duration}, %{socket: socket} = metadata, _) do + channel_log(:log_handle_in, socket, fn -> + %{event: event, params: params} = metadata + + [ + "HANDLED ", + event, + " INCOMING ON ", + socket.topic, + " (", + inspect(socket.channel), + ") in ", + duration(duration), + "\n Parameters: ", + inspect(filter_values(params)) + ] + end) + end + + defp channel_log(_log_option, %{topic: "phoenix" <> _}, _fun), do: :ok + + defp channel_log(log_option, %{private: private}, fun) do + if level = Map.get(private, log_option) do + Logger.log(level, fun) + end + end +end diff --git a/deps/phoenix/lib/phoenix/naming.ex b/deps/phoenix/lib/phoenix/naming.ex new file mode 100644 index 0000000..01c3782 --- /dev/null +++ b/deps/phoenix/lib/phoenix/naming.ex @@ -0,0 +1,132 @@ +defmodule Phoenix.Naming do + @moduledoc """ + Conveniences for inflecting and working with names in Phoenix. + """ + + @doc """ + Extracts the resource name from an alias. 
+
+  ## Examples
+
+      iex> Phoenix.Naming.resource_name(MyApp.User)
+      "user"
+
+      iex> Phoenix.Naming.resource_name(MyApp.UserView, "View")
+      "user"
+
+  """
+  @spec resource_name(String.Chars.t, String.t) :: String.t
+  def resource_name(alias, suffix \\ "") do
+    alias
+    |> to_string()
+    |> Module.split()
+    |> List.last()
+    |> unsuffix(suffix)
+    |> underscore()
+  end
+
+  @doc """
+  Removes the given suffix from the name if it exists.
+
+  ## Examples
+
+      iex> Phoenix.Naming.unsuffix("MyApp.User", "View")
+      "MyApp.User"
+
+      iex> Phoenix.Naming.unsuffix("MyApp.UserView", "View")
+      "MyApp.User"
+
+  """
+  @spec unsuffix(String.t, String.t) :: String.t
+  def unsuffix(value, suffix) do
+    string = to_string(value)
+    suffix_size = byte_size(suffix)
+    prefix_size = byte_size(string) - suffix_size
+    case string do
+      <<prefix :: binary-size(prefix_size), ^suffix :: binary>> -> prefix
+      _ -> string
+    end
+  end
+
+  @doc """
+  Converts a string to underscore case.
+
+  ## Examples
+
+      iex> Phoenix.Naming.underscore("MyApp")
+      "my_app"
+
+  In general, `underscore` can be thought of as the reverse of
+  `camelize`, however, in some cases formatting may be lost:
+
+      Phoenix.Naming.underscore "SAPExample"  #=> "sap_example"
+      Phoenix.Naming.camelize   "sap_example" #=> "SapExample"
+
+  """
+  @spec underscore(String.t) :: String.t
+
+  def underscore(value), do: Macro.underscore(value)
+
+  defp to_lower_char(char) when char in ?A..?Z, do: char + 32
+  defp to_lower_char(char), do: char
+
+  @doc """
+  Converts a string to camel case.
+
+  Takes an optional `:lower` flag to return lowerCamelCase.
+
+  ## Examples
+
+      iex> Phoenix.Naming.camelize("my_app")
+      "MyApp"
+
+      iex> Phoenix.Naming.camelize("my_app", :lower)
+      "myApp"
+
+  In general, `camelize` can be thought of as the reverse of
+  `underscore`, however, in some cases formatting may be lost:
+
+      Phoenix.Naming.underscore "SAPExample"  #=> "sap_example"
+      Phoenix.Naming.camelize   "sap_example" #=> "SapExample"
+
+  """
+  @spec camelize(String.t) :: String.t
+  def camelize(value), do: Macro.camelize(value)
+
+  @spec camelize(String.t, :lower) :: String.t
+  def camelize("", :lower), do: ""
+  def camelize(<<?_, t :: binary>>, :lower) do
+    camelize(t, :lower)
+  end
+  def camelize(<<h, _t :: binary>> = value, :lower) do
+    <<_first, rest :: binary>> = camelize(value)
+    <<to_lower_char(h)>> <> rest
+  end
+
+  @doc """
+  Converts an attribute/form field into its humanized version.
+
+  ## Examples
+
+      iex> Phoenix.Naming.humanize(:username)
+      "Username"
+      iex> Phoenix.Naming.humanize(:created_at)
+      "Created at"
+      iex> Phoenix.Naming.humanize("user_id")
+      "User"
+
+  """
+  @spec humanize(atom | String.t) :: String.t
+  def humanize(atom) when is_atom(atom),
+    do: humanize(Atom.to_string(atom))
+  def humanize(bin) when is_binary(bin) do
+    bin =
+      if String.ends_with?(bin, "_id") do
+        binary_part(bin, 0, byte_size(bin) - 3)
+      else
+        bin
+      end
+
+    bin |> String.replace("_", " ") |> String.capitalize
+  end
+end
diff --git a/deps/phoenix/lib/phoenix/param.ex b/deps/phoenix/lib/phoenix/param.ex
new file mode 100644
index 0000000..7ccd2b7
--- /dev/null
+++ b/deps/phoenix/lib/phoenix/param.ex
@@ -0,0 +1,127 @@
+defprotocol Phoenix.Param do
+  @moduledoc """
+  A protocol that converts data structures into URL parameters.
+
+  This protocol is used by URL helpers and other parts of the
+  Phoenix stack. For example, when you write:
+
+      user_path(conn, :edit, @user)
+
+  Phoenix knows how to extract the `:id` from `@user` thanks
+  to this protocol.
+
+  By default, Phoenix implements this protocol for integers, binaries, atoms,
+  and structs.
For structs, a key `:id` is assumed, but you may provide a + specific implementation. + + Nil values cannot be converted to param. + + ## Custom parameters + + In order to customize the parameter for any struct, + one can simply implement this protocol. + + However, for convenience, this protocol can also be + derivable. For example: + + defmodule User do + @derive Phoenix.Param + defstruct [:id, :username] + end + + By default, the derived implementation will also use + the `:id` key. In case the user does not contain an + `:id` key, the key can be specified with an option: + + defmodule User do + @derive {Phoenix.Param, key: :username} + defstruct [:username] + end + + will automatically use `:username` in URLs. + + When using Ecto, you must call `@derive` before + your `schema` call: + + @derive {Phoenix.Param, key: :username} + schema "users" do + + """ + + @fallback_to_any true + + @spec to_param(term) :: String.t + def to_param(term) +end + +defimpl Phoenix.Param, for: Integer do + def to_param(int), do: Integer.to_string(int) +end + +defimpl Phoenix.Param, for: Float do + def to_param(float), do: Float.to_string(float) +end + +defimpl Phoenix.Param, for: BitString do + def to_param(bin) when is_binary(bin), do: bin +end + +defimpl Phoenix.Param, for: Atom do + def to_param(nil) do + raise ArgumentError, "cannot convert nil to param" + end + + def to_param(atom) do + Atom.to_string(atom) + end +end + +defimpl Phoenix.Param, for: Map do + def to_param(map) do + raise ArgumentError, + "maps cannot be converted to_param. A struct was expected, got: #{inspect map}" + end +end + +defimpl Phoenix.Param, for: Any do + defmacro __deriving__(module, struct, options) do + key = Keyword.get(options, :key, :id) + + unless Map.has_key?(struct, key) do + raise ArgumentError, "cannot derive Phoenix.Param for struct #{inspect module} " <> + "because it does not have key #{inspect key}. Please pass " <> + "the :key option when deriving" + end + + quote do + defimpl Phoenix.Param, for: unquote(module) do + def to_param(%{unquote(key) => nil}) do + raise ArgumentError, "cannot convert #{inspect unquote(module)} to param, " <> + "key #{inspect unquote(key)} contains a nil value" + end + + def to_param(%{unquote(key) => key}) when is_integer(key), do: Integer.to_string(key) + def to_param(%{unquote(key) => key}) when is_binary(key), do: key + def to_param(%{unquote(key) => key}), do: Phoenix.Param.to_param(key) + end + end + end + + def to_param(%{id: nil}) do + raise ArgumentError, "cannot convert struct to param, key :id contains a nil value" + end + def to_param(%{id: id}) when is_integer(id), do: Integer.to_string(id) + def to_param(%{id: id}) when is_binary(id), do: id + def to_param(%{id: id}), do: Phoenix.Param.to_param(id) + + def to_param(map) when is_map(map) do + raise ArgumentError, + "structs expect an :id key when converting to_param or a custom implementation " <> + "of the Phoenix.Param protocol (read Phoenix.Param docs for more information), " <> + "got: #{inspect map}" + end + + def to_param(data) do + raise Protocol.UndefinedError, protocol: @protocol, value: data + end +end diff --git a/deps/phoenix/lib/phoenix/presence.ex b/deps/phoenix/lib/phoenix/presence.ex new file mode 100644 index 0000000..755a58b --- /dev/null +++ b/deps/phoenix/lib/phoenix/presence.ex @@ -0,0 +1,424 @@ +defmodule Phoenix.Presence do + @moduledoc """ + Provides Presence tracking to processes and channels. 
+ + This behaviour provides presence features such as fetching + presences for a given topic, as well as handling diffs of + join and leave events as they occur in real-time. Using this + module defines a supervisor and a module that implements the + `Phoenix.Tracker` behaviour that uses `Phoenix.PubSub` to + broadcast presence updates. + + In case you want to use only a subset of the functionality + provided by `Phoenix.Presence`, such as tracking processes + but without broadcasting updates, we recommend that you look + at the `Phoenix.Tracker` functionality from the `phoenix_pubsub` + project. + + ## Example Usage + + Start by defining a presence module within your application + which uses `Phoenix.Presence` and provide the `:otp_app` which + holds your configuration, as well as the `:pubsub_server`. + + defmodule MyAppWeb.Presence do + use Phoenix.Presence, + otp_app: :my_app, + pubsub_server: MyApp.PubSub + end + + The `:pubsub_server` must point to an existing pubsub server + running in your application, which is included by default as + `MyApp.PubSub` for new applications. + + Next, add the new supervisor to your supervision tree in + `lib/my_app/application.ex`. It must be after the PubSub child + and before the endpoint: + + children = [ + ... + {Phoenix.PubSub, name: MyApp.PubSub}, + MyAppWeb.Presence, + MyAppWeb.Endpoint + ] + + Once added, presences can be tracked in your channel after joining: + + defmodule MyAppWeb.MyChannel do + use MyAppWeb, :channel + alias MyAppWeb.Presence + + def join("some:topic", _params, socket) do + send(self(), :after_join) + {:ok, assign(socket, :user_id, ...)} + end + + def handle_info(:after_join, socket) do + {:ok, _} = Presence.track(socket, socket.assigns.user_id, %{ + online_at: inspect(System.system_time(:second)) + }) + + push(socket, "presence_state", Presence.list(socket)) + {:noreply, socket} + end + end + + In the example above, `Presence.track` is used to register this channel's process as a + presence for the socket's user ID, with a map of metadata. + Next, the current presence information for + the socket's topic is pushed to the client as a `"presence_state"` event. + + Finally, a diff of presence join and leave events will be sent to the + client as they happen in real-time with the "presence_diff" event. + The diff structure will be a map of `:joins` and `:leaves` of the form: + + %{ + joins: %{"123" => %{metas: [%{status: "away", phx_ref: ...}]}}, + leaves: %{"456" => %{metas: [%{status: "online", phx_ref: ...}]}} + }, + + See `c:list/1` for more information on the presence data structure. + + ## Fetching Presence Information + + Presence metadata should be minimized and used to store small, + ephemeral state, such as a user's "online" or "away" status. + More detailed information, such as user details that need to be fetched + from the database, can be achieved by overriding the `c:fetch/2` function. + + The `c:fetch/2` callback is triggered when using `c:list/1` and on + every update, and it serves as a mechanism to fetch presence information + a single time, before broadcasting the information to all channel subscribers. + This prevents N query problems and gives you a single place to group + isolated data fetching to extend presence metadata. + + The function must return a map of data matching the outlined Presence + data structure, including the `:metas` key, but can extend the map of + information to include any additional information. 
For example: + + def fetch(_topic, presences) do + users = presences |> Map.keys() |> Accounts.get_users_map() + + for {key, %{metas: metas}} <- presences, into: %{} do + {key, %{metas: metas, user: users[String.to_integer(key)]}} + end + end + + Where `Account.get_users_map/1` could be implemented like: + + def get_users_map(ids) do + query = + from u in User, + where: u.id in ^ids, + select: {u.id, u} + + query |> Repo.all() |> Enum.into(%{}) + end + + The `fetch/2` function above fetches all users from the database who + have registered presences for the given topic. The presences + information is then extended with a `:user` key of the user's + information, while maintaining the required `:metas` field from the + original presence data. + + ## Testing with Presence + + Every time the `fetch` callback is invoked, it is done from a separate + process. Given those processes run asynchronously, it is often necessary + to guarantee they have been shutdown at the end of every test. This can + be done by using ExUnit's `on_exit` hook plus `fetchers_pids` function: + + on_exit(fn -> + for pid <- MyAppWeb.Presence.fetchers_pids() do + ref = Process.monitor(pid) + assert_receive {:DOWN, ^ref, _, _, _}, 1000 + end + end) + + """ + + @type presences :: %{String.t => %{metas: [map()]}} + @type presence :: %{key: String.t, meta: map()} + @type topic :: String.t + + @doc """ + Track a channel's process as a presence. + + Tracked presences are grouped by `key`, cast as a string. For example, to + group each user's channels together, use user IDs as keys. Each presence can + be associated with a map of metadata to store small, ephemeral state, such as + a user's online status. To store detailed information, see `c:fetch/2`. + + ## Example + + alias MyApp.Presence + def handle_info(:after_join, socket) do + {:ok, _} = Presence.track(socket, socket.assigns.user_id, %{ + online_at: inspect(System.system_time(:second)) + }) + {:noreply, socket} + end + + """ + @callback track(socket :: Phoenix.Socket.t, key :: String.t, meta :: map()) :: + {:ok, ref :: binary()} | + {:error, reason :: term()} + + @doc """ + Track an arbitrary process as a presence. + + Same with `track/3`, except track any process by `topic` and `key`. + """ + @callback track(pid, topic, key :: String.t, meta :: map()) :: + {:ok, ref :: binary()} | + {:error, reason :: term()} + + @doc """ + Stop tracking a channel's process. + """ + @callback untrack(socket :: Phoenix.Socket.t, key :: String.t) :: :ok + + @doc """ + Stop tracking a process. + """ + @callback untrack(pid, topic, key :: String.t) :: :ok + + @doc """ + Update a channel presence's metadata. + + Replace a presence's metadata by passing a new map or a function that takes + the current map and returns a new one. + """ + @callback update(socket :: Phoenix.Socket.t, key :: String.t, meta :: map() | (map() -> map())) :: + {:ok, ref :: binary()} | + {:error, reason :: term()} + + @doc """ + Update a process presence's metadata. + + Same as `update/3`, but with an arbitrary process. + """ + @callback update(pid, topic, key :: String.t, meta :: map() | (map() -> map())) :: + {:ok, ref :: binary()} | + {:error, reason :: term()} + + @doc """ + Returns presences for a socket/topic. 
+ + ## Presence data structure + + The presence information is returned as a map with presences grouped + by key, cast as a string, and accumulated metadata, with the following form: + + %{key => %{metas: [%{phx_ref: ..., ...}, ...]}} + + For example, imagine a user with id `123` online from two + different devices, as well as a user with id `456` online from + just one device. The following presence information might be returned: + + %{"123" => %{metas: [%{status: "away", phx_ref: ...}, + %{status: "online", phx_ref: ...}]}, + "456" => %{metas: [%{status: "online", phx_ref: ...}]}} + + The keys of the map will usually point to a resource ID. The value + will contain a map with a `:metas` key containing a list of metadata + for each resource. Additionally, every metadata entry will contain a + `:phx_ref` key which can be used to uniquely identify metadata for a + given key. In the event that the metadata was previously updated, + a `:phx_ref_prev` key will be present containing the previous + `:phx_ref` value. + """ + @callback list(Phoenix.Socket.t | topic) :: presences + + @doc """ + Returns the map of presence metadata for a socket/topic-key pair. + + ## Examples + + Uses the same data format as `c:list/1`, but only + returns metadata for the presences under a topic and key pair. For example, + a user with key `"user1"`, connected to the same chat room `"room:1"` from two + devices, could return: + + iex> MyPresence.get_by_key("room:1", "user1") + [%{name: "User 1", metas: [%{device: "Desktop"}, %{device: "Mobile"}]}] + + Like `c:list/1`, the presence metadata is passed to the `fetch` + callback of your presence module to fetch any additional information. + """ + @callback get_by_key(Phoenix.Socket.t | topic, key :: String.t) :: presences + + @doc """ + Extend presence information with additional data. + + When `c:list/1` is used to list all presences of the given `topic`, this + callback is triggered once to modify the result before it is broadcasted to + all channel subscribers. This avoids N query problems and provides a single + place to extend presence metadata. You must return a map of data matching the + original result, including the `:metas` key, but can extend the map to include + any additional information. + + The default implementation simply passes `presences` through unchanged. 
+ + ## Example + + def fetch(_topic, presences) do + query = + from u in User, + where: u.id in ^Map.keys(presences), + select: {u.id, u} + + users = query |> Repo.all() |> Enum.into(%{}) + for {key, %{metas: metas}} <- presences, into: %{} do + {key, %{metas: metas, user: users[key]}} + end + end + + """ + @callback fetch(topic, presences) :: presences + + defmacro __using__(opts) do + quote location: :keep, bind_quoted: [opts: opts] do + @behaviour Phoenix.Presence + @opts opts + @task_supervisor Module.concat(__MODULE__, "TaskSupervisor") + + _ = opts[:otp_app] || raise "use Phoenix.Presence expects :otp_app to be given" + + # User defined + + def fetch(_topic, presences), do: presences + defoverridable fetch: 2 + + # Private + + def child_spec(opts) do + opts = Keyword.merge(@opts, opts) + + %{ + id: __MODULE__, + start: {Phoenix.Presence, :start_link, [__MODULE__, @task_supervisor, opts]}, + type: :supervisor + } + end + + # API + + def track(%Phoenix.Socket{} = socket, key, meta) do + track(socket.channel_pid, socket.topic, key, meta) + end + def track(pid, topic, key, meta) do + Phoenix.Tracker.track(__MODULE__, pid, topic, key, meta) + end + + def untrack(%Phoenix.Socket{} = socket, key) do + untrack(socket.channel_pid, socket.topic, key) + end + def untrack(pid, topic, key) do + Phoenix.Tracker.untrack(__MODULE__, pid, topic, key) + end + + def update(%Phoenix.Socket{} = socket, key, meta) do + update(socket.channel_pid, socket.topic, key, meta) + end + def update(pid, topic, key, meta) do + Phoenix.Tracker.update(__MODULE__, pid, topic, key, meta) + end + + def list(%Phoenix.Socket{topic: topic}), do: list(topic) + def list(topic), do: Phoenix.Presence.list(__MODULE__, topic) + + def get_by_key(%Phoenix.Socket{topic: topic}, key), do: get_by_key(topic, key) + def get_by_key(topic, key), do: Phoenix.Presence.get_by_key(__MODULE__, topic, key) + + def fetchers_pids(), do: Task.Supervisor.children(@task_supervisor) + end + end + + defmodule Tracker do + @moduledoc false + use Phoenix.Tracker + + def start_link({module, task_supervisor, opts}) do + pubsub_server = + opts[:pubsub_server] || raise "use Phoenix.Presence expects :pubsub_server to be given" + + Phoenix.Tracker.start_link(__MODULE__, {module, task_supervisor, pubsub_server}, opts) + end + + def init(state) do + {:ok, state} + end + + def handle_diff(diff, state) do + {module, task_supervisor, pubsub_server} = state + + Task.Supervisor.start_child(task_supervisor, fn -> + for {topic, {joins, leaves}} <- diff do + Phoenix.Channel.Server.local_broadcast(pubsub_server, topic, "presence_diff", %{ + joins: module.fetch(topic, Phoenix.Presence.group(joins)), + leaves: module.fetch(topic, Phoenix.Presence.group(leaves)) + }) + end + end) + + {:ok, state} + end + end + + @doc false + def start_link(module, task_supervisor, opts) do + otp_app = opts[:otp_app] + + opts = + opts + |> Keyword.merge(Application.get_env(otp_app, module, [])) + |> Keyword.put(:name, module) + + children = [ + {Task.Supervisor, name: task_supervisor}, + {Tracker, {module, task_supervisor, opts}} + ] + + sup_opts = [ + strategy: :rest_for_one, + name: Module.concat(module, "Supervisor") + ] + + Supervisor.start_link(children, sup_opts) + end + + @doc false + def list(module, topic) do + grouped = + module + |> Phoenix.Tracker.list(topic) + |> group() + + module.fetch(topic, grouped) + end + + @doc false + def get_by_key(module, topic, key) do + string_key = to_string(key) + + case Phoenix.Tracker.get_by_key(module, topic, key) do + [] -> [] + [_|_] = 
pid_metas -> + metas = Enum.map(pid_metas, fn {_pid, meta} -> meta end) + %{^string_key => fetched_metas} = module.fetch(topic, %{string_key => %{metas: metas}}) + fetched_metas + end + end + + @doc false + def group(presences) do + presences + |> Enum.reverse() + |> Enum.reduce(%{}, fn {key, meta}, acc -> + Map.update(acc, to_string(key), %{metas: [meta]}, fn %{metas: metas} -> + %{metas: [meta | metas]} + end) + end) + end +end diff --git a/deps/phoenix/lib/phoenix/router.ex b/deps/phoenix/lib/phoenix/router.ex new file mode 100644 index 0000000..789bab7 --- /dev/null +++ b/deps/phoenix/lib/phoenix/router.ex @@ -0,0 +1,1065 @@ +defmodule Phoenix.Router do + defmodule NoRouteError do + @moduledoc """ + Exception raised when no route is found. + """ + defexception plug_status: 404, message: "no route found", conn: nil, router: nil + + def exception(opts) do + conn = Keyword.fetch!(opts, :conn) + router = Keyword.fetch!(opts, :router) + path = "/" <> Enum.join(conn.path_info, "/") + + %NoRouteError{message: "no route found for #{conn.method} #{path} (#{inspect router})", + conn: conn, router: router} + end + end + + defmodule MalformedURIError do + @moduledoc """ + Exception raised when the URI is malformed on matching. + """ + defexception [:message, plug_status: 400] + end + + @moduledoc """ + Defines a Phoenix router. + + The router provides a set of macros for generating routes + that dispatch to specific controllers and actions. Those + macros are named after HTTP verbs. For example: + + defmodule MyAppWeb.Router do + use Phoenix.Router + + get "/pages/:page", PageController, :show + end + + The `get/3` macro above accepts a request to `/pages/hello` and dispatches + it to `PageController`'s `show` action with `%{"page" => "hello"}` in + `params`. + + Phoenix's router is extremely efficient, as it relies on Elixir + pattern matching for matching routes and serving requests. + + ## Routing + + `get/3`, `post/3`, `put/3` and other macros named after HTTP verbs are used + to create routes. + + The route: + + get "/pages", PageController, :index + + matches a `GET` request to `/pages` and dispatches it to the `index` action in + `PageController`. + + get "/pages/:page", PageController, :show + + matches `/pages/hello` and dispatches to the `show` action with + `%{"page" => "hello"}` in `params`. + + defmodule PageController do + def show(conn, params) do + # %{"page" => "hello"} == params + end + end + + Partial and multiple segments can be matched. For example: + + get "/api/v:version/pages/:id", PageController, :show + + matches `/api/v1/pages/2` and puts `%{"version" => "1", "id" => "2"}` in + `params`. Only the trailing part of a segment can be captured. + + Routes are matched from top to bottom. The second route here: + + get "/pages/:page", PageController, :show + get "/pages/hello", PageController, :hello + + will never match `/pages/hello` because `/pages/:page` matches that first. + + Routes can use glob-like patterns to match trailing segments. + + get "/pages/*page", PageController, :show + + matches `/pages/hello/world` and puts the globbed segments in `params["page"]`. 
+ + GET /pages/hello/world + %{"page" => ["hello", "world"]} = params + + Globs cannot have prefixes nor suffixes, but can be mixed with variables: + + get "/pages/he:page/*rest", PageController, :show + + matches + + GET /pages/hello + %{"page" => "llo", "rest" => []} = params + + GET /pages/hey/there/world + %{"page" => "y", "rest" => ["there" "world"]} = params + + ## Helpers + + Phoenix automatically generates a module `Helpers` inside your router + which contains named helpers to help developers generate and keep + their routes up to date. + + Helpers are automatically generated based on the controller name. + For example, the route: + + get "/pages/:page", PageController, :show + + will generate the following named helper: + + MyAppWeb.Router.Helpers.page_path(conn_or_endpoint, :show, "hello") + "/pages/hello" + + MyAppWeb.Router.Helpers.page_path(conn_or_endpoint, :show, "hello", some: "query") + "/pages/hello?some=query" + + MyAppWeb.Router.Helpers.page_url(conn_or_endpoint, :show, "hello") + "http://example.com/pages/hello" + + MyAppWeb.Router.Helpers.page_url(conn_or_endpoint, :show, "hello", some: "query") + "http://example.com/pages/hello?some=query" + + If the route contains glob-like patterns, parameters for those have to be given as + list: + + MyAppWeb.Router.Helpers.page_path(conn_or_endpoint, :show, ["hello", "world"]) + "/pages/hello/world" + + The URL generated in the named URL helpers is based on the configuration for + `:url`, `:http` and `:https`. However, if for some reason you need to manually + control the URL generation, the url helpers also allow you to pass in a `URI` + struct: + + uri = %URI{scheme: "https", host: "other.example.com"} + MyAppWeb.Router.Helpers.page_url(uri, :show, "hello") + "https://other.example.com/pages/hello" + + The named helper can also be customized with the `:as` option. Given + the route: + + get "/pages/:page", PageController, :show, as: :special_page + + the named helper will be: + + MyAppWeb.Router.Helpers.special_page_path(conn, :show, "hello") + "/pages/hello" + + ## Scopes and Resources + + It is very common in Phoenix applications to namespace all of your + routes under the application scope: + + scope "/", MyAppWeb do + get "/pages/:id", PageController, :show + end + + The route above will dispatch to `MyAppWeb.PageController`. This syntax + is not only convenient for developers, since we don't have to repeat + the `MyAppWeb.` prefix on all routes, but it also allows Phoenix to put + less pressure on the Elixir compiler. If instead we had written: + + get "/pages/:id", MyAppWeb.PageController, :show + + The Elixir compiler would infer that the router depends directly on + `MyAppWeb.PageController`, which is not true. By using scopes, Phoenix + can properly hint to the Elixir compiler the controller is not an + actual dependency of the router. This provides more efficient + compilation times. + + Scopes allow us to scope on any path or even on the helper name: + + scope "/api/v1", MyAppWeb, as: :api_v1 do + get "/pages/:id", PageController, :show + end + + For example, the route above will match on the path `"/api/v1/pages/:id"` + and the named route will be `api_v1_page_path`, as expected from the + values given to `scope/2` option. 
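+
+  As a quick illustration (the module name and the `"123"` id below are only
+  placeholders), the helper generated for the scoped route above could be
+  called as:
+
+      MyAppWeb.Router.Helpers.api_v1_page_path(conn_or_endpoint, :show, "123")
+      "/api/v1/pages/123"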
+ + Phoenix also provides a `resources/4` macro that allows developers + to generate "RESTful" routes to a given resource: + + defmodule MyAppWeb.Router do + use Phoenix.Router + + resources "/pages", PageController, only: [:show] + resources "/users", UserController, except: [:delete] + end + + Finally, Phoenix ships with a `mix phx.routes` task that nicely + formats all routes in a given router. We can use it to verify all + routes included in the router above: + + $ mix phx.routes + page_path GET /pages/:id PageController.show/2 + user_path GET /users UserController.index/2 + user_path GET /users/:id/edit UserController.edit/2 + user_path GET /users/new UserController.new/2 + user_path GET /users/:id UserController.show/2 + user_path POST /users UserController.create/2 + user_path PATCH /users/:id UserController.update/2 + PUT /users/:id UserController.update/2 + + One can also pass a router explicitly as an argument to the task: + + $ mix phx.routes MyAppWeb.Router + + Check `scope/2` and `resources/4` for more information. + + ## Pipelines and plugs + + Once a request arrives at the Phoenix router, it performs + a series of transformations through pipelines until the + request is dispatched to a desired end-point. + + Such transformations are defined via plugs, as defined + in the [Plug](https://github.com/elixir-lang/plug) specification. + Once a pipeline is defined, it can be piped through per scope. + + For example: + + defmodule MyAppWeb.Router do + use Phoenix.Router + + pipeline :browser do + plug :fetch_session + plug :accepts, ["html"] + end + + scope "/" do + pipe_through :browser + + # browser related routes and resources + end + end + + `Phoenix.Router` imports functions from both `Plug.Conn` and `Phoenix.Controller` + to help define plugs. In the example above, `fetch_session/2` + comes from `Plug.Conn` while `accepts/2` comes from `Phoenix.Controller`. + + Note that router pipelines are only invoked after a route is found. + No plug is invoked in case no matches were found. + """ + + alias Phoenix.Router.{Resource, Scope, Route, Helpers} + + @http_methods [:get, :post, :put, :patch, :delete, :options, :connect, :trace, :head] + + @doc false + defmacro __using__(opts) do + quote do + unquote(prelude(opts)) + unquote(defs()) + unquote(match_dispatch()) + end + end + + defp prelude(opts) do + quote do + @helpers_moduledoc Keyword.get(unquote(opts), :helpers_moduledoc, true) + + Module.register_attribute __MODULE__, :phoenix_routes, accumulate: true + @phoenix_forwards %{} + + import Phoenix.Router + + # TODO v2: No longer automatically import dependencies + import Plug.Conn + import Phoenix.Controller + + # Set up initial scope + @phoenix_pipeline nil + Phoenix.Router.Scope.init(__MODULE__) + @before_compile unquote(__MODULE__) + end + end + + # Because those macros are executed multiple times, + # we end-up generating a huge scope that drastically + # affects compilation. We work around it by defining + # those functions only once and calling it over and + # over again. 
+ defp defs() do + quote unquote: false do + var!(add_resources, Phoenix.Router) = fn resource -> + path = resource.path + ctrl = resource.controller + opts = resource.route + + if resource.singleton do + Enum.each resource.actions, fn + :show -> get path, ctrl, :show, opts + :new -> get path <> "/new", ctrl, :new, opts + :edit -> get path <> "/edit", ctrl, :edit, opts + :create -> post path, ctrl, :create, opts + :delete -> delete path, ctrl, :delete, opts + :update -> + patch path, ctrl, :update, opts + put path, ctrl, :update, Keyword.put(opts, :as, nil) + end + else + param = resource.param + + Enum.each resource.actions, fn + :index -> get path, ctrl, :index, opts + :show -> get path <> "/:" <> param, ctrl, :show, opts + :new -> get path <> "/new", ctrl, :new, opts + :edit -> get path <> "/:" <> param <> "/edit", ctrl, :edit, opts + :create -> post path, ctrl, :create, opts + :delete -> delete path <> "/:" <> param, ctrl, :delete, opts + :update -> + patch path <> "/:" <> param, ctrl, :update, opts + put path <> "/:" <> param, ctrl, :update, Keyword.put(opts, :as, nil) + end + end + end + end + end + + @doc false + def __call__( + %{private: %{phoenix_router: router, phoenix_bypass: {router, pipes}}} = conn, + {metadata, prepare, pipeline, _} + ) do + conn = prepare.(conn, metadata) + + case pipes do + :current -> pipeline.(conn) + _ -> Enum.reduce(pipes, conn, fn pipe, acc -> apply(router, pipe, [acc, []]) end) + end + end + def __call__(%{private: %{phoenix_bypass: :all}} = conn, {metadata, prepare, _, _}) do + prepare.(conn, metadata) + end + def __call__(conn, {metadata, prepare, pipeline, {plug, opts}}) do + conn = prepare.(conn, metadata) + start = System.monotonic_time() + metadata = %{metadata | conn: conn} + :telemetry.execute([:phoenix, :router_dispatch, :start], %{system_time: System.system_time()}, metadata) + + case pipeline.(conn) do + %Plug.Conn{halted: true} = halted_conn -> + measurements = %{duration: System.monotonic_time() - start} + metadata = %{metadata | conn: halted_conn} + :telemetry.execute([:phoenix, :router_dispatch, :stop], measurements, metadata) + halted_conn + %Plug.Conn{} = piped_conn -> + try do + plug.call(piped_conn, plug.init(opts)) + else + conn -> + measurements = %{duration: System.monotonic_time() - start} + metadata = %{metadata | conn: conn} + :telemetry.execute([:phoenix, :router_dispatch, :stop], measurements, metadata) + conn + rescue + e in Plug.Conn.WrapperError -> + measurements = %{duration: System.monotonic_time() - start} + metadata = Map.merge(metadata, %{conn: conn, kind: :error, reason: e, stacktrace: __STACKTRACE__}) + :telemetry.execute([:phoenix, :router_dispatch, :exception], measurements, metadata) + Plug.Conn.WrapperError.reraise(e) + catch + kind, reason -> + measurements = %{duration: System.monotonic_time() - start} + metadata = Map.merge(metadata, %{conn: conn, kind: kind, reason: reason, stacktrace: __STACKTRACE__}) + :telemetry.execute([:phoenix, :router_dispatch, :exception], measurements, metadata) + Plug.Conn.WrapperError.reraise(piped_conn, kind, reason, __STACKTRACE__) + end + end + end + + defp match_dispatch() do + quote location: :keep do + @behaviour Plug + + @doc """ + Callback required by Plug that initializes the router + for serving web requests. + """ + def init(opts) do + opts + end + + @doc """ + Callback invoked by Plug on every request. 
+ """ + def call(conn, _opts) do + %{method: method, path_info: path_info, host: host} = conn = prepare(conn) + + decoded = + # TODO: Remove try/catch on Elixir v1.13 as decode no longer raises + try do + Enum.map(path_info, &URI.decode/1) + rescue + ArgumentError -> + raise MalformedURIError, "malformed URI path: #{inspect conn.request_path}" + end + + case __match_route__(method, decoded, host) do + :error -> raise NoRouteError, conn: conn, router: __MODULE__ + match -> Phoenix.Router.__call__(conn, match) + end + end + + defoverridable [init: 1, call: 2] + end + end + + @doc false + defmacro __before_compile__(env) do + routes = env.module |> Module.get_attribute(:phoenix_routes) |> Enum.reverse + routes_with_exprs = Enum.map(routes, &{&1, Route.exprs(&1)}) + + helpers_moduledoc = Module.get_attribute(env.module, :helpers_moduledoc) + + Helpers.define(env, routes_with_exprs, docs: helpers_moduledoc) + {matches, _} = Enum.map_reduce(routes_with_exprs, %{}, &build_match/2) + + checks = + for %{line: line, plug: plug, plug_opts: plug_opts} <- routes, into: %{} do + quote line: line do + {unquote(plug).init(unquote(Macro.escape(plug_opts))), []} + end + end + + match_404 = + quote [generated: true] do + def __match_route__(_method, _path_info, _host) do + :error + end + end + + keys = [:verb, :path, :plug, :plug_opts, :helper, :metadata] + routes = Enum.map(routes, &Map.take(&1, keys)) + + quote do + @doc false + def __routes__, do: unquote(Macro.escape(routes)) + + @doc false + def __checks__, do: unquote({:__block__, [], Map.keys(checks)}) + + @doc false + def __helpers__, do: __MODULE__.Helpers + + defp prepare(conn) do + merge_private( + conn, + [ + {:phoenix_router, __MODULE__}, + {__MODULE__, {conn.script_name, @phoenix_forwards}} + ] + ) + end + + unquote(matches) + unquote(match_404) + end + end + + defp build_match({route, exprs}, known_pipelines) do + %{pipe_through: pipe_through} = route + + %{ + prepare: prepare, + dispatch: dispatch, + verb_match: verb_match, + path_params: path_params, + path: path, + host: host + } = exprs + + {pipe_name, pipe_definition, known_pipelines} = + case known_pipelines do + %{^pipe_through => name} -> + {name, :ok, known_pipelines} + + %{} -> + name = :"__pipe_through#{map_size(known_pipelines)}__" + {name, build_pipes(name, pipe_through), Map.put(known_pipelines, pipe_through, name)} + end + + quoted = + quote line: route.line do + unquote(pipe_definition) + + @doc false + def __match_route__(unquote(verb_match), unquote(path), unquote(host)) do + {unquote(build_metadata(route, path_params)), + fn var!(conn, :conn), %{path_params: var!(path_params, :conn)} -> unquote(prepare) end, + &unquote(Macro.var(pipe_name, __MODULE__))/1, + unquote(dispatch)} + end + end + + {quoted, known_pipelines} + end + + defp build_metadata(route, path_params) do + %{ + path: path, + plug: plug, + plug_opts: plug_opts, + pipe_through: pipe_through, + metadata: metadata + } = route + + pairs = [ + conn: nil, + route: path, + plug: plug, + plug_opts: Macro.escape(plug_opts), + path_params: path_params, + pipe_through: pipe_through + ] + + {:%{}, [], pairs ++ Macro.escape(Map.to_list(metadata))} + end + + defp build_pipes(name, []) do + quote do + defp unquote(name)(conn), do: conn + end + end + + defp build_pipes(name, pipe_through) do + plugs = pipe_through |> Enum.reverse |> Enum.map(&{&1, [], true}) + opts = [init_mode: Phoenix.plug_init_mode(), log_on_halt: :debug] + {conn, body} = Plug.Builder.compile(__ENV__, plugs, opts) + + quote do + defp 
unquote(name)(unquote(conn)), do: unquote(body) + end + end + + @doc """ + Generates a route match based on an arbitrary HTTP method. + + Useful for defining routes not included in the builtin macros. + + The catch-all verb, `:*`, may also be used to match all HTTP methods. + + ## Options + + * `:as` - configures the named helper exclusively. If false, does not generate + a helper. + * `:alias` - configure if the scope alias should be applied to the route. + Defaults to true, disables scoping if false. + * `:log` - the level to log the route dispatching under, + may be set to false. Defaults to `:debug` + * `:host` - a string containing the host scope, or prefix host scope, + ie `"foo.bar.com"`, `"foo."` + * `:private` - a map of private data to merge into the connection + when a route matches + * `:assigns` - a map of data to merge into the connection when a route matches + * `:metadata` - a map of metadata used by the telemetry events and returned by + `route_info/4` + * `:trailing_slash` - a boolean to flag whether or not the helper functions + append a trailing slash. Defaults to `false`. + + ## Examples + + match(:move, "/events/:id", EventController, :move) + + match(:*, "/any", SomeController, :any) + + """ + defmacro match(verb, path, plug, plug_opts, options \\ []) do + add_route(:match, verb, path, plug, plug_opts, options) + end + + for verb <- @http_methods do + @doc """ + Generates a route to handle a #{verb} request to the given path. + + #{verb}("/events/:id", EventController, :action) + + See `match/5` for options. + """ + defmacro unquote(verb)(path, plug, plug_opts, options \\ []) do + add_route(:match, unquote(verb), path, plug, plug_opts, options) + end + end + + defp add_route(kind, verb, path, plug, plug_opts, options) do + quote do + @phoenix_routes Scope.route( + __ENV__.line, + __ENV__.module, + unquote(kind), + unquote(verb), + unquote(path), + unquote(plug), + unquote(plug_opts), + unquote(options) + ) + end + end + + @doc """ + Defines a plug pipeline. + + Pipelines are defined at the router root and can be used + from any scope. + + ## Examples + + pipeline :api do + plug :token_authentication + plug :dispatch + end + + A scope may then use this pipeline as: + + scope "/" do + pipe_through :api + end + + Every time `pipe_through/1` is called, the new pipelines + are appended to the ones previously given. + """ + defmacro pipeline(plug, do: block) do + with true <- is_atom(plug), + imports = __CALLER__.macros ++ __CALLER__.functions, + {mod, _} <- Enum.find(imports, fn {_, imports} -> {plug, 2} in imports end) do + raise ArgumentError, + "cannot define pipeline named #{inspect(plug)} " <> + "because there is an import from #{inspect(mod)} with the same name" + end + + block = + quote do + plug = unquote(plug) + @phoenix_pipeline [] + unquote(block) + end + + compiler = + quote unquote: false do + Scope.pipeline(__MODULE__, plug) + {conn, body} = Plug.Builder.compile(__ENV__, @phoenix_pipeline, + init_mode: Phoenix.plug_init_mode()) + + def unquote(plug)(unquote(conn), _) do + try do + unquote(body) + rescue + e in Plug.Conn.WrapperError -> + Plug.Conn.WrapperError.reraise(e) + catch + :error, reason -> + Plug.Conn.WrapperError.reraise(unquote(conn), :error, reason, __STACKTRACE__) + end + end + @phoenix_pipeline nil + end + + quote do + try do + unquote(block) + unquote(compiler) + after + :ok + end + end + end + + @doc """ + Defines a plug inside a pipeline. + + See `pipeline/2` for more information. 
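+
+  As a minimal sketch (the plug names below are examples only, not a required
+  set), function plugs and module plugs may be mixed inside a pipeline:
+
+      pipeline :browser do
+        plug :accepts, ["html"]
+        plug :fetch_session
+        plug Plug.RequestId
+      end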
+ """ + defmacro plug(plug, opts \\ []) do + {plug, opts} = expand_plug_and_opts(plug, opts, __CALLER__) + + quote do + if pipeline = @phoenix_pipeline do + @phoenix_pipeline [{unquote(plug), unquote(opts), true}|pipeline] + else + raise "cannot define plug at the router level, plug must be defined inside a pipeline" + end + end + end + + defp expand_plug_and_opts(plug, opts, caller) do + runtime? = Phoenix.plug_init_mode() == :runtime + + plug = + if runtime? do + expand_alias(plug, caller) + else + plug + end + + opts = + if runtime? and Macro.quoted_literal?(opts) do + Macro.prewalk(opts, &expand_alias(&1, caller)) + else + opts + end + + {plug, opts} + end + + defp expand_alias({:__aliases__, _, _} = alias, env), + do: Macro.expand(alias, %{env | function: {:init, 1}}) + + defp expand_alias(other, _env), do: other + + @doc """ + Defines a list of plugs (and pipelines) to send the connection through. + + See `pipeline/2` for more information. + """ + defmacro pipe_through(pipes) do + pipes = + if Phoenix.plug_init_mode() == :runtime and Macro.quoted_literal?(pipes) do + Macro.prewalk(pipes, &expand_alias(&1, __CALLER__)) + else + pipes + end + + quote do + if pipeline = @phoenix_pipeline do + raise "cannot pipe_through inside a pipeline" + else + Scope.pipe_through(__MODULE__, unquote(pipes)) + end + end + end + + @doc """ + Defines "RESTful" routes for a resource. + + The given definition: + + resources "/users", UserController + + will include routes to the following actions: + + * `GET /users` => `:index` + * `GET /users/new` => `:new` + * `POST /users` => `:create` + * `GET /users/:id` => `:show` + * `GET /users/:id/edit` => `:edit` + * `PATCH /users/:id` => `:update` + * `PUT /users/:id` => `:update` + * `DELETE /users/:id` => `:delete` + + ## Options + + This macro accepts a set of options: + + * `:only` - a list of actions to generate routes for, for example: `[:show, :edit]` + * `:except` - a list of actions to exclude generated routes from, for example: `[:delete]` + * `:param` - the name of the parameter for this resource, defaults to `"id"` + * `:name` - the prefix for this resource. This is used for the named helper + and as the prefix for the parameter in nested resources. The default value + is automatically derived from the controller name, i.e. `UserController` will + have name `"user"` + * `:as` - configures the named helper exclusively + * `:singleton` - defines routes for a singleton resource that is looked up by + the client without referencing an ID. Read below for more information + + ## Singleton resources + + When a resource needs to be looked up without referencing an ID, because + it contains only a single entry in the given context, the `:singleton` + option can be used to generate a set of routes that are specific to + such single resource: + + * `GET /user` => `:show` + * `GET /user/new` => `:new` + * `POST /user` => `:create` + * `GET /user/edit` => `:edit` + * `PATCH /user` => `:update` + * `PUT /user` => `:update` + * `DELETE /user` => `:delete` + + Usage example: + + resources "/account", AccountController, only: [:show], singleton: true + + ## Nested Resources + + This macro also supports passing a nested block of route definitions. + This is helpful for nesting children resources within their parents to + generate nested routes. 
+ + The given definition: + + resources "/users", UserController do + resources "/posts", PostController + end + + will include the following routes: + + user_post_path GET /users/:user_id/posts PostController :index + user_post_path GET /users/:user_id/posts/:id/edit PostController :edit + user_post_path GET /users/:user_id/posts/new PostController :new + user_post_path GET /users/:user_id/posts/:id PostController :show + user_post_path POST /users/:user_id/posts PostController :create + user_post_path PATCH /users/:user_id/posts/:id PostController :update + PUT /users/:user_id/posts/:id PostController :update + user_post_path DELETE /users/:user_id/posts/:id PostController :delete + + """ + defmacro resources(path, controller, opts, do: nested_context) do + add_resources path, controller, opts, do: nested_context + end + + @doc """ + See `resources/4`. + """ + defmacro resources(path, controller, do: nested_context) do + add_resources path, controller, [], do: nested_context + end + + defmacro resources(path, controller, opts) do + add_resources path, controller, opts, do: nil + end + + @doc """ + See `resources/4`. + """ + defmacro resources(path, controller) do + add_resources path, controller, [], do: nil + end + + defp add_resources(path, controller, options, do: context) do + scope = + if context do + quote do + scope resource.member, do: unquote(context) + end + end + + quote do + resource = Resource.build(unquote(path), unquote(controller), unquote(options)) + var!(add_resources, Phoenix.Router).(resource) + unquote(scope) + end + end + + @doc """ + Defines a scope in which routes can be nested. + + ## Examples + + scope path: "/api/v1", as: :api_v1, alias: API.V1 do + get "/pages/:id", PageController, :show + end + + The generated route above will match on the path `"/api/v1/pages/:id"` + and will dispatch to `:show` action in `API.V1.PageController`. A named + helper `api_v1_page_path` will also be generated. + + ## Options + + The supported options are: + + * `:path` - a string containing the path scope. + * `:as` - a string or atom containing the named helper scope. When set to + false, it resets the nested helper scopes. + * `:alias` - an alias (atom) containing the controller scope. When set to + false, it resets all nested aliases. + * `:host` - a string containing the host scope, or prefix host scope, + ie `"foo.bar.com"`, `"foo."` + * `:private` - a map of private data to merge into the connection when a route matches + * `:assigns` - a map of data to merge into the connection when a route matches + * `:log` - the level to log the route dispatching under, + may be set to false. Defaults to `:debug` + * `:trailing_slash` - whether or not the helper functions append a trailing + slash. Defaults to `false`. + + """ + defmacro scope(options, do: context) do + options = + if Macro.quoted_literal?(options) do + Macro.prewalk(options, &expand_alias(&1, __CALLER__)) + else + options + end + + do_scope(options, context) + end + + @doc """ + Define a scope with the given path. + + This function is a shortcut for: + + scope path: path do + ... 
+ end + + ## Examples + + scope "/api/v1", as: :api_v1 do + get "/pages/:id", PageController, :show + end + + """ + defmacro scope(path, options, do: context) do + options = + if Macro.quoted_literal?(options) do + Macro.prewalk(options, &expand_alias(&1, __CALLER__)) + else + options + end + + options = + quote do + path = unquote(path) + + case unquote(options) do + alias when is_atom(alias) -> [path: path, alias: alias] + options when is_list(options) -> Keyword.put(options, :path, path) + end + end + + do_scope(options, context) + end + + @doc """ + Defines a scope with the given path and alias. + + This function is a shortcut for: + + scope path: path, alias: alias do + ... + end + + ## Examples + + scope "/api/v1", API.V1, as: :api_v1 do + get "/pages/:id", PageController, :show + end + + """ + defmacro scope(path, alias, options, do: context) do + alias = expand_alias(alias, __CALLER__) + + options = quote do + unquote(options) + |> Keyword.put(:path, unquote(path)) + |> Keyword.put(:alias, unquote(alias)) + end + + do_scope(options, context) + end + + defp do_scope(options, context) do + quote do + Scope.push(__MODULE__, unquote(options)) + try do + unquote(context) + after + Scope.pop(__MODULE__) + end + end + end + + @doc """ + Returns the full alias with the current scope's aliased prefix. + + Useful for applying the same short-hand alias handling to + other values besides the second argument in route definitions. + + ## Examples + + scope "/", MyPrefix do + get "/", ProxyPlug, controller: scoped_alias(__MODULE__, MyController) + end + """ + def scoped_alias(router_module, alias) do + Scope.expand_alias(router_module, alias) + end + + @doc """ + Forwards a request at the given path to a plug. + + All paths that match the forwarded prefix will be sent to + the forwarded plug. This is useful for sharing a router between + applications or even breaking a big router into smaller ones. + The router pipelines will be invoked prior to forwarding the + connection. + + However, we don't advise forwarding to another endpoint. + The reason is that plugs defined by your app and the forwarded + endpoint would be invoked twice, which may lead to errors. + + ## Examples + + scope "/", MyApp do + pipe_through [:browser, :admin] + + forward "/admin", SomeLib.AdminDashboard + forward "/api", ApiRouter + end + + """ + defmacro forward(path, plug, plug_opts \\ [], router_opts \\ []) do + {plug, plug_opts} = expand_plug_and_opts(plug, plug_opts, __CALLER__) + router_opts = Keyword.put(router_opts, :as, nil) + + quote unquote: true, bind_quoted: [path: path, plug: plug] do + plug = Scope.register_forwards(__MODULE__, path, plug) + unquote(add_route(:forward, :*, path, plug, plug_opts, router_opts)) + end + end + + @doc """ + Returns all routes information from the given router. + """ + def routes(router) do + router.__routes__() + end + + @doc """ + Returns the compile-time route info and runtime path params for a request. + + The `path` can be either a string or the `path_info` segments. + + A map of metadata is returned with the following keys: + + * `:log` - the configured log level. 
For example `:debug` + * `:path_params` - the map of runtime path params + * `:pipe_through` - the list of pipelines for the route's scope, for example `[:browser]` + * `:plug` - the plug to dispatch the route to, for example `AppWeb.PostController` + * `:plug_opts` - the options to pass when calling the plug, for example: `:index` + * `:route` - the string route pattern, such as `"/posts/:id"` + + ## Examples + + iex> Phoenix.Router.route_info(AppWeb.Router, "GET", "/posts/123", "myhost") + %{ + log: :debug, + path_params: %{"id" => "123"}, + pipe_through: [:browser], + plug: AppWeb.PostController, + plug_opts: :show, + route: "/posts/:id", + } + + iex> Phoenix.Router.route_info(MyRouter, "GET", "/not-exists", "myhost") + :error + """ + def route_info(router, method, path, host) when is_binary(path) do + split_path = for segment <- String.split(path, "/"), segment != "", do: segment + route_info(router, method, split_path, host) + end + + def route_info(router, method, split_path, host) when is_list(split_path) do + case router.__match_route__(method, split_path, host) do + {%{} = metadata, _prepare, _pipeline, {_plug, _opts}} -> Map.delete(metadata, :conn) + :error -> :error + end + end +end diff --git a/deps/phoenix/lib/phoenix/router/console_formatter.ex b/deps/phoenix/lib/phoenix/router/console_formatter.ex new file mode 100644 index 0000000..9b7af24 --- /dev/null +++ b/deps/phoenix/lib/phoenix/router/console_formatter.ex @@ -0,0 +1,101 @@ +defmodule Phoenix.Router.ConsoleFormatter do + @moduledoc false + + @doc """ + Format the routes for printing. + """ + def format(router, endpoint \\ nil) do + routes = Phoenix.Router.routes(router) + column_widths = calculate_column_widths(routes, endpoint) + + routes + |> Enum.map_join("", &format_route(&1, column_widths)) + |> Kernel.<>(format_endpoint(endpoint, column_widths)) + end + + defp format_endpoint(nil, _), do: "" + defp format_endpoint(endpoint, widths) do + case endpoint.__sockets__() do + [] -> "" + sockets -> + Enum.map_join(sockets, "", fn socket -> + format_websocket(socket, widths) <> + format_longpoll(socket, widths) + end) + end + end + + defp format_websocket({_path, Phoenix.LiveReloader.Socket, _opts}, _), do: "" + defp format_websocket({path, module, opts}, widths) do + if opts[:websocket] != false do + {verb_len, path_len, route_name_len} = widths + + String.pad_leading("websocket", route_name_len) <> " " <> + String.pad_trailing("WS", verb_len) <> " " <> + String.pad_trailing(path <> "/websocket", path_len) <> " " <> + inspect(module) <> + "\n" + else + "" + end + end + + defp format_longpoll({_path, Phoenix.LiveReloader.Socket, _opts}, _), do: "" + defp format_longpoll({path, module, opts}, widths) do + if opts[:longpoll] != false do + for method <- ["GET", "POST"], into: "" do + {verb_len, path_len, route_name_len} = widths + + String.pad_leading("longpoll", route_name_len) <> " " <> + String.pad_trailing(method, verb_len) <> " " <> + String.pad_trailing(path <> "/longpoll", path_len) <> " " <> + inspect(module) <> + "\n" + end + else + "" + end + end + + defp calculate_column_widths(routes, endpoint) do + sockets = endpoint && endpoint.__sockets__() || [] + + widths = + Enum.reduce(routes, {0, 0, 0}, fn route, acc -> + %{verb: verb, path: path, helper: helper} = route + verb = verb_name(verb) + {verb_len, path_len, route_name_len} = acc + route_name = route_name(helper) + + {max(verb_len, String.length(verb)), + max(path_len, String.length(path)), + max(route_name_len, String.length(route_name))} + end) + + 
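+    # Socket entries are printed alongside routes, so widen the path and name
+    # columns further to fit path <> "/websocket" and the "websocket" label.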
Enum.reduce(sockets, widths, fn {path, _mod, _opts}, acc -> + {verb_len, path_len, route_name_len} = acc + + {verb_len, + max(path_len, String.length(path <> "/websocket")), + max(route_name_len, String.length("websocket"))} + end) + end + + defp format_route(route, column_widths) do + %{verb: verb, path: path, plug: plug, metadata: metadata, plug_opts: plug_opts, helper: helper} = route + verb = verb_name(verb) + route_name = route_name(helper) + {verb_len, path_len, route_name_len} = column_widths + log_module = metadata[:log_module] || plug + + String.pad_leading(route_name, route_name_len) <> " " <> + String.pad_trailing(verb, verb_len) <> " " <> + String.pad_trailing(path, path_len) <> " " <> + "#{inspect(log_module)} #{inspect(plug_opts)}\n" + end + + defp route_name(nil), do: "" + defp route_name(name), do: name <> "_path" + + defp verb_name(verb), do: verb |> to_string() |> String.upcase() +end diff --git a/deps/phoenix/lib/phoenix/router/helpers.ex b/deps/phoenix/lib/phoenix/router/helpers.ex new file mode 100644 index 0000000..7e65dfa --- /dev/null +++ b/deps/phoenix/lib/phoenix/router/helpers.ex @@ -0,0 +1,429 @@ +defmodule Phoenix.Router.Helpers do + # Module that generates the routing helpers. + @moduledoc false + + alias Phoenix.Router.Route + alias Plug.Conn + + @doc """ + Callback invoked by the url generated in each helper module. + """ + def url(_router, %Conn{private: private}) do + case private do + %{phoenix_router_url: url} when is_binary(url) -> url + %{phoenix_endpoint: endpoint} -> endpoint.url() + end + end + + def url(_router, %_{endpoint: endpoint}) do + endpoint.url() + end + + def url(_router, %URI{} = uri) do + URI.to_string(%{uri | path: nil}) + end + + def url(_router, endpoint) when is_atom(endpoint) do + endpoint.url() + end + + def url(router, other) do + raise ArgumentError, + "expected a %Plug.Conn{}, a %Phoenix.Socket{}, a %URI{}, a struct with an :endpoint key, " <> + "or a Phoenix.Endpoint when building url for #{inspect(router)}, got: #{inspect(other)}" + end + + @doc """ + Callback invoked by path generated in each helper module. 
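+
+  For example, assuming this router is forwarded to under an "/admin" prefix,
+  `path(MyAppWeb.Router, conn, "/users/13")` returns `"/admin/users/13"`;
+  without any forwarding information it falls back to prefixing the
+  connection's script name.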
+ """ + def path(router, %Conn{} = conn, path) do + conn + |> build_own_forward_path(router, path) + |> Kernel.||(build_conn_forward_path(conn, router, path)) + |> Kernel.||(path_with_script(path, conn.script_name)) + end + + def path(_router, %URI{} = uri, path) do + (uri.path || "") <> path + end + + def path(_router, %_{endpoint: endpoint}, path) do + endpoint.path(path) + end + + def path(_router, endpoint, path) when is_atom(endpoint) do + endpoint.path(path) + end + + def path(router, other, _path) do + raise ArgumentError, + "expected a %Plug.Conn{}, a %Phoenix.Socket{}, a %URI{}, a struct with an :endpoint key, " <> + "or a Phoenix.Endpoint when building path for #{inspect(router)}, got: #{inspect(other)}" + end + + ## Helpers + + defp build_own_forward_path(conn, router, path) do + case Map.fetch(conn.private, router) do + {:ok, {local_script, _}} -> + path_with_script(path, local_script) + :error -> nil + end + end + + defp build_conn_forward_path(%Conn{private: %{phoenix_router: phx_router}} = conn, router, path) do + case Map.fetch(conn.private, phx_router) do + {:ok, {script_name, forwards}} -> + case Map.fetch(forwards, router) do + {:ok, local_script} -> + path_with_script(path, script_name ++ local_script) + :error -> nil + end + :error -> nil + end + end + defp build_conn_forward_path(_conn, _router, _path), do: nil + + defp path_with_script(path, []) do + path + end + defp path_with_script(path, script) do + "/" <> Enum.join(script, "/") <> path + end + + @doc """ + Generates the helper module for the given environment and routes. + """ + def define(env, routes, opts \\ []) do + # Ignore any route without helper or forwards. + routes = + Enum.reject(routes, fn {route, _exprs} -> + is_nil(route.helper) or route.kind == :forward + end) + + trailing_slash? = Enum.any?(routes, fn {route, _} -> route.trailing_slash? end) + groups = Enum.group_by(routes, fn {route, _exprs} -> route.helper end) + + impls = + for {_helper, helper_routes} <- groups, + {_, [{route, exprs} | _]} <- + helper_routes + |> Enum.group_by(fn {route, exprs} -> [length(exprs.binding) | route.plug_opts] end) + |> Enum.sort(), + do: defhelper(route, exprs) + + catch_all = Enum.map(groups, &defhelper_catch_all/1) + + defhelper = quote [generated: true, unquote: false] do + defhelper = fn helper, vars, opts, bins, segs, trailing_slash? 
-> + def unquote(:"#{helper}_path")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars)) do + unquote(:"#{helper}_path")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars), []) + end + + def unquote(:"#{helper}_path")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars), params) + when is_list(params) or is_map(params) do + path(conn_or_endpoint, segments(unquote(segs), params, unquote(bins), unquote(trailing_slash?), + {unquote(helper), unquote(Macro.escape(opts)), unquote(Enum.map(vars, &Macro.to_string/1))})) + end + + def unquote(:"#{helper}_url")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars)) do + unquote(:"#{helper}_url")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars), []) + end + + def unquote(:"#{helper}_url")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars), params) + when is_list(params) or is_map(params) do + url(conn_or_endpoint) <> unquote(:"#{helper}_path")(conn_or_endpoint, unquote(Macro.escape(opts)), unquote_splicing(vars), params) + end + end + end + + defcatch_all = quote [generated: true, unquote: false] do + defcatch_all = fn helper, binding_lengths, params_lengths, routes -> + for length <- binding_lengths do + binding = List.duplicate({:_, [], nil}, length) + arity = length + 2 + + def unquote(:"#{helper}_path")(conn_or_endpoint, action, unquote_splicing(binding)) do + path(conn_or_endpoint, "/") + raise_route_error(unquote(helper), :path, unquote(arity), action, []) + end + + def unquote(:"#{helper}_url")(conn_or_endpoint, action, unquote_splicing(binding)) do + url(conn_or_endpoint) + raise_route_error(unquote(helper), :url, unquote(arity), action, []) + end + end + + for length <- params_lengths do + binding = List.duplicate({:_, [], nil}, length) + arity = length + 2 + + def unquote(:"#{helper}_path")(conn_or_endpoint, action, unquote_splicing(binding), params) do + path(conn_or_endpoint, "/") + raise_route_error(unquote(helper), :path, unquote(arity + 1), action, params) + end + + def unquote(:"#{helper}_url")(conn_or_endpoint, action, unquote_splicing(binding), params) do + url(conn_or_endpoint) + raise_route_error(unquote(helper), :url, unquote(arity + 1), action, params) + end + end + + defp raise_route_error(unquote(helper), suffix, arity, action, params) do + Phoenix.Router.Helpers.raise_route_error( + __MODULE__, + "#{unquote(helper)}_#{suffix}", + arity, + action, + unquote(Macro.escape(routes)), + params + ) + end + end + end + + docs = Keyword.get(opts, :docs, true) + + # It is in general bad practice to generate large chunks of code + # inside quoted expressions. However, we can get away with this + # here for two reasons: + # + # * Helper modules are quite uncommon, typically one per project. + # + # * We inline most of the code for performance, so it is specific + # per helper module anyway. + # + code = quote do + @moduledoc unquote(docs) && """ + Module with named helpers generated from #{inspect unquote(env.module)}. + """ + unquote(defhelper) + unquote(defcatch_all) + unquote_splicing(impls) + unquote_splicing(catch_all) + + @doc """ + Generates the path information including any necessary prefix. + """ + def path(data, path) do + Phoenix.Router.Helpers.path(unquote(env.module), data, path) + end + + @doc """ + Generates the connection/endpoint base URL without any path information. 
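+
+      For example, `url(conn)` returns the endpoint's base URL, such as
+      `"https://example.com"` (the value depends on the endpoint configuration).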
+ """ + def url(data) do + Phoenix.Router.Helpers.url(unquote(env.module), data) + end + + @doc """ + Generates path to a static asset given its file path. + """ + def static_path(%Conn{private: private} = conn, path) do + private.phoenix_endpoint.static_path(path) + end + + def static_path(%_{endpoint: endpoint} = conn, path) do + endpoint.static_path(path) + end + + def static_path(endpoint, path) when is_atom(endpoint) do + endpoint.static_path(path) + end + + @doc """ + Generates url to a static asset given its file path. + """ + def static_url(%Conn{private: private}, path) do + case private do + %{phoenix_static_url: url} when is_binary(url) -> url <> path + %{phoenix_endpoint: endpoint} -> static_url(endpoint, path) + end + end + + def static_url(%_{endpoint: endpoint} = conn, path) do + static_url(endpoint, path) + end + + def static_url(endpoint, path) when is_atom(endpoint) do + endpoint.static_url() <> endpoint.static_path(path) + end + + @doc """ + Generates an integrity hash to a static asset given its file path. + """ + def static_integrity(%Conn{private: %{phoenix_endpoint: endpoint}}, path) do + static_integrity(endpoint, path) + end + + def static_integrity(%_{endpoint: endpoint}, path) do + static_integrity(endpoint, path) + end + + def static_integrity(endpoint, path) when is_atom(endpoint) do + endpoint.static_integrity(path) + end + + # Functions used by generated helpers + # Those are inlined here for performance + + defp to_param(int) when is_integer(int), do: Integer.to_string(int) + defp to_param(bin) when is_binary(bin), do: bin + defp to_param(false), do: "false" + defp to_param(true), do: "true" + defp to_param(data), do: Phoenix.Param.to_param(data) + + defp segments(segments, [], _reserved, trailing_slash?, _opts) do + maybe_append_slash(segments, trailing_slash?) + end + + defp segments(segments, query, reserved, trailing_slash?, _opts) when is_list(query) or is_map(query) do + dict = for {k, v} <- query, + not ((k = to_string(k)) in reserved), + do: {k, v} + + + case Conn.Query.encode dict, &to_param/1 do + "" -> maybe_append_slash(segments, trailing_slash?) + o -> maybe_append_slash(segments, trailing_slash?) <> "?" <> o + end + end + + if unquote(trailing_slash?) do + defp maybe_append_slash("/", _), do: "/" + defp maybe_append_slash(path, true), do: path <> "/" + end + + defp maybe_append_slash(path, _), do: path + end + + Module.create(Module.concat(env.module, Helpers), code, line: env.line, file: env.file) + end + + @doc """ + Receives a route and returns the quoted definition for its helper function. + + In case a helper name was not given, or route is forwarded, returns nil. + """ + def defhelper(%Route{} = route, exprs) do + helper = route.helper + opts = route.plug_opts + trailing_slash? = route.trailing_slash? 
+ + {bins, vars} = :lists.unzip(exprs.binding) + segs = expand_segments(exprs.path) + + quote do + defhelper.( + unquote(helper), + unquote(Macro.escape(vars)), + unquote(Macro.escape(opts)), + unquote(Macro.escape(bins)), + unquote(Macro.escape(segs)), + unquote(Macro.escape(trailing_slash?)) + ) + end + end + + def defhelper_catch_all({helper, routes_and_exprs}) do + routes = + routes_and_exprs + |> Enum.map(fn {routes, exprs} -> {routes.plug_opts, Enum.map(exprs.binding, &elem(&1, 0))} end) + |> Enum.sort() + + params_lengths = + routes + |> Enum.map(fn {_, bindings} -> length(bindings) end) + |> Enum.uniq() + + # Each helper defines catch alls like this: + # + # def helper_path(context, action, ...binding) + # def helper_path(context, action, ...binding, params) + # + # Given the helpers are ordered by binding length, the additional + # helper with param for a helper_path/n will always override the + # binding for helper_path/n+1, so we skip those here to avoid warnings. + binding_lengths = + Enum.reject(params_lengths, &(&1 - 1 in params_lengths)) + + quote do + defcatch_all.( + unquote(helper), + unquote(binding_lengths), + unquote(params_lengths), + unquote(Macro.escape(routes)) + ) + end + end + + @doc """ + Callback for generate router catch alls. + """ + def raise_route_error(mod, fun, arity, action, routes, params) do + cond do + is_atom(action) and not Keyword.has_key?(routes, action) -> + "no action #{inspect action} for #{inspect mod}.#{fun}/#{arity}" + |> invalid_route_error(fun, routes) + + is_list(params) or is_map(params) -> + "no function clause for #{inspect mod}.#{fun}/#{arity} and action #{inspect action}" + |> invalid_route_error(fun, routes) + + true -> + invalid_param_error(mod, fun, arity, action, routes) + end + end + + defp invalid_route_error(prelude, fun, routes) do + suggestions = + for {action, bindings} <- routes do + bindings = Enum.join([inspect(action) | bindings], ", ") + "\n #{fun}(conn_or_endpoint, #{bindings}, params \\\\ [])" + end + + raise ArgumentError, "#{prelude}. The following actions/clauses are supported:\n#{suggestions}" + end + + defp invalid_param_error(mod, fun, arity, action, routes) do + call_vars = Keyword.fetch!(routes, action) + + raise ArgumentError, """ + #{inspect(mod)}.#{fun}/#{arity} called with invalid params. + The last argument to this function should be a keyword list or a map. + For example: + + #{fun}(#{Enum.join(["conn", ":#{action}" | call_vars], ", ")}, page: 5, per_page: 10) + + It is possible you have called this function without defining the proper + number of path segments in your router. + """ + end + + @doc """ + Callback for properly encoding parameters in routes. 
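+
+  ## Examples
+
+      iex> Phoenix.Router.Helpers.encode_param("hello world")
+      "hello%20world"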
+ """ + def encode_param(str), do: URI.encode(str, &URI.char_unreserved?/1) + + defp expand_segments([]), do: "/" + + defp expand_segments(segments) when is_list(segments) do + expand_segments(segments, "") + end + + defp expand_segments(segments) do + quote(do: "/" <> Enum.map_join(unquote(segments), "/", &unquote(__MODULE__).encode_param/1)) + end + + defp expand_segments([{:|, _, [h, t]}], acc), + do: quote(do: unquote(expand_segments([h], acc)) <> "/" <> Enum.map_join(unquote(t), "/", &unquote(__MODULE__).encode_param/1)) + + defp expand_segments([h|t], acc) when is_binary(h), + do: expand_segments(t, quote(do: unquote(acc) <> unquote("/" <> h))) + + defp expand_segments([h|t], acc), + do: expand_segments(t, quote(do: unquote(acc) <> "/" <> unquote(__MODULE__).encode_param(to_param(unquote(h))))) + + defp expand_segments([], acc), + do: acc +end diff --git a/deps/phoenix/lib/phoenix/router/resource.ex b/deps/phoenix/lib/phoenix/router/resource.ex new file mode 100644 index 0000000..ca90feb --- /dev/null +++ b/deps/phoenix/lib/phoenix/router/resource.ex @@ -0,0 +1,84 @@ +defmodule Phoenix.Router.Resource do + # This module defines the Resource struct that is used + # throughout Phoenix's router. This struct is private + # as it contains internal routing information. + @moduledoc false + + alias Phoenix.Router.Resource + + @default_param_key "id" + @actions [:index, :edit, :new, :show, :create, :update, :delete] + + @doc """ + The `Phoenix.Router.Resource` struct. It stores: + + * `:path` - the path as string (not normalized) + * `:param` - the param to be used in routes (not normalized) + * `:controller` - the controller as an atom + * `:actions` - a list of actions as atoms + * `:route` - the context for resource routes + * `:member` - the context for member routes + * `:collection` - the context for collection routes + + """ + defstruct [:path, :actions, :param, :route, :controller, :member, :collection, :singleton] + @type t :: %Resource{} + + @doc """ + Builds a resource struct. 
+ """ + def build(path, controller, options) when is_atom(controller) and is_list(options) do + path = Phoenix.Router.Scope.validate_path(path) + alias = Keyword.get(options, :alias) + param = Keyword.get(options, :param, @default_param_key) + name = Keyword.get(options, :name, Phoenix.Naming.resource_name(controller, "Controller")) + as = Keyword.get(options, :as, name) + private = Keyword.get(options, :private, %{}) + assigns = Keyword.get(options, :assigns, %{}) + + singleton = Keyword.get(options, :singleton, false) + actions = extract_actions(options, singleton) + + route = [as: as, private: private, assigns: assigns] + collection = [path: path, as: as, private: private, assigns: assigns] + member_path = if singleton, do: path, else: Path.join(path, ":#{name}_#{param}") + member = [path: member_path, as: as, alias: alias, private: private, assigns: assigns] + + %Resource{path: path, actions: actions, param: param, route: route, + member: member, collection: collection, controller: controller, singleton: singleton} + end + + defp extract_actions(opts, singleton) do + only = Keyword.get(opts, :only) + except = Keyword.get(opts, :except) + + cond do + only -> + supported_actions = validate_actions(:only, singleton, only) + supported_actions -- (supported_actions -- only) + + except -> + supported_actions = validate_actions(:except, singleton, except) + supported_actions -- except + + true -> default_actions(singleton) + end + end + + defp validate_actions(type, singleton, actions) do + supported_actions = default_actions(singleton) + + unless actions -- supported_actions == [], do: raise ArgumentError, """ + invalid :#{type} action(s) passed to resources. + + supported#{if singleton, do: " singleton", else: ""} actions: #{inspect(supported_actions)} + + got: #{inspect(actions)} + """ + + supported_actions + end + + defp default_actions(true = _singleton), do: @actions -- [:index] + defp default_actions(false = _singleton), do: @actions +end diff --git a/deps/phoenix/lib/phoenix/router/route.ex b/deps/phoenix/lib/phoenix/router/route.ex new file mode 100644 index 0000000..f24bed0 --- /dev/null +++ b/deps/phoenix/lib/phoenix/router/route.ex @@ -0,0 +1,183 @@ +defmodule Phoenix.Router.Route do + # This module defines the Route struct that is used + # throughout Phoenix's router. This struct is private + # as it contains internal routing information. + @moduledoc false + + alias Phoenix.Router.Route + + @doc """ + The `Phoenix.Router.Route` struct. It stores: + + * `:verb` - the HTTP verb as an atom + * `:line` - the line the route was defined + * `:kind` - the kind of route, one of `:match`, `:forward` + * `:path` - the normalized path as string + * `:host` - the request host or host prefix + * `:plug` - the plug module + * `:plug_opts` - the plug options + * `:helper` - the name of the helper as a string (may be nil) + * `:private` - the private route info + * `:assigns` - the route info + * `:pipe_through` - the pipeline names as a list of atoms + * `:metadata` - general metadata used on telemetry events and route info + * `:trailing_slash?` - whether or not the helper functions append a trailing slash + """ + + defstruct [:verb, :line, :kind, :path, :host, :plug, :plug_opts, + :helper, :private, :pipe_through, :assigns, :metadata, + :trailing_slash?] 
+ + @type t :: %Route{} + + @doc "Used as a plug on forwarding" + def init(opts), do: opts + + @doc "Used as a plug on forwarding" + def call(%{path_info: path, script_name: script} = conn, {fwd_segments, plug, opts}) do + new_path = path -- fwd_segments + {base, ^new_path} = Enum.split(path, length(path) - length(new_path)) + conn = %{conn | path_info: new_path, script_name: script ++ base} + conn = plug.call(conn, plug.init(opts)) + %{conn | path_info: path, script_name: script} + end + + @doc """ + Receives the verb, path, plug, options and helper + and returns a `Phoenix.Router.Route` struct. + """ + @spec build(non_neg_integer, :match | :forward, atom, String.t, String.t | nil, atom, atom, atom | nil, list(atom), map, map, map, boolean) :: t + def build(line, kind, verb, path, host, plug, plug_opts, helper, pipe_through, private, assigns, metadata, trailing_slash?) + when is_atom(verb) and (is_binary(host) or is_nil(host)) and + is_atom(plug) and (is_binary(helper) or is_nil(helper)) and + is_list(pipe_through) and is_map(private) and is_map(assigns) and + is_map(metadata) and kind in [:match, :forward] and + is_boolean(trailing_slash?) do + %Route{kind: kind, verb: verb, path: path, host: host, private: private, + plug: plug, plug_opts: plug_opts, helper: helper, + pipe_through: pipe_through, assigns: assigns, line: line, metadata: metadata, + trailing_slash?: trailing_slash?} + end + + @doc """ + Builds the compiled expressions used by the route. + """ + def exprs(route) do + {path, binding} = build_path_and_binding(route) + + %{ + path: path, + host: build_host(route.host), + verb_match: verb_match(route.verb), + binding: binding, + prepare: build_prepare(route), + path_params: build_path_params(binding), + dispatch: build_dispatch(route) + } + end + + defp verb_match(:*), do: Macro.var(:_verb, nil) + defp verb_match(verb), do: verb |> to_string() |> String.upcase() + + defp build_path_params(binding), do: {:%{}, [], binding} + + defp build_path_and_binding(%Route{path: path} = route) do + {params, segments} = case route.kind do + :forward -> Plug.Router.Utils.build_path_match(path <> "/*_forward_path_info") + :match -> Plug.Router.Utils.build_path_match(path) + end + + binding = for var <- params, var != :_forward_path_info do + {Atom.to_string(var), Macro.var(var, nil)} + end + + {segments, binding} + end + + defp build_host(host) do + cond do + is_nil(host) -> quote do: _ + String.last(host) == "." 
-> quote do: unquote(host) <> _ + true -> host + end + end + + defp build_prepare(route) do + {match_params, merge_params} = build_params() + {match_private, merge_private} = build_prepare_expr(:private, route.private) + {match_assigns, merge_assigns} = build_prepare_expr(:assigns, route.assigns) + + match_all = match_params ++ match_private ++ match_assigns + merge_all = merge_params ++ merge_private ++ merge_assigns + + if merge_all != [] do + quote do + %{unquote_splicing(match_all)} = var!(conn, :conn) + %{var!(conn, :conn) | unquote_splicing(merge_all)} + end + else + quote do + var!(conn, :conn) + end + end + end + + defp build_dispatch(%Route{kind: :forward} = route) do + {_params, fwd_segments} = Plug.Router.Utils.build_path_match(route.path) + + quote do + { + Phoenix.Router.Route, + {unquote(fwd_segments), unquote(route.plug), unquote(Macro.escape(route.plug_opts))} + } + end + end + + defp build_dispatch(%Route{} = route) do + quote do + {unquote(route.plug), unquote(Macro.escape(route.plug_opts))} + end + end + + defp build_prepare_expr(_key, data) when data == %{}, do: {[], []} + defp build_prepare_expr(key, data) do + var = Macro.var(key, :conn) + merge = quote(do: Map.merge(unquote(var), unquote(Macro.escape(data)))) + {[{key, var}], [{key, merge}]} + end + + defp build_params() do + params = Macro.var(:params, :conn) + path_params = Macro.var(:path_params, :conn) + merge_params = quote(do: Phoenix.Router.Route.merge_params(unquote(params), unquote(path_params))) + + { + [{:params, params}], + [{:params, merge_params}, {:path_params, path_params}] + } + end + + @doc """ + Merges params from router. + """ + def merge_params(%Plug.Conn.Unfetched{}, path_params), do: path_params + def merge_params(params, path_params), do: Map.merge(params, path_params) + + @doc """ + Validates and returns the list of forward path segments. + + Raises `RuntimeError` if the `plug` is already forwarded or the + `path` contains a dynamic segment. + """ + def forward_path_segments(path, plug, phoenix_forwards) do + case Plug.Router.Utils.build_path_match(path) do + {[], path_segments} -> + if phoenix_forwards[plug] do + raise ArgumentError, "#{inspect plug} has already been forwarded to. A module can only be forwarded a single time." + end + path_segments + _ -> + raise ArgumentError, "dynamic segment \"#{path}\" not allowed when forwarding. Use a static path instead." + end + end +end diff --git a/deps/phoenix/lib/phoenix/router/scope.ex b/deps/phoenix/lib/phoenix/router/scope.ex new file mode 100644 index 0000000..67164e6 --- /dev/null +++ b/deps/phoenix/lib/phoenix/router/scope.ex @@ -0,0 +1,213 @@ +defmodule Phoenix.Router.Scope do + alias Phoenix.Router.{Scope, Route} + @moduledoc false + + @stack :phoenix_router_scopes + @pipes :phoenix_pipeline_scopes + @top :phoenix_top_scopes + + defstruct path: [], alias: [], as: [], pipes: [], host: nil, private: %{}, assigns: %{}, log: :debug, trailing_slash?: false + + @doc """ + Initializes the scope. + """ + def init(module) do + Module.put_attribute(module, @stack, []) + Module.put_attribute(module, @top, %Scope{}) + Module.put_attribute(module, @pipes, MapSet.new()) + end + + @doc """ + Builds a route based on the top of the stack. + """ + def route(line, module, kind, verb, path, plug, plug_opts, opts) do + top = get_top(module) + path = validate_path(path) + private = Keyword.get(opts, :private, %{}) + assigns = Keyword.get(opts, :assigns, %{}) + as = Keyword.get(opts, :as, Phoenix.Naming.resource_name(plug, "Controller")) + alias? 
= Keyword.get(opts, :alias, true) + trailing_slash? = Keyword.get(opts, :trailing_slash, top.trailing_slash?) == true + + if to_string(as) == "static" do + raise ArgumentError, "`static` is a reserved route prefix generated from #{inspect plug} or `:as` option" + end + + {path, alias, as, private, assigns} = + join(top, path, plug, alias?, as, private, assigns) + + metadata = + opts + |> Keyword.get(:metadata, %{}) + |> Map.put(:log, Keyword.get(opts, :log, top.log)) + + Phoenix.Router.Route.build(line, kind, verb, path, top.host, alias, plug_opts, as, top.pipes, private, assigns, metadata, trailing_slash?) + end + + @doc """ + Validates a path is a string and contains a leading prefix. + """ + def validate_path("/" <> _ = path), do: path + def validate_path(path) when is_binary(path) do + IO.warn """ + router paths should begin with a forward slash, got: #{inspect path} + #{Exception.format_stacktrace()} + """ + + "/" <> path + end + def validate_path(path) do + raise ArgumentError, "router paths must be strings, got: #{inspect path}" + end + + @doc """ + Defines the given pipeline. + """ + def pipeline(module, pipe) when is_atom(pipe) do + update_pipes module, &MapSet.put(&1, pipe) + end + + @doc """ + Appends the given pipes to the current scope pipe through. + """ + def pipe_through(module, new_pipes) do + new_pipes = List.wrap(new_pipes) + %{pipes: pipes} = top = get_top(module) + + if pipe = Enum.find(new_pipes, & &1 in pipes) do + raise ArgumentError, + "duplicate pipe_through for #{inspect pipe}. " <> + "A plug may only be used once inside a scoped pipe_through" + end + + put_top(module, %{top | pipes: pipes ++ new_pipes}) + end + + @doc """ + Pushes a scope into the module stack. + """ + def push(module, path) when is_binary(path) do + push(module, path: path) + end + + def push(module, opts) when is_list(opts) do + top = get_top(module) + + path = + if path = Keyword.get(opts, :path) do + path |> validate_path() |> String.split("/", trim: true) + else + [] + end + + alias = append_unless_false(top, opts, :alias, &Atom.to_string(&1)) + as = append_unless_false(top, opts, :as, & &1) + host = Keyword.get(opts, :host) + private = Keyword.get(opts, :private, %{}) + assigns = Keyword.get(opts, :assigns, %{}) + + update_stack(module, fn stack -> [top | stack] end) + + put_top(module, %Scope{ + path: top.path ++ path, + alias: alias, + as: as, + host: host || top.host, + pipes: top.pipes, + private: Map.merge(top.private, private), + assigns: Map.merge(top.assigns, assigns), + log: Keyword.get(opts, :log, top.log), + trailing_slash?: Keyword.get(opts, :trailing_slash, top.trailing_slash?) == true + }) + end + + defp append_unless_false(top, opts, key, fun) do + case opts[key] do + false -> [] + nil -> Map.fetch!(top, key) + other -> Map.fetch!(top, key) ++ [fun.(other)] + end + end + + @doc """ + Pops a scope from the module stack. + """ + def pop(module) do + update_stack(module, fn [top | stack] -> + put_top(module, top) + stack + end) + end + + @doc """ + Add a forward to the router. 
+ """ + def register_forwards(module, path, plug) when is_atom(plug) do + plug = expand_alias(module, plug) + phoenix_forwards = Module.get_attribute(module, :phoenix_forwards) + path_segments = Route.forward_path_segments(path, plug, phoenix_forwards) + phoenix_forwards = Map.put(phoenix_forwards, plug, path_segments) + Module.put_attribute(module, :phoenix_forwards, phoenix_forwards) + plug + end + + def register_forwards(_, _, plug) do + raise ArgumentError, "forward expects a module as the second argument, #{inspect plug} given" + end + + @doc """ + Expands the alias in the current router scope. + """ + def expand_alias(module, alias) do + join_alias(get_top(module), alias) + end + + defp join(top, path, alias, alias?, as, private, assigns) do + joined_alias = + if alias? do + join_alias(top, alias) + else + alias + end + + {join_path(top, path), joined_alias, join_as(top, as), + Map.merge(top.private, private), Map.merge(top.assigns, assigns)} + end + + defp join_path(top, path) do + "/" <> Enum.join(top.path ++ String.split(path, "/", trim: true), "/") + end + + defp join_alias(top, alias) when is_atom(alias) do + Module.concat(top.alias ++ [alias]) + end + + defp join_as(_top, nil), do: nil + defp join_as(top, as) when is_atom(as) or is_binary(as), do: Enum.join(top.as ++ [as], "_") + + defp get_top(module) do + get_attribute(module, @top) + end + + defp update_stack(module, fun) do + update_attribute(module, @stack, fun) + end + + defp update_pipes(module, fun) do + update_attribute(module, @pipes, fun) + end + + defp put_top(module, value) do + Module.put_attribute(module, @top, value) + value + end + + defp get_attribute(module, attr) do + Module.get_attribute(module, attr) || + raise "Phoenix router scope was not initialized" + end + + defp update_attribute(module, attr, fun) do + Module.put_attribute(module, attr, fun.(get_attribute(module, attr))) + end +end diff --git a/deps/phoenix/lib/phoenix/socket.ex b/deps/phoenix/lib/phoenix/socket.ex new file mode 100644 index 0000000..7b9f15f --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket.ex @@ -0,0 +1,736 @@ +defmodule Phoenix.Socket do + @moduledoc ~S""" + A socket implementation that multiplexes messages over channels. + + `Phoenix.Socket` is used as a module for establishing and maintaining + the socket state via the `Phoenix.Socket` struct. + + Once connected to a socket, incoming and outgoing events are routed to + channels. The incoming client data is routed to channels via transports. + It is the responsibility of the socket to tie transports and channels + together. + + Phoenix supports `websocket` and `longpoll` options when invoking + `Phoenix.Endpoint.socket/3` in your endpoint. `websocket` is set by default + and `longpoll` can also be configured explicitly. + + socket "/socket", MyAppWeb.Socket, websocket: true, longpoll: false + + The command above means incoming socket connections can be made via + a WebSocket connection. Events are routed by topic to channels: + + channel "room:lobby", MyAppWeb.LobbyChannel + + See `Phoenix.Channel` for more information on channels. + + ## Socket Behaviour + + Socket handlers are mounted in Endpoints and must define two callbacks: + + * `connect/3` - receives the socket params, connection info if any, and + authenticates the connection. Must return a `Phoenix.Socket` struct, + often with custom assigns + * `id/1` - receives the socket returned by `connect/3` and returns the + id of this connection as a string. 
The `id` is used to identify socket + connections, often to a particular user, allowing us to force disconnections. + For sockets requiring no authentication, `nil` can be returned + + ## Examples + + defmodule MyAppWeb.UserSocket do + use Phoenix.Socket + + channel "room:*", MyAppWeb.RoomChannel + + def connect(params, socket, _connect_info) do + {:ok, assign(socket, :user_id, params["user_id"])} + end + + def id(socket), do: "users_socket:#{socket.assigns.user_id}" + end + + # Disconnect all user's socket connections and their multiplexed channels + MyAppWeb.Endpoint.broadcast("users_socket:" <> user.id, "disconnect", %{}) + + ## Socket fields + + * `:id` - The string id of the socket + * `:assigns` - The map of socket assigns, default: `%{}` + * `:channel` - The current channel module + * `:channel_pid` - The channel pid + * `:endpoint` - The endpoint module where this socket originated, for example: `MyAppWeb.Endpoint` + * `:handler` - The socket module where this socket originated, for example: `MyAppWeb.UserSocket` + * `:joined` - If the socket has effectively joined the channel + * `:join_ref` - The ref sent by the client when joining + * `:ref` - The latest ref sent by the client + * `:pubsub_server` - The registered name of the socket's pubsub server + * `:topic` - The string topic, for example `"room:123"` + * `:transport` - An identifier for the transport, used for logging + * `:transport_pid` - The pid of the socket's transport process + * `:serializer` - The serializer for socket messages + + ## Using options + + On `use Phoenix.Socket`, the following options are accepted: + + * `:log` - the default level to log socket actions. Defaults + to `:info`. May be set to `false` to disable it + + * `:partitions` - each channel is spawned under a supervisor. + This option controls how many supervisors will be spawned + to handle channels. Defaults to the number of cores. + + ## Garbage collection + + It's possible to force garbage collection in the transport process after + processing large messages. For example, to trigger such from your channels, + run: + + send(socket.transport_pid, :garbage_collect) + + Alternatively, you can configure your endpoint socket to trigger more + fullsweep garbage collections more frequently, by setting the `:fullsweep_after` + option for websockets. See `Phoenix.Endpoint.socket/3` for more info. + + ## Client-server communication + + The encoding of server data and the decoding of client data is done + according to a serializer, defined in `Phoenix.Socket.Serializer`. + By default, JSON encoding is used to broker messages to and from clients. + + The serializer `decode!` function must return a `Phoenix.Socket.Message` + which is forwarded to channels except: + + * `"heartbeat"` events in the "phoenix" topic - should just emit an OK reply + * `"phx_join"` on any topic - should join the topic + * `"phx_leave"` on any topic - should leave the topic + + Each message also has a `ref` field which is used to track responses. + + The server may send messages or replies back. For messages, the + ref uniquely identifies the message. For replies, the ref matches + the original message. Both data-types also include a join_ref that + uniquely identifies the currently joined channel. 
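+
+  For instance, a client event pushed to a joined channel decodes into a
+  `Phoenix.Socket.Message` along these lines (topic, event and payload are
+  illustrative):
+
+      %Phoenix.Socket.Message{
+        topic: "room:lobby",
+        event: "new_msg",
+        payload: %{"body" => "hi"},
+        ref: "3",
+        join_ref: "1"
+      }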
+ + The `Phoenix.Socket` implementation may also send special messages + and replies: + + * `"phx_error"` - in case of errors, such as a channel process + crashing, or when attempting to join an already joined channel + + * `"phx_close"` - the channel was gracefully closed + + Phoenix ships with a JavaScript implementation of both websocket + and long polling that interacts with Phoenix.Socket and can be + used as reference for those interested in implementing custom clients. + + ## Custom sockets and transports + + See the `Phoenix.Socket.Transport` documentation for more information on + writing your own socket that does not leverage channels or for writing + your own transports that interacts with other sockets. + + ## Custom channels + + You can list any module as a channel as long as it implements + a `child_spec/1` function. The `child_spec/1` function receives + the caller as argument and it must return a child spec that + initializes a process. + + Once the process is initialized, it will receive the following + message: + + {Phoenix.Channel, auth_payload, from, socket} + + A custom channel implementation MUST invoke + `GenServer.reply(from, {:ok | :error, reply_payload})` during its + initialization with a custom `reply_payload` that will be sent as + a reply to the client. Failing to do so will block the socket forever. + + A custom channel receives `Phoenix.Socket.Message` structs as regular + messages from the transport. Replies to those messages and custom + messages can be sent to the socket at any moment by building an + appropriate `Phoenix.Socket.Reply` and `Phoenix.Socket.Message` + structs, encoding them with the serializer and dispatching the + serialized result to the transport. + + For example, to handle "phx_leave" messages, which is recommended + to be handled by all channel implementations, one may do: + + def handle_info( + %Message{topic: topic, event: "phx_leave"} = message, + %{topic: topic, serializer: serializer, transport_pid: transport_pid} = socket + ) do + send transport_pid, serializer.encode!(build_leave_reply(message)) + {:stop, {:shutdown, :left}, socket} + end + + We also recommend all channels to monitor the `transport_pid` + on `init` and exit if the transport exits. We also advise to rewrite + `:normal` exit reasons (usually due to the socket being closed) + to the `{:shutdown, :closed}` to guarantee links are broken on + the channel exit (as a `:normal` exit does not break links): + + def handle_info({:DOWN, _, _, transport_pid, reason}, %{transport_pid: transport_pid} = socket) do + reason = if reason == :normal, do: {:shutdown, :closed}, else: reason + {:stop, reason, socket} + end + + Any process exit is treated as an error by the socket layer unless + a `{:socket_close, pid, reason}` message is sent to the socket before + shutdown. + + Custom channel implementations cannot be tested with `Phoenix.ChannelTest` + and are currently considered experimental. The underlying API may be + changed at any moment. + """ + + require Logger + require Phoenix.Endpoint + alias Phoenix.Socket + alias Phoenix.Socket.{Broadcast, Message, Reply} + + @doc """ + Receives the socket params and authenticates the connection. + + ## Socket params and assigns + + Socket params are passed from the client and can + be used to verify and authenticate a user. After + verification, you can put default assigns into + the socket that will be set for all channels, ie + + {:ok, assign(socket, :user_id, verified_user_id)} + + To deny connection, return `:error`. 
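+
+  A sketch of a token-verifying connect callback (the `"user socket"` salt and
+  `max_age` are illustrative):
+
+      def connect(%{"token" => token}, socket, _connect_info) do
+        case Phoenix.Token.verify(socket, "user socket", token, max_age: 86_400) do
+          {:ok, user_id} -> {:ok, assign(socket, :user_id, user_id)}
+          {:error, _reason} -> :error
+        end
+      end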
+ + See `Phoenix.Token` documentation for examples in + performing token verification on connect. + """ + @callback connect(params :: map, Socket.t) :: {:ok, Socket.t} | {:error, term} | :error + @callback connect(params :: map, Socket.t, connect_info :: map) :: {:ok, Socket.t} | {:error, term} | :error + + @doc ~S""" + Identifies the socket connection. + + Socket IDs are topics that allow you to identify all sockets for a given user: + + def id(socket), do: "users_socket:#{socket.assigns.user_id}" + + Would allow you to broadcast a `"disconnect"` event and terminate + all active sockets and channels for a given user: + + MyAppWeb.Endpoint.broadcast("users_socket:" <> user.id, "disconnect", %{}) + + Returning `nil` makes this socket anonymous. + """ + @callback id(Socket.t) :: String.t | nil + + @optional_callbacks connect: 2, connect: 3 + + defmodule InvalidMessageError do + @moduledoc """ + Raised when the socket message is invalid. + """ + defexception [:message] + end + + defstruct assigns: %{}, + channel: nil, + channel_pid: nil, + endpoint: nil, + handler: nil, + id: nil, + joined: false, + join_ref: nil, + private: %{}, + pubsub_server: nil, + ref: nil, + serializer: nil, + topic: nil, + transport: nil, + transport_pid: nil + + @type t :: %Socket{ + assigns: map, + channel: atom, + channel_pid: pid, + endpoint: atom, + handler: atom, + id: String.t | nil, + joined: boolean, + ref: term, + private: map, + pubsub_server: atom, + serializer: atom, + topic: String.t, + transport: atom, + transport_pid: pid, + } + + defmacro __using__(opts) do + quote do + ## User API + + import Phoenix.Socket + @behaviour Phoenix.Socket + @before_compile Phoenix.Socket + Module.register_attribute(__MODULE__, :phoenix_channels, accumulate: true) + @phoenix_socket_options unquote(opts) + + ## Callbacks + + @behaviour Phoenix.Socket.Transport + + @doc false + def child_spec(opts) do + Phoenix.Socket.__child_spec__(__MODULE__, opts, @phoenix_socket_options) + end + + @doc false + def connect(map), do: Phoenix.Socket.__connect__(__MODULE__, map, @phoenix_socket_options) + + @doc false + def init(state), do: Phoenix.Socket.__init__(state) + + @doc false + def handle_in(message, state), do: Phoenix.Socket.__in__(message, state) + + @doc false + def handle_info(message, state), do: Phoenix.Socket.__info__(message, state) + + @doc false + def terminate(reason, state), do: Phoenix.Socket.__terminate__(reason, state) + end + end + + ## USER API + + @doc """ + Adds key-value pairs to socket assigns. + + A single key-value pair may be passed, a keyword list or map + of assigns may be provided to be merged into existing socket + assigns. + + ## Examples + + iex> assign(socket, :name, "Elixir") + iex> assign(socket, name: "Elixir", logo: "๐Ÿ’ง") + """ + def assign(%Socket{} = socket, key, value) do + assign(socket, [{key, value}]) + end + + def assign(%Socket{} = socket, attrs) + when is_map(attrs) or is_list(attrs) do + %{socket | assigns: Map.merge(socket.assigns, Map.new(attrs))} + end + + @doc """ + Defines a channel matching the given topic and transports. + + * `topic_pattern` - The string pattern, for example `"room:*"`, `"users:*"`, + or `"system"` + * `module` - The channel module handler, for example `MyAppWeb.RoomChannel` + * `opts` - The optional list of options, see below + + ## Options + + * `:assigns` - the map of socket assigns to merge into the socket on join + + ## Examples + + channel "topic1:*", MyChannel + + ## Topic Patterns + + The `channel` macro accepts topic patterns in two flavors. 
A splat (the `*` + character) argument can be provided as the last character to indicate a + `"topic:subtopic"` match. If a plain string is provided, only that topic will + match the channel handler. Most use-cases will use the `"topic:*"` pattern to + allow more versatile topic scoping. + + See `Phoenix.Channel` for more information + """ + defmacro channel(topic_pattern, module, opts \\ []) do + module = expand_alias(module, __CALLER__) + + opts = + if Macro.quoted_literal?(opts) do + Macro.prewalk(opts, &expand_alias(&1, __CALLER__)) + else + opts + end + + quote do + @phoenix_channels {unquote(topic_pattern), unquote(module), unquote(opts)} + end + end + + defp expand_alias({:__aliases__, _, _} = alias, env), + do: Macro.expand(alias, %{env | function: {:channel, 3}}) + + defp expand_alias(other, _env), do: other + + @doc false + @deprecated "transport/3 in Phoenix.Socket is deprecated and has no effect" + defmacro transport(_name, _module, _config \\ []) do + :ok + end + + defmacro __before_compile__(env) do + channels = Module.get_attribute(env.module, :phoenix_channels) + + channel_defs = + for {topic_pattern, module, opts} <- channels do + topic_pattern + |> to_topic_match() + |> defchannel(module, opts) + end + + quote do + unquote(channel_defs) + def __channel__(_topic), do: nil + end + end + + defp to_topic_match(topic_pattern) do + case String.split(topic_pattern, "*") do + [prefix, ""] -> quote do: <<unquote(prefix) <> _rest>> + [bare_topic] -> bare_topic + _ -> raise ArgumentError, "channels using splat patterns must end with *" + end + end + + defp defchannel(topic_match, channel_module, opts) do + quote do + def __channel__(unquote(topic_match)), do: unquote({channel_module, Macro.escape(opts)}) + end + end + + ## CALLBACKS IMPLEMENTATION + + def __child_spec__(handler, opts, socket_options) do + endpoint = Keyword.fetch!(opts, :endpoint) + opts = Keyword.merge(socket_options, opts) + partitions = Keyword.get(opts, :partitions, System.schedulers_online()) + args = {endpoint, handler, partitions} + Supervisor.child_spec({Phoenix.Socket.PoolSupervisor, args}, id: handler) + end + + def __connect__(user_socket, map, socket_options) do + %{ + endpoint: endpoint, + options: options, + transport: transport, + params: params, + connect_info: connect_info + } = map + + vsn = params["vsn"] || "1.0.0" + + options = Keyword.merge(socket_options, options) + start = System.monotonic_time() + + case negotiate_serializer(Keyword.fetch!(options, :serializer), vsn) do + {:ok, serializer} -> + result = user_connect(user_socket, endpoint, transport, serializer, params, connect_info) + + metadata = %{ + endpoint: endpoint, + transport: transport, + params: params, + connect_info: connect_info, + vsn: vsn, + user_socket: user_socket, + log: Keyword.get(options, :log, :info), + result: result(result), + serializer: serializer + } + + duration = System.monotonic_time() - start + :telemetry.execute([:phoenix, :socket_connected], %{duration: duration}, metadata) + result + + :error -> + :error + end + end + + defp result({:ok, _}), do: :ok + defp result(:error), do: :error + defp result({:error, _}), do: :error + + def __init__({state, %{id: id, endpoint: endpoint} = socket}) do + _ = id && endpoint.subscribe(id, link: true) + {:ok, {state, %{socket | transport_pid: self()}}} + end + + def __in__({payload, opts}, {state, socket}) do + %{topic: topic} = message = socket.serializer.decode!(payload, opts) + handle_in(Map.get(state.channels, topic), message, state, socket) + end + + def __info__({:DOWN, ref, _, pid,
reason}, {state, socket}) do + case state.channels_inverse do + %{^pid => {topic, join_ref}} -> + state = delete_channel(state, pid, topic, ref) + {:push, encode_on_exit(socket, topic, join_ref, reason), {state, socket}} + + %{} -> + {:ok, {state, socket}} + end + end + + def __info__(%Broadcast{event: "disconnect"}, state) do + {:stop, {:shutdown, :disconnected}, state} + end + + def __info__({:socket_push, opcode, payload}, state) do + {:push, {opcode, payload}, state} + end + + def __info__({:socket_close, pid, _reason}, {state, socket}) do + socket_close(pid, {state, socket}) + end + + def __info__(:garbage_collect, state) do + :erlang.garbage_collect(self()) + {:ok, state} + end + + def __info__(_, state) do + {:ok, state} + end + + def __terminate__(_reason, _state_socket) do + :ok + end + + defp negotiate_serializer(serializers, vsn) when is_list(serializers) do + case Version.parse(vsn) do + {:ok, vsn} -> + serializers + |> Enum.find(:error, fn {_serializer, vsn_req} -> Version.match?(vsn, vsn_req) end) + |> case do + {serializer, _vsn_req} -> + {:ok, serializer} + + :error -> + Logger.error "The client's requested transport version \"#{vsn}\" " <> + "does not match server's version requirements of #{inspect serializers}" + :error + end + + :error -> + Logger.error "Client sent invalid transport version \"#{vsn}\"" + :error + end + end + + defp user_connect(handler, endpoint, transport, serializer, params, connect_info) do + # The information in the Phoenix.Socket goes to userland and channels. + socket = %Socket{ + handler: handler, + endpoint: endpoint, + pubsub_server: endpoint.config(:pubsub_server), + serializer: serializer, + transport: transport + } + + # The information in the state is kept only inside the socket process. + state = %{ + channels: %{}, + channels_inverse: %{} + } + + connect_result = + if function_exported?(handler, :connect, 3) do + handler.connect(params, socket, connect_info) + else + handler.connect(params, socket) + end + + case connect_result do + {:ok, %Socket{} = socket} -> + case handler.id(socket) do + nil -> + {:ok, {state, socket}} + + id when is_binary(id) -> + {:ok, {state, %{socket | id: id}}} + + invalid -> + Logger.error "#{inspect handler}.id/1 returned invalid identifier " <> + "#{inspect invalid}. Expected nil or a string." + :error + end + + :error -> + :error + + {:error, _reason} = err -> + err + + invalid -> + connect_arity = if function_exported?(handler, :connect, 3), do: "connect/3", else: "connect/2" + Logger.error "#{inspect handler}. #{connect_arity} returned invalid value #{inspect invalid}. 
" <> + "Expected {:ok, socket}, {:error, reason} or :error" + :error + end + end + + defp handle_in(_, %{ref: ref, topic: "phoenix", event: "heartbeat"}, state, socket) do + reply = %Reply{ + ref: ref, + topic: "phoenix", + status: :ok, + payload: %{} + } + + {:reply, :ok, encode_reply(socket, reply), {state, socket}} + end + + defp handle_in(nil, %{event: "phx_join", topic: topic, ref: ref, join_ref: join_ref} = message, state, socket) do + case socket.handler.__channel__(topic) do + {channel, opts} -> + case Phoenix.Channel.Server.join(socket, channel, message, opts) do + {:ok, reply, pid} -> + reply = %Reply{join_ref: join_ref, ref: ref, topic: topic, status: :ok, payload: reply} + state = put_channel(state, pid, topic, join_ref) + {:reply, :ok, encode_reply(socket, reply), {state, socket}} + + {:error, reply} -> + reply = %Reply{join_ref: join_ref, ref: ref, topic: topic, status: :error, payload: reply} + {:reply, :error, encode_reply(socket, reply), {state, socket}} + end + + _ -> + Logger.warn fn -> "Ignoring unmatched topic \"#{topic}\" in #{inspect(socket.handler)}" end + {:reply, :error, encode_ignore(socket, message), {state, socket}} + end + end + + defp handle_in({pid, _ref, status}, %{event: "phx_join", topic: topic} = message, state, socket) do + receive do + {:socket_close, ^pid, _reason} -> :ok + after + 0 -> + if status != :leaving do + Logger.debug(fn -> + "Duplicate channel join for topic \"#{topic}\" in #{inspect(socket.handler)}. " <> + "Closing existing channel for new join." + end) + end + end + + :ok = shutdown_duplicate_channel(pid) + {:push, {opcode, payload}, {new_state, new_socket}} = socket_close(pid, {state, socket}) + send(self(), {:socket_push, opcode, payload}) + handle_in(nil, message, new_state, new_socket) + end + + defp handle_in({pid, _ref, _status}, message, state, socket) do + send(pid, message) + {:ok, {maybe_put_status(state, pid, message), socket}} + end + + defp handle_in(nil, %{event: "phx_leave", ref: ref, topic: topic, join_ref: join_ref}, state, socket) do + reply = %Reply{ + ref: ref, + join_ref: join_ref, + topic: topic, + status: :ok, + payload: %{} + } + + {:reply, :ok, encode_reply(socket, reply), {state, socket}} + end + + defp handle_in(nil, message, state, socket) do + # This clause can happen if the server drops the channel + # and the client sends a message meanwhile + {:reply, :error, encode_ignore(socket, message), {state, socket}} + end + + defp put_channel(state, pid, topic, join_ref) do + %{channels: channels, channels_inverse: channels_inverse} = state + monitor_ref = Process.monitor(pid) + + %{ + state | + channels: Map.put(channels, topic, {pid, monitor_ref, :joined}), + channels_inverse: Map.put(channels_inverse, pid, {topic, join_ref}) + } + end + + defp delete_channel(state, pid, topic, monitor_ref) do + %{channels: channels, channels_inverse: channels_inverse} = state + Process.demonitor(monitor_ref, [:flush]) + + %{ + state | + channels: Map.delete(channels, topic), + channels_inverse: Map.delete(channels_inverse, pid) + } + end + + defp encode_on_exit(socket, topic, ref, _reason) do + message = %Message{join_ref: ref, ref: ref, topic: topic, event: "phx_error", payload: %{}} + encode_reply(socket, message) + end + + defp encode_ignore(socket, %{ref: ref, topic: topic}) do + reply = %Reply{ref: ref, topic: topic, status: :error, payload: %{reason: "unmatched topic"}} + encode_reply(socket, reply) + end + + defp encode_reply(%{serializer: serializer}, message) do + {:socket_push, opcode, payload} = 
serializer.encode!(message) + {opcode, payload} + end + + defp encode_close(socket, topic, join_ref) do + message = %Message{join_ref: join_ref, ref: join_ref, topic: topic, event: "phx_close", payload: %{}} + encode_reply(socket, message) + end + + defp shutdown_duplicate_channel(pid) do + ref = Process.monitor(pid) + Process.exit(pid, {:shutdown, :duplicate_join}) + + receive do + {:DOWN, ^ref, _, _, _} -> :ok + after + 5_000 -> + Process.exit(pid, :kill) + receive do: ({:DOWN, ^ref, _, _, _} -> :ok) + end + end + + defp socket_close(pid, {state, socket}) do + case state.channels_inverse do + %{^pid => {topic, join_ref}} -> + {^pid, monitor_ref, _status} = Map.fetch!(state.channels, topic) + state = delete_channel(state, pid, topic, monitor_ref) + {:push, encode_close(socket, topic, join_ref), {state, socket}} + + %{} -> + {:ok, {state, socket}} + end + end + + defp maybe_put_status(state, pid, %{event: "phx_leave", topic: topic}) do + update_channel_status(state, pid, topic, :leaving) + end + + defp maybe_put_status(state, _pid, %{} = _msg) do + state + end + + defp update_channel_status(state, pid, topic, status) do + new_channels = Map.update!(state.channels, topic, fn {^pid, ref, _} -> {pid, ref, status} end) + %{state | channels: new_channels} + end +end diff --git a/deps/phoenix/lib/phoenix/socket/message.ex b/deps/phoenix/lib/phoenix/socket/message.ex new file mode 100644 index 0000000..584d63e --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/message.ex @@ -0,0 +1,72 @@ +defmodule Phoenix.Socket.Message do + @moduledoc """ + Defines a message dispatched over transport to channels and vice-versa. + + The message format requires the following keys: + + * `:topic` - The string topic or topic:subtopic pair namespace, for + example "messages", "messages:123" + * `:event`- The string event name, for example "phx_join" + * `:payload` - The message payload + * `:ref` - The unique string ref + * `:join_ref` - The unique string ref when joining + + """ + + @type t :: %Phoenix.Socket.Message{} + defstruct topic: nil, event: nil, payload: nil, ref: nil, join_ref: nil + + @doc """ + Converts a map with string keys into a message struct. + + Raises `Phoenix.Socket.InvalidMessageError` if not valid. + """ + def from_map!(map) when is_map(map) do + try do + %Phoenix.Socket.Message{ + topic: Map.fetch!(map, "topic"), + event: Map.fetch!(map, "event"), + payload: Map.fetch!(map, "payload"), + ref: Map.fetch!(map, "ref"), + join_ref: Map.get(map, "join_ref") + } + rescue + err in [KeyError] -> + raise Phoenix.Socket.InvalidMessageError, "missing key #{inspect(err.key)}" + end + end +end + +defmodule Phoenix.Socket.Reply do + @moduledoc """ + Defines a reply sent from channels to transports. + + The message format requires the following keys: + + * `:topic` - The string topic or topic:subtopic pair namespace, for example "messages", "messages:123" + * `:status` - The reply status as an atom + * `:payload` - The reply payload + * `:ref` - The unique string ref + * `:join_ref` - The unique string ref when joining + + """ + + @type t :: %Phoenix.Socket.Reply{} + defstruct topic: nil, status: nil, payload: nil, ref: nil, join_ref: nil +end + +defmodule Phoenix.Socket.Broadcast do + @moduledoc """ + Defines a message sent from pubsub to channels and vice-versa. 
+ + The message format requires the following keys: + + * `:topic` - The string topic or topic:subtopic pair namespace, for example "messages", "messages:123" + * `:event`- The string event name, for example "phx_join" + * `:payload` - The message payload + + """ + + @type t :: %Phoenix.Socket.Broadcast{} + defstruct topic: nil, event: nil, payload: nil +end diff --git a/deps/phoenix/lib/phoenix/socket/pool_supervisor.ex b/deps/phoenix/lib/phoenix/socket/pool_supervisor.ex new file mode 100644 index 0000000..bc6e257 --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/pool_supervisor.ex @@ -0,0 +1,59 @@ +defmodule Phoenix.Socket.PoolSupervisor do + @moduledoc false + use Supervisor + + def start_link(args) do + Supervisor.start_link(__MODULE__, args) + end + + def start_child(endpoint, name, key, spec) do + case endpoint.config({:socket, name}) do + ets when not is_nil(ets) -> + partitions = :ets.lookup_element(ets, :partitions, 2) + sup = :ets.lookup_element(ets, :erlang.phash2(key, partitions), 2) + DynamicSupervisor.start_child(sup, spec) + + nil -> + raise ArgumentError, """ + no socket supervision tree found for #{inspect(name)}. + + Ensure your #{inspect(endpoint)} contains a socket mount, for example: + + socket "/socket", #{inspect(name)}, + websocket: true, + longpoll: true + """ + end + end + + @doc false + def start_pooled(ref, i) do + case DynamicSupervisor.start_link(strategy: :one_for_one) do + {:ok, pid} -> + :ets.insert(ref, {i, pid}) + {:ok, pid} + + {:error, reason} -> + {:error, reason} + end + end + + @doc false + def init({endpoint, name, partitions}) do + ref = :ets.new(name, [:public, read_concurrency: true]) + :ets.insert(ref, {:partitions, partitions}) + Phoenix.Config.permanent(endpoint, {:socket, name}, ref) + + children = + for i <- 0..(partitions - 1) do + %{ + id: i, + start: {__MODULE__, :start_pooled, [ref, i]}, + type: :supervisor, + shutdown: :infinity + } + end + + Supervisor.init(children, strategy: :one_for_one) + end +end diff --git a/deps/phoenix/lib/phoenix/socket/serializer.ex b/deps/phoenix/lib/phoenix/socket/serializer.ex new file mode 100644 index 0000000..a8deb15 --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/serializer.ex @@ -0,0 +1,29 @@ +defmodule Phoenix.Socket.Serializer do + @moduledoc """ + A behaviour that serializes incoming and outgoing socket messages. + + By default Phoenix provides a serializer that encodes to JSON and + decodes JSON messages. + + Custom serializers may be configured in the socket. + """ + + @doc """ + Encodes a `Phoenix.Socket.Broadcast` struct to fastlane format. + """ + @callback fastlane!(Phoenix.Socket.Broadcast.t()) :: + {:socket_push, :text, iodata()} + | {:socket_push, :binary, iodata()} + + @doc """ + Encodes `Phoenix.Socket.Message` and `Phoenix.Socket.Reply` structs to push format. + """ + @callback encode!(Phoenix.Socket.Message.t() | Phoenix.Socket.Reply.t()) :: + {:socket_push, :text, iodata()} + | {:socket_push, :binary, iodata()} + + @doc """ + Decodes iodata into `Phoenix.Socket.Message` struct. 
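+
+  A transport typically passes the raw frame together with its opcode, for
+  example (illustrative call, assuming `serializer` and `payload` are in scope):
+
+      message = serializer.decode!(payload, opcode: :text)
+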
+ """ + @callback decode!(iodata, options :: Keyword.t()) :: Phoenix.Socket.Message.t() +end diff --git a/deps/phoenix/lib/phoenix/socket/serializers/v1_json_serializer.ex b/deps/phoenix/lib/phoenix/socket/serializers/v1_json_serializer.ex new file mode 100644 index 0000000..174f70c --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/serializers/v1_json_serializer.ex @@ -0,0 +1,41 @@ +defmodule Phoenix.Socket.V1.JSONSerializer do + @moduledoc false + @behaviour Phoenix.Socket.Serializer + + alias Phoenix.Socket.{Broadcast, Message, Reply} + + @impl true + def fastlane!(%Broadcast{} = msg) do + map = %Message{topic: msg.topic, event: msg.event, payload: msg.payload} + {:socket_push, :text, encode_v1_fields_only(map)} + end + + @impl true + def encode!(%Reply{} = reply) do + map = %Message{ + topic: reply.topic, + event: "phx_reply", + ref: reply.ref, + payload: %{status: reply.status, response: reply.payload} + } + + {:socket_push, :text, encode_v1_fields_only(map)} + end + + def encode!(%Message{} = map) do + {:socket_push, :text, encode_v1_fields_only(map)} + end + + @impl true + def decode!(message, _opts) do + message + |> Phoenix.json_library().decode!() + |> Phoenix.Socket.Message.from_map!() + end + + defp encode_v1_fields_only(%Message{} = msg) do + msg + |> Map.take([:topic, :event, :payload, :ref]) + |> Phoenix.json_library().encode_to_iodata!() + end +end diff --git a/deps/phoenix/lib/phoenix/socket/serializers/v2_json_serializer.ex b/deps/phoenix/lib/phoenix/socket/serializers/v2_json_serializer.ex new file mode 100644 index 0000000..8727622 --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/serializers/v2_json_serializer.ex @@ -0,0 +1,158 @@ +defmodule Phoenix.Socket.V2.JSONSerializer do + @moduledoc false + @behaviour Phoenix.Socket.Serializer + + @push 0 + @reply 1 + @broadcast 2 + + alias Phoenix.Socket.{Broadcast, Message, Reply} + + @impl true + def fastlane!(%Broadcast{payload: {:binary, data}} = msg) do + topic_size = byte_size!(msg.topic, :topic, 255) + event_size = byte_size!(msg.event, :event, 255) + + bin = << + @broadcast::size(8), + topic_size::size(8), + event_size::size(8), + msg.topic::binary-size(topic_size), + msg.event::binary-size(event_size), + data::binary + >> + + {:socket_push, :binary, bin} + end + + def fastlane!(%Broadcast{payload: %{}} = msg) do + data = Phoenix.json_library().encode_to_iodata!([nil, nil, msg.topic, msg.event, msg.payload]) + {:socket_push, :text, data} + end + + def fastlane!(%Broadcast{payload: invalid}) do + raise ArgumentError, "expected broadcasted payload to be a map, got: #{inspect(invalid)}" + end + + @impl true + def encode!(%Reply{payload: {:binary, data}} = reply) do + status = to_string(reply.status) + join_ref_size = byte_size!(reply.join_ref, :join_ref, 255) + ref_size = byte_size!(reply.ref, :ref, 255) + topic_size = byte_size!(reply.topic, :topic, 255) + status_size = byte_size!(status, :status, 255) + + bin = << + @reply::size(8), + join_ref_size::size(8), + ref_size::size(8), + topic_size::size(8), + status_size::size(8), + reply.join_ref::binary-size(join_ref_size), + reply.ref::binary-size(ref_size), + reply.topic::binary-size(topic_size), + status::binary-size(status_size), + data::binary + >> + + {:socket_push, :binary, bin} + end + + def encode!(%Reply{} = reply) do + data = [ + reply.join_ref, + reply.ref, + reply.topic, + "phx_reply", + %{status: reply.status, response: reply.payload} + ] + + {:socket_push, :text, Phoenix.json_library().encode_to_iodata!(data)} + end + + def encode!(%Message{payload: 
{:binary, data}} = msg) do + join_ref = to_string(msg.join_ref) + join_ref_size = byte_size!(join_ref, :join_ref, 255) + topic_size = byte_size!(msg.topic, :topic, 255) + event_size = byte_size!(msg.event, :event, 255) + + bin = << + @push::size(8), + join_ref_size::size(8), + topic_size::size(8), + event_size::size(8), + join_ref::binary-size(join_ref_size), + msg.topic::binary-size(topic_size), + msg.event::binary-size(event_size), + data::binary + >> + + {:socket_push, :binary, bin} + end + + def encode!(%Message{payload: %{}} = msg) do + data = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload] + {:socket_push, :text, Phoenix.json_library().encode_to_iodata!(data)} + end + + def encode!(%Message{payload: invalid}) do + raise ArgumentError, "expected payload to be a map, got: #{inspect(invalid)}" + end + + @impl true + def decode!(raw_message, opts) do + case Keyword.fetch(opts, :opcode) do + {:ok, :text} -> decode_text(raw_message) + {:ok, :binary} -> decode_binary(raw_message) + end + end + + defp decode_text(raw_message) do + [join_ref, ref, topic, event, payload | _] = Phoenix.json_library().decode!(raw_message) + + %Message{ + topic: topic, + event: event, + payload: payload, + ref: ref, + join_ref: join_ref + } + end + + defp decode_binary(<< + @push::size(8), + join_ref_size::size(8), + ref_size::size(8), + topic_size::size(8), + event_size::size(8), + join_ref::binary-size(join_ref_size), + ref::binary-size(ref_size), + topic::binary-size(topic_size), + event::binary-size(event_size), + data::binary + >>) do + %Message{ + topic: topic, + event: event, + payload: {:binary, data}, + ref: ref, + join_ref: join_ref + } + end + + defp byte_size!(bin, kind, max) do + case byte_size(bin) do + size when size <= max -> + size + + oversized -> + raise ArgumentError, """ + unable to convert #{kind} to binary. + + #{inspect(bin)} + + must be less than or equal to #{max} bytes, but is #{oversized} bytes. + """ + end + end +end diff --git a/deps/phoenix/lib/phoenix/socket/transport.ex b/deps/phoenix/lib/phoenix/socket/transport.ex new file mode 100644 index 0000000..646e6d1 --- /dev/null +++ b/deps/phoenix/lib/phoenix/socket/transport.ex @@ -0,0 +1,634 @@ +defmodule Phoenix.Socket.Transport do + @moduledoc """ + Outlines the Socket <-> Transport communication. + + This module specifies a behaviour that all sockets must implement. + `Phoenix.Socket` is just one possible implementation of a socket + that multiplexes events over multiple channels. Developers can + implement their own sockets as long as they implement the behaviour + outlined here. + + Developers interested in implementing custom transports must invoke + the socket API defined in this module. This module also provides + many conveniences that invokes the underlying socket API to make + it easier to build custom transports. + + ## Booting sockets + + Whenever your endpoint starts, it will automatically invoke the + `child_spec/1` on each listed socket and start that specification + under the endpoint supervisor. + + Since the socket supervision tree is started by the endpoint, + any custom transport must be started after the endpoint in a + supervision tree. + + ## Operating sockets + + Sockets are operated by a transport. When a transport is defined, + it usually receives a socket module and the module will be invoked + when certain events happen at the transport level. + + Whenever the transport receives a new connection, it should invoke + the `c:connect/1` callback with a map of metadata. 
Different sockets + may require different metadatas. + + If the connection is accepted, the transport can move the connection + to another process, if so desires, or keep using the same process. The + process responsible for managing the socket should then call `c:init/1`. + + For each message received from the client, the transport must call + `c:handle_in/2` on the socket. For each informational message the + transport receives, it should call `c:handle_info/2` on the socket. + + Transports can optionally implement `c:handle_control/2` for handling + control frames such as `:ping` and `:pong`. + + On termination, `c:terminate/2` must be called. A special atom with + reason `:closed` can be used to specify that the client terminated + the connection. + + ## Example + + Here is a simple echo socket implementation: + + defmodule EchoSocket do + @behaviour Phoenix.Socket.Transport + + def child_spec(opts) do + # We won't spawn any process, so let's return a dummy task + %{id: __MODULE__, start: {Task, :start_link, [fn -> :ok end]}, restart: :transient} + end + + def connect(state) do + # Callback to retrieve relevant data from the connection. + # The map contains options, params, transport and endpoint keys. + {:ok, state} + end + + def init(state) do + # Now we are effectively inside the process that maintains the socket. + {:ok, state} + end + + def handle_in({text, _opts}, state) do + {:reply, :ok, {:text, text}, state} + end + + def handle_info(_, state) do + {:ok, state} + end + + def terminate(_reason, _state) do + :ok + end + end + + It can be mounted in your endpoint like any other socket: + + socket "/socket", EchoSocket, websocket: true, longpoll: true + + You can now interact with the socket under `/socket/websocket` + and `/socket/longpoll`. + + ## Security + + This module also provides functions to enable a secure environment + on transports that, at some point, have access to a `Plug.Conn`. + + The functionality provided by this module helps in performing "origin" + header checks and ensuring only SSL connections are allowed. + """ + + @type state :: term() + + @doc """ + Returns a child specification for socket management. + + This is invoked only once per socket regardless of + the number of transports and should be responsible + for setting up any process structure used exclusively + by the socket regardless of transports. + + Each socket connection is started by the transport + and the process that controls the socket likely + belongs to the transport. However, some sockets spawn + new processes, such as `Phoenix.Socket` which spawns + channels, and this gives the ability to start a + supervision tree associated to the socket. + + It receives the socket options from the endpoint, + for example: + + socket "/my_app", MyApp.Socket, shutdown: 5000 + + means `child_spec([shutdown: 5000])` will be invoked. + """ + @callback child_spec(keyword) :: :supervisor.child_spec + + @doc """ + Connects to the socket. + + The transport passes a map of metadata and the socket + returns `{:ok, state}` or `:error`. The state must be + stored by the transport and returned in all future + operations. + + This function is used for authorization purposes and it + may be invoked outside of the process that effectively + runs the socket. 
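+
+  For example, a custom socket could authorize the connection based on a
+  token sent in the params (illustrative sketch; `MyApp.Token.verify/1` is a
+  placeholder for your own verification logic):
+
+      def connect(%{params: %{"token" => token}} = state) do
+        case MyApp.Token.verify(token) do
+          {:ok, _user_id} -> {:ok, state}
+          _ -> :error
+        end
+      end
+
+      def connect(_state), do: :error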
+ + In the default `Phoenix.Socket` implementation, the + metadata expects the following keys: + + * `:endpoint` - the application endpoint + * `:transport` - the transport name + * `:params` - the connection parameters + * `:options` - a keyword list of transport options, often + given by developers when configuring the transport. + It must include a `:serializer` field with the list of + serializers and their requirements + + """ + @callback connect(transport_info :: map) :: {:ok, state} | :error + + @doc """ + Initializes the socket state. + + This must be executed from the process that will effectively + operate the socket. + """ + @callback init(state) :: {:ok, state} + + @doc """ + Handles incoming socket messages. + + The message is represented as `{payload, options}`. It must + return one of: + + * `{:ok, state}` - continues the socket with no reply + * `{:reply, status, reply, state}` - continues the socket with reply + * `{:stop, reason, state}` - stops the socket + + The `reply` is a tuple contain an `opcode` atom and a message that can + be any term. The built-in websocket transport supports both `:text` and + `:binary` opcode and the message must be always iodata. Long polling only + supports text opcode. + """ + @callback handle_in({message :: term, opts :: keyword}, state) :: + {:ok, state} + | {:reply, :ok | :error, {opcode :: atom, message :: term}, state} + | {:stop, reason :: term, state} + + @doc """ + Handles incoming control frames. + + The message is represented as `{payload, options}`. It must + return one of: + + * `{:ok, state}` - continues the socket with no reply + * `{:reply, status, reply, state}` - continues the socket with reply + * `{:stop, reason, state}` - stops the socket + + Control frames only supported when using websockets. + + The `options` contains an `opcode` key, this will be either `:ping` or + `:pong`. + + If a control frame doesn't have a payload, then the payload value + will be `nil`. + """ + @callback handle_control({message :: term, opts :: keyword}, state) :: + {:ok, state} + | {:reply, :ok | :error, {opcode :: atom, message :: term}, state} + | {:stop, reason :: term, state} + + @doc """ + Handles info messages. + + The message is a term. It must return one of: + + * `{:ok, state}` - continues the socket with no reply + * `{:push, reply, state}` - continues the socket with reply + * `{:stop, reason, state}` - stops the socket + + The `reply` is a tuple contain an `opcode` atom and a message that can + be any term. The built-in websocket transport supports both `:text` and + `:binary` opcode and the message must be always iodata. Long polling only + supports text opcode. + """ + @callback handle_info(message :: term, state) :: + {:ok, state} + | {:push, {opcode :: atom, message :: term}, state} + | {:stop, reason :: term, state} + + @doc """ + Invoked on termination. + + If `reason` is `:closed`, it means the client closed the socket. This is + considered a `:normal` exit signal, so linked process will not automatically + exit. See `Process.exit/2` for more details on exit signals. 
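+
+  As an illustration, a transport process would typically call it from its
+  own shutdown path (sketch; `socket_module` and `state` stand for whatever
+  the transport kept around after `c:init/1`):
+
+      # the client closed the connection
+      :ok = socket_module.terminate(:closed, state)
+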
+ """ + @callback terminate(reason :: term, state) :: :ok + + @optional_callbacks handle_control: 2 + + require Logger + + @doc false + def load_config(true, module), + do: module.default_config() + + def load_config(config, module), + do: module.default_config() |> Keyword.merge(config) |> load_config() + + @doc false + def load_config(config) do + {connect_info, config} = Keyword.pop(config, :connect_info, []) + + connect_info = + Enum.map(connect_info, fn + key when key in [:peer_data, :trace_context_headers, :uri, :user_agent, :x_headers] -> + key + + {:session, session} -> + {:session, init_session(session)} + + {_, _} = pair -> + pair + + other -> + raise ArgumentError, + ":connect_info keys are expected to be one of :peer_data, :trace_context_headers, :x_headers, :uri, or {:session, config}, " <> + "optionally followed by custom keyword pairs, got: #{inspect(other)}" + end) + + [connect_info: connect_info] ++ config + end + + defp init_session(session_config) when is_list(session_config) do + key = Keyword.fetch!(session_config, :key) + store = Plug.Session.Store.get(Keyword.fetch!(session_config, :store)) + init = store.init(Keyword.drop(session_config, [:store, :key])) + {key, store, init} + end + + defp init_session({_, _, _} = mfa) do + {:mfa, mfa} + end + + @doc """ + Runs the code reloader if enabled. + """ + def code_reload(conn, endpoint, opts) do + reload? = Keyword.get(opts, :code_reloader, endpoint.config(:code_reloader)) + reload? && Phoenix.CodeReloader.reload(endpoint) + conn + end + + @doc """ + Forces SSL in the socket connection. + + Uses the endpoint configuration to decide so. It is a + noop if the connection has been halted. + """ + def force_ssl(%{halted: true} = conn, _socket, _endpoint, _opts) do + conn + end + + def force_ssl(conn, socket, endpoint, opts) do + if force_ssl = force_ssl_config(socket, endpoint, opts) do + Plug.SSL.call(conn, force_ssl) + else + conn + end + end + + defp force_ssl_config(socket, endpoint, opts) do + Phoenix.Config.cache(endpoint, {:force_ssl, socket}, fn _ -> + opts = + if force_ssl = Keyword.get(opts, :force_ssl, endpoint.config(:force_ssl)) do + force_ssl + |> Keyword.put_new(:host, {endpoint, :host, []}) + |> Plug.SSL.init() + end + {:cache, opts} + end) + end + + @doc """ + Logs the transport request. + + Available for transports that generate a connection. + """ + def transport_log(conn, level) do + if level do + Plug.Logger.call(conn, Plug.Logger.init(log: level)) + else + conn + end + end + + @doc """ + Checks the origin request header against the list of allowed origins. + + Should be called by transports before connecting when appropriate. + If the origin header matches the allowed origins, no origin header was + sent or no origin was configured, it will return the given connection. + + Otherwise a 403 Forbidden response will be sent and the connection halted. + It is a noop if the connection has been halted. 
+ """ + def check_origin(conn, handler, endpoint, opts, sender \\ &Plug.Conn.send_resp/1) + + def check_origin(%Plug.Conn{halted: true} = conn, _handler, _endpoint, _opts, _sender), + do: conn + + def check_origin(conn, handler, endpoint, opts, sender) do + import Plug.Conn + origin = conn |> get_req_header("origin") |> List.first() + check_origin = check_origin_config(handler, endpoint, opts) + + cond do + is_nil(origin) or check_origin == false -> + conn + + origin_allowed?(check_origin, URI.parse(origin), endpoint, conn) -> + conn + + true -> + Logger.error """ + Could not check origin for Phoenix.Socket transport. + + Origin of the request: #{origin} + + This happens when you are attempting a socket connection to + a different host than the one configured in your config/ + files. For example, in development the host is configured + to "localhost" but you may be trying to access it from + "127.0.0.1". To fix this issue, you may either: + + 1. update [url: [host: ...]] to your actual host in the + config file for your current environment (recommended) + + 2. pass the :check_origin option when configuring your + endpoint or when configuring the transport in your + UserSocket module, explicitly outlining which origins + are allowed: + + check_origin: ["https://example.com", + "//another.com:888", "//other.com"] + + """ + resp(conn, :forbidden, "") + |> sender.() + |> halt() + end + end + + @doc """ + Checks the Websocket subprotocols request header against the allowed subprotocols. + + Should be called by transports before connecting when appropriate. + If the sec-websocket-protocol header matches the allowed subprotocols, + it will put sec-websocket-protocol response header and return the given connection. + If no sec-websocket-protocol header was sent it will return the given connection. + + Otherwise a 403 Forbidden response will be sent and the connection halted. + It is a noop if the connection has been halted. + """ + def check_subprotocols(conn, subprotocols) + + def check_subprotocols(%Plug.Conn{halted: true} = conn, _subprotocols), do: conn + def check_subprotocols(conn, nil), do: conn + + def check_subprotocols(conn, subprotocols) when is_list(subprotocols) do + case Plug.Conn.get_req_header(conn, "sec-websocket-protocol") do + [] -> + conn + + [subprotocols_header | _] -> + request_subprotocols = subprotocols_header |> Plug.Conn.Utils.list() + subprotocol = Enum.find(subprotocols, fn elem -> Enum.find(request_subprotocols, &(&1 == elem)) end) + + if subprotocol do + Plug.Conn.put_resp_header(conn, "sec-websocket-protocol", subprotocol) + else + subprotocols_error_response(conn, subprotocols) + end + end + end + + def check_subprotocols(conn, subprotocols), do: subprotocols_error_response(conn, subprotocols) + + @doc """ + Extracts connection information from `conn` and returns a map. + + Keys are retrieved from the optional transport option `:connect_info`. + This functionality is transport specific. Please refer to your transports' + documentation for more information. 
+ + The supported keys are: + + * `:peer_data` - the result of `Plug.Conn.get_peer_data/1` + + * `:trace_context_headers` - a list of all trace context headers + + * `:x_headers` - a list of all request headers that have an "x-" prefix + + * `:uri` - a `%URI{}` derived from the conn + + * `:user_agent` - the value of the "user-agent" request header + + """ + def connect_info(conn, endpoint, keys) do + for key <- keys, into: %{} do + case key do + :peer_data -> + {:peer_data, Plug.Conn.get_peer_data(conn)} + + :trace_context_headers -> + {:trace_context_headers, fetch_trace_context_headers(conn)} + + :x_headers -> + {:x_headers, fetch_x_headers(conn)} + + :uri -> + {:uri, fetch_uri(conn)} + + :user_agent -> + {:user_agent, fetch_user_agent(conn)} + + {:session, session} -> + {:session, connect_session(conn, endpoint, session)} + + {key, val} -> + {key, val} + end + end + end + + defp connect_session(conn, endpoint, {key, store, store_config}) do + conn = Plug.Conn.fetch_cookies(conn) + + with csrf_token when is_binary(csrf_token) <- conn.params["_csrf_token"], + cookie when is_binary(cookie) <- conn.cookies[key], + conn = put_in(conn.secret_key_base, endpoint.config(:secret_key_base)), + {_, session} <- store.get(conn, cookie, store_config), + csrf_state when is_binary(csrf_state) <- Plug.CSRFProtection.dump_state_from_session(session["_csrf_token"]), + true <- Plug.CSRFProtection.valid_state_and_csrf_token?(csrf_state, csrf_token) do + session + else + _ -> nil + end + end + + defp connect_session(conn, endpoint, {:mfa, {module, function, args}}) do + case apply(module, function, args) do + session_config when is_list(session_config) -> + connect_session(conn, endpoint, init_session(session_config)) + + other -> + raise ArgumentError, + "the MFA given to `session_config` must return a keyword list, got: #{inspect other}" + end + end + + defp subprotocols_error_response(conn, subprotocols) do + import Plug.Conn + request_headers = get_req_header(conn, "sec-websocket-protocol") + + Logger.error """ + Could not check Websocket subprotocols for Phoenix.Socket transport. + + Subprotocols of the request: #{inspect(request_headers)} + Configured supported subprotocols: #{inspect(subprotocols)} + + This happens when you are attempting a socket connection to + a different subprotocols than the one configured in your endpoint + or when you incorrectly configured supported subprotocols. + + To fix this issue, you may either: + + 1. update websocket: [subprotocols: [..]] to your actual subprotocols + in your endpoint socket configuration. + + 2. check the correctness of the `sec-websocket-protocol` request header + sent from the client. + + 3. remove `websocket` option from your endpoint socket configuration + if you don't use Websocket subprotocols. 
+ """ + + resp(conn, :forbidden, "") + |> send_resp() + |> halt() + end + + defp fetch_x_headers(conn) do + for {header, _} = pair <- conn.req_headers, + String.starts_with?(header, "x-"), + do: pair + end + + defp fetch_trace_context_headers(conn) do + for {header, _} = pair <- conn.req_headers, + header in ["traceparent", "tracestate"], + do: pair + end + + defp fetch_uri(conn) do + %URI{ + scheme: to_string(conn.scheme), + query: conn.query_string, + port: conn.port, + host: conn.host, + authority: conn.host, + path: conn.request_path + } + end + + defp fetch_user_agent(conn) do + with {_, value} <- List.keyfind(conn.req_headers, "user-agent", 0) do + value + end + end + + defp check_origin_config(handler, endpoint, opts) do + Phoenix.Config.cache(endpoint, {:check_origin, handler}, fn _ -> + check_origin = + case Keyword.get(opts, :check_origin, endpoint.config(:check_origin)) do + origins when is_list(origins) -> + Enum.map(origins, &parse_origin/1) + + boolean when is_boolean(boolean) -> + boolean + + {module, function, arguments} -> + {module, function, arguments} + + :conn -> + :conn + + invalid -> + raise ArgumentError, ":check_origin expects a boolean, list of hosts, :conn, or MFA tuple, got: #{inspect(invalid)}" + end + + {:cache, check_origin} + end) + end + + defp parse_origin(origin) do + case URI.parse(origin) do + %{host: nil} -> + raise ArgumentError, + "invalid :check_origin option: #{inspect origin}. " <> + "Expected an origin with a host that is parsable by URI.parse/1. For example: " <> + "[\"https://example.com\", \"//another.com:888\", \"//other.com\"]" + + %{scheme: scheme, port: port, host: host} -> + {scheme, host, port} + end + end + + defp origin_allowed?({module, function, arguments}, uri, _endpoint, _conn), + do: apply(module, function, [uri | arguments]) + + defp origin_allowed?(:conn, uri, _endpoint, %Plug.Conn{} = conn) do + uri.host == conn.host and + uri.scheme == Atom.to_string(conn.scheme) and + uri.port == conn.port + end + + defp origin_allowed?(_check_origin, %{host: nil}, _endpoint, _conn), + do: false + defp origin_allowed?(true, uri, endpoint, _conn), + do: compare?(uri.host, host_to_binary(endpoint.config(:url)[:host])) + defp origin_allowed?(check_origin, uri, _endpoint, _conn) when is_list(check_origin), + do: origin_allowed?(uri, check_origin) + + defp origin_allowed?(uri, allowed_origins) do + %{scheme: origin_scheme, host: origin_host, port: origin_port} = uri + + Enum.any?(allowed_origins, fn {allowed_scheme, allowed_host, allowed_port} -> + compare?(origin_scheme, allowed_scheme) and + compare?(origin_port, allowed_port) and + compare_host?(origin_host, allowed_host) + end) + end + + defp compare?(request_val, allowed_val) do + is_nil(allowed_val) or request_val == allowed_val + end + + defp compare_host?(_request_host, nil), + do: true + defp compare_host?(request_host, "*." 
<> allowed_host), + do: String.ends_with?(request_host, allowed_host) + defp compare_host?(request_host, allowed_host), + do: request_host == allowed_host + + # TODO: Deprecate {:system, env_var} once we require Elixir v1.9+ + defp host_to_binary({:system, env_var}), do: host_to_binary(System.get_env(env_var)) + defp host_to_binary(host), do: host +end diff --git a/deps/phoenix/lib/phoenix/test/channel_test.ex b/deps/phoenix/lib/phoenix/test/channel_test.ex new file mode 100644 index 0000000..1b47ab8 --- /dev/null +++ b/deps/phoenix/lib/phoenix/test/channel_test.ex @@ -0,0 +1,635 @@ +defmodule Phoenix.ChannelTest do + @moduledoc """ + Conveniences for testing Phoenix channels. + + In channel tests, we interact with channels via process + communication, sending and receiving messages. It is also + common to subscribe to the same topic the channel subscribes + to, allowing us to assert if a given message was broadcast + or not. + + ## Channel testing + + To get started, define the module attribute `@endpoint` + in your test case pointing to your application endpoint. + + Then you can directly create a socket and + `subscribe_and_join/4` topics and channels: + + {:ok, _, socket} = + socket(UserSocket, "user:id", %{some_assigns: 1}) + |> subscribe_and_join(RoomChannel, "room:lobby", %{"id" => 3}) + + You usually want to set the same ID and assigns your + `UserSocket.connect/3` callback would set. Alternatively, + you can use the `connect/3` helper to call your `UserSocket.connect/3` + callback and initialize the socket with the socket id: + + {:ok, socket} = connect(UserSocket, %{"some" => "params"}, %{}) + {:ok, _, socket} = subscribe_and_join(socket, "room:lobby", %{"id" => 3}) + + Once called, `subscribe_and_join/4` will subscribe the + current test process to the "room:lobby" topic and start a + channel in another process. It returns `{:ok, reply, socket}` + or `{:error, reply}`. + + Now, in the same way the channel has a socket representing + communication it will push to the client. Our test has a + socket representing communication to be pushed to the server. + + For example, we can use the `push/3` function in the test + to push messages to the channel (it will invoke `handle_in/3`): + + push(socket, "my_event", %{"some" => "data"}) + + Similarly, we can broadcast messages from the test itself + on the topic that both test and channel are subscribed to, + triggering `handle_out/3` on the channel: + + broadcast_from(socket, "my_event", %{"some" => "data"}) + + > Note only `broadcast_from/3` and `broadcast_from!/3` are + available in tests to avoid broadcast messages to be resent + to the test process. + + While the functions above are pushing data to the channel + (server) we can use `assert_push/3` to verify the channel + pushed a message to the client: + + assert_push "my_event", %{"some" => "data"} + + Or even assert something was broadcast into pubsub: + + assert_broadcast "my_event", %{"some" => "data"} + + Finally, every time a message is pushed to the channel, + a reference is returned. We can use this reference to + assert a particular reply was sent from the server: + + ref = push(socket, "counter", %{}) + assert_reply ref, :ok, %{"counter" => 1} + + ## Checking side-effects + + Often one may want to do side-effects inside channels, + like writing to the database, and verify those side-effects + during their tests. 
+ + Imagine the following `handle_in/3` inside a channel: + + def handle_in("publish", %{"id" => id}, socket) do + Repo.get!(Post, id) |> Post.publish() |> Repo.update!() + {:noreply, socket} + end + + Because the whole communication is asynchronous, the + following test would be very brittle: + + push(socket, "publish", %{"id" => 3}) + assert Repo.get_by(Post, id: 3, published: true) + + The issue is that we have no guarantees the channel has + done processing our message after calling `push/3`. The + best solution is to assert the channel sent us a reply + before doing any other assertion. First change the + channel to send replies: + + def handle_in("publish", %{"id" => id}, socket) do + Repo.get!(Post, id) |> Post.publish() |> Repo.update!() + {:reply, :ok, socket} + end + + Then expect them in the test: + + ref = push(socket, "publish", %{"id" => 3}) + assert_reply ref, :ok + assert Repo.get_by(Post, id: 3, published: true) + + ## Leave and close + + This module also provides functions to simulate leaving + and closing a channel. Once you leave or close a channel, + because the channel is linked to the test process on join, + it will crash the test process: + + leave(socket) + ** (EXIT from #PID<...>) {:shutdown, :leave} + + You can avoid this by unlinking the channel process in + the test: + + Process.unlink(socket.channel_pid) + + Notice `leave/1` is async, so it will also return a + reference which you can use to check for a reply: + + ref = leave(socket) + assert_reply ref, :ok + + On the other hand, close is always sync and it will + return only after the channel process is guaranteed to + have been terminated: + + :ok = close(socket) + + This mimics the behaviour existing in clients. + + To assert that your channel closes or errors asynchronously, + you can monitor the channel process with the tools provided + by Elixir, and wait for the `:DOWN` message. + Imagine an implementation of the `handle_info/2` function + that closes the channel when it receives `:some_message`: + + def handle_info(:some_message, socket) do + {:stop, :normal, socket} + end + + In your test, you can assert that the close happened by: + + Process.monitor(socket.channel_pid) + send(socket.channel_pid, :some_message) + assert_receive {:DOWN, _, _, _, :normal} + + """ + + alias Phoenix.Socket + alias Phoenix.Socket.{Broadcast, Message, Reply} + alias Phoenix.Channel.Server + + defmodule NoopSerializer do + @behaviour Phoenix.Socket.Serializer + @moduledoc false + + def fastlane!(%Broadcast{} = msg) do + %Message{ + topic: msg.topic, + event: msg.event, + payload: msg.payload + } + end + + def encode!(%Reply{} = reply), do: reply + def encode!(%Message{} = msg), do: msg + def decode!(message, _opts), do: message + end + + @doc false + defmacro __using__(_) do + IO.warn """ + Using Phoenix.ChannelTest is deprecated, instead of: + + use Phoenix.ChannelTest + + do: + + import Phoenix.ChannelTest + """, Macro.Env.stacktrace(__CALLER__) + + quote do + import Phoenix.ChannelTest + end + end + + @doc """ + Builds a socket for the given `socket_module`. + + The socket is then used to subscribe and join channels. + Use this function when you want to create a blank socket + to pass to functions like `UserSocket.connect/3`. + + Otherwise, use `socket/3` if you want to build a socket with + existing id and assigns. 
+ + ## Examples + + socket(MyApp.UserSocket) + + """ + defmacro socket(socket_module) do + build_socket(socket_module, nil, [], __CALLER__) + end + + @doc """ + Builds a socket for the given `socket_module` with given id and assigns. + + ## Examples + + socket(MyApp.UserSocket, "user_id", %{some: :assign}) + + """ + defmacro socket(socket_module, socket_id, socket_assigns) do + build_socket(socket_module, socket_id, socket_assigns, __CALLER__) + end + + defp build_socket(socket, id, assigns, caller) do + if endpoint = Module.get_attribute(caller.module, :endpoint) do + quote do + %Socket{ + assigns: Enum.into(unquote(assigns), %{}), + endpoint: unquote(endpoint), + handler: unquote(socket || first_socket!(endpoint)), + id: unquote(id), + pubsub_server: unquote(endpoint).config(:pubsub_server), + serializer: NoopSerializer, + transport: :channel_test, + transport_pid: self() + } + end + else + raise "module attribute @endpoint not set for socket/2" + end + end + + @doc false + @deprecated "Phoenix.ChannelTest.socket/0 is deprecated, please call socket/1 instead" + defmacro socket() do + build_socket(nil, nil, [], __CALLER__) + end + + @doc false + @deprecated "Phoenix.ChannelTest.socket/2 is deprecated, please call socket/3 instead" + defmacro socket(id, assigns) do + build_socket(nil, id, assigns, __CALLER__) + end + + defp first_socket!(endpoint) do + case endpoint.__sockets__ do + [] -> raise ArgumentError, "#{inspect endpoint} has no socket declaration" + [{_, socket, _} | _] -> socket + end + end + + @doc """ + Initiates a transport connection for the socket handler. + + Useful for testing UserSocket authentication. Returns + the result of the handler's `connect/3` callback. + """ + defmacro connect(handler, params, connect_info \\ quote(do: %{})) do + if endpoint = Module.get_attribute(__CALLER__.module, :endpoint) do + quote do + unquote(__MODULE__).__connect__(unquote(endpoint), unquote(handler), unquote(params), unquote(connect_info)) + end + else + raise "module attribute @endpoint not set for socket/2" + end + end + + @doc false + def __connect__(endpoint, handler, params, connect_info) do + map = %{ + endpoint: endpoint, + transport: :channel_test, + options: [serializer: [{NoopSerializer, "~> 1.0.0"}]], + params: __stringify__(params), + connect_info: connect_info + } + + with {:ok, state} <- handler.connect(map), + {:ok, {_, socket}} = handler.init(state), + do: {:ok, socket} + end + + @doc "See `subscribe_and_join!/4`." + def subscribe_and_join!(%Socket{} = socket, topic) when is_binary(topic) do + subscribe_and_join!(socket, nil, topic, %{}) + end + + @doc "See `subscribe_and_join!/4`." + def subscribe_and_join!(%Socket{} = socket, topic, payload) + when is_binary(topic) and is_map(payload) do + subscribe_and_join!(socket, nil, topic, payload) + end + + @doc """ + Same as `subscribe_and_join/4`, but returns either the socket + or throws an error. + + This is helpful when you are not testing joining the channel + and just need the socket. + """ + def subscribe_and_join!(%Socket{} = socket, channel, topic, payload \\ %{}) + when is_atom(channel) and is_binary(topic) and is_map(payload) do + case subscribe_and_join(socket, channel, topic, payload) do + {:ok, _, socket} -> socket + {:error, error} -> raise "could not join channel, got error: #{inspect(error)}" + end + end + + @doc "See `subscribe_and_join/4`." + def subscribe_and_join(%Socket{} = socket, topic) when is_binary(topic) do + subscribe_and_join(socket, nil, topic, %{}) + end + + @doc "See `subscribe_and_join/4`." 
+ def subscribe_and_join(%Socket{} = socket, topic, payload) + when is_binary(topic) and is_map(payload) do + subscribe_and_join(socket, nil, topic, payload) + end + + @doc """ + Subscribes to the given topic and joins the channel + under the given topic and payload. + + By subscribing to the topic, we can use `assert_broadcast/3` + to verify a message has been sent through the pubsub layer. + + By joining the channel, we can interact with it directly. + The given channel is joined in a separate process which is + linked to the test process. + + If no channel module is provided, the socket's handler is used to + lookup the matching channel for the given topic. + + It returns `{:ok, reply, socket}` or `{:error, reply}`. + """ + def subscribe_and_join(%Socket{} = socket, channel, topic, payload \\ %{}) + when is_atom(channel) and is_binary(topic) and is_map(payload) do + socket.endpoint.subscribe(topic) + join(socket, channel, topic, payload) + end + + @doc "See `join/4`." + def join(%Socket{} = socket, topic) when is_binary(topic) do + join(socket, nil, topic, %{}) + end + + @doc "See `join/4`." + def join(%Socket{} = socket, topic, payload) when is_binary(topic) and is_map(payload) do + join(socket, nil, topic, payload) + end + + @doc """ + Joins the channel under the given topic and payload. + + The given channel is joined in a separate process + which is linked to the test process. + + It returns `{:ok, reply, socket}` or `{:error, reply}`. + """ + def join(%Socket{} = socket, channel, topic, payload \\ %{}) + when is_atom(channel) and is_binary(topic) and is_map(payload) do + message = %Message{ + event: "phx_join", + payload: __stringify__(payload), + topic: topic, + ref: System.unique_integer([:positive]) + } + + {channel, opts} = + if channel do + {channel, []} + else + match_topic_to_channel!(socket, topic) + end + + case Server.join(socket, channel, message, opts) do + {:ok, reply, pid} -> + Process.link(pid) + {:ok, reply, Server.socket(pid)} + {:error, _} = error -> + error + end + end + + @doc """ + Pushes a message into the channel. + + The triggers the `handle_in/3` callback in the channel. + + ## Examples + + iex> push(socket, "new_message", %{id: 1, content: "hello"}) + reference + + """ + @spec push(Socket.t, String.t, map()) :: reference() + def push(%Socket{} = socket, event, payload \\ %{}) do + ref = make_ref() + send(socket.channel_pid, + %Message{event: event, topic: socket.topic, ref: ref, payload: __stringify__(payload)}) + ref + end + + @doc """ + Emulates the client leaving the channel. + """ + @spec leave(Socket.t) :: reference() + def leave(%Socket{} = socket) do + push(socket, "phx_leave", %{}) + end + + @doc """ + Emulates the client closing the socket. + + Closing socket is synchronous and has a default timeout + of 5000 milliseconds. + """ + def close(%Socket{} = socket, timeout \\ 5000) do + Server.close(socket.channel_pid, timeout) + end + + @doc """ + Broadcast event from pid to all subscribers of the socket topic. + + The test process will not receive the published message. This triggers + the `handle_out/3` callback in the channel. + + ## Examples + + iex> broadcast_from(socket, "new_message", %{id: 1, content: "hello"}) + :ok + + """ + def broadcast_from(%Socket{} = socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic, transport_pid: transport_pid} = socket + Server.broadcast_from pubsub_server, transport_pid, topic, event, message + end + + @doc """ + Same as `broadcast_from/3`, but raises if broadcast fails. 
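+
+  For example (assuming a `socket` that has joined a topic, as in
+  `broadcast_from/3`):
+
+      iex> broadcast_from!(socket, "new_message", %{id: 1, content: "hello"})
+      :ok
+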
+ """ + def broadcast_from!(%Socket{} = socket, event, message) do + %{pubsub_server: pubsub_server, topic: topic, transport_pid: transport_pid} = socket + Server.broadcast_from! pubsub_server, transport_pid, topic, event, message + end + + @doc """ + Asserts the channel has pushed a message back to the client + with the given event and payload within `timeout`. + + Notice event and payload are patterns. This means one can write: + + assert_push "some_event", %{"data" => _} + + In the assertion above, we don't particularly care about + the data being sent, as long as something was sent. + + The timeout is in milliseconds and defaults to the `:assert_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). + + **NOTE:** Because event and payload are patterns, they will be matched. This + means that if you wish to assert that the received payload is equivalent to + an existing variable, you need to pin the variable in the assertion + expression. + + Good: + + expected_payload = %{foo: "bar"} + assert_push "some_event", ^expected_payload + + Bad: + + expected_payload = %{foo: "bar"} + assert_push "some_event", expected_payload + # The code above does not assert the payload matches the described map. + + """ + defmacro assert_push(event, payload, timeout \\ Application.fetch_env!(:ex_unit, :assert_receive_timeout)) do + quote do + assert_receive %Phoenix.Socket.Message{ + event: unquote(event), + payload: unquote(payload)}, unquote(timeout) + end + end + + @doc """ + Asserts the channel has not pushed a message to the client + matching the given event and payload within `timeout`. + + Like `assert_push`, the event and payload are patterns. + + The timeout is in milliseconds and defaults to the `:refute_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). + Keep in mind this macro will block the test by the + timeout value, so use it only when necessary as overuse + will certainly slow down your test suite. + """ + defmacro refute_push(event, payload, timeout \\ Application.fetch_env!(:ex_unit, :refute_receive_timeout)) do + quote do + refute_receive %Phoenix.Socket.Message{ + event: unquote(event), + payload: unquote(payload)}, unquote(timeout) + end + end + + @doc """ + Asserts the channel has replied to the given message within + `timeout`. + + Notice status and payload are patterns. This means one can write: + + ref = push(channel, "some_event") + assert_reply ref, :ok, %{"data" => _} + + In the assertion above, we don't particularly care about + the data being sent, as long as something was replied. + + The timeout is in milliseconds and defaults to the `:assert_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). + """ + defmacro assert_reply(ref, status, payload \\ Macro.escape(%{}), timeout \\ Application.fetch_env!(:ex_unit, :assert_receive_timeout)) do + quote do + ref = unquote(ref) + assert_receive %Phoenix.Socket.Reply{ + ref: ^ref, + status: unquote(status), + payload: unquote(payload)}, unquote(timeout) + end + end + + @doc """ + Asserts the channel has not replied with a matching payload within + `timeout`. + + Like `assert_reply`, the event and payload are patterns. + + The timeout is in milliseconds and defaults to the `:refute_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). + Keep in mind this macro will block the test by the + timeout value, so use it only when necessary as overuse + will certainly slow down your test suite. 
+ """ + defmacro refute_reply(ref, status, payload \\ Macro.escape(%{}), timeout \\ Application.fetch_env!(:ex_unit, :refute_receive_timeout)) do + quote do + ref = unquote(ref) + refute_receive %Phoenix.Socket.Reply{ + ref: ^ref, + status: unquote(status), + payload: unquote(payload)}, unquote(timeout) + end + end + + @doc """ + Asserts the channel has broadcast a message within `timeout`. + + Before asserting anything was broadcast, we must first + subscribe to the topic of the channel in the test process: + + @endpoint.subscribe("foo:ok") + + Now we can match on event and payload as patterns: + + assert_broadcast "some_event", %{"data" => _} + + In the assertion above, we don't particularly care about + the data being sent, as long as something was sent. + + The timeout is in milliseconds and defaults to the `:assert_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). + """ + defmacro assert_broadcast(event, payload, timeout \\ Application.fetch_env!(:ex_unit, :assert_receive_timeout)) do + quote do + assert_receive %Phoenix.Socket.Broadcast{event: unquote(event), + payload: unquote(payload)}, unquote(timeout) + end + end + + @doc """ + Asserts the channel has not broadcast a message within `timeout`. + + Like `assert_broadcast`, the event and payload are patterns. + + The timeout is in milliseconds and defaults to the `:refute_receive_timeout` + set on the `:ex_unit` application (which defaults to 100ms). + Keep in mind this macro will block the test by the + timeout value, so use it only when necessary as overuse + will certainly slow down your test suite. + """ + defmacro refute_broadcast(event, payload, timeout \\ Application.fetch_env!(:ex_unit, :refute_receive_timeout)) do + quote do + refute_receive %Phoenix.Socket.Broadcast{event: unquote(event), + payload: unquote(payload)}, unquote(timeout) + end + end + + defp match_topic_to_channel!(socket, topic) do + unless socket.handler do + raise """ + no socket handler found to lookup channel for topic #{inspect topic}. + Use connect/3 when calling subscribe_and_join/* (or subscribe_and_join!/*) + without a channel, for example: + + {:ok, socket} = connect(UserSocket, %{}, %{}) + socket = subscribe_and_join!(socket, "foo:bar", %{}) + + """ + end + + case socket.handler.__channel__(topic) do + {channel, opts} when is_atom(channel) -> {channel, opts} + _ -> raise "no channel found for topic #{inspect topic} in #{inspect socket.handler}" + end + end + + @doc false + def __stringify__(%{__struct__: _} = struct), + do: struct + def __stringify__(%{} = params), + do: Enum.into(params, %{}, &stringify_kv/1) + def __stringify__(other), + do: other + + defp stringify_kv({k, v}), + do: {to_string(k), __stringify__(v)} +end diff --git a/deps/phoenix/lib/phoenix/test/conn_test.ex b/deps/phoenix/lib/phoenix/test/conn_test.ex new file mode 100644 index 0000000..3685be9 --- /dev/null +++ b/deps/phoenix/lib/phoenix/test/conn_test.ex @@ -0,0 +1,701 @@ +defmodule Phoenix.ConnTest do + @moduledoc """ + Conveniences for testing Phoenix endpoints and connection related helpers. + + You likely want to use this module or make it part of your `ExUnit.CaseTemplate`. + Once used, this module automatically imports all functions defined here as + well as the functions in `Plug.Conn`. + + ## Endpoint testing + + `Phoenix.ConnTest` typically works against endpoints. 
That's the preferred way + to test anything that your router dispatches to: + + @endpoint MyAppWeb.Endpoint + + test "says welcome on the home page" do + conn = get(build_conn(), "/") + assert conn.resp_body =~ "Welcome!" + end + + test "logs in" do + conn = post(build_conn(), "/login", [username: "john", password: "doe"]) + assert conn.resp_body =~ "Logged in!" + end + + The `@endpoint` module attribute contains the endpoint under testing, + most commonly your application endpoint itself. If you are using the + MyApp.ConnCase generated by Phoenix, it is automatically set for you. + + As in your router and controllers, the connection is the main abstraction + in testing. `build_conn()` returns a new connection and functions in this + module can be used to manipulate the connection before dispatching + to the endpoint. + + For example, one could set the accepts header for json requests as + follows: + + build_conn() + |> put_req_header("accept", "application/json") + |> get("/") + + You can also create your own helpers, such as `json_conn()` that uses + `build_conn/0` and `put_req_header/3`, so you avoid repeating the connection + setup throughout your tests. + + ## Controller testing + + The functions in this module can also be used for controller testing. + While endpoint testing is preferred over controller testing, especially + since the controller in Phoenix plays an integration role between your + domain and your views, unit testing controllers may be helpful in some + situations. + + For such cases, you need to set the `@endpoint` attribute to your controller + and pass an atom representing the action to dispatch: + + @endpoint MyAppWeb.HomeController + + test "says welcome on the home page" do + conn = get(build_conn(), :index) + assert conn.resp_body =~ "Welcome!" + end + + Keep in mind that, once the `@endpoint` variable is set, all tests after + setting it will be affected. + + ## Views testing + + Under other circumstances, you may be testing a view or another layer that + requires a connection for processing. For such cases, a connection can be + created using the `build_conn/3` helper: + + MyApp.UserView.render("hello.html", conn: build_conn(:get, "/")) + + While `build_conn/0` returns a connection with no request information to it, + `build_conn/3` returns a connection with the given request information already + filled in. + + ## Recycling + + Browsers implement a storage by using cookies. When a cookie is set in the + response, the browser stores it and sends it in the next request. + + To emulate this behaviour, this module provides the idea of recycling. + The `recycle/1` function receives a connection and returns a new connection, + similar to the one returned by `build_conn/0` with all the response cookies + from the previous connection defined as request headers. This is useful when + testing multiple routes that require cookies or session to work. + + Keep in mind Phoenix will automatically recycle the connection between + dispatches. 
This usually works out well most times, but it may discard + information if you are modifying the connection before the next dispatch: + + # No recycling as the connection is fresh + conn = get(build_conn(), "/") + + # The connection is recycled, creating a new one behind the scenes + conn = post(conn, "/login") + + # We can also recycle manually in case we want custom headers + conn = + conn + |> recycle() + |> put_req_header("x-special", "nice") + + # No recycling as we did it explicitly + conn = delete(conn, "/logout") + + Recycling also recycles the "accept" and "authorization" headers, + as well as peer data information. + """ + + @doc false + defmacro __using__(_) do + IO.warn """ + Using Phoenix.ConnTest is deprecated, instead of: + + use Phoenix.ConnTest + + do: + + import Plug.Conn + import Phoenix.ConnTest + """, Macro.Env.stacktrace(__CALLER__) + + quote do + import Plug.Conn + import Phoenix.ConnTest + end + end + + alias Plug.Conn + import ExUnit.Assertions, only: [flunk: 1] + + @doc """ + Creates a connection to be used in upcoming requests. + """ + @spec build_conn() :: Conn.t + def build_conn() do + build_conn(:get, "/", nil) + end + + @doc """ + Creates a connection to be used in upcoming requests + with a preset method, path and body. + + This is useful when a specific connection is required + for testing a plug or a particular function. + """ + @spec build_conn(atom | binary, binary, binary | list | map | nil) :: Conn.t + def build_conn(method, path, params_or_body \\ nil) do + Plug.Adapters.Test.Conn.conn(%Conn{}, method, path, params_or_body) + |> Conn.put_private(:plug_skip_csrf_protection, true) + |> Conn.put_private(:phoenix_recycled, true) + end + + @http_methods [:get, :post, :put, :patch, :delete, :options, :connect, :trace, :head] + + for method <- @http_methods do + @doc """ + Dispatches to the current endpoint. + + See `dispatch/5` for more information. + """ + defmacro unquote(method)(conn, path_or_action, params_or_body \\ nil) do + method = unquote(method) + quote do + Phoenix.ConnTest.dispatch(unquote(conn), @endpoint, unquote(method), + unquote(path_or_action), unquote(params_or_body)) + end + end + end + + @doc """ + Dispatches the connection to the given endpoint. + + When invoked via `get/3`, `post/3` and friends, the endpoint + is automatically retrieved from the `@endpoint` module + attribute, otherwise it must be given as an argument. + + The connection will be configured with the given `method`, + `path_or_action` and `params_or_body`. + + If `path_or_action` is a string, it is considered to be the + request path and stored as so in the connection. If an atom, + it is assumed to be an action and the connection is dispatched + to the given action. + + ## Parameters and body + + This function, as well as `get/3`, `post/3` and friends, accepts the + request body or parameters as last argument: + + get(build_conn(), "/", some: "param") + get(build_conn(), "/", "some=param&url=encoded") + + The allowed values are: + + * `nil` - meaning there is no body + + * a binary - containing a request body. For such cases, `:headers` + must be given as option with a content-type + + * a map or list - containing the parameters which will automatically + set the content-type to multipart. 
The map or list may contain + other lists or maps and all entries will be normalized to string + keys + + * a struct - unlike other maps, a struct will be passed through as-is + without normalizing its entries + """ + def dispatch(conn, endpoint, method, path_or_action, params_or_body \\ nil) + def dispatch(%Plug.Conn{} = conn, endpoint, method, path_or_action, params_or_body) do + if is_nil(endpoint) do + raise "no @endpoint set in test case" + end + + if is_binary(params_or_body) and is_nil(List.keyfind(conn.req_headers, "content-type", 0)) do + raise ArgumentError, "a content-type header is required when setting " <> + "a binary body in a test connection" + end + + conn + |> ensure_recycled() + |> dispatch_endpoint(endpoint, method, path_or_action, params_or_body) + |> Conn.put_private(:phoenix_recycled, false) + |> from_set_to_sent() + end + def dispatch(conn, _endpoint, method, _path_or_action, _params_or_body) do + raise ArgumentError, "expected first argument to #{method} to be a " <> + "%Plug.Conn{}, got #{inspect conn}" + end + + defp dispatch_endpoint(conn, endpoint, method, path, params_or_body) when is_binary(path) do + conn + |> Plug.Adapters.Test.Conn.conn(method, path, params_or_body) + |> endpoint.call(endpoint.init([])) + end + + defp dispatch_endpoint(conn, endpoint, method, action, params_or_body) when is_atom(action) do + conn + |> Plug.Adapters.Test.Conn.conn(method, "/", params_or_body) + |> endpoint.call(endpoint.init(action)) + end + + defp from_set_to_sent(%Conn{state: :set} = conn), do: Conn.send_resp(conn) + defp from_set_to_sent(conn), do: conn + + @doc """ + Inits a session used exclusively for testing. + """ + @spec init_test_session(Conn.t, map | keyword) :: Conn.t + defdelegate init_test_session(conn, session), to: Plug.Test + + @doc """ + Puts a request cookie. + """ + @spec put_req_cookie(Conn.t, binary, binary) :: Conn.t + defdelegate put_req_cookie(conn, key, value), to: Plug.Test + + @doc """ + Deletes a request cookie. + """ + @spec delete_req_cookie(Conn.t, binary) :: Conn.t + defdelegate delete_req_cookie(conn, key), to: Plug.Test + + @doc """ + Fetches the flash storage. + """ + @spec fetch_flash(Conn.t) :: Conn.t + defdelegate fetch_flash(conn), to: Phoenix.Controller + + @doc """ + Gets the whole flash storage. + """ + @spec get_flash(Conn.t) :: map + defdelegate get_flash(conn), to: Phoenix.Controller + + @doc """ + Gets the given key from the flash storage. + """ + @spec get_flash(Conn.t, term) :: term + defdelegate get_flash(conn, key), to: Phoenix.Controller + + @doc """ + Puts the given value under key in the flash storage. + """ + @spec put_flash(Conn.t, term, term) :: Conn.t + defdelegate put_flash(conn, key, value), to: Phoenix.Controller + + @doc """ + Clears up the flash storage. + """ + @spec clear_flash(Conn.t) :: Conn.t + defdelegate clear_flash(conn), to: Phoenix.Controller + + @doc """ + Returns the content type as long as it matches the given format. 
+ + ## Examples + + # Assert we have an html response with utf-8 charset + assert response_content_type(conn, :html) =~ "charset=utf-8" + + """ + @spec response_content_type(Conn.t, atom) :: String.t | no_return + def response_content_type(conn, format) when is_atom(format) do + case Conn.get_resp_header(conn, "content-type") do + [] -> + raise "no content-type was set, expected a #{format} response" + [h] -> + if response_content_type?(h, format) do + h + else + raise "expected content-type for #{format}, got: #{inspect h}" + end + [_|_] -> + raise "more than one content-type was set, expected a #{format} response" + end + end + + defp response_content_type?(header, format) do + case parse_content_type(header) do + {part, subpart} -> + format = Atom.to_string(format) + format in MIME.extensions(part <> "/" <> subpart) or + format == subpart or String.ends_with?(subpart, "+" <> format) + _ -> + false + end + end + + defp parse_content_type(header) do + case Plug.Conn.Utils.content_type(header) do + {:ok, part, subpart, _params} -> + {part, subpart} + _ -> + false + end + end + + @doc """ + Asserts the given status code and returns the response body + if one was set or sent. + + ## Examples + + conn = get(build_conn(), "/") + assert response(conn, 200) =~ "hello world" + + """ + @spec response(Conn.t, status :: integer | atom) :: binary | no_return + def response(%Conn{state: :unset}, _status) do + raise """ + expected connection to have a response but no response was set/sent. + Please verify that you assign to "conn" after a request: + + conn = get(conn, "/") + assert html_response(conn) =~ "Hello" + """ + end + + def response(%Conn{status: status, resp_body: body}, given) do + given = Plug.Conn.Status.code(given) + + if given == status do + body + else + raise "expected response with status #{given}, got: #{status}, with body:\n#{inspect(body)}" + end + end + + @doc """ + Asserts the given status code, that we have an html response and + returns the response body if one was set or sent. + + ## Examples + + assert html_response(conn, 200) =~ "" + """ + @spec html_response(Conn.t, status :: integer | atom) :: String.t | no_return + def html_response(conn, status) do + body = response(conn, status) + _ = response_content_type(conn, :html) + body + end + + @doc """ + Asserts the given status code, that we have a text response and + returns the response body if one was set or sent. + + ## Examples + + assert text_response(conn, 200) =~ "hello" + """ + @spec text_response(Conn.t, status :: integer | atom) :: String.t | no_return + def text_response(conn, status) do + body = response(conn, status) + _ = response_content_type(conn, :text) + body + end + + @doc """ + Asserts the given status code, that we have a json response and + returns the decoded JSON response if one was set or sent. + + ## Examples + + body = json_response(conn, 200) + assert "can't be blank" in body["errors"] + + """ + @spec json_response(Conn.t, status :: integer | atom) :: map | no_return + def json_response(conn, status) do + body = response(conn, status) + _ = response_content_type(conn, :json) + + Phoenix.json_library().decode!(body) + end + + @doc """ + Returns the location header from the given redirect response. + + Raises if the response does not match the redirect status code + (defaults to 302). 
+ + ## Examples + + assert redirected_to(conn) =~ "/foo/bar" + assert redirected_to(conn, 301) =~ "/foo/bar" + assert redirected_to(conn, :moved_permanently) =~ "/foo/bar" + """ + @spec redirected_to(Conn.t, status :: non_neg_integer) :: String.t + def redirected_to(conn, status \\ 302) + + def redirected_to(%Conn{state: :unset}, _status) do + raise "expected connection to have redirected but no response was set/sent" + end + + def redirected_to(conn, status) when is_atom(status) do + redirected_to(conn, Plug.Conn.Status.code(status)) + end + + def redirected_to(%Conn{status: status} = conn, status) do + location = Conn.get_resp_header(conn, "location") |> List.first + location || raise "no location header was set on redirected_to" + end + + def redirected_to(conn, status) do + raise "expected redirection with status #{status}, got: #{conn.status}" + end + + @doc """ + Recycles the connection. + + Recycling receives a connection and returns a new connection, + containing cookies and relevant information from the given one. + + This emulates behaviour performed by browsers where cookies + returned in the response are available in following requests. + + By default, only the headers "accept", "accept-language", and + "authorization" are recycled. However, a custom set of headers + can be specified by passing a list of strings representing its + names as the second argument of the function. + + Note `recycle/1` is automatically invoked when dispatching + to the endpoint, unless the connection has already been + recycled. + """ + @spec recycle(Conn.t, [String.t]) :: Conn.t + def recycle(conn, headers \\ ~w(accept accept-language authorization)) do + build_conn() + |> Map.put(:host, conn.host) + |> Plug.Test.recycle_cookies(conn) + |> Plug.Test.put_peer_data(Plug.Conn.get_peer_data(conn)) + |> copy_headers(conn.req_headers, headers) + end + + defp copy_headers(conn, headers, copy) do + headers = for {k, v} <- headers, k in copy, do: {k, v} + %{conn | req_headers: headers ++ conn.req_headers} + end + + @doc """ + Ensures the connection is recycled if it wasn't already. + + See `recycle/1` for more information. + """ + @spec ensure_recycled(Conn.t) :: Conn.t + def ensure_recycled(conn) do + if conn.private[:phoenix_recycled] do + conn + else + recycle(conn) + end + end + + @doc """ + Calls the Endpoint and Router pipelines. + + Useful for unit testing Plugs where Endpoint and/or router pipeline + plugs are required for proper setup. + + Note the use of `get("/")` following `bypass_through` in the examples below. + To execute the plug pipelines, you must issue a request against the router. + Most often, you can simply send a GET request against the root path, but you + may also specify a different method or path which your pipelines may operate + against. + + ## Examples + + For example, imagine you are testing an authentication plug in + isolation, but you need to invoke the Endpoint plugs and router + pipelines to set up session and flash related dependencies. + One option is to invoke an existing route that uses the proper + pipelines. 
You can do so by passing the connection and the + router name to `bypass_through`: + + conn = + conn + |> bypass_through(MyAppWeb.Router) + |> get("/some_url") + |> MyApp.RequireAuthentication.call([]) + assert conn.halted + + You can also specify which pipelines you want to run: + + conn = + conn + |> bypass_through(MyAppWeb.Router, [:browser]) + |> get("/") + |> MyApp.RequireAuthentication.call([]) + assert conn.halted + + Alternatively, you could only invoke the Endpoint's plugs: + + conn = + conn + |> bypass_through() + |> get("/") + |> MyApp.RequireAuthentication.call([]) + + assert conn.halted + """ + @spec bypass_through(Conn.t) :: Conn.t + def bypass_through(conn) do + Plug.Conn.put_private(conn, :phoenix_bypass, :all) + end + + @doc """ + Calls the Endpoint and Router pipelines for the current route. + + See `bypass_through/1`. + """ + @spec bypass_through(Conn.t, module) :: Conn.t + def bypass_through(conn, router) do + Plug.Conn.put_private(conn, :phoenix_bypass, {router, :current}) + end + + @doc """ + Calls the Endpoint and the given Router pipelines. + + See `bypass_through/1`. + """ + @spec bypass_through(Conn.t, module, atom | list) :: Conn.t + def bypass_through(conn, router, pipelines) do + Plug.Conn.put_private(conn, :phoenix_bypass, {router, List.wrap(pipelines)}) + end + + @doc """ + Returns the matched params from the URL the connection was redirected to. + + Uses the provided `%Plug.Conn{}`s router matched in the previous request. + Raises if the response's location header is not set. + + ## Examples + + assert redirected_to(conn) =~ "/posts/123" + assert %{id: "123"} = redirected_params(conn) + """ + @spec redirected_params(Conn.t) :: map + def redirected_params(%Plug.Conn{} = conn) do + router = Phoenix.Controller.router_module(conn) + %URI{path: path, host: host} = conn |> redirected_to() |> URI.parse() + + case Phoenix.Router.route_info(router, "GET", path, host || conn.host) do + :error -> + raise Phoenix.Router.NoRouteError, conn: conn, router: router + %{path_params: path_params} -> + Enum.into(path_params, %{}, fn {key, val} -> {String.to_atom(key), val} end) + end + end + + @doc """ + Returns the matched params of the URL for the `%Plug.Conn{}`'s router. + + Useful for extracting path params out of returned URLs, such as those + returned by `Phoenix.LiveViewTest`'s redirected results. + + ## Examples + + assert {:error, {:redirect, %{to: "/posts/123" = to}}} = live(conn, "/path") + assert %{id: "123"} = path_params(conn, to) + """ + @spec path_params(Conn.t, String.t) :: map + def path_params(%Plug.Conn{} = conn, to) when is_binary(to) do + router = Phoenix.Controller.router_module(conn) + + case Phoenix.Router.route_info(router, "GET", to, conn.host) do + %{path_params: path_params} -> + Enum.into(path_params, %{}, fn {key, val} -> {String.to_atom(key), val} end) + + :error -> + raise Phoenix.Router.NoRouteError, conn: conn, router: router + end + end + + @doc """ + Asserts an error was wrapped and sent with the given status. + + Useful for testing actions that you expect raise an error and have + the response wrapped in an HTTP status, with content usually rendered + by your MyApp.ErrorView. + + The function accepts a status either as an integer HTTP status or + atom, such as `404` or `:not_found`. The list of allowed atoms is available + in `Plug.Conn.Status`. 
If an error is raised, a 3-tuple of the wrapped + response is returned matching the status, headers, and body of the response: + + {404, [{"content-type", "text/html"} | _], "Page not found"} + + ## Examples + + assert_error_sent :not_found, fn -> + get(build_conn(), "/users/not-found") + end + + response = assert_error_sent 404, fn -> + get(build_conn(), "/users/not-found") + end + assert {404, [_h | _t], "Page not found"} = response + """ + @spec assert_error_sent(integer | atom, function) :: {integer, list, term} + def assert_error_sent(status_int_or_atom, func) do + expected_status = Plug.Conn.Status.code(status_int_or_atom) + discard_previously_sent() + result = + func + |> wrap_request() + |> receive_response(expected_status) + + discard_previously_sent() + result + end + + defp receive_response({:ok, conn}, expected_status) do + if conn.state == :sent do + flunk "expected error to be sent as #{expected_status} status, but response sent #{conn.status} without error" + else + flunk "expected error to be sent as #{expected_status} status, but no error happened" + end + end + defp receive_response({:error, {_kind, exception, stack}}, expected_status) do + receive do + {ref, {^expected_status, headers, body}} when is_reference(ref) -> + {expected_status, headers, body} + + {ref, {sent_status, _headers, _body}} when is_reference(ref) -> + reraise ExUnit.AssertionError.exception(""" + expected error to be sent as #{expected_status} status, but got #{sent_status} from: + + #{Exception.format_banner(:error, exception)} + """), stack + + after 0 -> + reraise ExUnit.AssertionError.exception(""" + expected error to be sent as #{expected_status} status, but got an error with no response from: + + #{Exception.format_banner(:error, exception)} + """), stack + end + end + + defp discard_previously_sent() do + receive do + {ref, {_, _, _}} when is_reference(ref) -> discard_previously_sent() + {:plug_conn, :sent} -> discard_previously_sent() + after + 0 -> :ok + end + end + + defp wrap_request(func) do + try do + {:ok, func.()} + catch + kind, error -> {:error, {kind, error, __STACKTRACE__}} + end + end +end diff --git a/deps/phoenix/lib/phoenix/token.ex b/deps/phoenix/lib/phoenix/token.ex new file mode 100644 index 0000000..f6cf12c --- /dev/null +++ b/deps/phoenix/lib/phoenix/token.ex @@ -0,0 +1,246 @@ +defmodule Phoenix.Token do + @moduledoc """ + Tokens provide a way to generate and verify bearer + tokens for use in Channels or API authentication. + + The data stored in the token is signed to prevent tampering + but not encrypted. This means it is safe to store identification + information (such as user IDs) but should not be used to store + confidential information (such as credit card numbers). + + ## Example + + When generating a unique token for use in an API or Channel + it is advised to use a unique identifier for the user, typically + the id from a database. For example: + + iex> user_id = 1 + iex> token = Phoenix.Token.sign(MyAppWeb.Endpoint, "user auth", user_id) + iex> Phoenix.Token.verify(MyAppWeb.Endpoint, "user auth", token, max_age: 86400) + {:ok, 1} + + In that example we have a user's id, we generate a token and + verify it using the secret key base configured in the given + `endpoint`. We guarantee the token will only be valid for one day + by setting a max age (recommended). 
+ + The first argument to both `sign/4` and `verify/4` can be one of: + + * the module name of a Phoenix endpoint (shown above) - where + the secret key base is extracted from the endpoint + * `Plug.Conn` - where the secret key base is extracted from the + endpoint stored in the connection + * `Phoenix.Socket` - where the secret key base is extracted from + the endpoint stored in the socket + * a string, representing the secret key base itself. A key base + with at least 20 randomly generated characters should be used + to provide adequate entropy + + The second argument is a [cryptographic salt](https://en.wikipedia.org/wiki/Salt_(cryptography)) + which must be the same in both calls to `sign/4` and `verify/4`. + For instance, it may be called "user auth" and treated as namespace + when generating a token that will be used to authenticate users on + channels or on your APIs. + + The third argument can be any term (string, int, list, etc.) + that you wish to codify into the token. Upon valid verification, + this same term will be extracted from the token. + + ## Usage + + Once a token is signed, we can send it to the client in multiple ways. + + One is via the meta tag: + + <%= tag :meta, name: "channel_token", + content: Phoenix.Token.sign(@conn, "user auth", @current_user.id) %> + + Or an endpoint that returns it: + + def create(conn, params) do + user = User.create(params) + render(conn, "user.json", + %{token: Phoenix.Token.sign(conn, "user auth", user.id), user: user}) + end + + Once the token is sent, the client may now send it back to the server + as an authentication mechanism. For example, we can use it to authenticate + a user on a Phoenix channel: + + defmodule MyApp.UserSocket do + use Phoenix.Socket + + def connect(%{"token" => token}, socket, _connect_info) do + case Phoenix.Token.verify(socket, "user auth", token, max_age: 86400) do + {:ok, user_id} -> + socket = assign(socket, :user, Repo.get!(User, user_id)) + {:ok, socket} + {:error, _} -> + :error + end + end + + def connect(_params, _socket, _connect_info), do: :error + end + + In this example, the phoenix.js client will send the token in the + `connect` command which is then validated by the server. + + `Phoenix.Token` can also be used for validating APIs, handling + password resets, e-mail confirmation and more. + """ + + require Logger + + @doc """ + Encodes and signs data into a token you can send to clients. + + ## Options + + * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 1000 + * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 32 + * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to `:sha256` + * `:signed_at` - set the timestamp of the token in seconds. + Defaults to `System.system_time(:second)` + * `:max_age` - the default maximum age of the token. Defaults to + 86400 seconds (1 day) and it may be overridden on verify/4. + + """ + def sign(context, salt, data, opts \\ []) when is_binary(salt) do + context + |> get_key_base() + |> Plug.Crypto.sign(salt, data, opts) + end + + @doc """ + Encodes, encrypts, and signs data into a token you can send to clients. + + ## Options + + * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. 
Defaults to 1000 + * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 32 + * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to `:sha256` + * `:signed_at` - set the timestamp of the token in seconds. + Defaults to `System.system_time(:second)` + + """ + def encrypt(context, secret, data, opts \\ []) when is_binary(secret) do + context + |> get_key_base() + |> Plug.Crypto.encrypt(secret, data, opts) + end + + @doc """ + Decodes the original data from the token and verifies its integrity. + + ## Examples + + In this scenario we will create a token, sign it, then provide it to a client + application. The client will then use this token to authenticate requests for + resources from the server. See `Phoenix.Token` summary for more info about + creating tokens. + + iex> user_id = 99 + iex> secret = "kjoy3o1zeidquwy1398juxzldjlksahdk3" + iex> namespace = "user auth" + iex> token = Phoenix.Token.sign(secret, namespace, user_id) + + The mechanism for passing the token to the client is typically through a + cookie, a JSON response body, or HTTP header. For now, assume the client has + received a token it can use to validate requests for protected resources. + + When the server receives a request, it can use `verify/4` to determine if it + should provide the requested resources to the client: + + iex> Phoenix.Token.verify(secret, namespace, token, max_age: 86400) + {:ok, 99} + + In this example, we know the client sent a valid token because `verify/4` + returned a tuple of type `{:ok, user_id}`. The server can now proceed with + the request. + + However, if the client had sent an expired token, an invalid token, or `nil`, + `verify/4` would have returned an error instead: + + iex> Phoenix.Token.verify(secret, namespace, expired, max_age: 86400) + {:error, :expired} + + iex> Phoenix.Token.verify(secret, namespace, invalid, max_age: 86400) + {:error, :invalid} + + iex> Phoenix.Token.verify(secret, namespace, nil, max_age: 86400) + {:error, :missing} + + ## Options + + * `:max_age` - verifies the token only if it has been generated + "max age" ago in seconds. A reasonable value is 1 day (86400 + seconds) + * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 1000 + * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 32 + * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to `:sha256` + + """ + def verify(context, salt, token, opts \\ []) when is_binary(salt) do + context + |> get_key_base() + |> Plug.Crypto.verify(salt, token, opts) + end + + @doc """ + Decrypts the original data from the token and verifies its integrity. + + ## Options + + * `:max_age` - verifies the token only if it has been generated + "max age" ago in seconds. Defaults to the max age signed in the + token (86400) + * `:key_iterations` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 1000 + * `:key_length` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. Defaults to 32 + * `:key_digest` - option passed to `Plug.Crypto.KeyGenerator` + when generating the encryption and signing keys. 
Defaults to `:sha256` + + """ + def decrypt(context, secret, token, opts \\ []) when is_binary(secret) do + context + |> get_key_base() + |> Plug.Crypto.decrypt(secret, token, opts) + end + + ## Helpers + + defp get_key_base(%Plug.Conn{} = conn), + do: conn |> Phoenix.Controller.endpoint_module() |> get_endpoint_key_base() + + defp get_key_base(%Phoenix.Socket{} = socket), + do: get_endpoint_key_base(socket.endpoint) + + defp get_key_base(endpoint) when is_atom(endpoint), + do: get_endpoint_key_base(endpoint) + + defp get_key_base(string) when is_binary(string) and byte_size(string) >= 20, + do: string + + defp get_endpoint_key_base(endpoint) do + endpoint.config(:secret_key_base) || + raise """ + no :secret_key_base configuration found in #{inspect(endpoint)}. + Ensure your environment has the necessary mix configuration. For example: + + config :my_app, MyAppWeb.Endpoint, + secret_key_base: ... + + """ + end +end diff --git a/deps/phoenix/lib/phoenix/transports/long_poll.ex b/deps/phoenix/lib/phoenix/transports/long_poll.ex new file mode 100644 index 0000000..4e0c186 --- /dev/null +++ b/deps/phoenix/lib/phoenix/transports/long_poll.ex @@ -0,0 +1,210 @@ +defmodule Phoenix.Transports.LongPoll do + @moduledoc false + @behaviour Plug + + import Plug.Conn + alias Phoenix.Socket.{V1, V2, Transport} + + def default_config() do + [ + window_ms: 10_000, + path: "/longpoll", + pubsub_timeout_ms: 2_000, + serializer: [{V1.JSONSerializer, "~> 1.0.0"}, {V2.JSONSerializer, "~> 2.0.0"}], + transport_log: false, + crypto: [max_age: 1_209_600] + ] + end + + def init(opts), do: opts + + def call(conn, {endpoint, handler, opts}) do + conn + |> fetch_query_params() + |> put_resp_header("access-control-allow-origin", "*") + |> Transport.code_reload(endpoint, opts) + |> Transport.transport_log(opts[:transport_log]) + |> Transport.force_ssl(handler, endpoint, opts) + |> Transport.check_origin(handler, endpoint, opts, &status_json/1) + |> dispatch(endpoint, handler, opts) + end + + defp dispatch(%{halted: true} = conn, _, _, _) do + conn + end + + # Responds to pre-flight CORS requests with Allow-Origin-* headers. + # We allow cross-origin requests as we always validate the Origin header. + defp dispatch(%{method: "OPTIONS"} = conn, _, _, _) do + headers = get_req_header(conn, "access-control-request-headers") |> Enum.join(", ") + + conn + |> put_resp_header("access-control-allow-headers", headers) + |> put_resp_header("access-control-allow-methods", "get, post, options") + |> put_resp_header("access-control-max-age", "3600") + |> send_resp(:ok, "") + end + + # Starts a new session or listen to a message if one already exists. + defp dispatch(%{method: "GET"} = conn, endpoint, handler, opts) do + case resume_session(conn.params, endpoint, opts) do + {:ok, server_ref} -> + listen(conn, server_ref, endpoint, opts) + :error -> + new_session(conn, endpoint, handler, opts) + end + end + + # Publish the message. + defp dispatch(%{method: "POST"} = conn, endpoint, _, opts) do + case resume_session(conn.params, endpoint, opts) do + {:ok, server_ref} -> + publish(conn, server_ref, endpoint, opts) + :error -> + conn |> put_status(:gone) |> status_json() + end + end + + # All other requests should fail. 
+ defp dispatch(conn, _, _, _) do + send_resp(conn, :bad_request, "") + end + + defp publish(conn, server_ref, endpoint, opts) do + case read_body(conn, []) do + {:ok, body, conn} -> + status = transport_dispatch(endpoint, server_ref, body, opts) + conn |> put_status(status) |> status_json() + + _ -> + raise Plug.BadRequestError + end + end + + defp transport_dispatch(endpoint, server_ref, body, opts) do + ref = make_ref() + broadcast_from!(endpoint, server_ref, {:dispatch, client_ref(server_ref), body, ref}) + + receive do + {:ok, ^ref} -> :ok + {:error, ^ref} -> :ok + after + opts[:window_ms] -> :request_timeout + end + end + + ## Session handling + + defp new_session(conn, endpoint, handler, opts) do + priv_topic = + "phx:lp:" + <> Base.encode64(:crypto.strong_rand_bytes(16)) + <> (System.system_time(:millisecond) |> Integer.to_string) + + keys = Keyword.get(opts, :connect_info, []) + connect_info = Transport.connect_info(conn, endpoint, keys) + arg = {endpoint, handler, opts, conn.params, priv_topic, connect_info} + spec = {Phoenix.Transports.LongPoll.Server, arg} + + case DynamicSupervisor.start_child(Phoenix.Transports.LongPoll.Supervisor, spec) do + :ignore -> + conn |> put_status(:forbidden) |> status_json() + + {:ok, server_pid} -> + data = {:v1, endpoint.config(:endpoint_id), server_pid, priv_topic} + token = sign_token(endpoint, data, opts) + conn |> put_status(:gone) |> status_token_messages_json(token, []) + end + end + + defp listen(conn, server_ref, endpoint, opts) do + ref = make_ref() + broadcast_from!(endpoint, server_ref, {:flush, client_ref(server_ref), ref}) + + {status, messages} = + receive do + {:messages, messages, ^ref} -> + {:ok, messages} + + {:now_available, ^ref} -> + broadcast_from!(endpoint, server_ref, {:flush, client_ref(server_ref), ref}) + receive do + {:messages, messages, ^ref} -> {:ok, messages} + after + opts[:window_ms] -> {:no_content, []} + end + after + opts[:window_ms] -> + {:no_content, []} + end + + conn + |> put_status(status) + |> status_token_messages_json(conn.params["token"], messages) + end + + # Retrieves the serialized `Phoenix.LongPoll.Server` pid + # by publishing a message in the encrypted private topic. 
+ defp resume_session(%{"token" => token}, endpoint, opts) do + case verify_token(endpoint, token, opts) do + {:ok, {:v1, id, pid, priv_topic}} -> + server_ref = server_ref(endpoint.config(:endpoint_id), id, pid, priv_topic) + + ref = make_ref() + :ok = subscribe(endpoint, server_ref) + broadcast_from!(endpoint, server_ref, {:subscribe, client_ref(server_ref), ref}) + + receive do + {:subscribe, ^ref} -> {:ok, server_ref} + after + opts[:pubsub_timeout_ms] -> :error + end + + _ -> + :error + end + end + + defp resume_session(_params, _endpoint, _opts), do: :error + + ## Helpers + + defp server_ref(endpoint_id, id, pid, topic) do + if endpoint_id == id and Process.alive?(pid), do: pid, else: topic + end + + defp client_ref(topic) when is_binary(topic), do: topic + defp client_ref(pid) when is_pid(pid), do: self() + + defp subscribe(endpoint, topic) when is_binary(topic), + do: Phoenix.PubSub.subscribe(endpoint.config(:pubsub_server), topic, link: true) + defp subscribe(_endpoint, pid) when is_pid(pid), + do: :ok + + defp broadcast_from!(endpoint, topic, msg) when is_binary(topic), + do: Phoenix.PubSub.broadcast_from!(endpoint.config(:pubsub_server), self(), topic, msg) + defp broadcast_from!(_endpoint, pid, msg) when is_pid(pid), + do: send(pid, msg) + + defp sign_token(endpoint, data, opts) do + Phoenix.Token.sign(endpoint, Atom.to_string(endpoint.config(:pubsub_server)), data, opts[:crypto]) + end + + defp verify_token(endpoint, signed, opts) do + Phoenix.Token.verify(endpoint, Atom.to_string(endpoint.config(:pubsub_server)), signed, opts[:crypto]) + end + + defp status_json(conn) do + send_json(conn, %{"status" => conn.status || 200}) + end + + defp status_token_messages_json(conn, token, messages) do + send_json(conn, %{"status" => conn.status || 200, "token" => token, "messages" => messages}) + end + + defp send_json(conn, data) do + conn + |> put_resp_header("content-type", "application/json; charset=utf-8") + |> send_resp(200, Phoenix.json_library().encode_to_iodata!(data)) + end +end diff --git a/deps/phoenix/lib/phoenix/transports/long_poll_server.ex b/deps/phoenix/lib/phoenix/transports/long_poll_server.ex new file mode 100644 index 0000000..6803b88 --- /dev/null +++ b/deps/phoenix/lib/phoenix/transports/long_poll_server.ex @@ -0,0 +1,145 @@ +defmodule Phoenix.Transports.LongPoll.Server do + @moduledoc false + + use GenServer, restart: :temporary + alias Phoenix.PubSub + + def start_link(arg) do + GenServer.start_link(__MODULE__, arg) + end + + def init({endpoint, handler, options, params, priv_topic, connect_info}) do + config = %{ + endpoint: endpoint, + transport: :longpoll, + options: options, + params: params, + connect_info: connect_info + } + window_ms = Keyword.fetch!(options, :window_ms) + + case handler.connect(config) do + {:ok, handler_state} -> + {:ok, handler_state} = handler.init(handler_state) + + state = %{ + buffer: [], + handler: {handler, handler_state}, + window_ms: trunc(window_ms * 1.5), + pubsub_server: endpoint.config(:pubsub_server), + priv_topic: priv_topic, + last_client_poll: now_ms(), + client_ref: nil + } + + :ok = PubSub.subscribe(state.pubsub_server, priv_topic, link: true) + schedule_inactive_shutdown(state.window_ms) + {:ok, state} + + :error -> + :ignore + + {:error, _reason} -> + :ignore + end + end + + def handle_info({:dispatch, client_ref, body, ref}, state) do + %{handler: {handler, handler_state}} = state + + case handler.handle_in({body, opcode: :text}, handler_state) do + {:reply, status, {_, reply}, handler_state} -> + state = %{state | 
handler: {handler, handler_state}} + status = if status == :ok, do: :ok, else: :error + broadcast_from!(state, client_ref, {status, ref}) + publish_reply(state, reply) + + {:ok, handler_state} -> + state = %{state | handler: {handler, handler_state}} + broadcast_from!(state, client_ref, {:ok, ref}) + {:noreply, state} + + {:stop, reason, handler_state} -> + state = %{state | handler: {handler, handler_state}} + broadcast_from!(state, client_ref, {:error, ref}) + {:stop, reason, state} + end + end + + def handle_info({:subscribe, client_ref, ref}, state) do + broadcast_from!(state, client_ref, {:subscribe, ref}) + {:noreply, state} + end + + def handle_info({:flush, client_ref, ref}, state) do + case state.buffer do + [] -> + {:noreply, %{state | client_ref: {client_ref, ref}, last_client_poll: now_ms()}} + buffer -> + broadcast_from!(state, client_ref, {:messages, Enum.reverse(buffer), ref}) + {:noreply, %{state | client_ref: nil, last_client_poll: now_ms(), buffer: []}} + end + end + + def handle_info(:shutdown_if_inactive, state) do + if now_ms() - state.last_client_poll > state.window_ms do + {:stop, {:shutdown, :inactive}, state} + else + schedule_inactive_shutdown(state.window_ms) + {:noreply, state} + end + end + + def handle_info(message, state) do + %{handler: {handler, handler_state}} = state + + case handler.handle_info(message, handler_state) do + {:push, {_, reply}, handler_state} -> + state = %{state | handler: {handler, handler_state}} + publish_reply(state, reply) + + {:ok, handler_state} -> + state = %{state | handler: {handler, handler_state}} + {:noreply, state} + + {:stop, reason, handler_state} -> + state = %{state | handler: {handler, handler_state}} + {:stop, reason, state} + end + end + + def terminate(reason, state) do + %{handler: {handler, handler_state}} = state + handler.terminate(reason, handler_state) + :ok + end + + defp broadcast_from!(state, client_ref, msg) when is_binary(client_ref), + do: PubSub.broadcast_from!(state.pubsub_server, self(), client_ref, msg) + defp broadcast_from!(_state, client_ref, msg) when is_pid(client_ref), + do: send(client_ref, msg) + + defp publish_reply(state, reply) when is_map(reply) do + IO.warn "Returning a map from the LongPolling serializer is deprecated. 
" <> + "Please return JSON encoded data instead (see Phoenix.Socket.Serializer)" + publish_reply(state, Phoenix.json_library().encode_to_iodata!(reply)) + end + + defp publish_reply(state, reply) do + notify_client_now_available(state) + {:noreply, update_in(state.buffer, &[IO.iodata_to_binary(reply) | &1])} + end + + defp notify_client_now_available(state) do + case state.client_ref do + {client_ref, ref} -> broadcast_from!(state, client_ref, {:now_available, ref}) + nil -> :ok + end + end + + defp now_ms, do: System.system_time(:millisecond) + + defp schedule_inactive_shutdown(window_ms) do + Process.send_after(self(), :shutdown_if_inactive, window_ms) + end +end diff --git a/deps/phoenix/lib/phoenix/transports/websocket.ex b/deps/phoenix/lib/phoenix/transports/websocket.ex new file mode 100644 index 0000000..b08517c --- /dev/null +++ b/deps/phoenix/lib/phoenix/transports/websocket.ex @@ -0,0 +1,48 @@ +defmodule Phoenix.Transports.WebSocket do + @moduledoc false + alias Phoenix.Socket.{V1, V2, Transport} + + def default_config() do + [ + path: "/websocket", + serializer: [{V1.JSONSerializer, "~> 1.0.0"}, {V2.JSONSerializer, "~> 2.0.0"}], + error_handler: {__MODULE__, :handle_error, []}, + timeout: 60_000, + transport_log: false, + compress: false + ] + end + + def connect(%{method: "GET"} = conn, endpoint, handler, opts) do + conn + |> Plug.Conn.fetch_query_params() + |> Transport.code_reload(endpoint, opts) + |> Transport.transport_log(opts[:transport_log]) + |> Transport.force_ssl(handler, endpoint, opts) + |> Transport.check_origin(handler, endpoint, opts) + |> Transport.check_subprotocols(opts[:subprotocols]) + |> case do + %{halted: true} = conn -> + {:error, conn} + + %{params: params} = conn -> + keys = Keyword.get(opts, :connect_info, []) + connect_info = Transport.connect_info(conn, endpoint, keys) + config = %{endpoint: endpoint, transport: :websocket, options: opts, params: params, connect_info: connect_info} + + case handler.connect(config) do + {:ok, state} -> {:ok, conn, state} + :error -> {:error, Plug.Conn.send_resp(conn, 403, "")} + {:error, reason} -> + {m, f, args} = opts[:error_handler] + {:error, apply(m, f, [conn, reason | args])} + end + end + end + + def connect(conn, _, _, _) do + {:error, Plug.Conn.send_resp(conn, 400, "")} + end + + def handle_error(conn, _reason), do: Plug.Conn.send_resp(conn, 403, "") +end diff --git a/deps/phoenix/mix.exs b/deps/phoenix/mix.exs new file mode 100644 index 0000000..560a757 --- /dev/null +++ b/deps/phoenix/mix.exs @@ -0,0 +1,220 @@ +defmodule Phoenix.MixProject do + use Mix.Project + + if Mix.env() != :prod do + for path <- :code.get_path(), + Regex.match?(~r/phx_new\-\d+\.\d+\.\d.*\/ebin$/, List.to_string(path)) do + Code.delete_path(path) + end + end + + @version "1.6.12" + @scm_url "https://github.com/phoenixframework/phoenix" + + # If the elixir requirement is updated, we need to make the installer + # use at least the minimum requirement used here. Although often the + # installer is ahead of Phoenix itself. 
+ @elixir_requirement "~> 1.9" + + def project do + [ + app: :phoenix, + version: @version, + elixir: @elixir_requirement, + deps: deps(), + package: package(), + preferred_cli_env: [docs: :docs], + consolidate_protocols: Mix.env() != :test, + xref: [ + exclude: [ + {IEx, :started?, 0}, + Ecto.Type, + :ranch, + :cowboy_req, + Plug.Cowboy.Conn, + Plug.Cowboy + ] + ], + elixirc_paths: elixirc_paths(Mix.env()), + name: "Phoenix", + docs: docs(), + aliases: aliases(), + source_url: @scm_url, + homepage_url: "https://www.phoenixframework.org", + description: """ + Productive. Reliable. Fast. A productive web framework that + does not compromise speed or maintainability. + """ + ] + end + + defp elixirc_paths(:docs), do: ["lib", "installer/lib"] + defp elixirc_paths(_), do: ["lib"] + + def application do + [ + mod: {Phoenix, []}, + extra_applications: [:logger, :eex, :crypto, :public_key], + env: [ + logger: true, + stacktrace_depth: nil, + filter_parameters: ["password"], + serve_endpoints: false, + gzippable_exts: ~w(.js .map .css .txt .text .html .json .svg .eot .ttf), + static_compressors: [Phoenix.Digester.Gzip] + ] + ] + end + + defp deps do + [ + {:plug, "~> 1.10"}, + {:plug_crypto, "~> 1.2"}, + {:telemetry, "~> 0.4 or ~> 1.0"}, + {:phoenix_pubsub, "~> 2.0"}, + {:phoenix_view, "~> 1.0"}, + + # Optional deps + {:plug_cowboy, "~> 2.2", optional: true}, + {:jason, "~> 1.0", optional: true}, + + # Docs dependencies (some for cross references) + {:ex_doc, "~> 0.24", only: :docs}, + {:ecto, "~> 3.0", only: :docs}, + {:ecto_sql, "~> 3.6", only: :docs}, + {:gettext, "~> 0.18", only: :docs}, + {:telemetry_poller, "~> 1.0", only: :docs}, + {:telemetry_metrics, "~> 0.6", only: :docs}, + {:makeup_eex, ">= 0.1.1", only: :docs}, + {:makeup_elixir, "~> 0.16", only: :docs}, + + # Test dependencies + {:phoenix_html, "~> 3.0", only: [:docs, :test]}, + {:phx_new, path: "./installer", only: :test}, + {:mint, "~> 1.4", only: :test}, + {:mint_web_socket, "~> 0.3.0", only: :test}, + + # Dev dependencies + {:esbuild, "~> 0.4", only: :dev} + ] + end + + defp package do + [ + maintainers: ["Chris McCord", "Josรฉ Valim", "Gary Rennie", "Jason Stiebs"], + licenses: ["MIT"], + links: %{"GitHub" => @scm_url}, + files: + ~w(assets/js lib priv CHANGELOG.md LICENSE.md mix.exs package.json README.md .formatter.exs) + ] + end + + defp docs do + [ + source_ref: "v#{@version}", + main: "overview", + logo: "logo.png", + extra_section: "GUIDES", + assets: "guides/assets", + formatters: ["html", "epub"], + groups_for_modules: groups_for_modules(), + extras: extras(), + groups_for_extras: groups_for_extras(), + skip_undefined_reference_warnings_on: ["CHANGELOG.md"] + ] + end + + defp extras do + [ + "guides/introduction/overview.md", + "guides/introduction/installation.md", + "guides/introduction/up_and_running.md", + "guides/introduction/community.md", + "guides/directory_structure.md", + "guides/request_lifecycle.md", + "guides/plug.md", + "guides/routing.md", + "guides/controllers.md", + "guides/views.md", + "guides/ecto.md", + "guides/contexts.md", + "guides/mix_tasks.md", + "guides/telemetry.md", + "guides/asset_management.md", + "guides/authentication/mix_phx_gen_auth.md", + "guides/real_time/channels.md", + "guides/real_time/presence.md", + "guides/testing/testing.md", + "guides/testing/testing_contexts.md", + "guides/testing/testing_controllers.md", + "guides/testing/testing_channels.md", + "guides/deployment/deployment.md", + "guides/deployment/releases.md", + "guides/deployment/gigalixir.md", + 
"guides/deployment/fly.md", + "guides/deployment/heroku.md", + "guides/howto/custom_error_pages.md", + "guides/howto/using_ssl.md", + "CHANGELOG.md" + ] + end + + defp groups_for_extras do + [ + Introduction: ~r/guides\/introduction\/.?/, + Guides: ~r/guides\/[^\/]+\.md/, + Authentication: ~r/guides\/authentication\/.?/, + "Real-time": ~r/guides\/real_time\/.?/, + Testing: ~r/guides\/testing\/.?/, + Deployment: ~r/guides\/deployment\/.?/, + "How-to's": ~r/guides\/howto\/.?/ + ] + end + + defp groups_for_modules do + # Ungrouped Modules: + # + # Phoenix + # Phoenix.Channel + # Phoenix.Controller + # Phoenix.Endpoint + # Phoenix.Naming + # Phoenix.Logger + # Phoenix.Param + # Phoenix.Presence + # Phoenix.Router + # Phoenix.Token + + [ + Testing: [ + Phoenix.ChannelTest, + Phoenix.ConnTest + ], + "Adapters and Plugs": [ + Phoenix.CodeReloader, + Phoenix.Endpoint.Cowboy2Adapter + ], + "Socket and Transport": [ + Phoenix.Socket, + Phoenix.Socket.Broadcast, + Phoenix.Socket.Message, + Phoenix.Socket.Reply, + Phoenix.Socket.Serializer, + Phoenix.Socket.Transport + ] + ] + end + + defp aliases do + [ + docs: ["docs", &generate_js_docs/1], + "assets.build": ["esbuild module", "esbuild cdn", "esbuild cdn_min", "esbuild main"], + "assets.watch": "esbuild module --watch" + ] + end + + def generate_js_docs(_) do + Mix.Task.run("app.start") + System.cmd("npm", ["run", "docs"], cd: "assets") + end +end diff --git a/deps/phoenix/package.json b/deps/phoenix/package.json new file mode 100644 index 0000000..734b1e4 --- /dev/null +++ b/deps/phoenix/package.json @@ -0,0 +1,26 @@ +{ + "name": "phoenix", + "version": "1.6.12", + "description": "The official JavaScript client for the Phoenix web framework.", + "license": "MIT", + "module": "./priv/static/phoenix.mjs", + "main": "./priv/static/phoenix.cjs.js", + "unpkg": "./priv/static/phoenix.min.js", + "jsdelivr": "./priv/static/phoenix.min.js", + "exports": { + "import": "./priv/static/phoenix.mjs", + "require": "./priv/static/phoenix.cjs.js" + }, + "repository": { + "type": "git", + "url": "git://github.com/phoenixframework/phoenix.git" + }, + "author": "Chris McCord (https://www.phoenixframework.org)", + "files": [ + "README.md", + "LICENSE.md", + "package.json", + "priv/static/*", + "assets/js/phoenix/*" + ] +} diff --git a/deps/phoenix/priv/static/favicon.ico b/deps/phoenix/priv/static/favicon.ico new file mode 100644 index 0000000000000000000000000000000000000000..73de524aaadcf60fbe9d32881db0aa86b58b5cb9 GIT binary patch literal 1258 zcmbtUO>fgM7{=qN=;Mz_82;lvPEdVaxv-<-&=sZLwab?3I zBP>U*&(Hv<5n@9ZQ$vhg#|u$Zmtq8BV;+W*7(?jOx-{r?#TE&$Sdq77MbdJjD5`-q zMm_z(jLv3t>5NhzK{%aG(Yudfpjd3AFdKe2U7&zdepTe>^s(@!&0X8TJ`h+-I?84Ml# literal 0 HcmV?d00001 diff --git a/deps/phoenix/priv/static/phoenix.cjs.js b/deps/phoenix/priv/static/phoenix.cjs.js new file mode 100644 index 0000000..0ba2027 --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.cjs.js @@ -0,0 +1,1144 @@ +var __defProp = Object.defineProperty; +var __getOwnPropDesc = Object.getOwnPropertyDescriptor; +var __getOwnPropNames = Object.getOwnPropertyNames; +var __hasOwnProp = Object.prototype.hasOwnProperty; +var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); +}; +var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = 
__getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; +}; +var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); + +// js/phoenix/index.js +var phoenix_exports = {}; +__export(phoenix_exports, { + Channel: () => Channel, + LongPoll: () => LongPoll, + Presence: () => Presence, + Serializer: () => serializer_default, + Socket: () => Socket +}); +module.exports = __toCommonJS(phoenix_exports); + +// js/phoenix/utils.js +var closure = (value) => { + if (typeof value === "function") { + return value; + } else { + let closure2 = function() { + return value; + }; + return closure2; + } +}; + +// js/phoenix/constants.js +var globalSelf = typeof self !== "undefined" ? self : null; +var phxWindow = typeof window !== "undefined" ? window : null; +var global = globalSelf || phxWindow || global; +var DEFAULT_VSN = "2.0.0"; +var SOCKET_STATES = { connecting: 0, open: 1, closing: 2, closed: 3 }; +var DEFAULT_TIMEOUT = 1e4; +var WS_CLOSE_NORMAL = 1e3; +var CHANNEL_STATES = { + closed: "closed", + errored: "errored", + joined: "joined", + joining: "joining", + leaving: "leaving" +}; +var CHANNEL_EVENTS = { + close: "phx_close", + error: "phx_error", + join: "phx_join", + reply: "phx_reply", + leave: "phx_leave" +}; +var TRANSPORTS = { + longpoll: "longpoll", + websocket: "websocket" +}; +var XHR_STATES = { + complete: 4 +}; + +// js/phoenix/push.js +var Push = class { + constructor(channel, event, payload, timeout) { + this.channel = channel; + this.event = event; + this.payload = payload || function() { + return {}; + }; + this.receivedResp = null; + this.timeout = timeout; + this.timeoutTimer = null; + this.recHooks = []; + this.sent = false; + } + resend(timeout) { + this.timeout = timeout; + this.reset(); + this.send(); + } + send() { + if (this.hasReceived("timeout")) { + return; + } + this.startTimeout(); + this.sent = true; + this.channel.socket.push({ + topic: this.channel.topic, + event: this.event, + payload: this.payload(), + ref: this.ref, + join_ref: this.channel.joinRef() + }); + } + receive(status, callback) { + if (this.hasReceived(status)) { + callback(this.receivedResp.response); + } + this.recHooks.push({ status, callback }); + return this; + } + reset() { + this.cancelRefEvent(); + this.ref = null; + this.refEvent = null; + this.receivedResp = null; + this.sent = false; + } + matchReceive({ status, response, _ref }) { + this.recHooks.filter((h) => h.status === status).forEach((h) => h.callback(response)); + } + cancelRefEvent() { + if (!this.refEvent) { + return; + } + this.channel.off(this.refEvent); + } + cancelTimeout() { + clearTimeout(this.timeoutTimer); + this.timeoutTimer = null; + } + startTimeout() { + if (this.timeoutTimer) { + this.cancelTimeout(); + } + this.ref = this.channel.socket.makeRef(); + this.refEvent = this.channel.replyEventName(this.ref); + this.channel.on(this.refEvent, (payload) => { + this.cancelRefEvent(); + this.cancelTimeout(); + this.receivedResp = payload; + this.matchReceive(payload); + }); + this.timeoutTimer = setTimeout(() => { + this.trigger("timeout", {}); + }, this.timeout); + } + hasReceived(status) { + return this.receivedResp && this.receivedResp.status === status; + } + trigger(status, response) { + this.channel.trigger(this.refEvent, { status, response }); + } +}; + +// js/phoenix/timer.js +var Timer = class { + constructor(callback, timerCalc) { + this.callback = callback; + this.timerCalc = timerCalc; + this.timer = null; + this.tries = 0; + } + reset() { + this.tries = 0; + 
clearTimeout(this.timer); + } + scheduleTimeout() { + clearTimeout(this.timer); + this.timer = setTimeout(() => { + this.tries = this.tries + 1; + this.callback(); + }, this.timerCalc(this.tries + 1)); + } +}; + +// js/phoenix/channel.js +var Channel = class { + constructor(topic, params, socket) { + this.state = CHANNEL_STATES.closed; + this.topic = topic; + this.params = closure(params || {}); + this.socket = socket; + this.bindings = []; + this.bindingRef = 0; + this.timeout = this.socket.timeout; + this.joinedOnce = false; + this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout); + this.pushBuffer = []; + this.stateChangeRefs = []; + this.rejoinTimer = new Timer(() => { + if (this.socket.isConnected()) { + this.rejoin(); + } + }, this.socket.rejoinAfterMs); + this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset())); + this.stateChangeRefs.push(this.socket.onOpen(() => { + this.rejoinTimer.reset(); + if (this.isErrored()) { + this.rejoin(); + } + })); + this.joinPush.receive("ok", () => { + this.state = CHANNEL_STATES.joined; + this.rejoinTimer.reset(); + this.pushBuffer.forEach((pushEvent) => pushEvent.send()); + this.pushBuffer = []; + }); + this.joinPush.receive("error", () => { + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.onClose(() => { + this.rejoinTimer.reset(); + if (this.socket.hasLogger()) + this.socket.log("channel", `close ${this.topic} ${this.joinRef()}`); + this.state = CHANNEL_STATES.closed; + this.socket.remove(this); + }); + this.onError((reason) => { + if (this.socket.hasLogger()) + this.socket.log("channel", `error ${this.topic}`, reason); + if (this.isJoining()) { + this.joinPush.reset(); + } + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.joinPush.receive("timeout", () => { + if (this.socket.hasLogger()) + this.socket.log("channel", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout); + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), this.timeout); + leavePush.send(); + this.state = CHANNEL_STATES.errored; + this.joinPush.reset(); + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.on(CHANNEL_EVENTS.reply, (payload, ref) => { + this.trigger(this.replyEventName(ref), payload); + }); + } + join(timeout = this.timeout) { + if (this.joinedOnce) { + throw new Error("tried to join multiple times. 'join' can only be called a single time per channel instance"); + } else { + this.timeout = timeout; + this.joinedOnce = true; + this.rejoin(); + return this.joinPush; + } + } + onClose(callback) { + this.on(CHANNEL_EVENTS.close, callback); + } + onError(callback) { + return this.on(CHANNEL_EVENTS.error, (reason) => callback(reason)); + } + on(event, callback) { + let ref = this.bindingRef++; + this.bindings.push({ event, ref, callback }); + return ref; + } + off(event, ref) { + this.bindings = this.bindings.filter((bind) => { + return !(bind.event === event && (typeof ref === "undefined" || ref === bind.ref)); + }); + } + canPush() { + return this.socket.isConnected() && this.isJoined(); + } + push(event, payload, timeout = this.timeout) { + payload = payload || {}; + if (!this.joinedOnce) { + throw new Error(`tried to push '${event}' to '${this.topic}' before joining. 
Use channel.join() before pushing events`); + } + let pushEvent = new Push(this, event, function() { + return payload; + }, timeout); + if (this.canPush()) { + pushEvent.send(); + } else { + pushEvent.startTimeout(); + this.pushBuffer.push(pushEvent); + } + return pushEvent; + } + leave(timeout = this.timeout) { + this.rejoinTimer.reset(); + this.joinPush.cancelTimeout(); + this.state = CHANNEL_STATES.leaving; + let onClose = () => { + if (this.socket.hasLogger()) + this.socket.log("channel", `leave ${this.topic}`); + this.trigger(CHANNEL_EVENTS.close, "leave"); + }; + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout); + leavePush.receive("ok", () => onClose()).receive("timeout", () => onClose()); + leavePush.send(); + if (!this.canPush()) { + leavePush.trigger("ok", {}); + } + return leavePush; + } + onMessage(_event, payload, _ref) { + return payload; + } + isMember(topic, event, payload, joinRef) { + if (this.topic !== topic) { + return false; + } + if (joinRef && joinRef !== this.joinRef()) { + if (this.socket.hasLogger()) + this.socket.log("channel", "dropping outdated message", { topic, event, payload, joinRef }); + return false; + } else { + return true; + } + } + joinRef() { + return this.joinPush.ref; + } + rejoin(timeout = this.timeout) { + if (this.isLeaving()) { + return; + } + this.socket.leaveOpenTopic(this.topic); + this.state = CHANNEL_STATES.joining; + this.joinPush.resend(timeout); + } + trigger(event, payload, ref, joinRef) { + let handledPayload = this.onMessage(event, payload, ref, joinRef); + if (payload && !handledPayload) { + throw new Error("channel onMessage callbacks must return the payload, modified or unmodified"); + } + let eventBindings = this.bindings.filter((bind) => bind.event === event); + for (let i = 0; i < eventBindings.length; i++) { + let bind = eventBindings[i]; + bind.callback(handledPayload, ref, joinRef || this.joinRef()); + } + } + replyEventName(ref) { + return `chan_reply_${ref}`; + } + isClosed() { + return this.state === CHANNEL_STATES.closed; + } + isErrored() { + return this.state === CHANNEL_STATES.errored; + } + isJoined() { + return this.state === CHANNEL_STATES.joined; + } + isJoining() { + return this.state === CHANNEL_STATES.joining; + } + isLeaving() { + return this.state === CHANNEL_STATES.leaving; + } +}; + +// js/phoenix/ajax.js +var Ajax = class { + static request(method, endPoint, accept, body, timeout, ontimeout, callback) { + if (global.XDomainRequest) { + let req = new global.XDomainRequest(); + return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback); + } else { + let req = new global.XMLHttpRequest(); + return this.xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback); + } + } + static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback) { + req.timeout = timeout; + req.open(method, endPoint); + req.onload = () => { + let response = this.parseJSON(req.responseText); + callback && callback(response); + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.onprogress = () => { + }; + req.send(body); + return req; + } + static xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback) { + req.open(method, endPoint, true); + req.timeout = timeout; + req.setRequestHeader("Content-Type", accept); + req.onerror = () => callback && callback(null); + req.onreadystatechange = () => { + if (req.readyState === XHR_STATES.complete && callback) { + let response = this.parseJSON(req.responseText); + 
callback(response); + } + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.send(body); + return req; + } + static parseJSON(resp) { + if (!resp || resp === "") { + return null; + } + try { + return JSON.parse(resp); + } catch (e) { + console && console.log("failed to parse JSON response", resp); + return null; + } + } + static serialize(obj, parentKey) { + let queryStr = []; + for (var key in obj) { + if (!Object.prototype.hasOwnProperty.call(obj, key)) { + continue; + } + let paramKey = parentKey ? `${parentKey}[${key}]` : key; + let paramVal = obj[key]; + if (typeof paramVal === "object") { + queryStr.push(this.serialize(paramVal, paramKey)); + } else { + queryStr.push(encodeURIComponent(paramKey) + "=" + encodeURIComponent(paramVal)); + } + } + return queryStr.join("&"); + } + static appendParams(url, params) { + if (Object.keys(params).length === 0) { + return url; + } + let prefix = url.match(/\?/) ? "&" : "?"; + return `${url}${prefix}${this.serialize(params)}`; + } +}; + +// js/phoenix/longpoll.js +var LongPoll = class { + constructor(endPoint) { + this.endPoint = null; + this.token = null; + this.skipHeartbeat = true; + this.reqs = /* @__PURE__ */ new Set(); + this.onopen = function() { + }; + this.onerror = function() { + }; + this.onmessage = function() { + }; + this.onclose = function() { + }; + this.pollEndpoint = this.normalizeEndpoint(endPoint); + this.readyState = SOCKET_STATES.connecting; + this.poll(); + } + normalizeEndpoint(endPoint) { + return endPoint.replace("ws://", "http://").replace("wss://", "https://").replace(new RegExp("(.*)/" + TRANSPORTS.websocket), "$1/" + TRANSPORTS.longpoll); + } + endpointURL() { + return Ajax.appendParams(this.pollEndpoint, { token: this.token }); + } + closeAndRetry(code, reason, wasClean) { + this.close(code, reason, wasClean); + this.readyState = SOCKET_STATES.connecting; + } + ontimeout() { + this.onerror("timeout"); + this.closeAndRetry(1005, "timeout", false); + } + isActive() { + return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting; + } + poll() { + this.ajax("GET", null, () => this.ontimeout(), (resp) => { + if (resp) { + var { status, token, messages } = resp; + this.token = token; + } else { + status = 0; + } + switch (status) { + case 200: + messages.forEach((msg) => { + setTimeout(() => this.onmessage({ data: msg }), 0); + }); + this.poll(); + break; + case 204: + this.poll(); + break; + case 410: + this.readyState = SOCKET_STATES.open; + this.onopen({}); + this.poll(); + break; + case 403: + this.onerror(403); + this.close(1008, "forbidden", false); + break; + case 0: + case 500: + this.onerror(500); + this.closeAndRetry(1011, "internal server error", 500); + break; + default: + throw new Error(`unhandled poll status ${status}`); + } + }); + } + send(body) { + this.ajax("POST", body, () => this.onerror("timeout"), (resp) => { + if (!resp || resp.status !== 200) { + this.onerror(resp && resp.status); + this.closeAndRetry(1011, "internal server error", false); + } + }); + } + close(code, reason, wasClean) { + for (let req of this.reqs) { + req.abort(); + } + this.readyState = SOCKET_STATES.closed; + let opts = Object.assign({ code: 1e3, reason: void 0, wasClean: true }, { code, reason, wasClean }); + if (typeof CloseEvent !== "undefined") { + this.onclose(new CloseEvent("close", opts)); + } else { + this.onclose(opts); + } + } + ajax(method, body, onCallerTimeout, callback) { + let req; + let ontimeout = () => { + this.reqs.delete(req); + onCallerTimeout(); + }; + req = 
Ajax.request(method, this.endpointURL(), "application/json", body, this.timeout, ontimeout, (resp) => { + this.reqs.delete(req); + if (this.isActive()) { + callback(resp); + } + }); + this.reqs.add(req); + } +}; + +// js/phoenix/presence.js +var Presence = class { + constructor(channel, opts = {}) { + let events = opts.events || { state: "presence_state", diff: "presence_diff" }; + this.state = {}; + this.pendingDiffs = []; + this.channel = channel; + this.joinRef = null; + this.caller = { + onJoin: function() { + }, + onLeave: function() { + }, + onSync: function() { + } + }; + this.channel.on(events.state, (newState) => { + let { onJoin, onLeave, onSync } = this.caller; + this.joinRef = this.channel.joinRef(); + this.state = Presence.syncState(this.state, newState, onJoin, onLeave); + this.pendingDiffs.forEach((diff) => { + this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave); + }); + this.pendingDiffs = []; + onSync(); + }); + this.channel.on(events.diff, (diff) => { + let { onJoin, onLeave, onSync } = this.caller; + if (this.inPendingSyncState()) { + this.pendingDiffs.push(diff); + } else { + this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave); + onSync(); + } + }); + } + onJoin(callback) { + this.caller.onJoin = callback; + } + onLeave(callback) { + this.caller.onLeave = callback; + } + onSync(callback) { + this.caller.onSync = callback; + } + list(by) { + return Presence.list(this.state, by); + } + inPendingSyncState() { + return !this.joinRef || this.joinRef !== this.channel.joinRef(); + } + static syncState(currentState, newState, onJoin, onLeave) { + let state = this.clone(currentState); + let joins = {}; + let leaves = {}; + this.map(state, (key, presence) => { + if (!newState[key]) { + leaves[key] = presence; + } + }); + this.map(newState, (key, newPresence) => { + let currentPresence = state[key]; + if (currentPresence) { + let newRefs = newPresence.metas.map((m) => m.phx_ref); + let curRefs = currentPresence.metas.map((m) => m.phx_ref); + let joinedMetas = newPresence.metas.filter((m) => curRefs.indexOf(m.phx_ref) < 0); + let leftMetas = currentPresence.metas.filter((m) => newRefs.indexOf(m.phx_ref) < 0); + if (joinedMetas.length > 0) { + joins[key] = newPresence; + joins[key].metas = joinedMetas; + } + if (leftMetas.length > 0) { + leaves[key] = this.clone(currentPresence); + leaves[key].metas = leftMetas; + } + } else { + joins[key] = newPresence; + } + }); + return this.syncDiff(state, { joins, leaves }, onJoin, onLeave); + } + static syncDiff(state, diff, onJoin, onLeave) { + let { joins, leaves } = this.clone(diff); + if (!onJoin) { + onJoin = function() { + }; + } + if (!onLeave) { + onLeave = function() { + }; + } + this.map(joins, (key, newPresence) => { + let currentPresence = state[key]; + state[key] = this.clone(newPresence); + if (currentPresence) { + let joinedRefs = state[key].metas.map((m) => m.phx_ref); + let curMetas = currentPresence.metas.filter((m) => joinedRefs.indexOf(m.phx_ref) < 0); + state[key].metas.unshift(...curMetas); + } + onJoin(key, currentPresence, newPresence); + }); + this.map(leaves, (key, leftPresence) => { + let currentPresence = state[key]; + if (!currentPresence) { + return; + } + let refsToRemove = leftPresence.metas.map((m) => m.phx_ref); + currentPresence.metas = currentPresence.metas.filter((p) => { + return refsToRemove.indexOf(p.phx_ref) < 0; + }); + onLeave(key, currentPresence, leftPresence); + if (currentPresence.metas.length === 0) { + delete state[key]; + } + }); + return state; + } + static 
list(presences, chooser) { + if (!chooser) { + chooser = function(key, pres) { + return pres; + }; + } + return this.map(presences, (key, presence) => { + return chooser(key, presence); + }); + } + static map(obj, func) { + return Object.getOwnPropertyNames(obj).map((key) => func(key, obj[key])); + } + static clone(obj) { + return JSON.parse(JSON.stringify(obj)); + } +}; + +// js/phoenix/serializer.js +var serializer_default = { + HEADER_LENGTH: 1, + META_LENGTH: 4, + KINDS: { push: 0, reply: 1, broadcast: 2 }, + encode(msg, callback) { + if (msg.payload.constructor === ArrayBuffer) { + return callback(this.binaryEncode(msg)); + } else { + let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]; + return callback(JSON.stringify(payload)); + } + }, + decode(rawPayload, callback) { + if (rawPayload.constructor === ArrayBuffer) { + return callback(this.binaryDecode(rawPayload)); + } else { + let [join_ref, ref, topic, event, payload] = JSON.parse(rawPayload); + return callback({ join_ref, ref, topic, event, payload }); + } + }, + binaryEncode(message) { + let { join_ref, ref, event, topic, payload } = message; + let metaLength = this.META_LENGTH + join_ref.length + ref.length + topic.length + event.length; + let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength); + let view = new DataView(header); + let offset = 0; + view.setUint8(offset++, this.KINDS.push); + view.setUint8(offset++, join_ref.length); + view.setUint8(offset++, ref.length); + view.setUint8(offset++, topic.length); + view.setUint8(offset++, event.length); + Array.from(join_ref, (char) => view.setUint8(offset++, char.charCodeAt(0))); + Array.from(ref, (char) => view.setUint8(offset++, char.charCodeAt(0))); + Array.from(topic, (char) => view.setUint8(offset++, char.charCodeAt(0))); + Array.from(event, (char) => view.setUint8(offset++, char.charCodeAt(0))); + var combined = new Uint8Array(header.byteLength + payload.byteLength); + combined.set(new Uint8Array(header), 0); + combined.set(new Uint8Array(payload), header.byteLength); + return combined.buffer; + }, + binaryDecode(buffer) { + let view = new DataView(buffer); + let kind = view.getUint8(0); + let decoder = new TextDecoder(); + switch (kind) { + case this.KINDS.push: + return this.decodePush(buffer, view, decoder); + case this.KINDS.reply: + return this.decodeReply(buffer, view, decoder); + case this.KINDS.broadcast: + return this.decodeBroadcast(buffer, view, decoder); + } + }, + decodePush(buffer, view, decoder) { + let joinRefSize = view.getUint8(1); + let topicSize = view.getUint8(2); + let eventSize = view.getUint8(3); + let offset = this.HEADER_LENGTH + this.META_LENGTH - 1; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: joinRef, ref: null, topic, event, payload: data }; + }, + decodeReply(buffer, view, decoder) { + let joinRefSize = view.getUint8(1); + let refSize = view.getUint8(2); + let topicSize = view.getUint8(3); + let eventSize = view.getUint8(4); + let offset = this.HEADER_LENGTH + this.META_LENGTH; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let ref = decoder.decode(buffer.slice(offset, offset + refSize)); + offset = offset + 
refSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + let payload = { status: event, response: data }; + return { join_ref: joinRef, ref, topic, event: CHANNEL_EVENTS.reply, payload }; + }, + decodeBroadcast(buffer, view, decoder) { + let topicSize = view.getUint8(1); + let eventSize = view.getUint8(2); + let offset = this.HEADER_LENGTH + 2; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: null, ref: null, topic, event, payload: data }; + } +}; + +// js/phoenix/socket.js +var Socket = class { + constructor(endPoint, opts = {}) { + this.stateChangeCallbacks = { open: [], close: [], error: [], message: [] }; + this.channels = []; + this.sendBuffer = []; + this.ref = 0; + this.timeout = opts.timeout || DEFAULT_TIMEOUT; + this.transport = opts.transport || global.WebSocket || LongPoll; + this.establishedConnections = 0; + this.defaultEncoder = serializer_default.encode.bind(serializer_default); + this.defaultDecoder = serializer_default.decode.bind(serializer_default); + this.closeWasClean = false; + this.binaryType = opts.binaryType || "arraybuffer"; + this.connectClock = 1; + if (this.transport !== LongPoll) { + this.encode = opts.encode || this.defaultEncoder; + this.decode = opts.decode || this.defaultDecoder; + } else { + this.encode = this.defaultEncoder; + this.decode = this.defaultDecoder; + } + let awaitingConnectionOnPageShow = null; + if (phxWindow && phxWindow.addEventListener) { + phxWindow.addEventListener("pagehide", (_e) => { + if (this.conn) { + this.disconnect(); + awaitingConnectionOnPageShow = this.connectClock; + } + }); + phxWindow.addEventListener("pageshow", (_e) => { + if (awaitingConnectionOnPageShow === this.connectClock) { + awaitingConnectionOnPageShow = null; + this.connect(); + } + }); + } + this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 3e4; + this.rejoinAfterMs = (tries) => { + if (opts.rejoinAfterMs) { + return opts.rejoinAfterMs(tries); + } else { + return [1e3, 2e3, 5e3][tries - 1] || 1e4; + } + }; + this.reconnectAfterMs = (tries) => { + if (opts.reconnectAfterMs) { + return opts.reconnectAfterMs(tries); + } else { + return [10, 50, 100, 150, 200, 250, 500, 1e3, 2e3][tries - 1] || 5e3; + } + }; + this.logger = opts.logger || null; + this.longpollerTimeout = opts.longpollerTimeout || 2e4; + this.params = closure(opts.params || {}); + this.endPoint = `${endPoint}/${TRANSPORTS.websocket}`; + this.vsn = opts.vsn || DEFAULT_VSN; + this.heartbeatTimer = null; + this.pendingHeartbeatRef = null; + this.reconnectTimer = new Timer(() => { + this.teardown(() => this.connect()); + }, this.reconnectAfterMs); + } + getLongPollTransport() { + return LongPoll; + } + replaceTransport(newTransport) { + this.connectClock++; + this.closeWasClean = true; + this.reconnectTimer.reset(); + this.sendBuffer = []; + if (this.conn) { + this.conn.close(); + this.conn = null; + } + this.transport = newTransport; + } + protocol() { + return location.protocol.match(/^https/) ? 
"wss" : "ws"; + } + endPointURL() { + let uri = Ajax.appendParams(Ajax.appendParams(this.endPoint, this.params()), { vsn: this.vsn }); + if (uri.charAt(0) !== "/") { + return uri; + } + if (uri.charAt(1) === "/") { + return `${this.protocol()}:${uri}`; + } + return `${this.protocol()}://${location.host}${uri}`; + } + disconnect(callback, code, reason) { + this.connectClock++; + this.closeWasClean = true; + this.reconnectTimer.reset(); + this.teardown(callback, code, reason); + } + connect(params) { + if (params) { + console && console.log("passing params to connect is deprecated. Instead pass :params to the Socket constructor"); + this.params = closure(params); + } + if (this.conn) { + return; + } + this.connectClock++; + this.closeWasClean = false; + this.conn = new this.transport(this.endPointURL()); + this.conn.binaryType = this.binaryType; + this.conn.timeout = this.longpollerTimeout; + this.conn.onopen = () => this.onConnOpen(); + this.conn.onerror = (error) => this.onConnError(error); + this.conn.onmessage = (event) => this.onConnMessage(event); + this.conn.onclose = (event) => this.onConnClose(event); + } + log(kind, msg, data) { + this.logger(kind, msg, data); + } + hasLogger() { + return this.logger !== null; + } + onOpen(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.open.push([ref, callback]); + return ref; + } + onClose(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.close.push([ref, callback]); + return ref; + } + onError(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.error.push([ref, callback]); + return ref; + } + onMessage(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.message.push([ref, callback]); + return ref; + } + ping(callback) { + if (!this.isConnected()) { + return false; + } + let ref = this.makeRef(); + let startTime = Date.now(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref }); + let onMsgRef = this.onMessage((msg) => { + if (msg.ref === ref) { + this.off([onMsgRef]); + callback(Date.now() - startTime); + } + }); + return true; + } + onConnOpen() { + if (this.hasLogger()) + this.log("transport", `connected to ${this.endPointURL()}`); + this.closeWasClean = false; + this.establishedConnections++; + this.flushSendBuffer(); + this.reconnectTimer.reset(); + this.resetHeartbeat(); + this.stateChangeCallbacks.open.forEach(([, callback]) => callback()); + } + heartbeatTimeout() { + if (this.pendingHeartbeatRef) { + this.pendingHeartbeatRef = null; + if (this.hasLogger()) { + this.log("transport", "heartbeat timeout. 
Attempting to re-establish connection"); + } + this.abnormalClose("heartbeat timeout"); + } + } + resetHeartbeat() { + if (this.conn && this.conn.skipHeartbeat) { + return; + } + this.pendingHeartbeatRef = null; + clearTimeout(this.heartbeatTimer); + setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + teardown(callback, code, reason) { + if (!this.conn) { + return callback && callback(); + } + this.waitForBufferDone(() => { + if (this.conn) { + if (code) { + this.conn.close(code, reason || ""); + } else { + this.conn.close(); + } + } + this.waitForSocketClosed(() => { + if (this.conn) { + this.conn.onclose = function() { + }; + this.conn = null; + } + callback && callback(); + }); + }); + } + waitForBufferDone(callback, tries = 1) { + if (tries === 5 || !this.conn || !this.conn.bufferedAmount) { + callback(); + return; + } + setTimeout(() => { + this.waitForBufferDone(callback, tries + 1); + }, 150 * tries); + } + waitForSocketClosed(callback, tries = 1) { + if (tries === 5 || !this.conn || this.conn.readyState === SOCKET_STATES.closed) { + callback(); + return; + } + setTimeout(() => { + this.waitForSocketClosed(callback, tries + 1); + }, 150 * tries); + } + onConnClose(event) { + let closeCode = event && event.code; + if (this.hasLogger()) + this.log("transport", "close", event); + this.triggerChanError(); + clearTimeout(this.heartbeatTimer); + if (!this.closeWasClean && closeCode !== 1e3) { + this.reconnectTimer.scheduleTimeout(); + } + this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event)); + } + onConnError(error) { + if (this.hasLogger()) + this.log("transport", error); + let transportBefore = this.transport; + let establishedBefore = this.establishedConnections; + this.stateChangeCallbacks.error.forEach(([, callback]) => { + callback(error, transportBefore, establishedBefore); + }); + if (transportBefore === this.transport || establishedBefore > 0) { + this.triggerChanError(); + } + } + triggerChanError() { + this.channels.forEach((channel) => { + if (!(channel.isErrored() || channel.isLeaving() || channel.isClosed())) { + channel.trigger(CHANNEL_EVENTS.error); + } + }); + } + connectionState() { + switch (this.conn && this.conn.readyState) { + case SOCKET_STATES.connecting: + return "connecting"; + case SOCKET_STATES.open: + return "open"; + case SOCKET_STATES.closing: + return "closing"; + default: + return "closed"; + } + } + isConnected() { + return this.connectionState() === "open"; + } + remove(channel) { + this.off(channel.stateChangeRefs); + this.channels = this.channels.filter((c) => c.joinRef() !== channel.joinRef()); + } + off(refs) { + for (let key in this.stateChangeCallbacks) { + this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => { + return refs.indexOf(ref) === -1; + }); + } + } + channel(topic, chanParams = {}) { + let chan = new Channel(topic, chanParams, this); + this.channels.push(chan); + return chan; + } + push(data) { + if (this.hasLogger()) { + let { topic, event, payload, ref, join_ref } = data; + this.log("push", `${topic} ${event} (${join_ref}, ${ref})`, payload); + } + if (this.isConnected()) { + this.encode(data, (result) => this.conn.send(result)); + } else { + this.sendBuffer.push(() => this.encode(data, (result) => this.conn.send(result))); + } + } + makeRef() { + let newRef = this.ref + 1; + if (newRef === this.ref) { + this.ref = 0; + } else { + this.ref = newRef; + } + return this.ref.toString(); + } + sendHeartbeat() { + if (this.pendingHeartbeatRef && !this.isConnected()) { + 
return; + } + this.pendingHeartbeatRef = this.makeRef(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref: this.pendingHeartbeatRef }); + this.heartbeatTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs); + } + abnormalClose(reason) { + this.closeWasClean = false; + if (this.isConnected()) { + this.conn.close(WS_CLOSE_NORMAL, reason); + } + } + flushSendBuffer() { + if (this.isConnected() && this.sendBuffer.length > 0) { + this.sendBuffer.forEach((callback) => callback()); + this.sendBuffer = []; + } + } + onConnMessage(rawMessage) { + this.decode(rawMessage.data, (msg) => { + let { topic, event, payload, ref, join_ref } = msg; + if (ref && ref === this.pendingHeartbeatRef) { + clearTimeout(this.heartbeatTimer); + this.pendingHeartbeatRef = null; + setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + if (this.hasLogger()) + this.log("receive", `${payload.status || ""} ${topic} ${event} ${ref && "(" + ref + ")" || ""}`, payload); + for (let i = 0; i < this.channels.length; i++) { + const channel = this.channels[i]; + if (!channel.isMember(topic, event, payload, join_ref)) { + continue; + } + channel.trigger(event, payload, ref, join_ref); + } + for (let i = 0; i < this.stateChangeCallbacks.message.length; i++) { + let [, callback] = this.stateChangeCallbacks.message[i]; + callback(msg); + } + }); + } + leaveOpenTopic(topic) { + let dupChannel = this.channels.find((c) => c.topic === topic && (c.isJoined() || c.isJoining())); + if (dupChannel) { + if (this.hasLogger()) + this.log("transport", `leaving duplicate topic "${topic}"`); + dupChannel.leave(); + } + } +}; +//# sourceMappingURL=phoenix.cjs.js.map diff --git a/deps/phoenix/priv/static/phoenix.cjs.js.map b/deps/phoenix/priv/static/phoenix.cjs.js.map new file mode 100644 index 0000000..f603c41 --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.cjs.js.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["../../assets/js/phoenix/index.js", "../../assets/js/phoenix/utils.js", "../../assets/js/phoenix/constants.js", "../../assets/js/phoenix/push.js", "../../assets/js/phoenix/timer.js", "../../assets/js/phoenix/channel.js", "../../assets/js/phoenix/ajax.js", "../../assets/js/phoenix/longpoll.js", "../../assets/js/phoenix/presence.js", "../../assets/js/phoenix/serializer.js", "../../assets/js/phoenix/socket.js"], + "sourcesContent": ["/**\n * Phoenix Channels JavaScript client\n *\n * ## Socket Connection\n *\n * A single connection is established to the server and\n * channels are multiplexed over the connection.\n * Connect to the server using the `Socket` class:\n *\n * ```javascript\n * let socket = new Socket(\"/socket\", {params: {userToken: \"123\"}})\n * socket.connect()\n * ```\n *\n * The `Socket` constructor takes the mount point of the socket,\n * the authentication params, as well as options that can be found in\n * the Socket docs, such as configuring the `LongPoll` transport, and\n * heartbeat.\n *\n * ## Channels\n *\n * Channels are isolated, concurrent processes on the server that\n * subscribe to topics and broker events between the client and server.\n * To join a channel, you must provide the topic, and channel params for\n * authorization. 
Here's an example chat room example where `\"new_msg\"`\n * events are listened for, messages are pushed to the server, and\n * the channel is joined with ok/error/timeout matches:\n *\n * ```javascript\n * let channel = socket.channel(\"room:123\", {token: roomToken})\n * channel.on(\"new_msg\", msg => console.log(\"Got message\", msg) )\n * $input.onEnter( e => {\n * channel.push(\"new_msg\", {body: e.target.val}, 10000)\n * .receive(\"ok\", (msg) => console.log(\"created message\", msg) )\n * .receive(\"error\", (reasons) => console.log(\"create failed\", reasons) )\n * .receive(\"timeout\", () => console.log(\"Networking issue...\") )\n * })\n *\n * channel.join()\n * .receive(\"ok\", ({messages}) => console.log(\"catching up\", messages) )\n * .receive(\"error\", ({reason}) => console.log(\"failed join\", reason) )\n * .receive(\"timeout\", () => console.log(\"Networking issue. Still waiting...\"))\n *```\n *\n * ## Joining\n *\n * Creating a channel with `socket.channel(topic, params)`, binds the params to\n * `channel.params`, which are sent up on `channel.join()`.\n * Subsequent rejoins will send up the modified params for\n * updating authorization params, or passing up last_message_id information.\n * Successful joins receive an \"ok\" status, while unsuccessful joins\n * receive \"error\".\n *\n * With the default serializers and WebSocket transport, JSON text frames are\n * used for pushing a JSON object literal. If an `ArrayBuffer` instance is provided,\n * binary encoding will be used and the message will be sent with the binary\n * opcode.\n *\n * *Note*: binary messages are only supported on the WebSocket transport.\n *\n * ## Duplicate Join Subscriptions\n *\n * While the client may join any number of topics on any number of channels,\n * the client may only hold a single subscription for each unique topic at any\n * given time. When attempting to create a duplicate subscription,\n * the server will close the existing channel, log a warning, and\n * spawn a new channel for the topic. The client will have their\n * `channel.onClose` callbacks fired for the existing channel, and the new\n * channel join will have its receive hooks processed as normal.\n *\n * ## Pushing Messages\n *\n * From the previous example, we can see that pushing messages to the server\n * can be done with `channel.push(eventName, payload)` and we can optionally\n * receive responses from the push. Additionally, we can use\n * `receive(\"timeout\", callback)` to abort waiting for our other `receive` hooks\n * and take action after some period of waiting. The default timeout is 10000ms.\n *\n *\n * ## Socket Hooks\n *\n * Lifecycle events of the multiplexed connection can be hooked into via\n * `socket.onError()` and `socket.onClose()` events, ie:\n *\n * ```javascript\n * socket.onError( () => console.log(\"there was an error with the connection!\") )\n * socket.onClose( () => console.log(\"the connection dropped\") )\n * ```\n *\n *\n * ## Channel Hooks\n *\n * For each joined channel, you can bind to `onError` and `onClose` events\n * to monitor the channel lifecycle, ie:\n *\n * ```javascript\n * channel.onError( () => console.log(\"there was an error!\") )\n * channel.onClose( () => console.log(\"the channel has gone away gracefully\") )\n * ```\n *\n * ### onError hooks\n *\n * `onError` hooks are invoked if the socket connection drops, or the channel\n * crashes on the server. 
In either case, a channel rejoin is attempted\n * automatically in an exponential backoff manner.\n *\n * ### onClose hooks\n *\n * `onClose` hooks are invoked only in two cases. 1) the channel explicitly\n * closed on the server, or 2). The client explicitly closed, by calling\n * `channel.leave()`\n *\n *\n * ## Presence\n *\n * The `Presence` object provides features for syncing presence information\n * from the server with the client and handling presences joining and leaving.\n *\n * ### Syncing state from the server\n *\n * To sync presence state from the server, first instantiate an object and\n * pass your channel in to track lifecycle events:\n *\n * ```javascript\n * let channel = socket.channel(\"some:topic\")\n * let presence = new Presence(channel)\n * ```\n *\n * Next, use the `presence.onSync` callback to react to state changes\n * from the server. For example, to render the list of users every time\n * the list changes, you could write:\n *\n * ```javascript\n * presence.onSync(() => {\n * myRenderUsersFunction(presence.list())\n * })\n * ```\n *\n * ### Listing Presences\n *\n * `presence.list` is used to return a list of presence information\n * based on the local state of metadata. By default, all presence\n * metadata is returned, but a `listBy` function can be supplied to\n * allow the client to select which metadata to use for a given presence.\n * For example, you may have a user online from different devices with\n * a metadata status of \"online\", but they have set themselves to \"away\"\n * on another device. In this case, the app may choose to use the \"away\"\n * status for what appears on the UI. The example below defines a `listBy`\n * function which prioritizes the first metadata which was registered for\n * each user. This could be the first tab they opened, or the first device\n * they came online from:\n *\n * ```javascript\n * let listBy = (id, {metas: [first, ...rest]}) => {\n * first.count = rest.length + 1 // count of this user's presences\n * first.id = id\n * return first\n * }\n * let onlineUsers = presence.list(listBy)\n * ```\n *\n * ### Handling individual presence join and leave events\n *\n * The `presence.onJoin` and `presence.onLeave` callbacks can be used to\n * react to individual presences joining and leaving the app. 
For example:\n *\n * ```javascript\n * let presence = new Presence(channel)\n *\n * // detect if user has joined for the 1st time or from another tab/device\n * presence.onJoin((id, current, newPres) => {\n * if(!current){\n * console.log(\"user has entered for the first time\", newPres)\n * } else {\n * console.log(\"user additional presence\", newPres)\n * }\n * })\n *\n * // detect if user has left from all tabs/devices, or is still present\n * presence.onLeave((id, current, leftPres) => {\n * if(current.metas.length === 0){\n * console.log(\"user has left from all devices\", leftPres)\n * } else {\n * console.log(\"user left from a device\", leftPres)\n * }\n * })\n * // receive presence data from server\n * presence.onSync(() => {\n * displayUsers(presence.list())\n * })\n * ```\n * @module phoenix\n */\n\nimport Channel from \"./channel\"\nimport LongPoll from \"./longpoll\"\nimport Presence from \"./presence\"\nimport Serializer from \"./serializer\"\nimport Socket from \"./socket\"\n\nexport {\n Channel,\n LongPoll,\n Presence,\n Serializer,\n Socket\n}\n", "// wraps value in closure or returns closure\nexport let closure = (value) => {\n if(typeof value === \"function\"){\n return value\n } else {\n let closure = function (){ return value }\n return closure\n }\n}\n", "export const globalSelf = typeof self !== \"undefined\" ? self : null\nexport const phxWindow = typeof window !== \"undefined\" ? window : null\nexport const global = globalSelf || phxWindow || global\nexport const DEFAULT_VSN = \"2.0.0\"\nexport const SOCKET_STATES = {connecting: 0, open: 1, closing: 2, closed: 3}\nexport const DEFAULT_TIMEOUT = 10000\nexport const WS_CLOSE_NORMAL = 1000\nexport const CHANNEL_STATES = {\n closed: \"closed\",\n errored: \"errored\",\n joined: \"joined\",\n joining: \"joining\",\n leaving: \"leaving\",\n}\nexport const CHANNEL_EVENTS = {\n close: \"phx_close\",\n error: \"phx_error\",\n join: \"phx_join\",\n reply: \"phx_reply\",\n leave: \"phx_leave\"\n}\n\nexport const TRANSPORTS = {\n longpoll: \"longpoll\",\n websocket: \"websocket\"\n}\nexport const XHR_STATES = {\n complete: 4\n}\n", "/**\n * Initializes the Push\n * @param {Channel} channel - The Channel\n * @param {string} event - The event, for example `\"phx_join\"`\n * @param {Object} payload - The payload, for example `{user_id: 123}`\n * @param {number} timeout - The push timeout in milliseconds\n */\nexport default class Push {\n constructor(channel, event, payload, timeout){\n this.channel = channel\n this.event = event\n this.payload = payload || function (){ return {} }\n this.receivedResp = null\n this.timeout = timeout\n this.timeoutTimer = null\n this.recHooks = []\n this.sent = false\n }\n\n /**\n *\n * @param {number} timeout\n */\n resend(timeout){\n this.timeout = timeout\n this.reset()\n this.send()\n }\n\n /**\n *\n */\n send(){\n if(this.hasReceived(\"timeout\")){ return }\n this.startTimeout()\n this.sent = true\n this.channel.socket.push({\n topic: this.channel.topic,\n event: this.event,\n payload: this.payload(),\n ref: this.ref,\n join_ref: this.channel.joinRef()\n })\n }\n\n /**\n *\n * @param {*} status\n * @param {*} callback\n */\n receive(status, callback){\n if(this.hasReceived(status)){\n callback(this.receivedResp.response)\n }\n\n this.recHooks.push({status, callback})\n return this\n }\n\n /**\n * @private\n */\n reset(){\n this.cancelRefEvent()\n this.ref = null\n this.refEvent = null\n this.receivedResp = null\n this.sent = false\n }\n\n /**\n * @private\n */\n matchReceive({status, 
response, _ref}){\n this.recHooks.filter(h => h.status === status)\n .forEach(h => h.callback(response))\n }\n\n /**\n * @private\n */\n cancelRefEvent(){\n if(!this.refEvent){ return }\n this.channel.off(this.refEvent)\n }\n\n /**\n * @private\n */\n cancelTimeout(){\n clearTimeout(this.timeoutTimer)\n this.timeoutTimer = null\n }\n\n /**\n * @private\n */\n startTimeout(){\n if(this.timeoutTimer){ this.cancelTimeout() }\n this.ref = this.channel.socket.makeRef()\n this.refEvent = this.channel.replyEventName(this.ref)\n\n this.channel.on(this.refEvent, payload => {\n this.cancelRefEvent()\n this.cancelTimeout()\n this.receivedResp = payload\n this.matchReceive(payload)\n })\n\n this.timeoutTimer = setTimeout(() => {\n this.trigger(\"timeout\", {})\n }, this.timeout)\n }\n\n /**\n * @private\n */\n hasReceived(status){\n return this.receivedResp && this.receivedResp.status === status\n }\n\n /**\n * @private\n */\n trigger(status, response){\n this.channel.trigger(this.refEvent, {status, response})\n }\n}\n", "/**\n *\n * Creates a timer that accepts a `timerCalc` function to perform\n * calculated timeout retries, such as exponential backoff.\n *\n * @example\n * let reconnectTimer = new Timer(() => this.connect(), function(tries){\n * return [1000, 5000, 10000][tries - 1] || 10000\n * })\n * reconnectTimer.scheduleTimeout() // fires after 1000\n * reconnectTimer.scheduleTimeout() // fires after 5000\n * reconnectTimer.reset()\n * reconnectTimer.scheduleTimeout() // fires after 1000\n *\n * @param {Function} callback\n * @param {Function} timerCalc\n */\nexport default class Timer {\n constructor(callback, timerCalc){\n this.callback = callback\n this.timerCalc = timerCalc\n this.timer = null\n this.tries = 0\n }\n\n reset(){\n this.tries = 0\n clearTimeout(this.timer)\n }\n\n /**\n * Cancels any previous scheduleTimeout and schedules callback\n */\n scheduleTimeout(){\n clearTimeout(this.timer)\n\n this.timer = setTimeout(() => {\n this.tries = this.tries + 1\n this.callback()\n }, this.timerCalc(this.tries + 1))\n }\n}\n", "import {closure} from \"./utils\"\nimport {\n CHANNEL_EVENTS,\n CHANNEL_STATES,\n} from \"./constants\"\n\nimport Push from \"./push\"\nimport Timer from \"./timer\"\n\n/**\n *\n * @param {string} topic\n * @param {(Object|function)} params\n * @param {Socket} socket\n */\nexport default class Channel {\n constructor(topic, params, socket){\n this.state = CHANNEL_STATES.closed\n this.topic = topic\n this.params = closure(params || {})\n this.socket = socket\n this.bindings = []\n this.bindingRef = 0\n this.timeout = this.socket.timeout\n this.joinedOnce = false\n this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout)\n this.pushBuffer = []\n this.stateChangeRefs = []\n\n this.rejoinTimer = new Timer(() => {\n if(this.socket.isConnected()){ this.rejoin() }\n }, this.socket.rejoinAfterMs)\n this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset()))\n this.stateChangeRefs.push(this.socket.onOpen(() => {\n this.rejoinTimer.reset()\n if(this.isErrored()){ this.rejoin() }\n })\n )\n this.joinPush.receive(\"ok\", () => {\n this.state = CHANNEL_STATES.joined\n this.rejoinTimer.reset()\n this.pushBuffer.forEach(pushEvent => pushEvent.send())\n this.pushBuffer = []\n })\n this.joinPush.receive(\"error\", () => {\n this.state = CHANNEL_STATES.errored\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.onClose(() => {\n this.rejoinTimer.reset()\n if(this.socket.hasLogger()) this.socket.log(\"channel\", 
`close ${this.topic} ${this.joinRef()}`)\n this.state = CHANNEL_STATES.closed\n this.socket.remove(this)\n })\n this.onError(reason => {\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `error ${this.topic}`, reason)\n if(this.isJoining()){ this.joinPush.reset() }\n this.state = CHANNEL_STATES.errored\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.joinPush.receive(\"timeout\", () => {\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout)\n let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), this.timeout)\n leavePush.send()\n this.state = CHANNEL_STATES.errored\n this.joinPush.reset()\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.on(CHANNEL_EVENTS.reply, (payload, ref) => {\n this.trigger(this.replyEventName(ref), payload)\n })\n }\n\n /**\n * Join the channel\n * @param {integer} timeout\n * @returns {Push}\n */\n join(timeout = this.timeout){\n if(this.joinedOnce){\n throw new Error(\"tried to join multiple times. 'join' can only be called a single time per channel instance\")\n } else {\n this.timeout = timeout\n this.joinedOnce = true\n this.rejoin()\n return this.joinPush\n }\n }\n\n /**\n * Hook into channel close\n * @param {Function} callback\n */\n onClose(callback){\n this.on(CHANNEL_EVENTS.close, callback)\n }\n\n /**\n * Hook into channel errors\n * @param {Function} callback\n */\n onError(callback){\n return this.on(CHANNEL_EVENTS.error, reason => callback(reason))\n }\n\n /**\n * Subscribes on channel events\n *\n * Subscription returns a ref counter, which can be used later to\n * unsubscribe the exact event listener\n *\n * @example\n * const ref1 = channel.on(\"event\", do_stuff)\n * const ref2 = channel.on(\"event\", do_other_stuff)\n * channel.off(\"event\", ref1)\n * // Since unsubscription, do_stuff won't fire,\n * // while do_other_stuff will keep firing on the \"event\"\n *\n * @param {string} event\n * @param {Function} callback\n * @returns {integer} ref\n */\n on(event, callback){\n let ref = this.bindingRef++\n this.bindings.push({event, ref, callback})\n return ref\n }\n\n /**\n * Unsubscribes off of channel events\n *\n * Use the ref returned from a channel.on() to unsubscribe one\n * handler, or pass nothing for the ref to unsubscribe all\n * handlers for the given event.\n *\n * @example\n * // Unsubscribe the do_stuff handler\n * const ref1 = channel.on(\"event\", do_stuff)\n * channel.off(\"event\", ref1)\n *\n * // Unsubscribe all handlers from event\n * channel.off(\"event\")\n *\n * @param {string} event\n * @param {integer} ref\n */\n off(event, ref){\n this.bindings = this.bindings.filter((bind) => {\n return !(bind.event === event && (typeof ref === \"undefined\" || ref === bind.ref))\n })\n }\n\n /**\n * @private\n */\n canPush(){ return this.socket.isConnected() && this.isJoined() }\n\n /**\n * Sends a message `event` to phoenix with the payload `payload`.\n * Phoenix receives this in the `handle_in(event, payload, socket)`\n * function. 
if phoenix replies or it times out (default 10000ms),\n * then optionally the reply can be received.\n *\n * @example\n * channel.push(\"event\")\n * .receive(\"ok\", payload => console.log(\"phoenix replied:\", payload))\n * .receive(\"error\", err => console.log(\"phoenix errored\", err))\n * .receive(\"timeout\", () => console.log(\"timed out pushing\"))\n * @param {string} event\n * @param {Object} payload\n * @param {number} [timeout]\n * @returns {Push}\n */\n push(event, payload, timeout = this.timeout){\n payload = payload || {}\n if(!this.joinedOnce){\n throw new Error(`tried to push '${event}' to '${this.topic}' before joining. Use channel.join() before pushing events`)\n }\n let pushEvent = new Push(this, event, function (){ return payload }, timeout)\n if(this.canPush()){\n pushEvent.send()\n } else {\n pushEvent.startTimeout()\n this.pushBuffer.push(pushEvent)\n }\n\n return pushEvent\n }\n\n /** Leaves the channel\n *\n * Unsubscribes from server events, and\n * instructs channel to terminate on server\n *\n * Triggers onClose() hooks\n *\n * To receive leave acknowledgements, use the `receive`\n * hook to bind to the server ack, ie:\n *\n * @example\n * channel.leave().receive(\"ok\", () => alert(\"left!\") )\n *\n * @param {integer} timeout\n * @returns {Push}\n */\n leave(timeout = this.timeout){\n this.rejoinTimer.reset()\n this.joinPush.cancelTimeout()\n\n this.state = CHANNEL_STATES.leaving\n let onClose = () => {\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `leave ${this.topic}`)\n this.trigger(CHANNEL_EVENTS.close, \"leave\")\n }\n let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout)\n leavePush.receive(\"ok\", () => onClose())\n .receive(\"timeout\", () => onClose())\n leavePush.send()\n if(!this.canPush()){ leavePush.trigger(\"ok\", {}) }\n\n return leavePush\n }\n\n /**\n * Overridable message hook\n *\n * Receives all events for specialized message handling\n * before dispatching to the channel callbacks.\n *\n * Must return the payload, modified or unmodified\n * @param {string} event\n * @param {Object} payload\n * @param {integer} ref\n * @returns {Object}\n */\n onMessage(_event, payload, _ref){ return payload }\n\n /**\n * @private\n */\n isMember(topic, event, payload, joinRef){\n if(this.topic !== topic){ return false }\n\n if(joinRef && joinRef !== this.joinRef()){\n if(this.socket.hasLogger()) this.socket.log(\"channel\", \"dropping outdated message\", {topic, event, payload, joinRef})\n return false\n } else {\n return true\n }\n }\n\n /**\n * @private\n */\n joinRef(){ return this.joinPush.ref }\n\n /**\n * @private\n */\n rejoin(timeout = this.timeout){\n if(this.isLeaving()){ return }\n this.socket.leaveOpenTopic(this.topic)\n this.state = CHANNEL_STATES.joining\n this.joinPush.resend(timeout)\n }\n\n /**\n * @private\n */\n trigger(event, payload, ref, joinRef){\n let handledPayload = this.onMessage(event, payload, ref, joinRef)\n if(payload && !handledPayload){ throw new Error(\"channel onMessage callbacks must return the payload, modified or unmodified\") }\n\n let eventBindings = this.bindings.filter(bind => bind.event === event)\n\n for(let i = 0; i < eventBindings.length; i++){\n let bind = eventBindings[i]\n bind.callback(handledPayload, ref, joinRef || this.joinRef())\n }\n }\n\n /**\n * @private\n */\n replyEventName(ref){ return `chan_reply_${ref}` }\n\n /**\n * @private\n */\n isClosed(){ return this.state === CHANNEL_STATES.closed }\n\n /**\n * @private\n */\n isErrored(){ return this.state === 
CHANNEL_STATES.errored }\n\n /**\n * @private\n */\n isJoined(){ return this.state === CHANNEL_STATES.joined }\n\n /**\n * @private\n */\n isJoining(){ return this.state === CHANNEL_STATES.joining }\n\n /**\n * @private\n */\n isLeaving(){ return this.state === CHANNEL_STATES.leaving }\n}\n", "import {\n global,\n XHR_STATES\n} from \"./constants\"\n\nexport default class Ajax {\n\n static request(method, endPoint, accept, body, timeout, ontimeout, callback){\n if(global.XDomainRequest){\n let req = new global.XDomainRequest() // IE8, IE9\n return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback)\n } else {\n let req = new global.XMLHttpRequest() // IE7+, Firefox, Chrome, Opera, Safari\n return this.xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback)\n }\n }\n\n static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback){\n req.timeout = timeout\n req.open(method, endPoint)\n req.onload = () => {\n let response = this.parseJSON(req.responseText)\n callback && callback(response)\n }\n if(ontimeout){ req.ontimeout = ontimeout }\n\n // Work around bug in IE9 that requires an attached onprogress handler\n req.onprogress = () => { }\n\n req.send(body)\n return req\n }\n\n static xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback){\n req.open(method, endPoint, true)\n req.timeout = timeout\n req.setRequestHeader(\"Content-Type\", accept)\n req.onerror = () => callback && callback(null)\n req.onreadystatechange = () => {\n if(req.readyState === XHR_STATES.complete && callback){\n let response = this.parseJSON(req.responseText)\n callback(response)\n }\n }\n if(ontimeout){ req.ontimeout = ontimeout }\n\n req.send(body)\n return req\n }\n\n static parseJSON(resp){\n if(!resp || resp === \"\"){ return null }\n\n try {\n return JSON.parse(resp)\n } catch (e){\n console && console.log(\"failed to parse JSON response\", resp)\n return null\n }\n }\n\n static serialize(obj, parentKey){\n let queryStr = []\n for(var key in obj){\n if(!Object.prototype.hasOwnProperty.call(obj, key)){ continue }\n let paramKey = parentKey ? `${parentKey}[${key}]` : key\n let paramVal = obj[key]\n if(typeof paramVal === \"object\"){\n queryStr.push(this.serialize(paramVal, paramKey))\n } else {\n queryStr.push(encodeURIComponent(paramKey) + \"=\" + encodeURIComponent(paramVal))\n }\n }\n return queryStr.join(\"&\")\n }\n\n static appendParams(url, params){\n if(Object.keys(params).length === 0){ return url }\n\n let prefix = url.match(/\\?/) ? 
\"&\" : \"?\"\n return `${url}${prefix}${this.serialize(params)}`\n }\n}\n", "import {\n SOCKET_STATES,\n TRANSPORTS\n} from \"./constants\"\n\nimport Ajax from \"./ajax\"\n\nexport default class LongPoll {\n\n constructor(endPoint){\n this.endPoint = null\n this.token = null\n this.skipHeartbeat = true\n this.reqs = new Set()\n this.onopen = function (){ } // noop\n this.onerror = function (){ } // noop\n this.onmessage = function (){ } // noop\n this.onclose = function (){ } // noop\n this.pollEndpoint = this.normalizeEndpoint(endPoint)\n this.readyState = SOCKET_STATES.connecting\n this.poll()\n }\n\n normalizeEndpoint(endPoint){\n return (endPoint\n .replace(\"ws://\", \"http://\")\n .replace(\"wss://\", \"https://\")\n .replace(new RegExp(\"(.*)\\/\" + TRANSPORTS.websocket), \"$1/\" + TRANSPORTS.longpoll))\n }\n\n endpointURL(){\n return Ajax.appendParams(this.pollEndpoint, {token: this.token})\n }\n\n closeAndRetry(code, reason, wasClean){\n this.close(code, reason, wasClean)\n this.readyState = SOCKET_STATES.connecting\n }\n\n ontimeout(){\n this.onerror(\"timeout\")\n this.closeAndRetry(1005, \"timeout\", false)\n }\n\n isActive(){ return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting }\n\n poll(){\n this.ajax(\"GET\", null, () => this.ontimeout(), resp => {\n if(resp){\n var {status, token, messages} = resp\n this.token = token\n } else {\n status = 0\n }\n\n switch(status){\n case 200:\n messages.forEach(msg => {\n // Tasks are what things like event handlers, setTimeout callbacks,\n // promise resolves and more are run within.\n // In modern browsers, there are two different kinds of tasks,\n // microtasks and macrotasks.\n // Microtasks are mainly used for Promises, while macrotasks are\n // used for everything else.\n // Microtasks always have priority over macrotasks. If the JS engine\n // is looking for a task to run, it will always try to empty the\n // microtask queue before attempting to run anything from the\n // macrotask queue.\n //\n // For the WebSocket transport, messages always arrive in their own\n // event. 
This means that if any promises are resolved from within,\n // their callbacks will always finish execution by the time the\n // next message event handler is run.\n //\n // In order to emulate this behaviour, we need to make sure each\n // onmessage handler is run within it's own macrotask.\n setTimeout(() => this.onmessage({data: msg}), 0)\n })\n this.poll()\n break\n case 204:\n this.poll()\n break\n case 410:\n this.readyState = SOCKET_STATES.open\n this.onopen({})\n this.poll()\n break\n case 403:\n this.onerror(403)\n this.close(1008, \"forbidden\", false)\n break\n case 0:\n case 500:\n this.onerror(500)\n this.closeAndRetry(1011, \"internal server error\", 500)\n break\n default: throw new Error(`unhandled poll status ${status}`)\n }\n })\n }\n\n send(body){\n this.ajax(\"POST\", body, () => this.onerror(\"timeout\"), resp => {\n if(!resp || resp.status !== 200){\n this.onerror(resp && resp.status)\n this.closeAndRetry(1011, \"internal server error\", false)\n }\n })\n }\n\n close(code, reason, wasClean){\n for(let req of this.reqs){ req.abort() }\n this.readyState = SOCKET_STATES.closed\n let opts = Object.assign({code: 1000, reason: undefined, wasClean: true}, {code, reason, wasClean})\n if(typeof(CloseEvent) !== \"undefined\"){\n this.onclose(new CloseEvent(\"close\", opts))\n } else {\n this.onclose(opts)\n }\n }\n\n ajax(method, body, onCallerTimeout, callback){\n let req\n let ontimeout = () => {\n this.reqs.delete(req)\n onCallerTimeout()\n }\n req = Ajax.request(method, this.endpointURL(), \"application/json\", body, this.timeout, ontimeout, resp => {\n this.reqs.delete(req)\n if(this.isActive()){ callback(resp) }\n })\n this.reqs.add(req)\n }\n}\n", "/**\n * Initializes the Presence\n * @param {Channel} channel - The Channel\n * @param {Object} opts - The options,\n * for example `{events: {state: \"state\", diff: \"diff\"}}`\n */\nexport default class Presence {\n\n constructor(channel, opts = {}){\n let events = opts.events || {state: \"presence_state\", diff: \"presence_diff\"}\n this.state = {}\n this.pendingDiffs = []\n this.channel = channel\n this.joinRef = null\n this.caller = {\n onJoin: function (){ },\n onLeave: function (){ },\n onSync: function (){ }\n }\n\n this.channel.on(events.state, newState => {\n let {onJoin, onLeave, onSync} = this.caller\n\n this.joinRef = this.channel.joinRef()\n this.state = Presence.syncState(this.state, newState, onJoin, onLeave)\n\n this.pendingDiffs.forEach(diff => {\n this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave)\n })\n this.pendingDiffs = []\n onSync()\n })\n\n this.channel.on(events.diff, diff => {\n let {onJoin, onLeave, onSync} = this.caller\n\n if(this.inPendingSyncState()){\n this.pendingDiffs.push(diff)\n } else {\n this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave)\n onSync()\n }\n })\n }\n\n onJoin(callback){ this.caller.onJoin = callback }\n\n onLeave(callback){ this.caller.onLeave = callback }\n\n onSync(callback){ this.caller.onSync = callback }\n\n list(by){ return Presence.list(this.state, by) }\n\n inPendingSyncState(){\n return !this.joinRef || (this.joinRef !== this.channel.joinRef())\n }\n\n // lower-level public static API\n\n /**\n * Used to sync the list of presences on the server\n * with the client's state. 
An optional `onJoin` and `onLeave` callback can\n * be provided to react to changes in the client's local presences across\n * disconnects and reconnects with the server.\n *\n * @returns {Presence}\n */\n static syncState(currentState, newState, onJoin, onLeave){\n let state = this.clone(currentState)\n let joins = {}\n let leaves = {}\n\n this.map(state, (key, presence) => {\n if(!newState[key]){\n leaves[key] = presence\n }\n })\n this.map(newState, (key, newPresence) => {\n let currentPresence = state[key]\n if(currentPresence){\n let newRefs = newPresence.metas.map(m => m.phx_ref)\n let curRefs = currentPresence.metas.map(m => m.phx_ref)\n let joinedMetas = newPresence.metas.filter(m => curRefs.indexOf(m.phx_ref) < 0)\n let leftMetas = currentPresence.metas.filter(m => newRefs.indexOf(m.phx_ref) < 0)\n if(joinedMetas.length > 0){\n joins[key] = newPresence\n joins[key].metas = joinedMetas\n }\n if(leftMetas.length > 0){\n leaves[key] = this.clone(currentPresence)\n leaves[key].metas = leftMetas\n }\n } else {\n joins[key] = newPresence\n }\n })\n return this.syncDiff(state, {joins: joins, leaves: leaves}, onJoin, onLeave)\n }\n\n /**\n *\n * Used to sync a diff of presence join and leave\n * events from the server, as they happen. Like `syncState`, `syncDiff`\n * accepts optional `onJoin` and `onLeave` callbacks to react to a user\n * joining or leaving from a device.\n *\n * @returns {Presence}\n */\n static syncDiff(state, diff, onJoin, onLeave){\n let {joins, leaves} = this.clone(diff)\n if(!onJoin){ onJoin = function (){ } }\n if(!onLeave){ onLeave = function (){ } }\n\n this.map(joins, (key, newPresence) => {\n let currentPresence = state[key]\n state[key] = this.clone(newPresence)\n if(currentPresence){\n let joinedRefs = state[key].metas.map(m => m.phx_ref)\n let curMetas = currentPresence.metas.filter(m => joinedRefs.indexOf(m.phx_ref) < 0)\n state[key].metas.unshift(...curMetas)\n }\n onJoin(key, currentPresence, newPresence)\n })\n this.map(leaves, (key, leftPresence) => {\n let currentPresence = state[key]\n if(!currentPresence){ return }\n let refsToRemove = leftPresence.metas.map(m => m.phx_ref)\n currentPresence.metas = currentPresence.metas.filter(p => {\n return refsToRemove.indexOf(p.phx_ref) < 0\n })\n onLeave(key, currentPresence, leftPresence)\n if(currentPresence.metas.length === 0){\n delete state[key]\n }\n })\n return state\n }\n\n /**\n * Returns the array of presences, with selected metadata.\n *\n * @param {Object} presences\n * @param {Function} chooser\n *\n * @returns {Presence}\n */\n static list(presences, chooser){\n if(!chooser){ chooser = function (key, pres){ return pres } }\n\n return this.map(presences, (key, presence) => {\n return chooser(key, presence)\n })\n }\n\n // private\n\n static map(obj, func){\n return Object.getOwnPropertyNames(obj).map(key => func(key, obj[key]))\n }\n\n static clone(obj){ return JSON.parse(JSON.stringify(obj)) }\n}\n", "/* The default serializer for encoding and decoding messages */\nimport {\n CHANNEL_EVENTS\n} from \"./constants\"\n\nexport default {\n HEADER_LENGTH: 1,\n META_LENGTH: 4,\n KINDS: {push: 0, reply: 1, broadcast: 2},\n\n encode(msg, callback){\n if(msg.payload.constructor === ArrayBuffer){\n return callback(this.binaryEncode(msg))\n } else {\n let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]\n return callback(JSON.stringify(payload))\n }\n },\n\n decode(rawPayload, callback){\n if(rawPayload.constructor === ArrayBuffer){\n return callback(this.binaryDecode(rawPayload))\n } 
else {\n let [join_ref, ref, topic, event, payload] = JSON.parse(rawPayload)\n return callback({join_ref, ref, topic, event, payload})\n }\n },\n\n // private\n\n binaryEncode(message){\n let {join_ref, ref, event, topic, payload} = message\n let metaLength = this.META_LENGTH + join_ref.length + ref.length + topic.length + event.length\n let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength)\n let view = new DataView(header)\n let offset = 0\n\n view.setUint8(offset++, this.KINDS.push) // kind\n view.setUint8(offset++, join_ref.length)\n view.setUint8(offset++, ref.length)\n view.setUint8(offset++, topic.length)\n view.setUint8(offset++, event.length)\n Array.from(join_ref, char => view.setUint8(offset++, char.charCodeAt(0)))\n Array.from(ref, char => view.setUint8(offset++, char.charCodeAt(0)))\n Array.from(topic, char => view.setUint8(offset++, char.charCodeAt(0)))\n Array.from(event, char => view.setUint8(offset++, char.charCodeAt(0)))\n\n var combined = new Uint8Array(header.byteLength + payload.byteLength)\n combined.set(new Uint8Array(header), 0)\n combined.set(new Uint8Array(payload), header.byteLength)\n\n return combined.buffer\n },\n\n binaryDecode(buffer){\n let view = new DataView(buffer)\n let kind = view.getUint8(0)\n let decoder = new TextDecoder()\n switch(kind){\n case this.KINDS.push: return this.decodePush(buffer, view, decoder)\n case this.KINDS.reply: return this.decodeReply(buffer, view, decoder)\n case this.KINDS.broadcast: return this.decodeBroadcast(buffer, view, decoder)\n }\n },\n\n decodePush(buffer, view, decoder){\n let joinRefSize = view.getUint8(1)\n let topicSize = view.getUint8(2)\n let eventSize = view.getUint8(3)\n let offset = this.HEADER_LENGTH + this.META_LENGTH - 1 // pushes have no ref\n let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize))\n offset = offset + joinRefSize\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n return {join_ref: joinRef, ref: null, topic: topic, event: event, payload: data}\n },\n\n decodeReply(buffer, view, decoder){\n let joinRefSize = view.getUint8(1)\n let refSize = view.getUint8(2)\n let topicSize = view.getUint8(3)\n let eventSize = view.getUint8(4)\n let offset = this.HEADER_LENGTH + this.META_LENGTH\n let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize))\n offset = offset + joinRefSize\n let ref = decoder.decode(buffer.slice(offset, offset + refSize))\n offset = offset + refSize\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n let payload = {status: event, response: data}\n return {join_ref: joinRef, ref: ref, topic: topic, event: CHANNEL_EVENTS.reply, payload: payload}\n },\n\n decodeBroadcast(buffer, view, decoder){\n let topicSize = view.getUint8(1)\n let eventSize = view.getUint8(2)\n let offset = this.HEADER_LENGTH + 2\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n\n return {join_ref: null, ref: null, topic: topic, event: event, payload: data}\n 
}\n}\n", "import {\n global,\n phxWindow,\n CHANNEL_EVENTS,\n DEFAULT_TIMEOUT,\n DEFAULT_VSN,\n SOCKET_STATES,\n TRANSPORTS,\n WS_CLOSE_NORMAL\n} from \"./constants\"\n\nimport {\n closure\n} from \"./utils\"\n\nimport Ajax from \"./ajax\"\nimport Channel from \"./channel\"\nimport LongPoll from \"./longpoll\"\nimport Serializer from \"./serializer\"\nimport Timer from \"./timer\"\n\n/** Initializes the Socket *\n *\n * For IE8 support use an ES5-shim (https://github.com/es-shims/es5-shim)\n *\n * @param {string} endPoint - The string WebSocket endpoint, ie, `\"ws://example.com/socket\"`,\n * `\"wss://example.com\"`\n * `\"/socket\"` (inherited host & protocol)\n * @param {Object} [opts] - Optional configuration\n * @param {Function} [opts.transport] - The Websocket Transport, for example WebSocket or Phoenix.LongPoll.\n *\n * Defaults to WebSocket with automatic LongPoll fallback.\n * @param {Function} [opts.encode] - The function to encode outgoing messages.\n *\n * Defaults to JSON encoder.\n *\n * @param {Function} [opts.decode] - The function to decode incoming messages.\n *\n * Defaults to JSON:\n *\n * ```javascript\n * (payload, callback) => callback(JSON.parse(payload))\n * ```\n *\n * @param {number} [opts.timeout] - The default timeout in milliseconds to trigger push timeouts.\n *\n * Defaults `DEFAULT_TIMEOUT`\n * @param {number} [opts.heartbeatIntervalMs] - The millisec interval to send a heartbeat message\n * @param {number} [opts.reconnectAfterMs] - The optional function that returns the millsec\n * socket reconnect interval.\n *\n * Defaults to stepped backoff of:\n *\n * ```javascript\n * function(tries){\n * return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000\n * }\n * ````\n *\n * @param {number} [opts.rejoinAfterMs] - The optional function that returns the millsec\n * rejoin interval for individual channels.\n *\n * ```javascript\n * function(tries){\n * return [1000, 2000, 5000][tries - 1] || 10000\n * }\n * ````\n *\n * @param {Function} [opts.logger] - The optional function for specialized logging, ie:\n *\n * ```javascript\n * function(kind, msg, data) {\n * console.log(`${kind}: ${msg}`, data)\n * }\n * ```\n *\n * @param {number} [opts.longpollerTimeout] - The maximum timeout of a long poll AJAX request.\n *\n * Defaults to 20s (double the server long poll timer).\n *\n * @param {(Object|function)} [opts.params] - The optional params to pass when connecting\n * @param {string} [opts.binaryType] - The binary type to use for binary WebSocket frames.\n *\n * Defaults to \"arraybuffer\"\n *\n * @param {vsn} [opts.vsn] - The serializer's protocol version to send on connect.\n *\n * Defaults to DEFAULT_VSN.\n*/\nexport default class Socket {\n constructor(endPoint, opts = {}){\n this.stateChangeCallbacks = {open: [], close: [], error: [], message: []}\n this.channels = []\n this.sendBuffer = []\n this.ref = 0\n this.timeout = opts.timeout || DEFAULT_TIMEOUT\n this.transport = opts.transport || global.WebSocket || LongPoll\n this.establishedConnections = 0\n this.defaultEncoder = Serializer.encode.bind(Serializer)\n this.defaultDecoder = Serializer.decode.bind(Serializer)\n this.closeWasClean = false\n this.binaryType = opts.binaryType || \"arraybuffer\"\n this.connectClock = 1\n if(this.transport !== LongPoll){\n this.encode = opts.encode || this.defaultEncoder\n this.decode = opts.decode || this.defaultDecoder\n } else {\n this.encode = this.defaultEncoder\n this.decode = this.defaultDecoder\n }\n let awaitingConnectionOnPageShow = null\n 
if(phxWindow && phxWindow.addEventListener){\n phxWindow.addEventListener(\"pagehide\", _e => {\n if(this.conn){\n this.disconnect()\n awaitingConnectionOnPageShow = this.connectClock\n }\n })\n phxWindow.addEventListener(\"pageshow\", _e => {\n if(awaitingConnectionOnPageShow === this.connectClock){\n awaitingConnectionOnPageShow = null\n this.connect()\n }\n })\n }\n this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 30000\n this.rejoinAfterMs = (tries) => {\n if(opts.rejoinAfterMs){\n return opts.rejoinAfterMs(tries)\n } else {\n return [1000, 2000, 5000][tries - 1] || 10000\n }\n }\n this.reconnectAfterMs = (tries) => {\n if(opts.reconnectAfterMs){\n return opts.reconnectAfterMs(tries)\n } else {\n return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000\n }\n }\n this.logger = opts.logger || null\n this.longpollerTimeout = opts.longpollerTimeout || 20000\n this.params = closure(opts.params || {})\n this.endPoint = `${endPoint}/${TRANSPORTS.websocket}`\n this.vsn = opts.vsn || DEFAULT_VSN\n this.heartbeatTimer = null\n this.pendingHeartbeatRef = null\n this.reconnectTimer = new Timer(() => {\n this.teardown(() => this.connect())\n }, this.reconnectAfterMs)\n }\n\n /**\n * Returns the LongPoll transport reference\n */\n getLongPollTransport(){ return LongPoll }\n\n /**\n * Disconnects and replaces the active transport\n *\n * @param {Function} newTransport - The new transport class to instantiate\n *\n */\n replaceTransport(newTransport){\n this.connectClock++\n this.closeWasClean = true\n this.reconnectTimer.reset()\n this.sendBuffer = []\n if(this.conn){\n this.conn.close()\n this.conn = null\n }\n this.transport = newTransport\n }\n\n /**\n * Returns the socket protocol\n *\n * @returns {string}\n */\n protocol(){ return location.protocol.match(/^https/) ? \"wss\" : \"ws\" }\n\n /**\n * The fully qualifed socket url\n *\n * @returns {string}\n */\n endPointURL(){\n let uri = Ajax.appendParams(\n Ajax.appendParams(this.endPoint, this.params()), {vsn: this.vsn})\n if(uri.charAt(0) !== \"/\"){ return uri }\n if(uri.charAt(1) === \"/\"){ return `${this.protocol()}:${uri}` }\n\n return `${this.protocol()}://${location.host}${uri}`\n }\n\n /**\n * Disconnects the socket\n *\n * See https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent#Status_codes for valid status codes.\n *\n * @param {Function} callback - Optional callback which is called after socket is disconnected.\n * @param {integer} code - A status code for disconnection (Optional).\n * @param {string} reason - A textual description of the reason to disconnect. (Optional)\n */\n disconnect(callback, code, reason){\n this.connectClock++\n this.closeWasClean = true\n this.reconnectTimer.reset()\n this.teardown(callback, code, reason)\n }\n\n /**\n *\n * @param {Object} params - The params to send when connecting, for example `{user_id: userToken}`\n *\n * Passing params to connect is deprecated; pass them in the Socket constructor instead:\n * `new Socket(\"/socket\", {params: {user_id: userToken}})`.\n */\n connect(params){\n if(params){\n console && console.log(\"passing params to connect is deprecated. 
Instead pass :params to the Socket constructor\")\n this.params = closure(params)\n }\n if(this.conn){ return }\n\n this.connectClock++\n this.closeWasClean = false\n this.conn = new this.transport(this.endPointURL())\n this.conn.binaryType = this.binaryType\n this.conn.timeout = this.longpollerTimeout\n this.conn.onopen = () => this.onConnOpen()\n this.conn.onerror = error => this.onConnError(error)\n this.conn.onmessage = event => this.onConnMessage(event)\n this.conn.onclose = event => this.onConnClose(event)\n }\n\n /**\n * Logs the message. Override `this.logger` for specialized logging. noops by default\n * @param {string} kind\n * @param {string} msg\n * @param {Object} data\n */\n log(kind, msg, data){ this.logger(kind, msg, data) }\n\n /**\n * Returns true if a logger has been set on this socket.\n */\n hasLogger(){ return this.logger !== null }\n\n /**\n * Registers callbacks for connection open events\n *\n * @example socket.onOpen(function(){ console.info(\"the socket was opened\") })\n *\n * @param {Function} callback\n */\n onOpen(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.open.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection close events\n * @param {Function} callback\n */\n onClose(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.close.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection error events\n *\n * @example socket.onError(function(error){ alert(\"An error occurred\") })\n *\n * @param {Function} callback\n */\n onError(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.error.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection message events\n * @param {Function} callback\n */\n onMessage(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.message.push([ref, callback])\n return ref\n }\n\n /**\n * Pings the server and invokes the callback with the RTT in milliseconds\n * @param {Function} callback\n *\n * Returns true if the ping was pushed or false if unable to be pushed.\n */\n ping(callback){\n if(!this.isConnected()){ return false }\n let ref = this.makeRef()\n let startTime = Date.now()\n this.push({topic: \"phoenix\", event: \"heartbeat\", payload: {}, ref: ref})\n let onMsgRef = this.onMessage(msg => {\n if(msg.ref === ref){\n this.off([onMsgRef])\n callback(Date.now() - startTime)\n }\n })\n return true\n }\n\n /**\n * @private\n */\n onConnOpen(){\n if(this.hasLogger()) this.log(\"transport\", `connected to ${this.endPointURL()}`)\n this.closeWasClean = false\n this.establishedConnections++\n this.flushSendBuffer()\n this.reconnectTimer.reset()\n this.resetHeartbeat()\n this.stateChangeCallbacks.open.forEach(([, callback]) => callback())\n }\n\n /**\n * @private\n */\n\n heartbeatTimeout(){\n if(this.pendingHeartbeatRef){\n this.pendingHeartbeatRef = null\n if(this.hasLogger()){ this.log(\"transport\", \"heartbeat timeout. 
Attempting to re-establish connection\") }\n this.abnormalClose(\"heartbeat timeout\")\n }\n }\n\n resetHeartbeat(){\n if(this.conn && this.conn.skipHeartbeat){ return }\n this.pendingHeartbeatRef = null\n clearTimeout(this.heartbeatTimer)\n setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs)\n }\n\n teardown(callback, code, reason){\n if(!this.conn){\n return callback && callback()\n }\n\n this.waitForBufferDone(() => {\n if(this.conn){\n if(code){ this.conn.close(code, reason || \"\") } else { this.conn.close() }\n }\n\n this.waitForSocketClosed(() => {\n if(this.conn){\n this.conn.onclose = function (){ } // noop\n this.conn = null\n }\n\n callback && callback()\n })\n })\n }\n\n waitForBufferDone(callback, tries = 1){\n if(tries === 5 || !this.conn || !this.conn.bufferedAmount){\n callback()\n return\n }\n\n setTimeout(() => {\n this.waitForBufferDone(callback, tries + 1)\n }, 150 * tries)\n }\n\n waitForSocketClosed(callback, tries = 1){\n if(tries === 5 || !this.conn || this.conn.readyState === SOCKET_STATES.closed){\n callback()\n return\n }\n\n setTimeout(() => {\n this.waitForSocketClosed(callback, tries + 1)\n }, 150 * tries)\n }\n\n onConnClose(event){\n let closeCode = event && event.code\n if(this.hasLogger()) this.log(\"transport\", \"close\", event)\n this.triggerChanError()\n clearTimeout(this.heartbeatTimer)\n if(!this.closeWasClean && closeCode !== 1000){\n this.reconnectTimer.scheduleTimeout()\n }\n this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event))\n }\n\n /**\n * @private\n */\n onConnError(error){\n if(this.hasLogger()) this.log(\"transport\", error)\n let transportBefore = this.transport\n let establishedBefore = this.establishedConnections\n this.stateChangeCallbacks.error.forEach(([, callback]) => {\n callback(error, transportBefore, establishedBefore)\n })\n if(transportBefore === this.transport || establishedBefore > 0){\n this.triggerChanError()\n }\n }\n\n /**\n * @private\n */\n triggerChanError(){\n this.channels.forEach(channel => {\n if(!(channel.isErrored() || channel.isLeaving() || channel.isClosed())){\n channel.trigger(CHANNEL_EVENTS.error)\n }\n })\n }\n\n /**\n * @returns {string}\n */\n connectionState(){\n switch(this.conn && this.conn.readyState){\n case SOCKET_STATES.connecting: return \"connecting\"\n case SOCKET_STATES.open: return \"open\"\n case SOCKET_STATES.closing: return \"closing\"\n default: return \"closed\"\n }\n }\n\n /**\n * @returns {boolean}\n */\n isConnected(){ return this.connectionState() === \"open\" }\n\n /**\n * @private\n *\n * @param {Channel}\n */\n remove(channel){\n this.off(channel.stateChangeRefs)\n this.channels = this.channels.filter(c => c.joinRef() !== channel.joinRef())\n }\n\n /**\n * Removes `onOpen`, `onClose`, `onError,` and `onMessage` registrations.\n *\n * @param {refs} - list of refs returned by calls to\n * `onOpen`, `onClose`, `onError,` and `onMessage`\n */\n off(refs){\n for(let key in this.stateChangeCallbacks){\n this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => {\n return refs.indexOf(ref) === -1\n })\n }\n }\n\n /**\n * Initiates a new channel for the given topic\n *\n * @param {string} topic\n * @param {Object} chanParams - Parameters for the channel\n * @returns {Channel}\n */\n channel(topic, chanParams = {}){\n let chan = new Channel(topic, chanParams, this)\n this.channels.push(chan)\n return chan\n }\n\n /**\n * @param {Object} data\n */\n push(data){\n if(this.hasLogger()){\n let {topic, event, payload, ref, join_ref} = 
data\n this.log(\"push\", `${topic} ${event} (${join_ref}, ${ref})`, payload)\n }\n\n if(this.isConnected()){\n this.encode(data, result => this.conn.send(result))\n } else {\n this.sendBuffer.push(() => this.encode(data, result => this.conn.send(result)))\n }\n }\n\n /**\n * Return the next message ref, accounting for overflows\n * @returns {string}\n */\n makeRef(){\n let newRef = this.ref + 1\n if(newRef === this.ref){ this.ref = 0 } else { this.ref = newRef }\n\n return this.ref.toString()\n }\n\n sendHeartbeat(){\n if(this.pendingHeartbeatRef && !this.isConnected()){ return }\n this.pendingHeartbeatRef = this.makeRef()\n this.push({topic: \"phoenix\", event: \"heartbeat\", payload: {}, ref: this.pendingHeartbeatRef})\n this.heartbeatTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs)\n }\n\n abnormalClose(reason){\n this.closeWasClean = false\n if(this.isConnected()){ this.conn.close(WS_CLOSE_NORMAL, reason) }\n }\n\n flushSendBuffer(){\n if(this.isConnected() && this.sendBuffer.length > 0){\n this.sendBuffer.forEach(callback => callback())\n this.sendBuffer = []\n }\n }\n\n onConnMessage(rawMessage){\n this.decode(rawMessage.data, msg => {\n let {topic, event, payload, ref, join_ref} = msg\n if(ref && ref === this.pendingHeartbeatRef){\n clearTimeout(this.heartbeatTimer)\n this.pendingHeartbeatRef = null\n setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs)\n }\n\n if(this.hasLogger()) this.log(\"receive\", `${payload.status || \"\"} ${topic} ${event} ${ref && \"(\" + ref + \")\" || \"\"}`, payload)\n\n for(let i = 0; i < this.channels.length; i++){\n const channel = this.channels[i]\n if(!channel.isMember(topic, event, payload, join_ref)){ continue }\n channel.trigger(event, payload, ref, join_ref)\n }\n\n for(let i = 0; i < this.stateChangeCallbacks.message.length; i++){\n let [, callback] = this.stateChangeCallbacks.message[i]\n callback(msg)\n }\n })\n }\n\n leaveOpenTopic(topic){\n let dupChannel = this.channels.find(c => c.topic === topic && (c.isJoined() || c.isJoining()))\n if(dupChannel){\n if(this.hasLogger()) this.log(\"transport\", `leaving duplicate topic \"${topic}\"`)\n dupChannel.leave()\n }\n }\n}\n"], + "mappings": 
";;;;;;;;;;;;;;;;;;;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;AAAA;;;ACCO,IAAI,UAAU,CAAC,UAAU;AAC9B,MAAG,OAAO,UAAU,YAAW;AAC7B,WAAO;AAAA,EACT,OAAO;AACL,QAAI,WAAU,WAAW;AAAE,aAAO;AAAA,IAAM;AACxC,WAAO;AAAA,EACT;AACF;;;ACRO,IAAM,aAAa,OAAO,SAAS,cAAc,OAAO;AACxD,IAAM,YAAY,OAAO,WAAW,cAAc,SAAS;AAC3D,IAAM,SAAS,cAAc,aAAa;AAC1C,IAAM,cAAc;AACpB,IAAM,gBAAgB,EAAC,YAAY,GAAG,MAAM,GAAG,SAAS,GAAG,QAAQ,EAAC;AACpE,IAAM,kBAAkB;AACxB,IAAM,kBAAkB;AACxB,IAAM,iBAAiB;AAAA,EAC5B,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,SAAS;AACX;AACO,IAAM,iBAAiB;AAAA,EAC5B,OAAO;AAAA,EACP,OAAO;AAAA,EACP,MAAM;AAAA,EACN,OAAO;AAAA,EACP,OAAO;AACT;AAEO,IAAM,aAAa;AAAA,EACxB,UAAU;AAAA,EACV,WAAW;AACb;AACO,IAAM,aAAa;AAAA,EACxB,UAAU;AACZ;;;ACrBA,IAAqB,OAArB,MAA0B;AAAA,EACxB,YAAY,SAAS,OAAO,SAAS,SAAQ;AAC3C,SAAK,UAAU;AACf,SAAK,QAAQ;AACb,SAAK,UAAU,WAAW,WAAW;AAAE,aAAO,CAAC;AAAA,IAAE;AACjD,SAAK,eAAe;AACpB,SAAK,UAAU;AACf,SAAK,eAAe;AACpB,SAAK,WAAW,CAAC;AACjB,SAAK,OAAO;AAAA,EACd;AAAA,EAMA,OAAO,SAAQ;AACb,SAAK,UAAU;AACf,SAAK,MAAM;AACX,SAAK,KAAK;AAAA,EACZ;AAAA,EAKA,OAAM;AACJ,QAAG,KAAK,YAAY,SAAS,GAAE;AAAE;AAAA,IAAO;AACxC,SAAK,aAAa;AAClB,SAAK,OAAO;AACZ,SAAK,QAAQ,OAAO,KAAK;AAAA,MACvB,OAAO,KAAK,QAAQ;AAAA,MACpB,OAAO,KAAK;AAAA,MACZ,SAAS,KAAK,QAAQ;AAAA,MACtB,KAAK,KAAK;AAAA,MACV,UAAU,KAAK,QAAQ,QAAQ;AAAA,IACjC,CAAC;AAAA,EACH;AAAA,EAOA,QAAQ,QAAQ,UAAS;AACvB,QAAG,KAAK,YAAY,MAAM,GAAE;AAC1B,eAAS,KAAK,aAAa,QAAQ;AAAA,IACrC;AAEA,SAAK,SAAS,KAAK,EAAC,QAAQ,SAAQ,CAAC;AACrC,WAAO;AAAA,EACT;AAAA,EAKA,QAAO;AACL,SAAK,eAAe;AACpB,SAAK,MAAM;AACX,SAAK,WAAW;AAChB,SAAK,eAAe;AACpB,SAAK,OAAO;AAAA,EACd;AAAA,EAKA,aAAa,EAAC,QAAQ,UAAU,QAAM;AACpC,SAAK,SAAS,OAAO,OAAK,EAAE,WAAW,MAAM,EAC1C,QAAQ,OAAK,EAAE,SAAS,QAAQ,CAAC;AAAA,EACtC;AAAA,EAKA,iBAAgB;AACd,QAAG,CAAC,KAAK,UAAS;AAAE;AAAA,IAAO;AAC3B,SAAK,QAAQ,IAAI,KAAK,QAAQ;AAAA,EAChC;AAAA,EAKA,gBAAe;AACb,iBAAa,KAAK,YAAY;AAC9B,SAAK,eAAe;AAAA,EACtB;AAAA,EAKA,eAAc;AACZ,QAAG,KAAK,cAAa;AAAE,WAAK,cAAc;AAAA,IAAE;AAC5C,SAAK,MAAM,KAAK,QAAQ,OAAO,QAAQ;AACvC,SAAK,WAAW,KAAK,QAAQ,eAAe,KAAK,GAAG;AAEpD,SAAK,QAAQ,GAAG,KAAK,UAAU,aAAW;AACxC,WAAK,eAAe;AACpB,WAAK,cAAc;AACnB,WAAK,eAAe;AACpB,WAAK,aAAa,OAAO;AAAA,IAC3B,CAAC;AAED,SAAK,eAAe,WAAW,MAAM;AACnC,WAAK,QAAQ,WAAW,CAAC,CAAC;AAAA,IAC5B,GAAG,KAAK,OAAO;AAAA,EACjB;AAAA,EAKA,YAAY,QAAO;AACjB,WAAO,KAAK,gBAAgB,KAAK,aAAa,WAAW;AAAA,EAC3D;AAAA,EAKA,QAAQ,QAAQ,UAAS;AACvB,SAAK,QAAQ,QAAQ,KAAK,UAAU,EAAC,QAAQ,SAAQ,CAAC;AAAA,EACxD;AACF;;;AC9GA,IAAqB,QAArB,MAA2B;AAAA,EACzB,YAAY,UAAU,WAAU;AAC9B,SAAK,WAAW;AAChB,SAAK,YAAY;AACjB,SAAK,QAAQ;AACb,SAAK,QAAQ;AAAA,EACf;AAAA,EAEA,QAAO;AACL,SAAK,QAAQ;AACb,iBAAa,KAAK,KAAK;AAAA,EACzB;AAAA,EAKA,kBAAiB;AACf,iBAAa,KAAK,KAAK;AAEvB,SAAK,QAAQ,WAAW,MAAM;AAC5B,WAAK,QAAQ,KAAK,QAAQ;AAC1B,WAAK,SAAS;AAAA,IAChB,GAAG,KAAK,UAAU,KAAK,QAAQ,CAAC,CAAC;AAAA,EACnC;AACF;;;AC1BA,IAAqB,UAArB,MAA6B;AAAA,EAC3B,YAAY,OAAO,QAAQ,QAAO;AAChC,SAAK,QAAQ,eAAe;AAC5B,SAAK,QAAQ;AACb,SAAK,SAAS,QAAQ,UAAU,CAAC,CAAC;AAClC,SAAK,SAAS;AACd,SAAK,WAAW,CAAC;AACjB,SAAK,aAAa;AAClB,SAAK,UAAU,KAAK,OAAO;AAC3B,SAAK,aAAa;AAClB,SAAK,WAAW,IAAI,KAAK,MAAM,eAAe,MAAM,KAAK,QAAQ,KAAK,OAAO;AAC7E,SAAK,aAAa,CAAC;AACnB,SAAK,kBAAkB,CAAC;AAExB,SAAK,cAAc,IAAI,MAAM,MAAM;AACjC,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,OAAO;AAAA,MAAE;AAAA,IAC/C,GAAG,KAAK,OAAO,aAAa;AAC5B,SAAK,gBAAgB,KAAK,KAAK,OAAO,QAAQ,MAAM,KAAK,YAAY,MAAM,CAAC,CAAC;AAC7E,SAAK,gBAAgB,KAAK,KAAK,OAAO,OAAO,MAAM;AACjD,WAAK,YAAY,MAAM;AACvB,UAAG,KAAK,UAAU,GAAE;AAAE,aAAK,OAAO;AAAA,MAAE;AAAA,IACtC,CAAC,CACD;AACA,SAAK,SAAS,QAAQ,MAAM,MAAM;AAChC,WAAK,QAAQ,eAAe;AAC5B,WAAK,YAAY,MAAM;AACvB,WAAK,WAAW,QAAQ,eAAa,UAAU,KAAK,CAAC;AACrD,WAAK,aAAa,CAAC;AAAA,IACrB,CAAC;AACD,SAAK,SAAS,QAAQ,SAAS,MAAM;AACnC,WAAK,QAAQ,eAAe;AAC5B,UA
AG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE,CAAC;AACD,SAAK,QAAQ,MAAM;AACjB,WAAK,YAAY,MAAM;AACvB,UAAG,KAAK,OAAO,UAAU;AAAG,aAAK,OAAO,IAAI,WAAW,SAAS,KAAK,SAAS,KAAK,QAAQ,GAAG;AAC9F,WAAK,QAAQ,eAAe;AAC5B,WAAK,OAAO,OAAO,IAAI;AAAA,IACzB,CAAC;AACD,SAAK,QAAQ,YAAU;AACrB,UAAG,KAAK,OAAO,UAAU;AAAG,aAAK,OAAO,IAAI,WAAW,SAAS,KAAK,SAAS,MAAM;AACpF,UAAG,KAAK,UAAU,GAAE;AAAE,aAAK,SAAS,MAAM;AAAA,MAAE;AAC5C,WAAK,QAAQ,eAAe;AAC5B,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE,CAAC;AACD,SAAK,SAAS,QAAQ,WAAW,MAAM;AACrC,UAAG,KAAK,OAAO,UAAU;AAAG,aAAK,OAAO,IAAI,WAAW,WAAW,KAAK,UAAU,KAAK,QAAQ,MAAM,KAAK,SAAS,OAAO;AACzH,UAAI,YAAY,IAAI,KAAK,MAAM,eAAe,OAAO,QAAQ,CAAC,CAAC,GAAG,KAAK,OAAO;AAC9E,gBAAU,KAAK;AACf,WAAK,QAAQ,eAAe;AAC5B,WAAK,SAAS,MAAM;AACpB,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE,CAAC;AACD,SAAK,GAAG,eAAe,OAAO,CAAC,SAAS,QAAQ;AAC9C,WAAK,QAAQ,KAAK,eAAe,GAAG,GAAG,OAAO;AAAA,IAChD,CAAC;AAAA,EACH;AAAA,EAOA,KAAK,UAAU,KAAK,SAAQ;AAC1B,QAAG,KAAK,YAAW;AACjB,YAAM,IAAI,MAAM,4FAA4F;AAAA,IAC9G,OAAO;AACL,WAAK,UAAU;AACf,WAAK,aAAa;AAClB,WAAK,OAAO;AACZ,aAAO,KAAK;AAAA,IACd;AAAA,EACF;AAAA,EAMA,QAAQ,UAAS;AACf,SAAK,GAAG,eAAe,OAAO,QAAQ;AAAA,EACxC;AAAA,EAMA,QAAQ,UAAS;AACf,WAAO,KAAK,GAAG,eAAe,OAAO,YAAU,SAAS,MAAM,CAAC;AAAA,EACjE;AAAA,EAmBA,GAAG,OAAO,UAAS;AACjB,QAAI,MAAM,KAAK;AACf,SAAK,SAAS,KAAK,EAAC,OAAO,KAAK,SAAQ,CAAC;AACzC,WAAO;AAAA,EACT;AAAA,EAoBA,IAAI,OAAO,KAAI;AACb,SAAK,WAAW,KAAK,SAAS,OAAO,CAAC,SAAS;AAC7C,aAAO,CAAE,MAAK,UAAU,SAAU,QAAO,QAAQ,eAAe,QAAQ,KAAK;AAAA,IAC/E,CAAC;AAAA,EACH;AAAA,EAKA,UAAS;AAAE,WAAO,KAAK,OAAO,YAAY,KAAK,KAAK,SAAS;AAAA,EAAE;AAAA,EAkB/D,KAAK,OAAO,SAAS,UAAU,KAAK,SAAQ;AAC1C,cAAU,WAAW,CAAC;AACtB,QAAG,CAAC,KAAK,YAAW;AAClB,YAAM,IAAI,MAAM,kBAAkB,cAAc,KAAK,iEAAiE;AAAA,IACxH;AACA,QAAI,YAAY,IAAI,KAAK,MAAM,OAAO,WAAW;AAAE,aAAO;AAAA,IAAQ,GAAG,OAAO;AAC5E,QAAG,KAAK,QAAQ,GAAE;AAChB,gBAAU,KAAK;AAAA,IACjB,OAAO;AACL,gBAAU,aAAa;AACvB,WAAK,WAAW,KAAK,SAAS;AAAA,IAChC;AAEA,WAAO;AAAA,EACT;AAAA,EAkBA,MAAM,UAAU,KAAK,SAAQ;AAC3B,SAAK,YAAY,MAAM;AACvB,SAAK,SAAS,cAAc;AAE5B,SAAK,QAAQ,eAAe;AAC5B,QAAI,UAAU,MAAM;AAClB,UAAG,KAAK,OAAO,UAAU;AAAG,aAAK,OAAO,IAAI,WAAW,SAAS,KAAK,OAAO;AAC5E,WAAK,QAAQ,eAAe,OAAO,OAAO;AAAA,IAC5C;AACA,QAAI,YAAY,IAAI,KAAK,MAAM,eAAe,OAAO,QAAQ,CAAC,CAAC,GAAG,OAAO;AACzE,cAAU,QAAQ,MAAM,MAAM,QAAQ,CAAC,EACpC,QAAQ,WAAW,MAAM,QAAQ,CAAC;AACrC,cAAU,KAAK;AACf,QAAG,CAAC,KAAK,QAAQ,GAAE;AAAE,gBAAU,QAAQ,MAAM,CAAC,CAAC;AAAA,IAAE;AAEjD,WAAO;AAAA,EACT;AAAA,EAcA,UAAU,QAAQ,SAAS,MAAK;AAAE,WAAO;AAAA,EAAQ;AAAA,EAKjD,SAAS,OAAO,OAAO,SAAS,SAAQ;AACtC,QAAG,KAAK,UAAU,OAAM;AAAE,aAAO;AAAA,IAAM;AAEvC,QAAG,WAAW,YAAY,KAAK,QAAQ,GAAE;AACvC,UAAG,KAAK,OAAO,UAAU;AAAG,aAAK,OAAO,IAAI,WAAW,6BAA6B,EAAC,OAAO,OAAO,SAAS,QAAO,CAAC;AACpH,aAAO;AAAA,IACT,OAAO;AACL,aAAO;AAAA,IACT;AAAA,EACF;AAAA,EAKA,UAAS;AAAE,WAAO,KAAK,SAAS;AAAA,EAAI;AAAA,EAKpC,OAAO,UAAU,KAAK,SAAQ;AAC5B,QAAG,KAAK,UAAU,GAAE;AAAE;AAAA,IAAO;AAC7B,SAAK,OAAO,eAAe,KAAK,KAAK;AACrC,SAAK,QAAQ,eAAe;AAC5B,SAAK,SAAS,OAAO,OAAO;AAAA,EAC9B;AAAA,EAKA,QAAQ,OAAO,SAAS,KAAK,SAAQ;AACnC,QAAI,iBAAiB,KAAK,UAAU,OAAO,SAAS,KAAK,OAAO;AAChE,QAAG,WAAW,CAAC,gBAAe;AAAE,YAAM,IAAI,MAAM,6EAA6E;AAAA,IAAE;AAE/H,QAAI,gBAAgB,KAAK,SAAS,OAAO,UAAQ,KAAK,UAAU,KAAK;AAErE,aAAQ,IAAI,GAAG,IAAI,cAAc,QAAQ,KAAI;AAC3C,UAAI,OAAO,cAAc;AACzB,WAAK,SAAS,gBAAgB,KAAK,WAAW,KAAK,QAAQ,CAAC;AAAA,IAC9D;AAAA,EACF;AAAA,EAKA,eAAe,KAAI;AAAE,WAAO,cAAc;AAAA,EAAM;AAAA,EAKhD,WAAU;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAO;AAAA,EAKxD,YAAW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAAA,EAK1D,WAAU;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAO;AAAA,EAKxD,YAAW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAAA,EAK1D,YAAW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAC5D;;;
ACjTA,IAAqB,OAArB,MAA0B;AAAA,EAExB,OAAO,QAAQ,QAAQ,UAAU,QAAQ,MAAM,SAAS,WAAW,UAAS;AAC1E,QAAG,OAAO,gBAAe;AACvB,UAAI,MAAM,IAAI,OAAO,eAAe;AACpC,aAAO,KAAK,eAAe,KAAK,QAAQ,UAAU,MAAM,SAAS,WAAW,QAAQ;AAAA,IACtF,OAAO;AACL,UAAI,MAAM,IAAI,OAAO,eAAe;AACpC,aAAO,KAAK,WAAW,KAAK,QAAQ,UAAU,QAAQ,MAAM,SAAS,WAAW,QAAQ;AAAA,IAC1F;AAAA,EACF;AAAA,EAEA,OAAO,eAAe,KAAK,QAAQ,UAAU,MAAM,SAAS,WAAW,UAAS;AAC9E,QAAI,UAAU;AACd,QAAI,KAAK,QAAQ,QAAQ;AACzB,QAAI,SAAS,MAAM;AACjB,UAAI,WAAW,KAAK,UAAU,IAAI,YAAY;AAC9C,kBAAY,SAAS,QAAQ;AAAA,IAC/B;AACA,QAAG,WAAU;AAAE,UAAI,YAAY;AAAA,IAAU;AAGzC,QAAI,aAAa,MAAM;AAAA,IAAE;AAEzB,QAAI,KAAK,IAAI;AACb,WAAO;AAAA,EACT;AAAA,EAEA,OAAO,WAAW,KAAK,QAAQ,UAAU,QAAQ,MAAM,SAAS,WAAW,UAAS;AAClF,QAAI,KAAK,QAAQ,UAAU,IAAI;AAC/B,QAAI,UAAU;AACd,QAAI,iBAAiB,gBAAgB,MAAM;AAC3C,QAAI,UAAU,MAAM,YAAY,SAAS,IAAI;AAC7C,QAAI,qBAAqB,MAAM;AAC7B,UAAG,IAAI,eAAe,WAAW,YAAY,UAAS;AACpD,YAAI,WAAW,KAAK,UAAU,IAAI,YAAY;AAC9C,iBAAS,QAAQ;AAAA,MACnB;AAAA,IACF;AACA,QAAG,WAAU;AAAE,UAAI,YAAY;AAAA,IAAU;AAEzC,QAAI,KAAK,IAAI;AACb,WAAO;AAAA,EACT;AAAA,EAEA,OAAO,UAAU,MAAK;AACpB,QAAG,CAAC,QAAQ,SAAS,IAAG;AAAE,aAAO;AAAA,IAAK;AAEtC,QAAI;AACF,aAAO,KAAK,MAAM,IAAI;AAAA,IACxB,SAAS,GAAP;AACA,iBAAW,QAAQ,IAAI,iCAAiC,IAAI;AAC5D,aAAO;AAAA,IACT;AAAA,EACF;AAAA,EAEA,OAAO,UAAU,KAAK,WAAU;AAC9B,QAAI,WAAW,CAAC;AAChB,aAAQ,OAAO,KAAI;AACjB,UAAG,CAAC,OAAO,UAAU,eAAe,KAAK,KAAK,GAAG,GAAE;AAAE;AAAA,MAAS;AAC9D,UAAI,WAAW,YAAY,GAAG,aAAa,SAAS;AACpD,UAAI,WAAW,IAAI;AACnB,UAAG,OAAO,aAAa,UAAS;AAC9B,iBAAS,KAAK,KAAK,UAAU,UAAU,QAAQ,CAAC;AAAA,MAClD,OAAO;AACL,iBAAS,KAAK,mBAAmB,QAAQ,IAAI,MAAM,mBAAmB,QAAQ,CAAC;AAAA,MACjF;AAAA,IACF;AACA,WAAO,SAAS,KAAK,GAAG;AAAA,EAC1B;AAAA,EAEA,OAAO,aAAa,KAAK,QAAO;AAC9B,QAAG,OAAO,KAAK,MAAM,EAAE,WAAW,GAAE;AAAE,aAAO;AAAA,IAAI;AAEjD,QAAI,SAAS,IAAI,MAAM,IAAI,IAAI,MAAM;AACrC,WAAO,GAAG,MAAM,SAAS,KAAK,UAAU,MAAM;AAAA,EAChD;AACF;;;AC3EA,IAAqB,WAArB,MAA8B;AAAA,EAE5B,YAAY,UAAS;AACnB,SAAK,WAAW;AAChB,SAAK,QAAQ;AACb,SAAK,gBAAgB;AACrB,SAAK,OAAO,oBAAI,IAAI;AACpB,SAAK,SAAS,WAAW;AAAA,IAAE;AAC3B,SAAK,UAAU,WAAW;AAAA,IAAE;AAC5B,SAAK,YAAY,WAAW;AAAA,IAAE;AAC9B,SAAK,UAAU,WAAW;AAAA,IAAE;AAC5B,SAAK,eAAe,KAAK,kBAAkB,QAAQ;AACnD,SAAK,aAAa,cAAc;AAChC,SAAK,KAAK;AAAA,EACZ;AAAA,EAEA,kBAAkB,UAAS;AACzB,WAAQ,SACL,QAAQ,SAAS,SAAS,EAC1B,QAAQ,UAAU,UAAU,EAC5B,QAAQ,IAAI,OAAO,UAAW,WAAW,SAAS,GAAG,QAAQ,WAAW,QAAQ;AAAA,EACrF;AAAA,EAEA,cAAa;AACX,WAAO,KAAK,aAAa,KAAK,cAAc,EAAC,OAAO,KAAK,MAAK,CAAC;AAAA,EACjE;AAAA,EAEA,cAAc,MAAM,QAAQ,UAAS;AACnC,SAAK,MAAM,MAAM,QAAQ,QAAQ;AACjC,SAAK,aAAa,cAAc;AAAA,EAClC;AAAA,EAEA,YAAW;AACT,SAAK,QAAQ,SAAS;AACtB,SAAK,cAAc,MAAM,WAAW,KAAK;AAAA,EAC3C;AAAA,EAEA,WAAU;AAAE,WAAO,KAAK,eAAe,cAAc,QAAQ,KAAK,eAAe,cAAc;AAAA,EAAW;AAAA,EAE1G,OAAM;AACJ,SAAK,KAAK,OAAO,MAAM,MAAM,KAAK,UAAU,GAAG,UAAQ;AACrD,UAAG,MAAK;AACN,YAAI,EAAC,QAAQ,OAAO,aAAY;AAChC,aAAK,QAAQ;AAAA,MACf,OAAO;AACL,iBAAS;AAAA,MACX;AAEA,cAAO;AAAA,aACA;AACH,mBAAS,QAAQ,SAAO;AAmBtB,uBAAW,MAAM,KAAK,UAAU,EAAC,MAAM,IAAG,CAAC,GAAG,CAAC;AAAA,UACjD,CAAC;AACD,eAAK,KAAK;AACV;AAAA,aACG;AACH,eAAK,KAAK;AACV;AAAA,aACG;AACH,eAAK,aAAa,cAAc;AAChC,eAAK,OAAO,CAAC,CAAC;AACd,eAAK,KAAK;AACV;AAAA,aACG;AACH,eAAK,QAAQ,GAAG;AAChB,eAAK,MAAM,MAAM,aAAa,KAAK;AACnC;AAAA,aACG;AAAA,aACA;AACH,eAAK,QAAQ,GAAG;AAChB,eAAK,cAAc,MAAM,yBAAyB,GAAG;AACrD;AAAA;AACO,gBAAM,IAAI,MAAM,yBAAyB,QAAQ;AAAA;AAAA,IAE9D,CAAC;AAAA,EACH;AAAA,EAEA,KAAK,MAAK;AACR,SAAK,KAAK,QAAQ,MAAM,MAAM,KAAK,QAAQ,SAAS,GAAG,UAAQ;AAC7D,UAAG,CAAC,QAAQ,KAAK,WAAW,KAAI;AAC9B,aAAK,QAAQ,QAAQ,KAAK,MAAM;AAChC,aAAK,cAAc,MAAM,yBAAyB,KAAK;AAAA,MACzD;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,MAAM,QAAQ,UAAS;AAC3B,aAAQ,OAAO,KAAK,MAAK;AAAE,UAAI,MAAM;AAAA,IAAE;AACvC,SAAK,aAAa,cAAc;AAChC,QAAI,OAAO,OAAO,OAAO,EAAC,MAAM,KAAM,QAAQ,QAAW,UAAU
,KAAI,GAAG,EAAC,MAAM,QAAQ,SAAQ,CAAC;AAClG,QAAG,OAAO,eAAgB,aAAY;AACpC,WAAK,QAAQ,IAAI,WAAW,SAAS,IAAI,CAAC;AAAA,IAC5C,OAAO;AACL,WAAK,QAAQ,IAAI;AAAA,IACnB;AAAA,EACF;AAAA,EAEA,KAAK,QAAQ,MAAM,iBAAiB,UAAS;AAC3C,QAAI;AACJ,QAAI,YAAY,MAAM;AACpB,WAAK,KAAK,OAAO,GAAG;AACpB,sBAAgB;AAAA,IAClB;AACA,UAAM,KAAK,QAAQ,QAAQ,KAAK,YAAY,GAAG,oBAAoB,MAAM,KAAK,SAAS,WAAW,UAAQ;AACxG,WAAK,KAAK,OAAO,GAAG;AACpB,UAAG,KAAK,SAAS,GAAE;AAAE,iBAAS,IAAI;AAAA,MAAE;AAAA,IACtC,CAAC;AACD,SAAK,KAAK,IAAI,GAAG;AAAA,EACnB;AACF;;;AChIA,IAAqB,WAArB,MAA8B;AAAA,EAE5B,YAAY,SAAS,OAAO,CAAC,GAAE;AAC7B,QAAI,SAAS,KAAK,UAAU,EAAC,OAAO,kBAAkB,MAAM,gBAAe;AAC3E,SAAK,QAAQ,CAAC;AACd,SAAK,eAAe,CAAC;AACrB,SAAK,UAAU;AACf,SAAK,UAAU;AACf,SAAK,SAAS;AAAA,MACZ,QAAQ,WAAW;AAAA,MAAE;AAAA,MACrB,SAAS,WAAW;AAAA,MAAE;AAAA,MACtB,QAAQ,WAAW;AAAA,MAAE;AAAA,IACvB;AAEA,SAAK,QAAQ,GAAG,OAAO,OAAO,cAAY;AACxC,UAAI,EAAC,QAAQ,SAAS,WAAU,KAAK;AAErC,WAAK,UAAU,KAAK,QAAQ,QAAQ;AACpC,WAAK,QAAQ,SAAS,UAAU,KAAK,OAAO,UAAU,QAAQ,OAAO;AAErE,WAAK,aAAa,QAAQ,UAAQ;AAChC,aAAK,QAAQ,SAAS,SAAS,KAAK,OAAO,MAAM,QAAQ,OAAO;AAAA,MAClE,CAAC;AACD,WAAK,eAAe,CAAC;AACrB,aAAO;AAAA,IACT,CAAC;AAED,SAAK,QAAQ,GAAG,OAAO,MAAM,UAAQ;AACnC,UAAI,EAAC,QAAQ,SAAS,WAAU,KAAK;AAErC,UAAG,KAAK,mBAAmB,GAAE;AAC3B,aAAK,aAAa,KAAK,IAAI;AAAA,MAC7B,OAAO;AACL,aAAK,QAAQ,SAAS,SAAS,KAAK,OAAO,MAAM,QAAQ,OAAO;AAChE,eAAO;AAAA,MACT;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,OAAO,UAAS;AAAE,SAAK,OAAO,SAAS;AAAA,EAAS;AAAA,EAEhD,QAAQ,UAAS;AAAE,SAAK,OAAO,UAAU;AAAA,EAAS;AAAA,EAElD,OAAO,UAAS;AAAE,SAAK,OAAO,SAAS;AAAA,EAAS;AAAA,EAEhD,KAAK,IAAG;AAAE,WAAO,SAAS,KAAK,KAAK,OAAO,EAAE;AAAA,EAAE;AAAA,EAE/C,qBAAoB;AAClB,WAAO,CAAC,KAAK,WAAY,KAAK,YAAY,KAAK,QAAQ,QAAQ;AAAA,EACjE;AAAA,EAYA,OAAO,UAAU,cAAc,UAAU,QAAQ,SAAQ;AACvD,QAAI,QAAQ,KAAK,MAAM,YAAY;AACnC,QAAI,QAAQ,CAAC;AACb,QAAI,SAAS,CAAC;AAEd,SAAK,IAAI,OAAO,CAAC,KAAK,aAAa;AACjC,UAAG,CAAC,SAAS,MAAK;AAChB,eAAO,OAAO;AAAA,MAChB;AAAA,IACF,CAAC;AACD,SAAK,IAAI,UAAU,CAAC,KAAK,gBAAgB;AACvC,UAAI,kBAAkB,MAAM;AAC5B,UAAG,iBAAgB;AACjB,YAAI,UAAU,YAAY,MAAM,IAAI,OAAK,EAAE,OAAO;AAClD,YAAI,UAAU,gBAAgB,MAAM,IAAI,OAAK,EAAE,OAAO;AACtD,YAAI,cAAc,YAAY,MAAM,OAAO,OAAK,QAAQ,QAAQ,EAAE,OAAO,IAAI,CAAC;AAC9E,YAAI,YAAY,gBAAgB,MAAM,OAAO,OAAK,QAAQ,QAAQ,EAAE,OAAO,IAAI,CAAC;AAChF,YAAG,YAAY,SAAS,GAAE;AACxB,gBAAM,OAAO;AACb,gBAAM,KAAK,QAAQ;AAAA,QACrB;AACA,YAAG,UAAU,SAAS,GAAE;AACtB,iBAAO,OAAO,KAAK,MAAM,eAAe;AACxC,iBAAO,KAAK,QAAQ;AAAA,QACtB;AAAA,MACF,OAAO;AACL,cAAM,OAAO;AAAA,MACf;AAAA,IACF,CAAC;AACD,WAAO,KAAK,SAAS,OAAO,EAAC,OAAc,OAAc,GAAG,QAAQ,OAAO;AAAA,EAC7E;AAAA,EAWA,OAAO,SAAS,OAAO,MAAM,QAAQ,SAAQ;AAC3C,QAAI,EAAC,OAAO,WAAU,KAAK,MAAM,IAAI;AACrC,QAAG,CAAC,QAAO;AAAE,eAAS,WAAW;AAAA,MAAE;AAAA,IAAE;AACrC,QAAG,CAAC,SAAQ;AAAE,gBAAU,WAAW;AAAA,MAAE;AAAA,IAAE;AAEvC,SAAK,IAAI,OAAO,CAAC,KAAK,gBAAgB;AACpC,UAAI,kBAAkB,MAAM;AAC5B,YAAM,OAAO,KAAK,MAAM,WAAW;AACnC,UAAG,iBAAgB;AACjB,YAAI,aAAa,MAAM,KAAK,MAAM,IAAI,OAAK,EAAE,OAAO;AACpD,YAAI,WAAW,gBAAgB,MAAM,OAAO,OAAK,WAAW,QAAQ,EAAE,OAAO,IAAI,CAAC;AAClF,cAAM,KAAK,MAAM,QAAQ,GAAG,QAAQ;AAAA,MACtC;AACA,aAAO,KAAK,iBAAiB,WAAW;AAAA,IAC1C,CAAC;AACD,SAAK,IAAI,QAAQ,CAAC,KAAK,iBAAiB;AACtC,UAAI,kBAAkB,MAAM;AAC5B,UAAG,CAAC,iBAAgB;AAAE;AAAA,MAAO;AAC7B,UAAI,eAAe,aAAa,MAAM,IAAI,OAAK,EAAE,OAAO;AACxD,sBAAgB,QAAQ,gBAAgB,MAAM,OAAO,OAAK;AACxD,eAAO,aAAa,QAAQ,EAAE,OAAO,IAAI;AAAA,MAC3C,CAAC;AACD,cAAQ,KAAK,iBAAiB,YAAY;AAC1C,UAAG,gBAAgB,MAAM,WAAW,GAAE;AACpC,eAAO,MAAM;AAAA,MACf;AAAA,IACF,CAAC;AACD,WAAO;AAAA,EACT;AAAA,EAUA,OAAO,KAAK,WAAW,SAAQ;AAC7B,QAAG,CAAC,SAAQ;AAAE,gBAAU,SAAU,KAAK,MAAK;AAAE,eAAO;AAAA,MAAK;AAAA,IAAE;AAE5D,WAAO,KAAK,IAAI,WAAW,CAAC,KAAK,aAAa;AAC5C,aAAO,QAAQ,KAAK,QAAQ;AAAA,IAC9B,CAAC;AAAA,EACH;AAAA,EAIA,OAAO,IAAI,KAAK,MAAK;AACnB,WAAO,OAAO,oBAAoB,GAAG,E
AAE,IAAI,SAAO,KAAK,KAAK,IAAI,IAAI,CAAC;AAAA,EACvE;AAAA,EAEA,OAAO,MAAM,KAAI;AAAE,WAAO,KAAK,MAAM,KAAK,UAAU,GAAG,CAAC;AAAA,EAAE;AAC5D;;;AC5JA,IAAO,qBAAQ;AAAA,EACb,eAAe;AAAA,EACf,aAAa;AAAA,EACb,OAAO,EAAC,MAAM,GAAG,OAAO,GAAG,WAAW,EAAC;AAAA,EAEvC,OAAO,KAAK,UAAS;AACnB,QAAG,IAAI,QAAQ,gBAAgB,aAAY;AACzC,aAAO,SAAS,KAAK,aAAa,GAAG,CAAC;AAAA,IACxC,OAAO;AACL,UAAI,UAAU,CAAC,IAAI,UAAU,IAAI,KAAK,IAAI,OAAO,IAAI,OAAO,IAAI,OAAO;AACvE,aAAO,SAAS,KAAK,UAAU,OAAO,CAAC;AAAA,IACzC;AAAA,EACF;AAAA,EAEA,OAAO,YAAY,UAAS;AAC1B,QAAG,WAAW,gBAAgB,aAAY;AACxC,aAAO,SAAS,KAAK,aAAa,UAAU,CAAC;AAAA,IAC/C,OAAO;AACL,UAAI,CAAC,UAAU,KAAK,OAAO,OAAO,WAAW,KAAK,MAAM,UAAU;AAClE,aAAO,SAAS,EAAC,UAAU,KAAK,OAAO,OAAO,QAAO,CAAC;AAAA,IACxD;AAAA,EACF;AAAA,EAIA,aAAa,SAAQ;AACnB,QAAI,EAAC,UAAU,KAAK,OAAO,OAAO,YAAW;AAC7C,QAAI,aAAa,KAAK,cAAc,SAAS,SAAS,IAAI,SAAS,MAAM,SAAS,MAAM;AACxF,QAAI,SAAS,IAAI,YAAY,KAAK,gBAAgB,UAAU;AAC5D,QAAI,OAAO,IAAI,SAAS,MAAM;AAC9B,QAAI,SAAS;AAEb,SAAK,SAAS,UAAU,KAAK,MAAM,IAAI;AACvC,SAAK,SAAS,UAAU,SAAS,MAAM;AACvC,SAAK,SAAS,UAAU,IAAI,MAAM;AAClC,SAAK,SAAS,UAAU,MAAM,MAAM;AACpC,SAAK,SAAS,UAAU,MAAM,MAAM;AACpC,UAAM,KAAK,UAAU,UAAQ,KAAK,SAAS,UAAU,KAAK,WAAW,CAAC,CAAC,CAAC;AACxE,UAAM,KAAK,KAAK,UAAQ,KAAK,SAAS,UAAU,KAAK,WAAW,CAAC,CAAC,CAAC;AACnE,UAAM,KAAK,OAAO,UAAQ,KAAK,SAAS,UAAU,KAAK,WAAW,CAAC,CAAC,CAAC;AACrE,UAAM,KAAK,OAAO,UAAQ,KAAK,SAAS,UAAU,KAAK,WAAW,CAAC,CAAC,CAAC;AAErE,QAAI,WAAW,IAAI,WAAW,OAAO,aAAa,QAAQ,UAAU;AACpE,aAAS,IAAI,IAAI,WAAW,MAAM,GAAG,CAAC;AACtC,aAAS,IAAI,IAAI,WAAW,OAAO,GAAG,OAAO,UAAU;AAEvD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,aAAa,QAAO;AAClB,QAAI,OAAO,IAAI,SAAS,MAAM;AAC9B,QAAI,OAAO,KAAK,SAAS,CAAC;AAC1B,QAAI,UAAU,IAAI,YAAY;AAC9B,YAAO;AAAA,WACA,KAAK,MAAM;AAAM,eAAO,KAAK,WAAW,QAAQ,MAAM,OAAO;AAAA,WAC7D,KAAK,MAAM;AAAO,eAAO,KAAK,YAAY,QAAQ,MAAM,OAAO;AAAA,WAC/D,KAAK,MAAM;AAAW,eAAO,KAAK,gBAAgB,QAAQ,MAAM,OAAO;AAAA;AAAA,EAEhF;AAAA,EAEA,WAAW,QAAQ,MAAM,SAAQ;AAC/B,QAAI,cAAc,KAAK,SAAS,CAAC;AACjC,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB,KAAK,cAAc;AACrD,QAAI,UAAU,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,WAAW,CAAC;AACvE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AACjD,WAAO,EAAC,UAAU,SAAS,KAAK,MAAM,OAAc,OAAc,SAAS,KAAI;AAAA,EACjF;AAAA,EAEA,YAAY,QAAQ,MAAM,SAAQ;AAChC,QAAI,cAAc,KAAK,SAAS,CAAC;AACjC,QAAI,UAAU,KAAK,SAAS,CAAC;AAC7B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB,KAAK;AACvC,QAAI,UAAU,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,WAAW,CAAC;AACvE,aAAS,SAAS;AAClB,QAAI,MAAM,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,OAAO,CAAC;AAC/D,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AACjD,QAAI,UAAU,EAAC,QAAQ,OAAO,UAAU,KAAI;AAC5C,WAAO,EAAC,UAAU,SAAS,KAAU,OAAc,OAAO,eAAe,OAAO,QAAgB;AAAA,EAClG;AAAA,EAEA,gBAAgB,QAAQ,MAAM,SAAQ;AACpC,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB;AAClC,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AAEjD,WAAO,EAAC,UAAU,MAAM,KAAK,MAAM,OAAc,OAAc,SAAS,KAAI;AAAA,EAC9E;AACF;;;ACtBA,IAAqB,SAArB,MAA4B;AAAA,EAC1B,YAAY,UAAU,OAAO,CAAC,GAAE;AAC9B,SAAK,uBAAuB,EAAC,MAAM,CAAC,GAAG,OAAO,CAAC,GAAG,OAAO,CAAC,GAAG,SAAS,CAAC,EAAC;AACxE,SAAK,WAAW,CAAC;AACjB,SAAK,aAAa,CAAC;AACnB,SAAK,MAAM;AACX,SAAK,UAAU,KAAK,WAAW;AAC/B,SAAK,YAAY,KAAK,aAAa,OAAO,aAAa;AAC
vD,SAAK,yBAAyB;AAC9B,SAAK,iBAAiB,mBAAW,OAAO,KAAK,kBAAU;AACvD,SAAK,iBAAiB,mBAAW,OAAO,KAAK,kBAAU;AACvD,SAAK,gBAAgB;AACrB,SAAK,aAAa,KAAK,cAAc;AACrC,SAAK,eAAe;AACpB,QAAG,KAAK,cAAc,UAAS;AAC7B,WAAK,SAAS,KAAK,UAAU,KAAK;AAClC,WAAK,SAAS,KAAK,UAAU,KAAK;AAAA,IACpC,OAAO;AACL,WAAK,SAAS,KAAK;AACnB,WAAK,SAAS,KAAK;AAAA,IACrB;AACA,QAAI,+BAA+B;AACnC,QAAG,aAAa,UAAU,kBAAiB;AACzC,gBAAU,iBAAiB,YAAY,QAAM;AAC3C,YAAG,KAAK,MAAK;AACX,eAAK,WAAW;AAChB,yCAA+B,KAAK;AAAA,QACtC;AAAA,MACF,CAAC;AACD,gBAAU,iBAAiB,YAAY,QAAM;AAC3C,YAAG,iCAAiC,KAAK,cAAa;AACpD,yCAA+B;AAC/B,eAAK,QAAQ;AAAA,QACf;AAAA,MACF,CAAC;AAAA,IACH;AACA,SAAK,sBAAsB,KAAK,uBAAuB;AACvD,SAAK,gBAAgB,CAAC,UAAU;AAC9B,UAAG,KAAK,eAAc;AACpB,eAAO,KAAK,cAAc,KAAK;AAAA,MACjC,OAAO;AACL,eAAO,CAAC,KAAM,KAAM,GAAI,EAAE,QAAQ,MAAM;AAAA,MAC1C;AAAA,IACF;AACA,SAAK,mBAAmB,CAAC,UAAU;AACjC,UAAG,KAAK,kBAAiB;AACvB,eAAO,KAAK,iBAAiB,KAAK;AAAA,MACpC,OAAO;AACL,eAAO,CAAC,IAAI,IAAI,KAAK,KAAK,KAAK,KAAK,KAAK,KAAM,GAAI,EAAE,QAAQ,MAAM;AAAA,MACrE;AAAA,IACF;AACA,SAAK,SAAS,KAAK,UAAU;AAC7B,SAAK,oBAAoB,KAAK,qBAAqB;AACnD,SAAK,SAAS,QAAQ,KAAK,UAAU,CAAC,CAAC;AACvC,SAAK,WAAW,GAAG,YAAY,WAAW;AAC1C,SAAK,MAAM,KAAK,OAAO;AACvB,SAAK,iBAAiB;AACtB,SAAK,sBAAsB;AAC3B,SAAK,iBAAiB,IAAI,MAAM,MAAM;AACpC,WAAK,SAAS,MAAM,KAAK,QAAQ,CAAC;AAAA,IACpC,GAAG,KAAK,gBAAgB;AAAA,EAC1B;AAAA,EAKA,uBAAsB;AAAE,WAAO;AAAA,EAAS;AAAA,EAQxC,iBAAiB,cAAa;AAC5B,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,eAAe,MAAM;AAC1B,SAAK,aAAa,CAAC;AACnB,QAAG,KAAK,MAAK;AACX,WAAK,KAAK,MAAM;AAChB,WAAK,OAAO;AAAA,IACd;AACA,SAAK,YAAY;AAAA,EACnB;AAAA,EAOA,WAAU;AAAE,WAAO,SAAS,SAAS,MAAM,QAAQ,IAAI,QAAQ;AAAA,EAAK;AAAA,EAOpE,cAAa;AACX,QAAI,MAAM,KAAK,aACb,KAAK,aAAa,KAAK,UAAU,KAAK,OAAO,CAAC,GAAG,EAAC,KAAK,KAAK,IAAG,CAAC;AAClE,QAAG,IAAI,OAAO,CAAC,MAAM,KAAI;AAAE,aAAO;AAAA,IAAI;AACtC,QAAG,IAAI,OAAO,CAAC,MAAM,KAAI;AAAE,aAAO,GAAG,KAAK,SAAS,KAAK;AAAA,IAAM;AAE9D,WAAO,GAAG,KAAK,SAAS,OAAO,SAAS,OAAO;AAAA,EACjD;AAAA,EAWA,WAAW,UAAU,MAAM,QAAO;AAChC,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,eAAe,MAAM;AAC1B,SAAK,SAAS,UAAU,MAAM,MAAM;AAAA,EACtC;AAAA,EASA,QAAQ,QAAO;AACb,QAAG,QAAO;AACR,iBAAW,QAAQ,IAAI,yFAAyF;AAChH,WAAK,SAAS,QAAQ,MAAM;AAAA,IAC9B;AACA,QAAG,KAAK,MAAK;AAAE;AAAA,IAAO;AAEtB,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,OAAO,IAAI,KAAK,UAAU,KAAK,YAAY,CAAC;AACjD,SAAK,KAAK,aAAa,KAAK;AAC5B,SAAK,KAAK,UAAU,KAAK;AACzB,SAAK,KAAK,SAAS,MAAM,KAAK,WAAW;AACzC,SAAK,KAAK,UAAU,WAAS,KAAK,YAAY,KAAK;AACnD,SAAK,KAAK,YAAY,WAAS,KAAK,cAAc,KAAK;AACvD,SAAK,KAAK,UAAU,WAAS,KAAK,YAAY,KAAK;AAAA,EACrD;AAAA,EAQA,IAAI,MAAM,KAAK,MAAK;AAAE,SAAK,OAAO,MAAM,KAAK,IAAI;AAAA,EAAE;AAAA,EAKnD,YAAW;AAAE,WAAO,KAAK,WAAW;AAAA,EAAK;AAAA,EASzC,OAAO,UAAS;AACd,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,KAAK,KAAK,CAAC,KAAK,QAAQ,CAAC;AACnD,WAAO;AAAA,EACT;AAAA,EAMA,QAAQ,UAAS;AACf,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,MAAM,KAAK,CAAC,KAAK,QAAQ,CAAC;AACpD,WAAO;AAAA,EACT;AAAA,EASA,QAAQ,UAAS;AACf,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,MAAM,KAAK,CAAC,KAAK,QAAQ,CAAC;AACpD,WAAO;AAAA,EACT;AAAA,EAMA,UAAU,UAAS;AACjB,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,QAAQ,KAAK,CAAC,KAAK,QAAQ,CAAC;AACtD,WAAO;AAAA,EACT;AAAA,EAQA,KAAK,UAAS;AACZ,QAAG,CAAC,KAAK,YAAY,GAAE;AAAE,aAAO;AAAA,IAAM;AACtC,QAAI,MAAM,KAAK,QAAQ;AACvB,QAAI,YAAY,KAAK,IAAI;AACzB,SAAK,KAAK,EAAC,OAAO,WAAW,OAAO,aAAa,SAAS,CAAC,GAAG,IAAQ,CAAC;AACvE,QAAI,WAAW,KAAK,UAAU,SAAO;AACnC,UAAG,IAAI,QAAQ,KAAI;AACjB,aAAK,IAAI,CAAC,QAAQ,CAAC;AACnB,iBAAS,KAAK,IAAI,IAAI,SAAS;AAAA,MACjC;AAAA,IACF,CAAC;AACD,WAAO;AAAA,EACT;AAAA,EAKA,aAAY;AACV,QAAG,KAAK,UAAU;AAAG,WAAK,IAAI,aAAa,gBAAgB,KAAK,YAAY,GAAG;AAC/E,SAAK,gBAAgB;AACrB,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,eAAe,MAAM;AAC1B,SAAK,eAAe;AACpB,SAAK,qBAAqB,KAAK,QAAQ,CAAC,CAAC,EAAE,cAAc,SAAS,CAAC;AAAA,EACrE;AAAA,EAMA,
mBAAkB;AAChB,QAAG,KAAK,qBAAoB;AAC1B,WAAK,sBAAsB;AAC3B,UAAG,KAAK,UAAU,GAAE;AAAE,aAAK,IAAI,aAAa,0DAA0D;AAAA,MAAE;AACxG,WAAK,cAAc,mBAAmB;AAAA,IACxC;AAAA,EACF;AAAA,EAEA,iBAAgB;AACd,QAAG,KAAK,QAAQ,KAAK,KAAK,eAAc;AAAE;AAAA,IAAO;AACjD,SAAK,sBAAsB;AAC3B,iBAAa,KAAK,cAAc;AAChC,eAAW,MAAM,KAAK,cAAc,GAAG,KAAK,mBAAmB;AAAA,EACjE;AAAA,EAEA,SAAS,UAAU,MAAM,QAAO;AAC9B,QAAG,CAAC,KAAK,MAAK;AACZ,aAAO,YAAY,SAAS;AAAA,IAC9B;AAEA,SAAK,kBAAkB,MAAM;AAC3B,UAAG,KAAK,MAAK;AACX,YAAG,MAAK;AAAE,eAAK,KAAK,MAAM,MAAM,UAAU,EAAE;AAAA,QAAE,OAAO;AAAE,eAAK,KAAK,MAAM;AAAA,QAAE;AAAA,MAC3E;AAEA,WAAK,oBAAoB,MAAM;AAC7B,YAAG,KAAK,MAAK;AACX,eAAK,KAAK,UAAU,WAAW;AAAA,UAAE;AACjC,eAAK,OAAO;AAAA,QACd;AAEA,oBAAY,SAAS;AAAA,MACvB,CAAC;AAAA,IACH,CAAC;AAAA,EACH;AAAA,EAEA,kBAAkB,UAAU,QAAQ,GAAE;AACpC,QAAG,UAAU,KAAK,CAAC,KAAK,QAAQ,CAAC,KAAK,KAAK,gBAAe;AACxD,eAAS;AACT;AAAA,IACF;AAEA,eAAW,MAAM;AACf,WAAK,kBAAkB,UAAU,QAAQ,CAAC;AAAA,IAC5C,GAAG,MAAM,KAAK;AAAA,EAChB;AAAA,EAEA,oBAAoB,UAAU,QAAQ,GAAE;AACtC,QAAG,UAAU,KAAK,CAAC,KAAK,QAAQ,KAAK,KAAK,eAAe,cAAc,QAAO;AAC5E,eAAS;AACT;AAAA,IACF;AAEA,eAAW,MAAM;AACf,WAAK,oBAAoB,UAAU,QAAQ,CAAC;AAAA,IAC9C,GAAG,MAAM,KAAK;AAAA,EAChB;AAAA,EAEA,YAAY,OAAM;AAChB,QAAI,YAAY,SAAS,MAAM;AAC/B,QAAG,KAAK,UAAU;AAAG,WAAK,IAAI,aAAa,SAAS,KAAK;AACzD,SAAK,iBAAiB;AACtB,iBAAa,KAAK,cAAc;AAChC,QAAG,CAAC,KAAK,iBAAiB,cAAc,KAAK;AAC3C,WAAK,eAAe,gBAAgB;AAAA,IACtC;AACA,SAAK,qBAAqB,MAAM,QAAQ,CAAC,CAAC,EAAE,cAAc,SAAS,KAAK,CAAC;AAAA,EAC3E;AAAA,EAKA,YAAY,OAAM;AAChB,QAAG,KAAK,UAAU;AAAG,WAAK,IAAI,aAAa,KAAK;AAChD,QAAI,kBAAkB,KAAK;AAC3B,QAAI,oBAAoB,KAAK;AAC7B,SAAK,qBAAqB,MAAM,QAAQ,CAAC,CAAC,EAAE,cAAc;AACxD,eAAS,OAAO,iBAAiB,iBAAiB;AAAA,IACpD,CAAC;AACD,QAAG,oBAAoB,KAAK,aAAa,oBAAoB,GAAE;AAC7D,WAAK,iBAAiB;AAAA,IACxB;AAAA,EACF;AAAA,EAKA,mBAAkB;AAChB,SAAK,SAAS,QAAQ,aAAW;AAC/B,UAAG,CAAE,SAAQ,UAAU,KAAK,QAAQ,UAAU,KAAK,QAAQ,SAAS,IAAG;AACrE,gBAAQ,QAAQ,eAAe,KAAK;AAAA,MACtC;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAKA,kBAAiB;AACf,YAAO,KAAK,QAAQ,KAAK,KAAK;AAAA,WACvB,cAAc;AAAY,eAAO;AAAA,WACjC,cAAc;AAAM,eAAO;AAAA,WAC3B,cAAc;AAAS,eAAO;AAAA;AAC1B,eAAO;AAAA;AAAA,EAEpB;AAAA,EAKA,cAAa;AAAE,WAAO,KAAK,gBAAgB,MAAM;AAAA,EAAO;AAAA,EAOxD,OAAO,SAAQ;AACb,SAAK,IAAI,QAAQ,eAAe;AAChC,SAAK,WAAW,KAAK,SAAS,OAAO,OAAK,EAAE,QAAQ,MAAM,QAAQ,QAAQ,CAAC;AAAA,EAC7E;AAAA,EAQA,IAAI,MAAK;AACP,aAAQ,OAAO,KAAK,sBAAqB;AACvC,WAAK,qBAAqB,OAAO,KAAK,qBAAqB,KAAK,OAAO,CAAC,CAAC,SAAS;AAChF,eAAO,KAAK,QAAQ,GAAG,MAAM;AAAA,MAC/B,CAAC;AAAA,IACH;AAAA,EACF;AAAA,EASA,QAAQ,OAAO,aAAa,CAAC,GAAE;AAC7B,QAAI,OAAO,IAAI,QAAQ,OAAO,YAAY,IAAI;AAC9C,SAAK,SAAS,KAAK,IAAI;AACvB,WAAO;AAAA,EACT;AAAA,EAKA,KAAK,MAAK;AACR,QAAG,KAAK,UAAU,GAAE;AAClB,UAAI,EAAC,OAAO,OAAO,SAAS,KAAK,aAAY;AAC7C,WAAK,IAAI,QAAQ,GAAG,SAAS,UAAU,aAAa,QAAQ,OAAO;AAAA,IACrE;AAEA,QAAG,KAAK,YAAY,GAAE;AACpB,WAAK,OAAO,MAAM,YAAU,KAAK,KAAK,KAAK,MAAM,CAAC;AAAA,IACpD,OAAO;AACL,WAAK,WAAW,KAAK,MAAM,KAAK,OAAO,MAAM,YAAU,KAAK,KAAK,KAAK,MAAM,CAAC,CAAC;AAAA,IAChF;AAAA,EACF;AAAA,EAMA,UAAS;AACP,QAAI,SAAS,KAAK,MAAM;AACxB,QAAG,WAAW,KAAK,KAAI;AAAE,WAAK,MAAM;AAAA,IAAE,OAAO;AAAE,WAAK,MAAM;AAAA,IAAO;AAEjE,WAAO,KAAK,IAAI,SAAS;AAAA,EAC3B;AAAA,EAEA,gBAAe;AACb,QAAG,KAAK,uBAAuB,CAAC,KAAK,YAAY,GAAE;AAAE;AAAA,IAAO;AAC5D,SAAK,sBAAsB,KAAK,QAAQ;AACxC,SAAK,KAAK,EAAC,OAAO,WAAW,OAAO,aAAa,SAAS,CAAC,GAAG,KAAK,KAAK,oBAAmB,CAAC;AAC5F,SAAK,iBAAiB,WAAW,MAAM,KAAK,iBAAiB,GAAG,KAAK,mBAAmB;AAAA,EAC1F;AAAA,EAEA,cAAc,QAAO;AACnB,SAAK,gBAAgB;AACrB,QAAG,KAAK,YAAY,GAAE;AAAE,WAAK,KAAK,MAAM,iBAAiB,MAAM;AAAA,IAAE;AAAA,EACnE;AAAA,EAEA,kBAAiB;AACf,QAAG,KAAK,YAAY,KAAK,KAAK,WAAW,SAAS,GAAE;AAClD,WAAK,WAAW,QAAQ,cAAY,SAAS,CAAC;AAC9C,WAAK,aAAa,CAAC;AAAA,IACrB;AAAA,EACF;AAAA,EAEA,cAAc,YAAW;AACvB,SAAK,OAAO,WAAW,MAAM,SAAO;AAClC,UAAI,EAAC,OAAO,OAAO,SAA
S,KAAK,aAAY;AAC7C,UAAG,OAAO,QAAQ,KAAK,qBAAoB;AACzC,qBAAa,KAAK,cAAc;AAChC,aAAK,sBAAsB;AAC3B,mBAAW,MAAM,KAAK,cAAc,GAAG,KAAK,mBAAmB;AAAA,MACjE;AAEA,UAAG,KAAK,UAAU;AAAG,aAAK,IAAI,WAAW,GAAG,QAAQ,UAAU,MAAM,SAAS,SAAS,OAAO,MAAM,MAAM,OAAO,MAAM,OAAO;AAE7H,eAAQ,IAAI,GAAG,IAAI,KAAK,SAAS,QAAQ,KAAI;AAC3C,cAAM,UAAU,KAAK,SAAS;AAC9B,YAAG,CAAC,QAAQ,SAAS,OAAO,OAAO,SAAS,QAAQ,GAAE;AAAE;AAAA,QAAS;AACjE,gBAAQ,QAAQ,OAAO,SAAS,KAAK,QAAQ;AAAA,MAC/C;AAEA,eAAQ,IAAI,GAAG,IAAI,KAAK,qBAAqB,QAAQ,QAAQ,KAAI;AAC/D,YAAI,CAAC,EAAE,YAAY,KAAK,qBAAqB,QAAQ;AACrD,iBAAS,GAAG;AAAA,MACd;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,eAAe,OAAM;AACnB,QAAI,aAAa,KAAK,SAAS,KAAK,OAAK,EAAE,UAAU,SAAU,GAAE,SAAS,KAAK,EAAE,UAAU,EAAE;AAC7F,QAAG,YAAW;AACZ,UAAG,KAAK,UAAU;AAAG,aAAK,IAAI,aAAa,4BAA4B,QAAQ;AAC/E,iBAAW,MAAM;AAAA,IACnB;AAAA,EACF;AACF;", + "names": [] +} diff --git a/deps/phoenix/priv/static/phoenix.js b/deps/phoenix/priv/static/phoenix.js new file mode 100644 index 0000000..d5ea802 --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.js @@ -0,0 +1,1145 @@ +var Phoenix = (() => { + var __defProp = Object.defineProperty; + var __getOwnPropDesc = Object.getOwnPropertyDescriptor; + var __getOwnPropNames = Object.getOwnPropertyNames; + var __hasOwnProp = Object.prototype.hasOwnProperty; + var __export = (target, all) => { + for (var name in all) + __defProp(target, name, { get: all[name], enumerable: true }); + }; + var __copyProps = (to, from, except, desc) => { + if (from && typeof from === "object" || typeof from === "function") { + for (let key of __getOwnPropNames(from)) + if (!__hasOwnProp.call(to, key) && key !== except) + __defProp(to, key, { get: () => from[key], enumerable: !(desc = __getOwnPropDesc(from, key)) || desc.enumerable }); + } + return to; + }; + var __toCommonJS = (mod) => __copyProps(__defProp({}, "__esModule", { value: true }), mod); + + // js/phoenix/index.js + var phoenix_exports = {}; + __export(phoenix_exports, { + Channel: () => Channel, + LongPoll: () => LongPoll, + Presence: () => Presence, + Serializer: () => serializer_default, + Socket: () => Socket + }); + + // js/phoenix/utils.js + var closure = (value) => { + if (typeof value === "function") { + return value; + } else { + let closure2 = function() { + return value; + }; + return closure2; + } + }; + + // js/phoenix/constants.js + var globalSelf = typeof self !== "undefined" ? self : null; + var phxWindow = typeof window !== "undefined" ? 
window : null; + var global = globalSelf || phxWindow || global; + var DEFAULT_VSN = "2.0.0"; + var SOCKET_STATES = { connecting: 0, open: 1, closing: 2, closed: 3 }; + var DEFAULT_TIMEOUT = 1e4; + var WS_CLOSE_NORMAL = 1e3; + var CHANNEL_STATES = { + closed: "closed", + errored: "errored", + joined: "joined", + joining: "joining", + leaving: "leaving" + }; + var CHANNEL_EVENTS = { + close: "phx_close", + error: "phx_error", + join: "phx_join", + reply: "phx_reply", + leave: "phx_leave" + }; + var TRANSPORTS = { + longpoll: "longpoll", + websocket: "websocket" + }; + var XHR_STATES = { + complete: 4 + }; + + // js/phoenix/push.js + var Push = class { + constructor(channel, event, payload, timeout) { + this.channel = channel; + this.event = event; + this.payload = payload || function() { + return {}; + }; + this.receivedResp = null; + this.timeout = timeout; + this.timeoutTimer = null; + this.recHooks = []; + this.sent = false; + } + resend(timeout) { + this.timeout = timeout; + this.reset(); + this.send(); + } + send() { + if (this.hasReceived("timeout")) { + return; + } + this.startTimeout(); + this.sent = true; + this.channel.socket.push({ + topic: this.channel.topic, + event: this.event, + payload: this.payload(), + ref: this.ref, + join_ref: this.channel.joinRef() + }); + } + receive(status, callback) { + if (this.hasReceived(status)) { + callback(this.receivedResp.response); + } + this.recHooks.push({ status, callback }); + return this; + } + reset() { + this.cancelRefEvent(); + this.ref = null; + this.refEvent = null; + this.receivedResp = null; + this.sent = false; + } + matchReceive({ status, response, _ref }) { + this.recHooks.filter((h) => h.status === status).forEach((h) => h.callback(response)); + } + cancelRefEvent() { + if (!this.refEvent) { + return; + } + this.channel.off(this.refEvent); + } + cancelTimeout() { + clearTimeout(this.timeoutTimer); + this.timeoutTimer = null; + } + startTimeout() { + if (this.timeoutTimer) { + this.cancelTimeout(); + } + this.ref = this.channel.socket.makeRef(); + this.refEvent = this.channel.replyEventName(this.ref); + this.channel.on(this.refEvent, (payload) => { + this.cancelRefEvent(); + this.cancelTimeout(); + this.receivedResp = payload; + this.matchReceive(payload); + }); + this.timeoutTimer = setTimeout(() => { + this.trigger("timeout", {}); + }, this.timeout); + } + hasReceived(status) { + return this.receivedResp && this.receivedResp.status === status; + } + trigger(status, response) { + this.channel.trigger(this.refEvent, { status, response }); + } + }; + + // js/phoenix/timer.js + var Timer = class { + constructor(callback, timerCalc) { + this.callback = callback; + this.timerCalc = timerCalc; + this.timer = null; + this.tries = 0; + } + reset() { + this.tries = 0; + clearTimeout(this.timer); + } + scheduleTimeout() { + clearTimeout(this.timer); + this.timer = setTimeout(() => { + this.tries = this.tries + 1; + this.callback(); + }, this.timerCalc(this.tries + 1)); + } + }; + + // js/phoenix/channel.js + var Channel = class { + constructor(topic, params, socket) { + this.state = CHANNEL_STATES.closed; + this.topic = topic; + this.params = closure(params || {}); + this.socket = socket; + this.bindings = []; + this.bindingRef = 0; + this.timeout = this.socket.timeout; + this.joinedOnce = false; + this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout); + this.pushBuffer = []; + this.stateChangeRefs = []; + this.rejoinTimer = new Timer(() => { + if (this.socket.isConnected()) { + this.rejoin(); + } + }, 
this.socket.rejoinAfterMs); + this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset())); + this.stateChangeRefs.push(this.socket.onOpen(() => { + this.rejoinTimer.reset(); + if (this.isErrored()) { + this.rejoin(); + } + })); + this.joinPush.receive("ok", () => { + this.state = CHANNEL_STATES.joined; + this.rejoinTimer.reset(); + this.pushBuffer.forEach((pushEvent) => pushEvent.send()); + this.pushBuffer = []; + }); + this.joinPush.receive("error", () => { + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.onClose(() => { + this.rejoinTimer.reset(); + if (this.socket.hasLogger()) + this.socket.log("channel", `close ${this.topic} ${this.joinRef()}`); + this.state = CHANNEL_STATES.closed; + this.socket.remove(this); + }); + this.onError((reason) => { + if (this.socket.hasLogger()) + this.socket.log("channel", `error ${this.topic}`, reason); + if (this.isJoining()) { + this.joinPush.reset(); + } + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.joinPush.receive("timeout", () => { + if (this.socket.hasLogger()) + this.socket.log("channel", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout); + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), this.timeout); + leavePush.send(); + this.state = CHANNEL_STATES.errored; + this.joinPush.reset(); + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.on(CHANNEL_EVENTS.reply, (payload, ref) => { + this.trigger(this.replyEventName(ref), payload); + }); + } + join(timeout = this.timeout) { + if (this.joinedOnce) { + throw new Error("tried to join multiple times. 'join' can only be called a single time per channel instance"); + } else { + this.timeout = timeout; + this.joinedOnce = true; + this.rejoin(); + return this.joinPush; + } + } + onClose(callback) { + this.on(CHANNEL_EVENTS.close, callback); + } + onError(callback) { + return this.on(CHANNEL_EVENTS.error, (reason) => callback(reason)); + } + on(event, callback) { + let ref = this.bindingRef++; + this.bindings.push({ event, ref, callback }); + return ref; + } + off(event, ref) { + this.bindings = this.bindings.filter((bind) => { + return !(bind.event === event && (typeof ref === "undefined" || ref === bind.ref)); + }); + } + canPush() { + return this.socket.isConnected() && this.isJoined(); + } + push(event, payload, timeout = this.timeout) { + payload = payload || {}; + if (!this.joinedOnce) { + throw new Error(`tried to push '${event}' to '${this.topic}' before joining. 
Use channel.join() before pushing events`); + } + let pushEvent = new Push(this, event, function() { + return payload; + }, timeout); + if (this.canPush()) { + pushEvent.send(); + } else { + pushEvent.startTimeout(); + this.pushBuffer.push(pushEvent); + } + return pushEvent; + } + leave(timeout = this.timeout) { + this.rejoinTimer.reset(); + this.joinPush.cancelTimeout(); + this.state = CHANNEL_STATES.leaving; + let onClose = () => { + if (this.socket.hasLogger()) + this.socket.log("channel", `leave ${this.topic}`); + this.trigger(CHANNEL_EVENTS.close, "leave"); + }; + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout); + leavePush.receive("ok", () => onClose()).receive("timeout", () => onClose()); + leavePush.send(); + if (!this.canPush()) { + leavePush.trigger("ok", {}); + } + return leavePush; + } + onMessage(_event, payload, _ref) { + return payload; + } + isMember(topic, event, payload, joinRef) { + if (this.topic !== topic) { + return false; + } + if (joinRef && joinRef !== this.joinRef()) { + if (this.socket.hasLogger()) + this.socket.log("channel", "dropping outdated message", { topic, event, payload, joinRef }); + return false; + } else { + return true; + } + } + joinRef() { + return this.joinPush.ref; + } + rejoin(timeout = this.timeout) { + if (this.isLeaving()) { + return; + } + this.socket.leaveOpenTopic(this.topic); + this.state = CHANNEL_STATES.joining; + this.joinPush.resend(timeout); + } + trigger(event, payload, ref, joinRef) { + let handledPayload = this.onMessage(event, payload, ref, joinRef); + if (payload && !handledPayload) { + throw new Error("channel onMessage callbacks must return the payload, modified or unmodified"); + } + let eventBindings = this.bindings.filter((bind) => bind.event === event); + for (let i = 0; i < eventBindings.length; i++) { + let bind = eventBindings[i]; + bind.callback(handledPayload, ref, joinRef || this.joinRef()); + } + } + replyEventName(ref) { + return `chan_reply_${ref}`; + } + isClosed() { + return this.state === CHANNEL_STATES.closed; + } + isErrored() { + return this.state === CHANNEL_STATES.errored; + } + isJoined() { + return this.state === CHANNEL_STATES.joined; + } + isJoining() { + return this.state === CHANNEL_STATES.joining; + } + isLeaving() { + return this.state === CHANNEL_STATES.leaving; + } + }; + + // js/phoenix/ajax.js + var Ajax = class { + static request(method, endPoint, accept, body, timeout, ontimeout, callback) { + if (global.XDomainRequest) { + let req = new global.XDomainRequest(); + return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback); + } else { + let req = new global.XMLHttpRequest(); + return this.xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback); + } + } + static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback) { + req.timeout = timeout; + req.open(method, endPoint); + req.onload = () => { + let response = this.parseJSON(req.responseText); + callback && callback(response); + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.onprogress = () => { + }; + req.send(body); + return req; + } + static xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback) { + req.open(method, endPoint, true); + req.timeout = timeout; + req.setRequestHeader("Content-Type", accept); + req.onerror = () => callback && callback(null); + req.onreadystatechange = () => { + if (req.readyState === XHR_STATES.complete && callback) { + let response = this.parseJSON(req.responseText); + 
callback(response); + } + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.send(body); + return req; + } + static parseJSON(resp) { + if (!resp || resp === "") { + return null; + } + try { + return JSON.parse(resp); + } catch (e) { + console && console.log("failed to parse JSON response", resp); + return null; + } + } + static serialize(obj, parentKey) { + let queryStr = []; + for (var key in obj) { + if (!Object.prototype.hasOwnProperty.call(obj, key)) { + continue; + } + let paramKey = parentKey ? `${parentKey}[${key}]` : key; + let paramVal = obj[key]; + if (typeof paramVal === "object") { + queryStr.push(this.serialize(paramVal, paramKey)); + } else { + queryStr.push(encodeURIComponent(paramKey) + "=" + encodeURIComponent(paramVal)); + } + } + return queryStr.join("&"); + } + static appendParams(url, params) { + if (Object.keys(params).length === 0) { + return url; + } + let prefix = url.match(/\?/) ? "&" : "?"; + return `${url}${prefix}${this.serialize(params)}`; + } + }; + + // js/phoenix/longpoll.js + var LongPoll = class { + constructor(endPoint) { + this.endPoint = null; + this.token = null; + this.skipHeartbeat = true; + this.reqs = /* @__PURE__ */ new Set(); + this.onopen = function() { + }; + this.onerror = function() { + }; + this.onmessage = function() { + }; + this.onclose = function() { + }; + this.pollEndpoint = this.normalizeEndpoint(endPoint); + this.readyState = SOCKET_STATES.connecting; + this.poll(); + } + normalizeEndpoint(endPoint) { + return endPoint.replace("ws://", "http://").replace("wss://", "https://").replace(new RegExp("(.*)/" + TRANSPORTS.websocket), "$1/" + TRANSPORTS.longpoll); + } + endpointURL() { + return Ajax.appendParams(this.pollEndpoint, { token: this.token }); + } + closeAndRetry(code, reason, wasClean) { + this.close(code, reason, wasClean); + this.readyState = SOCKET_STATES.connecting; + } + ontimeout() { + this.onerror("timeout"); + this.closeAndRetry(1005, "timeout", false); + } + isActive() { + return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting; + } + poll() { + this.ajax("GET", null, () => this.ontimeout(), (resp) => { + if (resp) { + var { status, token, messages } = resp; + this.token = token; + } else { + status = 0; + } + switch (status) { + case 200: + messages.forEach((msg) => { + setTimeout(() => this.onmessage({ data: msg }), 0); + }); + this.poll(); + break; + case 204: + this.poll(); + break; + case 410: + this.readyState = SOCKET_STATES.open; + this.onopen({}); + this.poll(); + break; + case 403: + this.onerror(403); + this.close(1008, "forbidden", false); + break; + case 0: + case 500: + this.onerror(500); + this.closeAndRetry(1011, "internal server error", 500); + break; + default: + throw new Error(`unhandled poll status ${status}`); + } + }); + } + send(body) { + this.ajax("POST", body, () => this.onerror("timeout"), (resp) => { + if (!resp || resp.status !== 200) { + this.onerror(resp && resp.status); + this.closeAndRetry(1011, "internal server error", false); + } + }); + } + close(code, reason, wasClean) { + for (let req of this.reqs) { + req.abort(); + } + this.readyState = SOCKET_STATES.closed; + let opts = Object.assign({ code: 1e3, reason: void 0, wasClean: true }, { code, reason, wasClean }); + if (typeof CloseEvent !== "undefined") { + this.onclose(new CloseEvent("close", opts)); + } else { + this.onclose(opts); + } + } + ajax(method, body, onCallerTimeout, callback) { + let req; + let ontimeout = () => { + this.reqs.delete(req); + onCallerTimeout(); + }; + req = 
Ajax.request(method, this.endpointURL(), "application/json", body, this.timeout, ontimeout, (resp) => { + this.reqs.delete(req); + if (this.isActive()) { + callback(resp); + } + }); + this.reqs.add(req); + } + }; + + // js/phoenix/presence.js + var Presence = class { + constructor(channel, opts = {}) { + let events = opts.events || { state: "presence_state", diff: "presence_diff" }; + this.state = {}; + this.pendingDiffs = []; + this.channel = channel; + this.joinRef = null; + this.caller = { + onJoin: function() { + }, + onLeave: function() { + }, + onSync: function() { + } + }; + this.channel.on(events.state, (newState) => { + let { onJoin, onLeave, onSync } = this.caller; + this.joinRef = this.channel.joinRef(); + this.state = Presence.syncState(this.state, newState, onJoin, onLeave); + this.pendingDiffs.forEach((diff) => { + this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave); + }); + this.pendingDiffs = []; + onSync(); + }); + this.channel.on(events.diff, (diff) => { + let { onJoin, onLeave, onSync } = this.caller; + if (this.inPendingSyncState()) { + this.pendingDiffs.push(diff); + } else { + this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave); + onSync(); + } + }); + } + onJoin(callback) { + this.caller.onJoin = callback; + } + onLeave(callback) { + this.caller.onLeave = callback; + } + onSync(callback) { + this.caller.onSync = callback; + } + list(by) { + return Presence.list(this.state, by); + } + inPendingSyncState() { + return !this.joinRef || this.joinRef !== this.channel.joinRef(); + } + static syncState(currentState, newState, onJoin, onLeave) { + let state = this.clone(currentState); + let joins = {}; + let leaves = {}; + this.map(state, (key, presence) => { + if (!newState[key]) { + leaves[key] = presence; + } + }); + this.map(newState, (key, newPresence) => { + let currentPresence = state[key]; + if (currentPresence) { + let newRefs = newPresence.metas.map((m) => m.phx_ref); + let curRefs = currentPresence.metas.map((m) => m.phx_ref); + let joinedMetas = newPresence.metas.filter((m) => curRefs.indexOf(m.phx_ref) < 0); + let leftMetas = currentPresence.metas.filter((m) => newRefs.indexOf(m.phx_ref) < 0); + if (joinedMetas.length > 0) { + joins[key] = newPresence; + joins[key].metas = joinedMetas; + } + if (leftMetas.length > 0) { + leaves[key] = this.clone(currentPresence); + leaves[key].metas = leftMetas; + } + } else { + joins[key] = newPresence; + } + }); + return this.syncDiff(state, { joins, leaves }, onJoin, onLeave); + } + static syncDiff(state, diff, onJoin, onLeave) { + let { joins, leaves } = this.clone(diff); + if (!onJoin) { + onJoin = function() { + }; + } + if (!onLeave) { + onLeave = function() { + }; + } + this.map(joins, (key, newPresence) => { + let currentPresence = state[key]; + state[key] = this.clone(newPresence); + if (currentPresence) { + let joinedRefs = state[key].metas.map((m) => m.phx_ref); + let curMetas = currentPresence.metas.filter((m) => joinedRefs.indexOf(m.phx_ref) < 0); + state[key].metas.unshift(...curMetas); + } + onJoin(key, currentPresence, newPresence); + }); + this.map(leaves, (key, leftPresence) => { + let currentPresence = state[key]; + if (!currentPresence) { + return; + } + let refsToRemove = leftPresence.metas.map((m) => m.phx_ref); + currentPresence.metas = currentPresence.metas.filter((p) => { + return refsToRemove.indexOf(p.phx_ref) < 0; + }); + onLeave(key, currentPresence, leftPresence); + if (currentPresence.metas.length === 0) { + delete state[key]; + } + }); + return state; + } + static 
list(presences, chooser) { + if (!chooser) { + chooser = function(key, pres) { + return pres; + }; + } + return this.map(presences, (key, presence) => { + return chooser(key, presence); + }); + } + static map(obj, func) { + return Object.getOwnPropertyNames(obj).map((key) => func(key, obj[key])); + } + static clone(obj) { + return JSON.parse(JSON.stringify(obj)); + } + }; + + // js/phoenix/serializer.js + var serializer_default = { + HEADER_LENGTH: 1, + META_LENGTH: 4, + KINDS: { push: 0, reply: 1, broadcast: 2 }, + encode(msg, callback) { + if (msg.payload.constructor === ArrayBuffer) { + return callback(this.binaryEncode(msg)); + } else { + let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]; + return callback(JSON.stringify(payload)); + } + }, + decode(rawPayload, callback) { + if (rawPayload.constructor === ArrayBuffer) { + return callback(this.binaryDecode(rawPayload)); + } else { + let [join_ref, ref, topic, event, payload] = JSON.parse(rawPayload); + return callback({ join_ref, ref, topic, event, payload }); + } + }, + binaryEncode(message) { + let { join_ref, ref, event, topic, payload } = message; + let metaLength = this.META_LENGTH + join_ref.length + ref.length + topic.length + event.length; + let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength); + let view = new DataView(header); + let offset = 0; + view.setUint8(offset++, this.KINDS.push); + view.setUint8(offset++, join_ref.length); + view.setUint8(offset++, ref.length); + view.setUint8(offset++, topic.length); + view.setUint8(offset++, event.length); + Array.from(join_ref, (char) => view.setUint8(offset++, char.charCodeAt(0))); + Array.from(ref, (char) => view.setUint8(offset++, char.charCodeAt(0))); + Array.from(topic, (char) => view.setUint8(offset++, char.charCodeAt(0))); + Array.from(event, (char) => view.setUint8(offset++, char.charCodeAt(0))); + var combined = new Uint8Array(header.byteLength + payload.byteLength); + combined.set(new Uint8Array(header), 0); + combined.set(new Uint8Array(payload), header.byteLength); + return combined.buffer; + }, + binaryDecode(buffer) { + let view = new DataView(buffer); + let kind = view.getUint8(0); + let decoder = new TextDecoder(); + switch (kind) { + case this.KINDS.push: + return this.decodePush(buffer, view, decoder); + case this.KINDS.reply: + return this.decodeReply(buffer, view, decoder); + case this.KINDS.broadcast: + return this.decodeBroadcast(buffer, view, decoder); + } + }, + decodePush(buffer, view, decoder) { + let joinRefSize = view.getUint8(1); + let topicSize = view.getUint8(2); + let eventSize = view.getUint8(3); + let offset = this.HEADER_LENGTH + this.META_LENGTH - 1; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: joinRef, ref: null, topic, event, payload: data }; + }, + decodeReply(buffer, view, decoder) { + let joinRefSize = view.getUint8(1); + let refSize = view.getUint8(2); + let topicSize = view.getUint8(3); + let eventSize = view.getUint8(4); + let offset = this.HEADER_LENGTH + this.META_LENGTH; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let ref = decoder.decode(buffer.slice(offset, offset + refSize)); + offset = offset + 
refSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + let payload = { status: event, response: data }; + return { join_ref: joinRef, ref, topic, event: CHANNEL_EVENTS.reply, payload }; + }, + decodeBroadcast(buffer, view, decoder) { + let topicSize = view.getUint8(1); + let eventSize = view.getUint8(2); + let offset = this.HEADER_LENGTH + 2; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: null, ref: null, topic, event, payload: data }; + } + }; + + // js/phoenix/socket.js + var Socket = class { + constructor(endPoint, opts = {}) { + this.stateChangeCallbacks = { open: [], close: [], error: [], message: [] }; + this.channels = []; + this.sendBuffer = []; + this.ref = 0; + this.timeout = opts.timeout || DEFAULT_TIMEOUT; + this.transport = opts.transport || global.WebSocket || LongPoll; + this.establishedConnections = 0; + this.defaultEncoder = serializer_default.encode.bind(serializer_default); + this.defaultDecoder = serializer_default.decode.bind(serializer_default); + this.closeWasClean = false; + this.binaryType = opts.binaryType || "arraybuffer"; + this.connectClock = 1; + if (this.transport !== LongPoll) { + this.encode = opts.encode || this.defaultEncoder; + this.decode = opts.decode || this.defaultDecoder; + } else { + this.encode = this.defaultEncoder; + this.decode = this.defaultDecoder; + } + let awaitingConnectionOnPageShow = null; + if (phxWindow && phxWindow.addEventListener) { + phxWindow.addEventListener("pagehide", (_e) => { + if (this.conn) { + this.disconnect(); + awaitingConnectionOnPageShow = this.connectClock; + } + }); + phxWindow.addEventListener("pageshow", (_e) => { + if (awaitingConnectionOnPageShow === this.connectClock) { + awaitingConnectionOnPageShow = null; + this.connect(); + } + }); + } + this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 3e4; + this.rejoinAfterMs = (tries) => { + if (opts.rejoinAfterMs) { + return opts.rejoinAfterMs(tries); + } else { + return [1e3, 2e3, 5e3][tries - 1] || 1e4; + } + }; + this.reconnectAfterMs = (tries) => { + if (opts.reconnectAfterMs) { + return opts.reconnectAfterMs(tries); + } else { + return [10, 50, 100, 150, 200, 250, 500, 1e3, 2e3][tries - 1] || 5e3; + } + }; + this.logger = opts.logger || null; + this.longpollerTimeout = opts.longpollerTimeout || 2e4; + this.params = closure(opts.params || {}); + this.endPoint = `${endPoint}/${TRANSPORTS.websocket}`; + this.vsn = opts.vsn || DEFAULT_VSN; + this.heartbeatTimer = null; + this.pendingHeartbeatRef = null; + this.reconnectTimer = new Timer(() => { + this.teardown(() => this.connect()); + }, this.reconnectAfterMs); + } + getLongPollTransport() { + return LongPoll; + } + replaceTransport(newTransport) { + this.connectClock++; + this.closeWasClean = true; + this.reconnectTimer.reset(); + this.sendBuffer = []; + if (this.conn) { + this.conn.close(); + this.conn = null; + } + this.transport = newTransport; + } + protocol() { + return location.protocol.match(/^https/) ? 
"wss" : "ws"; + } + endPointURL() { + let uri = Ajax.appendParams(Ajax.appendParams(this.endPoint, this.params()), { vsn: this.vsn }); + if (uri.charAt(0) !== "/") { + return uri; + } + if (uri.charAt(1) === "/") { + return `${this.protocol()}:${uri}`; + } + return `${this.protocol()}://${location.host}${uri}`; + } + disconnect(callback, code, reason) { + this.connectClock++; + this.closeWasClean = true; + this.reconnectTimer.reset(); + this.teardown(callback, code, reason); + } + connect(params) { + if (params) { + console && console.log("passing params to connect is deprecated. Instead pass :params to the Socket constructor"); + this.params = closure(params); + } + if (this.conn) { + return; + } + this.connectClock++; + this.closeWasClean = false; + this.conn = new this.transport(this.endPointURL()); + this.conn.binaryType = this.binaryType; + this.conn.timeout = this.longpollerTimeout; + this.conn.onopen = () => this.onConnOpen(); + this.conn.onerror = (error) => this.onConnError(error); + this.conn.onmessage = (event) => this.onConnMessage(event); + this.conn.onclose = (event) => this.onConnClose(event); + } + log(kind, msg, data) { + this.logger(kind, msg, data); + } + hasLogger() { + return this.logger !== null; + } + onOpen(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.open.push([ref, callback]); + return ref; + } + onClose(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.close.push([ref, callback]); + return ref; + } + onError(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.error.push([ref, callback]); + return ref; + } + onMessage(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.message.push([ref, callback]); + return ref; + } + ping(callback) { + if (!this.isConnected()) { + return false; + } + let ref = this.makeRef(); + let startTime = Date.now(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref }); + let onMsgRef = this.onMessage((msg) => { + if (msg.ref === ref) { + this.off([onMsgRef]); + callback(Date.now() - startTime); + } + }); + return true; + } + onConnOpen() { + if (this.hasLogger()) + this.log("transport", `connected to ${this.endPointURL()}`); + this.closeWasClean = false; + this.establishedConnections++; + this.flushSendBuffer(); + this.reconnectTimer.reset(); + this.resetHeartbeat(); + this.stateChangeCallbacks.open.forEach(([, callback]) => callback()); + } + heartbeatTimeout() { + if (this.pendingHeartbeatRef) { + this.pendingHeartbeatRef = null; + if (this.hasLogger()) { + this.log("transport", "heartbeat timeout. 
Attempting to re-establish connection"); + } + this.abnormalClose("heartbeat timeout"); + } + } + resetHeartbeat() { + if (this.conn && this.conn.skipHeartbeat) { + return; + } + this.pendingHeartbeatRef = null; + clearTimeout(this.heartbeatTimer); + setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + teardown(callback, code, reason) { + if (!this.conn) { + return callback && callback(); + } + this.waitForBufferDone(() => { + if (this.conn) { + if (code) { + this.conn.close(code, reason || ""); + } else { + this.conn.close(); + } + } + this.waitForSocketClosed(() => { + if (this.conn) { + this.conn.onclose = function() { + }; + this.conn = null; + } + callback && callback(); + }); + }); + } + waitForBufferDone(callback, tries = 1) { + if (tries === 5 || !this.conn || !this.conn.bufferedAmount) { + callback(); + return; + } + setTimeout(() => { + this.waitForBufferDone(callback, tries + 1); + }, 150 * tries); + } + waitForSocketClosed(callback, tries = 1) { + if (tries === 5 || !this.conn || this.conn.readyState === SOCKET_STATES.closed) { + callback(); + return; + } + setTimeout(() => { + this.waitForSocketClosed(callback, tries + 1); + }, 150 * tries); + } + onConnClose(event) { + let closeCode = event && event.code; + if (this.hasLogger()) + this.log("transport", "close", event); + this.triggerChanError(); + clearTimeout(this.heartbeatTimer); + if (!this.closeWasClean && closeCode !== 1e3) { + this.reconnectTimer.scheduleTimeout(); + } + this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event)); + } + onConnError(error) { + if (this.hasLogger()) + this.log("transport", error); + let transportBefore = this.transport; + let establishedBefore = this.establishedConnections; + this.stateChangeCallbacks.error.forEach(([, callback]) => { + callback(error, transportBefore, establishedBefore); + }); + if (transportBefore === this.transport || establishedBefore > 0) { + this.triggerChanError(); + } + } + triggerChanError() { + this.channels.forEach((channel) => { + if (!(channel.isErrored() || channel.isLeaving() || channel.isClosed())) { + channel.trigger(CHANNEL_EVENTS.error); + } + }); + } + connectionState() { + switch (this.conn && this.conn.readyState) { + case SOCKET_STATES.connecting: + return "connecting"; + case SOCKET_STATES.open: + return "open"; + case SOCKET_STATES.closing: + return "closing"; + default: + return "closed"; + } + } + isConnected() { + return this.connectionState() === "open"; + } + remove(channel) { + this.off(channel.stateChangeRefs); + this.channels = this.channels.filter((c) => c.joinRef() !== channel.joinRef()); + } + off(refs) { + for (let key in this.stateChangeCallbacks) { + this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => { + return refs.indexOf(ref) === -1; + }); + } + } + channel(topic, chanParams = {}) { + let chan = new Channel(topic, chanParams, this); + this.channels.push(chan); + return chan; + } + push(data) { + if (this.hasLogger()) { + let { topic, event, payload, ref, join_ref } = data; + this.log("push", `${topic} ${event} (${join_ref}, ${ref})`, payload); + } + if (this.isConnected()) { + this.encode(data, (result) => this.conn.send(result)); + } else { + this.sendBuffer.push(() => this.encode(data, (result) => this.conn.send(result))); + } + } + makeRef() { + let newRef = this.ref + 1; + if (newRef === this.ref) { + this.ref = 0; + } else { + this.ref = newRef; + } + return this.ref.toString(); + } + sendHeartbeat() { + if (this.pendingHeartbeatRef && !this.isConnected()) { + 
return; + } + this.pendingHeartbeatRef = this.makeRef(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref: this.pendingHeartbeatRef }); + this.heartbeatTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs); + } + abnormalClose(reason) { + this.closeWasClean = false; + if (this.isConnected()) { + this.conn.close(WS_CLOSE_NORMAL, reason); + } + } + flushSendBuffer() { + if (this.isConnected() && this.sendBuffer.length > 0) { + this.sendBuffer.forEach((callback) => callback()); + this.sendBuffer = []; + } + } + onConnMessage(rawMessage) { + this.decode(rawMessage.data, (msg) => { + let { topic, event, payload, ref, join_ref } = msg; + if (ref && ref === this.pendingHeartbeatRef) { + clearTimeout(this.heartbeatTimer); + this.pendingHeartbeatRef = null; + setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + if (this.hasLogger()) + this.log("receive", `${payload.status || ""} ${topic} ${event} ${ref && "(" + ref + ")" || ""}`, payload); + for (let i = 0; i < this.channels.length; i++) { + const channel = this.channels[i]; + if (!channel.isMember(topic, event, payload, join_ref)) { + continue; + } + channel.trigger(event, payload, ref, join_ref); + } + for (let i = 0; i < this.stateChangeCallbacks.message.length; i++) { + let [, callback] = this.stateChangeCallbacks.message[i]; + callback(msg); + } + }); + } + leaveOpenTopic(topic) { + let dupChannel = this.channels.find((c) => c.topic === topic && (c.isJoined() || c.isJoining())); + if (dupChannel) { + if (this.hasLogger()) + this.log("transport", `leaving duplicate topic "${topic}"`); + dupChannel.leave(); + } + } + }; + return __toCommonJS(phoenix_exports); +})(); diff --git a/deps/phoenix/priv/static/phoenix.min.js b/deps/phoenix/priv/static/phoenix.min.js new file mode 100644 index 0000000..50542d9 --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.min.js @@ -0,0 +1 @@ +var Phoenix=(()=>{var w=Object.defineProperty;var U=Object.getOwnPropertyDescriptor;var M=Object.getOwnPropertyNames;var $=Object.prototype.hasOwnProperty;var P=(h,e)=>{for(var t in e)w(h,t,{get:e[t],enumerable:!0})},B=(h,e,t,i)=>{if(e&&typeof e=="object"||typeof e=="function")for(let s of M(e))!$.call(h,s)&&s!==t&&w(h,s,{get:()=>e[s],enumerable:!(i=U(e,s))||i.enumerable});return h};var J=h=>B(w({},"__esModule",{value:!0}),h);var I={};P(I,{Channel:()=>b,LongPoll:()=>T,Presence:()=>m,Serializer:()=>y,Socket:()=>k});var R=h=>typeof h=="function"?h:function(){return h};var z=typeof self!="undefined"?self:null,A=typeof window!="undefined"?window:null,S=z||A||S,N="2.0.0",d={connecting:0,open:1,closing:2,closed:3},O=1e4,H=1e3,u={closed:"closed",errored:"errored",joined:"joined",joining:"joining",leaving:"leaving"},p={close:"phx_close",error:"phx_error",join:"phx_join",reply:"phx_reply",leave:"phx_leave"},L={longpoll:"longpoll",websocket:"websocket"},D={complete:4};var E=class{constructor(e,t,i,s){this.channel=e,this.event=t,this.payload=i||function(){return{}},this.receivedResp=null,this.timeout=s,this.timeoutTimer=null,this.recHooks=[],this.sent=!1}resend(e){this.timeout=e,this.reset(),this.send()}send(){this.hasReceived("timeout")||(this.startTimeout(),this.sent=!0,this.channel.socket.push({topic:this.channel.topic,event:this.event,payload:this.payload(),ref:this.ref,join_ref:this.channel.joinRef()}))}receive(e,t){return 
this.hasReceived(e)&&t(this.receivedResp.response),this.recHooks.push({status:e,callback:t}),this}reset(){this.cancelRefEvent(),this.ref=null,this.refEvent=null,this.receivedResp=null,this.sent=!1}matchReceive({status:e,response:t,_ref:i}){this.recHooks.filter(s=>s.status===e).forEach(s=>s.callback(t))}cancelRefEvent(){!this.refEvent||this.channel.off(this.refEvent)}cancelTimeout(){clearTimeout(this.timeoutTimer),this.timeoutTimer=null}startTimeout(){this.timeoutTimer&&this.cancelTimeout(),this.ref=this.channel.socket.makeRef(),this.refEvent=this.channel.replyEventName(this.ref),this.channel.on(this.refEvent,e=>{this.cancelRefEvent(),this.cancelTimeout(),this.receivedResp=e,this.matchReceive(e)}),this.timeoutTimer=setTimeout(()=>{this.trigger("timeout",{})},this.timeout)}hasReceived(e){return this.receivedResp&&this.receivedResp.status===e}trigger(e,t){this.channel.trigger(this.refEvent,{status:e,response:t})}};var j=class{constructor(e,t){this.callback=e,this.timerCalc=t,this.timer=null,this.tries=0}reset(){this.tries=0,clearTimeout(this.timer)}scheduleTimeout(){clearTimeout(this.timer),this.timer=setTimeout(()=>{this.tries=this.tries+1,this.callback()},this.timerCalc(this.tries+1))}};var b=class{constructor(e,t,i){this.state=u.closed,this.topic=e,this.params=R(t||{}),this.socket=i,this.bindings=[],this.bindingRef=0,this.timeout=this.socket.timeout,this.joinedOnce=!1,this.joinPush=new E(this,p.join,this.params,this.timeout),this.pushBuffer=[],this.stateChangeRefs=[],this.rejoinTimer=new j(()=>{this.socket.isConnected()&&this.rejoin()},this.socket.rejoinAfterMs),this.stateChangeRefs.push(this.socket.onError(()=>this.rejoinTimer.reset())),this.stateChangeRefs.push(this.socket.onOpen(()=>{this.rejoinTimer.reset(),this.isErrored()&&this.rejoin()})),this.joinPush.receive("ok",()=>{this.state=u.joined,this.rejoinTimer.reset(),this.pushBuffer.forEach(s=>s.send()),this.pushBuffer=[]}),this.joinPush.receive("error",()=>{this.state=u.errored,this.socket.isConnected()&&this.rejoinTimer.scheduleTimeout()}),this.onClose(()=>{this.rejoinTimer.reset(),this.socket.hasLogger()&&this.socket.log("channel",`close ${this.topic} ${this.joinRef()}`),this.state=u.closed,this.socket.remove(this)}),this.onError(s=>{this.socket.hasLogger()&&this.socket.log("channel",`error ${this.topic}`,s),this.isJoining()&&this.joinPush.reset(),this.state=u.errored,this.socket.isConnected()&&this.rejoinTimer.scheduleTimeout()}),this.joinPush.receive("timeout",()=>{this.socket.hasLogger()&&this.socket.log("channel",`timeout ${this.topic} (${this.joinRef()})`,this.joinPush.timeout),new E(this,p.leave,R({}),this.timeout).send(),this.state=u.errored,this.joinPush.reset(),this.socket.isConnected()&&this.rejoinTimer.scheduleTimeout()}),this.on(p.reply,(s,o)=>{this.trigger(this.replyEventName(o),s)})}join(e=this.timeout){if(this.joinedOnce)throw new Error("tried to join multiple times. 'join' can only be called a single time per channel instance");return this.timeout=e,this.joinedOnce=!0,this.rejoin(),this.joinPush}onClose(e){this.on(p.close,e)}onError(e){return this.on(p.error,t=>e(t))}on(e,t){let i=this.bindingRef++;return this.bindings.push({event:e,ref:i,callback:t}),i}off(e,t){this.bindings=this.bindings.filter(i=>!(i.event===e&&(typeof t=="undefined"||t===i.ref)))}canPush(){return this.socket.isConnected()&&this.isJoined()}push(e,t,i=this.timeout){if(t=t||{},!this.joinedOnce)throw new Error(`tried to push '${e}' to '${this.topic}' before joining. 
Use channel.join() before pushing events`);let s=new E(this,e,function(){return t},i);return this.canPush()?s.send():(s.startTimeout(),this.pushBuffer.push(s)),s}leave(e=this.timeout){this.rejoinTimer.reset(),this.joinPush.cancelTimeout(),this.state=u.leaving;let t=()=>{this.socket.hasLogger()&&this.socket.log("channel",`leave ${this.topic}`),this.trigger(p.close,"leave")},i=new E(this,p.leave,R({}),e);return i.receive("ok",()=>t()).receive("timeout",()=>t()),i.send(),this.canPush()||i.trigger("ok",{}),i}onMessage(e,t,i){return t}isMember(e,t,i,s){return this.topic!==e?!1:s&&s!==this.joinRef()?(this.socket.hasLogger()&&this.socket.log("channel","dropping outdated message",{topic:e,event:t,payload:i,joinRef:s}),!1):!0}joinRef(){return this.joinPush.ref}rejoin(e=this.timeout){this.isLeaving()||(this.socket.leaveOpenTopic(this.topic),this.state=u.joining,this.joinPush.resend(e))}trigger(e,t,i,s){let o=this.onMessage(e,t,i,s);if(t&&!o)throw new Error("channel onMessage callbacks must return the payload, modified or unmodified");let r=this.bindings.filter(n=>n.event===e);for(let n=0;n{let a=this.parseJSON(e.responseText);n&&n(a)},r&&(e.ontimeout=r),e.onprogress=()=>{},e.send(s),e}static xhrRequest(e,t,i,s,o,r,n,a){return e.open(t,i,!0),e.timeout=r,e.setRequestHeader("Content-Type",s),e.onerror=()=>a&&a(null),e.onreadystatechange=()=>{if(e.readyState===D.complete&&a){let l=this.parseJSON(e.responseText);a(l)}},n&&(e.ontimeout=n),e.send(o),e}static parseJSON(e){if(!e||e==="")return null;try{return JSON.parse(e)}catch(t){return console&&console.log("failed to parse JSON response",e),null}}static serialize(e,t){let i=[];for(var s in e){if(!Object.prototype.hasOwnProperty.call(e,s))continue;let o=t?`${t}[${s}]`:s,r=e[s];typeof r=="object"?i.push(this.serialize(r,o)):i.push(encodeURIComponent(o)+"="+encodeURIComponent(r))}return i.join("&")}static appendParams(e,t){if(Object.keys(t).length===0)return e;let i=e.match(/\?/)?"&":"?";return`${e}${i}${this.serialize(t)}`}};var T=class{constructor(e){this.endPoint=null,this.token=null,this.skipHeartbeat=!0,this.reqs=new Set,this.onopen=function(){},this.onerror=function(){},this.onmessage=function(){},this.onclose=function(){},this.pollEndpoint=this.normalizeEndpoint(e),this.readyState=d.connecting,this.poll()}normalizeEndpoint(e){return e.replace("ws://","http://").replace("wss://","https://").replace(new RegExp("(.*)/"+L.websocket),"$1/"+L.longpoll)}endpointURL(){return g.appendParams(this.pollEndpoint,{token:this.token})}closeAndRetry(e,t,i){this.close(e,t,i),this.readyState=d.connecting}ontimeout(){this.onerror("timeout"),this.closeAndRetry(1005,"timeout",!1)}isActive(){return this.readyState===d.open||this.readyState===d.connecting}poll(){this.ajax("GET",null,()=>this.ontimeout(),e=>{if(e){var{status:t,token:i,messages:s}=e;this.token=i}else t=0;switch(t){case 200:s.forEach(o=>{setTimeout(()=>this.onmessage({data:o}),0)}),this.poll();break;case 204:this.poll();break;case 410:this.readyState=d.open,this.onopen({}),this.poll();break;case 403:this.onerror(403),this.close(1008,"forbidden",!1);break;case 0:case 500:this.onerror(500),this.closeAndRetry(1011,"internal server error",500);break;default:throw new Error(`unhandled poll status ${t}`)}})}send(e){this.ajax("POST",e,()=>this.onerror("timeout"),t=>{(!t||t.status!==200)&&(this.onerror(t&&t.status),this.closeAndRetry(1011,"internal server error",!1))})}close(e,t,i){for(let o of this.reqs)o.abort();this.readyState=d.closed;let s=Object.assign({code:1e3,reason:void 
0,wasClean:!0},{code:e,reason:t,wasClean:i});typeof CloseEvent!="undefined"?this.onclose(new CloseEvent("close",s)):this.onclose(s)}ajax(e,t,i,s){let o,r=()=>{this.reqs.delete(o),i()};o=g.request(e,this.endpointURL(),"application/json",t,this.timeout,r,n=>{this.reqs.delete(o),this.isActive()&&s(n)}),this.reqs.add(o)}};var m=class{constructor(e,t={}){let i=t.events||{state:"presence_state",diff:"presence_diff"};this.state={},this.pendingDiffs=[],this.channel=e,this.joinRef=null,this.caller={onJoin:function(){},onLeave:function(){},onSync:function(){}},this.channel.on(i.state,s=>{let{onJoin:o,onLeave:r,onSync:n}=this.caller;this.joinRef=this.channel.joinRef(),this.state=m.syncState(this.state,s,o,r),this.pendingDiffs.forEach(a=>{this.state=m.syncDiff(this.state,a,o,r)}),this.pendingDiffs=[],n()}),this.channel.on(i.diff,s=>{let{onJoin:o,onLeave:r,onSync:n}=this.caller;this.inPendingSyncState()?this.pendingDiffs.push(s):(this.state=m.syncDiff(this.state,s,o,r),n())})}onJoin(e){this.caller.onJoin=e}onLeave(e){this.caller.onLeave=e}onSync(e){this.caller.onSync=e}list(e){return m.list(this.state,e)}inPendingSyncState(){return!this.joinRef||this.joinRef!==this.channel.joinRef()}static syncState(e,t,i,s){let o=this.clone(e),r={},n={};return this.map(o,(a,l)=>{t[a]||(n[a]=l)}),this.map(t,(a,l)=>{let f=o[a];if(f){let c=l.metas.map(v=>v.phx_ref),C=f.metas.map(v=>v.phx_ref),x=l.metas.filter(v=>C.indexOf(v.phx_ref)<0),_=f.metas.filter(v=>c.indexOf(v.phx_ref)<0);x.length>0&&(r[a]=l,r[a].metas=x),_.length>0&&(n[a]=this.clone(f),n[a].metas=_)}else r[a]=l}),this.syncDiff(o,{joins:r,leaves:n},i,s)}static syncDiff(e,t,i,s){let{joins:o,leaves:r}=this.clone(t);return i||(i=function(){}),s||(s=function(){}),this.map(o,(n,a)=>{let l=e[n];if(e[n]=this.clone(a),l){let f=e[n].metas.map(C=>C.phx_ref),c=l.metas.filter(C=>f.indexOf(C.phx_ref)<0);e[n].metas.unshift(...c)}i(n,l,a)}),this.map(r,(n,a)=>{let l=e[n];if(!l)return;let f=a.metas.map(c=>c.phx_ref);l.metas=l.metas.filter(c=>f.indexOf(c.phx_ref)<0),s(n,l,a),l.metas.length===0&&delete e[n]}),e}static list(e,t){return t||(t=function(i,s){return s}),this.map(e,(i,s)=>t(i,s))}static map(e,t){return Object.getOwnPropertyNames(e).map(i=>t(i,e[i]))}static clone(e){return JSON.parse(JSON.stringify(e))}};var y={HEADER_LENGTH:1,META_LENGTH:4,KINDS:{push:0,reply:1,broadcast:2},encode(h,e){if(h.payload.constructor===ArrayBuffer)return e(this.binaryEncode(h));{let t=[h.join_ref,h.ref,h.topic,h.event,h.payload];return e(JSON.stringify(t))}},decode(h,e){if(h.constructor===ArrayBuffer)return e(this.binaryDecode(h));{let[t,i,s,o,r]=JSON.parse(h);return e({join_ref:t,ref:i,topic:s,event:o,payload:r})}},binaryEncode(h){let{join_ref:e,ref:t,event:i,topic:s,payload:o}=h,r=this.META_LENGTH+e.length+t.length+s.length+i.length,n=new ArrayBuffer(this.HEADER_LENGTH+r),a=new DataView(n),l=0;a.setUint8(l++,this.KINDS.push),a.setUint8(l++,e.length),a.setUint8(l++,t.length),a.setUint8(l++,s.length),a.setUint8(l++,i.length),Array.from(e,c=>a.setUint8(l++,c.charCodeAt(0))),Array.from(t,c=>a.setUint8(l++,c.charCodeAt(0))),Array.from(s,c=>a.setUint8(l++,c.charCodeAt(0))),Array.from(i,c=>a.setUint8(l++,c.charCodeAt(0)));var f=new Uint8Array(n.byteLength+o.byteLength);return f.set(new Uint8Array(n),0),f.set(new Uint8Array(o),n.byteLength),f.buffer},binaryDecode(h){let e=new DataView(h),t=e.getUint8(0),i=new TextDecoder;switch(t){case this.KINDS.push:return this.decodePush(h,e,i);case this.KINDS.reply:return this.decodeReply(h,e,i);case this.KINDS.broadcast:return 
this.decodeBroadcast(h,e,i)}},decodePush(h,e,t){let i=e.getUint8(1),s=e.getUint8(2),o=e.getUint8(3),r=this.HEADER_LENGTH+this.META_LENGTH-1,n=t.decode(h.slice(r,r+i));r=r+i;let a=t.decode(h.slice(r,r+s));r=r+s;let l=t.decode(h.slice(r,r+o));r=r+o;let f=h.slice(r,h.byteLength);return{join_ref:n,ref:null,topic:a,event:l,payload:f}},decodeReply(h,e,t){let i=e.getUint8(1),s=e.getUint8(2),o=e.getUint8(3),r=e.getUint8(4),n=this.HEADER_LENGTH+this.META_LENGTH,a=t.decode(h.slice(n,n+i));n=n+i;let l=t.decode(h.slice(n,n+s));n=n+s;let f=t.decode(h.slice(n,n+o));n=n+o;let c=t.decode(h.slice(n,n+r));n=n+r;let C=h.slice(n,h.byteLength),x={status:c,response:C};return{join_ref:a,ref:l,topic:f,event:p.reply,payload:x}},decodeBroadcast(h,e,t){let i=e.getUint8(1),s=e.getUint8(2),o=this.HEADER_LENGTH+2,r=t.decode(h.slice(o,o+i));o=o+i;let n=t.decode(h.slice(o,o+s));o=o+s;let a=h.slice(o,h.byteLength);return{join_ref:null,ref:null,topic:r,event:n,payload:a}}};var k=class{constructor(e,t={}){this.stateChangeCallbacks={open:[],close:[],error:[],message:[]},this.channels=[],this.sendBuffer=[],this.ref=0,this.timeout=t.timeout||O,this.transport=t.transport||S.WebSocket||T,this.establishedConnections=0,this.defaultEncoder=y.encode.bind(y),this.defaultDecoder=y.decode.bind(y),this.closeWasClean=!1,this.binaryType=t.binaryType||"arraybuffer",this.connectClock=1,this.transport!==T?(this.encode=t.encode||this.defaultEncoder,this.decode=t.decode||this.defaultDecoder):(this.encode=this.defaultEncoder,this.decode=this.defaultDecoder);let i=null;A&&A.addEventListener&&(A.addEventListener("pagehide",s=>{this.conn&&(this.disconnect(),i=this.connectClock)}),A.addEventListener("pageshow",s=>{i===this.connectClock&&(i=null,this.connect())})),this.heartbeatIntervalMs=t.heartbeatIntervalMs||3e4,this.rejoinAfterMs=s=>t.rejoinAfterMs?t.rejoinAfterMs(s):[1e3,2e3,5e3][s-1]||1e4,this.reconnectAfterMs=s=>t.reconnectAfterMs?t.reconnectAfterMs(s):[10,50,100,150,200,250,500,1e3,2e3][s-1]||5e3,this.logger=t.logger||null,this.longpollerTimeout=t.longpollerTimeout||2e4,this.params=R(t.params||{}),this.endPoint=`${e}/${L.websocket}`,this.vsn=t.vsn||N,this.heartbeatTimer=null,this.pendingHeartbeatRef=null,this.reconnectTimer=new j(()=>{this.teardown(()=>this.connect())},this.reconnectAfterMs)}getLongPollTransport(){return T}replaceTransport(e){this.connectClock++,this.closeWasClean=!0,this.reconnectTimer.reset(),this.sendBuffer=[],this.conn&&(this.conn.close(),this.conn=null),this.transport=e}protocol(){return location.protocol.match(/^https/)?"wss":"ws"}endPointURL(){let e=g.appendParams(g.appendParams(this.endPoint,this.params()),{vsn:this.vsn});return e.charAt(0)!=="/"?e:e.charAt(1)==="/"?`${this.protocol()}:${e}`:`${this.protocol()}://${location.host}${e}`}disconnect(e,t,i){this.connectClock++,this.closeWasClean=!0,this.reconnectTimer.reset(),this.teardown(e,t,i)}connect(e){e&&(console&&console.log("passing params to connect is deprecated. 
Instead pass :params to the Socket constructor"),this.params=R(e)),!this.conn&&(this.connectClock++,this.closeWasClean=!1,this.conn=new this.transport(this.endPointURL()),this.conn.binaryType=this.binaryType,this.conn.timeout=this.longpollerTimeout,this.conn.onopen=()=>this.onConnOpen(),this.conn.onerror=t=>this.onConnError(t),this.conn.onmessage=t=>this.onConnMessage(t),this.conn.onclose=t=>this.onConnClose(t))}log(e,t,i){this.logger(e,t,i)}hasLogger(){return this.logger!==null}onOpen(e){let t=this.makeRef();return this.stateChangeCallbacks.open.push([t,e]),t}onClose(e){let t=this.makeRef();return this.stateChangeCallbacks.close.push([t,e]),t}onError(e){let t=this.makeRef();return this.stateChangeCallbacks.error.push([t,e]),t}onMessage(e){let t=this.makeRef();return this.stateChangeCallbacks.message.push([t,e]),t}ping(e){if(!this.isConnected())return!1;let t=this.makeRef(),i=Date.now();this.push({topic:"phoenix",event:"heartbeat",payload:{},ref:t});let s=this.onMessage(o=>{o.ref===t&&(this.off([s]),e(Date.now()-i))});return!0}onConnOpen(){this.hasLogger()&&this.log("transport",`connected to ${this.endPointURL()}`),this.closeWasClean=!1,this.establishedConnections++,this.flushSendBuffer(),this.reconnectTimer.reset(),this.resetHeartbeat(),this.stateChangeCallbacks.open.forEach(([,e])=>e())}heartbeatTimeout(){this.pendingHeartbeatRef&&(this.pendingHeartbeatRef=null,this.hasLogger()&&this.log("transport","heartbeat timeout. Attempting to re-establish connection"),this.abnormalClose("heartbeat timeout"))}resetHeartbeat(){this.conn&&this.conn.skipHeartbeat||(this.pendingHeartbeatRef=null,clearTimeout(this.heartbeatTimer),setTimeout(()=>this.sendHeartbeat(),this.heartbeatIntervalMs))}teardown(e,t,i){if(!this.conn)return e&&e();this.waitForBufferDone(()=>{this.conn&&(t?this.conn.close(t,i||""):this.conn.close()),this.waitForSocketClosed(()=>{this.conn&&(this.conn.onclose=function(){},this.conn=null),e&&e()})})}waitForBufferDone(e,t=1){if(t===5||!this.conn||!this.conn.bufferedAmount){e();return}setTimeout(()=>{this.waitForBufferDone(e,t+1)},150*t)}waitForSocketClosed(e,t=1){if(t===5||!this.conn||this.conn.readyState===d.closed){e();return}setTimeout(()=>{this.waitForSocketClosed(e,t+1)},150*t)}onConnClose(e){let t=e&&e.code;this.hasLogger()&&this.log("transport","close",e),this.triggerChanError(),clearTimeout(this.heartbeatTimer),!this.closeWasClean&&t!==1e3&&this.reconnectTimer.scheduleTimeout(),this.stateChangeCallbacks.close.forEach(([,i])=>i(e))}onConnError(e){this.hasLogger()&&this.log("transport",e);let t=this.transport,i=this.establishedConnections;this.stateChangeCallbacks.error.forEach(([,s])=>{s(e,t,i)}),(t===this.transport||i>0)&&this.triggerChanError()}triggerChanError(){this.channels.forEach(e=>{e.isErrored()||e.isLeaving()||e.isClosed()||e.trigger(p.error)})}connectionState(){switch(this.conn&&this.conn.readyState){case d.connecting:return"connecting";case d.open:return"open";case d.closing:return"closing";default:return"closed"}}isConnected(){return this.connectionState()==="open"}remove(e){this.off(e.stateChangeRefs),this.channels=this.channels.filter(t=>t.joinRef()!==e.joinRef())}off(e){for(let t in this.stateChangeCallbacks)this.stateChangeCallbacks[t]=this.stateChangeCallbacks[t].filter(([i])=>e.indexOf(i)===-1)}channel(e,t={}){let i=new b(e,t,this);return this.channels.push(i),i}push(e){if(this.hasLogger()){let{topic:t,event:i,payload:s,ref:o,join_ref:r}=e;this.log("push",`${t} ${i} (${r}, 
${o})`,s)}this.isConnected()?this.encode(e,t=>this.conn.send(t)):this.sendBuffer.push(()=>this.encode(e,t=>this.conn.send(t)))}makeRef(){let e=this.ref+1;return e===this.ref?this.ref=0:this.ref=e,this.ref.toString()}sendHeartbeat(){this.pendingHeartbeatRef&&!this.isConnected()||(this.pendingHeartbeatRef=this.makeRef(),this.push({topic:"phoenix",event:"heartbeat",payload:{},ref:this.pendingHeartbeatRef}),this.heartbeatTimer=setTimeout(()=>this.heartbeatTimeout(),this.heartbeatIntervalMs))}abnormalClose(e){this.closeWasClean=!1,this.isConnected()&&this.conn.close(H,e)}flushSendBuffer(){this.isConnected()&&this.sendBuffer.length>0&&(this.sendBuffer.forEach(e=>e()),this.sendBuffer=[])}onConnMessage(e){this.decode(e.data,t=>{let{topic:i,event:s,payload:o,ref:r,join_ref:n}=t;r&&r===this.pendingHeartbeatRef&&(clearTimeout(this.heartbeatTimer),this.pendingHeartbeatRef=null,setTimeout(()=>this.sendHeartbeat(),this.heartbeatIntervalMs)),this.hasLogger()&&this.log("receive",`${o.status||""} ${i} ${s} ${r&&"("+r+")"||""}`,o);for(let a=0;ai.topic===e&&(i.isJoined()||i.isJoining()));t&&(this.hasLogger()&&this.log("transport",`leaving duplicate topic "${e}"`),t.leave())}};return J(I);})(); diff --git a/deps/phoenix/priv/static/phoenix.mjs b/deps/phoenix/priv/static/phoenix.mjs new file mode 100644 index 0000000..7767f03 --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.mjs @@ -0,0 +1,1122 @@ +// js/phoenix/utils.js +var closure = (value) => { + if (typeof value === "function") { + return value; + } else { + let closure2 = function() { + return value; + }; + return closure2; + } +}; + +// js/phoenix/constants.js +var globalSelf = typeof self !== "undefined" ? self : null; +var phxWindow = typeof window !== "undefined" ? window : null; +var global = globalSelf || phxWindow || global; +var DEFAULT_VSN = "2.0.0"; +var SOCKET_STATES = { connecting: 0, open: 1, closing: 2, closed: 3 }; +var DEFAULT_TIMEOUT = 1e4; +var WS_CLOSE_NORMAL = 1e3; +var CHANNEL_STATES = { + closed: "closed", + errored: "errored", + joined: "joined", + joining: "joining", + leaving: "leaving" +}; +var CHANNEL_EVENTS = { + close: "phx_close", + error: "phx_error", + join: "phx_join", + reply: "phx_reply", + leave: "phx_leave" +}; +var TRANSPORTS = { + longpoll: "longpoll", + websocket: "websocket" +}; +var XHR_STATES = { + complete: 4 +}; + +// js/phoenix/push.js +var Push = class { + constructor(channel, event, payload, timeout) { + this.channel = channel; + this.event = event; + this.payload = payload || function() { + return {}; + }; + this.receivedResp = null; + this.timeout = timeout; + this.timeoutTimer = null; + this.recHooks = []; + this.sent = false; + } + resend(timeout) { + this.timeout = timeout; + this.reset(); + this.send(); + } + send() { + if (this.hasReceived("timeout")) { + return; + } + this.startTimeout(); + this.sent = true; + this.channel.socket.push({ + topic: this.channel.topic, + event: this.event, + payload: this.payload(), + ref: this.ref, + join_ref: this.channel.joinRef() + }); + } + receive(status, callback) { + if (this.hasReceived(status)) { + callback(this.receivedResp.response); + } + this.recHooks.push({ status, callback }); + return this; + } + reset() { + this.cancelRefEvent(); + this.ref = null; + this.refEvent = null; + this.receivedResp = null; + this.sent = false; + } + matchReceive({ status, response, _ref }) { + this.recHooks.filter((h) => h.status === status).forEach((h) => h.callback(response)); + } + cancelRefEvent() { + if (!this.refEvent) { + return; + } + 
this.channel.off(this.refEvent); + } + cancelTimeout() { + clearTimeout(this.timeoutTimer); + this.timeoutTimer = null; + } + startTimeout() { + if (this.timeoutTimer) { + this.cancelTimeout(); + } + this.ref = this.channel.socket.makeRef(); + this.refEvent = this.channel.replyEventName(this.ref); + this.channel.on(this.refEvent, (payload) => { + this.cancelRefEvent(); + this.cancelTimeout(); + this.receivedResp = payload; + this.matchReceive(payload); + }); + this.timeoutTimer = setTimeout(() => { + this.trigger("timeout", {}); + }, this.timeout); + } + hasReceived(status) { + return this.receivedResp && this.receivedResp.status === status; + } + trigger(status, response) { + this.channel.trigger(this.refEvent, { status, response }); + } +}; + +// js/phoenix/timer.js +var Timer = class { + constructor(callback, timerCalc) { + this.callback = callback; + this.timerCalc = timerCalc; + this.timer = null; + this.tries = 0; + } + reset() { + this.tries = 0; + clearTimeout(this.timer); + } + scheduleTimeout() { + clearTimeout(this.timer); + this.timer = setTimeout(() => { + this.tries = this.tries + 1; + this.callback(); + }, this.timerCalc(this.tries + 1)); + } +}; + +// js/phoenix/channel.js +var Channel = class { + constructor(topic, params, socket) { + this.state = CHANNEL_STATES.closed; + this.topic = topic; + this.params = closure(params || {}); + this.socket = socket; + this.bindings = []; + this.bindingRef = 0; + this.timeout = this.socket.timeout; + this.joinedOnce = false; + this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout); + this.pushBuffer = []; + this.stateChangeRefs = []; + this.rejoinTimer = new Timer(() => { + if (this.socket.isConnected()) { + this.rejoin(); + } + }, this.socket.rejoinAfterMs); + this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset())); + this.stateChangeRefs.push(this.socket.onOpen(() => { + this.rejoinTimer.reset(); + if (this.isErrored()) { + this.rejoin(); + } + })); + this.joinPush.receive("ok", () => { + this.state = CHANNEL_STATES.joined; + this.rejoinTimer.reset(); + this.pushBuffer.forEach((pushEvent) => pushEvent.send()); + this.pushBuffer = []; + }); + this.joinPush.receive("error", () => { + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.onClose(() => { + this.rejoinTimer.reset(); + if (this.socket.hasLogger()) + this.socket.log("channel", `close ${this.topic} ${this.joinRef()}`); + this.state = CHANNEL_STATES.closed; + this.socket.remove(this); + }); + this.onError((reason) => { + if (this.socket.hasLogger()) + this.socket.log("channel", `error ${this.topic}`, reason); + if (this.isJoining()) { + this.joinPush.reset(); + } + this.state = CHANNEL_STATES.errored; + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.joinPush.receive("timeout", () => { + if (this.socket.hasLogger()) + this.socket.log("channel", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout); + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), this.timeout); + leavePush.send(); + this.state = CHANNEL_STATES.errored; + this.joinPush.reset(); + if (this.socket.isConnected()) { + this.rejoinTimer.scheduleTimeout(); + } + }); + this.on(CHANNEL_EVENTS.reply, (payload, ref) => { + this.trigger(this.replyEventName(ref), payload); + }); + } + join(timeout = this.timeout) { + if (this.joinedOnce) { + throw new Error("tried to join multiple times. 
'join' can only be called a single time per channel instance"); + } else { + this.timeout = timeout; + this.joinedOnce = true; + this.rejoin(); + return this.joinPush; + } + } + onClose(callback) { + this.on(CHANNEL_EVENTS.close, callback); + } + onError(callback) { + return this.on(CHANNEL_EVENTS.error, (reason) => callback(reason)); + } + on(event, callback) { + let ref = this.bindingRef++; + this.bindings.push({ event, ref, callback }); + return ref; + } + off(event, ref) { + this.bindings = this.bindings.filter((bind) => { + return !(bind.event === event && (typeof ref === "undefined" || ref === bind.ref)); + }); + } + canPush() { + return this.socket.isConnected() && this.isJoined(); + } + push(event, payload, timeout = this.timeout) { + payload = payload || {}; + if (!this.joinedOnce) { + throw new Error(`tried to push '${event}' to '${this.topic}' before joining. Use channel.join() before pushing events`); + } + let pushEvent = new Push(this, event, function() { + return payload; + }, timeout); + if (this.canPush()) { + pushEvent.send(); + } else { + pushEvent.startTimeout(); + this.pushBuffer.push(pushEvent); + } + return pushEvent; + } + leave(timeout = this.timeout) { + this.rejoinTimer.reset(); + this.joinPush.cancelTimeout(); + this.state = CHANNEL_STATES.leaving; + let onClose = () => { + if (this.socket.hasLogger()) + this.socket.log("channel", `leave ${this.topic}`); + this.trigger(CHANNEL_EVENTS.close, "leave"); + }; + let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout); + leavePush.receive("ok", () => onClose()).receive("timeout", () => onClose()); + leavePush.send(); + if (!this.canPush()) { + leavePush.trigger("ok", {}); + } + return leavePush; + } + onMessage(_event, payload, _ref) { + return payload; + } + isMember(topic, event, payload, joinRef) { + if (this.topic !== topic) { + return false; + } + if (joinRef && joinRef !== this.joinRef()) { + if (this.socket.hasLogger()) + this.socket.log("channel", "dropping outdated message", { topic, event, payload, joinRef }); + return false; + } else { + return true; + } + } + joinRef() { + return this.joinPush.ref; + } + rejoin(timeout = this.timeout) { + if (this.isLeaving()) { + return; + } + this.socket.leaveOpenTopic(this.topic); + this.state = CHANNEL_STATES.joining; + this.joinPush.resend(timeout); + } + trigger(event, payload, ref, joinRef) { + let handledPayload = this.onMessage(event, payload, ref, joinRef); + if (payload && !handledPayload) { + throw new Error("channel onMessage callbacks must return the payload, modified or unmodified"); + } + let eventBindings = this.bindings.filter((bind) => bind.event === event); + for (let i = 0; i < eventBindings.length; i++) { + let bind = eventBindings[i]; + bind.callback(handledPayload, ref, joinRef || this.joinRef()); + } + } + replyEventName(ref) { + return `chan_reply_${ref}`; + } + isClosed() { + return this.state === CHANNEL_STATES.closed; + } + isErrored() { + return this.state === CHANNEL_STATES.errored; + } + isJoined() { + return this.state === CHANNEL_STATES.joined; + } + isJoining() { + return this.state === CHANNEL_STATES.joining; + } + isLeaving() { + return this.state === CHANNEL_STATES.leaving; + } +}; + +// js/phoenix/ajax.js +var Ajax = class { + static request(method, endPoint, accept, body, timeout, ontimeout, callback) { + if (global.XDomainRequest) { + let req = new global.XDomainRequest(); + return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback); + } else { + let req = new global.XMLHttpRequest(); + 
return this.xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback); + } + } + static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback) { + req.timeout = timeout; + req.open(method, endPoint); + req.onload = () => { + let response = this.parseJSON(req.responseText); + callback && callback(response); + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.onprogress = () => { + }; + req.send(body); + return req; + } + static xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback) { + req.open(method, endPoint, true); + req.timeout = timeout; + req.setRequestHeader("Content-Type", accept); + req.onerror = () => callback && callback(null); + req.onreadystatechange = () => { + if (req.readyState === XHR_STATES.complete && callback) { + let response = this.parseJSON(req.responseText); + callback(response); + } + }; + if (ontimeout) { + req.ontimeout = ontimeout; + } + req.send(body); + return req; + } + static parseJSON(resp) { + if (!resp || resp === "") { + return null; + } + try { + return JSON.parse(resp); + } catch (e) { + console && console.log("failed to parse JSON response", resp); + return null; + } + } + static serialize(obj, parentKey) { + let queryStr = []; + for (var key in obj) { + if (!Object.prototype.hasOwnProperty.call(obj, key)) { + continue; + } + let paramKey = parentKey ? `${parentKey}[${key}]` : key; + let paramVal = obj[key]; + if (typeof paramVal === "object") { + queryStr.push(this.serialize(paramVal, paramKey)); + } else { + queryStr.push(encodeURIComponent(paramKey) + "=" + encodeURIComponent(paramVal)); + } + } + return queryStr.join("&"); + } + static appendParams(url, params) { + if (Object.keys(params).length === 0) { + return url; + } + let prefix = url.match(/\?/) ? 
"&" : "?"; + return `${url}${prefix}${this.serialize(params)}`; + } +}; + +// js/phoenix/longpoll.js +var LongPoll = class { + constructor(endPoint) { + this.endPoint = null; + this.token = null; + this.skipHeartbeat = true; + this.reqs = /* @__PURE__ */ new Set(); + this.onopen = function() { + }; + this.onerror = function() { + }; + this.onmessage = function() { + }; + this.onclose = function() { + }; + this.pollEndpoint = this.normalizeEndpoint(endPoint); + this.readyState = SOCKET_STATES.connecting; + this.poll(); + } + normalizeEndpoint(endPoint) { + return endPoint.replace("ws://", "http://").replace("wss://", "https://").replace(new RegExp("(.*)/" + TRANSPORTS.websocket), "$1/" + TRANSPORTS.longpoll); + } + endpointURL() { + return Ajax.appendParams(this.pollEndpoint, { token: this.token }); + } + closeAndRetry(code, reason, wasClean) { + this.close(code, reason, wasClean); + this.readyState = SOCKET_STATES.connecting; + } + ontimeout() { + this.onerror("timeout"); + this.closeAndRetry(1005, "timeout", false); + } + isActive() { + return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting; + } + poll() { + this.ajax("GET", null, () => this.ontimeout(), (resp) => { + if (resp) { + var { status, token, messages } = resp; + this.token = token; + } else { + status = 0; + } + switch (status) { + case 200: + messages.forEach((msg) => { + setTimeout(() => this.onmessage({ data: msg }), 0); + }); + this.poll(); + break; + case 204: + this.poll(); + break; + case 410: + this.readyState = SOCKET_STATES.open; + this.onopen({}); + this.poll(); + break; + case 403: + this.onerror(403); + this.close(1008, "forbidden", false); + break; + case 0: + case 500: + this.onerror(500); + this.closeAndRetry(1011, "internal server error", 500); + break; + default: + throw new Error(`unhandled poll status ${status}`); + } + }); + } + send(body) { + this.ajax("POST", body, () => this.onerror("timeout"), (resp) => { + if (!resp || resp.status !== 200) { + this.onerror(resp && resp.status); + this.closeAndRetry(1011, "internal server error", false); + } + }); + } + close(code, reason, wasClean) { + for (let req of this.reqs) { + req.abort(); + } + this.readyState = SOCKET_STATES.closed; + let opts = Object.assign({ code: 1e3, reason: void 0, wasClean: true }, { code, reason, wasClean }); + if (typeof CloseEvent !== "undefined") { + this.onclose(new CloseEvent("close", opts)); + } else { + this.onclose(opts); + } + } + ajax(method, body, onCallerTimeout, callback) { + let req; + let ontimeout = () => { + this.reqs.delete(req); + onCallerTimeout(); + }; + req = Ajax.request(method, this.endpointURL(), "application/json", body, this.timeout, ontimeout, (resp) => { + this.reqs.delete(req); + if (this.isActive()) { + callback(resp); + } + }); + this.reqs.add(req); + } +}; + +// js/phoenix/presence.js +var Presence = class { + constructor(channel, opts = {}) { + let events = opts.events || { state: "presence_state", diff: "presence_diff" }; + this.state = {}; + this.pendingDiffs = []; + this.channel = channel; + this.joinRef = null; + this.caller = { + onJoin: function() { + }, + onLeave: function() { + }, + onSync: function() { + } + }; + this.channel.on(events.state, (newState) => { + let { onJoin, onLeave, onSync } = this.caller; + this.joinRef = this.channel.joinRef(); + this.state = Presence.syncState(this.state, newState, onJoin, onLeave); + this.pendingDiffs.forEach((diff) => { + this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave); + }); + this.pendingDiffs = 
[]; + onSync(); + }); + this.channel.on(events.diff, (diff) => { + let { onJoin, onLeave, onSync } = this.caller; + if (this.inPendingSyncState()) { + this.pendingDiffs.push(diff); + } else { + this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave); + onSync(); + } + }); + } + onJoin(callback) { + this.caller.onJoin = callback; + } + onLeave(callback) { + this.caller.onLeave = callback; + } + onSync(callback) { + this.caller.onSync = callback; + } + list(by) { + return Presence.list(this.state, by); + } + inPendingSyncState() { + return !this.joinRef || this.joinRef !== this.channel.joinRef(); + } + static syncState(currentState, newState, onJoin, onLeave) { + let state = this.clone(currentState); + let joins = {}; + let leaves = {}; + this.map(state, (key, presence) => { + if (!newState[key]) { + leaves[key] = presence; + } + }); + this.map(newState, (key, newPresence) => { + let currentPresence = state[key]; + if (currentPresence) { + let newRefs = newPresence.metas.map((m) => m.phx_ref); + let curRefs = currentPresence.metas.map((m) => m.phx_ref); + let joinedMetas = newPresence.metas.filter((m) => curRefs.indexOf(m.phx_ref) < 0); + let leftMetas = currentPresence.metas.filter((m) => newRefs.indexOf(m.phx_ref) < 0); + if (joinedMetas.length > 0) { + joins[key] = newPresence; + joins[key].metas = joinedMetas; + } + if (leftMetas.length > 0) { + leaves[key] = this.clone(currentPresence); + leaves[key].metas = leftMetas; + } + } else { + joins[key] = newPresence; + } + }); + return this.syncDiff(state, { joins, leaves }, onJoin, onLeave); + } + static syncDiff(state, diff, onJoin, onLeave) { + let { joins, leaves } = this.clone(diff); + if (!onJoin) { + onJoin = function() { + }; + } + if (!onLeave) { + onLeave = function() { + }; + } + this.map(joins, (key, newPresence) => { + let currentPresence = state[key]; + state[key] = this.clone(newPresence); + if (currentPresence) { + let joinedRefs = state[key].metas.map((m) => m.phx_ref); + let curMetas = currentPresence.metas.filter((m) => joinedRefs.indexOf(m.phx_ref) < 0); + state[key].metas.unshift(...curMetas); + } + onJoin(key, currentPresence, newPresence); + }); + this.map(leaves, (key, leftPresence) => { + let currentPresence = state[key]; + if (!currentPresence) { + return; + } + let refsToRemove = leftPresence.metas.map((m) => m.phx_ref); + currentPresence.metas = currentPresence.metas.filter((p) => { + return refsToRemove.indexOf(p.phx_ref) < 0; + }); + onLeave(key, currentPresence, leftPresence); + if (currentPresence.metas.length === 0) { + delete state[key]; + } + }); + return state; + } + static list(presences, chooser) { + if (!chooser) { + chooser = function(key, pres) { + return pres; + }; + } + return this.map(presences, (key, presence) => { + return chooser(key, presence); + }); + } + static map(obj, func) { + return Object.getOwnPropertyNames(obj).map((key) => func(key, obj[key])); + } + static clone(obj) { + return JSON.parse(JSON.stringify(obj)); + } +}; + +// js/phoenix/serializer.js +var serializer_default = { + HEADER_LENGTH: 1, + META_LENGTH: 4, + KINDS: { push: 0, reply: 1, broadcast: 2 }, + encode(msg, callback) { + if (msg.payload.constructor === ArrayBuffer) { + return callback(this.binaryEncode(msg)); + } else { + let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]; + return callback(JSON.stringify(payload)); + } + }, + decode(rawPayload, callback) { + if (rawPayload.constructor === ArrayBuffer) { + return callback(this.binaryDecode(rawPayload)); + } else { + let [join_ref, ref, 
topic, event, payload] = JSON.parse(rawPayload); + return callback({ join_ref, ref, topic, event, payload }); + } + }, + binaryEncode(message) { + let { join_ref, ref, event, topic, payload } = message; + let metaLength = this.META_LENGTH + join_ref.length + ref.length + topic.length + event.length; + let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength); + let view = new DataView(header); + let offset = 0; + view.setUint8(offset++, this.KINDS.push); + view.setUint8(offset++, join_ref.length); + view.setUint8(offset++, ref.length); + view.setUint8(offset++, topic.length); + view.setUint8(offset++, event.length); + Array.from(join_ref, (char) => view.setUint8(offset++, char.charCodeAt(0))); + Array.from(ref, (char) => view.setUint8(offset++, char.charCodeAt(0))); + Array.from(topic, (char) => view.setUint8(offset++, char.charCodeAt(0))); + Array.from(event, (char) => view.setUint8(offset++, char.charCodeAt(0))); + var combined = new Uint8Array(header.byteLength + payload.byteLength); + combined.set(new Uint8Array(header), 0); + combined.set(new Uint8Array(payload), header.byteLength); + return combined.buffer; + }, + binaryDecode(buffer) { + let view = new DataView(buffer); + let kind = view.getUint8(0); + let decoder = new TextDecoder(); + switch (kind) { + case this.KINDS.push: + return this.decodePush(buffer, view, decoder); + case this.KINDS.reply: + return this.decodeReply(buffer, view, decoder); + case this.KINDS.broadcast: + return this.decodeBroadcast(buffer, view, decoder); + } + }, + decodePush(buffer, view, decoder) { + let joinRefSize = view.getUint8(1); + let topicSize = view.getUint8(2); + let eventSize = view.getUint8(3); + let offset = this.HEADER_LENGTH + this.META_LENGTH - 1; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: joinRef, ref: null, topic, event, payload: data }; + }, + decodeReply(buffer, view, decoder) { + let joinRefSize = view.getUint8(1); + let refSize = view.getUint8(2); + let topicSize = view.getUint8(3); + let eventSize = view.getUint8(4); + let offset = this.HEADER_LENGTH + this.META_LENGTH; + let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize)); + offset = offset + joinRefSize; + let ref = decoder.decode(buffer.slice(offset, offset + refSize)); + offset = offset + refSize; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + let payload = { status: event, response: data }; + return { join_ref: joinRef, ref, topic, event: CHANNEL_EVENTS.reply, payload }; + }, + decodeBroadcast(buffer, view, decoder) { + let topicSize = view.getUint8(1); + let eventSize = view.getUint8(2); + let offset = this.HEADER_LENGTH + 2; + let topic = decoder.decode(buffer.slice(offset, offset + topicSize)); + offset = offset + topicSize; + let event = decoder.decode(buffer.slice(offset, offset + eventSize)); + offset = offset + eventSize; + let data = buffer.slice(offset, buffer.byteLength); + return { join_ref: null, ref: null, topic, event, payload: data }; + } +}; + +// js/phoenix/socket.js +var Socket = class 
{ + constructor(endPoint, opts = {}) { + this.stateChangeCallbacks = { open: [], close: [], error: [], message: [] }; + this.channels = []; + this.sendBuffer = []; + this.ref = 0; + this.timeout = opts.timeout || DEFAULT_TIMEOUT; + this.transport = opts.transport || global.WebSocket || LongPoll; + this.establishedConnections = 0; + this.defaultEncoder = serializer_default.encode.bind(serializer_default); + this.defaultDecoder = serializer_default.decode.bind(serializer_default); + this.closeWasClean = false; + this.binaryType = opts.binaryType || "arraybuffer"; + this.connectClock = 1; + if (this.transport !== LongPoll) { + this.encode = opts.encode || this.defaultEncoder; + this.decode = opts.decode || this.defaultDecoder; + } else { + this.encode = this.defaultEncoder; + this.decode = this.defaultDecoder; + } + let awaitingConnectionOnPageShow = null; + if (phxWindow && phxWindow.addEventListener) { + phxWindow.addEventListener("pagehide", (_e) => { + if (this.conn) { + this.disconnect(); + awaitingConnectionOnPageShow = this.connectClock; + } + }); + phxWindow.addEventListener("pageshow", (_e) => { + if (awaitingConnectionOnPageShow === this.connectClock) { + awaitingConnectionOnPageShow = null; + this.connect(); + } + }); + } + this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 3e4; + this.rejoinAfterMs = (tries) => { + if (opts.rejoinAfterMs) { + return opts.rejoinAfterMs(tries); + } else { + return [1e3, 2e3, 5e3][tries - 1] || 1e4; + } + }; + this.reconnectAfterMs = (tries) => { + if (opts.reconnectAfterMs) { + return opts.reconnectAfterMs(tries); + } else { + return [10, 50, 100, 150, 200, 250, 500, 1e3, 2e3][tries - 1] || 5e3; + } + }; + this.logger = opts.logger || null; + this.longpollerTimeout = opts.longpollerTimeout || 2e4; + this.params = closure(opts.params || {}); + this.endPoint = `${endPoint}/${TRANSPORTS.websocket}`; + this.vsn = opts.vsn || DEFAULT_VSN; + this.heartbeatTimer = null; + this.pendingHeartbeatRef = null; + this.reconnectTimer = new Timer(() => { + this.teardown(() => this.connect()); + }, this.reconnectAfterMs); + } + getLongPollTransport() { + return LongPoll; + } + replaceTransport(newTransport) { + this.connectClock++; + this.closeWasClean = true; + this.reconnectTimer.reset(); + this.sendBuffer = []; + if (this.conn) { + this.conn.close(); + this.conn = null; + } + this.transport = newTransport; + } + protocol() { + return location.protocol.match(/^https/) ? "wss" : "ws"; + } + endPointURL() { + let uri = Ajax.appendParams(Ajax.appendParams(this.endPoint, this.params()), { vsn: this.vsn }); + if (uri.charAt(0) !== "/") { + return uri; + } + if (uri.charAt(1) === "/") { + return `${this.protocol()}:${uri}`; + } + return `${this.protocol()}://${location.host}${uri}`; + } + disconnect(callback, code, reason) { + this.connectClock++; + this.closeWasClean = true; + this.reconnectTimer.reset(); + this.teardown(callback, code, reason); + } + connect(params) { + if (params) { + console && console.log("passing params to connect is deprecated. 
Instead pass :params to the Socket constructor"); + this.params = closure(params); + } + if (this.conn) { + return; + } + this.connectClock++; + this.closeWasClean = false; + this.conn = new this.transport(this.endPointURL()); + this.conn.binaryType = this.binaryType; + this.conn.timeout = this.longpollerTimeout; + this.conn.onopen = () => this.onConnOpen(); + this.conn.onerror = (error) => this.onConnError(error); + this.conn.onmessage = (event) => this.onConnMessage(event); + this.conn.onclose = (event) => this.onConnClose(event); + } + log(kind, msg, data) { + this.logger(kind, msg, data); + } + hasLogger() { + return this.logger !== null; + } + onOpen(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.open.push([ref, callback]); + return ref; + } + onClose(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.close.push([ref, callback]); + return ref; + } + onError(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.error.push([ref, callback]); + return ref; + } + onMessage(callback) { + let ref = this.makeRef(); + this.stateChangeCallbacks.message.push([ref, callback]); + return ref; + } + ping(callback) { + if (!this.isConnected()) { + return false; + } + let ref = this.makeRef(); + let startTime = Date.now(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref }); + let onMsgRef = this.onMessage((msg) => { + if (msg.ref === ref) { + this.off([onMsgRef]); + callback(Date.now() - startTime); + } + }); + return true; + } + onConnOpen() { + if (this.hasLogger()) + this.log("transport", `connected to ${this.endPointURL()}`); + this.closeWasClean = false; + this.establishedConnections++; + this.flushSendBuffer(); + this.reconnectTimer.reset(); + this.resetHeartbeat(); + this.stateChangeCallbacks.open.forEach(([, callback]) => callback()); + } + heartbeatTimeout() { + if (this.pendingHeartbeatRef) { + this.pendingHeartbeatRef = null; + if (this.hasLogger()) { + this.log("transport", "heartbeat timeout. 
Attempting to re-establish connection"); + } + this.abnormalClose("heartbeat timeout"); + } + } + resetHeartbeat() { + if (this.conn && this.conn.skipHeartbeat) { + return; + } + this.pendingHeartbeatRef = null; + clearTimeout(this.heartbeatTimer); + setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + teardown(callback, code, reason) { + if (!this.conn) { + return callback && callback(); + } + this.waitForBufferDone(() => { + if (this.conn) { + if (code) { + this.conn.close(code, reason || ""); + } else { + this.conn.close(); + } + } + this.waitForSocketClosed(() => { + if (this.conn) { + this.conn.onclose = function() { + }; + this.conn = null; + } + callback && callback(); + }); + }); + } + waitForBufferDone(callback, tries = 1) { + if (tries === 5 || !this.conn || !this.conn.bufferedAmount) { + callback(); + return; + } + setTimeout(() => { + this.waitForBufferDone(callback, tries + 1); + }, 150 * tries); + } + waitForSocketClosed(callback, tries = 1) { + if (tries === 5 || !this.conn || this.conn.readyState === SOCKET_STATES.closed) { + callback(); + return; + } + setTimeout(() => { + this.waitForSocketClosed(callback, tries + 1); + }, 150 * tries); + } + onConnClose(event) { + let closeCode = event && event.code; + if (this.hasLogger()) + this.log("transport", "close", event); + this.triggerChanError(); + clearTimeout(this.heartbeatTimer); + if (!this.closeWasClean && closeCode !== 1e3) { + this.reconnectTimer.scheduleTimeout(); + } + this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event)); + } + onConnError(error) { + if (this.hasLogger()) + this.log("transport", error); + let transportBefore = this.transport; + let establishedBefore = this.establishedConnections; + this.stateChangeCallbacks.error.forEach(([, callback]) => { + callback(error, transportBefore, establishedBefore); + }); + if (transportBefore === this.transport || establishedBefore > 0) { + this.triggerChanError(); + } + } + triggerChanError() { + this.channels.forEach((channel) => { + if (!(channel.isErrored() || channel.isLeaving() || channel.isClosed())) { + channel.trigger(CHANNEL_EVENTS.error); + } + }); + } + connectionState() { + switch (this.conn && this.conn.readyState) { + case SOCKET_STATES.connecting: + return "connecting"; + case SOCKET_STATES.open: + return "open"; + case SOCKET_STATES.closing: + return "closing"; + default: + return "closed"; + } + } + isConnected() { + return this.connectionState() === "open"; + } + remove(channel) { + this.off(channel.stateChangeRefs); + this.channels = this.channels.filter((c) => c.joinRef() !== channel.joinRef()); + } + off(refs) { + for (let key in this.stateChangeCallbacks) { + this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => { + return refs.indexOf(ref) === -1; + }); + } + } + channel(topic, chanParams = {}) { + let chan = new Channel(topic, chanParams, this); + this.channels.push(chan); + return chan; + } + push(data) { + if (this.hasLogger()) { + let { topic, event, payload, ref, join_ref } = data; + this.log("push", `${topic} ${event} (${join_ref}, ${ref})`, payload); + } + if (this.isConnected()) { + this.encode(data, (result) => this.conn.send(result)); + } else { + this.sendBuffer.push(() => this.encode(data, (result) => this.conn.send(result))); + } + } + makeRef() { + let newRef = this.ref + 1; + if (newRef === this.ref) { + this.ref = 0; + } else { + this.ref = newRef; + } + return this.ref.toString(); + } + sendHeartbeat() { + if (this.pendingHeartbeatRef && !this.isConnected()) { + 
return; + } + this.pendingHeartbeatRef = this.makeRef(); + this.push({ topic: "phoenix", event: "heartbeat", payload: {}, ref: this.pendingHeartbeatRef }); + this.heartbeatTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs); + } + abnormalClose(reason) { + this.closeWasClean = false; + if (this.isConnected()) { + this.conn.close(WS_CLOSE_NORMAL, reason); + } + } + flushSendBuffer() { + if (this.isConnected() && this.sendBuffer.length > 0) { + this.sendBuffer.forEach((callback) => callback()); + this.sendBuffer = []; + } + } + onConnMessage(rawMessage) { + this.decode(rawMessage.data, (msg) => { + let { topic, event, payload, ref, join_ref } = msg; + if (ref && ref === this.pendingHeartbeatRef) { + clearTimeout(this.heartbeatTimer); + this.pendingHeartbeatRef = null; + setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs); + } + if (this.hasLogger()) + this.log("receive", `${payload.status || ""} ${topic} ${event} ${ref && "(" + ref + ")" || ""}`, payload); + for (let i = 0; i < this.channels.length; i++) { + const channel = this.channels[i]; + if (!channel.isMember(topic, event, payload, join_ref)) { + continue; + } + channel.trigger(event, payload, ref, join_ref); + } + for (let i = 0; i < this.stateChangeCallbacks.message.length; i++) { + let [, callback] = this.stateChangeCallbacks.message[i]; + callback(msg); + } + }); + } + leaveOpenTopic(topic) { + let dupChannel = this.channels.find((c) => c.topic === topic && (c.isJoined() || c.isJoining())); + if (dupChannel) { + if (this.hasLogger()) + this.log("transport", `leaving duplicate topic "${topic}"`); + dupChannel.leave(); + } + } +}; +export { + Channel, + LongPoll, + Presence, + serializer_default as Serializer, + Socket +}; +//# sourceMappingURL=phoenix.mjs.map diff --git a/deps/phoenix/priv/static/phoenix.mjs.map b/deps/phoenix/priv/static/phoenix.mjs.map new file mode 100644 index 0000000..04024b9 --- /dev/null +++ b/deps/phoenix/priv/static/phoenix.mjs.map @@ -0,0 +1,7 @@ +{ + "version": 3, + "sources": ["../../assets/js/phoenix/utils.js", "../../assets/js/phoenix/constants.js", "../../assets/js/phoenix/push.js", "../../assets/js/phoenix/timer.js", "../../assets/js/phoenix/channel.js", "../../assets/js/phoenix/ajax.js", "../../assets/js/phoenix/longpoll.js", "../../assets/js/phoenix/presence.js", "../../assets/js/phoenix/serializer.js", "../../assets/js/phoenix/socket.js"], + "sourcesContent": ["// wraps value in closure or returns closure\nexport let closure = (value) => {\n if(typeof value === \"function\"){\n return value\n } else {\n let closure = function (){ return value }\n return closure\n }\n}\n", "export const globalSelf = typeof self !== \"undefined\" ? self : null\nexport const phxWindow = typeof window !== \"undefined\" ? 
window : null\nexport const global = globalSelf || phxWindow || global\nexport const DEFAULT_VSN = \"2.0.0\"\nexport const SOCKET_STATES = {connecting: 0, open: 1, closing: 2, closed: 3}\nexport const DEFAULT_TIMEOUT = 10000\nexport const WS_CLOSE_NORMAL = 1000\nexport const CHANNEL_STATES = {\n closed: \"closed\",\n errored: \"errored\",\n joined: \"joined\",\n joining: \"joining\",\n leaving: \"leaving\",\n}\nexport const CHANNEL_EVENTS = {\n close: \"phx_close\",\n error: \"phx_error\",\n join: \"phx_join\",\n reply: \"phx_reply\",\n leave: \"phx_leave\"\n}\n\nexport const TRANSPORTS = {\n longpoll: \"longpoll\",\n websocket: \"websocket\"\n}\nexport const XHR_STATES = {\n complete: 4\n}\n", "/**\n * Initializes the Push\n * @param {Channel} channel - The Channel\n * @param {string} event - The event, for example `\"phx_join\"`\n * @param {Object} payload - The payload, for example `{user_id: 123}`\n * @param {number} timeout - The push timeout in milliseconds\n */\nexport default class Push {\n constructor(channel, event, payload, timeout){\n this.channel = channel\n this.event = event\n this.payload = payload || function (){ return {} }\n this.receivedResp = null\n this.timeout = timeout\n this.timeoutTimer = null\n this.recHooks = []\n this.sent = false\n }\n\n /**\n *\n * @param {number} timeout\n */\n resend(timeout){\n this.timeout = timeout\n this.reset()\n this.send()\n }\n\n /**\n *\n */\n send(){\n if(this.hasReceived(\"timeout\")){ return }\n this.startTimeout()\n this.sent = true\n this.channel.socket.push({\n topic: this.channel.topic,\n event: this.event,\n payload: this.payload(),\n ref: this.ref,\n join_ref: this.channel.joinRef()\n })\n }\n\n /**\n *\n * @param {*} status\n * @param {*} callback\n */\n receive(status, callback){\n if(this.hasReceived(status)){\n callback(this.receivedResp.response)\n }\n\n this.recHooks.push({status, callback})\n return this\n }\n\n /**\n * @private\n */\n reset(){\n this.cancelRefEvent()\n this.ref = null\n this.refEvent = null\n this.receivedResp = null\n this.sent = false\n }\n\n /**\n * @private\n */\n matchReceive({status, response, _ref}){\n this.recHooks.filter(h => h.status === status)\n .forEach(h => h.callback(response))\n }\n\n /**\n * @private\n */\n cancelRefEvent(){\n if(!this.refEvent){ return }\n this.channel.off(this.refEvent)\n }\n\n /**\n * @private\n */\n cancelTimeout(){\n clearTimeout(this.timeoutTimer)\n this.timeoutTimer = null\n }\n\n /**\n * @private\n */\n startTimeout(){\n if(this.timeoutTimer){ this.cancelTimeout() }\n this.ref = this.channel.socket.makeRef()\n this.refEvent = this.channel.replyEventName(this.ref)\n\n this.channel.on(this.refEvent, payload => {\n this.cancelRefEvent()\n this.cancelTimeout()\n this.receivedResp = payload\n this.matchReceive(payload)\n })\n\n this.timeoutTimer = setTimeout(() => {\n this.trigger(\"timeout\", {})\n }, this.timeout)\n }\n\n /**\n * @private\n */\n hasReceived(status){\n return this.receivedResp && this.receivedResp.status === status\n }\n\n /**\n * @private\n */\n trigger(status, response){\n this.channel.trigger(this.refEvent, {status, response})\n }\n}\n", "/**\n *\n * Creates a timer that accepts a `timerCalc` function to perform\n * calculated timeout retries, such as exponential backoff.\n *\n * @example\n * let reconnectTimer = new Timer(() => this.connect(), function(tries){\n * return [1000, 5000, 10000][tries - 1] || 10000\n * })\n * reconnectTimer.scheduleTimeout() // fires after 1000\n * reconnectTimer.scheduleTimeout() // fires after 5000\n * 
reconnectTimer.reset()\n * reconnectTimer.scheduleTimeout() // fires after 1000\n *\n * @param {Function} callback\n * @param {Function} timerCalc\n */\nexport default class Timer {\n constructor(callback, timerCalc){\n this.callback = callback\n this.timerCalc = timerCalc\n this.timer = null\n this.tries = 0\n }\n\n reset(){\n this.tries = 0\n clearTimeout(this.timer)\n }\n\n /**\n * Cancels any previous scheduleTimeout and schedules callback\n */\n scheduleTimeout(){\n clearTimeout(this.timer)\n\n this.timer = setTimeout(() => {\n this.tries = this.tries + 1\n this.callback()\n }, this.timerCalc(this.tries + 1))\n }\n}\n", "import {closure} from \"./utils\"\nimport {\n CHANNEL_EVENTS,\n CHANNEL_STATES,\n} from \"./constants\"\n\nimport Push from \"./push\"\nimport Timer from \"./timer\"\n\n/**\n *\n * @param {string} topic\n * @param {(Object|function)} params\n * @param {Socket} socket\n */\nexport default class Channel {\n constructor(topic, params, socket){\n this.state = CHANNEL_STATES.closed\n this.topic = topic\n this.params = closure(params || {})\n this.socket = socket\n this.bindings = []\n this.bindingRef = 0\n this.timeout = this.socket.timeout\n this.joinedOnce = false\n this.joinPush = new Push(this, CHANNEL_EVENTS.join, this.params, this.timeout)\n this.pushBuffer = []\n this.stateChangeRefs = []\n\n this.rejoinTimer = new Timer(() => {\n if(this.socket.isConnected()){ this.rejoin() }\n }, this.socket.rejoinAfterMs)\n this.stateChangeRefs.push(this.socket.onError(() => this.rejoinTimer.reset()))\n this.stateChangeRefs.push(this.socket.onOpen(() => {\n this.rejoinTimer.reset()\n if(this.isErrored()){ this.rejoin() }\n })\n )\n this.joinPush.receive(\"ok\", () => {\n this.state = CHANNEL_STATES.joined\n this.rejoinTimer.reset()\n this.pushBuffer.forEach(pushEvent => pushEvent.send())\n this.pushBuffer = []\n })\n this.joinPush.receive(\"error\", () => {\n this.state = CHANNEL_STATES.errored\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.onClose(() => {\n this.rejoinTimer.reset()\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `close ${this.topic} ${this.joinRef()}`)\n this.state = CHANNEL_STATES.closed\n this.socket.remove(this)\n })\n this.onError(reason => {\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `error ${this.topic}`, reason)\n if(this.isJoining()){ this.joinPush.reset() }\n this.state = CHANNEL_STATES.errored\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.joinPush.receive(\"timeout\", () => {\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `timeout ${this.topic} (${this.joinRef()})`, this.joinPush.timeout)\n let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), this.timeout)\n leavePush.send()\n this.state = CHANNEL_STATES.errored\n this.joinPush.reset()\n if(this.socket.isConnected()){ this.rejoinTimer.scheduleTimeout() }\n })\n this.on(CHANNEL_EVENTS.reply, (payload, ref) => {\n this.trigger(this.replyEventName(ref), payload)\n })\n }\n\n /**\n * Join the channel\n * @param {integer} timeout\n * @returns {Push}\n */\n join(timeout = this.timeout){\n if(this.joinedOnce){\n throw new Error(\"tried to join multiple times. 
'join' can only be called a single time per channel instance\")\n } else {\n this.timeout = timeout\n this.joinedOnce = true\n this.rejoin()\n return this.joinPush\n }\n }\n\n /**\n * Hook into channel close\n * @param {Function} callback\n */\n onClose(callback){\n this.on(CHANNEL_EVENTS.close, callback)\n }\n\n /**\n * Hook into channel errors\n * @param {Function} callback\n */\n onError(callback){\n return this.on(CHANNEL_EVENTS.error, reason => callback(reason))\n }\n\n /**\n * Subscribes on channel events\n *\n * Subscription returns a ref counter, which can be used later to\n * unsubscribe the exact event listener\n *\n * @example\n * const ref1 = channel.on(\"event\", do_stuff)\n * const ref2 = channel.on(\"event\", do_other_stuff)\n * channel.off(\"event\", ref1)\n * // Since unsubscription, do_stuff won't fire,\n * // while do_other_stuff will keep firing on the \"event\"\n *\n * @param {string} event\n * @param {Function} callback\n * @returns {integer} ref\n */\n on(event, callback){\n let ref = this.bindingRef++\n this.bindings.push({event, ref, callback})\n return ref\n }\n\n /**\n * Unsubscribes off of channel events\n *\n * Use the ref returned from a channel.on() to unsubscribe one\n * handler, or pass nothing for the ref to unsubscribe all\n * handlers for the given event.\n *\n * @example\n * // Unsubscribe the do_stuff handler\n * const ref1 = channel.on(\"event\", do_stuff)\n * channel.off(\"event\", ref1)\n *\n * // Unsubscribe all handlers from event\n * channel.off(\"event\")\n *\n * @param {string} event\n * @param {integer} ref\n */\n off(event, ref){\n this.bindings = this.bindings.filter((bind) => {\n return !(bind.event === event && (typeof ref === \"undefined\" || ref === bind.ref))\n })\n }\n\n /**\n * @private\n */\n canPush(){ return this.socket.isConnected() && this.isJoined() }\n\n /**\n * Sends a message `event` to phoenix with the payload `payload`.\n * Phoenix receives this in the `handle_in(event, payload, socket)`\n * function. if phoenix replies or it times out (default 10000ms),\n * then optionally the reply can be received.\n *\n * @example\n * channel.push(\"event\")\n * .receive(\"ok\", payload => console.log(\"phoenix replied:\", payload))\n * .receive(\"error\", err => console.log(\"phoenix errored\", err))\n * .receive(\"timeout\", () => console.log(\"timed out pushing\"))\n * @param {string} event\n * @param {Object} payload\n * @param {number} [timeout]\n * @returns {Push}\n */\n push(event, payload, timeout = this.timeout){\n payload = payload || {}\n if(!this.joinedOnce){\n throw new Error(`tried to push '${event}' to '${this.topic}' before joining. 
Use channel.join() before pushing events`)\n }\n let pushEvent = new Push(this, event, function (){ return payload }, timeout)\n if(this.canPush()){\n pushEvent.send()\n } else {\n pushEvent.startTimeout()\n this.pushBuffer.push(pushEvent)\n }\n\n return pushEvent\n }\n\n /** Leaves the channel\n *\n * Unsubscribes from server events, and\n * instructs channel to terminate on server\n *\n * Triggers onClose() hooks\n *\n * To receive leave acknowledgements, use the `receive`\n * hook to bind to the server ack, ie:\n *\n * @example\n * channel.leave().receive(\"ok\", () => alert(\"left!\") )\n *\n * @param {integer} timeout\n * @returns {Push}\n */\n leave(timeout = this.timeout){\n this.rejoinTimer.reset()\n this.joinPush.cancelTimeout()\n\n this.state = CHANNEL_STATES.leaving\n let onClose = () => {\n if(this.socket.hasLogger()) this.socket.log(\"channel\", `leave ${this.topic}`)\n this.trigger(CHANNEL_EVENTS.close, \"leave\")\n }\n let leavePush = new Push(this, CHANNEL_EVENTS.leave, closure({}), timeout)\n leavePush.receive(\"ok\", () => onClose())\n .receive(\"timeout\", () => onClose())\n leavePush.send()\n if(!this.canPush()){ leavePush.trigger(\"ok\", {}) }\n\n return leavePush\n }\n\n /**\n * Overridable message hook\n *\n * Receives all events for specialized message handling\n * before dispatching to the channel callbacks.\n *\n * Must return the payload, modified or unmodified\n * @param {string} event\n * @param {Object} payload\n * @param {integer} ref\n * @returns {Object}\n */\n onMessage(_event, payload, _ref){ return payload }\n\n /**\n * @private\n */\n isMember(topic, event, payload, joinRef){\n if(this.topic !== topic){ return false }\n\n if(joinRef && joinRef !== this.joinRef()){\n if(this.socket.hasLogger()) this.socket.log(\"channel\", \"dropping outdated message\", {topic, event, payload, joinRef})\n return false\n } else {\n return true\n }\n }\n\n /**\n * @private\n */\n joinRef(){ return this.joinPush.ref }\n\n /**\n * @private\n */\n rejoin(timeout = this.timeout){\n if(this.isLeaving()){ return }\n this.socket.leaveOpenTopic(this.topic)\n this.state = CHANNEL_STATES.joining\n this.joinPush.resend(timeout)\n }\n\n /**\n * @private\n */\n trigger(event, payload, ref, joinRef){\n let handledPayload = this.onMessage(event, payload, ref, joinRef)\n if(payload && !handledPayload){ throw new Error(\"channel onMessage callbacks must return the payload, modified or unmodified\") }\n\n let eventBindings = this.bindings.filter(bind => bind.event === event)\n\n for(let i = 0; i < eventBindings.length; i++){\n let bind = eventBindings[i]\n bind.callback(handledPayload, ref, joinRef || this.joinRef())\n }\n }\n\n /**\n * @private\n */\n replyEventName(ref){ return `chan_reply_${ref}` }\n\n /**\n * @private\n */\n isClosed(){ return this.state === CHANNEL_STATES.closed }\n\n /**\n * @private\n */\n isErrored(){ return this.state === CHANNEL_STATES.errored }\n\n /**\n * @private\n */\n isJoined(){ return this.state === CHANNEL_STATES.joined }\n\n /**\n * @private\n */\n isJoining(){ return this.state === CHANNEL_STATES.joining }\n\n /**\n * @private\n */\n isLeaving(){ return this.state === CHANNEL_STATES.leaving }\n}\n", "import {\n global,\n XHR_STATES\n} from \"./constants\"\n\nexport default class Ajax {\n\n static request(method, endPoint, accept, body, timeout, ontimeout, callback){\n if(global.XDomainRequest){\n let req = new global.XDomainRequest() // IE8, IE9\n return this.xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback)\n } else {\n let req = 
new global.XMLHttpRequest() // IE7+, Firefox, Chrome, Opera, Safari\n return this.xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback)\n }\n }\n\n static xdomainRequest(req, method, endPoint, body, timeout, ontimeout, callback){\n req.timeout = timeout\n req.open(method, endPoint)\n req.onload = () => {\n let response = this.parseJSON(req.responseText)\n callback && callback(response)\n }\n if(ontimeout){ req.ontimeout = ontimeout }\n\n // Work around bug in IE9 that requires an attached onprogress handler\n req.onprogress = () => { }\n\n req.send(body)\n return req\n }\n\n static xhrRequest(req, method, endPoint, accept, body, timeout, ontimeout, callback){\n req.open(method, endPoint, true)\n req.timeout = timeout\n req.setRequestHeader(\"Content-Type\", accept)\n req.onerror = () => callback && callback(null)\n req.onreadystatechange = () => {\n if(req.readyState === XHR_STATES.complete && callback){\n let response = this.parseJSON(req.responseText)\n callback(response)\n }\n }\n if(ontimeout){ req.ontimeout = ontimeout }\n\n req.send(body)\n return req\n }\n\n static parseJSON(resp){\n if(!resp || resp === \"\"){ return null }\n\n try {\n return JSON.parse(resp)\n } catch (e){\n console && console.log(\"failed to parse JSON response\", resp)\n return null\n }\n }\n\n static serialize(obj, parentKey){\n let queryStr = []\n for(var key in obj){\n if(!Object.prototype.hasOwnProperty.call(obj, key)){ continue }\n let paramKey = parentKey ? `${parentKey}[${key}]` : key\n let paramVal = obj[key]\n if(typeof paramVal === \"object\"){\n queryStr.push(this.serialize(paramVal, paramKey))\n } else {\n queryStr.push(encodeURIComponent(paramKey) + \"=\" + encodeURIComponent(paramVal))\n }\n }\n return queryStr.join(\"&\")\n }\n\n static appendParams(url, params){\n if(Object.keys(params).length === 0){ return url }\n\n let prefix = url.match(/\\?/) ? 
\"&\" : \"?\"\n return `${url}${prefix}${this.serialize(params)}`\n }\n}\n", "import {\n SOCKET_STATES,\n TRANSPORTS\n} from \"./constants\"\n\nimport Ajax from \"./ajax\"\n\nexport default class LongPoll {\n\n constructor(endPoint){\n this.endPoint = null\n this.token = null\n this.skipHeartbeat = true\n this.reqs = new Set()\n this.onopen = function (){ } // noop\n this.onerror = function (){ } // noop\n this.onmessage = function (){ } // noop\n this.onclose = function (){ } // noop\n this.pollEndpoint = this.normalizeEndpoint(endPoint)\n this.readyState = SOCKET_STATES.connecting\n this.poll()\n }\n\n normalizeEndpoint(endPoint){\n return (endPoint\n .replace(\"ws://\", \"http://\")\n .replace(\"wss://\", \"https://\")\n .replace(new RegExp(\"(.*)\\/\" + TRANSPORTS.websocket), \"$1/\" + TRANSPORTS.longpoll))\n }\n\n endpointURL(){\n return Ajax.appendParams(this.pollEndpoint, {token: this.token})\n }\n\n closeAndRetry(code, reason, wasClean){\n this.close(code, reason, wasClean)\n this.readyState = SOCKET_STATES.connecting\n }\n\n ontimeout(){\n this.onerror(\"timeout\")\n this.closeAndRetry(1005, \"timeout\", false)\n }\n\n isActive(){ return this.readyState === SOCKET_STATES.open || this.readyState === SOCKET_STATES.connecting }\n\n poll(){\n this.ajax(\"GET\", null, () => this.ontimeout(), resp => {\n if(resp){\n var {status, token, messages} = resp\n this.token = token\n } else {\n status = 0\n }\n\n switch(status){\n case 200:\n messages.forEach(msg => {\n // Tasks are what things like event handlers, setTimeout callbacks,\n // promise resolves and more are run within.\n // In modern browsers, there are two different kinds of tasks,\n // microtasks and macrotasks.\n // Microtasks are mainly used for Promises, while macrotasks are\n // used for everything else.\n // Microtasks always have priority over macrotasks. If the JS engine\n // is looking for a task to run, it will always try to empty the\n // microtask queue before attempting to run anything from the\n // macrotask queue.\n //\n // For the WebSocket transport, messages always arrive in their own\n // event. 
This means that if any promises are resolved from within,\n // their callbacks will always finish execution by the time the\n // next message event handler is run.\n //\n // In order to emulate this behaviour, we need to make sure each\n // onmessage handler is run within it's own macrotask.\n setTimeout(() => this.onmessage({data: msg}), 0)\n })\n this.poll()\n break\n case 204:\n this.poll()\n break\n case 410:\n this.readyState = SOCKET_STATES.open\n this.onopen({})\n this.poll()\n break\n case 403:\n this.onerror(403)\n this.close(1008, \"forbidden\", false)\n break\n case 0:\n case 500:\n this.onerror(500)\n this.closeAndRetry(1011, \"internal server error\", 500)\n break\n default: throw new Error(`unhandled poll status ${status}`)\n }\n })\n }\n\n send(body){\n this.ajax(\"POST\", body, () => this.onerror(\"timeout\"), resp => {\n if(!resp || resp.status !== 200){\n this.onerror(resp && resp.status)\n this.closeAndRetry(1011, \"internal server error\", false)\n }\n })\n }\n\n close(code, reason, wasClean){\n for(let req of this.reqs){ req.abort() }\n this.readyState = SOCKET_STATES.closed\n let opts = Object.assign({code: 1000, reason: undefined, wasClean: true}, {code, reason, wasClean})\n if(typeof(CloseEvent) !== \"undefined\"){\n this.onclose(new CloseEvent(\"close\", opts))\n } else {\n this.onclose(opts)\n }\n }\n\n ajax(method, body, onCallerTimeout, callback){\n let req\n let ontimeout = () => {\n this.reqs.delete(req)\n onCallerTimeout()\n }\n req = Ajax.request(method, this.endpointURL(), \"application/json\", body, this.timeout, ontimeout, resp => {\n this.reqs.delete(req)\n if(this.isActive()){ callback(resp) }\n })\n this.reqs.add(req)\n }\n}\n", "/**\n * Initializes the Presence\n * @param {Channel} channel - The Channel\n * @param {Object} opts - The options,\n * for example `{events: {state: \"state\", diff: \"diff\"}}`\n */\nexport default class Presence {\n\n constructor(channel, opts = {}){\n let events = opts.events || {state: \"presence_state\", diff: \"presence_diff\"}\n this.state = {}\n this.pendingDiffs = []\n this.channel = channel\n this.joinRef = null\n this.caller = {\n onJoin: function (){ },\n onLeave: function (){ },\n onSync: function (){ }\n }\n\n this.channel.on(events.state, newState => {\n let {onJoin, onLeave, onSync} = this.caller\n\n this.joinRef = this.channel.joinRef()\n this.state = Presence.syncState(this.state, newState, onJoin, onLeave)\n\n this.pendingDiffs.forEach(diff => {\n this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave)\n })\n this.pendingDiffs = []\n onSync()\n })\n\n this.channel.on(events.diff, diff => {\n let {onJoin, onLeave, onSync} = this.caller\n\n if(this.inPendingSyncState()){\n this.pendingDiffs.push(diff)\n } else {\n this.state = Presence.syncDiff(this.state, diff, onJoin, onLeave)\n onSync()\n }\n })\n }\n\n onJoin(callback){ this.caller.onJoin = callback }\n\n onLeave(callback){ this.caller.onLeave = callback }\n\n onSync(callback){ this.caller.onSync = callback }\n\n list(by){ return Presence.list(this.state, by) }\n\n inPendingSyncState(){\n return !this.joinRef || (this.joinRef !== this.channel.joinRef())\n }\n\n // lower-level public static API\n\n /**\n * Used to sync the list of presences on the server\n * with the client's state. 
An optional `onJoin` and `onLeave` callback can\n * be provided to react to changes in the client's local presences across\n * disconnects and reconnects with the server.\n *\n * @returns {Presence}\n */\n static syncState(currentState, newState, onJoin, onLeave){\n let state = this.clone(currentState)\n let joins = {}\n let leaves = {}\n\n this.map(state, (key, presence) => {\n if(!newState[key]){\n leaves[key] = presence\n }\n })\n this.map(newState, (key, newPresence) => {\n let currentPresence = state[key]\n if(currentPresence){\n let newRefs = newPresence.metas.map(m => m.phx_ref)\n let curRefs = currentPresence.metas.map(m => m.phx_ref)\n let joinedMetas = newPresence.metas.filter(m => curRefs.indexOf(m.phx_ref) < 0)\n let leftMetas = currentPresence.metas.filter(m => newRefs.indexOf(m.phx_ref) < 0)\n if(joinedMetas.length > 0){\n joins[key] = newPresence\n joins[key].metas = joinedMetas\n }\n if(leftMetas.length > 0){\n leaves[key] = this.clone(currentPresence)\n leaves[key].metas = leftMetas\n }\n } else {\n joins[key] = newPresence\n }\n })\n return this.syncDiff(state, {joins: joins, leaves: leaves}, onJoin, onLeave)\n }\n\n /**\n *\n * Used to sync a diff of presence join and leave\n * events from the server, as they happen. Like `syncState`, `syncDiff`\n * accepts optional `onJoin` and `onLeave` callbacks to react to a user\n * joining or leaving from a device.\n *\n * @returns {Presence}\n */\n static syncDiff(state, diff, onJoin, onLeave){\n let {joins, leaves} = this.clone(diff)\n if(!onJoin){ onJoin = function (){ } }\n if(!onLeave){ onLeave = function (){ } }\n\n this.map(joins, (key, newPresence) => {\n let currentPresence = state[key]\n state[key] = this.clone(newPresence)\n if(currentPresence){\n let joinedRefs = state[key].metas.map(m => m.phx_ref)\n let curMetas = currentPresence.metas.filter(m => joinedRefs.indexOf(m.phx_ref) < 0)\n state[key].metas.unshift(...curMetas)\n }\n onJoin(key, currentPresence, newPresence)\n })\n this.map(leaves, (key, leftPresence) => {\n let currentPresence = state[key]\n if(!currentPresence){ return }\n let refsToRemove = leftPresence.metas.map(m => m.phx_ref)\n currentPresence.metas = currentPresence.metas.filter(p => {\n return refsToRemove.indexOf(p.phx_ref) < 0\n })\n onLeave(key, currentPresence, leftPresence)\n if(currentPresence.metas.length === 0){\n delete state[key]\n }\n })\n return state\n }\n\n /**\n * Returns the array of presences, with selected metadata.\n *\n * @param {Object} presences\n * @param {Function} chooser\n *\n * @returns {Presence}\n */\n static list(presences, chooser){\n if(!chooser){ chooser = function (key, pres){ return pres } }\n\n return this.map(presences, (key, presence) => {\n return chooser(key, presence)\n })\n }\n\n // private\n\n static map(obj, func){\n return Object.getOwnPropertyNames(obj).map(key => func(key, obj[key]))\n }\n\n static clone(obj){ return JSON.parse(JSON.stringify(obj)) }\n}\n", "/* The default serializer for encoding and decoding messages */\nimport {\n CHANNEL_EVENTS\n} from \"./constants\"\n\nexport default {\n HEADER_LENGTH: 1,\n META_LENGTH: 4,\n KINDS: {push: 0, reply: 1, broadcast: 2},\n\n encode(msg, callback){\n if(msg.payload.constructor === ArrayBuffer){\n return callback(this.binaryEncode(msg))\n } else {\n let payload = [msg.join_ref, msg.ref, msg.topic, msg.event, msg.payload]\n return callback(JSON.stringify(payload))\n }\n },\n\n decode(rawPayload, callback){\n if(rawPayload.constructor === ArrayBuffer){\n return callback(this.binaryDecode(rawPayload))\n } 
else {\n let [join_ref, ref, topic, event, payload] = JSON.parse(rawPayload)\n return callback({join_ref, ref, topic, event, payload})\n }\n },\n\n // private\n\n binaryEncode(message){\n let {join_ref, ref, event, topic, payload} = message\n let metaLength = this.META_LENGTH + join_ref.length + ref.length + topic.length + event.length\n let header = new ArrayBuffer(this.HEADER_LENGTH + metaLength)\n let view = new DataView(header)\n let offset = 0\n\n view.setUint8(offset++, this.KINDS.push) // kind\n view.setUint8(offset++, join_ref.length)\n view.setUint8(offset++, ref.length)\n view.setUint8(offset++, topic.length)\n view.setUint8(offset++, event.length)\n Array.from(join_ref, char => view.setUint8(offset++, char.charCodeAt(0)))\n Array.from(ref, char => view.setUint8(offset++, char.charCodeAt(0)))\n Array.from(topic, char => view.setUint8(offset++, char.charCodeAt(0)))\n Array.from(event, char => view.setUint8(offset++, char.charCodeAt(0)))\n\n var combined = new Uint8Array(header.byteLength + payload.byteLength)\n combined.set(new Uint8Array(header), 0)\n combined.set(new Uint8Array(payload), header.byteLength)\n\n return combined.buffer\n },\n\n binaryDecode(buffer){\n let view = new DataView(buffer)\n let kind = view.getUint8(0)\n let decoder = new TextDecoder()\n switch(kind){\n case this.KINDS.push: return this.decodePush(buffer, view, decoder)\n case this.KINDS.reply: return this.decodeReply(buffer, view, decoder)\n case this.KINDS.broadcast: return this.decodeBroadcast(buffer, view, decoder)\n }\n },\n\n decodePush(buffer, view, decoder){\n let joinRefSize = view.getUint8(1)\n let topicSize = view.getUint8(2)\n let eventSize = view.getUint8(3)\n let offset = this.HEADER_LENGTH + this.META_LENGTH - 1 // pushes have no ref\n let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize))\n offset = offset + joinRefSize\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n return {join_ref: joinRef, ref: null, topic: topic, event: event, payload: data}\n },\n\n decodeReply(buffer, view, decoder){\n let joinRefSize = view.getUint8(1)\n let refSize = view.getUint8(2)\n let topicSize = view.getUint8(3)\n let eventSize = view.getUint8(4)\n let offset = this.HEADER_LENGTH + this.META_LENGTH\n let joinRef = decoder.decode(buffer.slice(offset, offset + joinRefSize))\n offset = offset + joinRefSize\n let ref = decoder.decode(buffer.slice(offset, offset + refSize))\n offset = offset + refSize\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n let payload = {status: event, response: data}\n return {join_ref: joinRef, ref: ref, topic: topic, event: CHANNEL_EVENTS.reply, payload: payload}\n },\n\n decodeBroadcast(buffer, view, decoder){\n let topicSize = view.getUint8(1)\n let eventSize = view.getUint8(2)\n let offset = this.HEADER_LENGTH + 2\n let topic = decoder.decode(buffer.slice(offset, offset + topicSize))\n offset = offset + topicSize\n let event = decoder.decode(buffer.slice(offset, offset + eventSize))\n offset = offset + eventSize\n let data = buffer.slice(offset, buffer.byteLength)\n\n return {join_ref: null, ref: null, topic: topic, event: event, payload: data}\n 
}\n}\n", "import {\n global,\n phxWindow,\n CHANNEL_EVENTS,\n DEFAULT_TIMEOUT,\n DEFAULT_VSN,\n SOCKET_STATES,\n TRANSPORTS,\n WS_CLOSE_NORMAL\n} from \"./constants\"\n\nimport {\n closure\n} from \"./utils\"\n\nimport Ajax from \"./ajax\"\nimport Channel from \"./channel\"\nimport LongPoll from \"./longpoll\"\nimport Serializer from \"./serializer\"\nimport Timer from \"./timer\"\n\n/** Initializes the Socket *\n *\n * For IE8 support use an ES5-shim (https://github.com/es-shims/es5-shim)\n *\n * @param {string} endPoint - The string WebSocket endpoint, ie, `\"ws://example.com/socket\"`,\n * `\"wss://example.com\"`\n * `\"/socket\"` (inherited host & protocol)\n * @param {Object} [opts] - Optional configuration\n * @param {Function} [opts.transport] - The Websocket Transport, for example WebSocket or Phoenix.LongPoll.\n *\n * Defaults to WebSocket with automatic LongPoll fallback.\n * @param {Function} [opts.encode] - The function to encode outgoing messages.\n *\n * Defaults to JSON encoder.\n *\n * @param {Function} [opts.decode] - The function to decode incoming messages.\n *\n * Defaults to JSON:\n *\n * ```javascript\n * (payload, callback) => callback(JSON.parse(payload))\n * ```\n *\n * @param {number} [opts.timeout] - The default timeout in milliseconds to trigger push timeouts.\n *\n * Defaults `DEFAULT_TIMEOUT`\n * @param {number} [opts.heartbeatIntervalMs] - The millisec interval to send a heartbeat message\n * @param {number} [opts.reconnectAfterMs] - The optional function that returns the millsec\n * socket reconnect interval.\n *\n * Defaults to stepped backoff of:\n *\n * ```javascript\n * function(tries){\n * return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000\n * }\n * ````\n *\n * @param {number} [opts.rejoinAfterMs] - The optional function that returns the millsec\n * rejoin interval for individual channels.\n *\n * ```javascript\n * function(tries){\n * return [1000, 2000, 5000][tries - 1] || 10000\n * }\n * ````\n *\n * @param {Function} [opts.logger] - The optional function for specialized logging, ie:\n *\n * ```javascript\n * function(kind, msg, data) {\n * console.log(`${kind}: ${msg}`, data)\n * }\n * ```\n *\n * @param {number} [opts.longpollerTimeout] - The maximum timeout of a long poll AJAX request.\n *\n * Defaults to 20s (double the server long poll timer).\n *\n * @param {(Object|function)} [opts.params] - The optional params to pass when connecting\n * @param {string} [opts.binaryType] - The binary type to use for binary WebSocket frames.\n *\n * Defaults to \"arraybuffer\"\n *\n * @param {vsn} [opts.vsn] - The serializer's protocol version to send on connect.\n *\n * Defaults to DEFAULT_VSN.\n*/\nexport default class Socket {\n constructor(endPoint, opts = {}){\n this.stateChangeCallbacks = {open: [], close: [], error: [], message: []}\n this.channels = []\n this.sendBuffer = []\n this.ref = 0\n this.timeout = opts.timeout || DEFAULT_TIMEOUT\n this.transport = opts.transport || global.WebSocket || LongPoll\n this.establishedConnections = 0\n this.defaultEncoder = Serializer.encode.bind(Serializer)\n this.defaultDecoder = Serializer.decode.bind(Serializer)\n this.closeWasClean = false\n this.binaryType = opts.binaryType || \"arraybuffer\"\n this.connectClock = 1\n if(this.transport !== LongPoll){\n this.encode = opts.encode || this.defaultEncoder\n this.decode = opts.decode || this.defaultDecoder\n } else {\n this.encode = this.defaultEncoder\n this.decode = this.defaultDecoder\n }\n let awaitingConnectionOnPageShow = null\n 
if(phxWindow && phxWindow.addEventListener){\n phxWindow.addEventListener(\"pagehide\", _e => {\n if(this.conn){\n this.disconnect()\n awaitingConnectionOnPageShow = this.connectClock\n }\n })\n phxWindow.addEventListener(\"pageshow\", _e => {\n if(awaitingConnectionOnPageShow === this.connectClock){\n awaitingConnectionOnPageShow = null\n this.connect()\n }\n })\n }\n this.heartbeatIntervalMs = opts.heartbeatIntervalMs || 30000\n this.rejoinAfterMs = (tries) => {\n if(opts.rejoinAfterMs){\n return opts.rejoinAfterMs(tries)\n } else {\n return [1000, 2000, 5000][tries - 1] || 10000\n }\n }\n this.reconnectAfterMs = (tries) => {\n if(opts.reconnectAfterMs){\n return opts.reconnectAfterMs(tries)\n } else {\n return [10, 50, 100, 150, 200, 250, 500, 1000, 2000][tries - 1] || 5000\n }\n }\n this.logger = opts.logger || null\n this.longpollerTimeout = opts.longpollerTimeout || 20000\n this.params = closure(opts.params || {})\n this.endPoint = `${endPoint}/${TRANSPORTS.websocket}`\n this.vsn = opts.vsn || DEFAULT_VSN\n this.heartbeatTimer = null\n this.pendingHeartbeatRef = null\n this.reconnectTimer = new Timer(() => {\n this.teardown(() => this.connect())\n }, this.reconnectAfterMs)\n }\n\n /**\n * Returns the LongPoll transport reference\n */\n getLongPollTransport(){ return LongPoll }\n\n /**\n * Disconnects and replaces the active transport\n *\n * @param {Function} newTransport - The new transport class to instantiate\n *\n */\n replaceTransport(newTransport){\n this.connectClock++\n this.closeWasClean = true\n this.reconnectTimer.reset()\n this.sendBuffer = []\n if(this.conn){\n this.conn.close()\n this.conn = null\n }\n this.transport = newTransport\n }\n\n /**\n * Returns the socket protocol\n *\n * @returns {string}\n */\n protocol(){ return location.protocol.match(/^https/) ? \"wss\" : \"ws\" }\n\n /**\n * The fully qualifed socket url\n *\n * @returns {string}\n */\n endPointURL(){\n let uri = Ajax.appendParams(\n Ajax.appendParams(this.endPoint, this.params()), {vsn: this.vsn})\n if(uri.charAt(0) !== \"/\"){ return uri }\n if(uri.charAt(1) === \"/\"){ return `${this.protocol()}:${uri}` }\n\n return `${this.protocol()}://${location.host}${uri}`\n }\n\n /**\n * Disconnects the socket\n *\n * See https://developer.mozilla.org/en-US/docs/Web/API/CloseEvent#Status_codes for valid status codes.\n *\n * @param {Function} callback - Optional callback which is called after socket is disconnected.\n * @param {integer} code - A status code for disconnection (Optional).\n * @param {string} reason - A textual description of the reason to disconnect. (Optional)\n */\n disconnect(callback, code, reason){\n this.connectClock++\n this.closeWasClean = true\n this.reconnectTimer.reset()\n this.teardown(callback, code, reason)\n }\n\n /**\n *\n * @param {Object} params - The params to send when connecting, for example `{user_id: userToken}`\n *\n * Passing params to connect is deprecated; pass them in the Socket constructor instead:\n * `new Socket(\"/socket\", {params: {user_id: userToken}})`.\n */\n connect(params){\n if(params){\n console && console.log(\"passing params to connect is deprecated. 
Instead pass :params to the Socket constructor\")\n this.params = closure(params)\n }\n if(this.conn){ return }\n\n this.connectClock++\n this.closeWasClean = false\n this.conn = new this.transport(this.endPointURL())\n this.conn.binaryType = this.binaryType\n this.conn.timeout = this.longpollerTimeout\n this.conn.onopen = () => this.onConnOpen()\n this.conn.onerror = error => this.onConnError(error)\n this.conn.onmessage = event => this.onConnMessage(event)\n this.conn.onclose = event => this.onConnClose(event)\n }\n\n /**\n * Logs the message. Override `this.logger` for specialized logging. noops by default\n * @param {string} kind\n * @param {string} msg\n * @param {Object} data\n */\n log(kind, msg, data){ this.logger(kind, msg, data) }\n\n /**\n * Returns true if a logger has been set on this socket.\n */\n hasLogger(){ return this.logger !== null }\n\n /**\n * Registers callbacks for connection open events\n *\n * @example socket.onOpen(function(){ console.info(\"the socket was opened\") })\n *\n * @param {Function} callback\n */\n onOpen(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.open.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection close events\n * @param {Function} callback\n */\n onClose(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.close.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection error events\n *\n * @example socket.onError(function(error){ alert(\"An error occurred\") })\n *\n * @param {Function} callback\n */\n onError(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.error.push([ref, callback])\n return ref\n }\n\n /**\n * Registers callbacks for connection message events\n * @param {Function} callback\n */\n onMessage(callback){\n let ref = this.makeRef()\n this.stateChangeCallbacks.message.push([ref, callback])\n return ref\n }\n\n /**\n * Pings the server and invokes the callback with the RTT in milliseconds\n * @param {Function} callback\n *\n * Returns true if the ping was pushed or false if unable to be pushed.\n */\n ping(callback){\n if(!this.isConnected()){ return false }\n let ref = this.makeRef()\n let startTime = Date.now()\n this.push({topic: \"phoenix\", event: \"heartbeat\", payload: {}, ref: ref})\n let onMsgRef = this.onMessage(msg => {\n if(msg.ref === ref){\n this.off([onMsgRef])\n callback(Date.now() - startTime)\n }\n })\n return true\n }\n\n /**\n * @private\n */\n onConnOpen(){\n if(this.hasLogger()) this.log(\"transport\", `connected to ${this.endPointURL()}`)\n this.closeWasClean = false\n this.establishedConnections++\n this.flushSendBuffer()\n this.reconnectTimer.reset()\n this.resetHeartbeat()\n this.stateChangeCallbacks.open.forEach(([, callback]) => callback())\n }\n\n /**\n * @private\n */\n\n heartbeatTimeout(){\n if(this.pendingHeartbeatRef){\n this.pendingHeartbeatRef = null\n if(this.hasLogger()){ this.log(\"transport\", \"heartbeat timeout. 
Attempting to re-establish connection\") }\n this.abnormalClose(\"heartbeat timeout\")\n }\n }\n\n resetHeartbeat(){\n if(this.conn && this.conn.skipHeartbeat){ return }\n this.pendingHeartbeatRef = null\n clearTimeout(this.heartbeatTimer)\n setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs)\n }\n\n teardown(callback, code, reason){\n if(!this.conn){\n return callback && callback()\n }\n\n this.waitForBufferDone(() => {\n if(this.conn){\n if(code){ this.conn.close(code, reason || \"\") } else { this.conn.close() }\n }\n\n this.waitForSocketClosed(() => {\n if(this.conn){\n this.conn.onclose = function (){ } // noop\n this.conn = null\n }\n\n callback && callback()\n })\n })\n }\n\n waitForBufferDone(callback, tries = 1){\n if(tries === 5 || !this.conn || !this.conn.bufferedAmount){\n callback()\n return\n }\n\n setTimeout(() => {\n this.waitForBufferDone(callback, tries + 1)\n }, 150 * tries)\n }\n\n waitForSocketClosed(callback, tries = 1){\n if(tries === 5 || !this.conn || this.conn.readyState === SOCKET_STATES.closed){\n callback()\n return\n }\n\n setTimeout(() => {\n this.waitForSocketClosed(callback, tries + 1)\n }, 150 * tries)\n }\n\n onConnClose(event){\n let closeCode = event && event.code\n if(this.hasLogger()) this.log(\"transport\", \"close\", event)\n this.triggerChanError()\n clearTimeout(this.heartbeatTimer)\n if(!this.closeWasClean && closeCode !== 1000){\n this.reconnectTimer.scheduleTimeout()\n }\n this.stateChangeCallbacks.close.forEach(([, callback]) => callback(event))\n }\n\n /**\n * @private\n */\n onConnError(error){\n if(this.hasLogger()) this.log(\"transport\", error)\n let transportBefore = this.transport\n let establishedBefore = this.establishedConnections\n this.stateChangeCallbacks.error.forEach(([, callback]) => {\n callback(error, transportBefore, establishedBefore)\n })\n if(transportBefore === this.transport || establishedBefore > 0){\n this.triggerChanError()\n }\n }\n\n /**\n * @private\n */\n triggerChanError(){\n this.channels.forEach(channel => {\n if(!(channel.isErrored() || channel.isLeaving() || channel.isClosed())){\n channel.trigger(CHANNEL_EVENTS.error)\n }\n })\n }\n\n /**\n * @returns {string}\n */\n connectionState(){\n switch(this.conn && this.conn.readyState){\n case SOCKET_STATES.connecting: return \"connecting\"\n case SOCKET_STATES.open: return \"open\"\n case SOCKET_STATES.closing: return \"closing\"\n default: return \"closed\"\n }\n }\n\n /**\n * @returns {boolean}\n */\n isConnected(){ return this.connectionState() === \"open\" }\n\n /**\n * @private\n *\n * @param {Channel}\n */\n remove(channel){\n this.off(channel.stateChangeRefs)\n this.channels = this.channels.filter(c => c.joinRef() !== channel.joinRef())\n }\n\n /**\n * Removes `onOpen`, `onClose`, `onError,` and `onMessage` registrations.\n *\n * @param {refs} - list of refs returned by calls to\n * `onOpen`, `onClose`, `onError,` and `onMessage`\n */\n off(refs){\n for(let key in this.stateChangeCallbacks){\n this.stateChangeCallbacks[key] = this.stateChangeCallbacks[key].filter(([ref]) => {\n return refs.indexOf(ref) === -1\n })\n }\n }\n\n /**\n * Initiates a new channel for the given topic\n *\n * @param {string} topic\n * @param {Object} chanParams - Parameters for the channel\n * @returns {Channel}\n */\n channel(topic, chanParams = {}){\n let chan = new Channel(topic, chanParams, this)\n this.channels.push(chan)\n return chan\n }\n\n /**\n * @param {Object} data\n */\n push(data){\n if(this.hasLogger()){\n let {topic, event, payload, ref, join_ref} = 
data\n this.log(\"push\", `${topic} ${event} (${join_ref}, ${ref})`, payload)\n }\n\n if(this.isConnected()){\n this.encode(data, result => this.conn.send(result))\n } else {\n this.sendBuffer.push(() => this.encode(data, result => this.conn.send(result)))\n }\n }\n\n /**\n * Return the next message ref, accounting for overflows\n * @returns {string}\n */\n makeRef(){\n let newRef = this.ref + 1\n if(newRef === this.ref){ this.ref = 0 } else { this.ref = newRef }\n\n return this.ref.toString()\n }\n\n sendHeartbeat(){\n if(this.pendingHeartbeatRef && !this.isConnected()){ return }\n this.pendingHeartbeatRef = this.makeRef()\n this.push({topic: \"phoenix\", event: \"heartbeat\", payload: {}, ref: this.pendingHeartbeatRef})\n this.heartbeatTimer = setTimeout(() => this.heartbeatTimeout(), this.heartbeatIntervalMs)\n }\n\n abnormalClose(reason){\n this.closeWasClean = false\n if(this.isConnected()){ this.conn.close(WS_CLOSE_NORMAL, reason) }\n }\n\n flushSendBuffer(){\n if(this.isConnected() && this.sendBuffer.length > 0){\n this.sendBuffer.forEach(callback => callback())\n this.sendBuffer = []\n }\n }\n\n onConnMessage(rawMessage){\n this.decode(rawMessage.data, msg => {\n let {topic, event, payload, ref, join_ref} = msg\n if(ref && ref === this.pendingHeartbeatRef){\n clearTimeout(this.heartbeatTimer)\n this.pendingHeartbeatRef = null\n setTimeout(() => this.sendHeartbeat(), this.heartbeatIntervalMs)\n }\n\n if(this.hasLogger()) this.log(\"receive\", `${payload.status || \"\"} ${topic} ${event} ${ref && \"(\" + ref + \")\" || \"\"}`, payload)\n\n for(let i = 0; i < this.channels.length; i++){\n const channel = this.channels[i]\n if(!channel.isMember(topic, event, payload, join_ref)){ continue }\n channel.trigger(event, payload, ref, join_ref)\n }\n\n for(let i = 0; i < this.stateChangeCallbacks.message.length; i++){\n let [, callback] = this.stateChangeCallbacks.message[i]\n callback(msg)\n }\n })\n }\n\n leaveOpenTopic(topic){\n let dupChannel = this.channels.find(c => c.topic === topic && (c.isJoined() || c.isJoining()))\n if(dupChannel){\n if(this.hasLogger()) this.log(\"transport\", `leaving duplicate topic \"${topic}\"`)\n dupChannel.leave()\n }\n }\n}\n"], + "mappings": 
";AACO,IAAI,UAAU,CAAC,UAAU;AAC9B,MAAG,OAAO,UAAU,YAAW;AAC7B,WAAO;AAAA,EACT,OAAO;AACL,QAAI,WAAU,WAAW;AAAE,aAAO;AAAA,IAAM;AACxC,WAAO;AAAA,EACT;AACF;;;ACRO,IAAM,aAAa,OAAO,SAAS,cAAc,OAAO;AACxD,IAAM,YAAY,OAAO,WAAW,cAAc,SAAS;AAC3D,IAAM,SAAS,cAAc,aAAa;AAC1C,IAAM,cAAc;AACpB,IAAM,gBAAgB,EAAC,YAAY,GAAG,MAAM,GAAG,SAAS,GAAG,QAAQ,EAAC;AACpE,IAAM,kBAAkB;AACxB,IAAM,kBAAkB;AACxB,IAAM,iBAAiB;AAAA,EAC5B,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,QAAQ;AAAA,EACR,SAAS;AAAA,EACT,SAAS;AACX;AACO,IAAM,iBAAiB;AAAA,EAC5B,OAAO;AAAA,EACP,OAAO;AAAA,EACP,MAAM;AAAA,EACN,OAAO;AAAA,EACP,OAAO;AACT;AAEO,IAAM,aAAa;AAAA,EACxB,UAAU;AAAA,EACV,WAAW;AACb;AACO,IAAM,aAAa;AAAA,EACxB,UAAU;AACZ;;;ACrBA,IAAqB,OAArB,MAA0B;AAAA,EACxB,YAAY,SAAS,OAAO,SAAS,SAAQ;AAC3C,SAAK,UAAU;AACf,SAAK,QAAQ;AACb,SAAK,UAAU,WAAW,WAAW;AAAE,aAAO,CAAC;AAAA,IAAE;AACjD,SAAK,eAAe;AACpB,SAAK,UAAU;AACf,SAAK,eAAe;AACpB,SAAK,WAAW,CAAC;AACjB,SAAK,OAAO;AAAA,EACd;AAAA,EAMA,OAAO,SAAQ;AACb,SAAK,UAAU;AACf,SAAK,MAAM;AACX,SAAK,KAAK;AAAA,EACZ;AAAA,EAKA,OAAM;AACJ,QAAG,KAAK,YAAY,SAAS,GAAE;AAAE;AAAA,IAAO;AACxC,SAAK,aAAa;AAClB,SAAK,OAAO;AACZ,SAAK,QAAQ,OAAO,KAAK;AAAA,MACvB,OAAO,KAAK,QAAQ;AAAA,MACpB,OAAO,KAAK;AAAA,MACZ,SAAS,KAAK,QAAQ;AAAA,MACtB,KAAK,KAAK;AAAA,MACV,UAAU,KAAK,QAAQ,QAAQ;AAAA,IACjC,CAAC;AAAA,EACH;AAAA,EAOA,QAAQ,QAAQ,UAAS;AACvB,QAAG,KAAK,YAAY,MAAM,GAAE;AAC1B,eAAS,KAAK,aAAa,QAAQ;AAAA,IACrC;AAEA,SAAK,SAAS,KAAK,EAAC,QAAQ,SAAQ,CAAC;AACrC,WAAO;AAAA,EACT;AAAA,EAKA,QAAO;AACL,SAAK,eAAe;AACpB,SAAK,MAAM;AACX,SAAK,WAAW;AAChB,SAAK,eAAe;AACpB,SAAK,OAAO;AAAA,EACd;AAAA,EAKA,aAAa,EAAC,QAAQ,UAAU,QAAM;AACpC,SAAK,SAAS,OAAO,OAAK,EAAE,WAAW,MAAM,EAC1C,QAAQ,OAAK,EAAE,SAAS,QAAQ,CAAC;AAAA,EACtC;AAAA,EAKA,iBAAgB;AACd,QAAG,CAAC,KAAK,UAAS;AAAE;AAAA,IAAO;AAC3B,SAAK,QAAQ,IAAI,KAAK,QAAQ;AAAA,EAChC;AAAA,EAKA,gBAAe;AACb,iBAAa,KAAK,YAAY;AAC9B,SAAK,eAAe;AAAA,EACtB;AAAA,EAKA,eAAc;AACZ,QAAG,KAAK,cAAa;AAAE,WAAK,cAAc;AAAA,IAAE;AAC5C,SAAK,MAAM,KAAK,QAAQ,OAAO,QAAQ;AACvC,SAAK,WAAW,KAAK,QAAQ,eAAe,KAAK,GAAG;AAEpD,SAAK,QAAQ,GAAG,KAAK,UAAU,aAAW;AACxC,WAAK,eAAe;AACpB,WAAK,cAAc;AACnB,WAAK,eAAe;AACpB,WAAK,aAAa,OAAO;AAAA,IAC3B,CAAC;AAED,SAAK,eAAe,WAAW,MAAM;AACnC,WAAK,QAAQ,WAAW,CAAC,CAAC;AAAA,IAC5B,GAAG,KAAK,OAAO;AAAA,EACjB;AAAA,EAKA,YAAY,QAAO;AACjB,WAAO,KAAK,gBAAgB,KAAK,aAAa,WAAW;AAAA,EAC3D;AAAA,EAKA,QAAQ,QAAQ,UAAS;AACvB,SAAK,QAAQ,QAAQ,KAAK,UAAU,EAAC,QAAQ,SAAQ,CAAC;AAAA,EACxD;AACF;;;AC9GA,IAAqB,QAArB,MAA2B;AAAA,EACzB,YAAY,UAAU,WAAU;AAC9B,SAAK,WAAW;AAChB,SAAK,YAAY;AACjB,SAAK,QAAQ;AACb,SAAK,QAAQ;AAAA,EACf;AAAA,EAEA,QAAO;AACL,SAAK,QAAQ;AACb,iBAAa,KAAK,KAAK;AAAA,EACzB;AAAA,EAKA,kBAAiB;AACf,iBAAa,KAAK,KAAK;AAEvB,SAAK,QAAQ,WAAW,MAAM;AAC5B,WAAK,QAAQ,KAAK,QAAQ;AAC1B,WAAK,SAAS;AAAA,IAChB,GAAG,KAAK,UAAU,KAAK,QAAQ,CAAC,CAAC;AAAA,EACnC;AACF;;;AC1BA,IAAqB,UAArB,MAA6B;AAAA,EAC3B,YAAY,OAAO,QAAQ,QAAO;AAChC,SAAK,QAAQ,eAAe;AAC5B,SAAK,QAAQ;AACb,SAAK,SAAS,QAAQ,UAAU,CAAC,CAAC;AAClC,SAAK,SAAS;AACd,SAAK,WAAW,CAAC;AACjB,SAAK,aAAa;AAClB,SAAK,UAAU,KAAK,OAAO;AAC3B,SAAK,aAAa;AAClB,SAAK,WAAW,IAAI,KAAK,MAAM,eAAe,MAAM,KAAK,QAAQ,KAAK,OAAO;AAC7E,SAAK,aAAa,CAAC;AACnB,SAAK,kBAAkB,CAAC;AAExB,SAAK,cAAc,IAAI,MAAM,MAAM;AACjC,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,OAAO;AAAA,MAAE;AAAA,IAC/C,GAAG,KAAK,OAAO,aAAa;AAC5B,SAAK,gBAAgB,KAAK,KAAK,OAAO,QAAQ,MAAM,KAAK,YAAY,MAAM,CAAC,CAAC;AAC7E,SAAK,gBAAgB,KAAK,KAAK,OAAO,OAAO,MAAM;AACjD,WAAK,YAAY,MAAM;AACvB,UAAG,KAAK,UAAU,GAAE;AAAE,aAAK,OAAO;AAAA,MAAE;AAAA,IACtC,CAAC,CACD;AACA,SAAK,SAAS,QAAQ,MAAM,MAAM;AAChC,WAAK,QAAQ,eAAe;AAC5B,WAAK,YAAY,MAAM;AACvB,WAAK,WAAW,QAAQ,eAAa,UAAU,KAAK,CAAC;AACrD,WAAK,aAAa,CAAC;AAAA,IACrB,CAAC;AACD,SAAK,SAAS,QAAQ,SAAS,MAAM;AACnC,WAAK,QAAQ,eAAe;AAC5B,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE
,CAAC;AACD,SAAK,QAAQ,MAAM;AACjB,WAAK,YAAY,MAAM;AACvB,UAAG,KAAK,OAAO,UAAU;AAAG,aAAK,OAAO,IAAI,WAAW,SAAS,KAAK,SAAS,KAAK,QAAQ,GAAG;AAC9F,WAAK,QAAQ,eAAe;AAC5B,WAAK,OAAO,OAAO,IAAI;AAAA,IACzB,CAAC;AACD,SAAK,QAAQ,YAAU;AACrB,UAAG,KAAK,OAAO,UAAU;AAAG,aAAK,OAAO,IAAI,WAAW,SAAS,KAAK,SAAS,MAAM;AACpF,UAAG,KAAK,UAAU,GAAE;AAAE,aAAK,SAAS,MAAM;AAAA,MAAE;AAC5C,WAAK,QAAQ,eAAe;AAC5B,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE,CAAC;AACD,SAAK,SAAS,QAAQ,WAAW,MAAM;AACrC,UAAG,KAAK,OAAO,UAAU;AAAG,aAAK,OAAO,IAAI,WAAW,WAAW,KAAK,UAAU,KAAK,QAAQ,MAAM,KAAK,SAAS,OAAO;AACzH,UAAI,YAAY,IAAI,KAAK,MAAM,eAAe,OAAO,QAAQ,CAAC,CAAC,GAAG,KAAK,OAAO;AAC9E,gBAAU,KAAK;AACf,WAAK,QAAQ,eAAe;AAC5B,WAAK,SAAS,MAAM;AACpB,UAAG,KAAK,OAAO,YAAY,GAAE;AAAE,aAAK,YAAY,gBAAgB;AAAA,MAAE;AAAA,IACpE,CAAC;AACD,SAAK,GAAG,eAAe,OAAO,CAAC,SAAS,QAAQ;AAC9C,WAAK,QAAQ,KAAK,eAAe,GAAG,GAAG,OAAO;AAAA,IAChD,CAAC;AAAA,EACH;AAAA,EAOA,KAAK,UAAU,KAAK,SAAQ;AAC1B,QAAG,KAAK,YAAW;AACjB,YAAM,IAAI,MAAM,4FAA4F;AAAA,IAC9G,OAAO;AACL,WAAK,UAAU;AACf,WAAK,aAAa;AAClB,WAAK,OAAO;AACZ,aAAO,KAAK;AAAA,IACd;AAAA,EACF;AAAA,EAMA,QAAQ,UAAS;AACf,SAAK,GAAG,eAAe,OAAO,QAAQ;AAAA,EACxC;AAAA,EAMA,QAAQ,UAAS;AACf,WAAO,KAAK,GAAG,eAAe,OAAO,YAAU,SAAS,MAAM,CAAC;AAAA,EACjE;AAAA,EAmBA,GAAG,OAAO,UAAS;AACjB,QAAI,MAAM,KAAK;AACf,SAAK,SAAS,KAAK,EAAC,OAAO,KAAK,SAAQ,CAAC;AACzC,WAAO;AAAA,EACT;AAAA,EAoBA,IAAI,OAAO,KAAI;AACb,SAAK,WAAW,KAAK,SAAS,OAAO,CAAC,SAAS;AAC7C,aAAO,CAAE,MAAK,UAAU,SAAU,QAAO,QAAQ,eAAe,QAAQ,KAAK;AAAA,IAC/E,CAAC;AAAA,EACH;AAAA,EAKA,UAAS;AAAE,WAAO,KAAK,OAAO,YAAY,KAAK,KAAK,SAAS;AAAA,EAAE;AAAA,EAkB/D,KAAK,OAAO,SAAS,UAAU,KAAK,SAAQ;AAC1C,cAAU,WAAW,CAAC;AACtB,QAAG,CAAC,KAAK,YAAW;AAClB,YAAM,IAAI,MAAM,kBAAkB,cAAc,KAAK,iEAAiE;AAAA,IACxH;AACA,QAAI,YAAY,IAAI,KAAK,MAAM,OAAO,WAAW;AAAE,aAAO;AAAA,IAAQ,GAAG,OAAO;AAC5E,QAAG,KAAK,QAAQ,GAAE;AAChB,gBAAU,KAAK;AAAA,IACjB,OAAO;AACL,gBAAU,aAAa;AACvB,WAAK,WAAW,KAAK,SAAS;AAAA,IAChC;AAEA,WAAO;AAAA,EACT;AAAA,EAkBA,MAAM,UAAU,KAAK,SAAQ;AAC3B,SAAK,YAAY,MAAM;AACvB,SAAK,SAAS,cAAc;AAE5B,SAAK,QAAQ,eAAe;AAC5B,QAAI,UAAU,MAAM;AAClB,UAAG,KAAK,OAAO,UAAU;AAAG,aAAK,OAAO,IAAI,WAAW,SAAS,KAAK,OAAO;AAC5E,WAAK,QAAQ,eAAe,OAAO,OAAO;AAAA,IAC5C;AACA,QAAI,YAAY,IAAI,KAAK,MAAM,eAAe,OAAO,QAAQ,CAAC,CAAC,GAAG,OAAO;AACzE,cAAU,QAAQ,MAAM,MAAM,QAAQ,CAAC,EACpC,QAAQ,WAAW,MAAM,QAAQ,CAAC;AACrC,cAAU,KAAK;AACf,QAAG,CAAC,KAAK,QAAQ,GAAE;AAAE,gBAAU,QAAQ,MAAM,CAAC,CAAC;AAAA,IAAE;AAEjD,WAAO;AAAA,EACT;AAAA,EAcA,UAAU,QAAQ,SAAS,MAAK;AAAE,WAAO;AAAA,EAAQ;AAAA,EAKjD,SAAS,OAAO,OAAO,SAAS,SAAQ;AACtC,QAAG,KAAK,UAAU,OAAM;AAAE,aAAO;AAAA,IAAM;AAEvC,QAAG,WAAW,YAAY,KAAK,QAAQ,GAAE;AACvC,UAAG,KAAK,OAAO,UAAU;AAAG,aAAK,OAAO,IAAI,WAAW,6BAA6B,EAAC,OAAO,OAAO,SAAS,QAAO,CAAC;AACpH,aAAO;AAAA,IACT,OAAO;AACL,aAAO;AAAA,IACT;AAAA,EACF;AAAA,EAKA,UAAS;AAAE,WAAO,KAAK,SAAS;AAAA,EAAI;AAAA,EAKpC,OAAO,UAAU,KAAK,SAAQ;AAC5B,QAAG,KAAK,UAAU,GAAE;AAAE;AAAA,IAAO;AAC7B,SAAK,OAAO,eAAe,KAAK,KAAK;AACrC,SAAK,QAAQ,eAAe;AAC5B,SAAK,SAAS,OAAO,OAAO;AAAA,EAC9B;AAAA,EAKA,QAAQ,OAAO,SAAS,KAAK,SAAQ;AACnC,QAAI,iBAAiB,KAAK,UAAU,OAAO,SAAS,KAAK,OAAO;AAChE,QAAG,WAAW,CAAC,gBAAe;AAAE,YAAM,IAAI,MAAM,6EAA6E;AAAA,IAAE;AAE/H,QAAI,gBAAgB,KAAK,SAAS,OAAO,UAAQ,KAAK,UAAU,KAAK;AAErE,aAAQ,IAAI,GAAG,IAAI,cAAc,QAAQ,KAAI;AAC3C,UAAI,OAAO,cAAc;AACzB,WAAK,SAAS,gBAAgB,KAAK,WAAW,KAAK,QAAQ,CAAC;AAAA,IAC9D;AAAA,EACF;AAAA,EAKA,eAAe,KAAI;AAAE,WAAO,cAAc;AAAA,EAAM;AAAA,EAKhD,WAAU;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAO;AAAA,EAKxD,YAAW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAAA,EAK1D,WAAU;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAO;AAAA,EAKxD,YAAW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAAA,EAK1D,YAAW;AAAE,WAAO,KAAK,UAAU,eAAe;AAAA,EAAQ;AAC5D;;;ACjTA,IAAqB,OAArB,MAA0B;AAAA,EAExB,OAAO,QAAQ,QAAQ,UAAU,QAAQ,MAAM,
SAAS,WAAW,UAAS;AAC1E,QAAG,OAAO,gBAAe;AACvB,UAAI,MAAM,IAAI,OAAO,eAAe;AACpC,aAAO,KAAK,eAAe,KAAK,QAAQ,UAAU,MAAM,SAAS,WAAW,QAAQ;AAAA,IACtF,OAAO;AACL,UAAI,MAAM,IAAI,OAAO,eAAe;AACpC,aAAO,KAAK,WAAW,KAAK,QAAQ,UAAU,QAAQ,MAAM,SAAS,WAAW,QAAQ;AAAA,IAC1F;AAAA,EACF;AAAA,EAEA,OAAO,eAAe,KAAK,QAAQ,UAAU,MAAM,SAAS,WAAW,UAAS;AAC9E,QAAI,UAAU;AACd,QAAI,KAAK,QAAQ,QAAQ;AACzB,QAAI,SAAS,MAAM;AACjB,UAAI,WAAW,KAAK,UAAU,IAAI,YAAY;AAC9C,kBAAY,SAAS,QAAQ;AAAA,IAC/B;AACA,QAAG,WAAU;AAAE,UAAI,YAAY;AAAA,IAAU;AAGzC,QAAI,aAAa,MAAM;AAAA,IAAE;AAEzB,QAAI,KAAK,IAAI;AACb,WAAO;AAAA,EACT;AAAA,EAEA,OAAO,WAAW,KAAK,QAAQ,UAAU,QAAQ,MAAM,SAAS,WAAW,UAAS;AAClF,QAAI,KAAK,QAAQ,UAAU,IAAI;AAC/B,QAAI,UAAU;AACd,QAAI,iBAAiB,gBAAgB,MAAM;AAC3C,QAAI,UAAU,MAAM,YAAY,SAAS,IAAI;AAC7C,QAAI,qBAAqB,MAAM;AAC7B,UAAG,IAAI,eAAe,WAAW,YAAY,UAAS;AACpD,YAAI,WAAW,KAAK,UAAU,IAAI,YAAY;AAC9C,iBAAS,QAAQ;AAAA,MACnB;AAAA,IACF;AACA,QAAG,WAAU;AAAE,UAAI,YAAY;AAAA,IAAU;AAEzC,QAAI,KAAK,IAAI;AACb,WAAO;AAAA,EACT;AAAA,EAEA,OAAO,UAAU,MAAK;AACpB,QAAG,CAAC,QAAQ,SAAS,IAAG;AAAE,aAAO;AAAA,IAAK;AAEtC,QAAI;AACF,aAAO,KAAK,MAAM,IAAI;AAAA,IACxB,SAAS,GAAP;AACA,iBAAW,QAAQ,IAAI,iCAAiC,IAAI;AAC5D,aAAO;AAAA,IACT;AAAA,EACF;AAAA,EAEA,OAAO,UAAU,KAAK,WAAU;AAC9B,QAAI,WAAW,CAAC;AAChB,aAAQ,OAAO,KAAI;AACjB,UAAG,CAAC,OAAO,UAAU,eAAe,KAAK,KAAK,GAAG,GAAE;AAAE;AAAA,MAAS;AAC9D,UAAI,WAAW,YAAY,GAAG,aAAa,SAAS;AACpD,UAAI,WAAW,IAAI;AACnB,UAAG,OAAO,aAAa,UAAS;AAC9B,iBAAS,KAAK,KAAK,UAAU,UAAU,QAAQ,CAAC;AAAA,MAClD,OAAO;AACL,iBAAS,KAAK,mBAAmB,QAAQ,IAAI,MAAM,mBAAmB,QAAQ,CAAC;AAAA,MACjF;AAAA,IACF;AACA,WAAO,SAAS,KAAK,GAAG;AAAA,EAC1B;AAAA,EAEA,OAAO,aAAa,KAAK,QAAO;AAC9B,QAAG,OAAO,KAAK,MAAM,EAAE,WAAW,GAAE;AAAE,aAAO;AAAA,IAAI;AAEjD,QAAI,SAAS,IAAI,MAAM,IAAI,IAAI,MAAM;AACrC,WAAO,GAAG,MAAM,SAAS,KAAK,UAAU,MAAM;AAAA,EAChD;AACF;;;AC3EA,IAAqB,WAArB,MAA8B;AAAA,EAE5B,YAAY,UAAS;AACnB,SAAK,WAAW;AAChB,SAAK,QAAQ;AACb,SAAK,gBAAgB;AACrB,SAAK,OAAO,oBAAI,IAAI;AACpB,SAAK,SAAS,WAAW;AAAA,IAAE;AAC3B,SAAK,UAAU,WAAW;AAAA,IAAE;AAC5B,SAAK,YAAY,WAAW;AAAA,IAAE;AAC9B,SAAK,UAAU,WAAW;AAAA,IAAE;AAC5B,SAAK,eAAe,KAAK,kBAAkB,QAAQ;AACnD,SAAK,aAAa,cAAc;AAChC,SAAK,KAAK;AAAA,EACZ;AAAA,EAEA,kBAAkB,UAAS;AACzB,WAAQ,SACL,QAAQ,SAAS,SAAS,EAC1B,QAAQ,UAAU,UAAU,EAC5B,QAAQ,IAAI,OAAO,UAAW,WAAW,SAAS,GAAG,QAAQ,WAAW,QAAQ;AAAA,EACrF;AAAA,EAEA,cAAa;AACX,WAAO,KAAK,aAAa,KAAK,cAAc,EAAC,OAAO,KAAK,MAAK,CAAC;AAAA,EACjE;AAAA,EAEA,cAAc,MAAM,QAAQ,UAAS;AACnC,SAAK,MAAM,MAAM,QAAQ,QAAQ;AACjC,SAAK,aAAa,cAAc;AAAA,EAClC;AAAA,EAEA,YAAW;AACT,SAAK,QAAQ,SAAS;AACtB,SAAK,cAAc,MAAM,WAAW,KAAK;AAAA,EAC3C;AAAA,EAEA,WAAU;AAAE,WAAO,KAAK,eAAe,cAAc,QAAQ,KAAK,eAAe,cAAc;AAAA,EAAW;AAAA,EAE1G,OAAM;AACJ,SAAK,KAAK,OAAO,MAAM,MAAM,KAAK,UAAU,GAAG,UAAQ;AACrD,UAAG,MAAK;AACN,YAAI,EAAC,QAAQ,OAAO,aAAY;AAChC,aAAK,QAAQ;AAAA,MACf,OAAO;AACL,iBAAS;AAAA,MACX;AAEA,cAAO;AAAA,aACA;AACH,mBAAS,QAAQ,SAAO;AAmBtB,uBAAW,MAAM,KAAK,UAAU,EAAC,MAAM,IAAG,CAAC,GAAG,CAAC;AAAA,UACjD,CAAC;AACD,eAAK,KAAK;AACV;AAAA,aACG;AACH,eAAK,KAAK;AACV;AAAA,aACG;AACH,eAAK,aAAa,cAAc;AAChC,eAAK,OAAO,CAAC,CAAC;AACd,eAAK,KAAK;AACV;AAAA,aACG;AACH,eAAK,QAAQ,GAAG;AAChB,eAAK,MAAM,MAAM,aAAa,KAAK;AACnC;AAAA,aACG;AAAA,aACA;AACH,eAAK,QAAQ,GAAG;AAChB,eAAK,cAAc,MAAM,yBAAyB,GAAG;AACrD;AAAA;AACO,gBAAM,IAAI,MAAM,yBAAyB,QAAQ;AAAA;AAAA,IAE9D,CAAC;AAAA,EACH;AAAA,EAEA,KAAK,MAAK;AACR,SAAK,KAAK,QAAQ,MAAM,MAAM,KAAK,QAAQ,SAAS,GAAG,UAAQ;AAC7D,UAAG,CAAC,QAAQ,KAAK,WAAW,KAAI;AAC9B,aAAK,QAAQ,QAAQ,KAAK,MAAM;AAChC,aAAK,cAAc,MAAM,yBAAyB,KAAK;AAAA,MACzD;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,MAAM,MAAM,QAAQ,UAAS;AAC3B,aAAQ,OAAO,KAAK,MAAK;AAAE,UAAI,MAAM;AAAA,IAAE;AACvC,SAAK,aAAa,cAAc;AAChC,QAAI,OAAO,OAAO,OAAO,EAAC,MAAM,KAAM,QAAQ,QAAW,UAAU,KAAI,GAAG,EAAC,MAAM,QAAQ,SAAQ,CAAC;AAClG,QAAG,OAAO,eAAgB,aAAY;AA
CpC,WAAK,QAAQ,IAAI,WAAW,SAAS,IAAI,CAAC;AAAA,IAC5C,OAAO;AACL,WAAK,QAAQ,IAAI;AAAA,IACnB;AAAA,EACF;AAAA,EAEA,KAAK,QAAQ,MAAM,iBAAiB,UAAS;AAC3C,QAAI;AACJ,QAAI,YAAY,MAAM;AACpB,WAAK,KAAK,OAAO,GAAG;AACpB,sBAAgB;AAAA,IAClB;AACA,UAAM,KAAK,QAAQ,QAAQ,KAAK,YAAY,GAAG,oBAAoB,MAAM,KAAK,SAAS,WAAW,UAAQ;AACxG,WAAK,KAAK,OAAO,GAAG;AACpB,UAAG,KAAK,SAAS,GAAE;AAAE,iBAAS,IAAI;AAAA,MAAE;AAAA,IACtC,CAAC;AACD,SAAK,KAAK,IAAI,GAAG;AAAA,EACnB;AACF;;;AChIA,IAAqB,WAArB,MAA8B;AAAA,EAE5B,YAAY,SAAS,OAAO,CAAC,GAAE;AAC7B,QAAI,SAAS,KAAK,UAAU,EAAC,OAAO,kBAAkB,MAAM,gBAAe;AAC3E,SAAK,QAAQ,CAAC;AACd,SAAK,eAAe,CAAC;AACrB,SAAK,UAAU;AACf,SAAK,UAAU;AACf,SAAK,SAAS;AAAA,MACZ,QAAQ,WAAW;AAAA,MAAE;AAAA,MACrB,SAAS,WAAW;AAAA,MAAE;AAAA,MACtB,QAAQ,WAAW;AAAA,MAAE;AAAA,IACvB;AAEA,SAAK,QAAQ,GAAG,OAAO,OAAO,cAAY;AACxC,UAAI,EAAC,QAAQ,SAAS,WAAU,KAAK;AAErC,WAAK,UAAU,KAAK,QAAQ,QAAQ;AACpC,WAAK,QAAQ,SAAS,UAAU,KAAK,OAAO,UAAU,QAAQ,OAAO;AAErE,WAAK,aAAa,QAAQ,UAAQ;AAChC,aAAK,QAAQ,SAAS,SAAS,KAAK,OAAO,MAAM,QAAQ,OAAO;AAAA,MAClE,CAAC;AACD,WAAK,eAAe,CAAC;AACrB,aAAO;AAAA,IACT,CAAC;AAED,SAAK,QAAQ,GAAG,OAAO,MAAM,UAAQ;AACnC,UAAI,EAAC,QAAQ,SAAS,WAAU,KAAK;AAErC,UAAG,KAAK,mBAAmB,GAAE;AAC3B,aAAK,aAAa,KAAK,IAAI;AAAA,MAC7B,OAAO;AACL,aAAK,QAAQ,SAAS,SAAS,KAAK,OAAO,MAAM,QAAQ,OAAO;AAChE,eAAO;AAAA,MACT;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,OAAO,UAAS;AAAE,SAAK,OAAO,SAAS;AAAA,EAAS;AAAA,EAEhD,QAAQ,UAAS;AAAE,SAAK,OAAO,UAAU;AAAA,EAAS;AAAA,EAElD,OAAO,UAAS;AAAE,SAAK,OAAO,SAAS;AAAA,EAAS;AAAA,EAEhD,KAAK,IAAG;AAAE,WAAO,SAAS,KAAK,KAAK,OAAO,EAAE;AAAA,EAAE;AAAA,EAE/C,qBAAoB;AAClB,WAAO,CAAC,KAAK,WAAY,KAAK,YAAY,KAAK,QAAQ,QAAQ;AAAA,EACjE;AAAA,EAYA,OAAO,UAAU,cAAc,UAAU,QAAQ,SAAQ;AACvD,QAAI,QAAQ,KAAK,MAAM,YAAY;AACnC,QAAI,QAAQ,CAAC;AACb,QAAI,SAAS,CAAC;AAEd,SAAK,IAAI,OAAO,CAAC,KAAK,aAAa;AACjC,UAAG,CAAC,SAAS,MAAK;AAChB,eAAO,OAAO;AAAA,MAChB;AAAA,IACF,CAAC;AACD,SAAK,IAAI,UAAU,CAAC,KAAK,gBAAgB;AACvC,UAAI,kBAAkB,MAAM;AAC5B,UAAG,iBAAgB;AACjB,YAAI,UAAU,YAAY,MAAM,IAAI,OAAK,EAAE,OAAO;AAClD,YAAI,UAAU,gBAAgB,MAAM,IAAI,OAAK,EAAE,OAAO;AACtD,YAAI,cAAc,YAAY,MAAM,OAAO,OAAK,QAAQ,QAAQ,EAAE,OAAO,IAAI,CAAC;AAC9E,YAAI,YAAY,gBAAgB,MAAM,OAAO,OAAK,QAAQ,QAAQ,EAAE,OAAO,IAAI,CAAC;AAChF,YAAG,YAAY,SAAS,GAAE;AACxB,gBAAM,OAAO;AACb,gBAAM,KAAK,QAAQ;AAAA,QACrB;AACA,YAAG,UAAU,SAAS,GAAE;AACtB,iBAAO,OAAO,KAAK,MAAM,eAAe;AACxC,iBAAO,KAAK,QAAQ;AAAA,QACtB;AAAA,MACF,OAAO;AACL,cAAM,OAAO;AAAA,MACf;AAAA,IACF,CAAC;AACD,WAAO,KAAK,SAAS,OAAO,EAAC,OAAc,OAAc,GAAG,QAAQ,OAAO;AAAA,EAC7E;AAAA,EAWA,OAAO,SAAS,OAAO,MAAM,QAAQ,SAAQ;AAC3C,QAAI,EAAC,OAAO,WAAU,KAAK,MAAM,IAAI;AACrC,QAAG,CAAC,QAAO;AAAE,eAAS,WAAW;AAAA,MAAE;AAAA,IAAE;AACrC,QAAG,CAAC,SAAQ;AAAE,gBAAU,WAAW;AAAA,MAAE;AAAA,IAAE;AAEvC,SAAK,IAAI,OAAO,CAAC,KAAK,gBAAgB;AACpC,UAAI,kBAAkB,MAAM;AAC5B,YAAM,OAAO,KAAK,MAAM,WAAW;AACnC,UAAG,iBAAgB;AACjB,YAAI,aAAa,MAAM,KAAK,MAAM,IAAI,OAAK,EAAE,OAAO;AACpD,YAAI,WAAW,gBAAgB,MAAM,OAAO,OAAK,WAAW,QAAQ,EAAE,OAAO,IAAI,CAAC;AAClF,cAAM,KAAK,MAAM,QAAQ,GAAG,QAAQ;AAAA,MACtC;AACA,aAAO,KAAK,iBAAiB,WAAW;AAAA,IAC1C,CAAC;AACD,SAAK,IAAI,QAAQ,CAAC,KAAK,iBAAiB;AACtC,UAAI,kBAAkB,MAAM;AAC5B,UAAG,CAAC,iBAAgB;AAAE;AAAA,MAAO;AAC7B,UAAI,eAAe,aAAa,MAAM,IAAI,OAAK,EAAE,OAAO;AACxD,sBAAgB,QAAQ,gBAAgB,MAAM,OAAO,OAAK;AACxD,eAAO,aAAa,QAAQ,EAAE,OAAO,IAAI;AAAA,MAC3C,CAAC;AACD,cAAQ,KAAK,iBAAiB,YAAY;AAC1C,UAAG,gBAAgB,MAAM,WAAW,GAAE;AACpC,eAAO,MAAM;AAAA,MACf;AAAA,IACF,CAAC;AACD,WAAO;AAAA,EACT;AAAA,EAUA,OAAO,KAAK,WAAW,SAAQ;AAC7B,QAAG,CAAC,SAAQ;AAAE,gBAAU,SAAU,KAAK,MAAK;AAAE,eAAO;AAAA,MAAK;AAAA,IAAE;AAE5D,WAAO,KAAK,IAAI,WAAW,CAAC,KAAK,aAAa;AAC5C,aAAO,QAAQ,KAAK,QAAQ;AAAA,IAC9B,CAAC;AAAA,EACH;AAAA,EAIA,OAAO,IAAI,KAAK,MAAK;AACnB,WAAO,OAAO,oBAAoB,GAAG,EAAE,IAAI,SAAO,KAAK,KAAK,IAAI,IAAI,CAAC;AAAA,EACvE;AAAA,EAEA,OAAO,
MAAM,KAAI;AAAE,WAAO,KAAK,MAAM,KAAK,UAAU,GAAG,CAAC;AAAA,EAAE;AAC5D;;;AC5JA,IAAO,qBAAQ;AAAA,EACb,eAAe;AAAA,EACf,aAAa;AAAA,EACb,OAAO,EAAC,MAAM,GAAG,OAAO,GAAG,WAAW,EAAC;AAAA,EAEvC,OAAO,KAAK,UAAS;AACnB,QAAG,IAAI,QAAQ,gBAAgB,aAAY;AACzC,aAAO,SAAS,KAAK,aAAa,GAAG,CAAC;AAAA,IACxC,OAAO;AACL,UAAI,UAAU,CAAC,IAAI,UAAU,IAAI,KAAK,IAAI,OAAO,IAAI,OAAO,IAAI,OAAO;AACvE,aAAO,SAAS,KAAK,UAAU,OAAO,CAAC;AAAA,IACzC;AAAA,EACF;AAAA,EAEA,OAAO,YAAY,UAAS;AAC1B,QAAG,WAAW,gBAAgB,aAAY;AACxC,aAAO,SAAS,KAAK,aAAa,UAAU,CAAC;AAAA,IAC/C,OAAO;AACL,UAAI,CAAC,UAAU,KAAK,OAAO,OAAO,WAAW,KAAK,MAAM,UAAU;AAClE,aAAO,SAAS,EAAC,UAAU,KAAK,OAAO,OAAO,QAAO,CAAC;AAAA,IACxD;AAAA,EACF;AAAA,EAIA,aAAa,SAAQ;AACnB,QAAI,EAAC,UAAU,KAAK,OAAO,OAAO,YAAW;AAC7C,QAAI,aAAa,KAAK,cAAc,SAAS,SAAS,IAAI,SAAS,MAAM,SAAS,MAAM;AACxF,QAAI,SAAS,IAAI,YAAY,KAAK,gBAAgB,UAAU;AAC5D,QAAI,OAAO,IAAI,SAAS,MAAM;AAC9B,QAAI,SAAS;AAEb,SAAK,SAAS,UAAU,KAAK,MAAM,IAAI;AACvC,SAAK,SAAS,UAAU,SAAS,MAAM;AACvC,SAAK,SAAS,UAAU,IAAI,MAAM;AAClC,SAAK,SAAS,UAAU,MAAM,MAAM;AACpC,SAAK,SAAS,UAAU,MAAM,MAAM;AACpC,UAAM,KAAK,UAAU,UAAQ,KAAK,SAAS,UAAU,KAAK,WAAW,CAAC,CAAC,CAAC;AACxE,UAAM,KAAK,KAAK,UAAQ,KAAK,SAAS,UAAU,KAAK,WAAW,CAAC,CAAC,CAAC;AACnE,UAAM,KAAK,OAAO,UAAQ,KAAK,SAAS,UAAU,KAAK,WAAW,CAAC,CAAC,CAAC;AACrE,UAAM,KAAK,OAAO,UAAQ,KAAK,SAAS,UAAU,KAAK,WAAW,CAAC,CAAC,CAAC;AAErE,QAAI,WAAW,IAAI,WAAW,OAAO,aAAa,QAAQ,UAAU;AACpE,aAAS,IAAI,IAAI,WAAW,MAAM,GAAG,CAAC;AACtC,aAAS,IAAI,IAAI,WAAW,OAAO,GAAG,OAAO,UAAU;AAEvD,WAAO,SAAS;AAAA,EAClB;AAAA,EAEA,aAAa,QAAO;AAClB,QAAI,OAAO,IAAI,SAAS,MAAM;AAC9B,QAAI,OAAO,KAAK,SAAS,CAAC;AAC1B,QAAI,UAAU,IAAI,YAAY;AAC9B,YAAO;AAAA,WACA,KAAK,MAAM;AAAM,eAAO,KAAK,WAAW,QAAQ,MAAM,OAAO;AAAA,WAC7D,KAAK,MAAM;AAAO,eAAO,KAAK,YAAY,QAAQ,MAAM,OAAO;AAAA,WAC/D,KAAK,MAAM;AAAW,eAAO,KAAK,gBAAgB,QAAQ,MAAM,OAAO;AAAA;AAAA,EAEhF;AAAA,EAEA,WAAW,QAAQ,MAAM,SAAQ;AAC/B,QAAI,cAAc,KAAK,SAAS,CAAC;AACjC,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB,KAAK,cAAc;AACrD,QAAI,UAAU,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,WAAW,CAAC;AACvE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AACjD,WAAO,EAAC,UAAU,SAAS,KAAK,MAAM,OAAc,OAAc,SAAS,KAAI;AAAA,EACjF;AAAA,EAEA,YAAY,QAAQ,MAAM,SAAQ;AAChC,QAAI,cAAc,KAAK,SAAS,CAAC;AACjC,QAAI,UAAU,KAAK,SAAS,CAAC;AAC7B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB,KAAK;AACvC,QAAI,UAAU,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,WAAW,CAAC;AACvE,aAAS,SAAS;AAClB,QAAI,MAAM,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,OAAO,CAAC;AAC/D,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AACjD,QAAI,UAAU,EAAC,QAAQ,OAAO,UAAU,KAAI;AAC5C,WAAO,EAAC,UAAU,SAAS,KAAU,OAAc,OAAO,eAAe,OAAO,QAAgB;AAAA,EAClG;AAAA,EAEA,gBAAgB,QAAQ,MAAM,SAAQ;AACpC,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,YAAY,KAAK,SAAS,CAAC;AAC/B,QAAI,SAAS,KAAK,gBAAgB;AAClC,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,QAAQ,QAAQ,OAAO,OAAO,MAAM,QAAQ,SAAS,SAAS,CAAC;AACnE,aAAS,SAAS;AAClB,QAAI,OAAO,OAAO,MAAM,QAAQ,OAAO,UAAU;AAEjD,WAAO,EAAC,UAAU,MAAM,KAAK,MAAM,OAAc,OAAc,SAAS,KAAI;AAAA,EAC9E;AACF;;;ACtBA,IAAqB,SAArB,MAA4B;AAAA,EAC1B,YAAY,UAAU,OAAO,CAAC,GAAE;AAC9B,SAAK,uBAAuB,EAAC,MAAM,CAAC,GAAG,OAAO,CAAC,GAAG,OAAO,CAAC,GAAG,SAAS,CAAC,EAAC;AACxE,SAAK,WAAW,CAAC;AACjB,SAAK,aAAa,CAAC;AACnB,SAAK,MAAM;AACX,SAAK,UAAU,KAAK,WAAW;AAC/B,SAAK,YAAY,KAAK,aAAa,OAAO,aAAa;AACvD,SAAK,yBAAyB;AAC9B,SAAK,iBAAiB,mBAAW,OAAO,KAAK,kBAAU;AACvD,SAAK
,iBAAiB,mBAAW,OAAO,KAAK,kBAAU;AACvD,SAAK,gBAAgB;AACrB,SAAK,aAAa,KAAK,cAAc;AACrC,SAAK,eAAe;AACpB,QAAG,KAAK,cAAc,UAAS;AAC7B,WAAK,SAAS,KAAK,UAAU,KAAK;AAClC,WAAK,SAAS,KAAK,UAAU,KAAK;AAAA,IACpC,OAAO;AACL,WAAK,SAAS,KAAK;AACnB,WAAK,SAAS,KAAK;AAAA,IACrB;AACA,QAAI,+BAA+B;AACnC,QAAG,aAAa,UAAU,kBAAiB;AACzC,gBAAU,iBAAiB,YAAY,QAAM;AAC3C,YAAG,KAAK,MAAK;AACX,eAAK,WAAW;AAChB,yCAA+B,KAAK;AAAA,QACtC;AAAA,MACF,CAAC;AACD,gBAAU,iBAAiB,YAAY,QAAM;AAC3C,YAAG,iCAAiC,KAAK,cAAa;AACpD,yCAA+B;AAC/B,eAAK,QAAQ;AAAA,QACf;AAAA,MACF,CAAC;AAAA,IACH;AACA,SAAK,sBAAsB,KAAK,uBAAuB;AACvD,SAAK,gBAAgB,CAAC,UAAU;AAC9B,UAAG,KAAK,eAAc;AACpB,eAAO,KAAK,cAAc,KAAK;AAAA,MACjC,OAAO;AACL,eAAO,CAAC,KAAM,KAAM,GAAI,EAAE,QAAQ,MAAM;AAAA,MAC1C;AAAA,IACF;AACA,SAAK,mBAAmB,CAAC,UAAU;AACjC,UAAG,KAAK,kBAAiB;AACvB,eAAO,KAAK,iBAAiB,KAAK;AAAA,MACpC,OAAO;AACL,eAAO,CAAC,IAAI,IAAI,KAAK,KAAK,KAAK,KAAK,KAAK,KAAM,GAAI,EAAE,QAAQ,MAAM;AAAA,MACrE;AAAA,IACF;AACA,SAAK,SAAS,KAAK,UAAU;AAC7B,SAAK,oBAAoB,KAAK,qBAAqB;AACnD,SAAK,SAAS,QAAQ,KAAK,UAAU,CAAC,CAAC;AACvC,SAAK,WAAW,GAAG,YAAY,WAAW;AAC1C,SAAK,MAAM,KAAK,OAAO;AACvB,SAAK,iBAAiB;AACtB,SAAK,sBAAsB;AAC3B,SAAK,iBAAiB,IAAI,MAAM,MAAM;AACpC,WAAK,SAAS,MAAM,KAAK,QAAQ,CAAC;AAAA,IACpC,GAAG,KAAK,gBAAgB;AAAA,EAC1B;AAAA,EAKA,uBAAsB;AAAE,WAAO;AAAA,EAAS;AAAA,EAQxC,iBAAiB,cAAa;AAC5B,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,eAAe,MAAM;AAC1B,SAAK,aAAa,CAAC;AACnB,QAAG,KAAK,MAAK;AACX,WAAK,KAAK,MAAM;AAChB,WAAK,OAAO;AAAA,IACd;AACA,SAAK,YAAY;AAAA,EACnB;AAAA,EAOA,WAAU;AAAE,WAAO,SAAS,SAAS,MAAM,QAAQ,IAAI,QAAQ;AAAA,EAAK;AAAA,EAOpE,cAAa;AACX,QAAI,MAAM,KAAK,aACb,KAAK,aAAa,KAAK,UAAU,KAAK,OAAO,CAAC,GAAG,EAAC,KAAK,KAAK,IAAG,CAAC;AAClE,QAAG,IAAI,OAAO,CAAC,MAAM,KAAI;AAAE,aAAO;AAAA,IAAI;AACtC,QAAG,IAAI,OAAO,CAAC,MAAM,KAAI;AAAE,aAAO,GAAG,KAAK,SAAS,KAAK;AAAA,IAAM;AAE9D,WAAO,GAAG,KAAK,SAAS,OAAO,SAAS,OAAO;AAAA,EACjD;AAAA,EAWA,WAAW,UAAU,MAAM,QAAO;AAChC,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,eAAe,MAAM;AAC1B,SAAK,SAAS,UAAU,MAAM,MAAM;AAAA,EACtC;AAAA,EASA,QAAQ,QAAO;AACb,QAAG,QAAO;AACR,iBAAW,QAAQ,IAAI,yFAAyF;AAChH,WAAK,SAAS,QAAQ,MAAM;AAAA,IAC9B;AACA,QAAG,KAAK,MAAK;AAAE;AAAA,IAAO;AAEtB,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,OAAO,IAAI,KAAK,UAAU,KAAK,YAAY,CAAC;AACjD,SAAK,KAAK,aAAa,KAAK;AAC5B,SAAK,KAAK,UAAU,KAAK;AACzB,SAAK,KAAK,SAAS,MAAM,KAAK,WAAW;AACzC,SAAK,KAAK,UAAU,WAAS,KAAK,YAAY,KAAK;AACnD,SAAK,KAAK,YAAY,WAAS,KAAK,cAAc,KAAK;AACvD,SAAK,KAAK,UAAU,WAAS,KAAK,YAAY,KAAK;AAAA,EACrD;AAAA,EAQA,IAAI,MAAM,KAAK,MAAK;AAAE,SAAK,OAAO,MAAM,KAAK,IAAI;AAAA,EAAE;AAAA,EAKnD,YAAW;AAAE,WAAO,KAAK,WAAW;AAAA,EAAK;AAAA,EASzC,OAAO,UAAS;AACd,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,KAAK,KAAK,CAAC,KAAK,QAAQ,CAAC;AACnD,WAAO;AAAA,EACT;AAAA,EAMA,QAAQ,UAAS;AACf,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,MAAM,KAAK,CAAC,KAAK,QAAQ,CAAC;AACpD,WAAO;AAAA,EACT;AAAA,EASA,QAAQ,UAAS;AACf,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,MAAM,KAAK,CAAC,KAAK,QAAQ,CAAC;AACpD,WAAO;AAAA,EACT;AAAA,EAMA,UAAU,UAAS;AACjB,QAAI,MAAM,KAAK,QAAQ;AACvB,SAAK,qBAAqB,QAAQ,KAAK,CAAC,KAAK,QAAQ,CAAC;AACtD,WAAO;AAAA,EACT;AAAA,EAQA,KAAK,UAAS;AACZ,QAAG,CAAC,KAAK,YAAY,GAAE;AAAE,aAAO;AAAA,IAAM;AACtC,QAAI,MAAM,KAAK,QAAQ;AACvB,QAAI,YAAY,KAAK,IAAI;AACzB,SAAK,KAAK,EAAC,OAAO,WAAW,OAAO,aAAa,SAAS,CAAC,GAAG,IAAQ,CAAC;AACvE,QAAI,WAAW,KAAK,UAAU,SAAO;AACnC,UAAG,IAAI,QAAQ,KAAI;AACjB,aAAK,IAAI,CAAC,QAAQ,CAAC;AACnB,iBAAS,KAAK,IAAI,IAAI,SAAS;AAAA,MACjC;AAAA,IACF,CAAC;AACD,WAAO;AAAA,EACT;AAAA,EAKA,aAAY;AACV,QAAG,KAAK,UAAU;AAAG,WAAK,IAAI,aAAa,gBAAgB,KAAK,YAAY,GAAG;AAC/E,SAAK,gBAAgB;AACrB,SAAK;AACL,SAAK,gBAAgB;AACrB,SAAK,eAAe,MAAM;AAC1B,SAAK,eAAe;AACpB,SAAK,qBAAqB,KAAK,QAAQ,CAAC,CAAC,EAAE,cAAc,SAAS,CAAC;AAAA,EACrE;AAAA,EAMA,mBAAkB;AAChB,QAAG,KAAK,qBAAoB;AAC1B,WAAK,sBAAsB;AAC3B,UAAG,KAAK,U
AAU,GAAE;AAAE,aAAK,IAAI,aAAa,0DAA0D;AAAA,MAAE;AACxG,WAAK,cAAc,mBAAmB;AAAA,IACxC;AAAA,EACF;AAAA,EAEA,iBAAgB;AACd,QAAG,KAAK,QAAQ,KAAK,KAAK,eAAc;AAAE;AAAA,IAAO;AACjD,SAAK,sBAAsB;AAC3B,iBAAa,KAAK,cAAc;AAChC,eAAW,MAAM,KAAK,cAAc,GAAG,KAAK,mBAAmB;AAAA,EACjE;AAAA,EAEA,SAAS,UAAU,MAAM,QAAO;AAC9B,QAAG,CAAC,KAAK,MAAK;AACZ,aAAO,YAAY,SAAS;AAAA,IAC9B;AAEA,SAAK,kBAAkB,MAAM;AAC3B,UAAG,KAAK,MAAK;AACX,YAAG,MAAK;AAAE,eAAK,KAAK,MAAM,MAAM,UAAU,EAAE;AAAA,QAAE,OAAO;AAAE,eAAK,KAAK,MAAM;AAAA,QAAE;AAAA,MAC3E;AAEA,WAAK,oBAAoB,MAAM;AAC7B,YAAG,KAAK,MAAK;AACX,eAAK,KAAK,UAAU,WAAW;AAAA,UAAE;AACjC,eAAK,OAAO;AAAA,QACd;AAEA,oBAAY,SAAS;AAAA,MACvB,CAAC;AAAA,IACH,CAAC;AAAA,EACH;AAAA,EAEA,kBAAkB,UAAU,QAAQ,GAAE;AACpC,QAAG,UAAU,KAAK,CAAC,KAAK,QAAQ,CAAC,KAAK,KAAK,gBAAe;AACxD,eAAS;AACT;AAAA,IACF;AAEA,eAAW,MAAM;AACf,WAAK,kBAAkB,UAAU,QAAQ,CAAC;AAAA,IAC5C,GAAG,MAAM,KAAK;AAAA,EAChB;AAAA,EAEA,oBAAoB,UAAU,QAAQ,GAAE;AACtC,QAAG,UAAU,KAAK,CAAC,KAAK,QAAQ,KAAK,KAAK,eAAe,cAAc,QAAO;AAC5E,eAAS;AACT;AAAA,IACF;AAEA,eAAW,MAAM;AACf,WAAK,oBAAoB,UAAU,QAAQ,CAAC;AAAA,IAC9C,GAAG,MAAM,KAAK;AAAA,EAChB;AAAA,EAEA,YAAY,OAAM;AAChB,QAAI,YAAY,SAAS,MAAM;AAC/B,QAAG,KAAK,UAAU;AAAG,WAAK,IAAI,aAAa,SAAS,KAAK;AACzD,SAAK,iBAAiB;AACtB,iBAAa,KAAK,cAAc;AAChC,QAAG,CAAC,KAAK,iBAAiB,cAAc,KAAK;AAC3C,WAAK,eAAe,gBAAgB;AAAA,IACtC;AACA,SAAK,qBAAqB,MAAM,QAAQ,CAAC,CAAC,EAAE,cAAc,SAAS,KAAK,CAAC;AAAA,EAC3E;AAAA,EAKA,YAAY,OAAM;AAChB,QAAG,KAAK,UAAU;AAAG,WAAK,IAAI,aAAa,KAAK;AAChD,QAAI,kBAAkB,KAAK;AAC3B,QAAI,oBAAoB,KAAK;AAC7B,SAAK,qBAAqB,MAAM,QAAQ,CAAC,CAAC,EAAE,cAAc;AACxD,eAAS,OAAO,iBAAiB,iBAAiB;AAAA,IACpD,CAAC;AACD,QAAG,oBAAoB,KAAK,aAAa,oBAAoB,GAAE;AAC7D,WAAK,iBAAiB;AAAA,IACxB;AAAA,EACF;AAAA,EAKA,mBAAkB;AAChB,SAAK,SAAS,QAAQ,aAAW;AAC/B,UAAG,CAAE,SAAQ,UAAU,KAAK,QAAQ,UAAU,KAAK,QAAQ,SAAS,IAAG;AACrE,gBAAQ,QAAQ,eAAe,KAAK;AAAA,MACtC;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAKA,kBAAiB;AACf,YAAO,KAAK,QAAQ,KAAK,KAAK;AAAA,WACvB,cAAc;AAAY,eAAO;AAAA,WACjC,cAAc;AAAM,eAAO;AAAA,WAC3B,cAAc;AAAS,eAAO;AAAA;AAC1B,eAAO;AAAA;AAAA,EAEpB;AAAA,EAKA,cAAa;AAAE,WAAO,KAAK,gBAAgB,MAAM;AAAA,EAAO;AAAA,EAOxD,OAAO,SAAQ;AACb,SAAK,IAAI,QAAQ,eAAe;AAChC,SAAK,WAAW,KAAK,SAAS,OAAO,OAAK,EAAE,QAAQ,MAAM,QAAQ,QAAQ,CAAC;AAAA,EAC7E;AAAA,EAQA,IAAI,MAAK;AACP,aAAQ,OAAO,KAAK,sBAAqB;AACvC,WAAK,qBAAqB,OAAO,KAAK,qBAAqB,KAAK,OAAO,CAAC,CAAC,SAAS;AAChF,eAAO,KAAK,QAAQ,GAAG,MAAM;AAAA,MAC/B,CAAC;AAAA,IACH;AAAA,EACF;AAAA,EASA,QAAQ,OAAO,aAAa,CAAC,GAAE;AAC7B,QAAI,OAAO,IAAI,QAAQ,OAAO,YAAY,IAAI;AAC9C,SAAK,SAAS,KAAK,IAAI;AACvB,WAAO;AAAA,EACT;AAAA,EAKA,KAAK,MAAK;AACR,QAAG,KAAK,UAAU,GAAE;AAClB,UAAI,EAAC,OAAO,OAAO,SAAS,KAAK,aAAY;AAC7C,WAAK,IAAI,QAAQ,GAAG,SAAS,UAAU,aAAa,QAAQ,OAAO;AAAA,IACrE;AAEA,QAAG,KAAK,YAAY,GAAE;AACpB,WAAK,OAAO,MAAM,YAAU,KAAK,KAAK,KAAK,MAAM,CAAC;AAAA,IACpD,OAAO;AACL,WAAK,WAAW,KAAK,MAAM,KAAK,OAAO,MAAM,YAAU,KAAK,KAAK,KAAK,MAAM,CAAC,CAAC;AAAA,IAChF;AAAA,EACF;AAAA,EAMA,UAAS;AACP,QAAI,SAAS,KAAK,MAAM;AACxB,QAAG,WAAW,KAAK,KAAI;AAAE,WAAK,MAAM;AAAA,IAAE,OAAO;AAAE,WAAK,MAAM;AAAA,IAAO;AAEjE,WAAO,KAAK,IAAI,SAAS;AAAA,EAC3B;AAAA,EAEA,gBAAe;AACb,QAAG,KAAK,uBAAuB,CAAC,KAAK,YAAY,GAAE;AAAE;AAAA,IAAO;AAC5D,SAAK,sBAAsB,KAAK,QAAQ;AACxC,SAAK,KAAK,EAAC,OAAO,WAAW,OAAO,aAAa,SAAS,CAAC,GAAG,KAAK,KAAK,oBAAmB,CAAC;AAC5F,SAAK,iBAAiB,WAAW,MAAM,KAAK,iBAAiB,GAAG,KAAK,mBAAmB;AAAA,EAC1F;AAAA,EAEA,cAAc,QAAO;AACnB,SAAK,gBAAgB;AACrB,QAAG,KAAK,YAAY,GAAE;AAAE,WAAK,KAAK,MAAM,iBAAiB,MAAM;AAAA,IAAE;AAAA,EACnE;AAAA,EAEA,kBAAiB;AACf,QAAG,KAAK,YAAY,KAAK,KAAK,WAAW,SAAS,GAAE;AAClD,WAAK,WAAW,QAAQ,cAAY,SAAS,CAAC;AAC9C,WAAK,aAAa,CAAC;AAAA,IACrB;AAAA,EACF;AAAA,EAEA,cAAc,YAAW;AACvB,SAAK,OAAO,WAAW,MAAM,SAAO;AAClC,UAAI,EAAC,OAAO,OAAO,SAAS,KAAK,aAAY;AAC7C,UAAG,OAAO,QAAQ,KAAK,qBAAoB;AACzC,qBAAa,KAAK,cAA
c;AAChC,aAAK,sBAAsB;AAC3B,mBAAW,MAAM,KAAK,cAAc,GAAG,KAAK,mBAAmB;AAAA,MACjE;AAEA,UAAG,KAAK,UAAU;AAAG,aAAK,IAAI,WAAW,GAAG,QAAQ,UAAU,MAAM,SAAS,SAAS,OAAO,MAAM,MAAM,OAAO,MAAM,OAAO;AAE7H,eAAQ,IAAI,GAAG,IAAI,KAAK,SAAS,QAAQ,KAAI;AAC3C,cAAM,UAAU,KAAK,SAAS;AAC9B,YAAG,CAAC,QAAQ,SAAS,OAAO,OAAO,SAAS,QAAQ,GAAE;AAAE;AAAA,QAAS;AACjE,gBAAQ,QAAQ,OAAO,SAAS,KAAK,QAAQ;AAAA,MAC/C;AAEA,eAAQ,IAAI,GAAG,IAAI,KAAK,qBAAqB,QAAQ,QAAQ,KAAI;AAC/D,YAAI,CAAC,EAAE,YAAY,KAAK,qBAAqB,QAAQ;AACrD,iBAAS,GAAG;AAAA,MACd;AAAA,IACF,CAAC;AAAA,EACH;AAAA,EAEA,eAAe,OAAM;AACnB,QAAI,aAAa,KAAK,SAAS,KAAK,OAAK,EAAE,UAAU,SAAU,GAAE,SAAS,KAAK,EAAE,UAAU,EAAE;AAC7F,QAAG,YAAW;AACZ,UAAG,KAAK,UAAU;AAAG,aAAK,IAAI,aAAa,4BAA4B,QAAQ;AAC/E,iBAAW,MAAM;AAAA,IACnB;AAAA,EACF;AACF;", + "names": [] +} diff --git a/deps/phoenix/priv/static/phoenix.png b/deps/phoenix/priv/static/phoenix.png new file mode 100644 index 0000000000000000000000000000000000000000..9c81075f63d2151e6f40e9aa66f665749a87cc6a GIT binary patch literal 13900 zcmaL8WmsF?7A@RTTCBLc6?b=ccXxso4H~R1?gT4RtT+@6?yiLril%4@T7niU{_*z6 z{eIkY^CMY%XUs9jnrrU0pClu(+L}t3=w#^6o;|}(O%cy#x4LjZZH1q*$X;nePbVE4Ruj~ha0EO zKNwDso99#XvuEN`AWs{Bi@gtxt-YhOy9C{FXD=O%vz-K;k$?ubhNqmple2Q5m%Uz~ zramCh1t4NaCnZTE4ibGLaI^QZp#izMx_gU)Bn$}9dm*VB;%os*A`rzjVfzrR1HKOd)umm?RCh=|BP9K5_7PY4e00Cyi75Qn=r z{eKwb?Y#kB&YnKb9_}>%FxuF9`1(lDJt_Uy6x=-jOY83a?=n3Vj0LBly^W8Dm%fLG z>wl`K?d0L(;qBz%Nh7BxK%-#;aCZOa_%B{VLsZ4x+sDQoV6P%CLHESK>FjJL%Eu=o zC@9Y_#G@c6$it(+FQO9uXOy|HR6B0DRr--F^NOYxjR*h5u*lKds>A z`IK4S-pkp~-cHfW!;R+eltrEYw-$l_$@lMAyZ^04@PEc~J&ED^XJP+;3;mx{Pu=s+ z@V{;QbnxHCw|9T)cCV+l_Rhg0diIRBPeoovAGCCkhmu7!e=!0j%CIc1U{;0rzhnzj zRH%Ot=y$J%$R~ap!UOQPkR*PGC6W<##xjgp8{rXFTPGUhD7@5RKexzmd%We{#b|6i z`?lh2^&{jx)SK#0PhPgi&eUZ0vBcGiH`@-FoRy{i3j{L(leZ-WVvvA2{XVGbnr9s* zG$JW*Sqd>q(BQkwNG{TIu68tN%oQnb6^FFNR~xPl$I zm|>W*j{xhT(g3sl-2z1KY@&qA0a~--8mlbo6MSY3Sy29DZRC=_#b9K&IcW(xbn3qD zali;DIL*NQ2a>E?#=CXQMk;2IJDpfLGR5_w?UEM;`!OQP>sJa904@JRBdgqw<{A-f zPODilVldJY3tG8mjj<9Cq%HNX;km>BP=EQ!_>VT)lC6`dm~$b&B*aCJ*_t6bQD*XIIA zrrq#>z~6ik=?Q&P-|3PvgPI@=_MRFRi5f&qlac?_B_cT$A11<`f;&+p^s(QUcKGMS zNYwS6+Y109HVx5PCw$%fR|2X^WJR_R&T>NOOaXhEOOBl@ACRbf{Q38g%!l_W!fCv{ zyn=GMr7&FEFtoISlT(_%iFGOyAW*%LTFx{?IMb~HaOTxco0(xXa`wb0B-{sjpkZ9F zbnZMIZIc!;=Qqv2^WY_d{p1IDf88Rxts3(SLO{5`#Xi5aUOr5);GFV06(V2G0%QE` zw{cbL@W!uuqA3n1q)>mMxU?wl*Pwndp(E*^iJ@$Hm4EfeJ`y=_@(E_@&+FH@D;5#% z%5izR;P_>FEfS3Nmq*3SI-GpsAP~&&m$citnCRwyK%Fs4!m6qG(fj((-y-2~&7)oQ z4#JKn4nA=SUWP)V&DUvjP#Hz?-yUdXY;@ zNlmhBn0p;i0j^5OqhqN%)6E;;VN5UVdzE$GmIS%ZKVBDViH>uKNOQ&Uq5yG0Dlp-V zTpnO8cV6#UAk z)?vp{kNcLNu9V6yaw#|j*h9p`zNZJMyYcx_9Zx@es61Md4Nc*y09>UV7@wE@EGya!%G<~=$Cg%(LWWrD<&NXYR$#UpU; zl-N8X3auH&u_czz`2@`)@9^Q(Z%i7Hf=u*EDPZM>R2Fk4J#Q=0-x+Y2G~abPx7&Ra z2NL1RzJ6GzOMmMRqU6 z$VT^YqYCg33>3Q}C1=wdL-qO~RY!>-RljOAeEMmD^wu(R)f~VT!$Ug{0mvR$s&%fPY=gWk9kNN8m)<5-VE?(DW&De z_K7#3AU;h7d9k4~t}aji!~JOUAShjMOMAIETdSX?IMsgoD0hRthVvFz_Pv zdB+jF*ZW#({d2~{sX9F*h~py)k>5uVOoN%aFYVn4R`h41lz|0c2VZIB=nppL5y=g> zu!5%WhCXBkP}Z@2N_Vz!AzjR@qHsS0JYuj-#`U;&ZpDXpK_mAhyos?3Q{PNOL0pmg zC+VYZt}AEuYBcotKWk`m>a(=zjXxDB3#5Um zVOPP7@tHWfoJhBge!5gA4xHSVT7cu2&GC^pQ`A)wCChhgTf&%uxo`T!dK!h-3`){W zpvJr6%XD*gpM-&tSGPXMc(X9$3n{M4OiY7A9Xmh?(uP=TgDFkP-egM4nbFfm?^>b$ zOW3Npm^VN^_io|YL=pYnX73Ft-K|c|A1*#YT?(+WskD4SwQN8cBq))xT(;M{@0~D8 zL`ANR>lb0mKLRtNENx&SAp>P7857a%ZP{0S3snYW+tbd!X-*{GL}**b@G};C z)Q3bSoD}bG=Jx$POx1UDzM= z`-IZDl+GJgv`ehIT0``{&WDsH3nEG03F1%AU(!=nGsjuyzcneB{{lp{>#5)ndCUO;OINf(7fpu|jyopb#q zlcAO8B?*00y0gq?{w~Rm#QuV^oj)tPcv!7-@bCr?Zk?hlTDK)}c8r_PG$e2Sxtqkw znT9qczCHX17&fsDl3Vm2V-Aarj3y0gN1oyt+l*_2>We#0j5b%9+SO=cHnf?jhBVL* zc#p)VMKXMa?+hxBt}v^^v`27e&jC%v7U 
zYKYuMhjG$Ix{NA9pgZ+vM>wy}WFw4vHwJAgeD0=m%D2|9gU5(o73(HHxx~ z$`tS4W>`?peBKOuh2OZWrn>N15K@lt?#^(;0WnTZ?_LtcuN$kZ4>wSZ(5iUWZ$`jTC z_ci7nCc@Rp`ZOBltEe^pK#3|uV{VnV_K305Q3%H-7{5pCjN#f=F$6GY0!$*`&2k!S zIddNLT9i~PSY$C(Vk}fNjSg5anR_qHRGpDH-%`M=-M#Uy)$8I8o`groI|!?V_x3%D z*jIq7JKZ%3t7W0A9=PatJ(#|9PuiW+t}h-&qnBZ5P*GhxNr~gqcYtmMghEcf1;N$b z?-KJjMQTx=;qx4;2QzXIHdtmV{?c(qZn=JMuV7*~^o}L0PZRG-cNY-v$m+tCNWA;qfeK|Ja$ z?dtZ+=kKMyDZQ?#yBJCu@vCPRGRG#W=#Uqy7gWdT#9=CV-aUP``ekX{im2fj$(ICH zrqyj>sx@=@VhTUP^u8#smC#HX@iA!B1&~*#t~u+7Nq74FS*V0Q0?u(R5}(HKHeXU| zaX6UE!_YCc0<@~U?km)OK|HeGDJuLE1en`EE(|f3b_8Kc>^KoR$h}C4y*efcDc79k z)u3b4(j8swz`YC~>rtU}6ui^r7(E_B<4DBV|5_E&6Rp|K-w*sw)y8zPZhwG05z^^w zLRAg*Our%j74=A`>3&;5GjxWvxa*y0L3)y#_vIKsT*HJxThAl=kcG%Qs?J-inZbh@ zq`FJ)@rN?G3!zzcyL6$GtD~<-+L`H#r!{AWlr~}E%2bRDzO|+VWq4@vyEP<&_QmKI7yfHm7c|~ zkdcGa5KJs;WE|^Wm#k^lqqyS>>?&VZTzP8uAppMl3)U|MmG^Sp-h8%HE>eK^IF3|u z6blQxe|+599-P{(w9u$@#Po)>v4I0!Sh_Zp$De)M6#l5 zMLd&@Q!>%r&X>3(dy1Sy?PO++U1`I)&{?M@Uo z%#2bAa3&rk<63k``;b?*UQ=TG&ME|}*pK;D6(8EIW`d64<`Ai~rNBrJ{k%38h0VrZ z)(*?!ceIz6p#l3bgLvo%tKy^07Gr2rg@|ENO0eGhf^tf4;XC)3w)a9%k-CFMjbN)`@oRUehd@f#YrH`!qtJ(}CQ8lR z+MUwQHG!ZjF=2+LRco1w;NA)|e&(F=;@5@~YvQ*}WwH|1 zW{l!fpO$_sGYm*FDc`WXx|&tI;x;P(o+0HlocYS>GuQ0YJ}uF5G$wr!TF%IET{Q4|>d}!k>Q%%+Z{vc^)k{}BmP<=f)KU-84}F(W3?QXO?M&M_+fH%H zP1RGVhy8_TH3xc5er1$IF9!{db){AF1?8D6r6x6UC#X=y=*ObiCe zZ|cKVcuN6?)kxDj?`&dz$0gLFecX{V&Au;2g)e>UH(kt49)MhGU9UX2($=TV6dnKe zCR!eldvubP@OGmDCuf$w`Jo*ml6I!*Z&(Oa{eaWP`8m*aE|7#?ovVrug{PNqINSdu z@u72)Vd`WJ6OYNAB#+hOE$k8B(PtN)wdfZ;ELi6(7IlI>Ir~TU<;xx4Tn0^Lm885k z!2|CbsSv##hl_!eoJ#>wpS`2KtE(5CZ!Hf~l*~7UMiIR+&UO9*juK5%YYJjtkERgP zggP=dxb4%E8W((`2g)%g?g>E+RZW)7*L)HMnl}Lnu;J?<6ODpm3RLPGq6Vl;z|aNp z5*5uzK$K)Bp{dY?A*8crtu--(0(l+bO&*>5!u!KQD+;nt(a~g^`=2T;v-g>ul$x_u zLcQ{AV+YeSFP`@OYqz>QCGH1>^M==xc=@-W?jSBT@vfSWgAluU7WT?eutjJ2$9ZSdl;^rlm2JPtQ%6@Y$l7(6B9 zlqVdq@F&qdugX5%1MkA<3y`rQM$#0zn1``Jaacc^tu(EL=wALU?vJ70Xwx&+^%@ab z;OsbwDLNe;#0Iv-_)%@b(BG3aEi4P?nhDFaEm@06YtqSK88&-%%KNKLjXM)jlt$0d z(q8vr_pCL!w|MrQ((|ceeWT@-V(H#9J;(%sS2B8f8}xNox|N@GD5loR?9+n2fWKZY zc(Y*>gX85*ALqgajeA^)lhbXRioH>St-U3|TRjZd87wh*%kX(J1H3jQhhtV+p3fcPQ>XQUKsF9mm zoH!0Sr&YY;%y1%&bJqhNV_vk;?sx~5__YLXe|G`Bd!GququTI(0J-~}A@a(HCwYmO zWj>cDZ4_FKb}1f&lN4TD2*1zVVhK*wFN*D6oRC-~%)GsE{(N>owOd z%1cRV&^^^z@YP_}sI0j+rz_3|Zk9B;z|^}WEhV^Bpm;=Uf9IpY5Fn6A|FO@j7Z8&B z96ZFHGbnNB^C(Vfa20auH(3;B>~V!Yon}t?kpi_J#_}@sKCrK4uY_Xf`p7hv`XQ=8 zWNp{9H3nF%DY43p1+@_OnTmXtj z%WgVqwJ!5UnSrBy?rhLiXKT?d}y73{iOJdN@mhf#J?H_awxEp#WUbKF{0}s=woC6Y47);j* z8rB1{w*AVT>0NSmFtEae;*67g8T_nxO0c+ov@>{eu5n{@#RGTr>^Bb8=wBEbB;0`7 zz|!xSHUh-AuPL^G!?~=j#GR%GzgKr%icju#i74clZV*{+CP!VXw1lVu78LdOSdw{V z{4*;Lt7ier$fJSEz6+QygOA+}x_4ilo(2pO&gO2#M3YigPU!~HbZzFpPP(m(7_Dq( z6E$iYyBlF8m8$F1Cuz4}csC&yn=cM8WVgfaL&h75{Shd3)~!cR zCrAVcxl!YrKl=V^piF14E39&aLJVb9-eT+g2xImTQ%l7;}SHq_(LSbo^EM-HXXtZ0O zdW3nm2Xc86CsIwEsbP>@Q~2ojkx)cvw^BKDjB5;4cJZr2KyPiMdSz9LK~+wi4%NKr zbN2DsiY=l;nH8!iP250F?V2V~z(9!|pVCyX9mL_@_ zlcc-NP!BZ_1zEf>pRi=1_Kqh(3X+M9b?No%R8SQvDbofi&Fz$Vs(U!_CusVn+==X` z4cUNCy9%^!gq7dHZ(d7yf82(&o(5y7mF`*OIvT28jRocQywzcRqsbN4HuB~hLSmiP z1-e(k^;S23LfRT&ykT>g@~+hOx!lg!Sf~$2v?1w2ja>QgaJtM|?p@SM9&ls$0J<8;>A`IHQY5INUj<+t`aZ}v)4 zTMv2I_QwzEM=Wg(QohmrlBbJ|jcKc6rM(eJ>_{Ce7!j7Wl-87@z;z5`*K8^*wY?^P zXZWbVI~{|7l7A`bsQ034<(8h(+iSK&8}ijuX4p=^0dk;0zaKuYr~S&idu-;u+p3y# zh&LfPIM%YArf&^E-XlY^y8hl$%bp>Gi+MuNLb0pOLODZ47f-(U&F8UH%lFk)H3Pg8 zGX$RR8odn{YWkC>IU_o}?Bgs(hY9Wy8?sIR0}Vgrg%#6#9%R$r^539t@SnujcyONj zpE?(`U`-_m!Nt>6WU8?;PR;ou0f`wuvuj1xX4j}4+M{ZmBHI>~O54)>S3Z}=gNpD= z-B$ESnoSp)Ib~)v6o{j~ZKMpo4IJYIwwCY%v9+$k%2a=ut+ETf&f;R4JYriH_yjfh 
zcF16FMV7{Bm~xVwCmSeQ>{H^VpmBwKi?xX5tMS?s%PV;WKlk>RF2_ zaQ#KT_9dmokkCTOdHzpHF5DT*Q$Z=`2&Z8*iEw|IL>%}ep?*ArUV@HuU70}fr}vsu z7ct2;mYIn^8+D@M!HHQVZamDm4kufo_&Lv2PQ+;2qON&of3i4Z`6^WdW!GxVHw*o( z9RCu?86CO{>RZqmkKJi#IZw5A|C&P3R7~+e1O|KX>AO!{L~~2Q^j{VcJ?fn1_JtHu zo#68?Z;9QhCQ%>Wl+v*xbCBkOYksQ3ErxKmI#@o+=yEv*{noTagX`J);d!Sqs6~1- z_t3kU4AG&!bh}$vq8bSpCgNXZ%R$m zvOkBz6;t?`*dmP4KpQa6S(Tb1v2UM_yTrv=nIeEr4bEdkEf&tcKxgqz=0#_b6#}=d z<1+YBT8K_dgbVSiDuNBJv!Zzw;~H`1CnOI;NRH;M5O3aN0V4|fV%s{@tfO&#!{~vE zXkC?8J?SKAwT&lDA&ld*Yz*V@55gw}#xX07=)to%1He+@{4HiU*{$`=4_`dDSl!dE zrb@kaTRT7dc#5TRzxH}})^%cZIN6|2;?tLujjh6Ku4c*Pw+2LJ{e43$piypJ3@{zz z{ZyQ_eCg6H#lsA4@F@ubKQ?$Sr!)(1u-g0Y@!Y3D0$d`L8{h{xE*7}P)$8&a||XD*TfFRvL{%LTfbnlB1i z`xZ=4^3YZ0(&j19vpsX0>pdpp@?^hP1Lua|`g^OU4F@JZvt-JBeIhxTzTB`_7Ha(C zXpMKEgjelG#+Z1pH3QN?T{LaXLXs&7drY%!CjC6=jey#;hs!{-|i#z2tEed4Ti=&S3x@^6XZrGR|k} znjEuABs|D(T|wc}%1sHwoY(yB{a6Ys6`5RKt#YYI&kJ0bNGe4P*Uq9}0YZR`s>=o) z$^kQp3e)J59I>B@@PGAi_X6G%Sved~($wM_il`m%ViYFIyuN(JJ|msKAXrNRV#341 z1|2JQNES0Z;*5kT&$YHc%^PE`bnRw~uILz)Jn z)rtYuuV1r^>4a@XS-a!^ETgu|Hbj0rKjU`uCKq2mWUW!kEocyb*qm8%j`6#5FX;H5 zH}?G7Z?<6e>UQ1ZW!lOfGLsiJ6Cmv5nnJCrOjaP?lKh2^41eXWTy*hxjZKwSr_VJ}-~$&#D3 zzhiEKdrOMKKU0O4xvH7-t>i*p@I!2=k5-G?6tO+uraKwk8#JkfX*#Z{*%i}i_x~lXo^+A!ibrcM>WX|z89iEn| zyC2#BpijrGcW&p}+^3j>Wt$A*=Jrvh8ETLM8aKVsi0&;hlS@-###$Xy))F)OMv57; zZdh4t?c_)zrcUIaOVOUk1$;wMCE>D~-O=N0NFI9^e^C}x37OgGLo)!Q zl=io=P5JDB<$lI%4Y+J3XEphD`qO&Kd_8!yc<*ECCAvC#XTpXe+6u_cmTjEJ| znoqk>=_ZZ4uO5-(m)F08ceF!p<}!?TgW`7279=mKmj~~5tj;zg?PgUz-)5VMM%0j%)T?pU<0Uk|D3p5{2e??#5jMB{Y!BJEFH zuWNq7jM!7<2zWCvPQRj%cXAC#;y_}2ul?h8L$gjQfeIy;;;WXDudit7Uv|Z2b;SrX zfetgr<80WRG+xgFc;C!8+A#ako200^e2Q~AmM2ENwvrd`El^q3CVWk8#pR}l6cCg~ zUYS?4ylI87x!WdHAgi(~ry661S05Qi1wbZZh3H*x{Rw|u!|$*brVLWole{Fe)at#5 z&|6f+nmc3oc&?6vkxR;joiAOb9VuypZ0J$RUBbNxlH~&My}W2{rLRnL z_-^!!5*@@mLvLnIN0QiIhGHHqzPd<3m6&`Vvw8X{6CQBzCaG00F|!`5<-vmAC>~F}0=9+5g-X4W2>mQBUE2eh0%g|SqINm6Te;DOFibuJZ*{m1m-=$li zA>OF0B&aPG^YmL#sfV^T*RCPN%5N9BL>0$sDyvtimKQ1W9gBJ=5(@^odQd1zJ)8Lo(zG zeg;Iwc}daKZlFmS1a-tPNNEfJ99rixy+0qS+Sm5iq zL+jh*2DCx)TBOktKeP!XXqS-sX*+N5l;5o1VpaD@M%Pak^Vqbsa_Eo0WNcXh8i zafO?AZFRj;yl(n{r6|&IBA_<(2I?rB(2@jt?Fv>m#>YoLznm1vhc1`weTd-;OKNlU z7eAu`QWzX1>w@I0VgfW#HL`x)yyghsLOaU(#V{i%@fmXs*QfgI)M>KgCz&&%`=PNZ zPu+yGi`h*t8-5KMsj5_yxl+d&O}k-3yJGaH4TJX)ynmlzXsKl%oOgmmFTRO-s`ckV z&u!9meAquxYhwk+gHo^`Q|*lIBH2K=|B*NDyfTf|*+wzNwSNZ2hkhakih?%7j(lPT zD;YT{1@b6F_gc~lu)m$%A9Eb*aK&Q@qrFOd-)-p{v7hkz2lg2jw=-pNt0yOAU(svi zLYL#99x*+EkqXq&U$tR)E{^73j>i*upyP+bN9CfUhi~MgD<%5{I+<#AWsg?a)U-af z&|(T&_pI1K{XL`TB94{Ou)PPi5Y+MbOb^}#nvWufpZWaDcRLGjsu}h_miC|C;Ors| z=3G3ILzSiI!nCg+;$03@KDrVVI`VxANUQz+09hW z{~WkYa@aKYcKD$MeY0x*7Sec0vr5BAj`1Ov&~s(J`O2>w{g%{Jq-lIT_L=68?J+E* zGGTu~fpOk97y&7_Diw3aL;G8#ku@_Hyb)LWa$+&s zEF~rPhKO&PraSlge{A(pz0+TTl9mN_uDi-)@vS9E8zK$1amRo!FM&6Ys)yQdvVSt? 
zd&vc0p2sNLeK7sJ7^QO9Xkp(Tm$9A!ml{~8K2#1711%(JGl8Eh9QYUDKEx@cv!JHg)>??HhpzbPA3DM&~U< ze~Rf!mHiBTPgT>F;L?v|Ymp&(l9!ZA&Mt9(uv}|zk8-{XfKyu7vYP#;ao1qBoecXG zs7P|7#x6hY;x|`wfR2^)K5ub~0ncUzK+Ybe)UnPC7iajN`lE-k73KK}UD zKzHTYGesC!j*8N598|aVJHKu;Qd&wK$pOh<2p%XS*W6`g#nH`{4mC<`Tm8tWUzn}AWi3+;%dy%2o{JaR5Qy)!>H z%gz0!Cx`4fqYzD`j6j=|L6X8+kHP1A*E0lNx2(ItObT73J3_eKE@=MB4=jMRRrw62 zG<8C+vWR^_5OLT~3Brb~kl1OQ5_pGlWb@Ulbtbkbg~d5y_X_mvTrZdJ`R2u?sF<7U zZv~d(&CJ-A72TvW_u`}1Z=|JAbP7kMUj`&-f$L>F7R;6ggDkC*jsf|P&oalP8U8fK zT_2wdY0JFNakO#`swMjx zM!cT4Z}M9M_60r_9>16xcaX^`A9gqPZ`l_3nb%}8T`Chs482ZkvJhPcGX?jMR}=ah zTZDVQSSASC6SiqO@{GT!Qk?JszB*o9FY#TP6Dko7-f4$6V16IQQ`bDNN^kJC2IR;t zY?SB&z67>8I0W=}iwTS;u3x6J_59+L8+<7^p24|fLiU+*HlGuF3@?Ppk+A-3MnmFl z)qZ;$wA_$w?+0srI|;Kh_%r5`bfl_d$kA>k$+avzku2rs<@<_TvP^;(tTuzj zhE_CzlafJ^=I2x-PY=Nl5R<=t%`qL1pvH4;}21B9;( zkl_bYZ2+YII)|5v`(DLhC^8SK&@Rg;W2>Er#Wa&~W~5#GeHRr{N`OC4&x8mdeH^(Z zSo~{uE-6NJ{V*qLT*hB@@O-Qm!r>wH*J1pN8Ht>Ri`CHLtL;2>NxDqFb41bk*1z+J zhV>B-vfA2MMCt)_#) z3G~quaUUm>*(ov1gX?+|@8-u$!zgCPz9kxLJH$2OO{(l${;)=ie$@*MH+Dtp83U5!%o~k zPQ8KRJ141&WM*HM=`hd+PDS93YX&}Sllg@j-BHpM?!v8!WeV^^4DX@GQ`sea*>H?=b|NHgB}D2V9jt) zJ=prm-}$6M+ZsPel4vwOBmuhqij3Ujz<~(=Z+%`0#*Vm+M8&7Up%ajiBU{{m!_%D9 z1zJjlE#0`HNju{ds8|+m7h{Hj5#iNXfrHNd}8lmEE zQSW{7z*8sq+W$*S6LniEU?Z!#B?GdWkjUeg4$&N$;$N7gqx*-E<^6-zhv(0nSsJz2 UWxWXg`G1#+f~I_}taaG`2PLnS&Hw-a literal 0 HcmV?d00001 diff --git a/deps/phoenix/priv/templates/phx.gen.auth/_menu.html.heex b/deps/phoenix/priv/templates/phx.gen.auth/_menu.html.heex new file mode 100644 index 0000000..6e8b72a --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/_menu.html.heex @@ -0,0 +1,10 @@ +
+<ul>
+<%%= if @current_<%= schema.singular %> do %>
+  <li><%%= @current_<%= schema.singular %>.email %></li>
+  <li><%%= link "Settings", to: Routes.<%= schema.route_helper %>_settings_path(@conn, :edit) %></li>
+  <li><%%= link "Log out", to: Routes.<%= schema.route_helper %>_session_path(@conn, :delete), method: :delete %></li>
+<%% else %>
+  <li><%%= link "Register", to: Routes.<%= schema.route_helper %>_registration_path(@conn, :new) %></li>
+  <li><%%= link "Log in", to: Routes.<%= schema.route_helper %>_session_path(@conn, :new) %></li>
+<%% end %>
+</ul>
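The menu partial above reads a `@current_<%= schema.singular %>` assign (for the default schema this is `@current_user`) from the connection. In a generated application that assign is set by the `fetch_current_<%= schema.singular %>` plug defined in the auth module further down in this patch, and the partial is normally rendered from the root layout. The sketch below is illustrative only: `AppWeb` and the concrete `user` names are assumptions, not part of this patch.

    # Hypothetical router after wiring in the generated auth plug (names assumed).
    defmodule AppWeb.Router do
      use AppWeb, :router

      # The generated auth module (auth.ex below) defines fetch_current_user/2.
      import AppWeb.UserAuth

      pipeline :browser do
        plug :accepts, ["html"]
        plug :fetch_session
        plug :fetch_live_flash
        plug :put_root_layout, {AppWeb.LayoutView, :root}
        plug :protect_from_forgery
        plug :put_secure_browser_headers
        plug :fetch_current_user  # assigns :current_user, which this menu partial reads
      end
    end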
diff --git a/deps/phoenix/priv/templates/phx.gen.auth/auth.ex b/deps/phoenix/priv/templates/phx.gen.auth/auth.ex new file mode 100644 index 0000000..8fe4a7a --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/auth.ex @@ -0,0 +1,149 @@ +defmodule <%= inspect auth_module %> do + import Plug.Conn + import Phoenix.Controller + + alias <%= inspect context.module %> + alias <%= inspect context.web_module %>.Router.Helpers, as: Routes + + # Make the remember me cookie valid for 60 days. + # If you want bump or reduce this value, also change + # the token expiry itself in <%= inspect schema.alias %>Token. + @max_age 60 * 60 * 24 * 60 + @remember_me_cookie "_<%= web_app_name %>_<%= schema.singular %>_remember_me" + @remember_me_options [sign: true, max_age: @max_age, same_site: "Lax"] + + @doc """ + Logs the <%= schema.singular %> in. + + It renews the session ID and clears the whole session + to avoid fixation attacks. See the renew_session + function to customize this behaviour. + + It also sets a `:live_socket_id` key in the session, + so LiveView sessions are identified and automatically + disconnected on log out. The line can be safely removed + if you are not using LiveView. + """ + def log_in_<%= schema.singular %>(conn, <%= schema.singular %>, params \\ %{}) do + token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + <%= schema.singular %>_return_to = get_session(conn, :<%= schema.singular %>_return_to) + + conn + |> renew_session() + |> put_session(:<%= schema.singular %>_token, token) + |> put_session(:live_socket_id, "<%= schema.plural %>_sessions:#{Base.url_encode64(token)}") + |> maybe_write_remember_me_cookie(token, params) + |> redirect(to: <%= schema.singular %>_return_to || signed_in_path(conn)) + end + + defp maybe_write_remember_me_cookie(conn, token, %{"remember_me" => "true"}) do + put_resp_cookie(conn, @remember_me_cookie, token, @remember_me_options) + end + + defp maybe_write_remember_me_cookie(conn, _token, _params) do + conn + end + + # This function renews the session ID and erases the whole + # session to avoid fixation attacks. If there is any data + # in the session you may want to preserve after log in/log out, + # you must explicitly fetch the session data before clearing + # and then immediately set it after clearing, for example: + # + # defp renew_session(conn) do + # preferred_locale = get_session(conn, :preferred_locale) + # + # conn + # |> configure_session(renew: true) + # |> clear_session() + # |> put_session(:preferred_locale, preferred_locale) + # end + # + defp renew_session(conn) do + conn + |> configure_session(renew: true) + |> clear_session() + end + + @doc """ + Logs the <%= schema.singular %> out. + + It clears all session data for safety. See renew_session. + """ + def log_out_<%= schema.singular %>(conn) do + <%= schema.singular %>_token = get_session(conn, :<%= schema.singular %>_token) + <%= schema.singular %>_token && <%= inspect context.alias %>.delete_session_token(<%= schema.singular %>_token) + + if live_socket_id = get_session(conn, :live_socket_id) do + <%= inspect(endpoint_module) %>.broadcast(live_socket_id, "disconnect", %{}) + end + + conn + |> renew_session() + |> delete_resp_cookie(@remember_me_cookie) + |> redirect(to: "/") + end + + @doc """ + Authenticates the <%= schema.singular %> by looking into the session + and remember me token. 
+ """ + def fetch_current_<%= schema.singular %>(conn, _opts) do + {<%= schema.singular %>_token, conn} = ensure_<%= schema.singular %>_token(conn) + <%= schema.singular %> = <%= schema.singular %>_token && <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(<%= schema.singular %>_token) + assign(conn, :current_<%= schema.singular %>, <%= schema.singular %>) + end + + defp ensure_<%= schema.singular %>_token(conn) do + if <%= schema.singular %>_token = get_session(conn, :<%= schema.singular %>_token) do + {<%= schema.singular %>_token, conn} + else + conn = fetch_cookies(conn, signed: [@remember_me_cookie]) + + if <%= schema.singular %>_token = conn.cookies[@remember_me_cookie] do + {<%= schema.singular %>_token, put_session(conn, :<%= schema.singular %>_token, <%= schema.singular %>_token)} + else + {nil, conn} + end + end + end + + @doc """ + Used for routes that require the <%= schema.singular %> to not be authenticated. + """ + def redirect_if_<%= schema.singular %>_is_authenticated(conn, _opts) do + if conn.assigns[:current_<%= schema.singular %>] do + conn + |> redirect(to: signed_in_path(conn)) + |> halt() + else + conn + end + end + + @doc """ + Used for routes that require the <%= schema.singular %> to be authenticated. + + If you want to enforce the <%= schema.singular %> email is confirmed before + they use the application at all, here would be a good place. + """ + def require_authenticated_<%= schema.singular %>(conn, _opts) do + if conn.assigns[:current_<%= schema.singular %>] do + conn + else + conn + |> put_flash(:error, "You must log in to access this page.") + |> maybe_store_return_to() + |> redirect(to: Routes.<%= schema.route_helper %>_session_path(conn, :new)) + |> halt() + end + end + + defp maybe_store_return_to(%{method: "GET"} = conn) do + put_session(conn, :<%= schema.singular %>_return_to, current_path(conn)) + end + + defp maybe_store_return_to(conn), do: conn + + defp signed_in_path(_conn), do: "/" +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/auth_test.exs b/deps/phoenix/priv/templates/phx.gen.auth/auth_test.exs new file mode 100644 index 0000000..bf26929 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/auth_test.exs @@ -0,0 +1,170 @@ +defmodule <%= inspect auth_module %>Test do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + alias <%= inspect context.module %> + alias <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Auth + import <%= inspect context.module %>Fixtures + + @remember_me_cookie "_<%= web_app_name %>_<%= schema.singular %>_remember_me" + + setup %{conn: conn} do + conn = + conn + |> Map.replace!(:secret_key_base, <%= inspect endpoint_module %>.config(:secret_key_base)) + |> init_test_session(%{}) + + %{<%= schema.singular %>: <%= schema.singular %>_fixture(), conn: conn} + end + + describe "log_in_<%= schema.singular %>/3" do + test "stores the <%= schema.singular %> token in the session", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(conn, <%= schema.singular %>) + assert token = get_session(conn, :<%= schema.singular %>_token) + assert get_session(conn, :live_socket_id) == "<%= schema.plural %>_sessions:#{Base.url_encode64(token)}" + assert redirected_to(conn) == "/" + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(token) + end + + test "clears everything previously stored in the session", %{conn: conn, 
<%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> put_session(:to_be_removed, "value") |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>) + refute get_session(conn, :to_be_removed) + end + + test "redirects to the configured path", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> put_session(:<%= schema.singular %>_return_to, "/hello") |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>) + assert redirected_to(conn) == "/hello" + end + + test "writes a cookie if remember_me is configured", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> fetch_cookies() |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>, %{"remember_me" => "true"}) + assert get_session(conn, :<%= schema.singular %>_token) == conn.cookies[@remember_me_cookie] + + assert %{value: signed_token, max_age: max_age} = conn.resp_cookies[@remember_me_cookie] + assert signed_token != get_session(conn, :<%= schema.singular %>_token) + assert max_age == 5_184_000 + end + end + + describe "logout_<%= schema.singular %>/1" do + test "erases session and cookies", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + <%= schema.singular %>_token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + + conn = + conn + |> put_session(:<%= schema.singular %>_token, <%= schema.singular %>_token) + |> put_req_cookie(@remember_me_cookie, <%= schema.singular %>_token) + |> fetch_cookies() + |> <%= inspect schema.alias %>Auth.log_out_<%= schema.singular %>() + + refute get_session(conn, :<%= schema.singular %>_token) + refute conn.cookies[@remember_me_cookie] + assert %{max_age: 0} = conn.resp_cookies[@remember_me_cookie] + assert redirected_to(conn) == "/" + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(<%= schema.singular %>_token) + end + + test "broadcasts to the given live_socket_id", %{conn: conn} do + live_socket_id = "<%= schema.plural %>_sessions:abcdef-token" + <%= inspect(endpoint_module) %>.subscribe(live_socket_id) + + conn + |> put_session(:live_socket_id, live_socket_id) + |> <%= inspect(schema.alias) %>Auth.log_out_<%= schema.singular %>() + + assert_receive %Phoenix.Socket.Broadcast{event: "disconnect", topic: ^live_socket_id} + end + + test "works even if <%= schema.singular %> is already logged out", %{conn: conn} do + conn = conn |> fetch_cookies() |> <%= inspect schema.alias %>Auth.log_out_<%= schema.singular %>() + refute get_session(conn, :<%= schema.singular %>_token) + assert %{max_age: 0} = conn.resp_cookies[@remember_me_cookie] + assert redirected_to(conn) == "/" + end + end + + describe "fetch_current_<%= schema.singular %>/2" do + test "authenticates <%= schema.singular %> from session", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + <%= schema.singular %>_token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + conn = conn |> put_session(:<%= schema.singular %>_token, <%= schema.singular %>_token) |> <%= inspect schema.alias %>Auth.fetch_current_<%= schema.singular %>([]) + assert conn.assigns.current_<%= schema.singular %>.id == <%= schema.singular %>.id + end + + test "authenticates <%= schema.singular %> from cookies", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + logged_in_conn = + conn |> fetch_cookies() |> <%= inspect schema.alias 
%>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>, %{"remember_me" => "true"}) + + <%= schema.singular %>_token = logged_in_conn.cookies[@remember_me_cookie] + %{value: signed_token} = logged_in_conn.resp_cookies[@remember_me_cookie] + + conn = + conn + |> put_req_cookie(@remember_me_cookie, signed_token) + |> <%= inspect schema.alias %>Auth.fetch_current_<%= schema.singular %>([]) + + assert get_session(conn, :<%= schema.singular %>_token) == <%= schema.singular %>_token + assert conn.assigns.current_<%= schema.singular %>.id == <%= schema.singular %>.id + end + + test "does not authenticate if data is missing", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + _ = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + conn = <%= inspect schema.alias %>Auth.fetch_current_<%= schema.singular %>(conn, []) + refute get_session(conn, :<%= schema.singular %>_token) + refute conn.assigns.current_<%= schema.singular %> + end + end + + describe "redirect_if_<%= schema.singular %>_is_authenticated/2" do + test "redirects if <%= schema.singular %> is authenticated", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> assign(:current_<%= schema.singular %>, <%= schema.singular %>) |> <%= inspect schema.alias %>Auth.redirect_if_<%= schema.singular %>_is_authenticated([]) + assert conn.halted + assert redirected_to(conn) == "/" + end + + test "does not redirect if <%= schema.singular %> is not authenticated", %{conn: conn} do + conn = <%= inspect schema.alias %>Auth.redirect_if_<%= schema.singular %>_is_authenticated(conn, []) + refute conn.halted + refute conn.status + end + end + + describe "require_authenticated_<%= schema.singular %>/2" do + test "redirects if <%= schema.singular %> is not authenticated", %{conn: conn} do + conn = conn |> fetch_flash() |> <%= inspect schema.alias %>Auth.require_authenticated_<%= schema.singular %>([]) + assert conn.halted + assert redirected_to(conn) == Routes.<%= schema.route_helper %>_session_path(conn, :new) + assert get_flash(conn, :error) == "You must log in to access this page." 
+ end + + test "stores the path to redirect to on GET", %{conn: conn} do + halted_conn = + %{conn | path_info: ["foo"], query_string: ""} + |> fetch_flash() + |> <%= inspect schema.alias %>Auth.require_authenticated_<%= schema.singular %>([]) + + assert halted_conn.halted + assert get_session(halted_conn, :<%= schema.singular %>_return_to) == "/foo" + + halted_conn = + %{conn | path_info: ["foo"], query_string: "bar=baz"} + |> fetch_flash() + |> <%= inspect schema.alias %>Auth.require_authenticated_<%= schema.singular %>([]) + + assert halted_conn.halted + assert get_session(halted_conn, :<%= schema.singular %>_return_to) == "/foo?bar=baz" + + halted_conn = + %{conn | path_info: ["foo"], query_string: "bar", method: "POST"} + |> fetch_flash() + |> <%= inspect schema.alias %>Auth.require_authenticated_<%= schema.singular %>([]) + + assert halted_conn.halted + refute get_session(halted_conn, :<%= schema.singular %>_return_to) + end + + test "does not redirect if <%= schema.singular %> is authenticated", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> assign(:current_<%= schema.singular %>, <%= schema.singular %>) |> <%= inspect schema.alias %>Auth.require_authenticated_<%= schema.singular %>([]) + refute conn.halted + refute conn.status + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/confirmation_controller.ex b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_controller.ex new file mode 100644 index 0000000..07c4eac --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_controller.ex @@ -0,0 +1,56 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>ConfirmationController do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + + def new(conn, _params) do + render(conn, "new.html") + end + + def create(conn, %{"<%= schema.singular %>" => %{"email" => email}}) do + if <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(email) do + <%= inspect context.alias %>.deliver_<%= schema.singular %>_confirmation_instructions( + <%= schema.singular %>, + &Routes.<%= schema.route_helper %>_confirmation_url(conn, :edit, &1) + ) + end + + conn + |> put_flash( + :info, + "If your email is in our system and it has not been confirmed yet, " <> + "you will receive an email with instructions shortly." + ) + |> redirect(to: "/") + end + + def edit(conn, %{"token" => token}) do + render(conn, "edit.html", token: token) + end + + # Do not log in the <%= schema.singular %> after confirmation to avoid a + # leaked token giving the <%= schema.singular %> access to the account. + def update(conn, %{"token" => token}) do + case <%= inspect context.alias %>.confirm_<%= schema.singular %>(token) do + {:ok, _} -> + conn + |> put_flash(:info, "<%= schema.human_singular %> confirmed successfully.") + |> redirect(to: "/") + + :error -> + # If there is a current <%= schema.singular %> and the account was already confirmed, + # then odds are that the confirmation link was already visited, either + # by some automation or by the <%= schema.singular %> themselves, so we redirect without + # a warning message. 
+ case conn.assigns do + %{current_<%= schema.singular %>: %{confirmed_at: confirmed_at}} when not is_nil(confirmed_at) -> + redirect(conn, to: "/") + + %{} -> + conn + |> put_flash(:error, "<%= schema.human_singular %> confirmation link is invalid or it has expired.") + |> redirect(to: "/") + end + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/confirmation_controller_test.exs b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_controller_test.exs new file mode 100644 index 0000000..d05b799 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_controller_test.exs @@ -0,0 +1,105 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>ConfirmationControllerTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + alias <%= inspect context.module %> + alias <%= inspect schema.repo %> + import <%= inspect context.module %>Fixtures + + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + describe "GET /<%= schema.plural %>/confirm" do + test "renders the resend confirmation page", %{conn: conn} do + conn = get(conn, Routes.<%= schema.route_helper %>_confirmation_path(conn, :new)) + response = html_response(conn, 200) + assert response =~ "

<h1>Resend confirmation instructions</h1>
" + end + end + + describe "POST /<%= schema.plural %>/confirm" do + @tag :capture_log + test "sends a new confirmation token", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + post(conn, Routes.<%= schema.route_helper %>_confirmation_path(conn, :create), %{ + "<%= schema.singular %>" => %{"email" => <%= schema.singular %>.email} + }) + + assert redirected_to(conn) == "/" + assert get_flash(conn, :info) =~ "If your email is in our system" + assert Repo.get_by!(<%= inspect context.alias %>.<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id).context == "confirm" + end + + test "does not send confirmation token if <%= schema.human_singular %> is confirmed", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + Repo.update!(<%= inspect context.alias %>.<%= inspect schema.alias %>.confirm_changeset(<%= schema.singular %>)) + + conn = + post(conn, Routes.<%= schema.route_helper %>_confirmation_path(conn, :create), %{ + "<%= schema.singular %>" => %{"email" => <%= schema.singular %>.email} + }) + + assert redirected_to(conn) == "/" + assert get_flash(conn, :info) =~ "If your email is in our system" + refute Repo.get_by(<%= inspect context.alias %>.<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + + test "does not send confirmation token if email is invalid", %{conn: conn} do + conn = + post(conn, Routes.<%= schema.route_helper %>_confirmation_path(conn, :create), %{ + "<%= schema.singular %>" => %{"email" => "unknown@example.com"} + }) + + assert redirected_to(conn) == "/" + assert get_flash(conn, :info) =~ "If your email is in our system" + assert Repo.all(<%= inspect context.alias %>.<%= inspect schema.alias %>Token) == [] + end + end + + describe "GET /<%= schema.plural %>/confirm/:token" do + test "renders the confirmation page", %{conn: conn} do + conn = get(conn, Routes.<%= schema.route_helper %>_confirmation_path(conn, :edit, "some-token")) + response = html_response(conn, 200) + assert response =~ "

<h1>Confirm account</h1>
" + + form_action = Routes.<%= schema.route_helper %>_confirmation_path(conn, :update, "some-token") + assert response =~ "action=\"#{form_action}\"" + end + end + + describe "POST /<%= schema.plural %>/confirm/:token" do + test "confirms the given token once", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_confirmation_instructions(<%= schema.singular %>, url) + end) + + conn = post(conn, Routes.<%= schema.route_helper %>_confirmation_path(conn, :update, token)) + assert redirected_to(conn) == "/" + assert get_flash(conn, :info) =~ "<%= schema.human_singular %> confirmed successfully" + assert <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.id).confirmed_at + refute get_session(conn, :<%= schema.singular %>_token) + assert Repo.all(<%= inspect context.alias %>.<%= inspect schema.alias %>Token) == [] + + # When not logged in + conn = post(conn, Routes.<%= schema.route_helper %>_confirmation_path(conn, :update, token)) + assert redirected_to(conn) == "/" + assert get_flash(conn, :error) =~ "<%= schema.human_singular %> confirmation link is invalid or it has expired" + + # When logged in + conn = + build_conn() + |> log_in_<%= schema.singular %>(<%= schema.singular %>) + |> post(Routes.<%= schema.route_helper %>_confirmation_path(conn, :update, token)) + + assert redirected_to(conn) == "/" + refute get_flash(conn, :error) + end + + test "does not confirm email with invalid token", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = post(conn, Routes.<%= schema.route_helper %>_confirmation_path(conn, :update, "oops")) + assert redirected_to(conn) == "/" + assert get_flash(conn, :error) =~ "<%= schema.human_singular %> confirmation link is invalid or it has expired" + refute <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.id).confirmed_at + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/confirmation_edit.html.heex b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_edit.html.heex new file mode 100644 index 0000000..a6db909 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_edit.html.heex @@ -0,0 +1,12 @@ +

+<h1>Confirm account</h1>
+
+<.form let={_f} for={:<%= schema.singular %>} action={Routes.<%= schema.route_helper %>_confirmation_path(@conn, :update, @token)}>
+  <div>
+    <%%= submit "Confirm my account" %>
+  </div>
+</.form>
+
+<p>
+  <%%= link "Register", to: Routes.<%= schema.route_helper %>_registration_path(@conn, :new) %> |
+  <%%= link "Log in", to: Routes.<%= schema.route_helper %>_session_path(@conn, :new) %>
+</p>
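The edit template above submits to the confirmation controller's :update action with the token taken from the URL. For orientation, the excerpt below shows the confirmation routes the generator typically injects into the application router (here, the hypothetical AppWeb.Router sketched after the menu partial above, again assuming the default `user` schema); it is a sketch, not part of this patch.

    # Hypothetical routes for the confirmation flow (names assumed).
    scope "/", AppWeb do
      pipe_through [:browser]

      get "/users/confirm", UserConfirmationController, :new
      post "/users/confirm", UserConfirmationController, :create
      get "/users/confirm/:token", UserConfirmationController, :edit
      post "/users/confirm/:token", UserConfirmationController, :update
    end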
diff --git a/deps/phoenix/priv/templates/phx.gen.auth/confirmation_new.html.heex b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_new.html.heex new file mode 100644 index 0000000..28b6b67 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_new.html.heex @@ -0,0 +1,15 @@ +

+<h1>Resend confirmation instructions</h1>
+
+<.form let={f} for={:<%= schema.singular %>} action={Routes.<%= schema.route_helper %>_confirmation_path(@conn, :create)}>
+  <%%= label f, :email %>
+  <%%= email_input f, :email, required: true %>
+
+  <div>
+    <%%= submit "Resend confirmation instructions" %>
+  </div>
+</.form>
+
+<p>
+  <%%= link "Register", to: Routes.<%= schema.route_helper %>_registration_path(@conn, :new) %> |
+  <%%= link "Log in", to: Routes.<%= schema.route_helper %>_session_path(@conn, :new) %>
+</p>
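The form above uses an atom-based `<.form>` (no changeset), so the submitted params arrive nested under the schema's singular name, which is exactly what the `create/2` clause of the confirmation controller earlier in this patch pattern-matches on. A minimal sketch of that shape, assuming the default `user` schema:

    # Illustrative params produced by submitting the form above (schema assumed to be "user").
    params = %{"user" => %{"email" => "someone@example.com"}}

    # The controller clause `def create(conn, %{"user" => %{"email" => email}})`
    # binds the email field like this:
    %{"user" => %{"email" => email}} = params
    IO.inspect(email)  # => "someone@example.com"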
diff --git a/deps/phoenix/priv/templates/phx.gen.auth/confirmation_view.ex b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_view.ex new file mode 100644 index 0000000..d1ab3bb --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/confirmation_view.ex @@ -0,0 +1,3 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>ConfirmationView do + use <%= inspect context.web_module %>, :view +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/conn_case.exs b/deps/phoenix/priv/templates/phx.gen.auth/conn_case.exs new file mode 100644 index 0000000..c3939fd --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/conn_case.exs @@ -0,0 +1,26 @@ + + @doc """ + Setup helper that registers and logs in <%= schema.plural %>. + + setup :register_and_log_in_<%= schema.singular %> + + It stores an updated connection and a registered <%= schema.singular %> in the + test context. + """ + def register_and_log_in_<%= schema.singular %>(%{conn: conn}) do + <%= schema.singular %> = <%= inspect context.module %>Fixtures.<%= schema.singular %>_fixture() + %{conn: log_in_<%= schema.singular %>(conn, <%= schema.singular %>), <%= schema.singular %>: <%= schema.singular %>} + end + + @doc """ + Logs the given `<%= schema.singular %>` into the `conn`. + + It returns an updated `conn`. + """ + def log_in_<%= schema.singular %>(conn, <%= schema.singular %>) do + token = <%= inspect context.module %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + + conn + |> Phoenix.ConnTest.init_test_session(%{}) + |> Plug.Conn.put_session(:<%= schema.singular %>_token, token) + end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/context_fixtures_functions.ex b/deps/phoenix/priv/templates/phx.gen.auth/context_fixtures_functions.ex new file mode 100644 index 0000000..5a8b147 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/context_fixtures_functions.ex @@ -0,0 +1,24 @@ + def unique_<%= schema.singular %>_email, do: "<%= schema.singular %>#{System.unique_integer()}@example.com" + def valid_<%= schema.singular %>_password, do: "hello world!" + + def valid_<%= schema.singular %>_attributes(attrs \\ %{}) do + Enum.into(attrs, %{ + email: unique_<%= schema.singular %>_email(), + password: valid_<%= schema.singular %>_password() + }) + end + + def <%= schema.singular %>_fixture(attrs \\ %{}) do + {:ok, <%= schema.singular %>} = + attrs + |> valid_<%= schema.singular %>_attributes() + |> <%= inspect context.module %>.register_<%= schema.singular %>() + + <%= schema.singular %> + end + + def extract_<%= schema.singular %>_token(fun) do + {:ok, captured_email} = fun.(&"[TOKEN]#{&1}[TOKEN]") + [_, token | _] = String.split(captured_email.text_body, "[TOKEN]") + token + end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/context_functions.ex b/deps/phoenix/priv/templates/phx.gen.auth/context_functions.ex new file mode 100644 index 0000000..d2e6a5e --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/context_functions.ex @@ -0,0 +1,344 @@ + alias <%= inspect context.module %>.{<%= inspect schema.alias %>, <%= inspect schema.alias %>Token, <%= inspect schema.alias %>Notifier} + + ## Database getters + + @doc """ + Gets a <%= schema.singular %> by email. 
+ + ## Examples + + iex> get_<%= schema.singular %>_by_email("foo@example.com") + %<%= inspect schema.alias %>{} + + iex> get_<%= schema.singular %>_by_email("unknown@example.com") + nil + + """ + def get_<%= schema.singular %>_by_email(email) when is_binary(email) do + Repo.get_by(<%= inspect schema.alias %>, email: email) + end + + @doc """ + Gets a <%= schema.singular %> by email and password. + + ## Examples + + iex> get_<%= schema.singular %>_by_email_and_password("foo@example.com", "correct_password") + %<%= inspect schema.alias %>{} + + iex> get_<%= schema.singular %>_by_email_and_password("foo@example.com", "invalid_password") + nil + + """ + def get_<%= schema.singular %>_by_email_and_password(email, password) + when is_binary(email) and is_binary(password) do + <%= schema.singular %> = Repo.get_by(<%= inspect schema.alias %>, email: email) + if <%= inspect schema.alias %>.valid_password?(<%= schema.singular %>, password), do: <%= schema.singular %> + end + + @doc """ + Gets a single <%= schema.singular %>. + + Raises `Ecto.NoResultsError` if the <%= inspect schema.alias %> does not exist. + + ## Examples + + iex> get_<%= schema.singular %>!(123) + %<%= inspect schema.alias %>{} + + iex> get_<%= schema.singular %>!(456) + ** (Ecto.NoResultsError) + + """ + def get_<%= schema.singular %>!(id), do: Repo.get!(<%= inspect schema.alias %>, id) + + ## <%= schema.human_singular %> registration + + @doc """ + Registers a <%= schema.singular %>. + + ## Examples + + iex> register_<%= schema.singular %>(%{field: value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> register_<%= schema.singular %>(%{field: bad_value}) + {:error, %Ecto.Changeset{}} + + """ + def register_<%= schema.singular %>(attrs) do + %<%= inspect schema.alias %>{} + |> <%= inspect schema.alias %>.registration_changeset(attrs) + |> Repo.insert() + end + + @doc """ + Returns an `%Ecto.Changeset{}` for tracking <%= schema.singular %> changes. + + ## Examples + + iex> change_<%= schema.singular %>_registration(<%= schema.singular %>) + %Ecto.Changeset{data: %<%= inspect schema.alias %>{}} + + """ + def change_<%= schema.singular %>_registration(%<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs \\ %{}) do + <%= inspect schema.alias %>.registration_changeset(<%= schema.singular %>, attrs, hash_password: false) + end + + ## Settings + + @doc """ + Returns an `%Ecto.Changeset{}` for changing the <%= schema.singular %> email. + + ## Examples + + iex> change_<%= schema.singular %>_email(<%= schema.singular %>) + %Ecto.Changeset{data: %<%= inspect schema.alias %>{}} + + """ + def change_<%= schema.singular %>_email(<%= schema.singular %>, attrs \\ %{}) do + <%= inspect schema.alias %>.email_changeset(<%= schema.singular %>, attrs) + end + + @doc """ + Emulates that the email will change without actually changing + it in the database. + + ## Examples + + iex> apply_<%= schema.singular %>_email(<%= schema.singular %>, "valid password", %{email: ...}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> apply_<%= schema.singular %>_email(<%= schema.singular %>, "invalid password", %{email: ...}) + {:error, %Ecto.Changeset{}} + + """ + def apply_<%= schema.singular %>_email(<%= schema.singular %>, password, attrs) do + <%= schema.singular %> + |> <%= inspect schema.alias %>.email_changeset(attrs) + |> <%= inspect schema.alias %>.validate_current_password(password) + |> Ecto.Changeset.apply_action(:update) + end + + @doc """ + Updates the <%= schema.singular %> email using the given token. 
+ + If the token matches, the <%= schema.singular %> email is updated and the token is deleted. + The confirmed_at date is also updated to the current time. + """ + def update_<%= schema.singular %>_email(<%= schema.singular %>, token) do + context = "change:#{<%= schema.singular %>.email}" + + with {:ok, query} <- <%= inspect schema.alias %>Token.verify_change_email_token_query(token, context), + %<%= inspect schema.alias %>Token{sent_to: email} <- Repo.one(query), + {:ok, _} <- Repo.transaction(<%= schema.singular %>_email_multi(<%= schema.singular %>, email, context)) do + :ok + else + _ -> :error + end + end + + defp <%= schema.singular %>_email_multi(<%= schema.singular %>, email, context) do + changeset = + <%= schema.singular %> + |> <%= inspect schema.alias %>.email_changeset(%{email: email}) + |> <%= inspect schema.alias %>.confirm_changeset() + + Ecto.Multi.new() + |> Ecto.Multi.update(:<%= schema.singular %>, changeset) + |> Ecto.Multi.delete_all(:tokens, <%= inspect schema.alias %>Token.<%= schema.singular %>_and_contexts_query(<%= schema.singular %>, [context])) + end + + @doc """ + Delivers the update email instructions to the given <%= schema.singular %>. + + ## Examples + + iex> deliver_update_email_instructions(<%= schema.singular %>, current_email, &Routes.<%= schema.singular %>_update_email_url(conn, :edit, &1)) + {:ok, %{to: ..., body: ...}} + + """ + def deliver_update_email_instructions(%<%= inspect schema.alias %>{} = <%= schema.singular %>, current_email, update_email_url_fun) + when is_function(update_email_url_fun, 1) do + {encoded_token, <%= schema.singular %>_token} = <%= inspect schema.alias %>Token.build_email_token(<%= schema.singular %>, "change:#{current_email}") + + Repo.insert!(<%= schema.singular %>_token) + <%= inspect schema.alias %>Notifier.deliver_update_email_instructions(<%= schema.singular %>, update_email_url_fun.(encoded_token)) + end + + @doc """ + Returns an `%Ecto.Changeset{}` for changing the <%= schema.singular %> password. + + ## Examples + + iex> change_<%= schema.singular %>_password(<%= schema.singular %>) + %Ecto.Changeset{data: %<%= inspect schema.alias %>{}} + + """ + def change_<%= schema.singular %>_password(<%= schema.singular %>, attrs \\ %{}) do + <%= inspect schema.alias %>.password_changeset(<%= schema.singular %>, attrs, hash_password: false) + end + + @doc """ + Updates the <%= schema.singular %> password. + + ## Examples + + iex> update_<%= schema.singular %>_password(<%= schema.singular %>, "valid password", %{password: ...}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> update_<%= schema.singular %>_password(<%= schema.singular %>, "invalid password", %{password: ...}) + {:error, %Ecto.Changeset{}} + + """ + def update_<%= schema.singular %>_password(<%= schema.singular %>, password, attrs) do + changeset = + <%= schema.singular %> + |> <%= inspect schema.alias %>.password_changeset(attrs) + |> <%= inspect schema.alias %>.validate_current_password(password) + + Ecto.Multi.new() + |> Ecto.Multi.update(:<%= schema.singular %>, changeset) + |> Ecto.Multi.delete_all(:tokens, <%= inspect schema.alias %>Token.<%= schema.singular %>_and_contexts_query(<%= schema.singular %>, :all)) + |> Repo.transaction() + |> case do + {:ok, %{<%= schema.singular %>: <%= schema.singular %>}} -> {:ok, <%= schema.singular %>} + {:error, :<%= schema.singular %>, changeset, _} -> {:error, changeset} + end + end + + ## Session + + @doc """ + Generates a session token. 
+ """ + def generate_<%= schema.singular %>_session_token(<%= schema.singular %>) do + {token, <%= schema.singular %>_token} = <%= inspect schema.alias %>Token.build_session_token(<%= schema.singular %>) + Repo.insert!(<%= schema.singular %>_token) + token + end + + @doc """ + Gets the <%= schema.singular %> with the given signed token. + """ + def get_<%= schema.singular %>_by_session_token(token) do + {:ok, query} = <%= inspect schema.alias %>Token.verify_session_token_query(token) + Repo.one(query) + end + + @doc """ + Deletes the signed token with the given context. + """ + def delete_session_token(token) do + Repo.delete_all(<%= inspect schema.alias %>Token.token_and_context_query(token, "session")) + :ok + end + + ## Confirmation + + @doc """ + Delivers the confirmation email instructions to the given <%= schema.singular %>. + + ## Examples + + iex> deliver_<%= schema.singular %>_confirmation_instructions(<%= schema.singular %>, &Routes.<%= schema.singular %>_confirmation_url(conn, :edit, &1)) + {:ok, %{to: ..., body: ...}} + + iex> deliver_<%= schema.singular %>_confirmation_instructions(confirmed_<%= schema.singular %>, &Routes.<%= schema.singular %>_confirmation_url(conn, :edit, &1)) + {:error, :already_confirmed} + + """ + def deliver_<%= schema.singular %>_confirmation_instructions(%<%= inspect schema.alias %>{} = <%= schema.singular %>, confirmation_url_fun) + when is_function(confirmation_url_fun, 1) do + if <%= schema.singular %>.confirmed_at do + {:error, :already_confirmed} + else + {encoded_token, <%= schema.singular %>_token} = <%= inspect schema.alias %>Token.build_email_token(<%= schema.singular %>, "confirm") + Repo.insert!(<%= schema.singular %>_token) + <%= inspect schema.alias %>Notifier.deliver_confirmation_instructions(<%= schema.singular %>, confirmation_url_fun.(encoded_token)) + end + end + + @doc """ + Confirms a <%= schema.singular %> by the given token. + + If the token matches, the <%= schema.singular %> account is marked as confirmed + and the token is deleted. + """ + def confirm_<%= schema.singular %>(token) do + with {:ok, query} <- <%= inspect schema.alias %>Token.verify_email_token_query(token, "confirm"), + %<%= inspect schema.alias %>{} = <%= schema.singular %> <- Repo.one(query), + {:ok, %{<%= schema.singular %>: <%= schema.singular %>}} <- Repo.transaction(confirm_<%= schema.singular %>_multi(<%= schema.singular %>)) do + {:ok, <%= schema.singular %>} + else + _ -> :error + end + end + + defp confirm_<%= schema.singular %>_multi(<%= schema.singular %>) do + Ecto.Multi.new() + |> Ecto.Multi.update(:<%= schema.singular %>, <%= inspect schema.alias %>.confirm_changeset(<%= schema.singular %>)) + |> Ecto.Multi.delete_all(:tokens, <%= inspect schema.alias %>Token.<%= schema.singular %>_and_contexts_query(<%= schema.singular %>, ["confirm"])) + end + + ## Reset password + + @doc """ + Delivers the reset password email to the given <%= schema.singular %>. 
+ + ## Examples + + iex> deliver_<%= schema.singular %>_reset_password_instructions(<%= schema.singular %>, &Routes.<%= schema.singular %>_reset_password_url(conn, :edit, &1)) + {:ok, %{to: ..., body: ...}} + + """ + def deliver_<%= schema.singular %>_reset_password_instructions(%<%= inspect schema.alias %>{} = <%= schema.singular %>, reset_password_url_fun) + when is_function(reset_password_url_fun, 1) do + {encoded_token, <%= schema.singular %>_token} = <%= inspect schema.alias %>Token.build_email_token(<%= schema.singular %>, "reset_password") + Repo.insert!(<%= schema.singular %>_token) + <%= inspect schema.alias %>Notifier.deliver_reset_password_instructions(<%= schema.singular %>, reset_password_url_fun.(encoded_token)) + end + + @doc """ + Gets the <%= schema.singular %> by reset password token. + + ## Examples + + iex> get_<%= schema.singular %>_by_reset_password_token("validtoken") + %<%= inspect schema.alias %>{} + + iex> get_<%= schema.singular %>_by_reset_password_token("invalidtoken") + nil + + """ + def get_<%= schema.singular %>_by_reset_password_token(token) do + with {:ok, query} <- <%= inspect schema.alias %>Token.verify_email_token_query(token, "reset_password"), + %<%= inspect schema.alias %>{} = <%= schema.singular %> <- Repo.one(query) do + <%= schema.singular %> + else + _ -> nil + end + end + + @doc """ + Resets the <%= schema.singular %> password. + + ## Examples + + iex> reset_<%= schema.singular %>_password(<%= schema.singular %>, %{password: "new long password", password_confirmation: "new long password"}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> reset_<%= schema.singular %>_password(<%= schema.singular %>, %{password: "valid", password_confirmation: "not the same"}) + {:error, %Ecto.Changeset{}} + + """ + def reset_<%= schema.singular %>_password(<%= schema.singular %>, attrs) do + Ecto.Multi.new() + |> Ecto.Multi.update(:<%= schema.singular %>, <%= inspect schema.alias %>.password_changeset(<%= schema.singular %>, attrs)) + |> Ecto.Multi.delete_all(:tokens, <%= inspect schema.alias %>Token.<%= schema.singular %>_and_contexts_query(<%= schema.singular %>, :all)) + |> Repo.transaction() + |> case do + {:ok, %{<%= schema.singular %>: <%= schema.singular %>}} -> {:ok, <%= schema.singular %>} + {:error, :<%= schema.singular %>, changeset, _} -> {:error, changeset} + end + end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/migration.ex b/deps/phoenix/priv/templates/phx.gen.auth/migration.ex new file mode 100644 index 0000000..e8cdd47 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/migration.ex @@ -0,0 +1,29 @@ +defmodule <%= inspect schema.repo %>.Migrations.Create<%= Macro.camelize(schema.table) %>AuthTables do + use Ecto.Migration + + def change do<%= if Enum.any?(migration.extensions) do %><%= for extension <- migration.extensions do %> + <%= extension %><% end %> +<% end %> + create table(:<%= schema.table %><%= if schema.binary_id do %>, primary_key: false<% end %>) do +<%= if schema.binary_id do %> add :id, :binary_id, primary_key: true +<% end %> <%= migration.column_definitions[:email] %> + add :hashed_password, :string, null: false + add :confirmed_at, :naive_datetime + timestamps() + end + + create unique_index(:<%= schema.table %>, [:email]) + + create table(:<%= schema.table %>_tokens<%= if schema.binary_id do %>, primary_key: false<% end %>) do +<%= if schema.binary_id do %> add :id, :binary_id, primary_key: true +<% end %> add :<%= schema.singular %>_id, references(:<%= schema.table %>, <%= if schema.binary_id do %>type: 
:binary_id, <% end %>on_delete: :delete_all), null: false + <%= migration.column_definitions[:token] %> + add :context, :string, null: false + add :sent_to, :string + timestamps(updated_at: false) + end + + create index(:<%= schema.table %>_tokens, [:<%= schema.singular %>_id]) + create unique_index(:<%= schema.table %>_tokens, [:context, :token]) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/notifier.ex b/deps/phoenix/priv/templates/phx.gen.auth/notifier.ex new file mode 100644 index 0000000..a8a0a58 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/notifier.ex @@ -0,0 +1,79 @@ +defmodule <%= inspect context.module %>.<%= inspect schema.alias %>Notifier do + import Swoosh.Email + + alias <%= inspect context.base_module %>.Mailer + + # Delivers the email using the application mailer. + defp deliver(recipient, subject, body) do + email = + new() + |> to(recipient) + |> from({"<%= inspect context.base_module %>", "contact@example.com"}) + |> subject(subject) + |> text_body(body) + + with {:ok, _metadata} <- Mailer.deliver(email) do + {:ok, email} + end + end + + @doc """ + Deliver instructions to confirm account. + """ + def deliver_confirmation_instructions(<%= schema.singular %>, url) do + deliver(<%= schema.singular %>.email, "Confirmation instructions", """ + + ============================== + + Hi #{<%= schema.singular %>.email}, + + You can confirm your account by visiting the URL below: + + #{url} + + If you didn't create an account with us, please ignore this. + + ============================== + """) + end + + @doc """ + Deliver instructions to reset a <%= schema.singular %> password. + """ + def deliver_reset_password_instructions(<%= schema.singular %>, url) do + deliver(<%= schema.singular %>.email, "Reset password instructions", """ + + ============================== + + Hi #{<%= schema.singular %>.email}, + + You can reset your password by visiting the URL below: + + #{url} + + If you didn't request this change, please ignore this. + + ============================== + """) + end + + @doc """ + Deliver instructions to update a <%= schema.singular %> email. + """ + def deliver_update_email_instructions(<%= schema.singular %>, url) do + deliver(<%= schema.singular %>.email, "Update email instructions", """ + + ============================== + + Hi #{<%= schema.singular %>.email}, + + You can change your email by visiting the URL below: + + #{url} + + If you didn't request this change, please ignore this. 
+ + ============================== + """) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/registration_controller.ex b/deps/phoenix/priv/templates/phx.gen.auth/registration_controller.ex new file mode 100644 index 0000000..1ff6713 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/registration_controller.ex @@ -0,0 +1,30 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>RegistrationController do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + alias <%= inspect schema.module %> + alias <%= inspect auth_module %> + + def new(conn, _params) do + changeset = <%= inspect context.alias %>.change_<%= schema.singular %>_registration(%<%= inspect schema.alias %>{}) + render(conn, "new.html", changeset: changeset) + end + + def create(conn, %{"<%= schema.singular %>" => <%= schema.singular %>_params}) do + case <%= inspect context.alias %>.register_<%= schema.singular %>(<%= schema.singular %>_params) do + {:ok, <%= schema.singular %>} -> + {:ok, _} = + <%= inspect context.alias %>.deliver_<%= schema.singular %>_confirmation_instructions( + <%= schema.singular %>, + &Routes.<%= schema.route_helper %>_confirmation_url(conn, :edit, &1) + ) + + conn + |> put_flash(:info, "<%= schema.human_singular %> created successfully.") + |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>) + + {:error, %Ecto.Changeset{} = changeset} -> + render(conn, "new.html", changeset: changeset) + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/registration_controller_test.exs b/deps/phoenix/priv/templates/phx.gen.auth/registration_controller_test.exs new file mode 100644 index 0000000..085b215 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/registration_controller_test.exs @@ -0,0 +1,54 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>RegistrationControllerTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + import <%= inspect context.module %>Fixtures + + describe "GET <%= web_path_prefix %>/<%= schema.plural %>/register" do + test "renders registration page", %{conn: conn} do + conn = get(conn, Routes.<%= schema.route_helper %>_registration_path(conn, :new)) + response = html_response(conn, 200) + assert response =~ "

<h1>Register</h1>

" + assert response =~ "Log in" + assert response =~ "Register" + end + + test "redirects if already logged in", %{conn: conn} do + conn = conn |> log_in_<%= schema.singular %>(<%= schema.singular %>_fixture()) |> get(Routes.<%= schema.route_helper %>_registration_path(conn, :new)) + assert redirected_to(conn) == "/" + end + end + + describe "POST <%= web_path_prefix %>/<%= schema.plural %>/register" do + @tag :capture_log + test "creates account and logs the <%= schema.singular %> in", %{conn: conn} do + email = unique_<%= schema.singular %>_email() + + conn = + post(conn, Routes.<%= schema.route_helper %>_registration_path(conn, :create), %{ + "<%= schema.singular %>" => valid_<%= schema.singular %>_attributes(email: email) + }) + + assert get_session(conn, :<%= schema.singular %>_token) + assert redirected_to(conn) == "/" + + # Now do a logged in request and assert on the menu + conn = get(conn, "/") + response = html_response(conn, 200) + assert response =~ email + assert response =~ "Settings" + assert response =~ "Log out" + end + + test "render errors for invalid data", %{conn: conn} do + conn = + post(conn, Routes.<%= schema.route_helper %>_registration_path(conn, :create), %{ + "<%= schema.singular %>" => %{"email" => "with spaces", "password" => "too short"} + }) + + response = html_response(conn, 200) + assert response =~ "

<h1>Register</h1>

" + assert response =~ "must have the @ sign and no spaces" + assert response =~ "should be at least 12 character" + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/registration_new.html.heex b/deps/phoenix/priv/templates/phx.gen.auth/registration_new.html.heex new file mode 100644 index 0000000..46d2b06 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/registration_new.html.heex @@ -0,0 +1,26 @@ +

<h1>Register</h1>

+ +<.form let={f} for={@changeset} action={Routes.<%= schema.route_helper %>_registration_path(@conn, :create)}> + <%%= if @changeset.action do %> +
<div class="alert alert-danger">
+      <p>Oops, something went wrong! Please check the errors below.</p>
+    </div>
+ <%% end %> + + <%%= label f, :email %> + <%%= email_input f, :email, required: true %> + <%%= error_tag f, :email %> + + <%%= label f, :password %> + <%%= password_input f, :password, required: true %> + <%%= error_tag f, :password %> + +
+ <%%= submit "Register" %> +
+ + +

+ <%%= link "Log in", to: Routes.<%= schema.route_helper %>_session_path(@conn, :new) %> | + <%%= link "Forgot your password?", to: Routes.<%= schema.route_helper %>_reset_password_path(@conn, :new) %> +

diff --git a/deps/phoenix/priv/templates/phx.gen.auth/registration_view.ex b/deps/phoenix/priv/templates/phx.gen.auth/registration_view.ex new file mode 100644 index 0000000..c526210 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/registration_view.ex @@ -0,0 +1,3 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>RegistrationView do + use <%= inspect context.web_module %>, :view +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/reset_password_controller.ex b/deps/phoenix/priv/templates/phx.gen.auth/reset_password_controller.ex new file mode 100644 index 0000000..13860c9 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/reset_password_controller.ex @@ -0,0 +1,58 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>ResetPasswordController do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + + plug :get_<%= schema.singular %>_by_reset_password_token when action in [:edit, :update] + + def new(conn, _params) do + render(conn, "new.html") + end + + def create(conn, %{"<%= schema.singular %>" => %{"email" => email}}) do + if <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(email) do + <%= inspect context.alias %>.deliver_<%= schema.singular %>_reset_password_instructions( + <%= schema.singular %>, + &Routes.<%= schema.route_helper %>_reset_password_url(conn, :edit, &1) + ) + end + + conn + |> put_flash( + :info, + "If your email is in our system, you will receive instructions to reset your password shortly." + ) + |> redirect(to: "/") + end + + def edit(conn, _params) do + render(conn, "edit.html", changeset: <%= inspect context.alias %>.change_<%= schema.singular %>_password(conn.assigns.<%= schema.singular %>)) + end + + # Do not log in the <%= schema.singular %> after reset password to avoid a + # leaked token giving the <%= schema.singular %> access to the account. 
+ def update(conn, %{"<%= schema.singular %>" => <%= schema.singular %>_params}) do + case <%= inspect context.alias %>.reset_<%= schema.singular %>_password(conn.assigns.<%= schema.singular %>, <%= schema.singular %>_params) do + {:ok, _} -> + conn + |> put_flash(:info, "Password reset successfully.") + |> redirect(to: Routes.<%= schema.route_helper %>_session_path(conn, :new)) + + {:error, changeset} -> + render(conn, "edit.html", changeset: changeset) + end + end + + defp get_<%= schema.singular %>_by_reset_password_token(conn, _opts) do + %{"token" => token} = conn.params + + if <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_reset_password_token(token) do + conn |> assign(:<%= schema.singular %>, <%= schema.singular %>) |> assign(:token, token) + else + conn + |> put_flash(:error, "Reset password link is invalid or it has expired.") + |> redirect(to: "/") + |> halt() + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/reset_password_controller_test.exs b/deps/phoenix/priv/templates/phx.gen.auth/reset_password_controller_test.exs new file mode 100644 index 0000000..e1f4a18 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/reset_password_controller_test.exs @@ -0,0 +1,113 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>ResetPasswordControllerTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + alias <%= inspect context.module %> + alias <%= inspect schema.repo %> + import <%= inspect context.module %>Fixtures + + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + describe "GET <%= web_path_prefix %>/<%= schema.plural %>/reset_password" do + test "renders the reset password page", %{conn: conn} do + conn = get(conn, Routes.<%= schema.route_helper %>_reset_password_path(conn, :new)) + response = html_response(conn, 200) + assert response =~ "

<h1>Forgot your password?</h1>

" + end + end + + describe "POST <%= web_path_prefix %>/<%= schema.plural %>/reset_password" do + @tag :capture_log + test "sends a new reset password token", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + post(conn, Routes.<%= schema.route_helper %>_reset_password_path(conn, :create), %{ + "<%= schema.singular %>" => %{"email" => <%= schema.singular %>.email} + }) + + assert redirected_to(conn) == "/" + assert get_flash(conn, :info) =~ "If your email is in our system" + assert Repo.get_by!(<%= inspect context.alias %>.<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id).context == "reset_password" + end + + test "does not send reset password token if email is invalid", %{conn: conn} do + conn = + post(conn, Routes.<%= schema.route_helper %>_reset_password_path(conn, :create), %{ + "<%= schema.singular %>" => %{"email" => "unknown@example.com"} + }) + + assert redirected_to(conn) == "/" + assert get_flash(conn, :info) =~ "If your email is in our system" + assert Repo.all(<%= inspect context.alias %>.<%= inspect schema.alias %>Token) == [] + end + end + + describe "GET <%= web_path_prefix %>/<%= schema.plural %>/reset_password/:token" do + setup %{<%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_reset_password_instructions(<%= schema.singular %>, url) + end) + + %{token: token} + end + + test "renders reset password", %{conn: conn, token: token} do + conn = get(conn, Routes.<%= schema.route_helper %>_reset_password_path(conn, :edit, token)) + assert html_response(conn, 200) =~ "

<h1>Reset password</h1>

" + end + + test "does not render reset password with invalid token", %{conn: conn} do + conn = get(conn, Routes.<%= schema.route_helper %>_reset_password_path(conn, :edit, "oops")) + assert redirected_to(conn) == "/" + assert get_flash(conn, :error) =~ "Reset password link is invalid or it has expired" + end + end + + describe "PUT <%= web_path_prefix %>/<%= schema.plural %>/reset_password/:token" do + setup %{<%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_reset_password_instructions(<%= schema.singular %>, url) + end) + + %{token: token} + end + + test "resets password once", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>, token: token} do + conn = + put(conn, Routes.<%= schema.route_helper %>_reset_password_path(conn, :update, token), %{ + "<%= schema.singular %>" => %{ + "password" => "new valid password", + "password_confirmation" => "new valid password" + } + }) + + assert redirected_to(conn) == Routes.<%= schema.route_helper %>_session_path(conn, :new) + refute get_session(conn, :<%= schema.singular %>_token) + assert get_flash(conn, :info) =~ "Password reset successfully" + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(<%= schema.singular %>.email, "new valid password") + end + + test "does not reset password on invalid data", %{conn: conn, token: token} do + conn = + put(conn, Routes.<%= schema.route_helper %>_reset_password_path(conn, :update, token), %{ + "<%= schema.singular %>" => %{ + "password" => "too short", + "password_confirmation" => "does not match" + } + }) + + response = html_response(conn, 200) + assert response =~ "

<h1>Reset password</h1>

" + assert response =~ "should be at least 12 character(s)" + assert response =~ "does not match password" + end + + test "does not reset password with invalid token", %{conn: conn} do + conn = put(conn, Routes.<%= schema.route_helper %>_reset_password_path(conn, :update, "oops")) + assert redirected_to(conn) == "/" + assert get_flash(conn, :error) =~ "Reset password link is invalid or it has expired" + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/reset_password_edit.html.heex b/deps/phoenix/priv/templates/phx.gen.auth/reset_password_edit.html.heex new file mode 100644 index 0000000..46ba9bd --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/reset_password_edit.html.heex @@ -0,0 +1,26 @@ +

<h1>Reset password</h1>

+ +<.form let={f} for={@changeset} action={Routes.<%= schema.route_helper %>_reset_password_path(@conn, :update, @token)}> + <%%= if @changeset.action do %> +
<div class="alert alert-danger">
+      <p>Oops, something went wrong! Please check the errors below.</p>
+    </div>
+ <%% end %> + + <%%= label f, :password, "New password" %> + <%%= password_input f, :password, required: true %> + <%%= error_tag f, :password %> + + <%%= label f, :password_confirmation, "Confirm new password" %> + <%%= password_input f, :password_confirmation, required: true %> + <%%= error_tag f, :password_confirmation %> + +
+ <%%= submit "Reset password" %> +
+ + +

+ <%%= link "Register", to: Routes.<%= schema.route_helper %>_registration_path(@conn, :new) %> | + <%%= link "Log in", to: Routes.<%= schema.route_helper %>_session_path(@conn, :new) %> +

diff --git a/deps/phoenix/priv/templates/phx.gen.auth/reset_password_new.html.heex b/deps/phoenix/priv/templates/phx.gen.auth/reset_password_new.html.heex new file mode 100644 index 0000000..32ca3b5 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/reset_password_new.html.heex @@ -0,0 +1,15 @@ +

<h1>Forgot your password?</h1>

+ +<.form let={f} for={:<%= schema.singular %>} action={Routes.<%= schema.route_helper %>_reset_password_path(@conn, :create)}> + <%%= label f, :email %> + <%%= email_input f, :email, required: true %> + +
+ <%%= submit "Send instructions to reset password" %> +
+ + +

+ <%%= link "Register", to: Routes.<%= schema.route_helper %>_registration_path(@conn, :new) %> | + <%%= link "Log in", to: Routes.<%= schema.route_helper %>_session_path(@conn, :new) %> +

diff --git a/deps/phoenix/priv/templates/phx.gen.auth/reset_password_view.ex b/deps/phoenix/priv/templates/phx.gen.auth/reset_password_view.ex new file mode 100644 index 0000000..9dee8ec --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/reset_password_view.ex @@ -0,0 +1,3 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>ResetPasswordView do + use <%= inspect context.web_module %>, :view +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/routes.ex b/deps/phoenix/priv/templates/phx.gen.auth/routes.ex new file mode 100644 index 0000000..45bfda7 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/routes.ex @@ -0,0 +1,33 @@ + + ## Authentication routes + + scope <%= router_scope %> do + pipe_through [:browser, :redirect_if_<%= schema.singular %>_is_authenticated] + + get "/<%= schema.plural %>/register", <%= inspect schema.alias %>RegistrationController, :new + post "/<%= schema.plural %>/register", <%= inspect schema.alias %>RegistrationController, :create + get "/<%= schema.plural %>/log_in", <%= inspect schema.alias %>SessionController, :new + post "/<%= schema.plural %>/log_in", <%= inspect schema.alias %>SessionController, :create + get "/<%= schema.plural %>/reset_password", <%= inspect schema.alias %>ResetPasswordController, :new + post "/<%= schema.plural %>/reset_password", <%= inspect schema.alias %>ResetPasswordController, :create + get "/<%= schema.plural %>/reset_password/:token", <%= inspect schema.alias %>ResetPasswordController, :edit + put "/<%= schema.plural %>/reset_password/:token", <%= inspect schema.alias %>ResetPasswordController, :update + end + + scope <%= router_scope %> do + pipe_through [:browser, :require_authenticated_<%= schema.singular %>] + + get "/<%= schema.plural %>/settings", <%= inspect schema.alias %>SettingsController, :edit + put "/<%= schema.plural %>/settings", <%= inspect schema.alias %>SettingsController, :update + get "/<%= schema.plural %>/settings/confirm_email/:token", <%= inspect schema.alias %>SettingsController, :confirm_email + end + + scope <%= router_scope %> do + pipe_through [:browser] + + delete "/<%= schema.plural %>/log_out", <%= inspect schema.alias %>SessionController, :delete + get "/<%= schema.plural %>/confirm", <%= inspect schema.alias %>ConfirmationController, :new + post "/<%= schema.plural %>/confirm", <%= inspect schema.alias %>ConfirmationController, :create + get "/<%= schema.plural %>/confirm/:token", <%= inspect schema.alias %>ConfirmationController, :edit + post "/<%= schema.plural %>/confirm/:token", <%= inspect schema.alias %>ConfirmationController, :update + end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/schema.ex b/deps/phoenix/priv/templates/phx.gen.auth/schema.ex new file mode 100644 index 0000000..48ae868 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/schema.ex @@ -0,0 +1,141 @@ +defmodule <%= inspect schema.module %> do + use Ecto.Schema + import Ecto.Changeset +<%= if schema.binary_id do %> @primary_key {:id, :binary_id, autogenerate: true} + @foreign_key_type :binary_id<% end %> + schema <%= inspect schema.table %> do + field :email, :string + field :password, :string, virtual: true, redact: true + field :hashed_password, :string, redact: true + field :confirmed_at, :naive_datetime + + timestamps() + end + + @doc """ + A <%= schema.singular %> changeset for registration. + + It is important to validate the length of both email and password. 
+ Otherwise databases may truncate the email without warnings, which + could lead to unpredictable or insecure behaviour. Long passwords may + also be very expensive to hash for certain algorithms. + + ## Options + + * `:hash_password` - Hashes the password so it can be stored securely + in the database and ensures the password field is cleared to prevent + leaks in the logs. If password hashing is not needed and clearing the + password field is not desired (like when using this changeset for + validations on a LiveView form), this option can be set to `false`. + Defaults to `true`. + """ + def registration_changeset(<%= schema.singular %>, attrs, opts \\ []) do + <%= schema.singular %> + |> cast(attrs, [:email, :password]) + |> validate_email() + |> validate_password(opts) + end + + defp validate_email(changeset) do + changeset + |> validate_required([:email]) + |> validate_format(:email, ~r/^[^\s]+@[^\s]+$/, message: "must have the @ sign and no spaces") + |> validate_length(:email, max: 160) + |> unsafe_validate_unique(:email, <%= inspect schema.repo %>) + |> unique_constraint(:email) + end + + defp validate_password(changeset, opts) do + changeset + |> validate_required([:password]) + |> validate_length(:password, min: 12, max: 72) + # |> validate_format(:password, ~r/[a-z]/, message: "at least one lower case character") + # |> validate_format(:password, ~r/[A-Z]/, message: "at least one upper case character") + # |> validate_format(:password, ~r/[!?@#$%^&*_0-9]/, message: "at least one digit or punctuation character") + |> maybe_hash_password(opts) + end + + defp maybe_hash_password(changeset, opts) do + hash_password? = Keyword.get(opts, :hash_password, true) + password = get_change(changeset, :password) + + if hash_password? && password && changeset.valid? do + changeset<%= if hashing_library.name == :bcrypt do %> + # If using Bcrypt, then further validate it is at most 72 bytes long + |> validate_length(:password, max: 72, count: :bytes)<% end %> + |> put_change(:hashed_password, <%= inspect hashing_library.module %>.hash_pwd_salt(password)) + |> delete_change(:password) + else + changeset + end + end + + @doc """ + A <%= schema.singular %> changeset for changing the email. + + It requires the email to change otherwise an error is added. + """ + def email_changeset(<%= schema.singular %>, attrs) do + <%= schema.singular %> + |> cast(attrs, [:email]) + |> validate_email() + |> case do + %{changes: %{email: _}} = changeset -> changeset + %{} = changeset -> add_error(changeset, :email, "did not change") + end + end + + @doc """ + A <%= schema.singular %> changeset for changing the password. + + ## Options + + * `:hash_password` - Hashes the password so it can be stored securely + in the database and ensures the password field is cleared to prevent + leaks in the logs. If password hashing is not needed and clearing the + password field is not desired (like when using this changeset for + validations on a LiveView form), this option can be set to `false`. + Defaults to `true`. + """ + def password_changeset(<%= schema.singular %>, attrs, opts \\ []) do + <%= schema.singular %> + |> cast(attrs, [:password]) + |> validate_confirmation(:password, message: "does not match password") + |> validate_password(opts) + end + + @doc """ + Confirms the account by setting `confirmed_at`. 
+ """ + def confirm_changeset(<%= schema.singular %>) do + now = NaiveDateTime.utc_now() |> NaiveDateTime.truncate(:second) + change(<%= schema.singular %>, confirmed_at: now) + end + + @doc """ + Verifies the password. + + If there is no <%= schema.singular %> or the <%= schema.singular %> doesn't have a password, we call + `<%= inspect hashing_library.module %>.no_user_verify/0` to avoid timing attacks. + """ + def valid_password?(%<%= inspect schema.module %>{hashed_password: hashed_password}, password) + when is_binary(hashed_password) and byte_size(password) > 0 do + <%= inspect hashing_library.module %>.verify_pass(password, hashed_password) + end + + def valid_password?(_, _) do + <%= inspect hashing_library.module %>.no_user_verify() + false + end + + @doc """ + Validates the current password otherwise adds an error to the changeset. + """ + def validate_current_password(changeset, password) do + if valid_password?(changeset.data, password) do + changeset + else + add_error(changeset, :current_password, "is not valid") + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/schema_token.ex b/deps/phoenix/priv/templates/phx.gen.auth/schema_token.ex new file mode 100644 index 0000000..71bdc95 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/schema_token.ex @@ -0,0 +1,181 @@ +defmodule <%= inspect schema.module %>Token do + use Ecto.Schema + import Ecto.Query + alias <%= inspect schema.module %>Token + + @hash_algorithm :sha256 + @rand_size 32 + + # It is very important to keep the reset password token expiry short, + # since someone with access to the email may take over the account. + @reset_password_validity_in_days 1 + @confirm_validity_in_days 7 + @change_email_validity_in_days 7 + @session_validity_in_days 60 +<%= if schema.binary_id do %> + @primary_key {:id, :binary_id, autogenerate: true} + @foreign_key_type :binary_id<% end %> + schema "<%= schema.table %>_tokens" do + field :token, :binary + field :context, :string + field :sent_to, :string + belongs_to :<%= schema.singular %>, <%= inspect schema.module %> + + timestamps(updated_at: false) + end + + @doc """ + Generates a token that will be stored in a signed place, + such as session or cookie. As they are signed, those + tokens do not need to be hashed. + + The reason why we store session tokens in the database, even + though Phoenix already provides a session cookie, is because + Phoenix' default session cookies are not persisted, they are + simply signed and potentially encrypted. This means they are + valid indefinitely, unless you change the signing/encryption + salt. + + Therefore, storing them allows individual <%= schema.singular %> + sessions to be expired. The token system can also be extended + to store additional data, such as the device used for logging in. + You could then use this information to display all valid sessions + and devices in the UI and allow users to explicitly expire any + session they deem invalid. + """ + def build_session_token(<%= schema.singular %>) do + token = :crypto.strong_rand_bytes(@rand_size) + {token, %<%= inspect schema.alias %>Token{token: token, context: "session", <%= schema.singular %>_id: <%= schema.singular %>.id}} + end + + @doc """ + Checks if the token is valid and returns its underlying lookup query. + + The query returns the <%= schema.singular %> found by the token, if any. + + The token is valid if it matches the value in the database and it has + not expired (after @session_validity_in_days). 
+ """ + def verify_session_token_query(token) do + query = + from token in token_and_context_query(token, "session"), + join: <%= schema.singular %> in assoc(token, :<%= schema.singular %>), + where: token.inserted_at > ago(@session_validity_in_days, "day"), + select: <%= schema.singular %> + + {:ok, query} + end + + @doc """ + Builds a token and its hash to be delivered to the <%= schema.singular %>'s email. + + The non-hashed token is sent to the <%= schema.singular %> email while the + hashed part is stored in the database. The original token cannot be reconstructed, + which means anyone with read-only access to the database cannot directly use + the token in the application to gain access. Furthermore, if the user changes + their email in the system, the tokens sent to the previous email are no longer + valid. + + Users can easily adapt the existing code to provide other types of delivery methods, + for example, by phone numbers. + """ + def build_email_token(<%= schema.singular %>, context) do + build_hashed_token(<%= schema.singular %>, context, <%= schema.singular %>.email) + end + + defp build_hashed_token(<%= schema.singular %>, context, sent_to) do + token = :crypto.strong_rand_bytes(@rand_size) + hashed_token = :crypto.hash(@hash_algorithm, token) + + {Base.url_encode64(token, padding: false), + %<%= inspect schema.alias %>Token{ + token: hashed_token, + context: context, + sent_to: sent_to, + <%= schema.singular %>_id: <%= schema.singular %>.id + }} + end + + @doc """ + Checks if the token is valid and returns its underlying lookup query. + + The query returns the <%= schema.singular %> found by the token, if any. + + The given token is valid if it matches its hashed counterpart in the + database and the user email has not changed. This function also checks + if the token is being used within a certain period, depending on the + context. The default contexts supported by this function are either + "confirm", for account confirmation emails, and "reset_password", + for resetting the password. For verifying requests to change the email, + see `verify_change_email_token_query/2`. + """ + def verify_email_token_query(token, context) do + case Base.url_decode64(token, padding: false) do + {:ok, decoded_token} -> + hashed_token = :crypto.hash(@hash_algorithm, decoded_token) + days = days_for_context(context) + + query = + from token in token_and_context_query(hashed_token, context), + join: <%= schema.singular %> in assoc(token, :<%= schema.singular %>), + where: token.inserted_at > ago(^days, "day") and token.sent_to == <%= schema.singular %>.email, + select: <%= schema.singular %> + + {:ok, query} + + :error -> + :error + end + end + + defp days_for_context("confirm"), do: @confirm_validity_in_days + defp days_for_context("reset_password"), do: @reset_password_validity_in_days + + @doc """ + Checks if the token is valid and returns its underlying lookup query. + + The query returns the <%= schema.singular %> found by the token, if any. + + This is used to validate requests to change the <%= schema.singular %> + email. It is different from `verify_email_token_query/2` precisely because + `verify_email_token_query/2` validates the email has not changed, which is + the starting point by this function. + + The given token is valid if it matches its hashed counterpart in the + database and if it has not expired (after @change_email_validity_in_days). + The context must always start with "change:". 
+ """ + def verify_change_email_token_query(token, "change:" <> _ = context) do + case Base.url_decode64(token, padding: false) do + {:ok, decoded_token} -> + hashed_token = :crypto.hash(@hash_algorithm, decoded_token) + + query = + from token in token_and_context_query(hashed_token, context), + where: token.inserted_at > ago(@change_email_validity_in_days, "day") + + {:ok, query} + + :error -> + :error + end + end + + @doc """ + Returns the token struct for the given token value and context. + """ + def token_and_context_query(token, context) do + from <%= inspect schema.alias %>Token, where: [token: ^token, context: ^context] + end + + @doc """ + Gets all tokens for the given <%= schema.singular %> for the given contexts. + """ + def <%= schema.singular %>_and_contexts_query(<%= schema.singular %>, :all) do + from t in <%= inspect schema.alias %>Token, where: t.<%= schema.singular %>_id == ^<%= schema.singular %>.id + end + + def <%= schema.singular %>_and_contexts_query(<%= schema.singular %>, [_ | _] = contexts) do + from t in <%= inspect schema.alias %>Token, where: t.<%= schema.singular %>_id == ^<%= schema.singular %>.id and t.context in ^contexts + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/session_controller.ex b/deps/phoenix/priv/templates/phx.gen.auth/session_controller.ex new file mode 100644 index 0000000..0926433 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/session_controller.ex @@ -0,0 +1,27 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SessionController do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + alias <%= inspect auth_module %> + + def new(conn, _params) do + render(conn, "new.html", error_message: nil) + end + + def create(conn, %{"<%= schema.singular %>" => <%= schema.singular %>_params}) do + %{"email" => email, "password" => password} = <%= schema.singular %>_params + + if <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(email, password) do + <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(conn, <%= schema.singular %>, <%= schema.singular %>_params) + else + # In order to prevent user enumeration attacks, don't disclose whether the email is registered. + render(conn, "new.html", error_message: "Invalid email or password") + end + end + + def delete(conn, _params) do + conn + |> put_flash(:info, "Logged out successfully.") + |> <%= inspect schema.alias %>Auth.log_out_<%= schema.singular %>() + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/session_controller_test.exs b/deps/phoenix/priv/templates/phx.gen.auth/session_controller_test.exs new file mode 100644 index 0000000..c5a883b --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/session_controller_test.exs @@ -0,0 +1,98 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SessionControllerTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + import <%= inspect context.module %>Fixtures + + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + describe "GET <%= web_path_prefix %>/<%= schema.plural %>/log_in" do + test "renders log in page", %{conn: conn} do + conn = get(conn, Routes.<%= schema.route_helper %>_session_path(conn, :new)) + response = html_response(conn, 200) + assert response =~ "

<h1>Log in</h1>

" + assert response =~ "Register" + assert response =~ "Forgot your password?" + end + + test "redirects if already logged in", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> log_in_<%= schema.singular %>(<%= schema.singular %>) |> get(Routes.<%= schema.route_helper %>_session_path(conn, :new)) + assert redirected_to(conn) == "/" + end + end + + describe "POST <%= web_path_prefix %>/<%= schema.plural %>/log_in" do + test "logs the <%= schema.singular %> in", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + post(conn, Routes.<%= schema.route_helper %>_session_path(conn, :create), %{ + "<%= schema.singular %>" => %{"email" => <%= schema.singular %>.email, "password" => valid_<%= schema.singular %>_password()} + }) + + assert get_session(conn, :<%= schema.singular %>_token) + assert redirected_to(conn) == "/" + + # Now do a logged in request and assert on the menu + conn = get(conn, "/") + response = html_response(conn, 200) + assert response =~ <%= schema.singular %>.email + assert response =~ "Settings" + assert response =~ "Log out" + end + + test "logs the <%= schema.singular %> in with remember me", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + post(conn, Routes.<%= schema.route_helper %>_session_path(conn, :create), %{ + "<%= schema.singular %>" => %{ + "email" => <%= schema.singular %>.email, + "password" => valid_<%= schema.singular %>_password(), + "remember_me" => "true" + } + }) + + assert conn.resp_cookies["_<%= web_app_name %>_<%= schema.singular %>_remember_me"] + assert redirected_to(conn) == "/" + end + + test "logs the <%= schema.singular %> in with return to", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + conn + |> init_test_session(<%= schema.singular %>_return_to: "/foo/bar") + |> post(Routes.<%= schema.route_helper %>_session_path(conn, :create), %{ + "<%= schema.singular %>" => %{ + "email" => <%= schema.singular %>.email, + "password" => valid_<%= schema.singular %>_password() + } + }) + + assert redirected_to(conn) == "/foo/bar" + end + + test "emits error message with invalid credentials", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + post(conn, Routes.<%= schema.route_helper %>_session_path(conn, :create), %{ + "<%= schema.singular %>" => %{"email" => <%= schema.singular %>.email, "password" => "invalid_password"} + }) + + response = html_response(conn, 200) + assert response =~ "

<h1>Log in</h1>

" + assert response =~ "Invalid email or password" + end + end + + describe "DELETE <%= web_path_prefix %>/<%= schema.plural %>/log_out" do + test "logs the <%= schema.singular %> out", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = conn |> log_in_<%= schema.singular %>(<%= schema.singular %>) |> delete(Routes.<%= schema.route_helper %>_session_path(conn, :delete)) + assert redirected_to(conn) == "/" + refute get_session(conn, :<%= schema.singular %>_token) + assert get_flash(conn, :info) =~ "Logged out successfully" + end + + test "succeeds even if the <%= schema.singular %> is not logged in", %{conn: conn} do + conn = delete(conn, Routes.<%= schema.route_helper %>_session_path(conn, :delete)) + assert redirected_to(conn) == "/" + refute get_session(conn, :<%= schema.singular %>_token) + assert get_flash(conn, :info) =~ "Logged out successfully" + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/session_new.html.heex b/deps/phoenix/priv/templates/phx.gen.auth/session_new.html.heex new file mode 100644 index 0000000..7d1740c --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/session_new.html.heex @@ -0,0 +1,27 @@ +

<h1>Log in</h1>

+ +<.form let={f} for={@conn} action={Routes.<%= schema.route_helper %>_session_path(@conn, :create)} as={:<%= schema.singular %>}> + <%%= if @error_message do %> +
<div class="alert alert-danger">
+      <p><%%= @error_message %></p>
+    </div>
+ <%% end %> + + <%%= label f, :email %> + <%%= email_input f, :email, required: true %> + + <%%= label f, :password %> + <%%= password_input f, :password, required: true %> + + <%%= label f, :remember_me, "Keep me logged in for 60 days" %> + <%%= checkbox f, :remember_me %> + +
+ <%%= submit "Log in" %> +
+ + +

+ <%%= link "Register", to: Routes.<%= schema.route_helper %>_registration_path(@conn, :new) %> | + <%%= link "Forgot your password?", to: Routes.<%= schema.route_helper %>_reset_password_path(@conn, :new) %> +

diff --git a/deps/phoenix/priv/templates/phx.gen.auth/session_view.ex b/deps/phoenix/priv/templates/phx.gen.auth/session_view.ex new file mode 100644 index 0000000..d225293 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/session_view.ex @@ -0,0 +1,3 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SessionView do + use <%= inspect context.web_module %>, :view +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/settings_controller.ex b/deps/phoenix/priv/templates/phx.gen.auth/settings_controller.ex new file mode 100644 index 0000000..31a4639 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/settings_controller.ex @@ -0,0 +1,74 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SettingsController do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + alias <%= inspect auth_module %> + + plug :assign_email_and_password_changesets + + def edit(conn, _params) do + render(conn, "edit.html") + end + + def update(conn, %{"action" => "update_email"} = params) do + %{"current_password" => password, "<%= schema.singular %>" => <%= schema.singular %>_params} = params + <%= schema.singular %> = conn.assigns.current_<%= schema.singular %> + + case <%= inspect context.alias %>.apply_<%= schema.singular %>_email(<%= schema.singular %>, password, <%= schema.singular %>_params) do + {:ok, applied_<%= schema.singular %>} -> + <%= inspect context.alias %>.deliver_update_email_instructions( + applied_<%= schema.singular %>, + <%= schema.singular %>.email, + &Routes.<%= schema.route_helper %>_settings_url(conn, :confirm_email, &1) + ) + + conn + |> put_flash( + :info, + "A link to confirm your email change has been sent to the new address." 
+ ) + |> redirect(to: Routes.<%= schema.route_helper %>_settings_path(conn, :edit)) + + {:error, changeset} -> + render(conn, "edit.html", email_changeset: changeset) + end + end + + def update(conn, %{"action" => "update_password"} = params) do + %{"current_password" => password, "<%= schema.singular %>" => <%= schema.singular %>_params} = params + <%= schema.singular %> = conn.assigns.current_<%= schema.singular %> + + case <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, password, <%= schema.singular %>_params) do + {:ok, <%= schema.singular %>} -> + conn + |> put_flash(:info, "Password updated successfully.") + |> put_session(:<%= schema.singular %>_return_to, Routes.<%= schema.route_helper %>_settings_path(conn, :edit)) + |> <%= inspect schema.alias %>Auth.log_in_<%= schema.singular %>(<%= schema.singular %>) + + {:error, changeset} -> + render(conn, "edit.html", password_changeset: changeset) + end + end + + def confirm_email(conn, %{"token" => token}) do + case <%= inspect context.alias %>.update_<%= schema.singular %>_email(conn.assigns.current_<%= schema.singular %>, token) do + :ok -> + conn + |> put_flash(:info, "Email changed successfully.") + |> redirect(to: Routes.<%= schema.route_helper %>_settings_path(conn, :edit)) + + :error -> + conn + |> put_flash(:error, "Email change link is invalid or it has expired.") + |> redirect(to: Routes.<%= schema.route_helper %>_settings_path(conn, :edit)) + end + end + + defp assign_email_and_password_changesets(conn, _opts) do + <%= schema.singular %> = conn.assigns.current_<%= schema.singular %> + + conn + |> assign(:email_changeset, <%= inspect context.alias %>.change_<%= schema.singular %>_email(<%= schema.singular %>)) + |> assign(:password_changeset, <%= inspect context.alias %>.change_<%= schema.singular %>_password(<%= schema.singular %>)) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/settings_controller_test.exs b/deps/phoenix/priv/templates/phx.gen.auth/settings_controller_test.exs new file mode 100644 index 0000000..a8c9a8d --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/settings_controller_test.exs @@ -0,0 +1,129 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SettingsControllerTest do + use <%= inspect context.web_module %>.ConnCase<%= test_case_options %> + + alias <%= inspect context.module %> + import <%= inspect context.module %>Fixtures + + setup :register_and_log_in_<%= schema.singular %> + + describe "GET <%= web_path_prefix %>/<%= schema.plural %>/settings" do + test "renders settings page", %{conn: conn} do + conn = get(conn, Routes.<%= schema.route_helper %>_settings_path(conn, :edit)) + response = html_response(conn, 200) + assert response =~ "

<h1>Settings</h1>

" + end + + test "redirects if <%= schema.singular %> is not logged in" do + conn = build_conn() + conn = get(conn, Routes.<%= schema.route_helper %>_settings_path(conn, :edit)) + assert redirected_to(conn) == Routes.<%= schema.route_helper %>_session_path(conn, :new) + end + end + + describe "PUT <%= web_path_prefix %>/<%= schema.plural %>/settings (change password form)" do + test "updates the <%= schema.singular %> password and resets tokens", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + new_password_conn = + put(conn, Routes.<%= schema.route_helper %>_settings_path(conn, :update), %{ + "action" => "update_password", + "current_password" => valid_<%= schema.singular %>_password(), + "<%= schema.singular %>" => %{ + "password" => "new valid password", + "password_confirmation" => "new valid password" + } + }) + + assert redirected_to(new_password_conn) == Routes.<%= schema.route_helper %>_settings_path(conn, :edit) + assert get_session(new_password_conn, :<%= schema.singular %>_token) != get_session(conn, :<%= schema.singular %>_token) + assert get_flash(new_password_conn, :info) =~ "Password updated successfully" + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(<%= schema.singular %>.email, "new valid password") + end + + test "does not update password on invalid data", %{conn: conn} do + old_password_conn = + put(conn, Routes.<%= schema.route_helper %>_settings_path(conn, :update), %{ + "action" => "update_password", + "current_password" => "invalid", + "<%= schema.singular %>" => %{ + "password" => "too short", + "password_confirmation" => "does not match" + } + }) + + response = html_response(old_password_conn, 200) + assert response =~ "

<h1>Settings</h1>

" + assert response =~ "should be at least 12 character(s)" + assert response =~ "does not match password" + assert response =~ "is not valid" + + assert get_session(old_password_conn, :<%= schema.singular %>_token) == get_session(conn, :<%= schema.singular %>_token) + end + end + + describe "PUT <%= web_path_prefix %>/<%= schema.plural %>/settings (change email form)" do + @tag :capture_log + test "updates the <%= schema.singular %> email", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = + put(conn, Routes.<%= schema.route_helper %>_settings_path(conn, :update), %{ + "action" => "update_email", + "current_password" => valid_<%= schema.singular %>_password(), + "<%= schema.singular %>" => %{"email" => unique_<%= schema.singular %>_email()} + }) + + assert redirected_to(conn) == Routes.<%= schema.route_helper %>_settings_path(conn, :edit) + assert get_flash(conn, :info) =~ "A link to confirm your email" + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(<%= schema.singular %>.email) + end + + test "does not update email on invalid data", %{conn: conn} do + conn = + put(conn, Routes.<%= schema.route_helper %>_settings_path(conn, :update), %{ + "action" => "update_email", + "current_password" => "invalid", + "<%= schema.singular %>" => %{"email" => "with spaces"} + }) + + response = html_response(conn, 200) + assert response =~ "

<h1>Settings</h1>

" + assert response =~ "must have the @ sign and no spaces" + assert response =~ "is not valid" + end + end + + describe "GET <%= web_path_prefix %>/<%= schema.plural %>/settings/confirm_email/:token" do + setup %{<%= schema.singular %>: <%= schema.singular %>} do + email = unique_<%= schema.singular %>_email() + + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_update_email_instructions(%{<%= schema.singular %> | email: email}, <%= schema.singular %>.email, url) + end) + + %{token: token, email: email} + end + + test "updates the <%= schema.singular %> email once", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>, token: token, email: email} do + conn = get(conn, Routes.<%= schema.route_helper %>_settings_path(conn, :confirm_email, token)) + assert redirected_to(conn) == Routes.<%= schema.route_helper %>_settings_path(conn, :edit) + assert get_flash(conn, :info) =~ "Email changed successfully" + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(<%= schema.singular %>.email) + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(email) + + conn = get(conn, Routes.<%= schema.route_helper %>_settings_path(conn, :confirm_email, token)) + assert redirected_to(conn) == Routes.<%= schema.route_helper %>_settings_path(conn, :edit) + assert get_flash(conn, :error) =~ "Email change link is invalid or it has expired" + end + + test "does not update email with invalid token", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = get(conn, Routes.<%= schema.route_helper %>_settings_path(conn, :confirm_email, "oops")) + assert redirected_to(conn) == Routes.<%= schema.route_helper %>_settings_path(conn, :edit) + assert get_flash(conn, :error) =~ "Email change link is invalid or it has expired" + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(<%= schema.singular %>.email) + end + + test "redirects if <%= schema.singular %> is not logged in", %{token: token} do + conn = build_conn() + conn = get(conn, Routes.<%= schema.route_helper %>_settings_path(conn, :confirm_email, token)) + assert redirected_to(conn) == Routes.<%= schema.route_helper %>_session_path(conn, :new) + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/settings_edit.html.heex b/deps/phoenix/priv/templates/phx.gen.auth/settings_edit.html.heex new file mode 100644 index 0000000..aac0214 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/settings_edit.html.heex @@ -0,0 +1,53 @@ +

<h1>Settings</h1>
+ +

<h3>Change email</h3>
+ +<.form let={f} for={@email_changeset} action={Routes.<%= schema.route_helper %>_settings_path(@conn, :update)} id="update_email"> + <%%= if @email_changeset.action do %> +
<div class="alert alert-danger">
+      <p>Oops, something went wrong! Please check the errors below.</p>
+    </div>
+ <%% end %> + + <%%= hidden_input f, :action, name: "action", value: "update_email" %> + + <%%= label f, :email %> + <%%= email_input f, :email, required: true %> + <%%= error_tag f, :email %> + + <%%= label f, :current_password, for: "current_password_for_email" %> + <%%= password_input f, :current_password, required: true, name: "current_password", id: "current_password_for_email" %> + <%%= error_tag f, :current_password %> + +
<div>
+    <%%= submit "Change email" %>
+  </div>
+</.form>
+
+<h3>Change password</h3>
+ +<.form let={f} for={@password_changeset} action={Routes.<%= schema.route_helper %>_settings_path(@conn, :update)} id="update_password"> + <%%= if @password_changeset.action do %> +
<div class="alert alert-danger">
+      <p>Oops, something went wrong! Please check the errors below.</p>
+    </div>
+ <%% end %> + + <%%= hidden_input f, :action, name: "action", value: "update_password" %> + + <%%= label f, :password, "New password" %> + <%%= password_input f, :password, required: true %> + <%%= error_tag f, :password %> + + <%%= label f, :password_confirmation, "Confirm new password" %> + <%%= password_input f, :password_confirmation, required: true %> + <%%= error_tag f, :password_confirmation %> + + <%%= label f, :current_password, for: "current_password_for_password" %> + <%%= password_input f, :current_password, required: true, name: "current_password", id: "current_password_for_password" %> + <%%= error_tag f, :current_password %> + +
<div>
+    <%%= submit "Change password" %>
+  </div>
+</.form>
+ diff --git a/deps/phoenix/priv/templates/phx.gen.auth/settings_view.ex b/deps/phoenix/priv/templates/phx.gen.auth/settings_view.ex new file mode 100644 index 0000000..b84a92f --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/settings_view.ex @@ -0,0 +1,3 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>SettingsView do + use <%= inspect context.web_module %>, :view +end diff --git a/deps/phoenix/priv/templates/phx.gen.auth/test_cases.exs b/deps/phoenix/priv/templates/phx.gen.auth/test_cases.exs new file mode 100644 index 0000000..40d24c4 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.auth/test_cases.exs @@ -0,0 +1,502 @@ + import <%= inspect context.module %>Fixtures + alias <%= inspect context.module %>.{<%= inspect schema.alias %>, <%= inspect schema.alias %>Token} + + describe "get_<%= schema.singular %>_by_email/1" do + test "does not return the <%= schema.singular %> if the email does not exist" do + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_email("unknown@example.com") + end + + test "returns the <%= schema.singular %> if the email exists" do + %{id: id} = <%= schema.singular %> = <%= schema.singular %>_fixture() + assert %<%= inspect schema.alias %>{id: ^id} = <%= inspect context.alias %>.get_<%= schema.singular %>_by_email(<%= schema.singular %>.email) + end + end + + describe "get_<%= schema.singular %>_by_email_and_password/2" do + test "does not return the <%= schema.singular %> if the email does not exist" do + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password("unknown@example.com", "hello world!") + end + + test "does not return the <%= schema.singular %> if the password is not valid" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(<%= schema.singular %>.email, "invalid") + end + + test "returns the <%= schema.singular %> if the email and password are valid" do + %{id: id} = <%= schema.singular %> = <%= schema.singular %>_fixture() + + assert %<%= inspect schema.alias %>{id: ^id} = + <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(<%= schema.singular %>.email, valid_<%= schema.singular %>_password()) + end + end + + describe "get_<%= schema.singular %>!/1" do + test "raises if id is invalid" do + assert_raise Ecto.NoResultsError, fn -> + <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= inspect schema.sample_id %>) + end + end + + test "returns the <%= schema.singular %> with the given id" do + %{id: id} = <%= schema.singular %> = <%= schema.singular %>_fixture() + assert %<%= inspect schema.alias %>{id: ^id} = <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.id) + end + end + + describe "register_<%= schema.singular %>/1" do + test "requires email and password to be set" do + {:error, changeset} = <%= inspect context.alias %>.register_<%= schema.singular %>(%{}) + + assert %{ + password: ["can't be blank"], + email: ["can't be blank"] + } = errors_on(changeset) + end + + test "validates email and password when given" do + {:error, changeset} = <%= inspect context.alias %>.register_<%= schema.singular %>(%{email: "not valid", password: "not valid"}) + + assert %{ + email: ["must have the @ sign and no spaces"], + password: ["should be at least 12 character(s)"] + } = errors_on(changeset) + end + + test "validates maximum values for email and password for security" 
do + too_long = String.duplicate("db", 100) + {:error, changeset} = <%= inspect context.alias %>.register_<%= schema.singular %>(%{email: too_long, password: too_long}) + assert "should be at most 160 character(s)" in errors_on(changeset).email + assert "should be at most 72 character(s)" in errors_on(changeset).password + end + + test "validates email uniqueness" do + %{email: email} = <%= schema.singular %>_fixture() + {:error, changeset} = <%= inspect context.alias %>.register_<%= schema.singular %>(%{email: email}) + assert "has already been taken" in errors_on(changeset).email + + # Now try with the upper cased email too, to check that email case is ignored. + {:error, changeset} = <%= inspect context.alias %>.register_<%= schema.singular %>(%{email: String.upcase(email)}) + assert "has already been taken" in errors_on(changeset).email + end + + test "registers <%= schema.plural %> with a hashed password" do + email = unique_<%= schema.singular %>_email() + {:ok, <%= schema.singular %>} = <%= inspect context.alias %>.register_<%= schema.singular %>(valid_<%= schema.singular %>_attributes(email: email)) + assert <%= schema.singular %>.email == email + assert is_binary(<%= schema.singular %>.hashed_password) + assert is_nil(<%= schema.singular %>.confirmed_at) + assert is_nil(<%= schema.singular %>.password) + end + end + + describe "change_<%= schema.singular %>_registration/2" do + test "returns a changeset" do + assert %Ecto.Changeset{} = changeset = <%= inspect context.alias %>.change_<%= schema.singular %>_registration(%<%= inspect schema.alias %>{}) + assert changeset.required == [:password, :email] + end + + test "allows fields to be set" do + email = unique_<%= schema.singular %>_email() + password = valid_<%= schema.singular %>_password() + + changeset = + <%= inspect context.alias %>.change_<%= schema.singular %>_registration( + %<%= inspect schema.alias %>{}, + valid_<%= schema.singular %>_attributes(email: email, password: password) + ) + + assert changeset.valid? 
+ assert get_change(changeset, :email) == email + assert get_change(changeset, :password) == password + assert is_nil(get_change(changeset, :hashed_password)) + end + end + + describe "change_<%= schema.singular %>_email/2" do + test "returns a <%= schema.singular %> changeset" do + assert %Ecto.Changeset{} = changeset = <%= inspect context.alias %>.change_<%= schema.singular %>_email(%<%= inspect schema.alias %>{}) + assert changeset.required == [:email] + end + end + + describe "apply_<%= schema.singular %>_email/3" do + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + test "requires email to change", %{<%= schema.singular %>: <%= schema.singular %>} do + {:error, changeset} = <%= inspect context.alias %>.apply_<%= schema.singular %>_email(<%= schema.singular %>, valid_<%= schema.singular %>_password(), %{}) + assert %{email: ["did not change"]} = errors_on(changeset) + end + + test "validates email", %{<%= schema.singular %>: <%= schema.singular %>} do + {:error, changeset} = + <%= inspect context.alias %>.apply_<%= schema.singular %>_email(<%= schema.singular %>, valid_<%= schema.singular %>_password(), %{email: "not valid"}) + + assert %{email: ["must have the @ sign and no spaces"]} = errors_on(changeset) + end + + test "validates maximum value for email for security", %{<%= schema.singular %>: <%= schema.singular %>} do + too_long = String.duplicate("db", 100) + + {:error, changeset} = + <%= inspect context.alias %>.apply_<%= schema.singular %>_email(<%= schema.singular %>, valid_<%= schema.singular %>_password(), %{email: too_long}) + + assert "should be at most 160 character(s)" in errors_on(changeset).email + end + + test "validates email uniqueness", %{<%= schema.singular %>: <%= schema.singular %>} do + %{email: email} = <%= schema.singular %>_fixture() + + {:error, changeset} = + <%= inspect context.alias %>.apply_<%= schema.singular %>_email(<%= schema.singular %>, valid_<%= schema.singular %>_password(), %{email: email}) + + assert "has already been taken" in errors_on(changeset).email + end + + test "validates current password", %{<%= schema.singular %>: <%= schema.singular %>} do + {:error, changeset} = + <%= inspect context.alias %>.apply_<%= schema.singular %>_email(<%= schema.singular %>, "invalid", %{email: unique_<%= schema.singular %>_email()}) + + assert %{current_password: ["is not valid"]} = errors_on(changeset) + end + + test "applies the email without persisting it", %{<%= schema.singular %>: <%= schema.singular %>} do + email = unique_<%= schema.singular %>_email() + {:ok, <%= schema.singular %>} = <%= inspect context.alias %>.apply_<%= schema.singular %>_email(<%= schema.singular %>, valid_<%= schema.singular %>_password(), %{email: email}) + assert <%= schema.singular %>.email == email + assert <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.id).email != email + end + end + + describe "deliver_update_email_instructions/3" do + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + test "sends token through notification", %{<%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_update_email_instructions(<%= schema.singular %>, "current@example.com", url) + end) + + {:ok, token} = Base.url_decode64(token, padding: false) + assert <%= schema.singular %>_token = Repo.get_by(<%= inspect schema.alias %>Token, token: :crypto.hash(:sha256, token)) + assert <%= schema.singular 
%>_token.<%= schema.singular %>_id == <%= schema.singular %>.id + assert <%= schema.singular %>_token.sent_to == <%= schema.singular %>.email + assert <%= schema.singular %>_token.context == "change:current@example.com" + end + end + + describe "update_<%= schema.singular %>_email/2" do + setup do + <%= schema.singular %> = <%= schema.singular %>_fixture() + email = unique_<%= schema.singular %>_email() + + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_update_email_instructions(%{<%= schema.singular %> | email: email}, <%= schema.singular %>.email, url) + end) + + %{<%= schema.singular %>: <%= schema.singular %>, token: token, email: email} + end + + test "updates the email with a valid token", %{<%= schema.singular %>: <%= schema.singular %>, token: token, email: email} do + assert <%= inspect context.alias %>.update_<%= schema.singular %>_email(<%= schema.singular %>, token) == :ok + changed_<%= schema.singular %> = Repo.get!(<%= inspect schema.alias %>, <%= schema.singular %>.id) + assert changed_<%= schema.singular %>.email != <%= schema.singular %>.email + assert changed_<%= schema.singular %>.email == email + assert changed_<%= schema.singular %>.confirmed_at + assert changed_<%= schema.singular %>.confirmed_at != <%= schema.singular %>.confirmed_at + refute Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + + test "does not update email with invalid token", %{<%= schema.singular %>: <%= schema.singular %>} do + assert <%= inspect context.alias %>.update_<%= schema.singular %>_email(<%= schema.singular %>, "oops") == :error + assert Repo.get!(<%= inspect schema.alias %>, <%= schema.singular %>.id).email == <%= schema.singular %>.email + assert Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + + test "does not update email if <%= schema.singular %> email changed", %{<%= schema.singular %>: <%= schema.singular %>, token: token} do + assert <%= inspect context.alias %>.update_<%= schema.singular %>_email(%{<%= schema.singular %> | email: "current@example.com"}, token) == :error + assert Repo.get!(<%= inspect schema.alias %>, <%= schema.singular %>.id).email == <%= schema.singular %>.email + assert Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + + test "does not update email if token expired", %{<%= schema.singular %>: <%= schema.singular %>, token: token} do + {1, nil} = Repo.update_all(<%= inspect schema.alias %>Token, set: [inserted_at: ~N[2020-01-01 00:00:00]]) + assert <%= inspect context.alias %>.update_<%= schema.singular %>_email(<%= schema.singular %>, token) == :error + assert Repo.get!(<%= inspect schema.alias %>, <%= schema.singular %>.id).email == <%= schema.singular %>.email + assert Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + end + + describe "change_<%= schema.singular %>_password/2" do + test "returns a <%= schema.singular %> changeset" do + assert %Ecto.Changeset{} = changeset = <%= inspect context.alias %>.change_<%= schema.singular %>_password(%<%= inspect schema.alias %>{}) + assert changeset.required == [:password] + end + + test "allows fields to be set" do + changeset = + <%= inspect context.alias %>.change_<%= schema.singular %>_password(%<%= inspect schema.alias %>{}, %{ + "password" => "new valid password" + }) + + assert changeset.valid? 
+ assert get_change(changeset, :password) == "new valid password" + assert is_nil(get_change(changeset, :hashed_password)) + end + end + + describe "update_<%= schema.singular %>_password/3" do + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + test "validates password", %{<%= schema.singular %>: <%= schema.singular %>} do + {:error, changeset} = + <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, valid_<%= schema.singular %>_password(), %{ + password: "not valid", + password_confirmation: "another" + }) + + assert %{ + password: ["should be at least 12 character(s)"], + password_confirmation: ["does not match password"] + } = errors_on(changeset) + end + + test "validates maximum values for password for security", %{<%= schema.singular %>: <%= schema.singular %>} do + too_long = String.duplicate("db", 100) + + {:error, changeset} = + <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, valid_<%= schema.singular %>_password(), %{password: too_long}) + + assert "should be at most 72 character(s)" in errors_on(changeset).password + end + + test "validates current password", %{<%= schema.singular %>: <%= schema.singular %>} do + {:error, changeset} = + <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, "invalid", %{password: valid_<%= schema.singular %>_password()}) + + assert %{current_password: ["is not valid"]} = errors_on(changeset) + end + + test "updates the password", %{<%= schema.singular %>: <%= schema.singular %>} do + {:ok, <%= schema.singular %>} = + <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, valid_<%= schema.singular %>_password(), %{ + password: "new valid password" + }) + + assert is_nil(<%= schema.singular %>.password) + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(<%= schema.singular %>.email, "new valid password") + end + + test "deletes all tokens for the given <%= schema.singular %>", %{<%= schema.singular %>: <%= schema.singular %>} do + _ = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + + {:ok, _} = + <%= inspect context.alias %>.update_<%= schema.singular %>_password(<%= schema.singular %>, valid_<%= schema.singular %>_password(), %{ + password: "new valid password" + }) + + refute Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + end + + describe "generate_<%= schema.singular %>_session_token/1" do + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + test "generates a token", %{<%= schema.singular %>: <%= schema.singular %>} do + token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + assert <%= schema.singular %>_token = Repo.get_by(<%= inspect schema.alias %>Token, token: token) + assert <%= schema.singular %>_token.context == "session" + + # Creating the same token for another <%= schema.singular %> should fail + assert_raise Ecto.ConstraintError, fn -> + Repo.insert!(%<%= inspect schema.alias %>Token{ + token: <%= schema.singular %>_token.token, + <%= schema.singular %>_id: <%= schema.singular %>_fixture().id, + context: "session" + }) + end + end + end + + describe "get_<%= schema.singular %>_by_session_token/1" do + setup do + <%= schema.singular %> = <%= schema.singular %>_fixture() + token = <%= inspect context.alias 
%>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + %{<%= schema.singular %>: <%= schema.singular %>, token: token} + end + + test "returns <%= schema.singular %> by token", %{<%= schema.singular %>: <%= schema.singular %>, token: token} do + assert session_<%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(token) + assert session_<%= schema.singular %>.id == <%= schema.singular %>.id + end + + test "does not return <%= schema.singular %> for invalid token" do + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token("oops") + end + + test "does not return <%= schema.singular %> for expired token", %{token: token} do + {1, nil} = Repo.update_all(<%= inspect schema.alias %>Token, set: [inserted_at: ~N[2020-01-01 00:00:00]]) + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(token) + end + end + + describe "delete_session_token/1" do + test "deletes the token" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + token = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + assert <%= inspect context.alias %>.delete_session_token(token) == :ok + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_session_token(token) + end + end + + describe "deliver_<%= schema.singular %>_confirmation_instructions/2" do + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + test "sends token through notification", %{<%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_confirmation_instructions(<%= schema.singular %>, url) + end) + + {:ok, token} = Base.url_decode64(token, padding: false) + assert <%= schema.singular %>_token = Repo.get_by(<%= inspect schema.alias %>Token, token: :crypto.hash(:sha256, token)) + assert <%= schema.singular %>_token.<%= schema.singular %>_id == <%= schema.singular %>.id + assert <%= schema.singular %>_token.sent_to == <%= schema.singular %>.email + assert <%= schema.singular %>_token.context == "confirm" + end + end + + describe "confirm_<%= schema.singular %>/1" do + setup do + <%= schema.singular %> = <%= schema.singular %>_fixture() + + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_confirmation_instructions(<%= schema.singular %>, url) + end) + + %{<%= schema.singular %>: <%= schema.singular %>, token: token} + end + + test "confirms the email with a valid token", %{<%= schema.singular %>: <%= schema.singular %>, token: token} do + assert {:ok, confirmed_<%= schema.singular %>} = <%= inspect context.alias %>.confirm_<%= schema.singular %>(token) + assert confirmed_<%= schema.singular %>.confirmed_at + assert confirmed_<%= schema.singular %>.confirmed_at != <%= schema.singular %>.confirmed_at + assert Repo.get!(<%= inspect schema.alias %>, <%= schema.singular %>.id).confirmed_at + refute Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + + test "does not confirm with invalid token", %{<%= schema.singular %>: <%= schema.singular %>} do + assert <%= inspect context.alias %>.confirm_<%= schema.singular %>("oops") == :error + refute Repo.get!(<%= inspect schema.alias %>, <%= schema.singular %>.id).confirmed_at + assert Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular 
%>.id) + end + + test "does not confirm email if token expired", %{<%= schema.singular %>: <%= schema.singular %>, token: token} do + {1, nil} = Repo.update_all(<%= inspect schema.alias %>Token, set: [inserted_at: ~N[2020-01-01 00:00:00]]) + assert <%= inspect context.alias %>.confirm_<%= schema.singular %>(token) == :error + refute Repo.get!(<%= inspect schema.alias %>, <%= schema.singular %>.id).confirmed_at + assert Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + end + + describe "deliver_<%= schema.singular %>_reset_password_instructions/2" do + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + test "sends token through notification", %{<%= schema.singular %>: <%= schema.singular %>} do + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_reset_password_instructions(<%= schema.singular %>, url) + end) + + {:ok, token} = Base.url_decode64(token, padding: false) + assert <%= schema.singular %>_token = Repo.get_by(<%= inspect schema.alias %>Token, token: :crypto.hash(:sha256, token)) + assert <%= schema.singular %>_token.<%= schema.singular %>_id == <%= schema.singular %>.id + assert <%= schema.singular %>_token.sent_to == <%= schema.singular %>.email + assert <%= schema.singular %>_token.context == "reset_password" + end + end + + describe "get_<%= schema.singular %>_by_reset_password_token/1" do + setup do + <%= schema.singular %> = <%= schema.singular %>_fixture() + + token = + extract_<%= schema.singular %>_token(fn url -> + <%= inspect context.alias %>.deliver_<%= schema.singular %>_reset_password_instructions(<%= schema.singular %>, url) + end) + + %{<%= schema.singular %>: <%= schema.singular %>, token: token} + end + + test "returns the <%= schema.singular %> with valid token", %{<%= schema.singular %>: %{id: id}, token: token} do + assert %<%= inspect schema.alias %>{id: ^id} = <%= inspect context.alias %>.get_<%= schema.singular %>_by_reset_password_token(token) + assert Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: id) + end + + test "does not return the <%= schema.singular %> with invalid token", %{<%= schema.singular %>: <%= schema.singular %>} do + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_reset_password_token("oops") + assert Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + + test "does not return the <%= schema.singular %> if token expired", %{<%= schema.singular %>: <%= schema.singular %>, token: token} do + {1, nil} = Repo.update_all(<%= inspect schema.alias %>Token, set: [inserted_at: ~N[2020-01-01 00:00:00]]) + refute <%= inspect context.alias %>.get_<%= schema.singular %>_by_reset_password_token(token) + assert Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + end + + describe "reset_<%= schema.singular %>_password/2" do + setup do + %{<%= schema.singular %>: <%= schema.singular %>_fixture()} + end + + test "validates password", %{<%= schema.singular %>: <%= schema.singular %>} do + {:error, changeset} = + <%= inspect context.alias %>.reset_<%= schema.singular %>_password(<%= schema.singular %>, %{ + password: "not valid", + password_confirmation: "another" + }) + + assert %{ + password: ["should be at least 12 character(s)"], + password_confirmation: ["does not match password"] + } = errors_on(changeset) + end + + test "validates maximum values 
for password for security", %{<%= schema.singular %>: <%= schema.singular %>} do + too_long = String.duplicate("db", 100) + {:error, changeset} = <%= inspect context.alias %>.reset_<%= schema.singular %>_password(<%= schema.singular %>, %{password: too_long}) + assert "should be at most 72 character(s)" in errors_on(changeset).password + end + + test "updates the password", %{<%= schema.singular %>: <%= schema.singular %>} do + {:ok, updated_<%= schema.singular %>} = <%= inspect context.alias %>.reset_<%= schema.singular %>_password(<%= schema.singular %>, %{password: "new valid password"}) + assert is_nil(updated_<%= schema.singular %>.password) + assert <%= inspect context.alias %>.get_<%= schema.singular %>_by_email_and_password(<%= schema.singular %>.email, "new valid password") + end + + test "deletes all tokens for the given <%= schema.singular %>", %{<%= schema.singular %>: <%= schema.singular %>} do + _ = <%= inspect context.alias %>.generate_<%= schema.singular %>_session_token(<%= schema.singular %>) + {:ok, _} = <%= inspect context.alias %>.reset_<%= schema.singular %>_password(<%= schema.singular %>, %{password: "new valid password"}) + refute Repo.get_by(<%= inspect schema.alias %>Token, <%= schema.singular %>_id: <%= schema.singular %>.id) + end + end + + describe "inspect/2" do + test "does not include password" do + refute inspect(%<%= inspect schema.alias %>{password: "123456"}) =~ "password: \"123456\"" + end + end diff --git a/deps/phoenix/priv/templates/phx.gen.channel/channel.ex b/deps/phoenix/priv/templates/phx.gen.channel/channel.ex new file mode 100644 index 0000000..0f7a9b5 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.channel/channel.ex @@ -0,0 +1,32 @@ +defmodule <%= module %>Channel do + use <%= web_module %>, :channel + + @impl true + def join("<%= singular %>:lobby", payload, socket) do + if authorized?(payload) do + {:ok, socket} + else + {:error, %{reason: "unauthorized"}} + end + end + + # Channels can be used in a request/response fashion + # by sending replies to requests from the client + @impl true + def handle_in("ping", payload, socket) do + {:reply, {:ok, payload}, socket} + end + + # It is also common to receive messages from the client and + # broadcast to everyone in the current topic (<%= singular %>:lobby). + @impl true + def handle_in("shout", payload, socket) do + broadcast(socket, "shout", payload) + {:noreply, socket} + end + + # Add authorization logic here as required. + defp authorized?(_payload) do + true + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.channel/channel_case.ex b/deps/phoenix/priv/templates/phx.gen.channel/channel_case.ex new file mode 100644 index 0000000..f0dd5f9 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.channel/channel_case.ex @@ -0,0 +1,39 @@ +defmodule <%= web_module %>.ChannelCase do + @moduledoc """ + This module defines the test case to be used by + channel tests. + + Such tests rely on `Phoenix.ChannelTest` and also + import other functionality to make it easier + to build common data structures and query the data layer. + + Finally, if the test case interacts with the database, + we enable the SQL sandbox, so changes done to the database + are reverted at the end of every test. If you are using + PostgreSQL, you can even run database tests asynchronously + by setting `use <%= web_module %>.ChannelCase, async: true`, although + this option is not recommended for other databases. 
+ """ + + use ExUnit.CaseTemplate + + using do + quote do + # Import conveniences for testing with channels + import Phoenix.ChannelTest + import <%= web_module %>.ChannelCase + + # The default endpoint for testing + @endpoint <%= web_module %>.Endpoint + end + end<%= if Code.ensure_loaded?(Ecto.Adapters.SQL) do %> + + setup tags do + <%= base %>.DataCase.setup_sandbox(tags) + :ok + end<% else %> + + setup _tags do + :ok + end<% end %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.channel/channel_test.exs b/deps/phoenix/priv/templates/phx.gen.channel/channel_test.exs new file mode 100644 index 0000000..7698d87 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.channel/channel_test.exs @@ -0,0 +1,27 @@ +defmodule <%= module %>ChannelTest do + use <%= web_module %>.ChannelCase + + setup do + {:ok, _, socket} = + <%= web_module %>.UserSocket + |> socket("user_id", %{some: :assign}) + |> subscribe_and_join(<%= module %>Channel, "<%= singular %>:lobby") + + %{socket: socket} + end + + test "ping replies with status ok", %{socket: socket} do + ref = push(socket, "ping", %{"hello" => "there"}) + assert_reply ref, :ok, %{"hello" => "there"} + end + + test "shout broadcasts to <%= singular %>:lobby", %{socket: socket} do + push(socket, "shout", %{"hello" => "all"}) + assert_broadcast "shout", %{"hello" => "all"} + end + + test "broadcasts are pushed to the client", %{socket: socket} do + broadcast_from!(socket, "broadcast", %{"some" => "data"}) + assert_push "broadcast", %{"some" => "data"} + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.context/access_no_schema.ex b/deps/phoenix/priv/templates/phx.gen.context/access_no_schema.ex new file mode 100644 index 0000000..71afc77 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/access_no_schema.ex @@ -0,0 +1,89 @@ + + alias <%= inspect schema.module %> + + @doc """ + Returns the list of <%= schema.plural %>. + + ## Examples + + iex> list_<%= schema.plural %>() + [%<%= inspect schema.alias %>{}, ...] + + """ + def list_<%= schema.plural %> do + raise "TODO" + end + + @doc """ + Gets a single <%= schema.singular %>. + + Raises if the <%= schema.human_singular %> does not exist. + + ## Examples + + iex> get_<%= schema.singular %>!(123) + %<%= inspect schema.alias %>{} + + """ + def get_<%= schema.singular %>!(id), do: raise "TODO" + + @doc """ + Creates a <%= schema.singular %>. + + ## Examples + + iex> create_<%= schema.singular %>(%{field: value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> create_<%= schema.singular %>(%{field: bad_value}) + {:error, ...} + + """ + def create_<%= schema.singular %>(attrs \\ %{}) do + raise "TODO" + end + + @doc """ + Updates a <%= schema.singular %>. + + ## Examples + + iex> update_<%= schema.singular %>(<%= schema.singular %>, %{field: new_value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> update_<%= schema.singular %>(<%= schema.singular %>, %{field: bad_value}) + {:error, ...} + + """ + def update_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs) do + raise "TODO" + end + + @doc """ + Deletes a <%= inspect schema.alias %>. + + ## Examples + + iex> delete_<%= schema.singular %>(<%= schema.singular %>) + {:ok, %<%= inspect schema.alias %>{}} + + iex> delete_<%= schema.singular %>(<%= schema.singular %>) + {:error, ...} + + """ + def delete_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>) do + raise "TODO" + end + + @doc """ + Returns a data structure for tracking <%= schema.singular %> changes. 
+ + ## Examples + + iex> change_<%= schema.singular %>(<%= schema.singular %>) + %Todo{...} + + """ + def change_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>, _attrs \\ %{}) do + raise "TODO" + end diff --git a/deps/phoenix/priv/templates/phx.gen.context/context.ex b/deps/phoenix/priv/templates/phx.gen.context/context.ex new file mode 100644 index 0000000..4215d5c --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/context.ex @@ -0,0 +1,8 @@ +defmodule <%= inspect context.module %> do + @moduledoc """ + The <%= context.name %> context. + """ + + import Ecto.Query, warn: false + alias <%= inspect schema.repo %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.context/context_test.exs b/deps/phoenix/priv/templates/phx.gen.context/context_test.exs new file mode 100644 index 0000000..8958ec4 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/context_test.exs @@ -0,0 +1,5 @@ +defmodule <%= inspect context.module %>Test do + use <%= inspect context.base_module %>.DataCase + + alias <%= inspect context.module %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.context/fixtures.ex b/deps/phoenix/priv/templates/phx.gen.context/fixtures.ex new file mode 100644 index 0000000..13e1f73 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/fixtures.ex @@ -0,0 +1,17 @@ +<%= for {attr, {_function_name, function_def, _needs_impl?}} <- schema.fixture_unique_functions do %> @doc """ + Generate a unique <%= schema.singular %> <%= attr %>. + """ +<%= function_def %> +<% end %> @doc """ + Generate a <%= schema.singular %>. + """ + def <%= schema.singular %>_fixture(attrs \\ %{}) do + {:ok, <%= schema.singular %>} = + attrs + |> Enum.into(%{ +<%= schema.fixture_params |> Enum.map(fn {key, code} -> " #{key}: #{code}" end) |> Enum.join(",\n") %> + }) + |> <%= inspect context.module %>.create_<%= schema.singular %>() + + <%= schema.singular %> + end diff --git a/deps/phoenix/priv/templates/phx.gen.context/fixtures_module.ex b/deps/phoenix/priv/templates/phx.gen.context/fixtures_module.ex new file mode 100644 index 0000000..111acbf --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/fixtures_module.ex @@ -0,0 +1,6 @@ +defmodule <%= inspect context.module %>Fixtures do + @moduledoc """ + This module defines test helpers for creating + entities via the `<%= inspect context.module %>` context. + """ +end diff --git a/deps/phoenix/priv/templates/phx.gen.context/schema_access.ex b/deps/phoenix/priv/templates/phx.gen.context/schema_access.ex new file mode 100644 index 0000000..bdee344 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/schema_access.ex @@ -0,0 +1,96 @@ + + alias <%= inspect schema.module %> + + @doc """ + Returns the list of <%= schema.plural %>. + + ## Examples + + iex> list_<%= schema.plural %>() + [%<%= inspect schema.alias %>{}, ...] + + """ + def list_<%= schema.plural %> do + Repo.all(<%= inspect schema.alias %>) + end + + @doc """ + Gets a single <%= schema.singular %>. + + Raises `Ecto.NoResultsError` if the <%= schema.human_singular %> does not exist. + + ## Examples + + iex> get_<%= schema.singular %>!(123) + %<%= inspect schema.alias %>{} + + iex> get_<%= schema.singular %>!(456) + ** (Ecto.NoResultsError) + + """ + def get_<%= schema.singular %>!(id), do: Repo.get!(<%= inspect schema.alias %>, id) + + @doc """ + Creates a <%= schema.singular %>. 
+ + ## Examples + + iex> create_<%= schema.singular %>(%{field: value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> create_<%= schema.singular %>(%{field: bad_value}) + {:error, %Ecto.Changeset{}} + + """ + def create_<%= schema.singular %>(attrs \\ %{}) do + %<%= inspect schema.alias %>{} + |> <%= inspect schema.alias %>.changeset(attrs) + |> Repo.insert() + end + + @doc """ + Updates a <%= schema.singular %>. + + ## Examples + + iex> update_<%= schema.singular %>(<%= schema.singular %>, %{field: new_value}) + {:ok, %<%= inspect schema.alias %>{}} + + iex> update_<%= schema.singular %>(<%= schema.singular %>, %{field: bad_value}) + {:error, %Ecto.Changeset{}} + + """ + def update_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs) do + <%= schema.singular %> + |> <%= inspect schema.alias %>.changeset(attrs) + |> Repo.update() + end + + @doc """ + Deletes a <%= schema.singular %>. + + ## Examples + + iex> delete_<%= schema.singular %>(<%= schema.singular %>) + {:ok, %<%= inspect schema.alias %>{}} + + iex> delete_<%= schema.singular %>(<%= schema.singular %>) + {:error, %Ecto.Changeset{}} + + """ + def delete_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>) do + Repo.delete(<%= schema.singular %>) + end + + @doc """ + Returns an `%Ecto.Changeset{}` for tracking <%= schema.singular %> changes. + + ## Examples + + iex> change_<%= schema.singular %>(<%= schema.singular %>) + %Ecto.Changeset{data: %<%= inspect schema.alias %>{}} + + """ + def change_<%= schema.singular %>(%<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs \\ %{}) do + <%= inspect schema.alias %>.changeset(<%= schema.singular %>, attrs) + end diff --git a/deps/phoenix/priv/templates/phx.gen.context/test_cases.exs b/deps/phoenix/priv/templates/phx.gen.context/test_cases.exs new file mode 100644 index 0000000..3931af5 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.context/test_cases.exs @@ -0,0 +1,54 @@ + + describe "<%= schema.plural %>" do + alias <%= inspect schema.module %> + + import <%= inspect context.module %>Fixtures + + @invalid_attrs <%= Mix.Phoenix.to_text for {key, _} <- schema.params.create, into: %{}, do: {key, nil} %> + + test "list_<%= schema.plural %>/0 returns all <%= schema.plural %>" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + assert <%= inspect context.alias %>.list_<%= schema.plural %>() == [<%= schema.singular %>] + end + + test "get_<%= schema.singular %>!/1 returns the <%= schema.singular %> with given id" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + assert <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.id) == <%= schema.singular %> + end + + test "create_<%= schema.singular %>/1 with valid data creates a <%= schema.singular %>" do + valid_attrs = <%= Mix.Phoenix.to_text schema.params.create %> + + assert {:ok, %<%= inspect schema.alias %>{} = <%= schema.singular %>} = <%= inspect context.alias %>.create_<%= schema.singular %>(valid_attrs)<%= for {field, value} <- schema.params.create do %> + assert <%= schema.singular %>.<%= field %> == <%= Mix.Phoenix.Schema.value(schema, field, value) %><% end %> + end + + test "create_<%= schema.singular %>/1 with invalid data returns error changeset" do + assert {:error, %Ecto.Changeset{}} = <%= inspect context.alias %>.create_<%= schema.singular %>(@invalid_attrs) + end + + test "update_<%= schema.singular %>/2 with valid data updates the <%= schema.singular %>" do + <%= schema.singular %> = <%= 
schema.singular %>_fixture() + update_attrs = <%= Mix.Phoenix.to_text schema.params.update%> + + assert {:ok, %<%= inspect schema.alias %>{} = <%= schema.singular %>} = <%= inspect context.alias %>.update_<%= schema.singular %>(<%= schema.singular %>, update_attrs)<%= for {field, value} <- schema.params.update do %> + assert <%= schema.singular %>.<%= field %> == <%= Mix.Phoenix.Schema.value(schema, field, value) %><% end %> + end + + test "update_<%= schema.singular %>/2 with invalid data returns error changeset" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + assert {:error, %Ecto.Changeset{}} = <%= inspect context.alias %>.update_<%= schema.singular %>(<%= schema.singular %>, @invalid_attrs) + assert <%= schema.singular %> == <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.id) + end + + test "delete_<%= schema.singular %>/1 deletes the <%= schema.singular %>" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + assert {:ok, %<%= inspect schema.alias %>{}} = <%= inspect context.alias %>.delete_<%= schema.singular %>(<%= schema.singular %>) + assert_raise Ecto.NoResultsError, fn -> <%= inspect context.alias %>.get_<%= schema.singular %>!(<%= schema.singular %>.id) end + end + + test "change_<%= schema.singular %>/1 returns a <%= schema.singular %> changeset" do + <%= schema.singular %> = <%= schema.singular %>_fixture() + assert %Ecto.Changeset{} = <%= inspect context.alias %>.change_<%= schema.singular %>(<%= schema.singular %>) + end + end diff --git a/deps/phoenix/priv/templates/phx.gen.embedded/embedded_schema.ex b/deps/phoenix/priv/templates/phx.gen.embedded/embedded_schema.ex new file mode 100644 index 0000000..e3acfb8 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.embedded/embedded_schema.ex @@ -0,0 +1,16 @@ +defmodule <%= inspect schema.module %> do + use Ecto.Schema + import Ecto.Changeset + alias <%= inspect schema.module %> + + embedded_schema do +<%= Mix.Phoenix.Schema.format_fields_for_schema(schema) %> + end + + @doc false + def changeset(%<%= inspect schema.alias %>{} = <%= schema.singular %>, attrs) do + <%= schema.singular %> + |> cast(attrs, [<%= Enum.map_join(schema.attrs, ", ", &inspect(elem(&1, 0))) %>]) + |> validate_required([<%= Enum.map_join(schema.attrs, ", ", &inspect(elem(&1, 0))) %>]) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.html/controller.ex b/deps/phoenix/priv/templates/phx.gen.html/controller.ex new file mode 100644 index 0000000..a5cc457 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/controller.ex @@ -0,0 +1,62 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Controller do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + alias <%= inspect schema.module %> + + def index(conn, _params) do + <%= schema.plural %> = <%= inspect context.alias %>.list_<%= schema.plural %>() + render(conn, "index.html", <%= schema.plural %>: <%= schema.plural %>) + end + + def new(conn, _params) do + changeset = <%= inspect context.alias %>.change_<%= schema.singular %>(%<%= inspect schema.alias %>{}) + render(conn, "new.html", changeset: changeset) + end + + def create(conn, %{<%= inspect schema.singular %> => <%= schema.singular %>_params}) do + case <%= inspect context.alias %>.create_<%= schema.singular %>(<%= schema.singular %>_params) do + {:ok, <%= schema.singular %>} -> + conn + |> put_flash(:info, "<%= schema.human_singular %> created successfully.") + |> 
redirect(to: Routes.<%= schema.route_helper %>_path(conn, :show, <%= schema.singular %>)) + + {:error, %Ecto.Changeset{} = changeset} -> + render(conn, "new.html", changeset: changeset) + end + end + + def show(conn, %{"id" => id}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(id) + render(conn, "show.html", <%= schema.singular %>: <%= schema.singular %>) + end + + def edit(conn, %{"id" => id}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(id) + changeset = <%= inspect context.alias %>.change_<%= schema.singular %>(<%= schema.singular %>) + render(conn, "edit.html", <%= schema.singular %>: <%= schema.singular %>, changeset: changeset) + end + + def update(conn, %{"id" => id, <%= inspect schema.singular %> => <%= schema.singular %>_params}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(id) + + case <%= inspect context.alias %>.update_<%= schema.singular %>(<%= schema.singular %>, <%= schema.singular %>_params) do + {:ok, <%= schema.singular %>} -> + conn + |> put_flash(:info, "<%= schema.human_singular %> updated successfully.") + |> redirect(to: Routes.<%= schema.route_helper %>_path(conn, :show, <%= schema.singular %>)) + + {:error, %Ecto.Changeset{} = changeset} -> + render(conn, "edit.html", <%= schema.singular %>: <%= schema.singular %>, changeset: changeset) + end + end + + def delete(conn, %{"id" => id}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(id) + {:ok, _<%= schema.singular %>} = <%= inspect context.alias %>.delete_<%= schema.singular %>(<%= schema.singular %>) + + conn + |> put_flash(:info, "<%= schema.human_singular %> deleted successfully.") + |> redirect(to: Routes.<%= schema.route_helper %>_path(conn, :index)) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.html/controller_test.exs b/deps/phoenix/priv/templates/phx.gen.html/controller_test.exs new file mode 100644 index 0000000..0ac312e --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/controller_test.exs @@ -0,0 +1,85 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>ControllerTest do + use <%= inspect context.web_module %>.ConnCase + + import <%= inspect context.module %>Fixtures + + @create_attrs <%= Mix.Phoenix.to_text schema.params.create %> + @update_attrs <%= Mix.Phoenix.to_text schema.params.update %> + @invalid_attrs <%= Mix.Phoenix.to_text (for {key, _} <- schema.params.create, into: %{}, do: {key, nil}) %> + + describe "index" do + test "lists all <%= schema.plural %>", %{conn: conn} do + conn = get(conn, Routes.<%= schema.route_helper %>_path(conn, :index)) + assert html_response(conn, 200) =~ "Listing <%= schema.human_plural %>" + end + end + + describe "new <%= schema.singular %>" do + test "renders form", %{conn: conn} do + conn = get(conn, Routes.<%= schema.route_helper %>_path(conn, :new)) + assert html_response(conn, 200) =~ "New <%= schema.human_singular %>" + end + end + + describe "create <%= schema.singular %>" do + test "redirects to show when data is valid", %{conn: conn} do + conn = post(conn, Routes.<%= schema.route_helper %>_path(conn, :create), <%= schema.singular %>: @create_attrs) + + assert %{id: id} = redirected_params(conn) + assert redirected_to(conn) == Routes.<%= schema.route_helper %>_path(conn, :show, id) + + conn = get(conn, Routes.<%= schema.route_helper %>_path(conn, :show, id)) + assert html_response(conn, 200) =~ "Show <%= 
schema.human_singular %>" + end + + test "renders errors when data is invalid", %{conn: conn} do + conn = post(conn, Routes.<%= schema.route_helper %>_path(conn, :create), <%= schema.singular %>: @invalid_attrs) + assert html_response(conn, 200) =~ "New <%= schema.human_singular %>" + end + end + + describe "edit <%= schema.singular %>" do + setup [:create_<%= schema.singular %>] + + test "renders form for editing chosen <%= schema.singular %>", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = get(conn, Routes.<%= schema.route_helper %>_path(conn, :edit, <%= schema.singular %>)) + assert html_response(conn, 200) =~ "Edit <%= schema.human_singular %>" + end + end + + describe "update <%= schema.singular %>" do + setup [:create_<%= schema.singular %>] + + test "redirects when data is valid", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = put(conn, Routes.<%= schema.route_helper %>_path(conn, :update, <%= schema.singular %>), <%= schema.singular %>: @update_attrs) + assert redirected_to(conn) == Routes.<%= schema.route_helper %>_path(conn, :show, <%= schema.singular %>) + + conn = get(conn, Routes.<%= schema.route_helper %>_path(conn, :show, <%= schema.singular %>))<%= if schema.string_attr do %> + assert html_response(conn, 200) =~ <%= inspect Mix.Phoenix.Schema.default_param(schema, :update) %><% else %> + assert html_response(conn, 200)<% end %> + end + + test "renders errors when data is invalid", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = put(conn, Routes.<%= schema.route_helper %>_path(conn, :update, <%= schema.singular %>), <%= schema.singular %>: @invalid_attrs) + assert html_response(conn, 200) =~ "Edit <%= schema.human_singular %>" + end + end + + describe "delete <%= schema.singular %>" do + setup [:create_<%= schema.singular %>] + + test "deletes chosen <%= schema.singular %>", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = delete(conn, Routes.<%= schema.route_helper %>_path(conn, :delete, <%= schema.singular %>)) + assert redirected_to(conn) == Routes.<%= schema.route_helper %>_path(conn, :index) + + assert_error_sent 404, fn -> + get(conn, Routes.<%= schema.route_helper %>_path(conn, :show, <%= schema.singular %>)) + end + end + end + + defp create_<%= schema.singular %>(_) do + <%= schema.singular %> = <%= schema.singular %>_fixture() + %{<%= schema.singular %>: <%= schema.singular %>} + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.html/edit.html.heex b/deps/phoenix/priv/templates/phx.gen.html/edit.html.heex new file mode 100644 index 0000000..a846d72 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/edit.html.heex @@ -0,0 +1,5 @@ +

<h1>Edit <%= schema.human_singular %></h1>
+ +<%%= render "form.html", Map.put(assigns, :action, Routes.<%= schema.route_helper %>_path(@conn, :update, @<%= schema.singular %>)) %> + +<%%= link "Back", to: Routes.<%= schema.route_helper %>_path(@conn, :index) %> diff --git a/deps/phoenix/priv/templates/phx.gen.html/form.html.heex b/deps/phoenix/priv/templates/phx.gen.html/form.html.heex new file mode 100644 index 0000000..f0dfb86 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/form.html.heex @@ -0,0 +1,15 @@ +<.form let={f} for={@changeset} action={@action}> + <%%= if @changeset.action do %> +
<div class="alert alert-danger">
+      <p>Oops, something went wrong! Please check the errors below.</p>
+    </div>
+ <%% end %> +<%= for {label, input, error} <- inputs, input do %> + <%= label %> + <%= input %> + <%= error %> +<% end %> +
<div>
+    <%%= submit "Save" %>
+  </div>
+</.form>
+ diff --git a/deps/phoenix/priv/templates/phx.gen.html/index.html.heex b/deps/phoenix/priv/templates/phx.gen.html/index.html.heex new file mode 100644 index 0000000..b3d271a --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/index.html.heex @@ -0,0 +1,26 @@ +

<h1>Listing <%= schema.human_plural %></h1>
+
+<table>
+  <thead>
+    <tr>
+<%= for {k, _} <- schema.attrs do %>      <th><%= Phoenix.Naming.humanize(Atom.to_string(k)) %></th>
+<% end %>      <th></th>
+    </tr>
+  </thead>
+  <tbody>
+<%%= for <%= schema.singular %> <- @<%= schema.plural %> do %>
+    <tr>
+<%= for {k, _} <- schema.attrs do %>      <td><%%= <%= schema.singular %>.<%= k %> %></td>
+<% end %>
+      <td>
+        <span><%%= link "Show", to: Routes.<%= schema.route_helper %>_path(@conn, :show, <%= schema.singular %>) %></span>
+        <span><%%= link "Edit", to: Routes.<%= schema.route_helper %>_path(@conn, :edit, <%= schema.singular %>) %></span>
+        <span><%%= link "Delete", to: Routes.<%= schema.route_helper %>_path(@conn, :delete, <%= schema.singular %>), method: :delete, data: [confirm: "Are you sure?"] %></span>
+      </td>
+    </tr>
+<%% end %>
+  </tbody>
+</table>
+ +<%%= link "New <%= schema.human_singular %>", to: Routes.<%= schema.route_helper %>_path(@conn, :new) %> diff --git a/deps/phoenix/priv/templates/phx.gen.html/new.html.heex b/deps/phoenix/priv/templates/phx.gen.html/new.html.heex new file mode 100644 index 0000000..135bafa --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/new.html.heex @@ -0,0 +1,5 @@ +

<h1>New <%= schema.human_singular %></h1>
+ +<%%= render "form.html", Map.put(assigns, :action, Routes.<%= schema.route_helper %>_path(@conn, :create)) %> + +<%%= link "Back", to: Routes.<%= schema.route_helper %>_path(@conn, :index) %> diff --git a/deps/phoenix/priv/templates/phx.gen.html/show.html.heex b/deps/phoenix/priv/templates/phx.gen.html/show.html.heex new file mode 100644 index 0000000..469fc4a --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/show.html.heex @@ -0,0 +1,13 @@ +

<h1>Show <%= schema.human_singular %></h1>
+
+<ul>
+<%= for {k, _} <- schema.attrs do %>
+  <li>
+    <strong><%= Phoenix.Naming.humanize(Atom.to_string(k)) %>:</strong>
+    <%%= @<%= schema.singular %>.<%= k %> %>
+  </li>
+<% end %>
+</ul>
+ +<%%= link "Edit", to: Routes.<%= schema.route_helper %>_path(@conn, :edit, @<%= schema.singular %>) %> | +<%%= link "Back", to: Routes.<%= schema.route_helper %>_path(@conn, :index) %> diff --git a/deps/phoenix/priv/templates/phx.gen.html/view.ex b/deps/phoenix/priv/templates/phx.gen.html/view.ex new file mode 100644 index 0000000..e828b07 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.html/view.ex @@ -0,0 +1,3 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>View do + use <%= inspect context.web_module %>, :view +end diff --git a/deps/phoenix/priv/templates/phx.gen.json/changeset_view.ex b/deps/phoenix/priv/templates/phx.gen.json/changeset_view.ex new file mode 100644 index 0000000..27b58e8 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.json/changeset_view.ex @@ -0,0 +1,19 @@ +defmodule <%= inspect context.web_module %>.ChangesetView do + use <%= inspect context.web_module %>, :view + + @doc """ + Traverses and translates changeset errors. + + See `Ecto.Changeset.traverse_errors/2` and + `<%= inspect context.web_module %>.ErrorHelpers.translate_error/1` for more details. + """ + def translate_errors(changeset) do + Ecto.Changeset.traverse_errors(changeset, &translate_error/1) + end + + def render("error.json", %{changeset: changeset}) do + # When encoded, the changeset returns its errors + # as a JSON object. So we just pass it forward. + %{errors: translate_errors(changeset)} + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.json/controller.ex b/deps/phoenix/priv/templates/phx.gen.json/controller.ex new file mode 100644 index 0000000..1eb81e5 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.json/controller.ex @@ -0,0 +1,43 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Controller do + use <%= inspect context.web_module %>, :controller + + alias <%= inspect context.module %> + alias <%= inspect schema.module %> + + action_fallback <%= inspect context.web_module %>.FallbackController + + def index(conn, _params) do + <%= schema.plural %> = <%= inspect context.alias %>.list_<%= schema.plural %>() + render(conn, "index.json", <%= schema.plural %>: <%= schema.plural %>) + end + + def create(conn, %{<%= inspect schema.singular %> => <%= schema.singular %>_params}) do + with {:ok, %<%= inspect schema.alias %>{} = <%= schema.singular %>} <- <%= inspect context.alias %>.create_<%= schema.singular %>(<%= schema.singular %>_params) do + conn + |> put_status(:created) + |> put_resp_header("location", Routes.<%= schema.route_helper %>_path(conn, :show, <%= schema.singular %>)) + |> render("show.json", <%= schema.singular %>: <%= schema.singular %>) + end + end + + def show(conn, %{"id" => id}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(id) + render(conn, "show.json", <%= schema.singular %>: <%= schema.singular %>) + end + + def update(conn, %{"id" => id, <%= inspect schema.singular %> => <%= schema.singular %>_params}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(id) + + with {:ok, %<%= inspect schema.alias %>{} = <%= schema.singular %>} <- <%= inspect context.alias %>.update_<%= schema.singular %>(<%= schema.singular %>, <%= schema.singular %>_params) do + render(conn, "show.json", <%= schema.singular %>: <%= schema.singular %>) + end + end + + def delete(conn, %{"id" => id}) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= 
schema.singular %>!(id) + + with {:ok, %<%= inspect schema.alias %>{}} <- <%= inspect context.alias %>.delete_<%= schema.singular %>(<%= schema.singular %>) do + send_resp(conn, :no_content, "") + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.json/controller_test.exs b/deps/phoenix/priv/templates/phx.gen.json/controller_test.exs new file mode 100644 index 0000000..0208cc4 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.json/controller_test.exs @@ -0,0 +1,84 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>ControllerTest do + use <%= inspect context.web_module %>.ConnCase + + import <%= inspect context.module %>Fixtures + + alias <%= inspect schema.module %> + + @create_attrs %{ +<%= schema.params.create |> Enum.map(fn {key, val} -> " #{key}: #{inspect(val)}" end) |> Enum.join(",\n") %> + } + @update_attrs %{ +<%= schema.params.update |> Enum.map(fn {key, val} -> " #{key}: #{inspect(val)}" end) |> Enum.join(",\n") %> + } + @invalid_attrs <%= Mix.Phoenix.to_text for {key, _} <- schema.params.create, into: %{}, do: {key, nil} %> + + setup %{conn: conn} do + {:ok, conn: put_req_header(conn, "accept", "application/json")} + end + + describe "index" do + test "lists all <%= schema.plural %>", %{conn: conn} do + conn = get(conn, Routes.<%= schema.route_helper %>_path(conn, :index)) + assert json_response(conn, 200)["data"] == [] + end + end + + describe "create <%= schema.singular %>" do + test "renders <%= schema.singular %> when data is valid", %{conn: conn} do + conn = post(conn, Routes.<%= schema.route_helper %>_path(conn, :create), <%= schema.singular %>: @create_attrs) + assert %{"id" => id} = json_response(conn, 201)["data"] + + conn = get(conn, Routes.<%= schema.route_helper %>_path(conn, :show, id)) + + assert %{ + "id" => ^id<%= for {key, val} <- schema.params.create |> Phoenix.json_library().encode!() |> Phoenix.json_library().decode!() do %>, + "<%= key %>" => <%= inspect(val) %><% end %> + } = json_response(conn, 200)["data"] + end + + test "renders errors when data is invalid", %{conn: conn} do + conn = post(conn, Routes.<%= schema.route_helper %>_path(conn, :create), <%= schema.singular %>: @invalid_attrs) + assert json_response(conn, 422)["errors"] != %{} + end + end + + describe "update <%= schema.singular %>" do + setup [:create_<%= schema.singular %>] + + test "renders <%= schema.singular %> when data is valid", %{conn: conn, <%= schema.singular %>: %<%= inspect schema.alias %>{id: id} = <%= schema.singular %>} do + conn = put(conn, Routes.<%= schema.route_helper %>_path(conn, :update, <%= schema.singular %>), <%= schema.singular %>: @update_attrs) + assert %{"id" => ^id} = json_response(conn, 200)["data"] + + conn = get(conn, Routes.<%= schema.route_helper %>_path(conn, :show, id)) + + assert %{ + "id" => ^id<%= for {key, val} <- schema.params.update |> Phoenix.json_library().encode!() |> Phoenix.json_library().decode!() do %>, + "<%= key %>" => <%= inspect(val) %><% end %> + } = json_response(conn, 200)["data"] + end + + test "renders errors when data is invalid", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + conn = put(conn, Routes.<%= schema.route_helper %>_path(conn, :update, <%= schema.singular %>), <%= schema.singular %>: @invalid_attrs) + assert json_response(conn, 422)["errors"] != %{} + end + end + + describe "delete <%= schema.singular %>" do + setup [:create_<%= schema.singular %>] + + test "deletes chosen <%= schema.singular %>", %{conn: conn, <%= 
schema.singular %>: <%= schema.singular %>} do + conn = delete(conn, Routes.<%= schema.route_helper %>_path(conn, :delete, <%= schema.singular %>)) + assert response(conn, 204) + + assert_error_sent 404, fn -> + get(conn, Routes.<%= schema.route_helper %>_path(conn, :show, <%= schema.singular %>)) + end + end + end + + defp create_<%= schema.singular %>(_) do + <%= schema.singular %> = <%= schema.singular %>_fixture() + %{<%= schema.singular %>: <%= schema.singular %>} + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.json/fallback_controller.ex b/deps/phoenix/priv/templates/phx.gen.json/fallback_controller.ex new file mode 100644 index 0000000..6ae62aa --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.json/fallback_controller.ex @@ -0,0 +1,24 @@ +defmodule <%= inspect context.web_module %>.FallbackController do + @moduledoc """ + Translates controller action results into valid `Plug.Conn` responses. + + See `Phoenix.Controller.action_fallback/1` for more details. + """ + use <%= inspect context.web_module %>, :controller + + <%= if schema.generate? do %># This clause handles errors returned by Ecto's insert/update/delete. + def call(conn, {:error, %Ecto.Changeset{} = changeset}) do + conn + |> put_status(:unprocessable_entity) + |> put_view(<%= inspect context.web_module %>.ChangesetView) + |> render("error.json", changeset: changeset) + end + + <% end %># This clause is an example of how to handle resources that cannot be found. + def call(conn, {:error, :not_found}) do + conn + |> put_status(:not_found) + |> put_view(<%= inspect context.web_module %>.ErrorView) + |> render(:"404") + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.json/view.ex b/deps/phoenix/priv/templates/phx.gen.json/view.ex new file mode 100644 index 0000000..0381109 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.json/view.ex @@ -0,0 +1,18 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>View do + use <%= inspect context.web_module %>, :view + alias <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>View + + def render("index.json", %{<%= schema.plural %>: <%= schema.plural %>}) do + %{data: render_many(<%= schema.plural %>, <%= inspect schema.alias %>View, "<%= schema.singular %>.json")} + end + + def render("show.json", %{<%= schema.singular %>: <%= schema.singular %>}) do + %{data: render_one(<%= schema.singular %>, <%= inspect schema.alias %>View, "<%= schema.singular %>.json")} + end + + def render("<%= schema.singular %>.json", %{<%= schema.singular %>: <%= schema.singular %>}) do + %{ +<%= [{:id, :id} | schema.attrs] |> Enum.map(fn {k, _} -> " #{k}: #{schema.singular}.#{k}" end) |> Enum.join(",\n") %> + } + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.live/form_component.ex b/deps/phoenix/priv/templates/phx.gen.live/form_component.ex new file mode 100644 index 0000000..6957239 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/form_component.ex @@ -0,0 +1,55 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.FormComponent do + use <%= inspect context.web_module %>, :live_component + + alias <%= inspect context.module %> + + @impl true + def update(%{<%= schema.singular %>: <%= schema.singular %>} = assigns, socket) do + changeset = <%= inspect context.alias %>.change_<%= schema.singular %>(<%= schema.singular %>) + + {:ok, + socket + |> assign(assigns) + |> assign(:changeset, 
changeset)} + end + + @impl true + def handle_event("validate", %{"<%= schema.singular %>" => <%= schema.singular %>_params}, socket) do + changeset = + socket.assigns.<%= schema.singular %> + |> <%= inspect context.alias %>.change_<%= schema.singular %>(<%= schema.singular %>_params) + |> Map.put(:action, :validate) + + {:noreply, assign(socket, :changeset, changeset)} + end + + def handle_event("save", %{"<%= schema.singular %>" => <%= schema.singular %>_params}, socket) do + save_<%= schema.singular %>(socket, socket.assigns.action, <%= schema.singular %>_params) + end + + defp save_<%= schema.singular %>(socket, :edit, <%= schema.singular %>_params) do + case <%= inspect context.alias %>.update_<%= schema.singular %>(socket.assigns.<%= schema.singular %>, <%= schema.singular %>_params) do + {:ok, _<%= schema.singular %>} -> + {:noreply, + socket + |> put_flash(:info, "<%= schema.human_singular %> updated successfully") + |> push_redirect(to: socket.assigns.return_to)} + + {:error, %Ecto.Changeset{} = changeset} -> + {:noreply, assign(socket, :changeset, changeset)} + end + end + + defp save_<%= schema.singular %>(socket, :new, <%= schema.singular %>_params) do + case <%= inspect context.alias %>.create_<%= schema.singular %>(<%= schema.singular %>_params) do + {:ok, _<%= schema.singular %>} -> + {:noreply, + socket + |> put_flash(:info, "<%= schema.human_singular %> created successfully") + |> push_redirect(to: socket.assigns.return_to)} + + {:error, %Ecto.Changeset{} = changeset} -> + {:noreply, assign(socket, changeset: changeset)} + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.live/form_component.html.heex b/deps/phoenix/priv/templates/phx.gen.live/form_component.html.heex new file mode 100644 index 0000000..7f185d4 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/form_component.html.heex @@ -0,0 +1,20 @@ +
+<div>
+  <h2><%%= @title %></h2>
+ + <.form + let={f} + for={@changeset} + id="<%= schema.singular %>-form" + phx-target={@myself} + phx-change="validate" + phx-submit="save"> + <%= for {label, input, error} <- inputs, input do %> + <%= label %> + <%= input %> + <%= error %> + <% end %> +
+    <%%= submit "Save", phx_disable_with: "Saving..." %>
+  </.form>
+</div>
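To make the EEx placeholders in the form component above easier to follow, here is a rough sketch of what it expands to for a hypothetical `mix phx.gen.live Blog Post posts title:string` run; the `App`/`AppWeb`/`Blog`/`Post` names are illustrative and not part of this patch:

```elixir
defmodule AppWeb.PostLive.FormComponent do
  use AppWeb, :live_component

  alias App.Blog

  @impl true
  def update(%{post: post} = assigns, socket) do
    # the generated Blog context wraps Post.changeset/2 in change_post/1
    changeset = Blog.change_post(post)

    {:ok,
     socket
     |> assign(assigns)
     |> assign(:changeset, changeset)}
  end

  @impl true
  def handle_event("validate", %{"post" => post_params}, socket) do
    changeset =
      socket.assigns.post
      |> Blog.change_post(post_params)
      |> Map.put(:action, :validate)

    {:noreply, assign(socket, :changeset, changeset)}
  end
end
```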
diff --git a/deps/phoenix/priv/templates/phx.gen.live/index.ex b/deps/phoenix/priv/templates/phx.gen.live/index.ex new file mode 100644 index 0000000..f46c53b --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/index.ex @@ -0,0 +1,46 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.Index do + use <%= inspect context.web_module %>, :live_view + + alias <%= inspect context.module %> + alias <%= inspect schema.module %> + + @impl true + def mount(_params, _session, socket) do + {:ok, assign(socket, :<%= schema.collection %>, list_<%= schema.plural %>())} + end + + @impl true + def handle_params(params, _url, socket) do + {:noreply, apply_action(socket, socket.assigns.live_action, params)} + end + + defp apply_action(socket, :edit, %{"id" => id}) do + socket + |> assign(:page_title, "Edit <%= schema.human_singular %>") + |> assign(:<%= schema.singular %>, <%= inspect context.alias %>.get_<%= schema.singular %>!(id)) + end + + defp apply_action(socket, :new, _params) do + socket + |> assign(:page_title, "New <%= schema.human_singular %>") + |> assign(:<%= schema.singular %>, %<%= inspect schema.alias %>{}) + end + + defp apply_action(socket, :index, _params) do + socket + |> assign(:page_title, "Listing <%= schema.human_plural %>") + |> assign(:<%= schema.singular %>, nil) + end + + @impl true + def handle_event("delete", %{"id" => id}, socket) do + <%= schema.singular %> = <%= inspect context.alias %>.get_<%= schema.singular %>!(id) + {:ok, _} = <%= inspect context.alias %>.delete_<%= schema.singular %>(<%= schema.singular %>) + + {:noreply, assign(socket, :<%= schema.collection %>, list_<%=schema.plural %>())} + end + + defp list_<%= schema.plural %> do + <%= inspect context.alias %>.list_<%= schema.plural %>() + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.live/index.html.heex b/deps/phoenix/priv/templates/phx.gen.live/index.html.heex new file mode 100644 index 0000000..3cf6780 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/index.html.heex @@ -0,0 +1,39 @@ +

+<h1>Listing <%= schema.human_plural %></h1>
+<%%= if @live_action in [:new, :edit] do %>
+  <.modal return_to={Routes.<%= schema.route_helper %>_index_path(@socket, :index)}>
+    <.live_component
+      module={<%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.FormComponent}
+      id={@<%= schema.singular %>.id || :new}
+      title={@page_title}
+      action={@live_action}
+      <%= schema.singular %>={@<%= schema.singular %>}
+      return_to={Routes.<%= schema.route_helper %>_index_path(@socket, :index)}
+    />
+  </.modal>
+<%% end %>
+
+<table>
+  <thead>
+    <tr>
+<%= for {k, _} <- schema.attrs do %>      <th><%= Phoenix.Naming.humanize(Atom.to_string(k)) %></th>
+<% end %>
+      <th></th>
+    </tr>
+  </thead>
+  <tbody id="<%= schema.plural %>">
+    <%%= for <%= schema.singular %> <- @<%= schema.collection %> do %>
+      <tr id={"<%= schema.singular %>-#{<%= schema.singular %>.id}"}>
+<%= for {k, _} <- schema.attrs do %>        <td><%%= <%= schema.singular %>.<%= k %> %></td>
+<% end %>
+        <td>
+          <span><%%= live_redirect "Show", to: Routes.<%= schema.route_helper %>_show_path(@socket, :show, <%= schema.singular %>) %></span>
+          <span><%%= live_patch "Edit", to: Routes.<%= schema.route_helper %>_index_path(@socket, :edit, <%= schema.singular %>) %></span>
+          <span><%%= link "Delete", to: "#", phx_click: "delete", phx_value_id: <%= schema.singular %>.id, data: [confirm: "Are you sure?"] %></span>
+        </td>
+      </tr>
+    <%% end %>
+  </tbody>
+</table>
+ +<%%= live_patch "New <%= schema.human_singular %>", to: Routes.<%= schema.route_helper %>_index_path(@socket, :new) %> diff --git a/deps/phoenix/priv/templates/phx.gen.live/live_helpers.ex b/deps/phoenix/priv/templates/phx.gen.live/live_helpers.ex new file mode 100644 index 0000000..2f2f644 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/live_helpers.ex @@ -0,0 +1,60 @@ +defmodule <%= inspect context.web_module %>.LiveHelpers do + import Phoenix.LiveView + import Phoenix.LiveView.Helpers + + alias Phoenix.LiveView.JS + + @doc """ + Renders a live component inside a modal. + + The rendered modal receives a `:return_to` option to properly update + the URL when the modal is closed. + + ## Examples + + <.modal return_to={Routes.<%= schema.singular %>_index_path(@socket, :index)}> + <.live_component + module={<%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.FormComponent} + id={@<%= schema.singular %>.id || :new} + title={@page_title} + action={@live_action} + return_to={Routes.<%= schema.singular %>_index_path(@socket, :index)} + <%= schema.singular %>: @<%= schema.singular %> + /> + + """ + def modal(assigns) do + assigns = assign_new(assigns, :return_to, fn -> nil end) + + ~H""" + + """ + end + + defp hide_modal(js \\ %JS{}) do + js + |> JS.hide(to: "#modal", transition: "fade-out") + |> JS.hide(to: "#modal-content", transition: "fade-out-scale") + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.live/live_test.exs b/deps/phoenix/priv/templates/phx.gen.live/live_test.exs new file mode 100644 index 0000000..d71e4b3 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/live_test.exs @@ -0,0 +1,110 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>LiveTest do + use <%= inspect context.web_module %>.ConnCase + + import Phoenix.LiveViewTest + import <%= inspect context.module %>Fixtures + + @create_attrs <%= Mix.Phoenix.to_text for {key, value} <- schema.params.create, into: %{}, do: {key, Mix.Phoenix.Schema.live_form_value(value)} %> + @update_attrs <%= Mix.Phoenix.to_text for {key, value} <- schema.params.update, into: %{}, do: {key, Mix.Phoenix.Schema.live_form_value(value)} %> + @invalid_attrs <%= Mix.Phoenix.to_text for {key, value} <- schema.params.create, into: %{}, do: {key, value |> Mix.Phoenix.Schema.live_form_value() |> Mix.Phoenix.Schema.invalid_form_value()} %> + + defp create_<%= schema.singular %>(_) do + <%= schema.singular %> = <%= schema.singular %>_fixture() + %{<%= schema.singular %>: <%= schema.singular %>} + end + + describe "Index" do + setup [:create_<%= schema.singular %>] + + test "lists all <%= schema.plural %>", <%= if schema.string_attr do %>%{conn: conn, <%= schema.singular %>: <%= schema.singular %>}<% else %>%{conn: conn}<% end %> do + {:ok, _index_live, html} = live(conn, Routes.<%= schema.route_helper %>_index_path(conn, :index)) + + assert html =~ "Listing <%= schema.human_plural %>"<%= if schema.string_attr do %> + assert html =~ <%= schema.singular %>.<%= schema.string_attr %><% end %> + end + + test "saves new <%= schema.singular %>", %{conn: conn} do + {:ok, index_live, _html} = live(conn, Routes.<%= schema.route_helper %>_index_path(conn, :index)) + + assert index_live |> element("a", "New <%= schema.human_singular %>") |> render_click() =~ + "New <%= schema.human_singular %>" + + assert_patch(index_live, Routes.<%= schema.route_helper %>_index_path(conn, :new)) + + assert index_live + |> form("#<%= 
schema.singular %>-form", <%= schema.singular %>: @invalid_attrs) + |> render_change() =~ "<%= Mix.Phoenix.Schema.failed_render_change_message(schema) %>" + + {:ok, _, html} = + index_live + |> form("#<%= schema.singular %>-form", <%= schema.singular %>: @create_attrs) + |> render_submit() + |> follow_redirect(conn, Routes.<%= schema.route_helper %>_index_path(conn, :index)) + + assert html =~ "<%= schema.human_singular %> created successfully"<%= if schema.string_attr do %> + assert html =~ "some <%= schema.string_attr %>"<% end %> + end + + test "updates <%= schema.singular %> in listing", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + {:ok, index_live, _html} = live(conn, Routes.<%= schema.route_helper %>_index_path(conn, :index)) + + assert index_live |> element("#<%= schema.singular %>-#{<%= schema.singular %>.id} a", "Edit") |> render_click() =~ + "Edit <%= schema.human_singular %>" + + assert_patch(index_live, Routes.<%= schema.route_helper %>_index_path(conn, :edit, <%= schema.singular %>)) + + assert index_live + |> form("#<%= schema.singular %>-form", <%= schema.singular %>: @invalid_attrs) + |> render_change() =~ "<%= Mix.Phoenix.Schema.failed_render_change_message(schema) %>" + + {:ok, _, html} = + index_live + |> form("#<%= schema.singular %>-form", <%= schema.singular %>: @update_attrs) + |> render_submit() + |> follow_redirect(conn, Routes.<%= schema.route_helper %>_index_path(conn, :index)) + + assert html =~ "<%= schema.human_singular %> updated successfully"<%= if schema.string_attr do %> + assert html =~ "some updated <%= schema.string_attr %>"<% end %> + end + + test "deletes <%= schema.singular %> in listing", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + {:ok, index_live, _html} = live(conn, Routes.<%= schema.route_helper %>_index_path(conn, :index)) + + assert index_live |> element("#<%= schema.singular %>-#{<%= schema.singular %>.id} a", "Delete") |> render_click() + refute has_element?(index_live, "#<%= schema.singular %>-#{<%= schema.singular %>.id}") + end + end + + describe "Show" do + setup [:create_<%= schema.singular %>] + + test "displays <%= schema.singular %>", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + {:ok, _show_live, html} = live(conn, Routes.<%= schema.route_helper %>_show_path(conn, :show, <%= schema.singular %>)) + + assert html =~ "Show <%= schema.human_singular %>"<%= if schema.string_attr do %> + assert html =~ <%= schema.singular %>.<%= schema.string_attr %><% end %> + end + + test "updates <%= schema.singular %> within modal", %{conn: conn, <%= schema.singular %>: <%= schema.singular %>} do + {:ok, show_live, _html} = live(conn, Routes.<%= schema.route_helper %>_show_path(conn, :show, <%= schema.singular %>)) + + assert show_live |> element("a", "Edit") |> render_click() =~ + "Edit <%= schema.human_singular %>" + + assert_patch(show_live, Routes.<%= schema.route_helper %>_show_path(conn, :edit, <%= schema.singular %>)) + + assert show_live + |> form("#<%= schema.singular %>-form", <%= schema.singular %>: @invalid_attrs) + |> render_change() =~ "<%= Mix.Phoenix.Schema.failed_render_change_message(schema) %>" + + {:ok, _, html} = + show_live + |> form("#<%= schema.singular %>-form", <%= schema.singular %>: @update_attrs) + |> render_submit() + |> follow_redirect(conn, Routes.<%= schema.route_helper %>_show_path(conn, :show, <%= schema.singular %>)) + + assert html =~ "<%= schema.human_singular %> updated successfully"<%= if schema.string_attr do %> + assert html =~ "some updated <%= 
schema.string_attr %>"<% end %> + end + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.live/show.ex b/deps/phoenix/priv/templates/phx.gen.live/show.ex new file mode 100644 index 0000000..b75bade --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/show.ex @@ -0,0 +1,21 @@ +defmodule <%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.Show do + use <%= inspect context.web_module %>, :live_view + + alias <%= inspect context.module %> + + @impl true + def mount(_params, _session, socket) do + {:ok, socket} + end + + @impl true + def handle_params(%{"id" => id}, _, socket) do + {:noreply, + socket + |> assign(:page_title, page_title(socket.assigns.live_action)) + |> assign(:<%= schema.singular %>, <%= inspect context.alias %>.get_<%= schema.singular %>!(id))} + end + + defp page_title(:show), do: "Show <%= schema.human_singular %>" + defp page_title(:edit), do: "Edit <%= schema.human_singular %>" +end diff --git a/deps/phoenix/priv/templates/phx.gen.live/show.html.heex b/deps/phoenix/priv/templates/phx.gen.live/show.html.heex new file mode 100644 index 0000000..ff91114 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.live/show.html.heex @@ -0,0 +1,26 @@ +

+<h1>Show <%= schema.human_singular %></h1>
+ +<%%= if @live_action in [:edit] do %> + <.modal return_to={Routes.<%= schema.route_helper %>_show_path(@socket, :show, @<%= schema.singular %>)}> + <.live_component + module={<%= inspect context.web_module %>.<%= inspect Module.concat(schema.web_namespace, schema.alias) %>Live.FormComponent} + id={@<%= schema.singular %>.id} + title={@page_title} + action={@live_action} + <%= schema.singular %>={@<%= schema.singular %>} + return_to={Routes.<%= schema.route_helper %>_show_path(@socket, :show, @<%= schema.singular %>)} + /> + +<%% end %> + +
+<ul>
+<%= for {k, _} <- schema.attrs do %>
+  <li>
+    <strong><%= Phoenix.Naming.humanize(Atom.to_string(k)) %>:</strong>
+    <%%= @<%= schema.singular %>.<%= k %> %>
+  </li>
+<% end %>
+</ul>
+ +<%%= live_patch "Edit", to: Routes.<%= schema.route_helper %>_show_path(@socket, :edit, @<%= schema.singular %>), class: "button" %> | +<%%= live_redirect "Back", to: Routes.<%= schema.route_helper %>_index_path(@socket, :index) %> diff --git a/deps/phoenix/priv/templates/phx.gen.notifier/notifier.ex b/deps/phoenix/priv/templates/phx.gen.notifier/notifier.ex new file mode 100644 index 0000000..7a2cb2a --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.notifier/notifier.ex @@ -0,0 +1,14 @@ +defmodule <%= inspect context.module %> do + import Swoosh.Email + alias <%= inspect context.base_module %>.Mailer<%= for message <- notifier_messages do %> + + def deliver_<%= message %>(%{name: name, email: email}) do + new() + |> to({name, email}) + |> from({"Phoenix Team", "team@example.com"}) + |> subject("Welcome to Phoenix, #{name}!") + |> html_body("

<h1>Hello, #{name}</h1>
") + |> text_body("Hello, #{name}\n") + |> Mailer.deliver() + end<% end %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.notifier/notifier_test.exs b/deps/phoenix/priv/templates/phx.gen.notifier/notifier_test.exs new file mode 100644 index 0000000..74f7bf4 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.notifier/notifier_test.exs @@ -0,0 +1,18 @@ +defmodule <%= inspect context.module %>Test do + use ExUnit.Case, async: true + import Swoosh.TestAssertions + + alias <%= inspect context.module %><%= for message <- notifier_messages do %> + + test "deliver_<%= message %>/1" do + user = %{name: "Alice", email: "alice@example.com"} + + <%= inflections[:alias] %>.deliver_<%= message %>(user) + + assert_email_sent( + subject: "Welcome to Phoenix, Alice!", + to: {"Alice", "alice@example.com"}, + text_body: ~r/Hello, Alice/ + ) + end<% end %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.presence/presence.ex b/deps/phoenix/priv/templates/phx.gen.presence/presence.ex new file mode 100644 index 0000000..204ef33 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.presence/presence.ex @@ -0,0 +1,10 @@ +defmodule <%= module %> do + @moduledoc """ + Provides presence tracking to channels and processes. + + See the [`Phoenix.Presence`](https://hexdocs.pm/phoenix/Phoenix.Presence.html) + docs for more details. + """ + use Phoenix.Presence, otp_app: <%= inspect otp_app %>, + pubsub_server: <%= inspect pubsub_server %> +end diff --git a/deps/phoenix/priv/templates/phx.gen.release/Dockerfile.eex b/deps/phoenix/priv/templates/phx.gen.release/Dockerfile.eex new file mode 100644 index 0000000..a50d4e9 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/Dockerfile.eex @@ -0,0 +1,92 @@ +# Find eligible builder and runner images on Docker Hub. We use Ubuntu/Debian instead of +# Alpine to avoid DNS resolution issues in production. +# +# https://hub.docker.com/r/hexpm/elixir/tags?page=1&name=ubuntu +# https://hub.docker.com/_/ubuntu?tab=tags +# +# +# This file is based on these images: +# +# - https://hub.docker.com/r/hexpm/elixir/tags - for the build image +# - https://hub.docker.com/_/debian?tab=tags&page=1&name=bullseye-20220801-slim - for the release image +# - https://pkgs.org/ - resource for finding needed packages +# - Ex: hexpm/elixir:<%= elixir_vsn %>-erlang-<%= otp_vsn %>-debian-bullseye-20210902-slim +# +ARG ELIXIR_VERSION=<%= elixir_vsn %> +ARG OTP_VERSION=<%= otp_vsn %> +ARG DEBIAN_VERSION=bullseye-20220801-slim + +ARG BUILDER_IMAGE="hexpm/elixir:${ELIXIR_VERSION}-erlang-${OTP_VERSION}-debian-${DEBIAN_VERSION}" +ARG RUNNER_IMAGE="debian:${DEBIAN_VERSION}" + +FROM ${BUILDER_IMAGE} as builder + +# install build dependencies +RUN apt-get update -y && apt-get install -y build-essential git \ + && apt-get clean && rm -f /var/lib/apt/lists/*_* + +# prepare build dir +WORKDIR /app + +# install hex + rebar +RUN mix local.hex --force && \ + mix local.rebar --force + +# set build ENV +ENV MIX_ENV="prod" + +# install mix dependencies +COPY mix.exs mix.lock ./ +RUN mix deps.get --only $MIX_ENV +RUN mkdir config + +# copy compile-time config files before we compile dependencies +# to ensure any relevant config change will trigger the dependencies +# to be re-compiled. +COPY config/config.exs config/${MIX_ENV}.exs config/ +RUN mix deps.compile + +COPY priv priv + +COPY lib lib +<%= if assets_dir_exists? 
do %> +COPY assets assets + +# compile assets +RUN mix assets.deploy +<% end %> +# Compile the release +RUN mix compile + +# Changes to config/runtime.exs don't require recompiling the code +COPY config/runtime.exs config/ + +COPY rel rel +RUN mix release + +# start a new build stage so that the final image will only contain +# the compiled release and other runtime necessities +FROM ${RUNNER_IMAGE} + +RUN apt-get update -y && apt-get install -y libstdc++6 openssl libncurses5 locales \ + && apt-get clean && rm -f /var/lib/apt/lists/*_* + +# Set the locale +RUN sed -i '/en_US.UTF-8/s/^# //g' /etc/locale.gen && locale-gen + +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en +ENV LC_ALL en_US.UTF-8 + +WORKDIR "/app" +RUN chown nobody /app + +# set runner ENV +ENV MIX_ENV="prod" + +# Only copy the final release from the build stage +COPY --from=builder --chown=nobody:root /app/_build/${MIX_ENV}/rel/<%= otp_app %> ./ + +USER nobody + +CMD ["/app/bin/server"] \ No newline at end of file diff --git a/deps/phoenix/priv/templates/phx.gen.release/dockerignore.eex b/deps/phoenix/priv/templates/phx.gen.release/dockerignore.eex new file mode 100644 index 0000000..61a7393 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/dockerignore.eex @@ -0,0 +1,45 @@ +# This file excludes paths from the Docker build context. +# +# By default, Docker's build context includes all files (and folders) in the +# current directory. Even if a file isn't copied into the container it is still sent to +# the Docker daemon. +# +# There are multiple reasons to exclude files from the build context: +# +# 1. Prevent nested folders from being copied into the container (ex: exclude +# /assets/node_modules when copying /assets) +# 2. Reduce the size of the build context and improve build time (ex. /build, /deps, /doc) +# 3. 
Avoid sending files containing sensitive information +# +# More information on using .dockerignore is available here: +# https://docs.docker.com/engine/reference/builder/#dockerignore-file + +.dockerignore + +# Ignore git, but keep git HEAD and refs to access current commit hash if needed: +# +# $ cat .git/HEAD | awk '{print ".git/"$2}' | xargs cat +# d0b8727759e1e0e7aa3d41707d12376e373d5ecc +.git +!.git/HEAD +!.git/refs + +# Common development/test artifacts +/cover/ +/doc/ +/test/ +/tmp/ +.elixir_ls + +# Mix artifacts +/_build/ +/deps/ +*.ez + +# Generated on crash by the VM +erl_crash.dump + +# Static artifacts - These should be fetched and built inside the Docker image +/assets/node_modules/ +/priv/static/assets/ +/priv/static/cache_manifest.json diff --git a/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.bat.eex b/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.bat.eex new file mode 100644 index 0000000..8e2503b --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.bat.eex @@ -0,0 +1 @@ +call "%~dp0\<%= otp_app %>" eval <%= app_namespace %>.Release.migrate diff --git a/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.sh.eex b/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.sh.eex new file mode 100644 index 0000000..5e3d114 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/rel/migrate.sh.eex @@ -0,0 +1,3 @@ +#!/bin/sh +cd -P -- "$(dirname -- "$0")" +exec ./<%= otp_app %> eval <%= app_namespace %>.Release.migrate diff --git a/deps/phoenix/priv/templates/phx.gen.release/rel/server.bat.eex b/deps/phoenix/priv/templates/phx.gen.release/rel/server.bat.eex new file mode 100644 index 0000000..d6216f6 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/rel/server.bat.eex @@ -0,0 +1,2 @@ +set PHX_SERVER=true +call "%~dp0\<%= otp_app %>" start diff --git a/deps/phoenix/priv/templates/phx.gen.release/rel/server.sh.eex b/deps/phoenix/priv/templates/phx.gen.release/rel/server.sh.eex new file mode 100644 index 0000000..2b35aee --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/rel/server.sh.eex @@ -0,0 +1,3 @@ +#!/bin/sh +cd -P -- "$(dirname -- "$0")" +PHX_SERVER=true exec ./<%= otp_app %> start diff --git a/deps/phoenix/priv/templates/phx.gen.release/release.ex b/deps/phoenix/priv/templates/phx.gen.release/release.ex new file mode 100644 index 0000000..cbd7858 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.release/release.ex @@ -0,0 +1,28 @@ +defmodule <%= app_namespace %>.Release do + @moduledoc """ + Used for executing DB release tasks when run in production without Mix + installed. 
+ """ + @app :<%= otp_app %> + + def migrate do + load_app() + + for repo <- repos() do + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :up, all: true)) + end + end + + def rollback(repo, version) do + load_app() + {:ok, _, _} = Ecto.Migrator.with_repo(repo, &Ecto.Migrator.run(&1, :down, to: version)) + end + + defp repos do + Application.fetch_env!(@app, :ecto_repos) + end + + defp load_app do + Application.load(@app) + end +end diff --git a/deps/phoenix/priv/templates/phx.gen.schema/migration.exs b/deps/phoenix/priv/templates/phx.gen.schema/migration.exs new file mode 100644 index 0000000..34a9196 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.schema/migration.exs @@ -0,0 +1,15 @@ +defmodule <%= inspect schema.repo %>.Migrations.Create<%= Macro.camelize(schema.table) %> do + use <%= inspect schema.migration_module %> + + def change do + create table(:<%= schema.table %><%= if schema.binary_id do %>, primary_key: false<% end %><%= if schema.prefix do %>, prefix: :<%= schema.prefix %><% end %>) do +<%= if schema.binary_id do %> add :id, :binary_id, primary_key: true +<% end %><%= for {k, v} <- schema.attrs do %> add <%= inspect k %>, <%= inspect Mix.Phoenix.Schema.type_for_migration(v) %><%= schema.migration_defaults[k] %> +<% end %><%= for {_, i, _, s} <- schema.assocs do %> add <%= inspect(i) %>, references(<%= inspect(s) %>, on_delete: :nothing<%= if schema.binary_id do %>, type: :binary_id<% end %>) +<% end %> + timestamps() + end +<%= if Enum.any?(schema.indexes) do %><%= for index <- schema.indexes do %> + <%= index %><% end %> +<% end %> end +end diff --git a/deps/phoenix/priv/templates/phx.gen.schema/schema.ex b/deps/phoenix/priv/templates/phx.gen.schema/schema.ex new file mode 100644 index 0000000..fc1d478 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.schema/schema.ex @@ -0,0 +1,22 @@ +defmodule <%= inspect schema.module %> do + use Ecto.Schema + import Ecto.Changeset +<%= if schema.prefix do %> + @schema_prefix :<%= schema.prefix %><% end %><%= if schema.binary_id do %> + @primary_key {:id, :binary_id, autogenerate: true} + @foreign_key_type :binary_id<% end %> + schema <%= inspect schema.table %> do +<%= Mix.Phoenix.Schema.format_fields_for_schema(schema) %> +<%= for {_, k, _, _} <- schema.assocs do %> field <%= inspect k %>, <%= if schema.binary_id do %>:binary_id<% else %>:id<% end %> +<% end %> + timestamps() + end + + @doc false + def changeset(<%= schema.singular %>, attrs) do + <%= schema.singular %> + |> cast(attrs, [<%= Enum.map_join(schema.attrs, ", ", &inspect(elem(&1, 0))) %>]) + |> validate_required([<%= Enum.map_join(schema.attrs, ", ", &inspect(elem(&1, 0))) %>]) +<%= for k <- schema.uniques do %> |> unique_constraint(<%= inspect k %>) +<% end %> end +end diff --git a/deps/phoenix/priv/templates/phx.gen.socket/socket.ex b/deps/phoenix/priv/templates/phx.gen.socket/socket.ex new file mode 100644 index 0000000..3ce1be9 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.socket/socket.ex @@ -0,0 +1,54 @@ +defmodule <%= module %>Socket do + use Phoenix.Socket + + # A Socket handler + # + # It's possible to control the websocket connection and + # assign values that can be accessed by your channel topics. 
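The `Release` module above is what the `rel/migrate.sh` and `rel/migrate.bat` scripts invoke via `eval`; as a rough sketch (the `MyApp` application and repo names are placeholders), the same tasks can also be run from a release's remote console:

```elixir
# equivalent to: bin/my_app eval "MyApp.Release.migrate()"
MyApp.Release.migrate()

# roll a single repo back to a specific migration version
MyApp.Release.rollback(MyApp.Repo, 20220909120000)
```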
+ + ## Channels<%= if existing_channel do %> + + channel "<%= existing_channel[:singular] %>:*", <%= existing_channel[:module] %>Channel +<% else %> + # Uncomment the following line to define a "room:*" topic + # pointing to the `<%= web_module %>.RoomChannel`: + # + # channel "room:*", <%= web_module %>.RoomChannel + # + # To create a channel file, use the mix task: + # + # mix phx.gen.channel Room + # + # See the [`Channels guide`](https://hexdocs.pm/phoenix/channels.html) + # for further details. + +<% end %> + # Socket params are passed from the client and can + # be used to verify and authenticate a user. After + # verification, you can put default assigns into + # the socket that will be set for all channels, ie + # + # {:ok, assign(socket, :user_id, verified_user_id)} + # + # To deny connection, return `:error`. + # + # See `Phoenix.Token` documentation for examples in + # performing token verification on connect. + @impl true + def connect(_params, socket, _connect_info) do + {:ok, socket} + end + + # Socket id's are topics that allow you to identify all sockets for a given user: + # + # def id(socket), do: "user_socket:#{socket.assigns.user_id}" + # + # Would allow you to broadcast a "disconnect" event and terminate + # all active sockets and channels for a given user: + # + # <%= endpoint_module %>.broadcast("user_socket:#{user.id}", "disconnect", %{}) + # + # Returning `nil` makes this socket anonymous. + @impl true + def id(_socket), do: nil +end diff --git a/deps/phoenix/priv/templates/phx.gen.socket/socket.js b/deps/phoenix/priv/templates/phx.gen.socket/socket.js new file mode 100644 index 0000000..dd178f4 --- /dev/null +++ b/deps/phoenix/priv/templates/phx.gen.socket/socket.js @@ -0,0 +1,64 @@ +// NOTE: The contents of this file will only be executed if +// you uncomment its entry in "assets/js/app.js". + +// Bring in Phoenix channels client library: +import {Socket} from "phoenix" + +// And connect to the path in "<%= web_prefix %>/endpoint.ex". We pass the +// token for authentication. Read below how it should be used. +let socket = new Socket("/socket", {params: {token: window.userToken}}) + +// When you connect, you'll often need to authenticate the client. +// For example, imagine you have an authentication plug, `MyAuth`, +// which authenticates the session and assigns a `:current_user`. +// If the current user exists you can assign the user's token in +// the connection for use in the layout. +// +// In your "<%= web_prefix %>/router.ex": +// +// pipeline :browser do +// ... +// plug MyAuth +// plug :put_user_token +// end +// +// defp put_user_token(conn, _) do +// if current_user = conn.assigns[:current_user] do +// token = Phoenix.Token.sign(conn, "user socket", current_user.id) +// assign(conn, :user_token, token) +// else +// conn +// end +// end +// +// Now you need to pass this token to JavaScript. 
You can do so +// inside a script tag in "<%= web_prefix %>/templates/layout/app.html.heex": +// +// +// +// You will need to verify the user token in the "connect/3" function +// in "<%= web_prefix %>/channels/user_socket.ex": +// +// def connect(%{"token" => token}, socket, _connect_info) do +// # max_age: 1209600 is equivalent to two weeks in seconds +// case Phoenix.Token.verify(socket, "user socket", token, max_age: 1_209_600) do +// {:ok, user_id} -> +// {:ok, assign(socket, :user, user_id)} +// +// {:error, reason} -> +// :error +// end +// end +// +// Finally, connect to the socket: +socket.connect() + +// Now that you are connected, you can join channels with a topic. +// Let's assume you have a channel with a topic named `room` and the +// subtopic is its id - in this case 42: +let channel = socket.channel("room:42", {}) +channel.join() + .receive("ok", resp => { console.log("Joined successfully", resp) }) + .receive("error", resp => { console.log("Unable to join", resp) }) + +export default socket diff --git a/deps/phoenix_ecto/.fetch b/deps/phoenix_ecto/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/phoenix_ecto/.formatter.exs b/deps/phoenix_ecto/.formatter.exs new file mode 100644 index 0000000..a79525b --- /dev/null +++ b/deps/phoenix_ecto/.formatter.exs @@ -0,0 +1,4 @@ +[ + import_deps: [:ecto], + inputs: ["{mix,.formatter}.exs", "{config,lib,test}/**/*.{ex,exs}"] +] diff --git a/deps/phoenix_ecto/.hex b/deps/phoenix_ecto/.hex new file mode 100644 index 0000000000000000000000000000000000000000..656025185a510b79c93ddda1f95b2a81f8a1553e GIT binary patch literal 276 zcmZXP%WlIU5JhQ{J{DC!CBkNaVb?#2fEkL@0+!=c-uK6gyX|gtb#>2~pM1zi635J& zKX|bUA)ZLQO||lJiJ9Z)?OsYx-=(o56EL8}pi=0!10p7LP9u+F3Wy^ySnsS+5M>w? z+aQBZ){c_IOYzlz@N)a=dk-C2rtM1NRDIzc@jB 2.9 + +## v3.1.0 + +* Enhancements + * Depend on Ecto ~> 2.1 and support new `:naive_datetime` and `:utc_datetime` types + +## v3.0.1 + +* Enhancements + * Support non-struct data in changeset + +## v3.0.0 + +* Enhancements + * Add `Phoenix.Ecto.SQL.Sandbox` for concurrent acceptance tests with Phoenix and Ecto based on user-agent + * Use the new sandbox based on user-agent + * Depend on Phoenix.HTML ~> 2.6 + * Depend on Ecto ~> 2.0 + +* Bug fixes + * Do not list errors if changeset has no action + +## v2.0.0 + +* Enhancements + * Depend on Ecto ~> 1.1 + +* Backwords incompatible changes + * `f.errors` now returns a raw list of `changeset.errors` for the form's changeset which can be further translated with Phoenix' new Gettext support + * No longer implement Poison protocol for `Ecto.Changeset` + +## v1.2.0 + +* Enhancements + * Depend on Ecto ~> 1.0 + * Depend on Phoenix.HTML ~> 2.2 + * Use the new `:as` option for naming inputs fields instead of `:name` + +## v1.1.0 + +* Enhancements + * Depend on Ecto ~> 0.15 + * Support `skip_deleted` in inputs_for + * Support default values from data rather from `:default` option + +## v1.0.0 + +* Enhancements + * Depend on Phoenix.HTML ~> 2.1 + * Depend on Ecto ~> 0.15 + * Support associations on changesets + +## v0.9.0 + +* Enhancements + * Depend on Phoenix.HTML ~> 2.0 + +## v0.8.1 + +* Bug fix + * Ensure we can encode decimals and floats from errors messages + +## v0.8.0 + +* Enhancements + * Depend on Phoenix.HTML ~> 1.4 (includes `input_type` and `input_validation` support) + * Include embeds errors during JSON generation + +## v0.7.0 + +* Enhancements + * Depend on Phoenix.HTML ~> 1.3 (includes `inputs_for` support) + +## v0.6.0 + +* Enhancements + * Depend on Ecto ~> 0.14 + 
+## v0.5.0 + +* Enhancements + * Depend on Ecto ~> 0.12 + +## v0.4.0 + +* Enhancements + * Depend on phoenix_html as optional dependency instead of Phoenix + * Depend on poison as optional dependency instead of Phoenix + +## v0.3.2 + +* Bug fix + * Ensure we interpolate `%{count}` in JSON encoding + +## v0.3.1 + +* Enhancements + * Implement Plug.Exception for Ecto exceptions + +## v0.3.0 + +* Enhancements + * Support Phoenix v0.11.0 errors entry in form data + +## v0.2.0 + +* Enhancements + * Implement `Phoenix.HTML.Safe` for `Ecto.Date`, `Ecto.Time` and `Ecto.DateTime` + * Implement `Poison.Encoder` for `Ecto.Changeset`, `Decimal`, `Ecto.Date`, `Ecto.Time` and `Ecto.DateTime` + +## v0.1.0 + +* Enhancements + * Implement `Phoenix.HTML.FormData` for `Ecto.Changeset` + * Implement `Phoenix.HTML.Safe` for `Decimal` diff --git a/deps/phoenix_ecto/LICENSE b/deps/phoenix_ecto/LICENSE new file mode 100644 index 0000000..c142eb4 --- /dev/null +++ b/deps/phoenix_ecto/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2015 Chris McCord + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/phoenix_ecto/README.md b/deps/phoenix_ecto/README.md new file mode 100644 index 0000000..097b784 --- /dev/null +++ b/deps/phoenix_ecto/README.md @@ -0,0 +1,128 @@ +# Phoenix/Ecto + +[![Action Status](https://github.com/phoenixframework/phoenix_ecto/workflows/CI/badge.svg)](https://github.com/phoenixframework/phoenix_ecto/actions?query=workflow%3ACI) + +A project that integrates [Phoenix](http://github.com/phoenixframework/phoenix) with [Ecto](http://github.com/elixir-lang/ecto), implementing all relevant protocols. + +## Installation + +You can install `phoenix_ecto` by adding it to your list of dependencies in `mix.exs`: + +```elixir +def deps do + [{:phoenix_ecto, "~> 4.0"}] +end +``` + +## Concurrent browser tests + +This library also provides a plug called `Phoenix.Ecto.SQL.Sandbox` that allows developers to run acceptance tests powered by headless browsers such as ChromeDriver and Selenium concurrently. If you are not familiar with Ecto's SQL sandbox, we recommend you to first get acquainted with it by [reading `Ecto.Adapters.SQL.Sandbox` documentation](https://hexdocs.pm/ecto_sql/Ecto.Adapters.SQL.Sandbox.html). + +To enable concurrent acceptance tests, make sure you are using PostgreSQL and follow the instructions below: + +1. Set a flag to enable the sandbox in `config/test.exs`: + + ```elixir + config :your_app, sql_sandbox: true + ``` + +2. 
And use the flag to conditionally add the plug to `lib/your_app/endpoint.ex`: + + ```elixir + if Application.get_env(:your_app, :sql_sandbox) do + plug Phoenix.Ecto.SQL.Sandbox + end + ``` + + Make sure that this is placed **before** the line `plug YourApp.Router` (or any other plug that may access the database). + +You can now checkout a sandboxed connection and pass the connection information to an acceptance testing tool like [Hound](https://github.com/hashnuke/hound) or [Wallaby](https://github.com/elixir-wallaby/wallaby). + +### Hound + +To write concurrent acceptance tests with Hound, first add it as a dependency to your `mix.exs`: + +```elixir +{:hound, "~> 1.0"} +``` + +Make sure to start it at the top of your `test/test_helper.exs`: + +```elixir +{:ok, _} = Application.ensure_all_started(:hound) +``` + +Then add the following to your test case (or case template): + +```elixir +use Hound.Helpers + +setup do + :ok = Ecto.Adapters.SQL.Sandbox.checkout(YourApp.Repo) + metadata = Phoenix.Ecto.SQL.Sandbox.metadata_for(YourApp.Repo, self()) + Hound.start_session(metadata: metadata) + :ok +end +``` + +Hound supports multiple drivers like Chrome, Firefox, etc but it does not support concurrent tests under PhantomJS (the default). + +### Wallaby + +To write concurrent acceptance tests with Wallaby, first add it as a dependency to your `mix.exs`: + +```elixir +{:wallaby, "~> 0.25", only: :test} +``` + +Wallaby can take care of setting up the Ecto Sandbox for you if you use `use Wallaby.Feature` in your test module. + +```elixir +defmodule MyAppWeb.PageFeature do + use ExUnit.Case, async: true + use Wallaby.Feature + + feature "shows some text", %{session: session} do + session + |> visit("/home") + |> assert_text("Hello world!") + end +end +``` + +If you don't `use Wallaby.Feature`, you can add the following to your test case (or case template): + +```elixir +use Wallaby.DSL + +setup do + :ok = Ecto.Adapters.SQL.Sandbox.checkout(YourApp.Repo) + metadata = Phoenix.Ecto.SQL.Sandbox.metadata_for(YourApp.Repo, self()) + {:ok, session} = Wallaby.start_session(metadata: metadata) +end +``` + +Wallaby currently supports ChromeDriver and Selenium, allowing testing in almost any browser. + +## The Phoenix <-> Ecto integration + +Thanks to Elixir protocols, the integration between Phoenix and Ecto is simply a matter of implementing a handful of protocols. We provide the following implementations: + + * `Phoenix.HTML.FormData` protocol for `Ecto.Changeset` + * `Phoenix.HTML.Safe` protocol for `Decimal` + * `Plug.Exception` protocol for the relevant Ecto exceptions + +## Configuration + +The `Plug.Exception` implementations for Ecto exceptions may be disabled by including the error in the mix configuration. + +```elixir +config :phoenix_ecto, + exclude_ecto_exceptions_from_plug: [Ecto.NoResultsError] +``` + +## Copyright and License + +Copyright (c) 2015, Chris McCord. + +Phoenix/Ecto source code is licensed under the [MIT License](https://github.com/phoenixframework/phoenix_ecto/blob/master/LICENSE). diff --git a/deps/phoenix_ecto/hex_metadata.config b/deps/phoenix_ecto/hex_metadata.config new file mode 100644 index 0000000..f0a94bf --- /dev/null +++ b/deps/phoenix_ecto/hex_metadata.config @@ -0,0 +1,33 @@ +{<<"app">>,<<"phoenix_ecto">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>,<<"Integration between Phoenix & Ecto">>}. +{<<"elixir">>,<<"~> 1.7">>}. 
+{<<"files">>, + [<<"lib">>,<<"lib/phoenix_ecto">>,<<"lib/phoenix_ecto/html.ex">>, + <<"lib/phoenix_ecto/check_repo_status.ex">>,<<"lib/phoenix_ecto/plug.ex">>, + <<"lib/phoenix_ecto/exceptions.ex">>,<<"lib/phoenix_ecto/sql">>, + <<"lib/phoenix_ecto/sql/sandbox_session.ex">>, + <<"lib/phoenix_ecto/sql/sandbox.ex">>,<<"lib/phoenix_ecto.ex">>, + <<".formatter.exs">>,<<"mix.exs">>,<<"README.md">>,<<"LICENSE">>, + <<"CHANGELOG.md">>]}. +{<<"licenses">>,[<<"MIT">>]}. +{<<"links">>, + [{<<"GitHub">>,<<"https://github.com/phoenixframework/phoenix_ecto">>}]}. +{<<"name">>,<<"phoenix_ecto">>}. +{<<"requirements">>, + [[{<<"app">>,<<"phoenix_html">>}, + {<<"name">>,<<"phoenix_html">>}, + {<<"optional">>,true}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 2.14.2 or ~> 3.0">>}], + [{<<"app">>,<<"ecto">>}, + {<<"name">>,<<"ecto">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 3.3">>}], + [{<<"app">>,<<"plug">>}, + {<<"name">>,<<"plug">>}, + {<<"optional">>,false}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 1.9">>}]]}. +{<<"version">>,<<"4.4.0">>}. diff --git a/deps/phoenix_ecto/lib/phoenix_ecto.ex b/deps/phoenix_ecto/lib/phoenix_ecto.ex new file mode 100644 index 0000000..829c150 --- /dev/null +++ b/deps/phoenix_ecto/lib/phoenix_ecto.ex @@ -0,0 +1,17 @@ +defmodule Phoenix.Ecto do + @moduledoc """ + Integrates Phoenix with Ecto. + + It implements many protocols that makes it easier to use + Ecto with Phoenix either when working with HTML or JSON. + """ + use Application + + def start(_type, _args) do + children = [ + {DynamicSupervisor, name: Phoenix.Ecto.SQL.SandboxSupervisor, strategy: :one_for_one} + ] + + Supervisor.start_link(children, strategy: :one_for_one, name: Phoenix.Ecto.Supervisor) + end +end diff --git a/deps/phoenix_ecto/lib/phoenix_ecto/check_repo_status.ex b/deps/phoenix_ecto/lib/phoenix_ecto/check_repo_status.ex new file mode 100644 index 0000000..06394a6 --- /dev/null +++ b/deps/phoenix_ecto/lib/phoenix_ecto/check_repo_status.ex @@ -0,0 +1,71 @@ +defmodule Phoenix.Ecto.CheckRepoStatus do + @moduledoc """ + A plug that does some checks on your application repos. + + Checks if the storage is up (database is created) or if there are any pending migrations. + Both checks can raise an error if the conditions are not met. 
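For context, `Phoenix.Ecto.CheckRepoStatus` is typically mounted only in development, inside the code-reloading block of the generated endpoint; a minimal sketch, assuming an application named `:my_app`:

```elixir
# lib/my_app_web/endpoint.ex (dev-only block)
if code_reloading? do
  plug Phoenix.CodeReloader
  plug Phoenix.Ecto.CheckRepoStatus, otp_app: :my_app
end
```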
+ + ## Plug options + + * `:otp_app` - name of the application which the repos are fetched from + + """ + + @behaviour Plug + + alias Plug.Conn + + def init(opts) do + Keyword.fetch!(opts, :otp_app) + opts + end + + def call(%Conn{} = conn, opts) do + repos = Application.get_env(opts[:otp_app], :ecto_repos, []) + + for repo <- repos, Process.whereis(repo) do + unless check_pending_migrations!(repo, opts) do + check_storage_up!(repo) + end + end + + conn + end + + defp check_storage_up!(repo) do + try do + adapter = repo.__adapter__() + + if Code.ensure_loaded?(adapter) && function_exported?(adapter, :storage_status, 1) do + adapter.storage_status(repo.config()) + end + rescue + _ -> :ok + else + :down -> raise Phoenix.Ecto.StorageNotCreatedError, repo: repo + _ -> :ok + end + end + + defp check_pending_migrations!(repo, opts) do + try do + # If the dependency `ecto_sql` is not loaded we can't check if + # there are pending migrations so we try to fail gracefully here + fallback_get_migrations = + if Code.ensure_loaded?(Ecto.Migrator), + do: &Ecto.Migrator.migrations/1, + else: fn _repo -> raise "to be rescued" end + + get_migrations = Keyword.get(opts, :get_migrations_function, fallback_get_migrations) + + repo + |> get_migrations.() + |> Enum.any?(fn {status, _version, _migration} -> status == :down end) + rescue + _ -> false + else + true -> raise Phoenix.Ecto.PendingMigrationError, repo: repo + false -> true + end + end +end diff --git a/deps/phoenix_ecto/lib/phoenix_ecto/exceptions.ex b/deps/phoenix_ecto/lib/phoenix_ecto/exceptions.ex new file mode 100644 index 0000000..cbaeca3 --- /dev/null +++ b/deps/phoenix_ecto/lib/phoenix_ecto/exceptions.ex @@ -0,0 +1,17 @@ +defmodule Phoenix.Ecto.StorageNotCreatedError do + defexception [:repo] + + def message(%__MODULE__{repo: repo}) do + "the storage is not created for repo: #{inspect(repo)}. " <> + "Try running `mix ecto.create` in the command line to create it" + end +end + +defmodule Phoenix.Ecto.PendingMigrationError do + defexception [:repo] + + def message(%__MODULE__{repo: repo}) do + "there are pending migrations for repo: #{inspect(repo)}. " <> + "Try running `mix ecto.migrate` in the command line to migrate it" + end +end diff --git a/deps/phoenix_ecto/lib/phoenix_ecto/html.ex b/deps/phoenix_ecto/lib/phoenix_ecto/html.ex new file mode 100644 index 0000000..9bccf34 --- /dev/null +++ b/deps/phoenix_ecto/lib/phoenix_ecto/html.ex @@ -0,0 +1,331 @@ +if Code.ensure_loaded?(Phoenix.HTML) do + defimpl Phoenix.HTML.FormData, for: Ecto.Changeset do + def to_form(changeset, opts) do + %{params: params, data: data} = changeset + {name, opts} = Keyword.pop(opts, :as) + + name = to_string(name || form_for_name(data)) + id = Keyword.get(opts, :id) || name + + %Phoenix.HTML.Form{ + source: changeset, + impl: __MODULE__, + id: id, + name: name, + errors: form_for_errors(changeset), + data: data, + params: params || %{}, + hidden: form_for_hidden(data), + options: Keyword.put_new(opts, :method, form_for_method(data)) + } + end + + def to_form(%{action: parent_action} = source, form, field, opts) do + if Keyword.has_key?(opts, :default) do + raise ArgumentError, + ":default is not supported on inputs_for with changesets. 
" <> + "The default value must be set in the changeset data" + end + + {prepend, opts} = Keyword.pop(opts, :prepend, []) + {append, opts} = Keyword.pop(opts, :append, []) + {name, opts} = Keyword.pop(opts, :as) + {id, opts} = Keyword.pop(opts, :id) + + id = to_string(id || form.id <> "_#{field}") + name = to_string(name || form.name <> "[#{field}]") + + case find_inputs_for_type!(source, field) do + {:one, cast, module} -> + changesets = + case Map.fetch(source.changes, field) do + {:ok, nil} -> + [] + + {:ok, map} -> + [validate_map!(map, field)] + + _ -> + [validate_map!(assoc_from_data(source.data, field), field) || module.__struct__] + end + + for changeset <- skip_replaced(changesets) do + %{data: data, params: params} = + changeset = to_changeset(changeset, parent_action, module, cast) + + %Phoenix.HTML.Form{ + source: changeset, + impl: __MODULE__, + id: id, + name: name, + errors: form_for_errors(changeset), + data: data, + params: params || %{}, + hidden: form_for_hidden(data), + options: opts + } + end + + {:many, cast, module} -> + changesets = + validate_list!(Map.get(source.changes, field), field) || + validate_list!(assoc_from_data(source.data, field), field) || [] + + changesets = + if form.params[Atom.to_string(field)] do + changesets + else + prepend ++ changesets ++ append + end + + changesets = skip_replaced(changesets) + + for {changeset, index} <- Enum.with_index(changesets) do + %{data: data, params: params} = + changeset = to_changeset(changeset, parent_action, module, cast) + + index_string = Integer.to_string(index) + + %Phoenix.HTML.Form{ + source: changeset, + impl: __MODULE__, + id: id <> "_" <> index_string, + name: name <> "[" <> index_string <> "]", + index: index, + errors: form_for_errors(changeset), + data: data, + params: params || %{}, + hidden: form_for_hidden(data), + options: opts + } + end + end + end + + def input_value(data, form, field, computed \\ nil) + + def input_value(%{changes: changes, data: data}, %{params: params}, field, computed) + when is_atom(field) do + case Map.fetch(changes, field) do + {:ok, value} -> + value + + :error -> + case Map.fetch(params, Atom.to_string(field)) do + {:ok, value} -> + value + + :error when is_nil(computed) -> + Map.get(data, field) + + :error -> + computed + end + end + end + + def input_value(_data, _form, field, _computed) do + raise ArgumentError, "expected field to be an atom, got: #{inspect(field)}" + end + + def input_type(%{types: types}, _, field) do + type = Map.get(types, field, :string) + type = if Ecto.Type.primitive?(type), do: type, else: type.type + + case type do + :integer -> :number_input + :boolean -> :checkbox + :date -> :date_select + :time -> :time_select + :utc_datetime -> :datetime_select + :naive_datetime -> :datetime_select + _ -> :text_input + end + end + + def input_validations(%{required: required, validations: validations} = changeset, _, field) do + [required: field in required] ++ + for {key, validation} <- validations, + key == field, + attr <- validation_to_attrs(validation, field, changeset), + do: attr + end + + defp assoc_from_data(data, field) do + assoc_from_data(data, Map.fetch!(data, field), field) + end + + defp assoc_from_data(%{__meta__: %{state: :built}}, %Ecto.Association.NotLoaded{}, _field) do + nil + end + + defp assoc_from_data(%{__struct__: struct}, %Ecto.Association.NotLoaded{}, field) do + raise ArgumentError, + "using inputs_for for association `#{field}` " <> + "from `#{inspect(struct)}` but it was not loaded. 
Please preload your " <> + "associations before using them in inputs_for" + end + + defp assoc_from_data(_data, value, _field) do + value + end + + defp skip_replaced(changesets) do + Enum.reject(changesets, fn + %Ecto.Changeset{action: :replace} -> true + _ -> false + end) + end + + defp validation_to_attrs({:length, opts}, _field, _changeset) do + max = + if val = Keyword.get(opts, :max) do + [maxlength: val] + else + [] + end + + min = + if val = Keyword.get(opts, :min) do + [minlength: val] + else + [] + end + + max ++ min + end + + defp validation_to_attrs({:number, opts}, field, changeset) do + type = Map.get(changeset.types, field, :integer) + step_for(type) ++ min_for(type, opts) ++ max_for(type, opts) + end + + defp validation_to_attrs(_validation, _field, _changeset) do + [] + end + + defp step_for(:integer), do: [step: 1] + defp step_for(_other), do: [step: "any"] + + defp max_for(type, opts) do + cond do + max = type == :integer && Keyword.get(opts, :less_than) -> [max: max - 1] + max = Keyword.get(opts, :less_than_or_equal_to) -> [max: max] + true -> [] + end + end + + defp min_for(type, opts) do + cond do + min = type == :integer && Keyword.get(opts, :greater_than) -> [min: min + 1] + min = Keyword.get(opts, :greater_than_or_equal_to) -> [min: min] + true -> [] + end + end + + defp find_inputs_for_type!(changeset, field) do + case Map.fetch(changeset.types, field) do + {:ok, {tag, %{cardinality: cardinality, on_cast: cast, related: module}}} + when tag in [:embed, :assoc] -> + {cardinality, cast, module} + + _ -> + struct = changeset.data.__struct__ + + raise ArgumentError, + "could not generate inputs for #{inspect(field)} from #{inspect(struct)}. " <> + "Check the field exists and it is one of embeds_one, embeds_many, has_one, " <> + "has_many, belongs_to or many_to_many" + end + end + + defp to_changeset(%Ecto.Changeset{} = changeset, parent_action, _module, _cast), + do: apply_action(changeset, parent_action) + + defp to_changeset(%{} = data, parent_action, _module, cast) when is_function(cast, 2), + do: apply_action(cast!(cast, data), parent_action) + + defp to_changeset(%{} = data, parent_action, _module, {module, func, arguments} = mfa) + when is_atom(module) and is_atom(func) and is_list(arguments), + do: apply_action(apply!(mfa, data), parent_action) + + defp to_changeset(%{} = data, parent_action, _module, nil), + do: apply_action(Ecto.Changeset.change(data), parent_action) + + defp cast!(cast, data) do + case cast.(data, %{}) do + %Ecto.Changeset{} = changeset -> + changeset + + other -> + raise "expected on_cast/2 callback #{inspect(cast)} to return an Ecto.Changeset, " <> + "got: #{inspect(other)}" + end + end + + defp apply!({module, func, arguments}, data) do + case apply(module, func, [data, %{} | arguments]) do + %Ecto.Changeset{} = changeset -> + changeset + + other -> + raise "expected #{module}.#{func} to return an Ecto.Changeset, " <> + "got: #{inspect(other)}" + end + end + + # If the parent changeset had no action, we need to remove the action + # from children changeset so we ignore all errors accordingly. 
+ defp apply_action(changeset, nil), + do: %{changeset | action: nil} + + defp apply_action(changeset, _action), + do: changeset + + defp validate_list!(value, _what) when is_list(value) or is_nil(value), do: value + + defp validate_list!(value, what) do + raise ArgumentError, "expected #{what} to be a list, got: #{inspect(value)}" + end + + defp validate_map!(value, _what) when is_map(value) or is_nil(value), do: value + + defp validate_map!(value, what) do + raise ArgumentError, "expected #{what} to be a map/struct, got: #{inspect(value)}" + end + + defp form_for_errors(%{action: nil}), do: [] + defp form_for_errors(%{action: :ignore}), do: [] + defp form_for_errors(%{errors: errors}), do: errors + + defp form_for_hidden(%{__struct__: module} = data) do + module.__schema__(:primary_key) + rescue + _ -> [] + else + keys -> for k <- keys, v = Map.fetch!(data, k), do: {k, v} + end + + defp form_for_hidden(_), do: [] + + defp form_for_name(%{__struct__: module}) do + module + |> Module.split() + |> List.last() + |> Macro.underscore() + end + + defp form_for_name(_) do + raise ArgumentError, "non-struct data in changeset requires the :as option to be given" + end + + defp form_for_method(%{__meta__: %{state: :loaded}}), do: "put" + defp form_for_method(_), do: "post" + end + + defimpl Phoenix.HTML.Safe, for: Decimal do + def to_iodata(t) do + @for.to_string(t, :normal) + end + end +end diff --git a/deps/phoenix_ecto/lib/phoenix_ecto/plug.ex b/deps/phoenix_ecto/lib/phoenix_ecto/plug.ex new file mode 100644 index 0000000..5ee55af --- /dev/null +++ b/deps/phoenix_ecto/lib/phoenix_ecto/plug.ex @@ -0,0 +1,59 @@ +errors = [ + {Ecto.CastError, 400}, + {Ecto.Query.CastError, 400}, + {Ecto.NoResultsError, 404}, + {Ecto.StaleEntryError, 409} +] + +excluded_exceptions = Application.get_env(:phoenix_ecto, :exclude_ecto_exceptions_from_plug, []) + +for {exception, status_code} <- errors do + unless exception in excluded_exceptions do + defimpl Plug.Exception, for: exception do + def status(_), do: unquote(status_code) + def actions(_), do: [] + end + end +end + +unless Ecto.SubQueryError in excluded_exceptions do + defimpl Plug.Exception, for: Ecto.SubQueryError do + def status(sub_query_error) do + Plug.Exception.status(sub_query_error.exception) + end + + def actions(_), do: [] + end +end + +unless Phoenix.Ecto.PendingMigrationError in excluded_exceptions do + defimpl Plug.Exception, for: Phoenix.Ecto.PendingMigrationError do + def status(_error), do: 503 + + def actions(%{repo: repo}), + do: [ + %{ + label: "Run migrations for repo", + handler: {__MODULE__, :migrate, [repo]} + } + ] + + def migrate(repo), do: Ecto.Migrator.run(repo, :up, all: true) + end +end + +unless Phoenix.Ecto.StorageNotCreatedError in excluded_exceptions do + defimpl Plug.Exception, for: Phoenix.Ecto.StorageNotCreatedError do + def status(_error), do: 503 + + def actions(%{repo: repo}), + do: [ + %{ + label: "Create database for repo", + handler: {__MODULE__, :storage_up, [repo]} + } + ] + + def storage_up(repo), do: repo.__adapter__().storage_up(repo.config()) + end +end diff --git a/deps/phoenix_ecto/lib/phoenix_ecto/sql/sandbox.ex b/deps/phoenix_ecto/lib/phoenix_ecto/sql/sandbox.ex new file mode 100644 index 0000000..7a6d883 --- /dev/null +++ b/deps/phoenix_ecto/lib/phoenix_ecto/sql/sandbox.ex @@ -0,0 +1,306 @@ +defmodule Phoenix.Ecto.SQL.Sandbox do + @moduledoc """ + A plug to allow concurrent, transactional acceptance tests with [`Ecto.Adapters.SQL.Sandbox`] + (https://hexdocs.pm/ecto_sql/Ecto.Adapters.SQL.Sandbox.html). 
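Before the sandbox plug below, a quick illustration of what the `Plug.Exception` implementations defined just above provide; `MyApp.Repo` is a placeholder and the snippet is a sketch, not part of this patch:

```elixir
# common Ecto errors are translated into HTTP statuses by the error-handling plugs
Plug.Exception.status(%Ecto.NoResultsError{message: "expected at least one result"})
#=> 404

Plug.Exception.status(%Phoenix.Ecto.PendingMigrationError{repo: MyApp.Repo})
#=> 503
```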
+ + ## Example + + This plug should only be used during tests. First, set a flag to + enable it in `config/test.exs`: + + config :your_app, sql_sandbox: true + + And use the flag to conditionally add the plug to `lib/your_app/endpoint.ex`: + + if Application.compile_env(:your_app, :sql_sandbox) do + plug Phoenix.Ecto.SQL.Sandbox + end + + It's important that this is at the top of `endpoint.ex`, before any other plugs. + + Then, within an acceptance test, checkout a sandboxed connection as before. + Use `metadata_for/2` helper to get the session metadata to that will allow access + to the test's connection. + + Here's an example using [Hound](https://hex.pm/packages/hound): + + use Hound.Helpers + + setup do + :ok = Ecto.Adapters.SQL.Sandbox.checkout(YourApp.Repo) + metadata = Phoenix.Ecto.SQL.Sandbox.metadata_for(YourApp.Repo, self()) + Hound.start_session(metadata: metadata) + :ok + end + + ## Supporting socket connections + + To support socket connections the spawned processes need access to the header + used for transporting the metadata. By default this is the user agent header, + but you can also use custom `X-`-headers. + + socket "/path", Socket, + websocket: [connect_info: [:user_agent, โ€ฆ]] + + socket "/path", Socket, + websocket: [connect_info: [:x_headers, โ€ฆ]] + + To fetch the value you either use `connect_info[:user_agent]` or + for a custom header: + + Enum.find_value(connect_info.x_headers, fn + {"x-my-custom-header", val} -> val + _ -> false + end) + + ### Channels + + For channels, `:connect_info` data is available to any of your Sockets' + `c:Phoenix.Socket.connect/3` callbacks: + + # user_socket.ex + def connect(_params, socket, connect_info) do + {:ok, assign(socket, :phoenix_ecto_sandbox, connect_info[:user_agent])} + end + + This stores the value on the socket, so it can be available to all of your + channels for allowing the sandbox. + + # room_channel.ex + def join("room:lobby", _payload, socket) do + allow_ecto_sandbox(socket) + {:ok, socket} + end + + # This is a great function to extract to a helper module + defp allow_ecto_sandbox(socket) do + Phoenix.Ecto.SQL.Sandbox.allow( + socket.assigns.phoenix_ecto_sandbox, + Ecto.Adapters.SQL.Sandbox + ) + end + + `allow/2` needs to be manually called once for each channel, at best directly + at the start of `c:Phoenix.Channel.join/3`. + + ### LiveView + + LiveViews can be supported in a similar fashion than channels, but using the + `c:Phoenix.LiveView.mount/3` callback. + + def mount(_, _, socket) do + allow_ecto_sandbox(socket) + โ€ฆ + end + + # This is a great function to extract to a helper module + defp allow_ecto_sandbox(socket) do + %{assigns: %{phoenix_ecto_sandbox: metadata}} = + assign_new(socket, :phoenix_ecto_sandbox, fn -> + if connected?(socket), do: get_connect_info(socket)[:user_agent] + end) + + Phoenix.Ecto.SQL.Sandbox.allow(metadata, Ecto.Adapters.SQL.Sandbox) + end + + This is a bit more complex than the channel code, because LiveViews not only + are their own processes when spawned via a socket connection, but also when + doing the static render as part of the plug pipeline. Given `get_connect_info/1` + is only available for socket connections, this uses the `:phoenix_ecto_sandbox` + assign of the rendering `conn` for the static render. + + ## Concurrent end-to-end tests with external clients + + Concurrent and transactional tests for external HTTP clients is supported, + allowing for complete end-to-end tests. 
This is useful for cases such as + JavaScript test suites for single page applications that exercise the + Phoenix endpoint for end-to-end test setup and teardown. To enable this, + you can expose a sandbox route on the `Phoenix.Ecto.SQL.Sandbox` plug by + providing the `:at`, and `:repo` options. For example: + + plug Phoenix.Ecto.SQL.Sandbox, + at: "/sandbox", + repo: MyApp.Repo, + timeout: 15_000 # the default + + This would expose a route at `"/sandbox"` for the given repo where + external clients send POST requests to spawn a new sandbox session, + and DELETE requests to stop an active sandbox session. By default, + the external client is expected to pass up the `"user-agent"` header + containing serialized sandbox metadata returned from the POST request, + but this value may customized with the `:header` option. + """ + + import Plug.Conn + alias Plug.Conn + alias Phoenix.Ecto.SQL.{SandboxSession, SandboxSupervisor} + + @doc """ + Spawns a sandbox session to checkout a connection for a remote client. + + ## Examples + + iex> {:ok, _owner_pid, metadata} = start_child(MyApp.Repo) + """ + def start_child(repo, opts \\ []) do + child_spec = {SandboxSession, {repo, self(), opts}} + + case DynamicSupervisor.start_child(SandboxSupervisor, child_spec) do + {:ok, owner} -> + metadata = metadata_for(repo, owner) + {:ok, owner, metadata} + + {:error, reason} -> + {:error, reason} + end + end + + @doc """ + Stops a sandbox session holding a connection for a remote client. + + ## Examples + + iex> {:ok, owner_pid, metadata} = start_child(MyApp.Repo) + iex> :ok = stop(owner_pid) + """ + def stop(owner) when is_pid(owner) do + GenServer.call(owner, :checkin) + end + + @doc false + def init(opts \\ []) do + session_opts = Keyword.take(opts, [:sandbox, :timeout]) + + %{ + header: Keyword.get(opts, :header, "user-agent"), + path: get_path_info(opts[:at]), + repo: opts[:repo], + sandbox: session_opts[:sandbox] || Ecto.Adapters.SQL.Sandbox, + session_opts: session_opts + } + end + + defp get_path_info(nil), do: nil + defp get_path_info(path), do: Plug.Router.Utils.split(path) + + @doc false + def call(%Conn{method: "POST", path_info: path} = conn, %{path: path} = opts) do + %{repo: repo, session_opts: session_opts} = opts + {:ok, _owner, metadata} = start_child(repo, session_opts) + + conn + |> put_resp_content_type("text/plain") + |> send_resp(200, encode_metadata(metadata)) + |> halt() + end + + def call(%Conn{method: "DELETE", path_info: path} = conn, %{path: path} = opts) do + case decode_metadata(extract_header(conn, opts.header)) do + %{owner: owner} -> + :ok = stop(owner) + + conn + |> put_resp_content_type("text/plain") + |> send_resp(200, "") + |> halt() + + %{} -> + conn + |> send_resp(410, "") + |> halt() + end + end + + def call(conn, %{header: header, sandbox: sandbox}) do + header = extract_header(conn, header) + allow(header, sandbox) + assign(conn, :phoenix_ecto_sandbox, header) + end + + defp extract_header(%Conn{} = conn, header) do + conn |> get_req_header(header) |> List.first() + end + + @doc """ + Returns metadata to establish a sandbox for. + + The metadata is then passed via user-agent/headers to browsers. + Upon request, the `Phoenix.Ecto.SQL.Sandbox` plug will decode + the header and allow the request process under the sandbox. 
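+
+ For instance, a rough sketch (the repo module and pid shown are illustrative):
+
+     metadata = Phoenix.Ecto.SQL.Sandbox.metadata_for(MyApp.Repo, self())
+     #=> %{owner: #PID<0.223.0>, repo: MyApp.Repo, trap_exit: true}
+
+     Phoenix.Ecto.SQL.Sandbox.encode_metadata(metadata)
+     #=> "BeamMetadata (...)"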
+ + ## Options + + * `:trap_exit` - if the browser being used for integration + testing navigates away from a page or aborts a AJAX request + while the request process is talking to the database, it + will corrupt the database connection and make the test fail. + Therefore, to avoid intermitent tests, we recommend trapping + exits in the request process, so all database connections shut + down cleanly. You can disable this behaviour by setting the + option to false. + + """ + @spec metadata_for(Ecto.Repo.t() | [Ecto.Repo.t()], pid, keyword) :: map + def metadata_for(repo_or_repos, pid, opts \\ []) when is_pid(pid) do + %{repo: repo_or_repos, owner: pid, trap_exit: Keyword.get(opts, :trap_exit, true)} + end + + @doc """ + Encodes metadata generated by `metadata_for/2` for client response. + """ + def encode_metadata(metadata) do + encoded = + {:v1, metadata} + |> :erlang.term_to_binary() + |> Base.url_encode64() + + "BeamMetadata (#{encoded})" + end + + @doc """ + Decodes encoded metadata back into map generated from `metadata_for/2`. + """ + def decode_metadata(encoded_meta) when is_binary(encoded_meta) do + case encoded_meta |> String.split("/") |> List.last() do + "BeamMetadata (" <> metadata -> + metadata + |> binary_part(0, byte_size(metadata) - 1) + |> parse_metadata() + + _ -> + %{} + end + end + + def decode_metadata(_), do: %{} + + defp parse_metadata(encoded_metadata) do + encoded_metadata + |> Base.url_decode64!() + |> :erlang.binary_to_term() + |> case do + {:v1, metadata} -> metadata + _ -> %{} + end + end + + @doc """ + Decodes the given metadata and allows the current process + under the given sandbox. + """ + def allow(encoded_metadata, sandbox) when is_binary(encoded_metadata) do + metadata = decode_metadata(encoded_metadata) + + with %{trap_exit: true} <- metadata do + Process.flag(:trap_exit, true) + end + + allow(metadata, sandbox) + end + + def allow(%{repo: repo, owner: owner}, sandbox), + do: Enum.each(List.wrap(repo), &sandbox.allow(&1, owner, self())) + + def allow(%{}, _sandbox), do: :ok + def allow(nil, _sandbox), do: :ok +end diff --git a/deps/phoenix_ecto/lib/phoenix_ecto/sql/sandbox_session.ex b/deps/phoenix_ecto/lib/phoenix_ecto/sql/sandbox_session.ex new file mode 100644 index 0000000..343d7ca --- /dev/null +++ b/deps/phoenix_ecto/lib/phoenix_ecto/sql/sandbox_session.ex @@ -0,0 +1,43 @@ +defmodule Phoenix.Ecto.SQL.SandboxSession do + @moduledoc false + use GenServer, restart: :temporary + + @timeout 15_000 + + def start_link({repo, client, opts}) do + GenServer.start_link(__MODULE__, [repo, client, opts]) + end + + def init([repo, client, opts]) do + timeout = opts[:timeout] || @timeout + sandbox = opts[:sandbox] || Ecto.Adapters.SQL.Sandbox + + :ok = checkout_connection(sandbox, repo, client) + Process.send_after(self(), :timeout, timeout) + + {:ok, %{repo: repo, client: client, sandbox: sandbox}} + end + + def handle_call(:checkin, _from, state) do + :ok = checkin_connection(state.sandbox, state.repo, state.client) + {:stop, :shutdown, :ok, state} + end + + def handle_info(:timeout, state) do + :ok = checkin_connection(state.sandbox, state.repo, state.client) + {:stop, :shutdown, state} + end + + def handle_info({:allowed, repo}, state) do + send(state.client, {:allowed, repo}) + {:noreply, state} + end + + defp checkin_connection(sandbox, repo, client) do + sandbox.checkin(repo, client: client) + end + + defp checkout_connection(sandbox, repo, client) do + sandbox.checkout(repo, client: client) + end +end diff --git a/deps/phoenix_ecto/mix.exs 
b/deps/phoenix_ecto/mix.exs new file mode 100644 index 0000000..64f2a32 --- /dev/null +++ b/deps/phoenix_ecto/mix.exs @@ -0,0 +1,59 @@ +defmodule PhoenixEcto.Mixfile do + use Mix.Project + + @source_url "https://github.com/phoenixframework/phoenix_ecto" + @version "4.4.0" + + def project do + [ + app: :phoenix_ecto, + version: @version, + elixir: "~> 1.7", + deps: deps(), + + # Hex + description: "Integration between Phoenix & Ecto", + package: package(), + + # Docs + name: "Phoenix/Ecto", + docs: [ + main: "main", + extras: ["README.md": [filename: "main", title: "Phoenix/Ecto"]], + source_ref: "v#{@version}", + source_url: @source_url + ], + xref: [ + exclude: [ + {Ecto.Migrator, :migrations, 1}, + {Ecto.Migrator, :run, 3} + ] + ] + ] + end + + def application do + [ + mod: {Phoenix.Ecto, []}, + extra_applications: [:logger], + env: [exclude_ecto_exceptions_from_plug: []] + ] + end + + defp package do + [ + maintainers: ["Josรฉ Valim", "Chris Mccord"], + licenses: ["MIT"], + links: %{"GitHub" => @source_url} + ] + end + + defp deps do + [ + {:phoenix_html, "~> 2.14.2 or ~> 3.0", optional: true}, + {:ecto, "~> 3.3"}, + {:plug, "~> 1.9"}, + {:ex_doc, ">= 0.0.0", only: :docs} + ] + end +end diff --git a/deps/phoenix_html/.fetch b/deps/phoenix_html/.fetch new file mode 100644 index 0000000..e69de29 diff --git a/deps/phoenix_html/.hex b/deps/phoenix_html/.hex new file mode 100644 index 0000000000000000000000000000000000000000..1374e00dd6d3fe8f6c6d644f31f0583f16c97c87 GIT binary patch literal 276 zcmZ9HJ8r`;5JVX}zaq#{3@-odauZ{bl|74nNgM7^ ztU`!a7N28lX*p6!^mDn^+S@N6P!QP+js~TWyvm@m*Gx7eJMz|PkcMT@*#nTy#KBrX z$OaPti}#8RzteI#_r0eMt>boL@!YVc2jXKcQ)=UOENeY?iRrrjm(VUz^aEPQ%94bh zGsLJ!`)GYK%DBOq$WF;U9po=j C#Y_kQ literal 0 HcmV?d00001 diff --git a/deps/phoenix_html/CHANGELOG.md b/deps/phoenix_html/CHANGELOG.md new file mode 100644 index 0000000..f7af4e2 --- /dev/null +++ b/deps/phoenix_html/CHANGELOG.md @@ -0,0 +1,379 @@ +# Changelog + +# v3.2.0 (2021-12-18) + +* Enhancements + * Raise if the `id` attribute is set to a number. This is actually an invalid value according to the HTML spec and it can lead to problematic client behaviour, especially in LiveView and other client frameworks. 
+ * Allow `phx` attributes to be nested, similar to `aria` and `data` attributes + * Allow hidden fields in forms to be a list of values + +# v3.1.0 (2021-10-23) + +* Bug fix + * Do not submit data-method links if default has been prevented +* Deprecations + * Deprecate `~E` and `Phoenix.HTML.Tag.attributes_escape/1` + * Remove deprecated `Phoenix.HTML.Link.link/1` + +# v3.0.4 (2021-09-23) + +* Bug fix + * Ensure `class={@class}` in HEEx templates and `:class` attribute in `content_tag` are properly escaped against XSS + +# v3.0.3 (2021-09-04) + +* Bug fix + * Fix sorting of attributes in `tag`/`content_tag` + +# v3.0.2 (2021-08-19) + +* Enhancements + * Support maps on `Phoenix.HTML.Tag.attributes_escape/1` + +# v3.0.1 (2021-08-14) + +* Enhancements + * Add `Phoenix.HTML.Tag.csrf_input_tag/2` + +# v3.0.0 (2021-08-06) + +* Enhancements + * Allow extra html attributes on the `:prompt` option in `select` + * Make `Plug` an optional dependency + * Prefix form id on inputs when it is given to `form_for/3` + * Allow `%URI{}` to be passed to `link/2` and `button/2` as `:to` + * Expose `Phoenix.HTML.Tag.csrf_token_value/1` + * Add `Phoenix.HTML.Tag.attributes_escape/1` + +* Bug fixes + * Honor the `form` attribute when creating hidden checkbox input + * Use `to_iso8601` as the standard implementation for safe dates and times + +* Deprecations + * `form_for` without an anonymous function has been deprecated. v3.0 has deprecated the usage, v3.1 will emit warnings, and v3.2 will fully remove the functionality + +* Backwards incompatible changes + * Strings given as attributes keys in `tag` and `content_tag` are now emitted as is (without being dasherized) and are also HTML escaped + * Prefix form id on inputs when it is given to `form_for/3` + * By default dates and times will format to the `to_iso8601` functions provided by their implementation + * Do not include `csrf-param` and `method-param` in generated `csrf_meta_tag` + * Remove deprecated `escape_javascript` in favor of `javascript_escape` + * Remove deprecated `field_value` in favor of `input_value` + * Remove deprecated `field_name` in favor of `input_name` + * Remove deprecated `field_id` in favor of `input_id` + +## v2.14.3 (2020-12-12) + +* Bug fixes + * Fix warnings on Elixir v1.12 + +## v2.14.2 (2020-04-30) + +* Deprecations + * Deprecate `Phoenix`-specific assigns `:view_module` and `:view_template` + +## v2.14.1 (2020-03-20) + +* Enhancements + * Add `Phoenix.HTML.Form.options_for_select/2` + * Add `Phoenix.HTML.Form.inputs_for/3` + +* Bug fixes + * Disable hidden input for disabled checkboxes + +## v2.14.0 (2020-01-28) + +* Enhancements + * Remove enforce_utf8 workaround on forms as it is no longer required by browser + * Remove support tuple-based date/time with microseconds calendar types + * Allow strings as first element in `content_tag` + * Add `:srcset` support to `img_tag` + * Allow `inputs_for` to skip hidden fields + +## v2.13.4 (2020-01-28) + +* Bug fixes + * Fix invalid :line in Elixir v1.10.0 + +## v2.13.3 (2019-05-31) + +* Enhancements + * Add atom support to FormData + +* Bug fixes + * Keep proper line numbers on .eex templates for proper coverage + +## v2.13.2 (2019-03-29) + +* Bug fixes + * Stop event propagation when confirm dialog is canceled + +## v2.13.1 (2019-01-05) + +* Enhancements + * Allow safe content to be given to label + * Also escale template literals in `javascript_escape/1` + +* Bug fixes + * Fix deprecation warnings to point to the correct alternative + +## v2.13.0 (2018-12-09) + +* Enhancements + * 
Require Elixir v1.5+ for more efficient template compilation/rendering + * Add `Phoenix.HTML.Engine.encode_to_iodata!/1` + * Add `Phoenix.HTML.Form.form_for/3` that works without an anonymous function + +* Deprecations + * Deprecate `Phoenix.HTML.escape_javascript/1` in favor of `Phoenix.HTML.javascript_escape/1` for consistency + +## v2.12.0 (2018-08-06) + +* Enhancements + * Configurable and extendable data-confirm behaviour + * Allow data-confirm with submit buttons + * Support ISO 8601 formatted strings for date and time values + +* Bug fixes + * Provide a default id of the field name for `@conn` based forms + +## v2.11.2 (2018-04-13) + +* Enhancements + * Support custom precision on time input + +* Bug fixes + * Do not raise when `:` is part of a path on link/button attributes + +## v2.11.1 (2018-03-20) + +* Enhancements + * Add `label/1` + * Copy the target attribute of the link in the generated JS form + +* Bug fixes + * Support any value that is html escapable in `radio_button` + +## v2.11.0 (2018-03-09) + +* Enhancements + * Add date, datetime-local and time input types + * Enable string keys to be usable with forms + * Support carriage return in `text_to_html` + * Add support for HTML5 boolean attributes to `content_tag` and `tag` + * Improve performance by relying on `html_safe_to_iodata/1` + * Protect against CSRF tokens leaking across hosts when the POST URL is dynamic + * Require `to` attribute in links and buttons to explicitly pass protocols as a separate option for safety reasons + +* Bug fixes + * Guarantee `input_name/2` always returns strings + * Improve handling of uncommon whitespace and null in `escape_javascript` + * Escape value attribute so it is never treated as a boolean + +* Backwards incompatible changes + * The :csrf_token_generator configuration in the Phoenix.HTML app no longer works due to the improved security mechanisms + +## v2.10.5 (2017-11-08) + +* Enhancements + * Do not require the :as option in form_for + +## v2.10.4 (2017-08-15) + +* Bug fixes + * Fix formatting of days in datetime_builder + +## v2.10.3 (2017-07-30) + +* Enhancements + * Allow specifying a custom CSRF token generator + +* Bug fixes + * Do not submit `method: :get` in buttons as "post" + +## v2.10.2 (2017-07-24) + +* Bug fixes + * Traverse DOM elements up when handling data-method + +## v2.10.1 (2017-07-22) + +* Bug fixes + * Only generate CSRF token if necessary + +## v2.10.0 (2017-07-21) + +* Enhancements + * Support custom attributes in options in select + +* Bug fixes + * Accept non-binary values in textarea's content + * Allow nested forms on the javascript side. This means `link` and `button` no longer generate a child form such as the `:form` option has no effect and "data-submit=parent" is no longer supported. 
Instead "data-to" and "data-method" are set on the entities and the form is generated on the javascript side of things + +## v2.9.3 (2016-12-24) + +* Bug fixes + * Once again support any name for atom forms + +## v2.9.2 (2016-12-24) + +* Bug fixes + * Always read from `form.params` and then from `:selected` in `select` and `multiple_select` before falling back to `input_value/2` + +## v2.9.1 (2016-12-20) + +* Bug fixes + * Implement proper `input_value/3` callback + +## v2.9.0 (2016-12-19) + +* Enhancements + * Add `img_tag/2` helper to `Phoenix.HTML.Tag` + * Submit nearest form even if not direct descendent + * Use more iodata for `tag/2` and `content_tag/3` + * Add `input_value/3`, `input_id/2` and `input_name/2` as a unified API around the input (alongside `input_type/3` and `input_validations/2`) + +## v2.8.0 (2016-11-15) + +* Enhancements + * Add `csrf_meta_tag/0` helper to `Phoenix.HTML.Tag` + * Allow passing a `do:` option to `Phoenix.HTML.Link.button/2` + +## v2.7.0 (2016-09-21) + +* Enhancements + * Render button tags for form submits and in the `button/2` function + * Allow `submit/2` and `button/2` to receive `do` blocks + * Support the `:multiple` option in `file_input/3` + * Remove previously deprecated and unused `model` field + +## v2.6.1 (2016-07-08) + +* Enhancements + * Remove warnings on v1.4 + +* Bug fixes + * Ensure some contents are properly escaped as an integer + * Ensure JavaScript data-submit events bubble up until it finds the proper parent + +## v2.6.0 (2016-06-16) + +* Enhancements + * Raise helpful error when using invalid iodata + * Inline date/time API with Elixir v1.3 Calendar types + * Add `:insert_brs` option to `text_to_html/2` + * Run on Erlang 19 without warnings + +* Client-side changes + * Use event delegation in `phoenix_html.js` + * Drop IE8 support on `phoenix_html.js` + +* Backwards incompatible changes + * `:min`, `:sec` option in `Phoenix.HTML.Form` (`datetime_select/3` and `time_select/3`) are no longer supported. Use `:minute` or `:second` instead. + +## v2.5.1 (2016-03-12) + +* Bug fixes + * Ensure multipart files work with inputs_for + +## v2.5.0 (2016-01-28) + +* Enhancements + * Introduce `form.data` field instead of `form.model`. Currently those values are kept in sync then the form is built but `form.model` will be deprecated in the long term + +## v2.4.0 (2016-01-21) + +* Enhancements + * Add `rel=nofollow` auto generation for non-get links + * Introduce `:selected` option for `select` and `multiple_select` + +* Bug fixes + * Fix safe engine incorrectly marking safe code as unsafe when last expression is `<% ... %>` + +## v2.3.0 (2015-12-16) + +* Enhancements + * Add `escape_javascript/1` + * Add helpful error message when using unknown `@inner` assign + * Add `Phoenix.HTML.Format.text_to_html/2` + +## v2.2.0 (2015-09-01) + +* Bug fix + * Allow the `:name` to be given in forms. For this, using `:name` to configure the underlying input name prefix has been deprecated in favor of `:as` + +## v2.1.2 (2015-08-22) + +* Bug fix + * Do not include values in `password_input/3` + +## v2.1.1 (2015-08-15) + +* Enhancements + * Allow nil in `raw/1` + * Allow block options in `label/3` + * Introduce `:skip_deleted` in `inputs_for/4` + +## v2.1.0 (2015-08-06) + +* Enhancements + * Add an index field to forms to be used by `inputs_for/4` collections + +## v2.0.1 (2015-07-31) + +* Bug fix + * Include web directory in Hex package + +## v2.0.0 (2015-07-30) + +* Enhancements + * No longer generate onclick attributes. 
+ + The main motivation for this is to provide support + for Content Security Policy, which recommends + disabling all inline scripts in a page. + + We took the opportunity to also add support for + data-confirm in `link/2`. + +## v1.4.0 (2015-07-26) + +* Enhancements + * Support `input_type/2` and `input_validations/2` as reflection mechanisms + +## v1.3.0 (2015-07-23) + +* Enhancements + * Add `Phoenix.HTML.Form.inputs_for/4` support + * Add multiple select support + * Add reset input + * Infer default text context for labels + +## v1.2.1 (2015-06-02) + +* Bug fix + * Ensure nil parameters are not discarded when rendering input + +## v1.2.0 (2015-05-30) + +* Enhancements + * Add `label/3` for generating a label tag within a form + +## v1.1.0 (2015-05-20) + +* Enhancements + * Allow do/end syntax with `link/2` + * Raise on missing assigns + +## v1.0.1 + +* Bug fixes + * Avoid variable clash in Phoenix.HTML engine buffers + +## v1.0.0 + +* Enhancements + * Provides an EEx engine with HTML safe rendering + * Provides a `Phoenix.HTML.Safe` protocol + * Provides a `Phoenix.HTML.FormData` protocol + * Provides functions for generating tags, links and form builders in a safe way diff --git a/deps/phoenix_html/LICENSE b/deps/phoenix_html/LICENSE new file mode 100644 index 0000000..7d74eb5 --- /dev/null +++ b/deps/phoenix_html/LICENSE @@ -0,0 +1,20 @@ +Copyright (c) 2014 Chris McCord + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/phoenix_html/README.md b/deps/phoenix_html/README.md new file mode 100644 index 0000000..d316ab5 --- /dev/null +++ b/deps/phoenix_html/README.md @@ -0,0 +1,34 @@ +# Phoenix.HTML + +[![Build Status](https://github.com/phoenixframework/phoenix_html/workflows/Tests/badge.svg)](https://github.com/phoenixframework/phoenix_html/actions?query=workflow%3ATests) + +Collection of helpers to generate and manipulate HTML contents. + +Although this project was originally extracted from Phoenix, +it does not depend on Phoenix and can be used with any Plug +application (or even without Plug). + +See the [docs](https://hexdocs.pm/phoenix_html/) for more information. 
+ +## License + +Copyright (c) 2014 Chris McCord + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/deps/phoenix_html/hex_metadata.config b/deps/phoenix_html/hex_metadata.config new file mode 100644 index 0000000..8874e5c --- /dev/null +++ b/deps/phoenix_html/hex_metadata.config @@ -0,0 +1,24 @@ +{<<"app">>,<<"phoenix_html">>}. +{<<"build_tools">>,[<<"mix">>]}. +{<<"description">>, + <<"Phoenix view functions for working with HTML templates">>}. +{<<"elixir">>,<<"~> 1.7">>}. +{<<"files">>, + [<<"lib">>,<<"lib/phoenix_html">>,<<"lib/phoenix_html/format.ex">>, + <<"lib/phoenix_html/link.ex">>,<<"lib/phoenix_html/safe.ex">>, + <<"lib/phoenix_html/form_data.ex">>,<<"lib/phoenix_html/tag.ex">>, + <<"lib/phoenix_html/engine.ex">>,<<"lib/phoenix_html/form.ex">>, + <<"lib/phoenix_html.ex">>,<<"priv">>,<<"priv/static">>, + <<"priv/static/phoenix_html.js">>,<<"CHANGELOG.md">>,<<"LICENSE">>, + <<"mix.exs">>,<<"package.json">>,<<"README.md">>]}. +{<<"licenses">>,[<<"MIT">>]}. +{<<"links">>, + [{<<"github">>,<<"https://github.com/phoenixframework/phoenix_html">>}]}. +{<<"name">>,<<"phoenix_html">>}. +{<<"requirements">>, + [[{<<"app">>,<<"plug">>}, + {<<"name">>,<<"plug">>}, + {<<"optional">>,true}, + {<<"repository">>,<<"hexpm">>}, + {<<"requirement">>,<<"~> 1.5">>}]]}. +{<<"version">>,<<"3.2.0">>}. diff --git a/deps/phoenix_html/lib/phoenix_html.ex b/deps/phoenix_html/lib/phoenix_html.ex new file mode 100644 index 0000000..f68d265 --- /dev/null +++ b/deps/phoenix_html/lib/phoenix_html.ex @@ -0,0 +1,339 @@ +defmodule Phoenix.HTML do + @moduledoc """ + Helpers for working with HTML strings and templates. + + When used, it imports the given modules: + + * `Phoenix.HTML` - functions to handle HTML safety; + + * `Phoenix.HTML.Tag` - functions for generating HTML tags; + + * `Phoenix.HTML.Form` - functions for working with forms; + + * `Phoenix.HTML.Link` - functions for generating links and urls; + + * `Phoenix.HTML.Format` - functions for formatting text; + + ## HTML Safe + + One of the main responsibilities of this module is to + provide convenience functions for escaping and marking + HTML code as safe. + + By default, data output in templates is not considered + safe: + + <%= "" %> + + will be shown as: + + <hello> + + User data or data coming from the database is almost never + considered safe. 
However, in some cases, you may want to tag
+ it as safe and show its "raw" contents:
+
+     <%= raw "<hello>" %>
+
+ Keep in mind most helpers will automatically escape your data
+ and return safe content:
+
+     <%= content_tag :p, "<hello>" %>
+
+ will properly output:
+
+     <p>&lt;hello&gt;</p>
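+
+ The escaping helpers can also be called directly; a small sketch:
+
+     iex> Phoenix.HTML.html_escape("<hello>") |> Phoenix.HTML.safe_to_string()
+     "&lt;hello&gt;"
+
+     iex> Phoenix.HTML.raw("<hello>") |> Phoenix.HTML.safe_to_string()
+     "<hello>"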
+ + ## JavaScript library + + This project ships with a tiny bit of JavaScript that listens + to all click events to: + + * Support `data-confirm="message"` attributes, which shows + a confirmation modal with the given message + + * Support `data-method="patch|post|put|delete"` attributes, + which sends the current click as a PATCH/POST/PUT/DELETE + HTTP request. You will need to add `data-to` with the URL + and `data-csrf` with the CSRF token value + + * Dispatch a "phoenix.link.click" event. You can listen to this + event to customize the behaviour above. Returning false from + this event will disable `data-method`. Stopping propagation + will disable `data-confirm` + + """ + + @doc false + defmacro __using__(_) do + quote do + import Phoenix.HTML + import Phoenix.HTML.Form + import Phoenix.HTML.Link + import Phoenix.HTML.Tag, except: [attributes_escape: 1] + import Phoenix.HTML.Format + end + end + + @typedoc "Guaranteed to be safe" + @type safe :: {:safe, iodata} + + @typedoc "May be safe or unsafe (i.e. it needs to be converted)" + @type unsafe :: Phoenix.HTML.Safe.t() + + @doc false + @deprecated "use the ~H sigil instead" + defmacro sigil_e(expr, opts) do + handle_sigil(expr, opts, __CALLER__) + end + + @doc false + @deprecated "use the ~H sigil instead" + defmacro sigil_E(expr, opts) do + handle_sigil(expr, opts, __CALLER__) + end + + defp handle_sigil({:<<>>, meta, [expr]}, [], caller) do + options = [ + engine: Phoenix.HTML.Engine, + file: caller.file, + line: caller.line + 1, + indentation: meta[:indentation] || 0 + ] + + EEx.compile_string(expr, options) + end + + defp handle_sigil(_, _, _) do + raise ArgumentError, + "interpolation not allowed in ~e sigil. " <> + "Remove the interpolation, use <%= %> to insert values, " <> + "or use ~E to show the interpolation literally" + end + + @doc """ + Marks the given content as raw. + + This means any HTML code inside the given + string won't be escaped. + + iex> raw("") + {:safe, ""} + iex> raw({:safe, ""}) + {:safe, ""} + iex> raw(nil) + {:safe, ""} + + """ + @spec raw(iodata | safe | nil) :: safe + def raw({:safe, value}), do: {:safe, value} + def raw(nil), do: {:safe, ""} + def raw(value) when is_binary(value) or is_list(value), do: {:safe, value} + + @doc """ + Escapes the HTML entities in the given term, returning safe iodata. + + iex> html_escape("") + {:safe, [[[] | "<"], "hello" | ">"]} + + iex> html_escape('') + {:safe, ["<", 104, 101, 108, 108, 111, ">"]} + + iex> html_escape(1) + {:safe, "1"} + + iex> html_escape({:safe, ""}) + {:safe, ""} + + """ + @spec html_escape(unsafe) :: safe + def html_escape({:safe, _} = safe), do: safe + def html_escape(other), do: {:safe, Phoenix.HTML.Engine.encode_to_iodata!(other)} + + @doc """ + Converts a safe result into a string. + + Fails if the result is not safe. In such cases, you can + invoke `html_escape/1` or `raw/1` accordingly before. + + You can combine `html_escape/1` and `safe_to_string/1` + to convert a data structure to a escaped string: + + data |> html_escape() |> safe_to_string() + """ + @spec safe_to_string(safe) :: String.t() + def safe_to_string({:safe, iodata}) do + IO.iodata_to_binary(iodata) + end + + @doc ~S""" + Escapes an enumerable of attributes, returning iodata. + + The attributes are rendered in the given order. Note if + a map is given, the key ordering is not guaranteed. + + The keys and values can be of any shape, as long as they + implement the `Phoenix.HTML.Safe` protocol. In addition, + if the key is an atom, it will be "dasherized". 
In other + words, `:phx_value_id` will be converted to `phx-value-id`. + + Furthemore, the following attributes provide behaviour: + + * `:aria`, `:data`, and `:phx` - they accept a keyword list as + value. `data: [confirm: "are you sure?"]` is converted to + `data-confirm="are you sure?"`. + + * `:class` - it accepts a list of classes as argument. Each + element in the list is separated by space. `nil` and `false` + elements are discarded. `class: ["foo", nil, "bar"]` then + becomes `class="foo bar"`. + + * `:id` - it is validated raise if a number is given as ID, + which is not allowed by the HTML spec and leads to unpredictable + behaviour. + + ## Examples + + iex> safe_to_string attributes_escape(title: "the title", id: "the id", selected: true) + " title=\"the title\" id=\"the id\" selected" + + iex> safe_to_string attributes_escape(%{data: [confirm: "Are you sure?"], class: "foo"}) + " class=\"foo\" data-confirm=\"Are you sure?\"" + + iex> safe_to_string attributes_escape(%{phx: [value: [foo: "bar"]], class: "foo"}) + " class=\"foo\" phx-value-foo=\"bar\"" + + """ + def attributes_escape(attrs) when is_list(attrs) do + {:safe, build_attrs(attrs)} + end + + def attributes_escape(attrs) do + {:safe, attrs |> Enum.to_list() |> build_attrs()} + end + + defp build_attrs([{k, true} | t]), + do: [?\s, key_escape(k) | build_attrs(t)] + + defp build_attrs([{_, false} | t]), + do: build_attrs(t) + + defp build_attrs([{_, nil} | t]), + do: build_attrs(t) + + defp build_attrs([{:id, v} | t]), + do: [" id=\"", id_value(v), ?" | build_attrs(t)] + + defp build_attrs([{:class, v} | t]), + do: [" class=\"", class_value(v), ?" | build_attrs(t)] + + defp build_attrs([{:aria, v} | t]) when is_list(v), + do: nested_attrs(v, " aria", t) + + defp build_attrs([{:data, v} | t]) when is_list(v), + do: nested_attrs(v, " data", t) + + defp build_attrs([{:phx, v} | t]) when is_list(v), + do: nested_attrs(v, " phx", t) + + defp build_attrs([{"id", v} | t]), + do: [" id=\"", id_value(v), ?" | build_attrs(t)] + + defp build_attrs([{"class", v} | t]), + do: [" class=\"", class_value(v), ?" | build_attrs(t)] + + defp build_attrs([{"aria", v} | t]) when is_list(v), + do: nested_attrs(v, " aria", t) + + defp build_attrs([{"data", v} | t]) when is_list(v), + do: nested_attrs(v, " data", t) + + defp build_attrs([{"phx", v} | t]) when is_list(v), + do: nested_attrs(v, " phx", t) + + defp build_attrs([{k, v} | t]), + do: [?\s, key_escape(k), ?=, ?", attr_escape(v), ?" | build_attrs(t)] + + defp build_attrs([]), do: [] + + defp nested_attrs([{k, v} | kv], attr, t) when is_list(v), + do: [nested_attrs(v, "#{attr}-#{key_escape(k)}", []) | nested_attrs(kv, attr, t)] + + defp nested_attrs([{k, v} | kv], attr, t), + do: [attr, ?-, key_escape(k), ?=, ?", attr_escape(v), ?" 
| nested_attrs(kv, attr, t)] + + defp nested_attrs([], _attr, t), + do: build_attrs(t) + + defp id_value(value) when is_number(value) do + raise ArgumentError, + "attempting to set id attribute to #{value}, but the DOM ID cannot be set to a number" + end + + defp id_value(value) do + attr_escape(value) + end + + defp class_value(value) when is_list(value) do + value + |> Enum.filter(& &1) + |> Enum.join(" ") + |> attr_escape() + end + + defp class_value(value) do + attr_escape(value) + end + + defp key_escape(value) when is_atom(value), do: String.replace(Atom.to_string(value), "_", "-") + defp key_escape(value), do: attr_escape(value) + + defp attr_escape({:safe, data}), do: data + defp attr_escape(nil), do: [] + defp attr_escape(other) when is_binary(other), do: Phoenix.HTML.Engine.encode_to_iodata!(other) + defp attr_escape(other), do: Phoenix.HTML.Safe.to_iodata(other) + + @doc """ + Escapes HTML content to be inserted a JavaScript string. + + This function is useful in JavaScript responses when there is a need + to escape HTML rendered from other templates, like in the following: + + $("#container").append("<%= javascript_escape(render("post.html", post: @post)) %>"); + + It escapes quotes (double and single), double backslashes and others. + """ + @spec javascript_escape(binary) :: binary + @spec javascript_escape(safe) :: safe + def javascript_escape({:safe, data}), + do: {:safe, data |> IO.iodata_to_binary() |> javascript_escape("")} + + def javascript_escape(data) when is_binary(data), + do: javascript_escape(data, "") + + defp javascript_escape(<<0x2028::utf8, t::binary>>, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<<0x2029::utf8, t::binary>>, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<<0::utf8, t::binary>>, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<<">, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<<"\r\n", t::binary>>, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<>, acc) when h in [?", ?', ?\\, ?`], + do: javascript_escape(t, <>) + + defp javascript_escape(<>, acc) when h in [?\r, ?\n], + do: javascript_escape(t, <>) + + defp javascript_escape(<>, acc), + do: javascript_escape(t, <>) + + defp javascript_escape(<<>>, acc), do: acc +end diff --git a/deps/phoenix_html/lib/phoenix_html/engine.ex b/deps/phoenix_html/lib/phoenix_html/engine.ex new file mode 100644 index 0000000..41ffd62 --- /dev/null +++ b/deps/phoenix_html/lib/phoenix_html/engine.ex @@ -0,0 +1,198 @@ +defmodule Phoenix.HTML.Engine do + @moduledoc """ + This is an implementation of EEx.Engine that guarantees + templates are HTML Safe. + + The `encode_to_iodata!/1` function converts the rendered + template result into iodata. + """ + + @behaviour EEx.Engine + + @anno (if :erlang.system_info(:otp_release) >= '19' do + [generated: true] + else + [line: -1] + end) + + @doc """ + Encodes the HTML templates to iodata. 
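+
+ As a small illustration (the input values are arbitrary examples):
+
+     iex> Phoenix.HTML.Engine.encode_to_iodata!({:safe, "<p>ok</p>"})
+     "<p>ok</p>"
+
+     iex> Phoenix.HTML.Engine.encode_to_iodata!("<p>") |> IO.iodata_to_binary()
+     "&lt;p&gt;"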
+ """ + def encode_to_iodata!({:safe, body}), do: body + def encode_to_iodata!(nil), do: "" + def encode_to_iodata!(bin) when is_binary(bin), do: html_escape(bin) + def encode_to_iodata!(list) when is_list(list), do: Phoenix.HTML.Safe.List.to_iodata(list) + def encode_to_iodata!(other), do: Phoenix.HTML.Safe.to_iodata(other) + + @doc false + def html_escape(bin) when is_binary(bin) do + html_escape(bin, 0, bin, []) + end + + escapes = [ + {?<, "<"}, + {?>, ">"}, + {?&, "&"}, + {?", """}, + {?', "'"} + ] + + for {match, insert} <- escapes do + defp html_escape(<>, skip, original, acc) do + html_escape(rest, skip + 1, original, [acc | unquote(insert)]) + end + end + + defp html_escape(<<_char, rest::bits>>, skip, original, acc) do + html_escape(rest, skip, original, acc, 1) + end + + defp html_escape(<<>>, _skip, _original, acc) do + acc + end + + for {match, insert} <- escapes do + defp html_escape(<>, skip, original, acc, len) do + part = binary_part(original, skip, len) + html_escape(rest, skip + len + 1, original, [acc, part | unquote(insert)]) + end + end + + defp html_escape(<<_char, rest::bits>>, skip, original, acc, len) do + html_escape(rest, skip, original, acc, len + 1) + end + + defp html_escape(<<>>, 0, original, _acc, _len) do + original + end + + defp html_escape(<<>>, skip, original, acc, len) do + [acc | binary_part(original, skip, len)] + end + + @doc false + def init(_opts) do + %{ + iodata: [], + dynamic: [], + vars_count: 0 + } + end + + @doc false + def handle_begin(state) do + %{state | iodata: [], dynamic: []} + end + + @doc false + def handle_end(quoted) do + handle_body(quoted) + end + + @doc false + def handle_body(state) do + %{iodata: iodata, dynamic: dynamic} = state + safe = {:safe, Enum.reverse(iodata)} + {:__block__, [], Enum.reverse([safe | dynamic])} + end + + @doc false + def handle_text(state, text) do + handle_text(state, [], text) + end + + @doc false + def handle_text(state, _meta, text) do + %{iodata: iodata} = state + %{state | iodata: [text | iodata]} + end + + @doc false + def handle_expr(state, "=", ast) do + ast = traverse(ast) + %{iodata: iodata, dynamic: dynamic, vars_count: vars_count} = state + var = Macro.var(:"arg#{vars_count}", __MODULE__) + ast = quote do: unquote(var) = unquote(to_safe(ast)) + %{state | dynamic: [ast | dynamic], iodata: [var | iodata], vars_count: vars_count + 1} + end + + def handle_expr(state, "", ast) do + ast = traverse(ast) + %{dynamic: dynamic} = state + %{state | dynamic: [ast | dynamic]} + end + + def handle_expr(state, marker, ast) do + EEx.Engine.handle_expr(state, marker, ast) + end + + ## Safe conversion + + defp to_safe(ast), do: to_safe(ast, line_from_expr(ast)) + + defp line_from_expr({_, meta, _}) when is_list(meta), do: Keyword.get(meta, :line, 0) + defp line_from_expr(_), do: 0 + + # We can do the work at compile time + defp to_safe(literal, _line) + when is_binary(literal) or is_atom(literal) or is_number(literal) do + literal + |> Phoenix.HTML.Safe.to_iodata() + |> IO.iodata_to_binary() + end + + # We can do the work at runtime + defp to_safe(literal, line) when is_list(literal) do + quote line: line, do: Phoenix.HTML.Safe.List.to_iodata(unquote(literal)) + end + + # We need to check at runtime and we do so by optimizing common cases. 
+ defp to_safe(expr, line) do + # Keep stacktraces for protocol dispatch and coverage + safe_return = quote line: line, do: data + bin_return = quote line: line, do: Phoenix.HTML.Engine.html_escape(bin) + other_return = quote line: line, do: Phoenix.HTML.Safe.to_iodata(other) + + # However ignore them for the generated clauses to avoid warnings + quote @anno do + case unquote(expr) do + {:safe, data} -> unquote(safe_return) + bin when is_binary(bin) -> unquote(bin_return) + other -> unquote(other_return) + end + end + end + + ## Traversal + + defp traverse(expr) do + Macro.prewalk(expr, &handle_assign/1) + end + + defp handle_assign({:@, meta, [{name, _, atom}]}) when is_atom(name) and is_atom(atom) do + quote line: meta[:line] || 0 do + Phoenix.HTML.Engine.fetch_assign!(var!(assigns), unquote(name)) + end + end + + defp handle_assign(arg), do: arg + + @doc false + def fetch_assign!(assigns, key) do + case Access.fetch(assigns, key) do + {:ok, val} -> + val + + :error -> + raise ArgumentError, """ + assign @#{key} not available in template. + + Please make sure all proper assigns have been set. If this + is a child template, ensure assigns are given explicitly by + the parent template as they are not automatically forwarded. + + Available assigns: #{inspect(Enum.map(assigns, &elem(&1, 0)))} + """ + end + end +end diff --git a/deps/phoenix_html/lib/phoenix_html/form.ex b/deps/phoenix_html/lib/phoenix_html/form.ex new file mode 100644 index 0000000..e0d85ba --- /dev/null +++ b/deps/phoenix_html/lib/phoenix_html/form.ex @@ -0,0 +1,1772 @@ +defmodule Phoenix.HTML.Form do + @moduledoc ~S""" + Helpers related to producing HTML forms. + + The functions in this module can be used in three + distinct scenarios: + + * with changeset data - when information to populate + the form comes from a changeset + + * with limited data - when a form is created without + an underlying data layer. In this scenario, you can + use the connection information (aka Plug.Conn.params) + or pass the form values by hand + + * outside of a form - when the functions are used directly, + outside of `form_for` + + We will explore all three scenarios below. + + ## With changeset data + + The entry point for defining forms in Phoenix is with + the `form_for/4` function. For this example, we will + use `Ecto.Changeset`, which integrates nicely with Phoenix + forms via the `phoenix_ecto` package. + + Imagine you have the following action in your controller: + + def new(conn, _params) do + changeset = User.changeset(%User{}) + render conn, "new.html", changeset: changeset + end + + where `User.changeset/2` is defined as follows: + + def changeset(user, params \\ %{}) do + cast(user, params, [:name, :age]) + end + + Now a `@changeset` assign is available in views which we + can pass to the form: + + <%= form_for @changeset, Routes.user_path(@conn, :create), fn f -> %> + + + + + <%= submit "Submit" %> + <% end %> + + `form_for/4` receives the `Ecto.Changeset` and converts it + to a form, which is passed to the function as the argument + `f`. All the remaining functions in this module receive + the form and automatically generate the input fields, often + by extracting information from the given changeset. For example, + if the user had a default value for age set, it will + automatically show up as selected in the form. + + ### A note on `:errors` + + If no action has been applied to the changeset or action was set to `:ignore`, + no errors are shown on the form object even if the changeset has a non-empty + `:errors` value. 
+ + This is useful for things like validation hints on form fields, e.g. an empty + changeset for a new form. That changeset isn't valid, but we don't want to + show errors until an actual user action has been performed. + + Ecto automatically applies the action for you when you call + Repo.insert/update/delete, but if you want to show errors manually you can + also set the action yourself, either directly on the `Ecto.Changeset` struct + field or by using `Ecto.Changeset.apply_action/2`. + + ## With limited data + + `form_for/4` expects as first argument any data structure that + implements the `Phoenix.HTML.FormData` protocol. By default, + Phoenix implements this protocol for `Plug.Conn` and `Atom`. + + This is useful when you are creating forms that are not backed + by any kind of data layer. Let's assume that we're submitting a + form to the `:new` action in the `FooController`: + + <%= form_for @conn, Routes.foo_path(@conn, :new), [as: :foo], fn f -> %> + <%= text_input f, :for %> + <%= submit "Search" %> + <% end %> + + `form_for/4` uses the `Plug.Conn` to set input values from the + request parameters. + + Alternatively, if you don't have a connection, you can pass `:foo` + as the form data source and explicitly pass the value for every input: + + <%= form_for :foo, Routes.foo_path(MyApp.Endpoint, :new), fn f -> %> + <%= text_input f, :for, value: "current value" %> + <%= submit "Search" %> + <% end %> + + ## Without form data + + Sometimes we may want to generate a `text_input/3` or any other + tag outside of a form. The functions in this module also support + such usage by simply passing an atom as first argument instead + of the form. + + <%= text_input :user, :name, value: "This is a prepopulated value" %> + + ## Nested inputs + + If your data layer supports embedding or nested associations, + you can use `inputs_for` to attach nested data to the form. + + Imagine the following Ecto schemas: + + defmodule User do + use Ecto.Schema + + schema "users" do + field :name + embeds_one :permalink, Permalink + end + end + + defmodule Permalink do + use Ecto.Schema + + embedded_schema do + field :url + end + end + + In the form, you now can: + + <%= form_for @changeset, Routes.user_path(@conn, :create), fn f -> %> + <%= text_input f, :name %> + + <%= inputs_for f, :permalink, fn fp -> %> + <%= text_input fp, :url %> + <% end %> + <% end %> + + The default option can be given to populate the fields if none + is given: + + <%= inputs_for f, :permalink, [default: %Permalink{title: "default"}], fn fp -> %> + <%= text_input fp, :url %> + <% end %> + + `inputs_for/4` can be used to work with single entities or + collections. When working with collections, `:prepend` and + `:append` can be used to add entries to the collection + stored in the changeset. + + ## CSRF protection + + The form generates a CSRF token by default. Your application should + check this token on the server to avoid attackers from making requests + on your server on behalf of other users. Phoenix by default checks + this token. + + When posting a form with a host in its address, such as "//host.com/path" + instead of only "/path", Phoenix will include the host signature in the + token and validate the token only if the accessed host is the same as + the host in the token. This is to avoid tokens from leaking to third + party applications. 
If this behaviour is problematic, you can generate + a non-host specific token with `Plug.CSRFProtection.get_csrf_token/0` and + pass it to the form generator via the `:csrf_token` option. + + ## Phoenix.LiveView integration + + Phoenix.LiveView builds on top of this function to [provide a function + component named `form`](https://hexdocs.pm/phoenix_live_view/Phoenix.LiveView.Helpers.html#form/1). + Inside your HEEx templates, instead of doing this: + + <%= form_for @changeset, url, opts, fn f -> %> + <%= text_input f, :name %> + <% end %> + + you should import `Phoenix.LiveView.Helpers` and then write: + + <.form let={f} for={@changeset}> + <%= text_input f, :name %> + + + """ + + alias Phoenix.HTML.Form + import Phoenix.HTML + import Phoenix.HTML.Tag + + @doc """ + Defines the Phoenix.HTML.Form struct. + + Its fields are: + + * `:source` - the data structure given to `form_for/4` that + implements the form data protocol + + * `:impl` - the module with the form data protocol implementation. + This is used to avoid multiple protocol dispatches. + + * `:id` - the id to be used when generating input fields + + * `:index` - the index of the struct in the form + + * `:name` - the name to be used when generating input fields + + * `:data` - the field used to store lookup data + + * `:params` - the parameters associated to this form in case + they were sent as part of a previous request + + * `:hidden` - a keyword list of fields that are required for + submitting the form behind the scenes as hidden inputs + + * `:options` - a copy of the options given when creating the + form via `form_for/4` without any form data specific key + + * `:action` - the action the form is meant to submit to + + * `:errors` - a keyword list of errors that associated with + the form + """ + defstruct source: nil, + impl: nil, + id: nil, + name: nil, + data: nil, + hidden: [], + params: %{}, + errors: [], + options: [], + index: nil, + action: nil + + @type t :: %Form{ + source: Phoenix.HTML.FormData.t(), + name: String.t(), + data: %{field => term}, + params: %{binary => term}, + hidden: Keyword.t(), + options: Keyword.t(), + errors: Keyword.t(), + impl: module, + id: String.t(), + index: nil | non_neg_integer, + action: nil | String.t() + } + + @type field :: atom | String.t() + + # TODO v3.2: Remove me once form_for without anonymous function has been deprecated + defimpl Phoenix.HTML.Safe do + def to_iodata(%{action: action, options: options}) do + {:safe, contents} = form_tag(action, options) + contents + end + end + + @doc """ + Converts an attribute/form field into its humanize version. + + iex> humanize(:username) + "Username" + iex> humanize(:created_at) + "Created at" + iex> humanize("user_id") + "User" + + """ + def humanize(atom) when is_atom(atom), do: humanize(Atom.to_string(atom)) + + def humanize(bin) when is_binary(bin) do + bin = + if String.ends_with?(bin, "_id") do + binary_part(bin, 0, byte_size(bin) - 3) + else + bin + end + + bin |> String.replace("_", " ") |> String.capitalize() + end + + # TODO v3.1: Effectively deprecate and remove me in future versions + @doc false + def form_for(form_data, action) do + form_for(form_data, action, []) + end + + @doc """ + Generates a form tag with a form builder **without** an anonymous function. + + This functionality exists mostly for integration with `Phoenix.LiveView` + that replaces the anonymous function for explicit closing of the `
</form>`
+ tag:
+
+     <%= f = form_for @changeset, Routes.user_path(@conn, :create), opts %>
+       Name: <%= text_input f, :name %>
+     </form>
+ + See the [Phoenix.LiveView integration](#module-phoenix-liveview-integration) + section in module documentation for examples of using this function. + + See `form_for/4` for the available options. + """ + # TODO v3.1: Effectively deprecate and remove me in future versions + @doc deprecated: "This functionality is deprecated in favor of form_for with a function" + @spec form_for(Phoenix.HTML.FormData.t(), String.t(), Keyword.t()) :: Phoenix.HTML.Form.t() + def form_for(form_data, action, options) when is_list(options) do + %{Phoenix.HTML.FormData.to_form(form_data, options) | action: action} + end + + @doc """ + Generates a form tag with a form builder and an anonymous function. + + <%= form_for @changeset, Routes.user_path(@conn, :create), fn f -> %> + Name: <%= text_input f, :name %> + <% end %> + + See the module documentation for examples of using this function. + + ## Options + + * `:as` - the server side parameter in which all params for this + form will be collected (i.e. `as: :user_params` would mean all fields + for this form will be accessed as `conn.params.user_params` server + side). Automatically inflected when a changeset is given. + + * `:method` - the HTTP method. If the method is not "get" nor "post", + an input tag with name `_method` is generated along-side the form tag. + Defaults to "post". + + * `:multipart` - when true, sets enctype to "multipart/form-data". + Required when uploading files + + * `:csrf_token` - for "post" requests, the form tag will automatically + include an input tag with name `_csrf_token`. When set to false, this + is disabled + + * `:errors` - use this to manually pass a keyword list of errors to the form + (for example from `conn.assigns[:errors]`). This option is only used when a + connection is used as the form source and it will make the errors available + under `f.errors` + + * `:id` - the ID of the form attribute. If an ID is given, all form inputs + will also be prefixed by the given ID + + All other options will be passed as html attributes, such as `class: "foo"`. + """ + @spec form_for(Phoenix.HTML.FormData.t(), String.t(), (t -> Phoenix.HTML.unsafe())) :: + Phoenix.HTML.safe() + @spec form_for(Phoenix.HTML.FormData.t(), String.t(), Keyword.t(), (t -> Phoenix.HTML.unsafe())) :: + Phoenix.HTML.safe() + def form_for(form_data, action, options \\ [], fun) when is_function(fun, 1) do + %{action: action, options: options} = form = form_for(form_data, action, options) + html_escape([form_tag(action, options), fun.(form), raw("")]) + end + + @doc """ + Same as `inputs_for(form, field, [])`. + """ + @spec inputs_for(t, field) :: list(Phoenix.HTML.Form.t()) + def inputs_for(form, field) when is_atom(field) or is_binary(field), + do: inputs_for(form, field, []) + + @doc """ + Generate a new form builder for the given parameter in form **without** an + anonymous function. + + This functionality exists mostly for integration with `Phoenix.LiveView` + that replaces the anonymous function for returning the generated forms + instead. + + Keep in mind that this function does not generate hidden inputs automatically + like `inputs_for/4`. To generate them you need to explicit do it by yourself. + + <%= f = form_for @changeset, Routes.user_path(@conn, :create), opts %> + Name: <%= text_input f, :name %> + + <%= for friend_form <- inputs_for(f, :friends) do %> + # for generating hidden inputs. + <%= hidden_inputs_for(friend_form) %> + <%= text_input friend_form, :name %> + <% end %> + + + See `inputs_for/4` for the available options. 
+ """ + @spec inputs_for(t, field, Keyword.t()) :: list(Phoenix.HTML.Form.t()) + def inputs_for(%{impl: impl} = form, field, options) + when (is_atom(field) or is_binary(field)) and is_list(options) do + options = + form.options + |> Keyword.take([:multipart]) + |> Keyword.merge(options) + + impl.to_form(form.source, form, field, options) + end + + @doc """ + Generate a new form builder for the given parameter in form. + + See the module documentation for examples of using this function. + + ## Options + + * `:id` - the id to be used in the form, defaults to the + concatenation of the given `field` to the parent form id + + * `:as` - the name to be used in the form, defaults to the + concatenation of the given `field` to the parent form name + + * `:default` - the value to use if none is available + + * `:prepend` - the values to prepend when rendering. This only + applies if the field value is a list and no parameters were + sent through the form. + + * `:append` - the values to append when rendering. This only + applies if the field value is a list and no parameters were + sent through the form. + + * `:skip_hidden` - skip the automatic rendering of hidden + fields to allow for more tight control over the generated + markup. You can access `form.hidden` to generate them manually + within the supplied callback. + + """ + @spec inputs_for(t, field, (t -> Phoenix.HTML.unsafe())) :: Phoenix.HTML.safe() + @spec inputs_for(t, field, Keyword.t(), (t -> Phoenix.HTML.unsafe())) :: Phoenix.HTML.safe() + def inputs_for(%{impl: impl} = form, field, options \\ [], fun) + when is_atom(field) or is_binary(field) do + {skip, options} = Keyword.pop(options, :skip_hidden, false) + + options = + form.options + |> Keyword.take([:multipart]) + |> Keyword.merge(options) + + forms = impl.to_form(form.source, form, field, options) + + html_escape( + Enum.map(forms, fn form -> + if skip do + fun.(form) + else + [hidden_inputs_for(form), fun.(form)] + end + end) + ) + end + + @doc """ + Returns a value of a corresponding form field. + + The `form` should either be a `Phoenix.HTML.Form` emitted + by `form_for` or an atom. + + When a form is given, it will lookup for changes and then + fallback to parameters and finally fallback to the default + struct/map value. + + Since the function looks up parameter values too, there is + no guarantee that the value will have a certain type. For + example, a boolean field will be sent as "false" as a + parameter, and this function will return it as is. If you + need to normalize the result of `input_value`, the best + option is to call `html_escape` on it and compare the + resulting string. + """ + @spec input_value(t | atom, field) :: term + def input_value(%{source: source, impl: impl} = form, field) + when is_atom(field) or is_binary(field) do + try do + impl.input_value(source, form, field) + rescue + UndefinedFunctionError -> + case Map.fetch(form.params, field_to_string(field)) do + {:ok, value} -> + value + + :error -> + Map.get(form.data, field) + end + end + end + + def input_value(name, _field) when is_atom(name), do: nil + + @doc """ + Returns an id of a corresponding form field. + + The form should either be a `Phoenix.HTML.Form` emitted + by `form_for` or an atom. 
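+
+ ## Examples
+
+     iex> Phoenix.HTML.Form.input_id(:user, :first_name)
+     "user_first_name"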
+ """ + @spec input_id(t | atom, field) :: String.t() + def input_id(%{id: nil}, field), do: "#{field}" + + def input_id(%{id: id}, field) when is_atom(field) or is_binary(field) do + "#{id}_#{field}" + end + + def input_id(name, field) when (is_atom(name) and is_atom(field)) or is_binary(field) do + "#{name}_#{field}" + end + + @doc """ + Returns an id of a corresponding form field and value attached to it. + + Useful for radio buttons and inputs like multiselect checkboxes. + """ + @spec input_id(t | atom, field, Phoenix.HTML.Safe.t()) :: String.t() + def input_id(name, field, value) do + {:safe, value} = html_escape(value) + value_id = value |> IO.iodata_to_binary() |> String.replace(~r/\W/u, "_") + input_id(name, field) <> "_" <> value_id + end + + @doc """ + Returns a name of a corresponding form field. + + The first argument should either be a `Phoenix.HTML.Form` emitted + by `form_for` or an atom. + + ## Examples + + iex> Phoenix.HTML.Form.input_name(:user, :first_name) + "user[first_name]" + """ + @spec input_name(t | atom, field) :: String.t() + def input_name(form_or_name, field) + + def input_name(%{name: nil}, field), do: to_string(field) + + def input_name(%{name: name}, field) when is_atom(field) or is_binary(field), + do: "#{name}[#{field}]" + + def input_name(name, field) when (is_atom(name) and is_atom(field)) or is_binary(field), + do: "#{name}[#{field}]" + + @doc """ + Returns the HTML5 validations that would apply to + the given field. + """ + @spec input_validations(t, field) :: Keyword.t() + def input_validations(%{source: source, impl: impl} = form, field) + when is_atom(field) or is_binary(field) do + impl.input_validations(source, form, field) + end + + @mapping %{ + "url" => :url_input, + "email" => :email_input, + "search" => :search_input, + "password" => :password_input + } + + @doc """ + Gets the input type for a given field. + + If the underlying input type is a `:text_field`, + a mapping could be given to further inflect + the input type based solely on the field name. + The default mapping is: + + %{"url" => :url_input, + "email" => :email_input, + "search" => :search_input, + "password" => :password_input} + + """ + @spec input_type(t, field) :: atom + def input_type(%{impl: impl, source: source} = form, field, mapping \\ @mapping) + when is_atom(field) or is_binary(field) do + type = impl.input_type(source, form, field) + + if type == :text_input do + field = field_to_string(field) + + Enum.find_value(mapping, type, fn {k, v} -> + String.contains?(field, k) && v + end) + else + type + end + end + + ## Form helpers + + @doc """ + Generates a text input. + + The form should either be a `Phoenix.HTML.Form` emitted + by `form_for` or an atom. + + All given options are forwarded to the underlying input, + default values are provided for id, name and value if + possible. + + ## Examples + + # Assuming form contains a User schema + text_input(form, :name) + #=> + + text_input(:user, :name) + #=> + + """ + def text_input(form, field, opts \\ []) do + generic_input(:text, form, field, opts) + end + + @doc """ + Generates a hidden input. + + See `text_input/3` for example and docs. + """ + def hidden_input(form, field, opts \\ []) do + generic_input(:hidden, form, field, opts) + end + + @doc """ + Generates hidden inputs for the given form. 
+ """ + @spec hidden_inputs_for(t) :: list(Phoenix.HTML.safe()) + def hidden_inputs_for(form) do + Enum.flat_map(form.hidden, fn {k, v} -> + hidden_inputs_for(form, k, v) + end) + end + + defp hidden_inputs_for(form, k, values) when is_list(values) do + id = input_id(form, k) + name = input_name(form, k) + + for {v, index} <- Enum.with_index(values) do + hidden_input(form, k, + id: id <> "_" <> Integer.to_string(index), + name: name <> "[]", + value: v + ) + end + end + + defp hidden_inputs_for(form, k, v) do + [hidden_input(form, k, value: v)] + end + + @doc """ + Generates an email input. + + See `text_input/3` for example and docs. + """ + def email_input(form, field, opts \\ []) do + generic_input(:email, form, field, opts) + end + + @doc """ + Generates a number input. + + See `text_input/3` for example and docs. + """ + def number_input(form, field, opts \\ []) do + generic_input(:number, form, field, opts) + end + + @doc """ + Generates a password input. + + For security reasons, the form data and parameter values + are never re-used in `password_input/3`. Pass the value + explicitly if you would like to set one. + + See `text_input/3` for example and docs. + """ + def password_input(form, field, opts \\ []) do + opts = + opts + |> Keyword.put_new(:type, "password") + |> Keyword.put_new(:id, input_id(form, field)) + |> Keyword.put_new(:name, input_name(form, field)) + + tag(:input, opts) + end + + @doc """ + Generates an url input. + + See `text_input/3` for example and docs. + """ + def url_input(form, field, opts \\ []) do + generic_input(:url, form, field, opts) + end + + @doc """ + Generates a search input. + + See `text_input/3` for example and docs. + """ + def search_input(form, field, opts \\ []) do + generic_input(:search, form, field, opts) + end + + @doc """ + Generates a telephone input. + + See `text_input/3` for example and docs. + """ + def telephone_input(form, field, opts \\ []) do + generic_input(:tel, form, field, opts) + end + + @doc """ + Generates a color input. + + Warning: this feature isn't available in all browsers. + Check `http://caniuse.com/#feat=input-color` for further information. + + See `text_input/3` for example and docs. + """ + def color_input(form, field, opts \\ []) do + generic_input(:color, form, field, opts) + end + + @doc """ + Generates a range input. + + See `text_input/3` for example and docs. + """ + def range_input(form, field, opts \\ []) do + generic_input(:range, form, field, opts) + end + + @doc """ + Generates a date input. + + Warning: this feature isn't available in all browsers. + Check `http://caniuse.com/#feat=input-datetime` for further information. + + See `text_input/3` for example and docs. + """ + def date_input(form, field, opts \\ []) do + generic_input(:date, form, field, opts) + end + + @doc """ + Generates a datetime-local input. + + Warning: this feature isn't available in all browsers. + Check `http://caniuse.com/#feat=input-datetime` for further information. + + See `text_input/3` for example and docs. + """ + def datetime_local_input(form, field, opts \\ []) do + value = Keyword.get(opts, :value, input_value(form, field)) + opts = Keyword.put(opts, :value, datetime_local_input_value(value)) + + generic_input(:"datetime-local", form, field, opts) + end + + defp datetime_local_input_value(%struct{} = value) when struct in [NaiveDateTime, DateTime] do + <> = struct.to_string(value) + + [date, ?T, hour_minute] + end + + defp datetime_local_input_value(other), do: other + + @doc """ + Generates a time input. 
+
+  Warning: this feature isn't available in all browsers.
+  Check `http://caniuse.com/#feat=input-datetime` for further information.
+
+  ## Options
+
+    * `:precision` - Allowed values: `:minute`, `:second`, `:millisecond`.
+      Defaults to `:minute`.
+
+  All other options are forwarded. See `text_input/3` for example and docs.
+
+  ## Examples
+
+      time_input form, :time
+      #=> <input id="user_time" name="user[time]" type="time" value="23:00">
+
+      time_input form, :time, precision: :second
+      #=> <input id="user_time" name="user[time]" type="time" value="23:00:00">
+
+      time_input form, :time, precision: :millisecond
+      #=> <input id="user_time" name="user[time]" type="time" value="23:00:00.000">
+  """
+  def time_input(form, field, opts \\ []) do
+    {precision, opts} = Keyword.pop(opts, :precision, :minute)
+    value = opts[:value] || input_value(form, field)
+    opts = Keyword.put(opts, :value, truncate_time(value, precision))
+
+    generic_input(:time, form, field, opts)
+  end
+
+  defp truncate_time(%Time{} = time, :minute) do
+    time
+    |> Time.to_string()
+    |> String.slice(0, 5)
+  end
+
+  defp truncate_time(%Time{} = time, precision) do
+    time
+    |> Time.truncate(precision)
+    |> Time.to_string()
+  end
+
+  defp truncate_time(value, _), do: value
+
+  defp generic_input(type, form, field, opts)
+       when is_list(opts) and (is_atom(field) or is_binary(field)) do
+    opts =
+      opts
+      |> Keyword.put_new(:type, type)
+      |> Keyword.put_new(:id, input_id(form, field))
+      |> Keyword.put_new(:name, input_name(form, field))
+      |> Keyword.put_new(:value, input_value(form, field))
+      |> Keyword.update!(:value, &maybe_html_escape/1)
+
+    tag(:input, opts)
+  end
+
+  defp maybe_html_escape(nil), do: nil
+  defp maybe_html_escape(value), do: html_escape(value)
+
+  @doc """
+  Generates a textarea input.
+
+  All given options are forwarded to the underlying input,
+  default values are provided for id, name and textarea
+  content if possible.
+
+  ## Examples
+
+      # Assuming form contains a User schema
+      textarea(form, :description)
+      #=> <textarea id="user_description" name="user[description]"></textarea>
+
+  ## New lines
+
+  Notice the generated textarea includes a new line after
+  the opening tag. This is because the HTML spec says new
+  lines after tags must be ignored, and all major browser
+  implementations do that.
+
+  So in order to avoid new lines provided by the user
+  from being ignored when the form is resubmitted, we
+  automatically add a new line before the textarea
+  value.
+  """
+  def textarea(form, field, opts \\ []) do
+    opts =
+      opts
+      |> Keyword.put_new(:id, input_id(form, field))
+      |> Keyword.put_new(:name, input_name(form, field))
+
+    {value, opts} = Keyword.pop(opts, :value, input_value(form, field))
+    content_tag(:textarea, ["\n", html_escape(value || "")], opts)
+  end
+
+  @doc """
+  Generates a file input.
+
+  It requires the given form to be configured with `multipart: true`
+  when invoking `form_for/4`, otherwise it fails with `ArgumentError`.
+
+  See `text_input/3` for example and docs.
+  """
+  def file_input(form, field, opts \\ []) do
+    if match?(%Form{}, form) and !form.options[:multipart] do
+      raise ArgumentError,
+            "file_input/3 requires the enclosing form_for/4 " <>
+              "to be configured with multipart: true"
+    end
+
+    opts =
+      opts
+      |> Keyword.put_new(:type, :file)
+      |> Keyword.put_new(:id, input_id(form, field))
+      |> Keyword.put_new(:name, input_name(form, field))
+
+    opts =
+      if opts[:multiple] do
+        Keyword.update!(opts, :name, &"#{&1}[]")
+      else
+        opts
+      end
+
+    tag(:input, opts)
+  end
+
+  @doc """
+  Generates a submit button to send the form.
+
+  ## Examples
+
+      submit do: "Submit"
+      #=> <button type="submit">Submit</button>
+
+  """
+  def submit([do: _] = block_option), do: submit([], block_option)
+
+  @doc """
+  Generates a submit button to send the form.
+
+  All options are forwarded to the underlying button tag.
+  When called with a `do:` block, the button tag options
+  come first.
+
+  ## Examples
+
+      submit "Submit"
+      #=> <button type="submit">Submit</button>
+
+      submit "Submit", class: "btn"
+      #=> <button class="btn" type="submit">Submit</button>
+
+      submit [class: "btn"], do: "Submit"
+      #=> <button class="btn" type="submit">Submit</button>
+
+  """
+  def submit(value, opts \\ [])
+
+  def submit(opts, [do: _] = block_option) do
+    opts = Keyword.put_new(opts, :type, "submit")
+
+    content_tag(:button, opts, block_option)
+  end
+
+  def submit(value, opts) do
+    opts = Keyword.put_new(opts, :type, "submit")
+
+    content_tag(:button, value, opts)
+  end
+
+  @doc """
+  Generates a reset input to reset all the form fields to
+  their original state.
+
+  All options are forwarded to the underlying input tag.
+
+  ## Examples
+
+      reset "Reset"
+      #=> <input type="reset" value="Reset">
+
+      reset "Reset", class: "btn"
+      #=> <input class="btn" type="reset" value="Reset">
+
+  """
+  def reset(value, opts \\ []) do
+    opts =
+      opts
+      |> Keyword.put_new(:type, "reset")
+      |> Keyword.put_new(:value, value)
+
+    tag(:input, opts)
+  end
+
+  @doc """
+  Generates a radio button.
+
+  Invoke this function for each possible value you want
+  to be sent to the server.
+
+  ## Examples
+
+      # Assuming form contains a User schema
+      radio_button(form, :role, "admin")
+      #=> <input id="user_role_admin" name="user[role]" type="radio" value="admin">
+
+  ## Options
+
+  All options are simply forwarded to the underlying HTML tag.
+  """
+  def radio_button(form, field, value, opts \\ []) do
+    escaped_value = html_escape(value)
+
+    opts =
+      opts
+      |> Keyword.put_new(:type, "radio")
+      |> Keyword.put_new(:id, input_id(form, field, escaped_value))
+      |> Keyword.put_new(:name, input_name(form, field))
+
+    opts =
+      if escaped_value == html_escape(input_value(form, field)) do
+        Keyword.put_new(opts, :checked, true)
+      else
+        opts
+      end
+
+    tag(:input, [value: escaped_value] ++ opts)
+  end
+
+  @doc """
+  Generates a checkbox.
+
+  This function is useful for sending boolean values to the server.
+
+  ## Examples
+
+      # Assuming form contains a User schema
+      checkbox(form, :famous)
+      #=> <input name="user[famous]" type="hidden" value="false">
+      #=> <input id="user_famous" name="user[famous]" type="checkbox" value="true">
+
+  ## Options
+
+    * `:checked_value` - the value to be sent when the checkbox is checked.
+      Defaults to "true".
+
+    * `:hidden_input` - controls if this function will generate a hidden input
+      to submit the unchecked value or not. Defaults to "true".
+
+    * `:unchecked_value` - the value to be sent when the checkbox is unchecked.
+      Defaults to "false".
+
+    * `:value` - the value used to check if a checkbox is checked or unchecked.
+      The default value is extracted from the form data if available.
+
+  All other options are forwarded to the underlying HTML tag.
+
+  ## Hidden fields
+
+  Because an unchecked checkbox is not sent to the server, Phoenix
+  automatically generates a hidden field with the `unchecked_value`
+  *before* the checkbox field to ensure the `unchecked_value` is sent
+  when the checkbox is not marked. Set `:hidden_input` to `false` if you
+  don't want to send the unchecked value to the server.
+  """
+  def checkbox(form, field, opts \\ []) do
+    opts =
+      opts
+      |> Keyword.put_new(:type, "checkbox")
+      |> Keyword.put_new(:id, input_id(form, field))
+      |> Keyword.put_new(:name, input_name(form, field))
+
+    {value, opts} = Keyword.pop(opts, :value, input_value(form, field))
+    {checked_value, opts} = Keyword.pop(opts, :checked_value, true)
+    {unchecked_value, opts} = Keyword.pop(opts, :unchecked_value, false)
+    {hidden_input, opts} = Keyword.pop(opts, :hidden_input, true)
+
+    # We html escape all values to be sure we are comparing
+    # apples to apples. After all we may have true in the data
+    # but "true" in the params and both need to match.
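+    # For example, a changeset may hold the boolean `true` while the submitted
+    # params hold the string "true"; both escape to the same iodata, so the
+    # comparison below treats them as equal.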
+    value = html_escape(value)
+    checked_value = html_escape(checked_value)
+    unchecked_value = html_escape(unchecked_value)
+
+    opts =
+      if value == checked_value do
+        Keyword.put_new(opts, :checked, true)
+      else
+        opts
+      end
+
+    if hidden_input do
+      hidden_opts = [type: "hidden", value: unchecked_value]
+
+      html_escape([
+        tag(:input, hidden_opts ++ Keyword.take(opts, [:name, :disabled, :form])),
+        tag(:input, [value: checked_value] ++ opts)
+      ])
+    else
+      html_escape([
+        tag(:input, [value: checked_value] ++ opts)
+      ])
+    end
+  end
+
+  @doc """
+  Generates a select tag with the given `options`.
+
+  `options` are expected to be an enumerable which will be used to
+  generate each respective `option`. The enumerable may have:
+
+    * keyword lists - each keyword list is expected to have the keys
+      `:key` and `:value`. Additional keys such as `:disabled` may
+      be given to customize the option.
+
+    * two-item tuples - where the first element is an atom, string or
+      integer to be used as the option label and the second element is
+      an atom, string or integer to be used as the option value.
+
+    * atom, string or integer - which will be used as both label and value
+      for the generated select.
+
+  ## Optgroups
+
+  If `options` is a map or keyword list where the first element is a string,
+  atom or integer and the second element is a list or a map, it is assumed
+  the key will be wrapped in an `<optgroup>` and the value will be used to
+  generate `<option>` tags nested under the group.
+
+  ## Examples
+
+      # Assuming form contains a User schema
+      select(form, :age, 0..120)
+      #=> <select id="user_age" name="user[age]">
+      #=>   <option value="0">0</option>
+      #=>   ...
+      #=>   <option value="120">120</option>
+      #=> </select>
+
+      select(form, :role, ["Admin": "admin", "User": "user"])
+      #=> <select id="user_role" name="user[role]">
+      #=>   <option value="admin">Admin</option>
+      #=>   <option value="user">User</option>
+      #=> </select>
+
+      select(form, :role, [[key: "Admin", value: "admin", disabled: true],
+                           [key: "User", value: "user"]])
+      #=> <select id="user_role" name="user[role]">
+      #=>   <option value="admin" disabled>Admin</option>
+      #=>   <option value="user">User</option>
+      #=> </select>
+
+  You can also pass a prompt:
+
+      select(form, :role, ["Admin": "admin", "User": "user"], prompt: "Choose your role")
+      #=> <select id="user_role" name="user[role]">
+      #=>   <option value="">Choose your role</option>
+      #=>   <option value="admin">Admin</option>
+      #=>   <option value="user">User</option>
+      #=> </select>
+
+  And customize the prompt as any other entry:
+
+      select(form, :role, ["Admin": "admin", "User": "user"], prompt: [key: "Choose your role", disabled: true])
+      #=> <select id="user_role" name="user[role]">
+      #=>   <option value="" disabled>Choose your role</option>
+      #=>   <option value="admin">Admin</option>
+      #=>   <option value="user">User</option>
+      #=> </select>
+
+  If you want to select an option that comes from the database,
+  such as a manager for a given project, you may write:
+
+      select(form, :manager_id, Enum.map(@managers, &{&1.name, &1.id}))
+      #=> <select id="project_manager_id" name="project[manager_id]">...</select>
+
+  Finally, if the values are a list or a map, we use the keys for
+  grouping:
+
+      select(form, :country, ["Europe": ["UK", "Sweden", "France"]], ...)
+      #=> <select id="user_country" name="user[country]">
+      #=>   <optgroup label="Europe">
+      #=>     <option value="UK">UK</option>
+      #=>     <option value="Sweden">Sweden</option>
+      #=>     <option value="France">France</option>
+      #=>   </optgroup>
+      #=> </select>
+
+  ## Options
+
+    * `:prompt` - an option to include at the top of the options. It may be
+      a string or a keyword list of attributes and the `:key`.
+
+    * `:selected` - the default value to use when none was sent as parameter
+      (see the example at the end of this section).
+
+  Be aware that a `:multiple` option will not generate a correctly
+  functioning multiple select element. Use `multiple_select/4` instead.
+
+  All other options are forwarded to the underlying HTML tag.
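+
+  For example, to mark a default choice when no parameter was submitted
+  (the "admin" value below is illustrative):
+
+      select(form, :role, ["Admin": "admin", "User": "user"], selected: "admin")
+      #=> <select id="user_role" name="user[role]">
+      #=>   <option value="admin" selected>Admin</option>
+      #=>   <option value="user">User</option>
+      #=> </select>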
+ """ + def select(form, field, options, opts \\ []) when is_atom(field) or is_binary(field) do + {selected, opts} = selected(form, field, opts) + options_html = options_for_select(options, selected) + + {options_html, opts} = + case Keyword.pop(opts, :prompt) do + {nil, opts} -> {options_html, opts} + {prompt, opts} -> {[prompt_option(prompt) | options_html], opts} + end + + opts = + opts + |> Keyword.put_new(:id, input_id(form, field)) + |> Keyword.put_new(:name, input_name(form, field)) + + content_tag(:select, options_html, opts) + end + + defp prompt_option(prompt) when is_list(prompt) do + {prompt_key, prompt_opts} = Keyword.pop(prompt, :key) + + prompt_key || + raise ArgumentError, + "expected :key key when building a prompt select option with a keyword list: " <> + inspect(prompt) + + prompt_option(prompt_key, prompt_opts) + end + + defp prompt_option(key) when is_binary(key), do: prompt_option(key, []) + + defp prompt_option(key, opts) when is_list(opts) do + content_tag(:option, key, Keyword.put_new(opts, :value, "")) + end + + @doc """ + Returns options to be used inside a select. + + This is useful when building the select by hand. + It expects all options and one or more select values. + + ## Examples + + options_for_select(["Admin": "admin", "User": "user"], "admin") + #=> + #=> + + Groups are also supported: + + options_for_select(["Europe": ["UK", "Sweden", "France"], ...], nil) + #=> + #=> + #=> + #=> + #=> + + """ + def options_for_select(options, selected_values) do + {:safe, + escaped_options_for_select( + options, + selected_values |> List.wrap() |> Enum.map(&html_escape/1) + )} + end + + defp escaped_options_for_select(options, selected_values) do + Enum.reduce(options, [], fn + {option_key, option_value}, acc -> + [acc | option(option_key, option_value, [], selected_values)] + + options, acc when is_list(options) -> + {option_key, options} = Keyword.pop(options, :key) + + option_key || + raise ArgumentError, + "expected :key key when building