From 1dad781140663c1b8a0892f5a5105cbb102dd878 Mon Sep 17 00:00:00 2001
From: Bogdan Pintea
Date: Mon, 12 Aug 2019 13:57:42 +0200
Subject: [PATCH] Introduce CBOR format support for REST payloads (#169)

* Squashed 'libs/tinycbor/' content from commit d2dd95c

git-subtree-dir: libs/tinycbor
git-subtree-split: d2dd95cb8841d88d5a801e3ef9c328fd6200e7bd

* add tinycbor library to the project

With this commit CMake will add the tinycbor files to the sources to be
compiled into the driver. CMake will exclude two files:
- open_memstream.c, which won't be compiled by MSVC (the
  WITHOUT_OPEN_MEMSTREAM compilation flag doesn't exclude its source);
- cborparser.c, which will be copied (and patched) under the build folder.
The patching of the latter file adds one function that allows copy-free
extraction of text/byte strings.

* introduce CBOR format support for REST payloads

This commit adds basic support for CBOR encapsulation, as an alternative to
JSON. The format to use is connection-specific and configured in the
connection string.
The introduction stops short of supporting encoding/decoding of the
parameters and result set values: it allows building a non-parameterized
query and decoding the response object (either with a result set or with an
error), but without unpacking the values in the result set of the latter.
Server version querying is also carried over CBOR.
All communication is done over either JSON or CBOR, although the driver will
carry on if it receives a JSON response to a CBOR request (or the other way
around). However, driver-generated "fake" responses to catalog queries are
always JSON-formatted (this makes maintaining the text responses easier).
The commit also enhances the support for Elasticsearch-formatted errors
(both JSON and CBOR): the "error" parameter will be parsed if it is of a map
type (generally an ES/SQL error), or passed on as-is if it is of a string
type. The previous behavior was to abandon parsing if "error" wasn't a map
and to present the entire error answer to the user; this wouldn't work well
with a CBOR object.

* fix a copy&paste define error

- the define is not yet used, though.

* add the legal files for tinycbor as 3rd party lib.

- add to the repo the files necessary to generate the legal notices and
  reports.

* addressing PR review comments

- reducing code duplication on srv. version checking;
- fixing a couple of comment typos;
- assigning const strings to named vars.
(cherry picked from commit 992f0e00590e46c75500b884630de05ae0c87b48) --- CMakeLists.txt | 30 +- devtools/3rd_party/licenses/tinycbor-INFO.csv | 2 + .../3rd_party/licenses/tinycbor-LICENSE.txt | 21 + .../3rd_party/licenses/tinycbor-NOTICE.txt | 0 driver/catalogue.c | 26 +- driver/connect.c | 349 ++- driver/connect.h | 3 +- driver/convert.c | 20 +- driver/handles.c | 2 +- driver/handles.h | 35 +- driver/odbc.c | 19 +- driver/queries.c | 1359 +++++++--- driver/queries.h | 68 +- driver/tinycbor.c | 287 +++ driver/tinycbor.h | 92 + driver/util.c | 33 +- driver/util.h | 31 +- libs/tinycbor/.appveyor.yml | 35 + libs/tinycbor/.gitattributes | 4 + libs/tinycbor/.gitignore | 81 + libs/tinycbor/.tag | 1 + libs/tinycbor/.travis.yml | 84 + libs/tinycbor/Doxyfile | 49 + libs/tinycbor/LICENSE | 21 + libs/tinycbor/Makefile | 240 ++ libs/tinycbor/Makefile.configure | 35 + libs/tinycbor/Makefile.nmake | 47 + libs/tinycbor/README | 13 + libs/tinycbor/TODO | 25 + libs/tinycbor/VERSION | 1 + libs/tinycbor/examples/examples.pro | 2 + libs/tinycbor/examples/simplereader.c | 181 ++ libs/tinycbor/examples/simplereader.pro | 3 + libs/tinycbor/scripts/maketag.pl | 91 + libs/tinycbor/scripts/update-docs.sh | 52 + libs/tinycbor/src/cbor.dox | 123 + libs/tinycbor/src/cbor.h | 606 +++++ libs/tinycbor/src/cborencoder.c | 645 +++++ .../src/cborencoder_close_container_checked.c | 57 + libs/tinycbor/src/cborerrorstrings.c | 182 ++ libs/tinycbor/src/cborinternal_p.h | 161 ++ libs/tinycbor/src/cborjson.h | 62 + libs/tinycbor/src/cborparser.c | 1430 +++++++++++ libs/tinycbor/src/cborparser_dup_string.c | 119 + libs/tinycbor/src/cborpretty.c | 580 +++++ libs/tinycbor/src/cborpretty_stdio.c | 87 + libs/tinycbor/src/cbortojson.c | 699 ++++++ libs/tinycbor/src/cborvalidation.c | 670 +++++ libs/tinycbor/src/compilersupport_p.h | 205 ++ libs/tinycbor/src/open_memstream.c | 114 + libs/tinycbor/src/parsetags.pl | 116 + libs/tinycbor/src/src.pri | 16 + libs/tinycbor/src/tags.txt | 23 + libs/tinycbor/src/tinycbor-version.h | 3 + libs/tinycbor/src/tinycbor.pro | 6 + libs/tinycbor/src/utf8_p.h | 104 + libs/tinycbor/tests/.gitignore | 15 + libs/tinycbor/tests/c90/c90.pro | 7 + libs/tinycbor/tests/c90/tst_c90.c | 30 + libs/tinycbor/tests/cpp/cpp.pro | 5 + libs/tinycbor/tests/cpp/tst_cpp.cpp | 42 + libs/tinycbor/tests/encoder/encoder.pro | 9 + libs/tinycbor/tests/encoder/tst_encoder.cpp | 734 ++++++ libs/tinycbor/tests/parser/parser.pro | 10 + libs/tinycbor/tests/parser/tst_parser.cpp | 2182 +++++++++++++++++ libs/tinycbor/tests/tests.pro | 3 + libs/tinycbor/tests/tojson/tojson.pro | 8 + libs/tinycbor/tests/tojson/tst_tojson.cpp | 721 ++++++ libs/tinycbor/tinycbor.pc.in | 11 + libs/tinycbor/tools/Makefile | 12 + libs/tinycbor/tools/cbordump/cbordump.c | 164 ++ libs/tinycbor/tools/cbordump/cbordump.pro | 10 + libs/tinycbor/tools/json2cbor/json2cbor.c | 493 ++++ libs/tinycbor/tools/json2cbor/json2cbor.pro | 20 + test/connected_dbc.cc | 18 +- test/connected_dbc.h | 4 +- test/test_queries.cc | 4 +- 77 files changed, 13316 insertions(+), 536 deletions(-) create mode 100644 devtools/3rd_party/licenses/tinycbor-INFO.csv create mode 100644 devtools/3rd_party/licenses/tinycbor-LICENSE.txt create mode 100644 devtools/3rd_party/licenses/tinycbor-NOTICE.txt create mode 100644 driver/tinycbor.c create mode 100644 driver/tinycbor.h create mode 100644 libs/tinycbor/.appveyor.yml create mode 100644 libs/tinycbor/.gitattributes create mode 100644 libs/tinycbor/.gitignore create mode 100644 libs/tinycbor/.tag create mode 100644 libs/tinycbor/.travis.yml 
create mode 100644 libs/tinycbor/Doxyfile create mode 100644 libs/tinycbor/LICENSE create mode 100644 libs/tinycbor/Makefile create mode 100644 libs/tinycbor/Makefile.configure create mode 100644 libs/tinycbor/Makefile.nmake create mode 100644 libs/tinycbor/README create mode 100644 libs/tinycbor/TODO create mode 100644 libs/tinycbor/VERSION create mode 100644 libs/tinycbor/examples/examples.pro create mode 100644 libs/tinycbor/examples/simplereader.c create mode 100644 libs/tinycbor/examples/simplereader.pro create mode 100644 libs/tinycbor/scripts/maketag.pl create mode 100755 libs/tinycbor/scripts/update-docs.sh create mode 100644 libs/tinycbor/src/cbor.dox create mode 100644 libs/tinycbor/src/cbor.h create mode 100644 libs/tinycbor/src/cborencoder.c create mode 100644 libs/tinycbor/src/cborencoder_close_container_checked.c create mode 100644 libs/tinycbor/src/cborerrorstrings.c create mode 100644 libs/tinycbor/src/cborinternal_p.h create mode 100644 libs/tinycbor/src/cborjson.h create mode 100644 libs/tinycbor/src/cborparser.c create mode 100644 libs/tinycbor/src/cborparser_dup_string.c create mode 100644 libs/tinycbor/src/cborpretty.c create mode 100644 libs/tinycbor/src/cborpretty_stdio.c create mode 100644 libs/tinycbor/src/cbortojson.c create mode 100644 libs/tinycbor/src/cborvalidation.c create mode 100644 libs/tinycbor/src/compilersupport_p.h create mode 100644 libs/tinycbor/src/open_memstream.c create mode 100755 libs/tinycbor/src/parsetags.pl create mode 100644 libs/tinycbor/src/src.pri create mode 100644 libs/tinycbor/src/tags.txt create mode 100644 libs/tinycbor/src/tinycbor-version.h create mode 100644 libs/tinycbor/src/tinycbor.pro create mode 100644 libs/tinycbor/src/utf8_p.h create mode 100644 libs/tinycbor/tests/.gitignore create mode 100644 libs/tinycbor/tests/c90/c90.pro create mode 100644 libs/tinycbor/tests/c90/tst_c90.c create mode 100644 libs/tinycbor/tests/cpp/cpp.pro create mode 100644 libs/tinycbor/tests/cpp/tst_cpp.cpp create mode 100644 libs/tinycbor/tests/encoder/encoder.pro create mode 100644 libs/tinycbor/tests/encoder/tst_encoder.cpp create mode 100644 libs/tinycbor/tests/parser/parser.pro create mode 100644 libs/tinycbor/tests/parser/tst_parser.cpp create mode 100644 libs/tinycbor/tests/tests.pro create mode 100644 libs/tinycbor/tests/tojson/tojson.pro create mode 100644 libs/tinycbor/tests/tojson/tst_tojson.cpp create mode 100644 libs/tinycbor/tinycbor.pc.in create mode 100644 libs/tinycbor/tools/Makefile create mode 100644 libs/tinycbor/tools/cbordump/cbordump.c create mode 100644 libs/tinycbor/tools/cbordump/cbordump.pro create mode 100644 libs/tinycbor/tools/json2cbor/json2cbor.c create mode 100644 libs/tinycbor/tools/json2cbor/json2cbor.pro diff --git a/CMakeLists.txt b/CMakeLists.txt index 616e8225..eb0e5abd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -283,6 +283,33 @@ add_custom_target(curlclean WORKING_DIRECTORY "${LIBCURL_PATH_SRC}/winbuild" ) +# +# add tinycbor to the project +# +set(TINYCBOR_PATH_SRC ${CMAKE_SOURCE_DIR}/libs/tinycbor CACHE PATH + "Lib tinycbor source path") +aux_source_directory(${TINYCBOR_PATH_SRC}/src DRV_SRC) +list(FILTER DRV_SRC EXCLUDE REGEX .*open_memstream.c$) # Win-unsupported +list(FILTER DRV_SRC EXCLUDE REGEX .*cborparser.c$) # to be patched +file(COPY ${TINYCBOR_PATH_SRC}/src/cborparser.c DESTINATION ${CMAKE_BINARY_DIR}) +# tinycbor doesn't expose (yet? #125) the text/binary string pointer, since the +# string can span multiple stream chunks. 
However, in our case the CBOR object +# is available entirely, so access to it can safely be had; this saves a +# superfluous allocation/copy. FIXME -- lib PR, proper exposure. +file(APPEND ${CMAKE_BINARY_DIR}/cborparser.c +" +CborError cbor_value_get_string_chunk(CborValue *it, + const void **bufferptr, size_t *len) +{ + CborError err = get_string_chunk(it, bufferptr, len); + return err != CborNoError ? err : preparse_next_value(it); +}") +aux_source_directory(${CMAKE_BINARY_DIR} DRV_SRC) +set(TINYCBOR_INC ${TINYCBOR_PATH_SRC}/src) +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /DWITHOUT_OPEN_MEMSTREAM") +# limit how deep the parser will recurse (current need: 3) +set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /DCBOR_PARSER_MAX_RECURSIONS=16") + # # Patch installer's and editor's AssemblyInfos with the version being built # @@ -339,7 +366,8 @@ add_library(${DRV_NAME} SHARED ${DRV_SRC} ${CMAKE_BINARY_DIR}/${DRV_NAME}.def target_compile_definitions(${DRV_NAME} PRIVATE "DRIVER_BUILD") add_dependencies(${DRV_NAME} dsneditor) include_directories(${ODBC_INC} ${DRV_SRC_DIR} ${LIBCURL_INC_PATH} - ${UJSON4C_INC} ${CTIMESTAMP_PATH_SRC} ${DSNEDITOR_INC_PATH}) + ${UJSON4C_INC} ${CTIMESTAMP_PATH_SRC} ${TINYCBOR_INC} + ${DSNEDITOR_INC_PATH}) target_link_libraries(${DRV_NAME} odbccp32 legacy_stdio_definitions ${DSNBND_LIB_BIN_DIR_BASE}-$/esdsnbnd${BARCH}${CMAKE_IMPORT_LIBRARY_SUFFIX} libcurl ${LIBCURL_WIN_LIBS}) diff --git a/devtools/3rd_party/licenses/tinycbor-INFO.csv b/devtools/3rd_party/licenses/tinycbor-INFO.csv new file mode 100644 index 00000000..88687655 --- /dev/null +++ b/devtools/3rd_party/licenses/tinycbor-INFO.csv @@ -0,0 +1,2 @@ +name,version,revision,url,license,copyright +tinycbor,,d2dd95c,https://github.com/intel/tinycbor.git,MIT,Copyright (c) 2017 Intel Corporation diff --git a/devtools/3rd_party/licenses/tinycbor-LICENSE.txt b/devtools/3rd_party/licenses/tinycbor-LICENSE.txt new file mode 100644 index 00000000..4aad977c --- /dev/null +++ b/devtools/3rd_party/licenses/tinycbor-LICENSE.txt @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Intel Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
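A brief illustration (not part of the patch) of what the cborparser.c addition above enables: since the driver always holds the complete encoded CBOR answer in memory, the premise stated in the CMakeLists.txt comment, the appended cbor_value_get_string_chunk() can hand back a pointer into that buffer instead of a duplicated string. The read_text_nocopy() helper below is hypothetical, named only for this sketch; the accessor's prototype is the one appended by the build.

    #include <stdint.h>
    #include "cbor.h"

    /* prototype of the accessor appended to cborparser.c at build time */
    CborError cbor_value_get_string_chunk(CborValue *it,
        const void **bufferptr, size_t *len);

    /* Hypothetical helper: read a top-level CBOR text string without
     * copying it out of the fully in-memory encoded buffer. */
    static CborError read_text_nocopy(const uint8_t *buf, size_t len,
        const char **str, size_t *cnt)
    {
        CborParser parser;
        CborValue it;
        CborError err;

        err = cbor_parser_init(buf, len, /*flags*/0, &parser, &it);
        if (err != CborNoError) {
            return err;
        }
        if (! cbor_value_is_text_string(&it)) {
            return CborErrorIllegalType;
        }
        /* returns a pointer into `buf` and advances the iterator */
        return cbor_value_get_string_chunk(&it, (const void **)str, cnt);
    }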
diff --git a/devtools/3rd_party/licenses/tinycbor-NOTICE.txt b/devtools/3rd_party/licenses/tinycbor-NOTICE.txt new file mode 100644 index 00000000..e69de29b diff --git a/driver/catalogue.c b/driver/catalogue.c index 2071678e..36567ea3 100644 --- a/driver/catalogue.c +++ b/driver/catalogue.c @@ -42,15 +42,15 @@ " CATALOG " ESODBC_STRING_DELIM WPFWP_LDESC ESODBC_STRING_DELIM -static SQLRETURN fake_answer(SQLHSTMT hstmt, const char *src, size_t cnt) +static SQLRETURN fake_answer(SQLHSTMT hstmt, cstr_st *answer) { - char *dup; + cstr_st fake = *answer; - if (! (dup = strdup(src))) { - ERRNH(hstmt, "OOM with %zu.", cnt); + if (! (fake.str = strdup(answer->str))) { + ERRNH(hstmt, "OOM with %zu.", fake.cnt); RET_HDIAGS(hstmt, SQL_STATE_HY001); } - return attach_answer(STMH(hstmt), dup, cnt); + return attach_answer(STMH(hstmt), &fake, /*is JSON*/TRUE); } @@ -86,10 +86,10 @@ SQLRETURN EsSQLStatisticsW( "\"rows\":[]" \ "}" /*INDENT-ON*/ + cstr_st statistics = CSTR_INIT(STATISTICS_EMPTY); INFOH(hstmt, "no statistics available."); - return fake_answer(hstmt, STATISTICS_EMPTY, - sizeof(STATISTICS_EMPTY) - /*\0*/1); + return fake_answer(hstmt, &statistics); # undef STATISTICS_EMPTY } @@ -614,11 +614,11 @@ SQLRETURN EsSQLSpecialColumnsW "\"rows\":[]" \ "}" /*INDENT-ON*/ + cstr_st special_cols = CSTR_INIT(SPECIAL_COLUMNS_EMPTY); INFOH(hstmt, "no special columns available."); - return fake_answer(hstmt, SPECIAL_COLUMNS_EMPTY, - sizeof(SPECIAL_COLUMNS_EMPTY) - /*\0*/1); + return fake_answer(hstmt, &special_cols); # undef SPECIAL_COLUMNS_EMPTY } @@ -661,10 +661,10 @@ SQLRETURN EsSQLForeignKeysW( "\"rows\":[]" \ "}" /*INDENT-ON*/ + cstr_st foreign_keys = CSTR_INIT(FOREIGN_KEYS_EMPTY); INFOH(hstmt, "no foreign keys supported."); - return fake_answer(hstmt, FOREIGN_KEYS_EMPTY, - sizeof(FOREIGN_KEYS_EMPTY) - /*\0*/1); + return fake_answer(hstmt, &foreign_keys); # undef FOREIGN_KEYS_EMPTY } @@ -692,10 +692,10 @@ SQLRETURN EsSQLPrimaryKeysW( "\"rows\":[]" \ "}" /*INDENT-ON*/ + cstr_st prim_keys = CSTR_INIT(PRIMARY_KEYS_EMPTY); INFOH(hstmt, "no primary keys supported."); - return fake_answer(hstmt, PRIMARY_KEYS_EMPTY, - sizeof(PRIMARY_KEYS_EMPTY) - /*\0*/1); + return fake_answer(hstmt, &prim_keys); # undef PRIMARY_KEYS_EMPTY } diff --git a/driver/connect.c b/driver/connect.c index 8ef744f7..115772d3 100644 --- a/driver/connect.c +++ b/driver/connect.c @@ -9,6 +9,8 @@ #include #include +#include "tinycbor.h" + #include "connect.h" #include "queries.h" #include "catalogue.h" @@ -25,8 +27,13 @@ #endif /*! CURL_STATICLIB*/ /* HTTP headers default for every request */ -#define HTTP_ACCEPT_JSON "Accept: application/json" -#define HTTP_CONTENT_TYPE_JSON "Content-Type: application/json; charset=utf-8" +#define HTTP_APP_CBOR "application/cbor" +#define HTTP_APP_JSON "application/json" +#define HTTP_ACCEPT_CBOR "Accept: " HTTP_APP_CBOR +#define HTTP_ACCEPT_JSON "Accept: " HTTP_APP_JSON +#define HTTP_CONTENT_TYPE_CBOR "Content-Type: " HTTP_APP_CBOR +#define HTTP_CONTENT_TYPE_JSON "Content-Type: " HTTP_APP_JSON \ + "; charset=utf-8" /* Elasticsearch/SQL data types */ /* 2 */ @@ -86,6 +93,8 @@ #define TYPE_IVL_MINUTE_TO_SECOND "INTERVAL_MINUTE_TO_SECOND" +#define ESINFO_KEY_VERSION "version" +#define ESINFO_KEY_NUMBER "number" /* structure for one row returned by the ES. * This is a mirror of elasticsearch_type, with length-or-indicator fields @@ -131,9 +140,11 @@ typedef struct { SQLLEN interval_precision_loi; } estype_row_st; /* - * HTTP headers used for all requests (Content-Type, Accept). 
+ * HTTP headers used for all requests (Content-Type, Accept), split by + * encodying type. */ -static struct curl_slist *http_headers = NULL; +static struct curl_slist *json_headers = NULL; +static struct curl_slist *cbor_headers = NULL; /* counter used to number DBC log files: * the files are stamped with time (@ second resolution) and PID, which is not * enough to avoid name clashes. */ @@ -165,11 +176,15 @@ BOOL connect_init() curl_info->ssl_version ? curl_info->ssl_version : "NONE"); } - http_headers = curl_slist_append(http_headers, HTTP_ACCEPT_JSON); - if (http_headers) { - http_headers = curl_slist_append(http_headers, HTTP_CONTENT_TYPE_JSON); + json_headers = curl_slist_append(json_headers, HTTP_ACCEPT_JSON); + if (json_headers) { + json_headers = curl_slist_append(json_headers, HTTP_CONTENT_TYPE_JSON); + } + cbor_headers = curl_slist_append(cbor_headers, HTTP_ACCEPT_CBOR); + if (cbor_headers) { + cbor_headers = curl_slist_append(cbor_headers, HTTP_CONTENT_TYPE_CBOR); } - if (! http_headers) { + if ((! json_headers) || (! cbor_headers)) { ERR("libcurl: failed to init headers."); return FALSE; } @@ -181,7 +196,8 @@ BOOL connect_init() void connect_cleanup() { DBG("cleaning up connection/transport."); - curl_slist_free_all(http_headers); + curl_slist_free_all(json_headers); + curl_slist_free_all(cbor_headers); curl_global_cleanup(); } @@ -426,8 +442,9 @@ static SQLRETURN dbc_curl_init(esodbc_dbc_st *dbc) goto err; } - /* set the Content-Type, Accept HTTP headers */ - dbc->curl_err = curl_easy_setopt(curl, CURLOPT_HTTPHEADER, http_headers); + /* set the HTTP headers: Content-Type, Accept */ + dbc->curl_err = curl_easy_setopt(curl, CURLOPT_HTTPHEADER, + dbc->pack_json ? json_headers : cbor_headers); if (dbc->curl_err != CURLE_OK) { ERRH(dbc, "libcurl: failed to set HTTP headers list."); goto err; @@ -572,7 +589,10 @@ static SQLRETURN dbc_curl_init(esodbc_dbc_st *dbc) return ret; } -static BOOL dbc_curl_perform(esodbc_dbc_st *dbc, long *code, cstr_st *resp) +/* Perform a HTTP request, on the (pre)prepared connection. + * Returns the HTTP code, response body (if any) and its type (if present). 
*/ +static BOOL dbc_curl_perform(esodbc_dbc_st *dbc, long *code, cstr_st *rsp_body, + char **cont_type) { curl_off_t xfer_tm_start, xfer_tm_total; @@ -582,8 +602,8 @@ static BOOL dbc_curl_perform(esodbc_dbc_st *dbc, long *code, cstr_st *resp) dbc->curl_err = curl_easy_perform(dbc->curl); /* copy answer references */ - resp->str = dbc->abuff; - resp->cnt = dbc->apos; + rsp_body->str = dbc->abuff; + rsp_body->cnt = dbc->apos; /* clear call-back members for next call */ dbc->abuff = NULL; @@ -599,6 +619,16 @@ static BOOL dbc_curl_perform(esodbc_dbc_st *dbc, long *code, cstr_st *resp) ERRH(dbc, "libcurl: failed to retrieve response code."); goto err; } + + if (rsp_body->cnt) { + dbc->curl_err = curl_easy_getinfo(dbc->curl, CURLINFO_CONTENT_TYPE, + cont_type); + if (dbc->curl_err != CURLE_OK) { + ERRH(dbc, "libcurl: failed to get Content-Type header."); + goto err; + } + } + if (curl_easy_getinfo(dbc->curl, CURLINFO_STARTTRANSFER_TIME_T, &xfer_tm_start) != CURLE_OK) { ERRH(dbc, "libcurl: failed to retrieve transfer start time."); @@ -610,9 +640,10 @@ static BOOL dbc_curl_perform(esodbc_dbc_st *dbc, long *code, cstr_st *resp) xfer_tm_total = 0; } - INFOH(dbc, "libcurl: request answered, received code %ld and %zu bytes" - " back; times(ms): start: %" CURL_FORMAT_CURL_OFF_T ".%03d, " - "total: %" CURL_FORMAT_CURL_OFF_T ".%03d).", *code, resp->cnt, + INFOH(dbc, "libcurl: request answered, received code %ld and %zu bytes of " + "type '%s' back; times(ms): start: %" CURL_FORMAT_CURL_OFF_T ".%03d, " + "total: %" CURL_FORMAT_CURL_OFF_T ".%03d).", + *code, rsp_body->cnt, *cont_type ? *cont_type : "", xfer_tm_start / 1000, (long)(xfer_tm_start % 1000), xfer_tm_total / 1000, (long)(xfer_tm_total % 1000)); @@ -624,6 +655,32 @@ static BOOL dbc_curl_perform(esodbc_dbc_st *dbc, long *code, cstr_st *resp) return FALSE; } +static SQLRETURN content_type_supported(esodbc_dbc_st *dbc, + const char *cont_type_val, BOOL *is_json) +{ + if (! cont_type_val) { + WARNH(dbc, "no content type provided; assuming '%s'.", + dbc->pack_json ? "JSON" : "CBOR"); + *is_json = dbc->pack_json; + return SQL_SUCCESS; + } + DBGH(dbc, "content type HTTP header: `%s`.", cont_type_val); + if (! strncasecmp(cont_type_val, HTTP_APP_JSON, + sizeof(HTTP_APP_JSON) - /*\0*/1)) { + *is_json = TRUE; + } else if (! strncasecmp(cont_type_val, HTTP_APP_CBOR, + sizeof(HTTP_APP_CBOR) - /*\0*/1)) { + *is_json = FALSE; + } else { + ERRH(dbc, "unsupported content type received: `%s` " + "(must be JSON or CBOR).", cont_type_val); + return post_c_diagnostic(dbc, SQL_STATE_08S01, + "Unsupported content type received", 0); + } + DBGH(dbc, "content of type: %s.", *is_json ? "JSON" : "CBOR"); + return SQL_SUCCESS; +} + static BOOL dbc_curl_add_post_body(esodbc_dbc_st *dbc, SQLULEN tout, const cstr_st *u8body) { @@ -666,21 +723,29 @@ static BOOL dbc_curl_add_post_body(esodbc_dbc_st *dbc, SQLULEN tout, } /* - * Sends a POST request with the given JSON object body. + * Sends a HTTP POST request with the given request body. 
*/ -SQLRETURN post_json(esodbc_stmt_st *stmt, int url_type, const cstr_st *u8body) +SQLRETURN curl_post(esodbc_stmt_st *stmt, int url_type, + const cstr_st *req_body) { SQLRETURN ret; CURLcode res = CURLE_OK; esodbc_dbc_st *dbc = HDRH(stmt)->dbc; SQLULEN tout; long code = -1; /* = no answer available */ - cstr_st resp = (cstr_st) { + cstr_st rsp_body = (cstr_st) { NULL, 0 }; + char *cont_type; + BOOL is_json; - DBGH(stmt, "POSTing JSON type %d: [%zd] `" LCPDL "`.", url_type, - u8body->cnt, LCSTR(u8body)); + if (dbc->pack_json) { + DBGH(stmt, "POSTing JSON type %d: [%zu] `" LCPDL "`.", url_type, + req_body->cnt, LCSTR(req_body)); + } else { + DBGH(stmt, "POSTing CBOR type %d: [%zu] `%s`.", url_type, + req_body->cnt, cstr_hex_dump(req_body)); + } ESODBC_MUX_LOCK(&dbc->curl_mux); @@ -704,14 +769,17 @@ SQLRETURN post_json(esodbc_stmt_st *stmt, int url_type, const cstr_st *u8body) tout = dbc->timeout < stmt->query_timeout ? stmt->query_timeout : dbc->timeout; - if (dbc_curl_add_post_body(dbc, tout, u8body) && - dbc_curl_perform(dbc, &code, &resp)) { - if (code == 200) { - if (resp.cnt) { + if (dbc_curl_add_post_body(dbc, tout, req_body) && + dbc_curl_perform(dbc, &code, &rsp_body, &cont_type)) { + ret = content_type_supported(dbc, cont_type, &is_json); + if (! SQL_SUCCEEDED(ret)) { + code = -1; /* make answer unavailable */ + } else if (code == 200) { + if (rsp_body.cnt) { ESODBC_MUX_UNLOCK(&dbc->curl_mux); return (url_type == ESODBC_CURL_QUERY) ? - attach_answer(stmt, resp.str, resp.cnt) : - close_es_answ_handler(stmt, resp.str, resp.cnt); + attach_answer(stmt, &rsp_body, is_json) : + close_es_answ_handler(stmt, &rsp_body, is_json); } else { ERRH(stmt, "received 200 response code with empty body."); ret = post_c_diagnostic(dbc, SQL_STATE_08S01, @@ -722,12 +790,12 @@ SQLRETURN post_json(esodbc_stmt_st *stmt, int url_type, const cstr_st *u8body) ret = dbc_curl_post_diag(dbc, SQL_STATE_08S01); } - /* something went wrong */ + /* something went wrong, reset cURL handle/connection */ cleanup_curl(dbc); err: /* was there an error answer received correctly? */ if (0 < code) { - ret = attach_error(stmt, &resp, code); + ret = attach_error(stmt, &rsp_body, is_json, code); } else { /* copy any error occured at DBC level back down to the statement, * where it's going to be read from. */ @@ -740,9 +808,9 @@ SQLRETURN post_json(esodbc_stmt_st *stmt, int url_type, const cstr_st *u8body) /* an answer might have been received, but a late curl error (like * fetching the result code) could have occurred. */ - if (resp.str) { - free(resp.str); - resp.str = NULL; + if (rsp_body.str) { + free(rsp_body.str); + rsp_body.str = NULL; } return ret; @@ -1427,21 +1495,132 @@ void cleanup_dbc(esodbc_dbc_st *dbc) } } +static BOOL parse_es_version_cbor(esodbc_dbc_st *dbc, cstr_st *rsp_body, + cstr_st *version) +{ + CborParser parser; + CborValue top_obj, iter_top, iter_ver, val; + CborError res; + CborType obj_type; + +# define CHK_RES(_fmt, ...) 
\ + JUMP_ON_CBOR_ERR(res, err, dbc, _fmt, __VA_ARGS__) + + res = cbor_parser_init(rsp_body->str, rsp_body->cnt, ES_CBOR_PARSE_FLAGS, + &parser, &top_obj); + CHK_RES("failed to parse CBOR object: [%zu] `%s`", + rsp_body->cnt, cstr_hex_dump(rsp_body)); +# ifndef NDEBUG +# if 0 // ES uses indefinite-length containers (TODO) which trips this check + /* the _init() doesn't actually validate the object */ + res = cbor_value_validate(&top_obj, ES_CBOR_PARSE_FLAGS); + CHK_RES(stmt, "failed to validate CBOR object: [%zu] `%s`", + stmt->rset.body.cnt, cstr_hex_dump(&stmt->rset.body)); +# endif /*0*/ +# endif /* !NDEBUG */ + + if ((obj_type = cbor_value_get_type(&top_obj)) != CborMapType) { + ERRH(dbc, "top object (of type 0x%x) is not a map.", obj_type); + return FALSE; + } + res = cbor_value_enter_container(&top_obj, &iter_top); + CHK_RES("failed to enter top map"); + + /* search for the `version` parameter in top map */ + res = cbor_map_advance_to_key(&iter_top, ESINFO_KEY_VERSION, + sizeof(ESINFO_KEY_VERSION) - 1, &val); + CHK_RES("failed to lookup '" ESINFO_KEY_VERSION "' key in map"); + if (! cbor_value_is_valid(&val)) { + ERRH(dbc, "parameter '" ESINFO_KEY_VERSION "' not found in top map."); + return FALSE; + } + if ((obj_type = cbor_value_get_type(&iter_top)) != CborMapType) { + ERRH(dbc, "'" ESINFO_KEY_VERSION "' param's value type (0x%x) is not a" + " map.", obj_type); + return FALSE; + } + res = cbor_value_enter_container(&iter_top, &iter_ver); + CHK_RES("failed to enter " ESINFO_KEY_VERSION " map"); + + /* search for the `number` parameter in `version` map */ + res = cbor_map_advance_to_key(&iter_ver, ESINFO_KEY_NUMBER, + sizeof(ESINFO_KEY_NUMBER) - 1, &val); + CHK_RES("failed to lookup '" ESINFO_KEY_NUMBER "' key in map"); + if (! cbor_value_is_valid(&val)) { + ERRH(dbc, "parameter '" ESINFO_KEY_NUMBER "' not found in map."); + return FALSE; + } + if ((obj_type = cbor_value_get_type(&iter_ver)) != CborTextStringType) { + ERRH(dbc, "value for key '" ESINFO_KEY_NUMBER "' is not string " + "(but %d).", obj_type); + return FALSE; + } + + /* fetch `version` value */ + res = cbor_value_get_string_chunk(&iter_ver, &version->str, &version->cnt); + CHK_RES("failed to fetch " ESINFO_KEY_NUMBER " value"); + + /* Note: containers must be "left" (cbor_value_leave_container()) if ever + * using anything else in the info object! */ + + DBGH(dbc, "Elasticsearch'es version number: [%zu] `" LCPDL "`.", + version->cnt, LCSTR(version)); + return TRUE; + +err: + return FALSE; + +# undef CHK_RES +} + +static BOOL parse_es_version_json(esodbc_dbc_st *dbc, cstr_st *rsp_body, + wstr_st *version, void **state) +{ + UJObject obj, o_version, o_number; + /* top-level key of interest */ + const wchar_t *tl_key[] = {MK_WPTR(ESINFO_KEY_VERSION)}; + const wchar_t *version_key[] = {MK_WPTR(ESINFO_KEY_NUMBER)}; + int unpacked; + + obj = UJDecode(rsp_body->str, rsp_body->cnt, /*heap f()s*/NULL, state); + if (! obj) { + ERRH(dbc, "failed to parse JSON: %s ([%zu] `" LCPDL "`).", + *state ? UJGetError(*state) : "", + rsp_body->cnt, LCSTR(rsp_body)); + return FALSE; + } + memset(&o_version, 0, sizeof(o_version)); + unpacked = UJObjectUnpack(obj, 1, "O", tl_key, &o_version); + if ((unpacked < 1) || (! o_version)) { + ERRH(dbc, "no 'version' object in answer."); + return FALSE; + } + memset(&o_number, 0, sizeof(o_number)); + unpacked = UJObjectUnpack(o_version, 1, "S", version_key, &o_number); + if ((unpacked < 1) || (! 
o_number)) { + ERRH(dbc, "no 'number' element in version."); + return FALSE; + } + version->str = (SQLWCHAR *)UJReadString(o_number, &version->cnt); + DBGH(dbc, "Elasticsearch'es version number: [%zu] `" LWPDL "`.", + version->cnt, LWSTR(version)); + return TRUE; +} /* * Note: not thread safe: only usable on connection setup. */ static SQLRETURN check_server_version(esodbc_dbc_st *dbc) { long code; - cstr_st resp = {0}; + cstr_st rsp_body = {0}; + char *cont_type; + BOOL is_json; SQLRETURN ret; - UJObject obj, o_version, o_number; void *state = NULL; - int unpacked; - const wchar_t *tl_key[] = {L"version"}; /* top-level key of interest */ - const wchar_t *version_key[] = {L"number"}; - wstr_st ver_no; + unsigned char ver_checking; wstr_st own_ver = WSTR_INIT(STR(DRV_VERSION)); /*build-time define*/ + wstr_st es_ver, ver_no; + cstr_st es_ver_c; static const wchar_t err_msg_fmt[] = L"Version mismatch between server (" WPFWP_LDESC ") and driver (" WPFWP_LDESC "). Please use a driver whose" " version matches that of your server."; @@ -1456,43 +1635,49 @@ static SQLRETURN check_server_version(esodbc_dbc_st *dbc) } RESET_HDIAG(dbc); - if (! dbc_curl_perform(dbc, &code, &resp)) { + if (! dbc_curl_perform(dbc, &code, &rsp_body, &cont_type)) { dbc_curl_post_diag(dbc, SQL_STATE_HY000); cleanup_curl(dbc); return SQL_ERROR; } - if (! resp.cnt) { + if (! SQL_SUCCEEDED(content_type_supported(dbc, cont_type, &is_json))) { + goto err; + } + if (! rsp_body.cnt) { ERRH(dbc, "failed to get a response with body: code=%ld, " - "body len: %zu.", code, resp.cnt); + "body len: %zu.", code, rsp_body.cnt); goto err; } else if (code != 200) { - ret = attach_error(dbc, &resp, code); + ret = attach_error(dbc, &rsp_body, is_json, code); goto err; } - /* 200 with body received: decode (hopefully JSON) answer */ + /* 200 with body received: decode (JSON/CBOR) answer */ - obj = UJDecode(resp.str, resp.cnt, /*heap f()s*/NULL, &state); - if (! obj) { - ERRH(dbc, "failed to parse JSON: %s ([%zd] `" LCPDL "`).", - state ? UJGetError(state) : "", - resp.cnt, LCSTR(&resp)); + if (is_json ? parse_es_version_json(dbc, &rsp_body, &es_ver, &state) : + parse_es_version_cbor(dbc, &rsp_body, &es_ver_c)) { + n = is_json ? (int)es_ver.cnt : (int)es_ver_c.cnt; + } else { + ERRH(dbc, "failed to extract Elasticsearch'es version."); goto err; } - memset(&o_version, 0, sizeof(o_version)); - unpacked = UJObjectUnpack(obj, 1, "O", tl_key, &o_version); - if ((unpacked < 1) || (! o_version)) { - ERRH(dbc, "no 'version' object in answer."); + + ver_checking = dbc->srv_ver.checking; + /* version is returned to application, which requires a NTS => +1 for \0 */ + dbc->srv_ver.string.str = malloc((n + 1) * sizeof(SQLWCHAR)); + if (! dbc->srv_ver.string.str) { + ERRNH(dbc, "OOM for %zu.", (n + 1) * sizeof(SQLWCHAR)); + post_diagnostic(dbc, SQL_STATE_HY001, NULL, 0); goto err; - } - memset(&o_number, 0, sizeof(o_number)); - unpacked = UJObjectUnpack(o_version, 1, "S", version_key, &o_number); - if ((unpacked < 1) || (! 
o_number)) { - ERRH(dbc, "no 'number' element in version."); + } else if (is_json) { + memcpy(dbc->srv_ver.string.str, es_ver.str, n * sizeof(SQLWCHAR)); + } else if (ascii_c2w(es_ver_c.str, dbc->srv_ver.string.str, n) < 0) { + /* non-ASCII or empty */ + ERRH(dbc, "Elasticsearch version string is invalid."); goto err; } - ver_no.str = (SQLWCHAR *)UJReadString(o_number, &ver_no.cnt); - DBGH(dbc, "read version number: [%zu] `" LWPDL "`.", ver_no.cnt, - LWSTR(&ver_no)); + dbc->srv_ver.string.cnt = n; + dbc->srv_ver.string.str[n] = 0; + ver_no = dbc->srv_ver.string; # ifndef NDEBUG /* strip any qualifiers (=anything following a first `-`) in debug mode */ @@ -1500,13 +1685,13 @@ static SQLRETURN check_server_version(esodbc_dbc_st *dbc) wtrim_at(&own_ver, L'-'); # endif /* !NDEBUG */ - if (tolower(dbc->srv_ver.checking) == tolower(ESODBC_DSN_VC_MAJOR[0])) { + if (tolower(ver_checking) == tolower(ESODBC_DSN_VC_MAJOR[0])) { /* trim versions to the first dot, i.e. major version */ wtrim_at(&ver_no, L'.'); wtrim_at(&own_ver, L'.'); } - if (tolower(dbc->srv_ver.checking) != tolower(ESODBC_DSN_VC_NONE[0])) { + if (tolower(ver_checking) != tolower(ESODBC_DSN_VC_NONE[0])) { if (! EQ_WSTR(&ver_no, &own_ver)) { ERRH(dbc, "version mismatch: server: " LWPDL ", " "own: " LWPDL ".", LWSTR(&ver_no), LWSTR(&own_ver)); @@ -1526,31 +1711,24 @@ static SQLRETURN check_server_version(esodbc_dbc_st *dbc) # endif /* !NDEBUG */ } - /* re-read the original version (before trimming) and dup it */ - ver_no.str = (SQLWCHAR *)UJReadString(o_number, &ver_no.cnt); - /* version is returned to application, which requires a NTS => +1 for \0 */ - dbc->srv_ver.string.str = malloc((ver_no.cnt + 1) * sizeof(SQLWCHAR)); - if (! dbc->srv_ver.string.str) { - ERRNH(dbc, "OOM for %zd.", ver_no.cnt * sizeof(SQLWCHAR)); - post_diagnostic(dbc, SQL_STATE_HY001, NULL, 0); - goto err; - } else { - memcpy(dbc->srv_ver.string.str, ver_no.str, - ver_no.cnt * sizeof(SQLWCHAR)); - dbc->srv_ver.string.cnt = ver_no.cnt; - dbc->srv_ver.string.str[ver_no.cnt] = 0; + free(rsp_body.str); + if (is_json) { + /* UJSON4C will ref strings in its 'state' */ + assert(state); + UJFree(state); } - - free(resp.str); - assert(state); - UJFree(state); return ret; err: - if (resp.cnt) { - ERRH(dbc, "failed to process server's answer: [%zu] `" LWPDL "`.", - resp.cnt, LCSTR(&resp)); - free(resp.str); + if (rsp_body.cnt) { + if (is_json) { + ERRH(dbc, "failed to process server's answer: [%zu] `" LCPDL "`.", + rsp_body.cnt, LCSTR(&rsp_body)); + } else { + ERRH(dbc, "failed to process server's answer: [%zu] `%s`.", + rsp_body.cnt, cstr_hex_dump(&rsp_body)); + } + free(rsp_body.str); } if (state) { UJFree(state); @@ -2383,8 +2561,7 @@ static BOOL load_es_types(esodbc_dbc_st *dbc) if (dbc->hwin) { cstr_st *types_answer = (cstr_st *)dbc->hwin; dbc->hwin = NULL; - if (! SQL_SUCCEEDED(attach_answer(stmt, types_answer->str, - types_answer->cnt))) { + if (! 
SQL_SUCCEEDED(attach_answer(stmt, types_answer, /*JSON*/TRUE))) { ERRH(stmt, "failed to attach dummmy ES types answer"); goto end; } diff --git a/driver/connect.h b/driver/connect.h index b85fdabb..abe4a8e0 100644 --- a/driver/connect.h +++ b/driver/connect.h @@ -90,7 +90,8 @@ BOOL connect_init(); void connect_cleanup(); SQLRETURN dbc_curl_set_url(esodbc_dbc_st *dbc, int url_type); -SQLRETURN post_json(esodbc_stmt_st *stmt, int url_type, const cstr_st *u8body); +SQLRETURN curl_post(esodbc_stmt_st *stmt, int url_type, + const cstr_st *req_body); void cleanup_dbc(esodbc_dbc_st *dbc); SQLRETURN do_connect(esodbc_dbc_st *dbc, esodbc_dsn_attrs_st *attrs); SQLRETURN config_dbc(esodbc_dbc_st *dbc, esodbc_dsn_attrs_st *attrs); diff --git a/driver/convert.c b/driver/convert.c index 228f2894..1306041e 100644 --- a/driver/convert.c +++ b/driver/convert.c @@ -1448,7 +1448,7 @@ static SQLRETURN wstr_to_cstr(esodbc_rec_st *arec, esodbc_rec_st *irec, assert(xstr.w.str[xstr.w.cnt] == L'\0'); /* how much space would the converted string take? */ - in_bytes = WCS2U8(xstr.w.str, (int)xstr.w.cnt + 1, NULL, 0); + in_bytes = U16WC_TO_MBU8(xstr.w.str, xstr.w.cnt + 1, NULL, 0); if (in_bytes <= 0) { ERRNH(stmt, "failed to convert wchar* to char* for string `" LWPDL "`.", LWSTR(&xstr.w)); @@ -1456,8 +1456,8 @@ static SQLRETURN wstr_to_cstr(esodbc_rec_st *arec, esodbc_rec_st *irec, } /* out length needs to be provided with no (potential) truncation. */ if (octet_len_ptr) { - /* chars_0 accounts for 0-terminator, so WCS2U8 will count that in - * the output as well => trim it, since we must not count it when + /* chars_0 accounts for 0-terminator, so U16WC_TO_MBU8 will count that + * in the output as well => trim it, since we must not count it when * indicating the length to the application */ out_bytes = in_bytes - 1; write_out_octets(octet_len_ptr, out_bytes, irec); @@ -1476,10 +1476,10 @@ static SQLRETURN wstr_to_cstr(esodbc_rec_st *arec, esodbc_rec_st *irec, /* trim the original string until it fits in output buffer, with given * length limitation */ for (c = (int)xstr.w.cnt + 1; 0 < c; c --) { - out_bytes = WCS2U8(xstr.w.str, c, charp, in_bytes); + out_bytes = U16WC_TO_MBU8(xstr.w.str, c, charp, in_bytes); /* if user gives 0 as buffer size, out_bytes will also be 0 */ if (out_bytes <= 0) { - if (WCS2U8_BUFF_INSUFFICIENT) { + if (WAPI_ERR_EBUFF()) { continue; } ERRNH(stmt, "failed to convert wchar_t* to char* for string `" @@ -4674,18 +4674,18 @@ static SQLRETURN c2sql_wstr2qstr(esodbc_rec_st *arec, esodbc_rec_st *irec, DBGH(stmt, "converting w-string [%lld] `" LWPDL "`; target@0x%p.", cnt, cnt, (wchar_t *)data_ptr, dest); - if (cnt) { /* WCS2U8 will fail with empty string */ - SetLastError(0); - octets = WCS2U8((wchar_t *)data_ptr, (int)cnt, dest + !!dest, + if (cnt) { /* U16WC_TO_MBU8 will fail with empty string, but set no err */ + WAPI_CLR_ERRNO(); + octets = U16WC_TO_MBU8((wchar_t *)data_ptr, cnt, dest + !!dest, dest ? 
INT_MAX : 0); - if ((err = GetLastError())) { + if ((err = WAPI_ERRNO()) != ERROR_SUCCESS) { ERRH(stmt, "converting to multibyte string failed: %d", err); RET_HDIAGS(stmt, SQL_STATE_HY000); } + assert(0 < octets); /* shouldn't not fail and return negative */ } else { octets = 0; } - assert(0 <= octets); /* buffer might be empty, so 0 is valid */ *len = (size_t)octets; if (dest) { diff --git a/driver/handles.c b/driver/handles.c index b4c9e99e..6b4fb60f 100644 --- a/driver/handles.c +++ b/driver/handles.c @@ -126,7 +126,7 @@ static void clear_desc(esodbc_desc_st *desc, BOOL reinit) break; case DESC_TYPE_IRD: - if (HDRH(desc)->stmt->rset.ecurs.cnt) { + if (STMT_HAS_CURSOR(HDRH(desc)->stmt)) { close_es_cursor(HDRH(desc)->stmt); } if (STMT_HAS_RESULTSET(desc->hdr.stmt)) { diff --git a/driver/handles.h b/driver/handles.h index 22f72068..6c2f6f68 100644 --- a/driver/handles.h +++ b/driver/handles.h @@ -13,6 +13,7 @@ #include "error.h" #include "defs.h" #include "log.h" +#include "tinycbor.h" /* forward declarations */ struct struct_env; @@ -299,21 +300,37 @@ typedef struct struct_desc { #define ASSERT_IXD_HAS_ES_TYPE(_rec) \ assert(DESC_TYPE_IS_IMPLEMENTATION(_rec->desc->type) && _rec->es_type) +struct resultset_cbor { + cstr_st curs; /* ES'es cursor; refs req's body */ + CborValue rows_iter; /* iterator over received rows; refs req's body */ + wstr_st cols_buff /* columns descriptions; refs allocated chunk */; +}; -typedef struct struct_resultset { - long code; /* code of last response */ - char *buff; /* buffer containing the answer to the last request in a STM */ - size_t blen; /* length of the answer */ - - wstr_st ecurs; /* Elastic's cursor object */ +struct resultset_json { + wstr_st curs; /* ES'es cursor; refs UJSON4C 'state' */ void *state; /* top UJSON decoder state */ void *rows_iter; /* UJSON iterator with the rows in result set */ UJObject row_array; /* UJSON object for current row */ +}; + +typedef struct struct_resultset { + long code; /* HTTP code of last response */ + cstr_st body; /* HTTP body of last answer to a statement */ + + union { + struct resultset_cbor cbor; + struct resultset_json json; + } pack; size_t nrows; /* (count of) rows in current result set */ size_t vrows; /* (count of) visited rows in current result set */ } resultset_st; +#define STMT_HAS_CURSOR(_stmt) \ + (HDRH(_stmt)->dbc->pack_json ? 
\ + (_stmt)->rset.pack.json.curs.cnt : \ + (_stmt)->rset.pack.cbor.curs.cnt) + /* * "The fields of an IRD have a default value only after the statement has * been prepared or executed and the IRD has been populated, not when the @@ -526,9 +543,9 @@ SQLRETURN EsSQLSetDescRec( return esodbc_errors[_s].retcode; \ } while (0) -#define STMT_HAS_RESULTSET(stmt) ((stmt)->rset.buff != NULL) -#define STMT_FORCE_NODATA(stmt) (stmt)->rset.blen = (size_t)-1 -#define STMT_NODATA_FORCED(stmt) ((stmt)->rset.blen == (size_t)-1) +#define STMT_HAS_RESULTSET(stmt) ((stmt)->rset.body.str != NULL) +#define STMT_FORCE_NODATA(stmt) (stmt)->rset.body.cnt = (size_t)-1 +#define STMT_NODATA_FORCED(stmt) ((stmt)->rset.body.cnt == (size_t)-1) /* "An application can unbind the data buffer for a column but still have a * length/indicator buffer bound for the column" */ #define REC_IS_BOUND(rec) ( \ diff --git a/driver/odbc.c b/driver/odbc.c index 54373db8..77ec1390 100644 --- a/driver/odbc.c +++ b/driver/odbc.c @@ -12,6 +12,7 @@ #include "queries.h" #include "convert.h" #include "catalogue.h" +#include "tinycbor.h" //#include "elasticodbc_export.h" //#define SQL_API ELASTICODBC_EXPORT SQL_API @@ -30,11 +31,11 @@ static BOOL driver_init() return FALSE; } INFO("initializing driver."); - convert_init(); - if (! connect_init()) { + if (! queries_init()) { return FALSE; } - if (! queries_init()) { + convert_init(); + if (! connect_init()) { return FALSE; } #ifndef NDEBUG @@ -54,7 +55,7 @@ static BOOL driver_init() static void driver_cleanup() { connect_cleanup(); - log_cleanup(); + tinycbor_cleanup(); } BOOL WINAPI DllMain( @@ -85,17 +86,18 @@ BOOL WINAPI DllMain( // Perform any necessary cleanup. case DLL_PROCESS_DETACH: + driver_cleanup(); #ifndef NDEBUG if (_gf_log) { - ERR("dumping tracked leaks:"); + ERR("dumping tracked leaks (log.c leak is safe to ignore):"); /* _CrtDumpMemoryLeaks() will always report at least one leak, * that of the allocated logger itself that the function uses - * to log into. This is freed below, in driver_cleanup(). */ + * to log into. This is freed below, in log_cleanup(). */ ERR("leaks dumped: %d.", _CrtDumpMemoryLeaks()); } #endif /* !NDEBUG */ INFO("process %u detaching.", GetCurrentProcessId()); - driver_cleanup(); + log_cleanup(); break; } @@ -546,9 +548,6 @@ SQLRETURN SQL_API SQLPrepareW /* * https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/sending-long-data - * Note: must use EsSQLSetDescFieldW() for param data-type setting, to call - * set_defaults_from_type(), to meet the "Other fields implicitly set" - * requirements from the page linked in set_defaults_from_type() comments. 
*/ SQLRETURN SQL_API SQLBindParameter( SQLHSTMT hstmt, diff --git a/driver/queries.c b/driver/queries.c index 758fbeb1..cac528d6 100644 --- a/driver/queries.c +++ b/driver/queries.c @@ -13,17 +13,18 @@ #include "convert.h" /* key names used in Elastic/SQL REST/JSON answers */ -#define JSON_ANSWER_COLUMNS "columns" -#define JSON_ANSWER_ROWS "rows" -#define JSON_ANSWER_CURSOR "cursor" -#define JSON_ANSWER_STATUS "status" -#define JSON_ANSWER_ERROR "error" -#define JSON_ANSWER_ERR_TYPE "type" -#define JSON_ANSWER_ERR_REASON "reason" -#define JSON_ANSWER_ERR_RCAUSE "root_cause" -#define JSON_ANSWER_COL_NAME "name" -#define JSON_ANSWER_COL_TYPE "type" -#define JSON_ANSWER_CURS_CLOSE "succeeded" +#define PACK_PARAM_COLUMNS "columns" +#define PACK_PARAM_ROWS "rows" +#define PACK_PARAM_CURSOR "cursor" +#define PACK_PARAM_STATUS "status" +#define PACK_PARAM_ERROR "error" +#define PACK_PARAM_ERR_TYPE "type" +#define PACK_PARAM_ERR_REASON "reason" +#define PACK_PARAM_ERR_RCAUSE "root_cause" +#define PACK_PARAM_COL_NAME "name" +#define PACK_PARAM_COL_TYPE "type" +#define PACK_PARAM_COL_DSIZE "display_size" +#define PACK_PARAM_CURS_CLOSE "succeeded" #define MSG_INV_SRV_ANS "Invalid server answer" @@ -130,6 +131,10 @@ static inline BOOL update_tz_param() BOOL queries_init() { + /* for the casts in this module */ + ASSERT_INTEGER_TYPES_EQUAL(wchar_t, SQLWCHAR); + ASSERT_INTEGER_TYPES_EQUAL(char, SQLCHAR); + /* needed to correctly run the unit tests */ return update_tz_param(); } @@ -138,11 +143,18 @@ void clear_resultset(esodbc_stmt_st *stmt, BOOL on_close) { INFOH(stmt, "clearing result set; vrows=%zu, nrows=%zu, nset=%zu.", stmt->rset.vrows, stmt->rset.nrows, stmt->nset); - if (stmt->rset.buff) { - free(stmt->rset.buff); + if (stmt->rset.body.str) { + free(stmt->rset.body.str); } - if (stmt->rset.state) { - UJFree(stmt->rset.state); + if (HDRH(stmt)->dbc->pack_json) { + if (stmt->rset.pack.json.state) { + UJFree(stmt->rset.pack.json.state); + } + } else { + if (stmt->rset.pack.cbor.cols_buff.cnt) { + assert(stmt->rset.pack.cbor.cols_buff.str); + free(stmt->rset.pack.cbor.cols_buff.str); + } } memset(&stmt->rset, 0, sizeof(stmt->rset)); @@ -198,28 +210,108 @@ static void set_col_size(esodbc_rec_st *rec) } } -static SQLRETURN attach_columns(esodbc_stmt_st *stmt, UJObject columns) +/* Note: col_name/_type need to reference pre-allocated str. objects. */ +static BOOL attach_one_column(esodbc_rec_st *rec, wstr_st *col_name, + wstr_st *col_type) // TODO: disp size { - esodbc_desc_st *ird; + size_t i; + static const wstr_st EMPTY_WSTR = WSTR_INIT(""); + esodbc_stmt_st *stmt; esodbc_dbc_st *dbc; + + stmt = HDRH(rec->desc)->stmt; + dbc = HDRH(stmt)->dbc; + + rec->name = col_name->cnt ? *col_name : EMPTY_WSTR; + + assert(! rec->es_type); + /* lookup the DBC-cached ES type */ + for (i = 0; i < dbc->no_types; i ++) { + if (EQ_CASE_WSTR(&dbc->es_types[i].type_name, col_type)) { + rec->es_type = &dbc->es_types[i]; + break; + } + } + if (rec->es_type) { + /* copy fields pre-calculated at DB connect time */ + rec->concise_type = rec->es_type->data_type; + rec->type = rec->es_type->sql_data_type; + rec->datetime_interval_code = rec->es_type->sql_datetime_sub; + rec->meta_type = rec->es_type->meta_type; + /* set INTERVAL record's seconds precision */ + if (rec->meta_type == METATYPE_INTERVAL_WSEC) { + assert(rec->precision == 0); + rec->precision = rec->es_type->maximum_scale; + } + } else if (! 
dbc->no_types) { + /* the connection doesn't have yet the types cached (this is the + * caching call) and don't have access to the data itself either, + * just the column names & type names => set unknowns. */ + rec->concise_type = SQL_UNKNOWN_TYPE; + rec->type = SQL_UNKNOWN_TYPE; + rec->datetime_interval_code = 0; + rec->meta_type = METATYPE_UNKNOWN; + } else { + ERRH(stmt, "type lookup failed for `" LWPDL "`.", LWSTR(col_type)); + return FALSE; + } + + set_col_size(rec); + + /* setting the remaining of settable fields (base table etc.) requires + * server side changes => set them to "" */ + + /* "If a base column name does not exist (as in the case of columns + * that are expressions), then this variable contains an empty + * string." */ + rec->base_column_name = EMPTY_WSTR; + /* "If a column does not have a label, the column name is returned. If + * the column is unlabeled and unnamed, an empty string is ret" */ + rec->label = rec->name.cnt ? rec->name : EMPTY_WSTR; + + assert(rec->name.str && rec->label.str); + rec->unnamed = (rec->name.cnt || rec->label.cnt) ? + SQL_NAMED : SQL_UNNAMED; + + /* All rec fields must be init'ed to a valid string in case their value + * is requested (and written with write_wstr()). The values would + * normally be provided by the data source, this is not the case here + * (yet), though. */ + rec->base_table_name = EMPTY_WSTR; + rec->catalog_name = EMPTY_WSTR; + rec->schema_name = EMPTY_WSTR; + rec->table_name = EMPTY_WSTR; +#ifndef NDEBUG + //dump_record(rec); +#endif /* NDEBUG */ + + DBGH(stmt, "column #%zu: name=`" LWPDL "`, type=%d (`" LWPDL "`).", + ((uintptr_t)rec - (uintptr_t)stmt->ird->recs) / sizeof(*rec), + LWSTR(&rec->name), rec->concise_type, LWSTR(col_type)); + + return TRUE; +} + +static SQLRETURN attach_columns_json(esodbc_stmt_st *stmt, UJObject columns) +{ + esodbc_desc_st *ird; esodbc_rec_st *rec; SQLRETURN ret; SQLSMALLINT recno; void *iter; UJObject col_o, name_o, type_o; - wstr_st col_type; - size_t ncols, i; + wstr_st col_type, col_name; + size_t ncols; const wchar_t *keys[] = { - MK_WPTR(JSON_ANSWER_COL_NAME), - MK_WPTR(JSON_ANSWER_COL_TYPE) + MK_WPTR(PACK_PARAM_COL_NAME), + MK_WPTR(PACK_PARAM_COL_TYPE) }; static const wstr_st EMPTY_WSTR = WSTR_INIT(""); ird = stmt->ird; - dbc = stmt->hdr.dbc; ncols = UJLengthArray(columns); - INFOH(stmt, "columns received: %zd.", ncols); + INFOH(stmt, "columns received: %zu.", ncols); ret = update_rec_count(ird, (SQLSMALLINT)ncols); if (! SQL_SUCCEEDED(ret)) { ERRH(stmt, "failed to set IRD's record count to %d.", ncols); @@ -227,168 +319,78 @@ static SQLRETURN attach_columns(esodbc_stmt_st *stmt, UJObject columns) return ret; } - iter = UJBeginArray(columns); - if (! iter) { + if (! (iter = UJBeginArray(columns))) { ERRH(stmt, "failed to obtain array iterator: %s.", - UJGetError(stmt->rset.state)); - RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); + UJGetError(stmt->rset.pack.json.state)); + goto err; } - recno = 0; - while (UJIterArray(&iter, &col_o)) { + for (recno = 0; UJIterArray(&iter, &col_o); recno ++) { if (UJObjectUnpack(col_o, 2, "SS", keys, &name_o, &type_o) < 2) { ERRH(stmt, "failed to decode JSON column: %s.", - UJGetError(stmt->rset.state)); + UJGetError(stmt->rset.pack.json.state)); RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); } - rec = &ird->recs[recno]; // +recno - - ASSERT_INTEGER_TYPES_EQUAL(wchar_t, SQLWCHAR); - rec->name.str = (SQLWCHAR *)UJReadString(name_o, &rec->name.cnt); - if (! 
rec->name.str) { - rec->name = MK_WSTR(""); - } + rec = &ird->recs[recno]; + col_name.str = (SQLWCHAR *)UJReadString(name_o, &col_name.cnt); col_type.str = (SQLWCHAR *)UJReadString(type_o, &col_type.cnt); - assert(! rec->es_type); - /* lookup the DBC-cashed ES type */ - for (i = 0; i < dbc->no_types; i ++) { - if (EQ_CASE_WSTR(&dbc->es_types[i].type_name, &col_type)) { - rec->es_type = &dbc->es_types[i]; - break; - } + if (! attach_one_column(rec, &col_name, &col_type)) { + goto err; } - if (rec->es_type) { - /* copy fileds pre-calculated at DB connect time */ - rec->concise_type = rec->es_type->data_type; - rec->type = rec->es_type->sql_data_type; - rec->datetime_interval_code = rec->es_type->sql_datetime_sub; - rec->meta_type = rec->es_type->meta_type; - /* set INTERVAL record's seconds precision */ - if (rec->meta_type == METATYPE_INTERVAL_WSEC) { - assert(rec->precision == 0); - rec->precision = rec->es_type->maximum_scale; - } - } else if (! dbc->no_types) { - /* the connection doesn't have yet the types cached (this is the - * caching call) and don't have access to the data itself either, - * just the column names & type names => set unknowns. */ - rec->concise_type = SQL_UNKNOWN_TYPE; - rec->type = SQL_UNKNOWN_TYPE; - rec->datetime_interval_code = 0; - rec->meta_type = METATYPE_UNKNOWN; - } else { - ERRH(stmt, "type lookup failed for `" LWPDL "`.",LWSTR(&col_type)); - RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); - } - - set_col_size(rec); - - /* setting the remaining of settable fields (base table etc.) requires - * server side changes => set them to "" */ - - /* "If a base column name does not exist (as in the case of columns - * that are expressions), then this variable contains an empty - * string." */ - rec->base_column_name = EMPTY_WSTR; - /* "If a column does not have a label, the column name is returned. If - * the column is unlabeled and unnamed, an empty string is ret" */ - rec->label = rec->name.cnt ? rec->name : EMPTY_WSTR; - - assert(rec->name.str && rec->label.str); - rec->unnamed = (rec->name.cnt || rec->label.cnt) ? - SQL_NAMED : SQL_UNNAMED; - - /* All rec fields must be init'ed to a valid string in case their value - * is requested (and written with write_wstr()). The values would - * normally be provided by the data source, this is not the case here - * (yet), though. */ - rec->base_table_name = EMPTY_WSTR; - rec->catalog_name = EMPTY_WSTR; - rec->schema_name = EMPTY_WSTR; - rec->table_name = EMPTY_WSTR; -#ifndef NDEBUG - //dump_record(rec); -#endif /* NDEBUG */ - - DBGH(stmt, "column #%d: name=`" LWPDL "`, type=%d (`" LWPDL "`).", - recno, LWSTR(&rec->name), rec->concise_type, LWSTR(&col_type)); - recno ++; } - /* new columns attached, need to check compatiblity */ - stmt->sql2c_conversion = CONVERSION_UNCHECKED; - return SQL_SUCCESS; +err: + RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); } - -/* - * Processes a received answer: - * - takes a dynamic buffer, buff, of length blen. Will handle the buff memory - * even if the call fails. - * - parses it, preparing iterators for SQLFetch()'ing. 
- */ -SQLRETURN TEST_API attach_answer(esodbc_stmt_st *stmt, char *buff, size_t blen) +static SQLRETURN attach_answer_json(esodbc_stmt_st *stmt) { int unpacked; UJObject obj, columns, rows, cursor; - const wchar_t *wcurs; - size_t eccnt; const wchar_t *keys[] = { - MK_WPTR(JSON_ANSWER_COLUMNS), - MK_WPTR(JSON_ANSWER_ROWS), - MK_WPTR(JSON_ANSWER_CURSOR) + MK_WPTR(PACK_PARAM_COLUMNS), + MK_WPTR(PACK_PARAM_ROWS), + MK_WPTR(PACK_PARAM_CURSOR) }; - /* clear any previous result set */ - if (STMT_HAS_RESULTSET(stmt)) { - clear_resultset(stmt, /*on_close*/FALSE); - } - - /* the statement takes ownership of mem obj */ - stmt->rset.buff = buff; - stmt->rset.blen = blen; - DBGH(stmt, "attaching answer [%zd]`" LCPDL "`.", blen, blen, buff); + DBGH(stmt, "attaching JSON answer: [%zu] `" LCPDL "`.", + stmt->rset.body.cnt, LCSTR(&stmt->rset.body)); /* parse the entire JSON answer */ - obj = UJDecode(buff, blen, NULL, &stmt->rset.state); + obj = UJDecode(stmt->rset.body.str, stmt->rset.body.cnt, NULL, + &stmt->rset.pack.json.state); if (! obj) { - ERRH(stmt, "failed to decode JSON answer: %s ([%zu] `%.*s`).", - stmt->rset.state ? UJGetError(stmt->rset.state) : "", - blen, blen, buff); - assert(0); - RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); + ERRH(stmt, "failed to decode JSON answer: %s ([%zu] `" LCPDL "`).", + stmt->rset.pack.json.state ? + UJGetError(stmt->rset.pack.json.state) : "", + stmt->rset.body.cnt, LCSTR(&stmt->rset.body)); + goto err; } columns = rows = cursor = NULL; /* extract the columns and rows objects */ unpacked = UJObjectUnpack(obj, 3, "AAS", keys, &columns, &rows, &cursor); if (unpacked < /* 'rows' must always be present */1) { - ERRH(stmt, "failed to unpack JSON answer (`%.*s`): %s.", - blen, buff, UJGetError(stmt->rset.state)); - assert(0); - RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); + ERRH(stmt, "failed to unpack JSON answer: %s (`" LCPDL "`).", + UJGetError(stmt->rset.pack.json.state), LCSTR(&stmt->rset.body)); + goto err; } /* * set the internal cursor (UJSON4C array iterator) */ if (! rows) { - ERRH(stmt, "no rows JSON object received in answer: `%.*s`[%zd].", - blen, buff, blen); - RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); - } - stmt->rset.rows_iter = UJBeginArray(rows); - if (! stmt->rset.rows_iter) { -#if 0 /* UJSON4C will return NULL above, for empty array (meh!) */ - ERRH(stmt, "failed to get iterrator on received rows: %s.", - UJGetError(stmt->rset.state)); - RET_HDIAGS(stmt, SQL_STATE_HY000); -#else /*0*/ + ERRH(stmt, "no rows object received in answer: `" LCPDL "`.", + LCSTR(&stmt->rset.body)); + goto err; + } + stmt->rset.pack.json.rows_iter = UJBeginArray(rows); + if (! stmt->rset.pack.json.rows_iter) { + /* UJSON4C will return NULL above, for empty array (meh!) */ DBGH(stmt, "received empty resultset array: forcing nodata."); STMT_FORCE_NODATA(stmt); stmt->rset.nrows = 0; -#endif /*0*/ } else { stmt->nset ++; /* the cast is made safe by the decoding format indicator for array */ @@ -398,83 +400,502 @@ SQLRETURN TEST_API attach_answer(esodbc_stmt_st *stmt, char *buff, size_t blen) DBGH(stmt, "rows received in result set: %zd.", stmt->rset.nrows); /* - * copy Elastic's cursor (if there's one) + * copy ref to ES'es cursor (if there's one) */ if (cursor) { - wcurs = UJReadString(cursor, &eccnt); - if (eccnt) { - /* this can happen automatically if hitting scroller size */ - if (! 
stmt->hdr.dbc->fetch.max) { - DBGH(stmt, "no fetch size defined, but cursor returned."); - } - if (stmt->rset.ecurs.cnt) { - DBGH(stmt, "replacing old cursor `" LWPDL "`.", - LWSTR(&stmt->rset.ecurs)); - } - /* store new cursor vals */ - stmt->rset.ecurs = (wstr_st) { - (SQLWCHAR *)wcurs, eccnt - }; - DBGH(stmt, "new elastic cursor: [%zd] `" LWPDL "`.", - stmt->rset.ecurs.cnt, LWSTR(&stmt->rset.ecurs)); - } else { - WARNH(stmt, "empty cursor found in the answer."); + /* should have been cleared by now */ + assert(! stmt->rset.pack.json.curs.cnt); + /* store new cursor vals */ + stmt->rset.pack.json.curs.str = + (SQLWCHAR *)UJReadString(cursor, &stmt->rset.pack.json.curs.cnt); + DBGH(stmt, "new paginating cursor: [%zd] `" LWPDL "`.", + stmt->rset.pack.json.curs.cnt, LWSTR(&stmt->rset.pack.json.curs)); + } + + /* + * process the received columns, if any. + */ + if (columns) { + if (0 < stmt->ird->count) { + ERRH(stmt, "%d columns already attached.", stmt->ird->count); + goto err; + } + return attach_columns_json(stmt, columns); + } + + return SQL_SUCCESS; +err: + RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); +} + +/* macro valid for attach_*_cbor() functions only */ +#define CHK_RES(_hnd, _fmt, ...) \ + JUMP_ON_CBOR_ERR(res, err, _hnd, _fmt, __VA_ARGS__) + +/* Function iterates over the recived "columns" array (of map elements). + * The recived column names (and their types) are UTF-8 multi-bytes, which + * need to be converted to UTF-16 wide-chars => + * - on first invocation, it calculates the space needed for the converted + * values (one chunk for all wide chars strings, two per column: name and + * type) and allocates it; + * - on second invocation, it converts and attaches the columns to the + * statement. */ +static BOOL iterate_on_columns(esodbc_stmt_st *stmt, CborValue columns) +{ + SQLSMALLINT recno; + esodbc_desc_st *ird; + CborError res; + CborValue it, name_obj, type_obj;//, dsize_obj; + cstr_st name_cstr, type_cstr; + wstr_st name_wstr, type_wstr; + const char *keys[] = { + PACK_PARAM_COL_NAME, + PACK_PARAM_COL_TYPE, + //PACK_PARAM_COL_DSIZE + }; + const size_t lens[] = { + sizeof(PACK_PARAM_COL_NAME) - 1, + sizeof(PACK_PARAM_COL_TYPE) - 1, + //sizeof(PACK_PARAM_COL_DSIZE) - 1 + }; + CborValue *objs[] = {&name_obj, &type_obj};//, &dsize_obj}; + size_t keys_cnt = sizeof(keys)/sizeof(keys[0]); + int n, left, need; + wchar_t *wrptr; /* write pointer */ + + ird = stmt->ird; + + res = cbor_value_enter_container(&columns, &it); + CHK_RES(stmt, "failed to enter '" PACK_PARAM_COLUMNS "' array"); + + if (! stmt->rset.pack.cbor.cols_buff.cnt) { /* 1st iter */ + wrptr = NULL; + need = 0; + left = 0; + } else { /* 2nd iter */ + wrptr = (wchar_t *)stmt->rset.pack.cbor.cols_buff.str; + /* .cnt convered from an int before (in 1st iter) */ + left = (int)stmt->rset.pack.cbor.cols_buff.cnt; + } + + for (recno = 0; ! cbor_value_at_end(&it); recno ++) { + res = cbor_value_skip_tag(&it); + CHK_RES(stmt, "failed to skip tags in '" PACK_PARAM_COLUMNS "' array"); + if (! cbor_value_is_map(&it)) { + ERRH(stmt, "invalid element type in '" PACK_PARAM_COLUMNS + "' array."); + return FALSE; + } + res = cbor_map_lookup_keys(&it, keys_cnt, keys, lens, objs, + /*drain*/TRUE); + CHK_RES(stmt, "failed to lookup keys in '" PACK_PARAM_COLUMNS + "' element #%hd", recno); + + /* + * column "name" + */ + if (! 
cbor_value_is_text_string(&name_obj)) { + ERRH(stmt, "invalid non-text element '" PACK_PARAM_COL_NAME "'."); + return FALSE; + } + res = cbor_value_get_string_chunk(&name_obj, &name_cstr.str, + &name_cstr.cnt); + CHK_RES(stmt, "can't fetch value of '" PACK_PARAM_COL_NAME "' elem"); + + n = U8MB_TO_U16WC(name_cstr.str, name_cstr.cnt, wrptr, left); + if (n <= 0) { + /* MultiByteToWideChar() can fail with empty string, but that's + * not a valid value in the "columns" anyway, so it should be OK + * to leave that case be handled by this branch. */ + ERRH(stmt, "failed to translate UTF-8 multi-byte stream: 0x%x.", + WAPI_ERRNO()); + return FALSE; + } + if (! wrptr) { /* 1st iter */ + need += n; + } else { /* 2nd iter */ + name_wstr.str = wrptr; + name_wstr.cnt = (size_t)n; + + wrptr += (size_t)n; + left -= n; + } + + /* + * column "type" + */ + if (! cbor_value_is_text_string(&type_obj)) { + ERRH(stmt, "invalid non-text element '" PACK_PARAM_COL_TYPE "'."); + return FALSE; } + res = cbor_value_get_string_chunk(&type_obj, &type_cstr.str, + &type_cstr.cnt); + CHK_RES(stmt, "can't fetch value of '" PACK_PARAM_COL_TYPE "' elem"); + + n = U8MB_TO_U16WC(type_cstr.str, type_cstr.cnt, wrptr, left); + if (n <= 0) { + ERRH(stmt, "failed to translate UTF-8 multi-byte stream: 0x%x.", + WAPI_ERRNO()); + return FALSE; + } + if (! wrptr) { /* 1st iter */ + need += n; + } else { /* 2nd iter */ + type_wstr.str = wrptr; + type_wstr.cnt = (size_t)n; + + wrptr += (size_t)n; + left -= n; + } + + if (! wrptr) { /* 1st iter: collect lengths only */ + continue; + } + /* 2nd iter: attach column */ + if (! attach_one_column(&ird->recs[recno], &name_wstr, &type_wstr)) { + ERRH(stmt, "failed to attach column #%d `" LWPDL "`.", recno + 1, + LWSTR(&name_wstr)); + return FALSE; + } + } + + if ((! wrptr) /* 1st iter: alloc cols slab/buffer */ && (0 < need)) { + if (! (wrptr = malloc(need * sizeof(wchar_t)))) { + ERRNH(stmt, "OOM: %zu B.", need * sizeof(wchar_t)); + return FALSE; + } + /* attach the buffer to the statement */ + stmt->rset.pack.cbor.cols_buff.str = (SQLWCHAR *)wrptr; + /* cast is safe, 'need' checked against overflow */ + stmt->rset.pack.cbor.cols_buff.cnt = (size_t)need; + } + + return TRUE; +err: + return FALSE; +} + +static SQLRETURN attach_columns_cbor(esodbc_stmt_st *stmt, CborValue columns) +{ + size_t ncols; + SQLRETURN ret; + CborError res; + + res = cbor_get_array_count(columns, &ncols); + CHK_RES(stmt, "failed to get '" PACK_PARAM_COLUMNS "' array count."); + INFOH(stmt, "columns received: %zu.", ncols); + ret = update_rec_count(stmt->ird, (SQLSMALLINT)ncols); + if (! SQL_SUCCEEDED(ret)) { + ERRH(stmt, "failed to set IRD's record count to %d.", ncols); + HDIAG_COPY(stmt->ird, stmt); + return ret; + } + + assert(! stmt->rset.pack.cbor.cols_buff.cnt); + /* calculate buffer requirements and allocate it */ + if ((! iterate_on_columns(stmt, columns)) || + /* convert multi-byte to wchar_t and attach columns */ + (! 
iterate_on_columns(stmt, columns))) { + goto err; + } + + return SQL_SUCCESS; +err: + RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); +} + +static SQLRETURN attach_answer_cbor(esodbc_stmt_st *stmt) +{ + CborError res; + CborParser parser; + CborValue top_obj, cols_obj, curs_obj, rows_obj; + CborType obj_type; + const char *keys[] = { + PACK_PARAM_COLUMNS, + PACK_PARAM_CURSOR, + PACK_PARAM_ROWS, + }; + size_t keys_no = sizeof(keys) / sizeof(keys[0]); + const size_t lens[] = { + sizeof(PACK_PARAM_COLUMNS) - 1, + sizeof(PACK_PARAM_CURSOR) - 1, + sizeof(PACK_PARAM_ROWS) - 1, + }; + CborValue *vals[] = {&cols_obj, &curs_obj, &rows_obj}; + BOOL empty; + + DBGH(stmt, "attaching CBOR answer: [%zu] `%s`.", stmt->rset.body.cnt, + cstr_hex_dump(&stmt->rset.body)); + + res = cbor_parser_init(stmt->rset.body.str, stmt->rset.body.cnt, + ES_CBOR_PARSE_FLAGS, &parser, &top_obj); + CHK_RES(stmt, "failed to init CBOR parser for object: [%zu] `%s`", + stmt->rset.body.cnt, cstr_hex_dump(&stmt->rset.body)); +# ifndef NDEBUG +# if 0 // ES uses indefinite-length containers (TODO) which trips this check + /* the _init() doesn't actually validate the object */ + res = cbor_value_validate(&top_obj, ES_CBOR_PARSE_FLAGS); + CHK_RES(stmt, "failed to validate CBOR object: [%zu] `%s`", + stmt->rset.body.cnt, cstr_hex_dump(&stmt->rset.body)); +# endif /*0*/ +# endif /* !NDEBUG */ + + if ((obj_type = cbor_value_get_type(&top_obj)) != CborMapType) { + ERRH(stmt, "top object (of type 0x%x) is not a map.", obj_type); + goto err; + } + res = cbor_map_lookup_keys(&top_obj, keys_no, keys, lens, vals, + /*drain*/FALSE); + CHK_RES(stmt, "failed to lookup answer keys in map"); + + /* + * set the internal "rows" cursor (tinycbor array object) + */ + /* check that we have a valid array object for "rows" */ + if (! cbor_value_is_array(&rows_obj)) { + ERRH(stmt, "no '" PACK_PARAM_ROWS "' array object received in " + "answer: `%s`.", cstr_hex_dump(&stmt->rset.body)); + goto err; + } + /* ES uses indefinite-length arrays -- meh. */ + res = cbor_container_is_empty(rows_obj, &empty); + CHK_RES(stmt, "failed to check if '" PACK_PARAM_ROWS "' array is empty"); + if (empty) { + STMT_FORCE_NODATA(stmt); + DBGH(stmt, "received empty result set."); } else { + stmt->rset.pack.cbor.rows_iter = rows_obj; + stmt->nset ++; + // TODO: get rid of rows-counting / tf_rows + res = cbor_value_is_length_known(&rows_obj) ? + cbor_value_get_array_length(&rows_obj, &stmt->rset.nrows) : + cbor_container_count(rows_obj, &stmt->rset.nrows); + CHK_RES(stmt, "failed to fetch '" PACK_PARAM_ROWS "' array length"); + stmt->tf_rows += stmt->rset.nrows; + DBGH(stmt, "rows received in result set: %zd.", stmt->rset.nrows); + } + + /* + * copy ref to ES'es cursor (if there's one) + */ + if (cbor_value_is_valid(&curs_obj)) { + obj_type = cbor_value_get_type(&curs_obj); + if (obj_type != CborByteStringType) { + ERRH(stmt, "invalid '" PACK_PARAM_CURSOR "' parameter type " + "(0x%x)", obj_type); + goto err; + } /* should have been cleared by now */ - assert(! stmt->rset.ecurs.cnt); + assert(! stmt->rset.pack.cbor.curs.cnt); + res = cbor_value_get_string_chunk(&curs_obj, + &stmt->rset.pack.cbor.curs.str, + &stmt->rset.pack.cbor.curs.cnt); + CHK_RES(stmt, "failed to read '" PACK_PARAM_CURSOR "' value"); + DBGH(stmt, "new paginating cursor: [%zd] `" LCPDL "`.", + stmt->rset.pack.cbor.curs.cnt, LWSTR(&stmt->rset.pack.cbor.curs)); } /* - * process the sent columns, if any. + * process the received columns, if any. 
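+	 * Note: cbor_map_lookup_keys() marks keys it did not find as
+	 * CborInvalidType, so the cbor_value_is_valid() test below also tells
+	 * whether a 'columns' key was present at all -- it is typically absent
+	 * on cursor follow-up pages, where the columns are already attached.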
*/ - if (columns) { + if (cbor_value_is_valid(&cols_obj)) { if (0 < stmt->ird->count) { ERRH(stmt, "%d columns already attached.", stmt->ird->count); - RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); + goto err; } - return attach_columns(stmt, columns); + return attach_columns_cbor(stmt, cols_obj); + } + + return SQL_SUCCESS; +err: + RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, res); +} + +static BOOL attach_error_cbor(SQLHANDLE hnd, cstr_st *body) +{ + CborError res; + CborParser parser; + CborValue top_obj, err_obj, status_obj, rcause_obj, type_obj, reason_obj; + CborType obj_type; + const char *keys[] = { + PACK_PARAM_ERROR, + PACK_PARAM_STATUS + }; + const size_t lens[] = { + sizeof(PACK_PARAM_ERROR) - 1, + sizeof(PACK_PARAM_STATUS) - 1 + }; + size_t keys_cnt = sizeof(keys) / sizeof(keys[0]); + CborValue *vals[] = {&err_obj, &status_obj}; + + const char *err_keys[] = { + PACK_PARAM_ERR_RCAUSE, + PACK_PARAM_ERR_TYPE, + PACK_PARAM_ERR_REASON + }; + size_t err_lens[] = { + sizeof(PACK_PARAM_ERR_RCAUSE) - 1, + sizeof(PACK_PARAM_ERR_TYPE) - 1, + sizeof(PACK_PARAM_ERR_REASON) - 1 + }; + size_t err_keys_cnt = sizeof(err_keys) / sizeof(err_keys[0]); + CborValue *err_vals[] = {&rcause_obj, &type_obj, &reason_obj}; + wstr_st type_wstr, reason_wstr; + + static const wstr_st msg_sep = WSTR_INIT(": "); + wstr_st msg; + int code; + SQLINTEGER status; + wchar_t wbuff[SQL_MAX_MESSAGE_LENGTH]; + const size_t wbuff_cnt = sizeof(wbuff)/sizeof(wbuff[0]) - /*\0 l8r*/1; + size_t n, pos; + + res = cbor_parser_init(body->str, body->cnt, ES_CBOR_PARSE_FLAGS, + &parser, &top_obj); + CHK_RES(hnd, "failed to parse CBOR object: [%zu] `%s`", body->cnt, + cstr_hex_dump(body)); +# ifndef NDEBUG +# if 0 // ES uses indefinite-length containers (TODO) which trips this check + /* the _init() doesn't actually validate the object */ + res = cbor_value_validate(&top_obj, ES_CBOR_PARSE_FLAGS); + CHK_RES(stmt, "failed to validate CBOR object: [%zu] `%s`", + stmt->rset.body.cnt, cstr_hex_dump(&stmt->rset.body)); +# endif /*0*/ +# endif /* !NDEBUG */ + + if ((obj_type = cbor_value_get_type(&top_obj)) != CborMapType) { + ERRH(hnd, "top object (of type 0x%x) is not a map.", obj_type); + goto err; + } + res = cbor_map_lookup_keys(&top_obj, keys_cnt, keys, lens, vals, + /*drain*/FALSE); + CHK_RES(hnd, "failed to lookup answer keys in map"); + + if ((obj_type = cbor_value_get_type(&status_obj)) == CborIntegerType) { + res = cbor_value_get_int_checked(&status_obj, &code); + CHK_RES(hnd, "can't extract status code"); + status = (SQLINTEGER)code; + } else { + ERRH(hnd, "Status object is not of integer type (0x%x).", obj_type); + /* carry on nevertheless */ + status = 0; + } + + if (cbor_value_is_text_string(&err_obj)) { /* "generic" error */ + res = cbor_value_get_utf16_wstr(&err_obj, &msg); + CHK_RES(hnd, "failed to fetch error message"); + } else if ((obj_type = cbor_value_get_type(&status_obj)) == + CborIntegerType) { /* error with root cause */ + /* unpack "error" object */ + res = cbor_map_lookup_keys(&err_obj, err_keys_cnt, err_keys, err_lens, + err_vals, /*drain*/FALSE); + CHK_RES(hnd, "failed to lookup error object keys in map"); + /* "type" and "reason" objects must be text strings */ + if ((! cbor_value_is_text_string(&type_obj)) || + (! 
cbor_value_is_text_string(&reason_obj))) { + ERRH(hnd, "unsupported '" PACK_PARAM_ERROR "' obj structure."); + goto err; + } + res = cbor_value_get_utf16_wstr(&type_obj, &type_wstr); + CHK_RES(hnd, "failed to fetch UTF16 '" PACK_PARAM_ERR_TYPE "'"); + n = type_wstr.cnt < wbuff_cnt ? type_wstr.cnt : wbuff_cnt; + wmemcpy(wbuff, type_wstr.str, n); + pos = n; + if (msg_sep.cnt + pos < wbuff_cnt) { + wmemcpy(wbuff + pos, msg_sep.str, msg_sep.cnt); + pos += msg_sep.cnt; + } + res = cbor_value_get_utf16_wstr(&reason_obj, &reason_wstr); + CHK_RES(hnd, "failed to fetch UTF16 '" PACK_PARAM_ERR_REASON "'"); + n = reason_wstr.cnt + pos < wbuff_cnt ? reason_wstr.cnt : wbuff_cnt - + pos; + wmemcpy(wbuff + pos, reason_wstr.str, n); + pos += n; + assert(pos <= wbuff_cnt); + + wbuff[pos] = '\0'; + msg.str = wbuff; + msg.cnt = pos; } else { - /* no cols available in this answer: check if already received */ + ERRH(hnd, "unsupported '" PACK_PARAM_ERROR "' obj type (0x%x).", + obj_type); + goto err; + } + + ERRH(hnd, "request fail msg: [%zu] `" LWPDL "`.", msg.cnt, LWSTR(&msg)); + post_diagnostic(hnd, SQL_STATE_HY000, msg.str, status); + return TRUE; +err: + return FALSE; +} + +#undef CHK_RES + +/* + * Processes a received answer: + * - takes a dynamic buffer, answ->str, of length answ->cnt. Will handle the + * buff memory even if the call fails. + * - parses it, preparing iterators for SQLFetch()'ing. + */ +SQLRETURN TEST_API attach_answer(esodbc_stmt_st *stmt, cstr_st *answer, + BOOL is_json) +{ + SQLRETURN ret; + size_t old_ird_cnt; + + /* clear any previous result set */ + if (STMT_HAS_RESULTSET(stmt)) { + clear_resultset(stmt, /*on_close*/FALSE); + } + + /* the statement takes ownership of mem obj */ + stmt->rset.body = *answer; + old_ird_cnt = stmt->ird->count; + ret = is_json ? attach_answer_json(stmt) : attach_answer_cbor(stmt); + + /* check if the columns either have just or had already been attached */ + if (SQL_SUCCEEDED(ret)) { if (stmt->ird->count <= 0) { ERRH(stmt, "no columns available in result set; answer: " - "`%.*s`[%zd].", blen, buff, blen); + "`" LCPDL "`.", LCSTR(&stmt->rset.body)); RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); + } else if (old_ird_cnt <= 0) { + /* new columns have just been attached => force compat. check */ + stmt->sql2c_conversion = CONVERSION_UNCHECKED; } } - - return SQL_SUCCESS; + return ret; } -/* parse the error as SQL pluggin generated error */ -static BOOL attach_sql_error(SQLHANDLE hnd, cstr_st *body) +static BOOL attach_error_json(SQLHANDLE hnd, cstr_st *body) { BOOL ret; UJObject obj, o_status, o_error, o_type, o_reason, o_rcause; wstr_st type, reason; wchar_t wbuf[SQL_MAX_MESSAGE_LENGTH]; + wstr_st msg = {.str = wbuf}; int cnt; void *state, *iter; /* following grouped JSON unpacking items must remain in sync */ /* {"error": {..}, "status":200} */ const wchar_t *outer_keys[] = { - MK_WPTR(JSON_ANSWER_ERROR), - MK_WPTR(JSON_ANSWER_STATUS) + MK_WPTR(PACK_PARAM_ERROR), + MK_WPTR(PACK_PARAM_STATUS) }; - const char fmt_outer_keys[] = "ON"; + const char fmt_outer_keys[] = "UN"; int cnt_outer_keys = sizeof(fmt_outer_keys) - /*\0*/1; /* "error": {"root_cause":[?], "type":"..", "reason":".." 
...} */ const wchar_t *err_keys[] = { - MK_WPTR(JSON_ANSWER_ERR_RCAUSE), - MK_WPTR(JSON_ANSWER_ERR_TYPE), - MK_WPTR(JSON_ANSWER_ERR_REASON), + MK_WPTR(PACK_PARAM_ERR_RCAUSE), + MK_WPTR(PACK_PARAM_ERR_TYPE), + MK_WPTR(PACK_PARAM_ERR_REASON), }; const char fmt_err_keys[] = "aSS"; int cnt_err_keys = sizeof(fmt_err_keys) - /*\0*/1; /* "root_cause":[{"type":"..", "reason":".."} ..] */ const wchar_t *r_err_keys[] = { - MK_WPTR(JSON_ANSWER_ERR_TYPE), - MK_WPTR(JSON_ANSWER_ERR_REASON), + MK_WPTR(PACK_PARAM_ERR_TYPE), + MK_WPTR(PACK_PARAM_ERR_REASON), }; const char fmt_r_err_keys[] = "SS"; int cnt_r_err_keys = sizeof(fmt_r_err_keys) - /*\0*/1; @@ -495,66 +916,76 @@ static BOOL attach_sql_error(SQLHANDLE hnd, cstr_st *body) ERRH(hnd, "JSON answer not a SQL error (%s).", UJGetError(state)); goto end; } - /* unpack error object */ - if (UJObjectUnpack(o_error, cnt_err_keys, fmt_err_keys, err_keys, - &o_rcause, &o_type, &o_reason) < cnt_err_keys) { - ERRH(hnd, "failed to unpack error object (%s).", UJGetError(state)); - goto end; - } + if (UJIsString(o_error)) { /* "generic" error */ + msg.str = (SQLWCHAR *)UJReadString(o_error, &msg.cnt); + assert(msg.str[msg.cnt] == '\0'); + } else if (UJIsObject(o_error)) { /* error has root cause */ + /* unpack error object */ + if (UJObjectUnpack(o_error, cnt_err_keys, fmt_err_keys, err_keys, + &o_rcause, &o_type, &o_reason) < cnt_err_keys) { + ERRH(hnd, "failed to unpack error obj (%s).", UJGetError(state)); + goto end; + } - /* this is safe for NULL o_rcause: => -1 */ - cnt = UJLengthArray(o_rcause); - DBGH(hnd, "root cause(s) received: %d.", cnt); - if (0 < cnt) { - /* print the root_cause, if available */ - iter = UJBeginArray(o_rcause); - /* save, UJIterArray() checks against NULL */ - assert(iter); - while (UJIterArray(&iter, &o_rcause)) { /* reuse o_rcause obj */ - /* unpack root error object */ - if (UJObjectUnpack(o_rcause, cnt_r_err_keys, fmt_r_err_keys, - r_err_keys, &o_type, &o_reason) < cnt_r_err_keys) { - ERRH(hnd, "failed to unpack root error object (%s).", - UJGetError(state)); - goto end; /* TODO: continue on error? */ - } else { - /* stop at first element. TODO: is ever [array] > 1? */ - break; + /* this is safe for NULL o_rcause: => -1 */ + cnt = UJLengthArray(o_rcause); + DBGH(hnd, "root cause(s) received: %d.", cnt); + if (0 < cnt) { + /* print the root_cause, if available */ + iter = UJBeginArray(o_rcause); + /* save, UJIterArray() checks against NULL */ + assert(iter); + while (UJIterArray(&iter, &o_rcause)) { /* reuse o_rcause obj */ + /* unpack root error object */ + if (UJObjectUnpack(o_rcause, cnt_r_err_keys, fmt_r_err_keys, + r_err_keys, &o_type, &o_reason) < cnt_r_err_keys) { + ERRH(hnd, "failed to unpack root error object (%s).", + UJGetError(state)); + goto end; /* TODO: continue on error? */ + } else { + /* stop at first element. TODO: is ever [array] > 1? 
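+				 * (illustrative only: in a hypothetical answer such as
+				 * {"error": {"root_cause": [{"type": "parsing_exception",
+				 * "reason": "..."}], "type": "...", "reason": "..."},
+				 * "status": 400}, only the first root_cause entry is reported)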
*/ + break; + } } } - } - /* else: root_cause not available, print "generic" reason */ - type.str = (SQLWCHAR *)UJReadString(o_type, &type.cnt); - reason.str = (SQLWCHAR *)UJReadString(o_reason, &reason.cnt); - - /* should be empty string in case of mismatch */ - assert(type.str && reason.str); - DBGH(hnd, "reported failure: type: [%zd] `" LWPDL "`, reason: [%zd] `" - LWPDL "`, status: %d.", type.cnt, LWSTR(&type), - reason.cnt, LWSTR(&reason), UJNumericInt(o_status)); - - /* swprintf will always append the 0-term ("A null character is appended - * after the last character written."), but fail if formated string would - * overrun the buffer size (in an equivocal way: overrun encoding - * error). */ - errno = 0; - cnt = swprintf(wbuf, sizeof(wbuf)/sizeof(*wbuf), - WPFWP_LDESC L": " WPFWP_LDESC, LWSTR(&type), LWSTR(&reason)); - assert(cnt); - if (cnt < 0) { - if (errno) { - ERRH(hnd, "printing the received error message failed."); - goto end; + /* else: root_cause not available, print "generic" reason */ + type.str = (SQLWCHAR *)UJReadString(o_type, &type.cnt); + reason.str = (SQLWCHAR *)UJReadString(o_reason, &reason.cnt); + + /* should be empty string in case of mismatch */ + assert(type.str && reason.str); + DBGH(hnd, "reported failure: type: [%zd] `" LWPDL "`, reason: [%zd] `" + LWPDL "`, status: %d.", type.cnt, LWSTR(&type), + reason.cnt, LWSTR(&reason), UJNumericInt(o_status)); + + /* swprintf will always append the 0-term ("A null character is + * appended after the last character written."), but fail if formated + * string would overrun the buffer size (in an equivocal way: overrun + * encoding error). */ + errno = 0; + cnt = swprintf(wbuf, sizeof(wbuf)/sizeof(*wbuf), + WPFWP_LDESC L": " WPFWP_LDESC, LWSTR(&type), LWSTR(&reason)); + assert(cnt); + if (cnt < 0) { + if (errno) { + ERRH(hnd, "printing the received error message failed."); + goto end; + } + /* partial error message printed */ + WARNH(hnd, "current error buffer to small (%zu) for full error " + "detail.", sizeof(wbuf)/sizeof(*wbuf)); + cnt = sizeof(wbuf)/sizeof(*wbuf) - 1; } - /* partial error message printed */ - WARNH(hnd, "current error buffer to small (%zu) for full error " - "detail.", sizeof(wbuf)/sizeof(*wbuf)); - cnt = sizeof(wbuf)/sizeof(*wbuf) - 1; + msg.cnt = cnt; + assert(wbuf[cnt] == L'\0'); + } else { + ERRH(hnd, "unsupported '" PACK_PARAM_ERROR "' obj type (%d).", + UJGetType(o_error)); + goto end; } - assert(wbuf[cnt] == L'\0'); - ERRH(hnd, "request failure reason: [%d] `" LWPD "`.", cnt, wbuf); + ERRH(hnd, "request fail msg: [%zd] `" LWPDL "`.", msg.cnt, LWSTR(&msg)); - post_diagnostic(hnd, SQL_STATE_HY000, wbuf, UJNumericInt(o_status)); + post_diagnostic(hnd, SQL_STATE_HY000, msg.str, UJNumericInt(o_status)); ret = TRUE; end: @@ -568,28 +999,41 @@ static BOOL attach_sql_error(SQLHANDLE hnd, cstr_st *body) /* * Parse an error and push it as statement diagnostic. */ -SQLRETURN TEST_API attach_error(SQLHANDLE hnd, cstr_st *body, int code) +SQLRETURN TEST_API attach_error(SQLHANDLE hnd, cstr_st *body, BOOL is_json, + long code) { - char buff[SQL_MAX_MESSAGE_LENGTH]; - size_t to_copy; + wchar_t *buff; + int n; + BOOL formatted; - ERRH(hnd, "request failure %d body: len: %zu, content: `%.*s`.", code, - body->cnt, LCSTR(body)); + ERRH(hnd, "request failed with %ld (body len: %zu).", code, body->cnt); if (body->cnt) { - /* try read it as ES/SQL error */ - if (! attach_sql_error(hnd, body)) { - /* if not an ES/SQL failure, attach it as-is (plus \0) */ - to_copy = sizeof(buff) <= body->cnt ? 
sizeof(buff) - 1 : body->cnt; - memcpy(buff, body->str, to_copy); - buff[to_copy] = '\0'; - - post_c_diagnostic(hnd, SQL_STATE_08S01, buff, code); + /* try to decode it as JSON/CBOR */ + formatted = is_json ? attach_error_json(hnd, body) : + attach_error_cbor(hnd, body); + if (! formatted) { + /* if not an ES-formatted failure, attach it as-is (plus \0) */ + if (! (buff = malloc((body->cnt + 1) * sizeof(wchar_t)))) { + ERRNH(hnd, "OOM: %zu wchar_t.", body->cnt); + goto end; + } + n = U8MB_TO_U16WC(body->str, body->cnt, buff, body->cnt); + if (0 < n) { + buff[n] = '\0'; + post_diagnostic(hnd, SQL_STATE_08S01, buff, code); + } + free(buff); + if (n <= 0) { + ERRH(hnd, "failed to UTF8/UTF16 convert: 0x%x.", WAPI_ERRNO()); + goto end; + } } RET_STATE(HDRH(hnd)->diag.state); } +end: return post_diagnostic(hnd, SQL_STATE_08S01, NULL, code); } @@ -608,9 +1052,9 @@ SQLRETURN TEST_API attach_sql(esodbc_stmt_st *stmt, assert(! stmt->u8sql.str); if (! wstr_to_utf8(&sqlw, &stmt->u8sql)) { - ERRNH(stmt, "conversion UCS2->UTF8 of SQL [%zu] `" LWPDL "` failed.", + ERRNH(stmt, "conversion UTF16->UTF8 of SQL [%zu] `" LWPDL "` failed.", sqlcnt, LWSTR(&sqlw)); - RET_HDIAG(stmt, SQL_STATE_HY000, "UCS2/UTF8 conversion failure", 0); + RET_HDIAG(stmt, SQL_STATE_HY000, "UTF16/UTF8 conversion failure", 0); } /* if the app correctly SQL_CLOSE'es the statement, this would not be @@ -814,26 +1258,26 @@ SQLRETURN copy_one_row(esodbc_stmt_st *stmt, SQLULEN pos) } while (0) /* is current object an array? */ - if (! UJIsArray(stmt->rset.row_array)) { + if (! UJIsArray(stmt->rset.pack.json.row_array)) { ERRH(stmt, "one '%s' element (#%zd) in result set not an array; type:" - " %d.", JSON_ANSWER_ROWS, stmt->rset.vrows, - UJGetType(stmt->rset.row_array)); + " %d.", PACK_PARAM_ROWS, stmt->rset.vrows, + UJGetType(stmt->rset.pack.json.row_array)); RET_ROW_DIAG(SQL_STATE_HY000, MSG_INV_SRV_ANS, SQL_NO_COLUMN_NUMBER); } /* are there elements in this row array to at least match the number of * columns? */ - if (UJLengthArray(stmt->rset.row_array) < ird->count) { + if (UJLengthArray(stmt->rset.pack.json.row_array) < ird->count) { ERRH(stmt, "current row counts less elements (%d) than columns (%hd)", - UJLengthArray(stmt->rset.row_array), ird->count); + UJLengthArray(stmt->rset.pack.json.row_array), ird->count); RET_ROW_DIAG(SQL_STATE_HY000, MSG_INV_SRV_ANS, SQL_NO_COLUMN_NUMBER); - } else if (ird->count < UJLengthArray(stmt->rset.row_array)) { + } else if (ird->count < UJLengthArray(stmt->rset.pack.json.row_array)) { WARNH(stmt, "current row counts more elements (%d) than columns (%hd)", - UJLengthArray(stmt->rset.row_array), ird->count); + UJLengthArray(stmt->rset.pack.json.row_array), ird->count); } /* get an iterator over the row array */ - if (! (iter_row = UJBeginArray(stmt->rset.row_array))) { + if (! (iter_row = UJBeginArray(stmt->rset.pack.json.row_array))) { ERRH(stmt, "Failed to obtain iterator on row (#%zd): %s.", rowno, - UJGetError(stmt->rset.state)); + UJGetError(stmt->rset.pack.json.state)); RET_ROW_DIAG(SQL_STATE_HY000, MSG_INV_SRV_ANS, SQL_NO_COLUMN_NUMBER); } @@ -1009,6 +1453,12 @@ SQLRETURN EsSQLFetch(SQLHSTMT StatementHandle) RET_HDIAGS(stmt, SQL_STATE_HY010); } + /* TODO: remove guard on CBOR complete implementation. */ + if (! HDRH(stmt)->dbc->pack_json) { + FIXME; + return SQL_NO_DATA; + } + /* Check if the data [type] stored in DB is compatiblie with the buffer * [type] the application provides. 
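+	 * (attach_answer() resets stmt->sql2c_conversion back to
+	 * CONVERSION_UNCHECKED whenever a fresh set of columns is attached, so
+	 * this check is redone for every new result set layout.)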
This test can only be done at * fetch-time, since the application can unbind/rebind columns at any time @@ -1055,10 +1505,11 @@ SQLRETURN EsSQLFetch(SQLHSTMT StatementHandle) i = 0; /* for all rows in rowset/array, iterate over rows in current resultset */ while (i < ard->array_size) { - if (! UJIterArray(&stmt->rset.rows_iter, &stmt->rset.row_array)) { + if (! UJIterArray(&stmt->rset.pack.json.rows_iter, + &stmt->rset.pack.json.row_array)) { DBGH(stmt, "ran out of rows in current result set: nrows=%zd, " "vrows=%zd.", stmt->rset.nrows, stmt->rset.vrows); - if (stmt->rset.ecurs.cnt) { /* is there an Elastic cursor? */ + if (stmt->rset.pack.json.curs.cnt) { /* is there an ES cursor? */ ret = EsSQLExecute(stmt); if (! SQL_SUCCEEDED(ret)) { ERRH(stmt, "failed to fetch next results."); @@ -1155,7 +1606,7 @@ static SQLRETURN gd_checks(esodbc_stmt_st *stmt, SQLUSMALLINT colno) RET_HDIAGS(stmt, SQL_STATE_HYC00); } /* has SQLFetch() been called? rset is reset with every new result */ - if (! stmt->rset.row_array) { + if (! stmt->rset.pack.json.row_array) { /* DM should have detected this case */ ERRH(stmt, "SQLFetch() hasn't yet been called on result set."); RET_HDIAGS(stmt, SQL_STATE_24000); @@ -1385,26 +1836,32 @@ SQLRETURN EsSQLMoreResults(SQLHSTMT hstmt) return SQL_NO_DATA; } -SQLRETURN close_es_answ_handler(esodbc_stmt_st *stmt, char *buff, size_t blen) +SQLRETURN close_es_answ_handler(esodbc_stmt_st *stmt, cstr_st *body, + BOOL is_json) { UJObject obj, succeeded; void *state = NULL; int unpacked; const wchar_t *keys[] = { - MK_WPTR(JSON_ANSWER_CURS_CLOSE) + MK_WPTR(PACK_PARAM_CURS_CLOSE) }; - obj = UJDecode(buff, blen, NULL, &state); + /* TODO: remove guard on CBOR complete implementation. */ + if (! is_json) { + FIXME; + goto err; + } + obj = UJDecode(body->str, body->cnt, NULL, &state); if (! obj) { - ERRH(stmt, "failed to decode JSON answer: %s ([%zu] `%.*s`).", - state ? UJGetError(state) : "", blen, blen, buff); + ERRH(stmt, "failed to decode JSON answer: %s ([%zu] `" LTPDL "`).", + state ? UJGetError(state) : "", body->cnt, LCSTR(body)); goto err; } unpacked = UJObjectUnpack(obj, 1, "B", keys, &succeeded); if (unpacked < 1) { - ERRH(stmt, "failed to unpack JSON answer (`%.*s`): %s.", - blen, buff, UJGetError(state)); + ERRH(stmt, "failed to unpack JSON answer: %s ([%zu] `" LCPDL "`).", + UJGetError(state), body->cnt, LCSTR(body)); goto err; } switch (UJGetType(succeeded)) { @@ -1414,15 +1871,15 @@ SQLRETURN close_es_answ_handler(esodbc_stmt_st *stmt, char *buff, size_t blen) /* no break: not a driver/client error -- server would answer with * an error answer */ case UJT_True: - free(buff); + free(body->str); return SQL_SUCCESS; default: - ERRH(stmt, "invalid object type in answer: %d (`%.*s`).", - UJGetType(succeeded), blen, buff); + ERRH(stmt, "invalid obj type in answer: %d ([%zu] `" LTPDL "`).", + UJGetType(succeeded), body->cnt, LCSTR(body)); } err: - free(buff); + free(body->str); RET_HDIAG(stmt, SQL_STATE_HY000, MSG_INV_SRV_ANS, 0); } @@ -1432,24 +1889,30 @@ SQLRETURN close_es_cursor(esodbc_stmt_st *stmt) char buff[ESODBC_BODY_BUF_START_SIZE]; cstr_st body = {buff, sizeof(buff)}; - if (! stmt->rset.ecurs.cnt) { + if (! 
STMT_HAS_CURSOR(stmt)) { DBGH(stmt, "no cursor to close."); } ret = serialize_statement(stmt, &body); if (SQL_SUCCEEDED(ret)) { - ret = post_json(stmt, ESODBC_CURL_CLOSE, &body); + ret = curl_post(stmt, ESODBC_CURL_CLOSE, &body); } if (buff != body.str) { free(body.str); } - DBGH(stmt, "cursor cleared (was: [%zd] `" LWPDL "`).", - stmt->rset.ecurs.cnt, LWSTR(&stmt->rset.ecurs)); - /* the actual freeing occurs in clear_resultset() */ - stmt->rset.ecurs.cnt = 0; - stmt->rset.ecurs.str = NULL; + /* the actual cursor freeing occurs in clear_resultset() */ + if (HDRH(stmt)->dbc->pack_json) { + DBGH(stmt, "clearing JSON cursor: [%zd] `" LWPDL "`.", + stmt->rset.pack.json.curs.cnt, LWSTR(&stmt->rset.pack.json.curs)); + stmt->rset.pack.json.curs.cnt = 0; + } else { + DBGH(stmt, "clearing CBOR cursor: [%zd] `%s`.", + stmt->rset.pack.cbor.curs.cnt, + cstr_hex_dump(&stmt->rset.pack.cbor.curs)); + stmt->rset.pack.cbor.curs.cnt = 0; + } return ret; } @@ -1654,8 +2117,8 @@ static esodbc_estype_st *match_es_type(esodbc_rec_st *arec, /* * https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/sending-long-data * Note: must use EsSQLSetDescFieldW() for param data-type setting, to call - * set_defaults_from_type(), to meet the "Other fields implicitly set" - * requirements from the page linked in set_defaults_from_type() comments. + * set_defaults_from_meta_type(), to meet the "Other fields implicitly set" + * requirements from the page linked in set_defaults_from_meta_type() comments. * * "Bindings remain in effect until the application calls SQLBindParameter * again, calls SQLFreeStmt with the SQL_RESET_PARAMS option, or calls @@ -1980,7 +2443,7 @@ static SQLRETURN convert_param_val(esodbc_rec_st *arec, esodbc_rec_st *irec, /* Forms the JSON array with params: * [{"type": "", "value": }(,etc)*] */ static SQLRETURN serialize_params(esodbc_stmt_st *stmt, char *dest, - size_t *len) + size_t *len, BOOL as_json) { /* JSON keys for building one parameter object */ # define JSON_KEY_TYPE "{\"type\": \"" @@ -1991,6 +2454,10 @@ static SQLRETURN serialize_params(esodbc_stmt_st *stmt, char *dest, SQLSMALLINT i; size_t l, pos; + if (! as_json) { + FIXME; // FIXME; add CBOR support + } + pos = 0; if (dest) { dest[pos] = '['; @@ -2051,52 +2518,79 @@ static SQLRETURN serialize_params(esodbc_stmt_st *stmt, char *dest, # undef JSON_KEY_VALUE } -static inline size_t copy_bool_val(char *dest, BOOL val) +static SQLRETURN statement_cbor_len(esodbc_stmt_st *stmt, size_t *outlen, + size_t *keys) { - if (val) { - memcpy(dest, "true", sizeof("true") - 1); - return sizeof("true") - 1; - } else { - memcpy(dest, "false", sizeof("false") - 1); - return sizeof("false") - 1; - } -} + SQLRETURN ret; + size_t bodylen, len; + esodbc_dbc_st *dbc = HDRH(stmt)->dbc; -/* - * Build a serialized JSON object out of the statement. - * If resulting string fits into the given buff, the result is copied in it; - * othewise a new one will be allocated and returned. - */ -SQLRETURN TEST_API serialize_statement(esodbc_stmt_st *stmt, cstr_st *buff) -{ - SQLRETURN ret = SQL_SUCCESS; - size_t bodylen, pos, len; - char *body; - esodbc_dbc_st *dbc = stmt->hdr.dbc; - esodbc_desc_st *apd = stmt->apd; - - /* TODO: move escaping/x-coding (to JSON or CBOR) in attach_sql() and/or - * attach_answer() to avoid these operations for each execution of the - * statement (especially for the SQL statement; the cursor might not - * always be used - if app decides to no longer fetch - but would then - * clean this function). 
*/ + /* Initial all-encompassing map preamble. */ + bodylen = cbor_nn_hdr_len(REST_REQ_KEY_COUNT); /* max count */ - /* enforced in EsSQLSetDescFieldW(SQL_DESC_ARRAY_SIZE) */ - assert(apd->array_size <= 1); + *keys = 1; /* cursor or query */ + if (stmt->rset.pack.cbor.curs.cnt) { /* eval CURSOR object length */ + bodylen += cbor_str_obj_len(sizeof(REQ_KEY_CURSOR) - 1); + bodylen += cbor_str_obj_len(stmt->rset.pack.cbor.curs.cnt); + } else { /* eval QUERY object length */ + bodylen += cbor_str_obj_len(sizeof(REQ_KEY_QUERY) - 1); + bodylen += cbor_str_obj_len(stmt->u8sql.cnt); - if (! update_tz_param()) { - RET_HDIAG(stmt, SQL_STATE_HY000, - "Failed to update the timezone parameter", 0); - } + /* does the statement have any parameters? */ + if (stmt->apd->count) { + bodylen += cbor_str_obj_len(sizeof(REQ_KEY_PARAMS) - 1); + ret = serialize_params(stmt, /* no copy, just eval */NULL, &len, + /*as JSON*/FALSE); + if (! SQL_SUCCEEDED(ret)) { + ERRH(stmt, "failed to eval parameters length"); + return ret; + } + bodylen += len; + (*keys) ++; + } + + /* does the statement have any fetch_size? */ + if (dbc->fetch.slen) { + bodylen += cbor_str_obj_len(sizeof(REQ_KEY_FETCH) - 1); + bodylen += CBOR_INT_OBJ_LEN(dbc->fetch.max); + (*keys) ++; + } + /* "field_multi_value_leniency": true/false */ + bodylen += cbor_str_obj_len(sizeof(REQ_KEY_MODE) - 1); + bodylen += CBOR_OBJ_BOOL_LEN; + /* "index_include_frozen": true/false */ + bodylen += cbor_str_obj_len(sizeof(REQ_KEY_MODE) - 1); + bodylen += CBOR_OBJ_BOOL_LEN; + /* "time_zone": "-05:45" */ + bodylen += cbor_str_obj_len(sizeof(REQ_KEY_MODE) - 1); + bodylen += cbor_str_obj_len(tz_param.cnt); /* lax len */ + *keys += 3; /* field_m._val., idx._inc._frozen, time_zone */ + } + bodylen += cbor_str_obj_len(sizeof(REQ_KEY_MODE) - 1); + bodylen += cbor_str_obj_len(sizeof(REQ_VAL_MODE) - 1); + bodylen += cbor_str_obj_len(sizeof(REQ_KEY_CLT_ID) - 1); + bodylen += cbor_str_obj_len(sizeof(REQ_VAL_CLT_ID) - 1); + *keys += 2; /* mode, client_id */ + /* TODO: request_/page_timeout */ + + *outlen = bodylen; + return SQL_SUCCESS; +} + +static SQLRETURN statement_json_len(esodbc_stmt_st *stmt, size_t *outlen) +{ + SQLRETURN ret; + size_t bodylen, len; + esodbc_dbc_st *dbc = HDRH(stmt)->dbc; bodylen = 1; /* { */ /* evaluate how long the stringified REST object will be */ - if (stmt->rset.ecurs.cnt) { /* eval CURSOR object length */ + if (stmt->rset.pack.json.curs.cnt) { /* eval CURSOR object length */ /* assumptions: (1) the cursor is a Base64 encoded string and thus * (2) no JSON escaping needed. - * (both assertions checked on copy, below). */ + * (both assumptions checked on copy, below). */ bodylen += sizeof(JSON_KEY_CURSOR) - 1; /* "cursor": */ - bodylen += stmt->rset.ecurs.cnt; + bodylen += stmt->rset.pack.json.curs.cnt; bodylen += 2; /* 2x `"` for cursor value */ } else { /* eval QUERY object length */ bodylen += sizeof(JSON_KEY_QUERY) - 1; @@ -2104,11 +2598,11 @@ SQLRETURN TEST_API serialize_statement(esodbc_stmt_st *stmt, cstr_st *buff) bodylen += 2; /* 2x `"` for query value */ /* does the statement have any parameters? */ - if (apd->count) { + if (stmt->apd->count) { bodylen += sizeof(JSON_KEY_PARAMS) - 1; /* serialize_params will count/copy array delimiters (`[`, `]`) */ - ret = serialize_params(stmt, /* don't copy, just eval len */NULL, - &len); + ret = serialize_params(stmt, /* no copy, just eval */NULL, &len, + /*as JSON*/TRUE); if (! 
SQL_SUCCEEDED(ret)) { ERRH(stmt, "failed to eval parameters length"); return ret; @@ -2130,47 +2624,146 @@ SQLRETURN TEST_API serialize_statement(esodbc_stmt_st *stmt, cstr_st *buff) bodylen += sizeof(JSON_KEY_TIMEZONE) - 1; bodylen += tz_param.cnt; } - /* TODO: request_/page_timeout */ bodylen += sizeof(JSON_KEY_VAL_MODE) - 1; /* "mode": */ bodylen += sizeof(JSON_KEY_CLT_ID) - 1; /* "client_id": */ + /* TODO: request_/page_timeout */ bodylen += 1; /* } */ - /* allocate memory for the stringified buffer, if needed */ - if (buff->cnt < bodylen) { - INFOH(dbc, "local buffer too small (%zd), need %zdB; will alloc.", - buff->cnt, bodylen); - INFOH(dbc, "local buffer too small, SQL: `" LCPDL "`.", - LCSTR(&stmt->u8sql)); - body = malloc(bodylen); - if (! body) { - ERRNH(stmt, "failed to alloc %zdB.", bodylen); - RET_HDIAGS(stmt, SQL_STATE_HY001); + *outlen = bodylen; + return SQL_SUCCESS; +} + +#define FAIL_ON_CBOR_ERR(_hnd, _cbor_err) \ + do { \ + if (err != CborNoError) { \ + ERRH(_hnd, "CBOR: %s.", cbor_error_string(_cbor_err)); \ + RET_HDIAG(_hnd, SQL_STATE_HY000, "CBOR serialization error", \ + _cbor_err); \ + } \ + } while (0) + +static SQLRETURN serialize_to_cbor(esodbc_stmt_st *stmt, cstr_st *dest, + size_t keys) +{ + CborEncoder encoder, map; + CborError err; + cstr_st tz; + esodbc_dbc_st *dbc = HDRH(stmt)->dbc; + + cbor_encoder_init(&encoder, dest->str, dest->cnt, /*flags*/0); + err = cbor_encoder_create_map(&encoder, &map, keys); + FAIL_ON_CBOR_ERR(stmt, err); + + if (stmt->rset.pack.cbor.curs.cnt) { /* copy CURSOR object */ + err = cbor_encode_text_string(&map, REQ_KEY_CURSOR, + sizeof(REQ_KEY_CURSOR) - 1); + FAIL_ON_CBOR_ERR(stmt, err); + err = cbor_encode_text_string(&map, stmt->rset.pack.cbor.curs.str, + stmt->rset.pack.cbor.curs.cnt); + FAIL_ON_CBOR_ERR(stmt, err); + } else { /* copy QUERY object */ + err = cbor_encode_text_string(&map, REQ_KEY_QUERY, + sizeof(REQ_KEY_QUERY) - 1); + FAIL_ON_CBOR_ERR(stmt, err); + err = cbor_encode_text_string(&map, stmt->u8sql.str, stmt->u8sql.cnt); + FAIL_ON_CBOR_ERR(stmt, err); + + /* does the statement have any parameters? */ + if (stmt->apd->count) { + FIXME; + } + /* does the statement have any fetch_size? 
*/ + if (dbc->fetch.slen) { + err = cbor_encode_text_string(&map, REQ_KEY_FETCH, + sizeof(REQ_KEY_FETCH) - 1); + FAIL_ON_CBOR_ERR(stmt, err); + err = cbor_encode_uint(&map, dbc->fetch.slen); + FAIL_ON_CBOR_ERR(stmt, err); } - buff->str = body; + /* "field_multi_value_leniency": true/false */ + err = cbor_encode_text_string(&map, REQ_KEY_MULTIVAL, + sizeof(REQ_KEY_MULTIVAL) - 1); + FAIL_ON_CBOR_ERR(stmt, err); + err = cbor_encode_boolean(&map, dbc->mfield_lenient); + FAIL_ON_CBOR_ERR(stmt, err); + /* "index_include_frozen": true/false */ + err = cbor_encode_text_string(&map, REQ_KEY_IDX_FROZEN, + sizeof(REQ_KEY_IDX_FROZEN) - 1); + FAIL_ON_CBOR_ERR(stmt, err); + err = cbor_encode_boolean(&map, dbc->idx_inc_frozen); + FAIL_ON_CBOR_ERR(stmt, err); + /* "time_zone": "-05:45" */ + err = cbor_encode_text_string(&map, REQ_KEY_TIMEZONE, + sizeof(REQ_KEY_TIMEZONE) - 1); + FAIL_ON_CBOR_ERR(stmt, err); + if (dbc->apply_tz) { + tz = tz_param; + } else { + tz = (cstr_st)CSTR_INIT(REQ_VAL_TIMEZONE_Z); + } + err = cbor_encode_text_string(&map, tz.str, tz.cnt); + FAIL_ON_CBOR_ERR(stmt, err); + } + /* mode : ODBC */ + err = cbor_encode_text_string(&map, REQ_KEY_MODE, + sizeof(REQ_KEY_MODE) - 1); + FAIL_ON_CBOR_ERR(stmt, err); + err = cbor_encode_text_string(&map, REQ_VAL_MODE, + sizeof(REQ_VAL_MODE) - 1); + FAIL_ON_CBOR_ERR(stmt, err); + /* client_id : odbcXX */ + err = cbor_encode_text_string(&map, REQ_KEY_CLT_ID, + sizeof(REQ_KEY_CLT_ID) - 1); + FAIL_ON_CBOR_ERR(stmt, err); + err = cbor_encode_text_string(&map, REQ_VAL_CLT_ID, + sizeof(REQ_VAL_CLT_ID) - 1); + FAIL_ON_CBOR_ERR(stmt, err); + + err = cbor_encoder_close_container(&encoder, &map); + FAIL_ON_CBOR_ERR(stmt, err); + + dest->cnt = cbor_encoder_get_buffer_size(&encoder, dest->str); + DBGH(stmt, "request serialized to CBOR: [%zd] `0x%s`.", dest->cnt, + cstr_hex_dump(dest)); + + return SQL_SUCCESS; +} + +static inline size_t copy_bool_val(char *dest, BOOL val) +{ + if (val) { + memcpy(dest, "true", sizeof("true") - 1); + return sizeof("true") - 1; } else { - body = buff->str; + memcpy(dest, "false", sizeof("false") - 1); + return sizeof("false") - 1; } +} + +static SQLRETURN serialize_to_json(esodbc_stmt_st *stmt, cstr_st *dest) +{ + SQLRETURN ret; + size_t pos, len; + char *body = dest->str; + esodbc_dbc_st *dbc = HDRH(stmt)->dbc; pos = 0; body[pos ++] = '{'; /* build the actual stringified JSON object */ - if (stmt->rset.ecurs.cnt) { /* copy CURSOR object */ + if (stmt->rset.pack.json.curs.cnt) { /* copy CURSOR object */ memcpy(body + pos, JSON_KEY_CURSOR, sizeof(JSON_KEY_CURSOR) - 1); pos += sizeof(JSON_KEY_CURSOR) - 1; body[pos ++] = '"'; - if (ascii_w2c(stmt->rset.ecurs.str, body + pos, - stmt->rset.ecurs.cnt) <= 0) { - if (buff->cnt < bodylen) { /* has it been alloc'd? 
*/ - free(body); - } + if (ascii_w2c(stmt->rset.pack.json.curs.str, body + pos, + stmt->rset.pack.json.curs.cnt) <= 0) { ERRH(stmt, "failed to convert cursor `" LWPDL "` to ASCII.", - LWSTR(&stmt->rset.ecurs)); + LWSTR(&stmt->rset.pack.json.curs)); RET_HDIAGS(stmt, SQL_STATE_24000); } else { /* no character needs JSON escaping */ - assert(stmt->rset.ecurs.cnt == json_escape(body + pos, - stmt->rset.ecurs.cnt, NULL, 0)); - pos += stmt->rset.ecurs.cnt; + assert(stmt->rset.pack.json.curs.cnt == json_escape(body + pos, + stmt->rset.pack.json.curs.cnt, NULL, 0)); + pos += stmt->rset.pack.json.curs.cnt; } body[pos ++] = '"'; } else { /* copy QUERY object */ @@ -2178,15 +2771,15 @@ SQLRETURN TEST_API serialize_statement(esodbc_stmt_st *stmt, cstr_st *buff) pos += sizeof(JSON_KEY_QUERY) - 1; body[pos ++] = '"'; pos += json_escape(stmt->u8sql.str, stmt->u8sql.cnt, body + pos, - bodylen - pos); + dest->cnt - pos); body[pos ++] = '"'; /* does the statement have any parameters? */ - if (apd->count) { + if (stmt->apd->count) { memcpy(body + pos, JSON_KEY_PARAMS, sizeof(JSON_KEY_PARAMS) - 1); pos += sizeof(JSON_KEY_PARAMS) - 1; /* serialize_params will count/copy array delimiters (`[`, `]`) */ - ret = serialize_params(stmt, body + pos, &len); + ret = serialize_params(stmt, body + pos, &len, /*as JSON*/TRUE); if (! SQL_SUCCEEDED(ret)) { ERRH(stmt, "failed to serialize parameters"); return ret; @@ -2206,7 +2799,7 @@ SQLRETURN TEST_API serialize_statement(esodbc_stmt_st *stmt, cstr_st *buff) pos += copy_bool_val(body + pos, dbc->mfield_lenient); /* "index_include_frozen": true/false */ memcpy(body + pos, JSON_KEY_IDX_FROZEN, - sizeof(JSON_KEY_IDX_FROZEN) - 1); + sizeof(JSON_KEY_IDX_FROZEN) - 1); pos += sizeof(JSON_KEY_IDX_FROZEN) - 1; pos += copy_bool_val(body + pos, dbc->idx_inc_frozen); /* "time_zone": "-05:45" */ @@ -2231,19 +2824,53 @@ SQLRETURN TEST_API serialize_statement(esodbc_stmt_st *stmt, cstr_st *buff) pos += sizeof(JSON_KEY_CLT_ID) - 1; body[pos ++] = '}'; - buff->cnt = pos; + dest->cnt = pos; - INFOH(stmt, "JSON request serialized to: [%zd] `" LCPDL "`.", pos, - LCSTR(buff)); - return ret; + INFOH(stmt, "request serialized to JSON: [%zd] `" LCPDL "`.", pos, + LCSTR(dest)); + return SQL_SUCCESS; +} + +/* + * Build a serialized JSON/CBOR object out of the statement. + * If resulting string fits into the given buff, the result is copied in it; + * othewise a new one will be allocated and returned. + */ +SQLRETURN TEST_API serialize_statement(esodbc_stmt_st *stmt, cstr_st *dest) +{ + SQLRETURN ret; + size_t len, keys; + esodbc_dbc_st *dbc = HDRH(stmt)->dbc; + + /* enforced in EsSQLSetDescFieldW(SQL_DESC_ARRAY_SIZE) */ + assert(stmt->apd->array_size <= 1); + + if (! update_tz_param()) { + RET_HDIAG(stmt, SQL_STATE_HY000, + "Failed to update the timezone parameter", 0); + } + + ret = dbc->pack_json ? statement_json_len(stmt, &len) : + statement_cbor_len(stmt, &len, &keys); + if (! SQL_SUCCEEDED(ret)) { + return ret; + } + + /* allocate memory for the stringified statement, if needed */ + if (dest->cnt < len) { + INFOH(dbc, "local buffer too small (%zd), need %zdB; will alloc.", + dest->cnt, len); + DBGH(dbc, "local buffer too small, SQL: `" LCPDL "`.", + LCSTR(&stmt->u8sql)); + if (! 
(dest->str = malloc(len))) { + ERRNH(stmt, "failed to alloc %zdB.", len); + RET_HDIAGS(stmt, SQL_STATE_HY001); + } + dest->cnt = len; + } -# undef JSON_KEY_QUERY -# undef JSON_KEY_CURSOR -# undef JSON_KEY_PARAMS -# undef JSON_KEY_FETCH -# undef JSON_KEY_REQ_TOUT -# undef JSON_KEY_PAGE_TOUT -# undef JSON_KEY_TIME_ZONE + return dbc->pack_json ? serialize_to_json(stmt, dest) : + serialize_to_cbor(stmt, dest, keys); } @@ -2289,7 +2916,7 @@ SQLRETURN EsSQLExecute(SQLHSTMT hstmt) ret = serialize_statement(stmt, &body); if (SQL_SUCCEEDED(ret)) { - ret = post_json(stmt, ESODBC_CURL_QUERY, &body); + ret = curl_post(stmt, ESODBC_CURL_QUERY, &body); } if (buff != body.str) { @@ -2712,7 +3339,7 @@ SQLRETURN EsSQLRowCount(_In_ SQLHSTMT StatementHandle, _Out_ SQLLEN *RowCount) DBGH(stmt, "current resultset rows count: %zd.", stmt->rset.nrows); *RowCount = (SQLLEN)stmt->rset.nrows; - if (stmt->rset.ecurs.cnt) { + if (STMT_HAS_CURSOR(stmt)) { /* fetch_size or scroller size chunks the result */ WARNH(stmt, "this function will only return the row count of the " "partial result set available."); diff --git a/driver/queries.h b/driver/queries.h index 322feb79..d789c988 100644 --- a/driver/queries.h +++ b/driver/queries.h @@ -11,9 +11,10 @@ BOOL queries_init(); void clear_resultset(esodbc_stmt_st *stmt, BOOL on_close); -SQLRETURN TEST_API attach_answer(esodbc_stmt_st *stmt, char *buff, - size_t blen); -SQLRETURN TEST_API attach_error(SQLHANDLE hnd, cstr_st *body, int code); +SQLRETURN TEST_API attach_answer(esodbc_stmt_st *stmt, cstr_st *answer, + BOOL is_json); +SQLRETURN TEST_API attach_error(SQLHANDLE hnd, cstr_st *body, BOOL is_json, + long code); SQLRETURN TEST_API attach_sql(esodbc_stmt_st *stmt, const SQLWCHAR *sql, size_t tlen); void detach_sql(esodbc_stmt_st *stmt); @@ -21,7 +22,8 @@ esodbc_estype_st *lookup_es_type(esodbc_dbc_st *dbc, SQLSMALLINT es_type, SQLULEN col_size); SQLRETURN TEST_API serialize_statement(esodbc_stmt_st *stmt, cstr_st *buff); SQLRETURN close_es_cursor(esodbc_stmt_st *stmt); -SQLRETURN close_es_answ_handler(esodbc_stmt_st *stmt, char *buff, size_t blen); +SQLRETURN close_es_answ_handler(esodbc_stmt_st *stmt, cstr_st *body, + BOOL is_json); SQLRETURN EsSQLBindCol( @@ -125,25 +127,49 @@ SQLRETURN EsSQLNumParams( SQLSMALLINT *ParameterCountPtr); SQLRETURN EsSQLRowCount(_In_ SQLHSTMT StatementHandle, _Out_ SQLLEN *RowCount); -/* JSON body build elements */ -#define JSON_KEY_QUERY "\"query\": " /* will always be the 1st key */ -#define JSON_KEY_CURSOR "\"cursor\": " /* 1st key */ -#define JSON_KEY_PARAMS ", \"params\": " /* n-th key */ -#define JSON_KEY_FETCH ", \"fetch_size\": " /* n-th key */ -#define JSON_KEY_REQ_TOUT ", \"request_timeout\": " /* n-th key */ -#define JSON_KEY_PAGE_TOUT ", \"page_timeout\": " /* n-th key */ -#define JSON_KEY_TIME_ZONE ", \"time_zone\": " /* n-th key */ -#define JSON_KEY_VAL_MODE ", \"mode\": \"ODBC\"" /* n-th key */ +/* + * REST request parameters + */ +#define REQ_KEY_QUERY "query" +#define REQ_KEY_CURSOR "cursor" +#define REQ_KEY_PARAMS "params" +#define REQ_KEY_FETCH "fetch_size" +#define REQ_KEY_REQ_TOUT "request_timeout" +#define REQ_KEY_PAGE_TOUT "page_timeout" +#define REQ_KEY_TIME_ZONE "time_zone" +#define REQ_KEY_MODE "mode" +#define REQ_KEY_CLT_ID "client_id" +#define REQ_KEY_MULTIVAL "field_multi_value_leniency" +#define REQ_KEY_IDX_FROZEN "index_include_frozen" +#define REQ_KEY_TIMEZONE "time_zone" + +#define REST_REQ_KEY_COUNT 11 /* "query" or "cursor" */ + #ifdef _WIN64 -# define JSON_KEY_CLT_ID ", \"client_id\": \"odbc64\"" /* n-th 
k. */ -#else /* _WIN64 */ -# define JSON_KEY_CLT_ID ", \"client_id\": \"odbc32\"" /* n-th k. */ -#endif /* _WIN64 */ -#define JSON_KEY_MULTIVAL ", \"field_multi_value_leniency\": " /* n-th */ -#define JSON_KEY_IDX_FROZEN ", \"index_include_frozen\": " /* n-th */ -#define JSON_KEY_TIMEZONE ", \"time_zone\": " /* n-th key */ +# define REQ_VAL_CLT_ID "odbc64" +#else +# define REQ_VAL_CLT_ID "odbc32" +#endif +#define REQ_VAL_MODE "ODBC" +#define REQ_VAL_TIMEZONE_Z "Z" + +/* JSON body building blocks */ +#define JSON_KEY_QUERY "\"" REQ_KEY_QUERY "\": " /* 1st key */ +#define JSON_KEY_CURSOR "\"" REQ_KEY_CURSOR "\": " /* 1st key */ +#define JSON_KEY_PARAMS ", \"" REQ_KEY_PARAMS "\": " /* n-th key */ +#define JSON_KEY_FETCH ", \"" REQ_KEY_FETCH "\": " /* n-th key */ +#define JSON_KEY_REQ_TOUT ", \"" REQ_KEY_REQ_TOUT "\": " /* n-th key */ +#define JSON_KEY_PAGE_TOUT ", \"" REQ_KEY_PAGE_TOUT "\": " /* n-th key */ +#define JSON_KEY_TIME_ZONE ", \"" REQ_KEY_TIME_ZONE "\": " /* n-th key */ +#define JSON_KEY_VAL_MODE ", \"" REQ_KEY_MODE "\": \"" \ + REQ_VAL_MODE "\"" /* n-th key */ +#define JSON_KEY_CLT_ID ", \"" REQ_KEY_CLT_ID "\": \"" \ + REQ_VAL_CLT_ID "\"" /* n-th k. */ +#define JSON_KEY_MULTIVAL ", \"" REQ_KEY_MULTIVAL "\": " /* n-th */ +#define JSON_KEY_IDX_FROZEN ", \"" REQ_KEY_IDX_FROZEN "\": " /* n-th */ +#define JSON_KEY_TIMEZONE ", \"" REQ_KEY_TIMEZONE "\": " /* n-th key */ -#define JSON_VAL_TIMEZONE_Z "\"Z\"" +#define JSON_VAL_TIMEZONE_Z "\"" REQ_VAL_TIMEZONE_Z "\"" #endif /* __QUERIES_H__ */ diff --git a/driver/tinycbor.c b/driver/tinycbor.c new file mode 100644 index 00000000..bd211b0d --- /dev/null +++ b/driver/tinycbor.c @@ -0,0 +1,287 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +#include "tinycbor.h" +#include "defs.h" + +/* Vars for track keeping of thread-local UTF8-UTF16 conversion (buffers + * allocated by cbor_value_get_utf16_wstr()). + * Note: these can't be freed per thread, since + * DllMain(DLL_THREAD_ATTACH/DLL_THREAD_DETACH) is optional (and apps are + * inconsistent even calling attach-detach for same thread). */ +static wchar_t **u16buffs = NULL; +static size_t u16buff_cnt = 0; +static esodbc_mutex_lt u16buff_mux = ESODBC_MUX_SINIT; + +/* advance an iterator of an "entered" JSON-sytle map to the value for the + * given key, if that exists */ +CborError cbor_map_advance_to_key(CborValue *it, const char *key, + size_t key_len, CborValue *val) +{ + CborError res; + const char *buffptr; + size_t bufflen; + + while (! cbor_value_at_end(it)) { + /* skip all tags */ + if ((res = cbor_value_skip_tag(it)) != CborNoError) { + return res; + } + /* if current key is a string, get its name */ + if (cbor_value_is_text_string(it)) { + res = cbor_value_get_string_chunk(it, &buffptr, &bufflen); + if (res != CborNoError) { + return res; + } + /* this assumes an ASCII key (which is the case with ES' info, but + * not generally valid for CBOR/JSON-style maps) */ + if (bufflen != key_len || strncasecmp(key, buffptr, key_len)) { + /* skip all tags */ + if ((res = cbor_value_skip_tag(it)) != CborNoError) { + return res; + } + /* advance past param's value */ + if ((res = cbor_value_advance(it)) != CborNoError) { + return res; + } + continue; + } + } + /* found it! is there anything following it? */ + /* TODO: does this check the entire obj or just the container?? 
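+		 * (cbor_value_at_end() checks the iterator's current container --
+		 * here the entered map -- not the entire encoded object.)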
*/ + if (cbor_value_at_end(it)) { + return CborErrorTooFewItems; + } + *val = *it; + return CborNoError; + } + /* key not found */ + val->type = CborInvalidType; + return CborNoError; +} + +CborError cbor_map_lookup_keys(CborValue *map, size_t cnt, + const char **keys, const size_t *lens, CborValue **objs, BOOL drain) +{ + CborError res; + CborValue it; + const char *buffptr; + size_t bufflen; + size_t i, found; + + assert(cbor_value_is_map(map)); + if ((res = cbor_value_enter_container(map, &it)) != CborNoError) { + return res; + } + + /* mark all out values invalid since only the found keys are going to be + * returned as valid */ + for (i = 0; i < cnt; i ++) { + objs[i]->type = CborInvalidType; + } + + found = 0; + while ((! cbor_value_at_end(&it)) && (found < cnt)) { + /* skip all tags */ + if ((res = cbor_value_skip_tag(&it)) != CborNoError) { + return res; + } + /* is current key is a string, get its name */ + if (cbor_value_is_text_string(&it)) { + res = cbor_value_get_string_chunk(&it, &buffptr, &bufflen); + if (res != CborNoError) { + return res; + } + // TODO: binary search on ordered keys? + for (i = 0; i < cnt; i ++) { + /* this assumes an ASCII key (which is the case with ES' info, + * but not generally valid for CBOR/JSON-like maps) */ + if (bufflen == lens[i] && + strncasecmp(keys[i], buffptr, lens[i]) == 0) { + *objs[i] = it; + found ++; + break; + } + } + } + + /* skip all tags */ + if ((res = cbor_value_skip_tag(&it)) != CborNoError) { + return res; + } + /* advance past param's value */ + if ((res = cbor_value_advance(&it)) != CborNoError) { + return res; + } + } + + if (drain) { + while (! cbor_value_at_end(&it)) { + if ((res = cbor_value_advance(&it)) != CborNoError) { + return res; + } + } + + return cbor_value_leave_container(map, &it); + } else { + return CborNoError; + } +} + +CborError cbor_container_count(CborValue cont, size_t *count) +{ + CborError res; + CborValue it; + size_t cnt = 0; + + assert(cbor_value_is_container(&cont)); + + if ((res = cbor_value_enter_container(&cont, &it)) != CborNoError) { + return res; + } + while (! cbor_value_at_end(&it)) { + if (! cbor_value_is_tag(&it)) { + cnt ++; + } + if ((res = cbor_value_advance(&it)) != CborNoError) { + return res; + } + } + *count = cnt; + return CborNoError; +} + +// XXX cbor_get_map_count() should also be useful +CborError cbor_get_array_count(CborValue arr, size_t *count) +{ + assert(cbor_value_is_array(&arr)); + + return cbor_value_is_length_known(&arr) ? + cbor_value_get_array_length(&arr, count) : + cbor_container_count(arr, count); +} + +CborError cbor_container_is_empty(CborValue cont, BOOL *empty) +{ + CborError res; + CborValue it; + + assert(cbor_value_is_container(&cont)); + + if ((res = cbor_value_enter_container(&cont, &it)) != CborNoError) { + return res; + } + /* skip all tags */ + if ((res = cbor_value_skip_tag(&it)) != CborNoError) { + return res; + } + *empty = cbor_value_at_end(&it) || (! cbor_value_is_valid(&it)); + return CborNoError; +} + +static BOOL enlist_utf16_buffer(wchar_t *old, wchar_t *new) +{ + wchar_t **r; + size_t i; + + if (! 
old) { + /* new entry must be inserted into list */ + ESODBC_MUX_LOCK(&u16buff_mux); + r = realloc(u16buffs, (u16buff_cnt + 1) * sizeof(wchar_t *)); + if (r) { + u16buffs = r; + u16buffs[u16buff_cnt ++] = new; + } + ESODBC_MUX_UNLOCK(&u16buff_mux); + } else { + ESODBC_MUX_LOCK(&u16buff_mux); + r = NULL; + for (i = 0; i < u16buff_cnt; i ++) { + if (u16buffs[i] == old) { + r = &u16buffs[i]; + u16buffs[i] = new; + break; + } + } + ESODBC_MUX_UNLOCK(&u16buff_mux); + } + + return !!r; +} + +void tinycbor_cleanup() +{ + size_t i; + for (i = 0; i < u16buff_cnt; i ++) { + free(u16buffs[i]); + } + if (i) { + free(u16buffs); + } +} + +/* Fetches and converts a(n always UTF8) text string to UTF16 wide char. + * Uses a dynamically allocated thread-local buffer. */ +CborError cbor_value_get_utf16_wstr(CborValue *it, wstr_st *utf16) +{ + static thread_local wstr_st wbuff = {.str = NULL, .cnt = (size_t)-1}; + wstr_st r; /* reallocated */ + cstr_st mb_str; /* multibyte string */ + CborError res; + int n; + + assert(cbor_value_is_text_string(it)); + /* get the multibyte string to convert */ + res = cbor_value_get_string_chunk(it, &mb_str.str, &mb_str.cnt); + if (res != CborNoError) { + return res; + } + /* attempt string conversion */ + while ((n = U8MB_TO_U16WC(mb_str.str, mb_str.cnt, wbuff.str, + wbuff.cnt)) <= 0) { + /* U8MB_TO_U16WC will return error (though not set it with + * SetLastError()) for empty source strings */ + if (! mb_str.cnt) { + utf16->cnt = 0; + utf16->str = NULL; + return CborNoError; + } + /* is this a non-buffer related error? (like decoding) */ + if ((! WAPI_ERR_EBUFF()) && wbuff.str) { + return CborErrorInvalidUtf8TextString; + } /* else: buffer hasn't yet been allocated or is too small */ + /* what's the minimum space needed? */ + if ((n = U8MB_TO_U16WC(mb_str.str, mb_str.cnt, NULL, 0)) < 0) { + return CborErrorInvalidUtf8TextString; + } + /* double scratchpad size until exceeding min needed space. + * condition on equality, to allow for a 0-term */ + for (r.cnt = wbuff.cnt < (size_t)-1 ? wbuff.cnt : + ESODBC_BODY_BUF_START_SIZE; r.cnt <= (size_t)n; r.cnt *= 2) { + ; + } + if (! (r.str = realloc(wbuff.str, r.cnt))) { + return CborErrorOutOfMemory; + } + if (! enlist_utf16_buffer(wbuff.str, r.str)) { + /* it should only fail on 1st allocation per-thread */ + assert(! wbuff.str); + free(r.str); + return CborErrorOutOfMemory; + } else { + wbuff = r; + } + DBG("new UTF8/16 conv. buffer @0x%p, size %zu.", wbuff.str, wbuff.cnt); + } + + /* U8MB_TO_U16WC() will only convert the 0-term if counted in input*/ + wbuff.str[n] = '\0'; /* set, but not counted */ + utf16->str = wbuff.str; + utf16->cnt = n; + return CborNoError; +} + +/* vim: set noet fenc=utf-8 ff=dos sts=0 sw=4 ts=4 : */ diff --git a/driver/tinycbor.h b/driver/tinycbor.h new file mode 100644 index 00000000..9d61b5a9 --- /dev/null +++ b/driver/tinycbor.h @@ -0,0 +1,92 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the Elastic License; + * you may not use this file except in compliance with the Elastic License. + */ + +#ifndef __TINYCBOR_H__ +#define __TINYCBOR_H__ + +#include + +#include "log.h" +#include "util.h" + +#define JUMP_ON_CBOR_ERR(_res, _lbl, _hdl, _fmt, ...) 
\ + do { \ + if (_res != CborNoError) { \ + ERRH(_hdl, "CBOR: %s -- " _fmt ".", cbor_error_string(_res), \ + __VA_ARGS__); \ + goto _lbl; \ + } \ + } while (0) + +#ifndef NDEBUG +# define ES_CBOR_PARSE_FLAGS CborValidateStrictest +#else /* !NDEBUG */ +# define ES_CBOR_PARSE_FLAGS CborValidateBasic +#endif /* !NDEBUG */ + +#define CBOR_LEN_IMMEDIATE_MAX 23 /* numeric value encoded within the hdr */ +#define CBOR_OBJ_BOOL_LEN 1 +#define CBOR_OBJ_HFLOAT_LEN (/*initial byte*/1 + /* half prec. float */2) +#define CBOR_OBJ_FLOAT_LEN (/*initial byte*/1 + /* single prec. float */4) +#define CBOR_OBJ_DOUBLE_LEN (/*initial byte*/1 + /* double prec. float */8) + +/* Calculates the length of the preamble/header of a non-nummeric serialized + * object, where 'item_len' is: + * - the length of a text/byte string; or + * - the count of elements in an array; or + * - the count of pairs in a map. + * (Similar functionality covered by tinycbor's encode_number_no_update() + * internal-only function.) */ +static inline size_t cbor_nn_hdr_len(size_t item_len) +{ + size_t len_sz; /* size of the length field in bytes (1/2/4/8) */ + if (item_len <= CBOR_LEN_IMMEDIATE_MAX) { + len_sz = 0; + } else if (item_len <= UINT8_MAX) { + len_sz = sizeof(uint8_t); + } else if (item_len <= UINT16_MAX) { + len_sz = sizeof(uint16_t); + } else if (item_len <= UINT32_MAX) { + len_sz = sizeof(uint32_t); + } else { + len_sz = sizeof(uint64_t); + } + return /*initial leading byte*/1 + len_sz; +} + +#define CBOR_INT_OBJ_LEN(_val) \ + ((0 <= (_val)) ? cbor_nn_hdr_len(_val) : cbor_nn_hdr_len(-1 - (_val))) + + +/* Calculates the serialized object length of a CBOR string (text, byte) + * object. + * (Similar functionality covered by tinycbor's internal only + * encode_number_no_update() function.) */ +static inline size_t cbor_str_obj_len(size_t item_len) +{ + return cbor_nn_hdr_len(item_len) + item_len; +} + + +/* advance an iterator of an "entered" JSON-sytle map to the value for the + * given key, if that exists */ +CborError cbor_map_advance_to_key(CborValue *it, const char *key, + size_t key_len, CborValue *val); +CborError cbor_map_lookup_keys(CborValue *map, size_t cnt, + const char **keys, const size_t *lens, CborValue **objs, BOOL drain); +CborError cbor_container_count(CborValue cont, size_t *count); +CborError cbor_get_array_count(CborValue arr, size_t *count); +CborError cbor_container_is_empty(CborValue cont, BOOL *empty); + +CborError cbor_value_get_utf16_wstr(CborValue *it, wstr_st *utf8); +void tinycbor_cleanup(); + + +/* function defined in cborparser.c file "patched" in CMakeLists.txt */ +CborError cbor_value_get_string_chunk(CborValue *it, + const char **bufferptr, size_t *len); + +#endif /* __TINYCBOR_H__ */ diff --git a/driver/util.c b/driver/util.c index ec11731f..5137594d 100644 --- a/driver/util.c +++ b/driver/util.c @@ -586,7 +586,7 @@ cstr_st TEST_API *wstr_to_utf8(wstr_st *src, cstr_st *dst) nts = !src->str[src->cnt - 1]; /* eval the needed space for conversion */ - len = WCS2U8(src->str, (int)src->cnt, NULL, 0); + len = U16WC_TO_MBU8(src->str, src->cnt, NULL, 0); if (! len) { ERRN("failed to evaluate UTF-8 conversion space necessary for [%zu] " "`" LWPDL "`.", src->cnt, LWSTR(src)); @@ -617,7 +617,7 @@ cstr_st TEST_API *wstr_to_utf8(wstr_st *src, cstr_st *dst) if (0 < src->cnt) { /* convert the string */ - len = WCS2U8(src->str, (int)src->cnt, dst->str, len); + len = U16WC_TO_MBU8(src->str, src->cnt, dst->str, len); if (! 
len) { /* should not happen, since a first scan already happened */ ERRN("failed to UTF-8 convert `" LWPDL "`.", LWSTR(src)); @@ -675,5 +675,34 @@ BOOL TEST_API metadata_id_escape(wstr_st *src, wstr_st *dst, BOOL force) return ret; } +/* Simple hex printing of a cstr_st object. + * Returns (thread local static) printed buffer, always 0-term'd. */ +char *cstr_hex_dump(const cstr_st *buff) +{ + static thread_local char dest[ESODBC_LOG_BUF_SIZE]; + char *to, *from; + char *to_end, *from_end; + int n; + + to = dest; + to_end = dest + sizeof(dest); + from = buff->str; + from_end = buff->str + buff->cnt; + while (to < to_end && from < from_end) { + n = sprintf(to, "%X", *from ++); + if (n < 0) { + ERRN("failed to print serialized CBOR object"); + return NULL; + } + to += (size_t)n; + } + /* add the 0-terminator */ + if (to < to_end) { /* still space for it? */ + *to ++ = 0; + } else { /* == */ + dest[sizeof(dest) - 1] = 0; /* overwrite last position */ + } + return dest; +} /* vim: set noet fenc=utf-8 ff=dos sts=0 sw=4 ts=4 : */ diff --git a/driver/util.h b/driver/util.h index 8ff7cf31..8783e24a 100644 --- a/driver/util.h +++ b/driver/util.h @@ -170,6 +170,13 @@ size_t ui64tot(uint64_t ui64, void *buff, BOOL wide); #ifdef _WIN32 /* + * https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/unicode : + * "the only Unicode encoding that ODBC supports is UCS-2" (at Driver-DM + * interface level) => the driver will convert between ES's UTF-8 multi-byte + * and DM's UTF-16 wide char. + */ +/* + * WideCharToMultiByte(): * "[D]oes not null-terminate an output string if the input string length is * explicitly specified without a terminating null character. To * null-terminate an output string for this function, the application should @@ -178,13 +185,18 @@ size_t ui64tot(uint64_t ui64, void *buff, BOOL wide); * "If successful, returns the number of bytes written" or required (if * _ubytes == 0), OR "0 if it does not succeed". */ -#define WCS2U8(_wstr, _wchars, _u8, _ubytes) \ +#define U16WC_TO_MBU8(_wstr, _wcnt, _u8str, _u8len) \ WideCharToMultiByte(CP_UTF8, WC_ERR_INVALID_CHARS, \ - _wstr, _wchars, _u8, _ubytes, \ + _wstr, (int)(_wcnt), _u8str, (int)(_u8len), \ NULL, NULL) -#define WCS2U8_BUFF_INSUFFICIENT \ - (GetLastError() == ERROR_INSUFFICIENT_BUFFER) -#define WCS2U8_ERRNO() GetLastError() +#define U8MB_TO_U16WC(_u8str, _u8len, _wstr, _wcnt) \ + MultiByteToWideChar(CP_UTF8, MB_ERR_INVALID_CHARS, \ + _u8str, (int)(_u8len), _wstr, (int)(_wcnt)) + +#define WAPI_ERRNO() GetLastError() +#define WAPI_CLR_ERRNO() SetLastError(ERROR_SUCCESS) +#define WAPI_ERR_EBUFF() (GetLastError() == ERROR_INSUFFICIENT_BUFFER) + /* @@ -204,7 +216,8 @@ typedef SRWLOCK esodbc_mutex_lt; #define thread_local __declspec(thread) #endif /* DRIVER_BUILD && !thread_local */ -#define timegm _mkgmtime +#define timegm _mkgmtime +#define strncasecmp _strnicmp #else /* _WIN32 */ @@ -219,6 +232,7 @@ typedef SRWLOCK esodbc_mutex_lt; // wcstombs(charp, wstr, octet_length); #endif /* _WIN32 */ + #ifdef UNICODE typedef wstr_st tstr_st; #else /* UNICODE */ @@ -278,6 +292,10 @@ cstr_st TEST_API *wstr_to_utf8(wstr_st *src, cstr_st *dst); * Returns: TRUE, if escaping has been applied */ BOOL TEST_API metadata_id_escape(wstr_st *src, wstr_st *dst, BOOL force); +/* Simple hex printing of a cstr_st object. + * Returns (thread local static) printed buffer, always 0-term'd. */ +char *cstr_hex_dump(const cstr_st *buff); + /* * Printing aids. 
*/ @@ -333,6 +351,7 @@ BOOL TEST_API metadata_id_escape(wstr_st *src, wstr_st *dst, BOOL force); #define TIMESTAMP_TEMPLATE_LEN(prec) \ (DATE_TEMPLATE_LEN + /*' '*/1 + TIME_TEMPLATE_LEN(prec)) + #endif /* __UTIL_H__ */ /* vim: set noet fenc=utf-8 ff=dos sts=0 sw=4 ts=4 : */ diff --git a/libs/tinycbor/.appveyor.yml b/libs/tinycbor/.appveyor.yml new file mode 100644 index 00000000..b3abca27 --- /dev/null +++ b/libs/tinycbor/.appveyor.yml @@ -0,0 +1,35 @@ +version: 0.5.3-build-{build} +pull_requests: + do_not_increment_build_number: true +image: +- Visual Studio 2015 +- Visual Studio 2013 +- Visual Studio 2017 +install: +- cmd: >- + set tests=1 + + if /i "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2013" (call "C:\Program Files (x86)\Microsoft Visual Studio 12.0\VC\vcvarsall.bat" amd64) & (set tests=0) + + if /i "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2015" (call "C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\vcvarsall.bat" x86) & (set QTDIR=C:\Qt\5.9\msvc2015) + + if /i "%APPVEYOR_BUILD_WORKER_IMAGE%"=="Visual Studio 2017" (call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Community\VC\Auxiliary\Build\vcvarsall.bat" x64) & (set QTDIR=C:\Qt\5.9\msvc2017_64) + + set path=%PATH%;%QTDIR%\bin +build_script: +- cmd: >- + nmake -f Makefile.nmake -nologo CFLAGS="-W3 -Os -MDd" + + cd tests + + if /i "%tests%"=="1" qmake CONFIG-=release CONFIG+=debug + + if /i "%tests%"=="1" nmake -nologo -s +test_script: +- cmd: >- + if /i "%tests%"=="1" nmake -s -nologo TESTARGS=-silent check + + if /i "%tests%"=="0" echo Tests skipped. +artifacts: +- path: lib\tinycbor.lib +deploy: off diff --git a/libs/tinycbor/.gitattributes b/libs/tinycbor/.gitattributes new file mode 100644 index 00000000..76ed2567 --- /dev/null +++ b/libs/tinycbor/.gitattributes @@ -0,0 +1,4 @@ +.tag export-subst +.gitignore export-ignore +.gitattributes export-ignore +.appveyor.yml text diff --git a/libs/tinycbor/.gitignore b/libs/tinycbor/.gitignore new file mode 100644 index 00000000..3272de33 --- /dev/null +++ b/libs/tinycbor/.gitignore @@ -0,0 +1,81 @@ +# Frequent generated files +callgrind.out.* +pcviewer.cfg +*~ +*.a +*.la +*.core +*.d +*.dylib +*.moc +*.o +*.obj +*.orig +*.swp +*.rej +*.so +*.so.* +*.pbxuser +*.mode1 +*.mode1v3 +*_pch.h.cpp +*_resource.rc +.#* +*.*# +core +.qmake.cache +.qmake.stash +.qmake.vars +.device.vars +tags +.DS_Store +*.debug +Makefile* +*.prl +*.app +*.pro.user* +*.qmlproject.user* +*.gcov +*.gcda +*.gcno +*.flc +.*.swp +tinycbor.pc + +# Visual Studio generated files +*.ib_pdb_index +*.idb +*.ilk +*.pdb +*.sln +*.suo +*.vcproj +*vcproj.*.*.user +*.ncb +*.vcxproj +*.vcxproj.filters +*.vcxproj.user +*.exe.embed.manifest +*.exe_manifest.rc +*.exe_manifest.res + +# MinGW generated files +*.Debug +*.Release + +# INTEGRITY generated files +*.gpj +*.int +*.ael +*.dla +*.dnm +*.dep +*.map + +bin +doc +lib +src/cjson +src/doxygen.log +!/Makefile +.config diff --git a/libs/tinycbor/.tag b/libs/tinycbor/.tag new file mode 100644 index 00000000..6828f88d --- /dev/null +++ b/libs/tinycbor/.tag @@ -0,0 +1 @@ +$Format:%H$ diff --git a/libs/tinycbor/.travis.yml b/libs/tinycbor/.travis.yml new file mode 100644 index 00000000..7ca287dd --- /dev/null +++ b/libs/tinycbor/.travis.yml @@ -0,0 +1,84 @@ +language: cpp +matrix: + include: + - os: linux + dist: xenial + addons: + apt: + sources: + - sourceline: 'ppa:beineri/opt-qt-5.12.1-xenial' + packages: + - qt512base valgrind + - doxygen + env: + - QMAKESPEC=linux-g++ + - EVAL="CC=gcc && CXX=g++" + - CFLAGS="-Os" + - LDFLAGS="-Wl,--no-undefined -lm" + - 
QMAKEFLAGS="-config release" + - QT_NO_CPU_FEATURE=rdrnd + - os: linux + dist: xenial + addons: + apt: + sources: + - sourceline: 'ppa:beineri/opt-qt-5.12.1-xenial' + packages: + - qt512base + env: + - QMAKESPEC=linux-clang + - EVAL="CC=clang && CXX=clang++" + - CFLAGS="-Oz" + - LDFLAGS="-Wl,--no-undefined -lm" + - QMAKEFLAGS="-config release" + - MAKEFLAGS=-s + - TESTARGS=-silent + - os: linux + dist: xenial + env: + - QMAKESPEC=linux-gcc-freestanding + - EVAL="CXX=false" + - CFLAGS="-ffreestanding -Os" + - LDFLAGS="-Wl,--no-undefined -lm" + - os: linux + dist: xenial + env: + - QMAKESPEC=linux-gcc-no-math + - EVAL="CXX=false && touch src/math.h src/float.h" + - CFLAGS="-ffreestanding -DCBOR_NO_FLOATING_POINT -Os" + - LDFLAGS="-Wl,--no-undefined" + - LDLIBS="" + - os: osx + env: + - QMAKESPEC=macx-clang + - CFLAGS="-Oz" + - QMAKEFLAGS="-config debug" + - MAKEFLAGS=-s + - TESTARGS=-silent + - PATH=/usr/local/opt/qt5/bin:$PATH +install: + - if [ "${TRAVIS_OS_NAME}" != "linux" ]; then + brew update; + brew install qt5; + fi +script: + - PATH=`echo /opt/qt*/bin`:$PATH + - eval "$EVAL" + - make -s -f Makefile.configure configure | tee .config + - make -k + CFLAGS="$CFLAGS -march=native -g1 -Wall -Wextra -Werror" + CPPFLAGS="-DNDEBUG" + lib/libtinycbor.a + - size lib/libtinycbor.a | tee sizes + - make -s clean + - make -k + CFLAGS="$CFLAGS -O0 -g" + LDFLAGS="$LDFLAGS" ${LDLIBS+LDLIBS="$LDLIBS"} + - grep -q freestanding-pass .config || make + QMAKEFLAGS="$QMAKEFLAGS QMAKE_CXX=$CXX" + tests/Makefile + - grep -q freestanding-pass .config || + (cd tests && make TESTARGS=-silent check -k + TESTRUNNER=`which valgrind 2>/dev/null`) + - make -s clean + - ./scripts/update-docs.sh diff --git a/libs/tinycbor/Doxyfile b/libs/tinycbor/Doxyfile new file mode 100644 index 00000000..a7263c2f --- /dev/null +++ b/libs/tinycbor/Doxyfile @@ -0,0 +1,49 @@ +PROJECT_NAME = "TinyCBOR $(VERSION) API" +OUTPUT_DIRECTORY = ../doc +ABBREVIATE_BRIEF = +SHORT_NAMES = YES +JAVADOC_AUTOBRIEF = YES +QT_AUTOBRIEF = YES +TAB_SIZE = 8 +ALIASES = "value=\arg \c" +OPTIMIZE_OUTPUT_FOR_C = YES +EXTRACT_STATIC = YES +EXTRACT_LOCAL_CLASSES = NO +HIDE_UNDOC_MEMBERS = YES +HIDE_UNDOC_CLASSES = YES +GENERATE_TODOLIST = NO +GENERATE_TESTLIST = NO +GENERATE_BUGLIST = NO +GENERATE_DEPRECATEDLIST= NO +SHOW_USED_FILES = NO +WARN_IF_UNDOCUMENTED = NO +WARN_LOGFILE = doxygen.log +INPUT = . 
+FILE_PATTERNS = *.h \ + *.c \ + *.dox +EXCLUDE_PATTERNS = *_p.h +STRIP_CODE_COMMENTS = NO +REFERENCED_BY_RELATION = YES +IGNORE_PREFIX = cbor_ \ + Cbor +HTML_TIMESTAMP = NO +GENERATE_HTMLHELP = YES +GENERATE_CHI = YES +BINARY_TOC = YES +TOC_EXPAND = YES +MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest +SEARCHENGINE = NO +GENERATE_LATEX = NO +COMPACT_LATEX = YES +MACRO_EXPANSION = YES +PREDEFINED = DOXYGEN \ + CBOR_INLINE_API= +CLASS_DIAGRAMS = NO +CLASS_GRAPH = NO +COLLABORATION_GRAPH = NO +GROUP_GRAPHS = NO +INCLUDE_GRAPH = NO +INCLUDED_BY_GRAPH = NO +GRAPHICAL_HIERARCHY = NO +DIRECTORY_GRAPH = NO diff --git a/libs/tinycbor/LICENSE b/libs/tinycbor/LICENSE new file mode 100644 index 00000000..4aad977c --- /dev/null +++ b/libs/tinycbor/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Intel Corporation + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/libs/tinycbor/Makefile b/libs/tinycbor/Makefile new file mode 100644 index 00000000..239dde8b --- /dev/null +++ b/libs/tinycbor/Makefile @@ -0,0 +1,240 @@ +# Variables: +prefix = /usr/local +exec_prefix = $(prefix) +bindir = $(exec_prefix)/bin +libdir = $(exec_prefix)/lib +includedir = $(prefix)/include +pkgconfigdir = $(libdir)/pkgconfig + +CFLAGS = -Wall -Wextra +LDFLAGS_GCSECTIONS = -Wl,--gc-sections +LDFLAGS += $(if $(gc_sections-pass),$(LDFLAGS_GCSECTIONS)) +LDLIBS = -lm + +GIT_ARCHIVE = git archive --prefix="$(PACKAGE)/" -9 +INSTALL = install +INSTALL_DATA = $(INSTALL) -m 644 +INSTALL_PROGRAM = $(INSTALL) -m 755 +QMAKE = qmake +MKDIR = mkdir -p +RMDIR = rmdir +SED = sed + +# Our sources +TINYCBOR_HEADERS = src/cbor.h src/cborjson.h src/tinycbor-version.h +TINYCBOR_FREESTANDING_SOURCES = \ + src/cborerrorstrings.c \ + src/cborencoder.c \ + src/cborencoder_close_container_checked.c \ + src/cborparser.c \ + src/cborpretty.c \ +# +CBORDUMP_SOURCES = tools/cbordump/cbordump.c + +BUILD_SHARED = $(shell file -L /bin/sh 2>/dev/null | grep -q ELF && echo 1) +BUILD_STATIC = 1 + +ifneq ($(BUILD_STATIC),1) +ifneq ($(BUILD_SHARED),1) + $(error error: BUILD_STATIC and BUILD_SHARED can not be both disabled) +endif +endif + +INSTALL_TARGETS += $(bindir)/cbordump +ifeq ($(BUILD_SHARED),1) +BINLIBRARY=lib/libtinycbor.so +INSTALL_TARGETS += $(libdir)/libtinycbor.so +INSTALL_TARGETS += $(libdir)/libtinycbor.so.$(SOVERSION) +INSTALL_TARGETS += $(libdir)/libtinycbor.so.$(VERSION) +endif +ifeq ($(BUILD_STATIC),1) +BINLIBRARY=lib/libtinycbor.a +INSTALL_TARGETS += $(libdir)/libtinycbor.a +endif +INSTALL_TARGETS += $(pkgconfigdir)/tinycbor.pc +INSTALL_TARGETS += $(TINYCBOR_HEADERS:src/%=$(includedir)/tinycbor/%) + +# setup VPATH +MAKEFILE := $(lastword $(MAKEFILE_LIST)) +SRCDIR := $(dir $(MAKEFILE)) +VPATH = $(SRCDIR):$(SRCDIR)/src + +# Our version +GIT_DIR := $(strip $(shell git -C $(SRCDIR). rev-parse --git-dir 2> /dev/null)) +VERSION = $(shell cat $(SRCDIR)VERSION) +SOVERSION = $(shell cut -f1-2 -d. $(SRCDIR)VERSION) +PACKAGE = tinycbor-$(VERSION) + +# Check that QMAKE is Qt 5 +ifeq ($(origin QMAKE),file) + check_qmake = $(strip $(shell $(1) -query QT_VERSION 2>/dev/null | cut -b1)) + ifneq ($(call check_qmake,$(QMAKE)),5) + QMAKE := qmake -qt5 + ifneq ($(call check_qmake,$(QMAKE)),5) + QMAKE := qmake-qt5 + ifneq ($(call check_qmake,$(QMAKE)),5) + QMAKE := @echo >&2 $(MAKEFILE): Cannot find a Qt 5 qmake; false + endif + endif + endif +endif + +-include .config + +ifeq ($(wildcard .config),) + $(info .config file not yet created) +endif + +ifeq ($(freestanding-pass),1) +TINYCBOR_SOURCES = $(TINYCBOR_FREESTANDING_SOURCES) +else +TINYCBOR_SOURCES = \ + $(TINYCBOR_FREESTANDING_SOURCES) \ + src/cborparser_dup_string.c \ + src/cborpretty_stdio.c \ + src/cbortojson.c \ + src/cborvalidation.c \ +# +# if open_memstream is unavailable on the system, try to implement our own +# version using funopen or fopencookie +ifeq ($(open_memstream-pass),) + ifeq ($(funopen-pass)$(fopencookie-pass),) + CFLAGS += -DWITHOUT_OPEN_MEMSTREAM + ifeq ($(wildcard .config),.config) + $(warning warning: funopen and fopencookie unavailable, open_memstream can not be implemented and conversion to JSON will not work properly!) 
+ endif + else + TINYCBOR_SOURCES += src/open_memstream.c + endif +endif +endif + +# json2cbor depends on an external library (cjson) +ifneq ($(cjson-pass)$(system-cjson-pass),) + JSON2CBOR_SOURCES = tools/json2cbor/json2cbor.c + INSTALL_TARGETS += $(bindir)/json2cbor + ifeq ($(system-cjson-pass),1) + LDFLAGS_CJSON = -lcjson + else + JSON2CBOR_SOURCES += src/cjson/cJSON.c + json2cbor_CCFLAGS = -I$(SRCDIR)src/cjson + endif +endif + +# Rules +all: .config \ + $(if $(subst 0,,$(BUILD_STATIC)),lib/libtinycbor.a) \ + $(if $(subst 0,,$(BUILD_SHARED)),lib/libtinycbor.so) \ + $(if $(freestanding-pass),,bin/cbordump) \ + tinycbor.pc +all: $(if $(JSON2CBOR_SOURCES),bin/json2cbor) +check: tests/Makefile | $(BINLIBRARY) + $(MAKE) -C tests check +silentcheck: | $(BINLIBRARY) + TESTARGS=-silent $(MAKE) -f $(MAKEFILE) -s check +configure: .config +.config: Makefile.configure + $(MAKE) -f $(SRCDIR)Makefile.configure OUT='>&9' configure 9> $@ + +lib/libtinycbor-freestanding.a: $(TINYCBOR_FREESTANDING_SOURCES:.c=.o) + @$(MKDIR) -p lib + $(AR) cqs $@ $^ + +lib/libtinycbor.a: $(TINYCBOR_SOURCES:.c=.o) + @$(MKDIR) -p lib + $(AR) cqs $@ $^ + +lib/libtinycbor.so: $(TINYCBOR_SOURCES:.c=.pic.o) + @$(MKDIR) -p lib + $(CC) -shared -Wl,-soname,libtinycbor.so.$(SOVERSION) -o lib/libtinycbor.so.$(VERSION) $(LDFLAGS) $^ $(LDLIBS) + cd lib ; ln -sf libtinycbor.so.$(VERSION) libtinycbor.so ; ln -sf libtinycbor.so.$(VERSION) libtinycbor.so.$(SOVERSION) + +bin/cbordump: $(CBORDUMP_SOURCES:.c=.o) $(BINLIBRARY) + @$(MKDIR) -p bin + $(CC) -o $@ $(LDFLAGS) $^ $(LDLIBS) + +bin/json2cbor: $(JSON2CBOR_SOURCES:.c=.o) $(BINLIBRARY) + @$(MKDIR) -p bin + $(CC) -o $@ $(LDFLAGS) $^ $(LDFLAGS_CJSON) $(LDLIBS) + +tinycbor.pc: tinycbor.pc.in + $(SED) > $@ < $< \ + -e 's,@prefix@,$(prefix),' \ + -e 's,@exec_prefix@,$(exec_prefix),' \ + -e 's,@libdir@,$(libdir),' \ + -e 's,@includedir@,$(includedir),' \ + -e 's,@version@,$(VERSION),' + +tests/Makefile: tests/tests.pro + $(QMAKE) $(QMAKEFLAGS) -o $@ $< + +$(PACKAGE).tar.gz: | .git + GIT_DIR=$(SRCDIR).git $(GIT_ARCHIVE) --format=tar.gz -o "$(PACKAGE).tar.gz" HEAD +$(PACKAGE).zip: | .git + GIT_DIR=$(SRCDIR).git $(GIT_ARCHIVE) --format=zip -o "$(PACKAGE).zip" HEAD + +$(DESTDIR)$(libdir)/%: lib/% + $(INSTALL) -d $(@D) + $(INSTALL_DATA) $< $@ +$(DESTDIR)$(bindir)/%: bin/% + $(INSTALL) -d $(@D) + $(INSTALL_PROGRAM) $< $@ +$(DESTDIR)$(pkgconfigdir)/%: % + $(INSTALL) -d $(@D) + $(INSTALL_DATA) $< $@ +$(DESTDIR)$(includedir)/tinycbor/%: src/% + $(INSTALL) -d $(@D) + $(INSTALL_DATA) $< $@ + +install-strip: + $(MAKE) -f $(MAKEFILE) INSTALL_PROGRAM='$(INSTALL_PROGRAM) -s' install + +install: $(INSTALL_TARGETS:%=$(DESTDIR)%) +uninstall: + $(RM) $(INSTALL_TARGETS:%=$(DESTDIR)%) + +mostlyclean: + $(RM) $(TINYCBOR_SOURCES:.c=.o) + $(RM) $(TINYCBOR_SOURCES:.c=.pic.o) + $(RM) $(CBORDUMP_SOURCES:.c=.o) + +clean: mostlyclean + $(RM) bin/cbordump + $(RM) bin/json2cbor + $(RM) lib/libtinycbor.a + $(RM) lib/libtinycbor-freestanding.a + $(RM) tinycbor.pc + $(RM) lib/libtinycbor.so* + test -e tests/Makefile && $(MAKE) -C tests clean || : + +distclean: clean + test -e tests/Makefile && $(MAKE) -C tests distclean || : + +docs: + cd $(SRCDIR)src && VERSION=$(VERSION) doxygen $(SRCDIR)/../Doxyfile + +dist: $(PACKAGE).tar.gz $(PACKAGE).zip +distcheck: .git + -$(RM) -r $${TMPDIR-/tmp}/tinycbor-distcheck + GIT_DIR=$(SRCDIR).git git archive --prefix=tinycbor-distcheck/ --format=tar HEAD | tar -xf - -C $${TMPDIR-/tmp} + cd $${TMPDIR-/tmp}/tinycbor-distcheck && $(MAKE) silentcheck + $(RM) -r $${TMPDIR-/tmp}/tinycbor-distcheck 
+ +tag: distcheck + @cd $(SRCDIR). && perl scripts/maketag.pl + +.PHONY: all check silentcheck configure install uninstall +.PHONY: mostlyclean clean distclean +.PHONY: docs dist distcheck release +.SECONDARY: + +cflags := $(CPPFLAGS) -I$(SRCDIR)src +cflags += -std=gnu99 $(CFLAGS) +%.o: %.c + @test -d $(@D) || $(MKDIR) $(@D) + $(CC) $(cflags) $($(basename $(notdir $@))_CCFLAGS) -c -o $@ $< +%.pic.o: %.c + @test -d $(@D) || $(MKDIR) $(@D) + $(CC) $(cflags) -fPIC $($(basename $(notdir $@))_CCFLAGS) -c -o $@ $< + +-include src/*.d diff --git a/libs/tinycbor/Makefile.configure b/libs/tinycbor/Makefile.configure new file mode 100644 index 00000000..c2f51eea --- /dev/null +++ b/libs/tinycbor/Makefile.configure @@ -0,0 +1,35 @@ +ALLTESTS = open_memstream funopen fopencookie gc_sections \ + system-cjson cjson freestanding +MAKEFILE := $(lastword $(MAKEFILE_LIST)) +OUT := + +PROGRAM-open_memstream = extern int open_memstream(); int main() { return open_memstream(); } +PROGRAM-funopen = extern int funopen(); int main() { return funopen(); } +PROGRAM-fopencookie = extern int fopencookie(); int main() { return fopencookie(); } +PROGRAM-gc_sections = int main() {} +CCFLAGS-gc_sections = -Wl,--gc-sections +PROGRAM-freestanding = \#if !defined(__STDC_HOSTED__) || __STDC_HOSTED__-0 == 1\n +PROGRAM-freestanding += \#error Hosted implementation\n +PROGRAM-freestanding += \#endif\n +PROGRAM-freestanding += int main() {} +CCFLAGS-freestanding = $(CFLAGS) + +PROGRAM-cjson = \#include \n +PROGRAM-cjson += \#include \n +PROGRAM-cjson += int main() { return cJSON_False; } +CCFLAGS-cjson = -I$(dir $(MAKEFILE))src +PROGRAM-system-cjson = $(PROGRAM-cjson) +CCFLAGS-system-cjson = -lcjson + +sink: + @echo >&2 Please run from the top-level Makefile. + +configure: $(foreach it,$(ALLTESTS),check-$(it)) + +check-%: + @echo $(subst check-,,$@)-tested := 1 $(OUT) + $(if $(V),,@)if printf "$($(subst check-,PROGRAM-,$@))" | \ + $(CC) -xc $($(subst check-,CCFLAGS-,$@)) -o /dev/null - $(if $(V),,>/dev/null 2>&1); \ + then \ + echo $(subst check-,,$@)-pass := 1 $(OUT); \ + fi diff --git a/libs/tinycbor/Makefile.nmake b/libs/tinycbor/Makefile.nmake new file mode 100644 index 00000000..04b58ab4 --- /dev/null +++ b/libs/tinycbor/Makefile.nmake @@ -0,0 +1,47 @@ +CFLAGS = -W3 + +TINYCBOR_HEADERS = src\cbor.h src\cborjson.h +TINYCBOR_SOURCES = \ + src\cborerrorstrings.c \ + src\cborencoder.c \ + src\cborencoder_close_container_checked.c \ + src\cborparser.c \ + src\cborparser_dup_string.c \ + src\cborpretty.c \ + src\cborpretty_stdio.c \ + src\cborvalidation.c +TINYCBOR_OBJS = \ + src\cborerrorstrings.obj \ + src\cborencoder.obj \ + src\cborencoder_close_container_checked.obj \ + src\cborparser.obj \ + src\cborparser_dup_string.obj \ + src\cborpretty.obj \ + src\cborpretty_stdio.obj \ + src\cborvalidation.obj + +all: lib\tinycbor.lib +check: tests\Makefile lib\tinycbor.lib + cd tests & $(MAKE) check +silentcheck: + cd tests & set TESTARGS=-silent & $(MAKE) -s check +tests\Makefile: tests\tests.pro + qmake -o $@ $** + +lib\tinycbor.lib: $(TINYCBOR_OBJS) + -if not exist lib\NUL md lib + lib -nologo /out:$@ $** + +mostlyclean: + -del $(TINYCBOR_OBJS) +clean: mostlyclean + -del lib\tinycbor.lib + if exist tests\Makefile (cd tests & $(MAKE) clean) +distclean: clean + if exist tests\Makefile (cd tests & $(MAKE) distclean) +tag: + @perl maketag.pl + +{src\}.c{src\}.obj: + $(CC) -nologo $(CFLAGS) -Isrc -c -Fo$@ $< + diff --git a/libs/tinycbor/README b/libs/tinycbor/README new file mode 100644 index 00000000..167efa06 --- /dev/null +++ 
b/libs/tinycbor/README @@ -0,0 +1,13 @@ +Concise Binary Object Representation (CBOR) Library +--------------------------------------------------- + +To build TinyCBOR: + + make + +If you want to change the compiler or pass extra compiler flags: + + make CC=clang CFLAGS="-m32 -Oz" LDFLAGS="-m32" + +Documentation: https://intel.github.io/tinycbor/current/ + diff --git a/libs/tinycbor/TODO b/libs/tinycbor/TODO new file mode 100644 index 00000000..e9103ee6 --- /dev/null +++ b/libs/tinycbor/TODO @@ -0,0 +1,25 @@ +==== To Do list for libcbor ==== +=== General === +* API review +* Benchmark +* Write examples +** Simple decoder +** Decoder to JSON +** Windowed encoding/decoding (limited memory) + +=== Encoder === +* Write API docs +* Add API for creating indeterminate-length arrays and maps +* Add API for creating indeterminate-length strings +* Add API for relaxing doubles to floats and to integers +* Add length-checking of the sub-containers (#ifndef CBOR_ENCODER_NO_USER_CHECK) +* Decide how to indicate number of bytes needed +** Suggestion: return negative number from the functions + +=== Decoder === +* Write functions not yet implemented +* Add API for stream-decoding strings +* Add API for checking known tags and simple types +* (unlikely) Add API for checking the pairing of a tag and the tagged type +* Write tests for error conditions +* Fuzzy-test the decoder diff --git a/libs/tinycbor/VERSION b/libs/tinycbor/VERSION new file mode 100644 index 00000000..be14282b --- /dev/null +++ b/libs/tinycbor/VERSION @@ -0,0 +1 @@ +0.5.3 diff --git a/libs/tinycbor/examples/examples.pro b/libs/tinycbor/examples/examples.pro new file mode 100644 index 00000000..22071ac3 --- /dev/null +++ b/libs/tinycbor/examples/examples.pro @@ -0,0 +1,2 @@ +TEMPLATE = subdirs +SUBDIRS = simplereader.pro diff --git a/libs/tinycbor/examples/simplereader.c b/libs/tinycbor/examples/simplereader.c new file mode 100644 index 00000000..4752c08d --- /dev/null +++ b/libs/tinycbor/examples/simplereader.c @@ -0,0 +1,181 @@ +#include "../src/cbor.h" + +#include +#include +#include +#include +#include + +static uint8_t *readfile(const char *fname, size_t *size) +{ + struct stat st; + FILE *f = fopen(fname, "rb"); + if (!f) + return NULL; + if (fstat(fileno(f), &st) == -1) + return NULL; + uint8_t *buf = malloc(st.st_size); + *size = fread(buf, st.st_size, 1, f) == 1 ? st.st_size : 0; + fclose(f); + return buf; +} + +static void indent(int nestingLevel) +{ + while (nestingLevel--) + puts(" "); +} + +static void dumpbytes(const uint8_t *buf, size_t len) +{ + while (len--) + printf("%02X ", *buf++); +} + +static CborError dumprecursive(CborValue *it, int nestingLevel) +{ + while (!cbor_value_at_end(it)) { + CborError err; + CborType type = cbor_value_get_type(it); + + indent(nestingLevel); + switch (type) { + case CborArrayType: + case CborMapType: { + // recursive type + CborValue recursed; + assert(cbor_value_is_container(it)); + puts(type == CborArrayType ? 
"Array[" : "Map["); + err = cbor_value_enter_container(it, &recursed); + if (err) + return err; // parse error + err = dumprecursive(&recursed, nestingLevel + 1); + if (err) + return err; // parse error + err = cbor_value_leave_container(it, &recursed); + if (err) + return err; // parse error + indent(nestingLevel); + puts("]"); + continue; + } + + case CborIntegerType: { + int64_t val; + cbor_value_get_int64(it, &val); // can't fail + printf("%lld\n", (long long)val); + break; + } + + case CborByteStringType: { + uint8_t *buf; + size_t n; + err = cbor_value_dup_byte_string(it, &buf, &n, it); + if (err) + return err; // parse error + dumpbytes(buf, n); + puts(""); + free(buf); + continue; + } + + case CborTextStringType: { + char *buf; + size_t n; + err = cbor_value_dup_text_string(it, &buf, &n, it); + if (err) + return err; // parse error + puts(buf); + free(buf); + continue; + } + + case CborTagType: { + CborTag tag; + cbor_value_get_tag(it, &tag); // can't fail + printf("Tag(%lld)\n", (long long)tag); + break; + } + + case CborSimpleType: { + uint8_t type; + cbor_value_get_simple_type(it, &type); // can't fail + printf("simple(%u)\n", type); + break; + } + + case CborNullType: + puts("null"); + break; + + case CborUndefinedType: + puts("undefined"); + break; + + case CborBooleanType: { + bool val; + cbor_value_get_boolean(it, &val); // can't fail + puts(val ? "true" : "false"); + break; + } + + case CborDoubleType: { + double val; + if (false) { + float f; + case CborFloatType: + cbor_value_get_float(it, &f); + val = f; + } else { + cbor_value_get_double(it, &val); + } + printf("%g\n", val); + break; + } + case CborHalfFloatType: { + uint16_t val; + cbor_value_get_half_float(it, &val); + printf("__f16(%04x)\n", val); + break; + } + + case CborInvalidType: + assert(false); // can't happen + break; + } + + err = cbor_value_advance_fixed(it); + if (err) + return err; + } + return CborNoError; +} + +int main(int argc, char **argv) +{ + if (argc == 1) { + puts("simplereader "); + return 0; + } + + size_t length; + uint8_t *buf = readfile(argv[1], &length); + if (!buf) { + perror("readfile"); + return 1; + } + + CborParser parser; + CborValue it; + CborError err = cbor_parser_init(buf, length, 0, &parser, &it); + if (!err) + err = dumprecursive(&it, 0); + free(buf); + + if (err) { + fprintf(stderr, "CBOR parsing failure at offset %ld: %s\n", + it.ptr - buf, cbor_error_string(err)); + return 1; + } + return 0; +} diff --git a/libs/tinycbor/examples/simplereader.pro b/libs/tinycbor/examples/simplereader.pro new file mode 100644 index 00000000..07fdc6ac --- /dev/null +++ b/libs/tinycbor/examples/simplereader.pro @@ -0,0 +1,3 @@ +CONFIG -= qt +SOURCES = simplereader.c +include(../src/src.pri) diff --git a/libs/tinycbor/scripts/maketag.pl b/libs/tinycbor/scripts/maketag.pl new file mode 100644 index 00000000..5b1a8b79 --- /dev/null +++ b/libs/tinycbor/scripts/maketag.pl @@ -0,0 +1,91 @@ +#!perl +use strict; +sub run(@) { + open PROC, "-|", @_ or die("Cannot run $_[0]: $!"); + my @out; + while () { + chomp; + push @out, $_; + } + close PROC; + return @out; +} + +my @tags = run("git", "tag"); +my @v = run("git", "show", "HEAD:VERSION"); +my $v = $v[0]; + +my $tagfile = ".git/TAG_EDITMSG"; +open TAGFILE, ">", $tagfile + or die("Cannot create file for editing tag message: $!"); +select TAGFILE; +print "TinyCBOR release $v\n"; +print "\n"; +print "# Write something nice about this release here\n"; + +# Do we have a commit template? 
+my @result = run("git", "config", "--get", "commit.template"); +if (scalar @result) { + open TEMPLATE, "<", $result[0]; + map { print $_; }