From ddbd59597453b02cec1ec2747e9043e185cf41bc Mon Sep 17 00:00:00 2001 From: Jaeyun Jung Date: Thu, 7 Mar 2024 14:56:44 +0900 Subject: [PATCH] [C-Api/Service] add new API (ML-API ext) Add new API set for ML service, these functions support constructing new handle from json configuration. - type for ml-service: single, pipeline Signed-off-by: Jaeyun Jung --- c/include/ml-api-service.h | 207 ++- c/include/nnstreamer-tizen-internal.h | 11 +- c/src/meson.build | 2 +- c/src/ml-api-service-agent-client.c | 2 - c/src/ml-api-service-common.c | 638 ++++++++- c/src/ml-api-service-extension.c | 865 ++++++++++++ c/src/ml-api-service-extension.h | 64 + c/src/ml-api-service-private.h | 41 +- packaging/machine-learning-api.spec | 1 + tests/capi/meson.build | 8 + tests/capi/unittest_capi_service_extension.cc | 1250 +++++++++++++++++ .../config_pipeline_duplicated_name.conf | 28 + .../config/config_pipeline_imgclf.conf | 28 + .../config/config_pipeline_invalid_info.conf | 27 + .../config/config_pipeline_no_info.conf | 22 + .../config/config_pipeline_no_name.conf | 27 + .../test_models/config/config_single_add.conf | 7 + .../config/config_single_imgclf.conf | 24 + .../config/config_single_imgclf_file.conf | 6 + .../config_single_imgclf_invalid_info.conf | 18 + .../config_single_imgclf_max_input.conf | 25 + .../config/config_single_no_model.conf | 18 + .../config/config_unknown_type.conf | 7 + 23 files changed, 3297 insertions(+), 29 deletions(-) create mode 100644 c/src/ml-api-service-extension.c create mode 100644 c/src/ml-api-service-extension.h create mode 100755 tests/capi/unittest_capi_service_extension.cc create mode 100644 tests/test_models/config/config_pipeline_duplicated_name.conf create mode 100644 tests/test_models/config/config_pipeline_imgclf.conf create mode 100644 tests/test_models/config/config_pipeline_invalid_info.conf create mode 100644 tests/test_models/config/config_pipeline_no_info.conf create mode 100644 tests/test_models/config/config_pipeline_no_name.conf create mode 100644 tests/test_models/config/config_single_add.conf create mode 100644 tests/test_models/config/config_single_imgclf.conf create mode 100644 tests/test_models/config/config_single_imgclf_file.conf create mode 100644 tests/test_models/config/config_single_imgclf_invalid_info.conf create mode 100644 tests/test_models/config/config_single_imgclf_max_input.conf create mode 100644 tests/test_models/config/config_single_no_model.conf create mode 100644 tests/test_models/config/config_unknown_type.conf diff --git a/c/include/ml-api-service.h b/c/include/ml-api-service.h index 21b8f69d..7c02ec11 100644 --- a/c/include/ml-api-service.h +++ b/c/include/ml-api-service.h @@ -30,6 +30,7 @@ #define __ML_API_SERVICE_H__ #include +#include #ifdef __cplusplus extern "C" { @@ -46,15 +47,209 @@ extern "C" { typedef void *ml_service_h; /** - * @brief Destroys the given service handle. - * @details If given service handle is created by ml_service_launch_pipeline(), this requests machine learning agent daemon to destroy the pipeline. + * @brief Callbacks for the events from ml-service. + * @since_tizen 9.0 + */ +typedef struct { + void (*new_data) (ml_service_h handle, const char *name, const ml_tensors_data_h data, void *user_data); /**< Called when new data is processed from ml-service. */ + void (*event) (ml_service_h handle, int event, void *event_data, void *user_data); /**< Called when new event is occured from ml-service. 
*/ +} ml_service_callbacks_s; + +/** + * @brief Creates a handle for machine learning service using a configuration file. + * @since_tizen 9.0 + * @remarks %http://tizen.org/privilege/mediastorage is needed if the configuration is relevant to media storage. + * @remarks %http://tizen.org/privilege/externalstorage is needed if the configuration is relevant to external storage. + * @remarks The @a handle should be released using ml_service_destroy(). + * @param[in] config The absolute path to configuration file. + * @param[out] handle The handle of ml-service. + * @return @c 0 on success. Otherwise a negative error value. + * @retval #ML_ERROR_NONE Successful. + * @retval #ML_ERROR_NOT_SUPPORTED Not supported. + * @retval #ML_ERROR_PERMISSION_DENIED The application does not have the privilege to access to the media storage or external storage. + * @retval #ML_ERROR_INVALID_PARAMETER The parameter is invalid. + * @retval #ML_ERROR_IO_ERROR Failed to parse the configuration file. + * @retval #ML_ERROR_STREAMS_PIPE Failed to open the model. + * @retval #ML_ERROR_OUT_OF_MEMORY Failed to allocate required memory. + * + * Here is an example of the usage: + * @code + * + * // Callback function for ml-service. + * // Note that the handle of tensors-data will be deallocated after the return and this is synchronously called. + * // Thus, if you need the data afterwards, copy the data to another buffer and return fast. + * // Do not spend too much time in the callback. + * static void + * _ml_service_cb_new_data (ml_service_h handle, const char *name, const ml_tensors_data_h data, void *user_data) + * { + * void *_data; + * size_t _size; + * + * ml_tensors_data_get_tensor_data (data, 0, &_data, &_size); + * // Handle output data. + * } + * + * // The path to the configuration file. + * const char config_path[] = "/path/to/application/configuration/my_application_config.conf"; + * + * // Create ml-service for model inference from configuration. + * ml_service_h handle; + * ml_service_callbacks_s cb = { 0 }; + * + * cb.new_data = _ml_service_cb_new_data; + * + * ml_service_new (config_path, &handle); + * ml_service_set_event_cb (handle, &cb, NULL); + * + * // Get input information and allocate input buffer. + * ml_tensors_info_h input_info; + * void *input_buffer; + * size_t input_size; + + * ml_service_get_input_information (handle, NULL, &input_info); + * + * ml_tensors_info_get_tensor_size (input_info, 0, &input_size); + * input_buffer = malloc (input_size); + * + * // Create input data handle. + * ml_tensors_data_h input; + * + * ml_tensors_data_create (input_info, &input); + * ml_tensors_data_set_tensor_data (input, 0, input_buffer, input_size); + * + * // Push input data into ml-service and process the output in the callback. + * ml_service_request (handle, NULL, input); + * + * // Finally, release all handles and allocated memories. + * ml_tensors_info_destroy (input_info); + * ml_tensors_data_destroy (input); + * ml_service_destroy (handle); + * free (input_buffer); + * + * @endcode + */ +int ml_service_new (const char *config, ml_service_h *handle); + +/** + * @brief Sets the callbacks which will be invoked when a new event occurs from ml-service. + * @since_tizen 9.0 + * @param[in] handle The handle of ml-service. + * @param[in] cb The callbacks to handle the events from ml-service. + * @param[in] user_data Private data for the callback. This value is passed to the callback when it's invoked. + * @return @c 0 on success. Otherwise a negative error value. + * @retval #ML_ERROR_NONE Successful. 
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ */
+int ml_service_set_event_cb (ml_service_h handle, ml_service_callbacks_s *cb, void *user_data);
+
+/**
+ * @brief Starts the process of ml-service.
+ * @since_tizen 9.0
+ * @param[in] handle The handle of ml-service.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ * @retval #ML_ERROR_STREAMS_PIPE Failed to start the process.
+ */
+int ml_service_start (ml_service_h handle);
+
+/**
+ * @brief Stops the process of ml-service.
+ * @since_tizen 9.0
+ * @param[in] handle The handle of ml-service.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
+ * @retval #ML_ERROR_INVALID_PARAMETER Given parameter is invalid.
+ * @retval #ML_ERROR_STREAMS_PIPE Failed to stop the process.
+ */
+int ml_service_stop (ml_service_h handle);
+
+/**
+ * @brief Gets the information of required input data.
+ * @details Note that a model may not have such information if its input type is not determined statically.
+ * @since_tizen 9.0
+ * @remarks The @a info should be released using ml_tensors_info_destroy().
+ * @param[in] handle The handle of ml-service.
+ * @param[in] name The name of the input node in the pipeline. This may be NULL if the ml-service is constructed from a model configuration.
+ * @param[out] info The handle of input tensors information.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
+ * @retval #ML_ERROR_INVALID_PARAMETER The parameter is invalid.
+ */
+int ml_service_get_input_information (ml_service_h handle, const char *name, ml_tensors_info_h *info);
+
+/**
+ * @brief Gets the information of output data.
+ * @details Note that a model may not have such information if its output is not determined statically.
+ * @since_tizen 9.0
+ * @remarks The @a info should be released using ml_tensors_info_destroy().
+ * @param[in] handle The handle of ml-service.
+ * @param[in] name The name of the output node in the pipeline. This may be NULL if the ml-service is constructed from a model configuration.
+ * @param[out] info The handle of output tensors information.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
+ * @retval #ML_ERROR_INVALID_PARAMETER The parameter is invalid.
+ */
+int ml_service_get_output_information (ml_service_h handle, const char *name, ml_tensors_info_h *info);
+
+/**
+ * @brief Sets the information for ml-service.
+ * @since_tizen 9.0
+ * @param[in] handle The handle of ml-service.
+ * @param[in] name The name of the information to set.
+ * @param[in] value The value to set for the given name.
+ * @return @c 0 on success. Otherwise a negative error value.
+ * @retval #ML_ERROR_NONE Successful.
+ * @retval #ML_ERROR_NOT_SUPPORTED Not supported.
+ * @retval #ML_ERROR_INVALID_PARAMETER The parameter is invalid.
+ */
+int ml_service_set_information (ml_service_h handle, const char *name, const char *value);
+
+/**
+ * @brief Gets the information from ml-service.
+ * @details Note that the configuration file may not include such an information field.
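+ * For example, when the configuration contains an "information" object such
+ * as { "information" : { "threshold" : "0.5" } }, the value can be fetched
+ * with ml_service_get_information (handle, "threshold", &value). The key name
+ * here is only an illustration.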
+ * @since_tizen 9.0 + * @remarks The @a value should be released using free(). + * @param[in] handle The handle of ml-service. + * @param[in] name The name to get the corresponding value. + * @param[out] value The value of the name. + * @return @c 0 on success. Otherwise a negative error value. + * @retval #ML_ERROR_NONE Successful. + * @retval #ML_ERROR_NOT_SUPPORTED Not supported. + * @retval #ML_ERROR_INVALID_PARAMETER The parameter is invalid. + */ +int ml_service_get_information (ml_service_h handle, const char *name, char **value); + +/** + * @brief Adds an input data to process the model in ml-service handle. + * @since_tizen 9.0 + * @param[in] handle The handle of ml-service. + * @param[in] name The name of input node in the pipeline. You can set NULL if ml-service is constructed from model configuration. + * @param[in] data The handle of tensors data to be processed. + * @return @c 0 on success. Otherwise a negative error value. + * @retval #ML_ERROR_NONE Successful. + * @retval #ML_ERROR_NOT_SUPPORTED Not supported. + * @retval #ML_ERROR_INVALID_PARAMETER The parameter is invalid. + * @retval #ML_ERROR_STREAMS_PIPE Failed to process the input data. + * @retval #ML_ERROR_OUT_OF_MEMORY Failed to allocate required memory. + */ +int ml_service_request (ml_service_h handle, const char *name, const ml_tensors_data_h data); + +/** + * @brief Destroys the handle for machine learning service. + * @details If given service handle is created by ml_service_launch_pipeline(), this requests machine learning agent to destroy the pipeline. * @since_tizen 7.0 - * @param[in] handle The service handle. - * @return @c 0 on Success. Otherwise a negative error value. + * @param[in] handle The handle of ml-service. + * @return @c 0 on success. Otherwise a negative error value. * @retval #ML_ERROR_NONE Successful. * @retval #ML_ERROR_NOT_SUPPORTED Not supported. - * @retval #ML_ERROR_INVALID_PARAMETER Fail. The parameter is invalid. - * @retval #ML_ERROR_STREAMS_PIPE Failed to access the pipeline state. + * @retval #ML_ERROR_INVALID_PARAMETER The parameter is invalid. + * @retval #ML_ERROR_STREAMS_PIPE Failed to stop the process. */ int ml_service_destroy (ml_service_h handle); diff --git a/c/include/nnstreamer-tizen-internal.h b/c/include/nnstreamer-tizen-internal.h index 4e8639bb..16b98bf2 100644 --- a/c/include/nnstreamer-tizen-internal.h +++ b/c/include/nnstreamer-tizen-internal.h @@ -39,16 +39,15 @@ typedef struct { char *fw_name; /**< The explicit framework name given by user */ } ml_single_preset; -typedef void *ml_service_event_h; - /** * @brief Enumeration for the event types of ml-service. + * @since_tizen 9.0 + * @todo TBU, need ACR later (update enum for ml-service event, see ml_service_callbacks_s) */ typedef enum { - ML_SERVICE_EVENT_MODEL_REGISTERED = 0, - ML_SERVICE_EVENT_PIPELINE_REGISTERED, - - ML_SERVICE_EVENT_UNKNOWN + ML_SERVICE_EVENT_MODEL_REGISTERED = 0, /**< TBU */ + ML_SERVICE_EVENT_PIPELINE_REGISTERED = 1, /**< TBU */ + ML_SERVICE_EVENT_UNKNOWN /**< Unknown or invalid event type. 
*/ } ml_service_event_e; /** diff --git a/c/src/meson.build b/c/src/meson.build index 6e028518..ae4ab2f5 100644 --- a/c/src/meson.build +++ b/c/src/meson.build @@ -1,7 +1,7 @@ nns_capi_common_srcs = files('ml-api-common.c', 'ml-api-inference-internal.c') nns_capi_single_srcs = files('ml-api-inference-single.c') nns_capi_pipeline_srcs = files('ml-api-inference-pipeline.c') -nns_capi_service_srcs = files('ml-api-service-common.c', 'ml-api-service-agent-client.c', 'ml-api-service-query-client.c') +nns_capi_service_srcs = files('ml-api-service-common.c', 'ml-api-service-extension.c', 'ml-api-service-agent-client.c', 'ml-api-service-query-client.c') if support_remote_service nns_capi_service_srcs += files('ml-api-service-remote.c') endif diff --git a/c/src/ml-api-service-agent-client.c b/c/src/ml-api-service-agent-client.c index ac7f0842..d88cc79b 100644 --- a/c/src/ml-api-service-agent-client.c +++ b/c/src/ml-api-service-agent-client.c @@ -13,8 +13,6 @@ #include #include -#include - #include "ml-api-internal.h" #include "ml-api-service-private.h" #include "ml-api-service.h" diff --git a/c/src/ml-api-service-common.c b/c/src/ml-api-service-common.c index 8c91aff0..9144cb84 100644 --- a/c/src/ml-api-service-common.c +++ b/c/src/ml-api-service-common.c @@ -10,8 +10,10 @@ * @bug No known bugs except for NYI items */ +#include + #include "ml-api-service.h" -#include "ml-api-service-private.h" +#include "ml-api-service-extension.h" #define ML_SERVICE_MAGIC 0xfeeedeed #define ML_SERVICE_MAGIC_DEAD 0xdeaddead @@ -32,6 +34,7 @@ _ml_service_handle_is_valid (ml_service_s * mls) case ML_SERVICE_TYPE_SERVER_PIPELINE: case ML_SERVICE_TYPE_CLIENT_QUERY: case ML_SERVICE_TYPE_REMOTE: + case ML_SERVICE_TYPE_EXTENSION: if (mls->priv == NULL) return FALSE; break; @@ -43,6 +46,34 @@ _ml_service_handle_is_valid (ml_service_s * mls) return TRUE; } +/** + * @brief Internal function to set information. + */ +static int +_ml_service_set_information_internal (ml_service_s * mls, const char *name, + const char *value) +{ + int status = ML_ERROR_NONE; + + /* Prevent empty string case. */ + if (!STR_IS_VALID (name) || !STR_IS_VALID (value)) + return ML_ERROR_INVALID_PARAMETER; + + status = ml_option_set (mls->information, name, g_strdup (value), g_free); + if (status != ML_ERROR_NONE) + return status; + + switch (mls->type) { + case ML_SERVICE_TYPE_EXTENSION: + status = ml_service_extension_set_information (mls, name, value); + break; + default: + break; + } + + return status; +} + /** * @brief Internal function to create new ml-service handle. */ @@ -50,11 +81,21 @@ ml_service_s * _ml_service_create_internal (ml_service_type_e ml_service_type) { ml_service_s *mls; + int status; mls = g_try_new0 (ml_service_s, 1); if (mls) { + status = ml_option_create (&mls->information); + if (status != ML_ERROR_NONE) { + g_free (mls); + _ml_error_report_return (NULL, + "Failed to create ml-option handle in ml-service."); + } + mls->magic = ML_SERVICE_MAGIC; mls->type = ml_service_type; + g_mutex_init (&mls->lock); + g_cond_init (&mls->cond); } return mls; @@ -66,45 +107,618 @@ _ml_service_create_internal (ml_service_type_e ml_service_type) int _ml_service_destroy_internal (ml_service_s * mls) { - int ret = ML_ERROR_NONE; + ml_service_cb_info_s old_cb; + int status = ML_ERROR_NONE; - if (!_ml_service_handle_is_valid (mls)) { - _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, - "The parameter, 'handle' (ml_service_h), is invalid. It should be a valid ml_service_h instance."); + if (!mls) { + /* Internal error? 
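+       This is not expected through the public API, which validates the
+       handle first; keep it as a defensive check.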
*/ + return ML_ERROR_INVALID_PARAMETER; } + /* Clear callback before closing internal handles. */ + g_mutex_lock (&mls->lock); + old_cb = mls->cb_info; + memset (&mls->cb_info, 0, sizeof (ml_service_cb_info_s)); + g_mutex_unlock (&mls->lock); + switch (mls->type) { case ML_SERVICE_TYPE_SERVER_PIPELINE: - ret = ml_service_pipeline_release_internal (mls); + status = ml_service_pipeline_release_internal (mls); break; case ML_SERVICE_TYPE_CLIENT_QUERY: - ret = ml_service_query_release_internal (mls); + status = ml_service_query_release_internal (mls); break; case ML_SERVICE_TYPE_REMOTE: - ret = ml_service_remote_release_internal (mls); + status = ml_service_remote_release_internal (mls); + break; + case ML_SERVICE_TYPE_EXTENSION: + status = ml_service_extension_destroy (mls); break; default: _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, "Invalid type of ml_service_h."); } - if (ret == ML_ERROR_NONE) { + if (status == ML_ERROR_NONE) { mls->magic = ML_SERVICE_MAGIC_DEAD; + ml_option_destroy (mls->information); + + g_cond_clear (&mls->cond); + g_mutex_clear (&mls->lock); g_free (mls); } else { _ml_error_report ("Failed to release ml-service handle, internal error?"); + + g_mutex_lock (&mls->lock); + mls->cb_info = old_cb; + g_mutex_unlock (&mls->lock); + } + + return status; +} + +/** + * @brief Internal function to get ml-service callback. + */ +void +_ml_service_get_callback_info (ml_service_s * mls, + ml_service_cb_info_s * cb_info) +{ + if (!mls || !cb_info) + return; + + g_mutex_lock (&mls->lock); + *cb_info = mls->cb_info; + g_mutex_unlock (&mls->lock); +} + +/** + * @brief Internal function to parse model path from json. + */ +int +_ml_service_conf_parse_path (JsonNode * file_node, gchar ** path) +{ + guint i, n; + + *path = NULL; + + if (JSON_NODE_HOLDS_ARRAY (file_node)) { + JsonArray *array = json_node_get_array (file_node); + GString *val = g_string_new (NULL); + + n = (array) ? json_array_get_length (array) : 0U; + for (i = 0; i < n; i++) { + const gchar *p = json_array_get_string_element (array, i); + + g_string_append (val, p); + if (i < n - 1) + g_string_append (val, ","); + } + + *path = g_string_free (val, FALSE); + } else { + *path = g_strdup (json_node_get_string (file_node)); + } + + return (*path != NULL) ? ML_ERROR_NONE : ML_ERROR_INVALID_PARAMETER; +} + +/** + * @brief Internal function to parse tensors-info from json. 
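+ * @details The given node holds a single object or an array of objects, each
+ *          with optional "type", "dimension" and "name" members, e.g. the
+ *          hypothetical entry below:
+ *          [ { "type" : "float32", "dimension" : "3:224:224:1", "name" : "input_img" } ]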
+ */ +int +_ml_service_conf_parse_tensors_info (JsonNode * info_node, + ml_tensors_info_h * info_h) +{ + JsonArray *array = NULL; + JsonObject *object; + GstTensorsInfo info; + GstTensorInfo *_info; + const gchar *_str; + guint i; + int status; + + gst_tensors_info_init (&info); + + info.num_tensors = 1; + if (JSON_NODE_HOLDS_ARRAY (info_node)) { + array = json_node_get_array (info_node); + info.num_tensors = json_array_get_length (array); + } + + for (i = 0; i < info.num_tensors; i++) { + _info = gst_tensors_info_get_nth_info (&info, i); + + if (array) + object = json_array_get_object_element (array, i); + else + object = json_node_get_object (info_node); + + if (json_object_has_member (object, "type")) { + _str = json_object_get_string_member (object, "type"); + + if (STR_IS_VALID (_str)) + _info->type = gst_tensor_get_type (_str); + } + + if (json_object_has_member (object, "dimension")) { + _str = json_object_get_string_member (object, "dimension"); + + if (STR_IS_VALID (_str)) + gst_tensor_parse_dimension (_str, _info->dimension); + } + + if (json_object_has_member (object, "name")) { + _str = json_object_get_string_member (object, "name"); + + if (STR_IS_VALID (_str)) + _info->name = g_strdup (_str); + } + } + + if (gst_tensors_info_validate (&info)) + status = _ml_tensors_info_create_from_gst (info_h, &info); + else + status = ML_ERROR_INVALID_PARAMETER; + + gst_tensors_info_free (&info); + return status; +} + +/** + * @brief Internal function to parse app information from json. + */ +int +_ml_service_conf_parse_information (ml_service_s * mls, JsonObject * info) +{ + g_autoptr (GList) members = NULL; + GList *iter; + int status; + + members = json_object_get_members (info); + for (iter = members; iter; iter = g_list_next (iter)) { + const gchar *name = iter->data; + const gchar *value = json_object_get_string_member (info, name); + + status = _ml_service_set_information_internal (mls, name, value); + if (status != ML_ERROR_NONE) + return status; + } + + return ML_ERROR_NONE; +} + +/** + * @brief Creates a handle for machine learning service with configuration. + */ +int +ml_service_new (const char *config, ml_service_h * handle) +{ + ml_service_s *mls; + ml_service_type_e service_type = ML_SERVICE_TYPE_UNKNOWN; + g_autofree gchar *json_string = NULL; + g_autoptr (JsonParser) parser = NULL; + g_autoptr (GError) err = NULL; + JsonNode *root; + JsonObject *object; + int status; + + check_feature_state (ML_FEATURE_SERVICE); + + if (!handle) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, 'handle' (ml_service_h), is NULL. It should be a valid pointer to create new instance."); + } + + /* Init null. */ + *handle = NULL; + + if (!STR_IS_VALID (config) || + !g_file_test (config, (G_FILE_TEST_EXISTS | G_FILE_TEST_IS_REGULAR))) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, config, is invalid. It should be a valid path."); + } + + if (!g_file_get_contents (config, &json_string, NULL, NULL)) { + _ml_error_report_return (ML_ERROR_IO_ERROR, + "Failed to read configuration file '%s'.", config); + } + + parser = json_parser_new (); + if (!parser) { + _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY, + "Failed to parse configuration file, cannot allocate memory for JsonParser. Out of memory?"); + } + + if (!json_parser_load_from_data (parser, json_string, -1, &err)) { + _ml_error_report_return (ML_ERROR_IO_ERROR, + "Failed to parse configuration file, cannot load json string (%s).", + err ? 
err->message : "Unknown error"); + } + + root = json_parser_get_root (parser); + if (!root) { + _ml_error_report_return (ML_ERROR_IO_ERROR, + "Failed to parse configuration file, cannot get the top node from json string."); } - return ret; + object = json_node_get_object (root); + + if (json_object_has_member (object, "single") || + json_object_has_member (object, "pipeline")) { + service_type = ML_SERVICE_TYPE_EXTENSION; + } else { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "Failed to parse configuration file, cannot get the valid type from configuration."); + } + + /* Parse each service type. */ + mls = _ml_service_create_internal (service_type); + if (mls == NULL) { + _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY, + "Failed to allocate memory for the ml-service handle. Out of memory?"); + } + + switch (service_type) { + case ML_SERVICE_TYPE_EXTENSION: + status = ml_service_extension_create (mls, object); + break; + default: + /* Invalid handle type. */ + status = ML_ERROR_NOT_SUPPORTED; + break; + } + + if (status != ML_ERROR_NONE) + goto error; + + /* Parse information. */ + if (json_object_has_member (object, "information")) { + JsonObject *info = json_object_get_object_member (object, "information"); + + status = _ml_service_conf_parse_information (mls, info); + if (status != ML_ERROR_NONE) + goto error; + } + +error: + if (status == ML_ERROR_NONE) { + *handle = mls; + } else { + _ml_error_report ("Failed to open the ml-service configuration."); + _ml_service_destroy_internal (mls); + } + + return status; +} + +/** + * @brief Sets the callbacks which will be invoked when a new event occurs from ml-service. + */ +int +ml_service_set_event_cb (ml_service_h handle, ml_service_callbacks_s * cb, + void *user_data) +{ + ml_service_s *mls = (ml_service_s *) handle; + + check_feature_state (ML_FEATURE_SERVICE); + + if (!_ml_service_handle_is_valid (mls)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, 'handle' (ml_service_h), is invalid. It should be a valid ml_service_h instance, which is usually created by ml_service_new()."); + } + + g_mutex_lock (&mls->lock); + + /* Clear all if given callback is null. */ + if (cb) { + mls->cb_info.cb = *cb; + mls->cb_info.pdata = user_data; + } else { + memset (&mls->cb_info, 0, sizeof (ml_service_cb_info_s)); + } + + g_mutex_unlock (&mls->lock); + + return ML_ERROR_NONE; } /** - * @brief Destroy the service handle. + * @brief Starts the process of ml-service. + */ +int +ml_service_start (ml_service_h handle) +{ + ml_service_s *mls = (ml_service_s *) handle; + int status = ML_ERROR_NONE; + + check_feature_state (ML_FEATURE_SERVICE); + + if (!_ml_service_handle_is_valid (mls)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, 'handle' (ml_service_h), is invalid. It should be a valid ml_service_h instance, which is usually created by ml_service_new()."); + } + + switch (mls->type) { + case ML_SERVICE_TYPE_SERVER_PIPELINE: + { + _ml_service_server_s *server = (_ml_service_server_s *) mls->priv; + + status = ml_agent_pipeline_start (server->id); + if (status < 0) + _ml_error_report ("Failed to invoke the method start_pipeline."); + + break; + } + case ML_SERVICE_TYPE_EXTENSION: + status = ml_service_extension_start (mls); + break; + default: + /* Invalid handle type. */ + status = ML_ERROR_NOT_SUPPORTED; + break; + } + + return status; +} + +/** + * @brief Stops the process of ml-service. 
+ */ +int +ml_service_stop (ml_service_h handle) +{ + ml_service_s *mls = (ml_service_s *) handle; + int status = ML_ERROR_NONE; + + check_feature_state (ML_FEATURE_SERVICE); + + if (!_ml_service_handle_is_valid (mls)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, 'handle' (ml_service_h), is invalid. It should be a valid ml_service_h instance, which is usually created by ml_service_new()."); + } + + switch (mls->type) { + case ML_SERVICE_TYPE_SERVER_PIPELINE: + { + _ml_service_server_s *server = (_ml_service_server_s *) mls->priv; + + status = ml_agent_pipeline_stop (server->id); + if (status < 0) + _ml_error_report ("Failed to invoke the method stop_pipeline."); + + break; + } + case ML_SERVICE_TYPE_EXTENSION: + status = ml_service_extension_stop (mls); + break; + default: + /* Invalid handle type. */ + status = ML_ERROR_NOT_SUPPORTED; + break; + } + + return status; +} + +/** + * @brief Gets the information of required input data. + */ +int +ml_service_get_input_information (ml_service_h handle, const char *name, + ml_tensors_info_h * info) +{ + ml_service_s *mls = (ml_service_s *) handle; + int status; + + check_feature_state (ML_FEATURE_SERVICE); + + if (!_ml_service_handle_is_valid (mls)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, 'handle' (ml_service_h), is invalid. It should be a valid ml_service_h instance, which is usually created by ml_service_new()."); + } + + if (!info) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, info (ml_tensors_info_h), is NULL. It should be a valid pointer to create new instance."); + } + + /* Init null. */ + *info = NULL; + + switch (mls->type) { + case ML_SERVICE_TYPE_EXTENSION: + status = ml_service_extension_get_input_information (mls, name, info); + break; + default: + /* Invalid handle type. */ + status = ML_ERROR_NOT_SUPPORTED; + break; + } + + if (status != ML_ERROR_NONE) { + if (*info) { + ml_tensors_info_destroy (*info); + *info = NULL; + } + } + + return status; +} + +/** + * @brief Gets the information of output data. + */ +int +ml_service_get_output_information (ml_service_h handle, const char *name, + ml_tensors_info_h * info) +{ + ml_service_s *mls = (ml_service_s *) handle; + int status; + + check_feature_state (ML_FEATURE_SERVICE); + + if (!_ml_service_handle_is_valid (mls)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, 'handle' (ml_service_h), is invalid. It should be a valid ml_service_h instance, which is usually created by ml_service_new()."); + } + + if (!info) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, info (ml_tensors_info_h), is NULL. It should be a valid pointer to create new instance."); + } + + /* Init null. */ + *info = NULL; + + switch (mls->type) { + case ML_SERVICE_TYPE_EXTENSION: + status = ml_service_extension_get_output_information (mls, name, info); + break; + default: + /* Invalid handle type. */ + status = ML_ERROR_NOT_SUPPORTED; + break; + } + + if (status != ML_ERROR_NONE) { + if (*info) { + ml_tensors_info_destroy (*info); + *info = NULL; + } + } + + return status; +} + +/** + * @brief Sets the information for ml-service. 
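+ * @details The value is stored in the handle's ml-option table; an extension
+ *          handle additionally interprets "input_queue_size" (or "max_input")
+ *          and "timeout".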
+ */ +int +ml_service_set_information (ml_service_h handle, const char *name, + const char *value) +{ + ml_service_s *mls = (ml_service_s *) handle; + int status; + + check_feature_state (ML_FEATURE_SERVICE); + + if (!_ml_service_handle_is_valid (mls)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, 'handle' (ml_service_h), is invalid. It should be a valid ml_service_h instance, which is usually created by ml_service_new()."); + } + + if (!STR_IS_VALID (name)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, name '%s', is invalid.", name); + } + + if (!STR_IS_VALID (value)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, value '%s', is invalid.", value); + } + + g_mutex_lock (&mls->lock); + status = _ml_service_set_information_internal (mls, name, value); + g_mutex_unlock (&mls->lock); + + if (status != ML_ERROR_NONE) { + _ml_error_report_return (status, + "Failed to set the information '%s'.", name); + } + + return ML_ERROR_NONE; +} + +/** + * @brief Gets the information from ml-service. + */ +int +ml_service_get_information (ml_service_h handle, const char *name, char **value) +{ + ml_service_s *mls = (ml_service_s *) handle; + gchar *val = NULL; + int status; + + check_feature_state (ML_FEATURE_SERVICE); + + if (!_ml_service_handle_is_valid (mls)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, 'handle' (ml_service_h), is invalid. It should be a valid ml_service_h instance, which is usually created by ml_service_new()."); + } + + if (!STR_IS_VALID (name)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, name '%s', is invalid.", name); + } + + if (!value) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, value, is NULL. It should be a valid pointer."); + } + + g_mutex_lock (&mls->lock); + status = ml_option_get (mls->information, name, (void **) (&val)); + g_mutex_unlock (&mls->lock); + + if (status != ML_ERROR_NONE) { + _ml_error_report_return (status, + "The ml-service handle does not include the information '%s'.", name); + } + + *value = g_strdup (val); + return ML_ERROR_NONE; +} + +/** + * @brief Adds an input data to process the model in ml-service extension handle. + */ +int +ml_service_request (ml_service_h handle, const char *name, + const ml_tensors_data_h data) +{ + ml_service_s *mls = (ml_service_s *) handle; + int status; + + check_feature_state (ML_FEATURE_SERVICE); + + if (!_ml_service_handle_is_valid (mls)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, 'handle' (ml_service_h), is invalid. It should be a valid ml_service_h instance, which is usually created by ml_service_new()."); + } + + if (!data) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, data (ml_tensors_data_h), is NULL. It should be a valid ml_tensor_data_h instance, which is usually created by ml_tensors_data_create()."); + } + + switch (mls->type) { + case ML_SERVICE_TYPE_EXTENSION: + status = ml_service_extension_request (mls, name, data); + break; + default: + /* Invalid handle type. */ + status = ML_ERROR_NOT_SUPPORTED; + break; + } + + return status; +} + +/** + * @brief Destroys the handle for machine learning service. 
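+ * @details Validates the handle and then releases the type-specific resources
+ *          through _ml_service_destroy_internal().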
*/ int ml_service_destroy (ml_service_h handle) { + ml_service_s *mls = (ml_service_s *) handle; + check_feature_state (ML_FEATURE_SERVICE); - return _ml_service_destroy_internal ((ml_service_s *) handle); + if (!_ml_service_handle_is_valid (mls)) { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, 'handle' (ml_service_h), is invalid. It should be a valid ml_service_h instance, which is usually created by ml_service_new()."); + } + + return _ml_service_destroy_internal (mls); } diff --git a/c/src/ml-api-service-extension.c b/c/src/ml-api-service-extension.c new file mode 100644 index 00000000..0eb34720 --- /dev/null +++ b/c/src/ml-api-service-extension.c @@ -0,0 +1,865 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/** + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved. + * + * @file ml-api-service-extension.c + * @date 1 September 2023 + * @brief ML service extension C-API. + * @see https://github.com/nnstreamer/api + * @author Jaeyun Jung + * @bug No known bugs except for NYI items + */ + +#include "ml-api-service-extension.h" + +/** + * @brief The time to wait for new input data in message thread, in millisecond. + */ +#define DEFAULT_TIMEOUT 200 + +/** + * @brief The max number of input data in message queue (0 for no limit). + */ +#define DEFAULT_MAX_INPUT 5 + +/** + * @brief Internal enumeration for ml-service extension types. + */ +typedef enum +{ + ML_EXTENSION_TYPE_UNKNOWN = 0, + ML_EXTENSION_TYPE_SINGLE = 1, + ML_EXTENSION_TYPE_PIPELINE = 2, + + ML_EXTENSION_TYPE_MAX +} ml_extension_type_e; + +/** + * @brief Internal enumeration for the node type in pipeline. + */ +typedef enum +{ + ML_EXTENSION_NODE_TYPE_UNKNOWN = 0, + ML_EXTENSION_NODE_TYPE_INPUT = 1, + ML_EXTENSION_NODE_TYPE_OUTPUT = 2, + + ML_EXTENSION_NODE_TYPE_MAX +} ml_extension_node_type_e; + +/** + * @brief Internal structure of the node info in pipeline. + */ +typedef struct +{ + gchar *name; + ml_extension_node_type_e type; + ml_tensors_info_h info; + void *handle; + void *mls; +} ml_extension_node_info_s; + +/** + * @brief Internal structure of the message in ml-service extension handle. + */ +typedef struct +{ + gchar *name; + ml_tensors_data_h input; + ml_tensors_data_h output; +} ml_extension_msg_s; + +/** + * @brief Internal structure for ml-service extension handle. + */ +typedef struct +{ + ml_extension_type_e type; + gboolean running; + guint timeout; /**< The time to wait for new input data in message thread, in millisecond (see DEFAULT_TIMEOUT). */ + guint max_input; /**< The max number of input data in message queue (see DEFAULT_MAX_INPUT). */ + GThread *msg_thread; + GAsyncQueue *msg_queue; + + /** + * Handles for each ml-service extension type. + * - single : Default. Open model file and prepare invoke. The configuration should include model information. + * - pipeline : Construct a pipeline from configuration. The configuration should include pipeline description. + */ + ml_single_h single; + + ml_pipeline_h pipeline; + GHashTable *node_table; +} ml_extension_s; + +/** + * @brief Internal function to create node info in pipeline. 
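+ * @details The returned node info is owned by the extension's node_table,
+ *          keyed by the node name, and is released by
+ *          _ml_extension_node_info_free() when the table is destroyed.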
+ */ +static ml_extension_node_info_s * +_ml_extension_node_info_new (ml_service_s * mls, const gchar * name, + ml_extension_node_type_e type) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + ml_extension_node_info_s *node_info; + + if (!STR_IS_VALID (name)) + _ml_error_report_return (NULL, + "Cannot add new node info, invalid node name '%s'.", name); + + if (g_hash_table_lookup (ext->node_table, name)) + _ml_error_report_return (NULL, + "Cannot add duplicated node '%s' in ml-service pipeline.", name); + + node_info = g_try_new0 (ml_extension_node_info_s, 1); + if (!node_info) + _ml_error_report_return (NULL, + "Failed to allocate new memory for node info in ml-service pipeline. Out of memory?"); + + node_info->name = g_strdup (name); + node_info->type = type; + node_info->mls = mls; + + g_hash_table_insert (ext->node_table, g_strdup (name), node_info); + + return node_info; +} + +/** + * @brief Internal function to release pipeline node info. + */ +static void +_ml_extension_node_info_free (gpointer data) +{ + ml_extension_node_info_s *node_info = (ml_extension_node_info_s *) data; + + if (!node_info) + return; + + if (node_info->info) + ml_tensors_info_destroy (node_info->info); + + g_free (node_info->name); + g_free (node_info); +} + +/** + * @brief Internal function to get the node info in ml-service extension. + */ +static ml_extension_node_info_s * +_ml_extension_node_info_get (ml_extension_s * ext, const gchar * name) +{ + if (!STR_IS_VALID (name)) + return NULL; + + return g_hash_table_lookup (ext->node_table, name); +} + +/** + * @brief Internal callback for sink node in pipeline description. + */ +static void +_ml_extension_pipeline_sink_cb (const ml_tensors_data_h data, + const ml_tensors_info_h info, void *user_data) +{ + ml_extension_node_info_s *node_info = (ml_extension_node_info_s *) user_data; + ml_service_s *mls = (ml_service_s *) node_info->mls; + ml_service_cb_info_s cb_info; + + _ml_service_get_callback_info (mls, &cb_info); + + if (cb_info.cb.new_data) + cb_info.cb.new_data (mls, node_info->name, data, cb_info.pdata); +} + +/** + * @brief Internal function to release ml-service extension message. + */ +static void +_ml_extension_msg_free (gpointer data) +{ + ml_extension_msg_s *msg = (ml_extension_msg_s *) data; + + if (!msg) + return; + + if (msg->input) + ml_tensors_data_destroy (msg->input); + if (msg->output) + ml_tensors_data_destroy (msg->output); + + g_free (msg->name); + g_free (msg); +} + +/** + * @brief Internal function to process ml-service extension message. 
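+ * @details The thread waits up to 'timeout' milliseconds for a queued message.
+ *          For a single-shot service it invokes the model and delivers the
+ *          result through the new_data callback; for a pipeline service it
+ *          pushes the data into the named source node (results then arrive
+ *          via the registered sink callback).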
+ */ +static gpointer +_ml_extension_msg_thread (gpointer data) +{ + ml_service_s *mls = (ml_service_s *) data; + ml_extension_s *ext = (ml_extension_s *) mls->priv; + int status; + + g_mutex_lock (&mls->lock); + ext->running = TRUE; + g_cond_signal (&mls->cond); + g_mutex_unlock (&mls->lock); + + while (ext->running) { + ml_extension_msg_s *msg; + + msg = g_async_queue_timeout_pop (ext->msg_queue, + ext->timeout * G_TIME_SPAN_MILLISECOND); + + if (msg) { + switch (ext->type) { + case ML_EXTENSION_TYPE_SINGLE: + { + status = ml_single_invoke (ext->single, msg->input, &msg->output); + + if (status == ML_ERROR_NONE) { + ml_service_cb_info_s cb_info; + + _ml_service_get_callback_info (mls, &cb_info); + + if (cb_info.cb.new_data) + cb_info.cb.new_data (mls, msg->name, msg->output, cb_info.pdata); + } else { + _ml_error_report + ("Failed to invoke the model in ml-service extension thread."); + } + break; + } + case ML_EXTENSION_TYPE_PIPELINE: + { + ml_extension_node_info_s *node_info; + + node_info = _ml_extension_node_info_get (ext, msg->name); + + if (node_info && node_info->type == ML_EXTENSION_NODE_TYPE_INPUT) { + /* The input data will be released in the pipeline. */ + status = ml_pipeline_src_input_data (node_info->handle, msg->input, + ML_PIPELINE_BUF_POLICY_AUTO_FREE); + msg->input = NULL; + + if (status != ML_ERROR_NONE) { + _ml_error_report + ("Failed to push input data into the pipeline in ml-service extension thread."); + } + } else { + _ml_error_report + ("Failed to push input data into the pipeline, cannot find input node '%s'.", + msg->name); + } + break; + } + default: + /* Unknown ml-service extension type, skip this. */ + break; + } + + _ml_extension_msg_free (msg); + } + } + + return NULL; +} + +/** + * @brief Wrapper to release tensors-info handle. + */ +static void +_ml_extension_destroy_tensors_info (void *data) +{ + ml_tensors_info_h info = (ml_tensors_info_h) data; + + if (info) + ml_tensors_info_destroy (info); +} + +/** + * @brief Internal function to parse single-shot info from json. + */ +static int +_ml_extension_conf_parse_single (ml_service_s * mls, JsonObject * single) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + ml_option_h option; + int status; + + status = ml_option_create (&option); + if (status != ML_ERROR_NONE) + _ml_error_report_return (status, + "Failed to parse configuration file, cannot create ml-option handle."); + + /** + * 1. "key" : load model info from ml-service agent. + * 2. "model" : configuration file includes model path. + */ + if (json_object_has_member (single, "key")) { + const gchar *key = json_object_get_string_member (single, "key"); + + if (STR_IS_VALID (key)) { + ml_information_h model_info; + + status = ml_service_model_get_activated (key, &model_info); + if (status == ML_ERROR_NONE) { + gchar *paths = NULL; + + /** @todo parse desc and other information if necessary. 
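+        Currently only the "path" member of the activated model information
+        is consumed below.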
*/ + ml_information_get (model_info, "path", (void **) (&paths)); + ml_option_set (option, "models", g_strdup (paths), g_free); + + ml_information_destroy (model_info); + } else { + _ml_error_report + ("Failed to parse configuration file, cannot get the model of '%s'.", + key); + goto error; + } + } + } else if (json_object_has_member (single, "model")) { + JsonNode *file_node = json_object_get_member (single, "model"); + gchar *paths = NULL; + + status = _ml_service_conf_parse_path (file_node, &paths); + if (status != ML_ERROR_NONE) { + _ml_error_report + ("Failed to parse configuration file, it should have valid model path."); + goto error; + } + + ml_option_set (option, "models", paths, g_free); + } else { + status = ML_ERROR_INVALID_PARAMETER; + _ml_error_report + ("Failed to parse configuration file, cannot get the model path."); + goto error; + } + + if (json_object_has_member (single, "framework")) { + const gchar *fw = json_object_get_string_member (single, "framework"); + + if (STR_IS_VALID (fw)) + ml_option_set (option, "framework_name", g_strdup (fw), g_free); + } + + if (json_object_has_member (single, "input_info")) { + JsonNode *info_node = json_object_get_member (single, "input_info"); + ml_tensors_info_h in_info; + + status = _ml_service_conf_parse_tensors_info (info_node, &in_info); + if (status != ML_ERROR_NONE) { + _ml_error_report + ("Failed to parse configuration file, cannot parse input information."); + goto error; + } + + ml_option_set (option, "input_info", in_info, + _ml_extension_destroy_tensors_info); + } + + if (json_object_has_member (single, "output_info")) { + JsonNode *info_node = json_object_get_member (single, "output_info"); + ml_tensors_info_h out_info; + + status = _ml_service_conf_parse_tensors_info (info_node, &out_info); + if (status != ML_ERROR_NONE) { + _ml_error_report + ("Failed to parse configuration file, cannot parse output information."); + goto error; + } + + ml_option_set (option, "output_info", out_info, + _ml_extension_destroy_tensors_info); + } + + if (json_object_has_member (single, "custom")) { + const gchar *custom = json_object_get_string_member (single, "custom"); + + if (STR_IS_VALID (custom)) + ml_option_set (option, "custom", g_strdup (custom), g_free); + } + +error: + if (status == ML_ERROR_NONE) + status = ml_single_open_with_option (&ext->single, option); + + ml_option_destroy (option); + return status; +} + +/** + * @brief Internal function to parse the node info in pipeline. 
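+ * @details Each node entry carries a "name" and an "info" member, e.g. a
+ *          hypothetical input node:
+ *          { "name" : "input_img", "info" : { "type" : "uint8", "dimension" : "3:224:224:1" } }
+ *          The name must match a source or sink element in the pipeline description.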
+ */ +static int +_ml_extension_conf_parse_pipeline_node (ml_service_s * mls, JsonNode * node, + ml_extension_node_type_e type) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + JsonArray *array = NULL; + JsonObject *object; + guint i, n; + int status; + + n = 1; + if (JSON_NODE_HOLDS_ARRAY (node)) { + array = json_node_get_array (node); + n = json_array_get_length (array); + } + + for (i = 0; i < n; i++) { + const gchar *name = NULL; + ml_extension_node_info_s *node_info; + + if (array) + object = json_array_get_object_element (array, i); + else + object = json_node_get_object (node); + + if (json_object_has_member (object, "name")) + name = json_object_get_string_member (object, "name"); + + node_info = _ml_extension_node_info_new (mls, name, type); + if (!node_info) + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "Failed to parse configuration file, cannot add new node information."); + + if (json_object_has_member (object, "info")) { + JsonNode *info_node = json_object_get_member (object, "info"); + + status = _ml_service_conf_parse_tensors_info (info_node, + &node_info->info); + if (status != ML_ERROR_NONE) + _ml_error_report_return (status, + "Failed to parse configuration file, cannot parse the information."); + } else { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "Failed to parse configuration file, cannot find node information."); + } + + switch (type) { + case ML_EXTENSION_NODE_TYPE_INPUT: + status = ml_pipeline_src_get_handle (ext->pipeline, name, + &node_info->handle); + break; + case ML_EXTENSION_NODE_TYPE_OUTPUT: + status = ml_pipeline_sink_register (ext->pipeline, name, + _ml_extension_pipeline_sink_cb, node_info, &node_info->handle); + break; + default: + status = ML_ERROR_INVALID_PARAMETER; + break; + } + + if (status != ML_ERROR_NONE) + _ml_error_report_return (status, + "Failed to parse configuration file, cannot get the handle for pipeline node."); + } + + return ML_ERROR_NONE; +} + +/** + * @brief Internal function to parse pipeline info from json. + */ +static int +_ml_extension_conf_parse_pipeline (ml_service_s * mls, JsonObject * pipe) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + g_autofree gchar *desc = NULL; + int status; + + /** + * 1. "key" : load pipeline from ml-service agent. + * 2. "description" : configuration file includes pipeline description. 
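+   * e.g., a hypothetical entry with named app elements:
+   *   "pipeline" : { "description" : "appsrc name=input_img ! ... ! tensor_sink name=result" }
+   * The element names must match the "input_node"/"output_node" names below.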
+ */ + if (json_object_has_member (pipe, "key")) { + const gchar *key = json_object_get_string_member (pipe, "key"); + + if (STR_IS_VALID (key)) { + status = ml_service_get_pipeline (key, &desc); + if (status != ML_ERROR_NONE) { + _ml_error_report_return (status, + "Failed to parse configuration file, cannot get the pipeline of '%s'.", + key); + } + } + } else if (json_object_has_member (pipe, "description")) { + desc = g_strdup (json_object_get_string_member (pipe, "description")); + } else { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "Failed to parse configuration file, cannot get the pipeline description."); + } + + status = ml_pipeline_construct (desc, NULL, NULL, &ext->pipeline); + if (status != ML_ERROR_NONE) { + _ml_error_report_return (status, + "Failed to parse configuration file, cannot construct the pipeline."); + } + + if (json_object_has_member (pipe, "input_node")) { + JsonNode *node = json_object_get_member (pipe, "input_node"); + + status = _ml_extension_conf_parse_pipeline_node (mls, node, + ML_EXTENSION_NODE_TYPE_INPUT); + if (status != ML_ERROR_NONE) + _ml_error_report_return (status, + "Failed to parse configuration file, cannot get the input node."); + } else { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "Failed to parse configuration file, cannot find the input node."); + } + + if (json_object_has_member (pipe, "output_node")) { + JsonNode *node = json_object_get_member (pipe, "output_node"); + + status = _ml_extension_conf_parse_pipeline_node (mls, node, + ML_EXTENSION_NODE_TYPE_OUTPUT); + if (status != ML_ERROR_NONE) + _ml_error_report_return (status, + "Failed to parse configuration file, cannot get the output node."); + } else { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "Failed to parse configuration file, cannot find the output node."); + } + + /* Start pipeline when creating ml-service handle to check pipeline description. */ + status = ml_pipeline_start (ext->pipeline); + if (status != ML_ERROR_NONE) + _ml_error_report_return (status, + "Failed to parse configuration file, cannot start the pipeline."); + + return ML_ERROR_NONE; +} + +/** + * @brief Internal function to parse configuration file. + */ +static int +_ml_extension_conf_parse_json (ml_service_s * mls, JsonObject * object) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + int status; + + if (json_object_has_member (object, "single")) { + JsonObject *single = json_object_get_object_member (object, "single"); + + status = _ml_extension_conf_parse_single (mls, single); + if (status != ML_ERROR_NONE) + return status; + + ext->type = ML_EXTENSION_TYPE_SINGLE; + } else if (json_object_has_member (object, "pipeline")) { + JsonObject *pipe = json_object_get_object_member (object, "pipeline"); + + status = _ml_extension_conf_parse_pipeline (mls, pipe); + if (status != ML_ERROR_NONE) + return status; + + ext->type = ML_EXTENSION_TYPE_PIPELINE; + } else { + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "Failed to parse configuration file, cannot get the valid type from configuration."); + } + + return ML_ERROR_NONE; +} + +/** + * @brief Internal function to create ml-service extension. 
+ */ +int +ml_service_extension_create (ml_service_s * mls, JsonObject * object) +{ + ml_extension_s *ext; + g_autofree gchar *thread_name = g_strdup_printf ("ml-ext-msg-%d", getpid ()); + int status; + + mls->priv = ext = g_try_new0 (ml_extension_s, 1); + if (ext == NULL) { + _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY, + "Failed to allocate memory for ml-service extension. Out of memory?"); + } + + ext->type = ML_EXTENSION_TYPE_UNKNOWN; + ext->running = FALSE; + ext->timeout = DEFAULT_TIMEOUT; + ext->max_input = DEFAULT_MAX_INPUT; + ext->node_table = g_hash_table_new_full (g_str_hash, g_str_equal, g_free, + _ml_extension_node_info_free); + + status = _ml_extension_conf_parse_json (mls, object); + if (status != ML_ERROR_NONE) { + _ml_error_report_return (status, + "Failed to parse the ml-service extension configuration."); + } + + g_mutex_lock (&mls->lock); + + ext->msg_queue = g_async_queue_new_full (_ml_extension_msg_free); + ext->msg_thread = g_thread_new (thread_name, _ml_extension_msg_thread, mls); + + /* Wait until the message thread has been initialized. */ + g_cond_wait (&mls->cond, &mls->lock); + g_mutex_unlock (&mls->lock); + + return ML_ERROR_NONE; +} + +/** + * @brief Internal function to release ml-service extension. + */ +int +ml_service_extension_destroy (ml_service_s * mls) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + + /* Supposed internal function call to release handle. */ + if (!ext) + return ML_ERROR_NONE; + + /** + * Close message thread. + * If model inference is running, it may wait for the result in message thread. + * This takes time, so do not call join with extension lock. + */ + ext->running = FALSE; + if (ext->msg_thread) { + g_thread_join (ext->msg_thread); + ext->msg_thread = NULL; + } + + if (ext->msg_queue) { + g_async_queue_unref (ext->msg_queue); + ext->msg_queue = NULL; + } + + if (ext->single) { + ml_single_close (ext->single); + ext->single = NULL; + } + + if (ext->pipeline) { + ml_pipeline_stop (ext->pipeline); + ml_pipeline_destroy (ext->pipeline); + ext->pipeline = NULL; + } + + if (ext->node_table) { + g_hash_table_destroy (ext->node_table); + ext->node_table = NULL; + } + + g_free (ext); + mls->priv = NULL; + + return ML_ERROR_NONE; +} + +/** + * @brief Internal function to start ml-service extension. + */ +int +ml_service_extension_start (ml_service_s * mls) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + int status = ML_ERROR_NONE; + + switch (ext->type) { + case ML_EXTENSION_TYPE_PIPELINE: + status = ml_pipeline_start (ext->pipeline); + break; + case ML_EXTENSION_TYPE_SINGLE: + /* Do nothing. */ + break; + default: + status = ML_ERROR_NOT_SUPPORTED; + break; + } + + return status; +} + +/** + * @brief Internal function to stop ml-service extension. + */ +int +ml_service_extension_stop (ml_service_s * mls) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + int status = ML_ERROR_NONE; + + switch (ext->type) { + case ML_EXTENSION_TYPE_PIPELINE: + status = ml_pipeline_stop (ext->pipeline); + break; + case ML_EXTENSION_TYPE_SINGLE: + /* Do nothing. */ + break; + default: + status = ML_ERROR_NOT_SUPPORTED; + break; + } + + return status; +} + +/** + * @brief Internal function to get the information of required input data. 
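+ * @details For a single-shot service, the information comes from the opened
+ *          model. For a pipeline service, it is the "info" configured for the
+ *          input node with the given name.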
+ */ +int +ml_service_extension_get_input_information (ml_service_s * mls, + const char *name, ml_tensors_info_h * info) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + int status; + + switch (ext->type) { + case ML_EXTENSION_TYPE_SINGLE: + status = ml_single_get_input_info (ext->single, info); + break; + case ML_EXTENSION_TYPE_PIPELINE: + { + ml_extension_node_info_s *node_info; + + node_info = _ml_extension_node_info_get (ext, name); + + if (node_info && node_info->type == ML_EXTENSION_NODE_TYPE_INPUT) { + status = ml_tensors_info_create (info); + if (status != ML_ERROR_NONE) + break; + status = ml_tensors_info_clone (*info, node_info->info); + if (status != ML_ERROR_NONE) + break; + } else { + status = ML_ERROR_INVALID_PARAMETER; + } + break; + } + default: + status = ML_ERROR_NOT_SUPPORTED; + break; + } + + return status; +} + +/** + * @brief Internal function to get the information of output data. + */ +int +ml_service_extension_get_output_information (ml_service_s * mls, + const char *name, ml_tensors_info_h * info) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + int status; + + switch (ext->type) { + case ML_EXTENSION_TYPE_SINGLE: + status = ml_single_get_output_info (ext->single, info); + break; + case ML_EXTENSION_TYPE_PIPELINE: + { + ml_extension_node_info_s *node_info; + + node_info = _ml_extension_node_info_get (ext, name); + + if (node_info && node_info->type == ML_EXTENSION_NODE_TYPE_OUTPUT) { + status = ml_tensors_info_create (info); + if (status != ML_ERROR_NONE) + break; + status = ml_tensors_info_clone (*info, node_info->info); + if (status != ML_ERROR_NONE) + break; + } else { + status = ML_ERROR_INVALID_PARAMETER; + } + break; + } + default: + status = ML_ERROR_NOT_SUPPORTED; + break; + } + + if (status != ML_ERROR_NONE) { + if (*info) { + ml_tensors_info_destroy (*info); + *info = NULL; + } + } + + return status; +} + +/** + * @brief Internal function to set the information for ml-service extension. + */ +int +ml_service_extension_set_information (ml_service_s * mls, const char *name, + const char *value) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + + /* Check limitation of message queue and other options. */ + if (g_ascii_strcasecmp (name, "input_queue_size") == 0 || + g_ascii_strcasecmp (name, "max_input") == 0) { + ext->max_input = (guint) g_ascii_strtoull (value, NULL, 10); + } else if (g_ascii_strcasecmp (name, "timeout") == 0) { + ext->timeout = (guint) g_ascii_strtoull (value, NULL, 10); + } + + return ML_ERROR_NONE; +} + +/** + * @brief Internal function to add an input data to process the model in ml-service extension handle. 
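+ * @details The input is cloned and queued; the message thread consumes it
+ *          asynchronously. When max_input is nonzero and the queue is full,
+ *          the request fails with ML_ERROR_STREAMS_PIPE.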
+ */ +int +ml_service_extension_request (ml_service_s * mls, const char *name, + const ml_tensors_data_h data) +{ + ml_extension_s *ext = (ml_extension_s *) mls->priv; + ml_extension_msg_s *msg; + int status, len; + + if (ext->type == ML_EXTENSION_TYPE_PIPELINE) { + ml_extension_node_info_s *node_info; + + if (!STR_IS_VALID (name)) + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, name '%s', is invalid.", name); + + node_info = _ml_extension_node_info_get (ext, name); + + if (!node_info || node_info->type != ML_EXTENSION_NODE_TYPE_INPUT) + _ml_error_report_return (ML_ERROR_INVALID_PARAMETER, + "The parameter, name '%s', is invalid, cannot find the input node from pipeline.", + name); + } + + len = g_async_queue_length (ext->msg_queue); + + if (ext->max_input > 0 && len > 0 && ext->max_input <= len) { + _ml_error_report_return (ML_ERROR_STREAMS_PIPE, + "Failed to push input data into the queue, the max number of input is %u.", + ext->max_input); + } + + msg = g_try_new0 (ml_extension_msg_s, 1); + if (!msg) + _ml_error_report_return (ML_ERROR_OUT_OF_MEMORY, + "Failed to allocate the ml-service extension message. Out of memory?"); + + msg->name = g_strdup (name); + status = ml_tensors_data_clone (data, &msg->input); + + if (status != ML_ERROR_NONE) { + _ml_extension_msg_free (msg); + _ml_error_report_return (status, "Failed to clone input data."); + } + + g_async_queue_push (ext->msg_queue, msg); + + return ML_ERROR_NONE; +} diff --git a/c/src/ml-api-service-extension.h b/c/src/ml-api-service-extension.h new file mode 100644 index 00000000..791c94fe --- /dev/null +++ b/c/src/ml-api-service-extension.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: Apache-2.0 */ +/** + * Copyright (c) 2023 Samsung Electronics Co., Ltd. All Rights Reserved. + * + * @file ml-api-service-extension.h + * @date 1 September 2023 + * @brief ML service extension C-API. + * @see https://github.com/nnstreamer/api + * @author Jaeyun Jung + * @bug No known bugs except for NYI items + */ +#ifndef __ML_API_SERVICE_EXTENSION_H__ +#define __ML_API_SERVICE_EXTENSION_H__ + +#include "ml-api-service-private.h" + +#ifdef __cplusplus +extern "C" { +#endif /* __cplusplus */ + +/** + * @brief Internal function to create ml-service extension. + */ +int ml_service_extension_create (ml_service_s *mls, JsonObject *object); + +/** + * @brief Internal function to release ml-service extension. + */ +int ml_service_extension_destroy (ml_service_s *mls); + +/** + * @brief Internal function to start ml-service extension. + */ +int ml_service_extension_start (ml_service_s *mls); + +/** + * @brief Internal function to stop ml-service extension. + */ +int ml_service_extension_stop (ml_service_s *mls); + +/** + * @brief Internal function to get the information of required input data. + */ +int ml_service_extension_get_input_information (ml_service_s *mls, const char *name, ml_tensors_info_h *info); + +/** + * @brief Internal function to get the information of output data. + */ +int ml_service_extension_get_output_information (ml_service_s *mls, const char *name, ml_tensors_info_h *info); + +/** + * @brief Internal function to set the information for ml-service extension. + */ +int ml_service_extension_set_information (ml_service_s *mls, const char *name, const char *value); + +/** + * @brief Internal function to add an input data to process the model in ml-service extension handle. 
+ */ +int ml_service_extension_request (ml_service_s *mls, const char *name, const ml_tensors_data_h data); + +#ifdef __cplusplus +} +#endif /* __cplusplus */ +#endif /* __ML_API_SERVICE_EXTENSION_H__ */ diff --git a/c/src/ml-api-service-private.h b/c/src/ml-api-service-private.h index 73d2e00f..3e9fe364 100644 --- a/c/src/ml-api-service-private.h +++ b/c/src/ml-api-service-private.h @@ -14,8 +14,12 @@ #ifndef __ML_API_SERVICE_PRIVATE_DATA_H__ #define __ML_API_SERVICE_PRIVATE_DATA_H__ +#include +#include + #include #include +#include #ifdef __cplusplus extern "C" { @@ -30,10 +34,20 @@ typedef enum ML_SERVICE_TYPE_SERVER_PIPELINE, ML_SERVICE_TYPE_CLIENT_QUERY, ML_SERVICE_TYPE_REMOTE, + ML_SERVICE_TYPE_EXTENSION, ML_SERVICE_TYPE_MAX } ml_service_type_e; +/** + * @brief Structure for ml-service callback. + */ +typedef struct +{ + ml_service_callbacks_s cb; + void *pdata; +} ml_service_cb_info_s; + /** * @brief Structure for ml_service_h */ @@ -41,7 +55,10 @@ typedef struct { uint32_t magic; ml_service_type_e type; - + GMutex lock; + GCond cond; + ml_option_h information; + ml_service_cb_info_s cb_info; void *priv; } ml_service_s; @@ -50,7 +67,7 @@ typedef struct */ typedef struct { - gint64 id; + int64_t id; gchar *service_name; } _ml_service_server_s; @@ -69,6 +86,26 @@ ml_service_s * _ml_service_create_internal (ml_service_type_e ml_service_type); */ int _ml_service_destroy_internal (ml_service_s * mls); +/** + * @brief Internal function to get ml-service callback. + */ +void _ml_service_get_callback_info (ml_service_s *mls, ml_service_cb_info_s *cb_info); + +/** + * @brief Internal function to parse model path from json. + */ +int _ml_service_conf_parse_path (JsonNode *file_node, gchar **path); + +/** + * @brief Internal function to parse tensors-info from json. + */ +int _ml_service_conf_parse_tensors_info (JsonNode *info_node, ml_tensors_info_h *info_h); + +/** + * @brief Internal function to parse app information from json. + */ +int _ml_service_conf_parse_information (ml_service_s *mls, JsonObject *info); + /** * @brief Internal function to release ml-service pipeline data. 
 */
diff --git a/packaging/machine-learning-api.spec b/packaging/machine-learning-api.spec
index b3240717..c682962a 100644
--- a/packaging/machine-learning-api.spec
+++ b/packaging/machine-learning-api.spec
@@ -387,6 +387,7 @@ bash %{test_script} ./tests/capi/unittest_capi_inference
 bash %{test_script} ./tests/capi/unittest_capi_datatype_consistency
 %if 0%{?enable_ml_service}
+bash %{test_script} ./tests/capi/unittest_capi_service_extension
 bash %{test_script} ./tests/capi/unittest_capi_service_agent_client
 %if 0%{?nnstreamer_edge_support}
 bash %{test_script} ./tests/capi/unittest_capi_remote_service
diff --git a/tests/capi/meson.build b/tests/capi/meson.build
index 19d2a1fd..d11176ca 100644
--- a/tests/capi/meson.build
+++ b/tests/capi/meson.build
@@ -31,6 +31,14 @@ unittest_capi_datatype_consistency = executable('unittest_capi_datatype_consiste
 test('unittest_capi_datatype_consistency', unittest_capi_datatype_consistency, env: testenv, timeout: 100)
 
 if get_option('enable-ml-service')
+  unittest_capi_service_extension = executable('unittest_capi_service_extension',
+    'unittest_capi_service_extension.cc',
+    dependencies: [unittest_common_dep, nns_capi_service_dep],
+    install: get_option('install-test'),
+    install_dir: unittest_install_dir
+  )
+  test('unittest_capi_service_extension', unittest_capi_service_extension, env: testenv, timeout: 100)
+
   unittest_capi_service_agent_client = executable('unittest_capi_service_agent_client',
     'unittest_capi_service_agent_client.cc',
     dependencies: [unittest_common_dep, nns_capi_service_dep],
diff --git a/tests/capi/unittest_capi_service_extension.cc b/tests/capi/unittest_capi_service_extension.cc
new file mode 100755
index 00000000..4d662775
--- /dev/null
+++ b/tests/capi/unittest_capi_service_extension.cc
@@ -0,0 +1,1250 @@
+/**
+ * @file unittest_capi_service_extension.cc
+ * @date 1 September 2023
+ * @brief Unittest for ML service extension C-API.
+ * @see https://github.com/nnstreamer/api
+ * @author Jaeyun Jung
+ * @bug No known bugs except for NYI items
+ */
+
+#include
+#include
+
+#include
+#include
+
+#if defined(ENABLE_TENSORFLOW_LITE) || defined(ENABLE_TENSORFLOW2_LITE)
+#define TEST_REQUIRE_TFLITE(Case, Name) TEST (Case, Name)
+#define TEST_F_REQUIRE_TFLITE(Case, Name) TEST_F (Case, Name)
+#else
+#define TEST_REQUIRE_TFLITE(Case, Name) TEST (Case, DISABLED_##Name)
+#define TEST_F_REQUIRE_TFLITE(Case, Name) TEST_F (Case, DISABLED_##Name)
+#endif
+
+#define TEST_SET_MAGIC(h, m) \
+  do { \
+    if (h) { \
+      *((uint32_t *) (h)) = (m); \
+    } \
+  } while (0)
+
+/**
+ * @brief Internal structure for test.
+ */
+typedef struct {
+  gint received;
+} extension_test_data_s;
+
+/**
+ * @brief Internal function to create test-data.
+ */
+static extension_test_data_s *
+_create_test_data (void)
+{
+  extension_test_data_s *tdata;
+
+  tdata = g_try_new0 (extension_test_data_s, 1);
+  if (tdata) {
+    tdata->received = 0;
+  }
+
+  return tdata;
+}
+
+/**
+ * @brief Internal function to free test-data.
+ */
+static void
+_free_test_data (extension_test_data_s *tdata)
+{
+  g_free (tdata);
+}
+
+/**
+ * @brief Internal function to get the config file path.
+ */
+static gchar *
+_get_config_path (const gchar *config_name)
+{
+  const gchar *root_path = g_getenv ("MLAPI_SOURCE_ROOT_PATH");
+
+  /* Supposed to run test in build directory.
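+     MLAPI_SOURCE_ROOT_PATH, when set, points at the source tree instead.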
+   */
+  if (root_path == NULL)
+    root_path = "..";
+
+  gchar *config_file = g_build_filename (
+      root_path, "tests", "test_models", "config", config_name, NULL);
+
+  return config_file;
+}
+
+/**
+ * @brief Internal function to get the data file path.
+ */
+static gchar *
+_get_data_path (const gchar *data_name)
+{
+  const gchar *root_path = g_getenv ("MLAPI_SOURCE_ROOT_PATH");
+
+  /* Supposed to run test in build directory. */
+  if (root_path == NULL)
+    root_path = "..";
+
+  gchar *data_file
+      = g_build_filename (root_path, "tests", "test_models", "data", data_name, NULL);
+
+  return data_file;
+}
+
+/**
+ * @brief Callback function for scenario test.
+ */
+static void
+_extension_test_add_cb_new_data (ml_service_h handle, const char *name,
+    const ml_tensors_data_h data, void *user_data)
+{
+  extension_test_data_s *tdata = (extension_test_data_s *) user_data;
+  void *_raw = NULL;
+  size_t _size = 0;
+  int status;
+
+  status = ml_tensors_data_get_tensor_data (data, 0U, &_raw, &_size);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  /* (input 1.0 + invoke 2.0) */
+  EXPECT_EQ (((float *) _raw)[0], 3.0f);
+
+  if (tdata)
+    tdata->received++;
+}
+
+/**
+ * @brief Internal function to run test with ml-service extension handle.
+ */
+static inline void
+_extension_test_add (ml_service_h handle)
+{
+  extension_test_data_s *tdata;
+  ml_tensors_info_h info;
+  ml_tensors_data_h input;
+  ml_service_callbacks_s cb = { 0 };
+  int i, status, tried;
+
+  tdata = _create_test_data ();
+  ASSERT_TRUE (tdata != NULL);
+
+  cb.new_data = _extension_test_add_cb_new_data;
+
+  status = ml_service_set_event_cb (handle, &cb, tdata);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  /* Create and push input data. */
+  status = ml_service_get_input_information (handle, NULL, &info);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  ml_tensors_data_create (info, &input);
+
+  for (i = 0; i < 5; i++) {
+    g_usleep (50000U);
+
+    float tmp_input[] = { 1.0f };
+
+    ml_tensors_data_set_tensor_data (input, 0U, tmp_input, sizeof (float));
+
+    status = ml_service_request (handle, NULL, input);
+    EXPECT_EQ (status, ML_ERROR_NONE);
+  }
+
+  /* Wait until the queued data frames are processed by the ml-service extension handle. */
+  tried = 0;
+  do {
+    g_usleep (30000U);
+  } while (tdata->received < 3 && tried++ < 10);
+
+  EXPECT_TRUE (tdata->received > 0);
+
+  /* Clear callback before releasing tdata. */
+  status = ml_service_set_event_cb (handle, NULL, NULL);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  ml_tensors_info_destroy (info);
+  ml_tensors_data_destroy (input);
+
+  _free_test_data (tdata);
+}
+
+/**
+ * @brief Callback function for scenario test.
+ */
+static void
+_extension_test_imgclf_cb_new_data (ml_service_h handle, const char *name,
+    const ml_tensors_data_h data, void *user_data)
+{
+  extension_test_data_s *tdata = (extension_test_data_s *) user_data;
+  void *_raw = NULL;
+  size_t _size = 0;
+  int status;
+
+  /* The output tensor has type ML_TENSOR_TYPE_UINT8, dimension 1001:1. */
+  status = ml_tensors_data_get_tensor_data (data, 0U, &_raw, &_size);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (_size, 1001);
+
+  /* Check max score, label 'orange' (index 951). */
+  if (_raw != NULL && _size > 0) {
+    size_t i, max_idx = 0;
+    uint8_t cur, max_value = 0;
+
+    for (i = 0; i < _size; i++) {
+      cur = ((uint8_t *) _raw)[i];
+
+      if (max_value < cur) {
+        max_idx = i;
+        max_value = cur;
+      }
+    }
+
+    EXPECT_EQ (max_idx, 951);
+  }
+
+  if (tdata)
+    tdata->received++;
+}
+
+/**
+ * @brief Internal function to run test with ml-service extension handle.
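+ * Loads orange.raw, requests inference on it several times, and checks in the
+ * new-data callback that the top-scored label index is 951 ('orange').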
+ */
+static inline void
+_extension_test_imgclf (ml_service_h handle)
+{
+  extension_test_data_s *tdata;
+  ml_tensors_info_h in_info = NULL;
+  ml_tensors_info_h out_info = NULL;
+  ml_tensors_data_h input = NULL;
+  ml_service_callbacks_s cb = { 0 };
+  unsigned int count;
+  ml_tensor_type_e type;
+  ml_tensor_dimension in_dim = { 0 };
+  ml_tensor_dimension out_dim = { 0 };
+  int i, status, tried;
+  void *_raw = NULL;
+  size_t _size = 0;
+
+  g_autofree gchar *data_file = _get_data_path ("orange.raw");
+
+  ASSERT_TRUE (g_file_get_contents (data_file, (gchar **) &_raw, &_size, NULL));
+  ASSERT_TRUE (_size == 3U * 224 * 224);
+
+  tdata = _create_test_data ();
+  ASSERT_TRUE (tdata != NULL);
+
+  cb.new_data = _extension_test_imgclf_cb_new_data;
+
+  status = ml_service_set_event_cb (handle, &cb, tdata);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  /* Check input information. */
+  status = ml_service_get_input_information (handle, "input_img", &in_info);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  count = 0U;
+  status = ml_tensors_info_get_count (in_info, &count);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (count, 1U);
+
+  type = ML_TENSOR_TYPE_UNKNOWN;
+  status = ml_tensors_info_get_tensor_type (in_info, 0, &type);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
+
+  status = ml_tensors_info_get_tensor_dimension (in_info, 0, in_dim);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (in_dim[0], 3U);
+  EXPECT_EQ (in_dim[1], 224U);
+  EXPECT_EQ (in_dim[2], 224U);
+  EXPECT_EQ (in_dim[3], 1U);
+  EXPECT_LE (in_dim[4], 1U);
+
+  /* Check output information. */
+  status = ml_service_get_output_information (handle, "result_clf", &out_info);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  count = 0U;
+  status = ml_tensors_info_get_count (out_info, &count);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (count, 1U);
+
+  type = ML_TENSOR_TYPE_UNKNOWN;
+  status = ml_tensors_info_get_tensor_type (out_info, 0, &type);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (type, ML_TENSOR_TYPE_UINT8);
+
+  status = ml_tensors_info_get_tensor_dimension (out_info, 0, out_dim);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+  EXPECT_EQ (out_dim[0], 1001U);
+  EXPECT_EQ (out_dim[1], 1U);
+  EXPECT_LE (out_dim[2], 1U);
+
+  /* Create and push input data (orange). */
+  ml_tensors_data_create (in_info, &input);
+  ml_tensors_data_set_tensor_data (input, 0U, _raw, _size);
+
+  for (i = 0; i < 5; i++) {
+    g_usleep (50000U);
+
+    status = ml_service_request (handle, "input_img", input);
+    EXPECT_EQ (status, ML_ERROR_NONE);
+  }
+
+  /* Wait until the queued data frames are processed by the ml-service extension handle. */
+  tried = 0;
+  do {
+    g_usleep (30000U);
+  } while (tdata->received < 3 && tried++ < 10);
+
+  EXPECT_TRUE (tdata->received > 0);
+
+  /* Clear callback before releasing tdata. */
+  status = ml_service_set_event_cb (handle, NULL, NULL);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+
+  if (in_info)
+    ml_tensors_info_destroy (in_info);
+  if (out_info)
+    ml_tensors_info_destroy (out_info);
+  if (input)
+    ml_tensors_data_destroy (input);
+  g_free (_raw);
+
+  _free_test_data (tdata);
+}
+
+/**
+ * @brief Usage of ml-service extension API.
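+ * Scenario: config_single_add.conf runs add.tflite in single-shot mode; the
+ * callback expects each 1.0f input to come back as 3.0f.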
+ */
+TEST_REQUIRE_TFLITE (MLServiceExtension, scenarioConfigAdd)
+{
+  ml_service_h handle;
+  int status;
+
+  g_autofree gchar *config = _get_config_path ("config_single_add.conf");
+
+  status = ml_service_new (config, &handle);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
+  _extension_test_add (handle);
+
+  status = ml_service_destroy (handle);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief Usage of ml-service extension API.
+ */
+TEST_REQUIRE_TFLITE (MLServiceExtension, scenarioConfig1ImgClf)
+{
+  ml_service_h handle;
+  int status;
+
+  g_autofree gchar *config = _get_config_path ("config_single_imgclf.conf");
+
+  status = ml_service_new (config, &handle);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
+  _extension_test_imgclf (handle);
+
+  status = ml_service_destroy (handle);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief Usage of ml-service extension API.
+ */
+TEST_REQUIRE_TFLITE (MLServiceExtension, scenarioConfig2ImgClf)
+{
+  ml_service_h handle;
+  int status;
+
+  /* The configuration file includes the model path only. */
+  g_autofree gchar *config = _get_config_path ("config_single_imgclf_file.conf");
+
+  status = ml_service_new (config, &handle);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
+  _extension_test_imgclf (handle);
+
+  status = ml_service_destroy (handle);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief Usage of ml-service extension API.
+ */
+TEST_REQUIRE_TFLITE (MLServiceExtension, scenarioConfig3ImgClf)
+{
+  ml_service_h handle;
+  int status;
+
+  /* The configuration file describes a pipeline with input and output nodes. */
+  g_autofree gchar *config = _get_config_path ("config_pipeline_imgclf.conf");
+
+  status = ml_service_new (config, &handle);
+  ASSERT_EQ (status, ML_ERROR_NONE);
+
+  _extension_test_imgclf (handle);
+
+  status = ml_service_destroy (handle);
+  EXPECT_EQ (status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief Testcase with invalid param.
+ */
+TEST (MLServiceExtension, createConfigInvalidParam01_n)
+{
+  ml_service_h handle;
+  int status;
+
+  status = ml_service_new (NULL, &handle);
+  EXPECT_NE (status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief Testcase with invalid param.
+ */
+TEST (MLServiceExtension, createConfigInvalidParam02_n)
+{
+  ml_service_h handle;
+  int status;
+
+  status = ml_service_new ("", &handle);
+  EXPECT_NE (status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief Testcase with invalid param.
+ */
+TEST_REQUIRE_TFLITE (MLServiceExtension, createConfigInvalidParam03_n)
+{
+  int status;
+
+  g_autofree gchar *config = _get_config_path ("config_single_add.conf");
+
+  status = ml_service_new (config, NULL);
+  EXPECT_NE (status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief Testcase with invalid param.
+ */
+TEST (MLServiceExtension, createConfigInvalidParam04_n)
+{
+  ml_service_h handle;
+  int status;
+
+  /* The configuration file does not exist. */
+  g_autofree gchar *config = _get_config_path ("invalid_path.conf");
+
+  status = ml_service_new (config, &handle);
+  EXPECT_NE (status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief Testcase with invalid param.
+ */
+TEST (MLServiceExtension, createConfigInvalidParam05_n)
+{
+  ml_service_h handle;
+  int status;
+
+  /* The configuration file has invalid tensor information. */
+  g_autofree gchar *config = _get_config_path ("config_single_imgclf_invalid_info.conf");
+
+  status = ml_service_new (config, &handle);
+  EXPECT_NE (status, ML_ERROR_NONE);
+}
+
+/**
+ * @brief Testcase with invalid param.
+ */
+TEST (MLServiceExtension, createConfigInvalidParam06_n)
+{
+  ml_service_h handle;
+  int status;
+
+  /* The configuration file has an invalid type.
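+     (its top-level key is "unknown" rather than "single" or "pipeline")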
*/ + g_autofree gchar *config = _get_config_path ("config_unknown_type.conf"); + + status = ml_service_new (config, &handle); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, createConfigInvalidParam07_n) +{ + ml_service_h handle; + int status; + + /* The configuration file does not have model file. */ + g_autofree gchar *config = _get_config_path ("config_single_no_model.conf"); + + status = ml_service_new (config, &handle); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, createConfigInvalidParam08_n) +{ + ml_service_h handle; + int status; + + /* The configuration file has invalid information. */ + g_autofree gchar *config = _get_config_path ("config_pipeline_invalid_info.conf"); + + status = ml_service_new (config, &handle); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, createConfigInvalidParam09_n) +{ + ml_service_h handle; + int status; + + /* The configuration file does not have node information. */ + g_autofree gchar *config = _get_config_path ("config_pipeline_no_info.conf"); + + status = ml_service_new (config, &handle); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, createConfigInvalidParam10_n) +{ + ml_service_h handle; + int status; + + /* The configuration file has duplicated node name. */ + g_autofree gchar *config = _get_config_path ("config_pipeline_duplicated_name.conf"); + + status = ml_service_new (config, &handle); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, createConfigInvalidParam11_n) +{ + ml_service_h handle; + int status; + + /* The configuration file does not have node name. */ + g_autofree gchar *config = _get_config_path ("config_pipeline_no_name.conf"); + + status = ml_service_new (config, &handle); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, destroyInvalidParam01_n) +{ + int status; + + status = ml_service_destroy (NULL); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, destroyInvalidParam02_n) +{ + ml_service_h handle; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_add.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + /* Set invalid magic. */ + TEST_SET_MAGIC (handle, 0U); + status = ml_service_destroy (handle); + EXPECT_NE (status, ML_ERROR_NONE); + TEST_SET_MAGIC (handle, 0xfeeedeed); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, setCallbackInvalidParam01_n) +{ + int status; + + status = ml_service_set_event_cb (NULL, NULL, NULL); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, setCallbackInvalidParam02_n) +{ + ml_service_h handle; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_add.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + /* Set invalid magic. 
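+     TEST_SET_MAGIC corrupts the handle's magic so the call below should fail;
+     restoring 0xfeeedeed afterwards lets the handle be used and destroyed normally.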
*/ + TEST_SET_MAGIC (handle, 0U); + status = ml_service_set_event_cb (handle, NULL, NULL); + EXPECT_NE (status, ML_ERROR_NONE); + TEST_SET_MAGIC (handle, 0xfeeedeed); + + status = ml_service_set_event_cb (handle, NULL, NULL); + EXPECT_EQ (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, startInvalidParam01_n) +{ + int status; + + status = ml_service_start (NULL); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, startInvalidParam02_n) +{ + ml_service_h handle; + int status; + + g_autofree gchar *config = _get_config_path ("config_pipeline_imgclf.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + /* Set invalid magic. */ + TEST_SET_MAGIC (handle, 0U); + status = ml_service_start (handle); + EXPECT_NE (status, ML_ERROR_NONE); + TEST_SET_MAGIC (handle, 0xfeeedeed); + + status = ml_service_start (handle); + EXPECT_EQ (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, stopInvalidParam01_n) +{ + int status; + + status = ml_service_stop (NULL); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, stopInvalidParam02_n) +{ + ml_service_h handle; + int status; + + g_autofree gchar *config = _get_config_path ("config_pipeline_imgclf.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + /* Set invalid magic. */ + TEST_SET_MAGIC (handle, 0U); + status = ml_service_stop (handle); + EXPECT_NE (status, ML_ERROR_NONE); + TEST_SET_MAGIC (handle, 0xfeeedeed); + + status = ml_service_stop (handle); + EXPECT_EQ (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, getInputInfoInvalidParam01_n) +{ + ml_tensors_info_h info; + int status; + + status = ml_service_get_input_information (NULL, NULL, &info); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, getInputInfoInvalidParam02_n) +{ + ml_service_h handle; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_add.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + status = ml_service_get_input_information (handle, NULL, NULL); + EXPECT_NE (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, getInputInfoInvalidParam03_n) +{ + ml_service_h handle; + ml_tensors_info_h info; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_add.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + /* Set invalid magic. 
*/ + TEST_SET_MAGIC (handle, 0U); + status = ml_service_get_input_information (handle, NULL, &info); + EXPECT_NE (status, ML_ERROR_NONE); + TEST_SET_MAGIC (handle, 0xfeeedeed); + + status = ml_service_get_input_information (handle, NULL, &info); + EXPECT_EQ (status, ML_ERROR_NONE); + + unsigned int count = 0U; + status = ml_tensors_info_get_count (info, &count); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_EQ (count, 1U); + + ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN; + status = ml_tensors_info_get_tensor_type (info, 0, &type); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_EQ (type, ML_TENSOR_TYPE_FLOAT32); + + ml_tensor_dimension dimension = { 0 }; + status = ml_tensors_info_get_tensor_dimension (info, 0, dimension); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_EQ (dimension[0], 1U); + EXPECT_LE (dimension[1], 1U); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); + + ml_tensors_info_destroy (info); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, getInputInfoInvalidParam04_n) +{ + ml_service_h handle; + ml_tensors_info_h info; + int status; + + g_autofree gchar *config = _get_config_path ("config_pipeline_imgclf.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + status = ml_service_get_input_information (handle, NULL, &info); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_get_input_information (handle, "", &info); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_get_input_information (handle, "invalid_name", &info); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_get_input_information (handle, "result_clf", &info); + EXPECT_NE (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, getOutputInfoInvalidParam01_n) +{ + ml_tensors_info_h info; + int status; + + status = ml_service_get_output_information (NULL, NULL, &info); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, getOutputInfoInvalidParam02_n) +{ + ml_service_h handle; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_add.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + status = ml_service_get_output_information (handle, NULL, NULL); + EXPECT_NE (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, getOutputInfoInvalidParam03_n) +{ + ml_service_h handle; + ml_tensors_info_h info; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_add.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + /* Set invalid magic. 
*/ + TEST_SET_MAGIC (handle, 0U); + status = ml_service_get_output_information (handle, NULL, &info); + EXPECT_NE (status, ML_ERROR_NONE); + TEST_SET_MAGIC (handle, 0xfeeedeed); + + status = ml_service_get_output_information (handle, NULL, &info); + EXPECT_EQ (status, ML_ERROR_NONE); + + unsigned int count = 0U; + status = ml_tensors_info_get_count (info, &count); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_EQ (count, 1U); + + ml_tensor_type_e type = ML_TENSOR_TYPE_UNKNOWN; + status = ml_tensors_info_get_tensor_type (info, 0, &type); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_EQ (type, ML_TENSOR_TYPE_FLOAT32); + + ml_tensor_dimension dimension = { 0 }; + status = ml_tensors_info_get_tensor_dimension (info, 0, dimension); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_EQ (dimension[0], 1U); + EXPECT_LE (dimension[1], 1U); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); + + ml_tensors_info_destroy (info); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, getOutputInfoInvalidParam04_n) +{ + ml_service_h handle; + ml_tensors_info_h info; + int status; + + g_autofree gchar *config = _get_config_path ("config_pipeline_imgclf.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + status = ml_service_get_output_information (handle, NULL, &info); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_get_output_information (handle, "", &info); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_get_output_information (handle, "invalid_name", &info); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_get_output_information (handle, "input_img", &info); + EXPECT_NE (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, setInfoInvalidParam01_n) +{ + int status; + + status = ml_service_set_information (NULL, "test-threshold", "0.1"); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, setInfoInvalidParam02_n) +{ + ml_service_h handle; + char *value; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_imgclf.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + /* Set invalid magic. */ + TEST_SET_MAGIC (handle, 0U); + status = ml_service_set_information (handle, "test-threshold", "0.1"); + EXPECT_NE (status, ML_ERROR_NONE); + TEST_SET_MAGIC (handle, 0xfeeedeed); + + status = ml_service_set_information (handle, "test-threshold", "0.1"); + EXPECT_EQ (status, ML_ERROR_NONE); + status = ml_service_get_information (handle, "test-threshold", &value); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_STREQ (value, "0.1"); + g_free (value); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. 
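+ * Passes NULL or empty strings as the information name and value.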
+ */ +TEST_REQUIRE_TFLITE (MLServiceExtension, setInfoInvalidParam03_n) +{ + ml_service_h handle; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_imgclf.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + status = ml_service_set_information (handle, NULL, "0.1"); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_set_information (handle, "", "0.1"); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_set_information (handle, "test-threshold", NULL); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_set_information (handle, "test-threshold", ""); + EXPECT_NE (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST (MLServiceExtension, getInfoInvalidParam01_n) +{ + char *value; + int status; + + status = ml_service_get_information (NULL, "threshold", &value); + EXPECT_NE (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, getInfoInvalidParam02_n) +{ + ml_service_h handle; + char *value; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_imgclf.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + /* Set invalid magic. */ + TEST_SET_MAGIC (handle, 0U); + status = ml_service_get_information (handle, "threshold", &value); + EXPECT_NE (status, ML_ERROR_NONE); + TEST_SET_MAGIC (handle, 0xfeeedeed); + + status = ml_service_get_information (handle, "threshold", &value); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_STREQ (value, "0.5"); + g_free (value); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, getInfoInvalidParam03_n) +{ + ml_service_h handle; + char *value; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_imgclf.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + status = ml_service_get_information (handle, NULL, &value); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_get_information (handle, "", &value); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_get_information (handle, "invalid_name", &value); + EXPECT_NE (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, getInfoInvalidParam04_n) +{ + ml_service_h handle; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_imgclf.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + status = ml_service_get_information (handle, "threshold", NULL); + EXPECT_NE (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. 
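+ * Requests inference with a NULL service handle.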
+ */ +TEST (MLServiceExtension, addInputInvalidParam01_n) +{ + ml_tensors_info_h info; + ml_tensors_data_h input; + ml_tensor_dimension dimension = { 0 }; + int status; + + dimension[0] = 4U; + ml_tensors_info_create (&info); + ml_tensors_info_set_count (info, 1U); + ml_tensors_info_set_tensor_type (info, 0U, ML_TENSOR_TYPE_INT32); + ml_tensors_info_set_tensor_dimension (info, 0U, dimension); + ml_tensors_data_create (info, &input); + + status = ml_service_request (NULL, NULL, input); + EXPECT_NE (status, ML_ERROR_NONE); + + ml_tensors_info_destroy (info); + ml_tensors_data_destroy (input); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, addInputInvalidParam02_n) +{ + ml_service_h handle; + ml_tensors_info_h info; + ml_tensors_data_h input; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_add.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + status = ml_service_get_input_information (handle, NULL, &info); + EXPECT_EQ (status, ML_ERROR_NONE); + + ml_tensors_data_create (info, &input); + + /* Set invalid magic. */ + TEST_SET_MAGIC (handle, 0U); + status = ml_service_request (handle, NULL, input); + EXPECT_NE (status, ML_ERROR_NONE); + TEST_SET_MAGIC (handle, 0xfeeedeed); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); + + ml_tensors_info_destroy (info); + ml_tensors_data_destroy (input); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, addInputInvalidParam03_n) +{ + ml_service_h handle; + int status; + + g_autofree gchar *config = _get_config_path ("config_single_add.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + status = ml_service_request (handle, NULL, NULL); + EXPECT_NE (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); +} + +/** + * @brief Testcase with invalid param. + */ +TEST_REQUIRE_TFLITE (MLServiceExtension, addInputInvalidParam04_n) +{ + ml_service_h handle; + ml_tensors_info_h info; + ml_tensors_data_h input; + int status; + + g_autofree gchar *config = _get_config_path ("config_pipeline_imgclf.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + status = ml_service_get_input_information (handle, "input_img", &info); + EXPECT_EQ (status, ML_ERROR_NONE); + + ml_tensors_data_create (info, &input); + + status = ml_service_request (handle, NULL, input); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_request (handle, "", input); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_request (handle, "invalid_name", input); + EXPECT_NE (status, ML_ERROR_NONE); + status = ml_service_request (handle, "result_clf", input); + EXPECT_NE (status, ML_ERROR_NONE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); + + ml_tensors_info_destroy (info); + ml_tensors_data_destroy (input); +} + +/** + * @brief Testcase with max buffer. 
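+ * config_single_imgclf_max_input.conf sets "max_input" to 5, so repeatedly
+ * pushing requests without draining the queue should eventually fail with
+ * ML_ERROR_STREAMS_PIPE.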
+ */ +TEST_REQUIRE_TFLITE (MLServiceExtension, addInputMaxBuffer_n) +{ + ml_service_h handle; + ml_tensors_info_h info; + ml_tensors_data_h input; + char *value; + int i, status; + + g_autofree gchar *config = _get_config_path ("config_single_imgclf_max_input.conf"); + + status = ml_service_new (config, &handle); + ASSERT_EQ (status, ML_ERROR_NONE); + + status = ml_service_get_information (handle, "max_input", &value); + EXPECT_EQ (status, ML_ERROR_NONE); + EXPECT_STREQ (value, "5"); + g_free (value); + + ml_service_get_input_information (handle, NULL, &info); + ml_tensors_data_create (info, &input); + + for (i = 0; i < 200; i++) { + g_usleep (20000U); + + status = ml_service_request (handle, NULL, input); + if (status != ML_ERROR_NONE) { + /* Supposed max input data in queue. */ + break; + } + } + + EXPECT_EQ (status, ML_ERROR_STREAMS_PIPE); + + status = ml_service_destroy (handle); + EXPECT_EQ (status, ML_ERROR_NONE); + + ml_tensors_info_destroy (info); + ml_tensors_data_destroy (input); +} + +/** + * @brief Main function to run the test. + */ +int +main (int argc, char **argv) +{ + int result = -1; + + try { + testing::InitGoogleTest (&argc, argv); + } catch (...) { + g_warning ("catch 'testing::internal::::ClassUniqueToAlwaysTrue'"); + } + + /* ignore tizen feature status while running the testcases */ + set_feature_state (ML_FEATURE, SUPPORTED); + set_feature_state (ML_FEATURE_INFERENCE, SUPPORTED); + set_feature_state (ML_FEATURE_SERVICE, SUPPORTED); + + try { + result = RUN_ALL_TESTS (); + } catch (...) { + g_warning ("catch `testing::internal::GoogleTestFailureException`"); + } + + set_feature_state (ML_FEATURE, NOT_CHECKED_YET); + set_feature_state (ML_FEATURE_INFERENCE, NOT_CHECKED_YET); + set_feature_state (ML_FEATURE_SERVICE, NOT_CHECKED_YET); + + return result; +} diff --git a/tests/test_models/config/config_pipeline_duplicated_name.conf b/tests/test_models/config/config_pipeline_duplicated_name.conf new file mode 100644 index 00000000..63bb19bb --- /dev/null +++ b/tests/test_models/config/config_pipeline_duplicated_name.conf @@ -0,0 +1,28 @@ +{ + "pipeline" : + { + "description" : "appsrc name=input_img caps=other/tensors,num_tensors=1,format=static,types=uint8,dimensions=3:224:224:1,framerate=0/1 ! tensor_filter framework=tensorflow-lite model=../tests/test_models/models/mobilenet_v1_1.0_224_quant.tflite ! tensor_sink name=result_clf", + "input_node" : [ + { + "name" : "input_img", + "info" : [ + { + "type" : "uint8", + "dimension" : "3:224:224:1" + } + ] + } + ], + "output_node" : [ + { + "name" : "input_img", + "info" : [ + { + "type" : "uint8", + "dimension" : "1001:1" + } + ] + } + ] + } +} diff --git a/tests/test_models/config/config_pipeline_imgclf.conf b/tests/test_models/config/config_pipeline_imgclf.conf new file mode 100644 index 00000000..463bfed1 --- /dev/null +++ b/tests/test_models/config/config_pipeline_imgclf.conf @@ -0,0 +1,28 @@ +{ + "pipeline" : + { + "description" : "appsrc name=input_img caps=other/tensors,num_tensors=1,format=static,types=uint8,dimensions=3:224:224:1,framerate=0/1 ! tensor_filter framework=tensorflow-lite model=../tests/test_models/models/mobilenet_v1_1.0_224_quant.tflite ! 
tensor_sink name=result_clf", + "input_node" : [ + { + "name" : "input_img", + "info" : [ + { + "type" : "uint8", + "dimension" : "3:224:224:1" + } + ] + } + ], + "output_node" : [ + { + "name" : "result_clf", + "info" : [ + { + "type" : "uint8", + "dimension" : "1001:1" + } + ] + } + ] + } +} diff --git a/tests/test_models/config/config_pipeline_invalid_info.conf b/tests/test_models/config/config_pipeline_invalid_info.conf new file mode 100644 index 00000000..85f3d3fe --- /dev/null +++ b/tests/test_models/config/config_pipeline_invalid_info.conf @@ -0,0 +1,27 @@ +{ + "pipeline" : + { + "description" : "appsrc name=input_img caps=other/tensors,num_tensors=1,format=static,types=uint8,dimensions=3:224:224:1,framerate=0/1 ! tensor_filter framework=tensorflow-lite model=../tests/test_models/models/mobilenet_v1_1.0_224_quant.tflite ! tensor_sink name=result_clf", + "input_node" : [ + { + "name" : "input_img", + "info" : [ + { + "type" : "uint8", + "dimension" : "3:224:224:1" + } + ] + } + ], + "output_node" : [ + { + "name" : "result_clf", + "info" : [ + { + "type" : "uint8" + } + ] + } + ] + } +} diff --git a/tests/test_models/config/config_pipeline_no_info.conf b/tests/test_models/config/config_pipeline_no_info.conf new file mode 100644 index 00000000..d0b9e291 --- /dev/null +++ b/tests/test_models/config/config_pipeline_no_info.conf @@ -0,0 +1,22 @@ +{ + "pipeline" : + { + "description" : "appsrc name=input_img caps=other/tensors,num_tensors=1,format=static,types=uint8,dimensions=3:224:224:1,framerate=0/1 ! tensor_filter framework=tensorflow-lite model=../tests/test_models/models/mobilenet_v1_1.0_224_quant.tflite ! tensor_sink name=result_clf", + "input_node" : [ + { + "name" : "input_img", + "info" : [ + { + "type" : "uint8", + "dimension" : "3:224:224:1" + } + ] + } + ], + "output_node" : [ + { + "name" : "result_clf" + } + ] + } +} diff --git a/tests/test_models/config/config_pipeline_no_name.conf b/tests/test_models/config/config_pipeline_no_name.conf new file mode 100644 index 00000000..45f4907b --- /dev/null +++ b/tests/test_models/config/config_pipeline_no_name.conf @@ -0,0 +1,27 @@ +{ + "pipeline" : + { + "description" : "appsrc name=input_img caps=other/tensors,num_tensors=1,format=static,types=uint8,dimensions=3:224:224:1,framerate=0/1 ! tensor_filter framework=tensorflow-lite model=../tests/test_models/models/mobilenet_v1_1.0_224_quant.tflite ! 
tensor_sink name=result_clf", + "input_node" : [ + { + "name" : "input_img", + "info" : [ + { + "type" : "uint8", + "dimension" : "3:224:224:1" + } + ] + } + ], + "output_node" : [ + { + "info" : [ + { + "type" : "uint8", + "dimension" : "1001:1" + } + ] + } + ] + } +} diff --git a/tests/test_models/config/config_single_add.conf b/tests/test_models/config/config_single_add.conf new file mode 100644 index 00000000..96e3b199 --- /dev/null +++ b/tests/test_models/config/config_single_add.conf @@ -0,0 +1,7 @@ +{ + "single" : + { + "framework" : "tensorflow-lite", + "model" : ["../tests/test_models/models/add.tflite"] + } +} diff --git a/tests/test_models/config/config_single_imgclf.conf b/tests/test_models/config/config_single_imgclf.conf new file mode 100644 index 00000000..cf340cf1 --- /dev/null +++ b/tests/test_models/config/config_single_imgclf.conf @@ -0,0 +1,24 @@ +{ + "single" : + { + "framework" : "tensorflow-lite", + "model" : ["../tests/test_models/models/mobilenet_v1_1.0_224_quant.tflite"], + "input_info" : [ + { + "type" : "uint8", + "dimension" : "3:224:224:1" + } + ], + "output_info" : [ + { + "type" : "uint8", + "dimension" : "1001:1" + } + ] + }, + "information" : + { + "threshold" : "0.5", + "description" : "Config file to run unittest for ml-extension." + } +} diff --git a/tests/test_models/config/config_single_imgclf_file.conf b/tests/test_models/config/config_single_imgclf_file.conf new file mode 100644 index 00000000..7a17b509 --- /dev/null +++ b/tests/test_models/config/config_single_imgclf_file.conf @@ -0,0 +1,6 @@ +{ + "single" : + { + "model" : "../tests/test_models/models/mobilenet_v1_1.0_224_quant.tflite" + } +} diff --git a/tests/test_models/config/config_single_imgclf_invalid_info.conf b/tests/test_models/config/config_single_imgclf_invalid_info.conf new file mode 100644 index 00000000..8909aa74 --- /dev/null +++ b/tests/test_models/config/config_single_imgclf_invalid_info.conf @@ -0,0 +1,18 @@ +{ + "single" : + { + "framework" : "tensorflow-lite", + "model" : ["../tests/test_models/models/mobilenet_v1_1.0_224_quant.tflite"], + "input_info" : [ + { + "type" : "uint8", + "dimension" : "3:224:224:1" + } + ], + "output_info" : [ + { + "type" : "uint8" + } + ] + } +} diff --git a/tests/test_models/config/config_single_imgclf_max_input.conf b/tests/test_models/config/config_single_imgclf_max_input.conf new file mode 100644 index 00000000..42d1545e --- /dev/null +++ b/tests/test_models/config/config_single_imgclf_max_input.conf @@ -0,0 +1,25 @@ +{ + "single" : + { + "framework" : "tensorflow-lite", + "model" : ["../tests/test_models/models/mobilenet_v1_1.0_224_quant.tflite"], + "input_info" : [ + { + "type" : "uint8", + "dimension" : "3:224:224:1" + } + ], + "output_info" : [ + { + "type" : "uint8", + "dimension" : "1001:1" + } + ] + }, + "information" : + { + "max_input" : "5", + "threshold" : "0.5", + "description" : "Config file to run unittest for ml-extension." 
+ } +} diff --git a/tests/test_models/config/config_single_no_model.conf b/tests/test_models/config/config_single_no_model.conf new file mode 100644 index 00000000..4d7d9eb5 --- /dev/null +++ b/tests/test_models/config/config_single_no_model.conf @@ -0,0 +1,18 @@ +{ + "single" : + { + "framework" : "tensorflow-lite", + "input_info" : [ + { + "type" : "uint8", + "dimension" : "3:224:224:1" + } + ], + "output_info" : [ + { + "type" : "uint8", + "dimension" : "1001:1" + } + ] + } +} diff --git a/tests/test_models/config/config_unknown_type.conf b/tests/test_models/config/config_unknown_type.conf new file mode 100644 index 00000000..6baf1cea --- /dev/null +++ b/tests/test_models/config/config_unknown_type.conf @@ -0,0 +1,7 @@ +{ + "unknown" : + { + "framework" : "tensorflow-lite", + "model" : "../tests/test_models/models/mobilenet_v1_1.0_224_quant.tflite" + } +}