Skip to content

Commit

Permalink
[C-Api] new nnfw enum
Browse files Browse the repository at this point in the history
Add new enum values for onnxruntime and ncnn.
These additions require a new ACR for the next release.

Signed-off-by: Suyeon Kim <suyeon5.kim@samsung.com>
  • Loading branch information
yeonykim2 committed Jan 18, 2024
1 parent 5d27677 commit ebd0318
Show file tree
Hide file tree
Showing 8 changed files with 298 additions and 1 deletion.
2 changes: 2 additions & 0 deletions c/include/ml-api-common.h
Original file line number Diff line number Diff line change
Expand Up @@ -71,6 +71,8 @@ typedef enum {
ML_NNFW_TYPE_TRIX_ENGINE = 14, /**< TRIxENGINE accesses TRIV/TRIA NPU low-level drivers directly (.tvn). (Since 6.5) You may need to use high-level drivers wrapping this low-level driver in some devices: e.g., AIFW */
ML_NNFW_TYPE_MXNET = 15, /**< Apache MXNet (Since 7.0) */
ML_NNFW_TYPE_TVM = 16, /**< Apache TVM (Since 7.0) */
ML_NNFW_TYPE_ONNXRUNTIME = 17, /**< ONNX Runtime (Since 9.0) */
ML_NNFW_TYPE_NCNN = 18, /**< ncnn (Since 9.0) */
ML_NNFW_TYPE_SNAP = 0x2001, /**< SNAP (Samsung Neural Acceleration Platform), only for Android. (Since 6.0) */
} ml_nnfw_type_e;

Expand Down
7 changes: 6 additions & 1 deletion c/src/ml-api-inference-single.c
Original file line number Diff line number Diff line change
Expand Up @@ -108,6 +108,8 @@ static const char *ml_nnfw_subplugin_name[] = {
[ML_NNFW_TYPE_TRIX_ENGINE] = "trix-engine",
[ML_NNFW_TYPE_MXNET] = "mxnet",
[ML_NNFW_TYPE_TVM] = "tvm",
[ML_NNFW_TYPE_ONNXRUNTIME] = "onnxruntime",
[ML_NNFW_TYPE_NCNN] = "ncnn",
NULL
};

Expand Down Expand Up @@ -991,7 +993,8 @@ ml_single_open_custom (ml_single_h * single, ml_single_preset * info)
* Note that we do not construct a pipeline since 2019.12.
*/
if (nnfw == ML_NNFW_TYPE_TENSORFLOW || nnfw == ML_NNFW_TYPE_SNAP ||
nnfw == ML_NNFW_TYPE_PYTORCH || nnfw == ML_NNFW_TYPE_TRIX_ENGINE) {
nnfw == ML_NNFW_TYPE_PYTORCH || nnfw == ML_NNFW_TYPE_TRIX_ENGINE ||
nnfw == ML_NNFW_TYPE_NCNN) {
/* set input and output tensors information */
if (in_tensors_info && out_tensors_info) {
status =
Expand Down Expand Up @@ -1936,6 +1939,8 @@ _ml_validate_model_file (const char *const *model,
switch (*nnfw) {
case ML_NNFW_TYPE_NNFW:
case ML_NNFW_TYPE_TVM:
case ML_NNFW_TYPE_ONNXRUNTIME:
case ML_NNFW_TYPE_NCNN:
/**
* We cannot check the file ext with NNFW.
* NNFW itself will validate metadata and model file.
Expand Down
12 changes: 12 additions & 0 deletions meson.build
Original file line number Diff line number Diff line change
Expand Up @@ -152,6 +152,18 @@ if armnn_dep.found()
add_project_arguments('-DENABLE_ARMNN=1', language: ['c', 'cpp'])
endif

# ONNX Runtime
# Optional dependency: when the 'libonnxruntime' module is found, define
# ENABLE_ONNXRUNTIME so that ONNXRUNTIME-guarded code (e.g., its unit test)
# is compiled in. Building proceeds without it otherwise.
onnxruntime_dep = dependency('libonnxruntime', required: false)
if onnxruntime_dep.found()
add_project_arguments('-DENABLE_ONNXRUNTIME=1', language: ['c', 'cpp'])
endif

# ncnn
# Optional dependency: when the 'ncnn' module is found, define ENABLE_NCNN
# so that NCNN-guarded code (e.g., its unit test) is compiled in.
ncnn_dep = dependency('ncnn', required: false)
if ncnn_dep.found()
add_project_arguments('-DENABLE_NCNN=1', language: ['c', 'cpp'])
endif

# Set install path
api_install_prefix = get_option('prefix')
api_install_libdir = join_paths(api_install_prefix, get_option('libdir'))
Expand Down
12 changes: 12 additions & 0 deletions packaging/machine-learning-api.spec
Original file line number Diff line number Diff line change
Expand Up @@ -15,6 +15,8 @@
%define tensorflow2_gpu_delegate_support 1
%define nnfw_support 1
%define armnn_support 0
# Optional NN framework support flags; both are off (0) by default.
# Flip to 1 only when the matching -devel / nnstreamer subplugin
# packages are available in the build environment.
%define onnxruntime_support 0
%define ncnn_support 0

%define release_test 0
%define test_script $(pwd)/packaging/run_unittests.sh
Expand Down Expand Up @@ -152,6 +154,16 @@ BuildRequires: nnstreamer-armnn
BuildRequires: libarmcl
BuildConflicts: libarmcl-release
%endif

# Build-time dependencies for the ONNX Runtime path; pulled in only when
# onnxruntime_support is enabled above.
%if 0%{?onnxruntime_support}
BuildRequires: onnxruntime-devel
BuildRequires: nnstreamer-onnxruntime
%endif

# Build-time dependencies for the ncnn path; pulled in only when
# ncnn_support is enabled above.
%if 0%{?ncnn_support}
BuildRequires: ncnn-devel
BuildRequires: nnstreamer-ncnn
%endif
%endif # unit_test

%if 0%{?enable_ml_service}
Expand Down
189 changes: 189 additions & 0 deletions tests/capi/unittest_capi_inference_single.cc
Original file line number Diff line number Diff line change
Expand Up @@ -2991,6 +2991,195 @@ TEST (nnstreamer_capi_singleshot, invoke_09_n)
}
#endif /* ENABLE_ARMNN */

#ifdef ENABLE_ONNXRUNTIME
/**
 * @brief Test NNStreamer single shot (ONNX Runtime)
 */
TEST (nnstreamer_capi_singleshot, invoke_10)
{
  ml_single_h handle;
  ml_tensors_info_h input_info, output_info;
  ml_tensors_data_h in_data = NULL;
  ml_tensors_data_h out_data = NULL;
  ml_tensor_dimension input_dim, output_dim;
  ml_tensor_type_e tensor_type;
  unsigned int num_tensors = 0U;
  char *tensor_name = NULL;
  int ret;
  void *raw_data;
  size_t raw_size, out_size;

  const gchar *src_root = g_getenv ("MLAPI_SOURCE_ROOT_PATH");

  /* supposed to run test in build directory */
  if (!src_root)
    src_root = "..";

  g_autofree gchar *model_path = g_build_filename (src_root, "tests",
      "test_models", "models", "mobilenet_v2_quant.onnx", NULL);
  ASSERT_TRUE (g_file_test (model_path, G_FILE_TEST_EXISTS));

  ret = ml_single_open (&handle, model_path, NULL, NULL,
      ML_NNFW_TYPE_ONNXRUNTIME, ML_NNFW_HW_ANY);
  ASSERT_EQ (ret, ML_ERROR_NONE);

  /* input tensor info: expect one float32 tensor named "input" */
  ret = ml_single_get_input_info (handle, &input_info);
  EXPECT_EQ (ret, ML_ERROR_NONE);

  ret = ml_tensors_info_get_count (input_info, &num_tensors);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_EQ (num_tensors, 1U);

  ret = ml_tensors_info_get_tensor_name (input_info, 0, &tensor_name);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_STREQ (tensor_name, "input");
  g_free (tensor_name);
  tensor_name = NULL;

  ret = ml_tensors_info_get_tensor_type (input_info, 0, &tensor_type);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_EQ (tensor_type, ML_TENSOR_TYPE_FLOAT32);

  ret = ml_tensors_info_get_tensor_dimension (input_info, 0, input_dim);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_EQ (input_dim[0], 224U);
  EXPECT_EQ (input_dim[1], 224U);
  EXPECT_EQ (input_dim[2], 3U);
  EXPECT_EQ (input_dim[3], 1U);

  /* output tensor info: expect one float32 tensor named "output" */
  ret = ml_single_get_output_info (handle, &output_info);
  EXPECT_EQ (ret, ML_ERROR_NONE);

  ret = ml_tensors_info_get_count (output_info, &num_tensors);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_EQ (num_tensors, 1U);

  ret = ml_tensors_info_get_tensor_name (output_info, 0, &tensor_name);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_STREQ (tensor_name, "output");
  g_free (tensor_name);

  ret = ml_tensors_info_get_tensor_type (output_info, 0, &tensor_type);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_EQ (tensor_type, ML_TENSOR_TYPE_FLOAT32);

  ret = ml_tensors_info_get_tensor_dimension (output_info, 0, output_dim);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_EQ (output_dim[0], 1000U);
  EXPECT_EQ (output_dim[1], 1U);

  ret = ml_tensors_info_get_tensor_size (output_info, 0, &out_size);
  EXPECT_EQ (ret, ML_ERROR_NONE);

  /* invoke dummy data; only the size of the result is validated */
  ret = ml_tensors_data_create (input_info, &in_data);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_TRUE (in_data != NULL);

  ret = ml_single_invoke (handle, in_data, &out_data);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_TRUE (out_data != NULL);

  /* index 1 is out of range for a single-tensor output */
  ret = ml_tensors_data_get_tensor_data (out_data, 1, &raw_data, &raw_size);
  EXPECT_EQ (ret, ML_ERROR_INVALID_PARAMETER);

  ret = ml_tensors_data_get_tensor_data (out_data, 0, &raw_data, &raw_size);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_EQ (raw_size, out_size);

  ret = ml_single_close (handle);
  EXPECT_EQ (ret, ML_ERROR_NONE);

  ml_tensors_data_destroy (out_data);
  ml_tensors_data_destroy (in_data);
  ml_tensors_info_destroy (input_info);
  ml_tensors_info_destroy (output_info);
}
#endif /* ENABLE_ONNXRUNTIME */

#ifdef ENABLE_NCNN
/**
 * @brief Test NNStreamer single shot (ncnn)
 */
TEST (nnstreamer_capi_singleshot, invoke_11)
{
  ml_single_h handle;
  ml_tensors_info_h input_info, output_info;
  ml_tensors_data_h in_data = NULL;
  ml_tensors_data_h out_data = NULL;
  ml_tensor_dimension input_dim = { 0 };
  ml_tensor_dimension output_dim = { 0 };
  int ret;
  void *raw_data;
  size_t raw_size, out_size;

  const gchar *src_root = g_getenv ("MLAPI_SOURCE_ROOT_PATH");

  /* supposed to run test in build directory */
  if (!src_root)
    src_root = "..";

  g_autofree gchar *param_path = g_build_filename (src_root, "tests",
      "test_models", "models", "squeezenet_v1.1.param", NULL);
  ASSERT_TRUE (g_file_test (param_path, G_FILE_TEST_EXISTS));
  g_autofree gchar *bin_path = g_build_filename (src_root, "tests",
      "test_models", "models", "squeezenet_v1.1.bin", NULL);
  ASSERT_TRUE (g_file_test (bin_path, G_FILE_TEST_EXISTS));

  /* the model is passed as a comma-joined param/bin file pair */
  g_autofree gchar *model_path = g_strdup_printf ("%s,%s", param_path, bin_path);

  /* caller-provided input/output tensor info (unlike the auto-detect tests) */
  input_dim[0] = 227U;
  input_dim[1] = 227U;
  input_dim[2] = 3U;
  ml_tensors_info_create (&input_info);
  ml_tensors_info_set_count (input_info, 1U);
  ml_tensors_info_set_tensor_type (input_info, 0U, ML_TENSOR_TYPE_FLOAT32);
  ml_tensors_info_set_tensor_dimension (input_info, 0U, input_dim);

  output_dim[0] = 1000U;
  output_dim[1] = 1U;
  ml_tensors_info_create (&output_info);
  ml_tensors_info_set_count (output_info, 1U);
  ml_tensors_info_set_tensor_type (output_info, 0U, ML_TENSOR_TYPE_FLOAT32);
  ml_tensors_info_set_tensor_dimension (output_info, 0U, output_dim);

  ret = ml_single_open (&handle, model_path, input_info, output_info,
      ML_NNFW_TYPE_NCNN, ML_NNFW_HW_ANY);
  ASSERT_EQ (ret, ML_ERROR_NONE);

  ret = ml_tensors_info_get_tensor_size (output_info, 0, &out_size);
  EXPECT_EQ (ret, ML_ERROR_NONE);

  /* invoke dummy data; only the size of the result is validated */
  ret = ml_tensors_data_create (input_info, &in_data);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_TRUE (in_data != NULL);

  ret = ml_single_invoke (handle, in_data, &out_data);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_TRUE (out_data != NULL);

  /* index 1 is out of range for a single-tensor output */
  ret = ml_tensors_data_get_tensor_data (out_data, 1, &raw_data, &raw_size);
  EXPECT_EQ (ret, ML_ERROR_INVALID_PARAMETER);

  ret = ml_tensors_data_get_tensor_data (out_data, 0, &raw_data, &raw_size);
  EXPECT_EQ (ret, ML_ERROR_NONE);
  EXPECT_EQ (raw_size, out_size);

  ret = ml_single_close (handle);
  EXPECT_EQ (ret, ML_ERROR_NONE);

  ml_tensors_data_destroy (out_data);
  ml_tensors_data_destroy (in_data);
  ml_tensors_info_destroy (input_info);
  ml_tensors_info_destroy (output_info);
}
#endif /* ENABLE_NCNN */

/**
* @brief Test NNStreamer single shot (custom filter)
* @detail Run pipeline with custom filter with allocate in invoke, handle multi tensors.
Expand Down
Binary file added tests/test_models/models/mobilenet_v2_quant.onnx
Binary file not shown.
Binary file added tests/test_models/models/squeezenet_v1.1.bin
Binary file not shown.
77 changes: 77 additions & 0 deletions tests/test_models/models/squeezenet_v1.1.param
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
7767517
75 83
Input data 0 1 data 0=227 1=227 2=3
Convolution conv1 1 1 data conv1 0=64 1=3 2=1 3=2 4=0 5=1 6=1728
ReLU relu_conv1 1 1 conv1 conv1_relu_conv1 0=0.000000
Pooling pool1 1 1 conv1_relu_conv1 pool1 0=0 1=3 2=2 3=0 4=0
Convolution fire2/squeeze1x1 1 1 pool1 fire2/squeeze1x1 0=16 1=1 2=1 3=1 4=0 5=1 6=1024
ReLU fire2/relu_squeeze1x1 1 1 fire2/squeeze1x1 fire2/squeeze1x1_fire2/relu_squeeze1x1 0=0.000000
Split splitncnn_0 1 2 fire2/squeeze1x1_fire2/relu_squeeze1x1 fire2/squeeze1x1_fire2/relu_squeeze1x1_splitncnn_0 fire2/squeeze1x1_fire2/relu_squeeze1x1_splitncnn_1
Convolution fire2/expand1x1 1 1 fire2/squeeze1x1_fire2/relu_squeeze1x1_splitncnn_1 fire2/expand1x1 0=64 1=1 2=1 3=1 4=0 5=1 6=1024
ReLU fire2/relu_expand1x1 1 1 fire2/expand1x1 fire2/expand1x1_fire2/relu_expand1x1 0=0.000000
Convolution fire2/expand3x3 1 1 fire2/squeeze1x1_fire2/relu_squeeze1x1_splitncnn_0 fire2/expand3x3 0=64 1=3 2=1 3=1 4=1 5=1 6=9216
ReLU fire2/relu_expand3x3 1 1 fire2/expand3x3 fire2/expand3x3_fire2/relu_expand3x3 0=0.000000
Concat fire2/concat 2 1 fire2/expand1x1_fire2/relu_expand1x1 fire2/expand3x3_fire2/relu_expand3x3 fire2/concat 0=0
Convolution fire3/squeeze1x1 1 1 fire2/concat fire3/squeeze1x1 0=16 1=1 2=1 3=1 4=0 5=1 6=2048
ReLU fire3/relu_squeeze1x1 1 1 fire3/squeeze1x1 fire3/squeeze1x1_fire3/relu_squeeze1x1 0=0.000000
Split splitncnn_1 1 2 fire3/squeeze1x1_fire3/relu_squeeze1x1 fire3/squeeze1x1_fire3/relu_squeeze1x1_splitncnn_0 fire3/squeeze1x1_fire3/relu_squeeze1x1_splitncnn_1
Convolution fire3/expand1x1 1 1 fire3/squeeze1x1_fire3/relu_squeeze1x1_splitncnn_1 fire3/expand1x1 0=64 1=1 2=1 3=1 4=0 5=1 6=1024
ReLU fire3/relu_expand1x1 1 1 fire3/expand1x1 fire3/expand1x1_fire3/relu_expand1x1 0=0.000000
Convolution fire3/expand3x3 1 1 fire3/squeeze1x1_fire3/relu_squeeze1x1_splitncnn_0 fire3/expand3x3 0=64 1=3 2=1 3=1 4=1 5=1 6=9216
ReLU fire3/relu_expand3x3 1 1 fire3/expand3x3 fire3/expand3x3_fire3/relu_expand3x3 0=0.000000
Concat fire3/concat 2 1 fire3/expand1x1_fire3/relu_expand1x1 fire3/expand3x3_fire3/relu_expand3x3 fire3/concat 0=0
Pooling pool3 1 1 fire3/concat pool3 0=0 1=3 2=2 3=0 4=0
Convolution fire4/squeeze1x1 1 1 pool3 fire4/squeeze1x1 0=32 1=1 2=1 3=1 4=0 5=1 6=4096
ReLU fire4/relu_squeeze1x1 1 1 fire4/squeeze1x1 fire4/squeeze1x1_fire4/relu_squeeze1x1 0=0.000000
Split splitncnn_2 1 2 fire4/squeeze1x1_fire4/relu_squeeze1x1 fire4/squeeze1x1_fire4/relu_squeeze1x1_splitncnn_0 fire4/squeeze1x1_fire4/relu_squeeze1x1_splitncnn_1
Convolution fire4/expand1x1 1 1 fire4/squeeze1x1_fire4/relu_squeeze1x1_splitncnn_1 fire4/expand1x1 0=128 1=1 2=1 3=1 4=0 5=1 6=4096
ReLU fire4/relu_expand1x1 1 1 fire4/expand1x1 fire4/expand1x1_fire4/relu_expand1x1 0=0.000000
Convolution fire4/expand3x3 1 1 fire4/squeeze1x1_fire4/relu_squeeze1x1_splitncnn_0 fire4/expand3x3 0=128 1=3 2=1 3=1 4=1 5=1 6=36864
ReLU fire4/relu_expand3x3 1 1 fire4/expand3x3 fire4/expand3x3_fire4/relu_expand3x3 0=0.000000
Concat fire4/concat 2 1 fire4/expand1x1_fire4/relu_expand1x1 fire4/expand3x3_fire4/relu_expand3x3 fire4/concat 0=0
Convolution fire5/squeeze1x1 1 1 fire4/concat fire5/squeeze1x1 0=32 1=1 2=1 3=1 4=0 5=1 6=8192
ReLU fire5/relu_squeeze1x1 1 1 fire5/squeeze1x1 fire5/squeeze1x1_fire5/relu_squeeze1x1 0=0.000000
Split splitncnn_3 1 2 fire5/squeeze1x1_fire5/relu_squeeze1x1 fire5/squeeze1x1_fire5/relu_squeeze1x1_splitncnn_0 fire5/squeeze1x1_fire5/relu_squeeze1x1_splitncnn_1
Convolution fire5/expand1x1 1 1 fire5/squeeze1x1_fire5/relu_squeeze1x1_splitncnn_1 fire5/expand1x1 0=128 1=1 2=1 3=1 4=0 5=1 6=4096
ReLU fire5/relu_expand1x1 1 1 fire5/expand1x1 fire5/expand1x1_fire5/relu_expand1x1 0=0.000000
Convolution fire5/expand3x3 1 1 fire5/squeeze1x1_fire5/relu_squeeze1x1_splitncnn_0 fire5/expand3x3 0=128 1=3 2=1 3=1 4=1 5=1 6=36864
ReLU fire5/relu_expand3x3 1 1 fire5/expand3x3 fire5/expand3x3_fire5/relu_expand3x3 0=0.000000
Concat fire5/concat 2 1 fire5/expand1x1_fire5/relu_expand1x1 fire5/expand3x3_fire5/relu_expand3x3 fire5/concat 0=0
Pooling pool5 1 1 fire5/concat pool5 0=0 1=3 2=2 3=0 4=0
Convolution fire6/squeeze1x1 1 1 pool5 fire6/squeeze1x1 0=48 1=1 2=1 3=1 4=0 5=1 6=12288
ReLU fire6/relu_squeeze1x1 1 1 fire6/squeeze1x1 fire6/squeeze1x1_fire6/relu_squeeze1x1 0=0.000000
Split splitncnn_4 1 2 fire6/squeeze1x1_fire6/relu_squeeze1x1 fire6/squeeze1x1_fire6/relu_squeeze1x1_splitncnn_0 fire6/squeeze1x1_fire6/relu_squeeze1x1_splitncnn_1
Convolution fire6/expand1x1 1 1 fire6/squeeze1x1_fire6/relu_squeeze1x1_splitncnn_1 fire6/expand1x1 0=192 1=1 2=1 3=1 4=0 5=1 6=9216
ReLU fire6/relu_expand1x1 1 1 fire6/expand1x1 fire6/expand1x1_fire6/relu_expand1x1 0=0.000000
Convolution fire6/expand3x3 1 1 fire6/squeeze1x1_fire6/relu_squeeze1x1_splitncnn_0 fire6/expand3x3 0=192 1=3 2=1 3=1 4=1 5=1 6=82944
ReLU fire6/relu_expand3x3 1 1 fire6/expand3x3 fire6/expand3x3_fire6/relu_expand3x3 0=0.000000
Concat fire6/concat 2 1 fire6/expand1x1_fire6/relu_expand1x1 fire6/expand3x3_fire6/relu_expand3x3 fire6/concat 0=0
Convolution fire7/squeeze1x1 1 1 fire6/concat fire7/squeeze1x1 0=48 1=1 2=1 3=1 4=0 5=1 6=18432
ReLU fire7/relu_squeeze1x1 1 1 fire7/squeeze1x1 fire7/squeeze1x1_fire7/relu_squeeze1x1 0=0.000000
Split splitncnn_5 1 2 fire7/squeeze1x1_fire7/relu_squeeze1x1 fire7/squeeze1x1_fire7/relu_squeeze1x1_splitncnn_0 fire7/squeeze1x1_fire7/relu_squeeze1x1_splitncnn_1
Convolution fire7/expand1x1 1 1 fire7/squeeze1x1_fire7/relu_squeeze1x1_splitncnn_1 fire7/expand1x1 0=192 1=1 2=1 3=1 4=0 5=1 6=9216
ReLU fire7/relu_expand1x1 1 1 fire7/expand1x1 fire7/expand1x1_fire7/relu_expand1x1 0=0.000000
Convolution fire7/expand3x3 1 1 fire7/squeeze1x1_fire7/relu_squeeze1x1_splitncnn_0 fire7/expand3x3 0=192 1=3 2=1 3=1 4=1 5=1 6=82944
ReLU fire7/relu_expand3x3 1 1 fire7/expand3x3 fire7/expand3x3_fire7/relu_expand3x3 0=0.000000
Concat fire7/concat 2 1 fire7/expand1x1_fire7/relu_expand1x1 fire7/expand3x3_fire7/relu_expand3x3 fire7/concat 0=0
Convolution fire8/squeeze1x1 1 1 fire7/concat fire8/squeeze1x1 0=64 1=1 2=1 3=1 4=0 5=1 6=24576
ReLU fire8/relu_squeeze1x1 1 1 fire8/squeeze1x1 fire8/squeeze1x1_fire8/relu_squeeze1x1 0=0.000000
Split splitncnn_6 1 2 fire8/squeeze1x1_fire8/relu_squeeze1x1 fire8/squeeze1x1_fire8/relu_squeeze1x1_splitncnn_0 fire8/squeeze1x1_fire8/relu_squeeze1x1_splitncnn_1
Convolution fire8/expand1x1 1 1 fire8/squeeze1x1_fire8/relu_squeeze1x1_splitncnn_1 fire8/expand1x1 0=256 1=1 2=1 3=1 4=0 5=1 6=16384
ReLU fire8/relu_expand1x1 1 1 fire8/expand1x1 fire8/expand1x1_fire8/relu_expand1x1 0=0.000000
Convolution fire8/expand3x3 1 1 fire8/squeeze1x1_fire8/relu_squeeze1x1_splitncnn_0 fire8/expand3x3 0=256 1=3 2=1 3=1 4=1 5=1 6=147456
ReLU fire8/relu_expand3x3 1 1 fire8/expand3x3 fire8/expand3x3_fire8/relu_expand3x3 0=0.000000
Concat fire8/concat 2 1 fire8/expand1x1_fire8/relu_expand1x1 fire8/expand3x3_fire8/relu_expand3x3 fire8/concat 0=0
Convolution fire9/squeeze1x1 1 1 fire8/concat fire9/squeeze1x1 0=64 1=1 2=1 3=1 4=0 5=1 6=32768
ReLU fire9/relu_squeeze1x1 1 1 fire9/squeeze1x1 fire9/squeeze1x1_fire9/relu_squeeze1x1 0=0.000000
Split splitncnn_7 1 2 fire9/squeeze1x1_fire9/relu_squeeze1x1 fire9/squeeze1x1_fire9/relu_squeeze1x1_splitncnn_0 fire9/squeeze1x1_fire9/relu_squeeze1x1_splitncnn_1
Convolution fire9/expand1x1 1 1 fire9/squeeze1x1_fire9/relu_squeeze1x1_splitncnn_1 fire9/expand1x1 0=256 1=1 2=1 3=1 4=0 5=1 6=16384
ReLU fire9/relu_expand1x1 1 1 fire9/expand1x1 fire9/expand1x1_fire9/relu_expand1x1 0=0.000000
Convolution fire9/expand3x3 1 1 fire9/squeeze1x1_fire9/relu_squeeze1x1_splitncnn_0 fire9/expand3x3 0=256 1=3 2=1 3=1 4=1 5=1 6=147456
ReLU fire9/relu_expand3x3 1 1 fire9/expand3x3 fire9/expand3x3_fire9/relu_expand3x3 0=0.000000
Concat fire9/concat 2 1 fire9/expand1x1_fire9/relu_expand1x1 fire9/expand3x3_fire9/relu_expand3x3 fire9/concat 0=0
Dropout drop9 1 1 fire9/concat fire9/concat_drop9
Convolution conv10 1 1 fire9/concat_drop9 conv10 0=1000 1=1 2=1 3=1 4=1 5=1 6=512000
ReLU relu_conv10 1 1 conv10 conv10_relu_conv10 0=0.000000
Pooling pool10 1 1 conv10_relu_conv10 pool10 0=1 1=0 2=1 3=0 4=1
Softmax prob 1 1 pool10 prob 0=0

0 comments on commit ebd0318

Please sign in to comment.