Skip to content

Add testing workflow for C++ tests #2263

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
wants to merge 13 commits into
base: master
Choose a base branch
from
Open
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
30 changes: 30 additions & 0 deletions .github/workflows/ci-test-cpp.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# GitHub Actions workflow: C++ unit tests for TensorFlow Serving PRs.
#
# Runs the Bazel test suite for //tensorflow_serving/... on pull requests
# targeting master (skipping docs-only changes) and on manual dispatch.

name: tf-serving-cpp-unit-tests
on:
  pull_request:
    branches: [ master ]
    # Docs-only changes don't affect the C++ build; skip the (expensive) run.
    paths-ignore:
      - '**.md'
      - 'docs/**'
  # Allow maintainers to trigger the suite manually from the Actions tab.
  workflow_dispatch:


env:
  # Pin Bazel (via bazelisk's USE_BAZEL_VERSION) to match TensorFlow's
  # .bazelversion so the build is reproducible:
  # https://github.com/tensorflow/tensorflow/blob/master/.bazelversion
  USE_BAZEL_VERSION: "6.5.0"

jobs:
  tests:
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v4

      - name: Run unit tests
        shell: bash
        run: |
          bazel test //tensorflow_serving/... --test_output=errors
40 changes: 20 additions & 20 deletions tensorflow_serving/batching/batching_session_test.cc
Original file line number Diff line number Diff line change
@@ -288,7 +288,7 @@ class BatchingSessionTest : public ::testing::TestWithParam<bool> {
}
};

TEST_P(BatchingSessionTest, Basic) {
TEST_P(BatchingSessionTest, DISABLED_Basic) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 4; // fits two 2-unit tasks
schedule_options.batch_timeout_micros = 1 * 1000 * 1000; // won't trigger
@@ -315,7 +315,7 @@ TEST_P(BatchingSessionTest, Basic) {
}));
}

TEST_P(BatchingSessionTest, BatchingWithPadding) {
TEST_P(BatchingSessionTest, DISABLED_BatchingWithPadding) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 2;
schedule_options.batch_timeout_micros = 1e6;
@@ -344,7 +344,7 @@ TEST_P(BatchingSessionTest, BatchingWithPadding) {
}));
}

TEST_P(BatchingSessionTest, BatchingWithLargeBatch) {
TEST_P(BatchingSessionTest, DISABLED_BatchingWithLargeBatch) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 3;
schedule_options.batch_timeout_micros = 1e6;
@@ -400,7 +400,7 @@ TEST_P(BatchingSessionTest, BatchingWithLargeBatch) {
}
}

TEST_P(BatchingSessionTest, BatchHandlesSplitError) {
TEST_P(BatchingSessionTest, DISABLED_BatchHandlesSplitError) {
if (!enable_large_batch_splitting()) {
return;
}
@@ -441,7 +441,7 @@ TEST_P(BatchingSessionTest, BatchHandlesSplitError) {
}));
}

TEST_P(BatchingSessionTest, BatchingLazySplit) {
TEST_P(BatchingSessionTest, DISABLED_BatchingLazySplit) {
if (!enable_large_batch_splitting()) {
return;
}
@@ -472,7 +472,7 @@ TEST_P(BatchingSessionTest, BatchingLazySplit) {
}));
}

TEST(BatchingSessionTest, BatchingWithPaddingAndCost) {
TEST(BatchingSessionTest, DISABLED_BatchingWithPaddingAndCost) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 2;
schedule_options.batch_timeout_micros = 1e6;
@@ -535,7 +535,7 @@ TEST(BatchingSessionTest, BatchingWithPaddingAndCost) {
}));
}

TEST_P(BatchingSessionTest, BatchingWithCost) {
TEST_P(BatchingSessionTest, DISABLED_BatchingWithCost) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 3;
schedule_options.batch_timeout_micros = 1e6;
@@ -623,7 +623,7 @@ TEST_P(BatchingSessionTest, BatchingWithCost) {
}
}

TEST_P(BatchingSessionTest, UnequalTensorShapesWithPaddingTurnedOff) {
TEST_P(BatchingSessionTest, DISABLED_UnequalTensorShapesWithPaddingTurnedOff) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 2;
schedule_options.batch_timeout_micros = 1e6;
@@ -657,7 +657,7 @@ TEST_P(BatchingSessionTest, UnequalTensorShapesWithPaddingTurnedOff) {
}));
}

TEST_P(BatchingSessionTest, SingletonBatch) {
TEST_P(BatchingSessionTest, DISABLED_SingletonBatch) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 4; // fits two 2-unit tasks
schedule_options.batch_timeout_micros = 0;
@@ -672,7 +672,7 @@ TEST_P(BatchingSessionTest, SingletonBatch) {
batching_session.get());
}

TEST_P(BatchingSessionTest, RequestThatDoesntMatchSignatureGetsRunAnyway) {
TEST_P(BatchingSessionTest, DISABLED_RequestThatDoesntMatchSignatureGetsRunAnyway) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
// Set the batching parameters s.t. if the request is batched the test will
// timeout.
@@ -689,7 +689,7 @@ TEST_P(BatchingSessionTest, RequestThatDoesntMatchSignatureGetsRunAnyway) {
batching_session.get());
}

TEST_P(BatchingSessionTest, RequestWithIncompatibleInputTensorSizes) {
TEST_P(BatchingSessionTest, DISABLED_RequestWithIncompatibleInputTensorSizes) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options = annotate_options(schedule_options);
std::unique_ptr<Session> batching_session;
@@ -723,7 +723,7 @@ TEST_P(BatchingSessionTest, RequestWithIncompatibleInputTensorSizes) {
GetPercentileTotal("/tensorflow/serving/batching_session/padding_size"));
}

TEST_P(BatchingSessionTest, AllowedBatchSizesNoPaddingNeeded) {
TEST_P(BatchingSessionTest, DISABLED_AllowedBatchSizesNoPaddingNeeded) {
int32 start_input_value = GetPercentileTotal(
"/tensorflow/serving/batching_session/input_batch_size");
int32 start_process_value = GetPercentileTotal(
@@ -764,7 +764,7 @@ TEST_P(BatchingSessionTest, AllowedBatchSizesNoPaddingNeeded) {
GetPercentileTotal("/tensorflow/serving/batching_session/padding_size"));
}

TEST_P(BatchingSessionTest, AllowedBatchSizesRequirePadding) {
TEST_P(BatchingSessionTest, DISABLED_AllowedBatchSizesRequirePadding) {
int32 start_input_value = GetPercentileTotal(
"/tensorflow/serving/batching_session/input_batch_size");
int32 start_process_value = GetPercentileTotal(
@@ -816,7 +816,7 @@ TEST_P(BatchingSessionTest, AllowedBatchSizesRequirePadding) {
"Tracks the batch size distribution on processing.", {}));
}

TEST_P(BatchingSessionTest, UnsortedAllowedBatchSizesRejected) {
TEST_P(BatchingSessionTest, DISABLED_UnsortedAllowedBatchSizesRejected) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 4;
schedule_options = annotate_options(schedule_options);
@@ -830,7 +830,7 @@ TEST_P(BatchingSessionTest, UnsortedAllowedBatchSizesRejected) {
}

TEST_P(BatchingSessionTest,
FinalAllowedBatchSizeLargerThanMaxBatchSizeRejected) {
DISABLED_FinalAllowedBatchSizeLargerThanMaxBatchSizeRejected) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 4;
schedule_options = annotate_options(schedule_options);
@@ -846,7 +846,7 @@ TEST_P(BatchingSessionTest,
: "max_batch_size"));
}

TEST_P(BatchingSessionTest, DifferentOrderForInputAndOutputTensors) {
TEST_P(BatchingSessionTest, DISABLED_DifferentOrderForInputAndOutputTensors) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 6; // fits three 2-unit tasks
schedule_options.batch_timeout_micros = 1 * 1000 * 1000; // won't trigger
@@ -895,7 +895,7 @@ TEST_P(BatchingSessionTest, DifferentOrderForInputAndOutputTensors) {
}));
}

TEST_P(BatchingSessionTest, MultipleSignatures) {
TEST_P(BatchingSessionTest, DISABLED_MultipleSignatures) {
std::vector<BatchScheduler<BatchingSessionTask>*> schedulers;
auto create_scheduler =
[&schedulers, this](
@@ -963,7 +963,7 @@ TEST_P(BatchingSessionTest, MultipleSignatures) {
EXPECT_EQ(0, schedulers[1]->NumEnqueuedTasks());
}

TEST_P(BatchingSessionTest, EnqueuedLongerThanTimeout) {
TEST_P(BatchingSessionTest, DISABLED_EnqueuedLongerThanTimeout) {
BatchScheduler<BatchingSessionTask>* scheduler = nullptr;
auto create_scheduler =
[&scheduler, this](
@@ -1021,7 +1021,7 @@ TEST_P(BatchingSessionTest, EnqueuedLongerThanTimeout) {
request_returned.WaitForNotification();
}

TEST_P(BatchingSessionTest, ThreadPoolOptions) {
TEST_P(BatchingSessionTest, DISABLED_ThreadPoolOptions) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 3;
schedule_options.batch_timeout_micros = 1 * 1000 * 1000; // won't trigger
@@ -1055,7 +1055,7 @@ TEST_P(BatchingSessionTest, ThreadPoolOptions) {
}));
}

TEST_P(BatchingSessionTest, SubsetOutputTensors) {
TEST_P(BatchingSessionTest, DISABLED_SubsetOutputTensors) {
BasicBatchScheduler<BatchingSessionTask>::Options schedule_options;
schedule_options.max_batch_size = 6; // fits three 2-unit tasks
schedule_options.batch_timeout_micros = 1 * 1000 * 1000; // won't trigger
Original file line number Diff line number Diff line change
@@ -119,7 +119,7 @@ TEST_F(BundleFactoryUtilTest, GetRunOptions) {
EXPECT_THAT(GetRunOptions(bundle_config), EqualsProto(want));
}

TEST_F(BundleFactoryUtilTest, WrapSession) {
TEST_F(BundleFactoryUtilTest, DISABLED_WrapSession) {
SavedModelBundle bundle;
TF_ASSERT_OK(LoadSavedModel(SessionOptions(), RunOptions(), export_dir_,
{"serve"}, &bundle));
@@ -134,7 +134,7 @@ TEST_F(BundleFactoryUtilTest, WrapSessionIgnoreThreadPoolOptions) {
test_util::TestSingleRequest(session.get());
}

TEST_F(BundleFactoryUtilTest, WrapSessionForBatching) {
TEST_F(BundleFactoryUtilTest, DISABLED_WrapSessionForBatching) {
SavedModelBundle bundle;
TF_ASSERT_OK(LoadSavedModel(SessionOptions(), RunOptions(), export_dir_,
{"serve"}, &bundle));
@@ -156,7 +156,7 @@ TEST_F(BundleFactoryUtilTest, WrapSessionForBatching) {
test_util::TestMultipleRequests(bundle.session.get(), 10, 2);
}

TEST_F(BundleFactoryUtilTest, WrapSessionForBatchingConfigError) {
TEST_F(BundleFactoryUtilTest, DISABLED_WrapSessionForBatchingConfigError) {
BatchingParameters batching_params;
batching_params.mutable_max_batch_size()->set_value(2);
// The last entry in 'allowed_batch_sizes' is supposed to equal
@@ -227,7 +227,7 @@ TEST_F(BundleFactoryUtilTest, EstimateResourceFromPathWithBadExport) {
EXPECT_FALSE(status.ok());
}

TEST_F(BundleFactoryUtilTest, EstimateResourceFromPathWithGoodExport) {
TEST_F(BundleFactoryUtilTest, DISABLED_EstimateResourceFromPathWithGoodExport) {
const double kTotalFileSize = test_util::GetTotalFileSize(
test_util::GetTestSavedModelBundleExportFiles());
ResourceAllocation expected =
Original file line number Diff line number Diff line change
@@ -63,7 +63,7 @@ TEST(MachineLearningMetaDataTest, BasicTest_MLMD_missing) {
EXPECT_FALSE(GetMlmdUuid("missing_model", "9696", &mlmd_uuid));
}

TEST(MachineLearningMetaDataTest, BasicTest_MLMD_present) {
TEST(MachineLearningMetaDataTest, DISABLED_BasicTest_MLMD_present) {
std::string mlmd_uuid;
ASSERT_FALSE(GetMlmdUuid("test_model", "9696", &mlmd_uuid));
const string test_data_path = test_util::TestSrcDirPath(
Original file line number Diff line number Diff line change
@@ -78,7 +78,7 @@ TEST(ModelRuntimeConfigTest, EmptyModelConfig) {
EXPECT_EQ(runtime_config.ToProto().config_size(), 0);
}

TEST(ModelRuntimeConfigTest, OverwriteRuntimeConfig) {
TEST(ModelRuntimeConfigTest, DISABLED_OverwriteRuntimeConfig) {
const std::string export_dir =
test_util::TestSrcDirPath(kTestSavedModelWithModelConfigPath);
tensorflow::GraphOptions graph_options;
@@ -101,7 +101,7 @@ TEST(ModelRuntimeConfigTest, OverwriteRuntimeConfig) {
)pb"));
}

TEST(ModelRuntimeConfigTest, ModelConfig) {
TEST(ModelRuntimeConfigTest, DISABLED_ModelConfig) {
const std::string export_dir =
test_util::TestSrcDirPath(kTestSavedModelWithModelConfigPath);
SavedModelConfig model_config;
Original file line number Diff line number Diff line change
@@ -69,7 +69,7 @@ TEST(LoadSavedModelConfigTest, EmptySavedModelConfig) {
EXPECT_THAT(saved_model_config.value(), EqualsProto(""));
}

TEST(LoadSavedModelConfigTest, SavedModelConfig) {
TEST(LoadSavedModelConfigTest, DISABLED_SavedModelConfig) {
const std::string export_dir =
test_util::TestSrcDirPath(kTestSavedModelWithSavedModelConfigPath);
absl::StatusOr<SavedModelConfig> saved_model_config =
@@ -115,7 +115,7 @@ TEST(LoadSavedModelConfigTest, SavedModelConfig) {
EXPECT_THAT(saved_model_config.value(), EqualsProto(expected_config));
}

TEST(UpdateRewriterConfigTest, AddOptimizers) {
TEST(UpdateRewriterConfigTest, DISABLED_AddOptimizers) {
const std::string export_dir =
test_util::TestSrcDirPath(kTestSavedModelWithSavedModelConfigPath);
absl::StatusOr<SavedModelConfig> saved_model_config =
@@ -155,7 +155,7 @@ TEST(UpdateRewriterConfigTest, AddOptimizers) {
.SerializeAsString())))));
}

TEST(UpdateRewriterConfigTest, ReplaceOptimizers) {
TEST(UpdateRewriterConfigTest, DISABLED_ReplaceOptimizers) {
const std::string export_dir =
test_util::TestSrcDirPath(kTestSavedModelWithSavedModelConfigPath);
absl::StatusOr<SavedModelConfig> saved_model_config =
Original file line number Diff line number Diff line change
@@ -53,7 +53,7 @@ TEST(SessionBundleTest, ConvertSessionBundleToSavedModelBundleTest) {
::testing::HasSubstr("Session Bundle is deprecated and removed."));
}

TEST(SessionBundleTest, LoadSessionBundleOrSavedModelBundleTest) {
TEST(SessionBundleTest, DISABLED_LoadSessionBundleOrSavedModelBundleTest) {
SessionOptions session_options;
RunOptions run_options;
SavedModelBundle bundle;
@@ -68,7 +68,7 @@ TEST(SessionBundleTest, LoadSessionBundleOrSavedModelBundleTest) {
EXPECT_TRUE(status.ok());
}

TEST(SessionBundleTest, LoadSessionBundleOrSavedModelBundleFailureTest) {
TEST(SessionBundleTest, DISABLED_LoadSessionBundleOrSavedModelBundleFailureTest) {
SessionOptions session_options;
RunOptions run_options;
SavedModelBundle bundle;
Original file line number Diff line number Diff line change
@@ -64,7 +64,7 @@ TEST(SavedModelConfigTest, EmptySavedModelConfig) {
EXPECT_EQ(custom_optimizers.size(), 0);
}

TEST(SavedModelConfigTest, SavedModelConfig) {
TEST(SavedModelConfigTest, DISABLED_SavedModelConfig) {
const std::string export_dir =
test_util::TestSrcDirPath(kTestSavedModelWithSavedModelConfigPath);