[ci] Update perf test #222

Merged 1 commit on Oct 16, 2023
28 changes: 0 additions & 28 deletions .github/workflows/main.yaml
@@ -2,37 +2,9 @@ name: Test
 
 on:
   push:
-    paths:
-      - 'build/**'
-      - 'cmd/**'
-      - 'configs/**'
-      - 'core/**'
-      - 'example/**'
-      - 'internal/**'
-      - 'tests/**'
-      - '!**.md'
-      - '.github/workflows/main.yaml'
-      - 'deployment/**'
-      - '**/*.go'
-      - '**/go.mod'
-      - '**/go.sum'
     branches:
       - main
   pull_request:
-    paths:
-      - 'build/**'
-      - 'cmd/**'
-      - 'configs/**'
-      - 'core/**'
-      - 'example/**'
-      - 'internal/**'
-      - 'tests/**'
-      - '!**.md'
-      - '.github/workflows/main.yaml'
-      - 'deployment/**'
-      - '**/*.go'
-      - '**/go.mod'
-      - '**/go.sum'
     branches:
       - main
   workflow_dispatch:
2 changes: 1 addition & 1 deletion .github/workflows/nightly.yaml
@@ -93,7 +93,7 @@ jobs:
         shell: bash
         working-directory: tests
         run: |
-          pytest -s -v --tags L0, L1, L2, L3
+          pytest -s -v --tags L0 L1 L2 L3
 
       - name: Get Milvus status
         shell: bash
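For context on the invocation fixed above: with the comma form, the shell hands pytest the tokens "L0,", "L1,", "L2," and "L3", so the trailing commas become part of the tag values and never match a real tag; the space-separated form yields clean values. A minimal sketch of how such a --tags option is typically wired up in conftest.py, assuming an nargs-based option and a @pytest.mark.tags marker; the exact names are assumptions, not necessarily this repo's implementation:

import pytest

def pytest_addoption(parser):
    # nargs="+" consumes every following non-flag token, so
    # "--tags L0 L1 L2 L3" parses to ["L0", "L1", "L2", "L3"].
    parser.addoption("--tags", action="store", nargs="+", default=["L0"],
                     help="run only tests marked with one of these tags")

def pytest_collection_modifyitems(config, items):
    wanted = set(config.getoption("--tags"))
    skip = pytest.mark.skip(reason="tag not selected via --tags")
    for item in items:
        # Collect the arguments of any @pytest.mark.tags(...) markers.
        tags = {arg for m in item.iter_markers(name="tags") for arg in m.args}
        if not tags & wanted:
            item.add_marker(skip)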
13 changes: 11 additions & 2 deletions .github/workflows/perf.yaml
@@ -1,9 +1,18 @@
 name: Perf Test
 
 on:
+  push:
+    branches:
+      - main
+  pull_request:
+    branches:
+      - main
   workflow_dispatch:
   schedule:
-    - cron: '0 2 * * *'
+    - cron: '0 4 * * *'
+concurrency:
+  group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+  cancel-in-progress: true
 
 jobs:
   test-backup-restore-api:
@@ -91,7 +100,7 @@ jobs:
         shell: bash
         working-directory: tests
         run: |
-          pytest -s -v --tags Perf
+          pytest -s -v --tags Perf --log-cli-level=INFO --capture=no
 
       - name: Get Milvus status
        shell: bash
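The two extra pytest flags only change what reaches the CI console: --capture=no stops pytest from swallowing stdout, and --log-cli-level=INFO enables live logging so log.info(...) lines stream out while a long perf test runs instead of appearing only on failure. A toy illustration of the behavior, using stock pytest and a hypothetical test:

import logging

log = logging.getLogger(__name__)

def test_shows_progress():
    # Hidden under default capturing; printed live when the suite is run
    # with `pytest --log-cli-level=INFO --capture=no`.
    log.info("create backup started ...")
    assert True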
4 changes: 3 additions & 1 deletion tests/requirements.txt
@@ -42,4 +42,6 @@ protobuf==3.20.0
 minio==7.1.5
 
 # for benchmark
-h5py==3.1.0
+h5py==3.1.0
+pytest-benchmark==4.0.0
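The new dependency is what provides the benchmark fixture used in the rewritten perf tests below. A minimal sketch of the fixture with a toy function, assuming stock pytest-benchmark behavior:

def fib(n: int) -> int:
    return n if n < 2 else fib(n - 1) + fib(n - 2)

def test_fib_perf(benchmark):
    # benchmark() times repeated calls of fib(20), aggregates min/mean/stddev,
    # and pytest-benchmark prints a stats table at the end of the session.
    result = benchmark(fib, 20)
    assert result == 6765  # the fixture returns the function's return value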

86 changes: 43 additions & 43 deletions tests/testcases/test_backup_perf.py
@@ -1,12 +1,12 @@
-from time import sleep
+import pytest
 
+import time
 from base.client_base import TestcaseBase
 from common import common_func as cf
 from common.common_type import CaseLabel
 from utils.util_log import test_log as log
 from api.milvus_backup import MilvusBackupClient
-from checker import Op, BackupCreateChecker, BackupRestoreChecker, start_monitor_threads
+from checker import BackupCreateChecker, BackupRestoreChecker
 
 c_name_prefix = "perf_backup"
 backup_prefix = "backup"
@@ -17,54 +17,54 @@
 class TestPerf(TestcaseBase):
     """ Test case of performance"""
 
-    def test_milvus_create_backup_perf(self):
-        # prepare data
-        total_nb = 10000
-        cnt = 10
-        coll_num = 2
-        collections_to_backup = []
-        for i in range(coll_num):
-            collection_to_backup = cf.gen_unique_str(c_name_prefix)
-            for j in range(cnt):
-                self.prepare_data(collection_to_backup, nb=total_nb // cnt)
-            collections_to_backup.append(collection_to_backup)
-        checkers = {
-            Op.create: BackupCreateChecker(collections_to_backup)
-        }
-        start_monitor_threads(checkers)
-        log.info("*********************Perf Test Start**********************")
-        sleep(360)
-        for k, v in checkers.items():
-            v.check_result()
-        for k, v in checkers.items():
-            v.terminate()
-        sleep(10)
-        log.info("*********************Perf Test End**********************")
+    prepare_data_done = False
 
-    def test_milvus_restore_backup_perf(self):
-        # prepare data
-        total_nb = 10000
+    def setup_perf(self, nb=1000):
+        log.info(f"*****************Test Perf Setup With nb {nb}*****************")
+        if self.prepare_data_done:
+            log.info(f"*****************Test Perf Setup With nb {nb} Done, Skip*****************")
+            return
+        else:
+            log.info(f"*****************Test Perf Setup With nb {nb} Start*****************")
+        total_nb = nb
         cnt = 10
-        coll_num = 2
+        coll_num = 1
         collections_to_backup = []
         for i in range(coll_num):
            collection_to_backup = cf.gen_unique_str(c_name_prefix)
            for j in range(cnt):
                self.prepare_data(collection_to_backup, nb=total_nb // cnt)
            collections_to_backup.append(collection_to_backup)
         backup_name = cf.gen_unique_str(backup_prefix)
-        suffix = "_bak"
-
-        client.create_backup({"async": False, "backup_name": backup_name, "collection_names": collections_to_backup})
-        checkers = {
-            Op.restore: BackupRestoreChecker(backup_name, suffix, collections_to_backup)
-        }
-        start_monitor_threads(checkers)
-        log.info("*********************Perf Test Start**********************")
-        sleep(360)
-        for k, v in checkers.items():
-            v.check_result()
-        for k, v in checkers.items():
-            v.terminate()
-        sleep(10)
-        log.info("*********************Perf Test End**********************")
+        self.collections_to_backup = collections_to_backup
+        self.backup_name = backup_name
+        self.prepare_data_done = True
+
+    def backup_perf(self):
+        log.info("*****************Test Backup Perf Start*****************")
+        t0 = time.perf_counter()
+        res, result = BackupCreateChecker(self.collections_to_backup).run_task()
+        t1 = time.perf_counter()
+        log.info(f"create backup time: {t1 - t0} with {res}, {result}")
+        return res, result
+
+    def restore_perf(self):
+        log.info("*****************Test Restore Perf Start*****************")
+        t0 = time.perf_counter()
+        res, result = BackupRestoreChecker(self.backup_name, "_bak", self.collections_to_backup).run_task()
+        t1 = time.perf_counter()
+        log.info(f"restore backup time: {t1 - t0} with {res}, {result}")
+        return res, result
+
+    @pytest.mark.parametrize("nb", [100000])
+    def test_milvus_create_backup_perf(self, benchmark, nb):
+        self.setup_perf(nb=nb)
+        res, result = benchmark.pedantic(self.backup_perf, iterations=1, rounds=5)
+        assert result is True
+
+    @pytest.mark.parametrize("nb", [100000])
+    def test_milvus_restore_backup_perf(self, benchmark, nb):
+        self.setup_perf(nb=nb)
+        res, result = benchmark.pedantic(self.restore_perf, setup=self.setup_perf, iterations=1, rounds=5)
+        assert result is True
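A note on the benchmark.pedantic calls above, going by pytest-benchmark's documented behavior: pedantic() returns the target's return value (which is why res, result can still be unpacked and asserted), a setup callable runs once before every round, and iterations must stay at 1 whenever setup is supplied. Because setup_perf flips prepare_data_done after its first run, the per-round setup in the restore test is effectively a no-op from round two onward. A toy sketch of the same pattern, with illustrative names:

class Harness:
    ready = False

    def setup(self):
        # Called by pedantic() before every round; must return None or an
        # (args, kwargs) tuple. Here it returns None and warms state once.
        if self.ready:
            return
        self.ready = True

    def task(self):
        return "response", True

def test_task_perf(benchmark):
    h = Harness()
    # iterations must remain 1 when a setup callable is passed.
    res, ok = benchmark.pedantic(h.task, setup=h.setup, iterations=1, rounds=5)
    assert ok is True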