improve speed of value generation #301

Merged 12 commits on Oct 16, 2024
44 changes: 44 additions & 0 deletions .github/workflows/cargo.yml
@@ -176,6 +176,49 @@ jobs:
           memcached -t 1 -p 11211 -m 256 &
           target/release/rpc-perf configs/smoketest-cache.toml
 
+  smoketest-oltp:
+    name: smoketest-oltp
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v3
+      - uses: ./.github/actions/setup-rust
+      - uses: Swatinem/rust-cache@v2
+        with:
+          shared-key: smoketest-ubuntu-latest
+      - name: build
+        shell: bash
+        run: |
+          cargo build --workspace --all-targets --locked --release
+      - name: start mysql
+        shell: bash
+        run: |
+          sudo systemctl start mysql
+      - name: install sysbench
+        shell: bash
+        run: |
+          curl -s https://packagecloud.io/install/repositories/akopytov/sysbench/script.deb.sh | sudo bash
+          sudo apt -y install sysbench
+      - name: configure sysbench
+        shell: bash
+        run: |
+          echo "mysql-user=root" >> sysbench.config
+          echo "mysql-password=root" >> sysbench.config
+          echo "time=60" >> sysbench.config
+          echo "db-driver=mysql" >> sysbench.config
+      - name: prepare database
+        shell: bash
+        run: |
+          mysql --user=root --password=root -e "create database sbtest";
+          sysbench --config-file=sysbench.config oltp_point_select --tables=1 --table-size=100000 prepare
+      - name: warmup database
+        shell: bash
+        run: |
+          sysbench --config-file=sysbench.config oltp_point_select --tables=1 --table-size=100000 prewarm
+      - name: test
+        shell: bash
+        run: |
+          target/release/rpc-perf configs/smoketest-oltp.toml
+
   check-success:
     name: verify all tests pass
     runs-on: ubuntu-latest
@@ -187,6 +230,7 @@ jobs:
       - clippy-upload
       - audit
      - smoketest-cache
+      - smoketest-oltp
 
     steps:
      - name: no-op
63 changes: 63 additions & 0 deletions configs/smoketest-oltp.toml
@@ -0,0 +1,63 @@
+[general]
+# specify the protocol to be used
+protocol = "mysql"
+# the interval for stats integration and reporting
+interval = 1
+# the number of intervals to run the test for
+duration = 60
+# run the admin thread with an HTTP listener at the address provided, this
+# allows stats exposition via HTTP
+admin = "127.0.0.1:9090"
+# optionally, set an initial seed for the PRNGs used to generate the workload.
+# The default is to initialize from the OS entropy pool.
+#initial_seed = "0"
+
+#[metrics]
+# output file for detailed stats during the run
+#output = "stats.json"
+# format of the output file (possible values are json, msgpack, parquet)
+#format = "json"
+# optionally specify batch size for parquet row groups
+# only valid for parquet output
+#batch_size = 100_000
+# optionally specify histogram type (can be standard (default) or sparse)
+# only valid for parquet output
+#histogram = "sparse"
+# optionally, specify the sampling interval for metrics. Input is a string
+# with the unit attached; for example "100ms" or "1s". Defaults to 1s.
+#interval = "1s"
+
+[debug]
+# choose from: error, warn, info, debug, trace
+log_level = "info"
+# optionally, log to the file below instead of standard out
+# log_file = "rpc-perf.log"
+# backup file name for use with log rotation
+log_backup = "rpc-perf.log.old"
+# trigger log rotation when the file grows beyond this size (in bytes). Set this
+# option to '0' to disable log rotation.
+log_max_size = 1073741824
+
+[target]
+# specify one or more mysql connection strings
+endpoints = [
+    "mysql://root:root@localhost:3306/sbtest",
+]
+
+[oltp]
+# number of threads used to drive oltp requests
+threads = 4
+# the total number of connections to each endpoint
+poolsize = 20
+
+[workload]
+# the number of threads that will be used to generate the workload
+threads = 1
+
+[workload.ratelimit]
+# set a global ratelimit for the workload
+start = 10_000
+
+[workload.oltp]
+tables = 1
+keys = 100_000
5 changes: 5 additions & 0 deletions src/clients/cache/mod.rs
@@ -28,6 +28,11 @@ pub fn launch(
     config: &Config,
     work_receiver: Receiver<ClientWorkItemKind<ClientRequest>>,
 ) -> Option<Runtime> {
+    if config.client().is_none() {
+        debug!("No client configuration specified");
+        return None;
+    }
+
     debug!("Launching clients...");
 
     config.client()?;
2 changes: 1 addition & 1 deletion src/clients/cache/momento/commands/hash_set.rs
@@ -35,7 +35,7 @@ pub async fn hash_set(
     let d: Vec<(Vec<u8>, Vec<u8>)> = request
         .data
         .into_iter()
-        .map(|(k, v)| (k.to_vec(), v))
+        .map(|(k, v)| (k.to_vec(), v.into()))
         .collect();
 
     let r = DictionarySetFieldsRequest::new(cache_name, &*request.key, d)
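The `v.into()` above leans on the bytes crate's `From<Bytes> for Vec<u8>` impl, since `DictionarySetFieldsRequest` takes owned byte vectors while workload values are now `Bytes`. A standalone sketch of that conversion (illustrative values, not rpc-perf code):

```rust
use bytes::Bytes;

fn main() {
    // values are generated once as Bytes and shared cheaply...
    let value: Bytes = Bytes::from_static(b"example-value");

    // ...but From<Bytes> for Vec<u8> still produces an owned vector for APIs
    // that require one, copying out of the shared buffer only at this boundary
    let owned: Vec<u8> = value.into();
    assert_eq!(owned, b"example-value".to_vec());
}
```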
10 changes: 4 additions & 6 deletions src/clients/oltp/mod.rs
@@ -10,19 +10,17 @@ pub fn launch(
     config: &Config,
     work_receiver: Receiver<ClientWorkItemKind<OltpRequest>>,
 ) -> Option<Runtime> {
-    if config.storage().is_none() {
-        debug!("No storage configuration specified");
+    if config.oltp().is_none() {
+        debug!("No oltp configuration specified");
         return None;
     }
 
-    debug!("Launching clients...");
-
-    config.client()?;
+    debug!("Launching oltp clients...");
 
     // spawn the request drivers on their own runtime
     let mut client_rt = Builder::new_multi_thread()
         .enable_all()
-        .worker_threads(config.client().unwrap().threads())
+        .worker_threads(config.oltp().unwrap().threads())
         .build()
         .expect("failed to initialize tokio runtime");
 
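For readers unfamiliar with the dedicated-runtime pattern above, here is a minimal self-contained sketch; the `threads` value is a hypothetical stand-in for `config.oltp().unwrap().threads()`:

```rust
use tokio::runtime::Builder;

fn main() {
    // hypothetical stand-in for config.oltp().unwrap().threads()
    let threads = 4;

    // the oltp request drivers get their own multi-threaded runtime, sized
    // from the [oltp] config section rather than the cache client config
    let rt = Builder::new_multi_thread()
        .enable_all()
        .worker_threads(threads)
        .build()
        .expect("failed to initialize tokio runtime");

    rt.block_on(async {
        // request-driver tasks would be spawned onto this runtime
    });
}
```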
16 changes: 11 additions & 5 deletions src/clients/store/s3/mod.rs
@@ -343,7 +343,7 @@ pub struct S3RequestBuilder {
     inner: http::request::Builder,
     region: String,
     relative_uri: String,
-    content: Vec<u8>,
+    content: Bytes,
     content_sha256: String,
     timestamp: DateTime<Utc>,
 }
@@ -354,7 +354,7 @@ impl S3RequestBuilder {
         bucket: String,
         method: Method,
         relative_uri: String,
-        content: Vec<u8>,
+        content: Bytes,
     ) -> Self {
         let now = Utc::now();
         // let date = format!("{}", now.format("%Y%m%d"));
@@ -450,15 +450,21 @@ impl S3RequestBuilder {
             bucket,
             Method::DELETE,
             format!("/{key}"),
-            Vec::new(),
+            Vec::new().into(),
         )
     }
 
     pub fn get_object(region: String, bucket: String, key: String) -> Self {
-        Self::new(region, bucket, Method::GET, format!("/{key}"), Vec::new())
+        Self::new(
+            region,
+            bucket,
+            Method::GET,
+            format!("/{key}"),
+            Vec::new().into(),
+        )
     }
 
-    pub fn put_object(region: String, bucket: String, key: String, value: Vec<u8>) -> Self {
+    pub fn put_object(region: String, bucket: String, key: String, value: Bytes) -> Self {
         let mut s = Self::new(region, bucket, Method::PUT, format!("/{key}"), value);
 
         s.inner = s
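Note that `Vec::new().into()` simply builds an empty `Bytes` body for the GET and DELETE requests. A quick standalone check (not rpc-perf code) that this is equivalent to `Bytes::new()` and involves no heap allocation:

```rust
use bytes::Bytes;

fn main() {
    // Vec::new() never allocates, and neither does wrapping it in Bytes
    let from_vec: Bytes = Vec::new().into();
    let empty = Bytes::new();

    assert_eq!(from_vec, empty);
    assert!(from_vec.is_empty());
}
```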
9 changes: 5 additions & 4 deletions src/workload/client.rs
@@ -1,3 +1,4 @@
+use bytes::Bytes;
 use core::time::Duration;
 use std::collections::HashMap;
 use std::sync::Arc;
@@ -8,7 +9,7 @@ pub struct Ping {}
 #[derive(Debug, PartialEq)]
 pub struct Add {
     pub key: Arc<[u8]>,
-    pub value: Vec<u8>,
+    pub value: Bytes,
     pub ttl: Option<Duration>,
 }
 
@@ -30,14 +31,14 @@ pub struct Delete {
 #[derive(Debug, PartialEq)]
 pub struct Replace {
     pub key: Arc<[u8]>,
-    pub value: Vec<u8>,
+    pub value: Bytes,
     pub ttl: Option<Duration>,
 }
 
 #[derive(Debug, PartialEq)]
 pub struct Set {
     pub key: Arc<[u8]>,
-    pub value: Vec<u8>,
+    pub value: Bytes,
     pub ttl: Option<Duration>,
 }
 
@@ -77,7 +78,7 @@ pub struct HashIncrement {
 #[derive(Debug, PartialEq)]
 pub struct HashSet {
     pub key: Arc<[u8]>,
-    pub data: HashMap<Arc<[u8]>, Vec<u8>>,
+    pub data: HashMap<Arc<[u8]>, Bytes>,
     pub ttl: Option<Duration>,
 }
 
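These `Vec<u8>` to `Bytes` swaps are where the speedup comes from: cloning a `Bytes` is a reference-count bump that shares the underlying buffer, while cloning a `Vec<u8>` copies every byte, so handing one generated value to many requests no longer costs a memcpy per request. A minimal sketch of the difference (the payload size is illustrative):

```rust
use bytes::Bytes;

fn main() {
    // a 1 MiB generated value, similar to what the workload might produce
    let value = Bytes::from(vec![0u8; 1024 * 1024]);

    // O(1) clone: both handles share the same allocation
    let shared = value.clone();
    assert_eq!(value.as_ptr(), shared.as_ptr());

    // the old Vec<u8> path copied all 1 MiB at each hand-off
    let copied: Vec<u8> = value.to_vec();
    assert_ne!(value.as_ptr(), copied.as_ptr());
}
```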