Tuples! #94

Merged Feb 9, 2021 (39 commits)
Commits (diff below shows changes from 30 of the 39 commits)
eaaee59
add construct tuple and tuple get item to glenside ir
hypercubestart Dec 4, 2020
4b63d37
add yolov3 stuff
hypercubestart Dec 4, 2020
847fd4f
from_relay
hypercubestart Dec 4, 2020
003564c
add rest of relay ops
hypercubestart Dec 4, 2020
73e362f
ad updated efficientnet
hypercubestart Dec 8, 2020
7f46310
fix efficientnet test
hypercubestart Dec 8, 2020
bf1e886
add stuff
hypercubestart Dec 10, 2020
43a6f67
update module def
hypercubestart Dec 10, 2020
dc1950c
temp tvm head
hypercubestart Dec 12, 2020
459058a
add from_relay branches
hypercubestart Dec 12, 2020
eddb105
update tvm ref
hypercubestart Dec 12, 2020
2cc98b1
done with transpose and sigmoid
hypercubestart Dec 12, 2020
8531085
add more relay ops
hypercubestart Dec 15, 2020
43e7ff3
code for from_relay
hypercubestart Dec 21, 2020
7652b58
update glenside temp
hypercubestart Dec 21, 2020
6765c41
small fix
hypercubestart Dec 21, 2020
e1e09be
update tvm
hypercubestart Dec 21, 2020
00cc595
fixing from_relay
hypercubestart Dec 22, 2020
d82759d
add tuple variant
hypercubestart Dec 23, 2020
d8940be
error in writenpy for vector i think
hypercubestart Dec 23, 2020
1a801fa
cast to float and min/max analysis
hypercubestart Dec 24, 2020
3664815
from relay done
hypercubestart Dec 24, 2020
24f78ef
cargo fmt
hypercubestart Dec 25, 2020
d190fd5
cleanup
hypercubestart Dec 25, 2020
9a03c60
fmt
hypercubestart Dec 25, 2020
9da68b8
dont do codegen yet
hypercubestart Dec 25, 2020
41fe39b
fix tests
hypercubestart Dec 25, 2020
8d10910
fix suggestions
hypercubestart Jan 12, 2021
fb92d47
update tvm
hypercubestart Jan 13, 2021
0e3257d
use relay type information
hypercubestart Jan 17, 2021
e2baa41
fix parts
hypercubestart Jan 20, 2021
1e7fdc5
move comments
hypercubestart Jan 20, 2021
b5ca9d0
suggestions
hypercubestart Jan 26, 2021
d13f565
add working tests for non-opaque relay ops
hypercubestart Jan 27, 2021
7f3cd27
new tests
hypercubestart Jan 28, 2021
89ab002
add round
hypercubestart Feb 9, 2021
ff24a48
Fix errors in ilp.rs, plus typos
gussmith23 Feb 9, 2021
49dc976
Simplify logic
gussmith23 Feb 9, 2021
663b9a7
remove warning + rerun tests
gussmith23 Feb 9, 2021
6 changes: 3 additions & 3 deletions Cargo.toml
@@ -39,8 +39,8 @@ git = "https://github.com/gussmith23/rplex"
# issue.
# NOTE Keep glenside-evaluation in sync with this
# If the versions get out of whack, we'll probably have some weird errors.
rev = "ff5450f9cbe07d57e73594c5fbd68f3bd665c2dc"
git = "https://github.com/gussmith23/tvm"
rev = "4ed759f6b6cafbab707f95b57762913a1f57c021"
git = "https://github.com/hypercubestart/incubator-tvm"
optional = true

[dependencies.egg]
@@ -53,4 +53,4 @@ features = ["approx"]

[dependencies.serde]
version = "1.0"
features = ["derive"]
features = ["derive"]
4 changes: 2 additions & 2 deletions Dockerfile
@@ -27,10 +27,10 @@ ENV LLVM_CONFIG_PATH=/usr/lib/llvm-10/bin/llvm-config
# Build TVM with Rust bindings
# TODO(@gussmith23) Switch this to TVM mainline
# once https://github.com/apache/incubator-tvm/pull/6563 is merged
-RUN cd /root && git clone https://github.com/gussmith23/tvm tvm --recursive
+RUN cd /root && git clone https://github.com/hypercubestart/incubator-tvm tvm --recursive
WORKDIR /root/tvm
RUN git fetch
-RUN git checkout 14a786d3885304e5964df397a50edd19a759f903
+RUN git checkout 4ed759f6b6cafbab707f95b57762913a1f57c021
RUN git submodule sync && git submodule update
RUN echo 'set(USE_LLVM $ENV{LLVM_CONFIG_PATH})' >> config.cmake
RUN echo 'set(USE_RPC ON)' >> config.cmake
344 changes: 344 additions & 0 deletions models/efficientnet-lite4-11.relay

Large diffs are not rendered by default.

357 changes: 357 additions & 0 deletions models/yolov3.relay

Large diffs are not rendered by default.

215 changes: 210 additions & 5 deletions src/codegen.rs
@@ -306,7 +306,10 @@ pub fn find_vars(expr: &Expr, id: Id) -> Vec<String> {
find_vars_recursive_helper(set, expr, id);
}
// Box<[Id]>
-Language::RelayOperatorCall(ids) | Language::List(ids) | Language::Shape(ids) => {
+Language::RelayOperatorCall(ids)
+| Language::List(ids)
+| Language::Shape(ids)
+| Language::ConstructTuple(ids) => {
for id in ids.iter() {
find_vars_recursive_helper(set, expr, *id);
}
@@ -324,7 +327,8 @@
| &Language::ShapeInsertAxis(ids)
| &Language::ShapeRemoveAxis(ids)
| &Language::AccessShape(ids)
-| &Language::AccessSqueeze(ids) => {
+| &Language::AccessSqueeze(ids)
+| &Language::TupleGetItem(ids) => {
for id in ids.iter() {
find_vars_recursive_helper(set, expr, *id);
}
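For context: the two new constructs matched above are ordinary egg language variants, and their arities can be read straight off these match arms (ConstructTuple is variable-arity like RelayOperatorCall and List; TupleGetItem takes exactly two children, the tuple and the index). Below is a minimal sketch of what the declarations might look like with egg's `define_language!` macro; the s-expression atom names are assumptions, and the real definitions live in Glenside's language module, which this view of the diff does not show.

```rust
use egg::{define_language, Id};

define_language! {
    pub enum Language {
        // ...Glenside's existing variants elided...

        // Variable arity (Box<[Id]>): one child per tuple element.
        "construct-tuple" = ConstructTuple(Box<[Id]>),
        // Fixed arity ([Id; 2]): the tuple expression and the element index.
        "tuple-get-item" = TupleGetItem([Id; 2]),
    }
}
```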
@@ -411,7 +415,10 @@ pub fn generate_worklist_for_codegen(expr: &Expr, id: Id) -> Vec<Id> {
}
}
// Box<[Id]>
-Language::RelayOperatorCall(ids) | Language::Shape(ids) | Language::List(ids) => {
+Language::RelayOperatorCall(ids)
+| Language::Shape(ids)
+| Language::List(ids)
+| Language::ConstructTuple(ids) => {
for id in ids.iter() {
helper(worklist, expr, *id);
}
@@ -423,7 +430,8 @@
| &Language::AccessReshape(ids)
| &Language::ShapeInsertAxis(ids)
| &Language::ShapeRemoveAxis(ids)
-| &Language::AccessSqueeze(ids) => {
+| &Language::AccessSqueeze(ids)
+| &Language::TupleGetItem(ids) => {
for id in ids.iter() {
helper(worklist, expr, *id);
}
@@ -1043,6 +1051,12 @@ add_with_broadcasting((float*) {out}, (float*) {X}, (float*) {Y}, (int*) {out_s

Some(add_out)
}
RelayOperator::RelayLeakyReLU => todo!(),
RelayOperator::RelaySigmoid => todo!(),
RelayOperator::RelayAvgPool2D => todo!(),
RelayOperator::RelayUpSampling => todo!(),
RelayOperator::RelayMaximum => todo!(),
RelayOperator::RelayMinimum => todo!(),
}
}
&Language::AccessWindows([access_id, filters_shape_id, stride_shape_id]) => {
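The six `todo!()` arms above (RelayLeakyReLU through RelayMinimum) keep this codegen match exhaustive while deferring implementation, consistent with the "dont do codegen yet" commit; for now those operators are only exercised through the Relay-level tests. Purely as an illustration of the pattern an implementation could follow, here is a hypothetical sketch modeled on the RelayAdd arm above, which formats a call to a C helper into the generated code and returns the output buffer's name. The function signature and the C helper `sigmoid1d` are assumptions, not code from this PR.

```rust
/// Hypothetical sketch only: what filling in the RelaySigmoid arm could
/// look like, following the RelayAdd pattern above (format a call to a C
/// helper into the generated code, then return the output buffer's name).
/// `sigmoid1d` and this signature are assumptions, not part of this PR.
fn codegen_relay_sigmoid(c_code: &mut String, out: &str, x: &str, out_len: usize) -> Option<String> {
    c_code.push_str(&format!(
        "sigmoid1d((float*) {out}, (float*) {x}, {len});\n",
        out = out,
        x = x,
        len = out_len
    ));
    Some(out.to_string())
}
```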
@@ -1730,6 +1744,8 @@ if (i{i} < {dim_len}) {{
| Language::RelayOperator(_) => None,

&Language::Literal(_)
| &Language::ConstructTuple(_)
| &Language::TupleGetItem(_)
| &Language::SystolicArrayConv2dIm2colNchwOihwWithBlocking(_)
| &Language::SystolicArrayConv2dIm2colNhwcHwioWithBlocking(_)
| &Language::SystolicArrayConv2dNchwOihwWithBlocking(_)
@@ -1761,7 +1777,7 @@ mod tests {
use ndarray::{SliceInfo, SliceOrIndex};
use ndarray_npy::{read_npy, write_npy};
use ndarray_rand::{rand_distr::Uniform, RandomExt};
-use rand::{rngs::SmallRng, Rng, SeedableRng};
+use rand::{rngs::SmallRng, SeedableRng};
use std::fs::File;
use std::io::Write;
use std::iter::FromIterator;
@@ -1795,7 +1811,9 @@ mod tests {

let mut cmd = Command::new("python3");
cmd.arg(script_filepath);
cmd.arg("--npy_out_filepath");
cmd.arg(&output_filepath);
cmd.arg("--npy_arg_filepath");
cmd.stdin(std::process::Stdio::piped())
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped());
@@ -1836,6 +1854,90 @@ mod tests {
relay_output
}

fn run_relay_tuple_out(
env: &HashMap<String, ArrayD<f32>>,
shapes_vec: &Vec<(String, Vec<usize>)>,
relay_str: &str,
outputs: usize,
) -> Vec<ArrayD<f32>> {
let script_filepath = format!(
"{}/src/language/from_relay/run_relay.py",
env!("CARGO_MANIFEST_DIR")
);

let mut cmd = Command::new("python3");
cmd.arg(script_filepath);
cmd.arg("--npy_out_filepath");
let mut output_paths = Vec::new();
for _ in 0..outputs {
// https://www.reddit.com/r/rust/comments/38jhva/piping_string_to_child_process_stdin/crvlqcd/?utm_source=reddit&utm_medium=web2x&context=3
// Output filename
// TODO(@gussmith23) Do we want this RNG to use SEED?
// I initially attempted to do this, but was running into issues
// (I think the same filename kept being generated b/c I wasn't
// using the RNG carefully...but maybe there's also something
// wrong w/ how I'm reading files!)
let output_filepath = std::env::temp_dir().with_file_name(format!(
"output-{}.npy",
rand::thread_rng()
.sample_iter(&rand::distributions::Alphanumeric)
.take(30)
.collect::<String>()
));
cmd.arg(&output_filepath);
output_paths.push(
output_filepath
.clone()
.into_os_string()
.into_string()
.unwrap(),
);
}
cmd.arg("--npy_arg_filepath");
cmd.stdin(std::process::Stdio::piped())
.stdout(std::process::Stdio::piped())
.stderr(std::process::Stdio::piped());
for (name, _) in shapes_vec.iter() {
let value = env.get(name).unwrap();
// TODO(@gussmith23) output type assumption
let filepath = std::env::temp_dir().with_file_name(format!(
"arg-{}.npy",
rand::thread_rng()
.sample_iter(&rand::distributions::Alphanumeric)
.take(30)
.collect::<String>()
));
write_npy(&filepath, value).unwrap();
cmd.arg(filepath);
}

let mut proc = cmd.spawn().ok().expect("Failed to spawn process");
proc.stdin
.as_mut()
.unwrap()
.write_all(relay_str.as_bytes())
.unwrap();
let output = proc.wait_with_output().unwrap();
// Check that it ran.
assert!(
output.status.success(),
"Running Relay code failed with code {:?}.\nstdout:\n{}\nstderr:\n{}",
output.status.code(),
std::str::from_utf8(output.stdout.as_slice())
.expect("Could not convert stdout to UTF8"),
std::str::from_utf8(output.stderr.as_slice())
.expect("Could not convert stderr to UTF8")
);

// TODO(@gussmith23) output type assumption
let mut relay_outputs = Vec::new();
for output_filepath in output_paths.iter() {
let relay_output: ndarray::ArrayD<f32> = read_npy(output_filepath).unwrap();
relay_outputs.push(relay_output);
}
relay_outputs
}

#[test]
fn transpose() {
let shape = vec![1, 20, 300, 3];
@@ -4042,4 +4144,107 @@ int main() {{
.expect("Could not convert stderr to UTF8")
);
}

#[test]
#[should_panic(expected = "unfinished test")]
fn relay_model_yolov3() {
// Generate yolov3 with directions from:
// https://tvm.apache.org/docs/tutorials/frontend/from_darknet.html
let filename = PathBuf::from(format!(
"{}/models/yolov3.relay",
env!("CARGO_MANIFEST_DIR")
));
let relay = std::fs::read_to_string(&filename).unwrap();
const SEED: u64 = 23;
let mut tensor_rng = SmallRng::seed_from_u64(SEED);

let module = tvm::ir::module::IRModule::parse("", relay.clone()).unwrap();
let (expr, shapes_vec) = crate::language::from_relay::from_relay(
&module,
true,
&vec![
crate::language::RelayOperator::RelayBatchNormInference,
crate::language::RelayOperator::RelaySoftmax,
crate::language::RelayOperator::RelayLeakyReLU,
crate::language::RelayOperator::RelayBiasAdd,
crate::language::RelayOperator::RelayAdd,
crate::language::RelayOperator::RelaySigmoid,
crate::language::RelayOperator::RelayUpSampling,
],
);
let mut env = HashMap::default();
let mut value_env = HashMap::default();
for (k, v) in &shapes_vec {
env.insert(k.clone(), v.clone());
value_env.insert(
k.clone(),
ndarray::ArrayD::<f32>::random_using(
v.clone(),
Uniform::new(-2f32, 2f32),
&mut tensor_rng,
),
);
}

let mut egraph = EGraph::new(MyAnalysis {
name_to_shape: env.clone(),
});

let _id = egraph.add_expr(&expr);

let _result = run_relay_tuple_out(&value_env, &shapes_vec, &relay, 12);
todo!("unfinished test")
}

#[test]
#[should_panic(expected = "unfinished test")]
fn relay_model_efficientnet_lite4_11() {
// efficientnet onnx model source: https://github.com/onnx/models/blob/master/vision/classification/efficientnet-lite4/model/efficientnet-lite4-11.onnx
// imported into relay
let filename = PathBuf::from(format!(
"{}/models/efficientnet-lite4-11.relay",
env!("CARGO_MANIFEST_DIR")
));
let relay = std::fs::read_to_string(&filename).unwrap();
const SEED: u64 = 23;
let mut tensor_rng = SmallRng::seed_from_u64(SEED);

let module = tvm::ir::module::IRModule::parse("", relay.clone()).unwrap();
let (expr, shapes_vec) = crate::language::from_relay::from_relay(
&module,
true,
&vec![
crate::language::RelayOperator::RelayBatchNormInference,
crate::language::RelayOperator::RelaySoftmax,
crate::language::RelayOperator::RelayLeakyReLU,
crate::language::RelayOperator::RelayBiasAdd,
crate::language::RelayOperator::RelayAdd,
crate::language::RelayOperator::RelayMaximum,
crate::language::RelayOperator::RelayMinimum,
crate::language::RelayOperator::RelayAvgPool2D,
],
);
let mut env = HashMap::default();
let mut value_env = HashMap::default();
for (k, v) in &shapes_vec {
env.insert(k.clone(), v.clone());
value_env.insert(
k.clone(),
ndarray::ArrayD::<f32>::random_using(
v.clone(),
Uniform::new(-2f32, 2f32),
&mut tensor_rng,
),
);
}

let mut egraph = EGraph::new(MyAnalysis {
name_to_shape: env.clone(),
});

let _id = egraph.add_expr(&expr);

let _result = run_relay(&value_env, &shapes_vec, &relay);
todo!("unfinished test")
}
}
4 changes: 4 additions & 0 deletions src/extraction/mod.rs
@@ -69,6 +69,8 @@ impl egg::CostFunction<Language> for MonolithicCostFunction<'_> {
| Language::SystolicArray(_)
| Language::SystolicArrayWithBlocking(_)
| Language::Usize(_)
| Language::ConstructTuple(_)
| Language::TupleGetItem(_)
| Language::AccessSlice(_)
| Language::AccessConcatenate(_)
| Language::AccessPad(_)
@@ -173,6 +175,8 @@ impl CostFunction<Language> for SimpleCostFunction {
Language::SystolicArrayConv2dNhwcHwioWithBlocking(_) => todo!(),
Language::SystolicArrayConv2dIm2colNchwOihwWithBlocking(_) => todo!(),
Language::SystolicArrayConv2dIm2colNhwcHwioWithBlocking(_) => todo!(),
Language::ConstructTuple(_) => todo!(),
Language::TupleGetItem(_) => todo!(),

// Cannot extract compute: compute must be lowered to an atom.
Compute(_) => std::usize::MAX,
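For readers unfamiliar with egg's extraction: a CostFunction assigns each e-node a cost equal to its own operator cost plus the already-minimized costs of its children, and the extractor then picks the cheapest node in every e-class, so the `todo!()` arms added here only fire if extraction actually reaches a tuple node. A minimal sketch of the trait's shape, assuming egg circa 0.6 as pinned by this repo; `SketchCost` and its unit costs are illustrative, not Glenside's real cost model.

```rust
use egg::{CostFunction, Id, Language as EggLanguage};

use crate::language::Language; // Glenside's IR enum, per this repo's paths.

struct SketchCost;

impl CostFunction<Language> for SketchCost {
    type Cost = usize;

    fn cost<C>(&mut self, enode: &Language, mut costs: C) -> Self::Cost
    where
        C: FnMut(Id) -> Self::Cost,
    {
        // Per-operator cost. Nodes that must never be extracted can return
        // usize::MAX (as Compute does above) rather than panicking.
        let op_cost: usize = match enode {
            // Tuple plumbing: nominal cost, for the sake of the sketch.
            Language::ConstructTuple(_) | Language::TupleGetItem(_) => 1,
            _ => 1,
        };
        // Cost of the whole term rooted here: own cost plus the children's.
        enode.fold(op_cost, |sum, id| sum.saturating_add(costs(id)))
    }
}
```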