Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Scan to loop #1090

Draft
wants to merge 16 commits into
base: 0.21.pre
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
11 changes: 1 addition & 10 deletions .travis/bundle-entrypoint.sh
Original file line number Diff line number Diff line change
Expand Up @@ -90,17 +90,8 @@ net_bench mobilenet_v2_1 pass $CACHEDIR/mobilenet_v2_1.4_224_frozen.pb -i 1,224,
net_bench inceptionv1q pass $CACHEDIR/inceptionv1_quant.nnef.tar.gz --nnef-tract-core
net_bench inceptionv3 pass $CACHEDIR/inception_v3_2016_08_28_frozen.pb -i 1,299,299,3,f32

net_bench kaldi_librispeech_clean_tdnn_lstm_1e_256 2600ms \
$CACHEDIR/en_libri_real/model.raw -f kaldi --output-node output \
--kaldi-downsample 3 --kaldi-left-context 5 --kaldi-right-context 15 --kaldi-adjust-final-offset -5 \
-i 264,40

net_bench kaldi_librispeech_clean_tdnn_lstm_1e_256 pulse_240ms \
$CACHEDIR/en_libri_real/model.raw -f kaldi --output-node output \
--kaldi-downsample 3 --kaldi-left-context 5 --kaldi-right-context 15 --kaldi-adjust-final-offset -5 \
-i S,40 --pulse 24 \

net_bench mdl-en-2019-Q3-librispeech_onnx 2600ms $CACHEDIR/en_libri_real/model.onnx --output-node output -i 264,40
net_bench mdl-en-2019-Q3-librispeech_onnx 2600ms $CACHEDIR/en_libri_real/model.onnx --output-node output -i 264,40
net_bench mdl-en-2019-Q3-librispeech_onnx pulse_240ms $CACHEDIR/en_libri_real/model.onnx --output-node output -i S,40 --pulse 24
net_bench en_tdnn_lstm_bn_q7 2600ms $CACHEDIR/en_tdnn_lstm_bn_q7/model.onnx --output-node output -i 264,40
net_bench en_tdnn_lstm_bn_q7 pulse_240ms $CACHEDIR/en_tdnn_lstm_bn_q7/model.onnx --output-node output -i S,40 --pulse 24
Expand Down
26 changes: 1 addition & 25 deletions .travis/cli-tests.sh
Original file line number Diff line number Diff line change
Expand Up @@ -55,12 +55,6 @@ do
$t
done

echo
echo $WHITE • kaldi/test_cases $NC
echo

( cd kaldi/test_cases ; ./run_all.sh )

echo
echo $WHITE • onnx/test_cases $NC
echo
Expand Down Expand Up @@ -145,27 +139,9 @@ $TRACT_RUN $CACHEDIR/hey_snips_v4_model17.pb -i S,20,f32 \
dump -q \
--assert-op-count AddAxis 0

$TRACT_RUN $CACHEDIR/en_libri_real/model.raw.txt \
-f kaldi --output-node output \
--kaldi-downsample 3 --kaldi-left-context 5 --kaldi-right-context 15 --kaldi-adjust-final-offset -5 \
--input-facts-from-bundle $CACHEDIR/en_libri_real/io.npz \
run \
--input-from-bundle $CACHEDIR/en_libri_real/io.npz \
--allow-random-input \
--assert-output-bundle $CACHEDIR/en_libri_real/io.npz

$TRACT_RUN $CACHEDIR/en_libri_real/model.raw \
-f kaldi --output-node output \
--kaldi-downsample 3 --kaldi-left-context 5 --kaldi-right-context 15 --kaldi-adjust-final-offset -5 \
--input-facts-from-bundle $CACHEDIR/en_libri_real/io.npz \
run \
--input-from-bundle $CACHEDIR/en_libri_real/io.npz \
--allow-random-input \
--assert-output-bundle $CACHEDIR/en_libri_real/io.npz

$TRACT_RUN $CACHEDIR/en_libri_real/model.onnx \
--output-node output \
--kaldi-left-context 5 --kaldi-right-context 15 --kaldi-adjust-final-offset -5 \
--edge-left-context 5 --edge-right-context 15 \
--input-facts-from-bundle $CACHEDIR/en_libri_real/io.npz \
run \
--input-from-bundle $CACHEDIR/en_libri_real/io.npz \
Expand Down
7 changes: 7 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
@@ -1,3 +1,10 @@
# Unreleased, targeting 0.21
* [kaldi] remove experimental kaldi support

# 0.20.5 - 2023-05-26
* Various bugfix around Einsum
* Einsum now has functions to translate to MatMul and other axes manipulations

# 0.20.0, 0.20.1, 0.20.2, 0.20.3 - 2023-04-25
* [optim] 32x32 f32 AMX kernel (for Apple Silicon M family)
* [optim] bunch of AMX512F kernels (square, skinny, vector)
Expand Down
1 change: 0 additions & 1 deletion Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ members = [
"tensorflow",
"onnx-opl",
"onnx",
"kaldi",
"libcli",
"cli",
"ffi",
Expand Down
2 changes: 1 addition & 1 deletion ci/tract-ci-minion/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "tract-ci-minion"
version = "0.20.5-pre"
version = "0.20.6-pre"
edition = "2021"

[workspace]
Expand Down
22 changes: 10 additions & 12 deletions cli/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "tract"
version = "0.20.5-pre"
version = "0.20.6-pre"
authors = [ "Romain Liautaud <romain.liautaud@snips.ai>", "Mathieu Poumeyrol <kali@zoy.org>"]
license = "MIT/Apache-2.0"
description = "Tiny, no-nonsense, self contained, TensorFlow and ONNX inference"
Expand Down Expand Up @@ -34,19 +34,17 @@ rustls.workspace = true
scan_fmt.workspace = true
serde.workspace = true
serde_json.workspace = true
tract-core = { version = "=0.20.5-pre", path = "../core" }
tract-hir = { version = "=0.20.5-pre", path = "../hir" }
tract-nnef = { version = "=0.20.5-pre", path = "../nnef" }
tract-libcli = { version = "=0.20.5-pre", path = "../libcli" }
tract-pulse-opl = { optional = true, version = "=0.20.5-pre", path = "../pulse-opl" }
tract-pulse = { optional = true, version = "=0.20.5-pre", path = "../pulse" }
tract-kaldi = { optional = true, version = "=0.20.5-pre", path = "../kaldi" }
tract-onnx = { optional = true, version = "=0.20.5-pre", path = "../onnx" }
tract-tensorflow = { optional = true, version = "=0.20.5-pre", path = "../tensorflow" }
tract-core = { version = "=0.20.6-pre", path = "../core" }
tract-hir = { version = "=0.20.6-pre", path = "../hir" }
tract-nnef = { version = "=0.20.6-pre", path = "../nnef" }
tract-libcli = { version = "=0.20.6-pre", path = "../libcli" }
tract-pulse-opl = { optional = true, version = "=0.20.6-pre", path = "../pulse-opl" }
tract-pulse = { optional = true, version = "=0.20.6-pre", path = "../pulse" }
tract-onnx = { optional = true, version = "=0.20.6-pre", path = "../onnx" }
tract-tensorflow = { optional = true, version = "=0.20.6-pre", path = "../tensorflow" }

[features]
default = ["kaldi", "onnx", "tf", "pulse", "pulse-opl"]
kaldi = [ "tract-kaldi", "tract-libcli/hir" ]
default = ["onnx", "tf", "pulse", "pulse-opl"]
onnx = [ "tract-onnx", "tract-libcli/hir", "tract-libcli/onnx" ]
pulse-opl = [ "tract-pulse-opl" ]
pulse = [ "tract-pulse", "tract-pulse-opl" ]
Expand Down
60 changes: 21 additions & 39 deletions cli/src/dump.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
use crate::params::SomeGraphDef;
use crate::Parameters;
use crate::tensor::run_params_from_subcommand;
use crate::Parameters;
#[allow(unused_imports)]
use nu_ansi_term::Style;
use tract_hir::internal::*;
Expand All @@ -19,8 +19,6 @@ pub fn annotate_with_graph_def(
) -> TractResult<()> {
match graph_def {
SomeGraphDef::NoGraphDef => Ok(()),
#[cfg(feature = "kaldi")]
SomeGraphDef::Kaldi(kaldi) => annotate_with_kaldi(annotations, model, kaldi),
SomeGraphDef::Nnef(_) => todo!(),
#[cfg(feature = "onnx")]
SomeGraphDef::Onnx(onnx, _) => annotate_with_onnx_model(annotations, model, onnx),
Expand All @@ -29,30 +27,6 @@ pub fn annotate_with_graph_def(
}
}

#[cfg(feature = "kaldi")]
fn annotate_with_kaldi(
annotations: &mut Annotations,
model: &dyn Model,
proto_model: &tract_kaldi::KaldiProtoModel,
) -> TractResult<()> {
use tract_kaldi::model::NodeLine;
let bold = Style::new().bold();
for (name, proto_node) in &proto_model.config_lines.nodes {
if let Ok(node_id) = model.node_id_by_name(name) {
let mut vs = vec![];
if let NodeLine::Component(compo) = proto_node {
let comp = &proto_model.components[&compo.component];
for (k, v) in &comp.attributes {
let value = format!("{v:?}");
vs.push(format!("Attr {}: {:.240}", bold.paint(k), value));
}
}
annotations.node_mut(node_id.into()).sections.push(vs)
}
}
Ok(())
}

#[cfg(feature = "tf")]
fn annotate_with_tf_graph_def(
annotations: &mut Annotations,
Expand All @@ -72,7 +46,7 @@ fn annotate_with_tf_graph_def(
} else {
format!("{:?}", a.1)
};
v.push(format!("Attr {}: {:.240}", bold.paint(a.0), value));
v.push(format!("Attr {}: {:.300}", bold.paint(a.0), value));
}
annotations.node_mut(node_id.into()).sections.push(v);
}
Expand All @@ -88,13 +62,11 @@ fn annotate_with_onnx_model(
) -> TractResult<()> {
let bold = Style::new().bold();
for gnode in model_proto.graph.as_ref().unwrap().node.iter() {
let mut node_name = &gnode.name;
if !node_name.is_empty() && gnode.output.len() > 0 {
node_name = &gnode.output[0];
} else if let Some(n) = gnode.output.get(0) {
node_name = n;
}
if let Ok(id) = model.node_id_by_name(node_name) {
if let Some(id) = model
.node_id_by_name(&gnode.name)
.ok()
.or_else(|| gnode.output.get(0).and_then(|n| model.node_id_by_name(n).ok()))
{
let mut v = vec![];
for a in gnode.attribute.iter() {
let value = if let Some(t) = &a.t {
Expand Down Expand Up @@ -131,7 +103,14 @@ pub fn handle(
.downcast_ref::<TypedModel>()
.context("Can only profile typed models")?;
let inputs = retrieve_or_make_inputs(model, &run_params)?;
tract_libcli::profile::profile(model, bench_limits, &mut annotations, &inputs[0], None, options.folded)?;
tract_libcli::profile::profile(
model,
bench_limits,
&mut annotations,
&inputs[0],
None,
options.folded,
)?;
}

if sub_matches.is_present("axes") || sub_matches.is_present("axes-names") {
Expand Down Expand Up @@ -174,7 +153,8 @@ pub fn handle(
rename_outputs(&mut typed, sub_matches)?;
let file = std::fs::File::create(path)?;
let encoder = flate2::write::GzEncoder::new(file, flate2::Compression::default());
nnef.write_to_tar_with_config(&typed, encoder, compress_submodels).context("Writting model to tgz")?;
nnef.write_to_tar_with_config(&typed, encoder, compress_submodels)
.context("Writting model to tgz")?;
} else {
bail!("Only typed model can be dumped")
}
Expand All @@ -185,7 +165,8 @@ pub fn handle(
if let Some(mut typed) = model.downcast_ref::<TypedModel>().cloned() {
rename_outputs(&mut typed, sub_matches)?;
let file = std::fs::File::create(path)?;
nnef.write_to_tar_with_config(&typed, file, compress_submodels).context("Writting model to tar")?;
nnef.write_to_tar_with_config(&typed, file, compress_submodels)
.context("Writting model to tar")?;
} else {
bail!("Only typed model can be dumped")
}
Expand Down Expand Up @@ -217,7 +198,8 @@ pub fn handle(
rename_outputs(&mut typed, sub_matches)?;
let proto = tract_nnef::ser::to_proto_model(&nnef, &typed)?;
if path == "-" {
tract_nnef::ast::dump::Dumper::new(&nnef, &mut std::io::stdout()).document(&proto.doc)?;
tract_nnef::ast::dump::Dumper::new(&nnef, &mut std::io::stdout())
.document(&proto.doc)?;
} else {
let mut file = std::fs::File::create(path)?;
tract_nnef::ast::dump::Dumper::new(&nnef, &mut file).document(&proto.doc)?;
Expand Down
8 changes: 3 additions & 5 deletions cli/src/main.rs
Original file line number Diff line number Diff line change
Expand Up @@ -75,7 +75,7 @@ fn main() -> tract_core::anyhow::Result<()> {
.arg(arg!(verbose: -v ... "Sets the level of verbosity."))
.arg(arg!([model] "Sets the model to use"))
.arg(arg!(-f --format [format]
"Hint the model format ('kaldi', 'onnx', 'nnef' or 'tf') instead of guess from extension."))
"Hint the model format ('onnx', 'nnef' or 'tf') instead of guess from extension."))
.arg(Arg::new("input").long("input").short('i').multiple_occurrences(true).takes_value(true).long_help(
"Set input shape and type (@file.pb or @file.npz:thing.npy or 3x4xi32)."))
.arg(Arg::new("constantize").long("constantize").multiple_occurrences(true).takes_value(true).long_help(
Expand All @@ -88,10 +88,8 @@ fn main() -> tract_core::anyhow::Result<()> {

.arg(arg!(--"input-facts-from-bundle" [input_bundle] "Path to an input container (.npz). This only sets input facts."))

.arg(arg!(--"kaldi-adjust-final-offset" [frames] "Adjust value of final offset in network (for reproducibility)"))
.arg(arg!(--"kaldi-downsample" [frames] "Add a subsampling to output on axis 0"))
.arg(arg!(--"kaldi-left-context" [frames] "Add lines of left context to input (dupping first time frame)"))
.arg(arg!(--"kaldi-right-context" [frames] "Add lines of right context to input (dupping last time frame)"))
.arg(arg!(--"edge-left-context" [frames] "Add lines of left context to input (dupping first time frame)").alias("kaldi-left-context"))
.arg(arg!(--"edge-right-context" [frames] "Add lines of right context to input (dupping last time frame)").alias("kaldi-right-context"))

.arg(arg!(--"onnx-test-data-set" [data_set] "Use onnx-test data-set as input (expect test_data_set_N dir with input_X.pb, etc. inside)"))
.arg(arg!(--"onnx-ignore-output-shapes" "Ignore output shapes from model (workaround for pytorch export bug with mask axes)"))
Expand Down
66 changes: 6 additions & 60 deletions cli/src/params.rs
Original file line number Diff line number Diff line change
Expand Up @@ -65,8 +65,6 @@ impl ModelLocation {
#[allow(clippy::large_enum_variant)]
pub enum SomeGraphDef {
NoGraphDef,
#[cfg(feature = "kaldi")]
Kaldi(tract_kaldi::KaldiProtoModel),
Nnef(tract_nnef::ProtoModel),
#[cfg(feature = "onnx")]
Onnx(tract_onnx::pb::ModelProto, tract_onnx::model::ParseResult),
Expand Down Expand Up @@ -157,9 +155,6 @@ impl Parameters {
let format = matches.value_of("format").unwrap_or(
if location.path().extension().map(|s| s == "onnx").unwrap_or(false) {
"onnx"
} else if location.path().extension().map(|s| s == "raw" || s == "txt").unwrap_or(false)
{
"kaldi"
} else if location.is_dir()
|| location.path().to_string_lossy().ends_with(".tar")
|| location.path().to_string_lossy().ends_with(".tar.gz")
Expand All @@ -171,22 +166,6 @@ impl Parameters {
},
);
let triplet: (SomeGraphDef, Box<dyn Model>, Option<TfExt>) = match format {
#[cfg(feature = "kaldi")]
"kaldi" => {
let kaldi = tract_kaldi::kaldi();
info_usage("loaded framework (kaldi)", probe);
let mut graph = kaldi.proto_model_for_read(&mut *location.read()?)?;
info_usage("proto model loaded", probe);
if let Some(i) = matches.value_of("kaldi-adjust-final-offset") {
graph.adjust_final_offset = i.parse()?;
}
let parsed = kaldi.model_for_proto_model_with_symbols(&graph, symbol_table)?;
if need_graph {
(SomeGraphDef::Kaldi(graph), Box::new(parsed), Option::<TfExt>::None)
} else {
(SomeGraphDef::NoGraphDef, Box::new(parsed), Option::<TfExt>::None)
}
}
"nnef" => {
let nnef = super::nnef(matches);
let mut proto_model = if location.is_dir() {
Expand Down Expand Up @@ -328,36 +307,7 @@ impl Parameters {
Ok(triplet)
}

fn kaldi_downsample<F, O>(raw_model: &mut Graph<F, O>, period: isize) -> TractResult<()>
where
F: std::fmt::Debug + Clone + Fact,
O: std::fmt::Debug + std::fmt::Display + AsRef<dyn Op> + AsMut<dyn Op> + Clone,
Graph<F, O>: SpecialOps<F, O>,
tract_core::ops::Downsample: Into<O>,
{
if period != 1 {
let mut outputs = raw_model.output_outlets()?.to_vec();
let output_name = raw_model.node(outputs[0].node).name.clone();
raw_model.node_mut(outputs[0].node).name = format!("{output_name}-old");
let id = raw_model.wire_node(
output_name,
tract_core::ops::Downsample::new(0, period as _, 0),
&outputs[0..1],
)?[0];
if let Some(label) = raw_model.outlet_label(outputs[0]).map(|s| s.to_string()) {
raw_model.set_outlet_label(id, label)?;
}
outputs[0] = id;
raw_model.set_output_outlets(&outputs)?;
}
Ok(())
}

fn kaldi_context<F, O>(
raw_model: &mut Graph<F, O>,
left: usize,
right: usize,
) -> TractResult<()>
fn edge_context<F, O>(raw_model: &mut Graph<F, O>, left: usize, right: usize) -> TractResult<()>
where
F: std::fmt::Debug + Clone + Fact,
O: std::fmt::Debug + std::fmt::Display + AsRef<dyn Op> + AsMut<dyn Op> + Clone,
Expand Down Expand Up @@ -856,16 +806,12 @@ impl Parameters {
_ => Assertions::default(),
};

if let Some(sub) = matches.value_of("kaldi-downsample") {
dispatch_model_mut_no_pulse!(raw_model, |m| Self::kaldi_downsample(m, sub.parse()?))?;
}

if matches.value_of("kaldi-left-context").is_some()
|| matches.value_of("kaldi-right-context").is_some()
if matches.value_of("edge-left-context").is_some()
|| matches.value_of("edge-right-context").is_some()
{
let left = matches.value_of("kaldi-left-context").unwrap_or("0").parse()?;
let right = matches.value_of("kaldi-right-context").unwrap_or("0").parse()?;
dispatch_model_mut_no_pulse!(raw_model, |m| Self::kaldi_context(m, left, right))?;
let left = matches.value_of("edge-left-context").unwrap_or("0").parse()?;
let right = matches.value_of("edge-right-context").unwrap_or("0").parse()?;
dispatch_model_mut_no_pulse!(raw_model, |m| Self::edge_context(m, left, right))?;
}

if let Some(infer) = raw_model.downcast_mut::<InferenceModel>() {
Expand Down
6 changes: 3 additions & 3 deletions core/Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
[package]
name = "tract-core"
version = "0.20.5-pre"
version = "0.20.6-pre"
license = "MIT/Apache-2.0"
authors = ["Mathieu Poumeyrol <kali@zoy.org>"]
description = "Tiny, no-nonsense, self contained, TensorFlow and ONNX inference"
Expand Down Expand Up @@ -29,8 +29,8 @@ num-traits.workspace = true
num-complex.workspace = true
rustfft.workspace = true
smallvec.workspace = true
tract-linalg = { version = "=0.20.5-pre", path = "../linalg" }
tract-data = { version = "=0.20.5-pre", path = "../data" }
tract-linalg = { version = "=0.20.6-pre", path = "../linalg" }
tract-data = { version = "=0.20.6-pre", path = "../data" }

[features]
default = [ ]
Expand Down
Loading