Skip to content
This repository has been archived by the owner on Dec 21, 2023. It is now read-only.

Add NMS Layer to the OD Neural Network #3274

Merged
merged 6 commits into from
Jul 29, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
34 changes: 33 additions & 1 deletion src/ml/neural_net/model_spec.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -38,13 +38,15 @@ using CoreML::Specification::NeuralNetwork;
using CoreML::Specification::NeuralNetworkImageScaler;
using CoreML::Specification::NeuralNetworkLayer;
using CoreML::Specification::NeuralNetworkPreprocessing;
using CoreML::Specification::NonMaximumSuppressionLayerParams;
using CoreML::Specification::PaddingLayerParams;
using CoreML::Specification::PaddingLayerParams_PaddingConstant;
using CoreML::Specification::Pipeline;
using CoreML::Specification::PoolingLayerParams;
using CoreML::Specification::ReshapeDynamicLayerParams;
using CoreML::Specification::ReshapeStaticLayerParams;
using CoreML::Specification::SamePadding;
using CoreML::Specification::SliceDynamicLayerParams;
using CoreML::Specification::SplitNDLayerParams;
using CoreML::Specification::SqueezeLayerParams;
using CoreML::Specification::TransposeLayerParams;
Expand Down Expand Up @@ -105,7 +107,6 @@ void update_weight_params(const std::string& name, const float_array& value, Wei
}

Span<const float> out(value.data(), value.size());

#ifdef TURI_USE_FLOAT16

if (use_quantization && is_convertible_to_fp16(out)) {
Expand Down Expand Up @@ -1200,6 +1201,37 @@ void model_spec::add_get_shape(const std::string& name,
layer->mutable_getshape();
}

/**
 * Appends a CoreML NonMaximumSuppression layer to the wrapped NeuralNetwork.
 *
 * \param name The name assigned to the new layer
 * \param inputs The names of the blobs feeding this layer, added in order
 * \param outputs The names of the blobs this layer produces, added in order
 * \param iou_threshold Default IOU threshold written into the layer params
 * \param confidence_threshold Default score threshold written into the layer
 *     params
 * \param max_boxes Maximum number of boxes recorded in the layer params
 * \param per_class_suppression When false, suppression happens across all
 *     classes.
 */
void model_spec::add_nms_layer(const std::string& name, const std::vector<std::string>& inputs,
                               const std::vector<std::string>& outputs, float iou_threshold,
                               float confidence_threshold, size_t max_boxes,
                               bool per_class_suppression)  // renamed: fixed "supression" typo
{
  NeuralNetworkLayer* layer = impl_->add_layers();
  layer->set_name(name);
  for (const std::string& input : inputs) {
    layer->add_input(input);
  }
  for (const std::string& output : outputs) {
    layer->add_output(output);
  }
  NonMaximumSuppressionLayerParams* nms_params = layer->mutable_nonmaximumsuppression();
  nms_params->set_iouthreshold(iou_threshold);
  nms_params->set_scorethreshold(confidence_threshold);
  // The project vendors protobuf under the _tc_google namespace; cast keeps
  // the setter's uint64 signature explicit on platforms where size_t differs.
  nms_params->set_maxboxes(static_cast<::_tc_google::protobuf::uint64>(max_boxes));
  nms_params->set_perclasssuppression(per_class_suppression);
}

/**
 * Appends a CoreML SliceDynamic layer whose single output blob shares the
 * layer's name.
 *
 * \param name The name of the layer and of its output
 * \param inputs The names of the blobs feeding this layer, added in order
 */
void model_spec::add_slice_dynamic(const std::string& name, const std::vector<std::string>& inputs)
{
  NeuralNetworkLayer* layer = impl_->add_layers();
  layer->set_name(name);

  // Wire up every requested input blob, preserving the caller's ordering.
  for (size_t i = 0; i < inputs.size(); ++i) {
    layer->add_input(inputs[i]);
  }

  // Exactly one output, named after the layer itself.
  layer->add_output(name);
  layer->mutable_slicedynamic();
}

// Takes ownership of the given CoreML Pipeline proto.
pipeline_spec::pipeline_spec(std::unique_ptr<Pipeline> impl)
    : impl_(std::move(impl)) {}

Expand Down
24 changes: 24 additions & 0 deletions src/ml/neural_net/model_spec.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -530,6 +530,30 @@ class model_spec {
*/
void add_get_shape(const std::string& name, const std::string& input);

/**
 * Appends a dynamic slice (SliceDynamic) layer.
 *
 * \param name The name of the layer and of its output
 * \param inputs The names of the layer's inputs
 */
void add_slice_dynamic(const std::string& name, const std::vector<std::string>& inputs);

/**
 * Appends a non-maximum suppression layer.
 *
 * \param name The name of the layer
 * \param inputs The names of the layer's inputs
 * \param outputs The names of the layer's outputs
 * \param iou_threshold The default value for the IOU threshold
 * \param confidence_threshold The default value for the confidence threshold
 * \param max_boxes The maximum number of boxes on which NMS is run
 * \param per_class_suppression When false, suppression happens for all
 * classes.
 */
void add_nms_layer(const std::string& name, const std::vector<std::string>& inputs,
const std::vector<std::string>& outputs, float iou_threshold,
float confidence_threshold, size_t max_boxes, bool per_class_supression);

private:
std::unique_ptr<CoreML::Specification::NeuralNetwork> impl_;
};
Expand Down
Loading