
Commit

Remove some unused macros. (#2618)
* Remove some unused macros.

* More unused fixes.
LaurentMazare authored Nov 15, 2024
1 parent f689ce5 commit 00d8a0c
Showing 9 changed files with 13 additions and 14 deletions.
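The recurring change in this commit is dropping file-wide `#![allow(unused)]` inner attributes and, where some items really are kept unused on purpose, re-adding the suppression as an item-level `#[allow(unused)]` outer attribute. A minimal sketch of the difference (the `Config` item below is illustrative, not from this repository):

```rust
// File-wide form: placed at the top of a module, this silences the whole
// `unused` lint group (dead_code, unused_imports, unused_variables, ...) for
// every item in the file, which can hide real problems.
// #![allow(unused)]

// Item-level form: the attribute only covers the item it annotates; the rest
// of the file keeps its warnings.
#[allow(unused)]
struct Config {
    retries: u32,
}

fn main() {
    println!("only `Config` is exempt from unused warnings");
}
```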
2 changes: 1 addition & 1 deletion candle-examples/Cargo.toml
@@ -27,7 +27,7 @@ intel-mkl-src = { workspace = true, optional = true }
 num-traits = { workspace = true }
 palette = { version = "0.7.6", optional = true }
 enterpolation = { version = "0.2.1", optional = true}
-pyo3 = { version = "0.22.0", features = ["auto-initialize"], optional = true }
+pyo3 = { version = "0.22.0", features = ["auto-initialize", "abi3-py311"], optional = true }
 rayon = { workspace = true }
 rubato = { version = "0.15.0", optional = true }
 safetensors = { workspace = true }
8 changes: 5 additions & 3 deletions candle-examples/examples/reinforcement-learning/ddpg.rs
@@ -1,5 +1,4 @@
 use std::collections::VecDeque;
-use std::fmt::Display;
 
 use candle::{DType, Device, Error, Module, Result, Tensor, Var};
 use candle_nn::{
@@ -167,6 +166,7 @@ fn track(
     Ok(())
 }
 
+#[allow(unused)]
 struct Actor<'a> {
     varmap: VarMap,
     vb: VarBuilder<'a>,
@@ -211,7 +211,7 @@ impl Actor<'_> {
         let target_network = make_network("target-actor")?;
 
         // this sets the two networks to be equal to each other using tau = 1.0
-        track(&mut varmap, &vb, "target-actor", "actor", &dims, 1.0);
+        track(&mut varmap, &vb, "target-actor", "actor", &dims, 1.0)?;
 
         Ok(Self {
             varmap,
@@ -244,6 +244,7 @@ impl Actor<'_> {
     }
 }
 
+#[allow(unused)]
 struct Critic<'a> {
     varmap: VarMap,
     vb: VarBuilder<'a>,
@@ -287,7 +288,7 @@ impl Critic<'_> {
         let target_network = make_network("target-critic")?;
 
         // this sets the two networks to be equal to each other using tau = 1.0
-        track(&mut varmap, &vb, "target-critic", "critic", &dims, 1.0);
+        track(&mut varmap, &vb, "target-critic", "critic", &dims, 1.0)?;
 
         Ok(Self {
             varmap,
@@ -322,6 +323,7 @@ impl Critic<'_> {
     }
 }
 
+#[allow(unused)]
 #[allow(clippy::upper_case_acronyms)]
 pub struct DDPG<'a> {
     actor: Actor<'a>,
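The two `track` calls above now propagate their `Result` with `?` instead of discarding it. For context on the "tau = 1.0" comments: `track` performs the usual soft update of a target network toward its source network. Below is a standalone sketch of that rule using plain `f32` slices rather than the example's candle `VarMap` variables (the function name and data are illustrative, and the exact arithmetic inside the real `track` may differ):

```rust
// Soft update: target <- tau * source + (1 - tau) * target.
// With tau = 1.0, as in the calls above, the target becomes an exact copy.
fn soft_update(target: &mut [f32], source: &[f32], tau: f32) {
    for (t, s) in target.iter_mut().zip(source) {
        *t = tau * s + (1.0 - tau) * *t;
    }
}

fn main() {
    let mut target = vec![0.0_f32; 3];
    let source = [1.0_f32, 2.0, 3.0];
    soft_update(&mut target, &source, 1.0);
    assert_eq!(target, source); // tau = 1.0 copies the source weights exactly
}
```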
1 change: 0 additions & 1 deletion candle-examples/examples/reinforcement-learning/gym_env.rs
@@ -1,4 +1,3 @@
-#![allow(unused)]
 //! Wrappers around the Python API of Gymnasium (the new version of OpenAI gym)
 use candle::{Device, Result, Tensor};
 use pyo3::prelude::*;
2 changes: 0 additions & 2 deletions candle-examples/examples/reinforcement-learning/main.rs
@@ -1,5 +1,3 @@
-#![allow(unused)]
-
 #[cfg(feature = "mkl")]
 extern crate intel_mkl_src;
 
@@ -14,7 +14,7 @@ fn new_model(
 ) -> Result<(impl Module, VarMap)> {
     let input_size = input_shape.iter().product();
 
-    let mut varmap = VarMap::new();
+    let varmap = VarMap::new();
     let var_builder = VarBuilder::from_varmap(&varmap, dtype, device);
 
     let model = seq()
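The `mut` removed above was never needed: as this change shows, `VarMap` can register variables through a shared reference (its storage sits behind interior mutability), so model construction works with an immutable binding. A rough sketch of the same pattern, using the `candle`/`candle_nn` crates as the examples do and assuming only the `candle_nn::linear` helper (the layer name and sizes are illustrative):

```rust
use candle::{DType, Device, Result};
use candle_nn::{linear, Linear, VarBuilder, VarMap};

fn build_layer() -> Result<(Linear, VarMap)> {
    // No `mut` required: new variables are registered through `&VarMap`.
    let varmap = VarMap::new();
    let vb = VarBuilder::from_varmap(&varmap, DType::F32, &Device::Cpu);
    let layer = linear(4, 2, vb.pp("demo-layer"))?;
    Ok((layer, varmap))
}

fn main() -> Result<()> {
    let (_layer, _varmap) = build_layer()?;
    Ok(())
}
```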
@@ -1,16 +1,16 @@
-#![allow(unused)]
 //! Vectorized version of the gym environment.
 use candle::{DType, Device, Result, Tensor};
 use pyo3::prelude::*;
 use pyo3::types::PyDict;
 
+#[allow(unused)]
 #[derive(Debug)]
 pub struct Step {
     pub obs: Tensor,
     pub reward: Tensor,
     pub is_done: Tensor,
 }
 
+#[allow(unused)]
 pub struct VecGymEnv {
     env: PyObject,
     action_space: usize,
@@ -21,6 +21,7 @@ fn w(res: PyErr) -> candle::Error {
     candle::Error::wrap(res)
 }
 
+#[allow(unused)]
 impl VecGymEnv {
     pub fn new(name: &str, img_dir: Option<&str>, nprocesses: usize) -> Result<VecGymEnv> {
         Python::with_gil(|py| {
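The last `#[allow(unused)]` above sits on an `impl` block; lint attributes apply to everything nested inside the item they annotate, so a single attribute covers all of the block's methods without reintroducing a crate-wide `#![allow(unused)]`. A small illustration (the `Env` type and its methods are made up, not from `VecGymEnv`):

```rust
struct Env;

// One attribute on the impl block silences unused/dead-code warnings for every
// method inside it.
#[allow(unused)]
impl Env {
    fn reset(&self) {}
    fn step(&self, action: usize) {}
}

fn main() {
    let _env = Env;
}
```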
2 changes: 1 addition & 1 deletion candle-pyo3/Cargo.toml
@@ -20,7 +20,7 @@ candle-nn = { workspace = true }
 candle-onnx = { workspace = true, optional = true }
 half = { workspace = true }
 intel-mkl-src = { workspace = true, optional = true }
-pyo3 = { version = "0.22.0", features = ["extension-module", "abi3-py38"] }
+pyo3 = { version = "0.22.0", features = ["extension-module", "abi3-py311"] }
 
 [build-dependencies]
 pyo3-build-config = "0.22"
4 changes: 2 additions & 2 deletions candle-transformers/src/models/encodec.rs
@@ -4,9 +4,8 @@
 //!
 //! Based on implementation from [huggingface/transformers](https://github.com/huggingface/transformers/blob/main/src/transformers/models/encodec/modeling_encodec.py)
 
-#![allow(unused)]
 use candle::{DType, IndexOp, Layout, Module, Result, Shape, Tensor, D};
-use candle_nn::{conv1d, Conv1d, Conv1dConfig, ConvTranspose1d, VarBuilder};
+use candle_nn::{conv1d, Conv1d, ConvTranspose1d, VarBuilder};
 
 // Encodec Model
 // https://github.com/huggingface/transformers/blob/main/src/transformers/models/encodec/modeling_encodec.py
@@ -226,6 +225,7 @@ impl candle::CustomOp2 for CodebookEncode {
 }
 
 // https://github.com/huggingface/transformers/blob/abaca9f9432a84cfaa95531de4c72334f38a42f2/src/transformers/models/encodec/modeling_encodec.py#L340
+#[allow(unused)]
 #[derive(Clone, Debug)]
 pub struct EuclideanCodebook {
     inited: Tensor,
1 change: 0 additions & 1 deletion candle-transformers/src/models/starcoder2.rs
@@ -15,7 +15,6 @@
 //! - [Model Card](https://huggingface.co/bigcode/starcoder)
 //!
 
-#![allow(unused)]
 use candle::{DType, Device, Module, Result, Tensor, D};
 use candle_nn::{layer_norm, linear_b, LayerNorm, Linear, VarBuilder};
 use std::sync::Arc;
