Remove unused variables (#1955)
zasdfgbnm authored Sep 3, 2022
Parent: df3393a · Commit: f7bc341
Showing 10 changed files with 0 additions and 32 deletions.

benchmarks/cpp/nvfuser/batch_norm_channels_first.cpp (0 additions, 4 deletions)

@@ -73,10 +73,6 @@ static void NvFuserScheduler_BatchNorm(
     DataType dtype) {
   TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
 
-  const bool kTraining = true;
-  const float kMomentum = 0.1;
-  const float kEps = 1e-5;
-
   std::vector<int64_t> input_shape{
       benchmark_state.range(0),
       benchmark_state.range(1),

benchmarks/cpp/nvfuser/batch_norm_channels_first_backward.cpp (0 additions, 4 deletions)

@@ -25,7 +25,6 @@ static void setupBatchNorm_BWD(Fusion* fusion, DataType dtype) {
   FusionGuard fg(fusion);
 
   const bool kTraining = true;
-  const float kMomentum = 0.1;
   const float kEps = 1e-5;
 
   // setup fusion
@@ -85,9 +84,6 @@ static void NvFuserScheduler_BatchNorm_BWD(
     DataType dtype) {
   TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
 
-  const bool kTraining = true;
-  const float kEps = 1e-5;
-
   std::vector<int64_t> input_shape{
       benchmark_state.range(0),
       benchmark_state.range(1),

benchmarks/cpp/nvfuser/batch_norm_channels_last.cpp (0 additions, 4 deletions)

@@ -74,10 +74,6 @@ static void NvFuserScheduler_BatchNorm_nhwc(
     DataType dtype) {
   TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
 
-  const bool kTraining = true;
-  const float kMomentum = 0.1;
-  const float kEps = 1e-5;
-
   std::vector<int64_t> input_shape{
       benchmark_state.range(0),
       benchmark_state.range(2),

benchmarks/cpp/nvfuser/batch_norm_channels_last_backward.cpp (0 additions, 4 deletions)

@@ -25,7 +25,6 @@ static void setupBatchNorm_nhwc_BWD(Fusion* fusion, DataType dtype) {
   FusionGuard fg(fusion);
 
   const bool kTraining = true;
-  const float kMomentum = 0.1;
   const float kEps = 1e-5;
 
   // setup fusion
@@ -86,9 +85,6 @@ static void NvFuserScheduler_BatchNorm_nhwc_BWD(
     DataType dtype) {
   TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
 
-  const bool kTraining = true;
-  const float kEps = 1e-5;
-
   std::vector<int64_t> input_shape{
       benchmark_state.range(0),
       benchmark_state.range(2),

benchmarks/cpp/nvfuser/gelu_backward.cpp (0 additions, 3 deletions)

@@ -113,9 +113,6 @@ BENCHMARK(GeluBackward_AutoSchedule)->Unit(benchmark::kMicrosecond);
 //------------------------------------------------------------------------------
 
 static void GeluBackward_Lower(benchmark::State& benchmark_state) {
-  constexpr int kHiddenFeatures = 512;
-  constexpr int kBatchSize = 64;
-
   Fusion fusion;
 
   // setup fusion

benchmarks/cpp/nvfuser/layer_norm.cpp (0 additions, 2 deletions)

@@ -22,7 +22,6 @@ static void setupLayerNorm(Fusion* fusion, DataType dtype) {
 
   FusionGuard fg(fusion);
 
-  const int kReductionAxis = 1;
   const float kEps = 1e-5;
 
   Double* eps_ptr = IrBuilder::create<Double>(kEps);
@@ -61,7 +60,6 @@ static void NvFuserScheduler_LayerNorm(
 
   std::vector<int64_t> input_shape{
       benchmark_state.range(0), benchmark_state.range(1)};
-  const float kEps = 1e-5;
 
   // inputs
   at::manual_seed(0);

benchmarks/cpp/nvfuser/layer_norm_backward.cpp (0 additions, 3 deletions)

@@ -22,9 +22,6 @@ static void setupLayerNorm_BWD(Fusion* fusion, DataType dtype) {
 
   TORCH_INTERNAL_ASSERT(dtype == DataType::Float || dtype == DataType::Half);
 
-  const int kReductionAxis = 1;
-  Double* eps_ptr = IrBuilder::create<Double>(1e-5);
-
   // setup fusion
   auto grad_out = makeContigTensor(2, dtype);
   auto input = makeContigTensor(2, dtype);

benchmarks/cpp/nvfuser/rms_norm.cpp (0 additions, 2 deletions)

@@ -24,7 +24,6 @@ static void setupRMSNorm(Fusion* fusion, DataType dtype) {
 
   FusionGuard fg(fusion);
 
-  const int kReductionAxis = 2;
   const float kEps = 1e-6;
 
   Double* eps_ptr = IrBuilder::create<Double>(kEps);
@@ -61,7 +60,6 @@ static void NvFuserScheduler_RMSNorm(
       dtype == DataType::BFloat16);
 
   std::vector<int64_t> input_shape{8, benchmark_state.range(0), 1024};
-  const float kEps = 1e-6;
 
   // inputs
   at::manual_seed(0);

benchmarks/cpp/nvfuser/rms_norm_backward.cpp (0 additions, 3 deletions)

@@ -24,9 +24,6 @@ static void setupRMSNorm_BWD(Fusion* fusion, DataType dtype) {
       dtype == DataType::Float || dtype == DataType::Half ||
       dtype == DataType::BFloat16);
 
-  const int kReductionAxis = 2;
-  Double* eps_ptr = IrBuilder::create<Double>(1e-6);
-
   // setup fusion
   auto grad_out = makeContigTensor(3, dtype);
   auto input = makeContigTensor(3, dtype);

benchmarks/cpp/nvfuser/timm.cpp (0 additions, 3 deletions)

@@ -139,7 +139,6 @@ static void setup_vit_base_patch16_224_bcast5(Fusion* fusion, void* null) {
   auto t20 = sum(t37, {2});
   auto t24 = broadcast(t20, bcast_pattern1);
   auto d95 = castOp(DataType::Double, t2->axis(2)->extent());
-  auto d96 = mul(IrBuilder::create<Double>(1.0), d95);
   auto d105 = reciprocal(d95);
   auto t25 = mul(t24, d105);
   auto t26 = add(t25, IrBuilder::create<Double>(1e-6));
@@ -320,8 +319,6 @@ static void NvFuserScheduler_TIMM_vit_base_patch16_224_norm_inner3(
 
   at::manual_seed(0);
   auto fp16_options = at::TensorOptions().dtype(at::kHalf).device(at::kCUDA, 0);
-  auto fp32_options =
-      at::TensorOptions().dtype(at::kFloat).device(at::kCUDA, 0);
 
   auto t0 = at::randn(input_shape, fp16_options);
 
