Skip to content

Commit

Permalink
fix lint
Browse files Browse the repository at this point in the history
  • Loading branch information
merrymercy committed Sep 18, 2020
1 parent 2b3ef79 commit 8f46f2e
Show file tree
Hide file tree
Showing 9 changed files with 29 additions and 25 deletions.
1 change: 1 addition & 0 deletions include/tvm/auto_scheduler/search_policy.h
Original file line number Diff line number Diff line change
Expand Up @@ -65,6 +65,7 @@
#include <tvm/auto_scheduler/search_task.h>
#include <tvm/node/node.h>

#include <string>
#include <unordered_set>
#include <vector>

Expand Down
3 changes: 2 additions & 1 deletion src/auto_scheduler/search_policy/sketch_policy.cc
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@
#include <algorithm>
#include <iomanip>
#include <limits>
#include <memory>
#include <queue>
#include <set>
#include <string>
Expand Down Expand Up @@ -88,7 +89,7 @@ SketchPolicy::SketchPolicy(SearchTask task, CostModel program_cost_model,
}

// NOTE: There are strong dependency among the rules below,
// so the order to push them into the vector should be consid carefully.
// so the order to push them into the vector should be considered carefully.
if (IsCPUTask(node->search_task)) {
// Sketch Generation Rules
node->sketch_rules.push_back(&rule_always_inline);
Expand Down
1 change: 1 addition & 0 deletions src/auto_scheduler/search_policy/sketch_policy.h
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@
#include <tvm/auto_scheduler/cost_model.h>
#include <tvm/auto_scheduler/search_policy.h>

#include <memory>
#include <set>
#include <string>
#include <unordered_set>
Expand Down
22 changes: 11 additions & 11 deletions src/auto_scheduler/search_policy/sketch_policy_rules.cc
Original file line number Diff line number Diff line change
Expand Up @@ -492,8 +492,8 @@ PopulationGenerationRule::ResultKind InitChangeComputeLocation::Apply(SketchPoli
continue;
}

std::vector<std::pair<int, int>> candidates
= GetComputeLocationCandidates(policy->search_task, *state, stage_id);
std::vector<std::pair<int, int>> candidates =
GetComputeLocationCandidates(policy->search_task, *state, stage_id);

int choice = (policy->rand_gen)() % (candidates.size() + 2);

Expand Down Expand Up @@ -958,23 +958,23 @@ PopulationGenerationRule::ResultKind MutateTileSize::Apply(SketchPolicyNode* pol
PopulationGenerationRule::ResultKind MutateAutoUnroll::Apply(SketchPolicyNode* policy,
State* state) const {
// Extract all auto_unroll_max_step pragma steps.
std::vector<int> annotate_steps;
std::vector<int> pragma_steps;
for (size_t i = 0; i < (*state)->transform_steps.size(); ++i) {
if (auto ps = (*state)->transform_steps[i].as<PragmaStepNode>()) {
if (StrStartsWith(ps->pragma_type, "auto_unroll_max_step")) {
annotate_steps.push_back(i);
pragma_steps.push_back(i);
}
}
}
if (annotate_steps.empty()) {
if (pragma_steps.empty()) {
return ResultKind::kInvalid;
}

std::vector<int>& auto_unroll_configs =
IsGPUTask(policy->search_task) ? auto_unroll_configs_gpu : auto_unroll_configs_cpu;

// Randomly pick up an unroll step
auto step_id = annotate_steps[(policy->rand_gen)() % annotate_steps.size()];
// Randomly pick up an auto unroll pragma step
auto step_id = pragma_steps[(policy->rand_gen)() % pragma_steps.size()];
auto ps = (*state)->transform_steps[step_id].as<PragmaStepNode>();
CHECK(ps);

Expand Down Expand Up @@ -1018,8 +1018,8 @@ PopulationGenerationRule::ResultKind MutateComputeLocation::Apply(SketchPolicyNo
int stage_inc = GetTargetStageIDInState(*state, step_id) - ps->stage_id;
CHECK(ps != nullptr);

std::vector<std::pair<int, int>> candidates
= GetComputeLocationCandidates(policy->search_task, *state, ps->stage_id + stage_inc);
std::vector<std::pair<int, int>> candidates =
GetComputeLocationCandidates(policy->search_task, *state, ps->stage_id + stage_inc);

if (candidates.empty()) {
return PopulationGenerationRule::ResultKind::kInvalid;
Expand All @@ -1039,8 +1039,8 @@ PopulationGenerationRule::ResultKind MutateComputeLocation::Apply(SketchPolicyNo
tmp_s.CopyOnWrite()->transform_steps.push_back((*state)->transform_steps[s]);
}
try {
StepApplyToState(tmp_s->transform_steps.back(), &tmp_s, policy->search_task->compute_dag);
} catch (dmlc::Error &e) {
StepApplyToState(tmp_s->transform_steps.back(), &tmp_s, policy->search_task->compute_dag);
} catch (dmlc::Error& e) {
return PopulationGenerationRule::ResultKind::kInvalid;
}
}
Expand Down
4 changes: 2 additions & 2 deletions src/auto_scheduler/search_policy/sketch_policy_rules.h
Original file line number Diff line number Diff line change
Expand Up @@ -178,7 +178,7 @@ class PopulationMutationRule : public PopulationGenerationRule {
* \param selection_weight the probability of applying this rule is
* proportional to this weight
*/
PopulationMutationRule(double selection_weight) : weight(selection_weight) {}
explicit PopulationMutationRule(double selection_weight) : weight(selection_weight) {}

/* \brief The weight of this rule */
double weight;
Expand All @@ -188,7 +188,7 @@ class PopulationMutationRule : public PopulationGenerationRule {
#define DEFINE_MUTATE_POPULATION_RULE(rule_name) \
class rule_name : public PopulationMutationRule { \
public: \
rule_name(double weight) : PopulationMutationRule(weight) {} \
explicit rule_name(double weight) : PopulationMutationRule(weight) {} \
ResultKind Apply(SketchPolicyNode* policy, State* state) const final; \
};

Expand Down
5 changes: 2 additions & 3 deletions src/auto_scheduler/search_policy/utils.cc
Original file line number Diff line number Diff line change
Expand Up @@ -67,9 +67,8 @@ Array<Integer> GetSpatialSplitStepIds(const State& s, int stage_id) {
return spatial_split_step_ids;
}


std::vector<std::pair<int, int>> GetComputeLocationCandidates(
const SearchTask& task, const State& state, int stage_id) {
std::vector<std::pair<int, int>> GetComputeLocationCandidates(const SearchTask& task,
const State& state, int stage_id) {
int target_stage_id = GetSingleConsumerId(task, state, stage_id);
if (target_stage_id < 0) {
return {};
Expand Down
6 changes: 3 additions & 3 deletions src/auto_scheduler/search_policy/utils.h
Original file line number Diff line number Diff line change
Expand Up @@ -645,7 +645,7 @@ inline void ComputePrefixSumProb(const std::vector<float>& weights,
for (size_t i = 0; i < weights.size(); ++i) {
(*prefix_sum_probs)[i] /= sum;
}
};
}

/*! \brief Random choose an index according to a prefix sum probability. */
inline int RandomChoose(const std::vector<double>& prefix_sum_probs, std::mt19937* random_gen) {
Expand Down Expand Up @@ -692,8 +692,8 @@ class SplitFactorizationMemo {
Array<Integer> GetSpatialSplitStepIds(const State& s, int stage_id);

/*! \brief Get the possible compute locations for a stage. */
std::vector<std::pair<int, int>> GetComputeLocationCandidates(
const SearchTask& task, const State& state, int stage_id);
std::vector<std::pair<int, int>> GetComputeLocationCandidates(const SearchTask& task,
const State& state, int stage_id);

// Apply multi-level tiling structure according to a string format,
// where "S" stands for a space level, "R" stands for a reduction level.
Expand Down
10 changes: 6 additions & 4 deletions tutorials/auto_scheduler/tune_conv2d_layer_cuda.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,13 +74,13 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):

######################################################################
# Next, we set parameters for the auto-scheduler. These parameters
# mainly specify how we do the measurement.
# mainly specify how we do the measurement during the search and auto-tuning.
#
# * `measure_ctx` launches a different process for measurement. This
# provides an isolation. It can protect the master process from any crashes
# provides an isolation. It can protect the master process from GPU crashes
#   happened during measurement and avoid other runtime conflicts.
# * `min_repeat_ms` defines the minimum duration of one "repeat" in every measurement.
# This can warmup the GPU, which is necessary to get reliable measurement results.
# This can warmup the GPU, which is necessary to get accurate measurement results.
# * `num_measure_trials` is the number of measurement trials we can use during the search.
# We only make 10 trials in this tutorial for a fast demonstration. In practice, 1000 is a
# good value for the search to converge. You can do more trials according to your time budget.
Expand Down Expand Up @@ -109,7 +109,7 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
######################################################################
# We can lower the schedule to see the IR after auto-scheduling.
# The auto-scheduler correctly performs optimizations including multi-level tiling,
# parallelization, vectorization, unrolling and fusion.
# cooperative fetching, unrolling and operator fusion.

print(tvm.lower(sch, args, simple_mode=True))

Expand Down Expand Up @@ -172,6 +172,7 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
# and resume the status of search policy and cost model with the log file.
# In the example below we resume the status and do 5 more trials.


log_file = "conv2d.json"
cost_model = auto_scheduler.XGBModel()
cost_model.update_from_file(log_file)
Expand All @@ -185,4 +186,5 @@ def conv2d_layer(N, H, W, CO, CI, KH, KW, stride, padding):
)
sch, args = auto_scheduler.auto_schedule(task, search_policy, tuning_options=tune_option)

# kill the measurement process
del measure_ctx
2 changes: 1 addition & 1 deletion tutorials/auto_scheduler/tune_matmul_x86.py
Original file line number Diff line number Diff line change
Expand Up @@ -96,7 +96,7 @@ def matmul_add(N, L, M, dtype):
######################################################################
# We can lower the schedule to see the IR after auto-scheduling.
# The auto-scheduler correctly performs optimizations including multi-level tiling,
# parallelization, vectorization, unrolling and fusion.
# parallelization, vectorization, unrolling and operator fusion.

print(tvm.lower(sch, args, simple_mode=True))

Expand Down

0 comments on commit 8f46f2e

Please sign in to comment.