Use int64_t and double for Int and Float, respectively #43

Merged · 2 commits · May 23, 2020
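For context, here is a minimal standalone sketch (not part of this PR) of the narrowing problems that motivate widening `Int` to `int64_t` and `Float` to `double`: a 32-bit `int` silently mangles large extents, and a `float` cannot round-trip a `double` literal.

```cpp
#include <cstdint>
#include <iomanip>
#include <iostream>
#include <limits>

int main() {
  // A 64-bit extent (e.g. a large tensor size) does not fit in 32-bit int.
  int64_t extent = int64_t{3} * 1000 * 1000 * 1000;  // 3e9 > INT32_MAX
  int narrowed = static_cast<int>(extent);           // value is mangled
  std::cout << extent << " -> " << narrowed << "\n";

  // A double literal loses precision when squeezed through float.
  double d = 0.1;
  float f = static_cast<float>(d);
  std::cout << std::setprecision(std::numeric_limits<double>::max_digits10)
            << d << " vs " << static_cast<double>(f) << "\n";
}
```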
2 changes: 1 addition & 1 deletion .github/workflows/clang_format.yml
@@ -29,7 +29,7 @@ jobs:
set -eu
# This is necessary to get the same results regardless of whether the
# PR was opened directly or from a forked repo. See: `9f890a92` for more info.
- git remote add upstream https://github.com/pytorch/pytorch
+ git remote add upstream https://github.com/csarofeen/pytorch
git fetch upstream "$GITHUB_BASE_REF"
BASE_SHA=${{ github.event.pull_request.base.sha }}
HEAD_SHA=${{ github.event.pull_request.head.sha }}
2 changes: 1 addition & 1 deletion .github/workflows/lint.yml
@@ -121,7 +121,7 @@ jobs:
- name: Run clang-tidy
run: |
set -eux
- git remote add upstream https://github.com/pytorch/pytorch
+ git remote add upstream https://github.com/csarofeen/pytorch
git fetch upstream "$GITHUB_BASE_REF"
BASE_SHA=${{ github.event.pull_request.base.sha }}
HEAD_SHA=${{ github.event.pull_request.head.sha }}
17 changes: 11 additions & 6 deletions torch/csrc/jit/codegen/cuda/ir_interface_nodes.h
@@ -57,11 +57,13 @@ struct TORCH_CUDA_API Bool : public Val {
* is compiled) or a constant value (inlined into the kernel definition).
*/
struct TORCH_CUDA_API Float : public Val {
+ using ScalarType = double;
+
~Float() = default;

Float() : Val(ValType::Scalar, DataType::Float), maybe_value_{c10::nullopt} {}

- Float(float _value)
+ Float(ScalarType _value)
: Val(ValType::Scalar, DataType::Float), maybe_value_{_value} {}

Float(const Float& other) = delete;
@@ -76,14 +78,14 @@ struct TORCH_CUDA_API Float : public Val {
bool isConst() const {
return maybe_value_.has_value();
}
- c10::optional<float> value() const noexcept {
+ c10::optional<ScalarType> value() const noexcept {
return maybe_value_;
}

bool sameAs(const Float* const other) const;

private:
- const c10::optional<float> maybe_value_;
+ const c10::optional<ScalarType> maybe_value_;
};

/*
@@ -124,11 +126,14 @@ struct TORCH_CUDA_API Half : public Val {
// An Int64 value. If used for indexing it's set as size_t. Otherwise it's an
// inlined literal in the kernel.
struct TORCH_CUDA_API Int : public Val {
+ using ScalarType = int64_t;
+
~Int() = default;

Int() : Val(ValType::Scalar, DataType::Int), maybe_value_{c10::nullopt} {}

- Int(int _value) : Val(ValType::Scalar, DataType::Int), maybe_value_{_value} {}
+ Int(ScalarType _value)
+     : Val(ValType::Scalar, DataType::Int), maybe_value_{_value} {}

Int(const Int& other) = delete;
Int& operator=(const Int& other) = delete;
@@ -142,14 +147,14 @@
virtual bool isConst() const {
return maybe_value_.has_value();
}
- virtual c10::optional<int> value() const noexcept {
+ virtual c10::optional<ScalarType> value() const noexcept {
return maybe_value_;
}

virtual bool sameAs(const Int* const other) const;

private:
- const c10::optional<int> maybe_value_;
+ const c10::optional<ScalarType> maybe_value_;
};

struct TransformReplay;
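To make the effect of the new `ScalarType` alias concrete, here is a standalone mini-version of the `Int` pattern above (an assumption-laden sketch: `std::optional` stands in for `c10::optional`, and the `Val` base class and fusion bookkeeping are omitted). Keeping the width in a single alias means the constructor, accessor, and storage all change together, which is what lets this PR widen them in one place.

```cpp
#include <cstdint>
#include <iostream>
#include <optional>

// Mini-version of the Int pattern from this PR; std::optional stands in
// for c10::optional, Val inheritance and IR registration are omitted.
struct Int {
  using ScalarType = int64_t;

  Int() = default;
  explicit Int(ScalarType v) : maybe_value_{v} {}

  bool isConst() const { return maybe_value_.has_value(); }
  std::optional<ScalarType> value() const noexcept { return maybe_value_; }

 private:
  std::optional<ScalarType> maybe_value_;
};

int main() {
  Int big{int64_t{1} << 40};  // would have been truncated as a 32-bit int
  if (big.isConst()) {
    std::cout << *big.value() << "\n";  // 1099511627776
  }
}
```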
3 changes: 2 additions & 1 deletion torch/csrc/jit/codegen/cuda/ir_iostream.cpp
@@ -170,7 +170,8 @@ void IRPrinter::handle(const Float* const f) {
os << "f" << f->name();
} else {
os << "float("
- << std::setprecision(std::numeric_limits<float>::max_digits10)
+ << std::setprecision(
+       std::numeric_limits<Float::ScalarType>::max_digits10)
<< *(f->value()) << ")";
}
}
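The `max_digits10` change matters because the printer serializes the constant into kernel source text: `float`'s digit count (9) is not enough to round-trip a `double` value (17). A small self-contained sketch of the guarantee (example value assumed, not from the PR):

```cpp
#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>
#include <string>

// Round-trip a double through its textual form, as the kernel printer must.
int main() {
  double v = 1.0 / 3.0;
  std::ostringstream os;
  os << std::setprecision(std::numeric_limits<double>::max_digits10) << v;
  double back = std::stod(os.str());
  std::cout << std::boolalpha << (back == v) << "\n";  // true: exact round-trip
}
```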