Skip to content

Commit

Permalink
Added support for slogdet in LazyTensor shape inference (#77904)
Browse files Browse the repository at this point in the history
Fixes #3576

Added support for `slogdet` in LazyTensor shape inference
Pull Request resolved: pytorch/pytorch#77904
Approved by: https://github.com/wconstab, https://github.com/JackCaoG
  • Loading branch information
miladm authored and pytorchmergebot committed May 20, 2022
1 parent d6ae650 commit e67284d
Show file tree
Hide file tree
Showing 2 changed files with 12 additions and 0 deletions.
11 changes: 11 additions & 0 deletions torch/csrc/lazy/core/shape_inference.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -443,6 +443,17 @@ std::vector<Shape> compute_shape_smooth_l1_loss(
}
}

std::vector<Shape> compute_shape_slogdet(const at::Tensor & self) {
  // Shape inference for aten::slogdet.
  //
  // Input: `self` with shape {*, n, n} — a square matrix or a batch of
  // square matrices. Returns the shapes of the two outputs (sign, logabsdet),
  // both with the batch shape {*}.
  //
  // Squareness (n == n) is not checked here; the actual slogdet kernel
  // rejects non-square and unsupported-dtype inputs at execution time.
  TORCH_INTERNAL_ASSERT(self.dim() >= 2);
  // Batch shape = all dims except the trailing two matrix dims.
  std::vector<int64_t> out_sizes(self.sizes().begin(), self.sizes().end() - 2);
  // `sign` has the same dtype as the input. `logabsdet` is always
  // real-valued: for complex inputs its dtype is the corresponding real
  // type (e.g. ComplexDouble -> Double); for real inputs toRealValueType
  // is the identity.
  return {Shape(self.scalar_type(), out_sizes),
          Shape(c10::toRealValueType(self.scalar_type()), out_sizes)};
}

std::vector<Shape> compute_shape_smooth_l1_loss_backward(
const at::Tensor& grad_output, const at::Tensor& self,
const at::Tensor& target, int64_t reduction, double beta) {
Expand Down
1 change: 1 addition & 0 deletions torch/csrc/lazy/core/shape_inference.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,7 @@ TORCH_API std::vector<torch::lazy::Shape> compute_shape_random_functional(const
TORCH_API std::vector<torch::lazy::Shape> compute_shape_random_functional(const at::Tensor & self, int64_t from, c10::optional<int64_t> to, c10::optional<at::Generator> generator);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_relu(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_repeat(const at::Tensor & self, at::IntArrayRef repeats);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_slogdet(const at::Tensor & self);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_smooth_l1_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double beta);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_sort(const at::Tensor & self, int64_t dim, bool descending);
TORCH_API std::vector<torch::lazy::Shape> compute_shape_stack(at::TensorList tensors, int64_t dim);
Expand Down

0 comments on commit e67284d

Please sign in to comment.