Don't patch tensor ops that aren't present (#899)
* Only attempt to patch Tensor methods if defined

* syntax

Co-authored-by: Michael Carilli <mcarilli@nvidia.com>
mcarilli and definitelynotmcarilli authored Jun 30, 2020
1 parent 44532b3 commit 43a6f9f
Showing 2 changed files with 10 additions and 6 deletions.
4 changes: 4 additions & 0 deletions apex/amp/compat.py
@@ -40,3 +40,7 @@ def scalar_python_val(x):
             return x.data[0]
         else:
             return x[0]
+
+# Accounts for the possibility that some ops may be removed from a namespace.
+def filter_attrs(module, attrs):
+    return list(attrname for attrname in attrs if hasattr(module, attrname))
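
For context, a minimal sketch of how the new filter_attrs helper behaves (the torch import and the probe names are illustrative additions, not part of the diff): attribute names missing from the target namespace are dropped, so downstream patching code never touches an op that does not exist.

    import torch
    from apex.amp import compat  # assumes an apex checkout that includes this commit

    # '__matmul__' exists on torch.Tensor; '__no_such_op__' is a hypothetical
    # name that does not, so filter_attrs keeps only the first entry.
    print(compat.filter_attrs(torch.Tensor, ['__matmul__', '__no_such_op__']))
    # -> ['__matmul__']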
12 changes: 6 additions & 6 deletions apex/amp/lists/tensor_overrides.py
@@ -11,20 +11,20 @@
 # MODULE = torch.autograd.Variable


-FP16_FUNCS = [
+FP16_FUNCS = compat.filter_attrs(MODULE, [
     '__matmul__',
-]
+])

-FP32_FUNCS = [
+FP32_FUNCS = compat.filter_attrs(MODULE, [
     '__ipow__',
     '__pow__',
     '__rpow__',

     # Cast to fp32 before transfer to CPU
     'cpu',
-]
+])

-CASTS = [
+CASTS = compat.filter_attrs(MODULE, [
     '__add__',
     '__div__',
     '__eq__',
@@ -46,7 +46,7 @@
     '__rtruediv__',
     '__sub__',
     '__truediv__',
-]
+])

 # None of these, but here to make code cleaner.
 SEQUENCE_CASTS = []
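
These lists feed amp's patching pass over torch.Tensor. A sketch of the failure mode the commit guards against, under the assumption that patching begins by saving the original method with getattr (the missing-op name below is hypothetical):

    import torch

    for name in ['__matmul__', '__no_such_op__']:
        try:
            orig = getattr(torch.Tensor, name)  # save the original op before wrapping it
            print('would patch', name)
        except AttributeError:
            print(name, 'is absent from this build; filter_attrs now drops it up front')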
