Skip to content

Commit 3c6bc58

Browse files
cyyever authored and pytorchmergebot committed
use C10_API in libc10.so (pytorch#94171)
MSVC emits several C4273 warnings when compiling c10. I think the offending files should use C10_API instead of TORCH_API. If the tests pass, the changes should be safe. Pull Request resolved: pytorch#94171 Approved by: https://github.com/Skylion007
1 parent a07d129 commit 3c6bc58

File tree

7 files changed

+20
-20
lines changed

7 files changed

+20
-20
lines changed

c10/core/GeneratorImpl.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ struct C10_API GeneratorImpl : public c10::intrusive_ptr_target {
9898

9999
namespace detail {
100100

101-
TORCH_API uint64_t getNonDeterministicRandom(bool is_cuda = false);
101+
C10_API uint64_t getNonDeterministicRandom(bool is_cuda = false);
102102

103103
} // namespace detail
104104

c10/core/GradMode.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -5,14 +5,14 @@
55

66
namespace c10 {
77

8-
struct TORCH_API GradMode {
8+
struct C10_API GradMode {
99
static bool is_enabled();
1010
static void set_enabled(bool enabled);
1111
};
1212

1313
// A RAII, thread local (!) guard that enables or disables grad mode upon
1414
// construction, and sets it back to the original value upon destruction.
15-
struct TORCH_API AutoGradMode {
15+
struct C10_API AutoGradMode {
1616
AutoGradMode(bool enabled) : prev_mode(GradMode::is_enabled()) {
1717
GradMode::set_enabled(enabled);
1818
}
@@ -24,13 +24,13 @@ struct TORCH_API AutoGradMode {
2424

2525
// A RAII, thread local (!) guard that stops future operations from building
2626
// gradients.
27-
struct TORCH_API NoGradGuard : public AutoGradMode {
27+
struct C10_API NoGradGuard : public AutoGradMode {
2828
NoGradGuard() : AutoGradMode(/*enabled=*/false) {}
2929
};
3030

3131
// A RAII, thread local (!) guard that enables or disables forward grad mode
3232
// upon construction, and sets it back to the original value upon destruction.
33-
struct TORCH_API AutoFwGradMode {
33+
struct C10_API AutoFwGradMode {
3434
AutoFwGradMode(bool enabled)
3535
: prev_mode(AutogradState::get_tls_state().get_fw_grad_mode()) {
3636
AutogradState::get_tls_state().set_fw_grad_mode(enabled);

c10/core/InferenceMode.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ namespace c10 {
99

1010
// A RAII, thread local (!) guard that enables or disables inference mode upon
1111
// construction, and sets it back to the original value upon destruction.
12-
struct TORCH_API InferenceMode {
12+
struct C10_API InferenceMode {
1313
// Note [Expected TLS state in InferenceMode]:
1414
// InferenceMode: ADInplaceOrView not in
1515
// raw_local_dispatch_key_set.included(),

c10/cuda/CUDAStream.h

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -203,7 +203,7 @@ class C10_CUDA_API CUDAStream {
203203
* isHighPriority to true, or a stream for a specific device by setting device
204204
* (defaulting to the current CUDA stream.)
205205
*/
206-
TORCH_API CUDAStream
206+
C10_API CUDAStream
207207
getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);
208208

209209
/**
@@ -213,7 +213,7 @@ getStreamFromPool(const bool isHighPriority = false, DeviceIndex device = -1);
213213
* want to operate on a non-torch allocated stream for data exchange or similar
214214
* purposes
215215
*/
216-
TORCH_API CUDAStream
216+
C10_API CUDAStream
217217
getStreamFromExternal(cudaStream_t ext_stream, DeviceIndex device_index);
218218

219219
/**
@@ -222,7 +222,7 @@ getStreamFromExternal(cudaStream_t ext_stream, DeviceIndex device_index);
222222
* where most computation occurs when you aren't explicitly using
223223
* streams.
224224
*/
225-
TORCH_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
225+
C10_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
226226

227227
/**
228228
* Get the current CUDA stream, for the passed CUDA device, or for the
@@ -231,7 +231,7 @@ TORCH_API CUDAStream getDefaultCUDAStream(DeviceIndex device_index = -1);
231231
* be different if someone called 'setCurrentCUDAStream' or used 'StreamGuard'
232232
* or 'CUDAStreamGuard'.
233233
*/
234-
TORCH_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
234+
C10_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
235235

236236
/**
237237
* Set the current stream on the device of the passed in stream to be
@@ -243,7 +243,7 @@ TORCH_API CUDAStream getCurrentCUDAStream(DeviceIndex device_index = -1);
243243
* (which will switch both your current device and current stream in the way you
244244
* expect, and reset it back to its original state afterwards).
245245
*/
246-
TORCH_API void setCurrentCUDAStream(CUDAStream stream);
246+
C10_API void setCurrentCUDAStream(CUDAStream stream);
247247

248248
C10_API std::ostream& operator<<(std::ostream& stream, const CUDAStream& s);
249249

c10/util/UniqueVoidPtr.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,7 @@ using DeleterFnPtr = void (*)(void*);
1010
namespace detail {
1111

1212
// Does not delete anything
13-
TORCH_API void deleteNothing(void*);
13+
C10_API void deleteNothing(void*);
1414

1515
// A detail::UniqueVoidPtr is an owning smart pointer like unique_ptr, but
1616
// with three major differences:

c10/util/complex_math.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -51,10 +51,10 @@ C10_HOST_DEVICE inline c10::complex<T> log2(const c10::complex<T>& x) {
5151
#if defined(_LIBCPP_VERSION) || \
5252
(defined(__GLIBCXX__) && !defined(_GLIBCXX11_USE_C99_COMPLEX))
5353
namespace _detail {
54-
TORCH_API c10::complex<float> sqrt(const c10::complex<float>& in);
55-
TORCH_API c10::complex<double> sqrt(const c10::complex<double>& in);
56-
TORCH_API c10::complex<float> acos(const c10::complex<float>& in);
57-
TORCH_API c10::complex<double> acos(const c10::complex<double>& in);
54+
C10_API c10::complex<float> sqrt(const c10::complex<float>& in);
55+
C10_API c10::complex<double> sqrt(const c10::complex<double>& in);
56+
C10_API c10::complex<float> acos(const c10::complex<float>& in);
57+
C10_API c10::complex<double> acos(const c10::complex<double>& in);
5858
}; // namespace _detail
5959
#endif
6060

c10/util/signal_handler.h

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020

2121
namespace c10 {
2222

23-
class TORCH_API SignalHandler {
23+
class C10_API SignalHandler {
2424
public:
2525
enum class Action { NONE, STOP };
2626

@@ -40,13 +40,13 @@ class TORCH_API SignalHandler {
4040
};
4141

4242
#if defined(C10_SUPPORTS_FATAL_SIGNAL_HANDLERS)
43-
class TORCH_API FatalSignalHandler {
43+
class C10_API FatalSignalHandler {
4444
// This works by setting up certain fatal signal handlers. Previous fatal
4545
// signal handlers will still be called when the signal is raised. Defaults
4646
// to being off.
4747
public:
48-
TORCH_API void setPrintStackTracesOnFatalSignal(bool print);
49-
TORCH_API bool printStackTracesOnFatalSignal();
48+
C10_API void setPrintStackTracesOnFatalSignal(bool print);
49+
C10_API bool printStackTracesOnFatalSignal();
5050
static FatalSignalHandler& getInstance();
5151
virtual ~FatalSignalHandler();
5252

0 commit comments

Comments (0)