
Commit 05dd99b

r-barnes authored and facebook-github-bot committed
fixes for Glow
Reviewed By: jfix71
Differential Revision: D57075318
fbshipit-source-id: c66c5bb48f8f9491e62f746ef44a4db084f15b00
1 parent 0b05ae6 commit 05dd99b

File tree

13 files changed (+52 −47 lines)


include/glow/LLVMIRCodeGen/LLVMBackend.h

+3-3
@@ -44,7 +44,7 @@ class LLVMBackendOptions {
   /// ABI to be used by this backend.
   std::string abi_;
   /// Float ABI to be used by this backend.
-  llvm::Optional<llvm::FloatABI::ABIType> floatABI_;
+  std::optional<llvm::FloatABI::ABIType> floatABI_;
   /// Code model used by this backend.
   llvm::CodeModel::Model codeModel_;
   /// Code model used by this backend for bundles.
@@ -75,11 +75,11 @@ class LLVMBackendOptions {
   /// Sets ABI used by this backend.
   void setABIName(llvm::StringRef abi) { abi_ = abi.str(); }
   /// \returns Float ABI used by this backend.
-  llvm::Optional<llvm::FloatABI::ABIType> getFloatABI() const {
+  std::optional<llvm::FloatABI::ABIType> getFloatABI() const {
     return floatABI_;
   }
   /// Sets Float ABI used by this backend.
-  void setFloatABI(llvm::Optional<llvm::FloatABI::ABIType> floatABI) {
+  void setFloatABI(std::optional<llvm::FloatABI::ABIType> floatABI) {
     floatABI_ = floatABI;
   }
   /// \returns code model used by this backend.
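
The same mechanical mapping recurs in every file below: llvm::Optional<T> becomes std::optional<T>, hasValue()/getValue() become has_value()/value(), an empty llvm::Optional<T>() becomes std::nullopt, and "llvm/ADT/Optional.h" gives way to <optional>. A minimal standalone sketch of the std::optional side of that mapping (illustrative names, not Glow code):

#include <optional>

// A default-constructed std::optional is empty, just as llvm::Optional was.
std::optional<int> maybeWidth;

int widthOrDefault() {
  if (maybeWidth.has_value()) { // was: maybeWidth.hasValue()
    return maybeWidth.value();  // was: maybeWidth.getValue()
  }
  return 64;                    // equivalently: maybeWidth.value_or(64)
}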

include/glow/Optimizer/GraphOptimizer/CompilationContext.h

+5-3
@@ -21,6 +21,8 @@
 #include "glow/Quantization/Base/Base.h"
 #include "glow/Support/Error.h"
 
+#include <optional>
+
 namespace glow {
 namespace runtime {
 struct PartitionConfig;
@@ -273,7 +275,7 @@ struct OptimizationOptions {
   /// If it is true (false), perform (not perform) ASAP op placement in DAG
   /// optimization; If it is not set, use acc perf GFlag APLASAPPlacement to
   /// determine whether to perform ASAP op placement or not
-  llvm::Optional<bool> enableAPLASAPPlacement;
+  std::optional<bool> enableAPLASAPPlacement;
 
   /// If true does int64 to int32 type demotion if backend supports for specific
   /// nodes.
@@ -311,8 +313,8 @@ struct OptimizationOptions {
     PRINT_VALUE(foldElemKindConversionIntoIO, dump_str)
     PRINT_VALUE(foldStaticPlaceholderConversions, dump_str)
     PRINT_VALUE(useSparseNNPartitioningScheme, dump_str)
-    if (enableAPLASAPPlacement.hasValue()) {
-      PRINT_VALUE(enableAPLASAPPlacement.getValue(), dump_str)
+    if (enableAPLASAPPlacement) {
+      PRINT_VALUE(enableAPLASAPPlacement.value(), dump_str)
     }
     PRINT_VALUE(enableTypeDemotion, dump_str)
     PRINT_VALUE(enableQuantParamChanges, dump_str)
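
Per the doc comment above, enableAPLASAPPlacement is a tri-state: explicitly true, explicitly false, or unset, in which case a GFlag decides. std::optional<bool> models that directly; a standalone sketch of the read side (the flag and helper names here are hypothetical, not Glow's):

#include <optional>

bool gAPLASAPPlacementFlag = false;         // stand-in for the acc perf GFlag default
std::optional<bool> enableAPLASAPPlacement; // unset until someone decides

bool shouldPlaceASAP() {
  // An engaged optional overrides the flag; an empty one defers to it.
  return enableAPLASAPPlacement.value_or(gAPLASAPPlacementFlag);
}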

include/glow/Support/TensorPool.h

+2-2
@@ -17,11 +17,11 @@
 #define GLOW_TENSORPOOL_H
 
 #include "glow/Base/Tensor.h"
-#include "llvm/ADT/Optional.h"
 
 #include <atomic>
 #include <iostream>
 #include <mutex>
+#include <optional>
 #include <unordered_map>
 #include <vector>
 
@@ -74,7 +74,7 @@ class TensorPool final {
   /// previously been added by initialize. If the pool is empty this will
   /// allocate a new Tensor unless preventAllocs was set true at construction
   /// time.
-  llvm::Optional<Tensor> get(TypeRef ty);
+  std::optional<Tensor> get(TypeRef ty);
 
   /// Return a Tensor \p t to the pool. This Tensor must have been previously
   /// allocated by this TensorPool.
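
Because get() can legitimately come back empty (pool exhausted with preventAllocs set), callers now test the std::optional and move the Tensor out of it. A minimal caller-side sketch, assuming only the TensorPool API visible in this diff (get, reclaim); the helper itself is hypothetical:

#include <optional>
#include <utility>

void useOnce(glow::TensorPool &pool, glow::TypeRef ty) {
  std::optional<glow::Tensor> maybeT = pool.get(ty);
  if (!maybeT.has_value()) {
    return; // pool empty and allocations prevented
  }
  glow::Tensor t = std::move(maybeT.value()); // take ownership out of the optional
  // ... use t ...
  pool.reclaim(std::move(t)); // hand the buffer back to the pool
}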

lib/Backends/Interpreter/InterpreterNodes.cpp

+2-2
@@ -1476,7 +1476,7 @@ static void fwdMaxPool(Tensor *inW, Tensor *outW, Tensor *argmaxW,
   ShapeHW kdim(kernelSizes);
   ShapeHW sdim(strides);
 
-  llvm::Optional<Handle<int64_t>> argmaxH;
+  std::optional<Handle<int64_t>> argmaxH;
   if (argmaxW) {
     argmaxH = argmaxW->getHandle<int64_t>();
   }
@@ -6678,7 +6678,7 @@ void BoundInterpreterFunction::fwdIntNBitSplitEmbeddingWeightedBagsImpl(
   auto weightsTysH = weightsTys->getHandle<uint8_t>();
   auto dimOffsetsH = dimOffsets->getHandle<int32_t>();
   auto weightsOffsetsH = weightsOffsets->getHandle<WeightsOffsetTy>();
-  llvm::Optional<Handle<IndiceWeightTy>> indiceWeightsH;
+  std::optional<Handle<IndiceWeightTy>> indiceWeightsH;
   if (indiceWeights) {
     indiceWeightsH = indiceWeights->getHandle<IndiceWeightTy>();
   }

lib/CodeGen/MemoryAllocator.cpp

+1
@@ -20,6 +20,7 @@
 #include "llvm/ADT/SmallVector.h"
 #include "llvm/Support/Casting.h"
 #include "llvm/Support/Debug.h"
+#include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/raw_ostream.h"
 
 #define DEBUG_TYPE "memory-allocator"

lib/Graph/Log.cpp

+1
@@ -20,6 +20,7 @@
 #include "glow/Graph/Graph.h"
 #include "glow/Graph/Node.h"
 #include "glow/Graph/NodeValue.h"
+#include "llvm/ADT/StringExtras.h"
 #include "llvm/Support/CommandLine.h"
 #include "llvm/Support/FileSystem.h"
 #include "llvm/Support/FormatVariadic.h"

lib/LLVMIRCodeGen/LLVMIRGen.cpp

+2-2
@@ -99,8 +99,8 @@ static std::mutex initTargetMutex;
 
 void LLVMIRGen::initTargetOptions(llvm::TargetOptions &targetOpts,
                                   const LLVMBackendOptions &backendOpts) {
-  if (backendOpts.getFloatABI().hasValue()) {
-    targetOpts.FloatABIType = backendOpts.getFloatABI().getValue();
+  if (backendOpts.getFloatABI().has_value()) {
+    targetOpts.FloatABIType = backendOpts.getFloatABI().value();
   }
   if (!backendOpts.getABIName().empty()) {
     targetOpts.MCOptions.ABIName = backendOpts.getABIName();

lib/Onnxifi/Base.cpp

+3-4
@@ -347,15 +347,14 @@ onnxStatus Graph::adjustInputs(uint32_t inputsCount,
       continue;
     }
 
-    llvm::Optional<Tensor> inputTensorOpt = tensorPool_.get(inPhPtr->getType());
-    if (!inputTensorOpt.hasValue()) {
+    std::optional<Tensor> inputTensorOpt = tensorPool_.get(inPhPtr->getType());
+    if (!inputTensorOpt.has_value()) {
       DLOG(FATAL) << "Tensorpool tensor not found for input "
                   << inOnnxTensor.name;
       return ONNXIFI_STATUS_INTERNAL_ERROR;
     }
     // We want fresh DeviceResidencyInfo for this fresh Tensor.
-    externalIOBindings.emplace_back(inPhPtr,
-                                    std::move(inputTensorOpt.getValue()));
+    externalIOBindings.emplace_back(inPhPtr, std::move(inputTensorOpt.value()));
     Tensor &inputTensor = externalIOBindings.back().second;
     inputTensor.resetDeviceInfo();
 
lib/Optimizer/GraphOptimizer/GraphOptimizer.cpp

+3-3
@@ -5474,9 +5474,9 @@ struct ChannelShuffleParams {
 /// as ReshapeNode->TransposeNode->ReshapeNode) for which \p node is the leading
 /// ReshapeNode. \returns The original ChannelShuffle parameters if possible and
 /// empty Optional otherwise.
-static llvm::Optional<ChannelShuffleParams>
+static std::optional<ChannelShuffleParams>
 getChannelShuffleParams(const ReshapeNode &node) {
-  auto resM = llvm::Optional<ChannelShuffleParams>();
+  std::optional<ChannelShuffleParams> resM;
 
   llvm::ArrayRef<dim_t> inputDims = node.getInput().dims();
   llvm::ArrayRef<dim_t> resultDims = node.getDims();
@@ -5539,7 +5539,7 @@ bool FoldChannelShuffle::run(Function *F, const CompilationContext &cctx) {
 
     // Compute the original parameters to ChannelShuffle.
     auto paramsM = getChannelShuffleParams(*RN1);
-    if (!paramsM.hasValue()) {
+    if (!paramsM.has_value()) {
       continue;
     }
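
getChannelShuffleParams shows the producer side of the idiom the doc comment describes: start with a default-constructed (empty) std::optional, engage it only when the match succeeds, and let the caller test has_value(). A compilable sketch of that shape with a made-up payload struct rather than Glow's ChannelShuffleParams:

#include <optional>

struct Params {
  unsigned group;
  unsigned kernel;
};

// \returns the parameters if the inputs describe a valid configuration,
// and an empty optional otherwise.
static std::optional<Params> matchParams(unsigned group, unsigned kernel) {
  std::optional<Params> res; // default-constructed: empty
  if (group != 0 && kernel != 0) {
    res = Params{group, kernel}; // engage only on success
  }
  return res;
}

// The caller side mirrors FoldChannelShuffle::run:
//   if (!matchParams(g, k).has_value()) { continue; }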

lib/Runtime/HostManager/HostManager.cpp

+3-3
@@ -901,7 +901,7 @@ Error HostManager::runNetworkBlocking(
 }
 
 void HostManager::dispatchNextRun() {
-  llvm::Optional<InferRequest> pRequest;
+  std::optional<InferRequest> pRequest;
   std::shared_lock<std::shared_timed_mutex> networkLock(networkLock_);
   {
     // hmm this lock is hot but I still have it as a unique lock because
@@ -921,8 +921,8 @@ void HostManager::dispatchNextRun() {
     }
   }
 
-  assert(pRequest.hasValue());
-  InferRequest request = std::move(pRequest.getValue());
+  assert(pRequest.has_value());
+  InferRequest request = std::move(pRequest.value());
   auto startTime = TraceEvent::now();
   auto requestReceived = request.startTime;
   executor_->run(
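
dispatchNextRun asserts before moving the request out of the optional. With std::optional, value() throws std::bad_optional_access on an empty optional (dereferencing with * or -> would be undefined behavior instead), so the assert documents the invariant in debug builds while value() still checks in release builds. A small standalone illustration of those semantics (names are made up):

#include <cassert>
#include <optional>
#include <string>
#include <utility>

std::string takeNext(std::optional<std::string> &slot) {
  assert(slot.has_value() && "queue handed us an empty slot");
  // value() rechecks and throws std::bad_optional_access if empty;
  // std::move lets the payload be stolen rather than copied.
  std::string s = std::move(slot.value());
  slot.reset(); // leave the slot explicitly empty for the next producer
  return s;
}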

lib/Support/TensorPool/TensorPool.cpp

+5-3
@@ -16,9 +16,11 @@
 
 #include "glow/Support/TensorPool.h"
 
+#include <optional>
+
 namespace glow {
 
-llvm::Optional<Tensor> TensorPool::get(TypeRef ty) {
+std::optional<Tensor> TensorPool::get(TypeRef ty) {
   stats_.totalGets++;
 
   std::unique_lock<std::mutex> l(lock_);
@@ -27,7 +29,7 @@ llvm::Optional<Tensor> TensorPool::get(TypeRef ty) {
 
   if (it == pools_.end()) {
     if (preventInlineAllocs_) {
-      return llvm::Optional<Tensor>();
+      return std::nullopt;
    }
 
    stats_.totalTypes++;
@@ -36,7 +38,7 @@ llvm::Optional<Tensor> TensorPool::get(TypeRef ty) {
 
  if (it->second.empty()) {
    if (preventInlineAllocs_) {
-      return llvm::Optional<Tensor>();
+      return std::nullopt;
    }
 
    // Don't need to alloc under the lock.
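
On the producer side, return std::nullopt replaces return llvm::Optional<Tensor>(), while returning a plain T still converts implicitly into an engaged optional on the success path. A self-contained sketch of a get()-like function over a hypothetical string pool (not the Glow TensorPool):

#include <map>
#include <optional>
#include <string>
#include <utility>
#include <vector>

std::map<size_t, std::vector<std::string>> freeLists; // free buffers keyed by size
bool preventAllocs = false;                            // mirrors preventInlineAllocs_

std::optional<std::string> getBuffer(size_t size) {
  auto it = freeLists.find(size);
  if (it == freeLists.end() || it->second.empty()) {
    if (preventAllocs) {
      return std::nullopt;          // was: return llvm::Optional<T>();
    }
    return std::string(size, '\0'); // a T implicitly becomes an engaged optional
  }
  std::string buf = std::move(it->second.back());
  it->second.pop_back();
  return buf;
}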

tests/unittests/TensorPoolTest.cpp

+20-20
@@ -31,7 +31,7 @@ TEST(TensorPool, BasicTest) {
   Type ty(ElemKind::FloatTy, {1, 2, 3});
   pool.reserve(&ty, 1);
 
-  Tensor T = std::move(pool.get(&ty).getValue());
+  Tensor T = std::move(pool.get(&ty).value());
   EXPECT_TRUE(T.getType().isEqual(ty));
   EXPECT_EQ(T.dims(), ty.dims());
 
@@ -52,12 +52,12 @@ TEST(TensorPool, ReclaimAndGet) {
   Type ty(ElemKind::FloatTy, {1, 2, 3});
   pool.reserve(&ty, 1);
 
-  Tensor T = std::move(pool.get(&ty).getValue());
+  Tensor T = std::move(pool.get(&ty).value());
   auto *backingPtr = T.getUnsafePtr();
 
   pool.reclaim(std::move(T));
 
-  Tensor T2 = std::move(pool.get(&ty).getValue());
+  Tensor T2 = std::move(pool.get(&ty).value());
   // They are the same buffer.
   EXPECT_EQ(T2.getUnsafePtr(), backingPtr);
 
@@ -78,8 +78,8 @@ TEST(TensorPool, Extends) {
   Type ty(ElemKind::FloatTy, {1, 2, 3});
   pool.reserve(&ty, 1);
 
-  Tensor T = std::move(pool.get(&ty).getValue());
-  Tensor T2 = std::move(pool.get(&ty).getValue());
+  Tensor T = std::move(pool.get(&ty).value());
+  Tensor T2 = std::move(pool.get(&ty).value());
   EXPECT_TRUE(T.getType().isEqual(T2.getType()));
   EXPECT_TRUE(T.getType().isEqual(ty));
   EXPECT_TRUE(T2.getType().isEqual(ty));
@@ -105,15 +105,15 @@ TEST(TensorPool, DoesntExtend) {
   Type ty(ElemKind::FloatTy, {1, 2, 3});
   pool.reserve(&ty, 1);
 
-  Tensor T = std::move(pool.get(&ty).getValue());
+  Tensor T = std::move(pool.get(&ty).value());
   Type Tt = T.getType();
 
   auto T2opt = pool.get(&ty);
-  EXPECT_FALSE(T2opt.hasValue());
+  EXPECT_FALSE(T2opt.has_value());
 
   pool.reclaim(std::move(T));
 
-  T = std::move(pool.get(&ty).getValue());
+  T = std::move(pool.get(&ty).value());
   EXPECT_EQ(Tt, T.getType());
 
   const auto &stats = pool.getStats();
@@ -132,8 +132,8 @@ TEST(TensorPool, Noreserve) {
   TensorPool pool;
   Type ty(ElemKind::FloatTy, {1, 2, 3});
 
-  Tensor T = std::move(pool.get(&ty).getValue());
-  Tensor T2 = std::move(pool.get(&ty).getValue());
+  Tensor T = std::move(pool.get(&ty).value());
+  Tensor T2 = std::move(pool.get(&ty).value());
 
   EXPECT_TRUE(T.getType().isEqual(T2.getType()));
 
@@ -162,8 +162,8 @@ TEST(TensorPool, MultipleTypes) {
   std::vector<Tensor> tensors;
   // Ten total allocs.
   for (int i = 0; i < 5; ++i) {
-    Tensor T = std::move(pool.get(&ty).getValue());
-    Tensor T2 = std::move(pool.get(&ty2).getValue());
+    Tensor T = std::move(pool.get(&ty).value());
+    Tensor T2 = std::move(pool.get(&ty2).value());
     EXPECT_FALSE(T.getType().isEqual(T2.getType()));
     EXPECT_TRUE(T.getType().isEqual(ty));
     EXPECT_TRUE(T2.getType().isEqual(ty2));
@@ -200,14 +200,14 @@ TEST(TensorPool, MultipleTypesReclaim) {
   pool.reserve(&ty, 1);
   pool.reserve(&ty2, 1);
 
-  Tensor T = std::move(pool.get(&ty).getValue());
-  Tensor T2 = std::move(pool.get(&ty2).getValue());
+  Tensor T = std::move(pool.get(&ty).value());
+  Tensor T2 = std::move(pool.get(&ty2).value());
 
   pool.reclaim(std::move(T));
   pool.reclaim(std::move(T2));
 
-  T = std::move(pool.get(&ty).getValue());
-  T2 = std::move(pool.get(&ty2).getValue());
+  T = std::move(pool.get(&ty).value());
+  T2 = std::move(pool.get(&ty2).value());
 
   pool.reclaim(std::move(T));
   pool.reclaim(std::move(T2));
@@ -231,7 +231,7 @@ TEST(TensorPool, PlaceholderBindingsReclaim) {
   Module mod;
 
   auto *PH = mod.createPlaceholder(&ty, "test", false);
-  bindings.insert(PH, std::move(pool.get(&ty).getValue()));
+  bindings.insert(PH, std::move(pool.get(&ty).value()));
 
   /// Insert a non managed tensor.
   auto *PH2 = mod.createPlaceholder(&ty, "test2", false);
@@ -249,7 +249,7 @@ TEST(TensorPool, PlaceholderBindingsReclaim) {
   EXPECT_EQ(stats.totalGets, 1);
   EXPECT_EQ(stats.totalReclaims, 1);
 
-  bindings.insert(PH, std::move(pool.get(&ty).getValue()));
+  bindings.insert(PH, std::move(pool.get(&ty).value()));
 
   bindings.erase(PH);
   const auto &stats2 = pool.getStats();
@@ -263,7 +263,7 @@ TEST(TensorPool, Clear) {
   TensorPool pool;
   Type ty(ElemKind::FloatTy, {1, 2, 3});
 
-  Tensor T = std::move(pool.get(&ty).getValue());
+  Tensor T = std::move(pool.get(&ty).value());
   pool.reclaim(std::move(T));
 
   const auto &stats = pool.getStats();
@@ -277,7 +277,7 @@ TEST(TensorPool, Clear) {
 
   pool.clear();
 
-  T = std::move(pool.get(&ty).getValue());
+  T = std::move(pool.get(&ty).value());
   pool.reclaim(std::move(T));
 
   const auto &stats2 = pool.getStats();

torch_glow/src/CachingGraphRunner.cpp

+2-2
@@ -913,13 +913,13 @@ CachingGraphRunner::convertPyTorchInputToGlowInput(
   // For backends that does not support partial tensor, last-element padding
   // based on size
   auto inputTensorOpt = tensorPool_.get(ty);
-  if (!inputTensorOpt.hasValue()) {
+  if (!inputTensorOpt) {
     std::stringstream ss;
     ss << "Tensorpool tensor not found for input " << ptTensor.name();
     return MAKE_ERR(ss.str());
   }
   // We want fresh DeviceResidencyInfo for this fresh Tensor.
-  glow::Tensor inputTensor(std::move(inputTensorOpt.getValue()));
+  glow::Tensor inputTensor(std::move(inputTensorOpt.value()));
   inputTensor.resetDeviceInfo();
   if (ptTensor.data_ptr()) {
     auto *inTensorPtr = inputTensor.getUnsafePtr();
