Commit ad8aef0

malfet authored and pytorchmergebot committed
[BE] [3/N] Use nested namespaces (pytorch#110314)
Mostly in torch/csrc/jit/runtime and in `ATen/cuda/`.

Pull Request resolved: pytorch#110314
Approved by: https://github.com/seemethere
1 parent 8745d2d commit ad8aef0
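
The change itself is mechanical: C++17 nested namespace definitions (`namespace A::B { ... }`) replace the older style of opening each level separately. A minimal sketch of the before/after shape; the declaration shown is illustrative, not copied from the commit, and both spellings declare exactly the same entity:

// Before C++17: every namespace level is opened and closed on its own.
namespace at {
namespace caching {
bool is_cached_tensor(const at::Tensor& t); // illustrative declaration
} // namespace caching
} // namespace at

// C++17 nested namespace definition: one declaration, one closing brace.
namespace at::caching {
bool is_cached_tensor(const at::Tensor& t); // same entity as above
} // namespace at::caching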

File tree

151 files changed: +317, -652 lines


aten/src/ATen/CachedTensorUtils.cpp

Lines changed: 2 additions & 4 deletions
@@ -3,8 +3,7 @@
 
 #include <c10/util/flat_hash_map.h>
 
-namespace at {
-namespace caching {
+namespace at::caching {
 
 
 using weakref_type = c10::weak_intrusive_ptr<TensorImpl, UndefinedTensorImpl>;
@@ -45,5 +44,4 @@ size_t adjusted_use_count(const at::Tensor& t) {
   return t.use_count() - (is_cached_tensor(t) ? 1 : 0);
 }
 
-}
-}
+} // namespace at::caching

aten/src/ATen/CachedTensorUtils.h

Lines changed: 2 additions & 4 deletions
@@ -2,8 +2,7 @@
 
 #include <ATen/ATen.h>
 
-namespace at {
-namespace caching {
+namespace at::caching {
 
 // Some systems (just cudagraphs currently) will persist a static tensor output
 // whose TensorImpl does not change across iterations. For these tensors caching
@@ -22,5 +21,4 @@ TORCH_API void set_cached_tensors_enabled(bool enable);
 // count of tensors with hooks.
 TORCH_API size_t adjusted_use_count(const at::Tensor& t);
 
-} // namespace caching
-} // namespace at
+} // namespace at::caching

aten/src/ATen/EmptyTensor.cpp

Lines changed: 2 additions & 3 deletions
@@ -6,8 +6,7 @@
 
 #include <limits>
 
-namespace at {
-namespace detail {
+namespace at::detail {
 namespace {
 c10::Allocator* GetCPUAllocatorMaybePinned(bool pin_memory) {
   if (pin_memory) {
@@ -441,4 +440,4 @@ TensorBase empty_strided_symint_meta(
       options.pinned_memory_opt());
 }
 
-}} // namespace at::detail
+} // namespace at::detail

aten/src/ATen/EmptyTensor.h

Lines changed: 2 additions & 4 deletions
@@ -1,8 +1,7 @@
 #pragma once
 #include <ATen/core/TensorBase.h>
 
-namespace at {
-namespace detail {
+namespace at::detail {
 
 inline void check_size_nonnegative(ArrayRef<int64_t> size) {
   for (const auto& x : size) {
@@ -158,5 +157,4 @@ TORCH_API TensorBase empty_strided_symint_meta(
     SymIntArrayRef stride,
     const TensorOptions& options);
 
-} // namespace detail
-} // namespace at
+} // namespace at::detail

aten/src/ATen/ExpandUtils.cpp

Lines changed: 1 addition & 1 deletion
@@ -9,7 +9,7 @@ namespace internal {
 TensorBase expand_slow_path(const TensorBase &self, IntArrayRef size) {
   return OptionalTensorRef(self)->expand(size);
 }
-}
+} // namespace internal
 
 namespace {
 // NOTE: are_expandable did a similar check, please keep them sync if change is needed

aten/src/ATen/FuncTorchTLS.cpp

Lines changed: 2 additions & 2 deletions
@@ -1,6 +1,6 @@
 #include <ATen/FuncTorchTLS.h>
 
-namespace at { namespace functorch {
+namespace at::functorch {
 
 namespace {
 
@@ -28,4 +28,4 @@ std::unique_ptr<FuncTorchTLSBase>& functorchTLSAccessor() {
 }
 
 
-}}
+} // namespace at::functorch

aten/src/ATen/FuncTorchTLS.h

Lines changed: 2 additions & 4 deletions
@@ -3,8 +3,7 @@
 #include <c10/macros/Macros.h>
 #include <memory>
 
-namespace at {
-namespace functorch {
+namespace at::functorch {
 
 // NOTE [functorch TLS in pytorch/pytorch]
 //
@@ -44,5 +43,4 @@ TORCH_API void setFuncTorchTLS(
 // get a mutable reference to the functorch tls
 TORCH_API std::unique_ptr<FuncTorchTLSBase>& functorchTLSAccessor();
 
-} // namespace functorch
-} // namespace at
+} // namespace at::functorch

aten/src/ATen/FunctionalInverses.cpp

Lines changed: 2 additions & 4 deletions
@@ -6,8 +6,7 @@
 #include <ATen/WrapDimUtilsMulti.h>
 
 #include <utility>
-namespace at {
-namespace functionalization {
+namespace at::functionalization {
 
 // This logic is similar to autograd code for view backwards calls.
 // We can't easily share it though, because (eventually) these functions
@@ -348,5 +347,4 @@ Tensor FunctionalInverses::alias_copy_inverse(const Tensor& base, const Tensor&
   }
 }
 
-} // functionalization
-} // at
+} // namespace at::functionalization

aten/src/ATen/FunctionalStorageImpl.cpp

Lines changed: 2 additions & 4 deletions
@@ -6,8 +6,7 @@
 #include <c10/util/Exception.h>
 #include <vector>
 
-namespace at {
-namespace functionalization {
+namespace at::functionalization {
 
 ViewMeta ViewMeta::to_out_idx(int64_t out_idx) {
   if (out_idx == this->out_index) return *this;
@@ -122,5 +121,4 @@ bool FunctionalStorageImpl::apply_updates() {
   return any_updates;
 }
 
-} // namespace functionalization
-} // namespace at
+} // namespace at::functionalization

aten/src/ATen/FunctionalStorageImpl.h

Lines changed: 2 additions & 4 deletions
@@ -2,8 +2,7 @@
 
 #include <ATen/Tensor.h>
 
-namespace at {
-namespace functionalization {
+namespace at::functionalization {
 
 // See Note [Functionalization Pass In Core]
 
@@ -117,5 +116,4 @@ struct TORCH_API FunctionalStorageImpl : public c10::StorageImpl {
   bool frozen_ = false;
 };
 
-} // namespace functionalization
-} // namespace at
+} // namespace at::functionalization
