Skip to content

Commit ad8aef0

Browse files
malfet authored and pytorchmergebot committed
[BE] [3/N] Use nested namespaces (pytorch#110314)
Mostly in torch/csrc/jit/runtime and in `ATen/cuda/` Pull Request resolved: pytorch#110314 Approved by: https://github.com/seemethere
1 parent 8745d2d commit ad8aef0

File tree

151 files changed

+317
-652
lines changed

Some content is hidden

Large Commits have some content hidden by default. Use the searchbox below for content that may be hidden.

151 files changed

+317
-652
lines changed

aten/src/ATen/CachedTensorUtils.cpp

+2-4
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,7 @@
33

44
#include <c10/util/flat_hash_map.h>
55

6-
namespace at {
7-
namespace caching {
6+
namespace at::caching {
87

98

109
using weakref_type = c10::weak_intrusive_ptr<TensorImpl, UndefinedTensorImpl>;
@@ -45,5 +44,4 @@ size_t adjusted_use_count(const at::Tensor& t) {
4544
return t.use_count() - (is_cached_tensor(t) ? 1 : 0);
4645
}
4746

48-
}
49-
}
47+
} // namespace at::caching

aten/src/ATen/CachedTensorUtils.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22

33
#include <ATen/ATen.h>
44

5-
namespace at {
6-
namespace caching {
5+
namespace at::caching {
76

87
// Some systems (just cudagraphs currently) will persist a static tensor output
98
// whose TensorImpl does not change across iterations. For these tensors caching
@@ -22,5 +21,4 @@ TORCH_API void set_cached_tensors_enabled(bool enable);
2221
// count of tensors with hooks.
2322
TORCH_API size_t adjusted_use_count(const at::Tensor& t);
2423

25-
} // namespace caching
26-
} // namespace at
24+
} // namespace at::caching

aten/src/ATen/EmptyTensor.cpp

+2-3
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,7 @@
66

77
#include <limits>
88

9-
namespace at {
10-
namespace detail {
9+
namespace at::detail {
1110
namespace {
1211
c10::Allocator* GetCPUAllocatorMaybePinned(bool pin_memory) {
1312
if (pin_memory) {
@@ -441,4 +440,4 @@ TensorBase empty_strided_symint_meta(
441440
options.pinned_memory_opt());
442441
}
443442

444-
}} // namespace at::detail
443+
} // namespace at::detail

aten/src/ATen/EmptyTensor.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,7 @@
11
#pragma once
22
#include <ATen/core/TensorBase.h>
33

4-
namespace at {
5-
namespace detail {
4+
namespace at::detail {
65

76
inline void check_size_nonnegative(ArrayRef<int64_t> size) {
87
for (const auto& x : size) {
@@ -158,5 +157,4 @@ TORCH_API TensorBase empty_strided_symint_meta(
158157
SymIntArrayRef stride,
159158
const TensorOptions& options);
160159

161-
} // namespace detail
162-
} // namespace at
160+
} // namespace at::detail

aten/src/ATen/ExpandUtils.cpp

+1-1
Original file line numberDiff line numberDiff line change
@@ -9,7 +9,7 @@ namespace internal {
99
TensorBase expand_slow_path(const TensorBase &self, IntArrayRef size) {
1010
return OptionalTensorRef(self)->expand(size);
1111
}
12-
}
12+
} // namespace internal
1313

1414
namespace {
1515
// NOTE: are_expandable did a similar check, please keep them sync if change is needed

aten/src/ATen/FuncTorchTLS.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
#include <ATen/FuncTorchTLS.h>
22

3-
namespace at { namespace functorch {
3+
namespace at::functorch {
44

55
namespace {
66

@@ -28,4 +28,4 @@ std::unique_ptr<FuncTorchTLSBase>& functorchTLSAccessor() {
2828
}
2929

3030

31-
}}
31+
} // namespace at::functorch

aten/src/ATen/FuncTorchTLS.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,7 @@
33
#include <c10/macros/Macros.h>
44
#include <memory>
55

6-
namespace at {
7-
namespace functorch {
6+
namespace at::functorch {
87

98
// NOTE [functorch TLS in pytorch/pytorch]
109
//
@@ -44,5 +43,4 @@ TORCH_API void setFuncTorchTLS(
4443
// get a mutable reference to the functorch tls
4544
TORCH_API std::unique_ptr<FuncTorchTLSBase>& functorchTLSAccessor();
4645

47-
} // namespace functorch
48-
} // namespace at
46+
} // namespace at::functorch

aten/src/ATen/FunctionalInverses.cpp

+2-4
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,7 @@
66
#include <ATen/WrapDimUtilsMulti.h>
77

88
#include <utility>
9-
namespace at {
10-
namespace functionalization {
9+
namespace at::functionalization {
1110

1211
// This logic is similar to autograd code for view backwards calls.
1312
// We can't easily share it though, because (eventually) these functions
@@ -348,5 +347,4 @@ Tensor FunctionalInverses::alias_copy_inverse(const Tensor& base, const Tensor&
348347
}
349348
}
350349

351-
} // functionalization
352-
} // at
350+
} // namespace at::functionalization

aten/src/ATen/FunctionalStorageImpl.cpp

+2-4
Original file line numberDiff line numberDiff line change
@@ -6,8 +6,7 @@
66
#include <c10/util/Exception.h>
77
#include <vector>
88

9-
namespace at {
10-
namespace functionalization {
9+
namespace at::functionalization {
1110

1211
ViewMeta ViewMeta::to_out_idx(int64_t out_idx) {
1312
if (out_idx == this->out_index) return *this;
@@ -122,5 +121,4 @@ bool FunctionalStorageImpl::apply_updates() {
122121
return any_updates;
123122
}
124123

125-
} // namespace functionalization
126-
} // namespace at
124+
} // namespace at::functionalization

aten/src/ATen/FunctionalStorageImpl.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22

33
#include <ATen/Tensor.h>
44

5-
namespace at {
6-
namespace functionalization {
5+
namespace at::functionalization {
76

87
// See Note [Functionalization Pass In Core]
98

@@ -117,5 +116,4 @@ struct TORCH_API FunctionalStorageImpl : public c10::StorageImpl {
117116
bool frozen_ = false;
118117
};
119118

120-
} // namespace functionalization
121-
} // namespace at
119+
} // namespace at::functionalization

aten/src/ATen/LegacyVmapMode.cpp

+2-4
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#include <ATen/LegacyVmapMode.h>
22

3-
namespace at {
4-
namespace impl {
3+
namespace at::impl {
54

65
thread_local int64_t VmapMode_current_vmap_level = 0;
76

@@ -24,5 +23,4 @@ int64_t VmapMode::decrement_nesting() {
2423
}
2524
return VmapMode_current_vmap_level;
2625
}
27-
} // namespace impl
28-
} // namespace at
26+
} // namespace at::impl

aten/src/ATen/LegacyVmapMode.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22

33
#include <c10/core/impl/LocalDispatchKeySet.h>
44

5-
namespace at {
6-
namespace impl {
5+
namespace at::impl {
76

87
// VmapMode contains a thread local count of how many nested vmaps
98
// we are currently inside. That number is known as the `vmap level`.
@@ -24,5 +23,4 @@ struct TORCH_API VmapMode {
2423
static int64_t decrement_nesting();
2524
};
2625

27-
} // namespace impl
28-
} // namespace at
26+
} // namespace at::impl

aten/src/ATen/NestedTensorImpl.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -10,8 +10,7 @@
1010
#include <c10/util/Metaprogramming.h>
1111
#include <c10/util/irange.h>
1212

13-
namespace at {
14-
namespace native {
13+
namespace at::native {
1514
struct NestedTensorImpl;
1615
inline bool nested_tensor_impl_is_contiguous(const NestedTensorImpl* nt);
1716
int64_t get_numel_from_nested_size_tensor(const at::Tensor& tensor);
@@ -276,5 +275,4 @@ inline const at::Tensor& get_nested_sizes(const at::Tensor& tensor) {
276275
return get_nested_tensor_impl(tensor)->get_nested_sizes();
277276
}
278277

279-
} // namespace native
280-
} // namespace at
278+
} // namespace at::native

aten/src/ATen/ParallelNative.h

+2-5
Original file line numberDiff line numberDiff line change
@@ -8,15 +8,12 @@
88

99
#define INTRA_OP_PARALLEL
1010

11-
namespace at {
12-
namespace internal {
11+
namespace at::internal {
1312

1413
TORCH_API void invoke_parallel(
1514
const int64_t begin,
1615
const int64_t end,
1716
const int64_t grain_size,
1817
const std::function<void(int64_t, int64_t)>& f);
1918

20-
} // namespace internal
21-
22-
} // namespace at
19+
} // namespace at::internal

aten/src/ATen/ParallelNativeTBB.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -15,8 +15,7 @@
1515

1616
#define INTRA_OP_PARALLEL
1717

18-
namespace at {
19-
namespace internal {
18+
namespace at::internal {
2019

2120
template <typename F>
2221
inline void invoke_parallel(
@@ -50,5 +49,4 @@ inline void invoke_parallel(
5049
}
5150
}
5251

53-
} // namespace internal
54-
} // namespace at
52+
} // namespace at::internal

aten/src/ATen/PythonTorchFunctionTLS.cpp

+2-4
Original file line numberDiff line numberDiff line change
@@ -1,8 +1,7 @@
11
#include <ATen/PythonTorchFunctionTLS.h>
22
#include <c10/core/TensorImpl.h>
33

4-
namespace at {
5-
namespace impl {
4+
namespace at::impl {
65

76
static thread_local PythonTorchFunctionTLS pythonTorchFunctionState;
87

@@ -47,5 +46,4 @@ bool torch_function_mode_enabled() {
4746
PythonTorchFunctionTLS::stack_len() > 0;
4847
}
4948

50-
} // namespace impl
51-
} // namespace at
49+
} // namespace at::impl

aten/src/ATen/PythonTorchFunctionTLS.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -3,8 +3,7 @@
33
#include <c10/core/SafePyObject.h>
44
#include <c10/macros/Macros.h>
55

6-
namespace at {
7-
namespace impl {
6+
namespace at::impl {
87

98
enum TorchFunctionDisabledState { ENABLED, SUBCLASSES_DISABLED, ALL_DISABLED };
109

@@ -32,5 +31,4 @@ struct TORCH_API PythonTorchFunctionTLS {
3231

3332
TORCH_API bool torch_function_mode_enabled();
3433

35-
} // namespace impl
36-
} // namespace at
34+
} // namespace at::impl

aten/src/ATen/ScalarOps.h

+4-8
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,7 @@
99
#include <ATen/ops/scalar_tensor.h>
1010
#endif
1111

12-
namespace at {
13-
namespace detail {
12+
namespace at::detail {
1413
// When filling a number to 1-element CPU tensor, we want to skip
1514
// everything but manipulate data ptr directly.
1615
// Ideally this fast pass should be implemented in TensorIterator,
@@ -21,8 +20,7 @@ TORCH_API Tensor scalar_tensor_static(
2120
const Scalar& s,
2221
c10::optional<ScalarType> dtype_opt,
2322
c10::optional<Device> device_opt);
24-
} // namespace detail
25-
} // namespace at
23+
} // namespace at::detail
2624

2725
// This is in the c10 namespace because we use ADL to find the functions in it.
2826
namespace c10 {
@@ -60,8 +58,7 @@ inline at::Tensor scalar_to_tensor(
6058

6159
} // namespace c10
6260

63-
namespace at {
64-
namespace native {
61+
namespace at::native {
6562

6663
inline Tensor wrapped_scalar_tensor(
6764
const Scalar& scalar,
@@ -71,5 +68,4 @@ inline Tensor wrapped_scalar_tensor(
7168
return tensor;
7269
}
7370

74-
} // namespace native
75-
} // namespace at
71+
} // namespace at::native

aten/src/ATen/SequenceNumber.cpp

+2-4
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,6 @@
11
#include <ATen/SequenceNumber.h>
22

3-
namespace at {
4-
namespace sequence_number {
3+
namespace at::sequence_number {
54

65
namespace {
76
thread_local uint64_t sequence_nr_ = 0;
@@ -15,5 +14,4 @@ uint64_t get_and_increment() {
1514
return sequence_nr_++;
1615
}
1716

18-
} // namespace sequence_number
19-
} // namespace at
17+
} // namespace at::sequence_number

aten/src/ATen/SequenceNumber.h

+2-5
Original file line numberDiff line numberDiff line change
@@ -3,14 +3,11 @@
33
#include <c10/macros/Export.h>
44
#include <cstdint>
55

6-
namespace at {
7-
86
// A simple thread local enumeration, used to link forward and backward pass
97
// ops and is used by autograd and observers framework
10-
namespace sequence_number {
8+
namespace at::sequence_number {
119

1210
TORCH_API uint64_t peek();
1311
TORCH_API uint64_t get_and_increment();
1412

15-
} // namespace sequence_number
16-
} // namespace at
13+
} // namespace at::sequence_number

aten/src/ATen/TensorIndexing.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -22,8 +22,7 @@
2222

2323
#include <utility>
2424

25-
namespace at {
26-
namespace indexing {
25+
namespace at::indexing {
2726

2827
const int64_t INDEX_MIN = c10::SymInt::min_representable_int();
2928
const int64_t INDEX_MAX = -(INDEX_MIN + 1);
@@ -728,5 +727,4 @@ static inline void set_item(
728727
return;
729728
}
730729

731-
} // namespace indexing
732-
} // namespace at
730+
} // namespace at::indexing

aten/src/ATen/TensorNames.cpp

+2-2
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
#include <ATen/WrapDimUtils.h>
33
#include <c10/util/irange.h>
44

5-
namespace at { namespace namedinference {
5+
namespace at::namedinference {
66

77

88
Dimname TensorName::toDimname() const {
@@ -126,4 +126,4 @@ std::vector<Dimname> TensorNames::toDimnameVec() const {
126126
}
127127

128128

129-
}} // namespace at::namedinference
129+
} // namespace at::namedinference

aten/src/ATen/TensorNames.h

+2-4
Original file line numberDiff line numberDiff line change
@@ -2,8 +2,7 @@
22

33
#include <ATen/WrapDimUtils.h>
44

5-
namespace at {
6-
namespace namedinference {
5+
namespace at::namedinference {
76

87
// TensorName and TensorNames are wrappers around Dimname and DimnameList
98
// that contain helper functions to make writing name inference rules easier.
@@ -71,5 +70,4 @@ struct TORCH_API TensorNames {
7170
TensorNameVec names_;
7271
};
7372

74-
} // namespace namedinference
75-
} // namespace at
73+
} // namespace at::namedinference

0 commit comments

Comments (0)