Commit ab0c51b

Added toeplitz operator (#683)
* Added toeplitz operator
* Renamed `scalar_type` to `value_type`
1 parent c2d9e0c commit ab0c51b
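
The rename is visible anywhere user code reads a tensor's or operator's element type through the old alias. A minimal sketch (not part of the commit; the tensor shape and element type are illustrative) of querying the element type through the new `value_type` alias:

#include "matx.h"
#include <type_traits>

int main() {
  // A rank-2 float tensor; after this commit the element type is exposed
  // as value_type (tensor_t previously also provided scalar_type).
  auto t = matx::make_tensor<float>({4, 4});
  using elem_t = typename decltype(t)::value_type;
  static_assert(std::is_same_v<elem_t, float>, "element type should be float");
  return 0;
}

Code that still refers to `scalar_type` on the types touched here (for example `tensor_impl_t` or the random-operator iterators) will no longer compile and should switch to `value_type`.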

148 files changed (+938 -583 lines)


docs_input/api/creation/tensors/make.rst (+5 -5)

@@ -18,18 +18,18 @@ Return by Value
 .. doxygenfunction:: make_tensor( TensorType &tensor, ShapeType &&shape, matxMemorySpace_t space = MATX_MANAGED_MEMORY, cudaStream_t stream = 0)
 .. doxygenfunction:: make_tensor( TensorType &tensor, matxMemorySpace_t space = MATX_MANAGED_MEMORY, cudaStream_t stream = 0)
 .. doxygenfunction:: make_tensor( T *data, const index_t (&shape)[RANK], bool owning = false)
-.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::scalar_type *data, const index_t (&shape)[TensorType::Rank()], bool owning = false)
+.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::value_type *data, const index_t (&shape)[TensorType::Rank()], bool owning = false)
 .. doxygenfunction:: make_tensor( T *data, ShapeType &&shape, bool owning = false)
-.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::scalar_type *data, typename TensorType::shape_container &&shape, bool owning = false)
-.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::scalar_type *ptr, bool owning = false)
+.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::value_type *data, typename TensorType::shape_container &&shape, bool owning = false)
+.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::value_type *ptr, bool owning = false)
 .. doxygenfunction:: make_tensor( Storage &&s, ShapeType &&shape)
 .. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::storage_type &&s, typename TensorType::shape_container &&shape)
 .. doxygenfunction:: make_tensor( T* const data, D &&desc, bool owning = false)
-.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::scalar_type* const data, typename TensorType::desc_type &&desc, bool owning = false)
+.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::value_type* const data, typename TensorType::desc_type &&desc, bool owning = false)
 .. doxygenfunction:: make_tensor( D &&desc, matxMemorySpace_t space = MATX_MANAGED_MEMORY, cudaStream_t stream = 0)
 .. doxygenfunction:: make_tensor( TensorType &&tensor, typename TensorType::desc_type &&desc, matxMemorySpace_t space = MATX_MANAGED_MEMORY, cudaStream_t stream = 0)
 .. doxygenfunction:: make_tensor( T *const data, const index_t (&shape)[RANK], const index_t (&strides)[RANK], bool owning = false)
-.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::scalar_type *const data, const index_t (&shape)[TensorType::Rank()], const index_t (&strides)[TensorType::Rank()], bool owning = false)
+.. doxygenfunction:: make_tensor( TensorType &tensor, typename TensorType::value_type *const data, const index_t (&shape)[TensorType::Rank()], const index_t (&strides)[TensorType::Rank()], bool owning = false)

 Return by Pointer
 ~~~~~~~~~~~~~~~~~
New file (+33)

@@ -0,0 +1,33 @@
+.. _toeplitz_func:
+
+toeplitz
+========
+
+Generate a toeplitz matrix
+
+`c` represents the first column of the matrix while `r` represents the first row. `c` and `r` must
+have the same first value; if they don't match, the first value from `c` will be used.
+
+Passing a single array/operator as input is equivalent to passing the conjugate of the same
+input as the second parameter.
+
+.. doxygenfunction:: toeplitz(const T (&c)[D])
+.. doxygenfunction:: toeplitz(const Op &c)
+.. doxygenfunction:: toeplitz(const T (&c)[D1], const T (&r)[D2])
+.. doxygenfunction:: toeplitz(const COp &cop, const ROp &rop)
+
+Examples
+~~~~~~~~
+
+.. literalinclude:: ../../../../test/00_operators/OperatorTests.cu
+   :language: cpp
+   :start-after: example-begin toeplitz-test-1
+   :end-before: example-end toeplitz-test-1
+   :dedent:
+
+.. literalinclude:: ../../../../test/00_operators/OperatorTests.cu
+   :language: cpp
+   :start-after: example-begin toeplitz-test-2
+   :end-before: example-end toeplitz-test-2
+   :dedent:
+
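A minimal usage sketch of the operator documented above (not taken from the commit's test file; it assumes MatX's usual `(out = op).run(stream)` execution pattern, and the shapes and values are illustrative):

#include "matx.h"

int main() {
  cudaStream_t stream = 0;

  // 3x3 Toeplitz matrix from its first column {1, 2, 3}. With a single
  // input, the first row is the conjugate of the column (identical here
  // because the type is real).
  auto out = matx::make_tensor<float>({3, 3});
  (out = matx::toeplitz({1.0f, 2.0f, 3.0f})).run(stream);

  // Explicit first column and first row; their first entries must match,
  // otherwise the first value of the column is used.
  auto out2 = matx::make_tensor<float>({3, 4});
  (out2 = matx::toeplitz({1.0f, 2.0f, 3.0f}, {1.0f, 4.0f, 5.0f, 6.0f})).run(stream);

  cudaStreamSynchronize(stream);
  return 0;
}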

docs_input/api/signalimage/general/chirp.rst (+2 -2)

@@ -6,7 +6,7 @@ chirp
 Creates a real chirp signal (swept-frequency cosine)

 .. doxygenfunction:: chirp(index_t num, TimeType last, FreqType f0, TimeType t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)
-.. doxygenfunction:: chirp(SpaceOp t, FreqType f0, typename SpaceOp::scalar_type t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)
+.. doxygenfunction:: chirp(SpaceOp t, FreqType f0, typename SpaceOp::value_type t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)

 Examples
 ~~~~~~~~
@@ -24,7 +24,7 @@ cchirp
 Creates a complex chirp signal (swept-frequency cosine)

 .. doxygenfunction:: cchirp(index_t num, TimeType last, FreqType f0, TimeType t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)
-.. doxygenfunction:: cchirp(SpaceOp t, FreqType f0, typename SpaceOp::scalar_type t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)
+.. doxygenfunction:: cchirp(SpaceOp t, FreqType f0, typename SpaceOp::value_type t1, FreqType f1, ChirpMethod method = ChirpMethod::CHIRP_METHOD_LINEAR)

 Examples
 ~~~~~~~~

docs_input/api/stats/hist/hist.rst (+1 -1)

@@ -5,7 +5,7 @@ hist

 Compute a histogram of input `a` with bounds specified by `upper` and `lower`

-.. doxygenfunction:: hist(const InputOperator &a, const typename InputOperator::scalar_type lower, const typename InputOperator::scalar_type upper)
+.. doxygenfunction:: hist(const InputOperator &a, const typename InputOperator::value_type lower, const typename InputOperator::value_type upper)

 Examples
 ~~~~~~~~

include/matx/core/file_io.h (+1 -1)

@@ -158,7 +158,7 @@ void read_csv(TensorType &t, const std::string fname,
 auto np = pybind11::module_::import("numpy");
 auto obj = np.attr("genfromtxt")("fname"_a = fname.c_str(), "delimiter"_a = delimiter,
 "skip_header"_a = skip_header,
-"dtype"_a = detail::MatXPybind::GetNumpyDtype<typename TensorType::scalar_type>());
+"dtype"_a = detail::MatXPybind::GetNumpyDtype<typename TensorType::value_type>());
 pb->NumpyToTensorView(t, obj);
 }

include/matx/core/iterator.h (+2 -4)

@@ -47,8 +47,7 @@ namespace matx {
 template <typename OperatorType, bool ConvertType = true>
 struct RandomOperatorIterator {
 using self_type = RandomOperatorIterator<OperatorType, ConvertType>;
-using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::scalar_type>, typename OperatorType::scalar_type>;
-using scalar_type = value_type;
+using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::value_type>, typename OperatorType::value_type>;
 // using stride_type = std::conditional_t<is_tensor_view_v<OperatorType>, typename OperatorType::desc_type::stride_type,
 //  index_t>;
 using stride_type = index_t;
@@ -174,8 +173,7 @@ __MATX_INLINE__ __MATX_HOST__ __MATX_DEVICE__ index_t operator-(const RandomOper
 template <typename OperatorType, bool ConvertType = true>
 struct RandomOperatorOutputIterator {
 using self_type = RandomOperatorOutputIterator<OperatorType, ConvertType>;
-using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::scalar_type>, typename OperatorType::scalar_type>;
-using scalar_type = value_type;
+using value_type = typename std::conditional_t<ConvertType, detail::convert_matx_type_t<typename OperatorType::value_type>, typename OperatorType::value_type>;
 // using stride_type = std::conditional_t<is_tensor_view_v<OperatorType>, typename OperatorType::desc_type::stride_type,
 // index_t>;
 using stride_type = index_t;

include/matx/core/make_tensor.h (+15 -15)

@@ -78,7 +78,7 @@ void make_tensor( TensorType &tensor,
 cudaStream_t stream = 0) {
 MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

-auto tmp = make_tensor<typename TensorType::scalar_type, TensorType::Rank()>(shape, space, stream);
+auto tmp = make_tensor<typename TensorType::value_type, TensorType::Rank()>(shape, space, stream);
 tensor.Shallow(tmp);
 }

@@ -166,7 +166,7 @@ auto make_tensor( TensorType &tensor,
 cudaStream_t stream = 0) {
 MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

-auto tmp = make_tensor<typename TensorType::scalar_type, ShapeType>(std::forward<ShapeType>(shape), space, stream);
+auto tmp = make_tensor<typename TensorType::value_type, ShapeType>(std::forward<ShapeType>(shape), space, stream);
 tensor.Shallow(tmp);
 }

@@ -225,7 +225,7 @@ auto make_tensor_p( TensorType &tensor,
 cudaStream_t stream = 0) {
 MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

-auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::shape_container>(std::forward<typename TensorType::shape_container>(shape), space, stream);
+auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::shape_container>(std::forward<typename TensorType::shape_container>(shape), space, stream);
 tensor.Shallow(tmp);
 }

@@ -261,7 +261,7 @@ template <typename TensorType,
 auto make_tensor( TensorType &tensor,
 matxMemorySpace_t space = MATX_MANAGED_MEMORY,
 cudaStream_t stream = 0) {
-auto tmp = make_tensor<typename TensorType::scalar_type>({}, space, stream);
+auto tmp = make_tensor<typename TensorType::value_type>({}, space, stream);
 tensor.Shallow(tmp);
 }

@@ -322,12 +322,12 @@ auto make_tensor( T *data,
 template <typename TensorType,
 std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
 auto make_tensor( TensorType &tensor,
-typename TensorType::scalar_type *data,
+typename TensorType::value_type *data,
 const index_t (&shape)[TensorType::Rank()],
 bool owning = false) {
 MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

-auto tmp = make_tensor<typename TensorType::scalar_type, TensorType::Rank()>(data, shape, owning);
+auto tmp = make_tensor<typename TensorType::value_type, TensorType::Rank()>(data, shape, owning);
 tensor.Shallow(tmp);
 }

@@ -373,12 +373,12 @@ auto make_tensor( T *data,
 template <typename TensorType,
 std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
 auto make_tensor( TensorType &tensor,
-typename TensorType::scalar_type *data,
+typename TensorType::value_type *data,
 typename TensorType::shape_container &&shape,
 bool owning = false) {
 MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

-auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::shape_container>(data, std::forward<typename TensorType::shape_container>(shape), owning);
+auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::shape_container>(data, std::forward<typename TensorType::shape_container>(shape), owning);
 tensor.Shallow(tmp);
 }

@@ -414,9 +414,9 @@ auto make_tensor( T *ptr,
 template <typename TensorType,
 std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
 auto make_tensor( TensorType &tensor,
-typename TensorType::scalar_type *ptr,
+typename TensorType::value_type *ptr,
 bool owning = false) {
-auto tmp = make_tensor<typename TensorType::scalar_type>(ptr, owning);
+auto tmp = make_tensor<typename TensorType::value_type>(ptr, owning);
 tensor.Shallow(tmp);
 }

@@ -534,12 +534,12 @@ auto make_tensor( T* const data,
 template <typename TensorType,
 std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
 auto make_tensor( TensorType &tensor,
-typename TensorType::scalar_type* const data,
+typename TensorType::value_type* const data,
 typename TensorType::desc_type &&desc,
 bool owning = false) {
 MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

-auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::desc_type>(data, std::forward<typename TensorType::desc_type>(desc), owning);
+auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::desc_type>(data, std::forward<typename TensorType::desc_type>(desc), owning);
 tensor.Shallow(tmp);
 }

@@ -585,7 +585,7 @@ auto make_tensor( TensorType &&tensor,
 cudaStream_t stream = 0) {
 MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

-auto tmp = make_tensor<typename TensorType::scalar_type, typename TensorType::desc_type>(std::forward<typename TensorType::desc_type>(desc), space, stream);
+auto tmp = make_tensor<typename TensorType::value_type, typename TensorType::desc_type>(std::forward<typename TensorType::desc_type>(desc), space, stream);
 tensor.Shallow(tmp);
 }

@@ -633,13 +633,13 @@ auto make_tensor( T *const data,
 template <typename TensorType,
 std::enable_if_t<is_tensor_view_v<TensorType>, bool> = true>
 auto make_tensor( TensorType &tensor,
-typename TensorType::scalar_type *const data,
+typename TensorType::value_type *const data,
 const index_t (&shape)[TensorType::Rank()],
 const index_t (&strides)[TensorType::Rank()],
 bool owning = false) {
 MATX_NVTX_START("", matx::MATX_NVTX_LOG_API)

-auto tmp = make_tensor<typename TensorType::scalar_type, TensorType::Rank()>(data, shape, strides, owning);
+auto tmp = make_tensor<typename TensorType::value_type, TensorType::Rank()>(data, shape, strides, owning);
 tensor.Shallow(tmp);
 }
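
A small sketch of one of the overloads above whose data parameter is now spelled `typename TensorType::value_type` (not from the commit; the tensor variable, buffer, and shape are illustrative, and the buffer is host memory so it is only suitable for host-side access):

#include "matx.h"

int main() {
  // Default-constructed tensor to be re-bound to user-provided memory.
  matx::tensor_t<float, 1> t;

  // User-owned buffer; owning = false, so MatX will not free it.
  static float data[4] = {1.f, 2.f, 3.f, 4.f};

  // The data parameter of this overload is typename TensorType::value_type*
  // (it was scalar_type* before this commit).
  matx::make_tensor(t, data, {4}, false);
  return 0;
}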

include/matx/core/operator_utils.h (+6 -6)

@@ -44,13 +44,13 @@ namespace matx {
 if (out.IsContiguous()) {
 if constexpr(ConvertType) {
 return func( in,
-reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<OutputOp>::scalar_type> *>(out.Data()),
+reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<OutputOp>::value_type> *>(out.Data()),
 bi,
 ei);
 }
 else {
 return func( in,
-reinterpret_cast<typename remove_cvref_t<OutputOp>::scalar_type *>(out.Data()),
+reinterpret_cast<typename remove_cvref_t<OutputOp>::value_type *>(out.Data()),
 bi,
 ei);
 }
@@ -70,14 +70,14 @@ namespace matx {
 if constexpr (ConvertType) {
 return ReduceOutput<ConvertType>( std::forward<Func>(func),
 std::forward<OutputOp>(out),
-reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<InputOp>::scalar_type> *>(in_base.Data()),
+reinterpret_cast<detail::convert_matx_type_t<typename remove_cvref_t<InputOp>::value_type> *>(in_base.Data()),
 BeginOffset{in_base},
 EndOffset{in_base});
 }
 else {
 return ReduceOutput<ConvertType>( std::forward<Func>(func),
 std::forward<OutputOp>(out),
-reinterpret_cast<typename remove_cvref_t<InputOp>::scalar_type *>(in_base.Data()),
+reinterpret_cast<typename remove_cvref_t<InputOp>::value_type *>(in_base.Data()),
 BeginOffset{in_base},
 EndOffset{in_base});
 }
@@ -118,9 +118,9 @@ namespace matx {
 namespace detail {
 // Used inside of transforms to allocate temporary output
 template <typename TensorType, typename Executor, typename ShapeType>
-__MATX_HOST__ __MATX_INLINE__ void AllocateTempTensor(TensorType &tensor, Executor &&ex, ShapeType &&shape, typename TensorType::scalar_type **ptr) {
+__MATX_HOST__ __MATX_INLINE__ void AllocateTempTensor(TensorType &tensor, Executor &&ex, ShapeType &&shape, typename TensorType::value_type **ptr) {
 const auto ttl_size = std::accumulate(shape.begin(), shape.end(), static_cast<index_t>(1),
-std::multiplies<index_t>()) * sizeof(typename TensorType::scalar_type);
+std::multiplies<index_t>()) * sizeof(typename TensorType::value_type);
 if constexpr (is_cuda_executor_v<Executor>) {
 matxAlloc((void**)ptr, ttl_size, MATX_ASYNC_DEVICE_MEMORY, ex.getStream());
 make_tensor(tensor, *ptr, shape);

include/matx/core/pybind.h (+6 -6)

@@ -176,7 +176,7 @@ class MatXPybind {
 template <typename TensorType>
 static pybind11::object GetEmptyNumpy(const TensorType &ten)
 {
-using T = typename TensorType::scalar_type;
+using T = typename TensorType::value_type;
 auto np = pybind11::module_::import("numpy");
 pybind11::list dims;

@@ -329,7 +329,7 @@ class MatXPybind {
 void NumpyToTensorView(TensorType ten,
 const pybind11::object &np_ten)
 {
-using T = typename TensorType::scalar_type;
+using T = typename TensorType::value_type;
 constexpr int RANK = TensorType::Rank();
 static_assert(RANK <=5, "NumpyToTensorView only supports max(RANK) = 5 at the moment.");

@@ -377,7 +377,7 @@ class MatXPybind {
 template <typename TensorType>
 auto NumpyToTensorView(const pybind11::object &np_ten)
 {
-using T = typename TensorType::scalar_type;
+using T = typename TensorType::value_type;
 constexpr int RANK = TensorType::Rank();
 using ntype = matx_convert_complex_type<T>;
 auto ften = pybind11::array_t<ntype, pybind11::array::c_style | pybind11::array::forcecast>(np_ten);
@@ -398,7 +398,7 @@ class MatXPybind {

 template <typename TensorType>
 auto TensorViewToNumpy(const TensorType &ten) {
-using tensor_type = typename TensorType::scalar_type;
+using tensor_type = typename TensorType::value_type;
 using ntype = matx_convert_complex_type<tensor_type>;
 constexpr int RANK = TensorType::Rank();

@@ -466,12 +466,12 @@ class MatXPybind {


 template <typename TensorType,
-typename CT = matx_convert_cuda_complex_type<typename TensorType::scalar_type>>
+typename CT = matx_convert_cuda_complex_type<typename TensorType::value_type>>
 std::optional<TestFailResult<CT>>
 CompareOutput(const TensorType &ten,
 const std::string fname, double thresh, bool debug = false)
 {
-using raw_type = typename TensorType::scalar_type;
+using raw_type = typename TensorType::value_type;
 using ntype = matx_convert_complex_type<raw_type>;
 using ctype = matx_convert_cuda_complex_type<raw_type>;
 auto resobj = res_dict[fname.c_str()];

include/matx/core/tensor.h (+1 -1)

@@ -87,7 +87,7 @@ class tensor_t : public detail::tensor_impl_t<T,RANK,Desc> {
 public:
 // Type specifier for reflection on class
 using type = T; ///< Type of traits
-using scalar_type = T; ///< Type of traits
+using value_type = T; ///< Type of traits
 // Type specifier for signaling this is a matx operation or tensor view
 using matxop = bool; ///< Indicate this is a MatX operator
 using matxoplvalue = bool; ///< Indicate this is a MatX operator that can be on the lhs of an equation

include/matx/core/tensor_impl.h (-1)

@@ -71,7 +71,6 @@ class tensor_impl_t {
 public:
 // Type specifier for reflection on class
 using type = T; // TODO is this necessary
-using scalar_type = T;
 using value_type = T;
 using tensor_view = bool;
 using desc_type = Desc;
