
Commit [Draft]
ax3l committed Sep 29, 2023
1 parent 56d5c98 commit cccdd5f
Showing 8 changed files with 187 additions and 68 deletions.
73 changes: 73 additions & 0 deletions src/Base/Array4.H
@@ -0,0 +1,73 @@
/* Copyright 2021-2023 The AMReX Community
*
* Authors: Axel Huebl
* License: BSD-3-Clause-LBNL
*/
#pragma once

#include "pyAMReX.H"

#include <AMReX_Array4.H>
#include <AMReX_BLassert.H>
#include <AMReX_IntVect.H>

#include <cstdint>
#include <sstream>
#include <type_traits>


namespace pyAMReX
{
using namespace amrex;

/** CPU: __array_interface__ v3
*
* https://numpy.org/doc/stable/reference/arrays.interface.html
*/
template<typename T>
py::dict
array_interface(Array4<T> const & a4)
{
auto d = py::dict();
auto const len = length(a4);
// F->C index conversion here
// p[(i-begin.x)+(j-begin.y)*jstride+(k-begin.z)*kstride+n*nstride];
// Buffer dimensions: a zero-size dimension must not be skipped (clamp to 1)
auto shape = py::make_tuple(
py::ssize_t(a4.ncomp),
py::ssize_t(len.z <= 0 ? 1 : len.z),
py::ssize_t(len.y <= 0 ? 1 : len.y),
py::ssize_t(len.x <= 0 ? 1 : len.x) // fastest varying index
);
// buffer protocol strides are in bytes, AMReX strides are elements
auto const strides = py::make_tuple(
py::ssize_t(sizeof(T) * a4.nstride),
py::ssize_t(sizeof(T) * a4.kstride),
py::ssize_t(sizeof(T) * a4.jstride),
py::ssize_t(sizeof(T)) // fastest varying index
);
bool const read_only = false;
d["data"] = py::make_tuple(std::intptr_t(a4.dataPtr()), read_only);
// note: if we want to keep the same global indexing with non-zero
// box small_end as in AMReX, then we can explore playing with
// this offset as well
//d["offset"] = 0; // default
//d["mask"] = py::none(); // default

d["shape"] = shape;
// we could also set this after checking the strides are C-style contiguous:
//if (is_contiguous<T>(shape, strides))
// d["strides"] = py::none(); // C-style contiguous
//else
d["strides"] = strides;

// type description
// for more complicated types, e.g., tuples/structs
//d["descr"] = ...;
// we currently only need this
d["typestr"] = py::format_descriptor<T>::format();

d["version"] = 3;
return d;
}
}
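With this header in place, NumPy can consume an Array4 without copying: np.array(a4, copy=False) reads the pointer, shape, and strides straight from __array_interface__. A minimal consumer-side sketch, assuming a CPU build; amrex.space3d is pyAMReX's 3D module, and the BaseFab_Real constructor arguments are illustrative:

import numpy as np
import amrex.space3d as amr

amr.initialize([])

# an 8x8x8 cell box with one component (illustrative setup)
fab = amr.BaseFab_Real(amr.Box((0, 0, 0), (7, 7, 7)), 1)
a4 = fab.array()

view = np.array(a4, copy=False)  # zero-copy via __array_interface__
print(view.shape)                # (ncomp, z, y, x): C order, x fastest
view[...] = 42.0                 # writes through to the AMReX buffer

amr.finalize()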
83 changes: 24 additions & 59 deletions src/Base/Array4.cpp
@@ -5,70 +5,17 @@
*/
#include "pyAMReX.H"

#include "Array4.H"

#include <AMReX_Array4.H>
#include <AMReX_BLassert.H>
#include <AMReX_GpuContainers.H>
#include <AMReX_IntVect.H>

#include <cstdint>
#include <sstream>
#include <type_traits>


(deleted: the file-local, anonymous-namespace array_interface() helper; it moved to src/Base/Array4.H above, with explicit py::ssize_t casts added to the shape and strides tuples)
#include <vector>


template< typename T >
@@ -150,7 +97,7 @@ void make_Array4(py::module &m, std::string typestr)
// CPU: __array_interface__ v3
// https://numpy.org/doc/stable/reference/arrays.interface.html
.def_property_readonly("__array_interface__", [](Array4<T> const & a4) {
return array_interface(a4);
return pyAMReX::array_interface(a4);
})

// CPU: __array_function__ interface (TODO)
@@ -164,7 +111,7 @@ void make_Array4(py::module &m, std::string typestr)
// Nvidia GPUs: __cuda_array_interface__ v3
// https://numba.readthedocs.io/en/latest/cuda/cuda_array_interface.html
.def_property_readonly("__cuda_array_interface__", [](Array4<T> const & a4) {
auto d = array_interface(a4);
auto d = pyAMReX::array_interface(a4);

// data:
// Because the user of the interface may or may not be in the same context, the most common case is to use cuPointerGetAttribute with CU_POINTER_ATTRIBUTE_DEVICE_POINTER in the CUDA driver API (or the equivalent CUDA Runtime API) to retrieve a device pointer that is usable in the currently active context.
@@ -192,6 +139,24 @@ void make_Array4(py::module &m, std::string typestr)
// https://github.com/dmlc/dlpack/blob/master/include/dlpack/dlpack.h
// https://docs.cupy.dev/en/stable/user_guide/interoperability.html#dlpack-data-exchange-protocol

.def("to_host", [](Array4<T> const & a4) {
// allocate a host-side copy with the same (C-ordered) shape & strides
auto const a4i = pyAMReX::array_interface(a4);
auto const shape = a4i["shape"].cast<std::vector<py::ssize_t>>();
auto const strides = a4i["strides"].cast<std::vector<py::ssize_t>>();
auto h_data = py::array_t<std::remove_cv_t<T>>(
shape, strides
);

// sync copy: the numpy host buffer is unpinned
Gpu::copy(Gpu::deviceToHost,
a4.dataPtr(), a4.dataPtr() + a4.size(),
h_data.mutable_data()
);
return h_data;
}
)

.def("contains", &Array4<T>::contains)
//.def("__contains__", &Array4<T>::contains)
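The new Array4.to_host() complements the zero-copy protocols: on GPU builds, __array_interface__ would hand NumPy a device pointer it cannot dereference, so an explicit staging copy is needed. A usage sketch, assuming a CUDA build and a device-resident Array4 named a4 (setup omitted):

import numpy as np

# explicit device-to-host copy; returns a fresh numpy.ndarray
# shaped (ncomp, z, y, x); synchronous thanks to Gpu::copy
h_arr = a4.to_host()
print(np.sum(h_arr))

# by contrast, np.array(a4, copy=False) on a GPU build would wrap
# a device pointer that host-side NumPy cannot read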
69 changes: 67 additions & 2 deletions src/Base/BaseFab.cpp
@@ -5,6 +5,8 @@
*/
#include "pyAMReX.H"

#include "Array4.H"

#include <AMReX_FArrayBox.H>

#include <istream>
@@ -67,8 +69,71 @@ namespace
// prefetchToDevice
.def("is_allocated", &BaseFab<T>::isAllocated )

//.def("array", &BaseFab<T>::array )
//.def("const_array", &BaseFab<T>::const_array )
.def("array", [](BaseFab<T> & bf)
{ return bf.array(); },
// as long as the return value (argument 0) exists, keep the bf (argument 1) alive
py::keep_alive<0, 1>()
)
.def("const_array", [](BaseFab<T> const & bf)
{ return bf.const_array(); },
// as long as the return value (argument 0) exists, keep the bf (argument 1) alive
py::keep_alive<0, 1>()
)

.def("to_host", [](BaseFab<T> const & bf) {
BaseFab<T> hbf(bf.box(), bf.nComp(), The_Pinned_Arena());
Array4<T> ha = hbf.array();
Gpu::copyAsync(Gpu::deviceToHost,
bf.dataPtr(), bf.dataPtr() + bf.size(),
ha.dataPtr());
Gpu::streamSynchronize();
return hbf;
})

// CPU: __array_interface__ v3
// https://numpy.org/doc/stable/reference/arrays.interface.html
.def_property_readonly("__array_interface__", [](BaseFab<T> & bf) {
return pyAMReX::array_interface(bf.array());
})

// CPU: __array_function__ interface (TODO)
//
// NEP 18 — A dispatch mechanism for NumPy's high level array functions.
// https://numpy.org/neps/nep-0018-array-function-protocol.html
// This enables code written for NumPy to operate directly on Array4 arrays.
// __array_function__ feature requires NumPy 1.16 or later.


// Nvidia GPUs: __cuda_array_interface__ v3
// https://numba.readthedocs.io/en/latest/cuda/cuda_array_interface.html
.def_property_readonly("__cuda_array_interface__", [](BaseFab<T> & bf) {
auto d = pyAMReX::array_interface(bf.array());

// data:
// Because the user of the interface may or may not be in the same context, the most common case is to use cuPointerGetAttribute with CU_POINTER_ATTRIBUTE_DEVICE_POINTER in the CUDA driver API (or the equivalent CUDA Runtime API) to retrieve a device pointer that is usable in the currently active context.
// TODO For zero-size arrays, use 0 here.

// None or integer
// An optional stream upon which synchronization must take place at the point of consumption, either by synchronizing on the stream or enqueuing operations on the data on the given stream. Integer values in this entry are as follows:
// 0: This is disallowed as it would be ambiguous between None and the default stream, and also between the legacy and per-thread default streams. Any use case where 0 might be given should either use None, 1, or 2 instead for clarity.
// 1: The legacy default stream.
// 2: The per-thread default stream.
// Any other integer: a cudaStream_t represented as a Python integer.
// When None, no synchronization is required.
d["stream"] = py::none();

d["version"] = 3;
return d;
})


// TODO: __dlpack__ __dlpack_device__
// DLPack protocol (CPU, NVIDIA GPU, AMD GPU, Intel GPU, etc.)
// https://dmlc.github.io/dlpack/latest/
// https://data-apis.org/array-api/latest/design_topics/data_interchange.html
// https://github.com/data-apis/consortium-feedback/issues/1
// https://github.com/dmlc/dlpack/blob/master/include/dlpack/dlpack.h
// https://docs.cupy.dev/en/stable/user_guide/interoperability.html#dlpack-data-exchange-protocol

// getVal
// setVal
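Because stream is None in the exported __cuda_array_interface__, consumers such as CuPy may use the pointer without additional stream synchronization. A zero-copy sketch, assuming a CUDA build and an allocated device BaseFab named fab (illustrative):

import cupy as cp

# zero-copy: CuPy reads pointer/shape/strides from __cuda_array_interface__
d_view = cp.asarray(fab.array())
d_view[...] = 1.0        # executes on the device

# explicit staging: returns a new BaseFab in pinned host memory
# (copyAsync + streamSynchronize under the hood)
h_fab = fab.to_host()

The keep_alive<0, 1> policy on array() keeps fab alive for as long as the returned Array4 exists; the consumer is expected to hold a reference to its source for the view's lifetime.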
2 changes: 1 addition & 1 deletion src/Base/MultiFab.cpp
@@ -137,7 +137,7 @@ void init_MultiFab(py::module &m)
.def("const_array", [](FabArray<FArrayBox> & fa, MFIter const & mfi)
{ return fa.const_array(mfi); },
// as long as the return value (argument 0) exists, keep the fa (argument 1) alive
py::keep_alive<0, 1>()
)

.def_static("saxpy", py::overload_cast< FabArray<FArrayBox> &, Real, FabArray<FArrayBox> const &, int, int, int, IntVect const & >(&FabArray<FArrayBox>::template Saxpy<FArrayBox>)
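As in the BaseFab bindings, keep_alive<0, 1> ties the lifetime of the FabArray to each Array4 handed out, which is what makes it safe to hold views past the iterator body. A sketch of the pattern this enables, assuming a CPU build and an existing MultiFab named mf (illustrative):

import numpy as np

views = []
for mfi in mf:
    # keep_alive<0, 1>: each Array4 holds mf alive, and the NumPy
    # view in turn holds the Array4 alive via its base object
    views.append(np.array(mf.array(mfi), copy=False))

for v in views:
    v *= 2.0  # in-place update of the MultiFab's data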
7 changes: 3 additions & 4 deletions src/Base/PODVector.cpp
@@ -74,13 +74,12 @@ void make_PODVector(py::module &m, std::string typestr, std::string allocstr)
.def("reserve", &PODVector_type::reserve)
.def("shrink_to_fit", &PODVector_type::shrink_to_fit)
.def("to_host", [](PODVector_type const & pv) {
-    PODVector<T, std::allocator<T>> h_data(pv.size());
-    //py::array_t<T> h_data(pv.size());
-    amrex::Gpu::copy(amrex::Gpu::deviceToHost,
+    PODVector<T, amrex::PinnedArenaAllocator<T>> h_data(pv.size());
+    amrex::Gpu::copyAsync(amrex::Gpu::deviceToHost,
         pv.begin(), pv.end(),
         h_data.begin()
-        //h_data.ptr()
     );
+    Gpu::streamSynchronize();
     return h_data;
})

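The switch from std::allocator to PinnedArenaAllocator is what makes the copyAsync + streamSynchronize pair sound: device-to-host copies only run truly asynchronously into pinned host memory, and a single synchronize then guarantees the data has landed. A usage sketch, assuming a GPU build; the class name PODVector_real_arena follows pyAMReX's PODVector_<type>_<allocator> naming and is illustrative:

import amrex.space3d as amr

d_vec = amr.PODVector_real_arena()   # device (Arena) storage
for x in (1.0, 2.0, 3.0):
    d_vec.push_back(x)

# synchronized device-to-host copy into pinned host memory
h_vec = d_vec.to_host()
print([h_vec[i] for i in range(h_vec.size())])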
15 changes: 13 additions & 2 deletions src/amrex/Array4.py
@@ -34,10 +34,21 @@ def array4_to_numpy(self, copy=False, order="F"):
"""
import numpy as np

if copy:
# This supports a device-to-host copy.
#
# todo: validate that the lifetime of the to_host() returned
# object is always managed correctly by Python's GC;
# otherwise, copy a second time via copy=True:
# data = np.array(self.to_host(), copy=False)
data = self.to_host()
else:
data = np.array(self, copy=False)

if order == "F":
return np.array(self, copy=copy).T
return data.T
elif order == "C":
return np.array(self, copy=copy)
return data
else:
raise ValueError("The order argument must be F or C.")

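Since the order="F" path just transposes the C-ordered data, both orders address the same buffer with reversed axes. A small sketch of the intended semantics, assuming a host-side Array4 named a4 and that array4_to_numpy is attached as the to_numpy method, as pyAMReX does for its pure-Python helpers:

c = a4.to_numpy(copy=False, order="C")  # shape (ncomp, z, y, x)
f = a4.to_numpy(copy=False, order="F")  # transposed view: (x, y, z, ncomp)

assert f.shape == c.shape[::-1]
assert f[1, 0, 0, 0] == c[0, 0, 0, 1]   # same element, reversed indices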
2 changes: 2 additions & 0 deletions tests/conftest.py
@@ -37,6 +37,8 @@ def amrex_init(tmpdir):
"amrex.signal_handling=0",
# abort GPU runs if out-of-memory instead of swapping to host RAM
# "abort_on_out_of_gpu_memory=1",
# do not rely on implicit host-device memory transfers
"amrex.the_arena_is_managed=0",
]
)
yield
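amrex.the_arena_is_managed=0 turns off managed (unified) memory for The_Arena, so device data never migrates to the host behind the tests' backs; a missing explicit copy surfaces as a hard error instead of a silent slowdown. A sketch of the same initialization outside of pytest, using only the two options from this conftest:

import amrex.space3d as amr

amr.initialize([
    "amrex.signal_handling=0",
    # unmanaged arena: no implicit host <-> device migration
    "amrex.the_arena_is_managed=0",
])
try:
    pass  # code under test must copy explicitly (to_host, to_numpy(copy=True))
finally:
    amr.finalize()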
4 changes: 4 additions & 0 deletions tests/test_multifab.py
@@ -355,6 +355,10 @@ def test_mfab_dtoh_copy(make_mfab_device):
local_boxes_host = mfab_device.to_numpy(copy=True)
assert max([np.max(box) for box in local_boxes_host]) == device_max

# numpy bindings (w/ copy)
for mfi in mfab_device:
marr = mfab_device.array(mfi).to_numpy(copy=True)

[CodeQL code-scanning note on this hunk: local variable marr is assigned but never used.]

# cupy bindings (w/o copy)
import cupy as cp

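For contrast with the copy=True loop above, the zero-copy path goes through __cuda_array_interface__; a sketch of what such a check can look like (an illustration, not the elided test body):

for mfi in mfab_device:
    d_arr = cp.asarray(mfab_device.array(mfi))  # device view, no copy
    assert cp.max(d_arr) <= device_max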
