
Commit

Corrected bug in GPU memory allocations causing an effective memory leak with repeated modelling calls.
LyceanEM committed Oct 17, 2024
1 parent 7b67d10 commit e13f4de
Showing 2 changed files with 5 additions and 0 deletions.
lyceanem/electromagnetics/empropagation.py (2 additions, 0 deletions)
@@ -2208,6 +2208,8 @@ def EMGPUFreqDomain(
     """
     # ctx = cuda.current_context()
     # ctx.reset()
+    #clear GPU memory
+    cuda.current_context().memory_manager.deallocations.clear()
     free_mem, total_mem = cuda.current_context().get_memory_info()
     max_mem = np.ceil(free_mem).astype(np.int64)
     ray_num = full_index.shape[0]
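For context on why the added call helps: Numba's CUDA runtime defers device deallocations, so memory freed by earlier modelling calls can still sit in the pending-deallocation queue when get_memory_info() is queried, and the reported free memory shrinks with every run. Below is a minimal sketch of the pattern the added lines follow, assuming a CUDA-capable GPU and Numba's default memory manager; the loop, buffer size, and dtype are illustrative only and are not LyceanEM code.

import numpy as np
from numba import cuda


def measure_and_allocate(n_values=1_000_000):
    # Flush Numba's deferred deallocation queue so memory released by earlier
    # calls is actually returned to the driver before budgeting.
    cuda.current_context().memory_manager.deallocations.clear()

    # Measure headroom after the flush, mirroring EMGPUFreqDomain.
    free_mem, total_mem = cuda.current_context().get_memory_info()
    max_mem = np.ceil(free_mem).astype(np.int64)

    # Illustrative allocation sized against the measured budget (8 bytes per float64).
    n = int(min(n_values, max_mem // 8))
    d_buffer = cuda.to_device(np.zeros(n, dtype=np.float64))
    return d_buffer.copy_to_host().nbytes


if __name__ == "__main__":
    for _ in range(5):
        # Without the clear() call, buffers from earlier iterations can linger
        # in the queue, so free_mem shrinks run after run.
        measure_and_allocate()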
lyceanem/raycasting/rayfunctions.py (3 additions, 0 deletions)
@@ -2746,6 +2746,8 @@ def workchunkingv2(
         sources.shape[0] * (scattering_points.shape[0] * sinks.shape[0] * (max_scatter))
     )
     # print("Total of {:3.1f} rays required".format(ray_estimate))
+    # Clear GPU memory for simulation
+    cuda.current_context().memory_manager.deallocations.clear()
     # establish memory limits
     free_mem, total_mem = cuda.current_context().get_memory_info()
     max_mem = np.ceil(free_mem * 0.8).astype(np.int64)
@@ -2770,6 +2772,7 @@
     )
     if io_indexing.shape[0] >= ray_limit:
         # need to split the array and process seperatly
+
         sub_io = np.array_split(
             io_indexing, np.ceil(io_indexing.shape[0] / ray_limit).astype(int)
         )
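The rayfunctions.py change applies the same flush just before the memory-based chunking in workchunkingv2: clear the pending deallocations, take a fraction of the refreshed free-memory figure as the budget, and split the ray index array so no chunk exceeds it. The sketch below is a hedged illustration of that splitting logic; bytes_per_ray and the stand-in io_indexing array are assumptions for this example, not values from the library.

import numpy as np
from numba import cuda

# Flush pending deallocations, then take 80% of the reported free memory
# as the working budget, following workchunkingv2.
cuda.current_context().memory_manager.deallocations.clear()
free_mem, total_mem = cuda.current_context().get_memory_info()
max_mem = np.ceil(free_mem * 0.8).astype(np.int64)

bytes_per_ray = 64  # assumed per-ray footprint, purely illustrative
ray_limit = max_mem // bytes_per_ray

io_indexing = np.arange(2_000_000).reshape(-1, 2)  # stand-in ray index array
if io_indexing.shape[0] >= ray_limit:
    # Split the index array so each chunk stays under the memory-derived limit.
    sub_io = np.array_split(
        io_indexing, np.ceil(io_indexing.shape[0] / ray_limit).astype(int)
    )
else:
    sub_io = [io_indexing]

Each element of sub_io can then be processed as a separate GPU launch, keeping per-launch allocations within the measured budget.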
